blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
080850b8e98bccd0346df091c3f44f79b41cfb52
|
5b7a0942ce5cbeaed035098223207b446704fb66
|
/man/lsAPI.Rd
|
9eed417d8ad533f24c69ed486007e2b51f957f50
|
[
"MIT"
] |
permissive
|
k127/LimeRick
|
4f3bcc8c2204c5c67968d0822b558c29bb5392aa
|
a4d634981f5de5afa5b5e3bee72cf6acd284c92a
|
refs/heads/master
| 2023-04-11T21:56:54.854494
| 2020-06-19T18:36:05
| 2020-06-19T18:36:05
| 271,702,292
| 0
| 1
| null | 2020-06-12T03:45:14
| 2020-06-12T03:45:14
| null |
UTF-8
|
R
| false
| true
| 772
|
rd
|
lsAPI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lsAPI.R
\name{lsAPI}
\alias{lsAPI}
\title{Low level API calls}
\usage{
lsAPI(method, params = NULL, lsAPIurl = getOption("lsAPIurl"))
}
\arguments{
\item{method}{The API method name}
\item{params}{A list of \code{method}'s parameters (make sure to keep the element order as specified in API doc)}
\item{lsAPIurl}{\emph{(optional)} The API URL}
}
\value{
Whatever the API responds to this method specific request
}
\description{
Fire a request against the \emph{LimeSurvey RemoteControl 2} JSON-RPC API.
}
\examples{
\dontrun{
lsAPI("get_summary", params = list(iSurveyID = "123456", sStatName = "all"))
}
}
\references{
\url{https://api.limesurvey.org/classes/remotecontrol_handle.html}
}
|
776e13ab668ee97d8ef7ab8511c628ede3a3b833
|
64718fae9f573e3d56f81500ba1466b47c8441c0
|
/Importing and Cleaning Data/readExcel.R
|
e94bf760494d23b4ab75b1187a2e7213aee9c968
|
[] |
no_license
|
sanswons/Datacamp-courses
|
4576366727a9ccda93bc82e5bbe5e89c9fd2062e
|
080442379be67dad1de4b8f9a30866ab05cbd38e
|
refs/heads/master
| 2020-12-31T00:19:38.221742
| 2016-02-24T09:36:46
| 2016-02-24T09:36:46
| 50,502,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
readExcel.R
|
# Load the readxl package
library(readxl)

# Path to the Excel workbook used throughout this exercise (Windows path).
xlsx_path <- "F:/sanjana/books/CSE/Machine Learning/Data Camp/Practice/Datacamp-courses/Importing and Cleaning Data/latitude.xlsx"

# Find the names of both spreadsheets: sheets
# (idiomatic R uses `<-` for assignment rather than `=`)
sheets <- excel_sheets(xlsx_path)
# Print sheets
sheets
# Find out the class of the sheets vector (a character vector of sheet names)
class(sheets)
# The readxl package is already loaded
# Read the first sheet of latitude.xlsx: latitude_1
# Read the second sheet of latitude.xlsx: latitude_2
# Put latitude_1 and latitude_2 in a list: lat_list
# Display the structure of lat_list
|
89366bb7301ca73a69e1048dd16c964f07a00ed8
|
e99928f515a755bf448e12e08dd616918356cbfb
|
/Ragusa2018b/source/data.R
|
bd1efc622e5fdf6add9a0a8ae238d11985e0310b
|
[] |
no_license
|
jragusa/Publications
|
8c6056dd5c7cd67214c0d3ac43202f6b92208773
|
006007a4f7a5770c4b2c92e786d28ad46d1f79d2
|
refs/heads/master
| 2022-06-28T05:32:39.031277
| 2022-06-18T15:36:51
| 2022-06-18T15:36:51
| 88,986,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,334
|
r
|
data.R
|
####
# author: Jérémy Ragusa
# date: 2018-05-22
# description: prepare dataset for Ragusa2018b
####
# library ####
library(cowplot)
library(dplyr)
library(factoextra)
library(ggplot2)
# library(ggtern)
library(magrittr)
library(matrixStats) # colCounts for GrainSize
library(compositions)
# library(reshape2)
library(tidyr)
library(viridis)
source("source/function.R")
# load datasets ####
# point-count composition table and Mutti facies table; "#" lines in the
# CSVs are skipped via comment.char
composition <- read.csv("data/composition.csv", header = TRUE, comment.char = "#")
mutti <- read.csv("data/mutti.csv", header = TRUE, comment.char = "#")
# classification diagrams
# NOTE(review): absolute path to another local repository — not portable.
ProvenanceTernary <- read.csv("/home/jeremy/UniGE/Git/Provenance/data/ProvenanceTernary.csv", header = TRUE, comment.char = "#")
# reshape dataframe ####
# `framework` starts as NULL and is promoted to a list by the first `$<-`;
# it collects the raw, clr-transformed and percentage versions of the data.
framework <- NULL
# aggregate the detailed point-count categories into 14 broader classes
framework$raw <- composition %>%
transmute(sample,
location = site,
unit,
QFm = Qms + Kor + Kan + Kpg + Kmi + Kp + Ps + Ptw + Pmy + As + Atw + Qps + QpT,
QFr = Qrv + Qrm + Qrg + Krv + Krg + Krm + Prv + Prg + Prm + Arg + Arv + Arm,
L = Lssc + Lcsf + Lcs + Lss + Lsp + Lch + Lchb + Lva + Lvf + Lvg + Lmf1 + Lmf2 + Lmf3 + Lmf4 + Lmp1 + Lmp2 + Lmp3 + Lmp4 + Lmb1,
Lc = Lcmi + Lcmif + Lcmir + Lcmic + Lcmich,
C,
D = Ap + Gr + Hb + Px + Rt + St + Zr,
M = Mb + Mm + Mrg + Mrm + Chl,
Glt = Gl,
Other = Op + Si,
Ph = Ph + Fph,
Fc,
RA = AR,
Bc = Bz + Sh + Das + Ech + Md + Oo + Ra + Se,
Pore)
# add Gazzi-Dickinson-style totals (Qm, F, Lt, Ltc) to the original table
composition <- composition %>%
mutate(Qm = Qms,
F = Kor + Kan + Kpg + Kmi + Kp + Ps + Ptw + Pmy + As + Atw,
Lt = Lssc + Lcsf + Lcs + Lss + Lsp + Lch + Lchb + Lva + Lvf + Lvg + Lmf1 + Lmf2 + Lmf3 + Lmf4 + Lmp1 + Lmp2 + Lmp3 + Lmp4 + Lmb1 + Qps + QpT,
Ltc = Lt + Lcmi + Lcmif + Lcmir + Lcmic + Lcmich)
# replace NA values by 0.001 ####
# zeros and NAs are replaced by a small value so clr() below is defined
framework$raw[is.na(framework$raw)] <- 0.001
framework$raw[framework$raw==0] <- 0.001
# shift member to formation ####
framework$raw$unit[framework$raw$unit == "AS"] <- "VS" # Allinges Sandstone Mb
framework$raw$unit[framework$raw$unit == "FS"] <- "VS" # Fenalet Sandstone
framework$raw$unit <- factor(framework$raw$unit, levels = c("VS", "VC", "BM", "BS", "Gu")) # stratigraphic order
# clr transformation ####
# columns 1:3 are identifiers (sample/location/unit); 4:17 the 14 classes
framework$clr <- cbind(framework$raw[, c(1:3)], clr(framework$raw[, c(4:17)]))
# percentage ####
list.cluster <- c("QFm", "QFr", "L", "Lc", "C", "D", "M", "Glt", "Other", "Ph", "Fc", "RA", "Bc", "Pore")
framework$percent <- cbind(framework$raw[, c(1:3)],framework$raw[list.cluster]/rowSums(framework$raw[, c(4:17)]) * 100)
# cluster analyse ####
# Ward clustering on euclidean distances of the clr-transformed classes
row.names(framework$clr) <- framework$clr$sample
fit <- hclust(dist(framework$clr[, -c(1:3)], method = "euclidean"), method = "ward.D")
cluster <- cutree(fit, k = 5)
cluster <- as.data.frame(cluster)
framework$raw$cluster <- cluster$cluster
# manual correction of the cluster distribution
# map arbitrary cluster ids (1..5) onto named lithofacies, plus one
# hand-assigned sample (JR238 -> L6)
framework$raw$lithofacies[framework$raw$cluster == 1] <- "L3"
framework$raw$lithofacies[framework$raw$cluster == 2] <- "L2"
framework$raw$lithofacies[framework$raw$cluster == 3] <- "L5"
framework$raw$lithofacies[framework$raw$cluster == 4] <- "L4"
framework$raw$lithofacies[framework$raw$cluster == 5] <- "L1"
framework$raw$lithofacies[framework$raw$sample == "JR238"] <- "L6"
# drop the temporary `cluster` column (column 18: 3 ids + 14 classes + cluster)
framework$raw <- framework$raw[,-18]
framework$clr$lithofacies <- framework$raw$lithofacies
framework$percent$lithofacies <- framework$raw$lithofacies
rm(cluster)
# add grain-size and Mutti dataset ####
# NOTE(review): `diameter` is presumably created by GrainSize.R — verify there
source("source/GrainSize.R")
framework$raw <- merge(framework$raw, diameter$moment[,-6], by = "sample", all.x = TRUE)
framework$percent <- merge(framework$percent, diameter$moment[, -6], by = "sample", all.x = TRUE)
framework$clr <- merge(framework$clr, diameter$moment[,-6], by = "sample", all.x = TRUE)
# add lithofacies and unit to Mutti facies ####
mutti <- mutti[,-5] %>%
na.omit() %>%
merge(framework$raw[,c("sample","unit","lithofacies")], by = "sample")
# add counting grains ####
framework$raw %>%
mutate(dickinson = QFm + QFr + L + Lc + D + M,
total = dickinson + C + Glt + Other + Ph + Fc + RA + Bc + Pore,
diff = total - dickinson) -> framework$raw
# export electronic supplementary material ####
write.csv(framework$raw, "reports/ESM_1.csv", row.names = FALSE)
|
5ba1d03bf2203f83192528a778f2a66a590a71bb
|
6cf4e77cb8a08649c133a0477ca72c38724f8677
|
/R/cyr.R
|
dd580638216e6b6415c7c8a569ba0370f10a5264
|
[
"MIT"
] |
permissive
|
nemochina2008/cyr
|
ac1fa3d4e5466693b3837d581778a0614734e0d9
|
8cad3410708305ecdfa964c680dac72436353915
|
refs/heads/master
| 2021-06-15T07:24:03.733087
| 2017-04-20T20:27:29
| 2017-04-20T20:27:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,372
|
r
|
cyr.R
|
# Port on which the local Cytoscape REST (cyREST) service listens.
port_number <- '1234'
# Base URL of the cyREST v1 API on localhost.
cytoscape_url <- paste("http://localhost:", port_number, "/v1", sep="")
# Collection endpoint for networks.
cytoscape_network_url <- paste(cytoscape_url, 'networks', sep = "/")
# Table names used by the Cytoscape data model.
table_type_node <- 'defaultnode'
table_type_edge <- 'defaultedge'
table_type_network <- 'defaultnetwork'
# Frequently used table column names.
table_column_selected <- 'selected'
table_column_shared_name <- 'shared name'
table_column_suid <- 'SUID'
# Perform an HTTP GET against `url` and parse the JSON response body.
getRequest <- function (url) {
  resp <- httr::GET(url)
  body_text <- httr::content(resp, "text", encoding = 'utf-8')
  jsonlite::fromJSON(body_text)
}
# Perform an HTTP POST with a JSON-encoded `body` and parse the JSON response.
postRequest <- function (url, body) {
  resp <- httr::POST(url, body = body, encode = 'json')
  body_text <- httr::content(resp, "text", encoding = 'utf-8')
  jsonlite::fromJSON(body_text)
}
# Perform an HTTP PUT of a JSON-encoded `body`; returns the raw httr response.
# NOTE(review): the name is misspelled ("putRequesst"); kept as-is because the
# other functions in this file call it under this exact name.
putRequesst <- function (url, body) {
httr::PUT(url, body = body, encode = 'json')
}
#' List the names of all networks currently loaded in Cytoscape.
#' @export
cytoscape.networks.list <- function () {
  endpoint <- paste(cytoscape_url, 'networks.names', sep = "/")
  getRequest(endpoint)
}
#' List the node-table rows of a network, optionally reduced to summary columns.
#' @export
cytoscape.networks.nodes.list <- function (network_id, only_summary = TRUE, summary_using_columns = c('name', 'SUID')) {
  rows_url <- paste(cytoscape_network_url, network_id, 'tables', table_type_node, 'rows', sep = "/")
  all_nodes <- getRequest(rows_url)
  want_summary <- only_summary && !is.null(summary_using_columns) && length(summary_using_columns) > 0
  if (!want_summary) {
    return(all_nodes)
  }
  all_nodes[, summary_using_columns]
}
#' Retrieve a single node of a network by its SUID.
#' @export
cytoscape.network.nodes.get <- function (network_id, node_id) {
  node_url <- paste(cytoscape_network_url, network_id, 'nodes', node_id, sep = "/")
  getRequest(node_url)
}
#' List the neighbors of a node within a network.
#' @export
cytoscape.networks.nodes.neighbors.list <- function (network_id, node_id) {
  neighbors_url <- paste(cytoscape_network_url, network_id, 'nodes', node_id, 'neighbors', sep = "/")
  getRequest(neighbors_url)
}
#' List the currently selected nodes of a network.
#'
#' @param network_id Network SUID.
#' @param only_summary If TRUE, keep only `summary_using_columns`.
#' @param summary_using_columns Columns returned in the summary view.
#' @return A data frame of the selected node-table rows.
#' @export
cytoscape.networks.nodes.selected.list <- function (network_id, only_summary = TRUE, summary_using_columns = c('name', 'SUID')) {
  all_nodes <- getRequest(paste(cytoscape_network_url, network_id, 'tables', table_type_node, 'rows', sep = "/"))
  # Extract the logical column with `[[` (a vector). The original
  # `all_nodes[col] == TRUE` built a one-column matrix, and NA entries in
  # `selected` produced all-NA rows in the result; `%in% TRUE` treats NA as
  # not selected.
  selected <- all_nodes[[table_column_selected]] %in% TRUE
  if (only_summary && !is.null(summary_using_columns) && length(summary_using_columns) > 0) {
    all_nodes[selected, summary_using_columns]
  } else {
    all_nodes[selected, ]
  }
}
#' Select the given nodes (matched by shared name) in a network.
#'
#' @param network_id Network SUID.
#' @param node_name_list Character vector of node names to select.
#' @return The httr response, invisibly.
#' @export
cytoscape.networks.nodes.selected.set <- function (network_id, node_name_list) {
  selected <- rep(TRUE, length(node_name_list))
  df <- data.frame('name' = node_name_list, selected)
  request_body <- list('key' = table_column_shared_name, 'dataKey' = 'name', 'data' = df)
  # auto_unbox = TRUE serializes length-1 values as JSON scalars
  # (spelled out TRUE instead of the reassignable shorthand T)
  response <- putRequesst(paste(cytoscape_network_url, network_id, 'tables', table_type_node, sep = "/"),
                          jsonlite::toJSON(request_body, auto_unbox = TRUE))
  invisible(response)
}
#' Select the given nodes (matched by SUID) in a network.
#'
#' @param network_id Network SUID.
#' @param node_id_list Vector of node SUIDs to select.
#' @return The httr response, invisibly.
#' @export
cytoscape.networks.nodes.selected.set_by_id <- function (network_id, node_id_list) {
  selected <- rep(TRUE, length(node_id_list))
  df <- data.frame('SUID' = node_id_list, selected)
  request_body <- list('key' = table_column_suid, 'dataKey' = 'SUID', 'data' = df)
  # auto_unbox = TRUE serializes length-1 values as JSON scalars
  # (spelled out TRUE instead of the reassignable shorthand T)
  response <- putRequesst(paste(cytoscape_network_url, network_id, 'tables', table_type_node, sep = "/"),
                          jsonlite::toJSON(request_body, auto_unbox = TRUE))
  invisible(response)
}
#' List the edge-table rows of a network, optionally reduced to summary columns.
#' @export
cytoscape.networks.edges.list <- function (network_id, only_summary = TRUE, summary_using_columns = c('name', 'SUID')) {
  rows_url <- paste(cytoscape_network_url, network_id, 'tables', table_type_edge, 'rows', sep = "/")
  all_edges <- getRequest(rows_url)
  want_summary <- only_summary && !is.null(summary_using_columns) && length(summary_using_columns) > 0
  if (!want_summary) {
    return(all_edges)
  }
  all_edges[, summary_using_columns]
}
#' List the currently selected edges of a network.
#'
#' @param network_id Network SUID.
#' @param only_summary If TRUE, keep only `summary_using_columns`.
#' @param summary_using_columns Columns returned in the summary view.
#' @return A data frame of the selected edge-table rows.
#' @export
cytoscape.networks.edges.selected.list <- function (network_id, only_summary = TRUE, summary_using_columns = c('name', 'SUID')) {
  all_edges <- getRequest(paste(cytoscape_network_url, network_id, 'tables', table_type_edge, 'rows', sep = "/"))
  # Extract the logical column with `[[` (a vector). The original
  # `all_edges[col] == TRUE` built a one-column matrix, and NA entries in
  # `selected` produced all-NA rows in the result; `%in% TRUE` treats NA as
  # not selected.
  selected <- all_edges[[table_column_selected]] %in% TRUE
  if (only_summary && !is.null(summary_using_columns) && length(summary_using_columns) > 0) {
    all_edges[selected, summary_using_columns]
  } else {
    all_edges[selected, ]
  }
}
#' Select the given edges (matched by SUID) in a network.
#'
#' @param network_id Network SUID.
#' @param edge_id_list Vector of edge SUIDs to select.
#' @return The httr response, invisibly.
#' @export
cytoscape.networks.edges.selected.set <- function (network_id, edge_id_list) {
  selected <- rep(TRUE, length(edge_id_list))
  df <- data.frame('SUID' = edge_id_list, selected)
  request_body <- list('key' = table_column_suid, 'dataKey' = 'SUID', 'data' = df)
  # auto_unbox = TRUE serializes length-1 values as JSON scalars
  # (spelled out TRUE instead of the reassignable shorthand T)
  response <- putRequesst(paste(cytoscape_network_url, network_id, 'tables', table_type_edge, sep = "/"),
                          jsonlite::toJSON(request_body, auto_unbox = TRUE))
  invisible(response)
}
|
442bf8b95cbfd5b8f8aa87636e83b1a6e0256f61
|
18720a0366eddff4bf0a68874b749dedf2c5df31
|
/activity7/activity7_script.R
|
151f53060a5832e1e26a8e8a954a99b750e58dcf
|
[] |
no_license
|
cschwartz1/GEOG331
|
d1edf08ac1f2f4041d03a4e0edd94ad833cdb234
|
079fdbe56e79a31675de0cb723f0a32b0e9aaabf
|
refs/heads/master
| 2020-12-20T10:20:37.143814
| 2020-06-30T20:39:30
| 2020-06-30T20:39:30
| 236,040,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,029
|
r
|
activity7_script.R
|
# activity7_script.R — supervised land-cover classification of Sentinel-2
# imagery over Oneida Lake: read 9 spectral bands + a cloud-probability
# mask, mask clouds, sample training/validation points per land-cover
# class, then fit and compare a random forest and a neural network.
#loading necessary packages
library(raster)
library(sp)
library(rgdal)
library(rgeos)
library(mapview)
library(caret)
library(randomForest)
library(nnet)
#set up directory for oneida data folder
dirR<- "/Users/carlyschwartz/Documents/Colgate/ColgateRound4/GEOG331/activity7/oneida"
#read in Sentinel data
rdatB2 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B02_20m.tif"))
rdatB3 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B03_20m.tif"))
rdatB4 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B04_20m.tif"))
rdatB5 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B05_20m.tif"))
rdatB6 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B06_20m.tif"))
rdatB7 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B07_20m.tif"))
rdatB8 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B08_20m.tif"))
rdatB11 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B11_20m.tif"))
rdatB12 <- raster(paste0(dirR,"/sentinel/T18TVN_20190814T154911_B12_20m.tif"))
clouds <- raster(paste0(dirR,"/sentinel/MSK_CLDPRB_20m.tif"))
#read in validation data
#here verbose=FALSE hiddes
algae <- readOGR(paste0(dirR,"/Oneida/algae.shp"), verbose=FALSE)
agri <- readOGR(paste0(dirR,"/Oneida/agriculture.shp"), verbose=FALSE)
built <- readOGR(paste0(dirR,"/Oneida/built.shp"), verbose=FALSE)
forest <- readOGR(paste0(dirR,"/Oneida/forest.shp"), verbose=FALSE)
water <- readOGR(paste0(dirR,"/Oneida/water.shp"), verbose=FALSE)
wetlands <- readOGR(paste0(dirR,"/Oneida/wetlands.shp"), verbose=FALSE)
#stack red green blue
rgbS <- stack(rdatB4,rdatB3,rdatB2)
#stack all raster data
#band order here fixes layer indices used below (clouds is layer 10)
allbands <- stack(rdatB2,rdatB3,rdatB4,rdatB5,rdatB6,rdatB7, rdatB8,rdatB11, rdatB12,clouds)
#view raster, maximum digital is around 20000 so set scale to that
plotRGB(rgbS, scale=20000)
#adding linear contrast stretch
plotRGB(rgbS, scale=20000, stretch="lin")
#use mapview package
#view rgb and set up a contrast stretch, exclude clouds with high values
viewRGB(rgbS,r=1,g=2,b=3,maxpixels = 2297430, #view all pixels don' lower resolution
quantiles = c(0.00,0.995), #quantilesfor stretch. Cuts off high reflectance from clouds
homebutton=FALSE,
viewer.suppress=FALSE)#view in Rstudio
#use mapview package
#view rgb and set up a contrast stretch, exclude clouds with high values
#and view all landcover types
viewRGB(rgbS,r=1,g=2,b=3,maxpixels = 2297430,quantiles = c(0.00,0.995), homebutton=FALSE,
viewer.suppress=FALSE)+mapview(algae, color="grey25",col.regions="palegreen")+
mapview(agri, color="grey25",col.regions="violet")+
mapview(built, color="grey25",col.regions="darkgoldenrod3")+
mapview(forest, color="grey25",col.regions="tan4")+
mapview(water, color="grey25",col.regions="royalblue")+mapview(wetlands, color="grey25",col.regions="orangered2")
#cloud layer varies from 0-100
#represents % probability of an area that is cloud
plot(allbands[[10]])
#filter out all values in rasters in cloud area above 60% - change to NA
#set clouds to NA
allbandsCloud <- list()
for(i in 1:9){
allbandsCloud[[i]] <- setValues(allbands[[i]],
ifelse(getValues(allbands[[10]])>60,NA,getValues(allbands[[i]])))
}
allbandsCloudf <- stack(allbandsCloud[[1]],allbandsCloud[[2]],allbandsCloud[[3]],
allbandsCloud[[4]],allbandsCloud[[5]],allbandsCloud[[6]],
allbandsCloud[[7]],allbandsCloud[[8]],allbandsCloud[[9]])
#view all layers
plot(allbandsCloudf)
plotRGB(allbandsCloudf,r=4, g=3, b=2,
scale=10000,
stretch="lin",
margins=TRUE,
colNA="grey50")
#setting up training and validation data
#set seed so samples always the same
set.seed(12153)
#sample function randomly selects numbers from discrete sequence of values
#randomly select
#each class has 120 points; 60 are held out for validation
algSamp <- sort(sample(seq(1,120),60))
#set up vector for data type
algData <- rep("train",120)
#randomly replace half of the data to be validating data
algData[algSamp] <- "valid"
waterSamp <- sort(sample(seq(1,120),60))
#set up vector for data type
waterData <- rep("train",120)
#randomly replace half of the data to be validating data
waterData[waterSamp] <- "valid"
agriSamp <- sort(sample(seq(1,120),60))
#set up vector for data type
agriData <- rep("train",120)
#randomly replace half of the data to be validating data
agriData[agriSamp] <- "valid"
builtSamp <- sort(sample(seq(1,120),60))
#set up vector for data type
builtData <- rep("train",120)
#randomly replace half of the data to be validating data
builtData[builtSamp] <- "valid"
forestSamp <- sort(sample(seq(1,120),60))
#set up vector for data type
forestData <- rep("train",120)
#randomly replace half of the data to be validating data
forestData[forestSamp] <- "valid"
wetlandsSamp <- sort(sample(seq(1,120),60))
#set up vector for data type
wetlandsData <- rep("train",120)
#randomly replace half of the data to be validating data
wetlandsData[wetlandsSamp] <- "valid"
#create id table that gives each landcover an ID
landclass <- data.frame(landcID= seq(1,6),
landcover = c("algal bloom", "open water","agriculture","built","forest","wetlands"))
#set up table with coordinates and data type (validate or train) for each point
#NOTE(review): the order of the coordinate vectors below must match the
#landcID order above (algae=1, water=2, agri=3, built=4, forest=5, wetlands=6)
landExtract <- data.frame(landcID = rep(seq(1,6),each=120),
sampleType=c(algData,waterData,agriData,builtData,forestData, wetlandsData),
x=c(algae@coords[,1],water@coords[,1],agri@coords[,1],built@coords[,1],forest@coords[,1],wetlands@coords[,1] ),
y=c(algae@coords[,2],water@coords[,2],agri@coords[,2],built@coords[,2],forest@coords[,2],wetlands@coords[,2] ))
#extract raster data at each point
#using point coordinates
rasterEx <- data.frame(extract(allbandsCloudf,landExtract[,3:4]))
#give names of bands
colnames(rasterEx) <- c("B2","B3","B4","B5","B6","B7","B8","B11","B12")
#combine point information with raster information
dataAll <- cbind(landExtract,rasterEx)
#preview
head(dataAll)
#separate training and validation data
trainD <- dataAll[dataAll$sampleType == "train",]
validD <- dataAll[dataAll$sampleType == "valid",]
#random forest
#Kfold cross validation
tc <- trainControl(method = "repeatedcv", # repeated cross-validation of the training data
number = 10, # number 10 fold
repeats = 10) # number of repeats
###random forests
#Typically square root of number of variables
rf.grid <- expand.grid(mtry=1:sqrt(9)) # number of variables available for splitting at each tree node
# Train the random forest model to the Sentinel-2 data
#note that caret:: will make sure we use train from the caret package
#NOTE(review): caret::train's argument is `trControl`, not `trainControl`;
#as written `trainControl = tc` is likely swallowed by `...` and the
#repeated-CV settings above are ignored — verify and rename if so.
rf_model <- caret::train(x = trainD[,c(5:13)], #digital number data
y = as.factor(trainD$landcID), #land class we want to predict
method = "rf", #use random forest
metric="Accuracy", #assess by accuracy
trainControl = tc, #use parameter tuning method
tuneGrid = rf.grid) #parameter tuning grid
#check output
rf_model
# Change name in raster stack to match training data
names(allbandsCloudf) <- c("B2","B3","B4","B5","B6","B7","B8","B11","B12")
# Apply the random forest model to the Sentinel-2 data
rf_prediction <- raster::predict(allbandsCloudf, model=rf_model)
#view predictions
plot(rf_prediction)
#landcover class names
landclass
#set up categorical colors
landclass$cols <-c("#a6d854","#8da0cb","#66c2a5",
"#fc8d62","#ffffb3","#ffd92f")
#make plot and hide legend
plot(rf_prediction,
breaks=seq(0,6),
col=landclass$cols ,
legend=FALSE, axes=FALSE)
legend("bottomleft", paste(landclass$landcover),
fill=landclass$cols ,bty="n")
mtext("Random Forest", side=3,cex=2, line=-5)
#get validation data from raster by extracting
#cell values at the cell coordinates
rf_Eval <- extract(rf_prediction, validD[,3:4])
#make the confusion matrix
rf_errorM <- confusionMatrix(as.factor(rf_Eval),as.factor(validD$landcID))
#add landcover names
colnames(rf_errorM$table) <- landclass$landcover
rownames(rf_errorM$table) <- landclass$landcover
#view the matrix
rf_errorM$table
#look at the overall accuracy
rf_errorM$overall
#Neural Network
#compare random forest predictions to neural networks
#set up grid
nnet.grid <- expand.grid(size = seq(from = 16, to = 28, by = 2), # number of neurons units in the hidden layer
decay = seq(from = 0.1, to = 0.6, by = 0.1)) # regularization parameter to avoid over-fitting
#now train the model
#NOTE(review): same `trainControl` vs `trControl` concern as above
nnet_model <- caret::train(x = trainD[,c(5:13)], y = as.factor(trainD$landcID),
method = "nnet", metric="Accuracy", trainControl = tc, tuneGrid = nnet.grid,
trace=FALSE)
nnet_model
#make predictions for the entire image
# Apply the neural network model to the Sentinel-2 data
nnet_prediction <- raster::predict(allbandsCloudf, model=nnet_model)
#make plot and hide legend
plot(nnet_prediction,
breaks=seq(0,6),
col=landclass$cols ,
legend=FALSE)
legend("bottomleft", paste(landclass$landcover),
fill=landclass$cols ,bty="n")
mtext("Neural network", side=3,cex=2, line=-5)
#check model predictions
#extract predictions
nn_Eval = extract(nnet_prediction, validD[,3:4])
#confusion matrix
nn_errorM = confusionMatrix(as.factor(nn_Eval),as.factor(validD$landcID))
colnames(nn_errorM$table) <- landclass$landcover
rownames(nn_errorM$table) <- landclass$landcover
nn_errorM$table
nn_errorM$overall
#compare the two maps (random forest vs neural network)
par(mfrow=c(2,1), mai=c(0,0,0,0))
#random forest
plot(rf_prediction,
breaks=seq(0,6),
col=landclass$cols ,
legend=FALSE)
#legend
legend("bottomleft", paste(landclass$landcover),
fill=landclass$cols ,bty="n")
#add title
mtext("Random Forest", side=3,cex=2, line=-5)
#neural network
plot(nnet_prediction,
breaks=seq(0,6),
col=landclass$cols ,
legend=FALSE, axes=FALSE)
#add legend
legend("bottomleft", paste(landclass$landcover),
fill=landclass$cols, bty="n")
#add title
mtext("Neural network", side=3,cex=2, line=-5)
#Analyzing predictions of land cover
#cell count neural net
freq(nnet_prediction)
#cell count random forest
freq(rf_prediction)
###comparing differences between methods
###QUESTION 5###
#raster that shows whether neural network and random forest predictions agree or disagree
#first idea: subtract the two and see where values are not 0
#those that are 0 are areas that agree, other values do not agree
par(mfrow=c(1,1))
predictions<-nnet_prediction-rf_prediction
#difference of class ids is in [-5, 5]; only the 0 bin (agreement) is green
breakpoints<-c(-5.5, -4.5, -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5)
colors_predict<-c("red", "red", "red", "red", "red", "green","red", "red", "red", "red", "red")
plot(predictions, breaks=breakpoints, col=colors_predict, legend=FALSE)
legend("bottomleft", paste(c("Agree","Disagree")),
fill=c("green","red"), bty="n")
|
64eaa169be9d1c4c6cfe81b38d68e2e0a55439a6
|
36a25a9052d14520300e7f5613730a3a9606a8c9
|
/Generator/kf_ekf.R
|
a452fef3ae643f28bcc679da62702913a29d3c2c
|
[] |
no_license
|
cyrulnic/NoStRa
|
20fbe84dd2c3a7f43bc8e9c39bc025d35c0e50c9
|
83e9776158503fbdf5b5a23aa7a23c5ead53691f
|
refs/heads/master
| 2020-03-29T04:09:25.821312
| 2019-09-18T13:23:15
| 2019-09-18T13:23:15
| 149,518,116
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,644
|
r
|
kf_ekf.R
|
# Kalman filter / extended Kalman filter driver for the DSADM and
# Lorenz-2005 models; see the Args block below for the full contract.
KF_EKF = function(ntime_filter, n, dt, stride, ind_obs_space, ind_time_anls, Rem,
UU, rrho, nnu, ssigma,
F_Lorenz, J_Lorenz, sd_noise,
R_diag, m, OBS,
X_flt_start, A_start,
model_type, filter_type,
predict_BB_KF){
#-----------------------------------------------------------------------------------
# KF/EKF
#
# Note that the "anls-fcst" cycle starts here from an timestep-0 field X_flt_start
# and one cycle is {(i) fcst, (ii) anls}.
#
# Args:
#
# ntime_filter - nu of ANLS (filter) time steps
# (onefilter time step is
# a multiple (stride) of the model time step)
# n - dim-ty of the state vector x
# dt - MODEL time step (atmospheric time), sec.
# stride - nu of model time steps between consecutive analyses
# ind_obs_space - vector of indices of the state vector, where (in space) OBS are present
# ind_time_anls - model time steps at which the anls is to be performed
# (= 1, 1 + stride, 1 + 2*stride, ...)
# Rem - Earth radius, m
# UU, rrho, nnu, ssigma - scnd flds for DSADM
# F_Lorenz, J_Lorenz, sd_noise - Lorenz-2005 params
#
# R_diag - diagonal (a vector) of the obs-err CVM
# m - distance between adjacent obs in space (in grid meshes, integer)
# OBS - obs at ALL MODEL time steps at the obs locations defined by ind_obs_space
# X_flt_start - at time step 1, the fcst is to be started from X_flt_start
# A_start - the imposed "anls-err CVM" at the virtual time step 0
# model_type = "DSADM" or "Lorenz05" or "Lorenz05lin"
# filter_type - "KF" or "EKF"
# NB: with model_type="Lorenz05", only filter_type ="EKF" is acceptable
# predict_BB_KF - compute & return prior filtering cvms (TRUE/FALSE)?
#
# return: XXf, XXa, BB_KF (at the anls times only) and B_mean, A_mean.
# NB: BB_KF are computed only if predict_BB_KF=TRUE.
#
# A Rakitko,
# M Tsyrulnikov (current code owner)
# June 2018
#-----------------------------------------------------------------------------------
# grid mesh size on the circle of radius Rem
h = 2*pi*Rem/n
ntime_model = ntime_filter * stride
# forecast and analysis state histories at every MODEL time step
XXf = matrix(NA, nrow = n, ncol = ntime_model)
XXa = matrix(NA, nrow = n, ncol = ntime_model)
# running sums for the time-mean fcst-/anls-err CVMs (divided at the end)
B_mean = matrix(0, nrow = n, ncol = n)
A_mean = matrix(0, nrow = n, ncol = n)
if(predict_BB_KF) {
BB = array(NA, c(n, n, ntime_filter))
}else{
BB = array(NA, c(n, n, 1)) # select a dummy array to save space
}
#AA <- array(NA,c(n, n, ntime_model))
n_obs=length(ind_obs_space) # number of obs
H = matrix(0, nrow=n_obs, ncol=n) # obs oprt
for (i in 1:n_obs){
H[i, ind_obs_space[i]] = 1
}
R=diag(R_diag) # obs-err CVM
Xa = X_flt_start # the 1st fcst starts at time step 0 from this field
A = A_start
eps=1e-9 # EKF: Jacobian assessment through finite differences:
# scale down anls-CVM columns to reach the linear regime 1e-7..1e-9 are ok
#---------------------------------------------------
# Checks
if(model_type != "DSADM" & model_type != "Lorenz05" & model_type != "Lorenz05lin"){
print(model_type)
stop("KF_EKF: wrong model_type")
}
if(filter_type != "KF" & filter_type != "EKF"){
print(filter_type)
stop("KF_EKF: wrong filter_type")
}
if(model_type == "Lorenz05" & filter_type == "KF"){
print(model_type)
print(filter_type)
stop("KF_EKF: wrong filter_type/model_type pair")
}
if(model_type == "Lorenz05lin" & filter_type == "EKF"){
print(model_type)
print(filter_type)
stop("KF_EKF: wrong filter_type/model_type pair")
}
#---------------------------------------------------
# Lorenz: From atmospheric time to Lorenz time
if(model_type == "Lorenz05" | model_type == "Lorenz05lin") {
dt_atm_h = dt /3600
dt_Lorenz = dt_atm_h/6*0.05 # unitless, "Lorenz time"
# (6h atmospheric time ~ 0.05 Lorenz time units)
Q_Lorenz=sd_noise^2 * diag(n) # Lorenz's Q
}
Q_DSADM_diag = ssigma^2 /h *dt # model-error variances per model time step
#---------------------------------------------------
# main loop over MODEL time steps
i_filter=0
for(i in 1:ntime_model){
# (1) Fcst
# (1.1) run deterministic fcst started from the previous anls
if(model_type == "DSADM"){
N_det=1
XXf[,i] = dsadm_step(Xa, n, N_det, dt, UU[,i], rrho[,i], nnu[,i], ssigma[,i], Rem, forcing = FALSE)
}else if(model_type == "Lorenz05"){
XXf[,i] = lorenz05_step(Xa, n, dt_Lorenz, F_Lorenz, J_Lorenz, rep(0,n))
}else if(model_type == "Lorenz05lin"){
# NOTE(review): X_ref is neither an argument nor defined locally —
# presumably a global reference trajectory. It is also spelled `Xref`
# in the covariance branch below; one of the two is likely a typo.
XXf[,i] = lorenz05lin_step(Xa, X_ref, n, dt_Lorenz, F_Lorenz, J_Lorenz, rep(0,n))
}
# (1.2) fcst covs
if(model_type == "DSADM"){
# In the implicit DSADM time integration scheme,
# model error is added Before the mdl oprt is applied ==>
CVM_forcing=diag( Q_DSADM_diag[,i] )
AQ = A + CVM_forcing # A is the previous-cycle anls-err CVM
# NB: CVM_forcing is not exactly Q
# B = F * AQ * F^T
# (1) PHI := F * AQ
# (2) B = PHI * F^T = (F * PHI^T)^T = F * PHI^T
if(filter_type == "KF"){
#PHI = apply( AQ, 2, function(x) dsadm_step(x, n, 1, dt, UU[,i], rrho[,i], nnu[,i], ssigma[,i],
# Rem, forcing = FALSE) )
#B = apply( t(PHI),2, function(x) dsadm_step(x, n, 1, dt, UU[,i], rrho[,i], nnu[,i], ssigma[,i],
# Rem, forcing = FALSE) )
PHI = dsadm_step(AQ, n, n, dt, UU[,i], rrho[,i], nnu[,i], ssigma[,i], Rem, forcing = FALSE)
B = dsadm_step(t(PHI), n, n, dt, UU[,i], rrho[,i], nnu[,i], ssigma[,i], Rem, forcing = FALSE)
}else if(filter_type == "EKF"){ # for testing only
PHI = apply( AQ, 2,
function(x) ApplyJacobian_fd( dsadm_step, Xa, XXf[,i], x, eps,
n, n, dt, UU[,i], rrho[,i], nnu[,i], ssigma[,i],
Rem, forcing = FALSE) )
B = apply( t(PHI), 2,
function(x) ApplyJacobian_fd( dsadm_step, Xa, XXf[,i], x, eps,
n, n, dt, UU[,i], rrho[,i], nnu[,i], ssigma[,i],
Rem, forcing = FALSE) )
}
}else if(model_type == "Lorenz05"){
# In the Lorenz model, system noise is added after the fcst ==>
# # B = F * A * F^T + Q
# (1) PHI := F * AQ
# (2) B = PHI * F^T = (F * PHI^T)^T = F * PHI^T
PHI = apply( A, 2,
function(x) ApplyJacobian_fd(lorenz05_step, Xa, XXf[,i], x, eps,
n, dt_Lorenz, F_Lorenz, J_Lorenz, rep(0,n)) )
P = apply( t(PHI), 2,
function(x) ApplyJacobian_fd(lorenz05_step, Xa, XXf[,i], x, eps,
n, dt_Lorenz, F_Lorenz, J_Lorenz, rep(0,n)) )
P = (P + t(P)) /2 # eliminate computational non-symmetry
B = P + Q_Lorenz
}else if(model_type == "Lorenz05lin"){
if(filter_type == "KF"){
# NOTE(review): AQ is only assigned inside the DSADM branch above, and
# `Xref` differs from `X_ref` used in the forecast step — this branch
# appears to rely on leftover/global state and likely errors if reached
# on the first iteration. Verify before use.
PHI = apply(AQ, 2, function(x) lorenz05lin_step(x, Xref, n,
dt_Lorenz, F_Lorenz, J_Lorenz, rep(0,n)) )
P = apply(t(PHI),2, function(x) lorenz05lin_step(x, Xref, n,
dt_Lorenz, F_Lorenz, J_Lorenz, rep(0,n)) )
B = P + Q_Lorenz
}
}
# (2) Anls
# Separate model time steps when the anls is to be or not to be performed.
# ANLS are to be done at t=stride*k +1, where k=1,2,3,...
# Therefore at the anls times, t-1 should divisible by stride:
if(((i-1) %% stride) != 0){ # no anls, continue fcst
Xa = XXf[,i]
A = B
}else{ # perform anls
# H applied by direct row/column selection (H is a 0/1 selection matrix)
BHT = B[ , ind_obs_space] # B*H^T
HBHT = B[ind_obs_space, ind_obs_space] # H*B*H^T
HBHTpR = HBHT + R
K = BHT %*% solve(HBHTpR) # Kalman gain
Xa = XXf[,i] + K %*% (OBS[,i] - XXf[ind_obs_space,i])
A = B - K%*%B[ind_obs_space,] # (I-KH)B
# use the Joseph form:
## A=(I-KH)B(I-KH)^T + KRK^T
## --> Yields the same results.
#ImKH=diag(n) - K %*% H
#A=ImKH %*% B %*% t(ImKH) + K %*% R %*% t(K)
# store BB
if(predict_BB_KF & (i-1) %% stride == 0){
i_filter=i_filter +1
BB[,,i_filter] = B
}
# Averaging of B, A
# NB: B/A are accumulated only at analysis times, yet divided by
# ntime_filter below, which matches the number of analysis steps
B_mean = B_mean + B
A_mean = A_mean + A
}
XXa[,i] = Xa
#AA[,,i] = A
} # end time loop
B_mean = B_mean / ntime_filter
A_mean = A_mean / ntime_filter
return(list(XXa = XXa [, ind_time_anls],
XXf = XXf [, ind_time_anls],
B_mean = B_mean,
A_mean = A_mean,
BB = BB
#AA = AA[,,ind_time_anls]
))
}
|
fa0dc59801c7e54733b34bd3a899f9793b102014
|
b0f13e8af99c895b56436a8a8570f090b611ccbd
|
/coenocytic_growth_synchrony.R
|
575a09bd7855caff27d0ae9c360d1200b4080b80
|
[] |
no_license
|
andrejondracka/coenocytic_growth_synchrony
|
b0e5a08245f1af44161d95b25e3b96765977c60b
|
00d99caaee9852cd9574e491c2c349247e762ecf
|
refs/heads/master
| 2020-03-09T10:59:30.062156
| 2018-04-10T20:22:55
| 2018-04-10T20:22:55
| 128,750,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,690
|
r
|
coenocytic_growth_synchrony.R
|
library(dplyr)
initcon <- c(rep(0,328),rep(1,1116),rep(2,270),rep(3,12),rep(4,3),rep(5,1),rep(6,1),rep(7,2)) ###sets a pool of initial conditions from the distribution identical to t=0 in the real data
# Default simulation parameters: coefficient of variation and mean duration
# (hours) of the nuclear division cycle.
# NOTE(review): `vari` is never used below (each simulation passes `var`
# explicitly), and `mean` shadows base::mean -- confirm both are intended.
vari <- 0.50
mean <- 11
###draw one nuclear-cycle duration from N(mean, (mean*var)^2), redrawing
###until the value is non-negative (rejection-sampled truncated normal,
###used to avoid negative interval durations)
meanfunction <- function(mean=11, var) {
  repeat {
    draw <- rnorm(1, mean, mean * var)
    if (draw >= 0) {
      return(draw)
    }
  }
}
###function that generates a time history of one cell. sets up starting number of nuclei from the specified initial condition status, and sets the duration of the nucler cycle based on specified mean and variance (time units = hours). in the end, records 50 hours of cell history.
###function that generates a time history of one cell: draws the starting
###nuclear-content state from the supplied initial-condition pool, then
###advances the state by one after each (randomly drawn) nuclear-cycle
###duration, recording the first 50 hours of the history (time unit = hours).
# Fixes two defects in the original:
#  * the 11th segment used `a + 10*a` (i.e. 11*a) where the d1..d10 pattern
#    requires `a + 10`;
#  * the `mean` argument was ignored in favour of a hard-coded 11.
generatecellhistory <- function(initcon=initcon,mean=mean,var=var) {
  a <- sample(initcon, 1)
  # One segment per nuclear state a, a+1, ..., a+10; each segment lasts one
  # randomly drawn cycle duration, rounded to whole hours.
  cell <- unlist(lapply(0:10, function(k) {
    rep(a + k, round(meanfunction(mean, var)))
  }))
  cell[1:50]
}
###function that generates a population of 5000 cells (by generating independent cell histories), calculates mean nuclear content and standard deviation of nuclear content at each time point (sd used as a measure of synchrony)
###function that builds a population of 5000 independent cell histories and
###returns the per-timepoint standard deviation of nuclear content (the SD
###is used as the measure of synchrony)
generatepopulation <- function(initcon=initcon,mean=mean,var=var) {
  histories <- as.data.frame(replicate(5000, generatecellhistory(initcon, mean, var)))
  histories$mean <- apply(histories, 1, function(row) mean(row))
  # NOTE(review): as in the original, the SD is computed over the 5000 cell
  # columns PLUS the just-added mean column -- confirm this is intended.
  histories$SD <- apply(histories, 1, function(row) sd(row))
  histories$SD
}
#####simulate 100 times for various variances (CVs) of nuclear division
#####times, subtract the SD at t = 0, and calculate the mean and standard
#####deviation of the 100 independent simulations
# NOTE(review): the original used `meannorm <- ...` inside mutate(), which
# created a column literally named "meannorm <- mean - mean[1]" that only
# worked downstream via $-partial-matching; fixed to `meannorm = ...`,
# which produces the same values under the intended column name.
###var = 0.3
time <- c(1:50)
pop <- replicate(100,generatepopulation(initcon,mean,var=0.3))
pop03 <- as.data.frame(pop)
pop03$mean <- apply(pop03, 1, function(x) mean(x))
pop03$std <- apply(pop03, 1, function(x) sd(x))
pop03 <- mutate(pop03, meannorm = mean - mean[1])
interval03 <- pop03$meannorm + outer(pop03$std, c(1,-1))
###var = 0.1
pop <- replicate(100,generatepopulation(initcon,mean,var=0.1))
pop01 <- as.data.frame(pop)
pop01$mean <- apply(pop01, 1, function(x) mean(x))
pop01$std <- apply(pop01, 1, function(x) sd(x))
pop01 <- mutate(pop01, meannorm = mean - mean[1])
interval01 <- pop01$meannorm + outer(pop01$std, c(1,-1))
###var = 0.5
pop <- replicate(100,generatepopulation(initcon,mean,var=0.5))
pop05 <- as.data.frame(pop)
pop05$mean <- apply(pop05, 1, function(x) mean(x))
pop05$std <- apply(pop05, 1, function(x) sd(x))
pop05 <- mutate(pop05, meannorm = mean - mean[1])
interval05 <- pop05$meannorm + outer(pop05$std, c(1,-1))
###var = 0.2
pop <- replicate(100,generatepopulation(initcon,mean,var=0.2))
pop02 <- as.data.frame(pop)
pop02$mean <- apply(pop02, 1, function(x) mean(x))
pop02$std <- apply(pop02, 1, function(x) sd(x))
pop02 <- mutate(pop02, meannorm = mean - mean[1])
interval02 <- pop02$meannorm + outer(pop02$std, c(1,-1))
###var = 0.05
pop <- replicate(100,generatepopulation(initcon,mean,var=0.05))
pop005 <- as.data.frame(pop)
pop005$mean <- apply(pop005, 1, function(x) mean(x))
pop005$std <- apply(pop005, 1, function(x) sd(x))
pop005 <- mutate(pop005, meannorm = mean - mean[1])
interval005 <- pop005$meannorm + outer(pop005$std, c(1,-1))
### function that makes pretty transparent colors
### convert colours to hex rgb strings carrying the requested alpha
### (transparency) level, 0-255 scale
makeTransparent <- function(someColor, alpha=100) {
  rgb_vals <- col2rgb(someColor)
  apply(rgb_vals, 2, function(channel) {
    rgb(red = channel[1], green = channel[2], blue = channel[3],
        alpha = alpha, maxColorValue = 255)
  })
}
# Generate the palette of transparent colours used for the shaded +/- SD
# regions, one colour per CV value.
library(wesanderson)
pal2 <- wes_palette("Zissou", 10, type="continuous")
pal <- makeTransparent(pal2, 120)
# Times corresponding to the experimental sampling points; the first point
# in the simulation is actually t = 0, hence the `-1` offsets below.
time3 <- c(1,13,25,37)
###plots the simulated curves (mean +/- SD over the 100 runs) for different CVs
plot(time3-1,pop05$meannorm[time3],type="l",ylim=c(-0.1,0.5), xlab = "time (h)", ylab = "increase of geometric SD")
# Shaded +/- 1 SD bands, widest CV first so narrower bands draw on top.
polygon(c(time3-1, rev(time3-1)), c(interval05[time3,1],rev(interval05[time3,2])), border = NA, col=pal[1])
polygon(c(time3-1, rev(time3-1)), c(interval03[time3,1],rev(interval03[time3,2])), border = NA, col=pal[2])
polygon(c(time3-1, rev(time3-1)), c(interval02[time3,1],rev(interval02[time3,2])), border = NA, col=pal[3])
polygon(c(time3-1, rev(time3-1)), c(interval01[time3,1],rev(interval01[time3,2])), border = NA, col=pal[4])
# Mean curves drawn on top of the shaded bands.
lines(time3-1, pop03$meannorm[time3], type = "l", col="black")
lines(time3-1, pop02$meannorm[time3], type = "l", col="black")
lines(time3-1, pop01$meannorm[time3], type = "l", col="black")
lines(time3-1, pop05$meannorm[time3], type = "l", col="black")
###adding real experimental data to the plot; values of geometric SD from the two biological replicates at corresponding times
data <- c(0.6626, 0.6807, 0.6268, 0.6903)
data2 <- c(0.797, 0.845, 0.820, 0.883)
# Normalise each replicate to its t = 0 value so it matches the simulated
# "increase of SD" axis.
datam <-data-data[1]
datam2 <- data2-data2[1]
points(time3-1,datam,type="b", lwd = 3)
points(time3-1,datam2,type="b", lwd = 3)
legend (0,0.5, c("CV = 0.5","CV = 0.3","CV = 0.2","CV = 0.1"), cex=1, col=c(pal[1],pal[2],pal[3],pal[4]),pch=15,bty="n",y.intersp = 1.5)
|
e4a9435475348b0971e9d28f47f33a822aa4f6b9
|
c2149b76357f5f962db0d967b2a22b5a6c3ab622
|
/glm_script.R
|
436d3f3646ffa99baa51f349c7c835959bf7d458
|
[] |
no_license
|
Pereirajpf/R_statistics_learning
|
6c3388889a087654e54acc47cc269e29aaea9c7f
|
df1d8793dd2a2bae0b9ea7dfdff46a53b53f8b9b
|
refs/heads/master
| 2023-03-30T23:02:48.857376
| 2021-04-12T11:53:56
| 2021-04-12T11:53:56
| 357,177,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,091
|
r
|
glm_script.R
|
# Learning Generalized Linear Model (GLM)
## url: https://www.guru99.com/r-generalized-linear-model.html
library(dplyr)
data_adult <-read.csv("https://raw.githubusercontent.com/guru99-edu/R-Programming/master/adult.csv")
glimpse(data_adult)
# Data cleaning: convert the character variables to factors.
data_adult <- data_adult %>%
	mutate_if(is.character, as.factor)
str(data_adult)
# OBJECTIVE: predict which individuals have a revenue higher than 50K.
# 1) Continuous variables
continuous <- select_if(data_adult, is.numeric)
summary(continuous)
## Histogram with kernel density curve
library(ggplot2)
ggplot(continuous, aes(x = hours.per.week)) +
    geom_density(alpha = 0.2, fill = "pink")
## Removing the top 1 percent (outliers)
top_one_percent <- quantile(data_adult$hours.per.week, 0.99)
top_one_percent
### 99% of the population works under 80 hours per week
data_adult_drop <- data_adult %>%
	filter(hours.per.week < top_one_percent)
dim(data_adult_drop)
## Standardize the continuous variables
# `funs()` is deprecated in dplyr >= 0.8; use the lambda notation instead.
data_adult_rescale <- data_adult_drop %>%
	mutate_if(is.numeric, ~ as.numeric(scale(.x)))
head(data_adult_rescale)
#2) Check factor variables
## select the categorical columns
factor <- select_if(data_adult_rescale, is.factor)
ncol(factor)
## create one bar chart per categorical column
graph <- lapply(names(factor),
               function(x)
	           ggplot(factor, aes(get(x))) +
		          geom_bar() +
		          theme(axis.text.x = element_text(angle = 90)))
graph
#3) Feature engineering
## recast education into a small number of ordered tiers
# NOTE(review): `select(-x)` drops a column named "x" -- presumably a row
# index in the source csv; confirm it exists in the data.
recast_data <- data_adult_rescale %>%
	select(-x) %>%
	mutate(education = factor(ifelse(education == "Preschool" | education == "10th" | education == "11th" | education == "12th" | education == "1st-4th" | education == "5th-6th" | education == "7th-8th" | education == "9th", "dropout", ifelse(education == "HS-grad", "HighGrad", ifelse(education == "Some-college" | education == "Assoc-acdm" | education == "Assoc-voc", "Community",
    ifelse(education == "Bachelors", "Bachelors",
           ifelse(education == "Masters" | education == "Prof-school", "Master", "PhD")))))))
# Sanity check: average years of education per recast tier.
recast_data %>%
	group_by(education) %>%
	summarize(average_educ_year = mean(educational.num),
		  count = n()) %>%
	arrange(average_educ_year)
## recast marital-status into four categories
recast_data <- recast_data %>%
	mutate(marital.status = factor(ifelse(marital.status == "Never-married" | marital.status == "Married-spouse-absent", "Not_married", ifelse(marital.status == "Married-AF-spouse" | marital.status == "Married-civ-spouse", "Married", ifelse(marital.status == "Separated" | marital.status == "Divorced", "Separated", "Widow")))))
table(recast_data$marital.status)
#4) Summary Statistic
# Plot gender income
ggplot(recast_data, aes(x = gender, fill = income)) +
    geom_bar(position = "fill") +
    theme_classic()
# Plot origin income
ggplot(recast_data, aes(x = race, fill = income)) +
    geom_bar(position = "fill") +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90))
# box plot gender working time
# NOTE(review): `fun.y` is deprecated in ggplot2 >= 3.3 in favour of `fun`;
# kept as-is (still works with a warning).
ggplot(recast_data, aes(x = gender, y = hours.per.week)) +
    geom_boxplot() +
    stat_summary(fun.y = mean,
        geom = "point",
        size = 3,
        color = "steelblue") +
    theme_classic()
# Plot distribution working time by education
ggplot(recast_data, aes(x = hours.per.week)) +
    geom_density(aes(color = education), alpha = 0.5) +
    theme_classic()
# The ANOVA test confirms the difference in average between groups
anova <- aov(hours.per.week~education, recast_data)
summary(anova)
# Non-linearity
## number of hours worked is related to age? (quadratic fit per income group)
ggplot(recast_data, aes(x = age, y = hours.per.week)) +
    geom_point(aes(color = income),
        size = 0.5) +
    stat_smooth(method = 'lm',
        formula = y~poly(x, 2),
        se = TRUE,
        aes(color = income)) +
    theme_classic()
#Correlation
## visualize the correlation between the variables
library(GGally)
# Convert data to numeric
corr <- data.frame(lapply(recast_data, as.integer))
# Plot the graph
ggcorr(corr,
    method = c("pairwise", "spearman"),
    nbreaks = 6,
    hjust = 0.8,
    label = TRUE,
    label_size = 3,
    color = "grey50")
#5 Train/test set
# NOTE(review): set.seed has no effect on the split below, which is
# deterministic (first 80% of rows) -- confirm a random split was intended.
set.seed(1234)
# Deterministic train/test split: the first `size` fraction of the rows
# forms the train set, the remaining rows the test set (no shuffling).
#   data  data frame to split
#   size  fraction of rows assigned to the train set
#   train TRUE -> return the train rows, FALSE -> the test rows
create_train_test <- function(data, size = 0.8, train = TRUE) {
  cutoff <- size * nrow(data)
  split_rows <- 1:cutoff
  if (train) {
    data[split_rows, ]
  } else {
    data[-split_rows, ]
  }
}
data_train <- create_train_test(recast_data, 0.8, train = TRUE)
data_test <- create_train_test(recast_data, 0.8, train = FALSE)
dim(data_train)
#6 Build the model: logistic regression of income on all predictors
formula <- income~.
logit <- glm(formula, data = data_train, family = 'binomial')
summary(logit)
# The list is very long, print only the first three elements
lapply(logit, class)[1:3]
#AIC (Akaike Information Criteria)
logit$aic
#7 Assess the performance of the model on the held-out test set
# NOTE(review): `predict` shadows stats::predict for the rest of the
# session -- consider renaming the variable.
predict <- predict(logit, data_test, type = 'response')
# confusion matrix at the 0.5 probability threshold
table_mat <- table(data_test$income, predict > 0.5)
table_mat
# Accuracy = correctly classified / total
accuracy_Test <- sum(diag(table_mat)) / sum(table_mat)
accuracy_Test
# Precision vs Recall
# Precision = TP / (TP + FP) read off a 2x2 confusion matrix built as
# table(actual, predicted): cell [2, 2] is true positive, [1, 2] false positive.
precision <- function(matrix) {
  true_pos <- matrix[2, 2]
  false_pos <- matrix[1, 2]
  true_pos / (true_pos + false_pos)
}
# Recall = TP / (TP + FN): the share of actual positives that were
# predicted positive (row 2 of the confusion matrix).
recall <- function(matrix) {
  true_pos <- matrix[2, 2]
  false_neg <- matrix[2, 1]
  true_pos / (true_pos + false_neg)
}
prec <- precision(table_mat)
prec
rec <- recall(table_mat)
rec
# F1 score: harmonic mean of precision and recall.
f1 <- 2 * ((prec * rec) / (prec + rec))
f1
|
8a275cfe46276199159ab3d02140229ddf6c74bd
|
4ed27ad1a562ae48ec160c1182d49420a384eb5e
|
/slowtests/fastshap-parallel-16cores-ames.R
|
f5951906f25adc2dcc810c8bb57ceb55ea4f6267
|
[] |
no_license
|
huitmj/fastshap
|
21033983db4eea6b1916d9cd9ee4083117b11d1d
|
9de31b88b25af6eb6570c3f2f9cbfaab5ae26a06
|
refs/heads/master
| 2023-03-11T05:00:13.600636
| 2021-03-03T02:13:05
| 2021-03-03T02:13:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,058
|
r
|
fastshap-parallel-16cores-ames.R
|
# Benchmark: fastshap on the Ames housing data, comparing a multicore
# (foreach/doParallel) run against a PSOCK cluster run.
# Load required packages
# library(AmesHousing)
library(fastshap)
library(ranger)
# Load Ames housing data
ames <- as.data.frame(AmesHousing::make_ames())
X <- subset(ames, select = -Sale_Price)
# Fit a random forest
set.seed(102)
rfo <- ranger(Sale_Price ~ ., data = ames, write.forest = TRUE)
# Prediction wrapper: ranger returns predictions inside a list component.
pfun <- function(object, newdata) {
predict(object, data = newdata)$predictions
}
# Use forking approach (registerDoParallel with cores= forks on Unix)
library(doParallel)
registerDoParallel(cores = 16)
set.seed(5038)
system.time({ # estimate run time
shap <- fastshap(rfo, X = X, pred_wrapper = pfun, nsim = 10, .parallel = TRUE)
})
# user system elapsed
# 1369.551 56.341 101.875
# Alternative: explicit PSOCK cluster, one feature per worker task.
# NOTE(review): only 5 workers here despite the filename's "16cores" --
# presumably deliberate for the comparison; confirm.
library(parallel)
cl <- makeCluster(5, type = "PSOCK")
clusterExport(cl, c("fastshap", "X", "pfun", "rfo"))
clusterEvalQ(cl, {
  library(ranger)
})
set.seed(5038)
system.time({
res <- parLapply(cl,X = names(X), fun = function(x) {
  fastshap(rfo, feature_names = x, X = X, pred_wrapper = pfun, nsim = 10)
})
})
stopCluster(cl)
# user system elapsed
# 1.974 0.085 161.037
|
4c4406b042e456180390eeb026148b5c425cd5c0
|
6292bd85e787a05b5c49aca6646de02c7a8d9dcd
|
/graphs.R
|
d40ae8a91da05e0c66d40f1b743cb7ece2a9cbb2
|
[] |
no_license
|
haututu/rentalCosts
|
367ec06792772e9bf1768889a0f4517ee44041fc
|
f740704b3e1c8adce7f6ea3d324e9e08ee1f37f8
|
refs/heads/master
| 2021-05-10T10:14:58.657970
| 2018-11-08T08:36:16
| 2018-11-08T08:36:16
| 118,376,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,563
|
r
|
graphs.R
|
# NOTE(review): this file contained two UNRESOLVED git merge conflicts
# (<<<<<<< / ======= / >>>>>>> markers), which made it unparseable.
# Resolved by keeping both sides' additions; confirm the resolution.
#Cleans and loads the base data
source("setup.R")
library(ggplot2)
library(plotly)
###Data for graphs is generated in setup.R
#Plot for december months, geometric mean
meanDecPlot <- ggplot(filter(rentData, year(Month) > 2008), aes(as.Date(Month), mean, group=area, color=area)) +
  geom_point() +
  geom_line() +
  theme_classic() +
  theme(
    plot.background = element_rect(fill = "#ecf0f1", color=NA),
    panel.background = element_rect(fill = "#ecf0f1"),
    legend.background = element_rect(fill = "#ecf0f1"),
    plot.title = element_text(hjust = 0.5)
  ) +
  labs(
    title="Average rent for December",
    y="Geometric mean",
    x="Year",
    color="City"
  )
#Plot for december months, lower quartile
quartDecPlot <- ggplot(filter(rentData, year(Month) > 2008), aes(as.Date(Month), quartLow, group=area, color=area)) +
  geom_point() +
  geom_line() +
  theme_classic() +
  theme(
    plot.background = element_rect(fill = "#ecf0f1", color=NA),
    panel.background = element_rect(fill = "#ecf0f1"),
    legend.background = element_rect(fill = "#ecf0f1"),
    # Kept from the HEAD side of the conflict.
    panel.border = element_rect(color="#ecf0f1", size=0.5, linetype="solid"),
    plot.title = element_text(hjust = 0.5)
  ) +
  labs(
    title="Lower quartile rent for December",
    y="Geometric mean",
    x="Year",
    color="City"
  )
#Save the plots
ggsave("images/meanDecPlot.svg", plot=meanDecPlot, device="svg", width=7, height=5)
|
1a47ed9e621b2973809b4258b0d0b22617aaea18
|
eeb9196b365641c5353dd5b4194952b1f32fb247
|
/MicroMetabolism/man/get_species_text.Rd
|
6000c14c2678bd687e68a68c86f7f4d7a4796951
|
[
"MIT"
] |
permissive
|
thackmann/MicroMetabolism
|
41f39f906c9e94155f08ec5288198fc4a034ffb4
|
35111f1a28f5d7ffcb537a8a2af541c245f42fc8
|
refs/heads/main
| 2023-08-29T23:31:33.031096
| 2021-10-14T23:57:10
| 2021-10-14T23:57:10
| 300,643,560
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
get_species_text.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_text.R
\name{get_species_text}
\alias{get_species_text}
\title{Get Species Text}
\usage{
get_species_text(url_list, text_full, text_tables)
}
\arguments{
\item{url_list}{A character vector of url names for articles}
\item{text_full}{A list containing the full text (one element per article)}
\item{text_tables}{A list containing the text of tables (one element per article)}
}
\value{
A list containing the species text (one element per article)
}
\description{
This function gets text under "List of the species of the genus" or similar section
}
|
63b10464e520c386487c3c2358e122df07c34110
|
92e39a5004ee1efa759644a09296640f83404a87
|
/combine_files.R
|
dc943826f52c0712f12e101f3820b56b48ad025f
|
[] |
no_license
|
sagelinae/Brant-Data-Entry
|
68dd4e43b95bf9d7e6eb30bf36c90de785d7eb22
|
2c6571d4d41ae3d2dc88a084261c01f71b5c9297
|
refs/heads/master
| 2020-08-05T04:44:25.148301
| 2020-02-26T16:35:54
| 2020-02-26T16:35:54
| 212,400,542
| 0
| 0
| null | 2020-02-25T18:52:42
| 2019-10-02T17:26:27
|
R
|
UTF-8
|
R
| false
| false
| 2,676
|
r
|
combine_files.R
|
###########
# Script to combine band/nest/egg data entered on multiple devices.
# For each data type: merge the two device files, write a timestamped
# backup, overwrite the first device's file with the merged data, and
# delete the second device's file (both originals should already have
# been backed up by app.r on their respective devices).
###########
# Date/time stamp (MM-DD_HH-MM) appended to backup file names.
date_time <- paste0(substr(Sys.time(), 6,10), "_",sub(':', '-', strftime(Sys.time(), "%H:%M")))
#***Set A Directory to save Backups to
NestBackupdir <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Backups\\Nest"
BandBackupdir <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Backups\\Band"
EggBackupdir <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Backups\\Egg"
#***Read in the files that you want to combine
bpathway <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Data\\BAND2020.csv"
b2pathway <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Data\\BAND2020 (2).csv"
npathway <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Data\\NEST2020.csv"
n2pathway <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Data\\NEST2020 (2).csv"
epathway <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Data\\EGG2020.csv"
e2pathway <- "C:\\Users\\sellis\\Desktop\\Brant-Data\\Data\\EGG2020 (2).csv"
# Merge two device CSVs, blank cells -> NA, optionally drop duplicate rows,
# back the result up, overwrite the primary file, and remove the secondary.
#   path1       primary device file (kept, overwritten with merged data)
#   path2       secondary device file (deleted after merging)
#   backup_file full path for the timestamped backup copy
#   dedupe      drop exact duplicate rows after merging?
combine_device_files <- function(path1, path2, backup_file, dedupe = FALSE) {
  combined <- merge(read.csv(path1), read.csv(path2), all = TRUE)
  combined[combined == ""] <- NA
  if (dedupe) {
    combined <- unique(combined)
  }
  write.csv(combined, backup_file, row.names = FALSE)
  write.csv(combined, path1, row.names = FALSE)
  file.remove(path2)
  invisible(combined)
}
# NOTE(review): backup names say 2019 although the data files are 2020;
# preserved as-is -- confirm whether the year label should be updated.
# NOTE(review): only the band data was deduplicated in the original script;
# preserved as-is -- confirm whether nest/egg should be deduplicated too.
###
#Band
###
combine_device_files(bpathway, b2pathway,
                     file.path(BandBackupdir, paste0("BAND2019_", date_time, ".csv")),
                     dedupe = TRUE)
###
#Nest
###
combine_device_files(npathway, n2pathway,
                     file.path(NestBackupdir, paste0("NEST2019_", date_time, ".csv")))
###
#Egg
###
combine_device_files(epathway, e2pathway,
                     file.path(EggBackupdir, paste0("EGG2019_", date_time, ".csv")))
|
cfb972c69ae5a568c6a4c7decdb6885e6f102136
|
d33d116b02f9e993d286ee6e2953410f85809aee
|
/plot2.R
|
b5c68a2f9170d19680259af15a94f37580e48199
|
[] |
no_license
|
tyz910/ExData_Plotting1
|
bfd56ba19685e6bfed0032b60f3ef2784a853e68
|
42f883f4939dd40d408fd9fbfe8eecdd6f04d6d3
|
refs/heads/master
| 2020-12-29T01:00:00.932079
| 2015-01-10T14:45:14
| 2015-01-10T14:45:14
| 29,058,068
| 0
| 0
| null | 2015-01-10T13:32:38
| 2015-01-10T13:32:36
| null |
UTF-8
|
R
| false
| false
| 206
|
r
|
plot2.R
|
# Plot 2: Global Active Power over time, written to plot2.png (480x480 px).
# get_data.R presumably creates `hpcdata` with DateTime and
# Global_active_power columns -- inferred from usage below; confirm.
source('get_data.R')
png(filename = "plot2.png", width = 480, height = 480)
plot(hpcdata$DateTime, hpcdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (in kilowatts)")
dev.off()
|
e01163851b2dc5f8dbb860f950f778212702612e
|
d55c03b0f4a1a8a7c757ee653198d28b62f43f41
|
/global.R
|
d140cdc241752c7addb324be2bf6780eeed17954
|
[] |
no_license
|
fataltes/herRingShiny
|
58c4aa251ea9d57546622ef9c6d657b288d2ef66
|
049fa42b3a81d2224a5b7049960d7d29e912bc18
|
refs/heads/master
| 2021-07-24T01:03:41.327790
| 2017-10-09T05:35:29
| 2017-10-09T05:35:29
| 96,694,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,335
|
r
|
global.R
|
# NOTE(review): rm(list = ls()) wipes the user's workspace whenever this
# file is sourced; generally discouraged -- confirm it is wanted here.
rm(list=ls())
# Load the pre-computed results table and the column-name decoder.
allres<-readRDS("allres.rds")
dispNames<-read.csv('AllResNameDecoder.csv')
# Unique values of the scenario parameters -- presumably used to populate
# UI filters elsewhere in the app; not referenced in this file.
uniqueCR = unique(allres$CR)
uniquePropc = unique(allres$Propc)
uniqueFcapprop = unique(allres$Fcapprop)
uniqueFracBmsyThreshLo = unique(allres$FracBmsyThreshLo)
uniqueFracBmsyThreshHi = unique(allres[c('FracBmsyThreshHi', 'FracBmsyThreshLo')])
uniqueFracFtarg = unique(allres[c('FracBmsyThreshHi', 'FracBmsyThreshLo', 'FracFtarg')])
# Axis options: result columns (point results and medians), keyed by their
# display names for use in selectInput choices.
selected <- dispNames[dispNames$Type == 'result' & (dispNames$SubType == 'result' | dispNames$SubType == 'median'), ];
axisOption <- selected$OutName;
axisDisplayName <- selected$DisplayName;
xyChoices = setNames(as.character(axisOption), axisDisplayName);
# Column names for the 25th/75th percentile variants of each result.
v25 <- dispNames[dispNames$Type == 'result' & dispNames$SubType == '25', 'OutName'];
v75 <- dispNames[dispNames$Type == 'result' & dispNames$SubType == '75', 'OutName'];
# Look up the human-readable display name for an output-column name in the
# dispNames decoder table.
dispName <- function(outName) {
  is_match <- dispNames$OutName == outName
  dispNames[is_match, 'DisplayName']
}
# Register renderUI outputs for the X- and Y-axis selectors in the given
# shiny module session; both selectors offer the result columns listed in
# the module-level xyChoices vector.
setXYAxisOptions <- function(input, output, session) {
output$selectX <- renderUI({
ns <- session$ns
return (
selectInput(ns("x"), 'Choose X Axis',
choices = xyChoices)
)
})
output$selectY <- renderUI({
ns <- session$ns
return (
selectInput(ns("y"), 'Choose Y Axis',
choices = xyChoices)
)
})
}
|
f8503725b54c160292acaf12d0ff1d1ac9f0fa45
|
1fc421ae8d2d0cc87944ec21ea53b37b1ef02544
|
/R/MackNet_Fit.R
|
25f5fa67280a3b1fd047892865b07a81c2541418
|
[] |
no_license
|
EduardoRamosP/MackNet
|
5f3df28a30385e83c4d3de0eb10606a416499c92
|
1281f90ccad86df2f496b6e1a33aeab18cf81807
|
refs/heads/master
| 2022-12-18T22:17:47.097987
| 2020-09-21T20:30:55
| 2020-09-21T20:30:55
| 296,931,038
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,077
|
r
|
MackNet_Fit.R
|
#' @title MackNet_Fit
#' @description This function fits the ensemble of RNNs required for the payments MackNet model. The optimum weigthed decay is obtained by selecting the configuration that minimizes the test error.
#' @param Cumulative.T Cumulative payments triangle.
#' @param Incurred.T Incurred cost triangle.
#' @param Exposure Exposure measure. Written premiums is an appropriate measure to scale cumulative payments and incurred cost.
#' @param Epochs Maximum number of epochs.
#' @param MinimumEpochs Minimum number of epochs.
#' @param wd The optimization algorithm used is ADAM. This variable defines the weighted decay value.
#' @param Learning Learning rate.
#' @param drop Dropout regularization.
#' @param Ensemble Number of RNNs included in the ensemble.
#' @param AR This variable allows to remove the autorregressive component from the MackNet model when it is set to 0.
#' @param ES Early Stopping object defined under the keras framework.
#' @param Output Linear or ReLU activation function for the output layer.
#' @return The formula generates the following outputs: \itemize{
#' \item \code{TrianglesBackup} Full triangles predicted by each RNN included within the ensemble.
#' \item \code{Error} Test error.
#' }
#' @import keras
#' @import abind
#' @export
#'
MackNet_Fit=function(Cumulative.T,Incurred.T,Exposure,AR,Ensemble,wd,Learning,drop,Epochs,MinimumEpochs,ES,Output){
dimension=dim(Cumulative.T)[1] #Triangle dimension
Exposure_Matrix=matrix(Exposure,dimension,dimension) #Exposure for each position
CumulativeScaled.T=Cumulative.T/Exposure_Matrix #Scaled cumulative payments
IncurredScaled.T=Incurred.T/Exposure_Matrix #Scaled incurred cost
RNN_DB=DB(CumulativeScaled.T,IncurredScaled.T) #Data for fitting RNNs
train_x=RNN_DB$train.x;train_y=RNN_DB$train.y #Train dataset is generated
test_x=RNN_DB$test.x;test_y=RNN_DB$test.y #Test dataset is generated
# Paid-to-incurred ratio per development period, replicated across rows.
PI_Ratio=matrix(colSums(CumulativeScaled.T)/colSums(IncurredScaled.T),dimension,dimension,byrow = T)
# Augment the training set with Chain-Ladder ultimate reference points.
train_x=abind(train_x,CL_Ultimate_Reference(Cumulative.T,Exposure,PI_Ratio)$X,along=1)
train_y=rbind(train_y,CL_Ultimate_Reference(Cumulative.T,Exposure,PI_Ratio)$Y)
if (AR==0){train_x=train_x[,,-1];test_x=test_x[,,-1]} #Autorregresive component is removed is AR=0
batch=dim(train_x)[1] #Batch size is equal to the observations of the DB
#Matrix to save the triangles sampled
TrianglesBackup=array(0,dim=c(dimension,dimension,Ensemble));Error=0;e=1;iter=1
#Ensemble of RNNs is fitted for the different wd defined within the grid seach
# Keep fitting until the ensemble has `Ensemble` usable networks; fits that
# collapse (vanishing gradients) are discarded and retried.
while (e<=Ensemble){
#Model is defined and fitted: first MinimumEpochs unconditionally, then the
#remaining epochs under the early-stopping callback.
if (Output=="relu"){model=RNN_Keras(dim(train_x)[2], dim(train_x)[3], Learning, wd, drop)} else {model=RNN_Keras_Linear(dim(train_x)[2], dim(train_x)[3], Learning, wd, drop)}
model %>% fit(train_x, train_y, validation_data=list(test_x, test_y), epochs=MinimumEpochs, batch_size=batch, verbose=0)
model %>% fit(train_x, train_y, validation_data=list(test_x, test_y), epochs=(Epochs-MinimumEpochs), batch_size=batch,verbose=0,callbacks=ES)
#Triangle is predicted (with or without the autoregressive component)
if (AR==1){TrianglesBackup[,,e]=Full.Cumulative(RNN.Triangle(Triangle.Incremental(CumulativeScaled.T),PI_Ratio,model))}
if (AR==0){TrianglesBackup[,,e]=Full.Cumulative(RNN.Triangle.NoAR(Triangle.Incremental(CumulativeScaled.T),PI_Ratio,model))}
#Check if the RNN is vanishing gradients or not: a flat last row means the
#network produced a constant output, so the fit is discarded
if (TrianglesBackup[dimension,1,e]==TrianglesBackup[dimension,dimension,e]) {TrianglesBackup[,,e]=0} else {e=e+1; Error=Error+mean(((model %>% predict(test_x))-test_y)^2)/Ensemble}
#Keras graph is reset
model=NULL;K <- backend();K$clear_session(); iter=iter+1
#Fallback: after 2*Ensemble failed attempts, fill the remaining slots with
#plain Chain-Ladder projections
if ((iter>(Ensemble*2)) & (e<=Ensemble)){TrianglesBackup[,,e:Ensemble]=ChainLaddert.t(CumulativeScaled.T,DevFactors.t(CumulativeScaled.T)); e=Ensemble+1; print("ChainLadder Assumptions Partially Taken")}
}
return(list(TrianglesBackup=TrianglesBackup,Error=Error))
}
|
4c15e575a44ffcb677d8558e82efeb240751ab1f
|
20ba561c94011548361ec18b916bdf5cec44eb82
|
/man/vigencia.Rd
|
ffbabc2f8c1533a7bd4959880c97f1d94ca128c7
|
[
"MIT"
] |
permissive
|
paloosa/idealisto
|
5e99f0251be4188c3a690ee3f2542af2026b772b
|
5a82b64bb736c1313a12c0117883bca1e60708b1
|
refs/heads/master
| 2020-07-16T20:05:18.532208
| 2018-04-23T11:57:11
| 2018-04-23T11:57:11
| 205,859,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 790
|
rd
|
vigencia.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vigencia.R
\name{vigencia}
\alias{vigencia}
\title{Enrich the csv file generated by idealisto function.}
\usage{
vigencia(ruta)
}
\arguments{
\item{ruta}{A valid path in your computer that leads to an idealisto created csv file.}
}
\value{
It returns a csv file with more variables. It will be saved in the root (~/).
}
\description{
This function enriches the csv file generated by idealisto with additional variables describing the current status of the downloaded ads. It is useful for checking whether the ads are still online days or weeks later or have been removed. If an ad has been removed, the vigencia function reports how many days ago that happened; if the ad is still online, it reports whether the price has changed.
}
|
f72ff0f834688698b644c3a81f2271bc2f421e6f
|
251302c5cc0a0ebfd6b8ea4b355ed349697a331d
|
/polynomial/build_poly.R
|
befdbc717c953ee5891b2af45f83ac9c5d0926e8
|
[] |
no_license
|
richiemorrisroe/grad_descent
|
e77c5e7c1f90d24137fad9c251344aaa5ffb525a
|
9e6e8bd02762fcda5e1c70592c2cf139a57b2b2b
|
refs/heads/master
| 2021-01-12T08:45:40.016571
| 2017-07-08T19:13:01
| 2017-07-08T19:13:01
| 76,680,127
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 290
|
r
|
build_poly.R
|
# One-off scaffolding script for the `polynomial` package, driven by devtools.
# NOTE(review): setwd() to an absolute personal path makes this script
# machine-specific; prefer running it from the package root.
setwd('~/Dropbox/Code/Stats/polynomial/')
devtools::setup(".", rstudio=FALSE)
# Keep scratch/backup files and this script itself out of the built package.
devtools::use_build_ignore("^#")
devtools::use_build_ignore("build_poly.R")
devtools::use_testthat()
devtools::use_package("stringr")
# Standard document / check / build / install cycle.
devtools::document()
devtools::check()
devtools::build()
devtools::install()
|
015b94a0599c7e5f385b39539db775e3e1071ab9
|
646cf417b48d7ba3a363b74230a9045394520ae3
|
/src/brooks_pitchfx_scraper.R
|
44897241c81e1924b6d35176ec30c03b0e06097e
|
[] |
no_license
|
PrestonEn/RockyHelium
|
ff5ddd401f780a12115b546168a44da8a19214a5
|
bb9954b3f0bc758fe1de0f802f323860541a53d2
|
refs/heads/master
| 2021-01-10T16:08:30.315631
| 2016-04-30T05:16:28
| 2016-04-30T05:16:28
| 55,182,069
| 0
| 0
| null | 2016-04-29T21:50:52
| 2016-03-31T20:49:37
|
TeX
|
UTF-8
|
R
| false
| false
| 1,660
|
r
|
brooks_pitchfx_scraper.R
|
library(XML)
library(tidyr)
library(dplyr)
library(magrittr)
library(reshape2)
pitchers <- tbl_df(read.csv("data/control_pitchers_mlbamid.csv"))
pitchers.keys <- pitchers$key_mlbam
#pitchers.keys <- c(506560)
var_list <- c("mph", "maxmph", "pfx_x", "pfx_z", "hloc", "vloc", "bway")
#var_list <- c("mph", "maxmph")
# Scrape one Brooks Baseball velo.php table for player `p` and pitchf/x
# variable `var`; returns only the rows with no missing values.
fetch_table <- function(p, var) {
  url <- paste0("http://www.brooksbaseball.net/velo.php?player=", p,
                "&b_hand=-1&gFilt=&pFilt=FA|SI|FC|CU|SL|CS|KN|CH|FS|SB&time=year&minmax=ci&var=",
                var,
                "&s_type=2&startDate=01/01/2008&endDate=12/31/2015")
  d <- readHTMLTable(url, header = TRUE, skip.rows = 1, as.data.frame = TRUE,
                     stringsAsFactors = FALSE)
  d <- d[[1]]
  d[complete.cases(d), ]
}
# Fetch one player/variable table and tag each row with the variable name
# and the player's MLBAM id.
fetch_stats <- function(i, v) {
  s <- fetch_table(i, v)
  s$pfx_var <- rep(v, nrow(s))
  s$mlbam_id <- rep(i, nrow(s))
  s
}
pitchfx <- data.frame()
j <- 0
# For each pitcher, download every pitchf/x variable table and append it.
# NOTE(review): the original redefined the scraping helpers inside the inner
# loop and computed unused `pitchers.keys[i]` / `var_list[v]` lookups (both
# index by value rather than position, yielding NA); hoisted the helpers and
# removed the dead assignments -- behavior is unchanged.
for (i in pitchers.keys) {
  j <- j + 1
  print(paste("player:", j, "of", nrow(pitchers), ":", i))
  for (v in var_list) {
    pitchfx <- fetch_stats(i, v) %>%
      bind_rows(pitchfx)
  }
}
pitchfx %>%
  write.csv(file="data/pitchfx_control_pitchers.csv", row.names = FALSE)
|
857b464839335fe3412cf6940d485b9579a53098
|
32a5b9ec56f8cac3053fb630903c3685ffd85c6c
|
/R/easy_fun.R
|
88615e3608779fed0b4676e1c9dc0654b3d2f3f3
|
[] |
no_license
|
cloud-brain/backtest
|
86500bb639bf3ea07912a33b528935f88ae1df30
|
7713dc0f67decc7a2fa1462dfc00cd9cb6a2c1d7
|
refs/heads/master
| 2020-12-13T21:48:45.511327
| 2020-02-04T04:08:44
| 2020-02-04T04:08:44
| 95,461,479
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 300
|
r
|
easy_fun.R
|
#' @import lubridate
# Convert a date-like value to a "YYYYMMDD" character string; a NULL input
# passes through unchanged.
dt_to_char <- function(x)
{
  if (is.null(x)) {
    return(x)
  }
  format(ymd(x), '%Y%m%d')
}
# Collapse a character vector into one comma-separated string of
# single-quoted values, e.g. c("a", "b") -> "'a','b'" (for SQL IN lists).
comb_char <- function(x)
{
  quoted <- paste0("'", x, "'")
  paste(quoted, collapse = ",")
}
# Replace missing values in `x` with `fill` (default 0), elementwise.
fill_na <- function(x, fill = 0)
{
  missing_mask <- is.na(x)
  ifelse(missing_mask, fill, x)
}
|
d1e53e420e006ef12872dff21dd23da185634bc4
|
1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5
|
/dataCamp/openCourses/dataAnalysisAndStatisticalInference/7_inferenceForCategoricalData/13_whatAboutIndia.R
|
ed48dec177d76b3d3afe80db080bcc3d5568da7f
|
[
"MIT"
] |
permissive
|
sagarnikam123/learnNPractice
|
f0da3f8acf653e56c591353ab342765a6831698c
|
1b3b0cb2cff2f478006626a4c37a99102acbb628
|
refs/heads/master
| 2023-02-04T11:21:18.211654
| 2023-01-24T14:47:52
| 2023-01-24T14:47:52
| 61,184,927
| 2
| 1
|
MIT
| 2022-03-06T11:07:18
| 2016-06-15T06:57:19
|
Python
|
UTF-8
|
R
| false
| false
| 778
|
r
|
13_whatAboutIndia.R
|
# What about India?
#######################################################################################################################
#
# Using the inference() function, now calculate the confidence intervals for the proportion of atheists
# in 2012 in India.
#
# First, make sure to note whether the conditions for inference are met.
#
# The inference() function might be a bit slow.
#
#######################################################################################################################
# NOTE(review): `atheism` and inference() are provided by the course
# workspace, not defined in this script -- inferred from usage.
# The subset for India for 2012:
india <- subset(atheism, atheism$nationality=="India" & atheism$year=="2012")
# The analysis using the inference() function:
inference(india$response, est = "proportion", type = "ci", method = "theoretical", success = "atheist")
|
a243aa3d6388eed9c880730f7403035a5f1e3a40
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/r/generated/R/ComDayCqWcmDesignimporterImplEntryPreprocessorImplProperties.r
|
d8d179f05430b10bbca06abd366a0b8d0a8b113e
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
R
| false
| false
| 3,707
|
r
|
ComDayCqWcmDesignimporterImplEntryPreprocessorImplProperties.r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqWcmDesignimporterImplEntryPreprocessorImplProperties Class
#'
#' Generated model class wrapping the two OSGI configuration properties of
#' the design-importer entry preprocessor. Each field holds an R6 object
#' (constructed as a ConfigNodePropertyString in fromJSON) rather than a
#' bare character value; `initialize` enforces this with `R6::is.R6()`.
#'
#' @field search.pattern R6 config-property object for the search pattern.
#' @field replace.pattern R6 config-property object for the replace pattern.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqWcmDesignimporterImplEntryPreprocessorImplProperties <- R6::R6Class(
  'ComDayCqWcmDesignimporterImplEntryPreprocessorImplProperties',
  public = list(
    `search.pattern` = NULL,
    `replace.pattern` = NULL,
    # Store the (optional) property objects; each supplied argument must be
    # an R6 instance, otherwise stopifnot() aborts construction.
    initialize = function(`search.pattern`, `replace.pattern`){
      if (!missing(`search.pattern`)) {
        stopifnot(R6::is.R6(`search.pattern`))
        self$`search.pattern` <- `search.pattern`
      }
      if (!missing(`replace.pattern`)) {
        stopifnot(R6::is.R6(`replace.pattern`))
        self$`replace.pattern` <- `replace.pattern`
      }
    },
    # Build a named list from each set field's own toJSON() representation;
    # NULL (unset) fields are skipped entirely.
    toJSON = function() {
      ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject <- list()
      if (!is.null(self$`search.pattern`)) {
        ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject[['search.pattern']] <- self$`search.pattern`$toJSON()
      }
      if (!is.null(self$`replace.pattern`)) {
        ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject[['replace.pattern']] <- self$`replace.pattern`$toJSON()
      }
      ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject
    },
    # Populate the fields from a JSON string, constructing a fresh
    # ConfigNodePropertyString for each key that is present in the input.
    fromJSON = function(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesJson) {
      ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject <- jsonlite::fromJSON(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesJson)
      if (!is.null(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject$`search.pattern`)) {
        search.patternObject <- ConfigNodePropertyString$new()
        search.patternObject$fromJSON(jsonlite::toJSON(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject$search.pattern, auto_unbox = TRUE))
        self$`search.pattern` <- search.patternObject
      }
      if (!is.null(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject$`replace.pattern`)) {
        replace.patternObject <- ConfigNodePropertyString$new()
        replace.patternObject$fromJSON(jsonlite::toJSON(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject$replace.pattern, auto_unbox = TRUE))
        self$`replace.pattern` <- replace.patternObject
      }
    },
    # Render the object as a JSON string via sprintf.
    # NOTE(review): unlike toJSON(), this assumes both fields are set — a
    # NULL field would make the `$toJSON()` call fail. Confirm callers only
    # use it on fully-populated objects.
    toJSONString = function() {
      sprintf(
        '{
           "search.pattern": %s,
           "replace.pattern": %s
        }',
        self$`search.pattern`$toJSON(),
        self$`replace.pattern`$toJSON()
      )
    },
    # Populate both fields from a JSON string. Unlike fromJSON() this does
    # not guard against missing keys, and it assigns the return value of
    # the helper object's fromJSON() call directly to the field.
    fromJSONString = function(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesJson) {
      ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject <- jsonlite::fromJSON(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesJson)
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`search.pattern` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject$search.pattern, auto_unbox = TRUE))
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`replace.pattern` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComDayCqWcmDesignimporterImplEntryPreprocessorImplPropertiesObject$replace.pattern, auto_unbox = TRUE))
    }
  )
)
|
3043bfa972ed541af84d8cc6415a5ac577ba345b
|
1367e80139e7cf4072aa1aed5413530476ba9ab0
|
/external/actions_navpanel.R
|
fd0d8989345ed8ff8ed62939ba98e9da66a47bf8
|
[] |
no_license
|
xinofekuator/ShinyFirstProject
|
73471ca9b6496553d3e3228394d323f23ab1f308
|
c06d018c9c5a23f8fa6f4889eb5bcb0814f3506b
|
refs/heads/master
| 2021-01-10T01:21:07.804541
| 2015-12-07T18:47:43
| 2015-12-07T18:47:43
| 47,571,742
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
actions_navpanel.R
|
# Server-side outputs for the "actions" navigation panel (shiny server
# context: `output` and `input` come from the enclosing server function).
# Renders two base-graphics plots plus two text outputs that echo the
# mouse-interaction (click and brush) coordinates on those plots.
output$carPlot <- renderPlot({ plot(cars, main = 'cars dataset')})
output$otherPlot <- renderPlot({ plot(faithful, main = 'faithful dataset')})
# Raw click coordinates (the components are NULL until the user clicks).
output$info <- renderText({
  paste0("x=", input$plot_click$x, "\ny=", input$plot_click$y)
})
output$info2 <- renderText({
  # Format a click event as "x=... y=..." ("NULL" when no event yet).
  xy_str <- function(e) {
    if(is.null(e)) return("NULL\n")
    paste0("x=", round(e$x, 1), " y=", round(e$y, 1), "\n")
  }
  # Format a brush (rectangular selection) event as its bounding box.
  xy_range_str <- function(e) {
    if(is.null(e)) return("NULL\n")
    paste0("xmin=", round(e$xmin, 1), " xmax=", round(e$xmax, 1),
           " ymin=", round(e$ymin, 1), " ymax=", round(e$ymax, 1))
  }
  paste0(
    "click: ", xy_str(input$plot_click),
    "brush: ", xy_range_str(input$plot_brush)
  )
})
|
14174d8a1b4af960f6d9113cb483fab51caa515c
|
2b16b0bb4b607ba7f18c87f0d3642e3a2a12c8f2
|
/CO2/CO2 calc/sub functions/General/01_SetConstants.R
|
5c7ecf3abcf13aab781fa1d8bec9984dd43afe40
|
[] |
no_license
|
low-decarie/Useful-R-functions
|
4f195cc23806ef54c0f180eac9cc0ab5c6d2fa7d
|
1dfca0de951d9f7a208aace26bf66f80188c5c13
|
refs/heads/master
| 2016-09-07T17:11:16.883656
| 2014-03-27T20:53:18
| 2014-03-27T20:53:18
| 3,854,769
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
01_SetConstants.R
|
# Physical and chemical constants shared by the CO2 calculation scripts.

# Gas constant in cm^3 atm K^-1 mol^-1
# (equivalently: library(marelac); R = 100 * Constants$gasCt1)
R <- 82.05784
# Volume expansion coefficient for borosilicate glass
alpha <- 0.00001
# Mole fractions for dry air (this assumes xCO2 is 0.00036)
xN2atm <- 0.78084
xO2atm <- 0.20946
xAratm <- 0.00934
# No nutrients included in this calculation
TP <- 0
TSi <- 0
T_4 <- TP
T_5 <- TSi
# Reference pressure for potential temperature
Pref <- 0
|
ba51d6b0f3a07bca39eaecaa45d0379dc745ac0a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MixMAP/examples/mixmapTest.Rd.R
|
31e04374664576943301f2793b3b87057ffb8dcb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 538
|
r
|
mixmapTest.Rd.R
|
# Extracted example code from the mixmapTest.Rd help page of the MixMAP
# package: run the MixMAP algorithm (hypothesis-testing framework) on the
# bundled example GWAS data and inspect the result.
library(MixMAP)
### Name: mixmapTest
### Title: Implements the MixMAP algorithm using hypothesis testing
###   framework.
### Aliases: mixmapTest
### Keywords: GWAS Mixed Models Genetics
### ** Examples
library(MixMAP)
#Load data
#This data has been prepared to be used as input to the MixMAP function
data(MixMAP_example)
#Run MixMAP. The string arguments name the p-value, SNP-id, chromosome,
#coordinate and gene columns of the input data frame.
MixOut<-mixmapTest(MixMAP_example,pval="GC.Pvalue",snp="MarkerName",
chr="Chr",coord="Coordinate",gene="Gene")
#Display first ten detected genes
summary(MixOut)
#MixManhattan Plot
plot(MixOut)
|
464daa24977b619d6a534f2f0e2a9927fbf15679
|
1f5489b5171979817c01b09f550a266e92edfc35
|
/Plot1.R
|
4c4263216739c948064c015119d7d1fbba6645af
|
[] |
no_license
|
sumitraBinu/ExData_Plotting1
|
53a7a4bb323205797aea2cd905b60b2ab4b6d699
|
45b9785e6538c1180e5eb69da9395633ea611fb5
|
refs/heads/master
| 2022-07-04T02:21:27.421601
| 2020-05-15T08:46:37
| 2020-05-15T08:46:37
| 264,104,340
| 0
| 0
| null | 2020-05-15T05:30:18
| 2020-05-15T05:30:17
| null |
UTF-8
|
R
| false
| false
| 1,252
|
r
|
Plot1.R
|
#Plot1: histogram of Global Active Power for 2007-02-01 and 2007-02-02
#from the UCI "Individual household electric power consumption" data set.

#Downloading and unzipping the file if it doesn't already exist
if(!file.exists('pocon.zip')){
  url <- "http://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
  download.file(url, destfile = "pocon.zip")
}
unzip("pocon.zip") # extracts household_power_consumption.txt

#Load and clean the data; "?" marks missing values in the raw file.
data_pow <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?",
                       colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
summary(data_pow)

#Keep only the two target days (dates are stored as d/m/yyyy strings).
subsetdata <- data_pow[data_pow$Date %in% c("1/2/2007","2/2/2007"),]
head(subsetdata)
tail(subsetdata)
typeof(data_pow$Global_active_power)

#Convenience vectors for the measured quantities on those days.
GAP <- subsetdata$Global_active_power
GRP <- subsetdata$Global_reactive_power
voltage <- subsetdata$Voltage
sub_metering_1 <- subsetdata$Sub_metering_1
sub_metering_2 <- subsetdata$Sub_metering_2
sub_metering_3 <- subsetdata$Sub_metering_3

#Plotting Histogram for Global Active Power Consumption
hist(GAP, col = "red", main = "Global Active Power", xlab = "Global Active Power (Kilowatts)")
#FIX: removed a trailing bare `hist()` call — hist() has no default for its
#required `x` argument, so it errored and aborted the script after the plot.
|
27f46c8148c26d2390a48f17f8373542fe4d3ae3
|
9ea93143b1c8c1f34f991c2c1dd1446c042aefc3
|
/R/reroute.R
|
996455de39a63ddc9cd7b6b676a7005e31ed3ca5
|
[] |
no_license
|
inambioinfo/tidygraph
|
2ecf217e087c428636982e08be03c190747f93ae
|
08f6f569d18629aa97f020540fb25d343678fab3
|
refs/heads/master
| 2021-06-21T17:22:38.349146
| 2017-08-18T20:46:22
| 2017-08-18T20:46:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,426
|
r
|
reroute.R
|
#' Change terminal nodes of edges
#'
#' The reroute verb lets you change the beginning and end node of edges by
#' specifying the new indexes of the start and/or end node(s). Optionally only
#' a subset of the edges can be rerouted using the subset argument, which should
#' be an expression that are to be evaluated in the context of the edge data and
#' should return an index compliant vector (either logical or integer).
#'
#' @param .data A tbl_graph or morphed_tbl_graph object. grouped_tbl_graph will
#' be ungrouped prior to rerouting
#' @param from,to The new indexes of the terminal nodes. If `NULL` nothing will
#' be changed
#' @param subset An expression evaluating to an indexing vector in the context
#' of the edge data.
#'
#' @return An object of the same class as .data
#' @export
#'
#' @examples
#' # Switch direction of edges
#' create_notable('meredith') %>%
#' activate(edges) %>%
#' reroute(from = to, to = from)
#'
#' # Using subset
#' create_notable('meredith') %>%
#' activate(edges) %>%
#' reroute(from = 1, subset = to > 10)
reroute <- function(.data, from = NULL, to = NULL, subset = NULL) {
  # S3 generic: dispatch on the class of `.data`
  # (tbl_graph and morphed_tbl_graph methods are defined in this file).
  UseMethod("reroute")
}
#' @export
#' @importFrom rlang enquo eval_tidy
#' @importFrom igraph is.directed
reroute.tbl_graph <- function(.data, from = NULL, to = NULL, subset = NULL) {
  # Register the graph in the context stack so graph-aware helpers can see
  # it, and make sure the context is cleared again when we return.
  .graph_context$set(.data)
  on.exit(.graph_context$clear())
  expect_edges()  # rerouting requires edges to be the active component
  # Defuse `from`/`to` so they can later be evaluated against the edge data.
  from <- enquo(from)
  to <- enquo(to)
  if (is.grouped_tbl_graph(.data)) {
    message('Ungrouping prior to rerouting edges')
    .data <- ungroup(.data)
  }
  edges <- as_tibble(.data, active = 'edges')
  # `subset` is evaluated against the full edge table; NULL selects all edges.
  subset <- enquo(subset)
  subset <- eval_tidy(subset, edges)
  if (is.null(subset)) subset <- seq_len(nrow(edges))
  edges_sub <- edges[subset, , drop = FALSE]
  # Evaluate the new endpoints in the context of the selected edges only,
  # recycling the result to the number of edges being rerouted.
  from <- eval_tidy(from, edges_sub)
  if (!is.null(from)) edges$from[subset] <- rep(from, length.out = nrow(edges_sub))
  to <- eval_tidy(to, edges_sub)
  if (!is.null(to)) edges$to[subset] <- rep(to, length.out = nrow(edges_sub))
  # Rebuild the graph with the modified edge table, carrying over graph
  # attributes (%gr_attr%), and leave edges as the active component.
  .data <- tbl_graph(
    nodes = as_tibble(.data, active = 'nodes'),
    edges = edges,
    directed = is.directed(.data)
  ) %gr_attr% .data
  active(.data) <- 'edges'
  .data
}
#' @export
#' @importFrom rlang enquo
reroute.morphed_tbl_graph <- function(.data, from = NULL, to = NULL, subset = NULL) {
  # Defuse all three expressions and splice them (`!!`) into each recursive
  # call so they are re-captured — and evaluated against the edge data — in
  # every sub-graph of the morphed representation.
  # FIX: `subset` was previously forwarded by value, so an expression such
  # as `subset = to > 10` was evaluated in the caller's environment instead
  # of the edge-data context, unlike `from`/`to`.
  from <- enquo(from)
  to <- enquo(to)
  subset <- enquo(subset)
  .data[] <- lapply(.data, reroute, from = !!from, to = !!to, subset = !!subset)
  .data
}
|
94284d083738019400039c523ea91ceeba89f082
|
265d146eba2dd4d001262f547f852b464cc5d525
|
/man/multicast.Rd
|
43eb1ac772f1e583794e7fff1fc32f42b6aa9545
|
[] |
no_license
|
cran/multicastR
|
f0160834ce0f36fb4fff1b9d6030b35a5318dbc1
|
c2621c1ceda8ed8b1fe3969abfb6765eb035b56c
|
refs/heads/master
| 2021-06-11T18:19:39.382533
| 2021-02-22T18:20:02
| 2021-02-22T18:20:02
| 137,462,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,520
|
rd
|
multicast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multicast.R
\name{multicast}
\alias{multicast}
\title{Access Multi-CAST annotation data}
\usage{
multicast(vkey = NULL)
}
\arguments{
\item{vkey}{A four-digit number specifying the requested version of the
metadata. Must be one of the version keys listed in the first column of
\code{\link{mc_index}}, or empty. If empty, the most recent version of the
metadata is retrieved automatically.}
}
\value{
A \code{\link{data.frame}} with eleven columns: \describe{
\item{\code{[, 1] corpus}}{The name of the corpus.} \item{\code{[, 2]
text}}{The name of the text.} \item{\code{[, 3] uid}}{The utterance
identifier. Uniquely identifies an utterance within a text.} \item{\code{[,
4] gword}}{Grammatical words. The tokenized utterances in the object
language.} \item{\code{[, 5] gloss}}{Morphological glosses following the
Leipzig Glossing Rules.} \item{\code{[, 6] graid}}{Annotations with the
GRAID scheme (Haig & Schnell 2014).} \item{\code{[, 7] gform}}{The form
symbol of a GRAID gloss.} \item{\code{[, 8] ganim}}{The person-animacy
symbol of a GRAID gloss.} \item{\code{[, 9] gfunc}}{The function symbol of
a GRAID gloss.} \item{\code{[, 10] refind}}{Referent tracking using the
RefIND scheme (Schiborr et al. 2018).} \item{\code{[, 11]
isnref}}{Annotations of the information status of newly introduced
referents.} }
}
\description{
\code{multicast} downloads corpus data from the Multi-CAST collection (Haig &
Schnell 2015) from the servers of the University of Bamberg. As the
Multi-CAST collection is continuously evolving through the addition of
further data sets and the revision of older annotations, the \code{multicast}
function takes an optional argument \code{vkey} to select earlier versions of
the annotation data, ensuring scientific accountability and the
reproducibility of results.
}
\section{Licensing}{
The Multi-CAST annotation data accessed by this package
are published under a \emph{Create Commons Attribution 4.0 International}
(CC-BY 4.0) licence
(\url{https://creativecommons.org/licenses/by-sa/4.0/}). Please refer to
the Multi-CAST website for information on how to give proper credit to its
contributors.
}
\section{Citing Multi-CAST}{
Data from the Multi-CAST collection should be
cited as: \itemize{ \item Haig, Geoffrey & Schnell, Stefan (eds.). 2015.
\emph{Multi-CAST: Multilingual Corpus of Annotated Spoken Texts}.
(\url{https://multicast.aspra.uni-bamberg.de/}) (Accessed \emph{date}.) } If
for some reason you need to cite this package specifically, please refer to
\code{citation(multicastR)}.
}
\section{References}{
\itemize{\item Haig, Geoffrey & Schnell, Stefan. 2014.
\emph{Annotations using GRAID (Grammatical Relations and Animacy in
Discourse): Introduction and guidelines for annotators.} Version 7.0.
(\url{https://multicast.aspra.uni-bamberg.de/#annotations})
\item Schiborr, Nils N. & Schnell, Stefan & Thiele, Hanna. 2018.
\emph{RefIND -- Referent Indexing in Natural-language Discourse: Annotation
guidelines.} Version 1.1.
(\url{https://multicast.aspra.uni-bamberg.de/#annotations})}
}
\examples{
\dontrun{
# retrieve and print the most recent version of the
# Multi-CAST annotations
multicast()
# retrieve the version of the annotation data published
# in January 2021
multicast(2021)
}
}
\seealso{
\code{\link{mc_index}}, \code{\link{mc_metadata}},
\code{\link{mc_referents}}, \code{\link{mc_clauses}}
}
|
6c28547bb81a957cc4e5c9acc335ebf06d49efb5
|
fbbc021e6029baf5899c0e0f668d9f69f82eaa19
|
/man/joinRtData.Rd
|
5fb1a215701dd1a867dc1c09f1445f4e00406b58
|
[
"MIT"
] |
permissive
|
RichardMN/RtD3
|
a54aee2a7e56ba6e436f34d43cddc626c7cc5c34
|
07e1c4e77a95f99a1882a567e34f0e3c5e441116
|
refs/heads/master
| 2023-01-08T14:35:38.712936
| 2020-11-08T17:16:33
| 2020-11-08T17:16:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,082
|
rd
|
joinRtData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/joinRtData.R
\name{joinRtData}
\alias{joinRtData}
\title{Join RtData}
\usage{
joinRtData(rtData, rtData2)
}
\arguments{
\item{rtData}{A nested list as required by \code{summaryWidget}}
\item{rtData2}{A nested list as required by \code{summaryWidget}}
}
\value{
A nested list as required by \code{summaryWidget}
}
\description{
Joins two nested lists in the format required by \code{summaryWidget}. This may
be useful for merging estimates from disparate data sources or linking national level estimates
with subnational estimates
}
\examples{
\donttest{
base_url <- "https://raw.githubusercontent.com/epiforecasts/covid-rt-estimates/master/"
subnational <- national <- list("Cases" = readInEpiNow2(
path = paste0(base_url, "subnational/italy/cases/summary"),
region_var = "region"))
national <- list("Cases" = readInEpiNow2(
path = paste0(base_url, "national/cases/summary"),
region_var = "country"),
regions = "Italy")
out <- list()
out$Cases <- joinRtData(subnational$Cases, national$Cases)
}
}
|
e60f1a08cb9ea914197236251aed7443017e00b3
|
e6a401ae8cc996ed76881d9d7467a5b6167d5b7b
|
/Scripts/ICEWS_Sources_Git.R
|
406dbbb1e0287edf78bc356c88a895b5251e4886
|
[] |
no_license
|
ZacharyST/ICEWS
|
1d40b7a7b7ad0963e72c7c62e54ef34e3b34c355
|
5379e41755287a94f8da7d0acb33f2b2a5105153
|
refs/heads/master
| 2016-09-08T01:12:30.043360
| 2015-06-13T04:10:15
| 2015-06-13T04:10:15
| 33,836,773
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 619
|
r
|
ICEWS_Sources_Git.R
|
# This script loads 2 years of ICEWS data and reads the source column.
# The goal is to create a list of all sources used in ICEWS.
# FIX: the original wrapped this description in '''...''' (a Python-style
# docstring), which is a syntax error in R; it is now a comment block.

#Load data (tab-separated event files)
data <- read.csv('/Data/ICEWS/events.2010.20150313084533.tab',header=TRUE,sep='\t')
data <- rbind(data,read.csv('/Data/ICEWS/events.2011.20150313084656.tab',header=TRUE,sep='\t'))

#Take set of publishers, sorted alphabetically
#(the redundant `publishers <- data$Publisher` assignment, which was
#immediately overwritten, has been removed)
publishers <- unique(data$Publisher)
publishers <- sort(as.character(publishers))

#Save
write.table(publishers,file='/Data/ICEWS/ICEWS_ListOfPublishers.csv', sep=',',row.names=FALSE,col.names='Publisher')
|
84dfafda11bf17b13396777087e692670a785ead
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.internet.of.things/man/iot_update_audit_suppression.Rd
|
a882b83f1c124821839f53167d9cce8559c15c94
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,371
|
rd
|
iot_update_audit_suppression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_update_audit_suppression}
\alias{iot_update_audit_suppression}
\title{Updates a Device Defender audit suppression}
\usage{
iot_update_audit_suppression(checkName, resourceIdentifier,
expirationDate, suppressIndefinitely, description)
}
\arguments{
\item{checkName}{[required]}
\item{resourceIdentifier}{[required]}
\item{expirationDate}{The expiration date (epoch timestamp in seconds) that you want the
suppression to adhere to.}
\item{suppressIndefinitely}{Indicates whether a suppression should exist indefinitely or not.}
\item{description}{The description of the audit suppression.}
}
\value{
An empty list.
}
\description{
Updates a Device Defender audit suppression.
}
\section{Request syntax}{
\preformatted{svc$update_audit_suppression(
checkName = "string",
resourceIdentifier = list(
deviceCertificateId = "string",
caCertificateId = "string",
cognitoIdentityPoolId = "string",
clientId = "string",
policyVersionIdentifier = list(
policyName = "string",
policyVersionId = "string"
),
account = "string",
iamRoleArn = "string",
roleAliasArn = "string"
),
expirationDate = as.POSIXct(
"2015-01-01"
),
suppressIndefinitely = TRUE|FALSE,
description = "string"
)
}
}
\keyword{internal}
|
171a616f90a2675aec4d300eb0b2ef613335dc99
|
5e64e69fc69cb20dca1d497a8b2022dc66190456
|
/man/genericTest.Rd
|
846e76c5602cbf106601022cc94ec5c1e12c0934
|
[] |
no_license
|
wahani/aoos
|
4f058332b2ed8c1aa400c427c69043764a22a9a0
|
232e0f930fd3e16f7531cbf16fd6cf0032d0d83f
|
refs/heads/master
| 2020-05-20T13:59:15.248798
| 2017-05-06T17:46:38
| 2017-05-06T17:46:38
| 26,126,717
| 4
| 1
| null | 2015-01-14T08:14:51
| 2014-11-03T16:07:51
|
R
|
UTF-8
|
R
| false
| true
| 508
|
rd
|
genericTest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/S4-generics-test.R
\docType{methods}
\name{.genericTest}
\alias{.genericTest}
\alias{.genericTest,numeric-method}
\title{Generic Test}
\usage{
.genericTest(x, ...)
\S4method{.genericTest}{numeric}(x, ..., methodParam = function() 1)
}
\arguments{
\item{x}{Object}
\item{...}{Object}
\item{methodParam}{Object}
}
\description{
This generic function only exists to test that the roxygen2 parser works
correctly. Just ignore it.
}
|
59f73b42511680a6ef9cc7438466d41861008afa
|
5764dcff9c201b8d889f5bb8608f35643fecac53
|
/class 10 Student Data Regression/eda.r
|
e209bd19728c0fb89fa959d79e5d740f9511f444
|
[] |
no_license
|
goforaditya/R-for-Statistics-and-Data-Science
|
2c87987ac97c825dd78d71ddfa607fd373c7fe58
|
f7eb3eb3de98d7040471ca6589c2ed823b65a0c7
|
refs/heads/master
| 2022-12-14T07:13:00.353114
| 2018-05-01T18:48:25
| 2018-05-01T18:48:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 822
|
r
|
eda.r
|
# Exploratory data analysis of the UCI student performance (math) data set.
setwd("D:\\MS BDA I SEM\\Computing For Data Science\\R\\class 10 Student Data Regression")
df <- read.csv('student-mat.csv',sep = ';')  # file is semicolon-delimited
str(df)
head(df)
summary(df)
# Checking for NAs
any(is.na(df))
# Exploratory Data Analysis
library(ggplot2)
library(ggthemes)
library(dplyr)
# Correlation
# grab only numeric columns (logical mask, one entry per column)
num.cols <- sapply(df, is.numeric)
head(num.cols)
library(corrgram)
library(corrplot)
help("corrplot")
# Take Only numeric columns for correlation
cor.data <- cor(df[,num.cols])
# Plotting The Correlations
corrplot(cor.data, method = 'color')
# Automatically (corrgram works directly on the mixed-type data frame)
corrgram(df, order = T, lower.panel = panel.shade, upper.panel = panel.pie, text.panel = panel.txt)
# Histogram of the final grade column (G3)
ggplot(df,aes(x=G3)) + geom_histogram(bins = 20, alpha = 0.5, fill = 'blue') + theme_minimal()
|
bd4f995107b40b5fb1490909bfea3ddb3e3a2d39
|
4d60c4ae06f4a53690ed1fbb30f123d2ea6c87d6
|
/generate-eqtl-ranef-data
|
f4978a5af1c2995341ada09130df6af7146c744f
|
[] |
no_license
|
antoniofabio/eqtl-ranef
|
67841e6f204b99b1d821a76f58b5b8a209d1acc2
|
602ce8a77b22e50accab3827945622166efdb8c0
|
refs/heads/master
| 2021-01-02T09:32:47.661398
| 2016-09-22T11:44:32
| 2016-09-22T11:44:32
| 21,175,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,980
|
generate-eqtl-ranef-data
|
#!/usr/bin/env Rscript
# Generate a synthetic random-effects eQTL data set: fixed- and
# random-effects design tables, outcome/regressor matrices in long format,
# and SQLite gene/SNP position annotation databases, all written to the
# folder named by --output (which is wiped first if it exists).
suppressMessages({
  library(RSQLite)
  library(methods)
  library(optparse)
  library(reshape)
  library(plyr)
})
options(warn = 1)
# Command-line options; all sizes must be strictly positive.
option_list <-
  list(
    make_option("--regressors", help = "# regressors [default: 2]", default = 2),
    make_option("--outcomes", help = "# outcomes [default: 2] ", default = 2),
    make_option("--samples", help = "# samples [default: 10] ", default = 10),
    make_option("--fixef", help = "# fixed effects terms [default: 1]", default = 1),
    make_option("--output", help = "output folder name [default: 'output']", default = "output"),
    make_option("--seed", help = "RNG seed [default: 1234]", default = 1234)
  )
parser <- OptionParser(usage = "%prog [options]",
                       description = "generate random effects eqtls data",
                       option_list = option_list)
opts <- parse_args(parser, positional_arguments = FALSE)
##
## check sanity of input options
##
with(opts, {
  stopifnot(regressors > 0)
  stopifnot(outcomes > 0)
  stopifnot(samples > 0)
  stopifnot(fixef > 0)
})
set.seed(opts$seed)
# Synthetic identifiers for outcomes, regressors and samples.
outcomesIDs <- paste0("outcome", seq_len(opts$outcomes))
regressorsIDs <- paste0("regressor", seq_len(opts$regressors))
samplesIDs <- paste0("sample", seq_len(opts$samples))
# Fixed-effects design: samples x fixef matrix of uniforms, as a data frame
# keyed by sampleID.
F <- with(opts, matrix(runif(fixef * samples),
                       nrow = samples,
                       ncol = fixef,
                       dimnames = list(sampleID = samplesIDs,
                                       paste0("F", seq_len(fixef)))))
F <- data.frame(sampleID = rownames(F), F)
# Random-effects design: alternate samples between two grouping levels.
R <- data.frame(sampleID = samplesIDs,
                R1 = gl(n = 2, k = 1, length = length(samplesIDs)))
# Outcomes: outcomes x samples normals, melted to long format and sorted.
Y <- with(opts, matrix(rnorm(outcomes * samples),
                       nrow = outcomes,
                       ncol = samples,
                       dimnames = list(outcomeID = outcomesIDs,
                                       sampleID = samplesIDs)))
Y <- melt(Y)
Y <- Y[with(Y, order(outcomeID, sampleID)), ]
# Regressors: regressors x samples uniforms, melted to long format.
X <- with(opts, matrix(runif(regressors * samples),
                       nrow = regressors,
                       ncol = samples,
                       dimnames = list(regressorID = regressorsIDs,
                                       sampleID = samplesIDs)))
X <- melt(X)
X <- X[with(X, order(regressorID, sampleID)), ]
# Log the generated objects to stderr (stdout stays clean for piping).
sink(stderr())
ls.str()
sink()
##
## generate annotation data
##
# Genes and SNPs are spread evenly along a single synthetic chromosome.
chromosomeLength <- max(2 * length(outcomesIDs), length(regressorsIDs))
message("chromosomeLength = ", chromosomeLength)
genesStart <- floor(chromosomeLength * seq(0, 1, length = length(outcomesIDs))) + 1
genespos <-
  data.frame(reporterID = outcomesIDs,
             chromosome = "1",
             start = genesStart,
             end = genesStart + 1)
sink(stderr())
print(head(genespos))
sink()
snpspos <-
  data.frame(SNPID = regressorsIDs,
             chromosome = "1",
             position = floor(chromosomeLength * seq(0, 1, length = length(regressorsIDs))) + 1)
sink(stderr())
print(head(snpspos))
sink()
##
## write generated data on disk
##
if(file.exists(opts$output)) {
  stopifnot(unlink(opts$output, recursive = TRUE) == 0)
}
dir.create(opts$output, recursive = TRUE)
# Write a headerless tab-separated table into the output folder.
dumpTable <- function(value, fileName) {
  write.table(value,
              file = file.path(opts$output, fileName),
              row.names = FALSE, col.names = FALSE, sep = "\t", quote = FALSE)
}
dumpTable(F, "fixef")
dumpTable(R, "ranef")
dumpTable(Y, "outcomes")
dumpTable(X, "regressors")
# Annotation databases with an index on the lookup column.
# FIX: use dbExecute() for the CREATE INDEX statements — dbGetQuery() is
# meant for queries that return rows and warns/misbehaves on DDL in DBI.
# (Assignment to `ignore` suppresses Rscript auto-printing, as before.)
db <- dbConnect(dbDriver("SQLite"), file.path(opts$output, "genespos.sqlite"))
stopifnot(dbWriteTable(db, "genespos", genespos))
ignore <- dbExecute(db, "CREATE INDEX reporterIndex on genespos(reporterID)")
stopifnot(dbDisconnect(db))
db <- dbConnect(dbDriver("SQLite"), file.path(opts$output, "snpspos.sqlite"))
stopifnot(dbWriteTable(db, "snpspos", snpspos))
ignore <- dbExecute(db, "CREATE INDEX SNPIndex on snpspos(SNPID)")
stopifnot(dbDisconnect(db))
message("analysis completed.")
|
|
f3959344ce94db7ff10ef542c58ec2b36fff526b
|
c8eb502f925a9b9d8f25420a9f57d26ad218c8d7
|
/HFI.02B.Expertise.calculation.R
|
dbb0314e8152052c6d99c6c749005905b265c566
|
[] |
no_license
|
victorcazalis/sensitivity_paper
|
a319704508f54da925b3e092fafe5fb90a59cf1a
|
55edf1449c4695894c381540b923a3e49b9e0c86
|
refs/heads/main
| 2023-04-08T06:55:12.008934
| 2021-06-21T05:17:01
| 2021-06-21T05:17:01
| 378,814,180
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,843
|
r
|
HFI.02B.Expertise.calculation.R
|
### Charge all checklist files and merge
chlist1<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/chlist.expertise.Q1merged.rds"))
chlist2<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/chlist.expertise.Q2merged.rds"))
chlist3<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/chlist.expertise.Q3merged.rds"))
chlist4<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/chlist.expertise.Q4merged.rds"))
chlist5<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/chlist.expertise.Q5merged.rds"))
chlist6<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/chlist.expertise.Q6merged.rds"))
chlist7<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/chlist.expertise.Q7merged.rds"))
chlist<-rbind(chlist1, chlist2, chlist3, chlist4, chlist5, chlist6, chlist7)
### Charge all observer files and merge
observer1<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/observer.expertise.Q1merged.rds"))
observer2<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/observer.expertise.Q2merged.rds"))
observer3<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/observer.expertise.Q3merged.rds"))
observer4<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/observer.expertise.Q4merged.rds"))
observer5<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/observer.expertise.Q5merged.rds"))
observer6<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/observer.expertise.Q6merged.rds"))
observer7<-readRDS(paste0(GBrow, "0.Data/1.Exported4Expertise/Qmerged/observer.expertise.Q7merged.rds"))
observer<-rbind(observer1, observer2, observer3, observer4, observer5, observer6, observer7)
### For each observer, calculate the number of checklists, observations and species
obs<-ddply(observer, .(observer_id), function(x){data.frame(
Nb_checklist=sum(x$Nb_checklist),
Nb_obs=sum(x$Nb_obs),
Nb_spc=length(unique(unlist(strsplit(as.character(x$Species), ";")))),
Species=paste0(unique(unlist(strsplit(as.character(x$Species), ";"))), collapse=";")
)})
### Remove unexperienced observers
tapply(obs$Nb_checklist, obs$Nb_checklist>=50 & obs$Nb_spc>=100, sum) # Number of checklist I'll remove
tapply(obs$Nb_checklist, obs$Nb_checklist>=50 & obs$Nb_spc>=100, length) # Number of observers I'll remove
obs.to.exclude<-subset(obs, obs$Nb_spc<100 | obs$Nb_checklist<50)
chlist2<-subset(chlist, chlist$observer %not in% obs.to.exclude$observer_id)
chlist2$observer<-droplevels(chlist2$observer)
# Remove NAs
cat(paste(100*round(table(is.na(chlist2$duration))["TRUE"]/nrow(chlist2),2), " % of duration values are NA"))
chlist2<-chlist2[,c("checklist", "rich", "protocol", "duration", "n.observers", "time.min", "lon", "lat", "day", "observer")]
chlist3<-chlist2[complete.cases(chlist2),]
chlist3$observer<-droplevels(chlist3$observer)
#################################
### MAKE MODELS FOR EXPERTISE ###
#################################
### Modele in two steps
# GAM (for big datasets)
mod.fix<-mgcv::bam(
rich ~ protocol + n.observers + s(duration) + s(time.min) + te(lon, lat, day),
data=chlist3,
family="nb"
)
# GAM (for big datasets)
chlist3$Residus<-residuals(mod.fix)
mod.random<-nlme::lme(
Residus ~ 1, random =~1|observer,
data=chlist3
)
### Predict
dfExp<-data.frame(
protocol="Stationary",
n.observers=median(chlist3$n.observers, na.rm=T),
duration=median(chlist3$duration, na.rm=T),
time.min=median(chlist3$time.min, na.rm=T),
lon=median(chlist3$lon, na.rm=T),
lat=median(chlist3$lat, na.rm=T),
day=median(chlist3$day, na.rm=T)
)
# Extract fixed effect prediction (unique value)
Pred.fix<-predict(mod.fix, newdata=dfExp)
# Add random effects
Pred.obs<-as.data.frame(nlme::ranef(mod.random))
names(Pred.obs)[1]<-"Pred"
Pred.obs$Pred<-Pred.obs$Pred + as.numeric(Pred.fix)
### SAVE THE EXPERTISE SCORE
obs$obsKelling<-Pred.obs$Pred[match(obs$observer_id, rownames(Pred.obs))]
saveRDS(obs, paste0(GBrow, "1.Tables/Expertise.scores.table.rds"))
############################################
### Graphics of fit + covariates effects ###
############################################
pdf(paste0("D:/eBird/HFI.project/Figures/Check.figures/Obs.expertise.HFI1.pdf"))
hist(chlist3$rich, breaks=50, main="Richness distribution (check Poisson distribution)")
hist(Pred.obs$Pred, breaks=30, xlab="Kelling observer expertise score", main="Expertise score distribution")
par(mfrow=c(2,2)) ; mgcv::gam.check(mod.fix) ; par(mfrow=c(1,1))
### Covariates effects
# Duration
ndf.dur<-data.frame(duration=c(min(chlist3$duration):max(chlist3$duration)), protocol="Stationary", n.observers=median(chlist3$n.observers, na.rm=T), time.min=median(chlist3$time.min, na.rm=T), day=median(chlist3$day, na.rm=T), lat=median(chlist3$lat, na.rm=T), lon=median(chlist3$lon, na.rm=T))
ndf.dur[,8:9]<-predict(mod.fix, ndf.dur, se.fit=T)
ndf.dur$min<-ndf.dur$fit-1.96*ndf.dur$se.fit
ndf.dur$max<-ndf.dur$fit+1.96*ndf.dur$se.fit
for(i in c(8,10,11)){ndf.dur[,i]<-exp(ndf.dur[,i])}
ggplot(ndf.dur)+
geom_line(aes(x=duration, y=fit))+
geom_line(aes(x=duration, y=min), linetype="dashed")+
geom_line(aes(x=duration, y=max), linetype="dashed")+
ggtitle("Model covariates: duration")+
scale_y_continuous(limits=c(0, 2*max(ndf.dur$fit, na.rm=T)))
# Time.min
ndf.time<-data.frame(time.min=c(min(chlist3$time.min):max(chlist3$time.min)), protocol="Stationary", n.observers=median(chlist3$n.observers, na.rm=T), duration=median(chlist3$duration, na.rm=T), day=median(chlist3$day, na.rm=T), lat=median(chlist3$lat, na.rm=T), lon=median(chlist3$lon, na.rm=T))
ndf.time[,8:9]<-predict(mod.fix, ndf.time, se.fit=T)
ndf.time$min<-ndf.time$fit-1.96*ndf.time$se.fit
ndf.time$max<-ndf.time$fit+1.96*ndf.time$se.fit
for(i in c(8,10,11)){ndf.time[,i]<-exp(ndf.time[,i])}
ggplot(ndf.time)+
geom_line(aes(x=time.min, y=fit))+
geom_line(aes(x=time.min, y=min), linetype="dashed")+
geom_line(aes(x=time.min, y=max), linetype="dashed")+
ggtitle("Model covariates: Starting time")+
scale_y_continuous(limits=c(0, 2*max(ndf.time$fit, na.rm=T)))
# n.observers
ndf.nobs<-data.frame(n.observers=c(min(chlist3$n.observers):max(chlist3$n.observers)), protocol="Stationary", duration=median(chlist3$duration, na.rm=T), time.min=median(chlist3$time.min, na.rm=T), day=median(chlist3$day, na.rm=T), lat=median(chlist3$lat, na.rm=T), lon=median(chlist3$lon, na.rm=T))
ndf.nobs[,8:9]<-predict(mod.fix, ndf.nobs, se.fit=T)
ndf.nobs$min<-ndf.nobs$fit-1.96*ndf.nobs$se.fit
ndf.nobs$max<-ndf.nobs$fit+1.96*ndf.nobs$se.fit
for(i in c(8,10,11)){ndf.nobs[,i]<-exp(ndf.nobs[,i])}
ggplot(ndf.nobs)+
geom_line(aes(x=n.observers, y=fit))+
geom_line(aes(x=n.observers, y=min), linetype="dashed")+
geom_line(aes(x=n.observers, y=max), linetype="dashed")+
ggtitle("Model covariates: Number of observers")+
scale_y_continuous(limits=c(0, 2*max(ndf.nobs$fit, na.rm=T)))
# Day map
ndf.map<-as.data.frame(expand.grid(rowN=rownames(chlist3), PA="unPA", day=c(15,74,135,196,258,319), duration=median(chlist3$duration), protocol="Stationary", time.min=median(chlist3$time.min, na.rm=T), n.observers=median(chlist3$n.observers, na.rm=T)))
ndf.map$lon<-chlist3[match(ndf.map$rowN, rownames(chlist3)), "lon"]
ndf.map$lat<-chlist3[match(ndf.map$rowN, rownames(chlist3)), "lat"]
ndf.map$fit<-predict(mod.fix, ndf.map)
ndf.map$fit<-(ndf.map$fit)
ndf.map$day<-revalue(as.factor(ndf.map$day), c("15"="January","74"="March","135"="May","196"="July","258"="September","319"="November"))
ggplot(ndf.map)+
stat_summary_hex(aes(x=lon, y=lat, z=DescTools::Winsorize(fit)))+
ggtitle("Model covariates: Spatio-temporal variations")+
facet_wrap(~day)
dev.off()
|
91c2612a35184f0d67aca6650320c08b25e796eb
|
9ced058004c19ba00d837a8e456817d56a565c9d
|
/tests/testthat/test-oc_bbox.R
|
2a978d5b5b36447f3b0b1782c44a92e8b5011bb8
|
[] |
no_license
|
cran/opencage
|
84594102736a8d97869cceb15ec774c5d7af0f41
|
11a46b26ae7b13a3eca36a2b4a42fa3c998a4361
|
refs/heads/master
| 2021-05-15T01:06:06.777397
| 2021-02-20T00:00:02
| 2021-02-20T00:00:02
| 58,643,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,121
|
r
|
test-oc_bbox.R
|
# Test oc_bbox ------------------------------------------------------------

test_that("oc_bbox works with numeric", {
  # Four bare coordinates yield a one-element list of "bbox" objects.
  bb_numeric <- oc_bbox(-5.6, 51.2, 0.2, 51.6)
  expect_type(bb_numeric, "list")
  expect_s3_class(bb_numeric[[1]], "bbox")
  expect_equal(
    unlist(bb_numeric),
    c(xmin = -5.6, ymin = 51.2, xmax = 0.2, ymax = 51.6)
  )
  expect_output(
    object = print(bb_numeric),
    regexp = "xmin\\s+ymin\\s+xmax\\s+ymax\\s+\\n-5.6\\s+51.2\\s+0.2\\s+51.6"
  )
})

test_that("oc_bbox works with data.frame", {
  # Two bounding boxes supplied as columns of a data frame.
  coord_df <- data.frame(
    northeast_lat = c(54.0, 42.73),
    northeast_lng = c(10.3, -78.81),
    southwest_lat = c(53.3, 42.70),
    southwest_lng = c(8.1, -78.86)
  )
  bb_from_df <- oc_bbox(
    coord_df,
    southwest_lng,
    southwest_lat,
    northeast_lng,
    northeast_lat
  )
  expect_type(bb_from_df, "list")
  expect_s3_class(bb_from_df[[1]], "bbox")
  expect_equal(
    unlist(bb_from_df[1]),
    c(xmin = 8.1, ymin = 53.3, xmax = 10.3, ymax = 54.0)
  )
  expect_equal(
    unlist(bb_from_df[2]),
    c(xmin = -78.86, ymin = 42.70, xmax = -78.81, ymax = 42.73)
  )
})

test_that("oc_bbox works with simple features bbox", {
  skip_if_not_installed("sf")
  # An sf bbox in EPSG 4326 converts cleanly...
  sf_box <- sf::st_bbox(
    c(xmin = 16.1, xmax = 16.6, ymax = 48.6, ymin = 47.9),
    crs = 4326
  )
  oc_box <- oc_bbox(sf_box)
  expect_type(oc_box, "list")
  expect_s3_class(oc_box[[1]], "bbox")
  expect_equal(
    unlist(oc_box),
    c(xmin = 16.1, ymin = 47.9, xmax = 16.6, ymax = 48.6)
  )
  # ...while any other CRS (here Web Mercator) is rejected.
  sf_box_mercator <-
    sf::st_bbox(
      c(xmin = 1792244, ymin = 6090234, xmax = 1847904, ymax = 6207260),
      crs = 3857
    )
  expect_error(
    object = oc_bbox(sf_box_mercator),
    regexp = "The coordinate reference system of `bbox` must be EPSG 4326."
  )
})

test_that("oc_bbox.default gives informative error message", {
  expect_error(
    object = oc_bbox(TRUE),
    regexp = "Can't create a list of bounding boxes",
    fixed = TRUE
  )
})

# Test checks for oc_bbox -------------------------------------------------

test_that("oc_bbox checks bbox", {
  # Missing or non-numeric components are rejected.
  expect_error(
    oc_bbox(NA_real_, 51.280430, 0.278970, 51.683979),
    "Every `bbox` element must be non-missing."
  )
  expect_error(
    oc_bbox(-0.563160, "51.280430", 0.278970, 51.683979),
    "Every `bbox` must be a numeric vector."
  )
  # Coordinates outside valid lon/lat ranges are rejected.
  expect_error(
    oc_bbox(-563160, 51.280430, 0.278970, 51.683979),
    "`xmin` must be between -180 and 180."
  )
  expect_error(
    oc_bbox(-0.563160, 51280430, 0.278970, 51.683979),
    "`ymin` must be between -90 and 90."
  )
  expect_error(
    oc_bbox(-0.563160, 51.280430, 278970, 51.683979),
    "`xmax` must be between -180 and 180."
  )
  expect_error(
    oc_bbox(-0.563160, 51.280430, 0.278970, 51683979),
    "`ymax` must be between -90 and 90."
  )
  # Inverted bounds are rejected.
  expect_error(
    oc_bbox(0.563160, 51.280430, 0.278970, 51.683979),
    "`xmin` must always be smaller than `xmax`"
  )
  expect_error(
    oc_bbox(-0.563160, 53.280430, 0.278970, 51.683979),
    "`ymin` must always be smaller than `ymax`"
  )
})
|
fe2b98dab306402365e1bb220adeb0e0b32cffa8
|
4f60f5253e9fb3129309c9f65b8b0358da9edc49
|
/server.R
|
e2321c9c2a2ca78e02b31a53fd86ec3c7b73a36f
|
[] |
no_license
|
drwo/CellarMasters
|
5c2c338021ee6922e1cb3035e160e8cc64028c7b
|
e327840471098e2e9dabcc125c5a272056706e4e
|
refs/heads/master
| 2020-06-04T10:58:30.771965
| 2020-01-12T19:17:15
| 2020-01-12T19:17:15
| 191,992,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,670
|
r
|
server.R
|
# Shiny server for the CellarMasters app: gates all tabs behind a login
# modal, then lazily initialises each tab (browse / add wines / manage).
shinyServer(function(input, output, session) {
  # print(test.run)
  # Build the login dialog; with failed = TRUE it is re-shown with an
  # inline error message.
  loginModal <- function(failed = FALSE) {
    modalDialog(
      textInput("user.id", "Enter user ID and password",
                placeholder = "user ID"),
      passwordInput("password", label = "", placeholder = "password"),
      if (failed)
        div(tags$b("Invalid user ID or password", style = "color: red;")),
      footer = tagList(
        actionButton("exit", "Cancel"),
        actionButton("ok", "OK")
      )
    )
  }
  # Prompt for credentials as soon as the session starts.
  showModal(loginModal())
  # Cancel closes the whole app.
  observeEvent(input$exit, {stopApp()})
  # Per-tab server logic, sourced into this session's environment.
  source("./sources/serve.browseTab.R", local = T)
  source("./sources/serve.addWinesTab.R", local = T)
  source("./sources/serve.manageTab.R", local = T)
  # OK: validate credentials against the database; on success initialise the
  # session (cm) and the browse view, otherwise re-show the modal with an error.
  # NOTE(review): `cm` is assigned with `<-` inside this handler, so it is
  # local to the handler; the `cm$logged.in` read below likely resolves to a
  # `cm` created in the sourced files -- confirm the intended scoping.
  observeEvent(input$ok, {
    if (db.valid.login(input$user.id, input$password)) {
      cm <- init.cm(input$user.id, input$password)
      init.browsing()
      removeModal()
    }
    else {
      showModal(loginModal(failed = TRUE))
    }
  })
  # Initialise whichever tab the user navigates to.
  observeEvent(input$navbar.page, {
    if (input$navbar.page == "browse.tab") {
      # print("browse.tab")
      # keep this if statement. When the app starts up, the first thing that
      # happens in ui.R is that the navbar page is displayed, for which the
      # first tab displayed is the browse.tab; this check ensures that no
      # wines are displayed until there is a valid log in
      if (cm$logged.in) {
        init.browsing()
      }
    } else if (input$navbar.page == "addWines.tab") {
      # print("addWines.tab")
      init.add.wines()
    }
    else {
      # must be manage tab
      init.manage.wines()
    }
  })
  # NOTE(review): a commented-out init.cm() call containing what appeared to
  # be real credentials was removed here; rotate that password if it was live.
})
|
71b4db6f759404257dc14a5ed61242b2626606f2
|
3f172286547e4e01c2fbf3345a3f6f7150cdab3d
|
/man/core_compare_functions.Rd
|
2fa4e5e2eccbf5cd6fb9ca92b051718b6d2a30a5
|
[] |
no_license
|
sujeetp97/compare-functions
|
8b0a546377fd957aba5ada57a389ca4df89bf001
|
d55c85239ab9ebe18840dd48950a5c3f723c862d
|
refs/heads/master
| 2021-07-22T10:50:39.157845
| 2017-10-31T08:38:41
| 2017-10-31T08:38:41
| 104,383,661
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,445
|
rd
|
core_compare_functions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_functions.R
\name{core_compare_functions}
\alias{core_compare_functions}
\title{core_compare_functions}
\usage{
core_compare_functions(functions, param, size)
}
\arguments{
\item{functions}{A vector of single parameter functions. Do not include parameters. Only the function names.}
\item{param}{A single vector, list or data.table parameter for the \code{functions}.}
\item{size}{An integer value. This is the sample size of \code{param} to run the \code{functions}}
}
\value{
A data.table showing execution times and execution rates for the \code{functions} when run with sample of \code{param}
for sample size \code{size}
}
\description{
Compare two or more functions on execution time using a given sample size of the given parameter
}
\details{
Runs the functions using random sampling of the provided parameter at the given sample size, and returns a data table with
execution times and execution rates for the different functions
}
\examples{
\dontrun{
sample_function_1 <- function(arg){#Logic}
sample_function_2 <- function(arg){#Logic}
param # <A vector/list/data.table object to be passed as arg to the sample_functions>
size # Size of Sample
compare_functions(functions = c(sample_function_1, sample_function_2), param = param,
size = size)
}
}
\author{
Sujeet G Pillai
}
\keyword{compare}
\keyword{execution}
\keyword{functions,}
\keyword{times}
|
11343d12baddba3ecacd6e8e0f0abe7006ed3e76
|
e0036043d155f01a659af2e821dcccf9fc819ee8
|
/12-3 test for dependent p.R
|
84e2b1489f6871cb64d3bfbf42969a484865b3da
|
[] |
no_license
|
azambesi/STA4173
|
e52cc02b6fcc9dfa2ed7c2a40274b41ab5c112fb
|
8e7040f1ae0b51280c5d89f117c00178c74d88f2
|
refs/heads/master
| 2023-08-14T13:52:59.022642
| 2021-09-14T14:34:59
| 2021-09-14T14:34:59
| 411,345,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,284
|
r
|
12-3 test for dependent p.R
|
##### EXAMPLE 1 #####
# Paired healing outcomes under treatments A and B, entered row-wise so the
# layout matches the published table (rows = treatment B, columns = treatment A).
observed_table <- matrix(
  c(293, 43,
    31, 103),
  nrow = 2, ncol = 2, byrow = TRUE,
  dimnames = list(
    c("B Healed", "B Did Not Heal"),
    c("A Healed", "A Did Not Heal")
  )
)
# Echo the table so it can be checked against the source data.
observed_table
# McNemar's test on the discordant pairs; correct = FALSE skips the
# continuity correction.
mcnemar.test(observed_table, correct = FALSE)

##### EXAMPLE 2 #####
# Paired agree/disagree responses for the "Tap" and "Stop" questions.
observed_table <- matrix(
  c(494, 335,
    126, 537),
  nrow = 2, ncol = 2, byrow = TRUE,
  dimnames = list(
    c("Tap - Agree", "Tap - Disagree"),
    c("Stop - Agree", "Stop - Disagree")
  )
)
# Echo the table so it can be checked against the source data.
observed_table
# McNemar's test, again without the continuity correction.
mcnemar.test(observed_table, correct = FALSE)
|
d11a2d9796ab44a8126d506b9aec60bfaa941197
|
13d600b6e0d7fa0d81cc6fe3b366b1b3da319a34
|
/man/zinedown.Rd
|
95f9f4a03c4044447b5247de2690f5a7d2101c7d
|
[] |
no_license
|
Robinlovelace/zinedown
|
37c7ea98ca861c2f62fc6933116ecda41aa851f1
|
2868e7a92618b0f7f4cf8c968e2614aa5ebd4497
|
refs/heads/master
| 2020-04-09T03:52:17.609461
| 2018-12-02T20:44:19
| 2018-12-02T20:44:19
| 160,000,291
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 610
|
rd
|
zinedown.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zinedown.R
\docType{package}
\name{zinedown}
\alias{zinedown}
\alias{zinedown-package}
\title{zinedown: A package for creating zines using R Markdown}
\description{
zinedown: A package for creating zines using R Markdown
}
\section{zine_gitbook}{
Creates an R Markdown zine template as a webpage
}
\section{zine_pdf}{
Creates an R Markdown zine template as a PDF
}
\section{zine_word}{
Creates an R Markdown zine template as a Microsoft Word document
}
\section{zine_epub}{
Creates an R Markdown zine template as an ebook
}
|
d43d20f7415104e52bd1f6d04b3940c7dcad1a52
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bcpa/examples/ChangePointSummary.Rd.R
|
a5ccac269ec854783bb28aa8365d3fa331eca43b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
ChangePointSummary.Rd.R
|
# Extracted example for bcpa::ChangePointSummary (summarising a Behavioral
# Change Point Analysis of the bundled Simp track).
library(bcpa)
### Name: ChangePointSummary
### Title: Obtain summary of BCPA analysis
### Aliases: ChangePointSummary
### ** Examples
# Derive the velocity/turning-angle (VT) table from the Simp data set
# unless it is already in the workspace.
if(!exists("Simp.VT")){
data(Simp)
Simp.VT <- GetVT(Simp)}
# Sweep a 50-point analysis window over the persistence velocity V*cos(Theta).
if(!exists("Simp.ws"))
Simp.ws <- WindowSweep(Simp.VT, "V*cos(Theta)", windowsize = 50, windowstep = 1, progress=TRUE)
# Raw summary: too many change points.
ChangePointSummary(Simp.ws)
# Clustering change points within 3 time units gives about the right number.
ChangePointSummary(Simp.ws, clusterwidth=3)
|
f78d53e3a5d61a6af5c6a6e8a8aa275e26564861
|
f3aac55a8582aa2b9ec92389a1a8aee72e197db9
|
/man/calc_myreg_mreg_logistic_yreg_logistic.Rd
|
cd17d980945050168ecc549267f0c35c08d73f37
|
[] |
no_license
|
kaz-yos/regmedint
|
fe620ae12014996497d559713bc960400279e185
|
e3c3ffea5d99c00bae2b42f7ab87f57e1bb99a74
|
refs/heads/master
| 2022-05-17T18:21:57.259579
| 2022-04-06T17:02:41
| 2022-04-06T17:02:41
| 245,831,454
| 24
| 6
| null | 2022-02-03T21:24:47
| 2020-03-08T14:41:41
|
R
|
UTF-8
|
R
| false
| true
| 2,503
|
rd
|
calc_myreg_mreg_logistic_yreg_logistic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/07_calc_myreg_mreg_logistic_yreg_logistic.R
\name{calc_myreg_mreg_logistic_yreg_logistic}
\alias{calc_myreg_mreg_logistic_yreg_logistic}
\title{Create calculators for effects and se (mreg logistic / yreg logistic)}
\usage{
calc_myreg_mreg_logistic_yreg_logistic(
mreg,
mreg_fit,
yreg,
yreg_fit,
avar,
mvar,
cvar,
emm_ac_mreg,
emm_ac_yreg,
emm_mc_yreg,
interaction
)
}
\arguments{
\item{mreg}{A character vector of length 1. Mediator regression type: \code{"linear"} or \code{"logistic"}.}
\item{mreg_fit}{Model fit from \code{\link{fit_mreg}}}
\item{yreg}{A character vector of length 1. Outcome regression type: \code{"linear"}, \code{"logistic"}, \code{"loglinear"}, \code{"poisson"}, \code{"negbin"}, \code{"survCox"}, \code{"survAFT_exp"}, or \code{"survAFT_weibull"}.}
\item{yreg_fit}{Model fit from \code{\link{fit_yreg}}}
\item{avar}{A character vector of length 1. Treatment variable name.}
\item{mvar}{A character vector of length 1. Mediator variable name.}
\item{cvar}{A character vector of length > 0. Covariate names. Use \code{NULL} if there is no covariate. However, this is a highly suspicious situation. Even if \code{avar} is randomized, \code{mvar} is not. Thus, there are usually some confounder(s) to account for the common cause structure (confounding) between \code{mvar} and \code{yvar}.}
\item{emm_ac_mreg}{A character vector of length > 0. Effect modifiers names. The covariate vector in treatment-covariate product term in the mediator model.}
\item{emm_ac_yreg}{A character vector of length > 0. Effect modifiers names. The covariate vector in treatment-covariate product term in the outcome model.}
\item{emm_mc_yreg}{A character vector of length > 0. Effect modifiers names. The covariate vector in mediator-covariate product term in outcome model.}
\item{interaction}{A logical vector of length 1. The presence of treatment-mediator interaction in the outcome model. Default to TRUE.}
}
\value{
A list containing a function for effect estimates and a function for corresponding standard errors.
}
\description{
Construct functions for the conditional effect estimates and their standard errors in the mreg logistic / yreg logistic setting. Internally, this function deconstructs model objects and feeds parameter estimates to the internal worker functions \code{calc_myreg_mreg_logistic_yreg_logistic_est} and \code{calc_myreg_mreg_logistic_yreg_logistic_se}.
}
|
000e05153c6eb0af19cc60cd4915caadf2c8b927
|
b363cf1145275571fe5f490d3d7cae9d357d843b
|
/R/old/map.loc.asreml.R
|
e7c1138eb33866c678aa8c5e3a1484a1c996fb58
|
[] |
no_license
|
behuang/dlmap
|
93b31b020866fa5db64615f9df2b45bb0879c48e
|
81cfc6efddf7a32a91ac99296519edae85053147
|
refs/heads/master
| 2020-04-01T19:48:49.328751
| 2015-04-15T23:50:43
| 2015-04-15T23:50:43
| 15,329,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,295
|
r
|
map.loc.asreml.R
|
# Scan candidate QTL positions along chromosome `s.chr` with asreml.
#
# For each map position, the position enters the environmental model as a
# fixed effect while marker groups for the chromosomes in `chrSet` (minus
# previously selected loci) enter as random effects; a Wald statistic is
# recorded per position.
#
# Arguments:
#   input   - dlmap input object: a list carrying dfMrk (marker matrix),
#             dfMerged (phenotypes + markers), envModel (asreml call
#             skeleton), nphe (number of phenotype columns), map/mapp
#   s.chr   - index of the chromosome being scanned
#   chrSet  - chromosome indices retained in the random part of the model
#   prevLoc - optional previously mapped loci, with elements $pos and $mrk
#   ...     - additional arguments appended to the asreml call
#
# Returns a list with components:
#   $wald     - numeric vector of Wald statistics, one per map position
#               (0 where a position was skipped because it fell inside the
#               interval of a previously mapped QTL)
#   $converge - FALSE if any asreml fit failed to converge
`map.loc.asreml` <-
function(input, s.chr, chrSet, prevLoc=NULL, ...)
{
  dfMrk <- input$dfMrk
  dfMerged <- input$dfMerged
  envModel <- input$envModel
  nphe <- input$nphe
  map <- input$mapp[[s.chr]]
  # Column indices of this chromosome's markers (M) and positions (P).
  mrk <- grep(paste("C", s.chr, "M", sep=""), names(dfMerged))
  chr <- sort(c(mrk, grep(paste("C", s.chr, "P", sep=""), names(dfMerged))))
  type <- attr(input, "type")
  n.chr <- length(input$map)

  # Previously selected loci: names (f.pos/f.mrk) and their column indices
  # in dfMerged (fp/fm).
  # BUGFIX: fp/fm were only assigned inside the prevLoc branch but are used
  # unconditionally below (dfm1 construction); initialise them so the
  # no-prevLoc path does not fail with "object 'fp' not found".
  f.pos <- vector()
  f.mrk <- vector()
  fp <- integer(0)
  fm <- integer(0)
  if (length(prevLoc)>0) {
    f.pos <- prevLoc$pos
    f.mrk <- prevLoc$mrk
    fp <- match(f.pos, names(input$dfMerged))
    fm <- match(f.mrk, names(input$dfMerged))
    if (type=="f2") {
      # F2 crosses carry additive/dominance column pairs; collapse to stems.
      f.pos <- paste(unique(substr(f.pos, 1, nchar(f.pos)-1)), "D", sep="")
      f.mrk <- unique(substr(f.mrk, 1, nchar(f.mrk)-1))
    }
  }

  formula <- envModel
  ### Redefine dfMerged: phenotypes plus fixed-effect markers/positions,
  ### with per-chromosome random-effect marker groups appended.
  if (ncol(input$dfMrk) > n.chr*nrow(dfMrk)) {
    # Pull out the phenotypic data and fixed-effect markers/positions;
    # everything else is merged onto this.
    dfm1 <- input$dfMerged[,c(1:nphe, fp, fm)]
    # Create separate groups of indices, one reduced-rank design per chromosome.
    index <- list()
    mat <- list()
    # BUGFIX: ncolm was used without initialisation, which raises
    # "object 'ncolm' not found" on the first assignment below.
    ncolm <- numeric(n.chr)
    for (kk in 1:n.chr)
    {
      index[[kk]] <- setdiff(grep(paste("C", kk, "M", sep=""),colnames(input$dfMrk)[2:ncol(input$dfMrk)]), match(prevLoc$mrk, colnames(input$dfMrk)[2:ncol(input$dfMrk)])) + 1
      mat[[kk]] <- input$dfMrk[,index[[kk]]]
      # Matrix square root of the marker relationship matrix
      # (mroot presumably from mgcv -- TODO confirm provenance).
      mat[[kk]] <- mroot(mat[[kk]] %*% t(mat[[kk]]))
      ncolm[kk] <- ncol(mat[[kk]])
    }
    cumind <- c(0, cumsum(ncolm))
    dfm2 <- as.data.frame(do.call("cbind", mat))
    dfm2 <- cbind(input$dfMrk[,1], dfm2)
    dfMerged2 <- merge(dfm1, dfm2, by=names(dfm2)[1], all.x=TRUE, sort=FALSE)
    # Record the column span of each chromosome's random-effect group.
    for (kk in 1:n.chr)
      formula$group[[paste("g_", kk, "chr", sep="")]] <- ncol(dfm1) + (cumind[kk]+1):cumind[kk+1]
  } else {
    for (kk in 1:n.chr)
      formula$group[[paste("g_", kk, "chr", sep="")]] <- setdiff(grep(paste("C", kk, "M", sep=""),colnames(dfMerged)[(nphe+1):ncol(dfMerged)]), match(c(prevLoc$mrk, prevLoc$pos), colnames(dfMerged)[(nphe+1):ncol(dfMerged)])) + nphe
    dfMerged2 <- dfMerged
  }
  groups <- formula$group
  ##################################

  # F2: marker columns come in additive/dominance pairs; keep one per pair.
  if (type=="f2") mrk <- mrk[seq(1, length(mrk), 2)]
  results <- list()
  wald <- rep(0, length(map))
  # Initialize convergence flag for output
  results$converge <- TRUE

  # Intervals occupied by previously mapped QTL; positions inside them are
  # skipped (their wald entry stays 0).
  int <- vector()
  if (length(f.pos)>0) {
    fmrk <- sapply(match(f.pos, names(dfMerged)), function(x) {
      if (x==min(mrk)) return(c(min(mrk), min(mrk[mrk>x]))) else if (x==max(mrk)) return(c(max(mrk[mrk<x]), max(mrk))) else return(c(max(mrk[mrk<x]), min(mrk[mrk>x])))})
    # because we want to exclude both additive and dominant effects
    if (type=="f2") fmrk[2,] <- fmrk[2,]+1
    int <- eval(parse(text=paste("c(", paste(apply(fmrk, 2, function(x) return(paste(x[1], ":", x[2], sep=""))), collapse=","), ")", sep="")))
  }

  for (jj in 1:length(map))
  {
    if (type=="f2") pos <- c(2*jj-1, 2*jj) else pos <- jj
    # Check that position is not in the same interval as any previously mapped QTL.
    # need to check up on write up to make sure which fixed model elements are
    # being fit on a given iteration - are we including fixed effects for QTL
    # on other chromosomes?
    if (all(!(chr[pos] %in% int)))
    {
      ### Create data frame with the candidate position plus the transformed
      ### random effects; shift group indices when a column is prepended.
      dfMerged3 <- dfMerged2
      if (length(intersect(names(dfMerged)[chr[pos]], names(dfMerged2)))==0){
        dfMerged3 <- cbind(dfMerged[, chr[pos]], dfMerged2)
        for (kk in 1:n.chr) formula$group[[paste("g_",kk,"chr",sep="")]] <- groups[[paste("g_",kk,"chr",sep="")]]+1
        names(dfMerged3)[1] <- names(dfMerged)[chr[pos]] }
      # Random model: one idv() term per other chromosome in chrSet.
      chrnam <- paste("idv(grp(g_", setdiff(chrSet,s.chr), "chr))", sep="")
      formula$random <- paste("~", paste(chrnam, collapse="+"))
      # Include spatial/environmental random effects
      if (!is.null(envModel$random))
        formula$random <- paste(formula$random, "+", as.character(envModel$random[2]), sep="")
      formula$random <- as.formula(formula$random)
      # Fixed model: environmental fixed effects + previous QTL + candidate.
      formula$fixed <- paste(as.character(envModel$fixed)[2], "~", as.character(envModel$fixed[3]), sep="")
      if (length(f.pos) >0)
        formula$fixed <- paste(formula$fixed, "+",paste(prevLoc$pos, collapse="+"), sep="")
      # NOTE(original): "not going to work for f2, need to correct this."
      if (type=="f2")
        formula$fixed <- paste(formula$fixed, "+", paste(paste(names(dfMerged)[chr[pos]], collapse="+"), sep="")) else formula$fixed <- paste(formula$fixed, "+", names(dfMerged)[chr[pos]], sep="")
      formula$fixed <- as.formula(formula$fixed)
      formula$data <- dfMerged3
      formula$Cfixed <- TRUE
      # Append user-supplied asreml arguments, dropping duplicates and NULLs.
      # NOTE(review): this re-appends `...` on every iteration and relies on
      # deduplication -- consider hoisting out of the loop.
      formula <- c(formula, ...)
      formula <- formula[!duplicated(formula)]
      formula <- formula[!sapply(formula, is.null)]
      if (length(chrSet)>1) model <- do.call("asreml", formula)
      if (length(chrSet)==1)
      {
        # Single-chromosome scan: fall back to the environmental random model.
        formula1 <- formula
        formula1$random <- envModel$random
        formula1 <- formula1[!sapply(formula1, is.null)]
        model <- do.call("asreml", formula1)
      }
      if (model$converge==FALSE) results$converge <- FALSE
      # Wald test for the candidate-position fixed effect (only if estimable).
      if (model$coefficients$fixed[names(model$coefficients$fixed) %in% names(dfMerged)[chr[pos]]] != 0)
        wald[jj] <- wald.test.asreml(model, list(list(which(model$coefficients$fixed[names(model$coefficients$fixed) %in% names(dfMerged3)[chr[pos]]]!=0), "zero")))$zres$zwald
    } # end of check for distinct intervals
  }
  results$wald <- wald
  return(results)
}
|
65910bcbbd57d5944e9930e6f49f4b60ba3cc863
|
ffe87a0a6134783c85aeb5b97332b201d50aca9d
|
/MINI_2015/prace_domowe/PD_11/pd_11_sudol.R
|
df7036dc9de45a6bb05cb734741e530be19d64c1
|
[] |
no_license
|
smudap/RandBigData
|
d34f6f5867c492a375e55f04486a783d105da82d
|
4e5818c153144e7cc935a1a1368426467c3030a5
|
refs/heads/master
| 2020-12-24T15:51:11.870259
| 2015-06-16T08:50:34
| 2015-06-16T08:50:34
| 32,064,294
| 0
| 0
| null | 2015-03-12T07:53:56
| 2015-03-12T07:53:56
| null |
UTF-8
|
R
| false
| false
| 833
|
r
|
pd_11_sudol.R
|
# Homework exercise: compare a deliberately naive implementation (f) against
# a faster one (f2) using system.time(). f's inefficiencies (growing objects
# in loops, fitting each lm twice) are the point of the comparison.
f = function(){
  # Draw random data: 2 columns, 10000 rows, built one row at a time
  # (copies the whole data frame on every iteration).
  df <- data.frame()
  for (i in 1:10000) {
    df <- rbind(df, data.frame(x=rnorm(1), y=rnorm(1)))
  }
  # Fit regression models on bootstrap samples; each lm() is fitted twice
  # per iteration just to extract the two coefficients.
  resx <- numeric()
  resy <- numeric()
  inda <- NULL
  for (i in 1:500) {
    ind <- sample(1:nrow(df), replace = TRUE)
    resx[i] <- lm(x~y, data=df[ind,])$coef[1]
    resy[i] <- lm(x~y, data=df[ind,])$coef[2]
    inda <- rbind(inda, ind)
  }
  # Sort the values in each row.
  df2 <- cbind(resx, resy, inda)
  res <- apply(df2, 1, sort)
}
f2 = function(){
  # Faster variant: vectorised data generation and a single lapply pass.
  # NOTE(review): this data.frame() call creates a 1-row frame with columns
  # named `ncol` and `nrow`; it is not a 500 x 10002 preallocation.
  inda = data.frame(ncol=10002,nrow=500)
  df = data.frame(x=rnorm(10000), y=rnorm(10000))
  # NOTE(review): f regresses x~y while f2 regresses y~x -- confirm which is
  # intended. The assignment's value (the sorted vector) is what lapply
  # collects; the modification of `inda` stays local to the anonymous function.
  wynik = lapply(1:500, function(i){
    ind = sample(1:10000, replace = TRUE)
    inda[i,] = sort(c(lm(y~x, data = df[ind,])$coef,ind))
  })
}
# Benchmark both versions.
system.time(f())
system.time(f2())
|
c861be4dc18c7e8b360389a0938739022e53ef4e
|
6b084409ce9e23028d3749a6508d9f842f186b24
|
/Logistic.R
|
26da2ee5cd1f20ce1582d33303555a61a45faa0b
|
[] |
no_license
|
avigoud89/credit-analysis
|
9cf5494696a819c1938cca46f7be4139209dd284
|
a6721e82c03b47466f611d0d17f258a4ca7a85e1
|
refs/heads/master
| 2021-01-15T12:48:24.679060
| 2015-11-13T01:41:25
| 2015-11-13T01:41:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,841
|
r
|
Logistic.R
|
# Credit-risk analysis: build a binary "acceptable NPV" target from credit3.csv,
# fit logistic regressions on a train/test split, evaluate classification
# cutoffs, and weigh cutoffs by expected profit/loss.
# NOTE(review): setwd() makes the script non-portable; prefer relative paths.
setwd("~/Downloads/MISC")
credit <- read.csv("credit3.csv")
str(credit)
# NPV was read with thousands separators; strip commas and convert to numeric.
credit$NPV <- gsub(",","",credit$NPV)
credit$NPV <- as.numeric(credit$NPV)
head(credit$NPV, 20)
# Binary target CAN: "Yes" for non-negative NPV, "No" otherwise, then 0/1.
credit$CAN <- rep("Yes", nrow(credit))
credit$CAN[credit$NPV <0] <- "No"
credit$CAN <- factor(credit$CAN, levels = c("No", "Yes"))
str(credit$CAN)
credit$CAN <- as.numeric(credit$CAN) - 1
# Recode CHK_ACCT (shift positive codes up, fold code 4 into 1) and expand
# into 0/1 dummy indicators CHK_ACCT0..CHK_ACCT3.
credit$CHK_ACCT <- ifelse(credit$CHK_ACCT > 0, credit$CHK_ACCT+1, credit$CHK_ACCT)
credit$CHK_ACCT[credit$CHK_ACCT == 4] <- 1
credit$CHK_ACCT0 <- ifelse(credit$CHK_ACCT == 0, 1, 0)
credit$CHK_ACCT1 <- ifelse(credit$CHK_ACCT == 1, 1, 0)
credit$CHK_ACCT2 <- ifelse(credit$CHK_ACCT == 2, 1, 0)
credit$CHK_ACCT3 <- ifelse(credit$CHK_ACCT == 3, 1, 0)
# credit$CHK_ACCT <- as.factor(credit$CHK_ACCT)
# Treat the remaining categorical codes as factors.
credit$SAV_ACCT <- as.factor(credit$SAV_ACCT)
credit$JOB <- as.factor(credit$JOB)
credit$TYPE <- as.factor(credit$TYPE)
credit$HISTORY <- as.factor(credit$HISTORY)
credit$PRESENT_RESIDENT <- as.factor(credit$PRESENT_RESIDENT)
credit$EMPLOYMENT <- as.factor(credit$EMPLOYMENT)
credit$NUM_CREDITS <- as.factor(credit$NUM_CREDITS)
credit$INSTALL_RATE <- as.factor(credit$INSTALL_RATE)
credit$NUM_DEPENDENTS <- as.factor(credit$NUM_DEPENDENTS)
credit$AMOUNT_REQUESTED <- gsub(",","",credit$AMOUNT_REQUESTED) # Strips ',' (coerces to character as a side effect).
credit$AMOUNT_REQUESTED <- as.numeric(credit$AMOUNT_REQUESTED) # Character converts to numeric directly, unlike factor.
str(credit$AMOUNT_REQUESTED)
str(credit)
# 70/30 train/test split, reproducible via the seed.
set.seed(12345)
x <- sample(nrow(credit),0.7*nrow(credit),replace = FALSE)
train <- credit[x,]
test <- credit[-x,]
str(train)
str(test)
# Drop the first column (presumably a row-identifier -- confirm).
test <- test[,-1]
train <- train[,-1]
# Full logistic model on the training set.
logreg1 <- glm(CAN~AGE+CHK_ACCT1+CHK_ACCT2+CHK_ACCT3+SAV_ACCT+NUM_CREDITS+DURATION+HISTORY+PRESENT_RESIDENT+EMPLOYMENT+JOB+NUM_DEPENDENTS+RENT+INSTALL_RATE+GUARANTOR+OTHER_INSTALL+OWN_RES+TELEPHONE+FOREIGN+REAL_ESTATE+TYPE+AMOUNT_REQUESTED, data = train, family = binomial)
summary(logreg1)
# Classify test cases at cutoff y = 0.5; sensitivity/specificity from the
# resulting confusion matrix.
pred1 <- predict(logreg1,newdata = test,type = 'response')
y <- 0.5
pred2 <- ifelse(pred1 > y, 1, 0)
tab <- table(test$CAN, pred2)
tab
specificity <- tab[1,1]/(tab[1,1]+tab[1,2])
sensitivity <- tab[2,2]/(tab[2,2]+tab[2,1])
sensitivity
specificity
# Same confusion matrix on the training set.
predtrain <- predict(logreg1, newdata = train, type = 'response')
predtrain2 <- ifelse(predtrain > y, 1, 0)
tabtrain <- table(train$CAN, predtrain2)
tabtrain
# ROC curves: test (blue) overlaid with train (red).
library(pROC)
library(ROCR)
ROC <- roc(test$CAN, pred1)
plot(ROC, col = "blue")
ROCtrain <- roc(train$CAN, predtrain)
plot(ROCtrain, add = TRUE, col = 'red')
# Accuracy across a grid of cutoffs, on the training set...
cutoffs <- seq(0,1,by = 0.05)
eff <- sapply(seq(0, 1, by=0.05), function(cutoff) sum((predtrain > cutoff) == train$CAN))/nrow(train)
plot(cutoffs,eff)
which.max(eff)
max(eff)
# ...and on the test set.
valideff <- sapply(seq(0, 1, by=0.05), function(cutoff) sum((pred1 > cutoff) == test$CAN))/nrow(test)
plot(cutoffs,valideff)
which.max(valideff)
max(valideff)
#effy <- sapply(seq(0, 1, by=0.05), function(cutoff1) sum((predtrain < cutoff1) == train$CAN[train$CAN == 1]))#/sum(predtrain[predtrain>cutoff1]))
# Average profit of NPV-positive loans and average loss of NPV-negative loans.
profitmean <- mean(train$NPV[train$NPV>=0])
lossmean <- mean(train$NPV[train$NPV<0])
profitmean
lossmean
predy <- prediction(pred1, test$CAN)
# perf <- performance(predy, "sens")
# ?performance
# perf1 <-performance(predy, "spec")
perfy <- performance(predy, "tpr", "fpr", col = 'red')
plot(perfy)
# Cost matrix applied cell-wise to the confusion tables below.
# NOTE(review): this reuses `x`, clobbering the split indices defined above.
x <- matrix(c(0,-profitmean, lossmean,0), nrow = 2, ncol = 2)
x
# For each cutoff z[i], rebuild the test confusion table...
z <- seq(0,1,0.05)
t1 <- vector(mode = "list", length = length(z))
#predtrain <- predict(logreg1, newdata = train, type = 'response')
# Alert: This code is specifically for roc curve generation manually
for(i in 1:length(z)) {
predtest <- predict(logreg1, newdata = test, type = 'response')
#
for(j in 1:length(predtest)){
#
predtest[j] <- ifelse(predtest[j]>z[i], 1, 0)
}
t1[[i]] <- table(test$CAN, predtest)
}
t1
# ...then weight each table by the cost matrix to get total NPV per cutoff.
r <- vector(mode = "list", length = length(z))
for(i in 1:length(z)) {
r[[i]] <- as.data.frame.matrix(t1[[i]])*x
}
r
q <- sapply(1:length(r), function(p) sum(r[[p]]))
# NOTE(review): hard-coded override of the first cutoff's value -- presumably
# patching a degenerate one-column table at cutoff 0; confirm and derive
# this value instead of hard-coding it.
q[1] <- -132432.63
q
plot(cutoffs, q, col = 'red')
max(q)
min(q)
which.max(q)
which.min(q)
# Reduced logistic model with selected levels entered via I() indicators.
logreg2 <- glm(CAN~AGE+CHK_ACCT1+CHK_ACCT2+CHK_ACCT3+I(SAV_ACCT == 3)+I(SAV_ACCT == 4)+I(NUM_CREDITS == 2)+DURATION+I(HISTORY == 3)+I(PRESENT_RESIDENT == 2)+I(EMPLOYMENT == 3)+INSTALL_RATE+OTHER_INSTALL+FOREIGN+I(TYPE == 2)+I(TYPE == 5)+AMOUNT_REQUESTED, data = train, family = "binomial")
# Cutoff-accuracy curves for the reduced model on train and test.
predfinaltr<- predict(logreg2, newdata = train, type ='response')
effinaltr <- sapply(seq(0, 1, by=0.05), function(cutoff) sum((predfinaltr > cutoff) == train$CAN))/nrow(train)
plot(cutoffs,effinaltr)
which.max(effinaltr)
max(effinaltr)
predfinalte<- predict(logreg2, newdata = test, type ='response')
effinalte <- sapply(seq(0, 1, by=0.05), function(cutoff) sum((predfinalte > cutoff) == test$CAN))/nrow(test)
plot(cutoffs,effinalte)
which.max(effinalte)
max(effinalte)
plot(effinaltr,effinalte)
|
5930bca41c3bb1e9212140ff2ec0850920a0c9bf
|
ff816cf9be953573639964769fff362fd8a1b9d3
|
/R/sampling_reference_set.R
|
6bbcef44538f61f22568a30ee6419e1dacf37481
|
[] |
no_license
|
BEAST-Community/weifang-sarscov2
|
e1a4f8dc3e026930d9e258937614b454cf3aca1d
|
38488632becbbdafb79117bc6b0e0c1b2d96edeb
|
refs/heads/master
| 2023-01-05T12:25:57.541500
| 2020-11-05T12:02:39
| 2020-11-05T12:02:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,671
|
r
|
sampling_reference_set.R
|
# sarscov2Rutils package required: https://github.com/emvolz-phylodynamics/sarscov2Rutils
# devtools::install_github("emvolz-phylodynamics/sarscov2Rutils", ref = 'sarscov2Rutils')
# GISAID database and metadata required: gisaid.org.
# place these files in a /data folder
require(ape)
library(lubridate)
require(sarscov2)
# Reading in database
# NOTE(review): `fn` is the alignment itself (a DNAbin object), not a file
# path, yet it is later passed as `path_to_align`; confirm the helper
# accepts either form.
fn = read.dna("data/gisaid_db.fas", format = 'fasta' ) ## path to the GISAID alignment:
# NOTE(review): `stringsAs` relies on partial matching of stringsAsFactors;
# spell the argument out.
md <- read.csv( "data/md.csv", stringsAs = FALSE ) # Load the metadata
xml_skelly <- paste0('beast/seir.xml' ) # skeleton for xml
# Checking my sequences feature in the metadata
# (second pipe-delimited field of each FASTA name is the GISAID accession).
table(unlist(lapply(strsplit(rownames(fn), "[|]"), function(x) x[[2]])) %in% md$gisaid_epi_isl)
# Sampling internal and exogenous sequences.
# Here specifying 50 internal and exogenous sequences; there are only 20 sequences from Weifang so they will all be chosen.
# Note that the function will retrieve not only reference 50 sequences sampled through time, but also close genetic matches to the internal Weifang sequences.
# This will mean the reference set will be larger than 50.
# If available, a distance matrix D can speed this up
sarscov2::region_time_stratified_sample(region_regex = "WF", path_to_align = fn, D=NULL, nregion = 50, n = 50, path_to_save = "algn.fas")
# This is the output alignment: "algn.fas"
# NOTE(review): the sample is saved to "algn.fas" but read back below from
# "data/algn.fas" -- confirm the intended location.
# Now make the alignment for BEAST. This adds sample time and deme to tip labels:
d3 = sarscov2::prep_tip_labels_phydyn(path_to_align = "data/algn.fas", path_to_save = paste0("data/algn_prepped.fas"),
regexprs = c( 'WF', 'WF') ) # 'WF' lets the function know that Weifang are the internal sequences
|
545f6f82c5a31f7bfad3f28d60a55734ad556fb1
|
baaa21c343d251555c3ddd21427bef14b9b8af58
|
/BiomarkerSelectionMethods.R
|
ce9986eb85554eccb3e134319a94ca496043574a
|
[] |
no_license
|
amcrisan/UnnamedBiomarkerProject
|
8cfcdab7639b66220cd0a64b82ab60f4392a6337
|
1f9e3d73105226b6f945da67e4ecc51dea7b49c5
|
refs/heads/master
| 2020-04-10T21:06:34.836018
| 2015-09-03T00:03:36
| 2015-09-03T00:03:36
| 31,673,859
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,647
|
r
|
BiomarkerSelectionMethods.R
|
# Biomarker selection: load an OTU abundance table plus sample metadata,
# align the two, reproduce the demographic comparisons from Schubert et al.,
# and add a diversity covariate.
library(biom)
library(scales)
library(plyr)
library(dplyr)
library(rms)
library(RColorBrewer)
library(reshape)
library(ggplot2)
library(e1071)
library(nnet)
library(pROC)
library(vegan)
library(glmnet)
source("SupportingBiomarkerMethods.R")
set.seed(1)
########################################
# Loading and wrangling the data
########################################
# loading the OTU data
dat<- read_biom(biom_file = "data/all.final.an.0.03.subsample.0.03.biom")
abundDat <- t(as.matrix(biom_data(dat))) #rows = Samples, columns = OTU
taxonomy <- observation_metadata(dat)
# loading the metadata separately
# there needs to be some wrangling to account for samples with no metadata
# and metadata with no associated samples. So just reading the metadata in
# rather than dealing with it in the "make.biom" step.
metadata<-read.csv(file="Data/SchubertMetadata.csv",header=T)
#some manual wrangling:
#sample DA00754 is only represented once as DA00754.2 whereas it is DA00754
#in the metadata, causing some matching issues. The same is true for DA00939
# NOTE(review): '.' in these grep patterns is a regex wildcard; use
# fixed = TRUE to match the sample names literally.
tempRow<-rownames(abundDat)
tempRow[grep("DA00754.2",rownames(abundDat))]<-"DA00754"
tempRow[grep("DA00939.2",rownames(abundDat))]<-"DA00939"
rownames(abundDat)<-tempRow
# ... the following samples are in the abundace table, but not in the metadata
dropSamps <- setdiff(rownames(abundDat),metadata$sampleID)
# for now, drop samples that don't have associated metadata
abundDat <- abundDat[!(rownames(abundDat) %in% dropSamps), ]
# ... the following have metadata but no associated sample
dropSamps <- setdiff(metadata$sampleID,rownames(abundDat))
metadata <- filter(metadata,!(sampleID %in% dropSamps))
# make sure the metadata data and abundance table are in the same
# row order. Not necessary, but I just like it this way
# NOTE(review): the usual idiom for reordering abundDat to metadata order is
# match(metadata$sampleID, rownames(abundDat)); the arguments here look
# swapped, though the identity check below would catch a misalignment.
abundDat <- abundDat[match(rownames(abundDat),metadata$sampleID),]
if(sum(!(rownames(abundDat) == as.character(metadata$sampleID))) == 0){
print("Good to Go")
}
######
# The Wrangling
# removing the mouse data from the metadata and the abundance matricies
dropSamps<- filter(metadata, host_common_name == "human") %>% select(sampleID)
metadata <- filter(metadata,sampleID %in% dropSamps$sampleID) %>% droplevels
abundDat <- abundDat[match(rownames(abundDat),metadata$sampleID),]
########################################
# Sanity check - comparing demographic data to Schubert's paper
########################################
#Initial statistical analyses were conducted to assess differences among
#the three study groups (C. difficile cases, diarrheal con- trols, and
#nondiarrheal controls). For continuous variables (e.g., age and weight),
#one-way analysis of variance was utilized. For categorical variables,
#Pearson's chi-square test or Fisher's exact test was performed when
#expected cell frequencies were less than or equal to 5.
#1. Distribution of case and controls
table(metadata$disease_stat) # checks out
#2. Age
summary(aov(age ~ disease_stat, data = metadata)) # p = 0.034 (agree)
#3. Race
table(metadata$disease_stat,metadata$race)
#reclassify to black, white and Other/unknown
metadata$race2 <- mapvalues(metadata$race,
from=levels(metadata$race),
to=c("Other/unknown","Black","Other/unknown","Other/unknown",
"Other/unknown","White"))
#using chi.sq I can get 0.712 reported in paper, but fishers provides the more exact p-value
fisher.test(table(metadata$disease_stat,metadata$race2)) # p = 0.752 (off - but agree not sig)
chisq.test(table(metadata$disease_stat,metadata$race2)) # p=0.712 (agree - but maybe not right test choice)
#4. Weight
# raw weight values were not given, so it is not possible to perform an anova
# (metadat) only contains weight categories.
#5. Drug Use
chisq.test(table(metadata$disease_stat,metadata$antibiotics..3mo)) # p<0.0001 (agree)
#6. Others
chisq.test(table(metadata$disease_stat,metadata$antacid)) # p ~ 0.0001 (agree)
chisq.test(table(metadata$disease_stat,metadata$Surgery6mos)) # p<0.0001 (agree)
fisher.test(table(metadata$disease_stat,metadata$historyCdiff)) # p = 0.793 (agree)
fisher.test(table(metadata$disease_stat,metadata$ResidenceCdiff)) # p = 0.593 (agree)
chisq.test(table(metadata$disease_stat,metadata$Healthworker)) # p<= 0.0007 (off - but agree sig)
########################################
# Sanity check - comparing base model performance
########################################
#add the inverse simpsons diversity biomarkers
metadata$inverseSimpson = diversity(abundDat,index="invsimpson")
################################
# base model
#We used age, gender, race, antibiotic use, antacid use, a vegetarian diet,
#surgery within the past 6 months, a history of CDI, residence with another person who had CDI,
#and residence with another person who works in a health care setting as
baseModel <- multinom(disease_stat ~ age + gender + race2 + antibiotics..3mo + antacid + Surgery6mos +
historyCdiff + ResidenceCdiff + Healthworker,data = metadata)
metadata$basePredCase <- predict(baseModel,metadata,type="probs")[,"Case"]
# case and non-diarrheal control
tmp <- filter(metadata,disease_stat %in% c("Case","NonDiarrhealControl")) %>%
select(c(disease_stat,basePredCase)) %>%
mutate(response = mapvalues(disease_stat,c("Case","NonDiarrhealControl"),c(1,0))) %>%
droplevels
tmp$response = factor(tmp$response,levels = c(0,1))
ci.auc(roc(response=tmp$response,predictor=tmp$basePredCase)) # 0.894 (0.852-0.936) - agree & slightly off
# case and diarrheal control
metadata$basePredDcontrol <- predict(baseModel,metadata,type="probs")[,"DiarrhealControl"]
tmp <- filter(metadata,disease_stat %in% c("Case","DiarrhealControl")) %>%
select(c(disease_stat,basePredDcontrol)) %>%
mutate(response = mapvalues(disease_stat,c("Case","DiarrhealControl"),c(1,0))) %>%
droplevels
tmp$response = factor(tmp$response,levels = c(0,1))
ci.auc(roc(response=tmp$response,predictor=tmp$basePredDcontrol)) # 0.566 ( 0.481 - 0.650 ) - quite far off - something weird
# ...... I think I mis-interpreted "nested logit model" after trying to fit one
# without much success. I then realised, that what is meant is "a series of logistic
# models, where a new variable is successively added" (i.e. the nesting is in the variables
# and not the response). This appraoch seems to work - even so, I don't know if fitting several
# independent logistics is best. I do like the nnet multinom approach, you get more of a
# mixed model, which I think is just a nicer way to handle this data response, maybe we should
# talk about that.
comparisons<-rbind(c("Case","DiarrhealControl"),
c("Case","NonDiarrhealControl"),
c("DiarrhealControl","NonDiarrhealControl"))
# .. getting and storing the AUCs
storeAUC<-c()
for(comp in 1:nrow(comparisons)){
comp<-comparisons[comp,]
metaSubs <- filter(metadata,disease_stat %in% comp)
#always make sure to select right reference
if("Case" %in% comp){
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "Case"))
}else{
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "DiarrhealControl"))
}
# .... base Model
baseModel <- lrm(response ~ age + gender + race2 + antibiotics..3mo + antacid + Surgery6mos +
historyCdiff + ResidenceCdiff + Healthworker,
data = metaSubs,x=TRUE,y=TRUE)
# I am going to use harrell's C for the AUC, this will allow me to
# to adjust the AUC for overfitting
optimismModel<-validate(baseModel)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC<-rbind(storeAUC,c(paste(comp,collapse=" - "),"base",
round(harrellC_orj,3),
round(harrellC_adj,3)))
}
################################
# Diversity - inverse simpsons
# THE AUCs while using the "inverse simpsons" measure matched pretty well with some variablity in the
# of 0.1 to 0.2 off the results reported in the paper. This could be because there are different techniques
# to calculate the area under a curve (different approximations) that probably accounts for the difference.
# that they are very much in the right ball park is good.
# .. getting and storing the AUCs
storeAUC_div<-c()
for(comp in 1:nrow(comparisons)){
comp<-comparisons[comp,]
metaSubs <- filter(metadata,disease_stat %in% comp)
print(comp)
#always make sure to select right reference
if("Case" %in% comp){
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "Case"))
}else{
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "DiarrhealControl"))
}
# .... genomic model - inverse simpsons
AUCval<-ci.auc(roc(response=metaSubs$response,predictor=metaSubs$inverseSimpson))
storeAUC_div<-rbind(storeAUC_div,c(paste(comp,collapse=" - "),"genomic-diversity",
round(AUCval[2],3),
"NA"))
# .... combined model
combinedModel <- lrm(response ~ age + gender + race2 + antibiotics..3mo + antacid + Surgery6mos +
historyCdiff + ResidenceCdiff + Healthworker + inverseSimpson,
data = metaSubs,x=TRUE,y=TRUE)
optimismModel<-validate(combinedModel)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_div<-rbind(storeAUC_div,c(paste(comp,collapse=" - "),"combined - genomic (diversity)",
round(harrellC_orj,3),
round(harrellC_adj,3)))
}
########################################
# The actual biomarker selection
########################################
#grab the LeFSe OTUS from Figure 3
schubertOTUs<-c(1,2,14,22,33,24,10,40,29,13,36,11,15,68,39,
34,61,38,99,51,63,27,66,46,25,26,37,59,42,56,
85,45,28,41,30,18,21,6,7,3,5,4) %>%
formatC(width=4,flag="0")
schubertOTUs <-paste0("Otu",schubertOTUs)
#Drop the OTU 19 (which is c.difficle) at Pat's request
# Pull out OTU 19 (C. difficile itself) and remove it from the predictor
# matrix, at Pat's request.
otuIDX <- which(colnames(abundDat) == "Otu0019")
if (length(otuIDX) != 1) {
  # Guard: with zero matches the original silently produced an empty
  # cDiff_OTU and corrupt column indexing below.
  stop("expected exactly one 'Otu0019' column in the abundance table")
}
cDiff_OTU <- abundDat[, otuIDX]
# Negative indexing handles the first/last-column edge cases that the
# original c(1:(otuIDX-1), (otuIDX+1):ncol(...)) construction broke on
# (1:0 expands to c(1, 0) when otuIDX == 1).
abundDat <- abundDat[, -otuIDX, drop = FALSE]
#1. Filter 1 : distribution filter
# remove any OTUs where more than 90% of the data is 0
#
# RATIONALE : using a statistical test with a p-value cutoff
# "spends alpha" - this means that the p-value cutoff for subsequent
# statistical tests needs to be more stringent for them to really be
# useful. Also, you don't want to waste your time performing statistical
# tests on features that will be unhelpful because there is no information there.
abFact = 0
percentCounts <- apply(abundDat,2,function(x){
quantile(x,probs = seq(0.1,0.9,by=0.1))["90%"] > abFact
})
#remove those OTUs
abundDat <- abundDat[,percentCounts]
# --- Begin "modified Lefse portion" ---
# 2. Kruskal Wallis Filter
# I can also adjust the p-values after this, but I've choose not to .. for now..
#
# RATIONALE : used in LeFse already, left it in.
otu.pVal<-apply(abundDat,2,function(x){
kruskal.test(x,g=metadata$disease_stat)$p.value})
abundDat <- abundDat[,otu.pVal <0.05]
# --- Normally LefSe does a Wilcox group wise filter here ----
# I really don't think that filter is necessary. You've already spent
# some alpha on the Kruskal Wallis test - why spend more. The next
# step can handle when variables >> observations, so I am going to
# give the next step as much reasonable data as possible.
#4. Penalized regression
#
# RATIONALE : Why not interpret the betas of a regression? Everyone
# knows how to do this, and does it all the time since regressions
# are such a common instrument. Using the glment package, we can
# use penalized regression to perform feature selection. GLMNET
# allows us to use lasso (alpha = 1), ridge (alpha = 0), and
# so called elastic-net (alpha between 0 and 1). By using either of
# the three methods of penalized regression we can control how many
# correlated features (lasso = least; ridge = most) are selected.
# Penalized regression has also been used in human microarray studies
# and are well suited from when variables >> observations.
#
# Also, in practice it seems that there isn't a huge difference between
# using LDA or vanilla binary regression (some sources include the elements of statistical learning - section 4.4.4:
# ... p. 105 - "it is generally felt that logistic regression is a safer, more robust bet than the LDA model,
# ... relying on fewer assumptions. It is our experience that the models give very similar results,
# ... even when LDA is used inapproperiately, such as with qualtiative predictors.)
# Thus, the substitution of regression for the LDA isn't supposed to be revolutionary.
# But it does give more flexibility to work with (in my opinion)
#
# Penalized LDA does exist, but I still prefer the more direct interpretations
# of the beta's afforded by glmnet.
# ----- A. Using the abundance data only & Multinomial Response -----
# could be parallelized to improve efficeny.
# using one core, can take ~ 2 - 5 minutes to run based upon boot num
# personally, I prefer a bootnumb of 100, but I am doing 30 to
# approximately "LefSe's"
bestOTUs_noMeta<-getBestOTU(response=metadata$disease_stat,
countMatrix=abundDat,
alph=1,
bootNum=30,
cvfold=5,
logOTUData=TRUE,
responseType = "multinomial",
type.measure="class")
# effect summary variable has two arguments
# ... effectSummary$effectSizePlot which shows the effect log odds beta
# ... effectSummary$effectSizeSummary which is a table that has each OTU, a summary of the beta (mean, min, max ect.)
effectSummary<-getEffectScore(OTUdat = bestOTUs_noMeta,bootMin=(30*0.9),response="multinomial",taxonomy = taxonomy)
bestOTUs_noMeta_pass<-effectSummary$effectSizeSummary
ggsave(effectSummary$effectSizePlot,file="Figures/Multinomial_ExcludeMeta_EffectSize.tiff")
# otuCheck gives me some diagnostic plots regarding the OTUs that have been selected
# 1. sharedTaxaPlot - this is basically a "venn" diagram, but is more readable. It shows
# whether an individual OTU is shared between (multinomial) disease states
# 2. abundacePlot - is a little dense. If you want to make it better, set maxTaxLevel to something
# else (for example "genus", or "family"). This produces a boxplot of the abundance for each
# of the multinomial controls.
tmp<-otuCheck(bestOTUs = bestOTUs_noMeta_pass,
taxonomy = taxonomy,
maxTaxaLevel = "all",
countMatrix = abundDat,
meta = metadata[,c("sampleID","disease_stat")],
response = "multinomial")
# of note - there is near perfect overlap if I use *all* otus that are found (i.e. effectSize summary bootMin = 0)
# thus, some differences are due to stringency.
vennList<-vennText(A=schubertOTUs,B=unique(as.character(bestOTUs_noMeta_pass$Predictor)))
# .. getting and storing the AUCs
storeAUC_noMeta<-c()
for(comp in 1:nrow(comparisons)){
comp<-comparisons[comp,]
metaSubs <- filter(metadata,disease_stat %in% comp)
print(comp)
#always make sure to select right reference
if("Case" %in% comp){
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "Case"))
}else{
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "DiarrhealControl"))
}
# .... genomic model - biomarkers
biomarker<-log2(abundDat[metaSubs$sampleID,as.character(unique(bestOTUs_noMeta_pass$Predictor))]+1)
biomarkerMeta<-merge(x=metaSubs,y=biomarker,by.x="sampleID",by.y=0)
fla<-as.formula(paste("response ~ ", paste(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))], collapse="+")))
biomarkerFit<-lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(biomarkerFit)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_noMeta<-rbind(storeAUC_noMeta,c(paste(comp,collapse=" - "),"genomic-biomarkers",
round(harrellC_orj,3),
round(harrellC_adj,3)))
# .... combined model
fla<-as.formula(paste("response ~ ", paste(c(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))],
"age","gender"," race2","antibiotics..3mo","antacid",
"Surgery6mos","historyCdiff","ResidenceCdiff", "Healthworker"), collapse="+")))
combinedModel <- lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(combinedModel)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_noMeta<-rbind(storeAUC_noMeta,c(paste(comp,collapse=" - "),"combined (genomic = biomarkers)",
round(harrellC_orj,3),
round(harrellC_adj,3)))
# .... genomic model - biomarkers
biomarker<-log2(abundDat[metaSubs$sampleID,colnames(abundDat) %in% schubertOTUs]+1)
biomarkerMeta<-merge(x=metaSubs,y=biomarker,by.x="sampleID",by.y=0)
fla<-as.formula(paste("response ~ ", paste(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))], collapse="+")))
biomarkerFit<-lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(biomarkerFit)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_noMeta<-rbind(storeAUC_noMeta,c(paste(comp,collapse=" - "),"genomic-biomarkers(schubert)",
round(harrellC_orj,3),
round(harrellC_adj,3)))
# .... combined model
fla<-as.formula(paste("response ~ ", paste(c(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))],
"age","gender"," race2","antibiotics..3mo","antacid",
"Surgery6mos","historyCdiff","ResidenceCdiff", "Healthworker"), collapse="+")))
combinedModel <- lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(combinedModel)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_noMeta<-rbind(storeAUC_noMeta,c(paste(comp,collapse=" - "),"combined (genomic = biomarkers (schubert))",
round(harrellC_orj,3),
round(harrellC_adj,3)))
}
#here it is with bootMin = 0 (there is much larger overlap if we further filter for reliable OTUs)
# effectSummary<-getEffectScore(OTUdat = bestOTUs_noMeta,bootMin=0,response="multinomial")
# bestOTUs_noMeta_pass<-effectSummary$effectSizeSummary
#
# vennList<-vennText(A=schubertOTUs,B=unique(as.character(bestOTUs_noMeta_pass$Predictor)))
# ---- Investigate those unique to schubert
# some OTUs were filtered out at the distribution filter step, so
# remove those, from the vennList.
notInAbund<-setdiff(vennList$A_only,colnames(abundDat))
tmpAbund<-melt(abundDat[,setdiff(vennList$A_only,notInAbund)])
colnames(tmpAbund)<-c("sampleID","OTU","Abundance")
tmpAbund <- merge(tmpAbund,metadata[,c("sampleID","disease_stat")],by="sampleID")
#low median abundance?
ggplot(tmpAbund,aes(x=disease_stat,y=log2(Abundance)))+
geom_boxplot(notch=1)+
facet_wrap(~OTU)+
theme_bw()
# ------ investigate those unique to method
#similarity to schubert
# some OTUs were filtered out at the distribution filter step, so
# remove those, from the vennList.
tmpAbund<-melt(abundDat[,vennList$B_only])
colnames(tmpAbund)<-c("sampleID","OTU","Abundance")
tmpAbund <- merge(tmpAbund,metadata[,c("sampleID","disease_stat")],by="sampleID")
# low abundance, too high a reliable on outliers?
ggplot(data=tmpAbund,aes(x=disease_stat,y=log2(Abundance+1)))+
geom_boxplot()+
facet_wrap(~OTU)+
theme_bw()+
theme(axis.text.x = element_text(angle=90,hjust=1))
# ----- B. Using metadata & Multinomial Response-----
# now we can "adjust" the biomarkers for the impacts of the different metadata variables.
# Loosely, this should mean that biomarkers which are highly correlated with specific
# metadata variables are less likely to be selected. So the biomarkers that drop
# out are likely to provide "value added" information. This still needs to be
# checked afterwards to make sure that's true. Also - because diversity was
# found to be significant, I would also like to adjust for that.
#create a subsample of the metadata with variables that I want to "adjust" for.
# I will adjust for all the elements in the base model
metaVars<-c("sampleID",
"age",
"gender",
"race2",
"antibiotics..3mo",
"antacid",
"Surgery6mos",
"historyCdiff",
"ResidenceCdiff",
"Healthworker")
metadata.sub<- metadata[,metaVars]
#now select OTUs again
bestOTUs_Meta<-getBestOTU(metadata=metadata.sub,
response=metadata$disease_stat,
varsToRemove= NULL,
countMatrix=abundDat,
alph=1,
bootNum=30,
cvfold=5,
logOTUData=TRUE,
responseType = "multinomial",
type.measure="class")
effectSummary<-getEffectScore(OTUdat = bestOTUs_Meta,bootMin=(30*0.9),response="multinomial",taxonomy=taxonomy)
bestOTUs_Meta_pass<-effectSummary$effectSizeSummary
ggsave(effectSummary$effectSizePlot,file="Figures/Multinomial_IncludeMeta_EffectSize.tiff")
#adding another columns to indicate whether they are OTUs or metadata variables
bestOTUs_Meta_pass$biomarkerType <- ifelse(grepl("Otu",bestOTUs_Meta_pass$Predictor),"OTU","META")
#now just get at the useful OTUs, see how they do.
bestOTUs_Meta_pass_onlyOTUs<-bestOTUs_Meta_pass[bestOTUs_Meta_pass$biomarkerType == "OTU",] %>% droplevels
#check their usefullness
tmp<-otuCheck(bestOTUs = bestOTUs_Meta_pass_onlyOTUs,
taxonomy = taxonomy,
maxTaxaLevel = "all",
countMatrix = abundDat,
meta = metadata[,c("sampleID","disease_stat")],
response = "multinomial")
#similarity to schubert (simliar results to OTUs found when not including metadata)
vennList<-vennText(A=schubertOTUs,B=as.character(unique(bestOTUs_Meta_pass_onlyOTUs$Predictor)))
#similarity to prior list (very similar, there are some differences however )
vennList<-vennText(A=as.character(unique(bestOTUs_noMeta_pass$Predictor)),B=as.character(unique(bestOTUs_Meta_pass_onlyOTUs$Predictor)))
#add C.difficle Value
metadata$cDiff<-cDiff_OTU
# .. getting and storing the AUCs
storeAUC_Meta<-c()
for(comp in 1:nrow(comparisons)){
comp<-comparisons[comp,]
metaSubs <- filter(metadata,disease_stat %in% comp)
print(comp)
#always make sure to select right reference
if("Case" %in% comp){
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "Case"))
}else{
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "DiarrhealControl"))
}
# .... c diff biomarker
# .... genomic model - biomarkers
biomarker<-log2(abundDat[metaSubs$sampleID,as.character(unique(bestOTUs_Meta_pass_onlyOTUs$Predictor))]+1)
#biomarker<-abundDat[metaSubs$sampleID,colnames(abundDat) %in% schubertOTUs]
biomarkerMeta<-merge(x=metaSubs,y=biomarker,by.x="sampleID",by.y=0)
fla<-as.formula(paste("response ~ ", paste(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))], collapse="+")))
biomarkerFit<-lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(biomarkerFit)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_Meta<-rbind(storeAUC_Meta,c(paste(comp,collapse=" - "),"genomic-biomarkers (meta)",
round(harrellC_orj,3),
round(harrellC_adj,3)))
# .... combined model
fla<-as.formula(paste("response ~ ", paste(c(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))],
"age","gender"," race2","antibiotics..3mo","antacid",
"Surgery6mos","historyCdiff","ResidenceCdiff", "Healthworker"), collapse="+")))
combinedModel <- lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(combinedModel)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_Meta<-rbind(storeAUC_Meta,c(paste(comp,collapse=" - "),"combined (genomic = biomarkers (meta))",
round(harrellC_orj,3),
round(harrellC_adj,3)))
}
# ----- C. Using binomial response & no -----
# the result that gives the most "overlap" is using not metadata, so I will see what
# perfomring the glment steps with
#I think the binary and continous response should work, but I feel like I've still
#maybe missed something when mucking about with the multinomial response.
# ----- D. Continuous response -----
# revised, so that now the continous response is C. Diff
metaVars<-c("sampleID",
"age",
"gender",
"race2",
"antibiotics..3mo",
"antacid",
"Surgery6mos",
"historyCdiff",
"ResidenceCdiff",
"Healthworker")
metadata.sub<- metadata[,metaVars]
bestOTUs_Meta_continuous<-getBestOTU(metadata=metadata.sub,
response=log2(cDiff_OTU+1),
varsToRemove= NULL,
countMatrix=abundDat,
alph=1,
bootNum=30,
cvfold=5,
logOTUData=TRUE,
responseType = "continuous")
effectSummary<-getEffectScore(OTUdat = bestOTUs_Meta_continuous,bootMin=(30*0.9),response="continuous",taxonomy=taxonomy)
bestOTUs_Meta_continuous_pass<-effectSummary$effectSizeSummary
#adding another columns to indicate whether they are OTUs or metadata variables
bestOTUs_Meta_continuous_pass$biomarkerType <- ifelse(grepl("Otu",bestOTUs_Meta_continuous_pass$Predictor),"OTU","META")
#a quick look at the useful metadata variables
#filter(bestOTUs_Meta_continuous_pass,biomarkerType == "META")
#now just get at the useful OTUs, see how they do.
bestOTUs_Meta_continuous_pass_onlyOTUs<-bestOTUs_Meta_continuous_pass[bestOTUs_Meta_continuous_pass$biomarkerType == "OTU",] %>% droplevels
tmp<-otuCheck(bestOTUs = bestOTUs_Meta_continuous_pass_onlyOTUs,
taxonomy = taxonomy,
maxTaxaLevel = "all",
countMatrix = abundDat,
meta = metadata[,c("sampleID","disease_stat")],
response = "continuous")
vennList<-vennText(A=schubertOTUs,B=as.character(unique(bestOTUs_Meta_continuous_pass_onlyOTUs$Predictor)))
# .. getting and storing the AUCs
storeAUC_Meta_div<-c()
for(comp in 1:nrow(comparisons)){
comp<-comparisons[comp,]
metaSubs <- filter(metadata,disease_stat %in% comp)
print(comp)
#always make sure to select right reference
if("Case" %in% comp){
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "Case"))
}else{
metaSubs<-mutate(metaSubs, response = as.numeric(disease_stat == "DiarrhealControl"))
}
# ..... c difficle AUC
AUCval<-ci.auc(roc(response=metaSubs$response,predictor=metaSubs$inverseSimpson))
storeAUC_Meta_div<-rbind(storeAUC_Meta_div,c(paste(comp,collapse=" - "),"genomic-diversity",
round(AUCval[2],3),
"NA"))
# .... genomic model - biomarkers
biomarker<-log2(abundDat[metaSubs$sampleID,as.character(unique(bestOTUs_Meta_continuous_pass_onlyOTUs$Predictor))]+1)
biomarkerMeta<-merge(x=metaSubs,y=biomarker,by.x="sampleID",by.y=0)
fla<-as.formula(paste("response ~ ", paste(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))], collapse="+")))
biomarkerFit<-lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(biomarkerFit)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_Meta_div<-rbind(storeAUC_Meta_div,c(paste(comp,collapse=" - "),"genomic-biomarkers (cDiff)",
round( harrellC_orj,3),
round(harrellC_adj,3)))
# .... combined model
fla<-as.formula(paste("response ~ ", paste(c(colnames(biomarkerMeta)[grepl("Otu",colnames(biomarkerMeta))],
"age","gender"," race2","antibiotics..3mo","antacid",
"Surgery6mos","historyCdiff","ResidenceCdiff", "Healthworker"), collapse="+")))
combinedModel <- lrm(fla,data=biomarkerMeta,maxit=1000,x=TRUE,y=TRUE)
optimismModel<-validate(combinedModel)
#now adjust that AUC
harrellC_orj<-(optimismModel["Dxy","index.orig"]+1)/2
#harrells c statistics
harrellC_adj<-(optimismModel["Dxy","index.corrected"]+1)/2
storeAUC_Meta_div<-rbind(storeAUC_Meta_div,c(paste(comp,collapse=" - "),"combined (genomic = biomarkers (cDiff))",
round(harrellC_orj,3),
round(harrellC_adj,3)))
}
######################################
# combine all of the AUC tables floating around
tab<-rbind(storeAUC,
storeAUC_div,
storeAUC_noMeta,
storeAUC_Meta,
storeAUC_Meta_div)
write.csv(tab,file="Reports/AUC_for_metadata.csv",quote=F)
|
daf87f955f80dbcf2cc253297165ec1e2599dc13
|
e3c9a095241d3eb7c02544aad3a14fbd5f1a2ba6
|
/cachematrix.R
|
7824a72a38c3384d702cf588d68dff0d3d0be8f2
|
[] |
no_license
|
JotaRX/ProgrammingAssignment2
|
055f769d646f24a3dc51aaccba425b855fbb763f
|
7c95471f057ec17f3761aec3f3911f2d40dd56b8
|
refs/heads/master
| 2022-11-22T12:14:23.425721
| 2020-07-18T18:50:02
| 2020-07-18T18:50:02
| 280,716,893
| 0
| 0
| null | 2020-07-18T18:36:29
| 2020-07-18T18:36:28
| null |
UTF-8
|
R
| false
| false
| 971
|
r
|
cachematrix.R
|
## Assignment 2
## These functions create a special matrix object that caches its inverse,
## so repeated requests for the inverse avoid recomputing it.
## Set matrix and clear his inverse
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures that share one enclosing
## environment holding the matrix `x` and its cached inverse. Replacing
## the matrix via setmatrix() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  setmatrix <- function(y) {
    # `<<-` writes into the shared enclosing environment so every
    # closure observes the update.
    x <<- y
    cached_inv <<- NULL  # stored inverse is stale once the matrix changes
  }
  getmatrix <- function() x
  setinv <- function(inv) cached_inv <<- inv
  getinv <- function() cached_inv
  list(
    setmatrix = setmatrix,
    getmatrix = getmatrix,
    setinv = setinv,
    getinv = getinv
  )
}
## Check whether the inverse has already been computed; if so, return the
## cached copy, otherwise compute the inverse and store it in the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## If the inverse has already been cached it is returned directly (with a
## message); otherwise it is computed with solve(), stored in the cache,
## and returned.
##
## x   : object returned by makeCacheMatrix()
## ... : additional arguments forwarded to solve() -- FIX: the original
##       signature accepted `...` but never passed it on.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("Obtained Matrix from cache")
    return(cached)
  }
  inv <- solve(x$getmatrix(), ...)
  x$setinv(inv)
  inv
}
#To test, uncomment the lines below to run the cache with a random 3x3 matrix
#matriz<-makeCacheMatrix(matrix(rnorm(9),3,3))
#cacheSolve(matriz)
|
fcad1c14e5273d893968eb978657c823200ecee0
|
fde9c70b67e2ea092f0a3966c8b098b08ad0ffcc
|
/man/sortCrit.Rd
|
61845231700eb6dd45f255fdf038b41a77d85900
|
[] |
no_license
|
hazaeljones/geozoning
|
41d215022b34e6944e4ba7395dc0778c2c49ba48
|
c8310ca97a775c4d55807eb3ac3ab1ae73da5334
|
refs/heads/master
| 2021-01-20T12:37:46.187798
| 2018-02-23T09:44:47
| 2018-02-23T09:44:47
| 90,385,766
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,290
|
rd
|
sortCrit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sortCrit.R
\name{sortCrit}
\alias{sortCrit}
\title{sortCrit called by correctionTree}
\usage{
sortCrit(qProb, crit, cost, costL, nz, mdist, listOfZ, map, disp = 0,
SAVE = FALSE)
}
\arguments{
\item{qProb}{probability vector used to generate quantile values}
\item{crit}{list of criteria}
\item{cost}{list of costs}
\item{costL}{list of per label costs}
\item{nz}{list of number of zones}
\item{mdist}{list of distance matrices}
\item{listOfZ}{list of zoning objects}
\item{map}{object returned by function genMap}
\item{disp}{0: no info, 1: plot best corrected zoning}
\item{SAVE}{logical value, if TRUE function returns more elements}
}
\value{
a list with components
\describe{
\item{bestcrit}{best criterion value at last level}
\item{critList}{criterion values at last level}
\item{costList}{cost values at last level}
\item{costLList}{cost per label values at last level}
\item{nzList}{vector of number of zones at last level}
\item{qProb}{vector of probabilities values used for quantiles}
\item{zk}{(SAVE=TRUE) list of zoning objects (such as returned by calNei function), first element corresponds to initial zoning, each other element is a list with each (last if ALL=FALSE) level zoning objects}
\item{mdist}{(SAVE=TRUE) list of initial distance matrix and all (last if ALL=FALSE) level distance matrices}
\item{crit}{(SAVE=TRUE) list of initial criterion and all (last if ALL=FALSE) level criteria }
\item{cost}{(SAVE=TRUE) list of initial cost and all (last if ALL=FALSE) level costs }
\item{costL}{(SAVE=TRUE) list of initial cost per label and all (last if ALL=FALSE) level costs per label}
\item{nz}{(SAVE=TRUE) list of initial number of zones and all (last if ALL=FALSE) level number of zones}
}
}
\description{
sortCrit called by correctionTree
}
\details{
sort last level criteria from list of zonings, return criteria and list of zonings if SAVE=TRUE, otherwise only return last level criteria
}
\examples{
data(mapTest)
qProb=c(0.4,0.7)
criti=correctionTree(qProb,mapTest)
# displays best criterion, corresponding costs and number of zones
geozoning:::sortCrit(qProb,criti$criterion,criti$cost,criti$costL,
criti$nz,criti$mdist,criti$zk,mapTest)
}
\keyword{internal}
|
def65b0d0db71acba01abad4cd51af40f2b30efa
|
57edf42dab2d6ce0d0400daf40573ad4d63cf843
|
/global.R
|
c11acb7cf776b3d0f03496988a37a8a442b52f06
|
[] |
no_license
|
MikeLeeMcLau/Capstone_Info
|
ca42f50a044495d235f8091691f39e496f81ac21
|
266cefa2ab4efb5afe7a349c3a9e109038fdeaf7
|
refs/heads/master
| 2021-01-25T00:57:17.667130
| 2017-06-18T19:33:22
| 2017-06-18T19:33:22
| 94,708,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,602
|
r
|
global.R
|
# Pre-computed n-gram lookup tables, one per n-gram order (bigram through
# six-gram), loaded once at app start-up. The lookup functions below read
# a `term` column (space-separated n-gram) and a `Mike_Count` frequency
# column from these frames -- assumed schema of the CSVs; TODO confirm.
# NOTE(review): Bi_Data is loaded but not referenced in the visible code.
Bi_Data <- read.csv("Bi_Data.csv")
Tri_Data <- read.csv("Tri_Data.csv")
Quad_Data <- read.csv("Quad_Data.csv")
Pent_Data <- read.csv("Pent_Data.csv")
Sext_Data <- read.csv("Sext_Data.csv")
# Reduce a phrase to its final five words.
#
# Word count is estimated as (number of non-word character runs) + 1,
# the same heuristic used by the downto* helpers. When the estimate is
# at least five, the last five space-separated tokens are re-joined with
# single spaces; otherwise the function falls through and returns NULL
# (if() with no else), matching the original behavior.
checkforFiveWords <- function(getCheckString) {
  word_estimate <- length(gregexpr("\\W+", getCheckString)[[1]]) + 1
  if (word_estimate >= 5) {
    last_five <- tail(unlist(strsplit(getCheckString, split = " ")), 5)
    paste(last_five, collapse = " ")
  }
}
# Trim a phrase to its last four words; returns NULL (if() with no else)
# when the non-word-run word estimate is below four.
downtoFour <- function(getcheckString4) {
  word_estimate <- length(gregexpr("\\W+", getcheckString4)[[1]]) + 1
  if (word_estimate >= 4) {
    paste(tail(unlist(strsplit(getcheckString4, split = " ")), 4), collapse = " ")
  }
}
# Trim a phrase to its last three words; returns NULL (if() with no else)
# when the non-word-run word estimate is below three.
downtoThree <- function(getcheckString3) {
  word_estimate <- length(gregexpr("\\W+", getcheckString3)[[1]]) + 1
  if (word_estimate >= 3) {
    paste(tail(unlist(strsplit(getcheckString3, split = " ")), 3), collapse = " ")
  }
}
# Trim a phrase to its last two words.
#
# Quirk preserved from the original: gregexpr() with no match returns a
# single -1 element, so a one-word input is estimated as 2 words and is
# returned unchanged rather than yielding NULL.
downtoTwo <- function(getcheckString2) {
  word_estimate <- length(gregexpr("\\W+", getcheckString2)[[1]]) + 1
  if (word_estimate >= 2) {
    paste(tail(unlist(strsplit(getcheckString2, split = " ")), 2), collapse = " ")
  }
}
# Look up a two-word prefix in the trigram table and return up to five
# candidate rows tagged with their source; a single placeholder row is
# returned when nothing matches.
getTopFiveWords2 <- function(getString2) {
  # Anchor the prefix at the start of the term, followed by a space.
  prefix <- paste('^', getString2, ' ', sep = '')
  hits <- grep(prefix, Tri_Data$term)
  if (length(hits) > 0) {
    top <- head(Tri_Data[hits, ], 5)
    cbind(top, SourceFun = 'Two Word Match')
  } else {
    data.frame(term = 'No Match', Mike_Count = 0, SourceFun = 'Two Word Check')
  }
}
# Look up a three-word prefix in the four-gram table and return up to
# five candidate rows tagged with their source; a single placeholder row
# is returned when nothing matches.
getTopFiveWords3 <- function(getString3) {
  # Anchor the prefix at the start of the term, followed by a space.
  prefix <- paste('^', getString3, ' ', sep = '')
  hits <- grep(prefix, Quad_Data$term)
  if (length(hits) > 0) {
    top <- head(Quad_Data[hits, ], 5)
    cbind(top, SourceFun = 'Three Word Match')
  } else {
    data.frame(term = 'No Match', Mike_Count = 0, SourceFun = 'Three Word Check')
  }
}
# Look up a four-word prefix in the five-gram table and return up to
# five candidate rows tagged with their source; a single placeholder row
# is returned when nothing matches.
#
# FIX: the placeholder row was tagged 'Four Word Match', making it
# indistinguishable from real hits; it is now tagged 'Four Word Check',
# consistent with the two- and three-word versions. (FinalTopFiveWords
# already filters on term != 'No Match', so only the diagnostic label
# changes.) Also removed a dead bare `tempDF` expression.
getTopFiveWords4 <- function(getString4) {
  # Anchor the prefix at the start of the term, followed by a space.
  getString4 <- paste('^', getString4, ' ', sep = '')
  findRecord <- grep(getString4, Pent_Data$term)
  if (length(findRecord) > 0) {
    tempDF <- head(Pent_Data[findRecord, ], 5)
    cbind(tempDF, SourceFun = 'Four Word Match')
  } else {
    data.frame(term = 'No Match', Mike_Count = 0, SourceFun = 'Four Word Check')
  }
}
# Look up a five-word prefix in the six-gram table and return up to
# five candidate rows tagged with their source; a single placeholder row
# is returned when nothing matches.
#
# FIX: the placeholder row was tagged 'Five Word Match', making it
# indistinguishable from real hits; it is now tagged 'Five Word Check',
# consistent with the two- and three-word versions. (FinalTopFiveWords
# already filters on term != 'No Match', so only the diagnostic label
# changes.) Also removed a dead bare `tempDF` expression.
getTopFiveWords5 <- function(getString5) {
  # Anchor the prefix at the start of the term, followed by a space.
  getString5 <- paste('^', getString5, ' ', sep = '')
  findRecord <- grep(getString5, Sext_Data$term)
  if (length(findRecord) > 0) {
    tempDF <- head(Sext_Data[findRecord, ], 5)
    cbind(tempDF, SourceFun = 'Five Word Match')
  } else {
    data.frame(term = 'No Match', Mike_Count = 0, SourceFun = 'Five Word Check')
  }
}
# Normalize raw user text for n-gram lookup: drop vector elements that
# contain profanity, strip non-ASCII bytes and punctuation, lower-case,
# and replace any remaining non-letter character with a space.
#
# FIX: the original assigned the profanity-filtered result and then
# immediately overwrote it with the unfiltered input (`clean1 <- getInput`),
# so the filter never took effect. That dead overwrite is removed.
cleanInput <- function(getInput) {
  badword1 <- grep('[Ff][Uu][Cc][Kk]', getInput)
  badword2 <- grep('[Ss][Hh][Ii][Tt]', getInput)
  if (length(badword1) + length(badword2) > 0) {
    # Drop the offending elements. The guard matters: x[-integer(0)]
    # would select nothing, so we only negative-index on real hits.
    clean1 <- getInput[-c(badword1, badword2)]
  } else {
    clean1 <- getInput
  }
  # Strip non-ASCII bytes, then punctuation, then lower-case.
  clean1 <- iconv(clean1, "latin1", "ASCII", sub = "")
  clean1 <- gsub("[?.;!¡¿·&,_():;']", "", clean1)
  clean1 <- tolower(clean1)
  # Anything left that is not a letter becomes a space.
  gsub(pattern = '[^a-zA-Z]', clean1, replacement = ' ')
}
ReviewText <- function(getTextToReview) {
  # Clean the raw user text, then collect candidate continuations from the
  # 5-, 4-, 3- and 2-word-prefix lookup tables (longest prefix first).
  # Returns a data frame with columns term, Mike_Count and SourceFun; if
  # the input is too short, a single instructional placeholder row.
  cleaned <- cleanInput(getTextToReview)
  results <- data.frame(term = character(), Mike_Count = integer(), SourceFun = character())
  # Same word-count heuristic used by the downto* helpers.
  n_words <- sapply(gregexpr("\\W+", cleaned), length) + 1
  n_lookups <- 0
  if (n_words >= 5) {
    results <- getTopFiveWords5(checkforFiveWords(cleaned))
    n_lookups <- n_lookups + 1
  }
  if (n_words > 4) {
    results <- rbind(results, getTopFiveWords4(downtoFour(cleaned)))
    n_lookups <- n_lookups + 1
  }
  if (n_words > 3) {
    results <- rbind(results, getTopFiveWords3(downtoThree(cleaned)))
    n_lookups <- n_lookups + 1
  }
  if (n_words >= 2) {
    results <- rbind(results, getTopFiveWords2(downtoTwo(cleaned)))
    n_lookups <- n_lookups + 1
  }
  if (n_lookups == 0) {
    results <- data.frame(term = 'Please Enter More Than One Word', Mike_Count = 0, SourceFun = 'Please Enter More Than One Word')
  }
  results
}
FinalTopFiveWords <- function(dfGetFive) {
  # Collapse the per-n-gram match table into a ranked vector of predicted
  # next words.
  #
  # dfGetFive: data frame with columns term, Mike_Count and SourceFun, as
  #            produced by ReviewText().
  # Returns a character vector (up to 6 words, head() default) ordered by
  # total count, with words already covered by the five-/four-word matches
  # excluded from the aggregate ranking.
  dfGetFive$Get_Last_Word <- word(dfGetFive$term, -1)
  # Drop the "No Match" sentinel rows (their last word is "Match").
  dfGetFive <- subset(dfGetFive, Get_Last_Word != 'Match')
  dfFiveWordMatch <- subset(dfGetFive, dfGetFive$SourceFun == 'Five Word Match' & dfGetFive$term != 'No Match')
  dfFourWordMatch <- subset(dfGetFive, dfGetFive$SourceFun == 'Four Word Match' & dfGetFive$term != 'No Match')
  # Total the counts per candidate last word.
  Final_List <- aggregate(Mike_Count ~ Get_Last_Word, dfGetFive, sum)
  # FIX: the original compared with `!=` against a possibly multi-element
  # vector, which recycles element-wise and can keep/drop the wrong rows;
  # %in% expresses the intended set exclusion.
  if (nrow(dfFiveWordMatch) > 0) {
    Final_List <- subset(Final_List, !(Final_List$Get_Last_Word %in% dfFiveWordMatch$Get_Last_Word))
  }
  if (nrow(dfFourWordMatch) > 0) {
    Final_List <- subset(Final_List, !(Final_List$Get_Last_Word %in% dfFourWordMatch$Get_Last_Word))
  }
  Final_Term_Matrix <- Final_List[order(Final_List$Mike_Count, decreasing = TRUE), ]
  Final_Term_Matrix <- head(Final_Term_Matrix)
  Final_Term_Matrix$Get_Last_Word
}
FinalTopFiveWords2 <- function(dfGetFive) {
  # Return up to five unique predicted last words from the match table,
  # skipping "No Match" sentinel rows (their last word is "Match").
  # Relies on word() — stringr must be attached by the surrounding app.
  dfGetFive$Get_Last_Word <- word(dfGetFive$term, -1)
  valid <- subset(dfGetFive, Get_Last_Word != 'Match')
  head(unique(valid$Get_Last_Word), 5)
}
graphData <- function(getGraphList) {
  # Aggregate match counts by predicted last word for plotting: drop the
  # "No Match" sentinel rows, sum Mike_Count per word, and return the top
  # rows (head() default of 6) sorted by descending count.
  getGraphList$Get_Last_Word <- word(getGraphList$term, -1)
  valid <- subset(getGraphList, Get_Last_Word != 'Match')
  totals <- aggregate(Mike_Count ~ Get_Last_Word, valid, sum)
  head(totals[order(totals$Mike_Count, decreasing = TRUE), ])
}
|
c06130bf145f74b38abacac16fdf8b95553870a9
|
1443e812411278d1f776f8f7d1196add8e2dcc31
|
/man/seurat_small.Rd
|
5b2d6bdb91c44b0d0550d01ba770d686bc9f725d
|
[
"MIT"
] |
permissive
|
WeiSong-bio/roryk-bcbioSinglecell
|
e96f5ab1cb99cf1c59efd728a394aaea104d82b2
|
2b090f2300799d17fafe086bd03a943d612c809f
|
refs/heads/master
| 2020-06-15T23:38:23.802177
| 2018-07-03T21:01:07
| 2018-07-03T21:01:07
| 195,422,697
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 532
|
rd
|
seurat_small.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{seurat_small}
\alias{seurat_small}
\title{Seurat Example}
\format{An object of class \code{seurat} of length 1.}
\usage{
seurat_small
}
\description{
Seurat Example
}
\examples{
show(seurat_small)
}
\seealso{
Other Minimal Example Data: \code{\link{all_markers_small}},
\code{\link{cellranger_small}},
\code{\link{indrops_small}},
\code{\link{known_markers_small}}
}
\author{
Michael Steinbaugh
}
\keyword{datasets}
|
4489d04f889e5912dc0bb1a75c1630e89f7b4593
|
f928b334d4fdde7fceeb8e21773f0ad85af9b2f3
|
/preliminaries.R
|
fc9c75b4e603d3c74554f9a9273e7079b5bd0552
|
[
"Apache-2.0"
] |
permissive
|
KirstyLHassall/ACE
|
596eeaf09bd8ac0a131a6c5bfb4fb07c2519c0a6
|
5c169c833b62d4bf7bb9f676784559609f30cf36
|
refs/heads/master
| 2020-04-22T11:45:32.348515
| 2019-02-12T21:51:34
| 2019-02-12T21:51:34
| 170,351,902
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,990
|
r
|
preliminaries.R
|
# Create all auxilliary files
# Creates initial Network diagram and saves the coordinates
# Creates initial CPT files
library(igraph)
library(shape)
library(RColorBrewer)
appName <- "ACE"
# Read the list of elicitation networks ("contexts") and strip characters
# (spaces, colons) that would be unsafe in the output file names built below.
allcontexts <- read.csv(paste(appName, "//nodeTables//NetworkNames.csv", sep=""))
contexts <- as.character(allcontexts$Networks)
contexts <- gsub(" ", "", contexts)
contexts <- gsub(":", "", contexts)
# Read the user names and sanitise them the same way (plus hyphens); one
# initial CPT file is written per (network, node, user) combination.
users <- read.csv(paste(appName, "//nodeTables//Usernames.csv", sep=""))
elicitees <- users$Users
elicitees <- gsub(" ", "", elicitees)
elicitees <- gsub(":", "", elicitees)
elicitees <- gsub("-", "", elicitees)
# choose 9 distinct colours (one per node Type level)
pal <- brewer.pal(9, "Set1")
# For every network: draw and save the graph, persist its layout
# coordinates, and write one empty conditional-probability table (CPT)
# per node per elicitee.
for (k in contexts){
  print(k)
  set.seed(97)  # fixed seed so the force-directed layout is reproducible
  # draw network
  edges <- read.csv(paste(appName, "//nodeTables//Edge_", k, ".csv", sep=""))
  nodes <- read.csv(paste(appName, "//nodeTables//Node_", k, ".csv", sep=""), colClasses="character")
  # Sanity check: the node names referenced by the edge list should equal
  # the node file's names (printed for manual inspection, not enforced).
  ue <- unique(c(as.character(edges[,1]), as.character(edges[,2])))
  un <- as.character(nodes[,1])
  test <- cbind(sort(ue), sort(un))
  sum(test[,1] != test[,2])
  print(test)
  nNodes <- length(nodes$Node)
  net <- graph_from_data_frame(d=edges, directed=TRUE)
  # order nodes in nodesFile by the Network ordering
  index <- NULL
  for (j in 1:nNodes){
    index <- c(index, which(nodes$Node == V(net)$name[j]))
  }
  nodes <- data.frame(nodeOrder = c(1:nNodes), nodes)
  nodes <- nodes[index, ]
  # resave Node files
  write.csv(nodes, paste(appName, "//nodeTables//Node_netOrder_", k, ".csv", sep=""), row.names=FALSE)
  nodecolours <- pal[as.numeric(as.factor(nodes$Type))]
  nodeNames <- V(net)$name
  nodeNames <- gsub(" ", "\n", nodeNames)  # wrap multi-word labels
  # V(net())$name <- nodeNames
  net_layout <- layout_with_fr(net)
  net_layout <- norm_coords(net_layout, ymin=-1, ymax=1, xmin=-2, xmax=2)
  pdf(width=15, height=10, file=paste(appName, "//nodeTables//fullNetwork_", k, ".pdf", sep=""))
  par(mar=c(1,1,1,1))
  plot(net,
       vertex.color=nodecolours, vertex.size=20, vertex.frame.color=NA, vertex.label.family="sans", vertex.label.font=2, vertex.label=nodeNames,
       edge.arrow.size=.8, rescale=FALSE,
       layout=net_layout*.7)
  dev.off()
  # Persist the layout so later app sessions can redraw the same picture.
  write.csv(data.frame(nodes=V(net)$name, net_layout), file=paste(appName, "//nodeTables//NetworkCoords_", k, ".csv", sep=""), row.names=FALSE)
  # Build one initial (all-zero) CPT per node.
  for (i in 1:nNodes){
    selectedNode <- as.character(nodes$Node[i])
    # How many Parents
    parents <- names(adjacent_vertices(net, selectedNode, mode = c("in"))[[1]])
    # How many Child States (state labels live in columns 4+ of the node file)
    childStates <- c(t(nodes[i,-c(1,2,3)]))
    childStates <- childStates[!is.na(childStates)]
    childStates <- childStates[childStates != ""]
    childStates <- childStates[childStates != " "]
    nChildStates <- length(childStates)
    if(length(parents) < 1){
      # Root node: a single "Frequency" row with one zero per child state.
      # defaultFreq <- rep(round(100/nChildStates), nChildStates)
      # defaultFreq[nChildStates] <- 100 - sum(defaultFreq[nChildStates - 1])
      defaultFreq <- rep(0, nChildStates)
      tbl <- data.frame(cbind(c("Frequency"), matrix(defaultFreq, 1, nChildStates)))
      for (j in 2:(nChildStates + 1)){
        tbl[,j] <- as.numeric(as.vector(tbl[,j]))
      }
      names(tbl) <- c(" ", as.character(childStates))
      tbl$Total <- rowSums(tbl[,(2:(nChildStates + 1))])
    } else {
      # to ensure columns of the CPT are in the right order, get the parent Nodes one by one
      nParents <- length(parents)
      colID <- NULL
      for (parent in 1:nParents){
        colID <- c(colID, which(nodes$Node == parents[parent]))
      }
      parentNodes <- nodes[colID , -c(1:3)]
      # Number of (non-empty) states per parent, and the state labels.
      npStates <- sapply(1:nParents, function(x) sum(parentNodes[x,] != "", na.rm=TRUE))
      pStates <- lapply(1:nParents, function(x){
        out <- parentNodes[x, ]
        out <- out[!is.na(out)]
        out <- out[out != ""]
      })
      # One CPT row per combination of parent states, one zero-filled
      # column per child state.
      nrows <- prod(npStates)
      ncols <- nChildStates
      if (nParents > 1){
        tempInd <- 1:nParents
        tbl <- data.frame(cbind(expand.grid(rev(pStates))[,rev(tempInd)], matrix(0.00, nrows, ncols)))
      } else{
        tbl <- data.frame(cbind(expand.grid(pStates), matrix(0.00, nrows, ncols)))
      }
      names(tbl) <- c(parents, as.character(childStates))
      tbl$Total <- rowSums(tbl[,(1:nChildStates) + nParents])
    }
    nrows <- dim(tbl)[1]
    # Self-assessment columns filled in later by each elicitee.
    tbl$Expertise <- factor(rep(NA, nrows), levels=c("None", "Some", "Expert"))
    tbl$Confidence <- factor(rep(NA, nrows), levels=c("Low", "Medium", "High"))
    # Write one copy of the CPT per elicitee, with R-safe column headers.
    for (e in elicitees){
      # childStates <- c(t(nodes()[which(selectedNode == nodes()$Node),-c(1,2,3)]))
      # childStates <- childStates[!is.na(childStates)]
      # childStates <- childStates[childStates != ""]
      # childStates <- childStates[childStates != " "]
      childHeaders <- childStates
      childHeaders <- sub("<", ".lt.", childHeaders, fixed=TRUE)
      childHeaders <- sub(">", ".gt.", childHeaders, fixed=TRUE)
      childHeaders <- sub("-", "..", childHeaders, fixed=TRUE)
      childHeaders <- sub(" ", ".", childHeaders, fixed=TRUE)
      if (length(parents) == 0){
        allHeaders <- c("X.",childHeaders,"Total", "Expertise", "Confidence")
      } else {
        allHeaders <- c(parents, childHeaders,"Total", "Expertise", "Confidence")
      }
      names(tbl) <- allHeaders
      selectedNode <- gsub(" ", "-", selectedNode)
      parents <- gsub(" ", "-", parents)
      filename = paste(appName, "//initialCPTs//", k, "_", selectedNode, "_", paste(parents, collapse="."), "_", e, ".csv", sep="")
      write.csv(tbl, file=filename, row.names=FALSE)
    }
  }
}
|
40662b03a131028d4a8ee1cb956184093caa6333
|
41cbddf0dca2eb8d7d766c73c824f7ea0e9d643a
|
/man/find_colnames.Rd
|
904c7c68d6ddb07e04fe3b36f190cec122234fb3
|
[
"MIT"
] |
permissive
|
jfontestad/dmtools
|
d3db3380e729e1bc6bf1f7e0b0822b7191ee61fb
|
5e8ad2305600daa4bdd5b393391875ffc2e20678
|
refs/heads/master
| 2023-02-10T05:27:38.535802
| 2020-12-19T18:57:23
| 2020-12-19T18:57:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 421
|
rd
|
find_colnames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abstract.R
\name{find_colnames}
\alias{find_colnames}
\title{Find column names}
\usage{
find_colnames(obj, dataset, row_file)
}
\arguments{
\item{obj}{An object for check.}
\item{dataset}{A dataset, a type is a data frame.}
\item{row_file}{A row of the file.}
}
\value{
A data frame: the result of \code{run_tests}.
}
\description{
Find column names
}
|
7455d06300f492c2ca443a00d86d0f5e0813e2a9
|
ba628e6c0bbdfba914a2c6c693ec85d2f5aa027a
|
/Getting and Cleaning Data/assignment1.r
|
1d0f56b5d42d4053a49b38d65807d2ed47a9012b
|
[] |
no_license
|
jlpeng75/Coursera
|
b7abe959bcf327b5aeb463348b3a916f9f25500f
|
adf84ee26c48085af0fd32d798bad6d3b1317f45
|
refs/heads/main
| 2023-07-18T04:15:06.784508
| 2021-09-01T17:58:03
| 2021-09-01T17:58:03
| 402,151,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
assignment1.r
|
# Coursera "Getting and Cleaning Data" quiz 1 worksheet: each section
# downloads a dataset and computes the answer to one quiz question.
setwd("C:/Users/jpeng11/coursera")
course <- "Getting and Cleaning Data"
if(!file.exists(course)) {
  dir.create(course)
}
setwd(course)
# Question 1: Idaho housing survey (CSV) — count records with VAL == 24.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(fileUrl, destfile = "IdahoHousing.csv")
dateDownloaded <- date()
dateDownloaded
dat <- read.csv("IdahoHousing.csv",header = T, stringsAsFactors = F)
names(dat)
head(dat$VAL); class(dat$VAL)
sum(dat$VAL==24, na.rm = T)
library(data.table)
library(xlsx)
sum(dat$VAL == 24, na.rm = T)
# Question 3: Natural Gas Aquisition Program (xlsx) — sum(Zip * Ext) over
# the specified cell range.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file(fileUrl, destfile = "ngap2017.xlsx", mode = "wb" )
dat <- read.xlsx("ngap2017.xlsx", sheetIndex = 1, rowIndex=17:23, colIndex = 7:15)
sum(dat$Zip*dat$Ext,na.rm=T)
# Question 4
library(XML)
library(RCurl)
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
xData <- getURL(fileUrl)
doc <- xmlTreeParse(xData, useInternalNodes =T)
rootNode <- xmlRoot(doc)
xmlName(rootNode)
names(rootNode)
xmlSApply(rootNode, xmlValue)
zip <- xpathSApply(rootNode, "//zipcode", xmlValue)
zip <- xpathSApply(doc, "//zipcode", xmlValue)
# question 5: mean pwgtp15 by SEX, comparing several equivalent approaches
# (the data.table form on the last line is the fastest).
library(data.table)
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
download.file(fileUrl, destfile = "IdahoHousing.csv")
DT <- fread("IdahoHousing.csv")
head(DT); class(DT)
mean(DT$pwgtp15,by=DT$SEX)
sapply(split(DT$pwgtp15,DT$SEX),mean)
system.time(mean(DT[DT$SEX==1,]$pwgtp15)); system.time(mean(DT[DT$SEX==2,]$pwgtp15))
system.time(tapply(DT$pwgtp15,DT$SEX,mean))
rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]
DT[,mean(pwgtp15),by=SEX]
|
d309ae3a8234c79e0ab91c7faf9c58fe4fed5522
|
b24b769f98bd9b5dd8a12b4cbe62f472e5c53318
|
/R/match_fluency.R
|
ffb9347fd36706907b0b5f18d833c0bb16931e0b
|
[] |
no_license
|
dwulff/memnetr
|
b77f37c163d7e2abe5bcc961432d25ed2fb673fd
|
babf658870f8c0d5344253fbba049ae9d2950661
|
refs/heads/master
| 2020-04-01T03:10:24.616086
| 2019-08-01T14:29:08
| 2019-08-01T14:29:08
| 152,811,963
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,244
|
r
|
match_fluency.R
|
#' Match group fluency data
#'
#' Prune fluency data of two groups so that the groups'
#' distributions of number of productions match.
#'
#' @param data a data frame containing (at least) fluency productions,
#' subject id, and a grouping variable.
#' @param labels a character vector containing the labels of variables
#' containing the fluency productions, subject id, and grouping.
#' @param type pairing strategy: for \code{'match'} each group-1 subject is
#' paired with the not-yet-used group-2 subject closest in number of
#' productions; any other value pairs subjects in length-sorted order.
#' @param tail if \code{TRUE} keep the first \code{min_len} productions of
#' each paired subject, otherwise keep the last \code{min_len}.
#'
#' @return
#' Function returns two lists with matching numbers of fluency
#' productions.
#'
#' @export
match_fluency = function(data, labels, type = 'match', tail = TRUE){
  # collect first row of each subject
  #tmp_data = ddply(data, labels[-1], function(x) x[1,])
  tmp_data = as.data.frame(data %>% group_by_(labels[2],labels[3]) %>% filter((1:n()) == 1) %>% ungroup())
  # split groups and retrieve ids
  groups = split(tmp_data, tmp_data[,labels[3]])
  ids_g1 = groups[[1]][,labels[2]]
  ids_g2 = groups[[2]][,labels[2]]
  tmp_g1 = subset(data, data[[labels[2]]] %in% ids_g1)
  tmp_g2 = subset(data, data[[labels[2]]] %in% ids_g2)
  # get responses (one list element per subject)
  resp_g1 = split(tmp_g1[[labels[1]]],tmp_g1[[labels[2]]])
  resp_g2 = split(tmp_g2[[labels[1]]],tmp_g2[[labels[2]]])
  # get lengths
  len_g1 = sapply(resp_g1,length)
  len_g2 = sapply(resp_g2,length)
  # order both groups by number of productions (ascending)
  resp_g1 = resp_g1[order(len_g1)]
  resp_g2 = resp_g2[order(len_g2)]
  len_g1 = len_g1[order(len_g1)]
  len_g2 = len_g2[order(len_g2)]
  # create new lists
  resp_g1_new = list()
  resp_g2_new = list()
  for(i in 1:length(len_g1)){
    if(type == 'match'){
      # pair with the remaining group-2 subject closest in length
      id = which.min(abs(len_g1[i] - len_g2))
    } else {
      # otherwise pair in sorted order
      id = 1
    }
    # truncate both subjects to the shorter of the two production counts
    min_len = min(len_g1[i],len_g2[id])
    if(tail == TRUE){
      resp_g1_new[[i]] = resp_g1[[i]][ 1:min_len]
      resp_g2_new[[i]] = resp_g2[[id]][1:min_len]
    } else {
      resp_g1_new[[i]] = resp_g1[[i]][ (1+len_g1[i] -min_len):len_g1[i]]
      resp_g2_new[[i]] = resp_g2[[id]][(1+len_g2[id]-min_len):len_g2[id]]
    }
    # remove the matched group-2 subject so it cannot be paired twice
    resp_g2 = resp_g2[-id]
    len_g2 = len_g2[-id]
  }
  out = list(resp_g1_new, resp_g2_new)
  names(out) = names(groups)
  return(out)
}
#
# a = match_fluency(d, labels, tail = T)
#
# lengths(a[[1]])
# lengths(a[[2]])
#
#lengths(a[[1]]) - lengths(resp_g1)
#lengths(a[[2]]) - lengths(resp_g2)
#
|
6ff7ad639f1201b77843f6b8e746a5e6d1c72dfc
|
869f3b10cafa55e1667186a404b400110fc44ac3
|
/nhanes_data_analysis.R
|
ba7a349797f3955369e453c2b5a72f8af31c8a5d
|
[] |
no_license
|
Allisterh/BRSE
|
2c0969cef9edd3f189d6ebd7c9f4f04f2bfead7c
|
c63c2d756dcbff13f425ee9a469bfaa147fa2600
|
refs/heads/main
| 2023-04-17T04:28:21.830643
| 2021-05-07T05:14:11
| 2021-05-07T05:14:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,061
|
r
|
nhanes_data_analysis.R
|
# NHANES analysis: regress systolic blood pressure on sex and age, then
# compare classical, Huber-White, and "Bayesian sandwich" standard errors.
library("sandwich")
library(R2jags)
library(dplyr)
library(xtable)
library(ggplot2)
library(ggpubr)
set.seed(4)
dat <- readRDS("nhanes_subset.rds")
n <- nrow(dat)
x <- dat[, c("MALE", "RIDAGEYR")]
y <- dat[, "BPXSY"]
## exploratory plot
plot(BPXSY ~ RIDAGEYR, data = dat, col = MALE + 1)
dat$gender <- ifelse(dat$MALE, "Male", "Female")
# Publication-quality scatter plot of blood pressure vs age, by gender.
scatter_plot <- ggplot(dat, aes(x = RIDAGEYR, y = BPXSY, color = gender)) +
  geom_point() +
  stat_smooth() +
  xlab("Age (years)") + ylab("Systolic blood pressure (mm Hg)") +
  theme_pubr() +
  scale_color_manual(values = c(
    "#1749FF", "#D92321", "#0AB7C9",
    "#FF6F1B", "#810094", "#378252",
    "#FF5EBF", "#3700A5", "#8F8F8F",
    "#787873"
  )) +
  theme(
    strip.text = element_text(hjust = 0.5, size = 18),
    plot.title = element_text(hjust = 0.5, size = 18),
    panel.border = element_rect(fill = NA),
    panel.grid.minor.y = element_line(),
    panel.grid.major.y = element_line(),
    legend.position = "bottom",
    legend.text = element_text(size = 18),
    legend.title = element_blank(),
    axis.title = element_text(size = 18),
    axis.text = element_text(size = 18)
  )
ggsave("nhanes_scatter.png", plot = scatter_plot, height = 5.3, width = 5)
## ordinary least square
lmmod <- lm(BPXSY ~ MALE + RIDAGEYR, data = dat)
## model based standard error
naive_se <- summary(lmmod)$coefficients[ ,'Std. Error']
## Huber-White robust standard error
hw_se <- sqrt(diag(vcovHC(lmmod, "HC0")))
## specify the Bayesian model: normal likelihood, vague normal priors on
## the 3 regression coefficients and a vague gamma prior on the precision
jags_model <- textConnection("model{
for (i in 1:n) {
y[i] ~ dnorm(mu[i], invsigma2)
mu[i] = x[i, ] %*% beta[]
}
for (j in 1:3) {
beta[j] ~ dnorm(0, 0.001)
}
invsigma2 ~ dgamma(0.01, 0.01)
}
")
# design matrix (intercept column prepended)
X <- as.matrix(cbind(1, x))
lm_model <- jags.model(file = jags_model,
                       data = list(y = y, x = X, n = n),
                       n.chains = 3, n.adapt = 2000)
beta_samples <- as.matrix(coda.samples(lm_model, c('beta', 'invsigma2'), n.iter = 20000)[[1]])
# compute the bayes estimates (posterior mean) and posterior variance
# the first 8,000 iterations are "burn-in"
post_mean <- colMeans(beta_samples[8001:20000, ])[1:3]
pos_var_beta <- cov(beta_samples[8001:20000, ])[1:3, 1:3]
# compute the Bayesian robust standard error: average the sandwich "meat"
# correction over the 12,000 retained posterior draws
Omega <- array(dim = c(3, 3, 12000))
for (i in 1:dim(Omega)[3]) {
  Omega[, , i] <- t(X) %*% diag(as.numeric((y - X %*% beta_samples[8000 + i, 1:3]) ^ 2)) %*% X %*%
    solve(t(X) %*% X) * beta_samples[8000 + i, 4]
}
Omega <- apply(Omega, c(1, 2), mean)
## the "bayesian" version of sandwich covariance matrix
post_se <- sqrt(diag(pos_var_beta))
bayes_hw_se <- sqrt(diag(pos_var_beta %*% Omega))
# tabulate the results (one row per coefficient, one column per SE flavour)
result_tab <- data.frame(lm = coef(lmmod),
                         naive_se = naive_se,
                         hw_se = hw_se,
                         post_mean = post_mean,
                         post_se = post_se,
                         bayes_hw_se = bayes_hw_se)
result_xtab <- xtable(result_tab, digits = 3)
print(result_xtab, file = "nhanes_result.txt")
|
c492dd0784c85f69817155fc6a505e1c4304631b
|
d7caf224ef89ac88d9f1df99b1270db81d4065d8
|
/ui.R
|
e2e2992e276a0d8753ac0d65536f5ef52ac1bed2
|
[] |
no_license
|
emilopezcano/probfreq
|
c92086d1603f7a250a57523ee7d205fade560a90
|
5da55ca5adfd0596f35f817cb99b346fc14e608d
|
refs/heads/main
| 2023-08-27T07:10:10.850844
| 2021-10-24T10:40:10
| 2021-10-24T10:40:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 742
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Sidebar collects the sample space, the element of interest, the maximum
# sample size, and the RNG seed; the main panel shows the simulation plot
# rendered by the server as output$simPlot.
shinyUI(fluidPage(
  # Application title
  titlePanel("Relative frequency approach to Probability"),
  # Input text boxes
  sidebarLayout(
    sidebarPanel(
      textInput("elements", "Set elements (separated by commas)", value = "man,woman"),
      textInput("item", "Element to compute probability (one of the above)","woman"),
      numericInput("size", label = "Sample size from one to", 100),
      numericInput("seed", label = "Seed", 1)
    ),
    # Show a plot of the simulated samplings
    mainPanel(
      plotOutput("simPlot")
    )
  )
))
|
0da5174e4902e869f5d8096232f7300fe852a3c4
|
172288efc3ea342e191e503f372a6b5b8b5e7465
|
/man/extractExperimentInformation.Rd
|
d9fd60ebdd06ab93a2565a2e1e3ac19bcd243413
|
[] |
no_license
|
guypwhunt/r_shiny_geo2r_visulisation_package
|
062d14e0fd9500bd3133a45828f56f9db498500c
|
afa27c0a97f8ab9488005160981d61c0bfb76128
|
refs/heads/main
| 2023-04-09T17:03:27.130019
| 2021-04-07T14:58:47
| 2021-04-07T14:58:47
| 355,532,616
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 641
|
rd
|
extractExperimentInformation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geoIntegrationFunctions.R
\name{extractExperimentInformation}
\alias{extractExperimentInformation}
\title{A GEO Function to Convert the Experiment Information Object into HTML}
\usage{
extractExperimentInformation(experimentData)
}
\arguments{
\item{experimentData}{The experiment object obtained from the getExperimentInformation() function}
}
\description{
This function allows you to convert experiment information into HTML
}
\examples{
extractedExperimentInformation <- extractExperimentInformation(experimentInformation)
}
\author{
Guy Hunt
}
\keyword{GEO}
|
81568663156740ba73a38cd56e4f6e2c0775a2c4
|
8879118eb01708b3c9d914406809fe6b828d5b1c
|
/plot4.R
|
56a2373de95fcb75f89a5bd872625e599438f676
|
[] |
no_license
|
senaus/ExData_Plotting1
|
b6ef237d0e101028b9bcbd6da595390480fb8ed6
|
9eac5cf83045fe654e79ddfad0591a30bf5f8ebf
|
refs/heads/master
| 2020-04-07T13:15:08.709293
| 2018-11-20T15:14:49
| 2018-11-20T15:14:49
| 158,399,405
| 0
| 0
| null | 2018-11-20T14:08:38
| 2018-11-20T14:08:38
| null |
UTF-8
|
R
| false
| false
| 1,360
|
r
|
plot4.R
|
##
## Plot 4: 2x2 panel of household power-consumption time series
##
# Downloading data
if (!file.exists("HWdata.zip")) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "HWdata.zip")
}
# Unzipping data
if (!file.exists("household_power_consumption.txt")) {
  unzip("HWdata.zip")
}
# Reading data: grab the header row separately, then read only the 2880
# minute-level rows of interest (skip/nrows slice — presumably the two
# assignment days; verify against the course spec).
header <- read.table("household_power_consumption.txt", sep = ";", na.strings = "?", nrows = 1, stringsAsFactors = FALSE)
elec <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", nrows = 2880, skip = 66636, col.names = header)
# Combine the Date and Time columns into a single POSIXlt timestamp.
elec$DateTime <- strptime(paste(elec$Date,elec$Time), format = "%d/%m/%Y%H:%M:%S")
# Construction of plot (mfcol fills columns first: left column, then right)
png("plot4.png")
par(mfcol = c(2,2), lwd = 0.5)
# top left
plot(elec$DateTime, elec$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# bottom left
plot(elec$DateTime, elec$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(elec$DateTime, elec$Sub_metering_2, col = 2)
lines(elec$DateTime, elec$Sub_metering_3, col = 4)
legend("topright", legend = names(elec)[7:9], lty = 1, col = c(1, 2, 4))
# top right
plot(elec$DateTime, elec$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# bottom right
plot(elec$DateTime, elec$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
76762f7cc608200ad37c840b2f6660a02bc63c84
|
e641a149924ecc8cc6bc55102ea93aa3046796a9
|
/R/pair.R
|
adc0b722315f05bd71c422883991ff80539aced9
|
[] |
no_license
|
cran/cooccur
|
a866dba2efb77771da40faa7162a4edd1e358441
|
c900eb2120fd0644c4f3734830d8a3969b8c6907
|
refs/heads/master
| 2020-12-24T13:16:04.060161
| 2016-02-09T20:53:56
| 2016-02-09T20:53:56
| 17,695,239
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,485
|
r
|
pair.R
|
pair <-
function(mod, spp, all = FALSE) {
  # Report pairwise co-occurrence associations involving one species.
  #
  # mod: co-occurrence model object whose $results table holds one row per
  #      species pair (ids sp1/sp2, names sp1_name/sp2_name, incidence,
  #      observed/probable/expected co-occurrence, and p_lt/p_gt p-values).
  # spp: species identifier; numeric -> matched against sp1/sp2,
  #      character -> matched against sp1_name/sp2_name.
  # all: if TRUE report every pair (alpha = 1); otherwise only pairs
  #      significant at alpha = 0.05 in either tail.
  #
  # Prints the species and its associations; returns the association table
  # invisibly (the value of the final print()).
  ptab <- mod$results
  # FIX: was `all == T` — `T` is reassignable; isTRUE(all) is robust.
  alpha <- if (isTRUE(all)) 1 else 0.05
  out_names <- c("sp2", "sp2_inc", "obs_cooccur", "prob_cooccur",
                 "exp_cooccur", "p_lt", "p_gt")
  stat_cols <- c("obs_cooccur", "prob_cooccur", "exp_cooccur", "p_lt", "p_gt")
  sig <- ptab$p_gt <= alpha | ptab$p_lt <= alpha
  if (is.numeric(spp)) {
    # Pairs where spp appears first vs second in the results table.
    p1 <- ptab[ptab$sp1 == spp & sig, c("sp2", "sp2_inc", stat_cols)]
    p2 <- ptab[ptab$sp2 == spp & sig, c("sp1", "sp1_inc", stat_cols)]
  } else if (is.character(spp)) {
    p1 <- ptab[ptab$sp1_name == spp & sig, c("sp2_name", "sp2_inc", stat_cols)]
    p2 <- ptab[ptab$sp2_name == spp & sig, c("sp1_name", "sp1_inc", stat_cols)]
  } else {
    # Original silently did nothing for other types; preserve that.
    return(invisible(NULL))
  }
  # Normalise both halves to the same column names so they can be stacked.
  colnames(p1) <- out_names
  colnames(p2) <- out_names
  pairs_tab <- rbind(p1, p2)
  cat("Species:\n")
  print(spp)
  cat(paste("with", nrow(pairs_tab), "associations\n\n"))
  print(pairs_tab)
}
|
765460d0d02efaf12a8880e987722e8d4949e2e4
|
a3ba5a0582f119daad1e832147b295387fc5bcfb
|
/4_PCA/47.R
|
f416ce77eff6b91106a7c9357688b6d3f89162f8
|
[] |
no_license
|
myoshimu/MV
|
d57c32e4838fec482e5e236cd755adda83272475
|
749d2c4422d4d4f0ac03f83f08d3b8ae97f9f8bf
|
refs/heads/master
| 2022-02-19T03:55:24.651461
| 2019-09-01T14:53:51
| 2019-09-01T14:53:51
| 101,030,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
47.R
|
# Principal component analysis using prcomp
d<-read.csv("Ramen.csv",fileEncoding = "cp932")
d
# Rewrite the row names (so labels are readable in the biplot)
rownames(d)<-d$店名
d
# Run prcomp (scale = TRUE standardises the variables first)
pr<-prcomp(d[,-1],scale=TRUE)
pr
# Principal component scores
pr$x
# Principal component loadings
t(t(pr$rotation)*pr$sdev)
# Cumulative proportion of variance explained
# (divides by 3 — assumes 3 standardised variables; TODO confirm)
cumsum(pr$sdev^2)/3
# Biplot (font chosen so the Japanese row labels render)
par(family="HiraginoSans-W3")
biplot(pr)
|
0e13870d84df63cf3396ec63161561ff0c391288
|
73ce0fdac6b3bca74a5a74eac0d25de24011033e
|
/tests/testthat/test-lforce.r
|
f52431dd2e30f33094dc3420dc6acc984436743e
|
[
"MIT"
] |
permissive
|
TobCap/lazystreamr
|
4011082f2ed4bc9229a9dd4e356ab905b0c7d3ac
|
ff77c65a7dc765f55e1577074fe318b74f1c1016
|
refs/heads/master
| 2021-07-02T12:29:43.454616
| 2017-02-04T08:39:21
| 2017-02-04T08:39:21
| 34,054,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 584
|
r
|
test-lforce.r
|
context("test for lforce")
test_that("test", {
  # Build lazy lists via the package operators (inferred from usage:
  # %:% cons, %..% range, llist constructor, %++% append) and check that
  # lforce strictly evaluates them into plain R lists.
  x1 <- 1L %:% (2L %:% lempty)
  x2 <- (1 %..% 2) %:% ((3 %..% 4) %:% lempty)
  x3 <- llist(llist(1L, 2L), llist(3L, 4L))
  # NOTE(review): x4 is constructed but never asserted on.
  x4 <- 1L %:% 2L
  expect_identical(lforce(x1), list(1L, 2L))
  expect_identical(lforce(x2), list(list(1L, 2L), list(3L, 4L)))
  expect_identical(lforce(x2), lforce(x3))
  expect_identical(lforce(1L %:% (2L %:% lempty)), lforce(1 %..% 2))
  expect_identical(lforce(llist(1L, 2L, 3L, 4L)), lforce(1 %..% 4))
  expect_identical(lforce(llist(1L, 2L) %++% llist(3L, 4L)), lforce(1 %..% 4))
})
|
6719e4f4f21dacba2a1debe9c5d0769434967373
|
62995111781a92641244bd84fb8c5348a9a7b6c9
|
/R/A_br_war.R
|
0ac1d8ec55a38f0f963df1df7b4c5641166642ea
|
[] |
no_license
|
MicroWeaR/MicroWeaR
|
172618e8a23d4fc270f3f9487e8365fd652e8875
|
caf512e44360d85b7e8a7b8589da7fda3e68c13c
|
refs/heads/master
| 2023-06-23T16:30:20.006060
| 2023-06-09T17:18:54
| 2023-06-09T17:18:54
| 119,274,504
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
A_br_war.R
|
#' @title example dataset
#' @description Working area of the picture of Anoiapithecus brevirostris.
#' @name A_br_war
#' @docType data
#' @author Antonio Profico, Flavia Strani, Pasquale Raia, Daniel DeMiguel
#' @keywords MicroWeaR
#' @usage data(A_br_war)
# NULL is the conventional roxygen placeholder object that the dataset
# documentation block above attaches to.
NULL
|
c1e79ac6779dd319459608533e66a99a7a2d54be
|
442e79fd0d33f74087de7c538b95dba3ac41f9dc
|
/tests/testthat/test_pcl_range.R
|
90dab38590f8f0ab244f1fd335165a4b22e9dfe1
|
[
"CC-BY-4.0"
] |
permissive
|
atkinsjeff/pcl
|
2ef7f2c9ee6ce813b682962c9d522fe85eab57bf
|
d16e7d42b2f58c50b7083a0966c2905972496f15
|
refs/heads/master
| 2023-04-11T20:17:04.679401
| 2022-04-12T14:37:24
| 2022-04-12T14:37:24
| 260,939,433
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
test_pcl_range.R
|
context("pcl")
test_that("pcl", {
  # Sanity bounds on the bundled `pcl` dataset: canopy height, rugosity
  # and maximum VAI must stay within the expected ranges.
  dat <- pcl
  # checks for outliers
  expect_true(all(dat$can.max.ht < 60))
  expect_true(all(dat$rugosity < 100))
  expect_true(all(dat$vai.max <= 8))
})
|
8603cb85cacca6cfef66a4394f49e403224aa46c
|
ba6b15d209cd71dfdea4cb1d26f278c794e26323
|
/QMJ_VAL_regressions.R
|
355e5ec32f03f359bc221331a887444faabc90da
|
[] |
no_license
|
Patrick-J-Close/Finance_tools
|
f5879f4100fe92bee9193e8a2a79e97c9ccd7c8a
|
e0289a50ae6b771718a9a6e09fc01690ce6e1fc8
|
refs/heads/master
| 2016-08-12T15:10:14.879102
| 2016-02-28T13:38:15
| 2016-02-28T13:38:15
| 52,722,710
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,861
|
r
|
QMJ_VAL_regressions.R
|
## Cross-sectional regressions of valuation ratios on quality z-scores:
## loop over paired *QUAL.csv / *VAL.csv files, merge them by ticker, and
## regress each valuation ratio on Z_QUAL, one result row per date file.
# http://www.r-bloggers.com/looping-through-files/
path = "S:/Neptune Investment Team/Analyst_Patrick/Dev/Projects/QMJ/OutputFilesAVG"
setwd(path)
out.file <- ""
QUAL_file.names <- dir(path = path, pattern = "QUAL.csv")
# FIX: the argument was misspelled `ppattern`, which dir()/list.files()
# rejects with an "unused argument" error.
VAL_file.names <- dir(path = path, pattern = "VAL.csv")
RAY <- data.frame(read.csv("RAY_Index.csv"))
RAY$Date <- as.factor(RAY$Date)
# cross sectional regressions
res <- data.frame()
val_ratios <- c("PE_RATIO","CURR_EV_TO_T12M_EBITDA_C","PX_TO_BOOK_RATIO")
for (i in seq_along(QUAL_file.names)) {
  fileq <- read.csv(QUAL_file.names[i], stringsAsFactors = FALSE)
  filev <- read.csv(VAL_file.names[i], stringsAsFactors = FALSE)
  # exclude values outside of 5 std devs
  fileq <- fileq[which(fileq$Z_QUAL < 5 & fileq$Z_QUAL > -5), ]
  # merge quality and valuation data sets
  fileq <- merge(fileq, filev, by = "Ticker")
  fileq <- fileq[!is.na(fileq$PE_RATIO), ]
  fileq <- fileq[!is.na(fileq$CURR_EV_TO_T12M_EBITDA_C), ]
  fileq <- fileq[!is.na(fileq$PX_TO_BOOK_RATIO), ]
  fileq <- fileq[!is.na(fileq$Z_QUAL), ]
  # regression: build one row (date + intercept/slope/R^2 per ratio)
  reg_vector <- data.frame(substr(QUAL_file.names[i], 1, 8))
  for (j in seq_along(val_ratios)) {
    reg_formula <- paste(val_ratios[j], " ~ Z_QUAL", sep = "")
    reg <- lm(as.formula(reg_formula), data = fileq)
    # bind results to result array
    reg_vector <- cbind(reg_vector, data.frame(summary(reg)$coef[1,1], summary(reg)$coef[2,1],
                                               summary(reg)$r.squared))
    colnames(reg_vector)[(j*3-1):((j*3-1)+2)] <- c(paste(val_ratios[j], "$Intercept", sep = ""),
                                                   paste(val_ratios[j], "$coef(Z_QUAL)", sep = ""),
                                                   paste(val_ratios[j], "$R-squared", sep = ""))
  }
  res <- rbind(res, reg_vector)
}
# FIX: removed `res <- cbind(res, reg_array)` — `reg_array` was never
# defined anywhere in this script, so that line always errored.
colnames(res)[1] <- "DATE"
|
9dc034e63b2f267d5067191ec8f3fbb95aa85397
|
d33d903d5cce614e41d8fd05ea5a722996523769
|
/R-scripts/Density.R
|
bd6f4f90adcebbbedd345942d229dd6a014f8cb6
|
[
"Apache-2.0"
] |
permissive
|
DennisRippinger/spade
|
10bea53608a497cfd6efdd699699aa1cee34f136
|
59023fd9e863518e23e71288bd7a23da865a85f6
|
refs/heads/master
| 2023-01-19T21:02:20.639294
| 2023-01-16T08:32:21
| 2023-01-16T08:32:21
| 17,828,071
| 0
| 3
| null | 2016-03-09T19:26:31
| 2014-03-17T13:03:36
|
Java
|
UTF-8
|
R
| false
| false
| 386
|
r
|
Density.R
|
library(RMySQL)
# Pull the per-record stylometry density-function values from MySQL, plot
# their (0.1-rounded) frequency distribution, and summarise them.
con <- dbConnect(dbDriver("MySQL"), user = "root", password = "root", dbname = "spade", host = "localhost")
density <- dbGetQuery(con, "SELECT densityFunction FROM `Spade`.stylometry;")
df <- data.frame(density = as.numeric(density$densityFunction))
# Bucket to one decimal so the tabulated frequencies are meaningful.
df$density <- round(df$density, digits = 1)
table <- xtabs(df)
plot(table, type = "o")
dbDisconnect(con)
# FIX: the summary statistics referenced df$num, a column that does not
# exist (df only has `density`), so mean/median returned NA with warnings.
mean(df$density)
median(df$density)
|
fcc69ca0e3c3e53708f41aba7c6f3a8cb5144be7
|
737c608948fb450f11786cc758091d31ee62f059
|
/man/add_cls-methods.Rd
|
c4d969a16e0084ebca1cbce2f967ac43344bf841
|
[
"MIT"
] |
permissive
|
isglobal-brge/omicRexposome
|
01302d3d1d009b6f4c4acaa1b1ae156ccd319321
|
c8716b177556bd7e082b269d2708eb43184e74fa
|
refs/heads/master
| 2021-10-24T06:34:29.461874
| 2021-10-15T09:36:15
| 2021-10-15T09:36:15
| 84,179,294
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 996
|
rd
|
add_cls-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01_AllMethods.R, R/MultiDataSet-add_cls.R
\docType{methods}
\name{add_cls}
\alias{add_cls}
\alias{add_cls,MultiDataSet,ExposomeClust-method}
\alias{add_cls}
\title{Method to add an ExposomeClust to a MultiDataSet}
\usage{
add_cls(object, clsSet, ...)
\S4method{add_cls}{MultiDataSet,ExposomeClust}(object, clsSet, ...)
}
\arguments{
\item{object}{An object of class \link{MultiDataSet}.}
\item{clsSet}{An object of class \link{ExposomeClust}.}
\item{...}{Arguments given to \link{add_eset} from \link{MultiDataSet}.}
}
\value{
A \link{MultiDataSet} with the \link{ExpressionSet} added as an
independent dataset.
}
\description{
This method allows to insert an object of class \link{ExposomeClust} as an
independent dataset into an object of class \link{MultiDataSet}.
}
\examples{
data("eclust", package = "rexposome")
library(MultiDataSet)
md <- new("MultiDataSet")
names(md)
md <- add_cls(md, expo_c)
names(md)
}
|
7343ac21b23a1a61cd799604a010fd40b28c9b73
|
22dc322d68a8bfaecf3c57be5ec99a433f0a95a8
|
/man/mice.impute.2l.glm.norm.Rd
|
86a80829bb9b75dd827ae71cf9ada34d01eeee6c
|
[] |
no_license
|
cran/micemd
|
19a1acfeb69da9e62d1639265a518ecebeb1f3a5
|
e5adbe076babd9f6c9aa3926eaabdd73d76dd69f
|
refs/heads/master
| 2023-06-09T21:04:43.211056
| 2023-06-01T11:00:04
| 2023-06-01T11:00:04
| 91,136,501
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,861
|
rd
|
mice.impute.2l.glm.norm.Rd
|
\name{mice.impute.2l.glm.norm}
\alias{mice.impute.2l.glm.norm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Imputation of univariate missing data using a Bayesian linear mixed model based on non-informative prior distributions
}
\description{
Imputes univariate missing data using a Bayesian linear mixed model based on non-informative prior distributions. The method is dedicated to a continuous outcome stratified in several clusters. Should be used with few clusters and few individuals per cluster. Can be very slow to perform otherwise.
}
\usage{
mice.impute.2l.glm.norm(y, ry, x, type,...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{y}{
Incomplete data vector of length \code{n}
}
\item{ry}{
Vector of missing data pattern \code{(FALSE=missing, TRUE=observed)}
}
\item{x}{
Matrix \code{(n x p)} of complete covariates.
}
\item{type}{
Vector of length \code{ncol(x)} identifying random and class variables. Random variables are identified by a '2'. The class variable (only one is allowed) is coded as '-2'. Random variables also include the fixed effect.
}
\item{\dots}{
Other named arguments.
}
}
\details{
Imputes univariate two-level continuous variable from a homoscedastic normal model. The variability on the parameters of the imputation is propagated according to an explicit Bayesian modelling. More precisely, improper prior distributions are used for regression coefficients and variance components. The method is recommended for datasets with a small number of clusters and a small number of individuals per cluster. Otherwise, confidence intervals after applying the analysis method on the multiply imputed dataset tend to be anti-conservative. In addition, the imputation can be highly time-consuming.
}
\value{
A vector of length \code{nmis} with imputations.
}
\references{
Jolani, S. (2017) Hierarchical imputation of systematically and sporadically missing data: An approximate Bayesian approach using chained equations. Biometrical Journal <doi:10.1002/bimj.201600220>
Jolani, S., Debray, T. P. A., Koffijberg, H., van Buuren, S., and Moons, K. G. M. (2015). Imputation
of systematically missing predictors in an individual participant data meta-analysis: a
generalized approach using MICE. Statistics in Medicine, 34(11):1841-1863. <doi:10.1002/sim.6451>
Audigier, V., White, I. , Jolani ,S. Debray, T., Quartagno, M., Carpenter, J., van Buuren, S. and Resche-Rigon, M.
Multiple imputation for multilevel data with continuous and binary variables (2018). Statistical Science. <doi:10.1214/18-STS646>.
}
\author{
Vincent Audigier \email{vincent.audigier@cnam.fr} from the R code of Shahab Jolani.
}
\seealso{
\code{\link{mice},\link{mice.impute.2l.2stage.norm},\link{mice.impute.2l.jomo}}
}
\keyword{mice}
|
759f1d1e09eb75fa2294633bf29b8caca77e72c5
|
9c308a99d7c2d5c4b6212ee9f5f15759d44994aa
|
/man/records-class.Rd
|
665b516d35a427f9da6898abf3bf2fd9ec805fc7
|
[] |
no_license
|
FranzKrah/rMyCoPortal
|
d1a20217aca46a8e7544271158910af2874ebec9
|
710e32c5d2b4c61f63e5336fead20fa039b78c53
|
refs/heads/master
| 2020-03-31T13:09:36.542099
| 2018-12-30T11:41:25
| 2018-12-30T11:41:25
| 152,243,980
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 845
|
rd
|
records-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/records-class.R
\docType{class}
\name{records-class}
\alias{records-class}
\title{An S4 Class to represent query result from the function \link{mycoportal}}
\description{
\code{records} holds a records table together with the query metadata and the recommended citation
}
\section{Slots}{
\describe{
\item{\code{nr.records}}{A numeric giving the number of records retrieved}
\item{\code{citation}}{A character string with the recommended citation from the website}
\item{\code{query}}{A list of the user arguments used}
\item{\code{records}}{A data.frame with the query records results}
\item{\code{db}}{A character string specifying the database (currently only MyCoPortal)}
}}
\seealso{
\code{"\link[=records-class]{records}"}
}
\author{
Franz-Sebastian Krah
}
|
5eaa64735a65c599199fdbf948bd8d31887662a3
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/eha/R/summary.coxreg.R
|
a68398b12125233ebea532dceb91601b7a487f95
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
summary.coxreg.R
|
summary.coxreg <- function(object, ...) print(object)
|
ae28ce63ea9340b34ad9d9a8cbe1f7c290576e81
|
48c1dea7ee94ff1f8cf7077ff1f6189e9300c97a
|
/March 29_2 Binomial Tree.R
|
a0d178d218bb476a44a53d224fe837877617f616
|
[] |
no_license
|
LeafmanZ/Financial-R
|
c153e3a733e0abf2ed6b5a81b039f68eaa094cdf
|
98fde9375699950479182c7c3f12a9d74c576d90
|
refs/heads/main
| 2023-04-11T10:45:23.383347
| 2021-04-09T20:58:27
| 2021-04-09T20:58:27
| 353,474,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
March 29_2 Binomial Tree.R
|
# Price an American call (S = 50, K = 55, T = 4, r = 1%, sigma = 20%) on a
# 4-step binomial lattice, plot the tree, and export the node values to Excel.
library(fOptions)
library(xlsx)
option_tree <- BinomialTreeOption(TypeFlag = "ca", S = 50, X = 55,
                                  Time = 4, r = 0.01, b = 0.00, sigma = 0.2, n = 4)
# Draw the lattice; `tree_plot` (not `t`) avoids shadowing base::t().
tree_plot <- BinomialTreePlot(option_tree, cex = .8,
                              xlab = "period", ylab = "Option Value", digits = 3)
title(main = "Option Tree")
print(option_tree)
write.xlsx(option_tree, file = "binomial_tree.xlsx")
|
66b9cdbf3d33626d8648d1d84cc396000ee2808e
|
f0efd62dc5565eb0664ca06c7d8e450dc3158669
|
/pipeline/code/norm.clusters.R
|
d731c861bd44ba78fc970704c2b939771e6b0fd7
|
[] |
no_license
|
hjanime/small-RNA-analysis
|
61c65799a875ed5511db1818f753b0173b73c63d
|
c93d814f082c004eb1deefbfe5faa6a547e3f1aa
|
refs/heads/master
| 2021-01-18T13:55:49.466840
| 2015-07-20T23:19:03
| 2015-07-20T23:19:03
| 42,481,459
| 1
| 1
| null | 2015-09-14T22:50:33
| 2015-09-14T22:50:33
| null |
UTF-8
|
R
| false
| false
| 2,014
|
r
|
norm.clusters.R
|
# norm.clusters.R
# Normalizes small-RNA cluster counts with DESeq2 (rlog), extracts
# tRNA-annotated clusters for the clinical and preclinical sample groups,
# and runs PLS-DA on each via the local isomiRs package.
library(data.table)
library(DESeq2)
library(qvalue)
library(ggplot2)
library("RColorBrewer")
library("gplots")
library(gtools)
# First read: per-cluster counts (column 1 = annotation, rest = samples).
table<-read.table("../../fasta/prepare_18_35_10/res/counts.tsv",sep="\t",header=T,row.names=1)
ann<-table[,1]
names(ann)<-row.names(table)
table<-table[,2:(ncol(table))]
# Order sample columns naturally (sample1, sample2, ..., sample10).
table <- table[,mixedsort(names(table))]
# NOTE(review): this second read immediately overwrites `table` and `ann`
# from the first read above — the first block appears to be dead code;
# confirm which input is intended.
table<-read.table("../../fasta/define.cluster.Dec/res/ann.tab",sep="\t",header=T,row.names=1)
ann<-table[,1]
names(ann)<-row.names(table)
# Drop the annotation column (first) and the last column.
table<-table[,2:(ncol(table)-1)]
table <- table[,mixedsort(names(table))]
#########################################################
# rlog-normalize columns ini:end of `table` assuming a 7-vs-7 two-condition
# design (con = control label, treat = treatment label). Clusters are kept
# only if at least 5 samples have counts > 10. Returns the rlog object.
get_norm_values <- function(table,con,treat,ini,end){
design<-data.frame(condition=c(rep(con,7),rep(treat,7)))
row.names(design)<-names(table)[ini:end]
mirna<-table[,ini:end]
mirna<-mirna[rowSums(mirna>10)>=5,]
dds <- DESeqDataSetFromMatrix(countData = mirna,
colData = design,
design = ~ condition)
dds <- estimateSizeFactors( dds )
#summary(q)
rlogTransformation(dds, blind=FALSE)
}
# Clinical group: samples 1-14 ("cc" vs "ct"); keep only tRNA clusters.
rld1 <- get_norm_values(table,"cc","ct",1,14)
trna_ids <- names(ann)[grepl("tRNA",ann)]
idx <- intersect(row.names(assay(rld1)),trna_ids)
trna_counts_clininc <- assay(rld1)[idx,]
# Preclinical group: samples 15-28 ("pc" vs "pt").
rld1 <- get_norm_values(table,"pc","pt",15,28)
trna_ids <- names(ann)[grepl("tRNA",ann)]
idx <- intersect(row.names(assay(rld1)),trna_ids)
trna_counts_preclininc <- assay(rld1)[idx,]
library(devtools)
devtools::load_all("~/repos/isomiRs")
# Minimal S4 container expected by isoPLSDA(); `counts` is the generator.
counts<-setClass("DataSeq",
slots=c(counts="matrix",
normcounts="matrix",
design="data.frame"
))
# Run PLS-DA on a normalized count matrix. Sample groups are derived by
# stripping trailing digits from column names.
do_pls<-function(counts)
{
# `counts()` here resolves to the global S4 generator, not the matrix
# argument: R skips non-function bindings when a name is used in call
# position.
obj<-counts()
obj@normcounts<-as.matrix(counts)
obj@design<-data.frame(g=gsub("[0-9]+","",colnames(obj@normcounts)), b=1)
pls <- isoPLSDA(obj,"g",nperm = 400)
pls
}
cc_trna <- do_pls(trna_counts_clininc)
# NOTE(review): "pc_tnra" looks like a typo for "pc_trna" — confirm before
# renaming, downstream code may reference the misspelled name.
pc_tnra <- do_pls(trna_counts_preclininc)
|
4b69558f0b5d72b2d8cef7716ff53f76b9ab85a0
|
235df06a8ef93f21e0af979affa4f08ae89fd9e3
|
/Springleaf_functions.R
|
7731a6c0c7711b6d0bcdb4cb44b5145dc247d8f8
|
[] |
no_license
|
snassimr/springleaf
|
63a87682ea2ce8eb9422be97adfb17ae90242b7f
|
7577058c046843b847395f9eb515b2deb1ac21fa
|
refs/heads/master
| 2021-01-10T03:34:38.111598
| 2015-10-19T07:32:27
| 2015-10-19T07:32:27
| 43,256,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,877
|
r
|
Springleaf_functions.R
|
# End-to-end data preparation for the Springleaf modeling pipeline.
# Reads train.csv / test.csv, engineers value-based features (timestamp
# day/month/hour, truncated VAR_0493), imputes missing values, creates
# MDLP-discretized features, removes low-information columns, harmonizes
# factor levels between the modeling and prediction sets, and saves both
# data frames as .rda files.
# Relies on globals: SYSG_INPUT_DIR, SYSG_SYSTEM_DIR, SYS_TARGET_NAME,
# SYS_IDENTIFIER_FEATURES, plus the helper functions defined in this file.
perform_data_preparation <- function()
{
library(readr)
#READ MODELING DATA
SYSG_ME_DATA_FILENAME <- "train.csv"
setwd(SYSG_INPUT_DIR)
me_input_data <- read_csv(SYSG_ME_DATA_FILENAME)
#READ PREDICTION DATA
SYSG_P_DATA_FILENAME <- "test.csv"
p_input_data <- read_csv(SYSG_P_DATA_FILENAME)
me_target_index <- which(names(me_input_data) == SYS_TARGET_NAME)
p_input_data_ident <- p_input_data[[SYS_IDENTIFIER_FEATURES]]
# Prefix target with "t" so levels are valid R names ("t0"/"t1") for caret.
me_input_data[[SYS_TARGET_NAME]] <- as.factor(paste0("t", me_input_data[[SYS_TARGET_NAME]]))
# NOTE(review): assumes test.csv also carries the target column — confirm.
p_input_data [[SYS_TARGET_NAME]] <- as.factor(paste0("t", p_input_data[[SYS_TARGET_NAME]]))
me_input_target_data <- me_input_data[[SYS_TARGET_NAME]]
################################################# PREPARE MODELING AND EVALUATION DATA
me_input_data1 <- me_input_data[,-c(SYS_IDENTIFIER_FEATURES,me_target_index)]
me_data_exploration_report <- create_data_exploration_report(me_input_data1,iteration = 1,output_mode = 'CSV' )
uv_data_report1 <- data.frame(me_data_exploration_report$uv_data_report)
# Thresholds for dropping near-constant / high-cardinality / NA-heavy columns.
SYS_MIN_REQ_DISTINCT_VALUES <- 2
SYS_MAX_REQ_DISTINCT_VALUES <- 50
SYS_REQ_MIN_NUM_NAS <- 0
SYS_REQ_MAX_NUM_NAS <- 1000
# CREATE NEW VALUE-BASED FEATURES (timestamps + VAR_0493 prefix)
me_ts_var_features <- as.character(subset(uv_data_report1 , FEATURE_TYPE == "timestamp" , select = FEATURE_NAME)$FEATURE_NAME)
me_vbef_features <- c(me_ts_var_features,"VAR_0493")
me_vbef_features_data <- create_vbef_features(me_input_data1[,me_vbef_features], me_ts_var_features)
# REPLACE TIME SERIES and ZIP BASED FEATURES
me_input_data2 <- data.frame(me_input_data1[,!(names(me_input_data1) %in% me_vbef_features)],me_vbef_features_data)
me_data_exploration_report <- create_data_exploration_report(me_input_data2,iteration = 2,output_mode = 'CSV' )
uv_data_report2 <- data.frame(me_data_exploration_report$uv_data_report)
# FILL MISSING (only columns with 1..SYS_REQ_MAX_NUM_NAS NAs are imputed)
me_fill_NAs_features <- as.character(subset(uv_data_report2 , NO_NAs > SYS_REQ_MIN_NUM_NAS & NO_NAs <= SYS_REQ_MAX_NUM_NAS , select = FEATURE_NAME)$FEATURE_NAME)
me_fill_NAs_features_data <- process_m_missing_data(me_input_data2[,me_fill_NAs_features],me_fill_NAs_features)
me_input_data3 <- data.frame(me_input_data2[,!(names(me_input_data2) %in% me_fill_NAs_features)],me_fill_NAs_features_data)
# CREATE NEW LEARNING-BASED FEATURES (MDLP discretization of these columns)
me_disc_features <- c("VAR_1747", "VAR_0541","VAR_0648","VAR_1228", "VAR_0891",
"VAR_0896","VAR_1202","VAR_1581","VAR_1685", "VAR_1914",
"VAR_0241","VAR_1715")
# me_disc_features <- c("VAR_1398", "VAR_1747")
me_lbef_features_data <- create_lbef_features(me_input_data3[,me_disc_features],
me_input_target_data,
me_disc_features)
# Drop "All" discretized if exist (features with no MDLP cut points)
me_disc_features <- str_replace(names(me_lbef_features_data),"_D","")
# ADD DISCRETIZATION FEATURES
if (!is.null(me_lbef_features_data))
me_input_data3 <- data.frame(me_input_data3,me_lbef_features_data)
me_data_exploration_report <- create_data_exploration_report(me_input_data3,iteration = 3,output_mode = 'CSV' )
uv_data_report3 <- data.frame(me_data_exploration_report$uv_data_report)
# REMOVE near-constant, high-cardinality categorical, and NA-heavy features
me_low_var_features <- as.character(subset(uv_data_report3 , NO_DISTINCT <= SYS_MIN_REQ_DISTINCT_VALUES , select = FEATURE_NAME)$FEATURE_NAME)
me_high_var_features <- as.character(subset(uv_data_report3 , NO_DISTINCT > SYS_MAX_REQ_DISTINCT_VALUES & FEATURE_TYPE == "categorical", select = FEATURE_NAME)$FEATURE_NAME)
me_high_NAs_features <- as.character(subset(uv_data_report3 , NO_NAs > SYS_REQ_MAX_NUM_NAS , select = FEATURE_NAME)$FEATURE_NAME)
# Combine features to remove
me_features_remove <- c(me_low_var_features,me_high_var_features,me_high_NAs_features,me_disc_features)
# Add features back
me_features_add_exc <- c("VAR_0493_GEN5")
me_features_select <- names(me_input_data3)[!(names(me_input_data3) %in% me_features_remove)]
me_input_data4 <- me_input_data3[,c(me_features_select,me_features_add_exc)]
me_data_exploration_report <- create_data_exploration_report(me_input_data4,iteration = 4,output_mode = 'CSV' )
uv_data_report4 <- data.frame(me_data_exploration_report$uv_data_report)
me_input_features <- names(me_input_data4)
# Assuming no data rows drop
me_input_data4 <- data.frame(me_input_data4,target=me_input_target_data)
################################################# PREPARE PREDICTION DATA
# Assuming the same set of input features in train and test data and same number
# and order of instances after processing for predictions
p_vbef_features_data <- create_vbef_features(p_input_data[,me_vbef_features],me_ts_var_features)
p_input_data1 <- data.frame(p_input_data[,!(names(p_input_data) %in% me_vbef_features)],p_vbef_features_data)
# Impute test NAs with the values learned from the training data.
p_fill_NAs_features_data <- process_p_missing_data(p_input_data1[,me_fill_NAs_features],me_fill_NAs_features)
p_input_data2 <- data.frame(p_input_data1[,!(names(p_input_data1) %in% me_fill_NAs_features)],p_fill_NAs_features_data)
if(!is.null(me_disc_features)) {
p_lbef_features_data <- process_lbef_features(p_input_data2[,me_disc_features],me_disc_features)
p_input_data3 <- data.frame(p_input_data2,p_lbef_features_data)
} else {
p_input_data3 <- data.frame(p_input_data2)
}
p_input_data3 <- p_input_data3[,c("ID",me_input_features)]
# Harmonize factor levels across train/test so predict() sees no new levels.
for (f in me_input_features) {
if (class(me_input_data4[[f]])=="factor" || class(me_input_data4[[f]])=="character") {
levels <- unique(c(as.character(me_input_data4[[f]]), as.character(p_input_data3[[f]])))
me_input_data4[[f]] <- factor(me_input_data4[[f]], levels=levels)
p_input_data3[[f]] <- factor(p_input_data3[[f]], levels=levels)
}
}
# Persist both prepared data sets for the modeling / prediction stages.
setwd(SYSG_SYSTEM_DIR)
save(me_input_data4, file = paste0("me_data.rda"))
save(p_input_data3, file = paste0("p_data.rda"))
gc(T,T)
}
# Build a per-column univariate exploration report for a data frame.
#
# input_data  : data frame to profile.
# iteration   : integer tag used in the CSV file name.
# output_mode : if 'CSV', also writes the report to SYSG_OUTPUT_MODELING_DIR.
#
# Returns list(uv_data_report = data.frame) with one row per column:
# FEATURE_NAME, FEATURE_TYPE ("numeric"/"timestamp"/"categorical"),
# NO_DISTINCT (distinct values, NA counted), NO_NAs (missing count).
create_data_exploration_report <- function (input_data,iteration,output_mode)
{
  # "%B" (month name) parsing in strptime() must be locale-independent.
  Sys.setlocale("LC_TIME", "C")
  # FIX: pre-allocate one row per column instead of growing the report with
  # rbind() inside the loop (which was O(n^2) in the number of columns).
  report_rows <- vector("list", ncol(input_data))
  for (i in seq_len(ncol(input_data))) {
    feature_type <- if (is.numeric(input_data[, i])) {
      'numeric'
    } else if (!all(is.na(strptime(input_data[, i], "%d%B%y:%H:%M:%S")))) {
      # At least one value parses as a SAS-style timestamp (e.g. 02JAN15:...).
      'timestamp'
    } else 'categorical'
    report_rows[[i]] <- data.frame(
      FEATURE_NAME = colnames(input_data)[i],
      FEATURE_TYPE = feature_type,
      # as.numeric() keeps the same (double) column types the original
      # produced via transform().
      NO_DISTINCT  = as.numeric(length(unique(input_data[, i]))),
      NO_NAs       = as.numeric(sum(is.na(input_data[, i]))),
      stringsAsFactors = FALSE
    )
  }
  uv_data_report <- do.call(rbind, report_rows)
  rownames(uv_data_report) <- seq_len(nrow(uv_data_report))
  if (output_mode == 'CSV') {
    setwd(SYSG_OUTPUT_MODELING_DIR)
    write.csv(uv_data_report, file = paste0("data_exploration_report", iteration, ".csv"), row.names = FALSE)
  }
  return (list(uv_data_report = uv_data_report))
}
# Split the prepared data 75/25, tune/fit an xgboost classifier with caret
# (5-fold CV, fixed grid), log ROC and feature importance, persist the model
# and its best parameters, then score the 25% hold-out via
# create_pe_prediction_data(). Uses globals: SYSG_OUTPUT_MODELING_DIR.
#
# me_input_data : data frame with a two-level factor column `target`.
# ma_run_id     : identifier used for log entries and output file names.
create_model_assessment_data <- function (me_input_data,ma_run_id)
{
set.seed(998)
m_indexes <- createDataPartition(me_input_data$target , p = .75, list = FALSE)
m_input_data <- me_input_data[ m_indexes,]
e_input_data <- me_input_data[-m_indexes,]
# Reverse factor levels so the positive class comes first (caret convention).
m_input_data$target <- factor(m_input_data$target,levels(m_input_data$target)[c(2,1)])
e_input_data$target <- factor(e_input_data$target,levels(e_input_data$target)[c(2,1)])
# target ~ <all other columns>
classification_formula <- as.formula(paste("target" ,"~",
paste(names(m_input_data)[!names(m_input_data)=='target'],collapse="+")))
# Initialize model assesment objects
start_time <- NULL
end_time <- NULL
classification_model <- NULL
assesment_grid <- NULL
start_time <- proc.time()
SYS_CV_NFOLDS <- 5
# Grid for parameter evaluation (kept for reference; single point used below)
# xgb_tuneGrid <- expand.grid( nrounds = seq(400,600, length.out = 3) ,
# eta = seq(0.02,0.05, length.out = 4) ,
# max_depth = seq(9,12, length.out = 4))
# assesment_grid <- xgb_tuneGrid
# Best parameters set
xgb_tuneGrid <- expand.grid( nrounds = 500 ,
eta = 0.02,
max_depth = 10)
assesment_grid <- xgb_tuneGrid
# Fixed fold index for the trainControl() so runs are reproducible
set.seed(1045481)
tr_index <- createFolds(m_input_data$target, k=SYS_CV_NFOLDS)
# Fixed RNG seeds for the trainControl() workers
set.seed(1056)
tr_seeds <- vector(mode = "list", length = SYS_CV_NFOLDS+1)
for(i in 1:SYS_CV_NFOLDS) tr_seeds[[i]] <- sample.int(1000, dim(assesment_grid)[1]+SYS_CV_NFOLDS)
set.seed(1056)
tr_seeds[[SYS_CV_NFOLDS+1]] <- sample.int(1000, 1)
ma_control <- trainControl(method = "cv",
number = SYS_CV_NFOLDS,
index = tr_index,
seeds = tr_seeds,
classProbs = T,
summaryFunction = twoClassSummary,
allowParallel = TRUE ,
verboseIter = TRUE)
############################################################# MODEL CREATION #####################################
create_log_entry("",paste0(ma_run_id ," Model Assesment started"),"SF")
create_log_entry(names(assesment_grid),assesment_grid,"F")
xgbc <- train( classification_formula , data = m_input_data ,
method = "xgbTree", metric="ROC" , trControl = ma_control, tuneGrid = assesment_grid ,
objective = 'binary:logistic',
min_child_weight = 5,
subsample = 0.6,
nthread = 4
)
classification_model <- xgbc
# Elapsed wall-clock seconds for the training run.
end_time <- proc.time() ; runtime <- round(as.numeric((end_time - start_time)[3]),2)
opt_parameters <- classification_model$bestTune
create_log_entry("",paste0(ma_run_id , " Model Assesment finished : " , runtime),"SF")
# Output feature importance based on modelling data
importance_data_obj <- varImp(classification_model,scale = FALSE)$importance
importance_data <- data.frame(Var = rownames(importance_data_obj),Imp = importance_data_obj$Overall,stringsAsFactors=FALSE)
create_log_entry("",paste0(ma_run_id , " Feature Importance : "),"F")
create_log_entry(names(importance_data),head(importance_data,200),"F")
# Persist the fitted model and its best tuning parameters.
setwd(SYSG_OUTPUT_MODELING_DIR)
save(classification_model, file = paste0(ma_run_id,".rda"))
save(opt_parameters, file = paste0("OM_",ma_run_id,".rda"))
# Create predictions based on evaluation data
create_pe_prediction_data(classification_model, e_input_data , ma_run_id)
}
# Fit the final production model on ALL prepared training data using the
# optimal parameters previously tuned by caret (no resampling: method="none"),
# plus the non-tunable parameters fixed after manual evaluation.
# Saves the fitted model as <opt_model_id>.rda in the CURRENT working
# directory (NOTE(review): unlike create_model_assessment_data, no setwd()
# precedes the save — confirm the intended output location).
#
# opt_model_id   : identifier for logging and the output file name.
# opt_parameters : one-row tuning grid (nrounds/eta/max_depth) from assessment.
# me_input_data  : full training data with `target` factor column.
create_p_model <- function (opt_model_id , opt_parameters, me_input_data)
{
classification_formula <- as.formula(paste("target" ,"~",
paste(names(me_input_data)[!names(me_input_data)=='target'],collapse="+")))
# Single fixed seed — method="none" performs exactly one fit.
set.seed(1056)
p_seeds <- vector(mode = "list", length = 1)
p_seeds[[1]] <- sample.int(1000, 1)
m_control <- trainControl(method = "none",
classProbs = T,
summaryFunction = twoClassSummary,
seeds = p_seeds,
allowParallel = TRUE ,
verboseIter = TRUE)
create_log_entry("",paste0(opt_model_id ," Optimal Model Creation started : "),"SF")
create_log_entry(names(opt_parameters), opt_parameters ,"F")
start_time <- proc.time()
opt_xgbc <- train(classification_formula , data = me_input_data ,
method = "xgbTree", trControl = m_control , tuneGrid = opt_parameters ,
objective = 'binary:logistic',
min_child_weight = 5,
subsample = 0.6,
nthread = 8
)
opt_classification_model <- opt_xgbc
# Elapsed wall-clock seconds for the final fit.
end_time <- proc.time() ; runtime <- round(as.numeric((end_time - start_time)[3]),2)
save(opt_classification_model, file = paste0(opt_model_id,".rda"))
create_log_entry("",paste0(opt_model_id , " Optimal Model Creation finished : " , runtime),"SF")
}
# Score a fitted classifier on the evaluation hold-out and log its AUC.
#
# classification_model : fitted caret model supporting type = "prob".
# p_input_data         : evaluation data with a `target` factor ("t0"/"t1").
# ma_run_id            : identifier used to tag the log entry.
#
# Side effect only (writes an AUC log entry via create_log_entry()).
create_pe_prediction_data <- function (classification_model, p_input_data , ma_run_id)
{
  prediction_score <- predict(classification_model,p_input_data , type = "prob")
  library(ROCR)
  # FIX: vectorized ifelse() replaces the original element-by-element loop
  # that grew the vector with c() (O(n^2)); results are identical.
  # NOTE(review): the score column is chosen by the TRUE label
  # (p_input_data$target), exactly as in the original — confirm this is the
  # intended input to the AUC computation.
  prediction_class_score <- ifelse(p_input_data$target == 't1',
                                   prediction_score[, "t1"],
                                   1 - prediction_score[, "t0"])
  prediction.obj <- prediction(prediction_class_score, p_input_data$target , label.ordering = c("t0","t1"))
  auc <- performance(prediction.obj, measure = 'auc')@y.values
  create_log_entry("",paste0(ma_run_id , " Evaluation AUC : " , auc),"SF")
}
# Score the fitted model on the prediction/submission data.
#
# classification_model : fitted caret model supporting type = "raw"/"prob".
# p_input_data         : prediction data (same column layout as training).
# m_input_data         : unused; kept for interface compatibility.
#
# Returns a two-column matrix/data frame: ID and the class-adjusted score.
create_p_prediction_data <- function (classification_model,p_input_data,m_input_data)
{
  # Assuming the same order of instances after input data processing for predictions
  p_input_data_ident <- p_input_data[,SYS_IDENTIFIER_FEATURES]
  prediction_class <- predict(classification_model,p_input_data , type = "raw")
  prediction_score <- predict(classification_model,p_input_data , type = "prob")
  # FIX: vectorized ifelse() replaces the original per-row loop that grew the
  # vector with c() (O(n^2)); the computed values are identical:
  # P(t1) when the predicted class is "t1", otherwise 1 - P(t0).
  prediction_class_score <- ifelse(prediction_class == 't1',
                                   prediction_score[, "t1"],
                                   1 - prediction_score[, "t0"])
  prediction_data <- cbind(p_input_data_ident,prediction_class_score)
  colnames(prediction_data) <- c("ID","target")
  return (prediction_data)
}
# Impute missing values on the MODELING data and remember the fill values.
# Numeric/integer columns get the column mean; character/factor columns get
# the column mode (most frequent value). The learned fill values are saved to
# m_missing_val.rda in SYSG_OUTPUT_MODELING_DIR so process_p_missing_data()
# can apply the same values to the prediction data.
#
# m_input_data       : data frame restricted to columns needing imputation.
# m_missing_features : column names, in the same order as m_input_data.
# Returns the imputed data frame.
process_m_missing_data <- function (m_input_data , m_missing_features)
{
input_data <- m_input_data
m_missing_val <- list()
for (i in 1:ncol(input_data)) {
if (class(input_data[,i]) %in% c("numeric", "integer") ) {
i_mean <- mean(input_data[,i], na.rm = TRUE)
input_data[is.na(input_data[,i]),i] <- i_mean
m_missing_val[[m_missing_features[i]]] <- i_mean
} else if (class(input_data[,i]) %in% c("character", "factor")) {
# Mode: sort the frequency table descending and take the first name.
i_mode <- names(sort(-table(input_data[,i])))[1]
input_data[is.na(input_data[,i]),i] <- i_mode
m_missing_val[[m_missing_features[i]]] <- i_mode
}
}
# Persist the fill values for reuse on the prediction data.
setwd(SYSG_OUTPUT_MODELING_DIR)
save(m_missing_val, file = paste0("m_missing_val.rda"))
return (input_data)
}
# Impute missing values on the PREDICTION data using the fill values learned
# (and saved) by process_m_missing_data() on the modeling data.
#
# p_input_data       : data frame restricted to the imputed columns.
# p_missing_features : column names, in the same order as p_input_data.
# Returns the imputed data frame.
process_p_missing_data <- function (p_input_data,p_missing_features)
{
input_data <- p_input_data
# Drop any stale copy before loading the saved one.
# NOTE(review): exists() searches enclosing frames while rm() only removes a
# local binding — this guard may warn/no-op if the object lives elsewhere.
if (exists("m_missing_val"))
rm(m_missing_val)
setwd(SYSG_OUTPUT_MODELING_DIR)
# load() returns the loaded object's NAME; get() fetches its value.
m_missing_val <- get(load("m_missing_val.rda"))
for (i in 1:ncol(input_data)) {
input_data[is.na(input_data[,i]),i] <- m_missing_val[[p_missing_features[i]]]
}
return (input_data)
}
# Create value-based engineered features:
# - day / month / hour columns for every timestamp feature
#   (parsed with the SAS-style "%d%B%y:%H:%M:%S" format; assumes the
#   LC_TIME locale has already been set to "C" upstream — see
#   create_data_exploration_report()), and
# - VAR_0493_GEN5: the first five characters of VAR_0493.
#
# me_vbef_input      : data frame containing the timestamp columns + VAR_0493.
# me_ts_var_features : names of the timestamp columns.
# Returns a data frame with the derived columns only.
create_vbef_features <- function(me_vbef_input,me_ts_var_features)
{
# Create Day , Month and Hour features for time series features
me_vbef_output <- NULL
me_ts_input_data <- me_vbef_input[,names(me_vbef_input) %in% me_ts_var_features]
me_ts_output_data <- NULL
for (i in 1:ncol(me_ts_input_data)) {
date <- strptime(me_ts_input_data[,i], "%d%B%y:%H:%M:%S")
day <- as.numeric(format(date, "%d"))
month <- as.numeric(format(date, "%m"))
hour <- as.numeric(format(date, "%H"))
i_me_ts_output_data <- cbind(day,month,hour)
# New column names: <original name> + day/month/hour suffix.
colnames(i_me_ts_output_data) <- paste0(names(me_ts_input_data)[i],c("day","month","hour"))
me_ts_output_data <- cbind(me_ts_output_data,i_me_ts_output_data)
}
# Create ZipCode based aggregated feature
library(stringr)
# VAR_0241_ZC <- paste0("ZC",str_sub(str_pad(me_vbef_input[["VAR_0241"]] ,5,pad = "0"),0,2))
VAR_0493_GEN5 <- str_sub(me_vbef_input[["VAR_0493"]],1,5)
# Replace source null values with NA
me_vbef_output <- data.frame(me_ts_output_data,VAR_0493_GEN5)
return(me_vbef_output)
}
# Create learning-based engineered features: MDLP (entropy-based) supervised
# discretization of the selected numeric columns, run in parallel via doMC.
# The learned cut points are saved to me_discr_break.rda so the same binning
# can be applied to the prediction data by process_lbef_features().
#
# me_lbef_input     : data frame with the columns to discretize.
# input_target_data : target factor used to supervise the cut-point search.
# me_disc_features  : names of the columns to discretize.
# Returns the discretized feature data frame (or NULL if nothing survived).
create_lbef_features <- function(me_lbef_input,input_target_data,me_disc_features)
{
# Fraction kept at 1 => the "sample" is the whole input.
SYS_LBEF_DATA_FRACTION <- 1
set.seed(1234)
me_lbef_sample_indexes <- createDataPartition(input_target_data , p = SYS_LBEF_DATA_FRACTION , list = FALSE)
me_lbef_m <- me_lbef_input[me_lbef_sample_indexes,]
me_lbef_output <- NULL
# Create Discretizated features
library(discretization)
setwd(SYSG_SYSTEM_DIR)
create_log_entry(""," Feature Discretization started","F")
library(doMC)
closeAllConnections()
registerDoMC(cores=8)
# One parallel task per feature; each returns its MDLP cut points.
me_discr_break <- foreach(i = 1:length(me_disc_features) , .combine = list) %dopar% {
create_log_entry("",paste0(me_disc_features[i] ," Feature Discretization started"),"F")
discr_model <- mdlp(cbind(me_lbef_m[[me_disc_features[i]]] ,input_target_data))
discr_model_breaks <- discr_model$cutp[[1]]
# mdlp() returns "All" when no informative cut point exists; otherwise
# extend the breaks with the feature's min/max so findInterval() covers
# the full range.
if (discr_model_breaks != "All")
discr_model_breaks <-
c(min(me_lbef_input[[me_disc_features[i]]]),discr_model_breaks,max(me_lbef_input[[me_disc_features[i]]]))
create_log_entry("",paste0(me_disc_features[i] ," Feature Discretization finished"),"F")
discr_model_breaks
}
# .combine=list nests results; renquote()/unlist()/eval flattens the nested
# list back to one element per feature.
me_discr_break <- lapply(unlist(renquote(me_discr_break)), eval)
names(me_discr_break) <- me_disc_features
# Persist cut points for reuse on the prediction data.
setwd(SYSG_OUTPUT_MODELING_DIR)
save(me_discr_break, file = paste0("me_discr_break.rda"))
me_lbef_output <- process_lbef_features(me_lbef_input,me_disc_features)
create_log_entry(""," Feature Discretization Finished","F")
closeAllConnections()
return(me_lbef_output)
}
# Apply the saved MDLP cut points (me_discr_break.rda) to the given data,
# turning each discretizable column into a categorical "RNG<k>" bin label.
# Features whose saved break set has length 1 (mdlp() returned "All", i.e.
# no informative cut point) are skipped entirely.
#
# discr_input_data : data frame containing the columns to bin.
# disc_features    : names of the columns to bin.
# Returns a data frame of "<name>_D" factor columns, or NULL if none remain.
process_lbef_features <- function(discr_input_data,disc_features)
{
# Create Discretizated features
library(discretization)
setwd(SYSG_SYSTEM_DIR)
create_log_entry(""," Feature Discretization Processing started","F")
# Drop any stale copy before loading the saved cut points.
# NOTE(review): exists() searches enclosing frames while rm() only removes a
# local binding — this guard may warn/no-op if the object lives elsewhere.
if (exists("me_discr_break"))
rm(me_discr_break)
setwd(SYSG_OUTPUT_MODELING_DIR)
# load() returns the loaded object's NAME; get() fetches its value.
discr_break <- get(load("me_discr_break.rda"))
discr_output_data <- NULL
new_disc_features <- disc_features
for(i in 1:length(disc_features)) {
breaks <- discr_break[[disc_features[i]]]
# "All" value: no cut points were learned for this feature — skip it.
if (length(breaks) == 1) {
new_disc_features <- new_disc_features[new_disc_features!=disc_features[i]]
next
}
# Feature with no breaks is reduced
breaks <- sort(breaks)
i_p_discr_output_data <- findInterval(discr_input_data[,disc_features[i]], breaks)
discr_output_data <- cbind(discr_output_data,paste0("RNG",as.numeric(i_p_discr_output_data)))
}
if(!is.null(discr_output_data)) {
discr_output_data <- data.frame(discr_output_data)
names(discr_output_data) <- paste0(new_disc_features,"_D")
}
create_log_entry(""," Feature Discretization Processing Finished","F")
return(discr_output_data)
}
# Write a log entry to the screen and/or to log.txt in SYSG_SYSTEM_DIR.
#
# message_title : optional header printed/written before the message.
# message       : log payload (string or data frame-like).
# log_mode      : flag string; contains "S" -> echo to screen,
#                 contains "F" -> append a timestamped line to log.txt.
#
# The caller's working directory is restored before returning.
create_log_entry <- function(message_title = "", message , log_mode)
{
  previous_dir <- getwd()
  setwd(SYSG_SYSTEM_DIR)
  # Screen output requested?
  if (grepl("S", log_mode)) {
    print(message_title , row.names = FALSE)
    print(message , row.names = FALSE)
  }
  # File output requested?
  if (grepl("F", log_mode)) {
    write.table(message_title , "log.txt", append = TRUE,col.names = FALSE , row.names = FALSE , quote = FALSE)
    write.table(paste0(Sys.time(), " : " , message) , "log.txt", append = TRUE, col.names = FALSE , row.names = FALSE , quote = FALSE,sep = ",")
  }
  setwd(previous_dir)
}
renquote <- function(l) if (is.list(l)) lapply(l, renquote) else enquote(l)
|
c5a28098da3d99c5c82a5ec01e0100bfbe51c2b8
|
bdcb7b9d3e48e828934e6019ebdadc06c617e8b5
|
/data-raw/original_events_code.R
|
cee8dbc3b7746c9d9d6dbc5a7916a3c8c6714ffd
|
[
"MIT"
] |
permissive
|
jfontestad/farr
|
8dd97a0b3465ba2c954677153144f3eafb75dd03
|
95a99b69522c700030c48dac4d8e95f9569e5ecd
|
refs/heads/main
| 2023-08-25T23:41:40.867831
| 2021-11-05T02:23:14
| 2021-11-05T02:23:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,667
|
r
|
original_events_code.R
|
# Load up the PostgreSQL driver, create a connection to the database
library(RPostgreSQL)
# The following function takes a list of permnos and event dates, then for each
# calls the function above to get event returns for each PERMNO-event date
# combination.
# Compute event-window returns from CRSP for each (permno, event date) pair.
#
# For every security/event-date combination, finds the trading-day window
# [event_date + days.before, end_event_date + days.after] via the
# crsp.anncdates / crsp.trading_dates lookup tables, then compounds daily
# returns over that window: raw (ret), market-adjusted (ret_mkt, vs vwretd)
# and size-adjusted (ret_sz, vs decret). Windows extending past the last
# date available in crsp.dsi are set to NA.
#
# permno         : security identifiers (coerced to integer).
# event.date     : event dates (coerced to Date).
# days.before    : trading-day offset applied to the window start.
# days.after     : trading-day offset applied to the window end.
# end.event.date : optional separate end date; defaults to event.date.
# label          : prefix used to rename the three return columns.
# Returns a data frame: permno, event_date, end_event_date, <label>{,_mkt,_sz}.
getEventReturns <- function(permno, event.date, days.before=0, days.after=0,
end.event.date=NULL, label="ret") {
event.date <- as.Date(event.date)
if (is.null(end.event.date)) { end.event.date <- event.date }
end.event.date <- as.Date(end.event.date)
permno <- as.integer(permno)
crsp <- dbConnect(PostgreSQL())
# Last date with index data: used to NA-out windows that run past it.
max_date <- dbGetQuery(crsp, "SELECT max(date) AS date FROM crsp.dsi")$date
temp <- data.frame(permno, event_date=event.date,
end_event_date=end.event.date)
# Upload the (permno, dates) pairs; rows with missing keys are dropped.
dbWriteTable(crsp, "permnos",
subset(temp, subset=!is.na(permno) & !is.na(event.date)),
row.names=FALSE, overwrite=TRUE)
# Map announcement dates to trading dates, shifted by days.before/days.after.
# NOTE(review): days.before/days.after are pasted directly into the SQL —
# safe only while they are function-controlled integers.
sql <- paste("
CREATE TEMP TABLE permnos_plus AS
SELECT a.permno, a.event_date, a.end_event_date,
c.date AS begin_date, d.date AS end_date
FROM permnos AS a, crsp.anncdates AS b, crsp.trading_dates AS c,
crsp.trading_dates AS d, crsp.anncdates AS e
WHERE a.event_date=b.anncdate AND b.td + ",
days.before, "= c.td AND
a.end_event_date=e.anncdate AND e.td + ",
days.after,"=d.td
AND c.date IS NOT NULL AND d.date IS NOT NULL;
DROP TABLE IF EXISTS permnos;
ANALYZE permnos_plus")
dbGetQuery(crsp, sql)
# Compound daily returns over each window (product() is a DB aggregate).
sql <-"
SELECT a.permno, a.event_date, a.end_event_date,
product(1+ret)-1 AS ret,
product(1+ret)-product(1+vwretd) AS ret_mkt,
product(1+ret)-product(1+decret) AS ret_sz
FROM permnos_plus AS a
INNER JOIN crsp.rets AS b
USING (permno)
WHERE b.date BETWEEN a.begin_date AND a.end_date
GROUP BY a.permno, a.event_date, a.end_event_date"
# cat(sql)
ret.data <- dbGetQuery(crsp, sql)
dbGetQuery(crsp, "DROP TABLE IF EXISTS permnos_plus")
dbDisconnect(crsp)
# NA-out returns for windows ending after the last available data date.
after.max.date <- ret.data$end_event_date > max_date
# print(max_date)
if (length(after.max.date) > 0) {
for (i in c("ret", "ret_mkt", "ret_sz")) {
ret.data[after.max.date, i] <- NA
}
}
# Label variables using label given appended to suffixes
# NOTE(review): `suffixes`/`new.names` are computed but unused — the actual
# renaming happens via sub() below; confirm they can be removed.
suffixes <- c("", "_sz", "_mkt")
new.names <- paste(label, suffixes, sep="")
names(ret.data) <- sub("^ret", label, names(ret.data), perl=TRUE)
return(ret.data)
}
|
e01c8aabe0e6cab8955bcf03876c2b4c3862839f
|
c42ac5d274e19dfe2ea47dec3cf3dd64b87b8fcc
|
/air_pollution.R
|
276b9ea64f8a65c09e4eb9f9c1e73da4e086f5dd
|
[] |
no_license
|
jomaghacot/air_pollution
|
c935c8a641be8eae483b17e546862f59009dabc8
|
e8280f6605c3596153ba356f7c79770fa6b40b30
|
refs/heads/master
| 2023-01-24T18:47:52.274558
| 2020-12-15T08:13:33
| 2020-12-15T08:13:33
| 321,594,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28
|
r
|
air_pollution.R
|
setwd("D:/R/air_pollution")
|
8ccfe1fb71d16f5fc4070769c3638e527981ce5c
|
bb80c7add11e2ab33414f5af9172e4755c215ace
|
/src/01_import_spotify.R
|
9ab89ce511ed6e82eb604fcdf7963b3a4974ecde
|
[] |
no_license
|
paulapereda/tayloR-old-version
|
1e0b477952534b154cea47b65262bba7a64622e1
|
17667cbfd657973c5c6684259cef211c8b1bdfe5
|
refs/heads/master
| 2022-02-18T10:35:43.632530
| 2019-08-28T14:13:35
| 2019-08-28T14:13:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,395
|
r
|
01_import_spotify.R
|
# Pull Taylor Swift's audio features from the Spotify API and drop karaoke,
# radio-special, live, and duplicate album/track variants, then cache the
# cleaned data frame as data/spotify_df.rds.
library(tidyverse)
library(spotifyr)
# Loads the Spotify API credentials/token used by spotifyr.
source('token.R')
# (!) REVIEW: some albums are missing their final three tracks:
############# (i) Fearless
############# (ii) Speak Now
############# (iii) Red
############# (iv) 1989
# NOTE(review): the != filters below also drop rows where the compared column
# is NA — rewriting them with %in% would change NA handling; kept as-is.
spotify_df <- get_artist_audio_features('taylor swift') %>%
# Remove alternate album editions (radio specials, karaoke, live, platinum).
filter(album_name != "1989 (Big Machine Radio Release Special)") %>%
filter(album_name != "Fearless (Big Machine Radio Release Special)") %>%
filter(album_name != "Fearless Karaoke") %>%
filter(album_name != "Fearless Platinum Edition") %>%
filter(album_name != "Red (Big Machine Radio Release Special)") %>%
filter(album_name != "reputation (Big Machine Radio Release Special)") %>%
filter(album_name != "reputation Stadium Tour Surprise Song Playlist") %>%
filter(album_name != "Speak Now (Big Machine Radio Release Special)") %>%
filter(album_name != "Speak Now World Tour Live") %>%
filter(album_name != "Fearless Platinum Edition") %>%
filter(album_name != "Taylor Swift (Big Machine Radio Release Special)") %>%
filter(album_name != "Taylor Swift Karaoke") %>%
filter(album_name != "Taylor Swift Karaoke: 1989") %>%
filter(album_name != "Taylor Swift Karaoke: Red") %>%
filter(album_name != "Taylor Swift Karaoke: Speak Now") %>%
# Remove individual karaoke / remix track variants.
filter(track_name != "22 - Karaoke Version") %>%
filter(track_name != "All Too Well - Karaoke Version") %>%
filter(track_name != "Begin Again - Karaoke Version") %>%
filter(track_name != "Everything Has Changed - Karaoke Version") %>%
filter(track_name != "Holy Ground - Karaoke Version") %>%
filter(track_name != "I Almost Do - Karaoke Version") %>%
filter(track_name != "I Knew You Were Trouble. - Karaoke Version") %>%
filter(track_name != "Love Story - J Stax Radio Mix") %>%
filter(track_name != "Mine - POP Mix") %>%
filter(track_name != "Red - Karaoke Version") %>%
filter(track_name != "Sad Beautiful Tragic - Karaoke Version") %>%
filter(track_name != "Starlight - Karaoke Version") %>%
filter(track_name != "State Of Grace - Karaoke Version") %>%
filter(track_name != "Stay Stay Stay - Karaoke Version") %>%
filter(track_name != "The Last Time - Karaoke Version") %>%
filter(track_name != "The Lucky One - Karaoke Version") %>%
filter(track_name != "Treacherous - Karaoke Version") %>%
filter(track_name != "We Are Never Ever Getting Back Together - Karaoke Version") %>%
filter(track_name != "Teardrops on My Guitar - Pop Version") %>%
# Remove the Japan-market duplicates and specific duplicate album releases.
filter(available_markets != "JP") %>%
filter(album_id != "4uUAUqIfSomFTbbjGp3TYp") %>%
filter(album_id != "5fy0X0JmZRZnVa2UEicIOl") %>%
filter(album_id != "6Ar2o9KCqcyYF9J0aQP3au") %>%
filter(album_id != "3Mvk2LKxfhc2KVSnDYC40I") %>%
filter(album_id != "2dqn5yOQWdyGwOpOIi9O4x") %>%
filter(album_id != "1rwH2628RIOVM3WMwwO418") %>%
filter(album_id != "5eyZZoQEFQWRHkV2xgAeBw")
# Cache the cleaned data set for the downstream analysis scripts.
readr::write_rds(spotify_df, 'data/spotify_df.rds')
|
d1960459aa576fcc731a721add3f4aa3a8f470c7
|
768c84c5314c42cad91f3b53c5fb8ede09bb4a68
|
/content/code/R/fizzbuzz.R
|
98d37652f5f6930ead6cef06c310f5c3939606f6
|
[
"CC-BY-4.0"
] |
permissive
|
coderefinery/testing
|
6842146c0dd1af94ce0e65c320583026b8277736
|
00affc88985ceac299bb4ec4a56971e9a9afaffd
|
refs/heads/main
| 2023-05-27T05:19:59.943039
| 2023-05-15T19:28:30
| 2023-05-15T19:28:30
| 76,298,304
| 9
| 41
|
CC-BY-4.0
| 2023-08-24T21:06:03
| 2016-12-12T21:46:14
|
C++
|
UTF-8
|
R
| false
| false
| 810
|
r
|
fizzbuzz.R
|
# fizz_buzz: classic FizzBuzz for a single non-negative integer.
#
# Returns "Fizz" for multiples of 3, "Buzz" for multiples of 5, "FizzBuzz"
# for multiples of both, and the number itself otherwise. Errors on
# non-integer or negative input (and on non-numeric input, via `%%`).
#
# Fix: the scalar `if` conditions used the vectorized operators `|` and `&`;
# scalar conditions should use the short-circuiting `||` and `&&`
# (since R 4.3, `&&`/`||` additionally error on length > 1 operands,
# making the intent explicit).
fizz_buzz <- function(number){
  if(!number%%1==0 || number < 0) {
    stop("non-integer or negative input not allowed!")
  }
  if(number%%3 == 0 && number%%5 == 0) {
    return('FizzBuzz')
  }
  else if(number%%3 == 0) {
    return('Fizz')
  }
  else if (number%%5 == 0){
    return('Buzz')
  }
  else {
    return(number)
  }
}
# apply it to the numbers 1 to 50
# (prints a mix of bare numbers and the strings "Fizz"/"Buzz"/"FizzBuzz")
for (number in 1:50) {
  print(fizz_buzz(number))
}
# Unit tests (testthat): plain numbers pass through unchanged, multiples of
# 3 / 5 / 15 map to Fizz / Buzz / FizzBuzz, and negative, non-integer or
# non-numeric input raises an error.
library(testthat)
test_that("Test FizzBuzz", {
  expect_equal(fizz_buzz(1), 1)
  expect_equal(fizz_buzz(2), 2)
  expect_equal(fizz_buzz(3), 'Fizz')
  expect_equal(fizz_buzz(4), 4)
  expect_equal(fizz_buzz(5), 'Buzz')
  expect_equal(fizz_buzz(15), 'FizzBuzz')
  expect_error(fizz_buzz(-1))
  expect_error(fizz_buzz(1.5))
  # non-numeric input errors inside `%%` before the explicit stop() is reached
  expect_error(fizz_buzz('rabbit'))
})
|
f0787db602d7211669089e7aec77d32066bfbb9d
|
7182a132d3dac38e52d833a0ac0f4e6792b326d0
|
/R/fileio-package.R
|
860636c2199212beab26530a6b36b41d0335c678
|
[] |
no_license
|
hrbrmstr/fileio
|
29e1ccb173f20aad942d21e656977c96456b7dc2
|
0a0ebdabfa7cc754825a2e5d3862fd27340bb498
|
refs/heads/master
| 2020-03-15T06:40:14.332798
| 2018-05-04T01:42:52
| 2018-05-04T01:42:52
| 132,012,660
| 23
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 506
|
r
|
fileio-package.R
|
#' Post Files, Text or R Data to 'file.io'
#'
#' The 'file.io' <https://file.io> service enables ephemeral, convenient
#' and anonymous file sharing. Methods are provided to upload existing files,
#' R data objects or text messages to this service.
#'
#' @md
#' @name fileio
#' @docType package
#' @author Bob Rudis (bob@@rud.is)
#' @references [file.io privacy policy](https://www.file.io/privacy.html)
#' @importFrom httr POST upload_file user_agent stop_for_status content
#' @importFrom uuid UUIDgenerate
NULL
|
a97f38eb9f87f0b288370b72bc2cde2e59ebf311
|
cce66c207e90a9b977fb7a4b17f930e58542bded
|
/code/nationwide.R
|
ab9befe89a02f086041620f0943c1de9fd021623
|
[
"MIT"
] |
permissive
|
STRIDES-Codes/Examination-of-COVID-19-s-impact-on-maternal-health-disparities
|
6f5abaa88f5966782215879d65dca8061e2887ed
|
20fca4f3ab12e89ce1bfd3d977686c6d4745dab4
|
refs/heads/main
| 2023-05-31T22:47:10.783917
| 2021-06-25T13:19:49
| 2021-06-25T13:19:49
| 375,821,512
| 0
| 2
|
MIT
| 2021-06-22T18:53:01
| 2021-06-10T20:22:31
| null |
UTF-8
|
R
| false
| false
| 3,285
|
r
|
nationwide.R
|
# Looking at the entire COVID dataset nation-wide.
# Compares representation in COVID-19 cases against census proportions by
# sex/race group. Similar code to state_picker.R but for the US as a whole —
# see state_picker.R for more heavily commented code.
#
# Fix: `funs()` (used via mutate_all/summarise_at) is defunct in current
# dplyr; replaced with `across()` + lambda notation.
library(tidyverse)
library(maps)
library(ggthemes)
library(ggeasy)

entire_covid_data <- read.csv("Downloads/COVID-19_Case_Surveillance_Public_Use_Data_with_Geography_full.csv")

# Map CDC race labels to the short census column codes.
name_shift <- data.frame(old_race = c("Asian", "Black", "American Indian/Alaska Native",
                                      "Native Hawaiian/Other Pacific Islander", "White", "Multiple/Other"),
                         new_race = c("AA", "BA", "IA", "NA", "WA", "TOM"))

# Map census sex_race codes to human-readable axis labels for the plot.
name_shift_graph <- data.frame(graph_name = c("Asian (M)", "Black (M)", "American Indian/Alaska Native (M)",
                                              "Native Hawaiian/Other Pacific Islander (M)", "White (M)", "Multiple/Other (M)",
                                              "Asian (F)", "Black (F)", "American Indian/Alaska Native (F)",
                                              "Native Hawaiian/Other Pacific Islander (F)", "White (F)", "Multiple/Other (F)"),
                               old_name = c("AA_MALE", "BA_MALE", "IA_MALE", "NA_MALE", "WA_MALE", "TOM_MALE",
                                            "AA_FEMALE", "BA_FEMALE", "IA_FEMALE", "NA_FEMALE", "WA_FEMALE", "TOM_FEMALE"))

# Adult COVID case counts per sex/race group; spread+gather round trip fills
# missing sex/race combinations with a zero count.
# (gather/spread are superseded by pivot_longer/pivot_wider but kept here to
# preserve the exact row ordering downstream code was written against)
observed_cases_usa <- entire_covid_data %>%
  filter(!is.na(sex),
         !(sex %in% c("Missing", "Unknown")),
         !is.na(race),
         !(race %in% c("Missing", "Unknown")),
         age_group != "0 - 17 years") %>%
  count(sex, race) %>%
  left_join(name_shift, by = c("race"="old_race")) %>%
  select(-race) %>%
  rename("race"="new_race") %>%
  mutate(sex_race = paste(race, toupper(sex), sep = "_")) %>%
  select("sex_race", "Observed_Counts" = "n") %>%
  spread(sex_race, Observed_Counts, fill = 0) %>%
  gather(sex_race, Observed_Counts, 1:12)

pop_data <- read_csv("Downloads/cc-est2019-alldata.csv")

# YEAR == 12 is the 2019 estimate; AGEGRP >= 4 restricts to adults.
# Columns 7:22 hold the total and per-group population counts.
cleaned_pop_data_usa <- pop_data %>%
  filter(YEAR == 12, AGEGRP >= 4) %>%
  select(c(7:22)) %>%
  summarise(across(-AGEGRP, sum))

tot_pop <- cleaned_pop_data_usa$TOT_POP

# Convert counts to national proportions.
cleaned_pop_data_states_usa <- cleaned_pop_data_usa %>%
  mutate(across(everything(), ~ .x / tot_pop))

# Long-format census proportions joined with the observed case counts.
props_states_usa <- cleaned_pop_data_states_usa %>%
  select(c(4:15)) %>%
  gather(sex_race, Census_Props, 1:12) %>%
  left_join(observed_cases_usa, by=c("sex_race"))

see_diff_state_usa <- props_states_usa %>%
  mutate(Observed_Pct = Observed_Counts/sum(Observed_Counts)) %>%
  select(-Observed_Counts) %>%
  group_by(sex_race) %>%
  gather(DataSet, n, 2:3) %>%
  left_join(name_shift_graph, by=c("sex_race"="old_name")) %>%
  mutate(DataSet = ifelse(DataSet == "Observed_Pct", "COVID-19 Tests Proportion", "Census Proportion"))

# Side-by-side bars: census proportion vs. share of COVID-19 cases per group.
ggplot(filter(see_diff_state_usa), aes(x=graph_name, y=n, fill=DataSet)) +
  geom_bar(stat="identity", position=position_dodge()) +
  coord_flip() +
  ggtitle("United States") +
  ylab("Percent") +
  xlab("Sex/Race Group") +
  labs(fill = "Populations") +
  theme_bw() +
  theme(legend.position="bottom") +
  ggeasy::easy_center_title()
|
e40e26aae810b8f28fbcbf3f7bbdd301b53bbb1d
|
e34c6b5b46a16501607a472b78a82cc631fa65a9
|
/Practicas_TareasU2/Practica4.r
|
4540b27384f8ae7ed6e564c732e18c5ce84d1d6e
|
[] |
no_license
|
manuelorozcotoro/Mineria_De_Datos
|
379598c8045dbf14aa03141f1ee37b3c8cdebd2f
|
595aedb734c045c1e2f804817d016242d3fd756c
|
refs/heads/development
| 2020-12-30T01:54:25.851040
| 2020-06-17T03:44:40
| 2020-06-17T03:44:40
| 238,821,758
| 0
| 3
| null | 2020-06-17T03:44:41
| 2020-02-07T01:42:13
|
R
|
ISO-8859-1
|
R
| false
| false
| 4,901
|
r
|
Practica4.r
|
# Practica 4: logistic regression on the Social Network Ads dataset.
getwd()
# NOTE(review): hard-coded, machine-specific working directory; adjust per machine.
setwd("/Users/Dell/Desktop/DataMining-master/MachineLearning/LogisticRegression")
getwd()

# Import the dataset we will work with.
dataset <- read.csv('Social_Network_Ads.csv')
# Keep only the columns used below: Age, EstimatedSalary and Purchased.
dataset <- dataset[, 3:5]

# Split into training (75%) and test (25%) sets with caTools;
# set.seed(123) seeds the random number generator for reproducibility.
library(caTools)
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)

# Feature scaling (normalization) of the two numeric predictors —
# recommended before classification.
training_set[, 1:2] <- scale(training_set[, 1:2])
test_set[, 1:2] <- scale(test_set[, 1:2])

# Fit logistic regression on the training set using glm
# (generalized linear model) with a binomial family.
classifier <- glm(formula = Purchased ~ .,
                  family = binomial,
                  data = training_set)

# Predict probabilities on the test set and threshold at 0.5.
prob_pred <- predict(classifier, type = 'response', newdata = test_set[-3])
prob_pred
y_pred <- ifelse(prob_pred > 0.5, 1, 0)
y_pred

# Confusion matrix: compares actual vs. predicted Purchased labels.
cm <- table(test_set[, 3], y_pred)
cm

# ggplot2 is used for the following plots.
library(ggplot2)
# Fitted logistic curve of Purchased vs. EstimatedSalary (training set).
ggplot(training_set, aes(x=EstimatedSalary, y=Purchased)) + geom_point() +
  stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)
# Fitted logistic curve of Purchased vs. Age (training set).
ggplot(training_set, aes(x=Age, y=Purchased)) + geom_point() +
  stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)
# Same curve for the test set: Purchased vs. EstimatedSalary.
ggplot(test_set, aes(x=EstimatedSalary, y=Purchased)) + geom_point() +
  stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)
# And Purchased vs. Age on the test set.
ggplot(test_set, aes(x=Age, y=Purchased)) + geom_point() +
  stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)

# The decision-boundary plots need the archived ElemStatLearn package.
# FIX: the original called install.packages(path_to_source, ...) with an
# undefined variable, which always errors; install from the local tarball
# only when the package is missing.
if (!requireNamespace("ElemStatLearn", quietly = TRUE)) {
  install.packages("~/Downloads/ElemStatLearn_2015.6.26.2.tar", repos = NULL, type = "source")
}

# Decision boundary over a fine grid for the training set.
library(ElemStatLearn)
set <- training_set
X1 <- seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 <- seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) <- c('Age', 'EstimatedSalary')
prob_set <- predict(classifier, type = 'response', newdata = grid_set)
y_grid <- ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
     main = 'Logistic Regression (Training set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))

# Same visualization for the test set.
set <- test_set
X1 <- seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 <- seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set <- expand.grid(X1, X2)
colnames(grid_set) <- c('Age', 'EstimatedSalary')
prob_set <- predict(classifier, type = 'response', newdata = grid_set)
y_grid <- ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
     main = 'Logistic Regression (Test set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
f25eea4c66a9cabc723b5eb3e9223eea02805f1b
|
3a073957ba775b8457d58760ec43029c12303856
|
/run_analysis.R
|
08aefb57c514a45a8c482deb6b28135fa78dabc9
|
[] |
no_license
|
morsta/coursera_get_clean_data
|
bef18e7913edd6ddf20814c508b4df9f0bec7b32
|
e47ca2e7c70305564193728aede47d0d091384f6
|
refs/heads/master
| 2021-01-01T05:29:29.038075
| 2016-05-22T22:04:06
| 2016-05-22T22:04:06
| 59,412,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,477
|
r
|
run_analysis.R
|
# Merge the UCI HAR train/test sets, keep mean/std measurements, and compute
# the per-(Activity, Subject) average of each measurement.

# read the variable (feature) names from the given file:
var_names <- read.table("features.txt", stringsAsFactors = FALSE)
dat_test <- read.table("test/X_test.txt", col.names = var_names[,2])
dat_train <- read.table("train/X_train.txt", col.names = var_names[,2])
merged <- rbind(dat_train, dat_test)

# read in activity data and convert the numeric codes to a factor
activities_train <- read.table("train/y_train.txt", col.names = "Activity")
activities_test <- read.table("test/y_test.txt", col.names = "Activity")
activities <- rbind(activities_train, activities_test)
activities$Activity <- factor(activities$Activity)

# read in subject data
subj_train <- read.table("train/subject_train.txt", col.names ="Subject")
subj_test <- read.table("test/subject_test.txt", col.names ="Subject")
subject <- rbind(subj_train, subj_test)
subject$Subject <- factor(subject$Subject)

# rename the activities (descriptive names)
act_labels <- read.table("activity_labels.txt", stringsAsFactors = FALSE)
levels(activities$Activity) <- act_labels$V2

library(dplyr)

# extract the measurements on the mean and standard deviation:
subs <- grep(".([Mm]ean|std).", names(merged))
data <- cbind.data.frame(subject, activities, merged[subs])

# group by activity and subject:
by_act_subj <- group_by(data, Activity, Subject)

# calculate the MEAN of every measurement for each (Activity, Subject) pair.
# FIX: summarise_each()/funs() are defunct in current dplyr — replaced with
# summarise(across()); the old comment also wrongly said "median".
data_means <- summarise(by_act_subj, across(everything(), ~ mean(.x, na.rm = TRUE)))
|
2763c18ba7ce8274c5c0f4cd5dd2886eb0d0679d
|
4cecc8cc52436a08674442d4df18b25234e0cbfa
|
/R/3_distances_disc.R
|
b3c836e20c673fb7f2585d755cbbec96c67f17ae
|
[] |
no_license
|
anjaweigel/mixComp_package
|
3be8e19eff9a943dadb3e2bb755954f21219d3c4
|
eb27f7ec39fc1e5bdaf5fe4a6e4b2a8f29a16254
|
refs/heads/master
| 2022-12-07T17:35:08.432093
| 2020-08-26T09:07:31
| 2020-08-26T09:07:31
| 279,328,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,429
|
r
|
3_distances_disc.R
|
## Purpose: returns the L2 distance function corresponding to parameters x
##
## The returned closure evaluates, for a parameter vector x that packs the
## first (j-1) mixture weights followed by the component parameters (grouped
## by parameter, j values each), the criterion
##   sum_{k=0}^{n.inf} f_theta(k)^2  -  (2/N) * sum_i f_theta(dat_i),
## i.e. the (squared) L2 distance between the mixture mass function and the
## empirical mass function, up to an additive constant.
##
## dat:          observed data (non-negative integers), length N
## dist:         name of the component distribution (e.g. "pois")
## formals.dist: parameter names of the component distribution
## ndistparams:  number of component parameters
## j:            number of mixture components
## n.inf:        truncation point approximating the infinite sum over the support
## dist_call:    the d<dist> mass function used to evaluate the components
##
## Fix: removed a dead `w <- x[1:(j-1)]` assignment (w was always rebuilt
## before first use) plus a redundant rebuild and duplicate non-negativity
## guard before the second term.
.get.fmin.L2 <- function(dat, dist, formals.dist, ndistparams, j, n.inf, N, dist_call){

  function(x){

    # first term of difference: sum over values up to "n.inf";
    # each parameter becomes an (n.inf+1) x j matrix so dist_call evaluates
    # all components at all support points in one vectorized call
    theta.list.long <- vector(mode = "list", length = ndistparams)
    names(theta.list.long) <- formals.dist
    for(i in seq_len(ndistparams)){
      theta.list.long[[i]] <- matrix(x[(i*j):((1 + i)*j - 1)], nrow = (n.inf + 1), ncol = j,
                                     byrow = TRUE)
    }
    theta.list.long$x <- 0:n.inf

    # NAs or warnings can happen as solnp sometimes uses intermediate
    # parameter values outside of the box constraints (to speed up convergence
    # and avoid numerical ill conditioning)
    mat <- suppressWarnings(do.call(dist_call, args = theta.list.long))

    # the j-th weight is implied by the first (j-1); reject infeasible weights
    # with a huge penalty value
    w <- c(x[1:(j - 1)], 1 - sum(x[1:(j - 1)]))
    if(any(w < 0)) return(sqrt(.Machine$integer.max))

    f.theta <- as.matrix(mat) %*% w
    if(any(is.na(f.theta))) return(sqrt(.Machine$integer.max))
    f.1 <- sum(f.theta^2) # first term of difference

    # second term of difference: sum over the data as we multiply with the
    # empirical distribution function
    for(i in seq_len(ndistparams)){
      theta.list.long[[i]] <- matrix(x[(i*j):((1 + i)*j - 1)], nrow = N, ncol = j,
                                     byrow = TRUE)
    }
    theta.list.long$x <- dat

    mat <- suppressWarnings(do.call(dist_call, args = theta.list.long))

    f.theta.obs <- as.matrix(mat) %*% w
    if(any(is.na(f.theta.obs))) return(sqrt(.Machine$integer.max))
    f.2 <- (2/N)*sum(f.theta.obs) # second term of difference

    return(f.1 - f.2)
  }
}
## Purpose: returns the L2 distance function corresponding to parameters x
## when the mixture consists of only a single component
##
## No weights are involved here: x holds just the ndistparams component
## parameters. The criterion mirrors .get.fmin.L2 with j = 1, w = 1.
##
## Fix: the original wrapped an already-summed scalar in a second sum()
## for the data term; a single sum suffices.
.get.fmin.L2.0 <- function(dat, dist, formals.dist, ndistparams, n.inf, N, dist_call){

  function(x){

    # first term of difference: sum of squared mass over values up to "n.inf"
    theta.list.long <- vector(mode = "list", length = ndistparams)
    names(theta.list.long) <- formals.dist
    for(i in seq_len(ndistparams)){
      theta.list.long[[i]] <- rep(x[i], n.inf + 1)
    }
    theta.list.long$x <- 0:n.inf

    # NAs or warnings can happen as solnp sometimes uses intermediate
    # parameter values outside of the box constraints (to speed up convergence
    # and avoid numerical ill conditioning)
    f.theta <- suppressWarnings(do.call(dist_call, args = theta.list.long))
    if(any(is.na(f.theta))) return(sqrt(.Machine$integer.max))
    f.1 <- sum(f.theta^2)

    # second term of difference: sum over the data as we multiply with the
    # empirical distribution function
    for(i in seq_len(ndistparams)){
      theta.list.long[[i]] <- rep(x[i], N)
    }
    theta.list.long$x <- dat

    f.components.obs <- suppressWarnings(do.call(dist_call, args = theta.list.long))
    if (any(is.na(f.components.obs))) return(sqrt(.Machine$integer.max))
    f.2 <- (2/N)*sum(f.components.obs)

    return(f.1 - f.2)
  }
}
## Purpose: L2 distance based method of estimating the mixture complexity of a
## discrete mixture (as well as the weights and component parameters) returning
## a 'paramEst' object
##
## For each candidate order j0, the L2 criterion is minimized for j0 and
## j0 + 1 components; estimation stops once the decrease in the criterion
## falls below a threshold ("LIC", "SBC" or a user function of n and j),
## or once j.max is reached.
##
## BUGFIX: the weight inequality constraint passed to solnp for the
## (j0 + 1)-component fit previously read `restrictions$ineq` — i.e. the
## stale restrictions object built for the single-component fit in the first
## iteration — instead of `restrictions.j1$ineq`. L2.boot.disc already does
## this correctly; L2.disc is now consistent with it.
L2.disc <- function(obj, j.max = 10, n.inf = 1000, threshold = "SBC", control = c(trace = 0)){

  # get standard variables (dat, N, dist, formals.dist, ndistparams, lower,
  # upper, MLE.function, dist_call, discrete, n.max, ...) into this frame
  variable_list <- .get.list(obj)
  list2env(variable_list, envir = environment())

  # check relevant inputs
  .input.checks.functions(obj, thrshL2 = threshold, j.max = j.max, n.inf = n.inf,
                          discrete = discrete, Hankel = FALSE, param = TRUE)

  j0 <- 0

  repeat{

    j0 <- j0 + 1 # current complexity estimate
    j1 <- j0 + 1

    # threshold on the required decrease of the L2 criterion
    if(is.function(threshold)){
      thresh <- threshold(n = N, j = j0)
    }
    else if(threshold == "LIC"){
      thresh <- (0.6*log((j1)/j0))/N
    }
    else if (threshold == "SBC"){
      thresh <- (0.6*log(N)*log((j1)/j0))/N
    }

    if(j0 > 1){ # if j1 was calculated in the last interation, pass it over to j0...
      theta.j0 <- theta.j1
      L2.j0 <- L2.j1
      conv.j0 <- conv.j1
      values.j0 <- values.j1

    } else { # ... or calculate j0 directly if j0 = 1 (j1 has not been calculated yet)
      # in this case we already know w = 1 (single component mixture)

      fmin <- .get.fmin.L2.0(dat = dat, dist = dist, formals.dist = formals.dist,
                             ndistparams = ndistparams, n.inf = n.inf, N = N, dist_call)

      restrictions <- .get.restrictions(j = j0, ndistparams = ndistparams, lower = lower,
                                        upper = upper)
      lx <- restrictions$lx
      ux <- restrictions$ux
      initial.j0 <- .get.initialvals(dat, j0, ndistparams, MLE.function, lower, upper, dist,
                                     formals.dist)

      opt <- solnp(initial.j0, fun = fmin, LB = lx, UB = ux, control = control)
      .printresults(opt, j0, dist, formals.dist, ndistparams)
      theta.j0 <- opt$pars
      L2.j0 <- opt$values[length(opt$values)]
      conv.j0 <- opt$convergence
      values.j0 <- opt$values

    }

    # optimization for j1. Starts from j1 = 2 so we always need to include weight
    # restrictions in optimization
    fmin <- .get.fmin.L2(dat = dat, dist = dist, formals.dist = formals.dist,
                         ndistparams = ndistparams, j = j1, n.inf = n.inf, N = N, dist_call)

    restrictions.j1 <- .get.restrictions(j = j1, ndistparams = ndistparams, lower = lower,
                                         upper = upper)
    # use the j1-component restrictions (bug fix — was `restrictions$ineq`)
    ineq <- restrictions.j1$ineq
    lx.j1 <- restrictions.j1$lx
    ux.j1 <- restrictions.j1$ux
    initial.j1 <- .get.initialvals(dat, j1, ndistparams, MLE.function, lower, upper, dist,
                                   formals.dist)

    opt <- solnp(initial.j1, fun = fmin, LB = lx.j1, UB = ux.j1, ineqfun = ineq,
                 ineqLB = 0, ineqUB = 1, control = control)
    theta.j1 <- opt$pars <- .augment.pars(opt$pars, j1)
    L2.j1 <- opt$values[length(opt$values)] <- fmin(opt$pars)
    conv.j1 <- opt$convergence
    values.j1 <- opt$values
    .printresults(opt, j1, dist, formals.dist, ndistparams)

    # stop once the extra component no longer pays for itself, or at j.max
    if((L2.j0 - L2.j1) < thresh){
      break
    } else if(j0 == j.max){
      break
    }

  }
  .return.paramEst(j0, j.max, dat, theta.j0, values.j0, conv.j0, dist, ndistparams, formals.dist,
                   discrete, MLE.function = NULL)
}
## Purpose: L2 distance based method of estimating the mixture complexity of a
## discrete mixture (as well as the weights and component parameters) returning
## a 'paramEst' object (using bootstrap)
##
## For each candidate order j0, the observed drop in the L2 criterion from j0
## to j0+1 components is compared against a parametric-bootstrap interval
## [ql, qu] of that drop under the fitted j0-component model: B resamples are
## drawn from the fitted mixture and the drop recomputed for each. Estimation
## stops once the observed drop lies inside the interval (i.e. is explainable
## under the j0-component model) or once j.max is reached. '...' is forwarded
## to boot::boot().
L2.boot.disc <- function(obj, j.max = 10, n.inf = 1000, B = 100, ql = 0.025, qu = 0.975,
                         control = c(trace = 0), ...){

  # get standard variables
  variable_list <- .get.list(obj)
  list2env(variable_list, envir = environment())

  # check relevant inputs
  .input.checks.functions(obj, j.max = j.max, B = B, n.inf = n.inf, ql = ql, qu = qu,
                          discrete = discrete, Hankel = FALSE, param = TRUE)

  j0 <- 0

  repeat{

    j0 <- j0 + 1 # current complexity estimate
    j1 <- j0 + 1

    if(j0 > 1){ # if j1 was calculated in the last interation, pass it over to j0...
      theta.j0 <- theta.j1
      L2.j0 <- L2.j1
      conv.j0 <- conv.j1
      values.j0 <- values.j1
      # also need to pass over the restrictions as they will be used in the bootstrap
      ineq.j0 <- ineq.j1
      lx.j0 <- lx.j1
      ux.j0 <- ux.j1

    } else { # ... or calculate j0 directly if j0 = 1 (j1 has not been calculated yet)
      # in this case we already know w = 1 (single component mixture)

      fmin.j0 <- .get.fmin.L2.0(dat = dat, dist = dist, formals.dist = formals.dist,
                                ndistparams = ndistparams, n.inf = n.inf, N = N, dist_call)

      restrictions.j0 <- .get.restrictions(j = j0, ndistparams = ndistparams, lower = lower,
                                           upper = upper)
      lx.j0 <- restrictions.j0$lx
      ux.j0 <- restrictions.j0$ux
      initial.j0 <- .get.initialvals(dat, j0, ndistparams, MLE.function, lower, upper,
                                     dist, formals.dist)

      opt <- solnp(initial.j0, fun = fmin.j0, LB = lx.j0, UB = ux.j0, control = control)
      .printresults(opt, j0, dist, formals.dist, ndistparams)
      theta.j0 <- opt$pars
      L2.j0 <- opt$values[length(opt$values)]
      conv.j0 <- opt$convergence
      values.j0 <- opt$values

    }

    # optimization for j1. Starts from j1 = 2 so we always need to include weight
    # restrictions in optimization
    fmin.j1 <- .get.fmin.L2(dat = dat, dist = dist, formals.dist = formals.dist,
                            ndistparams = ndistparams, j = j1, n.inf = n.inf, N = N, dist_call)

    restrictions.j1 <- .get.restrictions(j = j1, ndistparams = ndistparams, lower = lower,
                                         upper = upper)
    ineq.j1 <- restrictions.j1$ineq
    lx.j1 <- restrictions.j1$lx
    ux.j1 <- restrictions.j1$ux
    initial.j1 <- .get.initialvals(dat, j1, ndistparams, MLE.function, lower, upper,
                                   dist, formals.dist)

    opt <- solnp(initial.j1, fun = fmin.j1, ineqfun = ineq.j1, ineqLB = 0, ineqUB = 1,
                 LB = lx.j1, UB = ux.j1, control = control)
    theta.j1 <- opt$pars <- .augment.pars(opt$pars, j1)
    L2.j1 <- opt$values[length(opt$values)] <- fmin.j1(opt$pars)
    conv.j1 <- opt$convergence
    values.j1 <- opt$values
    .printresults(opt, j1, dist, formals.dist, ndistparams)

    # observed drop in the criterion when allowing one more component
    diff.0 <- L2.j0 - L2.j1

    # parameters used for parametric bootstrap and corresponding 'Mix' object
    param.list.boot <- .get.bootstrapparams(formals.dist = formals.dist, ndistparams = ndistparams,
                                            mle.est = theta.j0, j = j0)
    Mix.boot <- Mix(dist = dist, w = param.list.boot$w, theta.list = param.list.boot$theta.list,
                    name = "Mix.boot")

    # parametric resampler: draws length(dat) observations from the fitted
    # j0-component mixture
    ran.gen <- function(dat, mle){
      rMix(n = length(dat), obj = mle)
    }

    # counting bootstrap iterations to print progression
    bs_iter <- -1

    stat <- function(dat){

      # bs_iter lives in the enclosing frame; updated via assign so the
      # counter survives across calls made by boot()
      assign("bs_iter", bs_iter + 1, inherits = TRUE)
      if(bs_iter != 0){
        # don't include first iteration as this just uses the original data
        # to calculate t0
        cat(paste("Running bootstrap iteration ", bs_iter, " testing for ", j0,
                  " components.\n", sep = ""))
      } else cat(paste("\n"))

      # in the bootstrap we have to calculate the values for j0 and j1 as the bootstrap
      # data changes in every iteration (cannot reuse last j1 values as j0)
      initial.boot0 <- .get.initialvals(dat, j0, ndistparams, MLE.function, lower, upper,
                                        dist, formals.dist)

      # calculate optimal parameters for j0
      if(j0 != 1){ # need to include weight restrictions in optimization

        fmin.boot0 <- .get.fmin.L2(dat = dat, dist = dist, formals.dist = formals.dist,
                                   ndistparams = ndistparams, j = j0, n.inf = n.inf, N = N, dist_call)

        opt.boot0 <- solnp(initial.boot0, fun = fmin.boot0, ineqfun = ineq.j0, ineqLB = 0,
                           ineqUB = 1, LB = lx.j0, UB = ux.j0, control = control)
        opt.boot0$pars <- .augment.pars(opt.boot0$pars, j0)
        L2.boot0 <- fmin.boot0(opt.boot0$pars)

      } else { # already know w = 1 (single component mixture)

        fmin.boot0 <- .get.fmin.L2.0(dat = dat, dist = dist, formals.dist = formals.dist,
                                     ndistparams = ndistparams, n.inf = n.inf, N = N, dist_call)

        opt.boot0 <- solnp(initial.boot0, fun = fmin.boot0, LB = lx.j0, UB = ux.j0,
                           control = control)
        L2.boot0 <- opt.boot0$values[length(opt.boot0$values)]

      }

      # calculate optimal parameters for j1 (always need weight restrictions since j1
      # starts from 2)
      fmin.boot1 <- .get.fmin.L2(dat = dat, dist = dist, formals.dist = formals.dist,
                                 ndistparams = ndistparams, j = j1, n.inf = n.inf, N = N, dist_call)
      initial.boot1 <- .get.initialvals(dat, j1, ndistparams, MLE.function, lower, upper,
                                        dist, formals.dist)

      opt.boot1 <- solnp(initial.boot1, fun = fmin.boot1, ineqfun = ineq.j1, ineqLB = 0,
                         ineqUB = 1, LB = lx.j1, UB = ux.j1, control = control)
      opt.boot1$pars <- .augment.pars(opt.boot1$pars, j1)
      L2.boot1 <- fmin.boot1(opt.boot1$pars)

      return(L2.boot0 - L2.boot1)
    }

    bt <- boot(dat, statistic = stat, R = B, sim = "parametric", ran.gen = ran.gen,
               mle = Mix.boot, ...)
    diff.boot <- bt$t

    # accept j0 when the observed drop lies inside the bootstrap interval
    q_lower <- quantile(diff.boot, probs = ql)
    q_upper <- quantile(diff.boot, probs = qu)

    if(diff.0 >= q_lower && diff.0 <= q_upper){
      # so that the printed result reflects that the order j.max was actually estimated
      # rather than just returned as the default
      j.max <- j.max + 1
      break
    } else if (j0 == j.max){
      break
    }

  }

  .return.paramEst(j0, j.max, dat, theta.j0, values.j0, conv.j0, dist, ndistparams, formals.dist,
                   discrete, MLE.function = NULL)
}
## Purpose: returns the squareroot of the empirical mass function needed for
## Hellinger distance calculation
##
## dat:   observed data (non-negative integers), length N
## n.max: largest support point considered; the result has length n.max + 1
## N:     sample size
##
## Support points never observed in dat get relative frequency 0.
## Fix: return the value directly — the original ended in an assignment,
## whose value is returned invisibly.
.get.f.n.sqrt <- function(dat, n.max, N){

  # relative frequency of each support point 0..n.max; match() yields NA for
  # unobserved points, which are mapped to 0 below
  f.n <- as.numeric(table(dat)[match(0:n.max, sort(unique(dat)))]/N)
  f.n[is.na(f.n)] <- 0
  sqrt(f.n)
}
## Purpose: returns the Hellinger distance function corresponding to parameters x
##
## The returned closure evaluates the squared Hellinger distance
##   H^2 = 2 - 2 * sum_k sqrt(f.n(k)) * sqrt(f_theta(k)),  k = 0..n.max,
## between the empirical mass function (passed in via its square root,
## f.n.sqrt) and the j-component mixture defined by x, which packs the first
## (j-1) weights followed by the component parameters (grouped by parameter).
##
## Fix: removed a dead `w <- x[1:(j-1)]` assignment (w was always rebuilt
## before first use).
.get.fmin.hellinger <- function(dat, dist, formals.dist, ndistparams, j, n.max, N,
                                f.n.sqrt, dist_call){

  function(x){

    # calculating square root of mixture distribution corresponding to the
    # parameters x; each parameter becomes an (n.max+1) x j matrix so
    # dist_call evaluates all components at all support points at once
    theta.list.long <- vector(mode = "list", length = ndistparams)
    names(theta.list.long) <- formals.dist
    for(i in seq_len(ndistparams)){
      theta.list.long[[i]] <- matrix(x[(i*j):((1 + i)*j-1)], nrow = (n.max+1), ncol = j, byrow = TRUE)
    }
    theta.list.long$x <- 0:n.max

    # NAs or warnings can happen as solnp sometimes uses intermediate
    # parameter values outside of the box constraints (to speed up convergence
    # and avoid numerical ill conditioning)
    mat <- suppressWarnings(do.call(dist_call, args = theta.list.long))

    # the j-th weight is implied by the first (j-1); reject infeasible weights
    # with a huge penalty value
    w <- c(x[1:(j - 1)], 1 - sum(x[1:(j - 1)]))
    if(any(w < 0)) return(sqrt(.Machine$integer.max))

    f.theta <- as.matrix(mat) %*% w
    if(any(is.na(f.theta))) return(sqrt(.Machine$integer.max))
    f.theta.sqrt <- sqrt(f.theta)

    # calculate Hellinger distance to empirical mass function
    H2 <- f.n.sqrt * f.theta.sqrt
    return(2 - 2 * sum(H2))
  }
}
## Purpose: returns the Hellinger distance function corresponding to parameters x
## when the mixture consists of only a single component
##
## The closure computes 2 - 2 * sum(sqrt(f.n) * sqrt(f_x)) over the support
## 0:n.max, where f.n.sqrt is the square root of the empirical mass function
## and f_x the component mass function evaluated at the parameters x.
.get.fmin.hellinger.0 <- function(dat, dist, formals.dist, ndistparams, n.max, N,
                                  f.n.sqrt, dist_call){
  function(x){

    # assemble the argument list for the mass function: every parameter is
    # replicated across the support points 0:n.max
    args <- vector(mode = "list", length = ndistparams)
    names(args) <- formals.dist
    for(k in seq_len(ndistparams)){
      args[[k]] <- rep(x[k], n.max + 1)
    }
    args$x <- 0:n.max

    # NAs or warnings can happen as solnp sometimes uses intermediate
    # parameter values outside of the box constraints; penalize such
    # evaluations with a huge value instead of failing
    dens <- suppressWarnings(do.call(dist_call, args = args))
    if(any(is.na(dens))) return(sqrt(.Machine$integer.max))

    # squared Hellinger distance to the empirical mass function
    2 - 2 * sum(f.n.sqrt * sqrt(dens))
  }
}
## Purpose: Hellinger distance based method of estimating the mixture complexity of a
## discrete mixture (as well as the weights and component parameters) returning
## a 'paramEst' object
##
## For each candidate order j0 the Hellinger criterion is minimized for j0 and
## j0 + 1 components; estimation stops once the decrease falls below a
## threshold ("AIC", "SBC" or a user function of n and j), or at j.max.
##
## Improvement: the square root of the empirical mass function depends only on
## the data, so it is now computed once before the loop instead of being
## recomputed on every iteration.
hellinger.disc <- function(obj, j.max = 10, threshold = "SBC", control = c(trace = 0)){

  # get standard variables (dat, N, n.max, dist, formals.dist, ndistparams,
  # lower, upper, MLE.function, dist_call, discrete, ...) into this frame
  variable_list <- .get.list(obj)
  list2env(variable_list, envir = environment())

  # check relevant inputs
  .input.checks.functions(obj, j.max = j.max, thrshHel = threshold,
                          discrete = discrete, Hankel = FALSE, param = TRUE)

  j0 <- 0

  if(is.character(threshold)){
    # otherwise it is a function and will be calculated inside the loop
    if(threshold == "AIC") thresh <- (ndistparams + 1)/N
    if(threshold == "SBC") thresh <- ((ndistparams + 1) * log(N))/(2 * N)
  }

  # square root of the empirical mass function: loop-invariant, computed once
  f.n.sqrt <- .get.f.n.sqrt(dat, n.max, N)

  repeat{

    j0 <- j0 + 1 # current complexity estimate
    j1 <- j0 + 1

    if(is.function(threshold)){
      thresh <- threshold(n = N, j = j0)
    }

    if(j0 > 1){ # if j1 was calculated in the last interation, pass it over to j0...
      theta.j0 <- theta.j1
      Hellinger.j0 <- Hellinger.j1
      conv.j0 <- conv.j1
      values.j0 <- values.j1

    } else { # ... or calculate j0 directly if j0 = 1 (j1 has not been calculated yet)
      # in this case we already know w = 1 (single component mixture)

      fmin <- .get.fmin.hellinger.0(dat = dat, dist = dist, formals.dist = formals.dist,
                                    ndistparams = ndistparams, n.max = n.max, N = N,
                                    f.n.sqrt = f.n.sqrt, dist_call)

      restrictions <- .get.restrictions(j = j0, ndistparams = ndistparams, lower = lower,
                                        upper = upper)
      lx <- restrictions$lx
      ux <- restrictions$ux
      initial.j0 <- .get.initialvals(dat, j0, ndistparams, MLE.function, lower, upper,
                                     dist, formals.dist)

      opt <- solnp(initial.j0, fun = fmin, LB = lx, UB = ux, control = control)
      .printresults(opt, j0, dist, formals.dist, ndistparams)
      theta.j0 <- opt$pars
      Hellinger.j0 <- opt$values[length(opt$values)]
      conv.j0 <- opt$convergence
      values.j0 <- opt$values

    }

    # optimization for j1. Starts from j1 = 2 so we always need to include weight
    # restrictions in optimization
    fmin <- .get.fmin.hellinger(dat = dat, dist = dist, formals.dist = formals.dist,
                                ndistparams = ndistparams, j = j1, n.max = n.max, N = N,
                                f.n.sqrt = f.n.sqrt, dist_call)

    restrictions.j1 <- .get.restrictions(j = j1, ndistparams = ndistparams, lower = lower,
                                         upper = upper)
    ineq.j1 <- restrictions.j1$ineq
    lx.j1 <- restrictions.j1$lx
    ux.j1 <- restrictions.j1$ux
    initial.j1 <- .get.initialvals(dat, j1, ndistparams, MLE.function, lower, upper, dist,
                                   formals.dist)

    opt <- solnp(initial.j1, fun = fmin, ineqfun = ineq.j1, ineqLB = 0, ineqUB = 1,
                 LB = lx.j1, UB = ux.j1, control = control)
    theta.j1 <- opt$pars <- .augment.pars(opt$pars, j1)
    Hellinger.j1 <- opt$values[length(opt$values)] <- fmin(opt$pars)
    conv.j1 <- opt$convergence
    values.j1 <- opt$values
    .printresults(opt, j1, dist, formals.dist, ndistparams)

    if((Hellinger.j0 - Hellinger.j1) < thresh){
      # so that the printed result reflects that the order j.max was actually estimated
      # rather than just returned as the default
      j.max <- j.max + 1
      break
    } else if(j0 == j.max){
      break
    }

  }
  .return.paramEst(j0, j.max, dat, theta.j0, values.j0, conv.j0, dist, ndistparams, formals.dist,
                   discrete, MLE.function)
}
## Purpose: Hellinger distance based method of estimating the mixture complexity of a
##          discrete mixture (as well as the weights and component parameters) returning
##          a 'paramEst' object (using bootstrap)
##
## Arguments (NOTE(review): inferred from usage in this function -- confirm
## against the package-level documentation):
##   obj     : data object consumed by .get.list(), which populates dat, dist,
##             ndistparams, formals.dist, lower, upper, MLE.function, n.max, N,
##             discrete, dist_call in this environment
##   j.max   : largest mixture complexity tried before giving up
##   B       : number of parametric bootstrap replicates per tested order
##   ql, qu  : quantiles of the bootstrap distribution of the test statistic;
##             the search stops once the observed statistic lies in [ql, qu]
##   control : control argument passed through to solnp()
##   ...     : further arguments passed through to boot()
hellinger.boot.disc <- function(obj, j.max = 10, B = 100, ql = 0.025, qu = 0.975,
                                control = c(trace = 0), ...){
  # get standard variables
  variable_list <- .get.list(obj)
  list2env(variable_list, envir = environment())
  # check relevant inputs
  .input.checks.functions(obj, j.max = j.max, B = B, ql = ql, qu = qu,
                          discrete = discrete, Hankel = FALSE, param = TRUE)
  j0 <- 0
  repeat{
    j0 <- j0 + 1 # current complexity estimate
    j1 <- j0 + 1
    f.n.sqrt <- .get.f.n.sqrt(dat, n.max, N)
    if(j0 > 1){ # if j1 was calculated in the last iteration, pass it over to j0...
      theta.j0 <- theta.j1
      Hellinger.j0 <- Hellinger.j1
      conv.j0 <- conv.j1
      values.j0 <- values.j1
      # also need to pass over the restrictions as they will be used in the bootstrap
      ineq.j0 <- ineq.j1
      lx.j0 <- lx.j1
      ux.j0 <- ux.j1
    } else { # ... or calculate j0 directly if j0 = 1 (j1 has not been calculated yet)
      # in this case we already know w = 1 (single component mixture)
      fmin.j0 <- .get.fmin.hellinger.0(dat = dat, dist = dist, formals.dist = formals.dist,
                                       ndistparams = ndistparams, n.max = n.max, N = N,
                                       f.n.sqrt = f.n.sqrt, dist_call)
      restrictions.j0 <- .get.restrictions(j = j0, ndistparams = ndistparams, lower = lower,
                                           upper = upper)
      lx.j0 <- restrictions.j0$lx
      ux.j0 <- restrictions.j0$ux
      initial.j0 <- .get.initialvals(dat, j0, ndistparams, MLE.function, lower, upper, dist,
                                     formals.dist)
      opt <- solnp(initial.j0, fun = fmin.j0, LB = lx.j0, UB = ux.j0, control = control)
      .printresults(opt, j0, dist, formals.dist, ndistparams)
      theta.j0 <- opt$pars
      # solnp's $values holds the objective per major iteration; the last
      # entry is the attained minimum
      Hellinger.j0 <- opt$values[length(opt$values)]
      conv.j0 <- opt$convergence
      values.j0 <- opt$values
    }
    # optimization for j1. Starts from j1 = 2 so we always need to include weight
    # restrictions in optimization
    fmin.j1 <- .get.fmin.hellinger(dat = dat, dist = dist, formals.dist = formals.dist,
                                   ndistparams = ndistparams, j = j1, n.max = n.max, N = N,
                                   f.n.sqrt = f.n.sqrt, dist_call)
    restrictions.j1 <- .get.restrictions(j = j1, ndistparams = ndistparams, lower = lower,
                                         upper = upper)
    ineq.j1 <- restrictions.j1$ineq
    lx.j1 <- restrictions.j1$lx
    ux.j1 <- restrictions.j1$ux
    initial.j1 <- .get.initialvals(dat, j1, ndistparams, MLE.function, lower, upper, dist,
                                   formals.dist)
    opt <- solnp(initial.j1, fun = fmin.j1, ineqfun = ineq.j1, ineqLB = 0, ineqUB = 1,
                 LB = lx.j1, UB = ux.j1, control = control)
    # .augment.pars() presumably completes the parameter vector (e.g. the
    # implied last weight) -- confirm; the objective is then re-evaluated at
    # the completed vector
    theta.j1 <- opt$pars <- .augment.pars(opt$pars, j1)
    Hellinger.j1 <- opt$values[length(opt$values)] <- fmin.j1(opt$pars)
    conv.j1 <- opt$convergence
    values.j1 <- opt$values
    .printresults(opt, j1, dist, formals.dist, ndistparams)
    # observed test statistic: improvement in Hellinger distance when going
    # from j0 to j0 + 1 components
    diff.0 <- Hellinger.j0 - Hellinger.j1
    # parameters used for parametric bootstrap and corresponding 'Mix' object
    param.list.boot <- .get.bootstrapparams(formals.dist = formals.dist, ndistparams = ndistparams,
                                            mle.est = theta.j0, j = j0)
    Mix.boot <- Mix(dist = dist, w = param.list.boot$w, theta.list = param.list.boot$theta.list,
                    name = "Mix.boot")
    # parametric-bootstrap sampler: draw from the fitted j0-component mixture
    ran.gen <- function(dat, mle){
      rMix(n = length(dat), obj = mle)
    }
    # counting bootstrap iterations to print progression;
    # incremented via assign(..., inherits = TRUE) from inside stat()
    bs_iter <- -1
    stat <- function(dat){
      assign("bs_iter", bs_iter + 1, inherits = TRUE)
      if(bs_iter != 0){
        # don't include first iteration as this just uses the original data
        # to calculate t0
        cat(paste("Running bootstrap iteration ", bs_iter, " testing for ", j0,
                  " components.\n", sep = ""))
      } else cat(paste("\n"))
      f.n.sqrt.boot <- .get.f.n.sqrt(dat, n.max, N)
      # in the bootstrap we have to calculate the values for j0 and j1 as the bootstrap
      # data changes in every iteration (cannot reuse last j1 values as j0)
      initial.boot0 <- .get.initialvals(dat, j0, ndistparams, MLE.function, lower, upper, dist,
                                        formals.dist)
      # calculate optimal parameters for j0
      if(j0 != 1){ # need to include weight restrictions in optimization
        fmin.boot0 <- .get.fmin.hellinger(dat = dat, dist = dist, formals.dist = formals.dist,
                                          ndistparams = ndistparams, j = j0, n.max = n.max, N = N,
                                          f.n.sqrt = f.n.sqrt.boot, dist_call)
        opt.boot0 <- solnp(initial.boot0, fun = fmin.boot0, ineqfun = ineq.j0, ineqLB = 0,
                           ineqUB = 1, LB = lx.j0, UB = ux.j0, control = control)
        opt.boot0$pars <- .augment.pars(opt.boot0$pars, j0)
        hellinger.boot0 <- fmin.boot0(opt.boot0$pars)
      } else { # already know w = 1 (single component mixture)
        fmin.boot0 <- .get.fmin.hellinger.0(dat = dat, dist = dist, formals.dist = formals.dist,
                                            ndistparams = ndistparams, n.max = n.max, N = N,
                                            f.n.sqrt = f.n.sqrt.boot, dist_call)
        opt.boot0 <- solnp(initial.boot0, fun = fmin.boot0, LB = lx.j0, UB = ux.j0,
                           control = control)
        hellinger.boot0 <- opt.boot0$values[length(opt.boot0$values)]
      }
      # calculate optimal parameters for j1 (always need weight restrictions since j1
      # starts from 2)
      fmin.boot1 <- .get.fmin.hellinger(dat = dat, dist = dist, formals.dist = formals.dist,
                                        ndistparams = ndistparams, j = j1, n.max = n.max, N = N,
                                        f.n.sqrt = f.n.sqrt.boot, dist_call)
      initial.boot1 <- .get.initialvals(dat, j1, ndistparams, MLE.function, lower, upper,
                                        dist, formals.dist)
      opt.boot1 <- solnp(initial.boot1, fun = fmin.boot1, ineqfun = ineq.j1, ineqLB = 0,
                         ineqUB = 1, LB = lx.j1, UB = ux.j1, control = control)
      opt.boot1$pars <- .augment.pars(opt.boot1$pars, j1)
      hellinger.boot1 <- fmin.boot1(opt.boot1$pars)
      # bootstrap replicate of the test statistic
      return(hellinger.boot0 - hellinger.boot1)
    }
    bt <- boot(dat, statistic = stat, R = B, sim = "parametric", ran.gen = ran.gen,
               mle = Mix.boot, ...)
    diff.boot <- bt$t
    q_lower <- quantile(diff.boot, probs = ql)
    q_upper <- quantile(diff.boot, probs = qu)
    # accept order j0 if the observed improvement lies inside the bootstrap
    # acceptance interval; otherwise move on to the next candidate order
    if(diff.0 >= q_lower && diff.0 <= q_upper){
      # so that the printed result reflects that the order j.max was actually estimated
      # rather than just returned as the default
      j.max <- j.max + 1
      break
    } else if (j0 == j.max){
      break
    }
  }
  .return.paramEst(j0, j.max, dat, theta.j0, values.j0, conv.j0, dist, ndistparams, formals.dist,
                   discrete, MLE.function)
}
|
3535c4c624659d2e29493db5e86a8acdd6c39b3a
|
dcda07c019c48d7a64149bde01a0bad7b1982d10
|
/R_code.R
|
b3b399a0cd23870e5b58b4701dfda267623ad3e8
|
[] |
no_license
|
Shrunket/cab-fare-predict-ds
|
8a01185999450f633c65c0e84181600159ff8cab
|
cf3de207272b7d44d4654d111e0d214727a75b5a
|
refs/heads/master
| 2020-11-30T10:24:39.819058
| 2020-01-03T09:34:48
| 2020-01-03T09:34:48
| 230,377,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,064
|
r
|
R_code.R
|
# Cab fare prediction -- part 1: load data, treat missing values, remove outliers.
# NOTE(review): rm(list=ls()) and a hard-coded absolute setwd() make the script
# non-portable; prefer project-relative paths.
rm(list=ls())
getwd()
setwd('C:/Users/Samruddhi/Desktop/Edwisor Project 1')
# Load the raw training and test sets.
train_data= read.csv('train_cab.csv')
test_data= read.csv('test.csv')
head(train_data)
head(test_data)
str(train_data)
str(test_data)
# fare_amount is coerced via character first (guards against factor-level
# coercion; malformed entries become NA) -- presumably the raw column is
# non-numeric, confirm with str() output above.
train_data$fare_amount= as.numeric(as.character(train_data$fare_amount))
# Print summaries of the numeric columns of both sets.
num_col= c('fare_amount','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude','passenger_count')
for (i in num_col){
  print(summary(train_data[i]))
}
test_num_col= c('pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude','passenger_count')
for (i in test_num_col){
  print(summary(test_data[i]))
}
hist(train_data$passenger_count, main='Histogram of passenger_count')
################################### Missing Value Analysis###################################################
# Spot-check a few known fare values (actuals noted in the trailing comments)
# to judge which imputation method reproduces them best.
train_data[111,1] #9
train_data[569,1] #6.5
train_data[11000,1] #9.7
train_data[3,1] #5.7
train_data[6038,1] #6
#train_data$fare_amount[is.na(train_data$fare_amount)]= mean(train_data$fare_amount,na.rm = TRUE) # 15.02
#train_data$fare_amount[is.na(train_data$fare_amount)]= median(train_data$fare_amount,na.rm = TRUE) # 8.50
library(dplyr)
library(DMwR)
# kNN imputation (k = 5) chosen because its imputed values were closest to
# the spot-checked actuals above; passenger_count instead takes the
# median-based kNN imputation.
train_data_1= knnImputation(train_data,k=5) #Value of KNN with k=5 is more near to the actual values than the median
train_data_2= knnImputation(train_data,k=5,meth='median')
train_data_1$passenger_count=train_data_2$passenger_count
str(train_data_1)
train_data= train_data_1
summary(train_data$fare_amount)
summary(train_data$passenger_count)
summary(train_data)
############################################# Outlier Analysis ###############################################
boxplot(train_data$passenger_count, main='Boxplot of passenger_count')
boxplot.stats(train_data$fare_amount)
barplot(table(train_data$fare_amount),ylim = c(0,100))
# Keep only plausible fares (0, 100) and passenger counts [1, 8).
train_data= train_data[train_data$fare_amount > 0 & train_data$fare_amount < 100, ]
train_data= train_data[train_data$passenger_count < 8 & train_data$passenger_count >=1,]
summary(train_data)
boxplot(test_data$passenger_count)
# Drop rows whose fare/coordinate values fall outside the boxplot whiskers.
num_col= c('fare_amount','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude')
for(i in num_col){
  print(i)
  val = train_data[,i][train_data[,i] %in% boxplot.stats(train_data[,i])$out]
  print(length(val))
  train_data = train_data[which(!train_data[,i] %in% val),]
}
################################################################################################################
############################################# Feature Engineering #############################################
# Drop a malformed datetime row (the value 43 is presumably a data-entry
# artifact -- confirm against the raw file).
train_data=train_data[!train_data$pickup_datetime==43,]
library(geosphere)
# Great-circle distance (in metres) between pickup and dropoff coordinates,
# via geosphere::distHaversine.
distance= function(pickup_long,pickup_lat,dropoff_long,dropoff_lat){
  trip=distHaversine(c(pickup_long,pickup_lat),c(dropoff_long,dropoff_lat))
  return(trip)
}
# Row-wise trip distance in miles (1609.344 m per mile).
# NOTE(review): distHaversine is vectorized over coordinate matrices; these
# loops could be replaced by a single vectorized call.
for (i in 1:nrow(train_data)){
  attempt=distance(train_data$pickup_longitude[i],train_data$pickup_latitude[i],train_data$dropoff_longitude[i],train_data$dropoff_latitude[i])
  train_data$trip_distance[i]= attempt/1609.344
}
for (i in 1:nrow(test_data)){
  attempt=distance(test_data$pickup_longitude[i],test_data$pickup_latitude[i],test_data$dropoff_longitude[i],test_data$dropoff_latitude[i])
  test_data$trip_distance[i]= attempt/1609.344
}
summary(train_data$trip_distance)
library(lubridate)
# Decompose the pickup timestamp into calendar/time-of-day features.
# NOTE(review): lubridate accessors are vectorized; the loops are unnecessary.
for (i in 1:nrow(train_data)){
  train_data$year[i]= year(train_data$pickup_datetime[i])
  train_data$month[i]= month(train_data$pickup_datetime[i])
  train_data$day[i]= day(train_data$pickup_datetime[i])
  train_data$hour[i]= hour(train_data$pickup_datetime[i])
  train_data$minute[i]= minute(train_data$pickup_datetime[i])
  train_data$second[i]= second(train_data$pickup_datetime[i])
}
for (i in 1:nrow(test_data)){
  test_data$year[i]= year(test_data$pickup_datetime[i])
  test_data$month[i]= month(test_data$pickup_datetime[i])
  test_data$day[i]= day(test_data$pickup_datetime[i])
  test_data$hour[i]= hour(test_data$pickup_datetime[i])
  test_data$minute[i]= minute(test_data$pickup_datetime[i])
  test_data$second[i]= second(test_data$pickup_datetime[i])
}
# Drop the raw datetime columns (now fully decomposed) and zero-distance trips.
train_data=train_data[,-c(2)]
test_data= test_data[,-c(1)]
train_data= train_data[!train_data$trip_distance==0,]
#write.csv(train_data,'final_train.csv',row.names = F)
########################################### Feature Selection #################################################
library(corrplot)
con_var= c('fare_amount','pickup_longitude','pickup_latitude',"dropoff_longitude","dropoff_latitude","trip_distance","year","month","day","hour","minute","second")
corrplot(cor(train_data),method='pie')
cor(train_data)
# Drop two highly correlated coordinate columns (positions 2 and 4).
train_data= train_data[,-c(2,4)]
########################################### Feature Scaling #####################################################
hist(train_data$passenger_count)
boxplot(train_data$passenger_count)
std_var=c('pickup_longitude','pickup_latitude',"dropoff_longitude","dropoff_latitude")
norm_col=c('trip_distance','year','month','day','hour','minute','second')
# Min-max normalisation on norm_col
for(i in norm_col){
  print(i)
  train_data[,i] = (train_data[,i] - min(train_data[,i]))/
    (max(train_data[,i] - min(train_data[,i])))
}
# Standardisation (z-score) on std_var
for(i in std_var){
  print(i)
  train_data[,i] = (train_data[,i] - mean(train_data[,i]))/
    (sd(train_data[,i] ))
}
# NOTE(review): corrgram() belongs to the 'corrgram' package, which is never
# loaded here (only corrplot is); this call will fail unless corrgram is
# attached elsewhere.
corrgram(train_data)
########################################### Model Development##################################################
# Data Split using cross validation / k-fold cross validation
library(tidyverse)
library(caret)
library(party)
set.seed(123)
# NOTE(review): method = 'oob' (out-of-bag resampling) is only meaningful for
# bagged models such as random forests; the other algorithms listed below
# were presumably run with a different trainControl.
train_con= trainControl(method = 'oob',number = 10)
# Multiple Algorithms
# NOTE(review): despite its name, LR_model is fit with method = 'rf'
# (random forest); the name is a leftover from an earlier run.
LR_model= train(fare_amount~.,data = train_data,method='rf',trControl= train_con)
LR_model
# Resampling results recorded from earlier runs of each algorithm:
# Linear Regression: RMSE: 2.0925 Rsquared:0.7015 MAE: 1.4996
# Logistic Regression: RMSE: 2.0925 Rsquared:0.7015 MAE: 1.4996
# Decision Tree: RMSE: 2.0788 Rsquared:0.70539 MAE: 1.4822
# KNN Algorithm : RMSE: 3.6384 Rsquared:0.1022 MAE: 2.8172
# Random Forest: RMSE: 1.9454 Rsquared:0.7419 mtry:7 MAE:
# Here we can proceed with model building using Random Forest
# Hyper-parameter tuning ----
# Choose the optimal value for mtry via caret's grid search.
set.seed(1234)
# Number of trees for the random forest; 500 is the randomForest default.
# (The original code referenced an undefined `num_tree`, which aborted the
# script with "object 'num_tree' not found".)
num_tree <- 500
tune_Grid <- expand.grid(.mtry = 1:10)
rf_mtry <- train(fare_amount ~ .,
                 data = train_data,
                 method = "rf",
                 tuneGrid = tune_Grid,
                 trControl = train_con,
                 importance = TRUE,
                 ntree = num_tree)
print(rf_mtry)
# Grid search selected mtry = 4.
library(randomForest)
# Refit the final model with the tuned mtry and score the test set.
RF_model <- randomForest(fare_amount ~ ., train_data, mtry = 4, importance = TRUE)
RF_predict <- predict(RF_model, test_data)
RF_model
summary(RF_predict)
fare_amount_pred <- cbind(test_data, RF_predict)
# Re-read the raw test file so the exported output keeps the original
# columns (the engineered copy above had columns dropped/transformed).
test_data <- read.csv('test.csv')
test_data <- cbind(test_data, RF_predict)
colnames(test_data)[colnames(test_data) == 'RF_predict'] <- 'fare_amount'
write.csv(test_data, 'final_output.csv', row.names = FALSE)
|
b139ac441fcbbaf9775a0637b21cc1865f40ad4a
|
103f61b6cbdd466a88528aa6e3b76f196f637ad3
|
/R/allele_hist.r
|
7e682feb0f73df3f1efba4b88e50f26d68e72119
|
[] |
no_license
|
cran/genomatic
|
304e6f31443f3b874d5d40c7b2754aa81db1abab
|
9e96f6e987377ce10bd650fd50449c7a8824ea48
|
refs/heads/master
| 2020-05-01T06:14:48.685106
| 2010-01-05T00:00:00
| 2010-01-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,000
|
r
|
allele_hist.r
|
allele_hist <- function(gmtc.a, allele.df, xlim=NULL)
{
# Plot a histogram of observed allele sizes for one locus, coloring each
# 0.1-bp histogram bar by the allele bin it falls into.
#
# gmtc.a is a locus (two columns) of a genomatic file.
#
# allele.df is a data.frame containing bin information; column 1 holds the
# bin label and columns 8/9 the bin's lower/upper size limits (inferred
# from the subsetting below -- confirm against the caller).
#
# Legend label: first column name minus its final character (presumably a
# per-column allele suffix -- confirm).
locus <- names(gmtc.a)[1]
locus <- substr(locus, 1, nchar(locus)-1)
#
# Round bin limits outwards to the nearest 0.1 bp.
allele.df[,8] <- (floor(10*allele.df[,8]))/10
allele.df[,9] <- (ceiling(10*allele.df[,9]))/10
#
# Widen bins of zero width.
allele.df[allele.df[,9]-allele.df[,8] == 0,8] <- allele.df[allele.df[,9]-allele.df[,8] == 0,8] - 0.1
#
# Pool both allele columns into one vector of observed sizes.
alleles <- as.vector(na.omit(unlist(gmtc.a[,1:2])))
a.max <- ceiling(max(alleles, na.rm=TRUE))
a.min <- floor(min(alleles, na.rm=TRUE))
#
# bincol columns: 1 = break position (0.1-bp grid), 2 = probe point just
# right of the break, 3 = bar count, 4 = bin label (later recoded to a
# plotting color).
bincol <- seq(a.min, a.max, by=0.1)
bincol <- cbind(bincol, bincol+0.05)
#
ahist <- hist(alleles, breaks=bincol[,1], plot=FALSE)
bincol <- cbind(bincol, c(ahist$counts, 0))
bincol <- cbind(bincol, rep(NA, length.out=nrow(bincol)))
# Bin the bars: a bar belongs to a bin when its probe point lies within
# the bin's [lower, upper] limits.
for (i in 1:nrow(allele.df))
{
bincol[,4][bincol[,2] >= allele.df[i,8] & bincol[,2] <= allele.df[i,9]] <- allele.df[i,1]
}
# Include left bar: extend each bin one bar to the left.
for (i in 1:(nrow(bincol)-1))
{
if (is.na(bincol[i,4]) & is.na(bincol[(i+1),4])==FALSE)
{bincol[i,4] <- bincol[(i+1),4]}
}
bincol <- cbind(bincol, rep(NA, length.out=nrow(bincol)))
binlev <- levels(as.factor(bincol[,4]))
if (binlev[1] == "0")
{binlev <- binlev[-1]}
# Apply alternating colors (palette indices 2, 3, 4) to consecutive bins.
binc <- rep(c(2,3,4), length.out=length(binlev))
for (i in 1:length(binlev))
{
bincol[bincol[,4]==binlev[i],4] <- binc[i]
}
# Bars in no bin are drawn in color 1.
bincol[,4][is.na(bincol[,4])] <- 1
if (is.null(xlim))
{
xlim <- range(bincol[,1])
} else {xlim <- xlim}
# Draw the colored histogram with custom axes and the locus name as legend.
hist(alleles, breaks=bincol[,1], col=bincol[,4],
border=bincol[,4], axes=FALSE,
main='', xlab='Mobility-Based Size (bp)', xlim=xlim)
axis(2)
Axis(at=seq(a.min,a.max, by=1),
side=1, cex.axis=0.6, las=2)
legend('topright', locus)
}
|
e6f9bd50777d56245146b8a85aa974488cfb507f
|
82130086817e8fa9291ad9cf095af6817a523282
|
/testing_scripts/scripts/test_update_B.R
|
6f9752067d452fe5c3b1cc84e1e94a9d1caf50c7
|
[] |
no_license
|
skdeshpande91/GAM_SSL_SSGL
|
0b01d95f38ac80d68029708a97588aa3cc53fe3e
|
0b086919eacb2d0ca91c01111e9ac89ae64b02d4
|
refs/heads/master
| 2022-07-18T14:56:35.557757
| 2020-05-15T22:41:14
| 2020-05-15T22:41:14
| 263,340,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,093
|
r
|
test_update_B.R
|
# First test of update_B
# Simulation harness: build a sparse spline-regression problem (only
# covariates 1, 3 and 5 active) and exercise the C++ routine
# test_update_B() (compiled below from src/test_update_B.cpp) at several
# penalty levels, counting false/true selections by column norm.
library(Rcpp)
library(RcppArmadillo)
library(MASS)
library(splines)
source("scripts/prepare_X_Phi.R")
set.seed(129)
# Problem dimensions: n_train/n_test observations, p covariates, D basis
# functions per covariate.
n_train <- 100
n_test <- 100
p <- 200
X_train_orig <- matrix(runif(n_train*p, 0,1), nrow = n_train, ncol= p)
X_test_orig <- matrix(runif(n_test*p, 0,1), nrow = n_test, ncol = p)
D <- 10
# prepare_X_Phi() (project script) builds the spline design arrays; "n"
# presumably selects natural splines (splines::ns) -- confirm in the script.
tmp_data <- prepare_X_Phi(X_train_orig, X_test_orig, D = D, spline_type = "n")
n <- n_train
X <- tmp_data$X_train
Phi <- tmp_data$Phi_train
#######
# Generate some data
########
sigma <- 0.75
# True coefficient matrix is sparse: only columns 1, 3 and 5 are nonzero.
B_true <- matrix(0,nrow = D-1, ncol = p)
B_true[,1] <- runif(D-1, -10,10)
B_true[,3] <- runif(D-1, -10,10)
B_true[,5] <- runif(D-1, -10,10)
# Response: contributions of the three active covariates plus Gaussian noise.
R <- Phi[,,1] %*% B_true[,1] + Phi[,,3] %*% B_true[,3] + Phi[,,5] %*% B_true[,5] + sigma * rnorm(n,0,1)
# Try a few different values of xi0
xi1 <- 1
sourceCpp("src/test_update_B.cpp")
B_init <- matrix(0, nrow = D-1, ncol = p)
theta <- c(0.25,0.25,0.25, 0.25)
# Column-wise Euclidean norm; a nonzero norm marks a selected covariate.
# NOTE(review): this masks base::norm() for the rest of the session.
norm <- function(x){sqrt(sum(x*x))}
B_true_norm <- apply(B_true, MARGIN = 2, FUN = norm)
# Penalty multiplier 1: inspect norm ranges of true/false selections.
test_0 <- test_update_B(B_init, R, Phi,sigma*sigma, xi1, 1 * sqrt(D-1) * xi1, theta, verbose = FALSE, max_iter = 5000)
B_norm0 <- apply(test_0$B, MARGIN = 2, FUN = norm)
range(B_norm0[B_norm0 != 0 & B_true_norm != 0])
range(B_norm0[B_norm0 != 0 & B_true_norm == 0])
# Penalty multiplier 10.
test_1 <- test_update_B(B_init, R, Phi,sigma*sigma, xi1, 10 * sqrt(D-1) * xi1, theta, verbose = FALSE, max_iter = 5000)
B_norm1 <- apply(test_1$B, MARGIN = 2, FUN = norm)
sum(B_norm1 != 0 & B_true_norm == 0)
range(B_norm1[B_norm1 != 0 & B_true_norm != 0])
range(B_norm1[B_norm1 != 0 & B_true_norm == 0])
# Penalty multiplier 100.
test_3 <- test_update_B(B_init, R, Phi,sigma*sigma, xi1, 100 * sqrt(D-1) * xi1, theta, verbose = FALSE, max_iter = 5000)
B_norm3 <- apply(test_3$B, MARGIN = 2, FUN = norm)
sum(B_norm3 != 0 & B_true_norm == 0)
range(B_norm3[B_norm3 != 0 & B_true_norm != 0])
# Penalty multiplier 500: also count missed true covariates.
test_4 <- test_update_B(B_init, R, Phi,sigma*sigma, xi1, 500 * sqrt(D-1) * xi1, theta, verbose = FALSE, max_iter = 5000)
B_norm4 <- apply(test_4$B, MARGIN = 2, FUN = norm)
sum(B_norm4 != 0 & B_true_norm == 0)
sum(B_norm4 == 0 & B_true_norm != 0)
|
9f3541f1cecce3ebb06325eeda082679c9e21536
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/metaSEM/R/pattern.n.R
|
373560aeb27b1334bbaadcd7a37cc7e76cad27cd
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 126
|
r
|
pattern.n.R
|
pattern.na <- function(x, show.na=TRUE) {
  # For a list of equal-length vectors --x--, count position-wise how many
  # elements are missing (show.na = TRUE) or observed (show.na = FALSE).
  na.count <- Reduce(`+`, lapply(x, is.na))
  if (show.na) {
    na.count
  } else {
    length(x) - na.count
  }
}
|
28dcaf4083078655f5394527d59f3ff0d1e807dc
|
bbc3754d8900e36146bf80d3cc98f5c36c450cd2
|
/man/get_lambda.Rd
|
f96c17bda9eed7d2704c6b8afbcf552b42dcc09b
|
[] |
no_license
|
netterie/cantrance
|
e9782750337bffd0476b76bfed627df0dddf6c5f
|
a2b29ed44c6dfd50858be06ec03accb2264b85fa
|
refs/heads/master
| 2021-01-25T07:34:45.788607
| 2015-08-26T02:51:45
| 2015-08-26T02:51:45
| 41,400,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
rd
|
get_lambda.Rd
|
\name{get_lambda}
\alias{get_lambda}
\title{Takes in parameter info for an exponential and returns the rate(s)}
\usage{get_lambda(param, values, k = NA)}
\arguments{
\item{param}{String specifying the type of parameter to be used
in the construction of the exponential curve. May
be "rate", "median", "mean", or "ksurv".}
\item{values}{Vector of values corresponding to the designated
parameter.}
\item{k}{If "param" is "ksurv", give time for k-time survival}
}
\value{Vector of rates}
\author{Jeanette Birnbaum & Leslie Mallinger}
|
0c3b4b4f86da2004a5410bb801a07d8b78395160
|
6782c085e03463fc96f87f344296509244f7245a
|
/calibration_toy.R
|
bd0fa4c951e794ac7d2bd1df3b3dedcc1e2af184
|
[] |
no_license
|
jake-coleman32/research_code
|
83f88b768a254d2679312ddd89590b4eb7e3c0be
|
2ddf9dbc49490d94fb1eb512d14f3bc944a469cd
|
refs/heads/master
| 2020-05-30T06:09:59.859710
| 2018-02-14T01:37:14
| 2018-02-14T01:37:14
| 82,625,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 898
|
r
|
calibration_toy.R
|
# Toy calibration example: observed chemical concentrations decaying over
# time, plotted against an MLE-fitted exponential and two competing models.

# Ten design time points, each measured in triplicate.
obs_time <- c(0.11, 0.432, 0.754, 1.077, 1.399, 1.721, 2.043, 2.366, 2.688, 3.010)
concentration <- c(
  4.73, 4.72, 4.234,
  3.177, 2.966, 3.653,
  1.970, 2.267, 2.084,
  2.079, 2.409, 2.371,
  1.908, 1.665, 1.685,
  1.773, 1.603, 1.922,
  1.370, 1.661, 1.757,
  1.868, 1.505, 1.638,
  1.390, 1.275, 1.679,
  1.461, 1.157, 1.530
)
time_rep <- rep(obs_time, each = 3)

# First figure: data with the MLE exponential fit only.
plot(time_rep, concentration, pch = 1, main = "MLE Fit Only", xlab = 't',
     ylab = 'Chemical Concentration', cex = .8, ylim = c(0, max(concentration) + .5))
curve_grid <- seq(0, 3, length.out = 50)
lines(curve_grid, 5 * exp(-0.63 * curve_grid), lwd = 2)

# Second figure: data with the MLE fit plus two alternative models.
plot(time_rep, concentration, pch = 1, main = "Two-Model Calibration Toy", xlab = 't',
     ylab = 'Chemical Concentration', ylim = c(0, max(concentration) + .5))
curve_grid <- seq(0, 3, length.out = 50)
lines(curve_grid, 5 * exp(-0.63 * curve_grid), lwd = 2)
lines(curve_grid, 2.7 * exp(-0.24 * curve_grid), col = 'red', lwd = 2)
lines(curve_grid, 3.5 * exp(-1.7 * curve_grid) + 1.5, col = 'blue', lwd = 2)
legend('topright', c("Model 1 (MLE)", "Model 2"), lwd = 2, col = c('black', 'red'))
|
b0d4aceae922b53f30f9483cd462eaf8e6195c07
|
a3195ac49f9899167e2406bc83df197fe594906f
|
/sim_setup_ES_Factorial.R
|
dbe468246d303711e071b5e697ff611cb2675920
|
[] |
no_license
|
katiecoburn/effect_size_proj
|
d751cb4808f92482845f0924c43d0b3f7f9c1e92
|
cc0a30b440c134f964f16820a021bfa7fbe18641
|
refs/heads/main
| 2023-02-25T03:29:25.925122
| 2021-02-02T22:24:55
| 2021-02-02T22:24:55
| 323,428,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,991
|
r
|
sim_setup_ES_Factorial.R
|
# Simulate a 2x2 factorial (treatment x block) design with k = 10 replicates
# per cell, then fit ANOVA and linear models to the simulated outcomes.
library(MASS)
library(tidyverse)
library(mvtnorm)
grandmean=0
# NOTE(review): alpha is a scalar but alpha[2] is indexed in the loop below,
# which yields NA -- every control-group outcome becomes NA and those rows
# are silently dropped by aov()/lm(). Presumably alpha was meant to hold two
# treatment effects, e.g. c(0.5, -0.5); confirm the intended values.
alpha= 0.5
#Case with m=2
i=2
j=2
k=10
# NOTE(review): rnorm's third argument is the standard deviation, so these
# "var_*" values are used as SDs, not variances.
var_error= 0.5
var_block=0.5
var_inter=0
var_total=1
beta= rnorm(j, 0, var_block)
interaction= rnorm(i*j, 0, var_inter)
error = rnorm (i*j*k, 0, var_error)
Y_trt_1= c()
Y_trt_2= c()
Y_cnt_1=c()
Y_cnt_2 = c()
#note that i should go to 40
# NOTE(review): the same error[i] is reused for all four cells of iteration
# i, and only the first 10 of the i*j*k = 40 errors are consumed.
for(i in 1:10) {
  trt_1 = grandmean + alpha[1] + beta[1] + interaction[1] + error[i]
  Y_trt_1 = rbind(Y_trt_1, trt_1)
  trt_2 = grandmean + alpha[1] + beta[2] + interaction[2] + error[i]
  Y_trt_2 = rbind(Y_trt_2, trt_2)
  cnt_1 = grandmean + alpha[2] + beta[1] + interaction[3] + error[i]
  Y_cnt_1 = rbind(Y_cnt_1, cnt_1)
  cnt_2 = grandmean + alpha[2] + beta[2] + interaction[4] + error[i]
  Y_cnt_2 = rbind(Y_cnt_2, cnt_2)
}
# Stack the four cells into a long data frame with Group/Block labels.
data<- c(Y_trt_1, Y_trt_2, Y_cnt_1, Y_cnt_2)
data2<- as.data.frame(data)
data2$group<- c(rep("T", 20), rep("C", 20))
data2$block<- c( rep(1, 10), rep(2, 10), rep(1, 10), rep(2, 10))
names(data2)<- c("outcome", "Group", "Block")
model1<- aov(outcome ~ Group + Block + Group:Block, data=data2)
summary(model1)
model2<- lm(outcome ~ Group + Block + Group:Block, data=data2)
summary(model2)
# #####################################################
# ######Covariance Matrix second option###############
# set.seed=1234
#
# sigma<- matrix( c(0.5, 0.6, 0.9, 0.9,
#                   0.6, 0.5, 0.9, 0.9,
#                   0.9, 0.9, 0.5, 0.6,
#                   0.9, 0.9, 0.6, 0.5), ncol=4)
#
#
# y<-rmvnorm(n=10, mean = c(1.1, 0.5, 0.2, 0.6), sigma=sigma)
#
# data<- as.data.frame(y)
#
# data2<- c(data$V1, data$V2, data$V3, data$V4)
#
# data2<- as.data.frame(data2)
#
# data2$group<- c(rep("T", 20), rep("C", 20))
#
# data2$block<- c( rep(1, 10), rep(2, 10), rep(1, 10), rep(2, 10))
#
# names(data2)<- c("outcome", "Group", "Block")
#
# model1<- aov(outcome ~ Group + Block + Group:Block, data=data2)
#
# summary(model1)
#
# model2<- lm(outcome ~ Group + Block + Group:Block, data=data2)
# summary(model2)
|
8a22e8936ac6de7f2ca4b2a34d8399a7e303d906
|
67e4c0d407570dc986de96c92b9d92ec9da43eaf
|
/scripts/row.extractor.R
|
19f4f9e8692770f3bab8481e9566034673dc8433
|
[] |
no_license
|
mrdwab/2657-R-Functions
|
eb1890121a95d41504544d819ec06ae91f86a6f4
|
881cdd962a54228845fb02c880940d1d2ad5e2e7
|
refs/heads/master
| 2021-01-21T07:53:38.561129
| 2013-01-01T08:36:39
| 2013-01-01T08:36:39
| 4,145,944
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,419
|
r
|
row.extractor.R
|
## @knitr rowextractor
row.extractor <- function(data, extract.by, what = "all") {
  # Extracts rows with min, median, and max values, or by quantiles.
  #
  # Args:
  #   data:       a data.frame.
  #   extract.by: the column to extract by, given either as a column
  #               number or a column name.
  #   what:       "min", "median", "max", "all" (min + median + max),
  #               or a numeric vector of quantiles in [0, 1].
  #
  # Returns: the rows of --data-- holding the requested values of the
  #          --extract.by-- column.
  #
  # === EXAMPLES ===
  #
  # set.seed(1)
  # dat = data.frame(V1 = 1:10, V2 = rnorm(10), V3 = rnorm(10),
  #                  V4 = sample(1:20, 10, replace=T))
  # row.extractor(dat, 4, "all")
  # row.extractor(dat, 4, "min")
  # row.extractor(dat, "V4", "median")
  # row.extractor(dat, 4, c(0, .5, 1))
  # row.extractor(dat, "V4", c(0, .25, .5, .75, 1))
  #
  # "which.quantile" function by cbeleites:
  #     http://stackoverflow.com/users/755257/cbeleites
  # See: http://stackoverflow.com/q/10256503/1270695

  # Resolve a column name to its index. (The original code compared against
  # the literal string "extract.by" inside a branch whose condition was
  # always FALSE, so names were never resolved; they only worked by accident
  # because data["name"] also subsets by name.)
  if (is.character(extract.by)) {
    extract.by <- which(colnames(data) %in% extract.by)
    if (length(extract.by) == 0) {
      stop("'extract.by' does not match any column of 'data'", call. = FALSE)
    }
  }
  if (is.character(what)) {
    # Row index of the median value; for even-length columns, the two rows
    # bracketing the median.
    which.median <- function(data, extract.by) {
      a <- data[, extract.by]
      if (length(a) %% 2 != 0) {
        which(a == median(a))
      } else if (length(a) %% 2 == 0) {
        b <- sort(a)[c(length(a)/2, length(a)/2 + 1)]
        c(max(which(a == b[1])), min(which(a == b[2])))
      }
    }
    X1 <- data[which(data[extract.by] == min(data[extract.by])), ]   # min
    X2 <- data[which(data[extract.by] == max(data[extract.by])), ]   # max
    X3 <- data[which.median(data, extract.by), ]                     # median
    if (identical(what, "min")) {
      X1
    } else if (identical(what, "max")) {
      X2
    } else if (identical(what, "median")) {
      X3
    } else if (identical(what, "all")) {
      rbind(X1, X3, X2)
    }
  } else if (is.numeric(what)) {
    # Returns the row indices whose values sit at the requested quantiles
    # (after cbeleites; see the link above).
    which.quantile <- function(data, extract.by, what, na.rm = FALSE) {
      x <- data[, extract.by]
      if (!na.rm & any(is.na(x)))
        return(rep(NA_integer_, length(what)))
      o <- order(x)
      n <- sum(!is.na(x))
      o <- o[seq_len(n)]
      nppm <- n * what - 0.5
      j <- floor(nppm)
      h <- ifelse((nppm == j) & ((j %% 2L) == 0L), 0, 1)
      j <- j + h
      j[j == 0] <- 1
      o[j]
    }
    data[which.quantile(data, extract.by, what), ]  # quantile
  }
}
|
7c6876342adce5194e732075f79e21c334c50643
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/aroma.affymetrix/inst/testScripts/system/chipTypes/Mapping50K_Hind240,Xba240/21.CRMA,paired.R
|
5c1ce6ed19e7dff9c43aecc06b393bdf82465771
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,561
|
r
|
21.CRMA,paired.R
|
# System test: paired copy-number (CRMA) analysis on the HapMap CEU test set
# for the two Mapping50K chip types, using GLAD and ChromosomeExplorer.
library("aroma.affymetrix")
# NOTE(review): 'log' masks base::log() for the rest of the session.
log <- Arguments$getVerbose(-4, timestamp=TRUE);
dataSet <- "HapMap,CEU,testset";
chipTypes <- c("Mapping50K_Hind240", "Mapping50K_Xba240");
#chipTypes <- chipTypes[2];
# Expected sample names
sampleNames <- c("NA06985", "NA06991", "NA06993",
                 "NA06994", "NA07000", "NA07019");
# Tags identifying the preprocessed chip-effect data set on disk.
tags <- "ACC,-XY,RMA,+300,A+B,FLN,-XY";
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Tests for setting up CEL sets and locating the CDF file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
cesList <- list();
for (chipType in chipTypes) {
  ces <- CnChipEffectSet$byName(dataSet, tags=tags, chipType=chipType);
  print(ces);
  # Fail fast if the data set on disk does not contain the expected samples.
  stopifnot(identical(getNames(ces), sampleNames));
  cesList[[chipType]] <- ces;
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Paired GLAD model
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Paired CN anlysis, by setting up some fake (test,control) pairs:
# arrays 1-3 act as test samples, arrays 4-6 as their references.
testList <- lapply(cesList, FUN=extract, 1:3);
refList <- lapply(cesList, FUN=extract, 4:6);
glad <- GladModel(testList, refList);
print(glad);
fit(glad, arrays=1, chromosomes=19, verbose=log);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# ChromosomeExplorer test
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ce <- ChromosomeExplorer(glad);
print(ce);
process(ce, arrays=1:2, chromosomes=c(19,22), verbose=log);
## process(ce, verbose=log);
|
15a312f1d670c1fb7299658b31b905baa919cef1
|
d859d67e16df9d322481bbecdd7aacec357356f7
|
/btp.R
|
49333a6f79fed1bd8bdc4dddd7d918a66150cdab
|
[] |
no_license
|
sanant854/BTP_final
|
bf6645f6b515fe7ee84724efebfb77215f5d4475
|
4eac574b025c563c00643d778b2536f97d0d0566
|
refs/heads/main
| 2023-02-19T16:09:38.300741
| 2021-01-15T10:03:10
| 2021-01-15T10:03:10
| 329,873,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,291
|
r
|
btp.R
|
# Simulate per-sample payoffs (throughputs) of three states and label each
# sample with the best-paying state, then train classifiers (SVM and random
# forest) to predict that state from (lambda, miu, r_lifi).
# Names suggest a WiFi/LiFi handover scenario (r_wifi/r_lifi = data rates,
# t_h = handover time) -- TODO confirm against the accompanying report.
total<-100000
lambda<-runif(total,min=0,max=10)
miu<-runif(total,min=0,max=1)
r_wifi<-32
r_lifi<-runif(total,min=2900,max=3200)
t_h<-0.00833
data<-cbind(lambda,miu,r_lifi)
data<-as.data.frame(data)
# Payoff of each state:
#   k1: constant WiFi rate; k2: fraction (1 - miu) of the LiFi rate;
#   k3: split across both links, each share reduced by lambda * t_h
#       (floored at zero).
k1<-r_wifi
k2<-(1-miu)*(r_lifi)
k3<-1:total
for(i in 1:total){
  k3[i]<-(max(miu[i]-(lambda[i]*t_h),0)*(r_wifi))+(max(1-miu[i]-(lambda[i]*t_h),0)*(r_lifi[i]))
}
# Label each sample with the state achieving the largest payoff
# (ties resolve to the later branch checked).
state<-1:total
for(i in 1:total){
  if(k1>=k2[i] && k1>=k3[i])
  {
    state[i]=1
  }
  else if(k2[i]>=k1 && k2[i]>=k3[i])
  {
    state[i]=2
  }
  else if(k3[i]>=k1 && k3[i]>=k2[i])
  {
    state[i]=3
  }
}
data$state<-as.factor(state)
library(caret)
# 70/30 stratified train/test split.
intrain<-createDataPartition(data$state,p=0.7,list=FALSE)
train<-data[intrain,]
test<-data[-intrain,]
library(e1071)
# Polynomial-kernel SVM and a caret random forest; compare test accuracy.
fit<-svm(state~.,data=train,kernel="polynomial",degree=2,gamma=1,cost=0.1,coef0=1)
fit2<-train(state~.,data=train,method="rf")
pre2<-predict(fit2,test);
confusionMatrix(pre2,test$state)
pre<-predict(fit,test)
confusionMatrix(pre,test$state)
confusionMatrix(pre2,test$state)
# Rough overfitting check for the SVM: squared correlation between
# (numerically coded) predicted and actual labels on train vs. test.
pre1<-predict(fit,train)
pre2<-predict(fit,test)
pre1<-as.numeric(pre1)
pre2<-as.numeric(pre2)
ac1<-as.numeric(train$state)
ac2<-as.numeric(test$state)
tr<-cor(pre1,ac1)^2
te<-cor(pre2,ac2)^2
tr-te
|
ad1bd6da547f1d333a27bce7d2d323318047bf0d
|
5c318b3d88082918477ea3c8ab24e53c0ba12e25
|
/negbin_trig_run.R
|
319a5782299af7b4ae6df578a6ec685d1b6d3520
|
[] |
no_license
|
alxymitr/Carpenter
|
cc9cd3c6fd6c4cb615c150a04927b79414d2cf55
|
e8e5ae9d71a3ac1bd42b9dd0e3c989585e9dde37
|
refs/heads/master
| 2020-03-20T00:55:12.129322
| 2018-06-12T21:45:14
| 2018-06-12T21:45:14
| 137,060,710
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,272
|
r
|
negbin_trig_run.R
|
# Simulate counts from a negative-binomial model with a log link and one
# cosine/sine harmonic over t = 1..N, then recover the parameters by
# optimization and by MCMC via the Stan model in negbin_trig.stan.
library(rethinking)
library(MASS)
N <- 50L
tt <- 1:N
# One full harmonic cycle over the N time points.
xcos <- cos(2*pi*tt/N)
xsin <- sin(2*pi*tt/N)
# True parameter values used for simulation.
alpha <- 2
beta_cos <- 0.5
beta_sin <- -0.3
theta <- 2
mu <- exp(alpha + beta_cos * xcos + beta_sin * xsin)
y <- rnegbin(N, mu = mu, theta = theta)
# Function to generate random outcomes from a Negative Binomial distribution,
# with mean mu and variance mu + mu^2/theta.
# Visual check: true mean curve (red) against the simulated counts.
plot(mu, type = "l", col = "red", ylim = c(0, 20))
lines(y, type = "o")
model <- stan_model("negbin_trig.stan")
# Point estimates via Stan's optimizer.
(mle <- optimizing(model, data = c("N", "xcos", "xsin", "y")))
# Initial log joint probability = -222.276
# Optimization terminated normally:
#   Convergence detected: relative gradient magnitude is below tolerance
# $par
#      alpha   beta_cos   beta_sin      theta
#  1.8816966  0.3596087 -0.2580791  1.8101928
#
# $value
# [1] 389.5196
#
# $return_code
# [1] 0
# Full posterior via MCMC.
fit <- stan("negbin_trig.stan", data = c("N", "xcos", "xsin", "y"))
precis(fit)
#           Mean StdDev lower 0.89 upper 0.89 n_eff Rhat
# alpha     1.89   0.12       1.70       2.09  4000    1
# beta_cos  0.36   0.17       0.09       0.63  4000    1
# beta_sin -0.26   0.16      -0.51       0.00  4000    1
# theta     1.88   0.50       1.09       2.57  4000    1
# very strange: n_eff = 4000!  (translated from the original Russian comment)
cb8399131c953a65fbd766be8ce8475277518930
|
1c062257e940aa272a30c2f5cf4675d219e9690d
|
/math4753/man/mypvalue.Rd
|
c407a88f31ec5bdc8f264f8aecb46d44f7cfb371
|
[] |
no_license
|
taoxu-zhu/math4753
|
4139ac34f1d2bc55d5cdc86bb6c0a5cb8b0f282b
|
09095268386cef6ea2331f43d2b22cda6dd2e370
|
refs/heads/master
| 2022-11-10T03:31:14.189666
| 2020-06-30T03:37:06
| 2020-06-30T03:37:06
| 275,980,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 430
|
rd
|
mypvalue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mypvalue.R
\name{mypvalue}
\alias{mypvalue}
\title{course 4753}
\usage{
mypvalue(t0, xmax = 4, n = 20, alpha = 0.05)
}
\arguments{
\item{t0}{Calculated t statistic for which the p-value is to be found.}
\item{xmax}{Upper x limit used by the function (default 4).}
\item{n}{Sample size (default 20).}
\item{alpha}{Significance level (default 0.05).}
}
\value{
value of p value
}
\description{
to find the p value.
}
\examples{
set.seed(55);x1=rnorm(30,mean=25,sd=5)
tcalc=(mean(x1)-23)/(sd(x1)/sqrt(30))
mypvalue(tcalc,n=30,alpha=0.05)
}
|
c853dddd1455362a9324adc57ce36ae208a20df4
|
41d46c7c39adecfbfbaddb6bd18632c48fcf4de5
|
/man/calculate_win_probability.Rd
|
603322b705b156c72fe6501ee7ebf2ec03d24eac
|
[
"MIT"
] |
permissive
|
Vinnetou/nflfastR
|
50d9cfc77c40cc9ca8c046037a3710bcc2e15857
|
c124e3ac4cb17746d2962ec957eabaff4d7e8882
|
refs/heads/master
| 2023-02-08T19:26:54.954165
| 2021-01-01T12:55:03
| 2021-01-01T12:55:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,595
|
rd
|
calculate_win_probability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ep_wp_calculators.R
\name{calculate_win_probability}
\alias{calculate_win_probability}
\title{Compute win probability}
\usage{
calculate_win_probability(pbp_data)
}
\arguments{
\item{pbp_data}{Play-by-play dataset to estimate win probability for.}
}
\value{
The original pbp_data with the following columns appended to it:
\describe{
\item{wp}{win probability.}
\item{vegas_wp}{win probability taking into account pre-game spread.}
}
}
\description{
Computes win probability for provided plays. Returns the data with
probabilities of winning the game. The following columns
must be present: receive_2h_ko (1 if game is in 1st half and possession
team will receive 2nd half kickoff, 0 otherwise), ep (expected points),
home_team, posteam, half_seconds_remaining, game_seconds_remaining,
spread_line (how many points home team was favored by), down, ydstogo,
yardline_100, posteam_timeouts_remaining, defteam_timeouts_remaining
}
\details{
Computes win probability for provided plays. Returns the data with
probabilities of each scoring event and EP added. The following columns
must be present:
\itemize{
\item{receive_2h_ko (1 if game is in 1st half and possession team will receive 2nd half kickoff, 0 otherwise)}
\item{ep (expected points)}
\item{score_differential}
\item{home_team}
\item{posteam}
\item{half_seconds_remaining}
\item{game_seconds_remaining}
\item{spread_line (how many points home team was favored by)}
\item{down}
\item{ydstogo}
\item{yardline_100}
\item{posteam_timeouts_remaining}
\item{defteam_timeouts_remaining}
}
}
|
f57534445391c9da8c4d3789af4d2dbca1b90b4d
|
4314b3de52f8ce39629b7c0887796d895b4a761c
|
/code_R/sentiments.R
|
eeb3f3ca4449cb341869f2f82afe9c18a9ae23fb
|
[] |
no_license
|
kln-courses/corpustextanalysis
|
b04992e11ba449df125f8524f2792bd378e181e2
|
79e707fe9638d073351f9a71b244ae9f6b42b40d
|
refs/heads/master
| 2021-01-11T04:39:17.821553
| 2017-03-08T14:12:27
| 2017-03-08T14:12:27
| 71,120,794
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,612
|
r
|
sentiments.R
|
# sentiment analysis
# NOTE(review): rm(list = ls()) and setwd() in a script are hostile to the
# caller's session; consider removing them and using relative paths.
rm(list = ls())
wd <- '/home/kln/Documents/education/tm_R/some_r'
setwd(wd)
source('util_fun.R')
###### word level lexicons/dictionaries
# Read the Gospel of Matthew and collapse all lines into one string.
matt.v <- paste(scan('data/kjv_books/Matthew.txt', what = 'character', sep='\n', encoding = 'UTF-8'), collapse = " ")
# sentence tokenizer (openNLP Maxent model via the NLP annotation API)
library(NLP)
library(openNLP)
# Split raw text into sentences with the openNLP Maxent sentence detector.
# Args: text - character text to split; lang - annotator language code.
# Returns: character vector with one element per detected sentence.
token_sent <- function(text, lang = "en") {
  detector <- Maxent_Sent_Token_Annotator(language = lang)
  full_text <- as.String(text)                 # NLP::String supports span indexing
  boundaries <- annotate(full_text, detector)  # sentence span annotations
  full_text[boundaries]                        # one string per span
}
# sentiment function
# Score sentences against a word-valence lexicon.
#
# For each sentence, the score is the mean valence of the *distinct lexicon
# entries* appearing in that sentence (a word repeated within a sentence is
# counted once, matching the original implementation); sentences with no
# lexicon hits score 0.
#
# Args:
#   sentences: character vector of sentences.
#   lexicon:   data frame with columns `word` (lower case) and `value`.
# Returns: numeric vector of scores, one per sentence.
lexicon_scr <- function(sentences, lexicon){
  token_word <- strsplit(tolower(sentences), "[^A-Za-z']+")# tokenize sentences
  # vapply gives a preallocated, type-stable result. (The original grew a
  # 1x1 matrix inside a loop, which returned a matrix for length-1 input
  # but a plain vector otherwise.)
  scores <- vapply(token_word, function(tokens) {
    vals <- lexicon$value[which(lexicon$word %in% tokens)]# valence of matched entries
    if (length(vals) > 0) sum(vals) / length(vals) else 0
  }, numeric(1))
  # scores <- TTR::SMA(scores, n = 10)# optional smoothing (disabled)
  scores
}
# extract sentences from Matthew
sent.ch <- token_sent(matt.v)
head(sent.ch)
# import sentiment lexicon (AFINN: word + integer valence, tab separated)
afinn.dt <- read.table('AFINN-111.txt', header = FALSE, sep = '\t',quote = "\"")
names(afinn.dt) <- c('word','value')
head(afinn.dt)
tail(afinn.dt,10)
# test the sentiment code on a tiny two-sentence example
test.v <- c('I love whales. I hate Ahab because he is the epitome of whaling')
test.ch <- token_sent(test.v)
print(lexicon_scr(test.ch,afinn.dt))
# run on Matthew: one score per sentence
mattsentiment.v <- lexicon_scr(sent.ch,afinn.dt)
dev.new()
par(mfrow = c(3,1))
hist(mattsentiment.v)
plot(mattsentiment.v,type = 'l', xlab = 'Narrative Time', ylab = 'Sentiment')
# smoothed view: 60-sentence simple moving average
plot(TTR::SMA(mattsentiment.v,60),type = 'l', xlab = 'Narrative Time', ylab = 'Sentiment')
# aesthetics
library(ggplot2)
mattsentiment.df <- data.frame(line = 1:length(mattsentiment.v), sentiment = TTR::SMA(mattsentiment.v,60))
dev.new()
ggplot(data = mattsentiment.df, aes(x = line, y = sentiment)) +
geom_bar(stat = "identity", colour ="#FF9999")+
theme_minimal() +
xlab("Narrative Time (line)")+
ylab("Sentiment") +
labs(title = expression(paste("Sentiment in ", italic("Matthew"))))
### with Syuzhet library
library(syuzhet)
library(tm)
help(package = syuzhet)
ls("package:syuzhet")
# tokenize at sentence level
text_sent <- get_sentences(matt.v)
head(text_sent)
# AFINN sentiment lexicon
text_afinn <- get_sentiment(text_sent, method = 'afinn')
# explore: extreme sentences (max/min and beyond +/- 2 sd)
text_sent[which(text_afinn == max(text_afinn))]
text_sent[which(text_afinn == min(text_afinn))]
text_sent[which(text_afinn > (mean(text_afinn)+sd(text_afinn)*2))]
text_sent[which(text_afinn < (mean(text_afinn)-sd(text_afinn)*2))]
dev.new()
par(mfrow = c(2,2))
hist(text_afinn)
plot(text_afinn,type = 'l')
# chunk text into 100 percentage bins
text_afinn_val <- get_percentage_values(text_afinn, bin = 100)
hist(text_afinn_val)
plot(text_afinn_val,type = 'l')
# the NRC lexicon
matt_nrc <- get_nrc_sentiment(text_sent)
# several sentiment factors (one column per NRC emotion/polarity)
head(matt_nrc,15)
# explore
text_sent[which(matt_nrc$fear > 4)]
# bit more efficient with dplyr and ggplot
library(dplyr)
library(stringr)
# Chunk a sentence vector into blocks of 10 sentences and score each chunk.
#
# Args:
#   atext:   character vector of sentences.
#   amethod: sentiment method name passed to syuzhet::get_sentiment().
# Returns: data frame with columns `linenumber` (chunk index) and
#   `sentiment` (chunk score).
process_sentiment <- function (atext, amethod) {
  chunkedtext <- data_frame(x = atext) %>%
    group_by(linenumber = ceiling(row_number() / 10)) %>%
    summarize(text = str_c(x, collapse = " "))
  # Return the result visibly. (The original left it in a dead local
  # assignment, which made the function return its value invisibly.)
  data.frame(cbind(linenumber = chunkedtext$linenumber,
    sentiment = get_sentiment(chunkedtext$text, method = amethod)))
}
# Score Matthew with three lexicons and stack the results for faceting.
matt_sentiment.df <- rbind(process_sentiment(text_sent,"afinn") %>% mutate(method = "AFINN"),
process_sentiment(text_sent,"bing") %>% mutate(method = "Bing et al"),
process_sentiment(text_sent,"nrc") %>% mutate(method = "NRC"))
library(ggplot2)
dev.new()
# One facet per lexicon so the three methods can be compared visually.
ggplot(data = matt_sentiment.df, aes(x = linenumber, y = sentiment, fill = method)) +
geom_bar(stat = "identity") +
facet_wrap(~method, nrow = 3) +
theme_minimal() +
ylab("Sentiment") +
labs(title = expression(paste("Sentiment in ", italic("Matthew"))))
### sentiment as proxy for plot structure
# Chunk the sentences into 100 slices up front: text_sent_100 is needed for
# both the FFT inspection and the DCT inspection below.
# BUG FIX: the original defined text_sent_100 only after its first use, so
# the line inspecting the FFT minimum failed with "object not found".
bins = 100
text_sent_100 <- slice_text(text_sent,bins)
# fft transformation
afinn_fft <- get_transformed_values(text_afinn)
dev.new()
par(mfrow = c(2,1))
plot(text_afinn_val, type = 'l')
plot(afinn_fft, type = 'l')
# sentences in the lowest-valued slice of the transformed signal
text_sent_100[which(afinn_fft == min(afinn_fft))]
# discrete cosine transformation
afinn_cos <- get_dct_transform(text_afinn)
dev.new()
par(mfrow = c(2,1))
plot(text_afinn_val, type = 'l')
plot(afinn_cos, type = 'l')
# slices at the extremes of the DCT-smoothed plot shape
text_sent_100[which(afinn_cos == max(afinn_cos))]
text_sent_100[which(afinn_cos == min(afinn_cos))]
# plot comparison: whole KJV Bible vs the Koran, FFT-smoothed AFINN curves
bible_fft <- get_transformed_values(get_sentiment
(get_sentences(paste(scan('data/kjv.txt',
what = 'character', sep='\n', encoding = 'UTF-8'), collapse = " ")),method = 'afinn'))
koran_fft <- get_transformed_values(get_sentiment
(get_sentences(paste(scan('data/koran.txt',
what = 'character', sep='\n', encoding = 'UTF-8'), collapse = " ")),method = 'afinn'))
dev.new()
par(mfrow = c(2,1))
plot(bible_fft, type = 'l', main = 'Bible, KJV' ,xlab = 'Narrative time', ylab = 'Sentiment',col = 'red',lwd = 3)
plot(koran_fft, type = 'l', main = 'Koran, Arberry Translation', xlab = 'Narrative time', ylab = 'Sentiment',col = 'red',lwd = 3)
### scaling with tm
library(tm)
dd = "/home/kln/Documents/education/tm_R/some_r/data/kjv_books";
# NOTE(review): language = "lat" looks odd for an English KJV corpus --
# confirm whether "en" was intended.
books.cor <- Corpus(DirSource(dd, encoding = "UTF-8"), readerControl = list(language = "lat"))
names(books.cor) <- gsub("\\..*","",names(books.cor))# strip file extension
# PlainTextDocument conversion drops the names, so save and restore them.
filenames <- names(books.cor)
books.cor <- tm_map(books.cor, PlainTextDocument)
books.cor <- tm_map(books.cor, content_transformer(tolower))
books.cor <- tm_map(books.cor, removePunctuation)
books.cor <- tm_map(books.cor, removeNumbers)
books.cor <- tm_map(books.cor, stripWhitespace)
names(books.cor) <- filenames
# sentiment for each document
# Compute an AFINN sentiment score for every document in a tm corpus.
#
# Args:
#   corpus: a tm corpus; each document's text is taken from $content.
# Returns: numeric vector with one score per document.
afinncorpus <- function(corpus){
  ndocs <- length(corpus)
  sent <- rep(0, ndocs)
  # seq_len() instead of 1:length() so an empty corpus yields numeric(0)
  # rather than an out-of-bounds subscript error.
  for (i in seq_len(ndocs)) {
    sent[i] <- get_sentiment(paste(corpus[[i]]$content, collapse = " "), method = 'afinn')
  }
  return(sent)
}
# One AFINN score per KJV book.
sent.v <- afinncorpus(books.cor)
dev.new(); barplot(sent.v, main="KJV sentiments", horiz=TRUE)
# use metadata to tag each document with its collection (old/new testament)
tmp <- read.csv('kjv_metadata.csv',header = TRUE)
head(tmp)
for (i in 1:length(books.cor)){
books.cor[[i]]$meta$heading <- as.character(tmp$filename[[i]])# pre-defined tag
books.cor[[i]]$meta$collection <- as.character(tmp$collection[[i]])# user-defined tag
}
nt.cor <- books.cor[meta(books.cor, "collection") == 'new']# new testament
old.cor <- books.cor[meta(books.cor, "collection") == 'old']# old testament
nt.sent.v <- afinncorpus(nt.cor)
ot.sent.v <- afinncorpus(old.cor)
# Side-by-side barplots with the collection mean marked in red.
par(mfrow = c(1,2));
barplot(nt.sent.v, main="KJV NT", horiz=TRUE); abline(v = mean(nt.sent.v), col = 'red')
barplot(ot.sent.v, main="KJV OT", horiz=TRUE); abline(v = mean(ot.sent.v), col = 'red')
###### sentiment classifier
## annotate sentences with class valence and split in training and test set
# training set: 5 positive + 5 negative labeled sentences
pos_sent <- rbind(c('I love text mining','positive'), c('Melville is amazing','positive'), c('I feel great today','positive'),
c('I am excited about data','positive'), c('She is my best friend','positive'))
neg_sent <- rbind(c('I do not like text mining','negative'), c('Melville is terrible','negative'), c('I feel tired today','negative'),
c('I am not interested in data','negative'),c('She is my adversary','negative'))
# test set: 5 held-out labeled sentences
test_sent <- rbind(c('I feel love for him','positive'),c('George is my friend','positive'),c('I am not tired','positive'),
c('do not like him','negative'),c('your review is terrible','negative'))
sent <- rbind(pos_sent, neg_sent, test_sent)
print(sent)
library(RTextTools) # quick and dirty text classification that use tm
library(e1071) # extensive stats library for Naive Bayes algorithm
# tf-idf weighted document-term matrix over all 15 sentences
dtm = create_matrix(sent[, 1], language = "english", removeStopwords = FALSE,
removeNumbers = TRUE, stemWords = FALSE, tm::weightTfIdf)
dtm.mat = as.matrix(dtm)
# Train on rows 1-10, predict rows 11-15.
sentiment.class = naiveBayes(dtm.mat[1:10, ], as.factor(sent[1:10, 2]))
predict.f = predict(sentiment.class, dtm.mat[11:15, ])
print(predict.f)
## diagnostics
# confusion matrix
table(sent[11:15, 2], predict.f)
# accuracy
recall_accuracy(sent[11:15, 2], predict.f)
|
6e14fa045eb896281750cc79ade88e16a777debb
|
cece5aa1c01160ee3880a6b88a882bcac1aa96d7
|
/Q4.R
|
0a23fb915392a0c4719bcaf596ec625cb0212a4b
|
[] |
no_license
|
vik235/Multivariate-Statistics
|
c29c16da0f713b326b1ba5fd34579946c2ffb0fa
|
780875c2a35c5a47041e0dfca655adf20ef78e61
|
refs/heads/master
| 2021-09-09T04:42:28.880183
| 2018-03-13T23:40:20
| 2018-03-13T23:40:20
| 125,128,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,520
|
r
|
Q4.R
|
# Multivariate analysis of the sweat data (Hotelling T2 exercise).
library(MVN)
library(alr3)
sweat <- read.csv('sweat.csv', header = TRUE)
head(sweat)
# Sample size, dimension and the T2/F critical constant
# c = sqrt((nu*p/(nu-p+1)) * F_{p, nu-p+1}(1-alpha)).
# NOTE(review): `c` shadows base::c; later calls to c(...) still resolve to
# the function, but the name is unfortunate.
n = nrow(sweat)
p= ncol(sweat)
nu= n-1
alpha=.05
c= sqrt((nu*p/(nu - p +1) )*qf(1 - alpha, p,nu - p +1 ))
plot(sweat)
##Solving for b
# Sample covariance, its inverse, and eigendecomposition: the eigenvectors
# are the axes of the T2 confidence ellipsoid, the eigenvalues its scale.
S = cov(sweat)
SInv= solve(S)
eigens=eigen(S)
eigenvectors=data.frame(eigens$vectors)
eigenvectors
# Half-lengths of the mean-ellipsoid axes (note the sqrt(n) divisor);
# outer parentheses print the assigned values.
(half_l_ec1= c*sqrt(eigens$values[1])/sqrt(n))
(half_s1_ec1= c*sqrt(eigens$values[2])/sqrt(n))
(half_s2_ec1= c*sqrt(eigens$values[3])/sqrt(n))
#center
Xbar= apply(sweat ,2 , mean)
#Center: ( 4.640 45.400 9.965 )'
#Axis:
#1- Major axis: (-0.05084144 -0.99828352 0.02907156)', half length: 46.35383
#2- Minor1 axis: (-0.57370364 0.05302042 0.81734508)', half length: 6.969385
#3- Minor2 axis: ( 0.81748351 -0.02487655 0.57541452)', half length: 3.734851
#Lengths
# NOTE(review): these overwrite the earlier half-lengths WITHOUT the
# sqrt(n) divisor -- confirm which scale part b actually asks for.
half_l_ec1= c*sqrt(eigens$values[1])
half_s1_ec1= c*sqrt(eigens$values[2])
half_s2_ec1= c*sqrt(eigens$values[3])
#directions.
eigenvectors
##Solving for c 95% T2 confidence for the Sweat, Sodium and Potassium.
# Simultaneous T2 intervals: Xbar_j +/- sqrt(c^2 * a'Sa / n), with unit
# vector a selecting each variable in turn.
#Sweat
a = c(1,0,0)
taSa = t(a)%*%S%*%a
ci_Sweat= Xbar[1] +c(-1,1)*sqrt((c^2)*taSa/n)
#T2 simultaneous CI for Sweat
#3.397768 5.882232
#Sodium
a = c(0,1,0)
taSa = t(a)%*%S%*%a
ci_Sodium= Xbar[2] +c(-1,1)*sqrt((c^2)*taSa/n)
#T2 simultaneous CI for Sodium
#44.15777 46.64223
# NOTE(review): this recorded interval is narrower than the Bonferroni one
# below, which cannot happen for T2 vs Bonferroni -- re-run to confirm.
#Potassium
a = c(0,0,1)
taSa = t(a)%*%S%*%a
ci_Potassium= Xbar[3] +c(-1,1)*sqrt((c^2)*taSa/n)
#T2 simultaneous CI for Potassium
# 8.570664 11.359336
##Solving for d 95% Bonferroni confidence for the Sweat, Sodium and Potassium.
# Bonferroni: per-variable t intervals at level alpha/(2m), m = 3.
# NOTE(review): `t` now shadows base::t; later calls t(...) still find the
# transpose function because R skips non-function objects in a call.
m=3
alphapc=alpha/(2*m)
t = qt(1 - alphapc , n - 1)*sqrt(c(diag(S))/n)
#Sweat
Xbar[1] + c(-1,1)*t[1]
#3.643952 5.636048
#Sodium
Xbar[2] + c(-1,1)*t[2]
#37.10308 53.69692
#Potassium
Xbar[3] + c(-1,1)*t[3]
#8.846992 11.083008
##Solving for e
# Hotelling T2 test of H0: mu = mu0.
mu0=c(4,45,10)
T2 = n*t(Xbar - mu0)%*%(SInv)%*%(Xbar - mu0)
#4.374632 = T2 test statistic.
# P value via the scaled-F distribution of T2.
pvalue = 1 - pf(((nu - p +1)/(nu*p))*T2, p, nu - p +1 )
#0.3052847 P value of the test- Ho : mu=mu0
#T2_Critical
((nu*p)/(nu - p +1))*qf(1 - 0.05, p, nu - p +1 )
#10.7186
# Conclusion: At a p value of 0.3052847 we dont have sufficient evidence (alpha=.05)
# to reject the null hypothesis Ho : mu=mu0
#f
# Check whether mu_1 lies inside the 95% confidence region for mu.
mu_1 = c(4, 45, 10)
MD = t(Xbar - mu_1)%*%SInv%*%(Xbar - mu_1)
#Compare MD with c
# NOTE(review): membership in the T2 region requires n*MD <= c^2; comparing
# raw MD against c happens to give the same verdict here but is not the
# correct criterion in general -- confirm.
MD < c
#TRUE
#Thus mu_1 is inside the 95% confidence region of mu which is consistent with what we concluded from part e.
#g
## Bootstrap test via the generalized likelihood ratio (GLR) statistic.
## The chi-square approximation to the GLR needs a large n; n is small
## here, so the null distribution is approximated by bootstrap resampling.
# Set the seed so that the results are repeatable.
set.seed(101)
# Transform the data matrix to force H0 to be true: Xtilde = X - Xbar + mu0.
sweat_t0 = sweat - matrix(rep(Xbar,n),nrow=n, byrow = TRUE) + matrix(rep(mu0,n),nrow=n, byrow = TRUE)
# GLR test statistic: ratio of generalized variances raised to n/2. Its
# sampling distribution is unknown, hence the bootstrap below.
detS=det(var(sweat))
detS0= det(var(sweat_t0))
lambda_ts = (detS/detS0)^(n/2) #THE test statistic
# Bootstrap from the data under H0 to get the sampling distribution.
B=500
tb=rep(0,B)
for(i in seq_len(B))
{
  bS= sweat_t0[sample(n, replace = TRUE),]
  # BUG FIX: the statistic was stored as tb[b] (undefined index `b`),
  # which left tb unfilled; the loop index i is intended.
  tb[i] = (det(var(bS))/detS0)^(n/2)
}
# P value of the test.
# BUG FIX: the original compared against the undefined name `lambda`;
# the observed statistic is lambda_ts.
p_value_boot <- mean(tb >= lambda_ts)
#.306
# We note that this is so close to the one obtained by the earlier procedures.
|
dc00b58498a7c9f7a6199944a36ff25f0cfbfc44
|
e9518dced01a5de45d3405218e753db547464904
|
/cachematrix.R
|
8e90491b826f4d03456e1b71ddfa930ede01b512
|
[] |
no_license
|
normbyer/ProgrammingAssignment2
|
91295b6c313efb79efd83b2b1afb7a96963f8418
|
b1960015122419ad803895e88bbe66610d626ee6
|
refs/heads/master
| 2021-01-18T13:00:31.307738
| 2015-12-27T14:31:22
| 2015-12-27T14:31:22
| 48,381,436
| 0
| 0
| null | 2015-12-21T16:13:56
| 2015-12-21T16:13:55
| null |
UTF-8
|
R
| false
| false
| 1,401
|
r
|
cachematrix.R
|
## These functions are used for when one needs to invert a matrix
## and get the result multiple times without recalculating.
## "makeCacheMatrix" sets up the matrix and needed functions
## "cacheSolve" returns the inverted matrix
## returns a list that can be used with the "cacheSolve" function
## a matrix can be passed or set later by passing it to the "Set"
## function returned in the returned list. The value of the matrix
## can be retrieved by the "Get" function returned in the list.
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of accessors: Set (replace matrix, reset cache), Get,
## SetInv (store inverse), GetInv (NA until an inverse has been stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_matrix <- x
  cached_inverse <- NA  # NA sentinel means "not computed yet"
  list(
    "Set" = function(value) {
      cached_matrix <<- value
      cached_inverse <<- NA  # new matrix invalidates the cached inverse
    },
    "Get" = function() cached_matrix,
    "SetInv" = function(value) cached_inverse <<- value,
    "GetInv" = function() cached_inverse
  )
}
## runs the solve function on a list generated from the "makeCacheMatrix"
## function and stores the result so future calls to this function will
## not have to run solve unless the data has changed.
## Return the inverse of the matrix wrapped by a makeCacheMatrix list,
## computing and caching it on first use.
##
## Args:
##   x:   list produced by makeCacheMatrix (Get/GetInv/SetInv accessors).
##   ...: extra arguments forwarded to solve().
## Returns: the inverse matrix (cached after the first call).
cacheSolve <- function(x, ...) {
  inv <- x$GetInv()  # fetch once instead of three separate accessor calls
  # The cache is empty while it still holds the scalar NA sentinel set by
  # makeCacheMatrix; a computed inverse is a matrix, so the length check
  # also avoids is.na() warnings on multi-element objects.
  if (length(inv) == 1 && is.na(inv)) {
    inv <- solve(x$Get(), ...)
    x$SetInv(inv)
  }
  inv
}
|
f529a64f7a85a4d0745f645d171fa6402cf905db
|
ef1d6fa0df37fa552c4c4625e6e9cb974e8482f0
|
/R/ovcCrijns.R
|
d9d0a98eac5c52e40be21c15aff84fdca792c262
|
[] |
no_license
|
bhklab/genefu
|
301dd37ef91867de8a759982eb9046d3057723af
|
08aec9994d5ccb46383bedff0cbfde04267d9c9a
|
refs/heads/master
| 2022-11-28T09:22:02.713737
| 2022-05-30T15:35:53
| 2022-05-30T15:35:53
| 1,321,876
| 17
| 15
| null | 2022-11-07T11:52:05
| 2011-02-02T21:06:25
|
R
|
UTF-8
|
R
| false
| false
| 4,915
|
r
|
ovcCrijns.R
|
#' @title Function to compute the subtype scores and risk classifications
#' for the prognostic signature published by Crinjs et al.
#'
#' @description
#' This function computes subtype scores and risk classifications from gene
#' expression values using the weights published by Crijns et al.
#'
#' @usage
#' ovcCrijns(data, annot, hgs,
#' gmap = c("entrezgene", "ensembl_gene_id", "hgnc_symbol", "unigene"),
#' do.mapping = FALSE, verbose = FALSE)
#'
#' @param data Matrix of gene expressions with samples in rows and probes in
#' columns, dimnames being properly defined.
#' @param annot Matrix of annotations with one column named as gmap, dimnames
#' being properly defined.
#' @param hgs vector of booleans with TRUE represents the ovarian cancer
#' patients who have a high grade, late stage, serous tumor, FALSE otherwise.
#' This is particularly important for properly rescaling the data. If hgs is
#' missing, all the patients will be used to rescale the subtype score.
#' @param gmap character string containing the biomaRt attribute to use for
#' mapping if do.mapping=TRUE
#' @param do.mapping TRUE if the mapping through Entrez Gene ids must be
#' performed (in case of ambiguities, the most variant probe is kept for each
#' gene), FALSE otherwise.
#' @param verbose TRUE to print informative messages, FALSE otherwise.
#'
#' @details
#' Note that the original algorithm has not been implemented as it necessitates
#' refitting of the model weights in each new dataset. However the current
#' implementation should give similar results.
#'
#' @return
#' A list with items:
#' - score: Continuous signature scores.
#' - risk: Binary risk classification, 1 being high risk and 0 being low risk.
#' - mapping: Mapping used if necessary.
#' - probe: If mapping is performed, this matrix contains the correspondence.
#' between the gene list (aka signature) and gene expression data.
#'
#' @references
#' Crijns APG, Fehrmann RSN, de Jong S, Gerbens F, Meersma G J, Klip HG,
#' Hollema H, Hofstra RMW, te Meerman GJ, de Vries EGE, van der Zee AGJ (2009)
#' "Survival-Related Profile, Pathways, and Transcription Factors in Ovarian
#' Cancer" PLoS Medicine, 6(2):e1000024.
#'
#' @seealso
#' [genefu::sigOvcCrijns]
#'
#' @examples
#' # load the ovsCrijns signature
#' data(sigOvcCrijns)
#' # load NKI dataset
#' data(nkis)
#' colnames(annot.nkis)[is.element(colnames(annot.nkis), "EntrezGene.ID")] <-
#' "entrezgene"
#' # compute relapse score
#' ovcCrijns.nkis <- ovcCrijns(data=data.nkis, annot=annot.nkis,
#' gmap="entrezgene", do.mapping=TRUE)
#' table(ovcCrijns.nkis$risk)
#'
#' @md
#' @export
#' @name ovcCrijns
ovcCrijns <- function(data, annot, hgs, gmap=c("entrezgene", "ensembl_gene_id",
"hgnc_symbol", "unigene"), do.mapping=FALSE, verbose=FALSE)
{
## Lazy-load the signature weights shipped with the package.
if (!exists('sigOvcCrijns')) data(sigOvcCrijns, envir=environment())
gmap <- match.arg(gmap)
## Default: treat every sample as high-grade serous when hgs is missing.
## NOTE(review): hgs is never used after this point in the visible body --
## confirm whether rescaling by hgs was dropped intentionally.
if(missing(hgs)) { hgs <- rep(TRUE, nrow(data)) }
if(do.mapping) {
if(!is.element(gmap, colnames(annot))) { stop("gmap is not a column of annot!") }
if(verbose) { message("the most variant probe is selected for each gene") }
## Keep one signature row per gene id, then map signature genes onto the
## expression matrix via geneid.map (most variant probe wins).
sigt <- sigOvcCrijns[order(abs(sigOvcCrijns[ ,"weight"]), decreasing=FALSE), ,drop=FALSE]
sigt <- sigt[!duplicated(sigt[ ,gmap]), ,drop=FALSE]
gid2 <- sigt[ ,gmap]
names(gid2) <- rownames(sigt)
gid1 <- annot[ ,gmap]
names(gid1) <- colnames(data)
rr <- geneid.map(geneid1=gid1, data1=data, geneid2=gid2)
data <- rr$data1
annot <- annot[colnames(data), ,drop=FALSE]
sigt <- sigt[names(rr$geneid2), ,drop=FALSE]
pold <- colnames(data)
pold2 <- rownames(sigt)
## Rename probes to "geneid.<id>" so data/annot/signature line up by name.
colnames(data) <- rownames(annot) <- rownames(sigt) <- paste("geneid", annot[ ,gmap], sep=".")
mymapping <- c("mapped"=nrow(sigt), "total"=nrow(sigOvcCrijns))
myprobe <- data.frame("probe"=pold, "gene.map"=annot[ ,gmap], "new.probe"=pold2)
} else {
## No mapping: intersect probe names directly with the signature rows.
gix <- intersect(rownames(sigOvcCrijns), colnames(data))
if(length(gix) < 2) { stop("data do not contain enough gene from the ovcTCGA signature!") }
data <- data[ ,gix,drop=FALSE]
annot <- annot[gix, ,drop=FALSE]
mymapping <- c("mapped"=length(gix), "total"=nrow(sigOvcCrijns))
myprobe <- data.frame("probe"=gix, "gene.map"=annot[ ,gmap], "new.probe"=gix)
sigt <- sigOvcCrijns[gix, ,drop=FALSE]
}
## transform the gene expression in Z-scores
data <- scale(data)
## Weighted signature score, then dichotomize at the median into risk groups.
pscore <- genefu::sig.score(x=data.frame("probe"=colnames(data), "EntrezGene.ID"=annot[ ,gmap], "coefficient"=sigt[ ,"weight"]), data=data, annot=annot, do.mapping=FALSE, signed=FALSE)$score
prisk <- as.numeric(pscore > median(pscore, na.rm=TRUE))
names(prisk) <- names(pscore) <- rownames(data)
return (list("score"=pscore, "risk"=prisk, "mapping"=mymapping, "probe"=myprobe))
}
|
58af675e5355e681f55d60ec09fbda816c666427
|
a985793831427b9706752ae6ea9454e22960aa59
|
/test.r
|
90b355b5e8ade2c7f1a6524d641860c99d84b1cc
|
[] |
no_license
|
kabirahuja2431/SchizophreniaClassification
|
2df893d725fadb79afe2d8cf1c1db19a55f4851d
|
33e9dba173aa723c7373955822190faae1313320
|
refs/heads/master
| 2021-07-07T12:27:02.356318
| 2017-10-05T20:18:11
| 2017-10-05T20:18:11
| 105,685,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 126
|
r
|
test.r
|
## Toy demonstration of function calls and scoping in R.

# fun1: add 10 to its argument and print the result (print returns the
# value invisibly, so fun1 can also be used for its return value).
fun1 <- function(x){
  x = x+ 10
  print(x)
}
# fun2: forward its argument to fun1.
fun2 <-function(y){
  fun1(y)
}
x <- 15
# BUG FIX: `y <= c(100,100,100)` was a comparison (and an error, since y
# was undefined at that point); assignment with `<-` was intended.
y <- c(100,100,100)
fun1(y)
print(y)
|
5d079659c5fd03fcfe2336c66784e71c32c04579
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/gdimap/R/simul.simplefield.R
|
c8e7b3a131a4cb2b1b53896558529db98433f129
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,586
|
r
|
simul.simplefield.R
|
## Simulate a simple glyph field from a built-in mask, estimate its ODFs
## with GQI/GQI2, and visualize glyphs plus vMF-derived fiber directions.
## fmask selects one of six built-in masks (m1..m3 single-layer,
## mx1..mx3 two-layer crossings); ang overrides that mask's default angle.
simul.simplefield <-
function(gdi="gqi", b=3000, sigma=NULL, clusterthr=0.6, logplot=TRUE,
 savedir=tempdir(), fmask="m1", ang=NULL, ...)
{
## glyph field mask
tfmask=c("m1","m2","m3","mx1","mx2","mx3")
## default angles in masks (parallel to tfmask)
amask =c(60,60,60,60,90,45)
mfun <- match(fmask, tfmask)
if(is.null(ang))
ang <- amask[mfun]
## Dispatch on the integer index of the chosen mask.
switch(mfun,
a <- m1(ang),
a <- m2(ang),
a <- m3(ang),
a <- mx1(ang),
a <- mx2(ang),
a <- mx3(ang))
## S2 shell grid (unit-sphere sample directions)
s2 <- s2tessel.zorder(depth=3, viewgrid=FALSE)
g0 <- s2$pc
## swap mask (from top left in row order to column order)
## NOTE(review): `as` shadows base::as inside this function.
if(is.matrix(a)){
nr <- ncol(a); nc <- nrow(a)
as <- matrix(-1,nr,nc)
as[nr:1,1:nc] <- t(a)
}
else
if(is.array(a)) {
dm <- dim(a)
as <- array(0, dim=dm)
as[dm[1]:1, 1:dm[2], ] <- abind::abind(t(a[,,1]), t(a[,,2]), along=3)
} else
stop("Error in mask specification")
## field simulation
gc()
field <- myglyph.synthsimul(as, ang=ang, g0, b=b, sigma=sigma,
logplot=logplot)
## Estimate ODFs
odfs <- fieldtestodf.gqi(gdi=gdi, g0, field, b=b, lambda=NULL,
savedir=savedir)
## Visualize grid of glyphs with color
plotodfvxgrid(g0, field=field, odfsdata=odfs)
## Use movMF for mixture estimation and peak detection
estimate.vmf.lines(g0, field=field, odfsdata=odfs,
showglyph=FALSE, clusterthr=clusterthr, savedir=savedir, ...)
}
## Synthesize diffusion signals for every active cell of a mask.
## `as` is either a matrix (single fiber per voxel; -1 = empty) or a
## 3-D array with two angle layers (crossing fibers; -1 in layer 1 = empty,
## -1 in layer 2 = single fiber). Returns list(S = signal matrix with one
## row per active voxel, mask = the 2-D activity mask).
myglyph.synthsimul <-
function(as, ang=60, g0, b=3000, sigma=NULL, logplot=TRUE)
{
if(is.matrix(as)){
nr <- nrow(as); nc <- ncol(as)
nn <- length(which(as != -1))
} else
if(is.array(as)) {
## NOTE(review): nr/nc are taken from dim[2]/dim[1] (swapped); this only
## works because all built-in array masks are square -- confirm intent.
dd <- dim(as)
nr <- dd[2]; nc <- dd[1]
nn <- length(which(as[,,1] != -1))
} else
stop("Error in field mask specification")
## synthesis
ng0 <- dim(g0)[1]
S <- matrix(0, nn, ng0)
k <- 0
open3d()
for(j in 1:nc) {
for(i in 1:nr) {
# single fiber
if(is.matrix(as)) {
ang <- as[i,j]
if(ang == -1) next
pos <- 2 * c(i,j,0)
sv <- synthfiberss2z(g0=g0, angles=ang, b=b, sigma=sigma,
pos=pos, showglyph=TRUE, new=FALSE, logplot=logplot)
k <- k+1
S[k,] <- sv
mask <- as
}
else {
if(is.array(as)) {
ang1 <- as[i,j,1]
if(ang1 == -1) next
ang2 <- as[i,j,2]
if(ang2 == -1) {
# single fiber within a two-layer mask
# NOTE(review): `mask` is only assigned in the crossing branch below;
# a two-layer mask with no crossing voxels would leave mask undefined.
pos <- 2 * c(i,j,0)
sv <- synthfiberss2z(g0=g0, angles=ang1, b=b, sigma=sigma,
pos=pos, showglyph=TRUE, new=FALSE, logplot=logplot)
k <- k+1
S[k,] <- sv
} else {
# crossing fibers: pass both angles
pos <- 2 * c(i,j,0)
sv <- synthfiberss2z(g0=g0, angles=c(ang1,ang2), b=b, sigma=sigma,
pos=pos, showglyph=TRUE, new=FALSE, logplot=logplot)
k <- k+1
S[k,] <- sv
mask <- as[,,1]
}
}
}
}
}
list(S=S, mask=mask)
}
##--------------------------
## Estimate an ODF for every active voxel of a simulated field using the
## GQI ("gqi") or GQI2 ("gqi2") reconstruction matrix; each ODF is shifted
## so its minimum is zero. Writes the b-table to savedir/simtable.txt and
## invisibly returns the ODF matrix (one row per active voxel).
fieldtestodf.gqi <-
function(gdi="gqi", grad, field, b=3000, lambda=NULL,
savedir=tempdir())
{
cat("estimating field odfs ...\n")
gdimethods <- c("gqi", "gqi2")
gdimethod <- match(gdi, gdimethods)
sfield <- field$S
dv <- dim(sfield)
mask <- field$mask
dm <- dim(mask)
nr <- dm[1]; nc <- dm[2]
odfs <- matrix(0, dv[1], dv[2])
## b-table: constant b-value column followed by the gradient directions.
bn <- rep(b, dim(grad)[1])
btable <- as.matrix(cbind(bn, grad))
##-----------------------------
## Build the reconstruction matrix for the selected method.
cat("Estimating slice odfs ...\n")
switch(gdimethod,
q2odf <- gqifn(odfvert=grad, btable=btable,
lambda=lambda),
q2odf <- gqifn2(odfvert=grad, btable=btable,
lambda=lambda) )
##-----------------------------
## Reconstruct one ODF per active (mask != -1) voxel, column-major order.
k <- 0
for(j in 1:nc) {
for(i in 1:nr) {
if(mask[i,j] == -1) next
k <- k+1
odf <- as.vector(q2odf%*%sfield[k,])
odf <- odf - min(odf)
odfs[k,] <- odf
}
}
f <- file.path(savedir,"simtable.txt")
write(t(btable), file=f, ncolumns=4)
cat("wrote",f,"\n")
invisible(odfs)
}
#--------------------------
## Render the whole field as a grid of ODF glyphs (rgl triangles), one
## glyph per active voxel, colored by direction scaled by the voxel's GFA.
plotodfvxgrid <-
function(pc0, field, odfsdata)
{
mask <- field$mask
dm <- dim(mask)
nr <- dm[1]; nc <- dm[2]
## GFA per voxel from the 0-1 normalized ODFs
odfs.reg <- t(apply(odfsdata, 1 , norm01))
gfas <- apply(odfs.reg, 1, genfa)
## Triangulate the sphere sample points once; reused for every glyph.
tc <- geometry::delaunayn(pc0)
tc.surf <- t( surf.tri(pc0,tc) )
dt2 <- dim(pc0[tc.surf,])[1]
d1 <- dim(odfsdata)[1]
sgrid <- matrix(0, nrow=d1*dt2, ncol=3)
vcolors <- matrix(0, nrow=d1, ncol=dt2)
k <- 0
cat("Running ...\n")
for(j in 1:nc) {
for(i in 1:nr) {
if(mask[i,j] == -1) next
k <- k+1
odf <- odfs.reg[k,]
gk <- gfas[k]
## RGB channels: |direction| * GFA, renormalized per vertex
zch <- pc0*gk
zch <- t(apply(abs(zch),1,norm01))
ck <- rgb(zch)
## Deform the sphere by the ODF and place it at the voxel position.
pc <- pc0 * as.vector(odf)
pc <- pc / (2*max(pc))
pos <- 2 * c(i,j,0)
pcsurf <- cbind(
pc[tc.surf,1]+pos[1], pc[tc.surf,2]+pos[2], pc[tc.surf,3]+pos[3])
b <- (k-1)*dt2; e <- b+dt2
sgrid[(b+1):e, ] <- pcsurf
vcolors[k,] <- ck[tc.surf]
}
}
cat("Plotting ...\n")
# rgl.open()
open3d()
rgl.viewpoint(theta=0, phi=0)
rgl.triangles(sgrid[,1], sgrid[,2], sgrid[,3], col=t(vcolors))
rgl.viewpoint(0,0)
}
#--------------------------
#
# Estimate vMF mixture and principal distribution directions (PDDs)
#
## Fit von Mises-Fisher mixtures (movMF) to the high-valued ODF directions
## of each active voxel, select 2 vs 4 components by BIC, extract up to two
## principal fiber directions per voxel, write GFA/V1/V2 NIfTI volumes to
## savedir, and draw the direction segments with rgl.
## NOTE(review): the showglyph argument is accepted but never used in this
## body -- confirm whether it was meant to toggle glyph rendering.
estimate.vmf.lines <-
function(pc0, field, odfsdata, showglyph=FALSE, clusterthr=0.6, savedir=tempdir(), ...)
{
## Frobenius norm of a vector (used to normalize mixture directions).
normvf <- function(x) { norm(matrix(x,length(x),1),"f") }
## control parameters for movMF, overridable through ...
E <- list(...)[["E"]]
if (is.null(E)) E <- "softmax"
kappa <- list(...)[["kappa"]]
if (is.null(kappa)) kappa <- "Newton_Fourier"
minalpha <- list(...)[["minalpha"]]
if (is.null(minalpha)) minalpha <- 8
start <- list(...)[["start"]]
if (is.null(start)) start <- "s"
startctl=list(E=E, kappa=kappa, minalpha=minalpha, start=start) ## movMF inits
##
mask <- field$mask
## GFA per voxel from the 0-1 normalized ODFs
odfs.reg <- t(apply(odfsdata, 1 , norm01))
gfas <- apply(odfs.reg, 1, genfa)
tc <- geometry::delaunayn(pc0)
tc.surf <- t( surf.tri(pc0,tc) )
## ------------
## Preallocate segment endpoints (2 per direction, up to 4 directions).
d1 <- dim(odfsdata)[1]
nn <- 8*d1
v <- matrix(0, nrow=nn, ncol=3)
ck <- numeric(nn)
q <- 1
m <- 0
cat("running ... \n")
## Active voxel coordinates (row, col) in the mask.
## z2d <- which(mask != 0, arr.ind=TRUE)
z2d <- which(mask != -1, arr.ind=TRUE)
lix <- dim(z2d)[1]
d <- dim(mask)
##
v1perslice <- matrix(0, nrow=lix,ncol=3) # v1 directions
v2perslice <- matrix(0, nrow=lix,ncol=3) # v2 directions
volgfa <- array(0, dim=c(dim(mask),1)) ## gfas map
V1 <- array(0, dim=c(dim(mask),1, 3)) ## V1 vol
V2 <- array(0, dim=c(dim(mask),1, 3)) ## V2 vol
for(m in 1:lix) {
odf <- odfs.reg[m,]
gk <- gfas[m]
## Keep only sphere directions whose ODF value reaches clusterthr.
ith <- which(odf < clusterthr)
vx <- pc0[-ith,]
n <- dim(vx)[1]
nc <- dim(vx)[2]
kc <- 1:8
npar <- nc*kc+kc-1
## BIC model selection between k = 2 and k = 4 mixture components
## (antipodal pairs: one fiber = 2 components, crossing = 4).
bic <- -1.0e+10; nf <- 0; yy <- NULL
for(k in seq(2,4,by=2)) {
y2 <- movMF::movMF(vx, k=k, control=startctl)
par <- logb(n)*npar[k]
bic2 <- 2*logLik(y2) - par
if(bic2 > bic) {
bic <- bic2
nf <- k
yy <- y2
}
}
np <- dim(yy$theta)[1]
## pcoords <- yy$theta/max(yy$theta)
pk <- list(np=np , pcoords=t(yy$theta)) # no scaling
v1perslice[m,] <- pk$pcoords[,1]
if(np == 4) {
## !!! alternative option: use identical alpha values for selection
## Pick the second distinct direction: component 3 if components 1 and 2
## are the same direction up to sign, otherwise component 2.
if(all.equal(abs(pk$pcoords[,1]), abs(pk$pcoords[,2]), toler=0.01)
== TRUE)
v2perslice[m,] <- pk$pcoords[,3]
else
v2perslice[m,] <- pk$pcoords[,2]
}
## reorder: flip v1 to the +x half-space for sign consistency
vref <- c(1,0,0)
c1 <- crossprod(v1perslice[m,], vref)
if(c1 < 0)
v1perslice[m,] <- -v1perslice[m,]
if(np == 4) {
## Make v1 the direction most aligned with the x axis; swap otherwise.
vref <- c(1,0,0)
c1 <- crossprod(v1perslice[m,], vref)
c2 <- crossprod(v2perslice[m,], vref)
if(abs(c2) > abs(c1)) {
tmp <- v1perslice[m,]
v1perslice[m,] <- v2perslice[m,]
v2perslice[m,] <- tmp
}
}
## V volume: scatter per-voxel vectors/GFA back onto the 2-D grid
for(k in 1:3) { # axial
mx <- matrix(0, d[1],d[2])
mx[z2d] <- v1perslice[,k]
V1[,,1,k] <- mx
mx <- matrix(0, d[1],d[2])
mx[z2d] <- v2perslice[,k]
V2[,,1,k] <- mx
mx <- matrix(0, d[1],d[2])
mx[z2d] <- gfas
volgfa[,,1] <- mx
}
##----------------------------
## 1st direction of max odf values for odfs
pc <- pc0 * as.vector(odf)
pc <- pc / (2*max(pc))
# pos <- c(j,i,0)
pos <- c(z2d[m,], 0)
## normalize mixture directions to unit length for visualization
mx <- apply(yy$theta, 1, normvf)
coords <- t(yy$theta/mx)
## Emit one line segment per mixture component (at most 4), colored by
## |direction| * GFA.
for(k in 1:min(pk$np, 4)) {
zch <- coords[,k] * gk
zch <- t(norm01(abs(zch)))
ck[q] <- rgb(zch)
ck[q+1] <- ck[q]
pp <- coords[,k]/2
v[q,] <- pos
v[q+1,] <- pp/2 + pos
q <- q+2
}
}
cat("\n")
## Persist GFA and the two direction volumes as NIfTI files.
f <- paste(savedir,"/data_gfa",sep="")
writeNIfTI(volgfa, filename=f, verbose=TRUE)
cat("wrote",f,"\n")
f <- paste(savedir,"/data_V1",sep="")
writeNIfTI(V1, filename=f, verbose=TRUE)
cat("wrote",f,"\n")
f <- paste(savedir,"/data_V2",sep="")
writeNIfTI(V2, filename=f, verbose=TRUE)
cat("wrote",f,"\n")
##--
open3d()
cat("plotting ... \n")
segments3d(v[1:(q-1),], col=ck[1:(q-1)], lwd=2, alpha=1)
rgl.viewpoint(0,0)
rgl.bringtotop()
##
}
##--------------------------
## Built-in field masks
m1 <- function(ang = 60) {
  ## Uniform single-fiber mask: every cell of a 3x3 grid holds `ang`.
  invisible(matrix(ang, nrow = 3, ncol = 3))
}
m2 <- function(ang = 60) {
  ## 3x3 fan mask: column 1 = 180 - ang, column 2 = 90, column 3 = ang.
  grid <- matrix(-1, 3, 3)
  grid[, 1] <- 180 - ang
  grid[, 2] <- 90
  grid[, 3] <- ang
  invisible(grid)
}
m3 <- function(ang = 60) {
  ## 5x5 bending mask: columns 1-2 = 120, column 3 = 90, columns 4-5 = ang.
  grid <- matrix(-1, 5, 5)
  grid[, 1:2] <- 120
  grid[, 3] <- 90
  grid[, 4:5] <- ang
  invisible(grid)
}
mx1 <- function(ang) {
  ## 4x4 two-layer crossing mask: rows 2-3 of layer 1 are horizontal (0),
  ## diagonal corners hold `ang`, and layer 2 marks the crossing region.
  ## Cells left at -1 are empty.
  vol <- array(-1, dim = c(4, 4, 2))
  vol[2:3, 1:4, 1] <- 0    # horizontal band
  vol[1, 3:4, 1] <- ang    # upper-right diagonal tail
  vol[4, 1:2, 1] <- ang    # lower-left diagonal tail
  vol[2:3, 2:3, 2] <- ang  # second fiber population in the crossing
  invisible(vol)
}
mx2 <- function(ang = 90) {
  ## 13x13 two-layer cross mask: a horizontal band of 0s (rows 6-8),
  ## a vertical band of `ang` (columns 6-8) above and below it, and the
  ## second fiber population marked where the two bands intersect.
  vol <- array(-1, dim = c(13, 13, 2))
  vol[6:8, , 1] <- 0        # horizontal band
  vol[1:5, 6:8, 1] <- ang   # vertical band, upper part
  vol[9:13, 6:8, 1] <- ang  # vertical band, lower part
  vol[6:8, 6:8, 2] <- ang   # crossing region (layer 2)
  invisible(vol)
}
mx3 <- function(ang = 45) {
  ## 10x10 two-layer branching mask: a horizontal bundle of 0s, a diagonal
  ## branch of `ang` above it, a mirrored branch of 360 - ang below it, and
  ## the second fiber population (layer 2) where the branches cross the
  ## bundle. Cells left at -1 are empty.
  vol <- array(-1, dim = c(10, 10, 2))
  neg <- 360 - ang
  # horizontal bundle (layer 1)
  vol[4:7, 1:6, 1] <- 0
  vol[5:6, , 1] <- 0
  # upper diagonal branch
  vol[1, 8:9, 1] <- ang
  vol[2, 7:8, 1] <- ang
  vol[3, 6:7, 1] <- ang
  # lower diagonal branch (mirrored angle)
  vol[8, 6:7, 1] <- neg
  vol[9, 7:8, 1] <- neg
  vol[10, 8:9, 1] <- neg
  # crossing region (layer 2)
  vol[4, 5:6, 2] <- ang
  vol[5, 4:5, 2] <- ang
  vol[6, 4:5, 2] <- neg
  vol[7, 5:6, 2] <- neg
  invisible(vol)
}
#--------------------------
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.