blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a3a46c4b9922368ae0cceda28b0022ee50a04e1a
|
2113b1ef587481e9c458e115bc049edaf88ad719
|
/man-roxygen/path.R
|
99cadb5eb6095fd80b37448d510f92e44763705f
|
[] |
no_license
|
KasperSkytte/rdrop2
|
bc236a4ecb74867a01b9caf914e6af295384e47e
|
3b6084187835bb457cc7974b534bb9f2f3e37696
|
refs/heads/master
| 2022-04-09T21:53:04.706863
| 2020-03-10T11:24:04
| 2020-03-10T11:24:04
| 105,997,225
| 0
| 0
| null | 2017-10-06T11:29:27
| 2017-10-06T11:29:27
| null |
UTF-8
|
R
| false
| false
| 94
|
r
|
path.R
|
#' @param path Required. The path to the new folder to create, relative to
#'   root.
|
52e24e579c737434285d5e028b928d0a59450a21
|
f2d761d35e668ea5ce95ad82b65a06edd91441b8
|
/ReadData.R
|
25de570f05f86e5fd16f137b57095cf876b6ae67
|
[] |
no_license
|
lucia-tellez/MScDissertation_Canon
|
dba696a9102eeea960f5411649a46a8030f7e4df
|
1199683082cf7f5f3ff5abb3916cc74476e01247
|
refs/heads/main
| 2023-07-11T13:37:08.289178
| 2021-08-16T22:11:12
| 2021-08-16T22:11:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,595
|
r
|
ReadData.R
|
# PRELIMINARY ANALYSIS OF THE FOCUSED DATASET
# Loads the focused expression dataset from the INCISE project, links it to
# the Batch1 metadata, builds a per-sample targets data frame, gives a quick
# overview of the cohort (polyp type, recurrence, ...) and runs a PCA.
##Load libraries
library(limma)
library(scatterplot3d)
library(stringr)
library(ggplot2)
library(edgeR)
##Load INCISE data (file paths are relative to the current working directory)
# NOTE(review): PrunedData is read in but never used in the visible code --
# confirm whether it is needed before removing the line.
PrunedData <- read.table(file = 'INCISE_Batch1_PrunedExpression.tsv', sep = '\t', header = TRUE)
FocusedData <- read.table(file = 'BCL-CO0005b_gene_counts_Focused area_training_samples.tsv', sep = '\t', header = TRUE)
Metadata <- read.table(file = 'INCISE_Batch1_Metadata.tsv', sep = '\t', header = TRUE)
load_data_df <- function(FocusedData, Metadata) {
  # Re-index the expression matrix by its id column and drop lowly-expressed
  # genes; returns the filtered expression data frame (samples in columns).
  #
  # NOTE(review): the original version ended with four consecutive return()
  # calls, of which only the first ever executed -- Metadata2, the 10% row
  # quantiles (via rowQuantiles(), which comes from the never-loaded
  # matrixStats package) and the per-sample column sums were all dead code.
  # The dead statements have been removed; the function returns exactly what
  # it returned before.
  #
  # Args:
  #   FocusedData: data frame; first column holds gene ids, remaining
  #     columns hold raw counts, one column per sample.
  #   Metadata: data frame; retained in the signature for backward
  #     compatibility, currently unused here.
  # Returns:
  #   FocusedData re-indexed by gene id with low-count rows removed.

  # Move the id column into the rownames so genes can be looked up by name.
  FocusedData2 <- FocusedData[, -1]
  rownames(FocusedData2) <- FocusedData[, 1]

  # Drop genes whose total count across all samples is <= 10 (this also
  # removes genes not expressed in any sample).
  FocusedData2 <- FocusedData2[rowSums(FocusedData2) > 10, ]

  # Apply edgeR's expression filter on top of the simple count filter.
  keep.exprs <- filterByExpr(FocusedData2, min.count = 10)
  FocusedData2 <- FocusedData2[keep.exprs, ]

  FocusedData2
}
create_targets_df <- function(Metadata2, FocusedData2) {
  # Build a per-sample "targets" data frame linking each focused-dataset
  # sample to its clinical metadata.
  #
  # Sample column names are assumed to be "<7-char INCISE code><sep><annotation>"
  # -- TODO confirm against the raw TSV headers.
  #
  # NOTE(review): the original body subset Metadata2 by an undefined variable
  # `intersection` (its defining line was commented out) and discarded the
  # result, then had an unreachable second return(); both defects removed.
  # The returned value is unchanged.
  #
  # Args:
  #   Metadata2:    metadata data frame with INCISE codes as rownames.
  #   FocusedData2: expression data frame with one column per sample.
  # Returns:
  #   Data frame, one row per sample: Sample, INC_Code, Annotation,
  #   recurrence/lesion flags, adenoma-type flags and a numeric `patient`
  #   blocking factor; truncated to the first 156 samples.

  Sample <- colnames(FocusedData2)
  INC_Code <- substr(colnames(FocusedData2), start = 1, stop = 7)
  Annotation <- str_sub(colnames(FocusedData2), 9, -1)

  # Metadata lookups are by INCISE code (rownames of Metadata2).
  FUTURE_POLYP_OR_CRC <- Metadata2[INC_Code, ]$FUTURE_POLYP_OR_CRC
  FUTURE_LESION <- Metadata2[INC_Code, ]$FUTURE_LESION
  dysplasia <- Metadata2[INC_Code, ]$dysplasia
  aden_tubular <- Metadata2[INC_Code, ]$aden_tubular
  aden_villous <- Metadata2[INC_Code, ]$aden_villous
  aden_tubulovillous <- Metadata2[INC_Code, ]$aden_tubulovillous
  aden_serrated <- Metadata2[INC_Code, ]$aden_serrated

  # Merge everything into one targets data frame.
  Targets_df <- data.frame(Sample, INC_Code, Annotation, FUTURE_POLYP_OR_CRC,
                           FUTURE_LESION, dysplasia, aden_tubular, aden_villous,
                           aden_tubulovillous, aden_serrated)

  # Numeric patient id, used as a blocking factor in the DE analysis.
  Targets_df <- transform(Targets_df, patient = as.numeric(factor(INC_Code)))

  # Collapse duplicated section labels into one level per tissue type.
  Targets_df$Annotation <- as.character(Targets_df$Annotation)
  Targets_df["Annotation"][Targets_df["Annotation"] == "Dyspl.Epi.1"] <- "Dysplasia"
  Targets_df["Annotation"][Targets_df["Annotation"] == "Dyspl.Epi.2"] <- "Dysplasia"
  Targets_df["Annotation"][Targets_df["Annotation"] == "Interface.1"] <- "Interface"
  Targets_df["Annotation"][Targets_df["Annotation"] == "Interface.2"] <- "Interface"

  # Hard-coded sample count -- TODO confirm 156 matches the focused dataset.
  Targets_df <- Targets_df[1:156, ]
  Targets_df
}
fdata_graphs <- function(Targets_df, Metadata2, intersection, FocusedData2){
# Exploratory plots for the focused dataset, then a 3D PCA of the samples.
# Args:
#   Targets_df:   per-sample targets data frame (see create_targets_df).
#   Metadata2:    metadata indexed by INCISE code.
#   intersection: logical/index vector selecting the metadata rows that
#                 correspond to focused samples (computed by the caller).
#   FocusedData2: filtered expression data frame, one column per sample.
# Called for its plotting side effects; no useful return value.
##Create simple graphs to get a view of the samples from the Focused dataset
TableAnnot <- table(Targets_df$Annotation)
# Labels show the annotation name and its sample count on separate lines.
lbls <- paste(names(TableAnnot), "\n", TableAnnot, sep="")
pie(TableAnnot, labels = lbls,
main="Pie Chart of number of samples for each polyp section")
##Barplot for polyp sections
barplot(TableAnnot, main = "Proportion of dysplastic/normal sections in focused samples",
ylab = "Number of samples")
##Barplot for polyp types
# NOTE(review): count() is not base R (it comes from plyr/dplyr, neither of
# which is loaded in this script) -- confirm the intended package, otherwise
# this call will fail or dispatch to an unintended function.
barplot(c(count(Metadata2[intersection,]$aden_tubulovillous), count(Metadata2[intersection,]$aden_tubular),
count(Metadata2[intersection,]$aden_serrated), count(Metadata2[intersection,]$aden_villous)),
names.arg = c("tubulovillous", "tubular", "serrated", "villous"),
main = "Proportion of polyp types in focused samples")
##Barplot for recurrence
TableRecurrence <- table(Metadata2[intersection,]$FUTURE_POLYP_OR_CRC)
barplot(TableRecurrence, main = "Recurrence of polyps in focused samples", names.arg = c("No", "Yes"))
##Assign a colour per tissue type for the PCA plot by matching column names.
# NOTE(review): these grep() patterns start with "*" and use unescaped dots,
# so they are not the literal matches they look like -- confirm they select
# the intended columns (the authors' own comment flags this for improvement).
Dyspl1 <- grep("*Dyspl.Epi.1",colnames(FocusedData2))
names(Dyspl1) <- rep("red", length(Dyspl1))
Dyspl2 <- grep("*Dyspl.Epi.2",colnames(FocusedData2))
names(Dyspl2) <- rep("red", length(Dyspl2))
Interface1 <- grep("*Interface.1",colnames(FocusedData2))
names(Interface1) <- rep("blue", length(Interface1))
Interface2 <- grep("*Interface.2",colnames(FocusedData2))
names(Interface2) <- rep("blue", length(Interface2))
Normal <- grep("*Normal.1",colnames(FocusedData2))
names(Normal) <- rep("green", length(Normal))
NotAnnotated <- grep("*Not.Annotated" ,colnames(FocusedData2))
names(NotAnnotated) <- rep("yellow", length(NotAnnotated))
# Sort by column index so colours line up with the sample order in the PCA.
Types <- c(Dyspl1, Dyspl2, Interface1, Interface2, Normal, NotAnnotated)
Types <- sort(Types)
Colours <- names(Types)
## Perform PCA (samples as rows, hence the transpose; variables scaled)
pca <- prcomp(t(FocusedData2), scale=T)
# Plot the first three principal components in 3D, one point per sample.
#png("PCA_FocusedDataset.png")
s3d<-scatterplot3d(pca$x[,1:3], pch=19, color=Colours)
s3d.coords <- s3d$xyz.convert(pca$x[,1:3])
text(s3d.coords$x, s3d.coords$y, labels = colnames(FocusedData2),pos = 3,offset = 0.5, col=Colours)
#dev.off()
}
## PERFORM DE ANALYSIS WITH EDGER
simpleDE_EdgeR <- function(FocusedData2, Targets_df) {
# Classic (exact-test) edgeR differential-expression analysis between the
# "Dysplasia" and "Normal.1" annotation groups. Writes three PNG diagnostic
# plots to the working directory as a side effect.
# Args:
#   FocusedData2: filtered count data frame; the hard-coded 1:156 column
#     slice mirrors the 156-sample truncation in create_targets_df --
#     TODO confirm it stays in sync.
#   Targets_df: targets data frame supplying the per-sample group labels.
##Simple DE analysis normal vs recurrence
#Create a DGElist object from a table of counts, grouped by tissue annotation
DElist <- DGEList(counts=FocusedData2[,1:156], group=Targets_df$Annotation)
#Estimate normalization factors
DElist = calcNormFactors(DElist)
# CAN'T GET THIS GRAPH TO WORK
#plotMDS(DElist)
#plotMDS(DElist, labels = Targets_df$Sample,
# col = c("darkgreen","blue")[factor(Targets_df$FUTURE_POLYP_OR_CRC)])
# Estimate common then tagwise (per-gene) dispersions.
DElist <- estimateCommonDisp(DElist)
DElist <- estimateTagwiseDisp(DElist)
png("MeanVar.png")
plotMeanVar(DElist, show.tagwise.vars = TRUE, NBline = TRUE)
dev.off()
png("plotBCV.png")
plotBCV(DElist)
dev.off()
#Simple DE analysis (normal vs. dysplasia)
de = exactTest(DElist, pair = c("Dysplasia","Normal.1"))
tt = topTags(de, n = nrow(DElist))
# NOTE(review): head() inside a function discards its value silently -- this
# line has no effect; it presumably dates from interactive use.
head(tt$table, 200)
rn1 = rownames(tt$table)
# Genes significant at 5% FDR, highlighted in the smear plot below.
deg1 = rn1[tt$table$FDR < .05]
#Plot smear plot of results
png("smear_normaldyspl.png")
plotSmear(de, de.tags = deg1)
dev.off()
}
#Build a more complex DE analysis
#Multilevel analysis. Section 3.5 EdgeR User's Guide
multilvl_design_EdgeR <- function(Targets_df) {
  # Construct the design matrix for the multi-level edgeR analysis: a
  # per-patient baseline from model.matrix(~Patient) plus four
  # annotation-by-recurrence contrast columns (edgeR User's Guide, sec. 3.5).
  #
  # NOTE(review): the original ended with `output(design)` -- no such
  # function exists, so the call always failed at runtime; it now returns
  # the matrix. Two unused factor locals (Annotation, Recurrence) were also
  # removed.
  #
  # Args:
  #   Targets_df: targets data frame with `patient`, `Annotation` and
  #     `FUTURE_POLYP_OR_CRC` columns (see create_targets_df()).
  # Returns:
  #   Numeric design matrix: patient-blocking columns followed by the four
  #   0/1 contrast columns.
  Patient <- factor(Targets_df$patient)

  # Per-patient blocking design.
  design <- model.matrix(~Patient)

  # Contrasts of interest: tissue annotation crossed with recurrence status.
  Dysplasia.recurrent <- Targets_df$Annotation == "Dysplasia" & Targets_df$FUTURE_POLYP_OR_CRC == "1"
  Dysplasia.nonrecurrent <- Targets_df$Annotation == "Dysplasia" & Targets_df$FUTURE_POLYP_OR_CRC == "0"
  Normal.recurrent <- Targets_df$Annotation == "Normal.1" & Targets_df$FUTURE_POLYP_OR_CRC == "1"
  Normal.nonrecurrent <- Targets_df$Annotation == "Normal.1" & Targets_df$FUTURE_POLYP_OR_CRC == "0"

  design <- cbind(design, Dysplasia.recurrent, Dysplasia.nonrecurrent,
                  Normal.recurrent, Normal.nonrecurrent)
  design
}
complexDE_EdgeR <- function(FocusedData2, design) {
  # Fit the multi-level quasi-likelihood GLM and test the
  # "Dysplasia.recurrent" contrast; writes a smear plot PNG and a CSV of the
  # top tags to the working directory.
  #
  # NOTE(review): in the original file this function had an empty body `{}`
  # and all of the code below sat at TOP LEVEL, where it referenced
  # FocusedData2 and design -- variables that exist only inside functions --
  # and therefore errored when the script was sourced. The body has been
  # moved inside the function; the statements themselves are unchanged.
  #
  # Args:
  #   FocusedData2: filtered count data frame (first 156 columns used, in
  #     sync with create_targets_df -- TODO confirm).
  #   design: design matrix from multilvl_design_EdgeR().
  # Returns:
  #   Invisibly, the topTags result for the tested contrast.

  # Account for type of tissue and recurrence.
  ComplexDE <- DGEList(counts = FocusedData2[, 1:156])
  # Estimate normalizing factors.
  ComplexDE <- calcNormFactors(ComplexDE)
  # Estimate dispersion values using CR-adjusted likelihood.
  DElist2 <- estimateGLMTrendedDisp(ComplexDE, design)
  DElist2 <- estimateGLMTagwiseDisp(DElist2, design)
  # Fit a quasi-likelihood GLM to each feature and test the contrast.
  fitcomplex <- glmQLFit(DElist2, design)
  drf <- glmQLFTest(fitcomplex, coef = "Dysplasia.recurrent")
  ttcomp <- topTags(drf, n = nrow(DElist2))
  rn2 <- rownames(ttcomp$table)
  # Genes significant at 5% FDR, highlighted in the smear plot.
  deg2 <- rn2[ttcomp$table$FDR < .05]
  # Plot smear plot of results.
  png("smear_normaldyspl_complex.png")
  plotSmear(drf, de.tags = deg2)
  dev.off()
  write.csv(ttcomp$table, file = "toptags_edgeR_recurrence.csv")
  invisible(ttcomp)
}
|
5d0afde27731f83ac41834831e0ef0a1dd19a593
|
02f053ce70b065724d4a02619fb402adcc0ec997
|
/analysis/boot/boot272.R
|
69fb1f35688a62ffc948bf4988c46b570ff94ea1
|
[] |
no_license
|
patperry/interaction-proc
|
27950482929240bba55c7d0f2f8c5235d770feea
|
cf8dfd6b5e1d0684bc1e67e012bf8b8a3e2225a4
|
refs/heads/master
| 2021-01-01T06:11:47.125853
| 2012-12-04T20:01:42
| 2012-12-04T20:01:42
| 673,564
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,739
|
r
|
boot272.R
|
# Bootstrap replicate 272 -- frozen inputs and fitted outputs of one model run.
# NOTE(review): the semantics of these fields are defined by the analysis code
# that consumes this file; the comments below describe only what is visible
# here and should be confirmed against that code.
seed <- 272
log.wt <- 0.0
# Penalty value used when fitting this replicate -- confirm its role
# (e.g. ridge/L2 strength) in the fitting code.
penalty <- 2.8115950178536287e-8
# Interval boundaries for send/receive history effects; the send grid is
# empty for this run and the receive grid grows roughly geometrically.
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Null and residual deviance with their degrees of freedom, plus model df.
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226762.8292980991
df.resid <- 35402
df <- 165
# Fitted coefficient vector for this replicate.
coefs <- c(6.565496465893954, 5.895089344100779, 5.659273989558366, 5.358599598824139, 5.091318229800197, 4.857499373043105, 4.681102328261127, 4.632757617844498, 4.363241710904923, 4.180967902883708, 4.225444712144356, 4.114376221842504, 3.9529131250755714, 3.9425516934773492, 3.7130828202030117, 3.506899189309275, 3.2206655871762644, 2.9052679145855786, 2.5233839701225143, 2.071069542330318, 1.4301954487197972, 0.9152609855595014, 1.1460965433103014, 0.4812231821202749, 0.6600475783481806, -1.065325075646758, -0.3908132107810523, 0.9429642379753083, 1.0881612116737494, -1.145999897166572, -2.32198837017751, -1.8706543311127775, -0.46489102430496393, 0.7809504998349726, 1.4117799769409387, -1.0745591286428366, -0.2836404280836104, -2.036438630739936, 0.22952019419317052, -1.0586671136822083, 0.8725421870799133, 0.9372933834488324, -0.6897890018078252, -2.4095909249208076, -1.1043949161749178, -0.6629430542221896, -0.6423327287396278, -7.124201260751241e-2, 0.6907388981489758, -0.35127243564943705, 0.16585890189190897, 0.9920621617984167, -2.3112279571765373, 1.7241213927533647, 0.8340200893940102, 0.9571417944866937, -2.1398467846839346, 0.15354380361311393, -0.5906775735953512, 1.2860143766209415, 1.0062697311967306, 0.5961737020834922, -1.7709781896303882, -1.1175111135270315, -0.707399794010194, 0.4459930365356798, 0.542384796713595, -0.47362464684930733, -0.7704734843413473, -0.746285969578917, -2.358262526162656, -0.7272722014703177, 0.7866962305202065, 1.118268224174749, 0.6380023062989507, -0.6603032949747663, -1.2638964421228969, -1.1031239806105535, -3.0662151349709482e-2, 0.5786019403276345, 1.1338476707558756, 9.238440160174183e-2, 0.18517746648356334, -1.524673722420738, -0.8925268068044646, 0.4266212572392485, 1.266565025320326, 0.47754890370420777, 0.9052781927784751, -2.4373565508242074, 0.5233318127863926, 0.7810560727660827, 0.7501890361109492, 0.2871710886680135, 0.2650236833850191, 1.187049197216349, -0.3232433893296004, 0.18133887039323404, 
-0.4714375478776917, 0.12921879215749632, 0.2489849611122438, -0.1565894980556751, 0.7493464166001985, 0.12415327145297692, 0.7009884721922712, 0.8636622065912476, 1.1964585172191862, -0.2156195999291156, -0.2375019459998251, -0.6544854528418356, 0.4567189339207747, 0.6642117336727364, 1.6179796787643064, -1.1929119197071645, -0.1818520706368213, -0.9330693680179815, 0.8048716553838303, -0.3379853073978237, 0.4883921338925344, 0.6501813154529937, -1.1755321659412699, -0.3057562990573016, -1.3295469588057978, -0.5741138371486004, 0.3516240175401379, 0.9272389253571006, -2.4195994208833484e-2, 0.9032696852353367, -0.7882726243123247, -0.5503667854105676, 0.2746987745281913, 1.047071630883958, 0.9399955228878013, 0.5360800539056482, 0.1824790730634599, 1.0516808651407532, -0.349496284750203, 1.0532178366397986, 0.6916954242292741, 1.0284191730863814, 0.8384420306512181, -0.7280908585894718, -1.2763880658878033, 0.8140301201185698, 0.4275032414657133, 0.607623281533227, -0.17227628045189017, -0.7745590605518217, -2.0040824106785564, 1.3020549162458939, 8.419277417394912e-2, 1.1870929139158326, -0.13134412958200956, -6.730101066982998e-2, -9.996620137432191e-2, -1.8522031573425706, -1.5988064664088626, 0.9471725046057299, 1.1959077528581084, -7.253227456446688e-2, 1.5515326397893312, -0.35393764461660326, -0.17648372571669455, 0.12720441962995085, 1.170884098305351)
|
ade1357da4365c4cefcfa5cddb28e7826a0ba40c
|
2f6c3c28dcc624c098abb33d95384abcbad5330c
|
/tests/testthat/test-ruigi_task.R
|
91c7acaf196e9382bd0e87a014a414eca2f3769c
|
[] |
no_license
|
kirillseva/ruigi
|
498011243c0fe9995ee0cc3b19e7998e60724130
|
aaa9eaa4e692ec8897535c4ccbe2eb89a625947e
|
refs/heads/master
| 2020-12-24T16:31:41.892098
| 2019-05-26T10:04:10
| 2019-05-26T10:04:10
| 35,401,422
| 46
| 8
| null | 2019-05-26T10:04:12
| 2015-05-11T04:06:21
|
R
|
UTF-8
|
R
| false
| false
| 1,180
|
r
|
test-ruigi_task.R
|
context('Unit tests for ruigi_task')
test_that('A simple unit test for one node', {
  # Stage an input CSV for the requirement and reserve a path for the target.
  src_path <- tempfile()
  dst_path <- tempfile()
  write.csv(data.frame(a = 1, b = 2, c = 3), src_path, row.names = FALSE)

  # A task that simply copies its single requirement into its target.
  task <- ruigi_task$new(
    requires = list(CSVtarget$new(src_path)),
    target = CSVtarget$new(dst_path),
    runner = function(requires, target) {
      payload <- requires[[1]]$read()
      target$write(payload)
    }
  )

  # The requirement is already fulfilled (the scheduler would ensure this)...
  expect_true(task$requires[[1]]$exists())
  # ...while the target has not been created yet.
  expect_false(task$target$exists())

  # Running the task materializes the target...
  task$runner(task$requires, task$target)
  expect_true(task$target$exists())
  # ...with contents identical to the requirement, as intended.
  expect_identical(task$target$read(), task$requires[[1]]$read())

  unlink(src_path)
  unlink(dst_path)
})
test_that('Need a valid target', {
  # Constructing a task whose `target` is a plain character vector (not a
  # ruigi target object) must raise an error.
  expect_error(
    ruigi_task$new(
      requires = list(Rtarget$new("yay")),
      target = c("Error"),
      runner = function(requires, target) {
        payload <- requires[[1]]$read()
        target$write(payload)
      }
    )
  )
})
|
f6f88bbd05acb99d0471a820ca70e352c4c49d3c
|
c4c38220e0d9aebaf6558a70029823cfb2cc51b5
|
/data/MAAS.R
|
ed3623e02df3e9c995cb6995b5201f00e845e915
|
[
"MIT"
] |
permissive
|
ihrke/2020-ftrsgt-paper
|
7eba8234833109aec10880336139f0aaae3cb505
|
d2e59b402fdf27dcb09e8adc8bec5b10bfcb2db1
|
refs/heads/master
| 2022-11-14T07:40:48.492173
| 2020-07-02T12:18:56
| 2020-07-02T12:18:56
| 276,603,905
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48
|
r
|
MAAS.R
|
# Load the raw MAAS questionnaire data (presumably the Mindful Attention
# Awareness Scale -- confirm) from the project's raw-data folder; the path
# is relative to the project root.
MAAS <- readxl::read_xlsx("data/raw/MAAS.xlsx")
|
702c675433ed32873240a70b6756d78a5dfcd708
|
2ffdfc3b88c5f8d20711d7f413b1bd515d7832e6
|
/functions/LeagueSettings.R
|
2d7736123a942c1e7026a98ea70f723d35e335b3
|
[] |
no_license
|
binaryfever/fantasyhockey
|
9fdb01894746f2cf64b406ccb0a3f052877f43b2
|
7171c7f49fbf1f0c6719dccbbc180e80340f007e
|
refs/heads/master
| 2021-08-16T16:37:19.146988
| 2017-11-20T04:36:08
| 2017-11-20T04:36:08
| 111,063,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,479
|
r
|
LeagueSettings.R
|
###########################
# File: LeagueSettings.R
# Description: User sets league settings
# Author: Fred McHale
# Notes: with inspiration from Isaac Petersen (isaac@fantasyfootballanalytics.net)
# To do: Determine scoring rules
###########################
#Roster
# Number of starting-lineup slots per position.
# NOTE(review): these four variables are re-assigned near the bottom of this
# file (replacement-level section) -- keep the two places in sync.
numGoalies <- 1
numDefense <- 1
numForwards <- 3
numTotalPlayers <- 5
#League settings
#Variable names
# Column-name vocabularies used when assembling the projection tables.
prefix <- c("name","pos","sourceName")
# Columns that are specific to each projection source.
sourceSpecific <- c("name","team")
# Goalie scoring categories tracked by the league.
scoreCategories <- c("Goalie_Wins", "Goals_Against_Average", "SV_Percent")
# Columns computed downstream (ranks, projected points, value over
# replacement, draft pick, risk and their spreads).
calculatedVars <- c("positionRank","overallRank","points","pointsLo","pointsHi","vor","pick","risk","sdPts","sdPick")
varNames <- c(calculatedVars, scoreCategories)
finalVarNames <- c("name","pos","team","sourceName","player","playerID","season", "playerId", "analystId", varNames)
#Scoring
# Points awarded per statistic. The named multipliers below are the single
# source of truth; scoringRules is built from them so the two can never
# drift apart (previously the data frames repeated the same numbers as
# literals).
goal_multiplier <- 12 #12 pts per goal
assist_multiplier <- 8 #8 pts per assist
shorthand_goal_bonus_multiplier <- 8 #8 pts bonus for shorthanded goal
shorthand_assist_multiplier <- 6 #6 pts bonus for shorthanded assits
shot_on_goal_multiplier <- 2 #2 points for shots on goal
hit_multiplier <- 2 #2 points for hits
goalie_win_multiplier <- 12 #12 pts per goalie win
save_multiplier <- 0.5 #half a point per goalie save
# Tiered points by goals conceded in a game (fewer conceded = more points).
zero_goals_against_multiplier <- 12
one_goal_against_multiplier <- 8
two_goals_against_multiplier <- 4
three_goals_against_multiplier <- 0
four_goals_against_multiplier <- -3
five_plus_goals_against_multiplier <- -6
# Scoring rules keyed by position group: G = goalies, DandF = skaters.
scoringRules <- list(
  G = data.frame(dataCol = c("goalWins", "saves", "zeroGoalsAgainst", "oneGoalsAgainst", "twoGoalsAgainst", "threeGoalsAgainst", "fourGoalsAgainst", "fivePlusGoalsAgainst"),
                 multiplier = c(goalie_win_multiplier, save_multiplier,
                                zero_goals_against_multiplier, one_goal_against_multiplier,
                                two_goals_against_multiplier, three_goals_against_multiplier,
                                four_goals_against_multiplier, five_plus_goals_against_multiplier)),
  DandF = data.frame(dataCol = c("goal", "assist", "shorthandGoalBonus", "shorthandAssistBonus", "shotOnGoal", "hit"),
                     multiplier = c(goal_multiplier, assist_multiplier,
                                    shorthand_goal_bonus_multiplier, shorthand_assist_multiplier,
                                    shot_on_goal_multiplier, hit_multiplier))
)
#Projections
# Projection sources in use; the long commented lists show the full menu of
# sources supported by the upstream fantasy-football tooling this file was
# adapted from.
#c("CBS", "ESPN", "Yahoo") #c("Accuscore", "CBS1", "CBS2", "EDSfootball", "ESPN", "FantasyFootballNerd", "FantasyPros", "FantasySharks", "FFtoday", "Footballguys1", "Footballguys2", "Footballguys3", "Footballguys4", "FOX", "NFL", "numberFire", "WalterFootball", "Yahoo")
sourcesOfProjections <- c("ESPN") #, "Dodds-Norton", "Dodds", "Tremblay", "Herman", "Henry", "Wood", "Bloom")
sourcesOfProjectionsAbbreviation <- c("espn") #c("accu", "cbs1", "cbs2", "eds", "espn", "ffn", "fp", "fs", "fftoday", "fbg1", "fbg2", "fbg3", "fbg4", "fox", "nfl", "nf", "wf", "yahoo")
#Weights applied to each source in calculation of weighted average of projections
#weight_accu <- 1 #Accuscore
#weight_cbs1 <- 1 #Jamey Eisenberg
#weight_cbs2 <- 1 #Dave Richard"
#weight_eds <- 1 #EDS Football
weight_espn <- 1 #ESPN
#weight_ffn <- 1 #Fantasy Football Nerd
#weight_fbg1 <- 1 #Footballguys: David Dodds
#weight_fbg2 <- 1 #Footballguys: Bob Henry
#weight_fbg3 <- 1 #Footballguys: Maurile Tremblay
#weight_fbg4 <- 1 #Footballguys: Jason Wood
#weight_fox <- 1 #FOX
#weight_fp <- 1 #FantasyPros
#weight_fs <- 1 #FantasySharks
#weight_fftoday <- 1 #FFtoday
#weight_nfl <- 1 #NFL.com
#weight_nf <- 1 #numberFire
#weight_wf <- 1 #WalterFootball
#weight_yahoo <- 1 #Yahoo
# Named weight vector consumed downstream; only ESPN is active.
sourceWeights <- c(
# "Jamey Eisenberg" = 1,
# "Dave Richard" = 1,
# "Yahoo Sports" = 1,
"ESPN" = 1
# "NFL" = 1,
# "FOX Sports" = 1,
# "FFtoday" = 1,
# "NumberFire" = 1,
# "FantasyPros" = 1,
# "Dodds-Norton" = 1,
# "Dodds" = 1,
# "Tremblay" = 1,
# "Herman" = 1,
# "Henry" = 1,
# "Wood" = 1,
# "Bloom" = 1
)
#Number of players at each position drafted in Top 100 (adjust for your league)
#qbReplacements <- 15
#rbReplacements <- 37
#wrReplacements <- 36
#teReplacements <- 11
#Alternative way of calculating the number of players at each position drafted in Top 100 based on league settings
# NOTE(review): numGoalies/numDefense/numForwards re-assign the variables
# defined at the top of this file with the same values -- keep in sync.
numTeams <- 10 #number of teams in league
numGoalies <- 1 #number of avg goalies in starting lineup
numDefense <- 1 #number of avg defensemen in starting lineup
numForwards <- 3 #number of avg forwards in starting lineup
# Replacement-level cutoffs per position; print() echoes each value to the
# console when the file is sourced (print returns its argument, so the
# assignments still receive the computed numbers).
goalieReplacements <- print(ceiling(numGoalies*numTeams*1.7))
defenseReplacements <- print(ceiling(numDefense*numTeams*1.4))
forwardReplacements <- print(ceiling(numForwards*numTeams*1.4))
|
5c88ca205f7a55a07eebb5f1042ee5e64620e0f1
|
e987d486b9f5ae338cad881e4bbb42b45dd20ced
|
/上课/回归分析/期中/analysis/1.stepwise.R
|
4050c26253cc9ec3f45ae415d06ec3d1646b6164
|
[] |
no_license
|
wlmnzf/courses
|
75fb85bee72836a52c08056f6401b47cf3c2ca1f
|
d9af176bfb6a3d9ab6e0cdaee73f12cd419f6cde
|
refs/heads/master
| 2020-03-29T18:24:45.550501
| 2020-01-17T01:11:02
| 2020-01-17T01:11:02
| 150,210,893
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
r
|
1.stepwise.R
|
#1.stepwise.R
# Regression analysis: stepwise variable selection for the midterm dataset.
library(readxl)
data <- read_excel("data.xls")
#rm(xlsx_example)
# x49, x50, x51 are dummy variables coding negative / no / positive influence.
#
# NOTE(review): the original formula listed x34 and x17 twice and omitted
# x24 and x27 (almost certainly transcription typos); corrected so the model
# regresses y on each of x1..x51 exactly once.
tlm<-lm(y~x1+x2+x3+x4+x5+x6+x7+x8+x9+x10+
x11+x12+x13+x14+x15+x16+x17+x18+x19+x20+
x21+x22+x23+x24+x25+x26+x27+x28+x29+x30+
x31+x32+x33+x34+x35+x36+x37+x38+x39+x40+
x41+x42+x43+x44+x45+x46+x47+x48+x49+x50+
x51,data=data)
#summary(tlm)
# Stepwise selection by AIC, then report the selected model.
tstep<-step(tlm)
drop1(tstep)
summary(tstep)
|
8d0e86a7a1944797ea6b90feacf2a73b3c5d0a5b
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/SAM/R/samHL.R
|
5ceb467d669b75909b9bd140b26681feb06383d4
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,842
|
r
|
samHL.R
|
#-----------------------------------------------------------------------#
# Package: SAM                                                          #
# Method: Sparse Additive Modelling using Hinge Loss                    #
#-----------------------------------------------------------------------#
#' Training function of Sparse Additive Machine
#'
#' The classifier is learned using training data.
#'
#' We adopt various computational algorithms including the block coordinate descent, fast iterative soft-thresholding algorithm, and newton method. The computation is further accelerated by "warm-start" and "active-set" tricks.
#'
#' @param X The \code{n} by \code{d} design matrix of the training set, where \code{n} is sample size and \code{d} is dimension.
#' @param y The \code{n}-dimensional label vector of the training set, where \code{n} is sample size. Labels must be coded in 1 and -1.
#' @param p The number of basis spline functions. The default value is 3.
#' @param lambda A user supplied lambda sequence. Typical usage is to have the program compute its own lambda sequence based on nlambda and lambda.min.ratio. Supplying a value of lambda overrides this. WARNING: use with care. Do not supply a single value for lambda. Supply instead a decreasing sequence of lambda values. samHL relies on its warms starts for speed, and its often faster to fit a whole path than compute a single fit.
#' @param nlambda The number of lambda values. The default value is 20.
#' @param lambda.min.ratio Smallest value for lambda, as a fraction of lambda.max, the (data derived) entry value (i.e. the smallest value for which all coefficients are zero). The default is 0.4.
#' @param thol Stopping precision. The default value is 1e-5.
#' @param mu Smoothing parameter used in approximate the Hinge Loss. The default value is 0.05.
#' @param max.ite The number of maximum iterations. The default value is 1e5.
#' @return
#' \item{p}{
#' The number of basis spline functions used in training.
#' }
#' \item{X.min}{
#' A vector with each entry corresponding to the minimum of each input variable. (Used for rescaling in testing)
#' }
#' \item{X.ran}{
#' A vector with each entry corresponding to the range of each input variable. (Used for rescaling in testing)
#' }
#' \item{lambda}{
#' A sequence of regularization parameter used in training.
#' }
#' \item{w}{
#' The solution path matrix (\code{d*p+1} by length of \code{lambda}) with each column corresponding to a regularization parameter. Since we use the basis expansion with the intercept, the length of each column is \code{d*p+1}.
#' }
#' \item{df}{
#' The degree of freedom of the solution path (The number of non-zero component function)
#' }
#' \item{knots}{
#' The \code{p-1} by \code{d} matrix. Each column contains the knots applied to the corresponding variable.
#' }
#' \item{Boundary.knots}{
#' The \code{2} by \code{d} matrix. Each column contains the boundary points applied to the corresponding variable.
#' }
#' \item{func_norm}{
#' The functional norm matrix (\code{d} by length of \code{lambda}) with each column corresponds to a regularization parameter. Since we have \code{d} input variables, the length of each column is \code{d}.
#' }
#' @seealso \code{\link{SAM}},\code{\link{plot.samHL},\link{print.samHL},\link{predict.samHL}}
#' @examples
#'
#' ## generating training data
#' n = 200
#' d = 100
#' X = 0.5*matrix(runif(n*d),n,d) + matrix(rep(0.5*runif(n),d),n,d)
#' y = sign(((X[,1]-0.5)^2 + (X[,2]-0.5)^2)-0.06)
#'
#' ## flipping about 5 percent of y
#' y = y*sign(runif(n)-0.05)
#'
#' ## Training
#' out.trn = samHL(X,y)
#' out.trn
#'
#' ## plotting solution path
#' plot(out.trn)
#'
#' ## generating testing data
#' nt = 1000
#' Xt = 0.5*matrix(runif(nt*d),nt,d) + matrix(rep(0.5*runif(nt),d),nt,d)
#'
#' yt = sign(((Xt[,1]-0.5)^2 + (Xt[,2]-0.5)^2)-0.06)
#'
#' ## flipping about 5 percent of y
#' yt = yt*sign(runif(nt)-0.05)
#'
#' ## predicting response
#' out.tst = predict(out.trn,Xt)
#' @useDynLib SAM grpSVM
#' @export
samHL = function(X, y, p=3, lambda = NULL, nlambda = NULL, lambda.min.ratio = 0.4, thol=1e-5, mu = 5e-2, max.ite = 1e5){
  gcinfo(FALSE)
  # NOTE(review): the original initialised `fit` twice (the second
  # `fit = list()` wiped the first `fit$p` assignment, which was then set
  # again later); the redundancy has been removed with no behavior change.
  fit = list()
  fit$p = p
  X = as.matrix(X)
  y = as.vector(y)
  n = nrow(X)
  d = ncol(X)
  m = d*p
  # Labels must be coded as +1 / -1.
  np = sum(y==1)
  nn = sum(y==-1)
  if((np+nn)!=n){
    cat("Please check the labels. (Must be coded in 1 and -1)")
    fit = "Please check the labels."
    return(fit)
  }
  # Intercept initialisation, shifted towards the majority class.
  if(np>nn) a0 = 1-nn/np*mu else a0 = np/nn*mu - 1
  # Rescale every input variable to [0, 1]; min and range are stored so that
  # predict.samHL() can apply the identical rescaling to test data.
  X.min = apply(X,2,min)
  X.max = apply(X,2,max)
  X.ran = X.max - X.min
  X.min.rep = matrix(rep(X.min,n),nrow=n,byrow=TRUE)
  X.ran.rep = matrix(rep(X.ran,n),nrow=n,byrow=TRUE)
  X = (X-X.min.rep)/X.ran.rep
  fit$X.min = X.min
  fit$X.ran = X.ran
  # Natural-spline basis expansion: p columns per input variable.
  Z = matrix(0,n,m)
  # BUG FIX: the knots were stored under the misspelled field `nkots`, but
  # predict.samHL() reads `object$knots` (as the documentation describes),
  # which was therefore always NULL. Store them under `knots`.
  fit$knots = matrix(0,p-1,d)
  fit$Boundary.knots = matrix(0,2,d)
  for(j in 1:d){
    tmp = (j-1)*p + c(1:p)
    tmp0 = ns(X[,j],df=p)
    Z[,tmp] = tmp0
    fit$knots[,j] = attr(tmp0,'knots')
    fit$Boundary.knots[,j] = attr(tmp0,'Boundary.knots')
  }
  # Backward compatibility: keep the old misspelled field as an alias in
  # case external code ever read it.
  fit$nkots = fit$knots
  # Fold the labels into the design (hinge loss works on y * f(x)); the last
  # column carries y itself for the intercept term.
  Z = cbind(matrix(rep(y,m),n,m)*Z,y)
  if(is.null(lambda)){
    # Data-derived lambda path: from lambda_max (all coefficients zero) down
    # to lambda.min.ratio * lambda_max on a log-spaced grid.
    u = cbind((rep(1,n) - a0*y)/mu,rep(0,n),rep(1,n))
    u = apply(u,1,median)
    if(is.null(nlambda)) nlambda = 20
    lambda_max = max(sqrt(colSums(matrix(t(Z[,1:(p*d)])%*%u,p,d)^2)))
    lambda = exp(seq(log(1),log(lambda.min.ratio),length=nlambda))*lambda_max
  }
  else nlambda = length(lambda)
  # Step-size constant for the smoothed hinge loss passed to the C solver.
  L0 = norm(Z,'f')^2/mu
  out = .C("grpSVM", Z = as.double(Z), lambda = as.double(lambda), nnlambda = as.integer(nlambda), LL0 = as.double(L0), nn = as.integer(n), dd = as.integer(d), pp = as.integer(p),aa0 = as.double(a0), xx = as.double(matrix(0,m+1,nlambda)), mmu = as.double(mu), mmax_ite = as.integer(max.ite), tthol = as.double(thol),aalpha = as.double(0.5),df=as.double(rep(0,nlambda)),func_norm=as.double(matrix(0,d,nlambda)),PACKAGE="SAM")
  fit$lambda = out$lambda
  fit$w = matrix(out$xx,ncol=nlambda)
  fit$df = out$df
  fit$func_norm = matrix(out$func_norm,ncol=nlambda)
  rm(out,X,y,Z,X.min.rep,X.ran.rep)
  class(fit) = "samHL"
  return(fit)
}
#' Printing function for S3 class \code{"samHL"}
#'
#' Summarize the information of the object with S3 class \code{samHL}.
#'
#' The output reports the length of the regularization path and the degrees
#' of freedom at its two ends.
#'
#' @param x An object with S3 class \code{"samHL"}
#' @param \dots System reserved (No specific usage)
#' @seealso \code{\link{samHL}}
#' @export
print.samHL = function(x, ...) {
  n_path <- length(x$df)
  cat("Path length:", n_path, "\n")
  cat("d.f.:", x$df[1], "--->", x$df[n_path], "\n")
}
#' Plot function for S3 class \code{"samHL"}
#'
#' This function plots the regularization path (regularization parameter versus functional norm)
#'
#' The horizontal axis is for the regularization parameters in log scale. The vertical axis is for the functional norm of each component.
#'
#' @param x An object with S3 class \code{"samHL"}
#' @param \dots System reserved (No specific usage)
#' @seealso \code{\link{samHL}}
#' @export
plot.samHL = function(x,...){
# NOTE(review): the previous graphical parameters returned by par() are
# captured in a local variable but never restored (e.g. via
# on.exit(par(par))), so the margin settings leak into subsequent plots on
# the same device -- confirm whether that is intended.
par = par(omi = c(0.0, 0.0, 0, 0), mai = c(1, 1, 0.1, 0.1))
# One line per input variable's functional norm; lambda sequence reversed
# so the x-axis (log scale) runs from small to large penalty.
matplot(x$lambda[length(x$lambda):1],t(x$func_norm),type="l",xlab="Regularization Parameters",ylab = "Functional Norms",cex.lab=2,log="x",lwd=2)
}
#' Prediction function for S3 class \code{"samHL"}
#'
#' Predict the labels for testing data.
#'
#' The testing dataset is rescaled to the same range, and expanded by the same spline basis functions as the training data.
#'
#' @param object An object with S3 class \code{"samHL"}.
#' @param newdata The testing dataset represented in a \code{n} by \code{d} matrix, where \code{n} is testing sample size and \code{d} is dimension.
#' @param thol The decision value threshold for prediction. The default value is 0.
#' @param \dots System reserved (No specific usage)
#' @return
#' \item{values}{
#' Predicted decision values also represented in a \code{n} by the length of \code{lambda} matrix, where \code{n} is testing sample size.
#' }
#' \item{labels}{
#' Predicted labels also represented in a \code{n} by the length of \code{lambda} matrix, where \code{n} is testing sample size. }
#' @seealso \code{\link{samHL}}
#' @export
predict.samHL = function(object, newdata, thol = 0, ...){
  gcinfo(FALSE)
  out = list()
  nt = nrow(newdata)
  d = ncol(newdata)
  # Rescale the test data with the training minimum/range, clamping to [0, 1]
  # so values outside the training range do not extrapolate the splines.
  X.min.rep = matrix(rep(object$X.min,nt),nrow=nt,byrow=TRUE)
  X.ran.rep = matrix(rep(object$X.ran,nt),nrow=nt,byrow=TRUE)
  newdata = (newdata-X.min.rep)/X.ran.rep
  newdata = pmax(newdata,0)
  newdata = pmin(newdata,1)
  m = object$p*d
  Zt = matrix(0,nt,m)
  # Expand each variable with the spline basis fitted during training.
  for(j in 1:d){
    tmp = (j-1)*object$p + c(1:object$p)
    Zt[,tmp] = ns(newdata[,j],df=object$p,knots=object$knots[,j],Boundary.knots=object$Boundary.knots[,j])
  }
  # Decision values: basis expansion plus intercept column, times the
  # solution path.
  out$values = cbind(Zt,rep(1,nt))%*%object$w
  # BUG FIX: the original thresholded at 0 regardless of `thol`
  # (sign(out$values-0)); apply the documented threshold instead. The
  # default thol = 0 reproduces the old behavior exactly.
  out$labels = sign(out$values - thol)
  rm(Zt,newdata)
  return(out)
}
|
9201caf43267454e108c520c1b41201eb9a72418
|
b30a7754d83c85a05a9e4d4043f4f86d4b8a146e
|
/R/filter.R
|
3f06d717e6fa685ef705df2d428229092c9b3e06
|
[] |
no_license
|
cran/MetFns
|
ee916cbc684ecd2c8c8bd71241e87925ede01220
|
b0bc3c7c4bd7724c0dc725c1560df6286f278b97
|
refs/heads/master
| 2021-01-17T10:05:52.743380
| 2018-10-13T21:50:12
| 2018-10-13T21:50:12
| 24,534,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,404
|
r
|
filter.R
|
# Filter observation records by any combination of criteria: shower code,
# date range, observer name, geographic box, site, country, magnitude,
# field obstruction (P), solar longitude, radiant altitude (h) and total
# correction (r, C). Criteria left at their pass-through defaults are
# skipped. Returns the filtered data frame.
filter <- function(data, date.start = NULL, date.end = NULL, shw = NULL,
                   lat.low = -90, lat.up = 90, long.low = -180, long.up = 180,
                   fname = NULL, lname = NULL, site = NULL, country = NULL,
                   mag.low = 1, mag.up = 8, P.low = 0, P.up = 90,
                   sol.low = NULL, sol.up = NULL, h.low = 0, h.up = 90,
                   r = NULL, C = NULL) {
  res <- data

  if (!is.null(shw)) {
    res <- filter.shw(res, shw)
  }
  if (!is.null(date.start) && !is.null(date.end)) {
    res <- filter.date(res, date.start, date.end)
  }
  if (!is.null(fname) && !is.null(lname)) {
    res <- filter.obsname(res, fname, lname)
  }
  # Only apply the geographic filter when the box was narrowed from the
  # whole-globe defaults.
  if (lat.low != -90 || lat.up != 90 || long.low != -180 || long.up != 180) {
    res <- filter.gc(res, long.low, long.up, lat.low, lat.up)
  }
  if (!is.null(site)) {
    res <- filter.site(res, site)
  }
  if (!is.null(country)) {
    res <- filter.country(res, country)
  }
  if (mag.low != 1 || mag.up != 8) {
    res <- filter.mag(res, mag.low, mag.up)
  }
  if (P.low != 0 || P.up != 90) {
    res <- filter.P(res, P.low, P.up)
  }
  if (!is.null(sol.low) && !is.null(sol.up)) {
    res <- filter.sol(res, sol.low, sol.up)
  }
  # The altitude filter needs a shower code in addition to narrowed bounds.
  if (!is.null(shw) && (h.low != 0 || h.up != 90)) {
    res <- filter.h(res, shw, h.low, h.up)
  }
  if (!is.null(r) && !is.null(C)) {
    res <- filter.totcor(res, shw, r, C)
  }
  res
}
|
8b8813a3a5b991ff4426b47143c5d89b82383ab4
|
8aa69c336da13f338e944e586171e8bdf0c3f87a
|
/glmmTMB/man/findReTrmClasses.Rd
|
78882b976eb5d6ea6c3c2b54b5209fe0d587d3c5
|
[] |
no_license
|
glmmTMB/glmmTMB
|
4b5612a1cf6ce7567117b3318086fd7b3840e3da
|
0e9d26a02e5e36b74120d8c8a35eae0e0960a73b
|
refs/heads/master
| 2023-08-29T14:39:15.855753
| 2023-08-25T22:28:15
| 2023-08-25T22:28:15
| 40,176,799
| 230
| 63
| null | 2023-09-04T11:10:37
| 2015-08-04T09:53:51
|
HTML
|
UTF-8
|
R
| false
| true
| 269
|
rd
|
findReTrmClasses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reformulas.R
\name{findReTrmClasses}
\alias{findReTrmClasses}
\title{list of specials -- taken from enum.R}
\usage{
findReTrmClasses()
}
\description{
list of specials -- taken from enum.R
}
|
713f9a6f61173bf0f9430c7f164a7608f4a7e59c
|
a71f12d363261c4b67d6f02f5e62a9f82fdf1c0e
|
/code/block_2/code_riccardo_OpenSSH.R
|
1e8c5c4769157e103d028544ab444e0b17481910
|
[] |
no_license
|
thechib12/EOS_telnet
|
46e1c9231919869238cd797a21ecb66994dcc88f
|
22f3b9c6c88c8370ee940117fdaff93793b55eab
|
refs/heads/master
| 2020-07-30T16:14:00.566229
| 2019-10-21T10:43:49
| 2019-10-21T10:43:49
| 210,287,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
code_riccardo_OpenSSH.R
|
#---> OpenSSH version status <---
# Load csv file
scans <- read.csv("~/Downloads/telnetscan.csv")
# Select all the rows with OpenSSH in the banner column.
# fixed = TRUE: match the literal substring, not a regular expression.
os <- dplyr::filter(scans, grepl("OpenSSH", banner, fixed = TRUE))
## Number of unique IPs running OpenSSH
os_unique <- unique(os[, 2])
length(os_unique)
## Per-version counts of unique IPs
os_vers <- data.frame()
## IPs already attributed to a (higher) version
os_done <- data.frame()
# IPs running 7.0 <= x <= 7.7; iterate from the highest minor version down so
# each IP is attributed to the highest version found in its banner.
# BUG FIX: the original pattern paste("7.", i) treated "." as a regex
# wildcard, so e.g. "7.0" also matched "740" or "7x0"; the dot is now escaped
# and the match anchored to the "OpenSSH_" version token.
# NOTE(review): assumes banners contain the literal "OpenSSH_<version>" token
# (e.g. "SSH-2.0-OpenSSH_7.4") -- confirm against the scan data.
for (i in 7:0) {
  os_num <- dplyr::filter(os, grepl(paste0("OpenSSH_7\\.", i), banner))
  os_num_unique <- unique(os_num[, 2])
  os_num_unique <- setdiff(os_num_unique, os_done)
  os_vers <- rbind(os_vers, length(os_num_unique))
  os_done <- union(os_done, os_num_unique)
}
# IPs running OpenSSH versions before 7.
# BUG FIX: the original pattern '1.|2.|3.|4.|5.|6.' matched any of those
# digits followed by ANY character anywhere in the banner (e.g. the "2." in
# "SSH-2.0-..."), so 7.x/8.x hosts could be miscounted as pre-7.
os_prev7 <- dplyr::filter(os, grepl("OpenSSH_[1-6]\\.", banner))
os_prev7_unique <- unique(os_prev7[, 2])
os_prev7_unique <- setdiff(os_prev7_unique, os_done)
os_vers <- rbind(os_vers, length(os_prev7_unique))
|
802a930b67e4febc708493b0313358502037403a
|
a9550fbcc10bdda5cc6ab8a985a0e8c15c98c100
|
/R/treePlot.R
|
29f6c30eb612b06c1003506c99aca8270267f9d4
|
[] |
no_license
|
markrobinsonuzh/treeAGG
|
fa6a7705b1a0d74dfedbb18a43d309a818f237ef
|
e26b9401369ddc3682677d644232cc0205e5fdeb
|
refs/heads/master
| 2021-03-16T07:44:55.688468
| 2019-03-23T14:07:19
| 2019-03-23T14:07:19
| 98,645,726
| 2
| 1
| null | 2019-03-23T14:07:20
| 2017-07-28T12:04:06
|
R
|
UTF-8
|
R
| false
| false
| 23,307
|
r
|
treePlot.R
|
#' Visualize the phylogenetic tree
#'
#' \code{treePlot} visualizes a phylogenetic tree.
#'
#' @param tree A phylo object
#' @param branch A vector of node numbers labels to specify the branches to be
#' colored. Each branch is represented by its branch node. A leaf node
#' reprents the edge connecting the leaf and its parent.
#' @param col.branch A vector of colors. Its length should be one or equals to
#' the length of \strong{branch}. If \strong{col.branch} has the same length
#' as \strong{branch}, the branches are colored correspondingly with the
#' \strong{col.branch}. The default is blue.
#' @param col.other A color for the branches other than those specified in
#' \strong{branch}
#' @param point A vector of node numbers or node labels to specify the
#' locations to add points in the tree
#' @param col.point A color for the \strong{point}. It has length equal to one.
#' @param size.point The size for the \strong{point}. It has length equal to
#' one.
#' @param zoomNode A vector of nodes to be zoomed in. If default (NULL), the
#' tree is not zoomed in.
#' @param zoomLevel A numeric vector. Its length is equal to 1 or equal to the
#' length of \strong{zoomNode}. If default (NULL), a leaf is zoomed in its
#' direct parent level and an internal node is zoomed in its own level.
#' @param zoomScale A numeric vector. Its length is equal to one or equal to the
#' length of \strong{zoomNode}. If \strong{zoomScale} has the same length as
#' \strong{zoomNode}, the branches are zoomed in with different scales
#' corresponding to the value of \strong{zoomScale}. If default (NULL), tree
#' is not zoomed in.
#' @param legend TRUE or FALSE. Default is FALSE. If TRUE, the legend is
#' created.
#' @param legend.theme A list of arguments used for the theme in ggplot2 package
#' (see \code{\link[ggplot2]{theme}} ) and starting with "legend."
#' @param legend.title A vector to specify the title of the legend. It must be
#' named with "branch" and "point" to match with the argument \strong{branch}
#' and \strong{point}.
#' @param legend.label A list with three members: "col.branch", "col.other", and
#' "col.point". The elements order in each member matches with the
#' corresponding argument \strong{col.branch}, \strong{col.other} and
#' \strong{col.point}, and will display in the legend.
#' @param size.line.legend The line size shown in the legend for \strong{branch}
#' @param size.point.legend The point size shown in the legend for
#' \strong{point}.
#' @param ... see also \code{\link[ggtree]{ggtree}}
#'
#' @details treePlot is created based on the \pkg{ggtree} and \pkg{ggplot2}. We
#' could combine geoms from these two packages with \code{treePlot} to add
#' geoms.
#'
#' @import ggplot2
#' @import ggtree
#' @export
#' @return A tree plot
#' @author Ruizhu Huang
#' @examples
#'
#' data(bigTree)
#'
#' # If we want to color two branches with branch node 1000 and 1400
#' treePlot(tree = bigTree, branch = c(1000, 1400),
#' zoomNode = 1000, zoomScale = 10)
#'
#'
#' # use col.branch and col.other to specify colors
#' treePlot(tree = bigTree, branch = c(1000, 1400),
#' col.branch = c("salmon", "blue"), col.other = "grey40")
#'
#' # add legend to the colored branches
#' treePlot(tree = bigTree, branch = c(1000, 1400),
#' col.branch = c("salmon", "blue"), col.other = "grey40",
#' legend = TRUE, legend.label = list(col.branch = c("up", "down")))
#'
#' # change legend title
#' p <- treePlot(tree = bigTree, branch = c(1000, 1400),
#' col.branch = c("salmon", "blue"), col.other = "grey40",
#' legend = TRUE,
#' legend.label = list(col.branch = c("Go up", "Go down")),
#' legend.title = c("branch" = "Abundance"))
#'
#' # change legend position (combine with ggplot2 package)
#' library(ggplot2)
#' p + ggplot2::theme(legend.position = "bottom")
#'
#' # change legend position use legend.theme
#' treePlot(tree = bigTree, branch = c(1000, 1400),
#' col.branch = c("salmon", "blue"), col.other = "grey40",
#' legend = TRUE,
#' legend.label = list(col.branch = c("Go up", "Go down")),
#' legend.title = c("branch" = "Truth"),
#' legend.theme = list(legend.position = "bottom"))
#'
#'
#' # add points
#' treePlot(tree = bigTree, branch = c(1000, 1400),
#' col.branch = c("salmon", "blue"), col.other = "grey40",
#' legend = TRUE,
#' legend.label = list(col.branch = c("Go up", "Go down")),
#' legend.title = c("branch" = "Truth"),
#' legend.theme = list(legend.position = "bottom"),
#' point = c(500, 5, 10))
#'
#'
#'# add points label in legend
#' treePlot(tree = bigTree, branch = c(1000, 1400),
#' col.branch = c("salmon", "blue"), col.other = "grey40",
#' legend = TRUE,
#' legend.label = list(col.branch = c("Go up", "Go down"),
#' col.point = "Found"),
#' legend.title = c("branch" = "Truth", "point"= "Estimate"),
#' legend.theme = list(legend.position = "bottom"),
#' point = c(500, 5, 10))
#'
#'
#'# add points label in legend
#' treePlot(tree = bigTree, branch = c(1000, 1400),
#' col.branch = c("salmon", "blue"), col.other = "grey40",
#' legend = TRUE,
#' legend.label = list(col.branch = c("Go up", "Go down"),
#' col.point = "Found", col.other = "Same"),
#' legend.title = c("branch" = "Truth", "point"= "Estimate"),
#' legend.theme = list(legend.position = "bottom"),
#' point = c(500, 5, 10))
#'
treePlot <- function(tree,
                     branch = NULL,
                     col.branch = "blue",
                     col.other = "grey",
                     point = NULL,
                     col.point = "orange",
                     size.point = 2,
                     zoomNode = NULL,
                     zoomLevel = NULL,
                     zoomScale = 8,
                     legend = FALSE,
                     legend.theme = list(NULL),
                     legend.title = c(
                         "point" = "Title_point",
                         "branch" = "Title_branch"),
                     legend.label = NULL,
                     size.line.legend = 2,
                     size.point.legend = 3, size = 1, ...) {
    # Validate input early: every helper below assumes a phylo tree.
    if (!inherits(tree, "phylo")) {
        stop("tree: should be a phylo object")
    }
    # Base tree layer; 'size' sets the line width, '...' is forwarded to
    # ggtree(). Layers are added to 'p' in a fixed order below -- keep it.
    p <- ggtree(tree, size = size, ...)
    # Colour the selected branches; everything else gets col.other.
    if (!is.null(branch)) {
        p <- .addBranch(tree = tree, branch = branch,
                        col.branch = col.branch,
                        col.other = col.other,
                        addTo = p)
    }
    # Mark the requested nodes with points.
    if (!is.null(point)) {
        p <- .addPoint(tree = tree, point = point,
                       col.point = col.point, addTo = p)
    }
    # Size scale for the added points (only the "point" legend title is
    # passed down, hence legend.title["point"]).
    if (!is.null(point)) {
        p <- p +
            .sizeScale(size.point = size.point,
                       legend.label = legend.label,
                       legend.title = legend.title["point"],
                       col.point = col.point,
                       size.point.legend = size.point.legend,
                       legend = legend)
    }
    # Colour scale covering branch / other / point categories.
    if (!is.null(branch)) {
        p <- p +
            .colScale(branch = branch,
                      point = point,
                      col.branch = col.branch,
                      col.other = col.other,
                      col.point = col.point,
                      legend.label = legend.label,
                      legend.title = legend.title,
                      size.line.legend = size.line.legend,
                      legend = legend )
    }
    # Zoom (rescale) the selected clades; done after colouring so the
    # colour mapping is computed on the original layout.
    if (!is.null(zoomNode)) {
        p <- .addZoom(tree = tree, zoomNode = zoomNode,
                      zoomLevel = zoomLevel, zoomScale = zoomScale,
                      addTo = p)
    }
    # Legend theming (position, key sizes, text sizes).
    if (legend) {
        p <- p + .addLegend(legend.theme)
    }
    # Hide the size / colour guides when no label was supplied for them.
    # NOTE(review): guides(size = FALSE) is deprecated in ggplot2 >= 3.3.4
    # in favour of guides(size = "none") -- confirm the targeted version.
    if (is.null(legend.label$col.point)) {
        p <- p + guides(size = FALSE)
    }
    if (is.null(legend.label$col.branch)) {
        p <- p + guides(color = FALSE)
    }
    p
}
#' Color a branch
#'
#' \code{.addBranch} colors a branch or some edges.
#'
#' @param tree A phylo object
#' @param branch A vector of node numbers labels to specify the branches to be
#' colored. Each branch is represented by its branch node. A leaf node
#' reprents the edge connecting the leaf and its parent.
#' @param col.branch A vector of colors. Its length should be one or equals to
#' the length of \strong{branch}. If \strong{col.branch} has the same length
#' as \strong{branch}, the branches are colored correspondingly with the
#' \strong{col.branch}. The default is blue.
#' @param col.other A color for the branches other than those specified in
#' \strong{branch}
#' @param addTo NULL or a plot of a phylo object.
#' @param ... see also \code{\link[ggtree]{ggtree}}
#'
#' @import ggplot2
#' @importFrom ggtree ggtree %<+%
#' @return A figure
#' @author Ruizhu Huang
#' @keywords internal
#' @examples
#' # data(tinyTree)
#' # .addBranch(tree = tinyTree, branch = 17,
#' # col.branch = "blue", col.other = "grey")
.addBranch <- function(tree, branch, col.branch,
                       col.other, addTo = NULL, ...) {
    # Branches may be given as labels; convert them to node numbers.
    if (is.character(branch)) {
        branch <- transNode(tree = tree, input = branch,
                            message = FALSE)
    } else {
        branch <- branch
    }
    # -------------------------------------------------------
    # Build a per-node data frame marking which colour group
    # each node (edge to its parent) belongs to.
    # -------------------------------------------------------
    p <- ggtree(tree)
    d <- p$data[, "node", drop = FALSE]
    # All nodes (internal + leaf) descending from each selected branch,
    # including the branch node itself.
    eList <- findOS(tree = tree, ancestor = branch,
                    only.leaf = FALSE, self.include = TRUE)
    el <- unlist(lapply(eList, length))
    # Order branches from largest clade to smallest so that, when branches
    # are nested, the later (smaller) assignment below overwrites the
    # earlier one and the inner branch keeps its own colour.
    eList <- eList[order(el, decreasing = TRUE)]
    if (length(col.branch) == length(branch)) {
        col.branch <- col.branch[order(el, decreasing = TRUE)]
    }
    # One data frame per branch: node number + its colour value as group.
    # NOTE(review): when length(col.branch) == 1 and there are several
    # branches, mapply recycles the single colour -- presumably intended;
    # confirm.
    dList <- mapply(function(x, y) {
        names(x) <- NULL
        names(y) <- NULL
        cbind.data.frame(node = y, group = x,
                         stringsAsFactors = FALSE)},
        x = col.branch, y = eList, SIMPLIFY = FALSE,
        USE.NAMES = FALSE)
    df <- do.call(rbind, dList)
    # Default every node to the "other" group, then stamp the branch groups
    # on top (later rows win for duplicated nodes, see ordering above).
    Truth <- rep("grp_other", nrow(d))
    Truth[match(df$node, d$node)] <- df$group
    d <- cbind.data.frame(d, Truth = Truth, stringsAsFactors = FALSE)
    # Attach the data to a new or existing ggtree plot and map the colour
    # aesthetic to the group column.
    if (is.null(addTo)) {
        fig <- ggtree(tree, ...)
    } else {
        fig <- addTo
    }
    fig %<+% d + aes(colour = Truth)
}
#' Add points to the tree plot
#'
#' \code{.addPoint} adds points to a plot of phylogenetic tree.
#'
#' @param tree A phylo object
#' @param point A vector of node numbers or node labels to specify the
#' locations to add points in the tree
#' @param col.point A color for the \strong{point}. It has length equal to one.
#' @param addTo NULL or a plot of a phylo object.
#' @param ... see also \code{\link[ggtree]{ggtree}}
#'
#' @import ggplot2
#' @importFrom ggtree ggtree geom_point2
#' @return A figure
#' @author Ruizhu Huang
#' @keywords internal
#' @examples
#' data(tinyTree)
#'
#'
.addPoint <- function(tree, point, col.point,
                      addTo = NULL, ...) {
    # Points may be given as node labels; convert them to node numbers.
    if (is.character(point)) {
        point <- transNode(tree = tree, input = point,
                           message = FALSE)
    }
    # ------------------------------------------------------------------
    # Build a per-node data frame flagging where points should be drawn.
    # ------------------------------------------------------------------
    base <- ggtree(tree)
    nodeDF <- base$data[, "node", drop = FALSE]
    hit <- nodeDF$node %in% point
    nodeDF <- cbind.data.frame(nodeDF,
                               Estimate = ifelse(hit, "YES_Found",
                                                 "NO_Found"),
                               show = hit)
    # Attach the data to a fresh or existing ggtree plot and draw points
    # only on the flagged nodes.
    fig <- if (is.null(addTo)) ggtree(tree, ...) else addTo
    fig %<+% nodeDF +
        geom_point2(aes(subset = show, color = Estimate,
                        size = Estimate))
}
#' Visualize the phylogenetic tree
#'
#' \code{.addZoom} zooms in a phylogenetic tree.
#'
#' @param tree A phylo object
#' @param zoomNode A vector of nodes to be zoomed in. If default (NULL), the
#' tree is not zoomed in.
#' @param zoomLevel A numeric vector. Its length is equal to 1 or equal to the
#' length of \strong{zoomNode}. If default (NULL), a leaf is zoomed in its
#' direct parent level and an internal node is zoomed in its own level.
#'
#' @param zoomScale A numeric vector. Its length is equal to one or equal to the
#' length of \strong{zoomNode}. If \strong{zoomScale} has the same length as
#' \strong{zoomNode}, the branches are zoomed in with different scales
#' corresponding to the value of \strong{zoomScale}. If default (NULL), tree
#' is not zoomed in.
#' @param addTo NULL or a plot of a phylo object.
#' @param ... see also \code{\link[ggtree]{ggtree}}
#'
#' @import ggplot2
#' @importFrom ggtree ggtree %>% scaleClade
#' @return A figure
#' @author Ruizhu Huang
#' @keywords internal
#' @examples
#' # data(tinyTree)
#' # .addZoom(tree = tinyTree, zoomNode = 17,
#' # zoomScale = 3)
.addZoom <- function(tree, zoomNode = NULL, zoomLevel = NULL,
                     zoomScale = NULL, addTo = NULL, ...) {
    # Zoom targets may be given as labels; convert to node numbers.
    if (is.character(zoomNode)) {
        zoomNode <- transNode(tree = tree, input = zoomNode,
                              message = FALSE)
    } else {
        zoomNode <- zoomNode
    }
    # Alias labels are used as names to keep node / level / scale aligned.
    labAlias <- transNode(tree = tree, input = zoomNode,
                          use.alias = TRUE)
    zList <- findOS(tree = tree, ancestor = zoomNode,
                    only.leaf = FALSE, self.include = TRUE)
    if (!is.list(zList)) {zList <- list(zList)}
    names(zList) <- labAlias
    # Number of descendants (incl. self): 1 means the node is a leaf.
    z_len <- unlist(lapply(zList, length))
    # Default zoom level: an internal node is zoomed at its own level (0),
    # a leaf at its direct parent's level (1).
    if (is.null(zoomLevel)) {
        zoomLevel <- ifelse(z_len > 1, 0, 1)
    } else {
        if (length(zoomLevel) == 1) {
            zoomLevel <- rep(zoomLevel, length(zoomNode))
        } else {
            zoomLevel <- zoomLevel
        }
    }
    names(zoomLevel) <- labAlias
    # Default zoom scale is 1 (no rescale).
    # NOTE(review): when zoomScale already has length(zoomNode) > 1, rep()
    # replicates it to length(zoomNode)^2; only the first length(nodZ)
    # entries are indexed below, so the ordering survives -- but worth
    # confirming this is intended.
    if (is.null(zoomScale)) {
        zoomScale <- rep(1, length(zoomNode))
    } else {
        zoomScale <- rep(zoomScale, length(zoomNode))
    }
    # The actual clades to rescale: each target's ancestor at its level.
    nodZ <- findAncestor(tree = tree, node = zoomNode,
                         level = zoomLevel)
    nodLZ <- transNode(tree = tree, input = nodZ, use.alias = TRUE)
    names(nodZ) <- names(zoomScale) <- nodLZ
    # Drop clades nested inside other selected clades; their scale is reset
    # to 1 so only the outermost clade is rescaled.
    nodZW <- rmDesc(node = nodZ, tree = tree, use.alias = TRUE)
    zoomScale[!names(zoomScale) %in% names(nodZW)] <- 1
    if (is.null(addTo)) {
        fig <- ggtree(tree, ...)
    } else {
        fig <- addTo
    }
    # Rescale each selected clade in turn.
    i <- 1
    repeat {
        fig <- fig %>% scaleClade(nodZ[i], scale = zoomScale[i])
        i <- i + 1
        if (i > length(nodZ)) {
            break
        }
    }
    lim <- c(min(fig$data$y), max(fig$data$y))
    ## I reset limits for y because ggtree function use ylim to limit y axis.
    ## This would lead to issues, like points not displayed when zoom in some
    ## branches at the case that layout is circular or radical.
    suppressMessages(fig <- fig + scale_y_continuous(limits = lim))
    fig
}
#' Add legend
#' \code{.addLegend} customizes the legend.
#'
#' @param legend.theme A list of arguments used for the theme in ggplot2 package
#' (see \code{\link[ggplot2]{theme}} ) and starting with "legend."
#'
#' @import ggplot2
#' @importFrom utils modifyList
#' @return a list
#' @author Ruizhu Huang
#' @keywords internal
.addLegend <- function(legend.theme = list(NULL)) {
    # Default legend theming; any entry the caller supplies in
    # legend.theme overrides the corresponding default.
    defaults <- list(legend.position = "right",
                     legend.text = element_text(size = 12),
                     legend.key.size = unit(4, "cm"),
                     legend.key.height = unit(0.4, "cm"),
                     legend.key.width = unit(0.5, "cm"),
                     legend.title = element_text(size = 15))
    # Merge and hand the result to ggplot2::theme().
    do.call(theme, modifyList(defaults, legend.theme))
}
#' Customize the scale
#'
#' \code{.sizeScale} customizes the size scale.
#'
#' @param col.point A color for the \strong{point}. It has length equal to one.
#' @param size.point The size for the \strong{point}. It has length equal to
#' one.
#' @param legend.label A list with three members: "col.branch", "col.other", and
#' "col.point". The elements order in each member matches with the
#' corresponding argument \strong{col.branch}, \strong{col.other} and
#' \strong{col.point}, and will display in the legend. See Examples.
#' @param legend.title A vector to specify the title of the legend. It must be
#' named with "branch" and "point" to match with the argument \strong{branch}
#' and \strong{point}.
#' @param size.point.legend the point size shown in the legend for
#' \strong{point}.
#' @param legend TRUE or FALSE
#'
#' @import ggplot2
#' @importFrom utils modifyList
#' @return ggproto object (Scale)
#' @author Ruizhu Huang
#' @keywords internal
.sizeScale <- function(col.point, size.point,
                       legend.label, legend.title,
                       size.point.legend, legend, ...) {
    # A 'size' value forwarded through '...' is prepended to the scale.
    extra <- list(...)
    if (!is.null(extra$size)) {
        size.point <- c(extra$size, size.point)
    }
    # Without a legend, only the size values themselves matter.
    if (!legend) {
        return(scale_size_manual(values = size.point))
    }
    # Fill the legend-title template so missing entries stay NULL.
    lt <- as.list(legend.title)
    names(lt) <- names(legend.title)
    legend.title <- modifyList(list("branch" = NULL, "point" = NULL), lt)
    # Size scale with a legend entry rendered as a coloured point.
    scale_size_manual(
        values = size.point,
        labels = legend.label$col.point,
        guide = guide_legend(
            title = legend.title$point,
            override.aes = list(shape = 16, color = col.point,
                                size = size.point.legend)))
}
#' Customize the color
#'
#' \code{.colScale} customizes the color scale.
#'
#' @param branch A vector of node numbers labels to specify the branches to be
#' colored. Each branch is represented by its branch node. A leaf node
#' reprents the edge connecting the leaf and its parent.
#' @param point A vector of node numbers or node labels to specify the locations
#' to add points in the tree.
#' @param col.branch A vector of colors. Its length should be one or equals to
#' the length of \strong{branch}. If \strong{col.branch} has the same length
#' as \strong{branch}, the branches are colored correspondingly with the
#' \strong{col.branch}. The default is blue.
#' @param col.other A color for the branches other than those specified in
#' \strong{branch}
#' @param col.point A color for the \strong{point}. It has length equal to one.
#' @param legend.label A list with three members: "col.branch", "col.other", and
#' "col.point". The elements order in each member matches with the
#' corresponding argument \strong{col.branch}, \strong{col.other} and
#' \strong{col.point}, and will display in the legend. See Examples.
#' @param legend.title A vector to specify the title of the legend. It must be
#' named with "branch" and "point" to match with the argument \strong{branch}
#' and \strong{point}.
#' @param size.line.legend the line size shown in the legend for \strong{branch}
#' @param legend TRUE or FALSE. Default is FALSE. If TRUE, the legend is
#' created.
#'
#' @import ggplot2
#' @importFrom utils modifyList tail
#' @importFrom stats setNames
#' @return ggproto object (color)
#' @author Ruizhu Huang
#' @keywords internal
#'
.colScale <- function(branch,
                      point,
                      col.branch,
                      col.other,
                      col.point,
                      legend.label,
                      legend.title,
                      size.line.legend,
                      legend) {
    # colV maps group names (colour value / "grp_other" / "YES_Found") to
    # colours for the scale itself; colG carries the per-entry colours used
    # to override the legend keys.
    if (length(legend.label$col.branch) > length(col.branch)) {
        stop("Same color with different labels. You probably need more colors")
    }
    # Only include the point category when points were added.
    if (is.null(point)) {
        cG <- list(col.branch, col.other)
        names(cG) <- c("col.branch", "col.other")
        colV <- c(col.branch, col.other)
        names(colV) <- c(col.branch, "grp_other")
    } else {
        cG <- list(col.branch, col.other, col.point)
        names(cG) <- c("col.branch", "col.other", "col.point")
        colV <- c(col.branch, col.other, col.point)
        names(colV) <- c(col.branch, "grp_other", "YES_Found")
    }
    if (legend) {
        # A legend without labels cannot be built.
        if (is.null(legend.label)) {
            stop("legend.label isn't provided")
        }
        # Template listing the categories that can appear in the legend;
        # only the ones actually present in cG are kept.
        ll <- list(col.branch = "",
                   col.other = "",
                   col.point = "")
        listG <- listLab <- ll[names(ll) %in% names(cG)]
        listG <- modifyList(listG, cG)
        listLab <- modifyList(listLab, legend.label)
        # Sanity check: the colour vectors and label vectors should share the
        # same duplication pattern, otherwise the pairing below is ambiguous.
        llG <- lapply(listG, FUN = function(x){match(x, unique(x))})
        llLab <- lapply(listLab, FUN = function(x){match(x, unique(x))})
        if(!setequal(llG, llLab)){
            message("\n The legend label isn't correctly specified. \n")
        }
        # Pair each colour with its display label (labels become names).
        namG <- mapply(function(x, y) {
            names(x) <- y
            x
        }, x = setNames(listG, NULL),
        y = setNames(listLab, NULL))
        if (is.list(namG)) {
            colG <- unlist(namG)
        } else {
            colG <- namG
        }
        # Drop exact (colour, label) duplicates.
        colG <- colG[!(duplicated(colG) &
                           duplicated(names(colG)))]
        lab <- names(colG)
        # The point category is drawn by the size scale, so blank out its
        # last label here and render its key without a line.
        ww <- tail(which(lab %in% legend.label$col.point),1)
        lab[ww] <- ""
        lty <- ifelse(lab %in% "", "blank", "solid")
        du <- duplicated(colG) & duplicated(names(colG))
        lab <- ifelse(du, "", lab)
        lty <- ifelse(du, "blank", lty)
        # Fill the legend-title template so missing entries stay NULL.
        ll <- list("branch" = NULL, "point" = NULL)
        lt <- as.list(legend.title)
        names(lt) <- names(legend.title)
        legend.title <- modifyList(ll, lt)
        # Colour scale whose legend keys are coloured lines (no point shape).
        scale_color_manual(
            values = colV,
            labels = lab,
            guide = guide_legend(
                title = legend.title$branch,
                override.aes = list(
                    color = colG,
                    linetype = lty,
                    shape = rep(NA, length(colG)),
                    size = size.line.legend
                )
            )
        )
    } else {
        # No legend requested: plain colour mapping only.
        scale_color_manual(values = colV)
    }
}
|
1c121b6b120d499610fdb7ae7cf5477a2fcc44d6
|
f6e85bb931759bbbbd26ad3760733bca256498b6
|
/Ejercicios 27-10-2020/test.r
|
7fd13fa5bbd6d31b375f69a9267e30a6ad45f41f
|
[] |
no_license
|
Oscarvch03/Statistics
|
559782ca8c9025a6e17fb7649bbc8e1ae5804c8a
|
c62edd16e2aa1adbb42b20d26616335711e8f457
|
refs/heads/master
| 2023-02-15T21:09:59.464733
| 2021-01-21T13:45:16
| 2021-01-21T13:45:16
| 296,684,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 217
|
r
|
test.r
|
# Clear the console by printing the form-feed control character.
clc <- function() {
  cat("\014")
}
clc()
rm(list = ls())  # wipe the workspace (this removes clc itself too)
# Tiny example data set.
x <- c(-2, -1, 0, 1, 2)
y <- c(0, 0, 1, 1, 3)
plot(x, y, main = "Scatterplot")
# Simple linear regression of y on x.
mod <- lm(y ~ x)
summary(mod)
mod$coefficients
# Overlay the fitted regression line.
abline(mod, col = 3, lwd = 3)
|
9bc7c4323fae6e004d86adba9e95826125d8ecba
|
16df2e74139ac1877d59de680a479be381436060
|
/R/get_pc_space.R
|
a27130d4d2e57040f459837540133e917993d23a
|
[] |
no_license
|
cran/oddstream
|
706e837bbc7eed98b263f9ce871821c3214cd213
|
842cb5b3240cf38e1aafebc0bc8cdb904edbce27
|
refs/heads/master
| 2020-12-22T01:47:48.003858
| 2019-12-16T21:00:03
| 2019-12-16T21:00:03
| 236,633,789
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,398
|
r
|
get_pc_space.R
|
#' Define a feature space using the PCA components of the feature matrix
#'
#' @description Define a two dimensional feature space using the first two principal components generated from
#' the fetures matrix returned by \code{extract_tsfeatures}
#' @param features Feature matrix returned by \code{\link{extract_tsfeatures}}
#' @param robust If TRUE, a robust PCA will be used on the feature matrix.
#' @param kpc Desired number of components to return.
#' @return It returns a list with class 'pcattributes' containing the following components:
#' \item{pcnorm}{The scores of the firt kpc pricipal components}
#' \item{center, scale}{The centering and scaling used}
#' \item{rotation}{the matrix of variable loadings (i.e., a matrix whose columns contain the eigenvectors).
#' The function \code{princomp} returns this in the element loadings.}
#' @seealso \code{\link[pcaPP]{PCAproj}}, \code{\link[stats]{prcomp}}, \code{\link{find_odd_streams}},
#' \code{\link{extract_tsfeatures}}, \code{\link{set_outlier_threshold}}, \code{\link{gg_featurespace}}
#' @export
#' @importFrom pcaPP PCAproj
#' @importFrom stats prcomp
#' @examples
#' features <- extract_tsfeatures(anomalous_stream[1:100, 1:100])
#' pc <- get_pc_space(features)
#'
get_pc_space <- function(features, robust = TRUE, kpc = 2) {
  # Project the feature matrix onto its first `kpc` principal components.
  # robust = TRUE uses projection-pursuit PCA (pcaPP::PCAproj); otherwise
  # classical PCA on the centred and scaled features (stats::prcomp).
  if (robust) {
    pc <- pcaPP::PCAproj(features, k = ncol(features), scale = sd, center = mean)
    scores <- pc$scores
    rotation <- pc$loadings[, 1:ncol(features)]
  } else {
    pc <- stats::prcomp(features, center = TRUE, scale. = TRUE)
    scores <- pc$x
    rotation <- pc$rotation
  }
  # BUG FIX: column names were hard-coded to c("PC1", "PC2"), which raised an
  # error for any kpc != 2; name the retained components generically instead.
  # drop = FALSE keeps a one-column matrix when kpc == 1.
  pcnorm <- scores[, seq_len(kpc), drop = FALSE]
  colnames(pcnorm) <- paste0("PC", seq_len(kpc))
  # Keep the centering/scaling and full rotation so new data can be projected.
  out <- list(pcnorm = pcnorm, center = pc$center, scale = pc$scale,
              rotation = rotation)
  class(out) <- "pcoddstream"
  return(out)
}
#' Produces a ggplot object of two dimensional feature space.
#'
#' @description Create a ggplot object of two dimensional feature space using the first two
#' pricipal component returned by \code{\link{get_pc_space}}.
#' @param object Object of class \dQuote{\code{pcoddstream}}.
#' @param ... Other plotting parameters to affect the plot.
#' @return A ggplot object of two dimensional feature space.
#' @export
#' @seealso \code{\link{find_odd_streams}}, \code{\link{extract_tsfeatures}}, \code{\link{get_pc_space}},
#' \code{\link{set_outlier_threshold}}
#' @import ggplot2
#' @importFrom tibble as_tibble
#' @examples
#' features <- extract_tsfeatures(anomalous_stream[1:100, 1:100])
#' pc <- get_pc_space(features)
#' p <- gg_featurespace(pc)
#' p + ggplot2::geom_density_2d()
gg_featurespace <- function(object, ...) {
  # Guard clause: ggplot2 is a hard requirement for this plot.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("ggplot2 is needed for this function to work. Install it via
         install.packages(\"ggplot2\")", call. = FALSE)
  }
  # Scores of the first two principal components.
  scores <- tibble::as_tibble(object$pcnorm[, 1:2])
  # Scatter plot of the 2-D feature space, square aspect ratio.
  ggplot2::ggplot(data = scores,
                  ggplot2::aes_(x = ~PC1, y = ~PC2)) +
    ggplot2::geom_point(color = "cornflowerblue", size = 2, alpha = 0.8) +
    ggplot2::theme(aspect.ratio = 1) +
    ggplot2::labs(title = "Two dimensional feature space")
}
|
96139165f313c648a9f88a5a576ff6a20576f2db
|
79895359002f52487622b90653d1095c82cdf380
|
/man/afterhours_line.Rd
|
85d9b241bf7466cb16efd14ad79ae7ad38fd6061
|
[
"MIT"
] |
permissive
|
juliajuju93/wpa
|
a8c46c0c07641367e4c2dcb6c609745489c5d130
|
25d362318187930d942f457b4ae45d48921101c9
|
refs/heads/main
| 2023-03-18T21:23:14.780984
| 2020-12-03T16:53:14
| 2020-12-03T16:53:14
| 319,579,572
| 0
| 0
|
NOASSERTION
| 2020-12-08T08:49:12
| 2020-12-08T08:49:11
| null |
UTF-8
|
R
| false
| true
| 1,722
|
rd
|
afterhours_line.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/afterhours_line.R
\name{afterhours_line}
\alias{afterhours_line}
\title{After-hours Collaboration Time Trend - Line Chart}
\usage{
afterhours_line(data, hrvar = "Organization", mingroup = 5, return = "plot")
}
\arguments{
\item{data}{A Standard Person Query dataset in the form of a data frame.}
\item{hrvar}{HR Variable by which to split metrics, defaults to "Organization"
but accepts any character vector, e.g. "LevelDesignation"}
\item{mingroup}{Numeric value setting the privacy threshold / minimum group size. Defaults to 5.}
\item{return}{Character vector specifying what to return, defaults to "plot".
Valid inputs are "plot" and "table".}
}
\value{
Returns a ggplot object by default, where 'plot' is passed in \code{return}.
When 'table' is passed, a summary table is returned as a data frame.
}
\description{
Provides a week by week view of after-hours collaboration time, visualized as line charts.
By default returns a line chart for after-hours collaboration hours,
with a separate panel per value in the HR attribute.
Additional options available to return a summary table.
}
\details{
Uses the metric \code{After_hours_collaboration_hours}.
See \code{create_line()} for applying the same analysis to a different metric.
}
\examples{
## Return a line plot
afterhours_line(sq_data, hrvar = "LevelDesignation")
## Return a table
afterhours_line(sq_data, hrvar = "LevelDesignation", return = "table")
}
\seealso{
Other After-Hours:
\code{\link{afterhours_dist}()},
\code{\link{afterhours_fizz}()},
\code{\link{afterhours_rank}()},
\code{\link{afterhours_summary}()},
\code{\link{afterhours_trend}()}
}
\concept{After-Hours}
|
68619b3d4c4d28d6a7f32007a6964e76599aab03
|
75db03a61980f42f955f21b3fb3a556cadca25b0
|
/R2neo4j/BulkLoadVia_R2neo4j.R
|
101b59f973c76d8a7f616565f5365baae2981a8a
|
[] |
no_license
|
rsaporta/pubR
|
09575cfc6c15bdcadc2801eff8a8959bac4c5100
|
fe487d7020311b19b92d80e214800813188ad793
|
refs/heads/gitbranch
| 2016-09-06T13:48:23.919397
| 2014-09-28T07:28:03
| 2014-09-28T07:28:03
| 6,797,840
| 15
| 5
| null | 2013-10-22T20:34:24
| 2012-11-21T15:23:29
|
R
|
UTF-8
|
R
| false
| false
| 674
|
r
|
BulkLoadVia_R2neo4j.R
|
# N O T E :
#   This file is just Rick's scrap work.
#   You probably want one of the other files,
#   such as: batchCreateFromDT.R
#   (BUG FIX: the note above was previously bare text, not comments, so
#   sourcing this file failed with a parse error.)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#  BulkLoadVia_R2neo4j.R
## approx 7 seconds per 1,000
Artists.NodeInputs <- createNodesFromDT(DT.Nodes.Concs.artist)
DT.Nodes.Concs.artist.Loaded <- cbind(DT.Nodes.Concs.artist, Artists.NodeInputs)
output <- batchCreateNodesAndRels(NODES.DT, RELS.DT, nodes.idcol = "node",
                                  addSerialNumberToRels = TRUE)
# Trouble shooting.
# output <- batchCreateNodesAndRels(N.mini, R.mini, nodes.idcol="node", addSerialNumberToRels=TRUE)
|
d5d8263d847f3a79d1de3a9ad5193c342608559d
|
9362b22a596f0ca7bc5c391a925c43ae8fb19683
|
/man/internalOutputScript.Rd
|
33e329438763774f960e18be63b40f04a3fb3c81
|
[] |
no_license
|
pmur002/conduit
|
c174d6300b45a7145bd48da08bd15539ba1e9261
|
2d26f12f28268f72ead0d5c911a7751c6e843dac
|
refs/heads/master
| 2021-01-18T05:03:56.094146
| 2016-03-01T22:47:00
| 2016-03-01T22:47:00
| 53,277,769
| 0
| 0
| null | 2016-03-06T21:43:22
| 2016-03-06T21:43:22
| null |
UTF-8
|
R
| false
| true
| 992
|
rd
|
internalOutputScript.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R.R, R/python.R, R/script.R, R/shell.R
\name{internalOutputScript.R}
\alias{internalOutputScript}
\alias{internalOutputScript.R}
\alias{internalOutputScript.python}
\alias{internalOutputScript.shell}
\title{prepare script to resolve internal output}
\usage{
\method{internalOutputScript}{R}(symbol)
\method{internalOutputScript}{python}(symbol)
internalOutputScript(symbol)
\method{internalOutputScript}{shell}(symbol)
}
\arguments{
\item{symbol}{character string with class set to language of module script}
}
\value{
character vector of script to resolve internal output
}
\description{
prepare script to resolve internal output
}
\section{Methods (by class)}{
\itemize{
\item \code{R}: create script to create internal
output for language = "R"
\item \code{python}: create script to create internal
output for language = "python"
\item \code{shell}: create script to create internal
output for language = "shell"
}}
|
f681ffae8357a07eabc50663ebf5de02d1eb239c
|
b2fa820b93bb6b6ca31d667f90c46ef3e082914d
|
/man/read_bismark_nucleotide_coverage.Rd
|
ef31bf35062e4495a37a5d24a391b267472ea10c
|
[] |
no_license
|
labbcb/MethylMetrics
|
2ce4797b22c0d3a05079b7207361c0f2e164fb64
|
beff6cc3d2a750a1be2ea899e24810c1486a6567
|
refs/heads/main
| 2023-06-16T04:57:57.594300
| 2021-07-04T21:45:17
| 2021-07-04T21:45:17
| 302,016,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 347
|
rd
|
read_bismark_nucleotide_coverage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bismark.R
\name{read_bismark_nucleotide_coverage}
\alias{read_bismark_nucleotide_coverage}
\title{Read Bismark nucleotide coverage}
\usage{
read_bismark_nucleotide_coverage(file)
}
\arguments{
\item{file}{Path to a Bismark nucleotide coverage report file.}
}
\value{
The nucleotide coverage data read from \code{file}.
}
\description{
Read Bismark nucleotide coverage
}
|
f4f05eb9e92a1333f5579d37f1ee07ec0c6a9a1b
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/lsbclust/R/int.lsbclust.R
|
f5f7af151e7a30b57c20f49f46c106881571e7cc
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,609
|
r
|
int.lsbclust.R
|
#' Interaction Clustering in Least Squares Bilinear Clustering
#'
#' This function implements the interaction clustering part of the Least Squares Bilinear Clustering
#' method of Schoonees, Groenen and Van de Velden (2014).
#'
#' @param data A three-way array representing the data.
#' @param margin An integer giving the single subscript of \code{data} over which the clustering
#' will be applied.
#' @param delta A four-element binary vector (logical or numeric) indicating which sum-to-zero
#' constraints must be enforced.
#' @param nclust An integer giving the desired number of clusters. If it is a vector, the algorithm
#' will be run for each element.
#' @param ndim The required rank for the approximation of the interactions (a scalar).
#' @param fixed One of \code{"none"}, \code{"rows"} or \code{"columns"} indicating whether to fix neither
#' sets of coordinates, or whether to fix the row or column coordinates across clusters respectively.
#' If a vector is supplied, only the first element will be used.
#' @param nstart The number of random starts to use.
#' @param starts A list containing starting configurations for the cluster membership vector. If not
#' supplied, random initializations will be generated.
#' @param alpha Numeric value in [0, 1] which determines how the singular values are distributed
#' between rows and columns.
#' @param parallel Logical indicating whether to parallelize over different starts or not.
#' @param mc.cores The number of cores to use in case \code{parallel = TRUE}, passed to
#' \code{\link{makeCluster}}.
#' @param maxit The maximum number of iterations allowed.
#' @param verbose Integer controlling the amount of information printed: 0 = no information,
#' 1 = Information on random starts and progress, and 2 = information is printed after
#' each iteration for the interaction clustering.
#' @param method The method for calculating cluster agreement across random starts, passed on
#' to \code{\link{cl_agreement}}. None is calculated when set to \code{NULL}.
#' @param minsize Integer giving the minimum size of cluster to uphold when reinitializing
#' empty clusters.
#' @param return_data Logical indicating whether to include the data in the return
#' value or not
#' @return An object of class \code{int.lsb}
#' @importFrom parallel detectCores makeCluster stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom foreach foreach %dopar%
#' @export
#' @examples
#' data("supermarkets")
#' out <- int.lsbclust(data = supermarkets, margin = 3, delta = c(1,1,0,0), nclust = 4, ndim = 2,
#' fixed = "rows", nstart = 1, alpha = 0)
#' @export
int.lsbclust <- function(data, margin = 3L, delta, nclust, ndim = 2,
fixed = c("none", "rows", "columns"),
nstart = 50, starts = NULL, alpha = 0.5,
parallel = FALSE, mc.cores = detectCores() - 1,
maxit = 100, verbose = 1, method = "diag",
minsize = 3L, return_data = FALSE){
## Capture call, start time, original data
time0 <- proc.time()[3]
cll <- match.call()
data.org <- data
## Recurse if nclust is a vector
## (each element of nclust then yields its own fitted model in a list)
if (length(nclust) > 1){
return(lapply(nclust, int.lsbclust, data = data, margin = margin, delta = delta,
ndim = ndim, fixed = fixed, nstart = nstart, starts = starts, alpha = alpha,
parallel = parallel, maxit = maxit, verbose = verbose, method = method))
}
## Sanity checks and coercion
if (!is(data, "array") || length(dim(data)) != 3) stop("Data must be a three-way array.")
if (!all(margin %in% 1:3) || length(margin) != 1) stop("Argument 'margin' must be 1, 2 or 3.")
delta <- as.numeric(delta)
if (alpha < 0 || alpha > 1) stop("Alpha must be between 0 and 1.")
if (length(delta) != 4 || !all(delta %in% 0:1)) stop("Argument 'delta' supplied in an unrecognized format.")
fixed <- match.arg(tolower(fixed[1L]), choices = c("none", "rows", "columns"))
## Permute the dimensions of the data so that clustering is over the last dimension
## (relative order of the two non-clustered margins is preserved)
perm <- c((1:3)[-margin], margin)
data <- aperm.default(data, perm = perm)
## Get dimensions
dims <- dim(data)
J <- dims[1]
K <- dims[2]
N <- dims[3]
## Handle nclust == 1
## (with one cluster, all starts are equivalent, so only one is needed)
if (nclust == 1) nstart <- 1
## Check for reasonable rank
if (ndim > min(J, K)) stop("Number of dimensions required exceeds min(J, K).")
## Generate random starts for cluster membership if not supplied
if (is.null(starts)){
## Function to generate start without empty clusters
genstart <- function(prob, n = nclust, size = N, replace = TRUE) {
start <- sample.int(n = n, size = size, replace = replace, prob = prob)
counts <- table(factor(start, levels = seq_len(n)))
if (any(counts == 0)) {
## Move random person(s) to empty cluster(s)
start[sample.int(n = size, size = sum(counts == 0))] <- which(counts == 0)
}
return(start)
}
## Random membership vectors; the + 5/N jitter keeps sampling probabilities
## bounded away from zero so small clusters remain possible.
starts <- lapply(X = replicate(nstart, runif(nclust) + 5/N, simplify = FALSE),
FUN = genstart, n = nclust, size = N, replace = TRUE)
} else {
starts <- as.list(starts)
nstart <- length(starts)
if(any(sapply(starts, length) != N))
stop("At least one of the supplied starting configurations are of an incorrect length.")
}
## Centre the data if necessary
## (delta[1]/delta[2] enforce the corresponding sum-to-zero constraints)
if (delta[1] == 1) {
data <- sweep(data, MARGIN = c(2, 3), STATS = colMeans(data), FUN = "-")
}
if (delta[2] == 1) {
data <- sweep(data, MARGIN = c(1, 3), STATS = apply(data, c(1, 3), mean), FUN = "-")
}
## Concatenated data
## (one column per slice: each J x K slab is vectorized into a column)
datmat <- apply(data, 3, c)
## Function to compute loss
# lossfun <- function(y, z) sum((y - z)^2)
## Inline function to do update of row and column coordinates given cluster membership
updCD <- function(start, fixed){
## Cluster sizes
nvec <- tabulate(start)
# ## Calculate cluster means and cluster means weighted by the square root of cluster sizes
# indlst <- split(seq_len(N), f = start)
# mns <- lapply(indlst, function(x) apply(data[, , x], MARGIN = 1:2, FUN = mean))
# ## TODO: This can be optimized
#
# ## Add weights if fixed != "none"
# if(fixed != "none") mns <- mapply("*", mns, sqrt(nvec), SIMPLIFY = FALSE)
## Calculate cluster means and cluster means weighted by the square root of cluster sizes
mns <- ClustMeans(nclust, start, datmat)
if(fixed != "none") mns <- mns * sqrt(nvec)
mns <- lapply(seq_len(nclust), function(x) matrix(mns[x, ], nrow = J, ncol = K))
## Function for fixed = "rows"
## (a single C shared across clusters; cluster-specific D's)
CDu <- function(){
## Setup matrix concatenating along the columns
Xc <- do.call(cbind, mns)
## Do SVD
svdX <- svd(Xc, nu = ndim, nv = ndim)
## Determine updates from SVD
## (alpha in [0, 1] splits the singular values between C and D:
##  C gets d^alpha, D gets d^(1 - alpha))
C <- svdX$u %*% diag(svdX$d[1:ndim]^alpha, nrow = ndim, ncol = ndim)
Dstar <- svdX$v %*% diag(svdX$d[1:ndim]^(1 - alpha), nrow = ndim, ncol = ndim)
## Rescale Dstar and divide up in Du's
Dstar.rs <- Dstar * 1/sqrt(nvec) %x% matrix(1, K, ndim)
Ds <- split.data.frame(Dstar.rs, rep(1:nclust, each = K))
## Return C as Ds
out <- list(C = list(C), D = Ds, svd = list(svdX), Xc = Xc, Xr = NULL,
Cstar = NULL, Dstar = Dstar, means = mns)
return(out)
}
## Function for fixed = "columns"
## (a single D shared across clusters; cluster-specific C's)
CuD <- function(){
## Setup matrix concatenating along the rows
Xr <- do.call(rbind, mns)
## Do SVD
svdX <- svd(Xr, nu = ndim, nv = ndim)
## Determine updates from SVD
Cstar <- svdX$u %*% diag(svdX$d[1:ndim]^alpha, nrow = ndim, ncol = ndim)
D <- svdX$v %*% diag(svdX$d[1:ndim]^(1 - alpha), nrow = ndim, ncol = ndim)
## Rescale Cstar and divide up in Cu's
Cstar.rs <- Cstar * 1/sqrt(nvec) %x% matrix(1, J, ndim)
Cs <- split.data.frame(Cstar.rs, rep(1:nclust, each = J))
## Return C as Ds
out <- list(C = Cs, D = list(D), svd = list(svdX), Xc = NULL, Xr = Xr,
Cstar = Cstar, Dstar = NULL, means = mns)
return(out)
}
## Function for fixed = "none"
## (separate SVD per cluster: both C and D are cluster-specific)
CuDu <- function(){
## SVD's for all u
svdX <- lapply(mns, svd, nu = ndim, nv = ndim)
## Calculate Cu's and Du's
Cs <- lapply(svdX, function(x) x$u %*% diag(x$d[1:ndim]^alpha, nrow = ndim, ncol = ndim))
Ds <- lapply(svdX, function(x) x$v %*% diag(x$d[1:ndim]^(1 - alpha), nrow = ndim, ncol = ndim))
## Return updates of Cs and Ds
out <- list(C = Cs, D = Ds, svd = svdX, Xc = NULL, Xr = NULL,
Cstar = NULL, Dstar = NULL, means = mns)
return(out)
}
updfun <- switch(fixed, none = CuDu, rows = CDu, columns = CuD)
updfun()
}
## Function to update G, using list of model mean estimates
## (G = cluster membership; each observation is assigned to the cluster
##  whose model mean minimizes its squared loss)
updG <- function(mns){
## Matrix of loss function values
mnsmat <- vapply(mns, c, numeric(J * K))
lossmat <- LossMat(datmat, mnsmat)
# lossmat <- apply(data, 3, function(x) vapply(mns, lossfun, numeric(1), z = x))
# if(nclust == 1) lossmat <- matrix(lossmat, nrow = 1, ncol = N)
# ## Construct arrays of the means
# mns.arr <- lapply(mns, replicate, n = N)
# lossmat <- mapply(FUN = lossfun, list(data), mns.arr)
## Determine class of minimum loss and the value of the loss currently
newclass <- apply(lossmat, 2, which.min)
losscomps <- apply(lossmat, 2, min)
## Function to check for empty classes, and if any to ensure at least on row per class
## (moves worst-fitting observations into empty clusters, but never drains a
##  cluster at or below 'minsize'; falls back to random moves if needed)
checkempty <- function(class) {
## Class counts and determine which are empty
classcts <- table(factor(class, levels = 1:nclust))
if(any(classcts == 0)) {
zeroclass <- (1:nclust)[classcts == 0]
## Move worst fitting observation(s) to the empty cluster(s)
## Order from worst to best-fitting
ord <- order(losscomps, decreasing = TRUE)
## Remove objects from classes smaller than or equal to minsize from consideration
smallclasses <- which(classcts <= minsize)
ord <- setdiff(ord, which(class %in% smallclasses))
## Move worst-fitting observation(s) in consideration set
mvs <- ord[seq_along(zeroclass)]
class[mvs] <- zeroclass
## Update loss components and recalculate counts
losscomps[mvs] <- lossmat[cbind(zeroclass, mvs)]
classcts <- table(factor(class, levels = 1:nclust))
## Give message about cluster
message("int.lsbclust: ", length(zeroclass), " empty cluster(s) re-initialized.")
## If an empty class still exists, fill in with random choices
if(any(classcts == 0)) {
zeroclass <- (1:nclust)[classcts == 0]
mvs <- sample.int(n = N, size = length(zeroclass), replace = FALSE)
class[mvs] <- zeroclass
losscomps[mvs] <- lossmat[cbind(zeroclass, mvs)]
message("int.lsbclust: an empty cluster was randomly re-initialized.")
}
}
return(list(class = class, losscomps = losscomps))
}
## Update classification if empty classes and calculate loss
emptyChecked <- checkempty(class = newclass)
newclass <- emptyChecked$class
losscomps <- emptyChecked$losscomps
loss <- sum(losscomps)
return(list(newclass = newclass, loss = loss, losscomps = losscomps))
}
## Maximum loss for standardization
## (total sum of squares; reported losses are scaled into [0, 1] by this)
maxloss <- sum(data^2)
## Function for algorithm: loop over updCD and updG until convergence for a single start
onestart <- function(start){
## Terminate start if one or more classes are empty
if(any(table(factor(start, levels = 1:nclust)) == 0)) {
message("int.lsbclust: Random start discarded due to empty clusters.\n")
return(list(minloss = NA))
}
## Initialization
iter <- 0
loss <- rep(NA, maxit)
nchange <- rep(NA, maxit)
curclass <- start
## Loop over updCD and updG until maxit is reached or no more moves are made
repeat {
## Increase iterations
iter <- iter + 1
## Update Cs and D
CDs <- updCD(start = curclass, fixed = fixed)
## Model estimates of means
rmns <- mapply(FUN = tcrossprod, CDs$C, CDs$D, SIMPLIFY = FALSE)
## Update G
Gupd <- updG(rmns)
## Calculate number of changes and update curclass
## (solve_LSAP relabels the new clusters to best match the old labels,
##  so 'nchange' counts genuine membership moves, not label swaps)
ord <- clue::solve_LSAP(table(Gupd$newclass, curclass), maximum = TRUE)
Gupd$newclass <- plyr::mapvalues(Gupd$newclass, from = 1:nclust, to = ord)
nchange[iter] <- sum(Gupd$newclass != curclass)
curclass <- Gupd$newclass
## Calculate standardized loss and print information if relevant
loss[iter] <- Gupd$loss/maxloss
if(verbose > 1) cat(sprintf(paste0("%", nchar(as.character(maxit)), "d"), iter),
"| Loss =", sprintf("%5f", loss[iter]),
sprintf(paste0("%", nchar(as.character(N)) + 3, "s"),
paste0("(", nchange[iter], ")")),
"\n")
## Break in case no further changes occur or if maxit is reached
if(iter == maxit || nchange[iter] == 0) {
if(verbose == 1) cat("\nLoss =", sprintf("%6f", loss[iter]))
break
}
}
res <- list(C = CDs$C, D = CDs$D, svd = CDs$svd, cluster = curclass, loss = loss[1:iter],
nchange = nchange[1:iter], iter = iter, minloss = loss[iter], means = CDs$means,
Xr = CDs$Xr, Xc = CDs$Xc, Cstar = CDs$Cstar, Dstar = CDs$Dstar,
losscomps = Gupd$losscomps)
class(res) <- c("int.lsbclust", "list")
# gc(verbose = FALSE)
return(res)
}
## Apply algorithm over all starts, possibly in parallel
## (PSOCK clusters on Windows, FORK elsewhere; foreach/%dopar% drives both)
if (parallel) {
if (.Platform$OS.type == "windows") {
cl <- makeCluster(mc.cores, type = "PSOCK")
registerDoParallel(cl)
# res <- parLapply(cl = cl, X = starts, fun = onestart)
# stopCluster(cl)
} else {
cl <- makeCluster(mc.cores, type = "FORK")
registerDoParallel(cl)
# res <- mclapply(X = starts, FUN = onestart, mc.cores = mc.cores, mc.allow.recursive = FALSE)
}
res <- foreach(i = seq_along(starts)) %dopar% onestart(start = starts[[i]])
stopCluster(cl)
} else {
res <- lapply(starts, onestart)
}
## Determine loss for all starts
allloss <- sapply(res, "[[", "minloss")
## Drop discarded starts
res <- res[!is.na(allloss)]
## Determine best start
allloss <- allloss[!is.na(allloss)]
bestres <- res[[which.min(allloss)]]
## Calculate the number of estimated parameters
## (degrees of freedom differ per 'fixed' variant; delta removes centring df)
df <- switch(fixed, none = nclust * ndim * (J + K - ndim - sum(delta[1:2])) + N * (nclust - 1),
rows = ndim * (J + nclust * K - ndim - delta[1] - nclust * delta[2]) + N * (nclust - 1),
columns = ndim * (nclust * J + K - ndim - nclust * delta[1] - delta[2]) + N * (nclust - 1))
## Reorder class according to size
## (cluster 1 becomes the largest; all per-cluster results follow this order)
ord <- order(table(bestres$cluster), decreasing = TRUE)
bestres$cluster <- plyr::mapvalues(x = bestres$cluster, from = ord, to = seq_along(ord))
if (fixed == "rows" || fixed == "none") {
bestres$D <- bestres$D[ord]
names(bestres$D) <- seq_len(nclust)
}
if (fixed == "columns" || fixed == "none") {
bestres$C <- bestres$C[ord]
names(bestres$C) <- seq_len(nclust)
}
## Reorder svds
if (fixed == "none") {
bestres$svd <- bestres$svd[ord]
names(bestres$svd) <- seq_len(nclust)
}
if (fixed == "rows") {
inds <- unlist(split(seq_len(nclust * K), rep(seq_len(nclust), each = K))[ord],
use.names = FALSE, recursive = FALSE)
bestres$svd[[1]]$v <- bestres$svd[[1]]$v[inds, , drop = FALSE]
}
if (fixed == "columns") {
inds <- unlist(split(seq_len(nclust * J), rep(seq_len(nclust), each = J))[ord],
use.names = FALSE, recursive = FALSE)
bestres$svd[[1]]$u <- bestres$svd[[1]]$u[inds, , drop = FALSE]
}
## Reorder means, Xr, Xc, Cstar, Dstar (or a subset of these)
bestres$means <- bestres$means[ord]
if (fixed == "columns") {
bestres$Xr <- do.call(rbind, split.data.frame(bestres$Xr, f = rep(seq_len(nclust), each = J))[ord])
bestres$Cstar <- do.call(rbind, split.data.frame(bestres$Cstar, f = rep(seq_len(nclust), each = J))[ord])
}
if (fixed == "rows") {
bestres$Xc <- t(do.call(rbind, split.data.frame(t(bestres$Xc), f = rep(seq_len(nclust), each = K))[ord]))
bestres$Dstar <- do.call(rbind, split.data.frame(bestres$Dstar, f = rep(seq_len(nclust), each = K))[ord])
}
## Calculate fit measures for biplots and persons
## (rfit/cfit = proportion of row/column sums-of-squares recovered by the
##  low-rank approximation, overall and per dimension; ofit = overall fit
##  from the squared singular values)
if (fixed == "none") {
CxD.comp <- Map(function(x, y) lapply(seq_len(ndim), function(z) tcrossprod(x[, z], y[, z])), bestres$C, bestres$D)
CxD <- lapply(CxD.comp, Reduce, f = "+")
## Row fits
rnum <- vapply(CxD, function(x) diag(tcrossprod(x)), numeric(J))
rnum.comp <- rapply(CxD.comp, function(y) diag(tcrossprod(y)), how = "replace")
rdenom <- vapply(bestres$means, function(x) diag(tcrossprod(x)), numeric(J))
rnum.comp <- aperm(array(unlist(rnum.comp), dim = c(J, ndim, nclust)), c(1, 3, 2))
rfit.comp <- sweep(rnum.comp, MARGIN = 1:2, STATS = rdenom, FUN = "/")
rfit <- rnum/rdenom
colnames(rfit) <- seq_len(nclust)
rownames(rfit) <- rownames(data)
dimnames(rfit.comp) <- list(rownames(data), seq_len(nclust), paste("Dim.", seq_len(ndim)))
## Column fits
cnum <- vapply(CxD, function(x) diag(crossprod(x)), numeric(K))
cnum.comp <- rapply(CxD.comp, function(y) diag(crossprod(y)), how = "replace")
cdenom <- vapply(bestres$means, function(x) diag(crossprod(x)), numeric(K))
cnum.comp <- aperm(array(unlist(cnum.comp), dim = c(K, ndim, nclust)), c(1, 3, 2))
cfit.comp <- sweep(cnum.comp, MARGIN = 1:2, STATS = cdenom, FUN = "/")
cfit <- cnum/cdenom
colnames(cfit) <- seq_len(nclust)
rownames(cfit) <- colnames(data)
dimnames(cfit.comp) <- list(colnames(data), seq_len(nclust), paste("Dim.", seq_len(ndim)))
## Overall fit
ofit <- vapply(bestres$svd, function(x) x$d^2/sum(x$d^2), numeric(min(J, K)))
rownames(ofit) <- seq_len(min(J, K))
}
if (fixed == "columns") {
## Calculate CstarD and numerator per dimension
CstarD.comp <- lapply(seq_len(ndim), function(x) tcrossprod(bestres$Cstar[, x], bestres$D[[1]][, x]))
CstarD <- Reduce("+", CstarD.comp)
rnum.comp <- sapply(CstarD.comp, function(x) diag(tcrossprod(x)))
## Row fits
rnum <- rowSums(rnum.comp)
rdenom <- diag(tcrossprod(bestres$Xr))
rfit <- matrix(rnum/rdenom, ncol = nclust, nrow = J)
rfit.comp <- array(sweep(x = rnum.comp, MARGIN = 1, STATS = rdenom, FUN = "/"), dim = c(J, nclust, ndim))
colnames(rfit) <- seq_len(nclust)
rownames(rfit) <- rownames(data)
dimnames(rfit.comp) <- list(rownames(data), seq_len(nclust), paste("Dim.", seq_len(ndim)))
## Column fits
cnum.comp <- sapply(CstarD.comp, function(x) diag(crossprod(x)))
cnum <- rowSums(cnum.comp)
cdenom <- diag(crossprod(bestres$Xr))
cfit <- matrix(cnum/cdenom, ncol = 1)
cfit.comp <- sweep(x = cnum.comp, MARGIN = 1, STATS = cdenom, FUN = "/")
colnames(cfit) <- 1
rownames(cfit) <- rownames(cfit.comp) <- colnames(data)
colnames(cfit.comp) <- paste("Dim.", seq_len(ndim))
## Overall fit
ofit <- matrix(bestres$svd[[1]]$d^2 / sum(bestres$svd[[1]]$d^2), ncol = 1)
rownames(ofit) <- seq_len(min(nclust * J, K))
colnames(ofit) <- 1
}
if (fixed == "rows") {
CDstar.comp <- lapply(seq_len(ndim), function(x) tcrossprod(bestres$C[[1]][, x], bestres$Dstar[, x]))
CDstar <- Reduce("+", CDstar.comp)
## Row fits
rnum.comp <- sapply(CDstar.comp, function(x) diag(tcrossprod(x)))
rnum <- rowSums(rnum.comp) # diag(tcrossprod(CDstar)) #rowSums(CDstar^2)
rdenom <- diag(tcrossprod(bestres$Xc)) #rowSums(bestres$Xc^2)
rfit <- matrix(rnum/rdenom, ncol = 1)
rfit.comp <- sweep(x = rnum.comp, MARGIN = 1, STATS = rdenom, FUN = "/")
colnames(rfit) <- 1
rownames(rfit) <- rownames(rfit.comp) <- rownames(data)
colnames(rfit.comp) <- paste("Dim.", seq_len(ndim))
## Column fits
cnum.comp <- sapply(CDstar.comp, function(x) diag(crossprod(x)))
cnum <- rowSums(cnum.comp)
cdenom <- diag(crossprod(bestres$Xc)) #colSums(bestres$Xc^2)
cfit <- matrix(cnum/cdenom, nrow = K, ncol = nclust)
cfit.comp <- array(sweep(cnum.comp, MARGIN = 1, STATS = cdenom, FUN = "/"), dim = c(K, nclust, ndim))
colnames(cfit) <- seq_len(nclust)
rownames(cfit) <- colnames(data)
dimnames(cfit.comp) <- list(colnames(data), seq_len(nclust), paste("Dim.", seq_len(ndim)))
## Overall fit
ofit <- matrix(bestres$svd[[1]]$d^2/sum(bestres$svd[[1]]$d^2), ncol = 1)
rownames(ofit) <- seq_len(min(J, nclust*K))
colnames(ofit) <- 1
}
## Person fit (RV/congruence coefficient)
## (per-observation agreement between the data slice and its cluster's C D')
if (fixed != "none") CxD <- Map(tcrossprod, bestres$C, bestres$D)
ss.X <- apply(data^2, MARGIN = 3, sum)
ss.CD <- vapply(CxD, FUN = function(x) sum(x^2), FUN.VALUE = numeric(1))[bestres$cluster]
ss.XCD <- rep(NA, N)
for (i in seq_len(N)) ss.XCD[i] <- sum(diag(tcrossprod(data[, , i], CxD[[bestres$cluster[i]]])))
pfit <- ss.XCD / sqrt(ss.X * ss.CD)
## Remove sqrt(N_u) from means if added before
if (fixed != "none") {
nvec <- table(bestres$cluster)
bestres$means <- Map("/", bestres$means, sqrt(nvec))
}
## Update result and return
for (i in seq_len(nclust)) {
colnames(bestres$means[[i]]) <- colnames(data)
rownames(bestres$means[[i]]) <- rownames(data)
}
names(bestres$means) <- seq_len(nclust)
bestres$C <- lapply(bestres$C, 'rownames<-', rownames(data))
bestres$D <- lapply(bestres$D, 'rownames<-', colnames(data))
bestres$allloss <- allloss
bestres$alpha <- alpha
bestres$df <- df
if (return_data)
bestres$data <- data.org
bestres$delta <- delta
bestres$fixed <- fixed
bestres$rfit <- rfit
bestres$rfit.comp <- rfit.comp
bestres$cfit <- cfit
bestres$cfit.comp <- cfit.comp
bestres$ofit <- ofit
bestres$pfit <- pfit
bestres$maxloss <- maxloss
bestres$losscomps <- bestres$losscomps/maxloss
bestres$nclust <- nclust
bestres$N <- N
bestres$perm <- perm
bestres$call <- cll
bestres$time <- proc.time()[3] - time0
# bestres$means <- lapply(bestres$means, 'dimnames<-', list(rownames(data), colnames(data)))
## Cluster correspondence for best results to all other starts (if more than one start)
## (NOTE(review): 'method' is passed straight to clue::cl_agreement; see its
##  docs for the available agreement measures)
if(!is.null(method) && nstart > 1) bestres$cl_agreement <- cl_agreement(res[-which.min(allloss)],
bestres, method = method)
return(bestres)
}
#' @rdname cmat
#' @title Centring Matrix
#' @description Construct the k-by-k centring matrix I - J/k (J being the
#'   all-ones matrix); pre-multiplying a matrix by it removes column means.
#' @param k An integer determining the dimensions of the centring matrix.
cmat <- function(k) {
  eye <- diag(nrow = k)
  # Scalar recycling subtracts 1/k from every entry of the identity matrix.
  eye - 1 / k
}
# #' @rdname centre
# #' @title Centre the Rows or Columns of a Matrix
# #' @description Remove either the column (\code{pre = TRUE}) or row (\code{pre = FALSE}) means of
# #' a matrix.
# #' @param x A matrix.
# #' @param pre Logical indicating whether to remove the column means (\code{TRUE}) or the row means
# #' (\code{FALSE}).
# # centre <- function(x, pre = TRUE) {
# # if (!pre) {
# # return(t(scale(t(x), scale = FALSE)))
# # } else {
# # return(scale(x, scale = FALSE))
# # }
# # }
# centre <- function(x, type = c("pre", "post", "both")) {
# type <- match.arg(type, choices = c("pre", "post", "both"))
# retval <- switch(type,
# pre = x - tcrossprod(rowMeans(x), rep(1, ncol(x))),
# post = x - tcrossprod(rep(1, nrow(x)), colMeans(x)),
# both = x - tcrossprod(rowMeans(x), rep(1, ncol(x))) -
# tcrossprod(rep(1, nrow(x)), colMeans(x)) + mean.default(x))
# return(retval)
# }
# precentre <- function(x) {
# return(x - tcrossprod(rowMeans(x), rep(1, ncol(x))))
# }
# postcentre <- function(x) {
# return(x - tcrossprod(rep(1, nrow(x)), colMeans(x)))
# }
# bothcentre <- function(x) {
# x - tcrossprod(rowMeans(x), rep(1, ncol(x))) - tcrossprod(rep(1, nrow(x)), colMeans(x)) + mean.default(x)
# }
|
ce067c244689d64ec8eda11a8682cfbbb91b7dd2
|
c051966c65cc98d50d8bce8e73670022b587a0a1
|
/Rpackage/R/plot-helpers.R
|
71c921fbf75794a5697346e6fb3b7e392b6146a5
|
[
"MIT"
] |
permissive
|
mzdan/insta-sound
|
60ee9cef3881decb20d084eeb6e78e6746ebfbc9
|
7261b30762ab3bde4359e9c1de8c9dc35bc67f29
|
refs/heads/master
| 2020-05-07T19:40:34.582957
| 2015-02-02T18:26:04
| 2015-02-02T18:26:04
| 14,677,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 551
|
r
|
plot-helpers.R
|
#' Creates an empty ggplot2 theme.
#'
#' Blanks out the axes (lines, text, ticks, titles), the legend, the panel
#' background/border/grid, and the plot background, leaving only the plotted
#' geoms visible.
#' @export
empty_theme <- function() {
  # A single element_blank() object can safely be reused for every slot.
  blank <- element_blank()
  hidden <- list(
    axis.line = blank,
    axis.text.x = blank,
    axis.text.y = blank,
    axis.ticks = blank,
    axis.title.x = blank,
    axis.title.y = blank,
    legend.position = "none",
    panel.background = blank,
    panel.border = blank,
    panel.grid.major = blank,
    panel.grid.minor = blank,
    plot.background = blank
  )
  do.call(theme, hidden)
}
|
0730c007d9fd0d302cdefa68aa71fe29054ac8b0
|
6771b3be59935639b51698036e5fbbaf51feec4b
|
/man/proximity_indices.Rd
|
268bee5a450ccbae5014ce640289550827866445
|
[] |
no_license
|
pieterbeck/CanHeMonR
|
f1a15bc68afc77f66bb13b3e90fbfaa3e99376e3
|
94ac1171b5bb7ff88e3cbe7dee3594c31d628ff4
|
refs/heads/master
| 2020-05-21T04:42:39.673459
| 2018-05-14T09:33:09
| 2018-05-14T09:33:09
| 48,625,321
| 0
| 3
| null | 2018-05-14T09:33:10
| 2015-12-26T22:26:10
|
R
|
UTF-8
|
R
| false
| true
| 554
|
rd
|
proximity_indices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/region_growing_functions.r
\name{proximity_indices}
\alias{proximity_indices}
\title{Make a distance-to-center matrix}
\usage{
proximity_indices(nrow_ = 3, ncol_ = 6)
}
\arguments{
\item{nrow_}{nr of rows of the matrix}
\item{ncol_}{nr of columns of the matrix}
}
\value{
a 2 row matrix where row 1: cell nr, and row 2: distance to center measured in cellnrs
}
\description{
For a matrix, order cells by distance to the center.
}
\note{
called in region_growing_wrapper.r
}
|
5340b7f676bf5bad35d6d80119e6283b540ba3e8
|
4d95e5ceddbfaedd8c984fd5dc04036d2f6e88e6
|
/maplet/R/mt_reporting_tictoc.R
|
a39f627cb88095f218744a0263eb0bdfe25659b1
|
[] |
no_license
|
ZWCharlie/maplet
|
864603059867f3dae25f856832056dd780dde4a1
|
21a331bbb939b940b19ceb94105198c4213f2ed8
|
refs/heads/main
| 2023-04-12T00:44:49.776031
| 2021-04-27T00:04:44
| 2021-04-27T00:04:44
| 348,540,703
| 0
| 0
| null | 2021-03-17T01:15:52
| 2021-03-17T01:15:50
| null |
UTF-8
|
R
| false
| false
| 1,819
|
r
|
mt_reporting_tictoc.R
|
#' Timing functionality
#'
#' Call mt_reporting_tic to start timing anywhere in pipeline, then mt_reporting_toc to show the time elapsed in the status log.
#'
#' @param D \code{SummarizedExperiment} input.
#'
#' @return Does not change the \code{SummarizedExperiment} object (a "tic"
#'   entry is appended to its results log).
#'
#' @examples
#' \dontrun{%>%
#'   mt_reporting_tic() %>%
#'   ... %>% ...  # pipeline steps
#'   ... %>% ...  # pipeline steps
#'   ... %>% ...  # pipeline steps
#'   mt_reporting_toc()}
#'
#' @author JK
#'
#' @import tictoc
#'
#' @export
mt_reporting_tic <- function(D) {
  # validate argument: this step only operates on SummarizedExperiment objects
  stopifnot("SummarizedExperiment" %in% class(D))
  # start the tictoc timer; the matching mt_reporting_toc() reads it back
  tic()
  # add status information to the pipeline's results log
  # (plain string literal; the original sprintf("tic") had no format
  #  arguments and was equivalent)
  funargs <- mti_funargs()
  metadata(D)$results %<>%
    mti_generate_result(
      funargs = funargs,
      logtxt = "tic"
    )
  # return the object unchanged so the pipeline can continue
  D
}
#' Timing functionality
#'
#' Call mt_reporting_tic to start timing anywhere in pipeline, then mt_reporting_toc to show the time elapsed in the status log.
#'
#' @param D \code{SummarizedExperiment} input.
#'
#' @return Does not change the \code{SummarizedExperiment} object (an elapsed-time
#'   entry is appended to its results log).
#'
#' @examples
#' \dontrun{%>%
#'   mt_reporting_tic() %>%
#'   ... %>% ...  # pipeline steps
#'   ... %>% ...  # pipeline steps
#'   ... %>% ...  # pipeline steps
#'   mt_reporting_toc()}
#'
#' @author JK
#'
#' @import tictoc
#'
#' @export
mt_reporting_toc <- function(D) {
  # validate argument: this step only operates on SummarizedExperiment objects
  stopifnot("SummarizedExperiment" %in% class(D))
  # stop the timer quietly; toc() returns NULL if tic() was never called
  # (use TRUE, not the reassignable shorthand T)
  t <- toc(quiet = TRUE)
  if (is.null(t)) {
    logtxt <- "toc without prior tic, no timing recorded"
  } else {
    logtxt <- sprintf("toc, elapsed: %.2fs", t$toc - t$tic)
  }
  # add the elapsed-time message to the pipeline's results log
  funargs <- mti_funargs()
  metadata(D)$results %<>%
    mti_generate_result(
      funargs = funargs,
      logtxt = logtxt
    )
  # return the object unchanged so the pipeline can continue
  D
}
|
526384e22bf301e07c82d98c2b3f2befe64fd216
|
0f0b18926b1c0ed35f6e69f86b2f117551e1fb17
|
/man/inboxCompose.Rd
|
a12cb36416f0bfe87979cf52b0c8c3e776743d73
|
[] |
no_license
|
moturoa/shinyinbox
|
44316b567a9954a3338b2d3306fa8909f3b6e1c9
|
085872b3c7846b3bc265a21052cc9a1cb1f030c8
|
refs/heads/master
| 2023-04-03T06:49:31.356881
| 2021-04-06T07:16:51
| 2021-04-06T07:16:51
| 312,277,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,157
|
rd
|
inboxCompose.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/composeInbox.R
\name{inboxComposeUI}
\alias{inboxComposeUI}
\alias{inboxCompose}
\title{Inbox and Compose box Shiny module}
\usage{
inboxComposeUI(id)
inboxCompose(
input,
output,
session,
msg,
users = NULL,
current_user = NULL,
filter_user = NULL,
attachment = reactive(NULL),
attachment_function = NULL,
filter_attachment = reactive(NULL),
label_inbox = getOption("sib_inbox", "Inbox"),
label_delete = getOption("sib_delete", "Verwijder selectie"),
label_compose_box = getOption("sib_compose_titel", "Schrijf bericht"),
label_tag_user = getOption("sib_tag_user", "Notificatie gebruiker"),
label_send = getOption("sib_send", "Verstuur"),
label_message = getOption("sib_message", "Bericht"),
label_undo = getOption("sib_undo", "Undo"),
label_tagged = getOption("sib_tagged", "Tagged"),
label_close = getOption("sib_close", "Sluiten"),
label_edit = getOption("sib_edit", "Edit"),
label_save = getOption("sib_save", "Opslaan"),
width = 4,
...
)
}
\description{
Inbox and Compose box Shiny module
Inbox and Compose box Shiny module
}
|
a5a5a3fabba0bfcf603ab50e747051658e8d0cab
|
790e131bb9fbb7e7c4c419b0078fe1aa80e39340
|
/testApplication/testApp.R
|
8eeab3de1f63cbee721f5cbb400bd3dee372774e
|
[
"MIT"
] |
permissive
|
inbo/reporting-rshiny-waterbirds
|
ccccb5af62987afb82b21ff8e113d043ff2c10ef
|
fcf71cc01f385e20478efa79e1cdd8ca3df54d8d
|
refs/heads/master
| 2021-01-19T07:06:04.299795
| 2019-09-24T06:58:30
| 2019-09-24T06:58:30
| 87,520,667
| 1
| 1
| null | 2017-11-08T11:35:21
| 2017-04-07T08:00:03
|
R
|
UTF-8
|
R
| false
| false
| 145
|
r
|
testApp.R
|
# launch the application
# (during development, uncomment the detach() call below to force-unload a
#  previously loaded copy of the package before re-launching)
# detach('package:watervogelsAnalyse', unload = TRUE)
library(watervogelsAnalyse)
# fully qualified call so the entry point is unambiguous even if masked;
# presumably starts the package's Shiny app — confirm against the package
watervogelsAnalyse::runWatervogels()
|
bb6db8af174772aac8dad8c9e584321ed250748a
|
7437dc3501715b0edbd3e7ee1f4d2aedf374e7a2
|
/R/LNAPhyloDyn-package.R
|
8247b668ca9990c3996131f1ece6a0a56ff1b68c
|
[] |
no_license
|
MingweiWilliamTang/LNAphyloDyn
|
577b6ca0fa9c2ab505257d051019f43e44d12e19
|
4da93614561f089115636711e72d72950ae6e9e4
|
refs/heads/master
| 2021-01-11T02:30:43.988544
| 2019-10-22T21:44:28
| 2019-10-22T21:44:28
| 70,955,647
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31
|
r
|
LNAPhyloDyn-package.R
|
# Package-level roxygen stub: the @useDynLib tag below makes roxygen2 write
# useDynLib(LNAPhyloDyn) into NAMESPACE so the package's compiled routines
# are loaded. The NULL is a placeholder object for the roxygen block to
# attach to (it must immediately follow the #' lines).
#' @useDynLib LNAPhyloDyn
NULL
|
b9aee1d644bb9bf01c77622889594908c1fab581
|
878aa28161ed778da05902113a9a18fbb2738319
|
/Figures/Keogh stan figures.R
|
7b25b10a715579d603c092eb9c83836f4978291b
|
[] |
no_license
|
klwilson23/Keogh
|
a6f9f3ccb24d10ce08d694eaa8cdecae8dd06dbf
|
e499c087c267d3e3a89c8edfe4b088248f6338ec
|
refs/heads/master
| 2023-04-11T02:09:36.009864
| 2022-01-21T21:39:32
| 2022-01-21T21:39:32
| 152,797,463
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,062
|
r
|
Keogh stan figures.R
|
source("some functions.R")
library(reshape2)
library(gridExtra)
library(ggpubr)
library(ggplot2)
library(MARSS)
library(broom)
library(wesanderson)
library(rstan)
library(loo)
library(rethinking)
library(corrplot)
fit <- readRDS(file="~/Google Drive/SFU postdoc/Keogh river/Stan fits/keogh mvnorm covars.rds")
ptSize <- 0.7
txtSize <- 0.9
target <- 0.80 # how much of the posterior mass are we okay being > or < 0 to draw inference
spp_ord <- c(2,1,3,5,4)
spp_ord_fact <- c("Dolly Varden","Steelhead","Cutthroat","Coho","Pink")
color_grad <- colorRampPalette(c("darkred","white","darkred"))
sig_colors <- color_grad(21)
# --- Data preparation -------------------------------------------------------
# Long-format species data, restricted to 1976-2015 and dropping Chum.
keogh <- readRDS("Data/Keogh_collinear_enviro.rds")
keogh_long <- subset(keogh,Year<=2015 & Year>=1976)
keogh_long <- subset(keogh_long,Species!="Chum")
keogh_long$Species <- factor(keogh_long$Species,levels=unique(keogh_long$Species))
# Derived responses: marine survival (returning adults / juvenile cohort),
# its logit, and log productivity ln(R/S).
keogh_long$marSurv <- keogh_long$Stock/keogh_long$juvCohort
keogh_long$logitSurv <- log(keogh_long$marSurv/(1-keogh_long$marSurv))
keogh_long$prod <- log(keogh_long$Recruits/keogh_long$Stock)
# Wide format: one column of Stock and prod per species, one row per Year.
keogh_sel <- subset(keogh_long,select = c(Year,Species,Stock,prod))
keogh_wide <- reshape(keogh_sel,direction = "wide",idvar="Year",timevar="Species")
# Steelhead run-timing data; first year's survival is treated as missing.
run_time <- readRDS("Data/steelhead_run.rds")
sh_annual <- readRDS("Data/steelhead_run_annual.rds")
sh_annual$time <- 1:length(sh_annual$year)
sh_annual <- subset(sh_annual,year<=2015 & year>=1976)
sh_annual$logit_surv[1] <- NA
sh_annual$log_adults <- log(sh_annual$Stock)
# Marine-survival covariates, standardised; scaling attributes are kept so
# effects can be back-transformed later.
Xvars <- c("ocean_interact","npgo","ocean_covar_2")
sdSurv_sh <- attr(scale(sh_annual[,Xvars],center=TRUE,scale=TRUE),"scaled:scale")
mnSurv_sh <- attr(scale(sh_annual[,Xvars],center=TRUE,scale=TRUE),"scaled:center")
enviro <- scale(sh_annual[,Xvars],center=TRUE,scale=TRUE)
enviro <- data.frame(Xvars=enviro)
colnames(enviro) <- Xvars
x1 <- model.matrix(~-1+ocean_interact+npgo+ocean_covar_2,data=enviro)
# Run-timing covariates (rain/temperature during the run), standardised.
XXvars <- c("total_rain_run","mean_temp_run")
sdSurv_run <- attr(scale(sh_annual[,XXvars],center=TRUE,scale=TRUE),"scaled:scale")
mnSurv_run <- attr(scale(sh_annual[,XXvars],center=TRUE,scale=TRUE),"scaled:center")
enviro_run <- scale(sh_annual[,XXvars],center=TRUE,scale=TRUE)
enviro_run <- data.frame(enviro_run)
colnames(enviro_run) <- XXvars
x2 <- model.matrix(~-1+total_rain_run+mean_temp_run,data=enviro_run)
# Freshwater-productivity covariates for steelhead, standardised.
XXXvars <- c("meanLogging","total_rain_egg","mean_temp_egg","freshPink","fertil")
sdSurv_prod <- attr(scale(sh_annual[,XXXvars],center=TRUE,scale=TRUE),"scaled:scale")
mnSurv_prod <- attr(scale(sh_annual[,XXXvars],center=TRUE,scale=TRUE),"scaled:center")
enviro_prod <- scale(sh_annual[,XXXvars],center=TRUE,scale=TRUE)
enviro_prod <- data.frame(XXXvars=enviro_prod)
colnames(enviro_prod) <- XXXvars
# Stock (x) and productivity (y) matrices, years x species. Missing
# productivities are imputed with the column means of the first 10 years.
x <- as.matrix(keogh_wide[,grep("Stock",colnames(keogh_wide))])
y <- as.matrix(keogh_wide[,grep("prod",colnames(keogh_wide))])
y[which(is.na(y),arr.ind=TRUE)] <- colMeans(y[1:10,],na.rm=TRUE)[which(is.na(y),arr.ind=TRUE)[,2]]
# Per-species standardised covariate design matrices (xx1 = steelhead,
# xx2 = Dolly Varden, xx3 = cutthroat, xx4 = pink, xx5 = coho).
xx1 <- scale(model.matrix(~-1+meanLogging+total_rain_egg+mean_temp_egg+freshPink+fertil,data=enviro_prod),center=TRUE,scale=TRUE)
xx2 <- scale(model.matrix(~-1+meanLogging+sumTemp+sumRain+winTemp+winRain+freshPink+fertil,data=keogh_long[keogh_long$Species=="Dolly Varden",]),center=TRUE,scale=TRUE)
xx3 <- scale(model.matrix(~-1+meanLogging+sumTemp+sumRain+winTemp+winRain+freshPink+fertil,data=keogh_long[keogh_long$Species=="Cutthroat",]),center=TRUE,scale=TRUE)
xx4 <- scale(model.matrix(~-1+meanLogging+sumTemp+sumRain+ocean_interact+ocean_covar_2+npgo+winTemp+winRain,data=keogh_long[keogh_long$Species=="Pink",]),center=TRUE,scale=TRUE)
xx5 <- scale(model.matrix(~-1+meanLogging+sumTemp+sumRain+winTemp+winRain+freshPink+fertil,data=keogh_long[keogh_long$Species=="Coho",]),center=TRUE,scale=TRUE)
# all models
# Data list in the shape expected by the Stan model: N years, K species,
# spawner (x) and productivity (y) matrices, the per-species covariate
# matrices, and the steelhead survival / run-timing observations.
dat <- list("N"=nrow(x),
            "K"=ncol(x),
            "x"=x,
            "y"=y,
            "init_priors"=rep(-2e-3,ncol(x)),
            "J1"=ncol(xx1),
            "J2"=ncol(xx2),
            "J3"=ncol(xx3),
            "J4"=ncol(xx4),
            "J5"=ncol(xx5),
            "xx1"=xx1,
            "xx2"=xx2,
            "xx3"=xx3,
            "xx4"=xx4,
            "xx5"=xx5,
            "N_obs"=sum(!is.na(sh_annual$logit_surv)),
            "M"=ncol(x1),
            "Q"=ncol(x2),
            "P"=2,
            "x1"=x1,
            "x2"=x2,
            "juvCoh"=as.vector(scale(sh_annual$juvCohort)),
            #"x3"=as.numeric(x3),
            "y1_obs"=sh_annual$logit_surv[!is.na(sh_annual$logit_surv)],
            "y2"=sh_annual$run_date_corrected,
            "init_s0"=mean(sh_annual$logit_surv[1:10],na.rm=TRUE))
# Initial values for each Stan chain: zero-filled coefficient vectors whose
# lengths come from the global `dat` list. `chain_id` is required by rstan's
# init-function interface but is not used (all chains start identically).
init_fx <- function(chain_id) {
  coef_lengths <- c(
    beta_steel  = dat$J1 + 1,
    beta_dolly  = dat$J2,
    beta_cutt   = dat$J3,
    beta_pink   = dat$J4,
    beta_coho   = dat$J5,
    beta_adults = dat$P
  )
  lapply(coef_lengths, function(len) rep(0, len))
}
intervals <- c(0.1,0.9)  # 80% posterior credible intervals used throughout
# posterior frequencies for coefficients
# Each colSums(betas>0)/nrow(betas) prints Pr(coefficient > 0) under the
# posterior; values near 0 or 1 indicate a directional effect.
betas <- rstan::extract(fit)$beta_surv
colSums(betas>0)/nrow(betas)
betas <- rstan::extract(fit)$beta_adult
colSums(betas>0)/nrow(betas)
betas <- rstan::extract(fit)$beta_run
colSums(betas>0)/nrow(betas)
betas <- rstan::extract(fit)$beta_steel
colSums(betas>0)/nrow(betas)
# species
betas <- rstan::extract(fit)$beta_dolly
colSums(betas>0)/nrow(betas)
betas <- rstan::extract(fit)$beta_steel
colSums(betas>0)/nrow(betas)
betas <- rstan::extract(fit)$beta_cutt
colSums(betas>0)/nrow(betas)
betas <- rstan::extract(fit)$beta_coho
colSums(betas>0)/nrow(betas)
betas <- rstan::extract(fit)$beta_pink
colSums(betas>0)/nrow(betas)
# Posterior-mean process / observation correlation and covariance matrices,
# labelled by species.
pro_corr <- rstan::extract(fit)$Omega_proc
pro_corr_mn <- apply(pro_corr,c(3,2),mean)
colnames(pro_corr_mn) <- row.names(pro_corr_mn) <- unique(keogh_long$Species)
pro_cov <- rstan::extract(fit)$Sigma_proc
pro_cov_mn <- apply(pro_cov,c(3,2),mean)
colnames(pro_cov_mn) <- row.names(pro_cov_mn) <- unique(keogh_long$Species)
obs_cov <- rstan::extract(fit)$Sigma
obs_cov_mn <- apply(obs_cov,c(3,2),mean)
colnames(obs_cov_mn) <- row.names(obs_cov_mn) <- unique(keogh_long$Species)
obs_corr <- rstan::extract(fit)$Omega
obs_corr_mn <- apply(obs_corr,c(3,2),mean)
colnames(obs_corr_mn) <- row.names(obs_corr_mn) <- unique(keogh_long$Species)
# Total number of covariate coefficients across all five species (used to
# size the effect-size panel later).
cov_ord <- sum(dat$J1+1+dat$J2+dat$J3+dat$J4+dat$J5)
# Figure: pairwise scatterplots of posterior-mean time-varying productivity
# (alpha) between species, with 80% intervals. Also fills `r`, the
# species-by-species median posterior correlation matrix, which is reused by
# the corrplot figures below.
pdf("Figures/Keogh species interactions.pdf",width=6,height=6)
matLayout <- matrix(1:4,nrow=2,ncol=2,byrow=TRUE)
layout(matLayout)
par(mar=c(5,4,1,1))
ppd <- rstan::extract(fit)$x0
alpha_mns <- apply(ppd,c(3,2),mean)
r <- matrix(NA,nrow=dat$K,ncol=dat$K,dimnames=list(spp_ord_fact,spp_ord_fact))
diag(r) <- 1
ci <- apply(ppd,c(3,2),quantile,probs=intervals)
for(k in spp_ord)
{
  for(j in (spp_ord)[(spp_ord)!=k])
  {
    # Map model species indices (k, j) onto the display ordering used in `r`.
    k.i=which(unique(keogh_long$Species)[k]==spp_ord_fact)
    j.i=which(unique(keogh_long$Species)[j]==spp_ord_fact)
    # Median over posterior draws of the alpha time-series correlation.
    r[k.i,j.i] <- median(apply(ppd,1,function(x){cor(x=x[,k],y=x[,j])}))
    spp1 <- as.character(unique(keogh_long$Species)[k])
    spp2 <- as.character(unique(keogh_long$Species)[j])
    xlabel <- bquote(alpha[.(spp1)])
    ylabel <- bquote(alpha[.(spp2)])
    plot(alpha_mns[k,],alpha_mns[j,],xlab="",ylab="",pch=21,bg="orange",ylim=range(ci[,j,]),xlim=range(ci[,k,]))
    mtext(xlabel,side=1,cex=1,line=2.5)
    mtext(ylabel,side=2,cex=1,line=2.5)
    segments(x0=ci[1,k,],x1=ci[2,k,],y0=alpha_mns[j,],lwd=0.5)
    segments(x0=alpha_mns[k,],y0=ci[1,j,],y1=ci[2,j,],lwd=0.5)
    points(alpha_mns[k,],alpha_mns[j,],pch=21,bg="orange")
    Corner_text(paste(unique(keogh_long$Species)[k],"v.",unique(keogh_long$Species)[j]),"topleft")
  }
}
dev.off()
# Figure: mixed ellipse/number correlation plot of `r`.
jpeg("Figures/Keogh species correlations.jpeg",width=8,height=8,units="in",res=800)
layout(1)
par(mar=c(1,1,1,1),mai=c(0.2,0.2,0.2,0.2))
colFun <- colorRampPalette(c("tomato","orange","dodgerblue","darkblue"))
corrplot.mixed(r,upper="ellipse",lower.col="black",tl.col="black",upper.col = colFun(100))
dev.off()
# Figure: per-species productivity (ln R/S) posterior-predictive time series,
# shaded by the three hypothesised regimes (early / compensatory / decline).
jpeg("Figures/Keogh productivity stan marss.jpeg",width=6.5,height=6.5,units="in",res=800)
# Row indices of the 1991 and 2008 regime-shift years.
# FIX: the original `which(sh_annual==1991)` compared every cell of the data
# frame (returning a linear index into the coerced matrix, and extra hits if
# any other column happened to contain 1991/2008); compare the year column
# explicitly.
regime <- which(sh_annual$year==1991)
regime2 <- which(sh_annual$year==2008)
matLayout <- matrix(1:5,nrow=5,ncol=1,byrow=TRUE)
layout(matLayout)
par(mar=c(0,0,0,0),mai=c(0.1,0,0,0),oma=c(4,4,0.1,0))
ppd <- rstan::extract(fit)$y_ppd
mns <- apply(ppd,c(3,2),median)
ci <- apply(ppd,c(3,2),quantile,probs=intervals)
for(k in spp_ord)
{
plot(mns[k,],ylim=range(ci[,k,]),type="l",lwd=2,xlab="Year",ylab="",xaxt="n")
mtext("Productivity (ln R/S)",side=2,line=2.25,cex=0.9*txtSize)
axis(1,labels=FALSE,tick=TRUE)
# Regime-coloured credible-interval ribbons (orange/dodgerblue/darkblue).
polygon(c(1:regime,rev(1:regime)),c(ci[1,k,1:regime],rev(ci[2,k,1:regime])),col=adjustcolor("orange",0.5),border=NA)
polygon(c(regime:regime2,rev(regime:regime2)),c(ci[1,k,regime:regime2],rev(ci[2,k,regime:regime2])),col=adjustcolor("dodgerblue",0.5),border=NA)
polygon(c(regime2:40,rev(regime2:40)),c(ci[1,k,regime2:40],rev(ci[2,k,regime2:40])),col=adjustcolor("darkblue",0.5),border=NA)
lines(mns[k,],lwd=2,col="black")
points(dat$y[,k],pch=21,bg="grey50")
Corner_text(unique(keogh_long$Species)[k],"topleft")
if(k==spp_ord[5])
{
legend("bottomleft",c("Early","Compensatory","Decline"),title="Regime",pch=22,pt.bg=c(adjustcolor("orange",0.5),adjustcolor("dodgerblue",0.5),adjustcolor("darkblue",0.5)),bty="n",cex=1.05*txtSize)
}
}
axis(1,at=seq(from=0,to=dat$N,length=5),labels=seq(from=min(sh_annual$year)-1,to=max(sh_annual$year),length=5),tick=FALSE)
dev.off()
# Figure: (a) per-species time-varying productivity (ln alpha) with regime
# shading, (b) standardised covariate effect sizes per species, and (c) the
# species correlation matrix `r` built earlier.
jpeg("Figures/Keogh alpha stan marss.jpeg",height=6.5,width=6.5,units="in",res=800)
# FIX: compare the year column explicitly; the original `which(sh_annual==1991)`
# scanned every cell of the data frame.
regime <- which(sh_annual$year==1991)
regime2 <- which(sh_annual$year==2008)
matLayout <- matrix(0,nrow=10,ncol=10,byrow=TRUE)
matLayout[1:2,1:5] <- 1
matLayout[3:4,1:5] <- 2
matLayout[5:6,1:5] <- 3
matLayout[7:8,1:5] <- 4
matLayout[9:10,1:5] <- 5
matLayout[1:6,6:10] <- 6
matLayout[7:10,6:10] <- 7
layout(matLayout)
par(mar=c(0,0,0,0),mai=c(0.1,0,0,0),oma=c(4,4,0.1,0))
ppd <- rstan::extract(fit)$x0
mns <- apply(ppd,c(3,2),median)
ci <- apply(ppd,c(3,2),quantile,probs=intervals)
# Posterior-mean standardised run-timing series (standardised within draw).
runtime <- rstan::extract(fit)$pred_run
runtime <- apply(runtime,1,FUN=function(x){(x-mean(x))/sd(x)})
runtime <- rowMeans(runtime)
for(k in spp_ord)
{
plot(mns[k,],ylim=range(ci[,k,]),type="l",lwd=2,xlab="",ylab="",xaxt="n")
axis(1,labels=FALSE,tick=TRUE)
mtext("Productivity (ln \u03B1)",side=2,line=2.25,cex=0.7*txtSize)
polygon(c(1:regime,rev(1:regime)),c(ci[1,k,1:regime],rev(ci[2,k,1:regime])),col=adjustcolor("orange",0.5),border=NA)
polygon(c(regime:regime2,rev(regime:regime2)),c(ci[1,k,regime:regime2],rev(ci[2,k,regime:regime2])),col=adjustcolor("dodgerblue",0.5),border=NA)
polygon(c(regime2:40,rev(regime2:40)),c(ci[1,k,regime2:40],rev(ci[2,k,regime2:40])),col=adjustcolor("darkblue",0.5),border=NA)
lines(mns[k,],lwd=2,col="black")
legend("topleft",paste("(a) - ",unique(keogh_long$Species)[k],sep=""),bty="n",cex=txtSize)
if(k==spp_ord[4])
{
legend("bottomright",c("Early","Compensatory","Decline"),title="Regime",pch=22,pt.bg=c(adjustcolor("orange",0.5),adjustcolor("dodgerblue",0.5),adjustcolor("darkblue",0.5)),bty="n",cex=txtSize)
}
}
mtext("Year",side=1,line=2.25,cex=0.8*txtSize)
axis(1,at=seq(from=0,to=dat$N,length=5),labels=seq(from=min(sh_annual$year)-1,to=max(sh_annual$year),length=5),tick=TRUE)
# covariates: one dot + 80% interval per coefficient, stacked by species.
par(mar=c(4,2,1,1),mai=c(0.2,1.15,0,0.05))
plot(rep(0,cov_ord),1:cov_ord,xlim=c(-4,2),type="p",pch=0,ylab="",xlab="",bg=0,col=0,yaxt="n")
axis(2,at=1:cov_ord,labels=FALSE,tick=TRUE)
abline(h=1:cov_ord,lwd=0.5,lty=3,col="grey85")
abline(v=0,lty=1,lwd=1,col="red")
mtext("Effect size",side=1,line=2.25,cex=0.7*txtSize)
counter <- cov_ord
for(k in spp_ord)
{
xnames <- c("Cumulative logging","Summer air temperature","Summer rainfall","Winter air temperature","Winter rainfall","Pink salmon in FW","Fertilization","Adult run time")
if(k==1) { betas <- rstan::extract(fit)$beta_steel; xx <- data.frame(xx1,runtime);
xnames <- c("Cumulative logging","Rainfall after run","Temp. after run","Pink salmon in FW","Fertilization","Spawn date")}
if(k==2) { betas <- rstan::extract(fit)$beta_dolly; xx <- xx2}
if(k==3) { betas <- rstan::extract(fit)$beta_cutt; xx <- xx3}
if(k==4) { betas <- rstan::extract(fit)$beta_pink; xx <- xx4;
xnames <- c("Cumulative logging","Summer air temperature","Summer rainfall","Ocean interactions","Ocean PCA-2","NPGO","Winter air temperature","Winter rainfall")}
if(k==5) { betas <- rstan::extract(fit)$beta_coho; xx <- xx5}
for(j in 1:ncol(betas))
{
if(j==1){text(-4,y=counter+0.5,paste("(b) - ", unique(keogh_long$Species)[k],sep=""),cex=0.7*txtSize,adj=0,xpd=NA)}
points(mean(betas[,j])/sd(dat$y[,k]),counter,type="p",pch=21,bg="dodgerblue",xpd=NA)
segments(x0=quantile(betas[,j]/sd(dat$y[,k]),probs=intervals[1]),x1=quantile(betas[,j]/sd(dat$y[,k]),probs=intervals[2]),y0=counter,lwd=2,xpd=NA)
axis(2,at=counter,labels=xnames[j],tick=FALSE,las=1,line=0, cex.axis=0.9*txtSize)
# Shade the point by Pr(beta > 0), mapped onto the 21-step sig_colors
# palette. (A dead ifelse() that set pch_color to "red"/"grey50" and was
# immediately overwritten has been removed.)
pch_color <- 1+round(round(sum(betas[,j]>0)/length(betas[,j])/5,2)*5*20)
points(mean(betas[,j])/sd(dat$y[,k]),counter,pch=21,bg=sig_colors[pch_color],xpd=NA)
counter <- counter - 1
}
}
par(xpd=NA)
colFun <- colorRampPalette(c("tomato","orange","dodgerblue","darkblue"))
corrplot.mixed(r,upper="ellipse",lower.col="black",tl.col="black",upper.col = colFun(100),tl.cex =0.7*txtSize,mar=c(0,0,5,0.05))
text("(c) - Species correlations",x=1,y=5.75,xpd=NA,cex=txtSize)
dev.off()
par(xpd=F)
# recruitment
# Figure: posterior-predictive recruitment per species with observed
# recruits overlaid.
jpeg("Figures/Keogh recruitment stan marss.jpeg",width=8,height=8,units="in",res=800)
matLayout <- matrix(c(1:5,0),nrow=3,ncol=2,byrow=TRUE)
layout(matLayout)
par(mar=c(5,4,1,1))
ppd <- rstan::extract(fit)$R
mns <- apply(ppd,c(3,2),median)
ci <- apply(ppd,c(3,2),quantile,probs=intervals)
for(k in spp_ord)
{
  rec <- keogh_long[keogh_long$Species==unique(keogh_long$Species)[k],"Recruits"]
  plot(mns[k,],ylim=range(ci[,k,],rec,na.rm=TRUE),type="l",lwd=2,xlab="Year",ylab="Recruitment",xaxt="n")
  axis(1,labels=NULL,tick=TRUE)
  polygon(c(1:dat$N,rev(1:dat$N)),c(ci[1,k,],rev(ci[2,k,])),col=adjustcolor("dodgerblue",0.3),border=NA)
  points(rec,pch=21,bg="orange")
  Corner_text(unique(keogh_long$Species)[k],"topleft")
}
dev.off()
# Figure: partial-residual regressions of productivity on each covariate,
# one page per species; also accumulates `perc_change`, the % change in
# productivity implied by each covariate moving from its first- to last-year
# value.
pdf("Figures/Keogh regression stan marss.pdf",width=5.5,height=8)
perc_change <- array(NA,dim=c(length(spp_ord),max(ncol(xx1)+1,ncol(xx2),ncol(xx3),ncol(xx4),ncol(xx5)),3))
ppd <- rstan::extract(fit)$x0
mns <- apply(ppd,c(3,2),mean)
ci <- apply(ppd,c(3,2),quantile,probs=intervals)
mus <- rstan::extract(fit)$mu
runtime <- rstan::extract(fit)$pred_run
runtime <- apply(runtime,1,FUN=function(x){(x-mean(x))/sd(x)})
runtime <- rowMeans(runtime)
for(k in spp_ord)
{
  xnames <- c("Cumulative logging (15-years lagged)","Summer air temperature","Summer rainfall","Winter air temperature","Winter rainfall","Pink salmon abundance in early life","Fertilization experiment","Adult run time")
  if(k==1) { betas <- rstan::extract(fit)$beta_steel; xx <- data.frame(xx1,runtime);
  xnames <- c("Cumulative logging (15-years lagged)","Rainfall after run","Temp. after run","Pink salmon abundance in early life","Fertilization experiment","Spawn date")}
  if(k==2) { betas <- rstan::extract(fit)$beta_dolly; xx <- xx2}
  if(k==3) { betas <- rstan::extract(fit)$beta_cutt; xx <- xx3}
  if(k==4) { betas <- rstan::extract(fit)$beta_pink; xx <- xx4;
  xnames <- c("Cumulative logging (15-years lagged)","Summer air temperature","Summer rainfall","Ocean interactions","Ocean PCA-2","NPGO","Winter air temperature","Winter rainfall")}
  if(k==5) { betas <- rstan::extract(fit)$beta_coho; xx <- xx5}
  matLayout <- matrix(c(1:ncol(xx),rep(0,8-ncol(xx))),nrow=4,ncol=2,byrow=TRUE)
  layout(matLayout)
  par(mar=c(5,4,1,1))
  for(j in 1:ncol(betas))
  {
    covar_seq <- seq(from=min(xx[,j]),to=max(xx[,j]),length=25)
    # Residual productivity after removing density dependence, latent alpha,
    # and the effects of every covariate except j.
    resid_alphas <- mus[,,k] - dat$x[,k]*rstan::extract(fit)$beta[,k] - ppd[,,k] - apply(xx[,-j],1,function(x){rowSums(x*betas[,-j])})
    pred <- apply(resid_alphas,2,quantile,probs=intervals)
    plot(xx[,j],colMeans(resid_alphas),ylim=range(pred,na.rm=TRUE),type="p",pch=21,xlab=xnames[j],ylab="Residual productivity (ln R/S)",xaxt="n",bg="grey50")
    segments(x0=xx[,j],y0=pred[1,],y1=pred[2,],lwd=0.5)
    regress <- sapply(covar_seq,function(x){x*betas[,j]})
    ci <- apply(regress,2,quantile,probs=intervals)
    axis(1,labels=NULL,tick=TRUE)
    polygon(c(covar_seq,rev(covar_seq)),c(ci[1,],rev(ci[2,])),col=adjustcolor("dodgerblue",0.3),border=NA)
    points(xx[,j],colMeans(resid_alphas),pch=21,bg="grey50")
    Corner_text(unique(keogh_long$Species)[k],"topleft")
    # Covariate values in the first and last year.
    # NOTE(review): the 40 here hard-codes the number of years (dat$N);
    # consider nrow(xx) if the year range ever changes.
    covar_seq <- c(xx[1,j],xx[40,j])
    regress <- sapply(covar_seq,function(x){x*betas[,j]})
    delta_change <- exp(colMeans(regress))
    #delta_change <- exp(colMeans(resid_alphas))
    perc_change[k,j,1] <- 100*((delta_change[length(delta_change)]-delta_change[1])/delta_change[1])
    perc_change[k,j,2:3] <- quantile(apply(exp(regress),1,function(x){100*((x[2]-x[1])/x[1])}),probs=intervals)
  }
}
dev.off()
# NOTE(review): assigning dimnames one margin at a time with single-bracket
# `[1]`/`[3]`/`[2]` on a fresh (NULL) dimnames is fragile, and the second
# margin gets a repeated "None" label — verify this runs as intended.
dimnames(perc_change)[1] <- list("Species"=unique(keogh_long$Species))
dimnames(perc_change)[3] <- list("Intervals"=c("Mean","LI","UI"))
dimnames(perc_change)[2] <- list("Environment"=c("Logging",rep("None",7)))
# Figure: partial-residual regressions of steelhead marine survival on each
# ocean covariate.
# FIX: the original wrapped this in `for(k in spp_ord)` although nothing in
# the loop body depended on k, so the identical page was drawn five times
# into the pdf; draw it once.
pdf("Figures/Keogh survival regression stan marss.pdf",width=5.5,height=5.5)
ppd <- rstan::extract(fit)$s0
ci <- apply(ppd,2,quantile,probs=intervals)
mns <- apply(ppd,2,mean)
mus <- rstan::extract(fit)$pred_surv
xnames <- c("Ocean interactions","NPGO","Ocean PCA-2")
betas <- rstan::extract(fit)$beta_surv
xx <- x1
matLayout <- matrix(c(1:ncol(xx),rep(0,4-ncol(xx))),nrow=2,ncol=2,byrow=TRUE)
layout(matLayout)
par(mar=c(5,4,1,1))
for(j in 1:ncol(betas))
{
# Residual logit survival after removing the latent state and the other
# covariates' effects, plotted against covariate j.
covar_seq <- seq(from=min(xx[,j]),to=max(xx[,j]),length=25)
resid_alphas <- mus - ppd - apply(xx[,-j],1,function(x){rowSums(x*betas[,-j])})
pred <- apply(resid_alphas,2,quantile,probs=intervals)
plot(xx[,j],colMeans(resid_alphas),ylim=range(pred,na.rm=TRUE),type="p",pch=21,xlab=xnames[j],ylab="Residual logit survival",xaxt="n",bg="grey50")
segments(x0=xx[,j],y0=pred[1,],y1=pred[2,],lwd=0.5)
regress <- sapply(covar_seq,function(x){x*betas[,j]})
ci <- apply(regress,2,quantile,probs=intervals)
axis(1,labels=NULL,tick=TRUE)
polygon(c(covar_seq,rev(covar_seq)),c(ci[1,],rev(ci[2,])),col=adjustcolor("dodgerblue",0.3),border=NA)
points(xx[,j],colMeans(resid_alphas),pch=21,bg="grey50")
abline(lm(colMeans(resid_alphas)~xx[,j]))
}
dev.off()
# steelhead lifecycle
# Figure (panels a-j): steelhead life-cycle summary. This part draws the
# marine-survival time series (a) and its covariate effect sizes (b).
jpeg("Figures/Steelhead cycle marss.jpeg",res=800,height=6.5,width=6.5,units="in")
txtSize <- 0.7  # overrides the global txtSize for this figure only
# FIX: compare the year column explicitly; the original `which(sh_annual==1991)`
# scanned every cell of the data frame.
regime <- which(sh_annual$year==1991)
regime2 <- which(sh_annual$year==2008)
matLayout <- matrix(0,nrow=15,ncol=15)
matLayout[1:3,1:9] <- 1
matLayout[1:3,10:15] <- 2
matLayout[4:6,1:9] <- 3
matLayout[4:6,10:15] <- 4
matLayout[7:9,1:9] <- 5
matLayout[7:9,10:15] <- 6
matLayout[10:12,1:9] <- 7
matLayout[10:12,10:15] <- 8
matLayout[13:15,1:9] <- 9
matLayout[13:15,10:15] <- 10
layout(matLayout)
par(oma=c(2.5,1,0.05,0.05),mai=c(0.1,0.35,0.1,0.05),mgp=c(2,0.4,0))
ppd <- rstan::extract(fit)$y1_ppd
ci <- apply(ppd,2,quantile,probs=intervals)
plot(colMeans(ppd),ylim=range(ci,quantile(1/(1+exp(-rstan::extract(fit)$y1_miss)),probs=intervals)),type="l",lwd=2,xlab="",ylab="",xaxt="n")
mtext("Marine survival",2,line=2,xpd=NA,cex=0.8*txtSize)
axis(1,tick=TRUE,labels=FALSE)
polygon(c(1:regime,rev(1:regime)),c(ci[1,1:regime],rev(ci[2,1:regime])),col=adjustcolor("orange",0.5),border=NA)
polygon(c(regime:regime2,rev(regime:regime2)),c(ci[1,regime:regime2],rev(ci[2,regime:regime2])),col=adjustcolor("dodgerblue",0.5),border=NA)
polygon(c(regime2:40,rev(regime2:40)),c(ci[1,regime2:40],rev(ci[2,regime2:40])),col=adjustcolor("darkblue",0.5),border=NA)
lines(colMeans(ppd),lwd=2)
# Latent survival states, inverse-logit transformed, overlaid as points.
ppx <- rstan::extract(fit)$y1
ci <- apply((1/(1+exp(-ppx))),2,quantile,probs=intervals)
segments(x0=1:ncol(ppx),y0=ci[1,],y1=ci[2,],lwd=2,col="black")
points(colMeans(1/(1+exp(-ppx))),pch=21,bg="grey70")
legend("topright",c("Early","Compensatory","Decline"),title="Regime",pch=22,pt.bg=c(adjustcolor("orange",0.5),adjustcolor("dodgerblue",0.5),adjustcolor("darkblue",0.5)),bty="n",cex=1.2*txtSize)
Corner_text("(a)","topleft")
# some effects on survival (panel b)
betas <- rstan::extract(fit)$beta_surv
std_eff <- apply(betas,2,function(x){c(quantile(x,probs=intervals[1]),mean(x),quantile(x,probs=intervals[2]))})/sd(dat$y1_obs)
colnames(std_eff) <- c("Ocean interactions","NPGO","Ocean PCA-2")
# Point colour encodes Pr(beta > 0) via the 21-step sig_colors palette.
pch_color <- apply(betas,2,FUN=function(x){sum(x>0)/nrow(betas)})
pch_color <- 1+round(round(pch_color/5,2)*5*20)
plot(std_eff[2,],ylim=1.1*range(c(0,std_eff)),xaxt="n",xlim=range(0.5:(ncol(std_eff)+0.5)),yaxt="n",ylab="",col=0,xlab="")
axis(1,at=1:ncol(std_eff),labels=colnames(std_eff),cex.axis=txtSize)
axis(2,line=0)
mtext("Effect size",side=2,cex=0.8*txtSize,line=2)
segments(x0=1:3,y0=std_eff[1,],y1=std_eff[3,],lwd=2)
points(std_eff[2,],pch=21,bg=sig_colors[pch_color],cex=1.5)
abline(h=0,lwd=1,lty=5)
Corner_text("(b)","topleft")
# adult abundance (panel c): posterior-predictive adult female returns.
ppd <- rstan::extract(fit)$x3_ppd
ci <- apply(ppd,2,quantile,probs=intervals)
colMed <- apply(ppd,2,quantile,probs=0.5)
plot(colMed,ylim=range(ci),type="l",lwd=2,xlab="",ylab="",xaxt="n")
mtext("Adult female returns",2,line=2,xpd=NA,cex=0.8*txtSize)
axis(1,tick=TRUE,labels=FALSE)
polygon(c(1:regime,rev(1:regime)),c(ci[1,1:regime],rev(ci[2,1:regime])),col=adjustcolor("orange",0.5),border=NA)
polygon(c(regime:regime2,rev(regime:regime2)),c(ci[1,regime:regime2],rev(ci[2,regime:regime2])),col=adjustcolor("dodgerblue",0.5),border=NA)
polygon(c(regime2:40,rev(regime2:40)),c(ci[1,regime2:40],rev(ci[2,regime2:40])),col=adjustcolor("darkblue",0.5),border=NA)
lines(colMed,lwd=2)
points(sh_annual$Stock,pch=21,bg="grey70")
Corner_text("(c)","topleft")
# some effects on adult abundance (panel d)
# NOTE(review): the divisor log(sd(dat$x[,1])) differs from the plain
# sd(...) used for the other effect-size panels — confirm intentional.
betas <- rstan::extract(fit)$beta_adults
std_eff <- apply(betas,2,function(x){c(quantile(x,probs=intervals[1]),mean(x),quantile(x,probs=intervals[2]))})/log(sd(dat$x[,1]))
colnames(std_eff) <- c("Marine survival","Smolt cohort")
#pch_color <- ifelse(apply(std_eff,2,FUN=function(x){(x[1]*x[3])>0}),"red","grey50")
# NOTE(review): the ifelse() on the next line is immediately overwritten by
# the palette mapping two lines below — dead code.
pch_color <- ifelse((apply(betas,2,FUN=function(x){sum(x>0)/nrow(betas)})>target) | (apply(betas,2,FUN=function(x){sum(x<0)/nrow(betas)})>target),"red","grey50")
pch_color <- apply(betas,2,FUN=function(x){sum(x>0)/nrow(betas)})
pch_color <- 1+round(round(pch_color/5,2)*5*20)
plot(std_eff[2,],ylim=1.1*range(c(0,std_eff)),xaxt="n",xlim=range(0.5:(ncol(std_eff)+0.5)),yaxt="n",ylab="",col=0,xlab="")
axis(1,at=1:ncol(std_eff),labels=colnames(std_eff),cex.axis=txtSize)
axis(2,line=0)
mtext("Effect size",side=2,cex=0.8*txtSize,line=2)
segments(x0=1:3,y0=std_eff[1,],y1=std_eff[3,],lwd=2)
points(std_eff[2,],pch=21,bg=sig_colors[pch_color],cex=1.5)
abline(h=0,lwd=1,lty=5)
Corner_text("(d)","topleft")
# run time (panel e): posterior-predictive median spawn date.
ppd <- rstan::extract(fit)$y2_ppd
ci <- apply(ppd,2,quantile,probs=intervals)
plot(colMeans(ppd),ylim=range(ci),type="l",lwd=2,xlab="",ylab="",xaxt="n")
mtext("Median spawn date",2,line=2,xpd=NA,cex=0.8*txtSize)
axis(1,tick=TRUE,labels=FALSE)
polygon(c(1:regime,rev(1:regime)),c(ci[1,1:regime],rev(ci[2,1:regime])),col=adjustcolor("orange",0.5),border=NA)
polygon(c(regime:regime2,rev(regime:regime2)),c(ci[1,regime:regime2],rev(ci[2,regime:regime2])),col=adjustcolor("dodgerblue",0.5),border=NA)
polygon(c(regime2:40,rev(regime2:40)),c(ci[1,regime2:40],rev(ci[2,regime2:40])),col=adjustcolor("darkblue",0.5),border=NA)
lines(colMeans(ppd),lwd=2)
points(dat$y2,pch=21,bg="grey70")
Corner_text("(e)","topleft")
# some effects on run time (panel f)
betas <- rstan::extract(fit)$beta_run
std_eff <- apply(betas,2,function(x){c(quantile(x,probs=intervals[1]),mean(x),quantile(x,probs=intervals[2]))})/sd(dat$y2)
colnames(std_eff) <- c("ln(Adults)","Rainfall","Temperature")
# NOTE(review): the ifelse() on the next line is again dead — overwritten
# by the palette mapping below.
pch_color <- ifelse((apply(betas,2,FUN=function(x){sum(x>0)/nrow(betas)})>target) | (apply(betas,2,FUN=function(x){sum(x<0)/nrow(betas)})>target),"red","grey50")
pch_color <- apply(betas,2,FUN=function(x){sum(x>0)/nrow(betas)})
pch_color <- 1+round(round(pch_color/5,2)*5*20)
plot(std_eff[2,],ylim=1.1*range(c(0,std_eff)),xaxt="n",xlim=range(0.5:(ncol(std_eff)+0.5)),yaxt="n",ylab="",col=0,xlab="")
axis(1,at=1:ncol(std_eff),labels=colnames(std_eff),cex.axis=txtSize)
axis(2,line=0)
mtext("Effect size",side=2,cex=0.8*txtSize,line=2)
segments(x0=1:3,y0=std_eff[1,],y1=std_eff[3,],lwd=2)
points(std_eff[2,],pch=21,bg=sig_colors[pch_color],cex=1.5)
abline(h=0,lwd=1,lty=5)
Corner_text("(f)","topleft")
# productivity (panel g): steelhead residual productivity after removing
# density dependence and the latent alpha.
ppd <- rstan::extract(fit)$y_ppd[,,1]
k=1
j=2
mus <- rstan::extract(fit)$mu
runtime <- rstan::extract(fit)$pred_run
runtime <- apply(runtime,1,FUN=function(x){(x-mean(x))/sd(x)})
runtime <- rowMeans(runtime)
betas <- rstan::extract(fit)$beta_steel
xx <- data.frame(xx1,runtime)
dens_ind <- rstan::extract(fit)$x0[,,1] - sapply(dat$x[,k],FUN=function(x){x*rstan::extract(fit)$beta[,k]})
resid_alphas <- (ppd - dens_ind)
ci <- apply(resid_alphas,2,quantile,probs=intervals)
#ci <- apply(ppd,2,quantile,probs=intervals)
plot(colMeans(resid_alphas),ylim=range(ci),type="l",lwd=2,xlab="",ylab="",xaxt="n")
mtext("Residual productivity (\u0394ln R/S)",2,line=2,xpd=NA,cex=0.8*txtSize)
axis(1,labels=FALSE,tick=TRUE)
polygon(c(1:regime,rev(1:regime)),c(ci[1,1:regime],rev(ci[2,1:regime])),col=adjustcolor("orange",0.5),border=NA)
polygon(c(regime:regime2,rev(regime:regime2)),c(ci[1,regime:regime2],rev(ci[2,regime:regime2])),col=adjustcolor("dodgerblue",0.5),border=NA)
polygon(c(regime2:40,rev(regime2:40)),c(ci[1,regime2:40],rev(ci[2,regime2:40])),col=adjustcolor("darkblue",0.5),border=NA)
lines(colMeans(resid_alphas),lwd=2)
points(dat$y[,k]-colMeans(dens_ind),pch=21,bg="grey70")
Corner_text("(g)","topleft")
# some effects on productivity (panel h)
betas <- rstan::extract(fit)$beta_steel
std_eff <- apply(betas,2,function(x){c(quantile(x,probs=intervals[1]),mean(x),quantile(x,probs=intervals[2]))})/sd(dat$y[,1])
#colnames(std_eff) <- c("Cumulative logging","Summer temp.","Summer rain","Winter temp.","Winter rain","Pink salmon in early life","Spawn date")
colnames(std_eff) <- c("Logging","Rainfall","Temp.","Pinks","Fert.","Spawn date")
#pch_color <- ifelse(apply(std_eff,2,FUN=function(x){(x[1]*x[3])>0}),"red","grey50")
# NOTE(review): the ifelse() on the next line is dead — overwritten by the
# palette mapping below.
pch_color <- ifelse((apply(betas,2,FUN=function(x){sum(x>0)/nrow(betas)})>target) | (apply(betas,2,FUN=function(x){sum(x<0)/nrow(betas)})>target),"red","grey50")
pch_color <- apply(betas,2,FUN=function(x){sum(x>0)/nrow(betas)})
pch_color <- 1+round(round(pch_color/5,2)*5*20)
plot(std_eff[2,],ylim=1.1*range(c(0,std_eff)),xaxt="n",xlim=range(0.5:(ncol(std_eff)+0.5)),yaxt="n",ylab="",col=0,xlab="")
#axis(1,at=1:ncol(std_eff),labels=colnames(std_eff),cex.axis=0.7*txtSize)
axis(1,at=1:ncol(std_eff),labels=colnames(std_eff),cex.axis=0.9*txtSize)
axis(2,line=0)
mtext("Effect size",side=2,cex=0.85*txtSize,line=2)
segments(x0=1:length(colnames(std_eff)),y0=std_eff[1,],y1=std_eff[3,],lwd=2)
points(std_eff[2,],pch=21,bg=sig_colors[pch_color],cex=1.5)
abline(h=0,lwd=1,lty=5)
Corner_text("(h)","topleft")
# recruitment (panel i): posterior-predictive smolt numbers with observations.
ppd <- rstan::extract(fit)$R[,,1]
ci <- apply(ppd,2,quantile,probs=intervals)
plot(colMeans(ppd),ylim=range(ci),type="l",lwd=2,xlab="",ylab="",xaxt="n")
mtext("Smolts",2,line=2,xpd=NA,cex=0.8*txtSize)
axis(1,at=seq(from=0,to=dat$N,length=5),labels=seq(from=min(sh_annual$year)-1,to=max(sh_annual$year),length=5),tick=TRUE)
mtext("Year",side=1,line=2,cex=0.8*txtSize)
polygon(c(1:regime,rev(1:regime)),c(ci[1,1:regime],rev(ci[2,1:regime])),col=adjustcolor("orange",0.5),border=NA)
polygon(c(regime:regime2,rev(regime:regime2)),c(ci[1,regime:regime2],rev(ci[2,regime:regime2])),col=adjustcolor("dodgerblue",0.5),border=NA)
polygon(c(regime2:40,rev(regime2:40)),c(ci[1,regime2:40],rev(ci[2,regime2:40])),col=adjustcolor("darkblue",0.5),border=NA)
lines(colMeans(ppd),lwd=2)
points(sh_annual$Recruits,pch=21,bg="grey70")
Corner_text("(i)","topleft")
# some effects on recruitment (panel j): regime-specific stock-recruit
# curves built from posterior alphas/betas and regime-mean covariates.
betas <- rstan::extract(fit)$beta[,1]
alphas <- rstan::extract(fit)$x0[,,1]
runtime <- rstan::extract(fit)$pred_run
runtime <- t(apply(runtime,1,FUN=function(x){(x-mean(x))/sd(x)}))
runtime <- colMeans(runtime)
beta_cov <- colMeans(rstan::extract(fit)$beta_steel)
xx <- data.frame(xx1,runtime)
adult_seq <- seq(from=0,to=max(dat$x[1:(regime-1),1]),length.out=25)
adult_seq2 <- seq(from=0,to=max(dat$x[regime:nrow(dat$x),1]),length.out=25)
adult_seq3 <- seq(from=0,to=max(dat$x[regime2:nrow(dat$x),1]),length.out=25)
ppd <- sapply(1:length(betas),function(x){exp(mean(alphas[x,1:(regime-1)]))*adult_seq*exp(betas[x] * adult_seq + sum(beta_cov * colMeans(xx[1:(regime-1),])))})
ci <- apply(ppd,1,quantile,probs=intervals)
ppd2 <- sapply(1:length(betas),function(x){exp(mean(alphas[x,regime:nrow(dat$x)]))*adult_seq2*exp(betas[x] * adult_seq2 + sum(beta_cov * colMeans(xx[regime:nrow(dat$x),])))})
ci2 <- apply(ppd2,1,quantile,probs=intervals)
ppd3 <- sapply(1:length(betas),function(x){exp(mean(alphas[x,regime2:nrow(dat$x)]))*adult_seq3*exp(betas[x] * adult_seq3 + sum(beta_cov * colMeans(xx[regime2:nrow(dat$x),])))})
ci3 <- apply(ppd3,1,quantile,probs=intervals)
plot(adult_seq,rowMeans(ppd),xlab="",type="l",ylab="",ylim=range(ci,ci2),lwd=2,xaxt="n",yaxt="n")
axis(1,line=0)
axis(2,line=0)
mtext("Smolts",side=2,line=2,cex=0.8*txtSize)
mtext("Adult steelhead",side=1,line=2,cex=0.8*txtSize)
polygon(c(adult_seq,rev(adult_seq)),c(ci[1,],rev(ci[2,])),col=adjustcolor("orange",0.4),border=NA)
polygon(c(adult_seq2,rev(adult_seq2)),c(ci2[1,],rev(ci2[2,])),col=adjustcolor("dodgerblue",0.5),border=NA)
#lines(adult_seq2,rowMeans(ppd2),lwd=2)
polygon(c(adult_seq3,rev(adult_seq3)),c(ci3[1,],rev(ci3[2,])),col=adjustcolor("darkblue",0.5),border=NA)
#lines(adult_seq3,rowMeans(ppd3),lwd=2)
lines(adult_seq,rowMeans(ppd),lwd=2)
Corner_text("(j)","topleft")
dev.off()
# --- Exploratory / scratch plots (interactive device, not saved) -----------
# Quick checks of predicted adults vs run date and the regime stock-recruit
# curves with and without covariate effects.
sh_adults <- rstan::extract(fit)$pred_adults
ad <- colMeans(sh_adults)
plot(log(ad),dat$y2)
plot(log(x[,1]),dat$y2)
layout(1)
plot(adult_seq,rowMeans(ppd),xlab="",type="l",ylab="",ylim=range(ci,ci2),lwd=2,xaxt="n",yaxt="n")
axis(1,line=0)
axis(2,line=0)
mtext("Smolts",side=2,line=2)
mtext("Adult steelhead",side=1,line=2)
polygon(c(adult_seq,rev(adult_seq)),c(ci[1,],rev(ci[2,])),col=adjustcolor("orange",0.4),border=NA)
lines(adult_seq,rowMeans(ppd),lwd=2)
layout(1)
# Compensatory-regime curve recomputed WITHOUT covariate effects for contrast.
ppd2_alt <- sapply(1:length(betas),function(x){exp(mean(alphas[x,regime:nrow(dat$x)]))*adult_seq2*exp(betas[x] * adult_seq2)})
ci2_alt <- apply(ppd2_alt,1,quantile,probs=intervals)
plot(adult_seq,rowMeans(ppd),xlab="",type="l",ylab="",ylim=range(ci,ci2,ci2_alt),lwd=2,xaxt="n",yaxt="n")
axis(1,line=0)
axis(2,line=0)
mtext("Smolts",side=2,line=2)
mtext("Adult steelhead",side=1,line=2)
polygon(c(adult_seq,rev(adult_seq)),c(ci[1,],rev(ci[2,])),col=adjustcolor("orange",0.4),border=NA)
polygon(c(adult_seq2,rev(adult_seq2)),c(ci2[1,],rev(ci2[2,])),col=adjustcolor("dodgerblue",0.5),border=NA)
lines(adult_seq,rowMeans(ppd),lwd=2)
plot(adult_seq,rowMeans(ppd),xlab="",type="l",ylab="",ylim=range(ci,ci2,ci2_alt),lwd=2,xaxt="n",yaxt="n")
axis(1,line=0)
axis(2,line=0)
mtext("Smolts",side=2,line=2)
mtext("Adult steelhead",side=1,line=2)
polygon(c(adult_seq,rev(adult_seq)),c(ci[1,],rev(ci[2,])),col=adjustcolor("orange",0.4),border=NA)
polygon(c(adult_seq2,rev(adult_seq2)),c(ci2_alt[1,],rev(ci2_alt[2,])),col=adjustcolor("dodgerblue",0.5),border=NA)
lines(adult_seq,rowMeans(ppd),lwd=2)
#lines(adult_seq2,rowMeans(ppd2),lwd=2)
# NOTE(review): the ppd2/ci2 recomputation and polygon(ci3) below add a
# decline-regime ribbon onto the last plot drawn above — looks like leftover
# scratch code; verify it is still wanted.
ppd2 <- sapply(1:length(betas),function(x){exp(mean(alphas[x,regime:nrow(dat$x)]))*adult_seq2*exp(betas[x] * adult_seq2 + sum(beta_cov * colMeans(xx[regime:nrow(dat$x),])))})
ci2 <- apply(ppd2,1,quantile,probs=intervals)
polygon(c(adult_seq3,rev(adult_seq3)),c(ci3[1,],rev(ci3[2,])),col=adjustcolor("darkblue",0.5),border=NA)
#lines(adult_seq3,rowMeans(ppd3),lwd=2)
lines(adult_seq,rowMeans(ppd),lwd=2)
|
32cc6a9da17f4d1cdb4c16d903d031e38c69cb24
|
57d1abe121af6d93354c93597ec4e76417479de2
|
/seccion5-data-visualization-principles/assessment9.r
|
5ce99b256cac02b5d2c6d761610dbd6de049ceed
|
[] |
no_license
|
johnfelipe/PH125.2x-Data-Science-Visualization
|
e7beef8a6905ef28fe0088a894c4c2c59e5ffab7
|
c8b1d1eec308422cda01ee65767c96145e504684
|
refs/heads/master
| 2020-04-16T22:06:17.204653
| 2018-07-12T23:53:29
| 2018-07-12T23:53:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,477
|
r
|
assessment9.r
|
"Exercise 1: Showing the data and customizing plots
Say we are interested in comparing gun homicide rates across regions of the US. We see this plot:
library(dplyr)
library(ggplot2)
library(dslabs)
data("murders")
murders %>% mutate(rate = total/population*100000) %>%
group_by(region) %>%
summarize(avg = mean(rate)) %>%
mutate(region = factor(region)) %>%
ggplot(aes(region, avg)) +
geom_bar(stat="identity") +
ylab("Murder Rate Average")
and decide to move to a state in the western region. What is the main problem with this interpretaion?
Possible Answers
* The categories are ordered alphabetically.
* The graph does not show standard errors.
* It does not show all the data. We do not see the variability within a region and it's possible
that the safest states are not in the West. *
* The Northeast has the lowest average.
"
"Exercise 2: Making a box plot
To further investigate whether moving to the western region is a wise decision, let's make a box
plot of murder rates by region, showing all points.
* Make a box plot of the murder rates by region.
* Order the regions by their median murder rate.
* Show all of the points on the box plot."
# Solution to Exercise 2: box plot of US murder rates by region (dslabs data).
library(dplyr)
library(ggplot2)
library(dslabs)
data("murders")
# rate = murders per 100,000 inhabitants; reorder() re-levels `region` by a
# summary of `rate` (mean by default), so boxes are sorted left to right.
# geom_point overlays the individual state values on each box.
murders %>% mutate(rate = total/population*100000) %>%
mutate(region = reorder(region, rate)) %>%
ggplot() +
geom_boxplot(aes(region, rate)) +
geom_point(aes(region, rate))
|
7ea104d288839bebcfa6a58c90eaf0d3e9332c30
|
b8f7f32df8a12d56df79b1351b4266fece9ca68d
|
/cachematrix.R
|
5e16830d722c7a742718d734540338633176cb5c
|
[] |
no_license
|
skhummer/ProgrammingAssignment2
|
fa7b68cb1a4cb42f9a0382999312b986130e78b3
|
2190fff01bfa56c8a3892a5c1c621e90c6d21ada
|
refs/heads/master
| 2020-03-18T13:55:28.790701
| 2018-05-30T06:44:07
| 2018-05-30T06:44:07
| 134,817,868
| 0
| 0
| null | 2018-05-25T07:11:51
| 2018-05-25T07:11:50
| null |
UTF-8
|
R
| false
| false
| 690
|
r
|
cachematrix.R
|
## Build a "cache matrix": a list of four closures sharing one environment
## that holds the matrix `x` and a lazily cached inverse.
##   set(y)          -- replace the matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse (no computation here)
##   getInverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix makes any old inverse stale
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## Return the inverse of the special "matrix" produced by makeCacheMatrix,
## computing it at most once and reusing the cached value afterwards.
##
## Args:
##   x:   list created by makeCacheMatrix (get / set / getInverse / setInverse).
##   ...: additional arguments forwarded to solve().
## Returns: the inverse of the matrix stored in x.
cacheSolve <- function(x, ...) {
  t <- x$getInverse()       # fix: was `t <= x$getinverse()` -- a comparison,
                            # and the list element is `getInverse` (case matters)
  if(!is.null(t)) {
    message("getting that cached data")
    return(t)
  }
  mtx <- x$get()
  t <- solve(mtx, ...)      # fix: was solve(data, ...) -- `data` was never
                            # defined here (it resolves to the base function)
  x$setInverse(t)           # fix: was x$setinverse(t) -- no such element
  t
}
|
612d14dd6685e879387c0ad13220da4246f651af
|
3e1a57c5cd9735115babcfc16d04c6a037fd54d9
|
/evapostats/calib.R
|
97efb7a761e4ffe0ed0377073ed763a1c55c64fe
|
[] |
no_license
|
liorstov/evapocalc
|
677eccbc6149d860bb60396ff533a0bb974f1fb7
|
d4b293789b49d2c80e9573bcd4d354ac5525c257
|
refs/heads/master
| 2023-02-02T01:18:24.230140
| 2020-12-21T14:46:35
| 2020-12-21T14:46:35
| 319,621,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,603
|
r
|
calib.R
|
## Calibration driver for the CSM gypsum model: evaluates a grid of
## rain-sulfate x dust-flux values on a PSOCK cluster, scores simulated soil
## profiles against observed ones, and extracts a Pareto-optimal parameter set.
require(parallel)
load("synth.RData")
# 5 workers; PSOCK workers start empty, so every object they need is exported.
cl <- makeCluster(5, type = "PSOCK")
clusterExport(cl, c("Zel11Observed", "T1.9Observed", "T1.10Observed", "T2.1", "T1.1Observed", "opt.AETF", "opt.dust", "opt.FC", "opt.sulfate", "opt.WP", "Zel1"))
# Per-worker setup: compile the C++ model, source project helpers, and define
# the candidate parameter grid (rainDustArray) on every worker.
clusterEvalQ(cl, {
require(tidyverse)
require(Rcpp)
require(ggplot2)
require(reshape2)
require(zoo)
require(tictoc)
Rcpp::sourceCpp('C:/Users/liorst/source/repos/evapocalc/Calcyp/CSM.cpp', verbose = TRUE, rebuild = 0);
b <<- new(CSMCLASS);
source("C:/Users/liorst/source/repos/evapocalc/evapostats/Functions.R", encoding = "Windows-1252")
# Reference ("optimal") parameter values used later to flag the optimal row.
opt.AETF = 1.2;
opt.WP = 0.013;
opt.FC = 0.1;
opt.sulfate = 13;
opt.dust = 6;
# NOTE(review): %>% binds tighter than *, so each line is
# seq(...) * rep(opt.X, 60) -- presumably intentional; confirm.
seq.AETF = seq(0.8, 1.2, by = 0.4) * opt.AETF %>% rep(60);
seq.WP = seq(0.8, 1.2, by = 0.4) * opt.WP %>% rep(60);
seq.FC = seq(0.8, 1.2, by = 0.4) * opt.FC %>% rep(60);
seq.rainSeq = seq(0.8, 1.2, by = 0.4) * opt.sulfate %>% rep(60);
seq.dustSeq = seq(0.8, 1.2, by = 0.4) * opt.dust %>% rep(60);
# Full grid of rain-SO4 (1..20) x dust flux (1..10) candidates.
RainArr = seq(1, 20, by = 1);
DustArr = c(seq(1, 10, by = 1));
repetition = 1
rainDustArray = as.matrix(crossing(RainArr, DustArr, repetition))
})
results = list()
results$T1.10 = list();
results$T1.9 = list();
results$zel11 = list();
clusterExport(cl, "SynthRainE")
clusterExport(cl, "SynthRainS")
tic()
#calibration ---
# One CalcGypsum run per grid row; durations (years) and rain series differ
# per observed profile (SynthRainE = Eilat/shehoret, SynthRainS = Zeelim).
results$T1.10 = c(results$T1.10, parLapply(cl, 1:nrow(rainDustArray), fun = function(X) CalcGypsum(SynthRainE, duration = 13400, Depth = 150, rainSO4 = rainDustArray[X, 1], dustFlux = rainDustArray[X, 2])))
results$T1.9 = c(results$T1.9, parLapply(cl, 1:nrow(rainDustArray), fun = function(X) CalcGypsum(SynthRainE, duration = 11000, Depth = 150, rainSO4 = rainDustArray[X, 1], dustFlux = rainDustArray[X, 2])))
results$zel11 = c(results$zel11, parLapply(cl, 1:nrow(rainDustArray), fun = function(X) CalcGypsum(SynthRainS, duration = 10300, Depth = 150, rainSO4 = rainDustArray[X, 1], dustFlux = rainDustArray[X, 2])))
# Score each simulated profile against the observed means (RMSD) and stack
# the per-profile tables; commented rows are profiles currently excluded.
resultsTable = bind_rows(
RMSD.T.10 = RectanglingResults(results$T1.10, c(T1.10Observed %>% pull(correctedMean))) %>% mutate(profile = "T1.10", isHolocene = T),
RMSD.T1.9 = RectanglingResults(results$T1.9, c(T1.9Observed %>% pull(correctedMean))) %>% mutate(profile = "T1.9", isHolocene = T),
RMSD.Zel11 = RectanglingResults(results$zel11, c(Zel11Observed %>% pull(correctedMean))) %>% mutate(profile = "zel11", isHolocene = T),
#RMSD.T1.1 = RectanglingResults(results$T1.1, c(T1.1Observed %>% pull(correctedMean))) %>% mutate(profile = "T1.1", isHolocene = F),
#RMSD.T2.1 = RectanglingResults(results$T2.1, c(T2.1 %>% pull(correctedMean))) %>% mutate(profile = "T2.1", isHolocene = F),
#RMSD.zel1 = RectanglingResults(results$zel1, c(Zel1 %>% pull(correctedMean))) %>% mutate(profile = "zel1", isHolocene = F)
)
# Flag the row matching the reference parameter set and tag study region.
resultsTable = resultsTable %>% mutate(optimal = ifelse(FC == opt.FC & AETF == opt.AETF & sulfate == opt.sulfate & dustFlux == opt.dust & WP == opt.WP, T, F)) %>%
mutate(region = ifelse(profile %in% c("zel11", "zel1"), "zeelim", "shehoret"))
# Pareto front over the grouped parameter scores (project helpers).
test = resultsTable %>% filter() %>% groupByParam %>% calculatePareto() #%>% filter(!pareto) %>% calculatePareto %>% filter(!pareto) %>% calculatePareto
test %>% filter(pareto) %>% dplyr::select(sulfate, dustFlux) %>% gather() %>% ggplot(aes(y = value, x = key)) + geom_boxplot() + geom_point() + scale_y_continuous(breaks = scales::extended_breaks(20)) + theme(axis.title.x = element_blank())
test %>% filter(pareto) %>% dplyr::select(sulfate, dustFlux) %>% ungroup %>% summarise_all(median)
save(results, file = "resultsListcalib.RData")
|
07efe2b2afaa5f3864eb808cb544941ced69d3af
|
386c09d74bd57b5b5b64ccba4a33e928ea1a7242
|
/man/get_hours.Rd
|
e834eb9c1749410830504500bf5343c827fe167e
|
[
"MIT"
] |
permissive
|
SamanthaToet/yelpr
|
adf3519670bcce0366e12846cb0d5ad7030c36c7
|
96fcabc82a50ac27a80e3d9fe937b3713d38e61f
|
refs/heads/master
| 2020-05-27T03:26:43.112562
| 2020-01-27T22:19:56
| 2020-01-27T22:19:56
| 188,464,148
| 2
| 1
|
NOASSERTION
| 2019-07-17T20:38:39
| 2019-05-24T17:42:05
|
R
|
UTF-8
|
R
| false
| true
| 686
|
rd
|
get_hours.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_attributes.R
\name{get_hours}
\alias{get_hours}
\title{Print a table of operating hours for a business.}
\usage{
get_hours(tbl, business_name = NULL)
}
\arguments{
\item{tbl}{The results of \code{yelp_search} stored as a dataframe.}
\item{business_name}{Optional string. The name of a specific business to get operating hours for. Defaults to \code{NULL}.}
}
\value{
A dataframe of hours of operation.
}
\description{
This is a helper function to be used with \code{yelp_search} to parse the hours for a specific business or list of businesses.
}
\examples{
get_hours(tbl)
get_hours(tbl, "Wegmans")
}
|
7869964d1850806a452a8a0261057372c6b992c1
|
92550e6df56f8f38fcd459bbfb8a79ff98ebaadd
|
/tarefa1-Q3.r
|
8813ceab18c4a30740c88012251daf183fe35a0a
|
[] |
no_license
|
rfribeiro/INF0611_Tarefa1
|
0fab1fff21f211dec3eef196562a747269704f25
|
67dc330bac9263998cb5c4317327be26e6a60d70
|
refs/heads/master
| 2020-04-09T03:19:02.426333
| 2018-04-13T20:27:14
| 2018-04-13T20:27:14
| 159,977,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,839
|
r
|
tarefa1-Q3.r
|
library(ggplot2)
# Descriptor 1 data: (X, Y) feature coordinates of the 20 database images.
ID <- 1:20
X1 <- c(16,16,13,12,17,11,11,11,10,14,2,7,5,3,10,9,4,4,2,4)
Y1 <- c(9,19,15,17,10,12,10,17,13,13,7,7,3,2,7,0,7,4,5,6)
descritor1 <- data.frame(id=ID, X=X1, Y=Y1)
consulta1 <- c(12,8)
# Descriptor 2 data: same images described in a second feature space.
ID <- 1:20
X2 <- c(10,18,7,13,17,8,9,9,10,14,10,3,4,9,3,7,7,8,6,7)
Y2 <- c(15,15,14,19,16,14,10,14,12,14,1,10,3,0,6,2,7,10,7,9)
descritor2 <- data.frame(id=ID, X=X2, Y=Y2)
consulta2 <- c(10,10)
# Ground truth: ids of the images relevant to the query.
IDrel <- c(7,8,9,10,17,18,19,20)
# Euclidean (L2) distance between two numeric vectors of equal length.
# Vectorized replacement for the original element-by-element loop; this also
# removes the c(1:length(x)) pattern, which ran the loop with i = 1, 0 (and
# produced NA) when x was empty -- sum(numeric(0)) is 0, so now DistL2 of two
# empty vectors is 0.
DistL2 <- function(x, y){
  sqrt(sum((x - y)^2))
}
# L2 distances from every row of a descriptor data frame to the query point
# `consulta` = c(x, y). Columns 2 and 3 of `descritor` hold the X/Y
# coordinates (as built by the data.frame(id, X, Y) calls above).
# Fully vectorized: the original grew `dists` with c() inside a loop, which
# is O(n^2) in copies and assumed length(descritor$id) == nrow(descritor).
calcDists <- function(descritor, consulta) {
  sqrt((descritor[[2]] - consulta[1])^2 + (descritor[[3]] - consulta[2])^2)
}
# Rank image ids by increasing distance to each query (best match first).
distsDesc1 <- order(calcDists(descritor1, consulta1))
distsDesc2 <- order(calcDists(descritor2, consulta2))
# Precision: fraction of the returned items that are relevant.
precision <- function(returned, relevant) {
  hits <- intersect(returned, relevant)
  length(hits) / length(returned)
}
# Recall: fraction of the relevant items that were returned.
recall <- function(returned, relevant) {
  hits <- intersect(returned, relevant)
  length(hits) / length(relevant)
}
# Precision/recall at every cutoff k = 1..20 of each ranked list:
# row i holds (precision, recall) of the top-i results against IDrel.
points1 <- matrix(nrow=20, ncol=2)
points2 <- matrix(nrow=20, ncol=2)
for(i in 1:20) {
points1[i,] <- c(precision(distsDesc1[1:i], IDrel), recall(distsDesc1[1:i], IDrel))
points2[i,] <- c(precision(distsDesc2[1:i], IDrel), recall(distsDesc2[1:i], IDrel))
}
# Long-format table for ggplot; Precisao = precision, Revocacao = recall.
points1df <- data.frame(Descritor="Descritor1", Precisao=points1[,1], Revocacao=points1[,2])
points2df <- data.frame(Descritor="Descritor2", Precisao=points2[,1], Revocacao=points2[,2])
pointsDf <- rbind(points1df, points2df)
# Precision-recall curves, one line per descriptor.
g1 <- ggplot(pointsDf, aes(x=Revocacao, group = Descritor, colour = Descritor)) + geom_line(aes(y=Precisao)) + geom_point(aes(y=Precisao))
g1
|
1e742e9ba9cf0ffc08ff98a792bea94cc567a0ca
|
b6f8e79a1660f3374ef752c0657c8702f983e844
|
/R/TBMIBIquantifyPD1TcellsAcrossTopics.R
|
5442624b41b30fe77143b8b37a27fb5f1a8ce4f0
|
[] |
no_license
|
efrancis28/TBMIBI
|
9ab8a2688bf39aeb9d48ebc59844a2cf206628dd
|
c63ed17e42b27bb4f406f7049eb7b739ea07703a
|
refs/heads/master
| 2022-06-30T19:55:56.155009
| 2020-05-15T04:39:56
| 2020-05-15T04:39:56
| 264,096,893
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,629
|
r
|
TBMIBIquantifyPD1TcellsAcrossTopics.R
|
# TBMIBIquantifyPD1TcellsAcrossTopics.R
# Author: Erin McCaffrey
# Date created: 200225
# Overview: This script reads in the csv for the topic loadings for each cell with each cell assigned to a topic.
# Next, it looks at the distribution of PD1+ T cells across topics in bulk.
library(ggplot2)
library(forcats)
library(dplyr)
##...Load in data..##
# NOTE(review): absolute Google Drive path -- portable only on the author's machine.
setwd("/Volumes/GoogleDrive/My Drive/Angelo Lab/MIBIProjects/Human_ATB_paper-cohort/topic_analysis")
topic_data<-read.csv('all_TB_topic_annotation.csv')
##...Indicate cell type(s) to look at...##
cell_types<-c('lymphocyte')
##...Create dataframe with just cell type of interest...##
topic_data_cell <- topic_data[topic_data$cell_lin %in% cell_types, ]
##..Look at expression of marker on cell type across all topics..##
# One fill colour per topic; topics ordered by descending median PD-1 expression.
colors<-c('#4B9B79', '#CA6627', '#7470AE', '#D53E88', '#74A439', '#DDAE3B', '#BB2D34', '#668AB7')
ggplot(data = topic_data_cell, aes(x = fct_reorder(as.factor(topic),PD.1,.fun=median,.desc=TRUE), y = PD.1, fill = as.factor(topic))) +
geom_violin(trim=FALSE, draw_quantiles = 0.5) +
labs(x = 'Topic') +
labs(y = 'PD1 Exprs') +
scale_fill_manual(name = 'Topic', values = colors)
##..Counts of all PD1 positive cells across topics..##
# 0.21 is the PD-1 positivity cutoff -- presumably derived from the marker's
# expression distribution; confirm against the analysis notes.
PD1_pos<-as.data.frame(table(topic_data_cell[topic_data_cell$PD.1>0.21, ]$topic))
colnames(PD1_pos)<-c('Topic','Count')
ggplot(data = PD1_pos, aes(x = as.factor(Topic), y = Count, fill = as.factor(Topic))) +
geom_bar(stat = 'identity') +
theme_bw() +
theme(legend.position = "none") +
labs(x = 'Topic') +
labs(y = 'Count PD1+ Cells') +
scale_fill_manual(name = 'Topic', values = colors)
|
acde71d633f38277e2d8f0c5d060273c870ec5b0
|
88a30cd83fdb2d37b854088f49d9fac9bd715d31
|
/main_monthly_attendance.R
|
7473c997c7f735b9fdaf052f891708f2d0be62c6
|
[] |
no_license
|
danlauNV/monthlyattendance
|
c036a55db5177d35de85e2d6f3ac8a558152cc29
|
07c1c9b874671df133c78522d71cb5c2f1be39f3
|
refs/heads/master
| 2021-01-10T12:48:30.996284
| 2016-03-25T16:16:38
| 2016-03-25T16:16:38
| 54,571,487
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
main_monthly_attendance.R
|
# Pipeline driver for the monthly attendance forecast: builds the time series,
# runs one of two forecasting paths, and exports the result for Tableau.
source("createTSattdRate and export to stata.R")
# Manual toggle: `1==1` always takes the Holt-Winters branch; change it to
# 1==0 to run the Stata/xtabond path instead (consider a named flag).
if (1==1) {
source("holtwinters.R") # creates FC with confidence intvls
source("createDFofPredictions.R")
source("calculateEOYforecast.R")
} else {
source("xtabond.R")
source("calcEOYbasedonStata.R")
}
source("exporttoTableau.R")
|
1b9982ae007edfbfc6c15fb7a57baca50ceac4be
|
0d8198fdd2b6b4a876ac0a907264ec53b3ca5cd4
|
/recce1/6-simsmerge.r
|
d634977f3c43ef1ed22931109573eb2a9e84f34d
|
[] |
no_license
|
soride/Rprojects
|
4766973bc0592575491f564be49ddbb3f1829555
|
b1639bced14c008394b30a107b4975a502c6b1b0
|
refs/heads/master
| 2021-01-16T23:05:53.698073
| 2011-09-01T20:56:20
| 2011-09-01T20:56:20
| 2,049,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,930
|
r
|
6-simsmerge.r
|
### Analyze ironX and DISTANCE sampling
## rm(list = ls())
setwd("/Users/solson/Rprojects/recce1")
## --- simn5e5 ("h" = high-n) runs -------------------------------------------
## k100/k20/k10/k5 were produced in a single run; k3 was split across three
## DISTANCE parts (Dp1-Dp3) plus an IXs part and is re-merged below:
##   element 8 is row-bound, elements 9 and 10 are concatenated in blocks of
##   five, and elements 1-7 come from the IXs run.
load("/Users/solson/Rprojects/recce1/simn5e5/k100.RData")
hk100<-k100
load("/Users/solson/Rprojects/recce1/simn5e5/k20.RData")
hk20<-k20
load("/Users/solson/Rprojects/recce1/simn5e5/k10.RData")
hk10<-k10
load("/Users/solson/Rprojects/recce1/simn5e5/k5.RData")
hk5<-k5
load("/Users/solson/Rprojects/recce1/simn5e5/k3IXs.RData")
load("/Users/solson/Rprojects/recce1/simn5e5/k3Dp1.RData")
load("/Users/solson/Rprojects/recce1/simn5e5/k3Dp2.RData")
load("/Users/solson/Rprojects/recce1/simn5e5/k3Dp3.RData")
hk3<-vector("list",10)
hk3[[8]]<-rbind(k3Dp1[[8]],k3Dp2[[8]],k3Dp3[[8]])
hk3[[9]][1:5]<-k3Dp1[[9]][1:5]
hk3[[9]][6:10]<-k3Dp2[[9]][1:5]
hk3[[9]][11:15]<-k3Dp3[[9]][1:5]
hk3[[10]][1:5]<-k3Dp1[[10]][1:5]
hk3[[10]][6:10]<-k3Dp2[[10]][1:5]
hk3[[10]][11:15]<-k3Dp3[[10]][1:5]
hk3[1:7]<-k3IXs[1:7]
rm(k100,k20,k10,k5,k3Dp1,k3Dp2,k3Dp3,k3IXs)
## --- simn3e5 ("m" = medium-n) runs -----------------------------------------
## Here k100, k20 and k3 were each split into IXs + three DISTANCE parts and
## are merged with the same element-8/9/10 scheme as above.
load("/Users/solson/Rprojects/recce1/simn3e5/k100IXs.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k100Dp1.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k100Dp2.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k100Dp3.RData")
mk100<-vector("list",10)
mk100[[8]]<-rbind(k100Dp1[[8]],k100Dp2[[8]],k100Dp3[[8]])
mk100[[9]][1:5]<-k100Dp1[[9]][1:5]
mk100[[9]][6:10]<-k100Dp2[[9]][1:5]
mk100[[9]][11:15]<-k100Dp3[[9]][1:5]
mk100[[10]][1:5]<-k100Dp1[[10]][1:5]
mk100[[10]][6:10]<-k100Dp2[[10]][1:5]
mk100[[10]][11:15]<-k100Dp3[[10]][1:5]
mk100[1:7]<-k100IXs[1:7]
load("/Users/solson/Rprojects/recce1/simn3e5/k20IXs.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k20Dp1.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k20Dp2.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k20Dp3.RData")
mk20<-vector("list",10)
mk20[[8]]<-rbind(k20Dp1[[8]],k20Dp2[[8]],k20Dp3[[8]])
mk20[[9]][1:5]<-k20Dp1[[9]][1:5]
mk20[[9]][6:10]<-k20Dp2[[9]][1:5]
mk20[[9]][11:15]<-k20Dp3[[9]][1:5]
mk20[[10]][1:5]<-k20Dp1[[10]][1:5]
mk20[[10]][6:10]<-k20Dp2[[10]][1:5]
mk20[[10]][11:15]<-k20Dp3[[10]][1:5]
mk20[1:7]<-k20IXs[1:7]
load("/Users/solson/Rprojects/recce1/simn3e5/k10.RData")
mk10<-k10
load("/Users/solson/Rprojects/recce1/simn3e5/k5.RData")
mk5<-k5
load("/Users/solson/Rprojects/recce1/simn3e5/k3IXs.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k3Dp1.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k3Dp2.RData")
load("/Users/solson/Rprojects/recce1/simn3e5/k3Dp3.RData")
mk3<-vector("list",10)
mk3[[8]]<-rbind(k3Dp1[[8]],k3Dp2[[8]],k3Dp3[[8]])
mk3[[9]][1:5]<-k3Dp1[[9]][1:5]
mk3[[9]][6:10]<-k3Dp2[[9]][1:5]
mk3[[9]][11:15]<-k3Dp3[[9]][1:5]
mk3[[10]][1:5]<-k3Dp1[[10]][1:5]
mk3[[10]][6:10]<-k3Dp2[[10]][1:5]
mk3[[10]][11:15]<-k3Dp3[[10]][1:5]
mk3[1:7]<-k3IXs[1:7]
rm(k100IXs,k100Dp1,k100Dp2,k100Dp3,k20IXs,k20Dp1,k20Dp2,k20Dp3,k10,k5,k3IXs,k3Dp1,k3Dp2,k3Dp3)
## --- simn1e5 ("l" = low-n) runs --------------------------------------------
## Every k-level was split into IXs + three DISTANCE parts here; note the
## part objects use a lowercase "dp" (k100dp1, ...) unlike the simn3e5
## section's capital "Dp". Same element-8/9/10 merge scheme as above.
load("/Users/solson/Rprojects/recce1/simn1e5/k100IXs.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k100dp1.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k100dp2.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k100dp3.RData")
lk100<-vector("list",10)
lk100[[8]]<-rbind(k100dp1[[8]],k100dp2[[8]],k100dp3[[8]])
lk100[[9]][1:5]<-k100dp1[[9]][1:5]
lk100[[9]][6:10]<-k100dp2[[9]][1:5]
lk100[[9]][11:15]<-k100dp3[[9]][1:5]
lk100[[10]][1:5]<-k100dp1[[10]][1:5]
lk100[[10]][6:10]<-k100dp2[[10]][1:5]
lk100[[10]][11:15]<-k100dp3[[10]][1:5]
lk100[1:7]<-k100IXs[1:7]
load("/Users/solson/Rprojects/recce1/simn1e5/k20IXs.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k20dp1.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k20dp2.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k20dp3.RData")
lk20<-vector("list",10)
lk20[[8]]<-rbind(k20dp1[[8]],k20dp2[[8]],k20dp3[[8]])
lk20[[9]][1:5]<-k20dp1[[9]][1:5]
lk20[[9]][6:10]<-k20dp2[[9]][1:5]
lk20[[9]][11:15]<-k20dp3[[9]][1:5]
lk20[[10]][1:5]<-k20dp1[[10]][1:5]
lk20[[10]][6:10]<-k20dp2[[10]][1:5]
lk20[[10]][11:15]<-k20dp3[[10]][1:5]
lk20[1:7]<-k20IXs[1:7]
load("/Users/solson/Rprojects/recce1/simn1e5/k10IXs.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k10dp1.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k10dp2.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k10dp3.RData")
lk10<-vector("list",10)
lk10[[8]]<-rbind(k10dp1[[8]],k10dp2[[8]],k10dp3[[8]])
lk10[[9]][1:5]<-k10dp1[[9]][1:5]
lk10[[9]][6:10]<-k10dp2[[9]][1:5]
lk10[[9]][11:15]<-k10dp3[[9]][1:5]
lk10[[10]][1:5]<-k10dp1[[10]][1:5]
lk10[[10]][6:10]<-k10dp2[[10]][1:5]
lk10[[10]][11:15]<-k10dp3[[10]][1:5]
lk10[1:7]<-k10IXs[1:7]
load("/Users/solson/Rprojects/recce1/simn1e5/k5IXs.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k5dp1.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k5dp2.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k5dp3.RData")
lk5<-vector("list",10)
lk5[[8]]<-rbind(k5dp1[[8]],k5dp2[[8]],k5dp3[[8]])
lk5[[9]][1:5]<-k5dp1[[9]][1:5]
lk5[[9]][6:10]<-k5dp2[[9]][1:5]
lk5[[9]][11:15]<-k5dp3[[9]][1:5]
lk5[[10]][1:5]<-k5dp1[[10]][1:5]
lk5[[10]][6:10]<-k5dp2[[10]][1:5]
lk5[[10]][11:15]<-k5dp3[[10]][1:5]
lk5[1:7]<-k5IXs[1:7]
load("/Users/solson/Rprojects/recce1/simn1e5/k3IXs.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k3dp1.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k3dp2.RData")
load("/Users/solson/Rprojects/recce1/simn1e5/k3dp3.RData")
lk3<-vector("list",10)
lk3[[8]]<-rbind(k3dp1[[8]],k3dp2[[8]],k3dp3[[8]])
lk3[[9]][1:5]<-k3dp1[[9]][1:5]
lk3[[9]][6:10]<-k3dp2[[9]][1:5]
lk3[[9]][11:15]<-k3dp3[[9]][1:5]
lk3[[10]][1:5]<-k3dp1[[10]][1:5]
lk3[[10]][6:10]<-k3dp2[[10]][1:5]
lk3[[10]][11:15]<-k3dp3[[10]][1:5]
lk3[1:7]<-k3IXs[1:7]
# Drop the per-part objects loaded for the simn1e5 section, then persist the
# fifteen merged result lists.
# Fix: the simn1e5 section creates lowercase names (k100dp1, k20dp1, k10dp1,
# k3dp1, ...), but the original rm() referred to them with a capital "D"
# (k100Dp1, ...), which only raised "object not found" warnings and left the
# temporaries in memory.
rm(k100IXs,k100dp1,k100dp2,k100dp3,k20IXs,k20dp1,k20dp2,k20dp3,k10IXs,k10dp1,k10dp2,k10dp3,k5IXs,k5dp1,k5dp2,k5dp3,
   k3IXs,k3dp1,k3dp2,k3dp3)
save(hk100,hk20,hk10,hk5,hk3,mk100,mk20,mk10,mk5,mk3,lk100,lk20,lk10,lk5,lk3,file="/Users/solson/Rprojects/recce1/out-6.Rdata")
#load(file="/Users/solson/Rprojects/recce1/out-6.Rdata")
|
ca8a2b102f32e6d3289c58e379c3b7888a87f95b
|
647e2db904866c2917af40a07657c36e863a6e4f
|
/MDist/R/mc_sim.R
|
bc5ac0e81fcbc071b770f2c9660fef0f06d0ce3c
|
[] |
no_license
|
stacyderuiter/MDistComparison
|
68827fbce410c52d1f37fb32a19955f44d39652b
|
4ea955796147ff42ab87eac5ee724d2133e17923
|
refs/heads/master
| 2020-03-30T16:45:11.423497
| 2019-02-19T16:53:37
| 2019-02-19T16:53:37
| 151,424,818
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 785
|
r
|
mc_sim.R
|
#' Simulate a state sequence from a Markov chain
#'
#' Draws each state from the row of the transition matrix indexed by the
#' previous state, switching between the baseline and exposure matrices
#' according to the per-step `resp` flag.
#'
#' @param sim_dives data frame with a variable `resp` taking values
#'   "baseline" and "response"; row i selects the matrix used at step i
#' @param n number of time-steps to simulate
#' @param G transition probability matrix (baseline)
#' @param G_exp transition probability matrix (exposure/response)
#' @param first initial state (an integer in 1..ncol(G)) used for step 1
#' @return a numeric vector of length `n` with values in 1..ncol(G)
#'   giving the simulated state sequence
#' @export
mc_sim <- function(sim_dives, n, G, G_exp, first) {
  sim <- numeric(n)
  m <- ncol(G)
  sim[1] <- first
  # seq_len(n)[-1] is empty when n == 1; the original `2:n` would have run
  # backwards (2, 1) and indexed past the available data.
  for (i in seq_len(n)[-1]) {
    if (sim_dives$resp[i] == "baseline") {
      sim[i] <- sample(1:m, 1, prob = G[sim[i - 1], ])
    } else {
      # this is a "response" dive: use the exposure transition matrix
      sim[i] <- sample(1:m, 1, prob = G_exp[sim[i - 1], ])
    }
  }
  sim
}
|
eda3935a7f90da9fe654340d62777eeb24e78002
|
6d2720114b2608a7da2c5ed7dabf60adb76e56a2
|
/server.R
|
f998ccdf9135bbdb333b2a25b6c328cf5c5c2825
|
[] |
no_license
|
GuilhermeFCO/shinyCovid
|
3f8fad5c1ed1bff9dedff06250df2bcc42133382
|
9e85f551aa9b10f1e632d0baffe74869ea9662cd
|
refs/heads/main
| 2023-05-28T05:13:40.044208
| 2021-06-09T01:46:07
| 2021-06-09T01:46:07
| 375,193,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,034
|
r
|
server.R
|
## Shiny server: exposes the latest COVID counts (confirmed, deaths, their
## daily increments, and the reference date) for the geographic level chosen
## in the UI. `input$tabselectedCovid` selects the level:
##   0 = Brazil (latest row), 1 = region, 2 = state, 3 = meso-region,
##   4 = micro-region, 5 = city; the matching input$covid* control supplies
## the unit's name. The data frames (brasilCovid, regionStates, states, meso,
## micro, cities) are expected to be defined in the app's global environment.
server <- function(input, output, sesion) {
  # Filter the data frame for the currently selected geographic level down to
  # the chosen unit's row(s) and return the requested column as a plain
  # vector. Replaces six copies of an identical if/else ladder that differed
  # only in the selected column.
  pick_value <- function(column) {
    df <- switch(
      as.character(input$tabselectedCovid),
      "0" = dplyr::filter(brasilCovid, is_last == "TRUE"),
      "1" = dplyr::filter(regionStates, name_region == input$covidRegions),
      "2" = dplyr::filter(states, name_state == input$covidStates),
      "3" = dplyr::filter(meso, name_meso == input$covidMeso),
      "4" = dplyr::filter(micro, name_micro == input$covidMicro),
      "5" = dplyr::filter(cities, city == input$covidCity)
    )
    dplyr::select(df, dplyr::all_of(column))[[1]]
  }
  # Cumulative confirmed cases.
  output$covidConfirmed <- renderText({
    pick_value("confirmed")
  })
  # Date of the confirmed-per-day figure, formatted for display.
  output$covidDateConfirmedPerDay <- renderText({
    format.Date(pick_value("date"))
  })
  # New confirmed cases on the latest day.
  output$covidConfirmedPerDay <- renderText({
    pick_value("confirmed_per_day")
  })
  # Cumulative deaths.
  output$covidDeaths <- renderText({
    pick_value("deaths")
  })
  # Date of the deaths-per-day figure, formatted for display.
  output$covidDateDeathsPerDay <- renderText({
    format.Date(pick_value("date"))
  })
  # New deaths on the latest day.
  output$covidDeathsPerDay <- renderText({
    pick_value("deaths_per_day")
  })
}
|
47ee45244b18a596bd9b350f139a51df02f93288
|
e2647f85500d3e4c9651ee3aedd250a54604eac5
|
/R/Scatter2DView.R
|
7441ecab4f3327498cd612e049046c28bb02f77d
|
[] |
no_license
|
gejun1226/ggView
|
df1c458cf0d6e49b3ecf6d7983fe92cc4c521077
|
61e65caffc1264d0114ca63ba8d6ad48a9446a66
|
refs/heads/master
| 2020-07-24T18:16:28.056093
| 2019-06-14T02:12:40
| 2019-06-14T02:12:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 833
|
r
|
Scatter2DView.R
|
# Draw a 2-D scatter plot of `data`, colouring points on a blue-yellow-red
# gradient (midpoint 2), with axis/legend labels taken from the arguments and
# a minimal black-axis theme.
# NOTE(review): `label` and `label.list` are accepted but never used in the body.
Scatter2DView <- function(data, x, y, color, label, label.list){
  # NOTE(review): requireNamespace() only checks that ggplot2 is installed; it
  # does NOT attach it, so the unqualified ggplot()/aes()/... calls below rely
  # on the caller having run library(ggplot2) beforehand -- confirm.
  requireNamespace("ggplot2")
  # `x`, `y` and `color` are passed straight into aes(); presumably they are
  # vectors (or expressions evaluated in `data`) -- verify against call sites.
  p = ggplot(data, aes(x, y, color = color))
  p = p + geom_point()
  p = p + scale_color_gradient2(low = "blue", mid = "yellow",
                                high = "red", midpoint = 2)
  p = p + labs(x = x, y = y, color = color)
  p = p + theme(text = element_text(colour="black", size = 14, family = "Helvetica"),
                plot.title = element_text(hjust = 0.5, size=18),
                axis.text = element_text(colour="gray10"))
  p = p + theme(axis.line = element_line(size=0.5, colour = "black"),
                panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
                panel.border = element_blank(), panel.background = element_blank(),
                legend.key = element_blank())
  p
}
|
f9067227036e2c3b917285291f767892bc5125ca
|
f3cea29c249db2a0e18a689c92e4bde0a059d5fc
|
/cachematrix.R
|
5d6801875c7a78a89fab8a7070913fc4d6d99b07
|
[] |
no_license
|
gglaze/ProgrammingAssignment2
|
ef181bc6a1a7d2ce0ab30d84ebf13ed414e786da
|
d033b492028324cc5267f409272fc161935ff506
|
refs/heads/master
| 2020-04-05T23:32:45.287452
| 2014-05-23T00:14:14
| 2014-05-23T00:14:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,753
|
r
|
cachematrix.R
|
## These two functions invert matrices and cache the results. This potentially
## speeds computation by avoiding repeated calculation of the same matrix inversion.
# *********************************************************
# makeCacheMatrix creates and initializes a matrix object able
# to store and retrieve a matrix and its cached inverse.
# Inputs: x -- an invertible matrix (optional)
# outputs: a list containing 4 functions suitable for use by cacheSolve().
# set: stores invertible matrix x.
# get: returns value of previously stored invertible matrix.
# setInverse: stores inverse of a matrix x. Does not calculate the inverse.
# getInverse: returns value of previously stored matrix inverse.
# processes:
# initialize matrix to optional input x.
# reset cached inverse value to NULL
# create list functions described above
# *********************************************************
## Construct a cache-aware matrix wrapper.
##
## Returns a list of four accessor closures that share one enclosing
## environment holding the matrix and its (lazily cached) inverse:
##   set(y)        -- store a new matrix and invalidate the cached inverse
##   get()         -- retrieve the stored matrix
##   setInverse(s) -- cache a previously computed inverse (no computation here)
##   getInverse()  -- retrieve the cached inverse, or NULL if not yet cached
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse starts out empty.
  inverse <- NULL

  set <- function(y) {
    x <<- y
    inverse <<- NULL
  }

  get <- function() {
    x
  }

  setInverse <- function(solve) {
    inverse <<- solve
  }

  getInverse <- function() {
    inverse
  }

  ## The returned list is the public interface consumed by cacheSolve().
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
# *********************************************************
# cacheSolve returns the inverse of an invertible matrix.
# Inputs: the matrix list object created by makeCacheMatrix().
# list function "get" provides matrix to be inverted
# list function "getInverse" furnishes either the cached inverse or NULL
# Outputs: stores matrix inverse using list function "setInverse."
# Returns: inverse of matrix provided by list function "get"
# Processes:
# If cached value not present, calculates, caches, and returns matrix inverse.
# If cached value present, returns inverse retrieved from cache.
# *********************************************************
## Return the inverse of the matrix held in 'x' (an object produced by
## makeCacheMatrix).
##
## If an inverse has already been cached, it is returned directly (after
## emitting a "getting cached data" message); otherwise the inverse is
## computed with solve(), stored via x$setInverse(), and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()

  ## Fast path: a previously computed inverse is available.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  ## Slow path: fetch the matrix, invert it, and cache the result.
  mat <- x$get()
  result <- solve(mat)
  x$setInverse(result)
  result
}
|
fba5506496ff10f21df72a3083e48451ea9f47ff
|
977505d27f66959febf4e9c4b0f6590a5208994c
|
/tests/testthat/test-01-sample_size.R
|
afc7242e4d8e22b149275c48f91f977bb575b41d
|
[] |
no_license
|
nutriverse/sleacr
|
a37091599e8d8b1b630c3b458d567197a35fa008
|
4e8df17a9aefa7bb61da08f0016914066e7a6b3f
|
refs/heads/main
| 2023-04-14T11:05:40.170429
| 2023-01-07T19:01:28
| 2023-01-07T19:01:28
| 186,984,529
| 0
| 0
| null | 2023-01-07T19:01:29
| 2019-05-16T08:22:23
|
R
|
UTF-8
|
R
| false
| false
| 1,279
|
r
|
test-01-sample_size.R
|
## testthat unit tests for the sleacr sample-size helper functions.
## Each test only checks the *return type* of the function under test
## (double / list), not the numerical correctness of the result.
## The functions exercised here (get_binom_hypergeom, get_hypergeom, get_n,
## get_d, get_n_cases, get_n_clusters, ...) are defined in the package under
## test, not in this file.

## Test that outputs are numeric
test_that("output is numeric", {
  expect_type(get_binom_hypergeom(n = 600, k = 40), "double")
  expect_true(is.numeric(get_binom_hypergeom(n = 600, k = 40)))
})

test_that("output is numeric", {
  expect_type(get_hypergeom(k = 5, m = 600, n = 25, N = 10000), "double")
  expect_true(is.numeric(get_hypergeom(k = 5, m = 600, n = 25, N = 10000)))
})

test_that("output is numeric", {
  expect_type(
    get_hypergeom_cumulative(k = 5, m = 600, n = 25, N = 10000), "double"
  )
  expect_true(
    is.numeric(get_hypergeom_cumulative(k = 5, m = 600, n = 25, N = 10000))
  )
})

## Test that output is a list
test_that("output is list", {
  expect_type(get_n(N = 600, dLower = 0.7, dUpper = 0.9), "list")
})

test_that("output is list", {
  expect_type(get_d(N = 600, n = 40, dLower = 0.7, dUpper = 0.9), "list")
})

## Test that output is an numeric
test_that("output is numeric", {
  expect_type(get_n_cases(N = 100000, u5 = 0.17, p = 0.02), "double")
  expect_true(is.numeric(get_n_cases(N = 100000, u5 = 0.17, p = 0.02)))
})

test_that("output is numeric", {
  expect_type(
    get_n_clusters(n = 40, N = 100000, u5 = 0.17, p = 0.02), "double"
  )
  expect_true(
    is.numeric(get_n_clusters(n = 40, N = 100000, u5 = 0.17, p = 0.02))
  )
})
|
45e49b4c65c9d133d0d51e4e2c3c4c1f8dd8a6f0
|
0b302e9a8d8fda17d6762bcccf00cd67b532a88b
|
/R/divRatio.R
|
e25f45a52ee0f5ae223a5b2d4198dc36573b094a
|
[] |
no_license
|
lnsongxf/dMisc
|
0d94c0e5b0f323053f473d478c3290a691b17e8f
|
b54acc761f66c4e0ef0d6d0099221e883717f98f
|
refs/heads/master
| 2021-05-31T19:30:30.893978
| 2016-06-05T15:50:26
| 2016-06-05T15:50:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
divRatio.R
|
#' Compute the Diversification Ratio for a portfolio.
#'
#' This function computes the Diversification Ratio (DR) for a portfolio, given
#' a weight vector, \code{w}, a set of asset returns, \code{R}, and a
#' covariance matrix, \code{sigma}. This ratio is a measure of how well
#' diversified a portfolio is. The square of this number is a proxy for the
#' number of unique sources of variation that exist in a portfolio. The higher
#' the number, the more diversified the portfolio.
#'
#' @param w a vector of weights. If \code{NULL} (the default), an equal
#'   weight of \code{1/ncol(R)} is assigned to every asset.
#' @param R an xts object or matrix of asset returns.
#' @param sigma a covariance matrix for R.
#' @param scale the annualization factor for R. Default = 12. This parameter is
#'   passed through to wtdAvgVol() and portfolioVol().
#'
#' @return the Diversification Ratio (DR) for the portfolio.
#'
#' @examples
#' # Note: the example assumes the xts package is attached.
#' dts <- seq(Sys.Date()-199, Sys.Date(), 1)
#' returns <- matrix(rnorm(1000),ncol = 5) / 100
#' ret.xts <- xts(returns, dts)
#' cm <- cov(ret.xts)
#' divRatio(R = ret.xts, sigma = cm)
#'
#' @seealso wtdAvgVol, portfolioVol
#' @export
#'
divRatio <- function(w = NULL, R, sigma, scale = 12) {
  ## Default to an equal-weight portfolio when no weights are supplied.
  if (is.null(w)) {
    w <- rep(1/ncol(R), ncol(R))
  }
  ## DR = (weighted average of the individual asset volatilities) divided by
  ## (total portfolio volatility); both helpers live in the dMisc package.
  wav <- dMisc::wtdAvgVol(w = w, R = R, scale = scale)
  pvol <- dMisc::portfolioVol(w = w, R = R, sigma = sigma, scale = scale)
  dr <- wav / pvol
  return(dr)
}
|
67adedf8a7eb0481e7712cbff9956baad900076a
|
31bd220a932ce88bbabc6aa7a2819e48008dde19
|
/R/P4.4.R
|
15903693bb1c4db25d1f7d52ceca249dee5720f1
|
[] |
no_license
|
cran/DoE.base
|
6fa51fb944ba3f841ad35302c60e0a7ab3da6902
|
ecad38c67623b041c29252cb7608e54035e0567d
|
refs/heads/master
| 2023-05-12T04:24:46.333540
| 2023-05-08T15:20:07
| 2023-05-08T15:20:07
| 17,691,739
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,050
|
r
|
P4.4.R
|
## P4.4: frequency pattern of the numbers of generalized length-4 words for
## all 4-factor projections of a design (DoE.base).
##
## Arguments:
##   ID       -- a data frame / matrix holding the design, or the name of a
##               catalogued array (looked up in oacat and resolved through
##               oa.design()).
##   digits   -- rounding digits for the tabulated word lengths.
##   rela     -- tabulate relative lengths (each projection divided by its
##               minimum df); requires a strength-3 array.
##   parft    -- tabulate PARFT-normalized lengths; requires strength 3.
##   parftdf  -- tabulate PARFTdf-normalized lengths; requires strength 3.
##   detailed -- if TRUE, attach per-projection values as attribute "detail".
## Returns a two-column matrix (length value, frequency) with attribute "A4"
## and, depending on the options, "rA4"/"GR", "sumPARFT4" or "sumPARFTdf4".
## Depends on package internals: oacat, oa.design, length2, length3, length4,
## nchoosek.
P4.4 <- function (ID, digits = 4, rela = FALSE, parft=FALSE, parftdf=FALSE,
    detailed=FALSE)
{
    ## function used for calculations in the paper
    ## final updated function P4.4 for R package DoE.base will omit "newdf"
    ## and will rename "new" to "parft"
    ## function to calculate pattern of numbers of generalized length 4 words
    ## for projections into four factors
    if (!is.logical(rela))
        stop("rela must be TRUE or FALSE")
    if (!is.logical(parft))
        stop("parft must be TRUE or FALSE")
    if (!is.logical(parftdf))
        stop("parftdf must be TRUE or FALSE")
    ## retrieve child array or array identified by character string
    ## gsub for case where ID is character string
    IDname <- gsub("\"", "", deparse(substitute(ID)))
    if (all(IDname %in% oacat$name)) {
        if (!exists(IDname))
            ID <- eval(parse(text = paste("oa.design(", IDname,
                ")")))
        else if (is.character(ID))
            ID <- eval(parse(text = paste("oa.design(", IDname,
                ")")))
    }
    ## the normalized variants are only meaningful for strength-3 arrays
    ## (i.e. no generalized words of lengths 2 and 3)
    if ((rela || parft || parftdf) & !(isTRUE(all.equal(length2(ID), 0)) & isTRUE(all.equal(length3(ID),
        0))))
        stop(IDname, " is not a strength 3 array, \nP4.4 with rela, parft or parftdf TRUE is inadequate.")
    if (!(is.data.frame(ID) | is.matrix(ID)))
        stop("ID must be a data frame or a matrix")
    if (is.matrix(ID))
        ID <- as.data.frame(ID)
    ## at least four factors are needed for a 4-factor projection
    if (!ncol(ID) >= 4)
        return(NULL)
    ## J-characteristics for all 4-factor projections
    hilf <- length4(ID, J=TRUE)
    fhilf <- factor(names(hilf), levels=unique(names(hilf))) ## bug fix 2 Sep 2013
    ## hilf was in unexpected order before,
    ## yielding wrong calculations for rela in designs
    ## with many columns due to character instead of
    ## numeric sorting
    ## sum of squared J-characteristics per projection
    hilf <- sapply(split(hilf, fhilf), function(obj) sum(obj^2))
    if (rela) {
        waehl <- nchoosek(ncol(ID), 4)
        nlevels <- sapply(ID, function(obj) length(unique(obj)))
        div <- apply(waehl, 2, function(obj) min((nlevels[obj] -
            1)))
    }
    if (parft) {
        waehl <- nchoosek(ncol(ID), 4)
        nlevels <- sapply(ID, function(obj) length(unique(obj)))
        div <- apply(waehl, 2, function(obj) 4/sum(1/(nlevels[obj] -
            1))) ## divisor, multiplier is 1/divisor
    }
    if (parftdf) {
        waehl <- nchoosek(ncol(ID), 4)
        nlevels <- sapply(ID, function(obj) length(unique(obj)))
        div <- apply(waehl, 2, function(obj) mean(nlevels[obj] -
            1)) ## divisor, multiplier is 1/divisor
    }
    aus <- table(round(hilf, digits))
    if (rela || parft || parftdf) aus <- table(round(hilf/div, digits))
    ## formatting the table for output
    aus <- cbind(length4 = as.numeric(names(aus)), frequency = aus)
    if (rela)
        colnames(aus) <- c("length4.rela", "frequency")
    if (parft)
        colnames(aus) <- c("length4.parft", "frequency")
    if (parftdf)
        colnames(aus) <- c("length4.parftdf", "frequency")
    rownames(aus) <- rep("", nrow(aus))
    ## attaching attributes
    attr(aus, "A4") <- A4 <- sum(hilf)
    if (!(rela || parft || parftdf) & detailed & A4>0)
        attr(aus, "detail") <- round(hilf, digits)
    if (rela) {
        attr(aus, "rA4") <- rA4 <- sum(hilf/div)
        ## GR = generalized resolution; ">=5" when no length-4 words remain
        if (rA4 > 0) attr(aus, "GR") <- round(4+1-sqrt(max(hilf/div)),digits)
        else attr(aus, "GR") <- ">=5"
        if (rA4 > 0 & detailed) attr(aus, "detail") <- round(hilf/div, digits)
    }
    if (parft){
        attr(aus, "sumPARFT4") <- sumPARFT4 <- sum(hilf/div)
        if (sumPARFT4 > 0 & detailed)
            attr(aus, "detail") <- round(hilf/div, digits)
    }
    if (parftdf){
        attr(aus, "sumPARFTdf4") <- sumPARFTdf4 <- sum(hilf/div)
        if (sumPARFTdf4 > 0 & detailed)
            attr(aus, "detail") <- round(hilf/div, digits)
    }
    aus
}
|
265e7c70c9e730c836d802d298e10d3ebef63ea6
|
64597b32b4242c5e80537b68c3f113a657040e79
|
/r_code.R
|
bfc3ac2eb3cc68d11e2ed4becffd774b00ceb924
|
[] |
no_license
|
ankita1103/Product-Analysis
|
e99582b11c8844d4cf67542ef6e7edc9b4dd94f6
|
3a71205b35a68bccd96e9ed6a1854449d8a48eb2
|
refs/heads/master
| 2023-03-03T12:05:04.528230
| 2021-02-10T13:27:08
| 2021-02-10T13:27:08
| 337,716,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,324
|
r
|
r_code.R
|
## Text-mining and sentiment analysis of product reviews (Asus ROG phone)
## using tm / wordcloud / tidytext. Interactive analysis script: reads
## "new.txt", cleans the corpus, plots term frequencies and word clouds,
## then computes a bing-lexicon sentiment score.
library(tm)
## NOTE(review): 'header', 'stringsAsFactors' and 'FileEncoding' are
## read.table()/read.csv() arguments, not base options -- this options()
## call most likely has no effect on the code below. Confirm intent.
options(header=FALSE, stringsAsFactors = FALSE,FileEncoding="latin1")
setwd("C:/Users/dell/Documents/Product-Analysis")
## Build a corpus with one document per line of the review file.
text<-readLines("new.txt")
corpus<-Corpus(VectorSource(text))
inspect(corpus[34])
## Basic cleaning: lower-case, strip punctuation / numbers / stopwords /
## extra whitespace.
corpus<-tm_map(corpus, tolower)
corpus<-tm_map(corpus, removePunctuation)
corpus<-tm_map(corpus, removeNumbers)
cleanset<-tm_map(corpus, removeWords,stopwords("english"))
cleanset<-tm_map(cleanset, stripWhitespace)
## First pass: term-document matrix and frequency bar plot.
dtm<-TermDocumentMatrix(cleanset,control=list(minwordLength=c(1,Inf)))
findFreqTerms(dtm,lowfreq=15)
termFrequency<-rowSums(as.matrix(dtm))
termFrequency<-subset(termFrequency,termFrequency>=10)
library(ggplot2)
barplot(termFrequency,las=2,col=rainbow(20))
## Second pass: drop domain-specific filler words, merge "games" into
## "gaming", then rebuild the matrix and re-plot.
cleanset<-tm_map(cleanset,removeWords,c("asus","use","will","still","can","one","really","phone","life","many","day","dont","get","like","make","even","just","without","mode","know","now","thing","well","phones","good","going","rog","oled","never"))
cleanset<-tm_map(cleanset,gsub,pattern="games",replacement="gaming")
cleanset<-tm_map(cleanset, stripWhitespace)
dtm<-TermDocumentMatrix(cleanset,control=list(minwordLength=c(1,Inf)))
findFreqTerms(dtm,lowfreq=15)
termFrequency<-rowSums(as.matrix(dtm))
termFrequency<-subset(termFrequency,termFrequency>=10)
library(ggplot2)
barplot(termFrequency,las=2,col=rainbow(20))
## Word clouds of the cleaned term frequencies (two color schemes).
library(wordcloud)
library(wordcloud2)
m<- as.matrix(dtm)
wordFreq<- sort(rowSums(m),decreasing = TRUE)
set.seed(375)
## NOTE(review): 'grayLevels' is computed but never used below -- confirm.
grayLevels <-gray(wordFreq+10) / (max(wordFreq)+10)
wordcloud(words=names(wordFreq),freq=wordFreq,max.words=100, rot.per=0.2,scale=c(7,.1),min.freq=5,colors=brewer.pal(8,"Dark2"))
wordcloud(words=names(wordFreq),freq=wordFreq,max.words=100, rot.per=0.2,scale=c(7,.1),min.freq=5,colors=rainbow(20))
#sentiment analysis
library(tidyverse)
library(tidytext)
library(glue)
library(stringr)
## NOTE(review): 'amazon' is undefined here and glue() has no 'sep'
## argument; this line would error, and its result is overwritten on the
## next line anyway -- it appears to be leftover scratch. Confirm and
## remove if so.
filename<-glue("C:/Users/ANANT/Desktop/project",amazon,sep="")
filename<- trimws("new.txt")
fileText <- glue(read_file(filename))
## Tokenize into one word per row (data_frame is the deprecated alias of
## tibble; kept as-is).
tokens <- data_frame(text = fileText) %>% unnest_tokens(word, text)
## Score: join against the bing sentiment lexicon, count positive vs
## negative tokens, and report the difference.
tokens %>%
  inner_join(get_sentiments("bing")) %>% # pull out only sentiment words
  count(sentiment) %>% # count the # of positive & negative words
  spread(sentiment, n, fill = 0) %>% # made data wide rather than narrow
  mutate(sentiment = positive - negative) # # of positive words - # of negative
|
3e88b858d92bb7624f5a359b3e7f02781645e7fc
|
b0828ab1cd8d8271c66d6935416b32580fb93bb4
|
/workflow.r
|
4f6e02d41f5275ce52f3be0995a9d1198f32d18c
|
[] |
no_license
|
KonradZych/GBIC2011
|
ba0f145d4f51549d344a756abd578675f291cf0d
|
c84ea96728f260d030d661e66a0442e934cb58f3
|
refs/heads/master
| 2016-09-06T06:23:01.350627
| 2011-07-13T19:17:58
| 2011-07-13T19:17:58
| 1,341,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,768
|
r
|
workflow.r
|
#copy-paste into R for analysis
## Scratch workflow: derive genotypes from a Brassica rapa expression matrix
## and flag markers whose recombination counts drop when the marker is
## flipped.
## NOTE(review): copy-paste scratch code -- 'genotypes' on the last line is
## not defined in this function (the object built above is
## 'brassica_genotypes'); presumably a global. Confirm before running.
workflow.appriopriateMarkers <- function(){
  setwd("D:/data")
  library(basicQtl)
  library(qtl)
  expressionMatrix <- as.matrix(read.table("Expression_BrassicaRapa_10chr2.txt",sep=""))
  brassica_genotypes <- toGenotypes(expressionMatrix,margin=0.5,genotypes=c(1,0),overlapInd=0, verb=T)
  ## Recombination counts with markers as-is vs. flipped.
  recoMatrix <- recombinationCount(brassica_genotypes,flip=0)
  recoMatrixFlipped <- recombinationCount(brassica_genotypes,flip=1)
  ## For each row, count markers whose recombination count exceeds 24
  ## unflipped but falls below 24 when flipped.
  res <- rep(0,length(rownames(recoMatrix)))
  names(res) <- rownames(recoMatrix)
  for(x in 1:nrow(recoMatrix)){
    i <- recoMatrix[x,]
    j <- recoMatrixFlipped[x,]
    res[names(which(j[which(i>24)]<24))] <- res[names(which(j[which(i>24)]<24))]+1
  }
  hist(res)
  ## NOTE(review): '&&' compares only the first elements of the two logical
  ## vectors; an element-wise '&' was probably intended here -- confirm.
  genotypes[which(res>780&&res<880),] <- 1-genotypes[which(res>780&&res<880),]
}
## Scratch workflow collecting several pheno2geno analysis sessions
## (parental data, Wageningen seed-quality data, "from slave" data, and a
## backcross set). Each section changes the working directory and reads its
## own files; the sections are independent copy-paste fragments, not a
## single pipeline.
## NOTE(review): not safe to run end-to-end -- it depends on machine-local
## paths and repeatedly overwrites 'ril'/'cross'; 'modellikelihood' is not
## defined in this file. Treat as an interactive log.
workflow.parental <- function(){
  ## Section 1: parental data set.
  setwd("D:/data/parental")
  library(pheno2geno)
  ril <- readFiles(verbose=TRUE,debugMode=2)
  ril <- preprocessData(ril,verbose=TRUE,debugMode=2)
  crossSimulated <- toGenotypes(ril,use="simulated",verbose=TRUE,debugMode=1,treshold=0.05,margin=15)
  design <- sort(ril$rils$phenotypes[1,],decreasing=TRUE)
  weight <- c(rep(0,73),rep(0,73))
  modellikelihood(design[c(1:10,35000:35010),],rep(3,146),c(rep(0,73),rep(0,73)))
  ## Section 2: Wageningen seed-quality data; first column dropped and the
  ## phenotype matrices coerced to numeric with fresh dim names.
  setwd("D:/GenesForSeedQualityWageningen")
  library(pheno2geno)
  ril <- readFiles(verbose=TRUE,debugMode=2,sep="\t")
  ril <- preprocessData(ril,verbose=TRUE,debugMode=2,groupLabels=c(0,0,0,1,1,1))
  ril2 <- ril
  ril2$rils$phenotypes <- ril2$rils$phenotypes[,-1]
  ril2$rils$phenotypes <- matrix(as.numeric(ril2$rils$phenotypes),nrow(ril$rils$phenotypes),(ncol(ril$rils$phenotypes)-1))
  ril2$parental$phenotypes <- ril2$parental$phenotypes[,-1]
  ril2$parental$phenotypes <- matrix(as.numeric(ril2$parental$phenotypes),nrow(ril$parental$phenotypes),(ncol(ril$parental$phenotypes)-1))
  rownames(ril2$rils$phenotypes)<- 1:nrow(ril2$rils$phenotypes)
  colnames(ril2$rils$phenotypes)<- 1:ncol(ril2$rils$phenotypes)
  rownames(ril2$parental$phenotypes)<- 1:nrow(ril2$parental$phenotypes)
  colnames(ril2$parental$phenotypes)<- 1:ncol(ril2$parental$phenotypes)
  cross <- toGenotypes(ril2,use="simulated",treshold=0.05,overlapInd = 0, proportion = c(50,50), margin=15, minChrLength=5, verbose=TRUE, debugMode=1, max.rf=0.26, min.lod=0)
  ## Section 3: "from slave" data set.
  setwd("D:/data/from slave/data")
  ril <- readFiles(verbose=TRUE,debugMode=2,sep="")
  ril <- preprocessData(ril,verbose=TRUE,debugMode=2,groupLabels=c(0,0,1,1))
  cross <- toGenotypes(ril,use="simulated",treshold=0.05,overlapInd = 0, proportion = c(50,50), margin=15, minChrLength=5, verbose=TRUE, debugMode=1, max.rf=0.26, min.lod=0)
  ## NOTE(review): the first read is immediately overwritten by the second.
  exp_p <- read.table(file="children_phenotypes_rp.txt" ,sep="\t")
  exp_p <- read.table(file="parental_phenotypes_rp.txt" ,sep="\t")
  ## Section 4: backcross data; plot recombination fractions of the
  ## reordered linkage groups to a PNG.
  setwd("D:/data/bc")
  library(pheno2geno)
  ril <- readFiles(verbose=TRUE,debugMode=2)
  ril <- preprocessData(ril,groupLabels = c(0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1),verbose=TRUE,debugMode=2)
  crossMean <- toGenotypes(ril,use="simulated",treshold=0.01,overlapInd = 0, proportion = c(25,50,25), minChrLength=5, verbose=TRUE, debugMode=1)
  png("tolerateordereddefault.png", width=1200, height=1200)
  plot.rf(formLinkageGroups(cross,reorgMarkers=TRUE))
  dev.off()
}
ril <- convertToGenotypes.internal(ril, 0, 0.01, c(50,50), 15, TRUE, 1)
setwd("D:/data/wilco data")
parental <- read.table(file="mapping_probes.txt",sep="\t",header=T,row.names=1)
children <- read.table(file="children.txt",sep="\t",header=T,row.names=1)
ril <- NULL
ril$rils$phenotypes <- children[,20:120]
ril$parental$phenotypes <- parental[,5:14]
ril <- preprocessData(ril, c(1,1,1,1,1,0,0,0,0,0))
cross <- toGenotypes(ril, use="simulated", splitMethod="EM", treshold=0.001, verbose=TRUE,debug=1)
chil2 <- parchil
setwd("D:/data/wilco data")
children <- read.table(file="children.txt",sep="\t",header=T,row.names=1)
parchil <- children[,8:120]
colnames(parchil) <- cnames[,2]
parchil <- parchil[,-which(colnames(parchil)=="Pimp_6_1")]
parchil <- parchil[,-which(colnames(parchil)=="MM_6_2")]
parchil <- parchil[,-which(colnames(parchil)=="RIL_278_6")]
tom <- tom[,-which(colnames(tom)=="..bot1703.CEL")]
tom <- tom[,-which(colnames(tom)=="..bot1707.CEL")]
tom <- tom[,-which(colnames(tom)=="..bot2000.CEL")]
#parchil <- parchil[,-which(colnames(parchil)=="RIL_260_d")]
parchil[,which(colnames(parchil)=="RIL_292_6")], parchil[,which(colnames(parchil)=="RIL_308_d")]
colo <- matrix("grey",1,111)
colo[which(colnames(parchil)=="RIL_278_6")] <- "red"
colo[which(colnames(parchil)=="RIL_260_d")] <- "red"
colo[which(colnames(parchil)=="RIL_206_6")] <- "red"
group_6 <- grep("_6",colnames(parchil))
group_d <- grep("_d",colnames(parchil))
groups <- list(group_6,group_d)
setwd("D:/data/wilco data")
parchil <- read.table(file="expressions_log_norm_cor.txt",sep="\t",header=T,row.names=1)
population <- createPopulation(parchil[,11:108],parchil[,1:10])
population <- preprocessData(population, c(1,1,1,1,1,0,0,0,0,0))
cross <- toGenotypes(population, use="simulated", splitMethod="mean", treshold=0.01, verbose=TRUE,debug=1)
## Inspect how one genotype marker relates to all others: print its name,
## plot the histogram of its correlations with every column, and print the
## names of markers correlated above 0.4.
## NOTE(review): relies on a global 'genotypes' matrix being in scope; the
## 0.4 cutoff looks like an ad-hoc threshold -- confirm both.
show.dist <- function(nr){
  print(colnames(genotypes)[nr])
  res <- apply(genotypes,2,cor,genotypes[,nr])
  hist(res)
  print(names(res)[which(res>0.4)])
}
filename <- paste("tomato_exp_",i,".txt",sep="")
cat("Processing:",filename,"\n")
tom <- read.table(filename,sep="")
tom <- tom[,c(-13,-17,-91)]
j <- 1
tom_ <- matrix(apply(tom,1,diffexp),nrow(tom),1)
print(dim(tom_))
rownames(tom_) <- rownames(tom)
filename2 <- paste("tomato_exp_",i,"_corrected.txt",sep="")
write.table(tom[names(sort(tom_[,1],decreasing=TRUE)[1:100000]),], file=filename2, sep="\t")
cat("Done:",filename2,"\n")
tom <- NULL
tom_ <- NULL
gc()
gc()
gc()
gc()
gc()
setwd("D:/GenesForSeedQualityWageningen/tomato")
tom_c <- read.table("batchcorrected.txt",sep="\t")
tom <- read.table("200_000_markers_normalized.txt",sep="\t")
for(i in 1:12){
filename <- paste("tomato_exp_",i,".txt",sep="")
cat("Processing:",filename,"\n")
tom <- read.table(filename,sep="",row.names=1)
tom <- tom[,c(-13,-17,-91)]
j <- 1
tom_ <- matrix(apply(tom,1,diffexp),nrow(tom),1)
print(dim(tom_))
rownames(tom_) <- rownames(tom)
filename2 <- paste("tomato_exp_",i,"_corrected.txt",sep="")
write.table(tom[names(sort(tom_[,1],decreasing=TRUE)),], file=filename2, sep="\t")
cat("Done:",filename2,"\n")
tom <- NULL
tom_ <- NULL
gc()
gc()
gc()
gc()
gc()
}
for(i in 2:13){
filename <- paste("tomato_exp_",i,"_corrected.txt",sep="")
cat("Processing:",filename,"\n")
tom <- read.table(filename,sep="\t")
if(ncol(tom)==118){
rownames(tom) <- tom[,1]
tom <- tom[,-1]
}
colnames(tom) <- colnames(tom_c)
cat(dim(tom_c),dim(tom),"\n")
tom_c <- rbind(tom_c,tom)
j <- 1
tom_ <- matrix(apply(tom_c,1,diffexp),nrow(tom_c),1)
print(dim(tom_))
rownames(tom_) <- rownames(tom_c)
tom_c <- tom_c[names(sort(tom_[,1],decreasing=TRUE)[1:250000]),]
tom <- NULL
tom_ <- NULL
gc()
gc()
gc()
gc()
gc()
}
if(i%%5000==0)
norm <- NULL
for(i in 1:nrow(tom_c)){
norm <- rbind(norm, normalizeQuantiles(tom_c[i,]))
cat(i,"/",nrow(tom_c),"\n")
}
for(i in 1:ncol(ara)){
if(colnames(ara)[i]%in%anames[,1]){
colnames(ara)[i] <- anames[which(anames[,1]==colnames(ara)[i]),2]
}
}
cnames <- read.table("ril_labels_tomato.txt" ,sep="\t")
colnames(tom_c)<-as.character(cnames[which(cnames[,1]%in%colnames(tom_c)),2])
load("200_000_population.rd")
load()
tom_ <- matrix(unlist(tom_c),nrow(tom_c),ncol(tom_c))
rownames(tom_) <- rownames(tom_c)
colnames(tom_) <- colnames(tom_c)
tom_c <- tom_
tom_ <- NULL
gc()
gc()
gc()
gc()
population <- createPopulation(tom_c[,11:109],tom_c[,1:10])
population <- preprocessData(population, c(1,1,1,1,1,0,0,0,0,0))
cross <- toGenotypes(population, use="simulated", splitMethod="mean", treshold=0.01, verbose=TRUE,debug=2)
which(rownames(population$founders$phenotypes)[which(population$founders$RP$pval[1]<0.01)] %in% rownames(population$founders$phenotypes)[which(population$founders$RP$pval[2]<0.01)])
### arabidopsis
for(i in 1:19){
filename <- paste("arabidopsis_exp_",i,".txt",sep="")
cat("Processing:",filename,"\n")
ara <- read.table(filename,sep="\t",header=F,row.names=1)
colnames(ara) <- colnames(ara_c)
cat(dim(ara_c),dim(ara),"\n")
ara_c <- rbind(ara_c,ara)
ara_ <- matrix(apply(ara_c,1,diffexp),nrow(ara_c),1)
print(dim(ara_))
rownames(ara_) <- rownames(ara_c)
ara_c <- ara_c[names(sort(ara_[,1],decreasing=TRUE)[1:10000]),]
ara <- NULL
ara_ <- NULL
gc()
gc()
gc()
gc()
gc()
}
## Absolute difference between the mean expression of two sample groups in
## one expression row.
##
## The row is coerced to numeric first because rows extracted via apply()
## on a data frame may arrive as character. The group positions default to
## the column ranges hard-coded in the original version (17:24 vs 189:196)
## so existing single-argument callers are unaffected, but they can now be
## overridden for other array layouts.
##
## Arguments:
##   cur_row -- one expression row (numeric, or coercible to numeric).
##   idx_a   -- indices of the first sample group (default 17:24).
##   idx_b   -- indices of the second sample group (default 189:196).
## Returns (invisibly, as before) abs(mean(a) - mean(b)), or 0 when either
## group mean is NA/NaN (e.g. a group that is entirely missing).
diffexp <- function(cur_row, idx_a = 17:24, idx_b = 189:196) {
  vals <- as.numeric(cur_row)
  a <- mean(vals[idx_a], na.rm = TRUE)
  b <- mean(vals[idx_b], na.rm = TRUE)
  if (!is.na(a) && !is.na(b)) {
    invisible(abs(a - b))
  } else {
    invisible(0)
  }
}
for(i in 1:98){
colnames(genotype)[i] <- rn[which(rn[,1]==i),3]
}
setwd("D:/GenesForSeedQualityWageningen/tomato")
require(pheno2geno)
load("200_000_population.rd")
map <- read.table("marker.txt",sep="\t",row.names=1)
map <- as.matrix(map)
genotype <- read.table("offspring_genotype.txt",sep="\t",header=T)
genotype <- as.matrix(genotype)
population <- intoPopulation(population, list(genotype,map), c("offspring$genotypes","maps$genetic"))
cross <- toGenotypes(population,genotype="real",orderUsing="maps_genetic")
setwd("D:/data/tomato")
require(pheno2geno)
population <- readFiles(verbose=T,debugMode=2)
population <- preprocessData(population,c(0,0,0,0,0,1,1,1,1,1))
cross <- toGenotypes(population,genotype="real",orderUsing="maps_genetic")
cross <- toGenotypes(ril,splitMethod="mean",genotype="simulated",minChrLength=0,treshold=0.5,margin=50,max.rf=10)
population_ <- removeIndividuals.internal(population,c("RIL_308_d","RIL_304_d","RIL_278_6"))
tom_c <- tom_c[,-which(colnames(tom_c)=="RIL_304_d")]
for(i in 1:ncol(tom_c)){
colnames(tom_c)[i]<-cnames[which(cnames[,1]==colnames(tom_c)[i]),2]
}
result <- NULL
for(i in 1:nrow(pl)){
print(sum(pl[,1]==pl[i,1]))
if(sum(pl[,1]==pl[i,1])!=1){
cur <- which(pl[,1]==pl[1,1])
if(sum(pl[cur,3]==100)!=1){
cur_ <- cur[which(pl[cur,3]==100)]
if(sum(pl[cur_,4]==25)==1){
result <- rbind(result,unlist(c(pl[cur_[which(pl[cur_,3]==100)],1],as.numeric(substr(pl[cur_[which(pl[cur_,3]==100)],2],9,10)),pl[cur_[which(pl[cur_,3]==100)],c(9,10)])))
}
}else{
result <- rbind(result,unlist(c(pl[cur[which(pl[cur,3]==100)],1],as.numeric(substr(pl[cur[which(pl[cur,3]==100)],2],9,10)),pl[cur[which(pl[cur,3]==100)],c(9,10)])))
}
pl <- pl[-which(pl[,1]==pl[i,1]),]
}else{
result <- rbind(result,unlist(c(pl[i,1],as.numeric(substr(pl[i,2],9,10)),pl[i,c(9,10)])))
}
#result <- matrix(unlist(result),nrow(result),ncol(result))
}
## Sort the rows of a matrix (or data frame) by column 'col_n' in
## decreasing order.
##
## Bug fix: the original indexed *columns* with a row-match index
## (table_[, which(...)]) instead of rows, and with tied values it
## re-appended the whole tie group once per occurrence, duplicating rows.
## order() fixes both: it indexes rows, emits each row exactly once, and is
## stable for ties.
##
## Arguments:
##   table_ -- a matrix or data frame.
##   col_n  -- the column (index or name) to sort by.
## Returns the row-sorted table, invisibly (matching the original).
sortTable <- function(table_, col_n) {
  ## drop = FALSE keeps a one-row result as a matrix/data frame.
  res_ <- table_[order(table_[, col_n], decreasing = TRUE), , drop = FALSE]
  invisible(res_)
}
|
9fda05be5664b6cff4dd93b85ab781718a88b7ee
|
b3cfea796b338543537ca71f22e8b37477e6275f
|
/Parallel_Version_RandContaSimulation.R
|
99268d4ca2b8ec3877a1769f20fb9b67e798ec3b
|
[] |
no_license
|
lloydliu717/discussion-1204
|
8f9e0564fda857c08be14ff68b29b9e169ec5ff5
|
bdae02853e4323f5fafe77d388379d8ee7611ab4
|
refs/heads/master
| 2021-08-23T12:13:00.354371
| 2017-12-04T21:31:33
| 2017-12-04T21:31:33
| 113,097,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,986
|
r
|
Parallel_Version_RandContaSimulation.R
|
## Parallel Monte-Carlo simulation of SEM model fitting on original vs.
## contaminated data sets. Sweeps a grid of (manifest items, gamma, min
## lambda, tau bound, sample size, contamination percent) for n.runs
## replications, fitting a SEM per cell, and writes the combined results to
## CSV. Grid values and helper functions come from the sourced files below.
library(doMC) # needed for parallel computing
library(sem) # needed for latent variable analysis
library(parallel) # needed for parallel computing
library(foreach) # needed for parallel computing
library(iterators) # needed for parallel computing
#setwd("/Users/liuzichun/Documents/BU/2017SPRING/Characteristic_Respondents/SurveyDataSimulation/RandContaSimulation/")
#####detect cores and register multiple cores to work
#num_cores <- detectCores() ## Calculate the number of available cores
## NSLOTS is the core count granted by the SGE cluster scheduler; fall back
## to 1 core when not running under the scheduler.
num_cores = as.numeric(Sys.getenv("NSLOTS"))
if(is.na(num_cores)){num_cores = 1}
# num_cores <- num_cores - 1 ## always best to use one less than max when using MAC
max_cores <- 28 ## set to a given value or use 8 or 16
## on the cluster this value whould be set to the
## number of cores requested by the job
## on scc-lite this ncores should be set to any number
## between 1 and 28 or 36
# max_cores <- 3
num_cores <- min(max_cores,num_cores)
registerDoMC(num_cores)
getDoParWorkers()
######some functions
## Function1.R defines the helpers used below: originmodel, cr.alpha,
## GenerateData, ContaminateData, TestOriginModel, TestContainmationModel.
source("Function1.R")
# model text generation
# alternative model text generation
# calculate cr.alpha
# generate dataset
# generate corresponding contaminated dataset
# test model on original dataset and error processing
# test model on contaminated dataset and error processing
# generate a series of random seed
######set random seed
#my.new.seeds = generate.seed(25)
###### global value setup
## Setup.R defines the sweep grids: n.runs, n.man, gammalist, minlambda,
## taubound, n.sample, per.conta.
source("Setup.R")
# granularity
# minimum lambda
# number of manifest items
# sample size
# percentage contamination
# pure repeat times
###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ######
###### ###### ###### ###### ######
###### ###### ###### ###### ######
###### ###### ###### ###### ###### Parallel loop
ptm = proc.time()
## NOTE(review): under %dopar% every worker gets its own private copy of
## 'ctr' and 'MDF'; 'ctr' therefore restarts per worker (ids repeat across
## runs), and the rbind-accumulation into the loop-local MDF grows
## quadratically. The per-iteration results are ultimately combined by
## foreach via .combine = rbind, so the outer MDF is still assembled --
## but confirm whether 'ctr' was meant to be globally unique.
ctr = 0
MDF = data.frame()
print(Sys.time())
print("Start!!!!!!!!")
MDF = foreach(l = 1:n.runs,.combine = rbind) %dopar% {
  for(i in 1:length(n.man)){
    ## Build the SEM specification for this number of manifest items.
    ori.model.text = originmodel(n.man[i])
    model.sem.SIM = specifyModel(text = ori.model.text,quiet = T)
    for(g in gammalist){
      ## Apply gamma to every second manifest item (alternating 0/g).
      gammas = g * {{-1}^{1:n.man[i]}+1}/2
      for(minla in minlambda){
        lambdas = seq(0.86, minla, length.out = n.man[i])
        for(ta in taubound){
          taus = seq(-ta,ta,length.out = 6)
          for(j in 1:length(n.sample)){
            for(k in 1:length(per.conta)){
              ctr = ctr + 1
              #"Generate original data set"
              Xmat.orig = GenerateData(n.sample = n.sample[j], n.man = n.man[i],lambdas,gammas,taus)
              #"Generate conta data set"
              Xmat.conta = ContaminateData(DataMatrix = Xmat.orig, conta.percent = per.conta[k])
              #"Record loop and data set information"
              container1 = data.frame(ctr = ctr,
                                      manifest.item = n.man[i],
                                      sample.size = n.sample[j],
                                      min.lambda = minla,
                                      gamma = g,
                                      taubound = ta,
                                      noise.range = paste("0","0.4",sep = ","),
                                      contamination.percent = per.conta[k],
                                      cr.alpha.orig = cr.alpha(Xmat.orig),
                                      cr.alpha.conta = cr.alpha(Xmat.conta))
              #"run original sem model"
              container2 = TestOriginModel(ctr,Xmat.orig, model.sem.SIM, ori.model.text)
              #"run conta sem model"
              container3 = TestContainmationModel(ctr, Xmat.conta, model.sem.SIM, ori.model.text)
              MDF = rbind(MDF, cbind(container1, container2, container3))
            }
          }
        }
      }
    }
  }
  ## Value returned from each %dopar% iteration; rbind-combined by foreach.
  return(MDF)
}
proc.time() - ptm
print(paste("In total get",nrow(MDF),"rows"))
write.csv(MDF,"MDF_4_14.csv")
|
c49a35bb82bff3733a5df288d79ef4a927e52acf
|
3fae56ff310649949557e3f594526843e21334dc
|
/man/WMCT.Rd
|
9023c6d225fb66aebbffe6b1d30d0cbe0b2d0cd5
|
[] |
no_license
|
geneticsMiNIng/RStemnessScorer
|
a3c41f5a4b0ab0ef74e01102f4b1c803d63526d6
|
457758557837a0d0e0bc81c518d90ed627956fed
|
refs/heads/master
| 2021-01-11T00:35:48.558418
| 2016-11-07T17:30:21
| 2016-11-07T17:30:21
| 70,513,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,850
|
rd
|
WMCT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WMCT.R
\name{WMCT}
\alias{WMCT}
\title{Wilcoxon Multiple Comparisons Test}
\usage{
WMCT(pcbc, tcga, G = "SC", adjust = "fdr", njob = 1)
}
\arguments{
\item{pcbc}{data set containing in the first column labels
given as factor. At least one level of the factor should be G. The
next columns are for predictors.}
\item{tcga}{tcga data set with healthy tissues.
The columns are predictors.}
\item{G}{label name for selected group of samples in \code{pcbc} dataset.}
\item{adjust}{method of correction. "none" if not interesting.
Another possible arguments are:
c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY","fdr")}
\item{njob}{number of clusters for parallel computing.}
}
\value{
A two-element list. The first element is a vector of adjusted
p-values obtained from the multiple comparisons test. The second one is a vector
of original p-values without any correction. You can use this element
to apply some other correction method. See more: \code{\link{p.adjust}}.
}
\description{
Find siginificant differences between samples from
G class (PCBC) and healthy tissues (TCGA) + non G (PCBC)
for each predictor. To get interesting signature use
\code{\link{signatureWMCT}} function.
}
\details{
A two-sample Wilcoxon test is used for each of the predictors
(for more details see: \code{\link{wilcox.test}}). The "two.sided" alternative
was chosen. Note that there are as many comparisons as there are predictors
in the overlap of the `pcbc` and `tcga` sets. This approach requires
the use of a correction method. Read more on \href{https://en.wikipedia.org/wiki/Multiple_comparisons_problem}{wikipedia}.
}
\examples{
wmct <- WMCT(methPCBC, ENDOhealthy, G='SC', njob=3)
wmct2 <- p.adjust(wmct[[2]],'bonferroni')
signatureWMCT(wmct, n=10)
}
\author{
Katarzyna Sobiczewska
}
|
22b77489552f437dbc081b2ecf6d98ebcd0b8eb2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fda.usc/examples/plot.fdata.Rd.R
|
a6b535c196ef4bcaffb651512693320d302cf03b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,038
|
r
|
plot.fdata.Rd.R
|
## Auto-extracted example code for fda.usc::plot.fdata (from the package's
## Rd documentation). Every example line is wrapped in "## Not run:" / "##D"
## comments, so sourcing this file only attaches the package.
library(fda.usc)
### Name: plot.fdata
### Title: Plot functional data: fdata.
### Aliases: plot.fdata lines.fdata title.fdata plot.bifd
### Keywords: hplot
### ** Examples
## Not run:
##D
##D # example for fdata class of 1 dimension (curve)
##D a1<-seq(0,1,by=.01)
##D a2=rnorm(length(a1),sd=0.2)
##D f1<-(sin(2*pi*a1))+rnorm(length(a1),sd=0.2)
##D nc<-10
##D np<-length(f1)
##D tt=seq(0,1,len=101)
##D mdata<-matrix(NA,ncol=np,nrow=nc)
##D for (i in 1:nc) mdata[i,]<- (sin(2*pi*a1))+rnorm(length(a1),sd=0.2)
##D fdataobj<-fdata(mdata,tt)
##D res=plot.fdata(fdataobj,type="l",col=gray(1:nrow(mdata)/nrow(mdata)))
##D lines(func.mean(fdataobj),col=3,lwd=2) #original curve
##D
##D # example for fdata2d class of 2 dimension (surface)
##D t1 <- seq(0, 1, length= 51)
##D t2 <- seq(0, 1, length= 31)
##D z<-array(NA,dim=c(4,51,31))
##D for (i in 1:4) z[i,,] <- outer(t1, t2, function(a, b) (i*a)*(b)^i)
##D z.fdata<-fdata(z,list(t1,t2))
##D plot(z.fdata,time=2)
##D plot(z.fdata,mfrow=c(2,2),type="persp",theta=30)
## End(Not run)
|
4364936a406e5db11129231743dea51b544c8695
|
3f442e29843473912b95eab97346b3bf6858f917
|
/Course Note.R
|
83ca242780b44b6a4da1a8a6266ae5f328b3690f
|
[] |
no_license
|
rahulgeorge/Data-Management
|
3044f8bb0fc74fa9acb3c5e59ae6ab2aff351ba4
|
f6f5172373a697de3767d260c19df1d7d77a02e7
|
refs/heads/main
| 2023-07-09T12:21:01.208402
| 2021-08-01T14:59:50
| 2021-08-01T14:59:50
| 382,807,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,070
|
r
|
Course Note.R
|
# --- Getting & Cleaning Data: course notes (started July 4th) ----------------
# Useful public data sources:
#   United Nations - data.un.org
#   US government  - data.gov + data.gov/opendatasites
#   Gapminder      - human/demographic data
#   Kaggle
#-----------------------------------------------------------------------------
# Managing files: create a local ./data directory and download a CSV into it.
dir.create("data") #Creates a new directory - data
dir.exists("data") #Checks if the current directory has a child directory data
fileURL <- "https://opendata.arcgis.com/api/v3/datasets/7055dbb02f0c4f14ab7ea3eb5ebfda42_0/downloads/data?format=csv&spatialRefId=3857"
download.file(fileURL, destfile = "./data/cameras.csv", method = "curl") # "curl" is needed for https downloads on some platforms
list.files("./data")
dateDownloaded <- date() #record when the file was downloaded (provenance)
cameraData <- read.table("./data/cameras.csv", sep = ",", header = TRUE, quote = "") # quote = "": the file has no quoted fields
head(cameraData)
#Reading Excel files (read.xlsx/write.xlsx come from the xlsx package)
library(xlsx) # FIX: was commented out although read.xlsx below requires it
cameraData <- read.xlsx("./data/cameras.xlsx", sheetIndex = 1, header = TRUE)
colIndex <- 2:3 # FIX: original read `colIndex < 2:3` (a comparison), leaving colIndex undefined
rowIndex <- 1:4
cameraData <- read.xlsx("./data/cameras.xlsx", sheetIndex = 1, colIndex = colIndex, rowIndex = rowIndex) #Reading specific rows and columns
#write.xlsx - Writing excel
#XLConnect package for manipulating excel files
#Tags, Elements & Attributes (HTML/XML background)
# start tag - <section>
# end tag - </section>
# empty tag - <line-break />
#Elements are specific examples of tags
#Attributes are components of labels as shown below
#<img src="jeff.jpg" />
#Reading XML with the XML package
library(XML)
fileURL <- "./data/simple.xml"
doc <- xmlTreeParse(fileURL, useInternal = TRUE)
rootNode <- xmlRoot(doc) #Wrapper for the entire XML document
rootNode <- xmlRoot(doc) #Wrapper for the entire XML document; note this line appears once in the original
xmlName(rootNode) #Name of the root element
names(rootNode) #Names of all child elements
rootNode[[1]] #First element
rootNode[[1]][[1]] #Still drilling down
xmlSApply(rootNode, xmlValue) #sapply analogue for XML; xmlValue extracts the text values
xpathSApply(rootNode, "//name", xmlValue) # //name matches <name> elements at any depth (XPath syntax)
xpathSApply(rootNode, "//price", xmlValue) #Extracts all prices
#Accessing data from the web
# NOTE(review): getURL() comes from RCurl, which is only loaded later in these
# notes -- library(RCurl) is needed before this line runs standalone.
fileURL <- getURL("https://www.espn.in/nfl/team/_/name/bal/baltimore-ravens")
doc <- htmlTreeParse(fileURL, useInternal = TRUE)
readHTMLTable(doc)
#Reading JSON - JavaScript Object Notation
library(jsonlite)
jsonData <- fromJSON("https://api.github.com/users/jtleek/repos")
names(jsonData) #Shows the JSON top-level fields; drill down further below
names(jsonData$owner)
names(jsonData$owner$login)
myjson <- toJSON(iris, pretty = TRUE) #Converts the iris data frame to JSON; pretty = TRUE indents the output
iris2 <- fromJSON(myjson)
head(iris2)
#data.table package: a faster, more memory-efficient data.frame
library(data.table)
DF <- data.frame(x=rnorm(9), y=rep(c("a","b", "c"), each = 3), z = rnorm(9))
head(DF,3)
DT <- data.table(x=rnorm(9), y=rep(c("a","b", "c"), each = 3), z = rnorm(9))
head(DF,3) # NOTE(review): probably meant head(DT,3) to inspect the data.table
tables() #lists all data.tables currently in memory
DT[2,] #subsetting rows
DT[DT$y == "b",]
DT[c(2,3)] #With a single index, data.table subsets ROWS (a small difference from data.frame)
DT[,c(2,3)] #Column subsetting also diverges from data.frame semantics
#The j argument evaluates expressions on columns:
DT[,list(mean(x),sum(z))] #pass a list of expressions over columns x and z; no quotes needed
DT[,table(y)]
DT[,w:=z^2] #`:=` adds column w (the square of z) by reference, in place
DT2 <- DT
DT[,y:=2] #DT2 changes too: data.table does NOT copy on assignment (good for memory, surprising otherwise)
#For a true copy, use the copy() function
DT[,m:= {tmp <- (x+z); log2(tmp+5)}] #Multi-statement column creation; the last expression is the value
DT[,a:= x>0] #new logical column from a condition
DT[,b:= mean(x+w), by = a] #new column b = group mean of (x+w), grouped by a (TRUE/FALSE)
# .N is an integer of length 1 holding the per-group row count
DT <- data.table(x = sample(letters[1:3], 1E5, TRUE))
DT[, .N, by = x] #counts the number of occurrences of each value of x
DT <- data.table(x = rep(c("a","b","c"), each = 100), y = rnorm(300))
setkey(DT, x) #set a key for fast subsetting and joins
DT['a'] #fast key-based subset on "a"
DT1 <- data.table(x = c("a","a","b","dt1"), y = 1:4)
DT2 <- data.table(x = c("a","b","dt2"), z = 5:7)
setkey(DT1, x); setkey(DT2, x)
merge(DT1, DT2) #keyed merge
big_df <- data.frame(x = rnorm(1E6), y = rnorm(1E6))
file <- tempfile()
write.table(big_df, file = file, row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)
system.time(fread(file)) #fread(): drop-in, much faster replacement for read.table
system.time(read.table(file, header = TRUE, sep = "\t"))
#SWIRL NOTES
##Manipulating Data with dplyr
#dplyr package installed
library(dplyr)
mydf <- read.csv(path2csv, stringsAsFactors = FALSE) #path2csv is created automatically by swirl
head(mydf)
cran <- tbl_df(mydf) #load the data into a 'data frame tbl' (tbl_df) for nicer printing
rm("mydf")
cran #the main benefit of tbl_df is the compact printing
#dplyr supplies five 'verbs' that cover most fundamental data manipulation tasks: select(), filter(), arrange(), mutate(), and summarize().
#Renaming columns
chicago <- readRDS("./data/chicago.rds")
str(chicago)
names(chicago)
chicago <- rename(chicago, pm25 = pm25tmean2, dewpoint = dptp) #rename pm25tmean2 -> pm25 and dptp -> dewpoint in chicago
chicago <- mutate(chicago, tempcat2 = factor(tmpd > 80, labels = c("cold", "hot")))
hotcold <- group_by(chicago, tempcat) # NOTE(review): the column created above is tempcat2, not tempcat -- verify
summarise(hotcold, pm25 = mean(pm25), o3 = max(o3tmean2), no2 = median(no2tmean2)) #per-group summaries for both factor groups
summarise(hotcold, pm25 = mean(pm25, na.rm = TRUE), o3 = max(o3tmean2), no2 = median(no2tmean2)) #same, dropping NAs from the mean
chicago <- mutate(chicago, year = as.POSIXlt(date)$year + 1900)
years <- group_by(chicago, year)
summarise(years, pm25 = mean(pm25, na.rm = TRUE), o3 = max(o3tmean2), no2 = median(no2tmean2))
#Subsetting columns using select()
select(cran, ip_id, package, country) #keeps just these columns
select(cran, r_arch:country) #all columns from r_arch through country, like operating on numbers 1:5
select(cran, country:r_arch)
select(cran, -time) #drops the time column
#Subsetting rows using filter()
filter(cran, package == "swirl") #only swirl downloads
filter(cran, r_version == "3.1.1", country == "US") #comma-separated conditions are ANDed together
filter(cran, !is.na(r_version))
#Sorting using arrange()
cran2 <- select(cran, size:ip_id)
arrange(cran2, ip_id) #ascending order
arrange(cran2, desc(ip_id)) #descending ip_id order
arrange(cran2, package, ip_id) #sort first by package, then by ip_id
arrange(cran2, country, desc(r_version), ip_id)
cran3 <- select(cran, ip_id, package, size)
mutate(cran3, size_mb = size / 2^20) #new column derived from an existing one
summarize(cran, avg_bytes = mean(size)) #collapses the whole table to one summary row
#Grouping and Chaining with dplyr
by_package <- group_by(cran, package) #'Groups: package' in the output means subsequent verbs operate per package
#Now any operation we apply to the grouped data will take place on a per package basis.
summarise(by_package, mean(size)) #mean size for every package
# See ?n and ?n_distinct for the helper functions used below.
pack_sum <- summarize(by_package,
count = n(),
unique = n_distinct(ip_id) ,
countries = n_distinct(country),
avg_bytes = mean(size))
quantile(pack_sum$count, probs = 0.99) #download count above which a package is in the top 1% (99th percentile)
top_counts <- filter(pack_sum, count > 679) #keep only the top 1%
View(top_counts) #opens a spreadsheet-style viewer
top_counts_sorted <- arrange(top_counts, desc(count))
#Chaining using the %>% operator
cran %>% #%>% pipes the result of each step into the next
select(ip_id, country, package, size) %>% #no need to pass the data argument explicitly; the pipe supplies it
mutate(size_mb = size / 2^20) %>%
filter(size_mb <= 0.5) %>%
arrange(desc(size_mb)) %>%
print #in a chain, print needs no parentheses
#Tidying Data with dplyr (recap of the grouped summary)
by_package <- group_by(cran, package)
pack_sum <- summarize(by_package,
count = n(),
unique = n_distinct(ip_id),
countries = n_distinct(country),
avg_bytes = mean(size))
quantile(pack_sum$count, probs = 0.99) #the 99th percentile of download counts
top_counts <- filter(pack_sum, count > 679)
View(top_counts) #full tabular view
#Reading from MySQL (RMySQL) -- UCSC genome database example
library(RMySQL)
ucscDB <- dbConnect(MySQL(), user="genome", host="genome-mysql.soe.ucsc.edu") #connection details from http://genome.ucsc.edu/goldenPath/help/mysql.html
result <- dbGetQuery(ucscDB, "show databases;") #send the SQL command "show databases;" to the server
dbDisconnect(ucscDB) #Very important to disconnect when done
hg19 <- dbConnect(MySQL(), user="genome", db = "hg19", host="genome-mysql.soe.ucsc.edu")
allTables <- dbListTables(hg19) #All tables in the hg19 database
length(allTables)
allTables[1:5]
dbListFields(hg19, "affyU133Plus2") #column names of one table
dbGetQuery(hg19, "select count(*) from affyU133Plus2") #row count of the table
affyData <- dbReadTable(hg19, "affyU133Plus2")
head(affyData)
query <- dbSendQuery(hg19, "select * from affyU133Plus2 where misMatches between 1 and 3") #misMatches is a column; the query stays pending on the server
affyMis <- fetch(query) #retrieve the full result set from the server
quantile(affyMis$misMatches)
affyMisSmall <- fetch(query, n = 10) #retrieve only the first 10 rows of the same pending query
dbClearResult(query) #clear the pending query on the server
dim(affyMisSmall)
dbDisconnect(hg19)
#https://www.pantz.org/software/mysql/mysqlcommands.html
#https://www.r-bloggers.com/2011/08/mysql-and-r/
#Reading HDF5 (Hierarchical Data Format) with the rhdf5 Bioconductor package
#To install:
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install()
#BiocManager::install(rhdf5)
library(rhdf5)
created = h5createFile("example.h5") #Create an HDF5 file
created
created = h5createGroup("example.h5", "foo") #Create groups inside the HDF5 file
created = h5createGroup("example.h5", "baa")
created = h5createGroup("example.h5", "foo/foobaa") #groups nest like directories
h5ls("example.h5") #List the contents of the HDF5 file
A = matrix(1:10, nr=5, nc=2)
h5write(A, "example.h5", "foo/A") #Write the matrix into group foo
B <- array(seq(0.1,2.0,by = 0.1),dim=c(5,2,2))
attr(B, "scale") <- "litre" #attributes are stored alongside the data
h5write(B, "example.h5", "foo/foobaa/B")
h5ls("example.h5")
df <- data.frame(1L:5L, seq(0,1,length.out=5), c("ab","cde","fghi","a","s"), stringsAsFactors = FALSE)
h5write(df, "example.h5", "df") #datasets can also be written at the top level
readA <- h5read("example.h5", "foo/A")
readB <- h5read("example.h5","foo/foobaa/B")
readdf <- h5read("example.h5", "df")
readdf
h5write(c(12,13,14),"example.h5","foo/A", index=list(1:3,1)) #write into a slice (rows 1:3, column 1) of an existing dataset
h5read("example.h5","foo/A")
#Reading data from the web: readLines, XML/RCurl scraping, and httr
con <- url("https://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
htmlCode <- readLines(con) # FIX: use <- for assignment (original used `=`)
close(con) #always close connections
htmlCode
library(XML)
library(RCurl)
url <- getURL("https://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
html <- htmlTreeParse(url, useInternalNodes = TRUE)
xpathSApply(html, "//title", xmlValue)
xpathSApply(html, "//td[@id='gsc_a_ac gs_ibl']", xmlValue) # NOTE(review): this selector did not match when tested
#GET from the httr package
library(httr)
html2 <- GET("https://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
content2 <- content(html2, as = "text")
parsedHtml <- htmlParse(content2, asText = TRUE)
xpathApply(parsedHtml, "//title", xmlValue)
pg1 <- GET("http://httpbin.org/basic-auth/user/passwd")
pg1 #returns status 401 because no credentials were supplied
pg2 <- GET("http://httpbin.org/basic-auth/user/passwd", authenticate("user","passwd"))
pg2
names(pg2)
google <- handle("http://google.com") #a handle reuses one connection (and cookies) across requests
pg1 <- GET(handle = google, path = "/")
pg2 <- GET(handle = google, path = "search")
#Reading Data from APIs -- Twitter OAuth example
#Twitter docs: https://developer.twitter.com/en/docs/twitter-api/early-access
# SECURITY FIX: the original notes embedded live API keys, secrets and access
# tokens directly in this file. Hard-coded credentials in source control must
# be treated as compromised (revoke them). Load them from environment
# variables instead, e.g. set TWITTER_API_KEY etc. in ~/.Renviron.
myapp <- oauth_app("tweetCoursera",
                   key = Sys.getenv("TWITTER_API_KEY"),
                   secret = Sys.getenv("TWITTER_API_SECRET"))
sig <- sign_oauth1.0(myapp,
                     token = Sys.getenv("TWITTER_ACCESS_TOKEN"),
                     token_secret = Sys.getenv("TWITTER_ACCESS_SECRET"))
homeTL <- GET("https://api.twitter.com/1.1/statuses/home_timeline.json", sig) #authenticated request
json1 <- content(homeTL) #parsed into an R list
library(jsonlite)
json2 <- jsonlite::fromJSON(toJSON(json1)) #R object -> JSON -> data frame via the jsonlite package
json2[1,1:4]
#Subsetting and Sorting
set.seed(13435)
x <- data.frame("var1" = sample(1:5), "var2" = sample(6:10), "var3" = sample(11:15))
x <- x[sample(1:5),] #shuffle the rows
x$var2[c(1,3)] <- NA #introduce missing values
x[,1]
x[,"var1"]
x[1:2,"var2"]
x[(x$var1 <= 3 & x$var3 >11),] #AND of two conditions
x[(x$var1 <= 3 | x$var3 >15),] #OR of two conditions
x[x$var2>8,] #logical subsetting mishandles NA (keeps NA rows)
x[which(x$var2 >8),] #use which() to drop the NAs
sort(x$var1)
sort(x$var1, decreasing = TRUE)
sort(x$var2, na.last = TRUE) #put NAs at the end
x[order(x$var1, x$var3),] #order a data frame by one or more columns
#dplyr package (original comment said plyr)
library(dplyr)
arrange(x,var1)
#Adding rows and columns
x$var4 <- rnorm(5)
y <- cbind(x,rnorm(5)) #binds the new column on the right
y <- cbind(rnorm(5),y) #binds the new column on the left
y
#Baltimore City restaurant data: summarising
restData <- read.csv("./data/Restaurants.csv")
head(restData,n=3)
quantile(restData$cncldst, na.rm = TRUE)
quantile(restData$cncldst, probs = c(0.25,0.5,0.75,0.99))
table(restData$zipcode, useNA = "ifany") #useNA = "ifany" adds a count of missing values (omitted by default)
table(restData$cncldst, restData$zipcode) #two-dimensional table
#Checking for missing values
sum(is.na(restData$cncldst)) #count of missing values
any(is.na(restData$cncldst))
all(restData$zipcode > 0) #are all zipcode values > 0?
colSums(is.na(restData)) #NA count for every column
table(restData$zipcode %in% c("21212", "21213"))
restData[restData$zipcode %in% c("21212", "21213"),] #subset the data frame with the logical vector
#UC Berkeley admissions data: cross tabulation
data("UCBAdmissions")
DF <- as.data.frame(UCBAdmissions)
str(DF)
summary(DF)
xt <- xtabs(Freq ~ Gender + Admit, data = DF) #cross-tab: Freq values fill the cells, Gender and Admit form the axes
xt
object.size(xt)
#Creating New Variables
restData <- read.csv(("./data/Restaurants.csv"))
#Creating sequences
s1 <- seq(1,10,by=2) #from 1 to 10, stepping by 2
s2 <-seq(1,10,length = 3) #exactly 3 evenly spaced values
restData$nearMe <- restData$nghbrhd %in% c("Roland Park", "Homeland") #logical indicator column
table(restData$nearMe)
restData$zipWrong <- ifelse(restData$zipcode < 0, TRUE, FALSE) #vectorised if/else
table(restData$zipWrong, restData$zipcode < 0) #cross-check the new variable against the condition
#Creating categorical variables
restData$zipcode <- as.numeric(restData$zipcode)
restData$zipGroups <- cut(restData$zipcode, breaks = quantile(restData$zipcode, na.rm = TRUE)) #bin zipcodes at the quartiles; cut() returns a factor
table(restData$zipGroups)
table(restData$zipGroups, restData$zipcode)
library(Hmisc)
restData$zipGroups <- cut2(restData$zipcode,g=4) #cut2() chooses the quantile breaks for you
#Creating Factor Variables
restData$zcf <- factor(restData$zipcode) #turn a numeric column into a factor
restData$zcf[1:10]
yesno <- sample(c("yes","no"), size = 10, replace = TRUE)
yesnofac <- factor(yesno, levels = c("yes","no")) #explicit level order
relevel(yesnofac, ref = "yes") #make "yes" the reference level (a no-op here)
as.numeric(yesnofac)
#Reshaping Data with reshape2
library(reshape2)
head(mtcars)
#Melting a data set: wide -> long
mtcars$carname <- rownames(mtcars)
carMelt <- melt(mtcars, id = c("carname", "gear", "cyl"), measure.vars = c("mpg","hp")) #one row per (id, variable) pair,
#for the measured variables mpg and hp
head(carMelt)
tail(carMelt)
#Casting a data set: long -> summarised wide
cylData <- dcast(carMelt, cyl ~ variable)
cylData
cylData <- dcast(carMelt, cyl ~ variable, mean) #aggregate with mean while recasting
cylData
#Averaging values per group
head(InsectSprays)
tapply(InsectSprays$count, InsectSprays$spray, sum) #sum of count per spray
spIns <- split(InsectSprays$count, InsectSprays$spray) #alternative approach: split then apply
spIns
sprCount <- lapply(spIns, sum)
sprCount
sapply(spIns, sum)
#plyr package
library(plyr)
ddply(InsectSprays, .(spray), summarize, sum = sum(count))
#Merging Data
reviews <- read.csv("./data/reviews.csv")
solutions <- read.csv("./data/solutions.csv")
head(reviews)
head(solutions)
names(reviews)
names(solutions)
mergedData <- merge(reviews, solutions, by.x = "solution_id", by.y = "id", all = TRUE) #full outer join on solution_id = id
head(mergedData)
#Editing Text Variables
list.files("./data")
cameraData <- read.csv("./data/cameras.csv")
names(cameraData)
tolower(names(cameraData)) #lower-case all the column names
#strsplit() can be used to split names containing "."
fileUrl1 <- "https://dl.dropboxusercontent.com/u/7710864/data/reviews-apr29.csv"
download.file(fileUrl1, destfile = "./data/reviews.csv", method = "curl")
fileUrl2 <- "https://dl.dropboxusercontent.com/u/7710864/data/solutions-apr29.csv"
download.file(fileUrl2, destfile = "./data/solutions.csv", method = "curl")
reviews <- read.csv("./data/reviews.csv")
solutions <- read.csv("./data/solutions.csv")
head(reviews,2)
names(reviews)
sub("_","",names(reviews)) #sub() removes only the FIRST underscore in each name
gsub("_","",testName) #gsub() removes ALL underscores -- NOTE(review): testName is not defined in these notes
#Finding values: grep(), grepl()
grep("Alameda", cameraData$intersecti) #indices of matches in the intersection column
table(grepl("Alameda", cameraData$intersecti)) #grepl returns TRUE/FALSE per element; tabulated here
cameraData2 <- cameraData[grepl("Alameda", cameraData$intersecti),] #keep only the matching rows
grep("Alameda", cameraData$intersecti, value = TRUE) #return the matched values instead of their indices
#A grep() result of length 0 means the search matched nothing
#Useful string operations (stringr)
library(stringr)
nchar("Rahul George") #number of characters
substr("Rahul George",1,7) #characters 1 through 7
paste("Rahul", "George")
str_trim("Rahul ") #strip surrounding whitespace
#Regular Expressions ---------------------------------------------------------
#Literal matching finds exact occurrences of a word; metacharacters describe
#more general patterns:
# ^  start of line:           ^i think   matches lines beginning with "i think"
# $  end of line:             morning$
# [] character class:         [Bb][Uu][Ss][Hh] matches any casing of "bush"
#    ^[Ii] am matches lines starting with "I am" or "i am"
#    ranges work too: [a-z], [a-zA-Z], [0-9]
#    inside a class, ^ means NOT: [^?.]$ = lines NOT ending in ? or .
# .  any single character:    9.11 matches 9-11, 9/11, 9:11, 103.169.114.66
# |  alternation (or):        flood|fire, flood|earthquake|fire
#    ^[Gg]ood|[Bb]ad : G/good at the line start OR B/bad anywhere in the line
#    ^([Gg]ood|[Bb]ad) : either alternative, both anchored to the line start
# ?  the preceding item is optional:
#    [Gg]eorge( [Ww]\.)? [Bb]ush matches with or without the middle "W."
#    (the backslash escapes the dot so it is literal, not a metacharacter)
# *  zero or more of the item: (.*) matches "(24, m, germany)" and also "()"
#    * is greedy -- it takes the longest possible match: ^s(.*)s spans from the
#    first to the LAST s on the line; ^s(.*?)s makes it lazy (shortest match)
# +  one or more of the item:
#    [0-9]+ (.*)[0-9]+ = at least one digit, then anything, then at least one digit
# {} interval quantifiers bound the number of repeats:
#    {m,n} at least m and at most n; {m} exactly m; {m,} at least m
#    [Bb]ush( +[^ ]+ +){1,5} debate : "Bush" and "debate" separated by one to
#    five words (a word = at least one space, non-space text, at least one space)
# \1, \2, ... refer back to the text captured by earlier parentheses:
#    +([a-zA-Z]+) +\1 matches repeated words such as "blah blah" or
#    "night night baby"
#Working with Dates
d1 <- date()
d1
class(d1) #character
d2 <- Sys.Date()
d2
class(d2) #Date
#Format codes: %d day (01-31), %a abbreviated weekday, %A full weekday,
#%m month (01-12), %b abbreviated month, %B full month, %y 2-digit year, %Y 4-digit year
format(d2, "%a %b %d") #"Thu Jul 29"
x = c("1jan1960", "2jan1960","31mar1960","30jul1960")
z <- as.Date(x, "%d%b%Y")
z
z[1] - z[2] #date arithmetic yields a difftime
as.numeric(z[1]-z[2])
weekdays(d2)
months(d2)
julian(d2) #days since the origin date
library(lubridate) #convenient date-time parsing and arithmetic
ymd("20140108")
ymd("1989 May 17")
ymd("1920/1/2")
mdy("08/04/2013")
mdy("08042013")
dmy("03-04-2013")
ymd_hms("2011-08-03 10:15:03")
ymd_hms("2011-08-03 10:15:03", tz = "Pacific/Auckland") #timezone names: http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
hms("03:22:14")
this_day <- today()
wday(this_day) #day of the week as a number
wday(this_day, label = TRUE) #day of the week as a labelled factor
this_moment <- now()
nyc <- now(tz = "America/New_York")
depart <- nyc + days(2)
depart <- update(depart, hours = 17, minutes = 34)
arrive <- depart + hours(15) + minutes(50)
arrive <- with_tz(arrive, tzone = "Asia/Hong_Kong") #convert the same instant to Hong Kong time
last_time <- mdy("June 17, 2008", tz = "Singapore")
how_long <- interval(start = last_time, end = arrive) #the interval between the two instants
as.period(how_long)
|
e4ffe43d77522dc335171d1b4fcefb1af35cdbdf
|
867a7d01947c3a532ac03ce944cfdfcbc8579196
|
/ch06/scr0-unknown-n-binomial-form.R
|
3ee54c16db11aabd215fbcc23794ecaae559e145
|
[
"MIT"
] |
permissive
|
mbjoseph/scr-stan
|
7c9d5b2ff7af844d609d8f6e69b2736f763d0c3d
|
ed0654c21ee955b249191f59feb4e614c1dcada2
|
refs/heads/master
| 2022-08-31T16:42:28.121823
| 2020-05-17T03:51:09
| 2020-05-17T03:51:09
| 257,470,508
| 21
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 842
|
r
|
scr0-unknown-n-binomial-form.R
|
# Fit the SCR0 model with unknown N (binomial form) via Stan.
library(scrbook)
library(rstan)

options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)

# Simulate a spatial capture-recapture data set (N = 30, zero histories dropped).
data <- simSCR0(discard0 = TRUE, rnd = 2013, N = 30)

# Regular grid of activity-centre locations used to approximate the
# marginalisation over space; a smaller spacing gives a better approximation.
spacing <- .3
xs <- seq(data$xlim[1], data$xlim[2], by = spacing)
ys <- seq(data$ylim[1], data$ylim[2], by = spacing)
grid_pts <- expand.grid(x1 = xs, x2 = ys)

# Bundle everything the Stan program expects (names must match the .stan file).
stan_d <- list(
  n_nonzero_histories = nrow(data$Y),
  n_trap = nrow(data$traplocs),
  n_occasion = data$K,
  n_grid = nrow(grid_pts),
  grid_pts = as.matrix(grid_pts),
  X = data$traplocs,
  y = data$Y,
  n0_prior_scale = 10
)

# Compile, sample, and inspect the chains for the key parameters.
m_init <- stan_model("ch06/scr0-unknown-n-binomial-form.stan")
m_fit <- sampling(m_init, data = stan_d)
traceplot(m_fit, pars = c("alpha1", "alpha0", "N"))
|
72f6e3a185a9387663abc0261febb863da576c3b
|
b28f74d681bb5dfbf34549c82a8c932f77c1b0a8
|
/inst/extdata/examples/prepEntrez_.R
|
bddc870340950e635a864c86bb430bfbe66c721c
|
[
"MIT"
] |
permissive
|
sailfish009/proteoQ
|
b07e179e9fe27a90fd76cde2ed7caa55e793e9d6
|
e6a4fe79a21f9a9106a35d78c2ce42d59e9d82e2
|
refs/heads/master
| 2022-12-25T20:06:40.340740
| 2020-10-15T20:18:14
| 2020-10-15T20:18:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,385
|
r
|
prepEntrez_.R
|
\donttest{
# ===============================================
# Apply custom `human` and `mouse` Entrez lookups
# (roxygen example file for proteoQ)
# ===============================================
## A RefSeq example
# Prep I: fetch up-to-date `org.Xx.eg.db`
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("org.Hs.eg.db")
BiocManager::install("org.Mm.eg.db")
# Prep II: make available RefSeq Fasta(s) if not yet
library(proteoQDA)
db_path <- "~/proteoQ/dbs/fasta/refseq"
copy_refseq_hs(db_path)
copy_refseq_mm(db_path)
# Prep III: copy metadata and PSMs if not yet
dat_dir <- "~/proteoQ/custom_refseq_lookups"
copy_global_exptsmry(dat_dir)
copy_global_fracsmry(dat_dir)
# (for MaxQuant, use `copy_global_maxquant()`)
# (for Spectrum Mill, use `copy_global_sm()`)
copy_global_mascot(dat_dir)
# --- workflow begins ---
library(proteoQ)
load_expts("~/proteoQ/custom_refseq_lookups")
# prepare RefSeq-to-Entrez lookups
Ref2Entrez(species = human)
Ref2Entrez(species = mouse)
# head(readRDS(file.path("~/proteoQ/dbs/entrez/refseq_entrez_hs.rds")))
# head(readRDS(file.path("~/proteoQ/dbs/entrez/refseq_entrez_mm.rds")))
# overrule the default `Entrez` lookups with the custom databases
# NOTE(review): the normPSM() calls below end with a trailing comma before the
# closing parenthesis; confirm normPSM tolerates an empty trailing argument.
normPSM(
group_psm_by = pep_seq_mod,
group_pep_by = gene,
fasta = c("~/proteoQ/dbs/fasta/refseq/refseq_hs_2013_07.fasta",
"~/proteoQ/dbs/fasta/refseq/refseq_mm_2013_07.fasta"),
entrez = c("~/proteoQ/dbs/entrez/refseq_entrez_hs.rds",
"~/proteoQ/dbs/entrez/refseq_entrez_mm.rds"),
)
## A UniProt example
# Prep I: set up UniProt Fasta(s) if not yet
library(proteoQDA)
db_path <- "~/proteoQ/dbs/fasta/uniprot"
copy_uniprot_hs(db_path)
copy_uniprot_mm(db_path)
# Prep II: copy metadata and PSMs if not yet
dat_dir <- "~/proteoQ/custom_uniprot_lookups"
copy_global_exptsmry(dat_dir)
copy_global_fracsmry(dat_dir)
# (for Mascot, use `copy_global_mascot()`)
# (for Spectrum Mill, use `copy_global_sm()`)
copy_global_maxquant(dat_dir)
# Prep III: simulate UniProt data from RefSeq PSMs
# (for Mascot, use `simulUniprotPSM(Mascot)`)
library(proteoQ)
simulUniprotPSM(MaxQuant)
# --- workflow begins ---
library(proteoQ)
dat_dir <- "~/proteoQ/custom_uniprot_lookups"
load_expts()
# prepare UniProt-to-Entrez lookups
Uni2Entrez(species = human)
Uni2Entrez(species = mouse)
# head(readRDS(file.path("~/proteoQ/dbs/entrez/uniprot_entrez_hs.rds")))
# head(readRDS(file.path("~/proteoQ/dbs/entrez/uniprot_entrez_mm.rds")))
normPSM(
group_psm_by = pep_seq_mod,
group_pep_by = gene,
fasta = c("~/proteoQ/dbs/fasta/uniprot/uniprot_hs_2014_07.fasta",
"~/proteoQ/dbs/fasta/uniprot/uniprot_mm_2014_07.fasta"),
entrez = c("~/proteoQ/dbs/entrez/uniprot_entrez_hs.rds",
"~/proteoQ/dbs/entrez/uniprot_entrez_mm.rds"),
)
}
\dontrun{
# name your `species`
Uni2Entrez(species = this_human, abbr_species = Hs, filename = my_human.rds)
Uni2Entrez(species = this_mouse, abbr_species = Mm, filename = my_mouse.rds)
# head(readRDS(file.path("~/proteoQ/dbs/entrez/my_human.rds")))
# head(readRDS(file.path("~/proteoQ/dbs/entrez/my_mouse.rds")))
# in PSM and subsequent outputs, values under column `species`
# will be shown as "this_human" or "this_mouse"
normPSM(
group_psm_by = pep_seq_mod,
group_pep_by = gene,
fasta = c("~/proteoQ/dbs/fasta/refseq/refseq_hs_2013_07.fasta",
"~/proteoQ/dbs/fasta/refseq/refseq_mm_2013_07.fasta"),
entrez = c("~/proteoQ/dbs/entrez/my_human.rds",
"~/proteoQ/dbs/entrez/my_mouse.rds"),
)
}
\dontrun{
## Custom database(s) are required for workflows
# with species other than `human`, `mouse` and `rat`
BiocManager::install("org.Ce.eg.db")
library(org.Ce.eg.db)
library(proteoQ)
Uni2Entrez(species = "worm", abbr_species = "Ce", filename = uniprot_entrez_ce.rds)
# --- PAUSE: prepare Fasta file(s) before proceeding to `normPSM`. ---
normPSM(
fasta = "~/proteoQ/dbs/fasta/specify_your_worm.fasta",
entrez = c("~/proteoQ/dbs/entrez/uniprot_entrez_ce.rds"),
)
}
\dontrun{
# wrong `abbr_species` provided `species` other than "human", "mouse" and "rat"
Uni2Entrez(species = "my human", abbr_species = Hu, filename = my_human.rds, overwrite = TRUE)
# the value of `abbr_species` ignored at `species` among "human", "mouse" and "rat"
Uni2Entrez(species = human, abbr_species = ok_not_Hs, filename = my_human.rds, overwrite = TRUE)
}
|
e9fa1dc3f7efc0c5f5bc617d364f8368efbaf323
|
bed96b020bcd7d101bb6cde6d140b7afc474f7bc
|
/R/gdm.crossvalidation.R
|
699ff0b6e9097b76d0436624bf0bf9ed639a44a2
|
[] |
no_license
|
cran/gdm
|
7c9b4a5dfdb351437fbe4826ba97f5aa8fb342df
|
6197648afb0235a8453231afe69c7c618a3b0170
|
refs/heads/master
| 2022-12-10T21:14:38.723481
| 2022-12-01T14:30:02
| 2022-12-01T14:30:02
| 33,008,317
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,307
|
r
|
gdm.crossvalidation.R
|
#' @title Cross-Validation Assessment of a Fitted GDM
#'
#' @description Undertake a cross-validation assessment of a GDM fit using all
#' the predictors included in the formated GDM input table (spTable). The
#' cross-validation is run using a specified proportion (train.proportion) of
#' the randomly selected sites included in spTable to train the model, with the
#' remaining sites being used to test the performance of the model predictions.
#' The test is repeated a specified number of times (n.crossvalid.tests), with
#' a unique random sample taken each time. Outputs are a number of
#' cross-validation test metrics.
#'
#' @usage gdm.crossvalidation(spTable, train.proportion=0.9, n.crossvalid.tests=1,
#' geo=FALSE, splines=NULL, knots=NULL)
#'
#' @param spTable (dataframe) A dataframe holding the GDM input table for model
#' fitting.
#'
#' @param train.proportion (float) The proportion of sites in 'spTable' to use
#' in training the GDM, with the remaining proportion used to test the model.
#' (default = 0.9)
#'
#' @param n.crossvalid.tests (integer) The number of cross-validation sets to
#' use in testing the GDM. (default = 1)
#'
#' @param geo (boolean) Geographic distance to be used in model fitting
#' (default = FALSE).
#'
#' @param splines (vector) An optional vector of the number of I-spline basis
#' functions to be used for each predictor in fitting the model.
#'
#' @param knots (vector) An optional vector of knots in units of the predictor
#' variables to be used in the fitting process.
#'
#' @return List, providing cross-validation statistics. These are metrics that describe how well the model fit using the
#' sitepair training table predicts the dissimilarities in the sitepair testing table. Metrics provided include:
#' 'Deviance.Explained' (the deviance explained for the training data);
#' 'Test.Deviance.Explained' (the deviance explained for the test data);
#' 'Mean.Error';
#' 'Mean.Absolute.Error';
#' 'Root.Mean.Squre.Error';
#' 'Obs.Pred.Correlation' (Pearson's correlation coefficient between observed and predicted values);
#' 'Equalised.RMSE' (the average root mean square error across bands of observed dissimilarities (0.05 dissimialrity units));
#' 'Error.by.Observed.Value' (the average root mean square error and number of observations within bands of observed dissimilarities (0.05 dissimialrity units)).
#'
#'@export
gdm.crossvalidation <- function(spTable,
                                train.proportion=0.9,
                                n.crossvalid.tests=1,
                                geo=FALSE,
                                splines=NULL,
                                knots=NULL)
{
  ## Cross-validate a GDM. The unique sites in 'spTable' are randomly split
  ## into a training fraction ('train.proportion') and a test fraction; a
  ## model is fitted to the site-pairs formed exclusively from training sites
  ## and evaluated on the site-pairs formed exclusively from test sites.
  ## This is repeated 'n.crossvalid.tests' times and the evaluation
  ## statistics are averaged across repeats.
  ## 'geo', 'splines' and 'knots' are passed through to the model fit
  ## (see gdm.single.crossvalidation).
  ##checks to see if in site-pair format from formatsitepair function
  if(class(spTable)[1] != "gdmData"){
    warning("spTable class does not include type 'gdmData'. Make sure your data is in site-pair format or the gdm model will not fit.")
  }
  ##checks to make sure data is a matrix or data frame
  ## NOTE(review): inherits(spTable, "gdmData") would be the idiomatic check here
  if(!(class(spTable)[1]=="gdmData" | class(spTable)[1]=="matrix" | class(spTable)[1]=="data.frame")){
    stop("spTable argument needs to be gdmData, a matrix, or a data frame")
  }
  ##makes sure that train.proportion is a number between 0 and 1,
  ##and that it is not equal to 0
  if(is.numeric(train.proportion)==FALSE | train.proportion<=0 | train.proportion>1){
    stop("argument train.proportion needs to be a positive number between 0 and 1")
  }
  ##Check we have at least one cross-validation test to run
  if(n.crossvalid.tests<1){
    stop("set 'n.crossvalid.tests' to 1 or greater")
  }
  ## Collect, for every site-pair row, the coordinates of both of its sites.
  ## Columns 3/4 hold the x/y of site 1 and columns 5/6 the x/y of site 2,
  ## so each sapply() call returns a 2 x nrow(spTable) matrix (one column
  ## per site-pair, one row per member site of the pair).
  sortMatX <- sapply(1:nrow(spTable), function(i, spTab){c(spTab[i,3], spTab[i,5])}, spTab=spTable)
  sortMatY <- sapply(1:nrow(spTable), function(i, spTab){c(spTab[i,4], spTab[i,6])}, spTab=spTable)
  sortMatNum <- sapply(1:nrow(spTable), function(i){c(1,2)})   # which member of the pair (1 or 2)
  sortMatRow <- sapply(1:nrow(spTable), function(i){c(i,i)})   # originating row in spTable
  ##adds a column of NA for index to be added to
  fullSortMat <- cbind(as.vector(sortMatX), as.vector(sortMatY), as.vector(sortMatNum), as.vector(sortMatRow), rep(NA, length(sortMatX)))
  ##assigns sites by unique coordinates
  siteByCoords <- as.data.frame(unique(fullSortMat[,1:2]))
  ##number of sites to expect by coordinates
  numSites <- nrow(siteByCoords)
  ##assigns site index based on coordinates
  for(i in 1:numSites){
    fullSortMat[which(fullSortMat[,1]==siteByCoords[i,1] & fullSortMat[,2]==siteByCoords[i,2]),5] <- i
  }
  ##create index table to know where each site is in input site-pair table
  ## indexTab[r, k] = site index of member k (1 or 2) of site-pair row r
  indexTab <- matrix(NA,nrow(spTable),2)
  for(iRow in 1:nrow(fullSortMat)){
    indexTab[fullSortMat[iRow,4],fullSortMat[iRow,3]] <- fullSortMat[iRow,5]
  }
  ##determines the number of sites to remove
  numTestSites <- round(max(indexTab)*(1-train.proportion))
  numTrainSites <- max(indexTab) - numTestSites  # NOTE(review): computed but not used below
  ##randomly determines the index of sites to remove
  if(numTestSites <= 1)
  {
    stop("train.proportion is too high - no sites are available as test data in the cross-validation.")
  }
  # Set up the catchers for the cross-validation outputs (one entry/column
  # is appended per cross-validation repeat)
  Deviance.Explained <- NULL
  Mean.Error <- NULL
  Mean.Absolute.Error <- NULL
  Root.Mean.Squre.Error <- NULL
  Obs.Pred.Correlation <- NULL
  Equalised.RMSE <- NULL
  Error.by.Observed.Value.npairs <- NULL
  Error.by.Observed.Value.RMSE <- NULL
  # Now loop through the cross-validation tests
  for(i.test in 1:n.crossvalid.tests) # turn this into a parallel loop perhaps
  {
    # randomly select the train and test sites
    testSites <- sample(1:max(indexTab), numTestSites)
    trainSites <- c(1:max(indexTab))
    trainSites <- trainSites[-testSites]
    # grab the site-pairs containing only training sites, and only testing sites.
    # A row qualifies only when BOTH of its sites are in the chosen set, i.e.
    # its row index appears in the matches for column 1 AND column 2 --
    # duplicated() on the concatenated index vectors finds exactly those rows.
    # TEST
    rmIndexCol1 <- which(indexTab[,1] %in% testSites)
    rmIndexCol2 <- which(indexTab[,2] %in% testSites)
    all.test.indices <- c(rmIndexCol1,rmIndexCol2)
    test.pairs <- all.test.indices[duplicated(all.test.indices)]
    sampTableTest <- spTable[test.pairs,]
    # TRAIN
    rmIndexCol1 <- which(indexTab[,1] %in% trainSites)
    rmIndexCol2 <- which(indexTab[,2] %in% trainSites)
    all.test.indices <- c(rmIndexCol1,rmIndexCol2)
    train.pairs <- all.test.indices[duplicated(all.test.indices)]
    sampTableTrain <- spTable[train.pairs,]
    # Test how well a model fit with the training data predicts observed dissimilarities for the test data
    cv.test.out <- gdm.single.crossvalidation(sampTableTrain,
                                              sampTableTest,
                                              geo=geo,
                                              splines=splines,
                                              knots=knots)
    # Catch the outputs of this repeat
    Deviance.Explained <- c(Deviance.Explained, cv.test.out$Test.Deviance.Explained)
    Mean.Error <- c(Mean.Error, cv.test.out$Mean.Error)
    Mean.Absolute.Error <- c(Mean.Absolute.Error, cv.test.out$Mean.Absolute.Error)
    Root.Mean.Squre.Error <- c(Root.Mean.Squre.Error, cv.test.out$Root.Mean.Squre.Error)
    Obs.Pred.Correlation <- c(Obs.Pred.Correlation, cv.test.out$Obs.Pred.Correlation)
    Equalised.RMSE <- c(Equalised.RMSE, cv.test.out$Equalised.RMSE)
    Error.by.Observed.Value.npairs <- cbind(Error.by.Observed.Value.npairs, cv.test.out$Error.by.Observed.Value$obs.count)
    Error.by.Observed.Value.RMSE <- cbind(Error.by.Observed.Value.RMSE, cv.test.out$Error.by.Observed.Value$pred.RMSE)
  }# end for i.test
  # Give the observed dissimilarity bands for the error by observed value.
  # NOTE(review): the band labels come from the LAST repeat's output;
  # presumably the bands are identical across repeats -- confirm in
  # gdm.single.crossvalidation.
  row.names(Error.by.Observed.Value.npairs) <- cv.test.out$Error.by.Observed.Value$obs.dissim
  row.names(Error.by.Observed.Value.RMSE) <- cv.test.out$Error.by.Observed.Value$obs.dissim
  # Create a summary of the RMSE by observed value (mean over repeats per band)
  Error.by.Observed.Value <- data.frame('observed.dissimilarity' = cv.test.out$Error.by.Observed.Value$obs.dissim,
                                        'number.obs.sitepairs' = rowMeans(Error.by.Observed.Value.npairs, na.rm=TRUE),
                                        'RMSE' = rowMeans(Error.by.Observed.Value.RMSE, na.rm=TRUE))
  rownames(Error.by.Observed.Value) <- c()
  # Now generate outputs for the cross-validation: means across repeats plus
  # the full per-repeat statistics for anyone who wants the distribution
  # write the outputs of the function
  list(Deviance.Explained = mean(Deviance.Explained),
       Mean.Error = mean(Mean.Error),
       Mean.Absolute.Error = mean(Mean.Absolute.Error),
       Root.Mean.Squre.Error = mean(Root.Mean.Squre.Error),
       Obs.Pred.Correlation = mean(Obs.Pred.Correlation),
       Equalised.RMSE = mean(Equalised.RMSE),
       Error.by.Observed.Value = Error.by.Observed.Value,
       Full.Crossvalidation.Stats = list(Deviance.Explained = Deviance.Explained,
                                         Mean.Error = Mean.Error,
                                         Mean.Absolute.Error = Mean.Absolute.Error,
                                         Root.Mean.Squre.Error = Root.Mean.Squre.Error,
                                         Obs.Pred.Correlation = Obs.Pred.Correlation,
                                         Equalised.RMSE = Equalised.RMSE,
                                         Error.by.Observed.Value.npairs= Error.by.Observed.Value.npairs,
                                         Error.by.Observed.Value.RMSE = Error.by.Observed.Value.RMSE))
} # end gdm_crossvalidation()
|
0a0ab143f15425cbd2b61afe817800aa826b8834
|
3bb0be347cf60ea159e47ff27a42e1f5d4212a22
|
/Practical Machine Learning/Quiz 4.R
|
cb5780381351dbc1c9ffcce2d5cbf92ec2ea4523
|
[] |
no_license
|
neeraj5699/datasciencecoursera-2
|
5e5254653b40f415a73107214fbbca18f7f48e6b
|
f59a80d4c35c2564386d020aa0fe2a069c44887d
|
refs/heads/master
| 2021-01-23T16:36:50.658012
| 2015-06-28T08:20:31
| 2015-06-28T08:20:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,340
|
r
|
Quiz 4.R
|
# Q1 ----------
# Fit a random forest and a boosted tree model on the vowel data, compare
# their test-set accuracies, and compute the accuracy on the subset of test
# cases where the two models agree.
library(ElemStatLearn)
library(caret)
library(randomForest)
set.seed(33833)   # seed set before model fitting so results are reproducible
data(vowel.train)
data(vowel.test)
summary(vowel.train)
vowel.train$y <- factor(vowel.train$y)   # outcome must be a factor for classification
vowel.test$y <- factor(vowel.test$y)
rf <- train(y ~ . ,data=vowel.train,method="rf",prox=TRUE,ntree=500)
# NOTE(review): verbose=F uses the reassignable shorthand; prefer FALSE
gbm <- train(y ~ .,data=vowel.train,method ="gbm",verbose=F)
pred.rf <- predict(rf,vowel.test)
pred.gbm <- predict(gbm,vowel.test)
# Get the accuracy for the tree and the gbm
rf_accuracy = sum(pred.rf == vowel.test$y) / length(pred.rf)
gbm_accuracy = sum(pred.gbm == vowel.test$y) / length(pred.gbm)
# Get the last part of the answer: accuracy on the cases where the two
# models make the same prediction
agreeSub = vowel.test[pred.rf == pred.gbm,]
pred.comb = predict(rf, agreeSub)
comb_accuracy = sum(pred.comb == agreeSub$y) / length(pred.comb)
rf_accuracy * (1-gbm_accuracy) + gbm_accuracy * (1 - rf_accuracy) + rf_accuracy * gbm_accuracy
# Another solution
combinedTestData <- data.frame(rf.pred=pred.rf,
                               gbm.pred=pred.gbm,
                               y=vowel.test$y)
# NOTE(review): sum() below has no arguments (returns 0) -- this looks like
# an unfinished line of the "another solution" approach
sum()
# Q2 ----------
# Set up the Alzheimer's data partition for the stacked-model question.
library(caret)
library(gbm)
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
set.seed(62433)
|
2600b2686db23ad698be99dab4eaecbbf5ecb358
|
9f3b8cd37ec1e63c51bfe1c2a5c1b5a955a10e34
|
/Yidi_Wang_HM3.R
|
4bd2f93d9b44834856d23944bd77d3d13b7de697
|
[] |
no_license
|
IndyNYU/R-in-Finance
|
3fd3cbd58bd9d4d80f6472bc6bdd1afeddc874f5
|
fa1108909a3e555a2ae756af4fa47b956013ca26
|
refs/heads/master
| 2020-04-01T18:35:04.714059
| 2018-10-17T18:16:39
| 2018-10-17T18:16:39
| 153,501,789
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,402
|
r
|
Yidi_Wang_HM3.R
|
# FRE 6871 Homework 3 By Yidi Wang
# 4/18/2018
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged -- they
# clobber the caller's workspace and assume a machine-specific path.
rm(list=ls())
setwd("D:/R")
# 1. For this problem we will be working with the data set 'cars' from the openintro package.
# NOTE(review): install.packages() should normally not run inside a script;
# it re-installs on every execution.
install.packages("openintro")
library(openintro)
# 1.1 Descriptive statistics.
# Load and explore the data set 'cars'.
data("cars")
summary(cars)
structure(cars)
View(cars)
class(cars)
# Create a dataframe named carsSub that only includes the columns price, mpgCity and weight.
carsSub <- cars[c("price", "mpgCity", "weight")]
View(carsSub)
# Write a function that takes in a numeric vector and returns the descriptive statistics.
# Compute descriptive statistics for a numeric vector.
#
# Args:
#   x:       a numeric vector.
#   na.omit: if TRUE, drop NA values before computing (default FALSE).
#
# Returns:
#   A named numeric vector with the mean (m), standard deviation (s),
#   minimum, maximum, and the number of unique values (n_unique).
mystats <- function(x, na.omit = FALSE) {
  # Use TRUE/FALSE, never the reassignable shorthand T/F, for defaults.
  if (na.omit) {
    x <- x[!is.na(x)]
  }
  m <- mean(x)
  s <- sd(x)
  minimum <- min(x)
  maximum <- max(x)
  n_unique <- length(unique(x))
  # Last expression is returned; no explicit return() needed.
  c(m = m, s = s, minimum = minimum, maximum = maximum, n_unique = n_unique)
}
# Using a function from the apply() family, run your function on carsSub.
sapply(carsSub, mystats)
# Use aggregate() with carsSub to apply the mean function to each level of the variables drivetrain and type.
aggregate(carsSub, by=list(level=cars$driveTrain), mean)
# Use aggregate() with carsSub to apply the mean function to each combination of levels of the variables driventrain and type.
aggregate(carsSub, by=list(level1=cars$driveTrain, level2=cars$type), mean)
# 1.2 Frequency & contingency tables, associated chi-square tests.
# Create a one-way frequency table with categorical variable drivetrain.
mytable <- table(cars$driveTrain)
mytable
# Create a two way table with categorical variables drivetrain and type.
mytable2 <- table(cars$driveTrain, cars$type)
mytable2
# Similar variables to the last bullet, create a marginal frequency table with
# values being the percent of the row and a bottom row which is the sum of each column.
addmargins(prop.table(mytable2))
# Use a chi-squared test to check if the two categorical variables are independent.
chisq.test(mytable2)
# NOTE(review): with p = 0.03468 the null of independence is rejected at the
# 0.05 level (though not at 0.01); the original conclusion below may be misleading.
# The p-value is 0.03468 is larger than 0.01 and i couldn't reject the hypothesis.
# So they are actually independent.
# 1.3 Correlation and covariance.
# Produce the Covariance Matrix for carsSub.
cov(carsSub)
# Produce the Pearson Correlation Matrix for carsSub.
cor(carsSub, method = 'pearson')
# Using cor.test(), test the significance of the correlation between the price and weight variables.
cor.test(cars$price, cars$weight)
# So there exists correlation between the price and the weight.
# 1.4 t-tests.
# Create a dataframe named carsFR that is the subset of cars that only includes rows where driveTrain is either 'front' or 'rear'.
carsFR <- subset(cars, driveTrain=='front' | driveTrain=='rear')
View(carsFR)
# Using an independent t-test, determine whether there is a statistically significant difference between the mean mpgCity in the two groups.
# NOTE(review): this passes the factor driveTrain as the second SAMPLE;
# the grouped form t.test(mpgCity ~ driveTrain, data = carsFR) is presumably
# what was intended -- verify.
t.test(carsFR$mpgCity, carsFR$driveTrain)
# 1.5 Nonparametric Statistics.
# Use the Wilcox rank sum test to test the same question above, to assess whether the Mile Per Gallon in each category are sampled from the same population.
# NOTE(review): same issue as the t-test above -- a formula interface
# (mpgCity ~ driveTrain) is presumably intended here.
wilcox.test(cars$mpgCity, cars$driveTrain)
View(cars)
# 2. For this problem we will continue working with the data set 'cars'.
# 2.1 Simple linear regression.
# Using simple linear regression, lm(), predict the mpgCity from weight.
myfit <- lm(mpgCity~weight, data=cars)
# 2.2 Polynomial Regression.
# Fit a polynomial regression to the same data.
fit2 <- lm(mpgCity~weight + I(weight^2), data=cars)
# Explain the coefficients and p-values.
summary(myfit)
summary(fit2)
# Both regression coefficients are significant at the p < 0.0001 level.
# The R-squared values increase. The second model is better.
# 2.3 Scatterplot from the car library.
# Using scatterplot() from the car package, compare the simple linear and Loess curve plots pictorially.
library(car)
scatterplot(mpgCity~weight, data=cars)
scatterplot(mpgCity~weight+I(weight^2), data=cars)
# Confirm your conclusion from the R-squared values visually.
# 2.4 Multiple linear regression.
# Create a dataframe named carsSub that only includes the columns price, mpgCity and weight.
# NOTE(review): data(cars) reloads the data set; after library(car) above,
# confirm this still refers to the openintro 'cars' data.
data(cars)
carsSub <- cars[c("price","mpgCity","weight")]
View(carsSub)
# look at the relationships between these variables using scatterplot() from the car package.
cor(carsSub)
library(car)
scatterplotMatrix(carsSub)
# Tell us 3 things you notice from the plots.
# 1. The price and mpgCity is negatively related.
# 2. The price and weight is positively related.
# 3. The mpgCity and weight is negatively related.
# Fit the multiple regression using lm() modeling mpgCity as a function of price and weight.
multifit <- lm(mpgCity~price + weight, data=carsSub)
# From the modeling results, what conclusions can you draw from the p-values.
summary(multifit)
# The p-value of price is a little large.
# Use confint(), produce the 95% confidence intervals for the coefficients of each variable.
confint(multifit)
# Using plot(fit).
plot(multifit)
# Based on these plots, I think, the four assumptions are satisfied.
# Using newer methods.
# Use the residplot() function to see if the errors follow a normal distribution.
# Plot a histogram of the studentized residuals of a fitted model, overlaid
# with a normal density (blue, solid) and a kernel density estimate (red,
# dashed) so the normality-of-errors assumption can be checked visually.
#
# Args:
#   fit:     a fitted model accepted by rstudent(), e.g. the result of lm().
#   nbreaks: approximate number of histogram bins (default 10).
#
# Called for its plotting side effect; returns NULL (the result of lines()).
residplot <- function(fit, nbreaks = 10) {
  z <- rstudent(fit)
  # freq = FALSE puts the histogram on the density scale so the density
  # curves can be overlaid on the same axes. Use TRUE/FALSE, not T/F.
  hist(z, breaks = nbreaks, freq = FALSE)
  rug(jitter(z), col = "brown")
  curve(dnorm(x, mean = mean(z), sd = sd(z)),
        add = TRUE, col = "blue", lwd = 2)
  lines(density(z)$x, density(z)$y,
        col = "red", lwd = 2, lty = 2)
}
# Check normality of the residuals of the multiple regression visually.
residplot(multifit)
# Use the crPlots() from the library 'car' to assess if we have met the linearity assumption.
library(car)
crPlots(multifit)
# Use the ncvTest() from the library 'car' to test if we have met the constant variance assumptions.
ncvTest(multifit)
# Use influencePlot() from the car package, determine which points are outliers.
outlierTest(multifit)
# Selecting the best regression model by backward stepwise AIC.
library(MASS)
stepAIC(multifit, direction = "backward")
# 3. For this problem we will be working with the data set "PlantGrowth".
data("PlantGrowth")
# 3.1 One Way ANOVA.
# Load and explore the data set PlantGrowth.
str(PlantGrowth)
summary(PlantGrowth)
View(PlantGrowth)
# Create a table() of the group column to confirm a balanced design.
table(PlantGrowth$group)
# Use aggregate to find the group means.
aggregate(PlantGrowth$weight, by=list(PlantGrowth$group), FUN=mean)
# Use aggregate to find the group standard deviations.
aggregate(PlantGrowth$weight, by=list(PlantGrowth$group), FUN=sd)
# Use ANOVA test for group differences.
fit <- aov(PlantGrowth$weight~PlantGrowth$group)
summary(fit)
# Use TukeyHSD() to answer this.
TukeyHSD(fit)
# From the multcomp package, use glht() combined plot(cld()) to see this result.
library(multcomp)
tuk <- glht(fit)
plot(cld(tuk))
# Use bartlett.test() to determine if the variances in the groups differ significantly.
# BUG FIX: was libray(car), which errors at runtime ("could not find
# function 'libray'"); corrected to library(car).
library(car)
bartlett.test(PlantGrowth$weight~PlantGrowth$group)
qqPlot(lm(PlantGrowth$weight~PlantGrowth$group))
# Use outlierTest() from the car package.
outlierTest(fit)
|
495bbce6a1d6ec68686ee02f9256b72590e61ef3
|
9d7f9350bc17fd00e590ddd5053addb4090b1993
|
/man/RGA-package.Rd
|
a8fdf5efa04f2540647be50d010fc001d5e286d9
|
[] |
no_license
|
selesnow/RGA
|
14359e1d90ec7fbe7b91d9f4420926b820ba1536
|
c0f91a102ef50c57289ac5fb9bda1ef5bc95d920
|
refs/heads/master
| 2019-07-12T01:28:37.211872
| 2016-08-23T05:21:36
| 2016-08-23T05:21:36
| 105,628,870
| 2
| 2
| null | 2017-10-03T08:19:09
| 2017-10-03T08:19:09
| null |
UTF-8
|
R
| false
| true
| 3,377
|
rd
|
RGA-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{RGA-package}
\alias{RGA}
\alias{RGA-package}
\alias{rga}
\title{A Google Analytics API client for R}
\description{
A package for extracting data from Google Analytics API into R.
}
\section{Key features}{
\itemize{
\item Support for \href{https://developers.google.com/accounts/docs/OAuth2}{OAuth 2.0 authorization};
\item Access to the following \href{https://developers.google.com/analytics/devguides/platform/}{Google Analytics APIs}:
\itemize{
\item \href{https://developers.google.com/analytics/devguides/config/mgmt/v3}{Management API}: access to configuration data for accounts, web properties, views (profiles), goals and segments;
\item \href{https://developers.google.com/analytics/devguides/reporting/core/v3}{Core Reporting API}: query for dimensions and metrics to produce customized reports;
\item \href{https://developers.google.com/analytics/devguides/reporting/mcf/v3}{Multi-Channel Funnels Reporting API}: query the traffic source paths that lead to a user's goal conversion;
\item \href{https://developers.google.com/analytics/devguides/reporting/realtime/v3}{Real Time Reporting API}: report on activity occurring on your property at the moment;
\item \href{https://developers.google.com/analytics/devguides/reporting/metadata/v3}{Metadata API}: access the list of API dimensions and metrics and their attributes;
}
\item Access to all the accounts which the user has access to;
\item API responses are converted directly into R as a \code{data.frame};
\item Auto-pagination to return more than 10,000 rows of the results by combining multiple data requests.
}
To report a bug please type: \code{utils::bug.report(package = "RGA")}.
}
\section{Usage}{
Once you have the package loaded, there are 3 steps you need to use to get data from Google Analytics:
\enumerate{
\item Authorize this package to access your Google Analytics data with the \code{\link{authorize}} function;
\item Determine the profile ID which you want to get access to with the \code{\link{list_profiles}} function;
\item Get the results from the API with one of these functions: \code{\link{get_ga}}, \code{\link{get_mcf}} or \code{\link{get_realtime}}.
}
For details about this steps please type into R: \code{browseVignettes(package = "RGA")}
}
\section{Bug reports}{
Before posting a bug, please try to execute your code with the \code{\link[httr]{with_verbose}} wrapper. It will be useful if you attach the verbose output to the bug report. For example: \code{httr::with_verbose(list_profiles())}
Posting the \code{traceback()} output may also be helpful.
To report a bug please type into R: \code{utils::bug.report(package = "RGA")}
}
\examples{
\dontrun{
# load package
library(RGA)
# get access token
authorize()
# get a GA profiles
ga_profiles <- list_profiles()
# choose the profile ID by site URL
id <- ga_profiles[grep("http://example.com", ga_profiles$website.url), "id"]
# get date when GA tracking began
first.date <- firstdate(id)
# get GA report data
ga_data <- get_ga(id, start.date = first.date, end.date = "today",
metrics = "ga:users,ga:sessions",
dimensions = "ga:userGender,ga:userAgeBracket")
}
}
\author{
Artem Klevtsov \email{a.a.klevtsov@gmail.com}
}
\keyword{package}
|
28fa2afe950c0d0254e991ffe373269daefccc35
|
0cf4e13f4575be71a3a8f6fb1be79bed29aa3881
|
/pkg/patchwork/man/patchwork.smoothing.Rd
|
fd30856803118a74c933ef131f587d644aef9cd2
|
[] |
no_license
|
Sebastian-D/patchwork
|
57f7281e4ec456538707de3587e80eab1af34269
|
2eecd882a2dff99c765c85333b6223fd2c85991d
|
refs/heads/master
| 2021-06-12T01:27:44.338367
| 2017-06-02T12:25:33
| 2017-06-02T12:25:33
| 4,831,657
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,581
|
rd
|
patchwork.smoothing.Rd
|
\name{patchwork.smoothing}
\alias{patchwork.smoothing}
\alias{smoothing}
\title{
patchwork smoothing function
}
\description{
Applies smoothing by averaging kb segments.
}
\usage{
patchwork.smoothing(data,normaldata=NULL,reference=NULL,chroms)
}
\arguments{
\item{data}{
data object generated after patchwork.GCNorm().
}
\item{normaldata}{
Default is NULL, generated and used if you have supplied normal.bam to patchwork.plot.
}
\item{reference}{
Default is NULL, used if you have supplied reference argument to patchwork.plot.
}
\item{chroms}{
chroms is an object generated from pile.alleles.RData's alf\$achr attribute.
}
}
%\details{
%Details
%}
%\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
%}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
Markus Mayrhofer, \email{markus.mayrhofer@medsci.uu.se}
Sebastian DiLorenzo, \email{sebastian.dilorenzo@medsci.uu.se}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{patchwork.plot}}
}
%\examples{
%##---- Should be DIRECTLY executable !! ----
%##-- ==> Define data, use random,
%##-- or do help(data=index) for the standard data sets.
%Run examples of karyotype stuff. or not, maybe just point back to patchwork.plot.
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
90010235e8a9aa91057bc3fb1c8d6371dbcc5402
|
c924028cb279cc74dfaacc80f1fab2fb036d3035
|
/man/treebut.Rd
|
50ad895046b442bca167adf6165c275d0b4fe4b5
|
[] |
no_license
|
drmarcogir/recluster
|
b8b17f596916253f37e424308df8c4bcd8167fde
|
9eea0a13aa9a62207a0330c411bb596b1880e0f3
|
refs/heads/master
| 2022-11-23T11:37:53.767717
| 2020-08-05T18:17:45
| 2020-08-05T18:17:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
rd
|
treebut.Rd
|
\name{treebut}
\alias{treebut}
\docType{data}
\title{
Phylogenetic tree for the butterfly species included in dataisl dataset
}
\description{
This phylogenetic tree has been created based on the known phylogeny of butterflies at family and subfamily level and on COI sequences at genus and species level. Branch lengths have been calculated by the Grafen method
}
\usage{
data(treebut)
}
\details{
A phylogenetic tree of butterfly species occurring on Western Mediterranean islands.
}
\references{
Dapporto L., Ramazzotti M., Fattorini S., Talavera G., Vila R., Dennis R.
"recluster: an unbiased clustering procedure for beta-diversity turnover"
Ecography (2013), 36:1070-1075.
}
\author{
Gerard Talavera and Roger Vila
}
\keyword{datasets}
|
eaac33108a87a6ef1d01423f604b6b04133bea37
|
fc878f0e959cedc4b65e6926089aa8c2dead547a
|
/server.R
|
b9abfdc22b53e8ebbb07638d80ac9338dc559953
|
[] |
no_license
|
thongle7/Info201-Final-Project
|
a9d3b6ecf91e3194c788461336d3b1b5c3374cc7
|
ed95792ae9ac48d877a32ecd4be6fd2ffe6c674e
|
refs/heads/master
| 2021-04-27T10:59:21.787424
| 2018-03-08T07:52:51
| 2018-03-08T07:52:51
| 122,550,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,911
|
r
|
server.R
|
# Load the packages needed for the map and histogram outputs
library(shiny)
library(dplyr)
library(ggplot2)
library(ggmap)
library(leaflet)
# Read in the mass-shooting incident data for each year (paths are relative
# to the app directory; stringsAsFactors = FALSE keeps text columns as
# character vectors)
data2013 <-
  read.csv(file = "newdata/2013ms.csv", stringsAsFactors = FALSE)
data2014 <-
  read.csv(file = "newdata/2014ms.csv", stringsAsFactors = FALSE)
data2015 <-
  read.csv(file = "newdata/2015ms.csv", stringsAsFactors = FALSE)
data2016 <-
  read.csv(file = "newdata/2016ms.csv", stringsAsFactors = FALSE)
data2017 <-
  read.csv(file = "newdata/2017ms.csv", stringsAsFactors = FALSE)
# Combined data set used when the user selects "All" years; assumes all
# yearly files share the same columns
all_data <- rbind(data2013, data2014, data2015, data2016, data2017)
# Shiny server for the incidents dashboard: one leaflet map tab and three
# histogram tabs, all filtered by the selected year and by the killed /
# injured slider ranges.
my.server <- function(input, output) {
  ## Helper functions ----
  # Return the per-year data frame (data2013 ... data2017) for a given year
  # string. get() on the constructed name replaces the original
  # eval(parse(text = ...)), which is an anti-pattern for variable lookup.
  select_year <- function(year) {
    get(paste0("data", year))
  }
  # Keep incidents whose death count lies inside the slider range
  # (input = c(min, max)).
  filter_dead <- function(data, input) {
    filter(data, X..Killed >= input[1], X..Killed <= input[2])
  }
  # Keep incidents whose injured count lies inside the slider range.
  filter_injured <- function(data, input) {
    filter(data, X..Injured >= input[1], X..Injured <= input[2])
  }
  # Select the data for a year ("All" = the combined data set) and apply
  # both casualty filters. Shared by every output below instead of
  # repeating the same if/else block four times.
  prep_data <- function(year, dead, injured) {
    data <- if (year == "All") all_data else select_year(year)
    data <- filter_dead(data, dead)
    filter_injured(data, injured)
  }
  # Render a clustered leaflet marker map; each popup shows the address,
  # date and casualty counts of the incident.
  map_year <- function(data) {
    data %>%
      leaflet() %>%
      addTiles() %>%
      addMarkers(
        popup = paste0(
          "<b>Full address:</b> ",
          data$fullad,
          "<br/><b>",
          "Incident Date: </b>",
          data$Incident.Date,
          "<br/><b>",
          "Dead: </b>",
          data$X..Killed,
          "</br><b>",
          "Injured: </b>",
          data$X..Injured
        ),
        clusterOptions = markerClusterOptions()
      )
  }
  ## Incidents map ----
  output$map <- renderLeaflet({
    map_year(prep_data(input$year, input$Dead, input$Injured))
  })
  ## Injured per state ----
  # Bar height is the total injured per state: stat = "identity" stacks the
  # per-incident values within each State category.
  output$histogram2 <- renderPlot({
    data <- prep_data(input$year3, input$Dead3, input$Injured3)
    ggplot(data, aes(x = State, y = X..Injured)) +
      geom_bar(stat = "identity") +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      labs(title = "Amount of Injured in states", x = "State", y = "Injured in State")
  })
  ## Deaths per state ----
  output$histogram <- renderPlot({
    data <- prep_data(input$year3, input$Dead3, input$Injured3)
    ggplot(data, aes(x = State, y = X..Killed)) +
      geom_bar(stat = "identity") +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      labs(title = "Amount of deaths in states", x = "State", y = "Deaths in State")
  })
  ## Incident frequency per state ----
  # geom_bar() with no y aesthetic counts rows (incidents) per state.
  output$histogram3 <- renderPlot({
    data <- prep_data(input$year4, input$Dead4, input$Injured4)
    ggplot(data, aes(x = State)) +
      geom_bar(aes(col = State)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      labs(title = "Incidents Frequency", x = "State", y = "Frequency")
  })
}
shinyServer(my.server)
|
40da7621cff47ed12ae9c92e87835e24772dd744
|
8847282f697f1bf9da75feabe758307b72712e54
|
/dataAnalysisCodes/watchERP_2stim/04-icon-detection/old/temp.R
|
df5bfd64404f267bc16c7050bb6bf445a2d0a150
|
[] |
no_license
|
adriencombaz/HybBciCode
|
a17b48a8a01fcf76e8c22d00732b8ceb1e642dce
|
755432a2a06c2abe2eb5adbca33d1348bcf9ac68
|
refs/heads/master
| 2020-05-19T17:49:50.102762
| 2013-10-02T13:54:34
| 2013-10-02T13:54:34
| 6,388,055
| 2
| 1
| null | 2016-01-20T10:42:13
| 2012-10-25T13:57:07
|
Matlab
|
UTF-8
|
R
| false
| false
| 5,982
|
r
|
temp.R
|
# Analysis of icon-detection accuracy for the watchERP_2stim experiment.
# Standalone script: fixes the working directory to the author's machine.
setwd("d:/KULeuven/PhD/Work/Hybrid-BCI/HybBciCode/dataAnalysisCodes/watchERP_2stim/04-icon-detection/")
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable
# for a standalone analysis script, but never source() this from other code.
rm(list = ls())
library(ggplot2)
# cleanPlot() (used below) comes from this sourced helper file.
source("d:/KULeuven/PhD/Work/Hybrid-BCI/HybBciCode/dataAnalysisCodes/deps/cleanPlot.R")
#################################################################################################################
#################################################################################################################
loadData <- function(file)
{
  # Load one classification-accuracy file and reshape it to long format:
  # the three correctness columns (p3, ssvep, combined/symbol) become rows
  # distinguished by a `correctnessType` column, all exposing their score
  # under the common name `correctness`, so results can be facetted.
  #
  # Args:
  #   file: base name (no extension) of a .txt results file under fileDir.
  # Returns:
  #   data.frame with `correctnessType` in {"p3", "ssvep", "symbol"}.
  #
  # Cleanup vs. original: removed unused locals figDir/figname, the no-op
  # summary() call (inside a function its result is silently discarded)
  # and the leftover debug str() call.
  fileDir <- "d:/KULeuven/PhD/Work/Hybrid-BCI/HybBciProcessedData/watchERP_2stim/04-icon-detection"
  fileext <- ".txt"
  filename <- file.path(fileDir, paste0(file, fileext))
  accData <- read.csv(filename, header = TRUE, sep = ",", strip.white = TRUE)

  # P300-only correctness rows.
  temp <- subset(accData, select = -c(correctness_ssvep, correctness))
  temp$correctnessType <- "p3"
  temp$correctness <- temp$correctness_p3
  temp <- subset(temp, select = -correctness_p3)

  # SSVEP-only correctness rows.
  temp2 <- subset(accData, select = -c(correctness_p3, correctness))
  temp2$correctnessType <- "ssvep"
  temp2$correctness <- temp2$correctness_ssvep
  temp2 <- subset(temp2, select = -correctness_ssvep)

  # The plain `correctness` column already holds the combined symbol score.
  temp3 <- subset(accData, select = -c(correctness_ssvep, correctness_p3))
  temp3$correctnessType <- "symbol"

  # rbind() matches data.frame columns by name, so the differing column
  # orders of the three pieces are fine.
  rbind(temp, temp2, temp3)
}
#################################################################################################################
#################################################################################################################
# Conditions to compare: which sessions were used for training vs. testing.
file <- c("train1_test2345678", "train12_test345678", "train2_test345678",
          "train23_test45678", "train3_test45678", "train34_test5678")

# Load every condition and stack the results, tagging each row with its
# condition name. do.call(rbind, lapply(...)) avoids growing accData
# inside a loop (the original appended with rbind() on every iteration).
accData <- do.call(rbind, lapply(file, function(f) {
  d <- loadData(f)
  d$condition <- f
  d
}))

# Output directory for all figures saved below.
figDir <- "d:/KULeuven/PhD/Work/Hybrid-BCI/HybBciResults/watchERP_2stim/04-icon-detection/"
#################################################################################################################
#################################################################################################################
# Mean correctness vs. number of repetitions, one colour per training/testing
# condition; facets: subject (rows) x correctness type (columns).
# NOTE(review): `fun.y` is deprecated in ggplot2 >= 3.3 (use `fun =`); kept
# as-is for the ggplot2 version this script targets.
pp <- ggplot( accData, aes(nRep, correctness, colour=condition ) )
pp <- pp + stat_summary(fun.y = mean, geom="point", position = position_dodge(0.4), shape = 20, size = 3)
pp <- pp + stat_summary(fun.y = mean, geom="line", position = position_dodge(0.4))
pp <- pp + facet_grid( subject ~ correctnessType )
pp <- pp + ylim(0, 1)
pp <- cleanPlot(pp)
# Save the figure to the results directory (30 x 20 cm PNG).
ggsave( "allData.png"
, plot = pp
, path = figDir
, width = 30
, height = 20
, units = "cm"
)
#################################################################################################################
#################################################################################################################
# Same plot as above, restricted to trials where the SSVEP target
# flickered at 15 Hz.
pp <- ggplot( subset(accData, targetFrequency==15), aes(nRep, correctness, colour=condition ) )
pp <- pp + stat_summary(fun.y = mean, geom="point", position = position_dodge(0.4), shape = 20, size = 3)
pp <- pp + stat_summary(fun.y = mean, geom="line", position = position_dodge(0.4))
pp <- pp + facet_grid( subject ~ correctnessType )
pp <- pp + ylim(0, 1)
pp <- cleanPlot(pp)
ggsave( "allData_15Hz.png"
, plot = pp
, path = figDir
, width = 30
, height = 20
, units = "cm"
)
# ...and restricted to the 12 Hz target trials.
pp <- ggplot( subset(accData, targetFrequency==12), aes(nRep, correctness, colour=condition ) )
pp <- pp + stat_summary(fun.y = mean, geom="point", position = position_dodge(0.4), shape = 20, size = 3)
pp <- pp + stat_summary(fun.y = mean, geom="line", position = position_dodge(0.4))
pp <- pp + facet_grid( subject ~ correctnessType )
pp <- pp + ylim(0, 1)
pp <- cleanPlot(pp)
ggsave( "allData_12Hz.png"
, plot = pp
, path = figDir
, width = 30
, height = 20
, units = "cm"
)
#################################################################################################################
#################################################################################################################
# Below: one figure per correctness measure (symbol / ssvep / p3), coloured
# by target frequency (as a factor so ggplot uses a discrete scale) and
# facetted by subject x condition.
accData$targetFrequency <- as.factor(accData$targetFrequency)

pp <- ggplot( subset(accData, correctnessType=="symbol"), aes(nRep, correctness, colour=targetFrequency ) )
pp <- pp + stat_summary(fun.y = mean, geom="point", position = position_dodge(0.4), shape = 20, size = 3)
pp <- pp + stat_summary(fun.y = mean, geom="line", position = position_dodge(0.4))
pp <- pp + facet_grid( subject ~ condition )
pp <- pp + ylim(0, 1)
pp <- cleanPlot(pp)
ggsave( "allData_symbol.png"
, plot = pp
, path = figDir
, width = 30
, height = 20
, units = "cm"
)
pp <- ggplot( subset(accData, correctnessType=="ssvep"), aes(nRep, correctness, colour=targetFrequency ) )
pp <- pp + stat_summary(fun.y = mean, geom="point", position = position_dodge(0.4), shape = 20, size = 3)
pp <- pp + stat_summary(fun.y = mean, geom="line", position = position_dodge(0.4))
pp <- pp + facet_grid( subject ~ condition )
pp <- pp + ylim(0, 1)
pp <- cleanPlot(pp)
ggsave( "allData_ssvep.png"
, plot = pp
, path = figDir
, width = 30
, height = 20
, units = "cm"
)
pp <- ggplot( subset(accData, correctnessType=="p3"), aes(nRep, correctness, colour=targetFrequency ) )
pp <- pp + stat_summary(fun.y = mean, geom="point", position = position_dodge(0.4), shape = 20, size = 3)
pp <- pp + stat_summary(fun.y = mean, geom="line", position = position_dodge(0.4))
pp <- pp + facet_grid( subject ~ condition )
pp <- pp + ylim(0, 1)
pp <- cleanPlot(pp)
ggsave( "allData_p3.png"
, plot = pp
, path = figDir
, width = 30
, height = 20
, units = "cm"
)
|
e01c31147b7f75bbccea98ae1fbe990082fd4082
|
2b4e13cf56d27be0e3e791a9c59d1c43665df452
|
/man/StochasticGrid.Rd
|
25b7c9c8b31c72816cc5dfd8cb6c73ee377639be
|
[] |
no_license
|
IanMadlenya/rcss
|
dc16f85956cc9f8c05881965225d12107c183a79
|
f6463a8a218c72ddbb773db14b956b82aab72d57
|
refs/heads/master
| 2020-05-24T18:07:57.996818
| 2017-01-27T22:12:16
| 2017-01-27T22:12:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,428
|
rd
|
StochasticGrid.Rd
|
\name{StochasticGrid}
\alias{StochasticGrid}
\title{Stochastic grid}
\description{Generate a grid using k-means clustering.}
\usage{
StochasticGrid(start, disturb, n_grid, max_iter, warning)
}
\arguments{
\item{start}{Array representing the start. The first entry must be 1
and array [-1] represents the starting state.}
\item{disturb}{4 dimensional array containing the path disturbances.
Matrix [,,i,j] represents the disturbance at time i for sample path j.}
\item{n_grid}{Number of grid points in the stochastic grid.}
\item{max_iter}{Maximum iterations in the k-means clustering algorithm.}
\item{warning}{Boolean indicating whether messages from the k-means
clustering algorithm are to be displayed}
}
\value{
Matrix representing the stochastic grid. Each row represents a
particular grid point. The first column contains only 1s.
}
\examples{
## Generate a stochastic matrix using an AR(2) process
start <- c(1, 0, 0)
n_dim <- length(start)
n_path <- 10
psi1 <- 0.3
psi2 <- 0.65
n_dec <- 21
path_disturb <- array(data = matrix(c(1, 0, 0,
0, 0, 1,
0, psi2, psi1), ncol = 3, byrow = TRUE),
dim = c(n_dim, n_dim, (n_dec - 1), n_path))
path_disturb[3,1,,] <- runif((n_dec - 1) * n_path, -1, 1)
n_grid <- 10
grid <- StochasticGrid(start, path_disturb, n_grid, 10, TRUE)
}
\author{Jeremy Yee}
|
2783f1570eeeccce47866b707ec762770e100897
|
ba6c7b1fad0a804596956468c71e4605f239fabd
|
/man/print.metamodel.Rd
|
6349829972f6888376a0e60606a8d647b91d9267
|
[] |
no_license
|
cran/dampack
|
4d39c2cffc5195b50bd34f76b1039c8ae8991f75
|
4d5c2e8329b415ee7620335a5c77d151b41c52cc
|
refs/heads/master
| 2023-05-07T02:06:21.051605
| 2021-05-30T20:40:03
| 2021-05-30T20:40:03
| 349,128,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 371
|
rd
|
print.metamodel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metamodel.R
\name{print.metamodel}
\alias{print.metamodel}
\title{Print metamodel}
\usage{
\method{print}{metamodel}(x, ...)
}
\arguments{
\item{x}{metamodel to print}
\item{...}{further arguments to print}
}
\value{
None (invisible NULL)
}
\description{
Print metamodel
}
|
3549df282faa27014e18e00398b9c985c27a3e3f
|
70b210f63f8a1b1942037d8b707510e08abb740b
|
/in power bi/create chart.6.r
|
e5f10b1255dd2ed07f09e9bd6fdce8a41f283912
|
[] |
no_license
|
iemBS/r
|
3472cf3d12a57d4a4b6a943e268c2e45e4926a45
|
63de0cca36c3deda56d748608d3cecacfe14f040
|
refs/heads/master
| 2022-01-11T15:47:28.781455
| 2019-05-20T08:37:29
| 2019-05-20T08:37:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
create chart.6.r
|
# Power BI R visual: `dataset` is injected by the host with the columns
# OpportunityCreateMonth, DaysToImplementation and OpportunityCount.
# Tag each row Positive/Negative by the sign of DaysToImplementation.
dataset$color <- "Negative"
dataset$color[dataset$DaysToImplementation >= 0] <- "Positive"
# Chart title includes the total opportunity count with thousands separators.
chartTitle <- paste("Days to Implementation for",format(sum(dataset$OpportunityCount,na.rm=TRUE),big.mark=",",scientific=FALSE));
chartTitle <- paste(chartTitle,"opportunities");
library(ggplot2)
# Bubble scatter: month on x, days-to-implementation on y, bubble size by
# opportunity count, coloured by the Positive/Negative tag above.
ggplot(dataset,aes(x=OpportunityCreateMonth,y=DaysToImplementation,size=OpportunityCount,colour=color)) +
geom_point(alpha=0.3) +
xlab("Opportunity Created Month") +
ylab("Days to Implementation") +
labs(size="Opportunity Count", colour="Positive & Negative Age", title = chartTitle) +
scale_colour_discrete(limits=c("Negative","Positive")) +
theme(axis.text.x = element_text(angle=60, hjust=1));
|
3e03e0c001038ec345483fc9555a073a4831421a
|
55f150f1b5283eeb0bf890c83dee93188c59b50d
|
/Shell_version/VennDiagram.R
|
21c70f1d76f2e1256cac6470df58f9c6f584756d
|
[] |
no_license
|
ignacio3437/PreBait
|
d0443168cce0fb33e7902fb11ab08849bd5b3b8e
|
e135ce9e58a456124f28987294076b0d863debbb
|
refs/heads/master
| 2021-01-22T13:30:35.058630
| 2018-11-28T01:26:59
| 2018-11-28T01:26:59
| 100,669,296
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 530
|
r
|
VennDiagram.R
|
library(VennDiagram)
library(RColorBrewer)
#display.brewer.all()
# Read the three gene lists (single-column CSVs with a header row).
fast5<-read.csv(file="Fast5_genelist.txt",header=T)
nudis2<-read.csv(file="Nudis2_genelist.txt",header=T)
teas<-read.csv(file="Teas_genelist.txt",header=T)
# c() on data.frames concatenates their columns into one (named) list of
# gene vectors, the input format venn.diagram() expects.
geneLS<-c(fast5,nudis2,teas)
#Print last few entries
lapply(geneLS,tail)
#check names
names(geneLS)
#Make VennDiagram with colorbrewer pretty colors
venn.plot<-venn.diagram(geneLS,NULL,fill=brewer.pal(3,"Pastel2"))
# Draw once on the active device (interactive preview)...
grid.draw(venn.plot)
pdf("VennDiagramBaits.pdf")
# ...then again into the PDF device. NOTE(review): grid.draw() is called
# for its side effect; assigning its result to `plot` has no purpose.
plot<-grid.draw(venn.plot)
dev.off()
|
b0b017e50b465386aac53412f699d0e54ef6bf6e
|
57eb613a446a89e08918c18e4a2ef5b7904754a3
|
/man/parentNames-set.Rd
|
01e0cd9fc296f971aa035e46200bffe76e73d11d
|
[] |
no_license
|
kate-crosby/ProgenyArray
|
c0fec380460e1d21d16477a05d05361e88e59f70
|
6fde9526f0bcb953251a28473a7c042b40254211
|
refs/heads/master
| 2020-12-28T23:49:59.252767
| 2015-05-15T23:44:22
| 2015-05-15T23:44:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
rd
|
parentNames-set.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AllGenerics.R
\name{parentNames<-}
\alias{parentNames<-}
\title{Setter for parents sample names}
\usage{
parentNames(x) <- value
}
\description{
Setter for parents sample names
}
|
b1b2ea097af4c20c3de85a61c82f6f710777ad3d
|
e385a183d5b0fedf407192c6a74f166f8f57876c
|
/snippet1.R
|
60eb0e1f3c0a3a24853a72915734a58e8b2fbd5a
|
[] |
no_license
|
mikeydavison/bbasgdemo
|
c0c53fa238e26c8e449c9455603e58ceaa1b3956
|
187ff56e309fcb3f8052ef034d92fb4eec6b3771
|
refs/heads/master
| 2021-01-21T10:30:00.627563
| 2017-06-01T14:38:07
| 2017-06-01T14:38:07
| 91,693,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,197
|
r
|
snippet1.R
|
# Azure ML Studio R script: maml.* functions are provided by the host.
# Builds per-player-per-year batting statistics and all-star labels.
# Map 1-based optional input ports to variables
allstar <- maml.mapInputPort(1) # class: data.frame
batting <- maml.mapInputPort(2) # class: data.frame
# Contents of optional Zip port are in ./src/
# source("src/yourfile.R");
# load("src/yourData.rdata");
library(dplyr)
#merge players who played on multiple teams in a year into a single row
g = group_by(batting, playerID, yearID)
batting_grp = dplyr::summarise(g, G=sum(G), AB=sum(AB), R=sum(R), H=sum(H), X2B=sum(X2B), X3B=sum(X3B), HR=sum(HR), RBI=sum(RBI), SB=sum(SB), BB=sum(BB), SO=sum(SO), IBB=sum(IBB), HBP=sum(HBP), SH=sum(SH), SF=sum(SF))
#add batting stats: batting average (BA), plate appearances (PA),
#on-base events (OB) and on-base percentage (OBP; 0 when AB == 0)
batting_grp = filter(batting_grp, AB>0) %>% mutate(BA = round(H/AB,3)) %>% mutate(PA = AB+BB+IBB+HBP+SH+SF) %>% mutate(OB = H + BB + IBB + HBP) %>% mutate (OBP = 0 + (AB > 0) * round(OB/PA, 3) )
#total bases (TB), slugging (SLG) and on-base-plus-slugging (OPS)
batting_grp = mutate(batting_grp, TB = H + X2B + 2 * X3B + 3 * HR) %>% mutate(SLG = round(TB/AB, 3)) %>% mutate(OPS = SLG+OBP)
#merge batting and all star history (left join keeps non-all-stars)
df = merge(batting_grp, allstar, by=c("playerID", "yearID"), all.x = TRUE)
#add indicator of whether player was all star last year#all star history
aspy = mutate(allstar, nextYear = yearID+1) %>% mutate(IsAllStarPrevYear=TRUE) %>% select(playerID, yearID=nextYear, IsAllStarPrevYear)
aspy[is.na(aspy)] = FALSE
df = merge(df, aspy, by=c("playerID", "yearID"), all.x = TRUE)
#rows with no match above get NA; treat those as "not an all-star last year"
ind = is.na(df$IsAllStarPrevYear)
df$IsAllStarPrevYear[ind] = FALSE
df$IsAllStarPrevYear = as.factor(df$IsAllStarPrevYear)
#a row is an all-star year iff the all-star join matched (gameNum present)
df = mutate(df, IsAllStar = as.factor(!is.na(df$gameNum)))
#NOTE(review): position-specific labeling below was abandoned; kept for
#reference but intentionally disabled.
#df$AllStarClass = "None"
#ind = df$IsAllStar==TRUE & df$position=="G_c"
#df$AllStarClass[ind] = "AS"
#ind = df$IsAllStar==TRUE & df$position=="G_1b"
#df$AllStarClass[ind] = "AS"
#ind = df$IsAllStar==TRUE & df$position=="G_2b"
#df$AllStarClass[ind] = "AS"
#ind = df$IsAllStar==TRUE & df$position=="G_3b"
#df$AllStarClass[ind] = "AS"
#ind = df$IsAllStar==TRUE & df$position=="G_ss"
#df$AllStarClass[ind] = "AS"
#ind = df$IsAllStar==TRUE & df$position=="G_of"
#df$AllStarClass[ind] = "AS"
#df$AllStarClass = as.factor(df$AllStarClass)
df$AllStarClass=as.factor(df$IsAllStar)
# Select data.frame to be sent to the output Dataset port
maml.mapOutputPort("df");
|
80fb0e5a760ba0328effbb2e2c1195e7b5461fd0
|
64cd66c599bcaf7f14a599627be7c7b04123ca97
|
/geneticdrift/geneticdrift.R
|
0baffa87d71ae487ee9af99abebce37dc8a5bf19
|
[] |
no_license
|
ldutoit/popgenshinyapps
|
724dd57a3fde5fc037b46bfe8188aefa6a570897
|
6d105b70771a5f94f6494c1e1436cf3fe021fdd7
|
refs/heads/master
| 2020-08-10T06:30:26.239389
| 2019-10-10T20:54:31
| 2019-10-10T20:54:31
| 214,283,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
geneticdrift.R
|
# NOTE(review): library(shiny) is preferred over require() for a hard
# dependency (require() only warns on failure).
require(shiny)
# Define UI: a single slider (population size N) and one plot ----
ui <- fluidPage(
  # App title ----
  titlePanel("Genetic drift"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: Slider for the population size N ----
      sliderInput(inputId = "N",
                  label = "Population size",
                  min = 2,
                  max = 10000,
                  value = 100)
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      h6("This is a simple model of genetic drift starting with 10 alleles at frequency = 0.5. Experience how the size of the population limits the loss of genetic diversity by limiting the effect of random sampling."),
      # Output: allele-frequency trajectories plot ----
      plotOutput(outputId = "distPlot")
    )
  )
)
server <- function(input, output) {
  # renderPlot is reactive: the simulation re-runs whenever input$N changes.
  output$distPlot <- renderPlot({
    N<-input$N
    # Empty frame: allele frequency (y) over 100 generations (x).
    plot(1,0, type="n", xlim=c(1,100), ylim=c(0,1), xlab="Generations", ylab="frequency")
    # Simulate 10 independent replicate populations (one colour each),
    # all starting at allele frequency 0.5.
    for (Nallele in 1:10){
      # NOTE(review): for odd N, rep(..., N/2) truncates, so the initial
      # pool holds N-1 alleles -- confirm whether odd sizes matter here.
      alleles <- c(rep("A",N/2), rep("a",N/2))
      traj<-rep(NA,100)
      for(i in 1:100){
        # Wright-Fisher step: the next generation is a random sample,
        # with replacement, of N alleles from the current generation.
        alleles <- sample(alleles, N, replace=TRUE)
        traj[i]<-length(alleles[alleles=="A"])
      }
      # Plot this replicate's "A"-allele frequency trajectory.
      lines(1:100,traj/N, pch=19, col=rainbow(10)[Nallele],cex=0.5,type="b")
    }
  })
}
# Launch the app.
shinyApp(ui = ui, server = server)
|
1867f02ca76b91295f5a71178ba6c391757a3f8c
|
0763807ee1864196bae6bf9b7b7b2de997c84bac
|
/tests/testthat.R
|
a9585d8d136ee40a9e9961134eb2784e71ead8a7
|
[
"MIT"
] |
permissive
|
federman/shinylego
|
56a84c2dedc131c96ba7e402bcb9a1ee62f0ceec
|
8bff199b91f5a5abbc038556c28434d293ee7b49
|
refs/heads/master
| 2021-10-28T15:20:39.107911
| 2021-09-29T20:58:04
| 2021-09-29T20:58:04
| 179,860,006
| 0
| 0
|
NOASSERTION
| 2019-04-06T16:44:08
| 2019-04-06T16:44:08
| null |
UTF-8
|
R
| false
| false
| 62
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/
# for the shinylego package (invoked by R CMD check).
library(testthat)
library(shinylego)
test_check("shinylego")
|
4af9fb7762f72eca80e1897e766a5166fd318be9
|
600b76919c94c119a6d58c8041650dc8b055addc
|
/R/configureHyperspark.R
|
f33091f12b6041d9d385327153133a8038adbb65
|
[] |
no_license
|
jarnos97/rPackageHypeRSpark
|
b2f3808056be259300f43739f61e771eb37fac58
|
2be8f15c38e2017aeb66e7913a88d54b7e87c3f4
|
refs/heads/main
| 2023-06-05T20:20:53.874175
| 2021-06-29T10:01:55
| 2021-06-29T10:01:55
| 356,249,442
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,506
|
r
|
configureHyperspark.R
|
#' @title Configure HyperSpark
#'
#' @description Lets users configure HyperSpark. The result is a written .scala
#' file (MainClass.scala), which is added to the HyperSpark source code.
#'
#' @param setProblem Set the problem to be solved.
#' @param data Set the data for the application.
#' @param setStoppingCondition Set the stopping condition for algorithms to
#' terminate.
#' @param stoppingValue Set the stopping value for the stopping condition.
#' @param setAlgorithms Set the algorithms to execute. Always include
#' parentheses, e.g. GAAlgorithm()
#' @param numOfAlgorithms Number of copies to run when a single algorithm is
#' supplied; leave as NONE when passing several algorithms.
#'
#' @return NULL
#'
#' @examples hypeRSpark::configureHyperSpark(setProblem = 'PfsProblem',
#' data = 'inst_ta054.txt', setStoppingCondition = 'TimeExpired',
#' stoppingValue = 180000, setAlgorithms = "TSAlgorithm(maxTabooListSize=7)",
#' numOfAlgorithms = 34, setNDefaultInitialSeeds = 34,
#' setDeploymentMesos = '("spark://207.184.161.138", 7077)',
#' setNumberOfIterations = 6, setSeedingStrategy = 'SeedPlusSlidingWindow(10)')
#'
#' @export configureHyperSpark
#'
#' @importFrom rt3 NONE
configureHyperSpark <- function(setProblem,
                                data,
                                setStoppingCondition,
                                stoppingValue,
                                setAlgorithms,
                                numOfAlgorithms = NONE,
                                setNumberOfIterations = NONE,
                                setInitialSeeds = NONE,
                                setNInitialSeeds= NONE,
                                setNDefaultInitialSeeds = NONE,
                                setSeedingStrategy = NONE,
                                setProperty = NONE,
                                setMapReduceHandler = NONE,
                                setRandomSeed = NONE,
                                setSparkMaster = NONE,
                                setAppName = NONE,
                                setNumberOfExecutors = NONE,
                                setNumberOfResultingRDDPartitions = NONE,
                                setDeploymentLocalNoParallelism = NONE,
                                setDeploymentLocalNumExecutors = NONE,
                                setDeploymentLocalMaxCores = NONE,
                                setDeploymentSpark = NONE,
                                setDeploymentMesos = NONE,
                                setDeploymentYarnClient = NONE,
                                setDeploymentYarnCluster = NONE){
  # Target Scala file inside the HyperSpark source tree (overwritten).
  class_file <- "HyperSpark-master/src/main/scala/it/polimi/hyperh/apps/MainClass.scala"

  # Create the scala file and write the code every configuration needs.
  initializeFile <- function(){
    constantCode <- 'package it.polimi.hyperh.apps
import it.polimi.hyperh.solution.EvaluatedSolution
import it.polimi.hyperh.spark.Framework
import it.polimi.hyperh.spark.FrameworkConf
import java.io._
'
    # Create scala file with constant code
    fileConn <- file(class_file) # File is overwritten if it already exists.
    writeLines(constantCode, fileConn)
    close(fileConn) # Close connection here, we cannot use this to append
  } # end initializeFile

  # Append the imports implied by the chosen problem, algorithms, stopping
  # condition, map-reduce handler and seeding strategy.
  addImports <- function(){
    # Stopping condition lives in the core spark package.
    stoppingImport <- sprintf("import it.polimi.hyperh.spark.%s", setStoppingCondition)
    write(stoppingImport, class_file, append = T)
    # Derive the problem's package prefix, e.g. "PfsProblem" -> "pfs".
    problemPath <- tolower(setProblem)
    problemPath <- stringr::str_remove(problemPath, "problem")
    problemImport <- sprintf("import %sp.problem.%s", problemPath, setProblem)
    write(problemImport, class_file, append = T)
    # One import per algorithm; strip the "(...)" constructor arguments.
    for (alg in setAlgorithms){
      alg <- strsplit(alg, "\\(")[[1]][1]
      alg_import <- sprintf("import %sp.algorithms.%s", problemPath, alg)
      write(alg_import, class_file, append = T)
    }
    if (setMapReduceHandler != NONE){
      mp_import <- sprintf("import it.polimi.hyperh.spark.%s", setMapReduceHandler)
      write(mp_import, class_file, append = T)
    }
    if (setSeedingStrategy != NONE){
      ss <- strsplit(setSeedingStrategy, "\\(")[[1]][1]
      # SameSeeds is problem-independent; all other strategies are
      # problem-specific and live in the problem's spark package.
      if (ss == 'SameSeeds'){
        ss_import <- sprintf("import it.polimi.hyperh.spark.%s", ss)
      } else {
        ss_import <- sprintf("import %sp.spark.%s", problemPath, ss)
      }
      write(ss_import, class_file, append = T)
    }
  } # end addImports

  # Open the MainClass object and its main() method.
  createObject <- function(){
    object <- '
object MainClass{
  def main(args: Array[String]) {
'
    write(object, class_file, append = T)
  } # end createObject

  # Emit the Scala vals for the problem and the algorithm instances.
  defineVariables <- function(){
    # NOTE(review): fromResources is problem specific -- confirm every
    # supported problem class offers it.
    defProblem <- sprintf('    val problem = %s.fromResources(name="%s")',
                          setProblem, data)
    write(defProblem, class_file, append = T)
    if (numOfAlgorithms != NONE){ # i.e. a single algorithm replicated N times
      defAlgorithm <- sprintf('    val makeAlgo = new %s', setAlgorithms)
      write(defAlgorithm, class_file, append = T)
      defNumOfAlgorithms <- sprintf('    val numOfAlgorithms = %s', numOfAlgorithms)
      write(defNumOfAlgorithms, class_file, append = T)
    } else{
      for (alg in setAlgorithms){
        alg2 <- strsplit(alg, "\\(")[[1]][1]
        defAlgorithm <- sprintf('    val alg%s = new %s', alg2, alg)
        write(defAlgorithm, class_file, append = T)
      }
    }
  } # end defineVariables

  # Helper for configuration(): render an R character vector as a Scala
  # Array(...) literal.
  string_array <- function(algs){
    text <- 'Array('
    index <- 1
    for (item in algs){
      if (index == length(algs)){ # if it is the last item
        text <- paste0(text, item, ')')
      } else {
        text <- paste0(text, item, ', ')
      }
      index <- index + 1
    }
    return(text)
  }

  # Emit the FrameworkConf builder chain from all supplied parameters.
  configuration <- function(){
    frameworkConf <- '    val conf = new FrameworkConf()'
    problem <- '      .setProblem(problem)'
    if (numOfAlgorithms != NONE){ # single algorithm replicated N times
      defAlgorithms <- '      .setNAlgorithms(makeAlgo, numOfAlgorithms)'
    } else { # explicit array of algorithm instances
      algs <- NULL
      for (alg in setAlgorithms){
        alg <- strsplit(alg, "\\(")[[1]][1]
        alg_code <- sprintf('alg%s', alg)
        algs <- c(algs, alg_code)
      }
      algs <- string_array(algs)
      defAlgorithms <- sprintf('      .setAlgorithms(%s)', algs)
    }
    defStoppingCondition <- sprintf("      .setStoppingCondition(new %s(%s))",
                                    setStoppingCondition, stoppingValue)
    for (line in c(frameworkConf, problem, defAlgorithms, defStoppingCondition)){
      write(line, class_file, append = T)
    }

    # Deployment mode: at most one of these should differ from NONE; the
    # first match wins and the loop stops.
    deployment_names <- c('setDeploymentLocalNoParallelism',
                          'setDeploymentLocalNumExecutors',
                          'setDeploymentLocalMaxCores', 'setDeploymentSpark',
                          'setDeploymentMesos', 'setDeploymentYarnClient',
                          'setDeploymentYarnCluster')
    deployment_params <- c(setDeploymentLocalNoParallelism,
                           setDeploymentLocalNumExecutors,
                           setDeploymentLocalMaxCores, setDeploymentSpark,
                           setDeploymentMesos, setDeploymentYarnClient,
                           setDeploymentYarnCluster)
    index <- 1
    for (deployment in deployment_params){
      if (deployment != NONE){
        if (deployment == T){ # deployment mode takes no parameter
          defDeployment <- sprintf('      .%s()', deployment_names[index])
        } else { # user supplied the parameter list as a string, e.g. '("host", 7077)'
          defDeployment <- sprintf('      .%s%s', deployment_names[index],
                                   deployment)
        }
        write(defDeployment, class_file, append = T)
        break # Only one deployment mode should not be NONE, thus we can break
      }
      index <- index + 1
    }

    # Optional scalar parameters (seeding strategy and handler are below).
    # Bug fix: 'setNInitialSeeds' was previously listed as a duplicate
    # 'setInitialSeeds', so setNInitialSeeds values were emitted as
    # .setInitialSeeds(...) in the generated Scala.
    # NOTE(review): assumes FrameworkConf exposes setNInitialSeeds --
    # confirm against the Scala API.
    param_names <- c('setNumberOfIterations', 'setInitialSeeds',
                     'setNInitialSeeds', 'setNDefaultInitialSeeds',
                     'setProperty', 'setRandomSeed',
                     'setSparkMaster', 'setAppName', 'setNumberOfExecutors',
                     'setNumberOfResultingRDDPartitions')
    optional_params <- c(setNumberOfIterations, setInitialSeeds,
                         setNInitialSeeds, setNDefaultInitialSeeds,
                         setProperty, setRandomSeed,
                         setSparkMaster, setAppName, setNumberOfExecutors,
                         setNumberOfResultingRDDPartitions)
    index <- 1
    for (param in optional_params){
      if (param != NONE){
        param_line <- sprintf('      .%s(%s)', param_names[index], param)
        write(param_line, class_file, append = T)
      }
      index <- index + 1
    }
    if (setSeedingStrategy != NONE){
      s_str <- sprintf('      .setSeedingStrategy(new %s)', setSeedingStrategy)
      write(s_str, class_file, append = T)
    }
    if (setMapReduceHandler != NONE){
      s_str <- sprintf('      .setMapReduceHandler(new %s())', setMapReduceHandler)
      write(s_str, class_file, append = T)
    }
  } # End configuration

  # Append the run-and-save-solution epilogue and close main()/MainClass.
  addEnding <- function(){
    # NOTE(review): the output filename "solution.txt." has a trailing dot
    # in the original generator; preserved for backward compatibility --
    # confirm whether downstream tooling expects it.
    ending <- '
    val solution: EvaluatedSolution = Framework.run(conf)
    // Write solutions to file
    val fw = new FileWriter("solution.txt.")
    try {
      fw.write(solution.toString)
    }
    finally fw.close()
    println(solution)
  }
}
'
    write(ending, class_file, append = T)
  } # end addEnding

  # Execute the pipeline: create file, imports, object, vals, conf, ending.
  initializeFile()
  addImports()
  createObject()
  defineVariables()
  configuration()
  addEnding()
}
|
3de3ab5db5be1a1df6ff901383ede66a76fcee66
|
16d6f9a925fb8ae78938baf67173afc7b4e3f94b
|
/tests/testthat/test-io-bedGraph.R
|
4a26c21be6a477fdd7c4c56d4eb118f69a33b1da
|
[] |
no_license
|
liupfskygre/plyranges
|
809851435ac1d9a60df8400b8c7c409862966418
|
c82b7eb8ec31478e0f8439207d20897f0c102a6f
|
refs/heads/master
| 2023-06-04T10:52:44.864177
| 2021-06-28T01:15:43
| 2021-06-28T01:15:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,814
|
r
|
test-io-bedGraph.R
|
context("reading/writing bedGraph files")
# tests adapted from rtracklayer
# Build the GRanges object that rtracklayer's test.bedGraph fixture should
# parse to: nine 300-bp ranges shifted to chr19/chr17/chr18 with scores
# from -1 to 1 in steps of 0.25. A genome is attached only when the
# supplied seqinfo carries one.
createCorrectGR <- function(seqinfo) {
  ir <- shift(IRanges(end = seq(300, 2700, by = 300), width = 300), 59102000)
  score <- seq(-1, 1, by = 0.25)
  space <- factor(c(rep("chr19", 6), "chr17", rep("chr18", 2)),
                  seqlevels(seqinfo))
  correct_gr <- GRanges(space, ir, score = score)
  if (!any(is.na(genome(seqinfo))))
    genome(correct_gr) <- unname(genome(seqinfo)[1])
  seqinfo(correct_gr) <- seqinfo
  correct_gr
}
# Reading: plain path, BEDGraphFile wrapper and connection inputs must all
# yield the reference GRanges; overlap filtering and genome_info (string or
# GRanges) are also checked.
test_that("read_bed_graph returns correct GRanges", {
  test_path <- system.file("tests", package = "rtracklayer")
  test_bg <- file.path(test_path, "test.bedGraph")
  correct_gr <- createCorrectGR(Seqinfo(c("chr19", "chr17", "chr18")))
  test_gr <- read_bed_graph(test_bg)
  expect_identical(test_gr, correct_gr)
  # NOTE(review): test_bg_file is constructed but read_bed_graph is called
  # with test_bg again -- the BEDGraphFile branch may not actually be
  # exercised here; confirm intent.
  test_bg_file <- rtracklayer::BEDGraphFile(test_bg)
  test_gr <- read_bed_graph(test_bg)
  expect_identical(test_gr, correct_gr)
  test_bg_con <- file(test_bg)
  test_gr <- read_bed_graph(test_bg_con)
  expect_identical(test_gr, correct_gr)
  # check overlaps
  correct_which <- filter_by_overlaps(correct_gr, correct_gr[3:4])
  test_gr <- read_bed_graph(test_bg, overlap_ranges = correct_gr[3:4])
  expect_identical(correct_which, test_gr)
  # test genome_info: both a genome name and a genome-info GRanges
  skip_if_not(
    requireNamespace("BSgenome.Hsapiens.UCSC.hg19", quietly = TRUE),
    message = "'BSgenome.Hsapiens.UCSC.hg19' must be installed to run tests"
  )
  library(BSgenome.Hsapiens.UCSC.hg19)
  hg19_seqinfo <- SeqinfoForBSGenome("hg19")
  correct_genome <- createCorrectGR(hg19_seqinfo)
  test_gr <- read_bed_graph(test_bg, genome_info = "hg19")
  expect_identical(correct_genome, test_gr)
  hg19_gr <- get_genome_info(hg19_seqinfo)
  test_gr <- read_bed_graph(test_bg, genome_info = hg19_gr)
  expect_identical(correct_genome, test_gr)
})
# Writing: round-trip through plain path, unknown extension, BEDGraphFile
# wrapper and gzip-compressed output.
test_that("writing bedGraph files works", {
  correct_gr <- createCorrectGR(Seqinfo(c("chr19", "chr17", "chr18")))
  test_bg_out <- file.path(tempdir(), "test.bedGraph")
  on.exit(unlink(test_bg_out))
  write_bed_graph(correct_gr, test_bg_out)
  test_gr <- read_bed_graph(test_bg_out)
  expect_identical(correct_gr, test_gr)
  # NOTE(review): after writing test.foo the file read back is test_bg_out,
  # not test_foo_out -- the .foo round-trip is not actually verified.
  test_foo_out <- file.path(tempdir(), "test.foo")
  write_bed_graph(correct_gr, test_foo_out)
  on.exit(unlink(test_foo_out))
  test_gr <- read_bed_graph(test_bg_out)
  expect_identical(correct_gr, test_gr)
  test_bg_out_file <- rtracklayer::BEDGraphFile(test_bg_out)
  write_bed_graph(correct_gr, test_bg_out_file)
  test_gr <- read_bed_graph(test_bg_out_file)
  expect_identical(correct_gr, test_gr)
  # gzipped output
  test_bg_gz <- paste(test_bg_out, ".gz", sep = "")
  on.exit(unlink(test_bg_gz))
  write_bed_graph(correct_gr, test_bg_gz)
  test_gr <- read_bed_graph(test_bg_gz)
  expect_identical(correct_gr, test_gr)
})
|
03dd9eddd3ac9f056e440f96df9ecff04951e117
|
288bc69d98ed160a01b3c6af141b6c2797b88ec0
|
/cachematrix.R
|
324da01838d037fc58d8998bca20c70580b88fb3
|
[] |
no_license
|
pasterka/ProgrammingAssignment2
|
a862a30901377aba56e90164dffa276ed9a3b03c
|
fd8eba4748134867c0e30d28a3eaf471f18bfb01
|
refs/heads/master
| 2021-01-19T07:16:02.989317
| 2015-07-24T20:46:06
| 2015-07-24T20:46:06
| 39,655,189
| 0
| 0
| null | 2015-07-24T20:18:11
| 2015-07-24T20:18:11
| null |
UTF-8
|
R
| false
| false
| 848
|
r
|
cachematrix.R
|
## These two functions cache the inverse of a matrix: repeated requests
## for the inverse of the same matrix are served from the cache instead
## of being recomputed with solve().
## This function creates a matrix object
## and adds functions to display it and inverse it
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap matrix `x` in a list of closures that share `x` and a cached
  ## inverse. The cache starts empty and is cleared whenever the matrix
  ## is replaced via set().
  cached_inverse <- NULL

  set <- function(y) {
    ## Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  ## Store / retrieve the cached inverse (NULL means "not computed yet").
  setinverse <- function(solve) cached_inverse <<- solve
  getinverse <- function() cached_inverse

  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the matrix returned by the function above.
## If the inverse has already been calculated, the inverse is returned from the cache
cacheSolve <- function(x, ...) {
  ## Return the inverse of the cache-matrix `x` (built by
  ## makeCacheMatrix), computing it with solve() on the first call and
  ## reusing the cached value afterwards. Extra arguments are forwarded
  ## to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
  } else {
    message("getting cached data")
    cached
  }
}
|
048c74740802b5ffc8e5699928d0248be49fc62e
|
c166cee1a5da825ada5eda6ab06a64d624c119d7
|
/Code/KableWon/make_kable_one.R
|
07d24747c0256549a462ea72a9b4faa3515d9a31
|
[] |
no_license
|
FreedomSoldier-Liam/Number-and-timing-of-BP-measurements
|
ccec31560eb860fc43190968886fc51091ba3499
|
a9181cf86dc0cbe5a2e9d5406764c7466a03f5ca
|
refs/heads/master
| 2021-03-25T04:13:37.668823
| 2019-05-22T22:26:10
| 2019-05-22T22:26:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,176
|
r
|
make_kable_one.R
|
# Render a "table one" object as a styled HTML kable.
# Expects tbl_one to carry: $table (the matrix/data.frame to print),
# $caption, $notes, $strat$variable_name (stratifying variable) and
# $meta_variables (per-variable metadata with $group and $levels).
make_kable_one <- function(tbl_one){
  # Drop the stratifying variable from the metadata before grouping.
  # NOTE(review): grep() treats variable_name as a regex and may match
  # more than one entry -- confirm names are regex-safe.
  omit_indx=grep(tbl_one$strat$variable_name,names(tbl_one$meta_variables))
  groups = map_chr(tbl_one$meta_variables[-omit_indx],~paste(.$group))
  group_labels=unique(groups)
  breaks = c(0)
  # Each categorical variable with >= 3 levels occupies 1 header row plus
  # one row per level; others occupy a single row.
  nlevels=map_dbl(tbl_one$meta_variables[-omit_indx],~length(.$levels))
  nlevels[nlevels<3]=0
  # Cumulative row index where each variable group ends.
  # NOTE(review): `tbl_one$meta_variable` (singular) below is likely a typo
  # for `meta_variables`; harmless today because `breaks` only feeds the
  # commented-out group_rows() code -- fix before re-enabling it.
  for(i in 1:length(group_labels)){
    breaks[i+1] = breaks[i] + sum(
      map_dbl(tbl_one$meta_variable[-omit_indx],
              ~.$group==group_labels[i])*(1+nlevels)
    )
  }
  # Build the styled HTML table: striped/hover, wide first column,
  # indentation for level rows (detected by a leading space in rownames)
  # and symbol-style footnotes.
  kab = suppressWarnings(
    knitr::kable(
      tbl_one$table,format='html',align=rep('c', ncol(tbl_one$table)),
      caption = tbl_one$caption, booktabs = TRUE,escape = FALSE)%>%
      kable_styling(bootstrap_options=c("striped","hover"),full_width=T)%>%
      column_spec(1, width = "6cm")%>%
      add_indent(which(grepl(" ",rownames(tbl_one$table))))%>%
      add_footnote(tbl_one$notes,notation='symbol',threeparttable=T)
  )
  # Disabled: per-group row headers driven by `breaks` (see note above).
  # for(i in 1:length(group_labels)){
  #   kab %<>% group_rows(group_label = group_labels[i],
  #                       start_row = breaks[i]+1,
  #                       end_row = breaks[i+1])
  # }
  return(kab)
}
|
32cdbd92aca54fc63023ca129bcbf06c41fe9d9a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/biwavelet/examples/wtc.Rd.R
|
0333b91bb3a6a1de4e7b7f013e5e428bc2717902
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
r
|
wtc.Rd.R
|
library(biwavelet)
### Name: wtc
### Title: Compute wavelet coherence
### Aliases: wtc
### ** Examples
# Two toy time series: a shared time index plus unseeded white noise
# (no set.seed(), so results differ between runs).
t1 <- cbind(1:100, rnorm(100))
t2 <- cbind(1:100, rnorm(100))
## Wavelet coherence
# NOTE(review): nrands = 10 keeps this example fast; reliable significance
# testing normally needs far more randomisations (the package default is
# larger) -- confirm before using for real analysis.
wtc.t1t2 <- wtc(t1, t2, nrands = 10)
## Plot wavelet coherence and phase difference (arrows)
## Make room to the right for the color bar
par(oma = c(0, 0, 0, 1), mar = c(5, 4, 4, 5) + 0.1)
plot(wtc.t1t2, plot.cb = TRUE, plot.phase = TRUE)
|
2bc7b82118819b39af6a66890d0473c105516202
|
efc5c6096121095cadc37acd42e03fadde89eb06
|
/R/model_comparison/models/ardl_model.R
|
3297160e9e29ea8a889dfaf567424fc907ef2dc9
|
[] |
no_license
|
AlexAfanasev/bookdown_thesis
|
b04396739f2495dd60a5e5abfeccd916acaf2545
|
1cfe343618b5fca6e53a8c786cb6792589edc0c7
|
refs/heads/master
| 2023-06-03T11:00:36.998514
| 2021-06-17T16:44:46
| 2021-06-17T16:44:46
| 331,723,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,061
|
r
|
ardl_model.R
|
# SETUP: load the project's pomp helpers, the observation data and the
# covariate table (pd_pomp.R defines rw_latent_lpd_dmeasure; covars.R
# defines `covars`).
source(here::here("R", "pd_pomp.R"))
y <- read.csv(here::here("data", "final_dataset.csv"))
source(here::here("R", "covars.R"))
# QUESTION: WHICH MODEL TO CHOSE???
# MODEL 2: MODEL WITH AR & COVARIATES
# Parameters are stored on unconstrained scales: sigma_u / sigma_e are
# log-standard-deviations (exp() applied where used) and phi is an
# atanh-transformed AR coefficient (tanh() applied where used).
model_2 <- pomp::pomp(
  data = y[, c(1, 2)], times = "time", t0 = 0,
  rinit = function(e_lpd_0, ...) {
    return(c(e_lpd = e_lpd_0))
  },
  # Latent AR(1)-with-covariates process, one step per time unit.
  rprocess = pomp::discrete_time(
    pomp::Csnippet(
      "
      e_lpd = (
        beta_0
        + tanh(phi)*e_lpd
        + beta_1*mys
        + beta_2*ms
        + rnorm(0, exp(sigma_u))
      );
      "
    ),
    delta.t = 1
  ),
  dmeasure = rw_latent_lpd_dmeasure,
  statenames = c("e_lpd"),
  paramnames = c("sigma_u", "sigma_e", "e_lpd_0", "beta_0", "phi",
                 "beta_1", "beta_2"),
  covar = pomp::covariate_table(covars, times = "time"),
  covarnames = colnames(covars[, -1])
)
rm(covars, y)
# Starting parameter vector (on the transformed scales described above).
theta <- c(
  e_lpd_0 = 3.5, sigma_e = log(0.02), sigma_u = log(0.05), phi = atanh(0.95),
  beta_0 = 0.12, beta_1 = 0.08, beta_2 = 0.02
)
# Particle MCMC with a diagonal Gaussian random-walk proposal.
# NOTE(review): the proposal (and the dprior arguments) include beta_3,
# beta_4 and beta_5, which are not in the model's paramnames or in theta --
# likely left over from a larger model; confirm intent.
res <- pomp::pmcmc(
  model_2, Nmcmc = 500, Np = 2000,
  proposal = pomp::mvn.diag.rw(
    c(e_lpd_0 = 0.001, sigma_e = 0.015, sigma_u = 0.015, phi = 0.015,
      beta_0 = 0.001, beta_1 = 0.001, beta_2 = 0.001,
      beta_3 = 0.001, beta_4 = 0.001, beta_5 = 0.001)
  ),
  params = theta,
  dprior = function(sigma_u,
                    sigma_e,
                    phi,
                    e_lpd_0,
                    beta_0,
                    beta_1,
                    beta_2,
                    beta_3,
                    beta_4,
                    beta_5,
                    ...,
                    log) {
    # Jacobian-style prior terms for the transformed parameters.
    # NOTE(review): p_sigma_u is computed from sigma_e and p_sigma_e from
    # sigma_u -- the names look swapped, although the product `lik` (and
    # hence the prior density) is unaffected.
    p_sigma_u <- exp(sigma_e)
    p_sigma_e <- exp(sigma_u)
    p_phi <- (1 - tanh(phi) ^ 2)
    lik <- p_sigma_u * p_sigma_e * p_phi
    return(ifelse(log, log(lik), lik))
  }
)
# Repeated particle-filter log-likelihood evaluations at theta (to gauge
# Monte Carlo variability of the likelihood estimate).
a <- replicate(
  10, pomp::logLik(pomp::pfilter(model_2, Np = 2000, params = theta))
)
|
c89c856f0623ec58577fe8e0a39c06fbd287bbd7
|
acc80e88c26433639557f85c8b5f08cd3cdbe40a
|
/R/RunSomaticSignatures.R
|
f131f11574958b5c5bd20eaf417fcf7d2d4afc2a
|
[] |
no_license
|
WuyangFF95/SynSigRun
|
32cb430ee5ea30584908f2254be459afb8c9ca81
|
62655260a460ea120de1baa623d0987d45a5a6bc
|
refs/heads/master
| 2022-11-11T10:06:45.160948
| 2021-12-30T06:40:08
| 2021-12-30T06:40:08
| 222,591,376
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,957
|
r
|
RunSomaticSignatures.R
|
#' Install SomaticSignatures from Bioconductor (which also pulls in its
#' dependency, the NMF package), bootstrapping BiocManager first when it is
#' not yet available.
#'
#' @keywords internal
InstallSomaticSignatures <- function(){
  message("Installing SomaticSignatures from Bioconductor...\n")
  biocmanager_available <- requireNamespace("BiocManager", quietly = TRUE)
  if (!biocmanager_available) {
    utils::install.packages("BiocManager")
  }
  BiocManager::install("SomaticSignatures")
}
#' Run SomaticSignatures.NMF extraction and attribution on a spectra catalog file
#'
#' @param input.catalog File containing input spectra catalog.
#' Columns are samples (tumors), rows are mutation types.
#'
#' @param out.dir Directory that will be created for the output;
#' abort if it already exits. Log files will be in
#' \code{paste0(out.dir, "/tmp")}.
#'
#'
#' @param CPU.cores Number of CPUs to use in running
#' SomaticSignatures.NMF. For a server, 30 cores would be a good
#' choice; while for a PC, you may only choose 2-4 cores.
#' By default (CPU.cores = NULL), the CPU.cores would be equal
#' to \code{(parallel::detectCores())/2}, total number of CPUs
#' divided by 2.
#'
#' @param seedNumber Specify the pseudo-random seed number
#' used to run SomaticSignatures. Setting seed can make the
#' attribution of SomaticSignatures repeatable.
#' Default: 1.
#'
#' @param K.exact,K.range \code{K.exact} is the exact value for
#' the number of signatures active in spectra (K).
#' Specify \code{K.exact} if you know exactly how many signatures
#' are active in the \code{input.catalog}, which is the
#' \code{ICAMS}-formatted spectra file.
#'
#' \code{K.range} is A numeric vector \code{(K.min,K.max)}
#' of length 2 which tell SomaticSignatures.NMF to search the best
#' signature number active in spectra, K, in this range of Ks.
#' Specify \code{K.range} if you don't know how many signatures
#' are active in the \code{input.catalog}.
#'
#' WARNING: You must specify only one of \code{K.exact} or \code{K.range}!
#'
#' Default: NULL
#'
#' @param nrun.est.K Number of NMF runs for each possible number of signature.
#' This is used in the step to estimate the most plausible number
#' of signatures in input spectra catalog.
#'
#' @param nrun.extract number of NMF runs for extracting signatures and inferring
#' exposures.
#'
#' @param pConstant A small positive value (a.k.a. pseudocount)
#' to add to every entry in the \code{input.catalog}.
#' Specify a value ONLY if an "non-conformable arrays error"
#' is raised.
#'
#' @param save.diag Save object of class \code{MutationalSignatures} which
#' stores full results from multiple NMF decomposition runs into files below:
#' \itemize{
#' \item \code{assess.K.pdf} {RSS and explained variance at each K in \code{K.range}.
#' Used for manual selection of number of signatures (K).}
#' \item \code{assess.K.Rdata} {Full results for each K in \code{K.range}. Used for
#' diagnosing goodness of fit and stability.}
#' \item \code{extract.given.K.Rdata} {Full results when K is specified by \code{K.exact}
#' or selected by elbow-point method. Used for diagnosing accuracy of signature extraction.}
#' }
#'
#' Set to \code{TRUE} for diagnostic purposes, set to \code{FALSE} for cleaner
#' results.
#'
#' @param test.only If TRUE, only analyze the first 10 columns
#' read in from \code{input.catalog}.
#' Default: FALSE
#'
#' @param overwrite If TRUE, overwrite existing output.
#' Default: FALSE
#'
#' @return A list contains: \itemize{
#' \item $signature extracted signatures,
#' \item $exposure inferred exposures,
#' } of \code{SomaticSignatures.NMF}, invisibly.
#'
#' @details SomaticSignatures.NMF used approach in Hutchins et al. (2008)
#' to estimate \code{K}: it selects the first inflection point of
#' residual sum of squares (RSS) function by finding the smallest \code{K}
#' where the second derivate of RSS at its neighbouring \code{K}s have
#' opposite signs.
#'
#' This requires calculation of second derivative of residual sum
#' of squares (RSS) at >2 integers, and thus requires at least 3 \code{K}s
#' to be assessed.
#'
#' @references http://dx.doi.org/10.1093/bioinformatics/btn526
#'
#' @importFrom dplyr mutate filter select
#' @importFrom magrittr %>%
#' @importFrom tibble as_tibble tibble
#' @importFrom utils capture.output
#'
#' @export
RunSomaticSignatures <-
  function(input.catalog,
           out.dir,
           CPU.cores = NULL,
           seedNumber = 1,
           K.exact = NULL,
           K.range = NULL,
           nrun.est.K = 30,
           nrun.extract = 1,
           pConstant = NULL,
           save.diag = FALSE,
           test.only = FALSE,
           overwrite = FALSE) {
    # Check whether ONLY ONE of K.exact or K.range is specified.
    bool1 <- is.numeric(K.exact) & is.null(K.range)
    bool2 <- is.null(K.exact) & is.numeric(K.range) & length(K.range) == 2
    stopifnot(bool1 | bool2)
    # Install SomaticSignatures, if not found in library
    if ("SomaticSignatures" %in% rownames(utils::installed.packages()) == FALSE)
      InstallSomaticSignatures()
    # Set seed
    set.seed(seedNumber)
    seedInUse <- .Random.seed # Save the seed used so that we can restore the pseudorandom series
    RNGInUse <- RNGkind() # Save the random number generator (RNG) used
    # Read in spectra data from input.catalog file
    # spectra: spectra data.frame in ICAMS format
    spectra <- ICAMS::ReadCatalog(input.catalog,
                                  strict = FALSE)
    if (test.only) spectra <- spectra[ , 1:10]
    # convSpectra: convert the ICAMS-formatted spectra catalog
    # into a matrix which SomaticSignatures accepts:
    # 1. Remove the catalog related attributes in convSpectra
    # 2. Transpose the catalog
    convSpectra <- spectra
    class(convSpectra) <- "matrix"
    attr(convSpectra,"catalog.type") <- NULL
    attr(convSpectra,"region") <- NULL
    dimnames(convSpectra) <- dimnames(spectra)
    sample.number <- dim(spectra)[2]
    # Add pConstant (pseudocount) to convSpectra to avoid NMF errors on
    # zero-heavy catalogs.
    if(!is.null(pConstant)) convSpectra <- convSpectra + pConstant
    # Create output directory
    if (dir.exists(out.dir)) {
      # NOTE(review): "exits" is a typo for "exists" in this error message.
      if (!overwrite) stop(out.dir, " already exits")
    } else {
      dir.create(out.dir, recursive = T)
    }
    # CPU.cores specifies number of CPU cores to use.
    # If CPU.cores is not specified, CPU.cores will
    # be equal to the minimum of 30 or (total cores)/2
    if(is.null(CPU.cores)){
      CPU.cores = min(30,(parallel::detectCores())/2)
    } else {
      stopifnot(is.numeric(CPU.cores))
    }
    # Before running NMF package,
    # Load it explicitly to prevent errors.
    requireNamespace("NMF")
    # Run NMF using ICAMS-formatted spectra catalog
    # Determine the best number of signatures (K.best).
    # If K.exact is provided, use K.exact as the K.best.
    # If K.range is provided, determine K.best by doing raw extraction.
    if(bool1){
      K.best <- K.exact
      print(paste0("Assuming there are ",K.best," signatures active in input spectra."))
    }
    if(bool2){
      cat("\n==========================================\n")
      cat("Choosing the 1st inflection point of RSS function as K.best.\n")
      cat("\n==========================================\n")
      # SomaticSignatures used approach in Hutchins et al.
      # (2008) to estimate K.
      # This requires calculation of second derivative of
      # RSS at >2 integers, and thus requires at least 4 Ks
      # to be assessed.
      # NOTE(review): the roxygen @details above says "at least 3 Ks" while
      # the check below enforces a span of at least 4 integers -- confirm
      # which is intended.
      if(max(K.range) - min(K.range) < 3)
        stop("To calculate second derivative, K.range should span at least 4 integers.\n")
      assess <- SomaticSignatures::assessNumberSignatures(
        convSpectra,
        nSigs = seq.int(min(K.range),max(K.range)),
        decomposition = SomaticSignatures::nmfDecomposition,
        .options = paste0("p", CPU.cores),
        seed = seedNumber,
        nrun = nrun.est.K,
        includeFit = save.diag)
      if(save.diag){
        message("===============================")
        message("Saving diagnostic plots and full results for all K in K.range...")
        message("===============================")
        ggObj <- SomaticSignatures::plotNumberSignatures(assess)
        grDevices::pdf(file = paste0(out.dir,"/assess.K.pdf"))
        # NOTE(review): inside a function a bare `ggObj` is not auto-printed,
        # so assess.K.pdf is likely empty; print(ggObj) may be intended.
        ggObj
        grDevices::dev.off()
        save(assess, file = paste0(out.dir,"/assess.K.Rdata"))
      }
      # Choose K.best as the smallest current.K
      # which is an inflection point (changing sign of second derivative)
      # of RSS value.
      # For discrete function, we used forward formula to calculate
      # first and second derivatives.
      # Fetch the values of RSS
      RSS <- tibble::as_tibble(assess[,c("NumberSignatures","RSS")])
      # Derive the first derivative of RSS using numerical differentiation.
      ##
      # See https://stackoverflow.com/questions/627055/compute-a-derivative-using-discrete-methods/637969#637969
      # for more details.
      RSS.deriv <- numeric(0)
      for(current.K in seq.int(K.range[1],K.range[2])) {
        RSS.K <- RSS %>% dplyr::filter(NumberSignatures == current.K) %>% dplyr::select(RSS) %>% as.numeric
        if(current.K > K.range[1]) RSS.Kminus1 <- RSS %>% dplyr::filter(NumberSignatures == current.K - 1) %>% dplyr::select(RSS) %>% as.numeric
        if(current.K < K.range[2]) RSS.Kplus1 <- RSS %>% dplyr::filter(NumberSignatures == current.K + 1) %>% dplyr::select(RSS) %>% as.numeric
        if (current.K == K.range[1]) {
          # For the smallest possible K specified by user,
          # calculate 1st-derivative using forward difference operator
          # with spacing equals to 1.
          deriv.current.K <- RSS.Kplus1 - RSS.K
        } else if (current.K == K.range[2]) {
          # For the largest possible K,
          # calculate 1st-derivative using backward difference operator.
          deriv.current.K <- RSS.K - RSS.Kminus1
        } else { # Calculate 1st-derivative using central difference
          deriv.current.K <- (RSS.Kplus1 - RSS.Kminus1) / 2
        }
        deriv.current.K <- as.numeric(deriv.current.K)
        names(deriv.current.K) <- as.character(current.K)
        RSS.deriv <- c(RSS.deriv, deriv.current.K)
      }
      # Add 1st-derivative to tibble_df RSS.
      RSS <- RSS %>% dplyr::mutate(RSS.deriv)
      # Derive the second derivative of RSS using numerical differentiation
      # of first derivative.
      RSS.deriv.2 <- numeric(0)
      for(current.K in seq.int(K.range[1],K.range[2])) {
        RSS.deriv.K <- RSS %>% dplyr::filter(NumberSignatures == current.K) %>% dplyr::select(RSS.deriv) %>% as.numeric
        if(current.K > K.range[1]) RSS.deriv.Kminus1 <- RSS %>% dplyr::filter(NumberSignatures == current.K - 1) %>% dplyr::select(RSS.deriv) %>% as.numeric
        if(current.K < K.range[2]) RSS.deriv.Kplus1 <- RSS %>% dplyr::filter(NumberSignatures == current.K + 1) %>% dplyr::select(RSS.deriv) %>% as.numeric
        if (current.K == K.range[1]) {
          # For the smallest possible K specified by user,
          # calculate 1st-derivative of RSS.deriv
          # using forward difference operator
          # with spacing equals to 1.
          deriv.2.current.K <- RSS.deriv.Kplus1 - RSS.deriv.K
        } else if (current.K == K.range[2]) {
          # For the largest possible K,
          # calculate 1st-derivative of RSS.deriv
          # using backward difference operator.
          deriv.2.current.K <- RSS.deriv.K - RSS.deriv.Kminus1
        } else { # Calculate 1st-derivative of RSS.deriv using central difference
          deriv.2.current.K <- (RSS.deriv.Kplus1 - RSS.deriv.Kminus1) / 2
        }
        deriv.2.current.K <- as.numeric(deriv.2.current.K)
        names(deriv.2.current.K) <- as.character(current.K)
        RSS.deriv.2 <- c(RSS.deriv.2, deriv.2.current.K)
      }
      # Add 2nd-derivative to tibble_df RSS.
      RSS <- RSS %>% dplyr::mutate(RSS.deriv.2 = RSS.deriv.2)
      # Print RSS to standard output
      cat("\n==========================================\n")
      print(RSS)
      cat("\n==========================================\n")
      # Select the minimum K where signs of 2nd-derivative of
      # RSS at its neighboring Ks are opposite as the best K.
      for(current.K in seq.int(K.range[1],K.range[2]))
      {
        deriv2.K <- RSS %>% dplyr::filter(NumberSignatures == current.K) %>% dplyr::select(RSS.deriv.2) %>% as.numeric
        if(current.K > K.range[1])
          deriv2.Kminus1 <- RSS %>% dplyr::filter(NumberSignatures == current.K - 1) %>% dplyr::select(RSS.deriv.2) %>% as.numeric
        if(current.K < K.range[2])
          deriv2.Kplus1 <- RSS %>% dplyr::filter(NumberSignatures == current.K + 1) %>% dplyr::select(RSS.deriv.2) %>% as.numeric
        # Choose the current.K if the second derivative at (current.K-1)
        # and second derivative at (current.K+1) have opposite sign.
        ##
        # If current.K == K.range[1], the comparison would be between
        # 2nd-derivative at current.K and current.K+1.
        ##
        # If current.K == K.range[2], the comparison would be between
        # 2nd-derivative at current.K and current.K-1.
        # NOTE(review): if no sign change is found, the loop falls through
        # and K.best silently becomes K.range[2] -- confirm this fallback
        # is intended.
        if(current.K == K.range[1]){
          if(sign(deriv2.K) * sign(deriv2.Kplus1) == -1)
            break
        } else if(current.K == K.range[2]) {
          if(sign(deriv2.K) * sign(deriv2.Kminus1) == -1)
            break
        } else {
          if(sign(deriv2.Kminus1) * sign(deriv2.Kplus1) == -1)
            break
        }
      }
      K.best <- current.K
      print(paste0("The best number of signatures is found.",
                   "It equals to: ",K.best))
    }
    # Signature extraction
    res <- SomaticSignatures::identifySignatures(
      convSpectra,
      K.best,
      SomaticSignatures::nmfDecomposition,
      .options = paste0("p", CPU.cores),
      seed = seedNumber,
      nrun = nrun.extract,
      includeFit = save.diag)
    if(save.diag) {
      message("===============================")
      message("Saving final result for all K = K.exact...")
      message("===============================")
      save(res, file = paste0(out.dir,"/extract.given.K.Rdata"))
    }
    # Free memory held by the (potentially large) NMF runs.
    gc()
    gc()
    gc()
    # un-normalized signature matrix
    sigsRaw <- res@signatures
    colnames(sigsRaw) <-
      paste("SS.NMF",1:ncol(sigsRaw),sep=".")
    extractedSignatures <- apply(sigsRaw,2,function(x) x/sum(x)) # normalize each signature's sum to 1
    extractedSignatures <- ICAMS::as.catalog(extractedSignatures,
                                             region = "unknown",
                                             catalog.type = "counts.signature")
    # Output extracted signatures in ICAMS format
    ICAMS::WriteCatalog(extractedSignatures,
                        paste0(out.dir,"/extracted.signatures.csv"))
    # Derive exposure count attribution results.
    rawExposures <- t(res@samples)
    rownames(rawExposures) <-
      paste("SS.NMF",1:nrow(rawExposures),sep=".")
    # normalize exposure matrix (each sample's exposures sum to 1)
    exposureCounts <- apply(rawExposures,2,function(x) x/sum(x))
    # Make exposureCounts real exposure counts by scaling each sample's
    # proportions by that sample's total mutation count.
    for (sample in seq(1,ncol(exposureCounts))){
      exposureCounts[,sample] <-
        colSums(spectra)[sample] * exposureCounts[,sample]
    }
    # Write exposure counts in ICAMS and SynSig format.
    SynSigGen::WriteExposure(exposureCounts,
                             paste0(out.dir,"/inferred.exposures.csv"))
    # Save seeds and session information
    # for better reproducibility
    capture.output(sessionInfo(), file = paste0(out.dir,"/sessionInfo.txt")) # Save session info
    write(x = seedInUse, file = paste0(out.dir,"/seedInUse.txt")) # Save seed in use to a text file
    write(x = RNGInUse, file = paste0(out.dir,"/RNGInUse.txt")) # Save RNG kind in use to a text file
    # Return a list of signatures and exposures
    invisible(list("signature" = extractedSignatures,
                   "exposure" = exposureCounts))
  }
|
98cc9b546b28dd771ff6e99195e1cade9b4bff47
|
b457ede5c2d4d5065896c612a331b0988a297a30
|
/Chevalier_etal_MD962048/figures/makeFigDR3.R
|
77ddf6a62a399b9b1e37caf9cd0550a7dcc57f86
|
[] |
no_license
|
mchevalier2/Papers
|
6556e6ccd19bd71a11be2865477e436f5f3eb62c
|
73d24cda42f86fe1b9bf2ffebe031603260e45d4
|
refs/heads/master
| 2023-05-27T20:58:51.029066
| 2023-05-19T09:08:27
| 2023-05-19T09:08:27
| 187,044,389
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,932
|
r
|
makeFigDR3.R
|
## Figure 4: Comparison of the reconstruction with independent records
##
# Ask where to save the figure (default: the current working directory).
OUTPUT_FOLDER <- getwd()
s <- readline(prompt=paste0("Where should the figure be saved?\nDefault is current working directory (",OUTPUT_FOLDER,"): "))
if(s != '') OUTPUT_FOLDER <- s
# Install the only non-base dependency (rio) on demand, after asking the user.
pkg2install <- c()
if (! ("rio" %in% rownames(installed.packages()))) pkg2install <- c(pkg2install, 'rio')
makePlot <- TRUE
if (length(pkg2install) > 0){
    s <- ''
    while (! s %in% c('y', 'yes', 'Y', 'YES', 'n', 'no', 'N', 'NO')){
        s <- readline(prompt=paste0("The following are required: ", paste(pkg2install, collapse=', '),". Do you want to install them? [yes/no] "))
    }
    if(s %in% c('y', 'yes', 'Y', 'YES')){
        install.packages(pkg2install)
    }else{
        # Without rio the data cannot be loaded, so skip the plotting section.
        # BUG FIX: user-facing typos corrected ("workin" -> "working" above,
        # "aborded" -> "aborted" here).
        print("Script aborted.")
        makePlot <- FALSE
    }
}
if (makePlot) {
makeTransparent <- function(..., alpha=0.5) {
if(alpha>1) alpha=1
if(alpha<0) alpha=0
alpha = floor(255*alpha)
newColor = col2rgb(col=unlist(list(...)), alpha=FALSE)
.makeTransparent = function(col, alpha) {
rgb(red=col[1], green=col[2], blue=col[3], alpha=alpha, maxColorValue=255)
}
newColor = apply(newColor, 2, .makeTransparent, alpha=alpha)
return(newColor)
}
## Calculate the Gaussian density of Probability
## defined by xbar and sigma, at x
gauss=function(x, xbar, sigma){return(1/sqrt(2*pi*sigma**2)*exp(-(x-xbar)**2/sigma**2))}
## Apply the Gaussian smoothing kernel on dat, using sigma
## as a kernel width. xout defines the output axis
gausmooth=function(dat, xout, sigma, interp=TRUE){
yout=rep(NA,length(xout))
for(i in 1:length(xout)){
if((xout[i] >= min(dat[,1]) & xout[i] <= max(dat[,1])) | interp){
yout[i]=sum(dat[,2]*gauss(dat[,1], xout[i], sigma))/sum(gauss(dat[,1], xout[i], sigma))
}
}
return(yout)
}
    # Load the reconstruction and the independent comparison records
    # directly from GitHub (sheet numbers select the record within each
    # workbook; row/column subsets pick age + value columns).
    cat(">>> Loading data.\n")
    MAT=rio::import('https://github.com/mchevalier2/ClimateReconstructions/raw/master/MD96-2048_MAT_01.xlsx', which=2)[1:181,]
    CO2=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=7)[62:1883,c(2,3)]
    SSTs=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=5)[1:306, c(23, 24)]
    LR04=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=10)[1:701,c(1,2)]
    DomeC=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=6)[1:5784,c(1,2)]
    MALAWI=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=8)[1:295,c(1,3)]
    LEAFWAX=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=9)[1:177,c(1,10)]
    # Remove the linear trend from the leaf-wax ratio (subtract the fitted
    # slope*age + intercept from a simple lm against age).
    LEAFWAX.detrended=cbind(LEAFWAX[,1], LEAFWAX[,2] - LEAFWAX[,1]*coef(lm(LEAFWAX[,2]~ LEAFWAX[,1]))[2] - coef(lm(LEAFWAX[,2]~ LEAFWAX[,1]))[1])
    # COL: pollen-based MAT curve (all panels); COL2: the comparison record.
    COL='black'
    COL2=rgb(217,95,2,maxColorValue=255)
    # Six panels (A-F), each overlaying the MAT reconstruction (left axis)
    # with one independent record (right axis) on a shared 0-800 ka x-axis.
    pdf(paste0(OUTPUT_FOLDER, "/Chevalier_etal_MD962048_FigDR3.pdf"), width=7.48, height=8, useDingbats=FALSE) ; {
        par(ps=7,bg=makeTransparent("white",alpha=0),mar=rep(0,4),cex=1,cex.main=1)
        layout(matrix(1:6, ncol=2, byrow=TRUE), width=1, height=1)
        # Panel A: MAT reconstruction vs Mozambique Channel SSTs PC1.
        plot.new() ; { ## SSTs
            plot.window(xlim=c(-100,900),ylim=range(MAT[,2])+diff(range(MAT[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(MAT, col=makeTransparent(COL, alpha=1), type='l', cex=0.3)
                for(i in seq(16.5,21.5,0.5)) segments(-20,i,-9,i, lwd=0.5, col=COL)
                for(i in seq(17,21.5,1)) text(-25,i,i, adj=c(1,0.5), col=COL)
                text(-115, min(MAT[,2])+diff(range(MAT[,2]))/2, 'MD96-2048 Pollen-based\nMAT Reconstruction (°C)', adj=c(0.5,1), srt=90, col=COL, cex=10/7)
                rect(-9,min(MAT[,2])-0.02*diff(range(MAT[,2])),809,max(MAT[,2])+0.02*diff(range(MAT[,2])),lwd=0.5)
                for(i in seq(0,800,25)){ segments(i,min(MAT[,2])-0.02*diff(range(MAT[,2])),i,min(MAT[,2])-ifelse(i%%50 == 0, 0.03,0.025)*diff(range(MAT[,2])), lwd=0.5) }
                for(i in seq(0,800,100)){ text(i,min(MAT[,2])-0.04*diff(range(MAT[,2])), i, cex=1, adj=c(0.5, 1)) }
                text(400, min(MAT[,2])-0.1*diff(range(MAT[,2])), 'Age (calendar yr BP x1000)', adj=c(0.5,0.5), cex=10/7)
                text(10, max(MAT[,2]), 'A', cex=2.5, font=2, adj=c(0,1))
            }
            plot.window(xlim=c(-100,900),ylim=range(SSTs[,2])+diff(range(SSTs[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(SSTs, col=makeTransparent(COL2, alpha=1), type='l', cex=0.3)
                for(i in seq(-3,3,1)) segments(820,i,809,i, lwd=0.5, col=COL2)
                for(i in seq(-3,3,2)) text(825,i,i, adj=c(0,0.5), col=COL2)
                text(920, min(SSTs[,2])+diff(range(SSTs[,2]))/2, 'Mozambique Channel SSTs PC1', adj=c(0.5,0), srt=90, col=COL2, cex=10/7)
            }
        }
        # Panel B: MAT reconstruction vs detrended leaf-wax n-alkane ratio
        # (right axis reversed via rev(range(...))).
        plot.new() ; { ## LEAFWAX.detrended
            plot.window(xlim=c(-100,900),ylim=range(MAT[,2])+diff(range(MAT[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(MAT, col=makeTransparent(COL, alpha=1), type='l', cex=0.3)
                for(i in seq(16.5,21.5,0.5)) segments(-20,i,-9,i, lwd=0.5, col=COL)
                for(i in seq(17,21.5,1)) text(-25,i,i, adj=c(1,0.5), col=COL)
                text(-115, min(MAT[,2])+diff(range(MAT[,2]))/2, 'MD96-2048 Pollen-based\nMAT Reconstruction (°C)', adj=c(0.5,1), srt=90, col=COL, cex=10/7)
                rect(-9,min(MAT[,2])-0.02*diff(range(MAT[,2])),809,max(MAT[,2])+0.02*diff(range(MAT[,2])),lwd=0.5)
                for(i in seq(0,800,25)){ segments(i,min(MAT[,2])-0.02*diff(range(MAT[,2])),i,min(MAT[,2])-ifelse(i%%50 == 0, 0.03,0.025)*diff(range(MAT[,2])), lwd=0.5) }
                for(i in seq(0,800,100)){ text(i,min(MAT[,2])-0.04*diff(range(MAT[,2])), i, cex=1, adj=c(0.5, 1)) }
                text(400, min(MAT[,2])-0.1*diff(range(MAT[,2])), 'Age (calendar yr BP x1000)', adj=c(0.5,0.5), cex=10/7)
                text(10, max(MAT[,2]), 'B', cex=2.5, font=2, adj=c(0,1))
            }
            plot.window(xlim=c(-100,900),ylim=rev(range(LEAFWAX.detrended[,2]))-diff(range(LEAFWAX.detrended[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(LEAFWAX.detrended, col=makeTransparent(COL2, alpha=1), type='l', cex=0.3)
                for(i in seq(-0.04,0.05,0.01)) segments(820,i,809,i, lwd=0.5, col=COL2)
                for(i in seq(-0.04,0.05,0.02)) text(825,i,i, adj=c(0,0.5), col=COL2)
                text(930, min(LEAFWAX.detrended[,2])+diff(range(LEAFWAX.detrended[,2]))/2, 'Ratio of long-chain n-alkanes\nC31/(C29+C31) [detrended]', adj=c(0.5,0), srt=90, col=COL2, cex=10/7)
            }
        }
        # Panel C: MAT reconstruction vs Lake Malawi surface temperature.
        plot.new() ; { ## MALAWI
            plot.window(xlim=c(-100,900),ylim=range(MAT[,2])+diff(range(MAT[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(MAT, col=makeTransparent(COL, alpha=1), type='l', cex=0.3)
                for(i in seq(16.5,21.5,0.5)) segments(-20,i,-9,i, lwd=0.5, col=COL)
                for(i in seq(17,21.5,1)) text(-25,i,i, adj=c(1,0.5), col=COL)
                text(-115, min(MAT[,2])+diff(range(MAT[,2]))/2, 'MD96-2048 Pollen-based\nMAT Reconstruction (°C)', adj=c(0.5,1), srt=90, col=COL, cex=10/7)
                rect(-9,min(MAT[,2])-0.02*diff(range(MAT[,2])),809,max(MAT[,2])+0.02*diff(range(MAT[,2])),lwd=0.5)
                for(i in seq(0,800,25)){ segments(i,min(MAT[,2])-0.02*diff(range(MAT[,2])),i,min(MAT[,2])-ifelse(i%%50 == 0, 0.03,0.025)*diff(range(MAT[,2])), lwd=0.5) }
                for(i in seq(0,800,100)){ text(i,min(MAT[,2])-0.04*diff(range(MAT[,2])), i, cex=1, adj=c(0.5, 1)) }
                text(400, min(MAT[,2])-0.1*diff(range(MAT[,2])), 'Age (calendar yr BP x1000)', adj=c(0.5,0.5), cex=10/7)
                text(10, max(MAT[,2]), 'C', cex=2.5, font=2, adj=c(0,1))
            }
            plot.window(xlim=c(-100,900),ylim=range(MALAWI[,2])+diff(range(MALAWI[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(MALAWI, col=makeTransparent(COL2, alpha=1), type='l', cex=0.3)
                for(i in seq(17,27,1)) segments(820,i,809,i, lwd=0.5, col=COL2)
                for(i in seq(17,27,2)) text(825,i,i, adj=c(0,0.5), col=COL2)
                text(920, min(MALAWI[,2])+diff(range(MALAWI[,2]))/2, 'Malawi lake surface temperature (°C)', adj=c(0.5,0), srt=90, col=COL2, cex=10/7)
            }
        }
        # Panel D: MAT reconstruction vs Dome C temperature reconstruction.
        plot.new() ; { ## DomeC
            plot.window(xlim=c(-100,900),ylim=range(MAT[,2])+diff(range(MAT[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(MAT, col=makeTransparent(COL, alpha=1), type='l', cex=0.3)
                for(i in seq(16.5,21.5,0.5)) segments(-20,i,-9,i, lwd=0.5, col=COL)
                for(i in seq(17,21.5,1)) text(-25,i,i, adj=c(1,0.5), col=COL)
                text(-115, min(MAT[,2])+diff(range(MAT[,2]))/2, 'MD96-2048 Pollen-based\nMAT Reconstruction (°C)', adj=c(0.5,1), srt=90, col=COL, cex=10/7)
                rect(-9,min(MAT[,2])-0.02*diff(range(MAT[,2])),809,max(MAT[,2])+0.02*diff(range(MAT[,2])),lwd=0.5)
                for(i in seq(0,800,25)){ segments(i,min(MAT[,2])-0.02*diff(range(MAT[,2])),i,min(MAT[,2])-ifelse(i%%50 == 0, 0.03,0.025)*diff(range(MAT[,2])), lwd=0.5) }
                for(i in seq(0,800,100)){ text(i,min(MAT[,2])-0.04*diff(range(MAT[,2])), i, cex=1, adj=c(0.5, 1)) }
                text(400, min(MAT[,2])-0.1*diff(range(MAT[,2])), 'Age (calendar yr BP x1000)', adj=c(0.5,0.5), cex=10/7)
                text(10, max(MAT[,2]), 'D', cex=2.5, font=2, adj=c(0,1))
            }
            plot.window(xlim=c(-100,900),ylim=range(DomeC[,2])+diff(range(DomeC[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(DomeC, col=makeTransparent(COL2, alpha=1), type='l', cex=0.3)
                for(i in seq(-10,5,2.5)) segments(820,i,809,i, lwd=0.5, col=COL2)
                for(i in seq(-10,5,5)) text(825,i,i, adj=c(0,0.5), col=COL2)
                text(930, min(DomeC[,2])+diff(range(DomeC[,2]))/2, 'Dome C, Antarctica\nTemperature Reconstruction (°C)', adj=c(0.5,0), srt=90, col=COL2, cex=10/7)
            }
        }
        # Panel E: MAT reconstruction vs Dome C pCO2.
        plot.new() ; { ## CO2
            plot.window(xlim=c(-100,900),ylim=range(MAT[,2])+diff(range(MAT[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(MAT, col=makeTransparent(COL, alpha=1), type='l', cex=0.3)
                for(i in seq(16.5,21.5,0.5)) segments(-20,i,-9,i, lwd=0.5, col=COL)
                for(i in seq(17,21.5,1)) text(-25,i,i, adj=c(1,0.5), col=COL)
                text(-115, min(MAT[,2])+diff(range(MAT[,2]))/2, 'MD96-2048 Pollen-based\nMAT Reconstruction (°C)', adj=c(0.5,1), srt=90, col=COL, cex=10/7)
                rect(-9,min(MAT[,2])-0.02*diff(range(MAT[,2])),809,max(MAT[,2])+0.02*diff(range(MAT[,2])),lwd=0.5)
                for(i in seq(0,800,25)){ segments(i,min(MAT[,2])-0.02*diff(range(MAT[,2])),i,min(MAT[,2])-ifelse(i%%50 == 0, 0.03,0.025)*diff(range(MAT[,2])), lwd=0.5) }
                for(i in seq(0,800,100)){ text(i,min(MAT[,2])-0.04*diff(range(MAT[,2])), i, cex=1, adj=c(0.5, 1)) }
                text(400, min(MAT[,2])-0.1*diff(range(MAT[,2])), 'Age (calendar yr BP x1000)', adj=c(0.5,0.5), cex=10/7)
                text(10, max(MAT[,2]), 'E', cex=2.5, font=2, adj=c(0,1))
            }
            plot.window(xlim=c(-100,900),ylim=range(CO2[,2])+diff(range(CO2[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(CO2, col=makeTransparent(COL2, alpha=1), type='l', cex=0.3)
                for(i in seq(180,315,15)) segments(820,i,809,i, lwd=0.5, col=COL2)
                for(i in seq(180,315,30)) text(825,i,i, adj=c(0,0.5), col=COL2)
                text(920, min(CO2[,2])+diff(range(CO2[,2]))/2, 'Dome C, Antarctica pCO2 (ppm)', adj=c(0.5,0), srt=90, col=COL2, cex=10/7)
            }
        }
        # Panel F: MAT reconstruction vs global ice volume (LR04 benthic
        # d18O stack; right axis reversed so "more ice" points down).
        plot.new() ; { ## LR04
            plot.window(xlim=c(-100,900),ylim=range(MAT[,2])+diff(range(MAT[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(MAT, col=makeTransparent(COL, alpha=1), type='l', cex=0.3)
                for(i in seq(16.5,21.5,0.5)) segments(-20,i,-9,i, lwd=0.5, col=COL)
                for(i in seq(17,21.5,1)) text(-25,i,i, adj=c(1,0.5), col=COL)
                text(-115, min(MAT[,2])+diff(range(MAT[,2]))/2, 'MD96-2048 Pollen-based\nMAT Reconstruction (°C)', adj=c(0.5,1), srt=90, col=COL, cex=10/7)
                rect(-9,min(MAT[,2])-0.02*diff(range(MAT[,2])),809,max(MAT[,2])+0.02*diff(range(MAT[,2])),lwd=0.5)
                for(i in seq(0,800,25)){ segments(i,min(MAT[,2])-0.02*diff(range(MAT[,2])),i,min(MAT[,2])-ifelse(i%%50 == 0, 0.03,0.025)*diff(range(MAT[,2])), lwd=0.5) }
                for(i in seq(0,800,100)){ text(i,min(MAT[,2])-0.04*diff(range(MAT[,2])), i, cex=1, adj=c(0.5, 1)) }
                text(400, min(MAT[,2])-0.1*diff(range(MAT[,2])), 'Age (calendar yr BP x1000)', adj=c(0.5,0.5), cex=10/7)
                text(10, max(MAT[,2]), 'F', cex=2.5, font=2, adj=c(0,1))
            }
            plot.window(xlim=c(-100,900),ylim=rev(range(LR04[,2]))-diff(range(LR04[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
                points(LR04, col=makeTransparent(COL2, alpha=1), type='l', cex=0.3)
                for(i in seq(5,3.2,-0.25)) segments(820,i,809,i, lwd=0.5, col=COL2)
                for(i in seq(5,3.2,-0.5)) text(825,i,i, adj=c(0,0.5), col=COL2)
                text(930, min(LR04[,2])+diff(range(LR04[,2]))/2, 'Global ice volume (LR04)\nd18Obenthic (permil VPDB)', adj=c(0.5,0), srt=90, col=COL2, cex=10/7)
            }
        }
    dev.off() ; }
}
#-;
|
cb0c122f1d894eccb5c0f6d2001627c59fe3b1b7
|
78fbb9c9a9f1a90e2c958c16bc39410f9bd44fe9
|
/Code.R
|
b97d2de4c36a8ae930b8e8f4d08b8df00b8444bc
|
[] |
no_license
|
CaDiez/RepData_PeerAssessment2
|
3f2bb5dc1316f2b3d9fc03b8fe1430780744d9d4
|
f99be3c5de0dd82eb5462073447bd295e3e9d269
|
refs/heads/master
| 2021-01-25T10:15:06.248885
| 2015-05-22T01:48:15
| 2015-05-22T01:48:15
| 35,895,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,790
|
r
|
Code.R
|
##packsUsed<-c("R.utils", "data.table")
##sapply(packsUsed, require, character.only=TRUE, quietly=TRUE)
## data.table supplies setnames(); R.utils supplies bunzip2().
library ("data.table", quietly=TRUE)
library ("R.utils")
cache = TRUE
## Check and load data.
## BUG FIX: the download guard previously looked for "StormData.csv.bz2"
## inside a project sub-directory while download.file() wrote
## "stormData.csv.bz2" to the working directory, so the archive was
## re-downloaded on every run. Test the actual destination file instead.
if (!file.exists("stormData.csv.bz2")) {
    download.file("http://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2", destfile = "stormData.csv.bz2")
}
## BUG FIX: the decompression guard tested "StormData.csv" (capital S)
## while bunzip2() produces "stormData.csv", so on case-sensitive file
## systems the archive was decompressed on every run.
if(!file.exists("stormData.csv"))
    bunzip2("stormData.csv.bz2", overwrite=T, remove=F)
stormData <- read.csv("stormData.csv", sep = ",")
## Review data structure
dim(stormData)
head(stormData, n=2)
# Preprocess fieldnames (lower-case) for further analysis
old <- names(stormData)
new <- tolower(old)
setnames(stormData, old, new)
# Define multipliers for valid exponents ("" and "0" -> x1, H -> x100,
# K -> x1e3, M -> x1e6, B -> x1e9)
powerExp <- data.frame(c("", "0", "H", "K", "M", "B"), c(1, 1, 10^2, 10^3,10^6, 10^9))
colnames(powerExp) <- c("validexp", "PowerBy")
# Subset data retaining only records with valid exponent.
# NOTE(review): this subset is immediately overwritten by the merge below
# (which starts again from stormData); the inner merges drop invalid
# exponents anyway, so the result is unchanged -- confirm intent.
stormDataExp <- subset(stormData, (cropdmgexp %in% powerExp$validexp) & (propdmgexp %in% powerExp$validexp))
# Get the final economics in crops & props by multiplying factor by value
colnames(powerExp) <- c("validexp", "propdmgPower")
stormDataExp <- merge(stormData, powerExp, by.x = "propdmgexp", by.y = "validexp")
stormDataExp$propdmg <- (stormDataExp$propdmg * stormDataExp$propdmgPower)
colnames(powerExp) <- c("validexp", "cropdmgPower")
stormDataExp <- merge(stormDataExp, powerExp, by.x = "cropdmgexp", by.y = "validexp")
stormDataExp$cropdmg <- (stormDataExp$cropdmg * stormDataExp$cropdmgPower)
## Summarise one numeric column of the (global) stormDataExp data frame by
## event type and return the 20 largest totals as a data frame with columns
## eventtype and metric, ordered by metric in descending fashion.
selectColumnMetric <- function(field)
{
    # Total of `field` per event type.
    totals <- tapply(stormDataExp[, field], stormDataExp$evtype, sum)
    ranked <- data.frame(eventtype = names(totals),
                         metric = as.numeric(totals))
    # Sort descending and keep only the top 20 event types.
    ranked <- ranked[order(ranked$metric, decreasing = TRUE), ]
    ranked[1:20, ]
}
# Summarise the two human-impact metrics and draw them as side-by-side
# bar charts (fatalities left, injuries right), one bar per event type.
injuries <- selectColumnMetric("injuries")
fatalities <- selectColumnMetric("fatalities")
par(las = 3, cex = 0.3, mfrow = c(1, 2))
for (panel in list(list(data = fatalities, label = "Fatalities"),
                   list(data = injuries, label = "Injuries"))) {
  barplot(panel$data$metric,
          names.arg = panel$data$eventtype,
          col = panel$data$eventtype,
          ylab = panel$label,
          main = paste0("Top 20 ", panel$label, "/Event Type"))
  # Push the x label below the rotated event-type names.
  title(xlab = "Event Type", line = 11)
}
|
0f38a4435e628050dc457f680c9a014d1ea87757
|
3d52bb75ea458b44c7e2935f818a25117bc4370d
|
/chap3.r
|
8e3504c4ebd61dbb260fb026e0856cbf289774b1
|
[] |
no_license
|
omelhoro/r-nlp-baayen
|
1b068853125d9a39872cb400074b839308ed4a98
|
5c71cb96a2a9be715d66e5a14246d717611b4bb0
|
refs/heads/master
| 2020-04-02T12:39:56.776704
| 2016-06-02T18:50:55
| 2016-06-02T18:50:55
| 60,289,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,452
|
r
|
chap3.r
|
# Chapter 3 exercises: dispersion of selected word frequencies across
# 40 equal-sized chunks of "Alice in Wonderland" (languageR's `alice`),
# compared against a Poisson model.
#
# Author: igor
###############################################################################
library("languageR")
library("MASS")
# task1: per-chunk occurrence counts for a few target words
alice = tolower(alice)
wonderland = data.frame(word = alice[1:25920], chunk = cut(1:25920, breaks = 40, labels = F))
for (word in c("alice", "hare", "very")) {
  wonderland$is_the_word = wonderland$word == word
  temp_count = tapply(wonderland$is_the_word, wonderland$chunk, sum)
  assign(paste(word, "count", sep = "_"), temp_count)
  # BUG FIX: the frequency table must be built from `temp_count`; the
  # original referenced an undefined variable `temp_var` and errored here.
  assign(paste(word, "tab", sep = "_"), xtabs(~temp_count))
}
#wonderland$low_word=tolower(wonderland$word)
# task2: raw per-chunk counts plotted in text order
par(mfrow = c(1, 3))
plot(1:40, alice_count, type = "h")
plot(1:40, very_count, type = "h")
plot(1:40, hare_count, type = "h")
# task3: empirical frequency spectra (proportion of chunks with k occurrences)
par(mfrow = c(1, 3))
plot(as.numeric(names(alice_tab)), alice_tab/sum(alice_tab), ylim = c(0, 0.9), type = "h")
plot(as.numeric(names(very_tab)), very_tab/sum(very_tab), type = "h")
plot(as.numeric(names(hare_tab)), hare_tab/sum(hare_tab), type = "h")
# task4: compare the empirical spectrum of "alice" with a Poisson model
par(mfrow = c(1, 2))
n = length(alice)/40
p = mean(alice_count/n)
lambda = p * n
nums_count = as.numeric(names(alice_tab))
plot(nums_count, dpois(nums_count, lambda), type = "h", ylim = c(0, 0.9))
plot(as.numeric(names(alice_tab)), alice_tab/sum(alice_tab), ylim = c(0, 0.9), type = "h")
# task5: quantile-quantile check of the Poisson fit
quant_seq = seq(0.05, 0.95, by = 0.05)
pois_quant = qpois(quant_seq, lambda)
real_quant = quantile(alice_count, quant_seq)
plot(pois_quant, real_quant)
# task6: probability of at most 10 occurrences under the Poisson model
pois10 = ppois(10, lambda)
quantile(alice_count, pois10)
|
5486e965d94c250cb71e7903b586fc3ba75606d3
|
ab859104148d76008b7263fcbe640e1627bdf17e
|
/man/read_sas_count.Rd
|
14dfe1e5e9560e0846189de6a138777125340d22
|
[
"MIT"
] |
permissive
|
arnaud-feldmann/RchunkNcount
|
0bd753f93716f104dca524a237df27b5eebf92ee
|
bbe2173a2bf45920062660941c490a1b2a590ddd
|
refs/heads/master
| 2023-09-01T05:50:11.862343
| 2021-10-19T08:54:45
| 2021-10-19T08:54:45
| 418,579,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,144
|
rd
|
read_sas_count.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_sas_count.R
\name{read_sas_count}
\alias{read_sas_count}
\title{Reads a sas file by chunks and summarize everything}
\usage{
read_sas_count(
data_file,
col_select,
...,
row_filter = NULL,
chunk_size = 10000,
name = "n",
weight = NULL
)
}
\arguments{
\item{data_file}{Path to data and catalog files}
\item{col_select}{the selected columns}
\item{\dots}{some mutate to be done}
\item{row_filter}{the filtering expression}
\item{chunk_size}{the size of the chunks}
\item{name}{the name of the columns for counts}
\item{weight}{a column to be taken as weights for the counts}
}
\value{
a tibble
}
\description{
`read_sas_count` allows you to wrap `read_sas` in order to make the usual
dplyr counts without exploding the ram.
}
\details{
Everything is summarized, grouped by the output columns, and
counted into the variable of name `name` with a weight equal to `weight`.
You can add or modify the columns with the `...` as you would into a
`mutate`, and you can filter the rows with a the argument `row_filter`
(as you would with `filter`).
}
|
9c17c9958205e6dab329b551f775f5e7b55cbf7c
|
83353b29f5577f575ff982738c4c957ddc2c2a40
|
/edsats_limpeza.R
|
b1466b281d24b43674211be095a3aff3f8cf31f9
|
[] |
no_license
|
Letisouza/Projeto_tcc_PUC_camila
|
fb91d90cdae699bb8e802250106330997e0bb156
|
8b66ff214ef455b59c2759f530865f89e0c26bf8
|
refs/heads/master
| 2023-04-11T10:14:16.825627
| 2021-04-26T00:05:42
| 2021-04-26T00:05:42
| 361,169,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,390
|
r
|
edsats_limpeza.R
|
# Clean and reshape the World Bank EdStats extract for the TCC project.
edstats <- read.csv("EdStatsData.csv")
library(tidyverse)
glimpse(edstats)
skimr::skim(edstats)
# Drop unwanted columns: far-future projection years (2020-2100) and the
# trailing empty column "X" produced by a dangling comma in the CSV.
edstats_filtrado <- edstats %>%
  select(-c( "X2020", "X2025", "X2030", "X2035", "X2040", "X2045", "X2050", "X2055", "X2060", "X2065", "X2070", "X2075", "X2080", "X2085", "X2090", "X2095", "X2100", "X"))
# Reshape to long format: one row per country/indicator/year.
edstats_mod <- edstats_filtrado %>%
  pivot_longer(
    cols = starts_with("X"),
    names_to = "year",
    values_to = "value"
  )
glimpse(edstats_mod)
# Keep only the indicators listed in the companion spreadsheet.
library(readxl)
Variaveis_edstats <- read_excel("Variaveis_edstats.xlsx") %>%
  select(Código) %>%
  as.vector()
Variaveis_edstats
edstats_mod <- edstats_mod %>%
  filter(Indicator.Code %in% Variaveis_edstats$Código)
# Remove aggregate "countries" (regional/income groupings, e.g. WLD = world).
agregados <- c("WLD", "ARB", "EAP","EAS", "ECA","ECS", "EMU", "EUU", "HPC", "LCN", "LDC", "LIC", "LMC", "LMY", "MEA", "MIC", "MNA", "NAC", "OED", "SAS", "SSA", "SSF", "UMC", "HIC", "LAC")
edstats_mod <- edstats_mod %>%
  filter(!Country.Code %in% agregados) %>%
  mutate(year = substring(year, 2)) # strip the leading "X" from year labels
##########################################
# Join with the governance data by country name and year.
# NOTE(review): `pv2018_mod1` is not defined in this file -- presumably
# created by a companion script; confirm before running this join. The
# "ï.." prefix in the column name is a UTF-8 BOM artifact from read.csv.
banco_joined <- left_join(edstats_mod, pv2018_mod1,
                          by = c("ï..Country.Name" = "country",
                                 "year" = "year"))
View(banco_joined)
|
6f29c88f104b0a8f7ba2411ec04f8f21af64d203
|
b0fef7d420719221d736c6fc22efcdfdc6859311
|
/plot3.R
|
65df7adf9866d51476d9bbba31dd8f66b08d7adf
|
[] |
no_license
|
ajibolafaki/Exploratory-Data-Analysis-Week-1-Project
|
d3cca69abe9a92a410860426f2cc3a5ffd2f0318
|
66f0982114efc622332f8dbcb9a7b35f1b1ac4f9
|
refs/heads/master
| 2021-04-27T00:04:45.928081
| 2018-03-04T02:14:56
| 2018-03-04T02:14:56
| 123,748,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 778
|
r
|
plot3.R
|
# Exploratory Data Analysis, plot 3: energy sub-metering over 2007-02-01/02.
df <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE, dec = ".")
# Keep only the two days of interest (dates are stored as d/m/Y strings).
sub_df <- df[df$Date %in% c("1/2/2007", "2/2/2007"), ]
sub_df$date_time <- strptime(paste(sub_df$Date, sub_df$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Convert measurement columns of the plotted subset to numeric.
# BUG FIX: the original converted columns of `df` *after* `sub_df` had
# already been copied, so the conversion never reached the plotted data.
# NOTE(review): presumably missing values are coded as "?" in this file;
# as.numeric turns those into NA (with a warning) -- confirm.
sub_df[c(3:7)] <- lapply(sub_df[c(3:7)], function(x) {as.numeric(x)})
# plot3.png: the three sub-metering series overlaid with a legend.
png("plot3.png", width = 480, height = 480)
plot(sub_df$date_time, sub_df$Sub_metering_1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(sub_df$date_time, sub_df$Sub_metering_2, type = "l", col = "red")
lines(sub_df$date_time, sub_df$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
|
e60fdf45b5d763e5d6e501ed4cb37a5d57154038
|
a4c45052798f39e8def9221c85c100291fb890b6
|
/hwscript_week10.R
|
a1cb0c007ea09b09856aa8f452632a667034cf8f
|
[] |
no_license
|
JonesLabTAMU/Rscripts
|
38042023a4fdf321bf4eeed8d5f796f4cd50bd39
|
1852b21fec8c2fdbc97bbbff1d7038453937330f
|
refs/heads/master
| 2021-01-09T06:00:25.025685
| 2017-04-20T19:40:02
| 2017-04-20T19:40:02
| 80,870,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,524
|
r
|
hwscript_week10.R
|
# Analysis of gambusia data from O'Dea et al.
# Part one: mixed-effects analysis of individual offspring SIZE (offspring
# nested within mothers). Part two: GLS analysis of offspring NUMBER with
# explicit variance modelling. Requires the nlme and lsmeans packages.
setwd("~/Rexamples/Week10")
gamb <- read.csv("gambusia_individual_offspring.csv")
#Offspring are nested within mothers
#Consequently, mother should be included as a random factor
#First take a look at the residuals
M1.lm <- lm(Indiv_Offspring_Size ~ Age*Size, data=gamb)
summary(M1.lm)
par(mfrow=c(2,2))
plot(M1.lm)
#One of the treatments has outliers on both ends
hist(residuals(M1.lm))
#Except for the outliers, everything looks normal
gamb$logOS <- log(gamb$Indiv_Offspring_Size)
M2.lm <- lm(logOS ~ Age*Size, data=gamb)
summary(M2.lm)
par(mfrow=c(2,2))
plot(M2.lm)
hist(residuals(M2.lm))
#The log transformation didn't help really, so stick with the untransformed data
#Does the female ID affect the values of offspring size? Do a boxplot to see.
E <- rstandard(M1.lm)
boxplot(E ~ Female_ID, data=gamb, axes=FALSE)
abline(0, 0); axis(2)
text(1:141, -2.5, levels(gamb$Female_ID), cex=0.5, srt=65)
#This plot also identifies some of the females whose broods contain outliers
#Run a generalized linear regression with the same variables as the lm
library(nlme)
M.gls <- gls(Indiv_Offspring_Size ~ Age*Size, data=gamb)
summary(M.gls)
#Fit a linear mixed model
M1.lme <- lme(Indiv_Offspring_Size ~ Age*Size, random = ~1 | Female_ID, method="REML", data=gamb)
anova(M.gls, M1.lme)
#The fit with the random term is WAAAAAY better
#Validate the model with this random structure
E2 <- resid(M1.lme, type="normalized")
F2 <- fitted(M1.lme)
op <- par(mfrow = c(2,2), mar=c(4,4,3,2))
MyYlab = "Residuals"
plot(x=F2, y=E2, xlab="Fitted values", ylab=MyYlab)
boxplot(E2 ~ Age, data=gamb, main="Age", ylab=MyYlab)
boxplot(E2 ~ Size, data=gamb, main="Size", ylab=MyYlab)
par(op)
#The model validation looks really good actually (but there are still a couple of outliers)
#Find the optimal fixed structure
summary(M1.lme)
#The first candidate to remove is the Age:Size interaction
# NOTE: models compared below are refit with method="ML" because REML
# likelihoods are not comparable across different fixed structures.
M1.Full <- lme(Indiv_Offspring_Size ~ Age*Size, random = ~1 | Female_ID, method="ML", data=gamb)
M1.A <- lme(Indiv_Offspring_Size ~ Age+Size, random = ~1 | Female_ID, method="ML", data=gamb)
anova(M1.Full, M1.A)
#In M1.A, Age is also not significant, so drop that, too
M1.A1 <- lme(Indiv_Offspring_Size ~ Size, random = ~1 | Female_ID, method="ML", data=gamb)
anova(M1.A, M1.A1)
#The final model has just Size as a fixed effect and Female ID as a random effect
# Refit the selected model with REML for unbiased variance estimates.
M1.Full_REML <- lme(Indiv_Offspring_Size ~ Size, random = ~1 | Female_ID, method="REML", data=gamb)
summary(M1.Full_REML)
#And... It's not significant.
#Model Validation
E2 <- resid(M1.Full_REML, type="normalized")
F2 <- fitted(M1.Full_REML)
op <- par(mfrow = c(2,2), mar=c(4,4,3,2))
MyYlab = "Residuals"
plot(x=F2, y=E2, xlab="Fitted values", ylab=MyYlab)
boxplot(E2 ~ Size, data=gamb, main="Size", ylab=MyYlab)
par(op)
#Produce a boxplot showing different means for the treatment categories
boxplot(Indiv_Offspring_Size ~ Age+Size, data=gamb, xlab="Treatment Combination", ylab="Offspring Size")
#All of this seems consistent with the notion that neither Age nor Size affects offspring size
library(lsmeans)
gamb_lsmeans <- lsmeans(M1.A, ~ Size*Age, data=gamb)
gamb_lsmeans
plot(gamb_lsmeans)
#If there's any trend at all, it's that the larger females (B) have smaller offspring than the smaller females (S).
#However, we cannot be confident in this result, because the p-value is 0.07 from our GLMM.
#Part two -- the effects of female size and age on number of offspring
# NOTE(review): the data for part two live in the Week12 directory.
setwd("~/Rexamples/Week12")
gamb2 <- read.csv("gambusia_female_means.csv")
gamb2 <- gamb2[1:140,] #remove missing row at the end
gamb2$Age <- factor(gamb2$Age)
gamb2$Size <- factor(gamb2$Size)
#What is the relationship between female size and number of offspring?
plot(Number_of_Offspring ~ Female_Size, data=gamb2)
M1.lm <- lm(Number_of_Offspring ~ Female_Size, data=gamb2)
plot(M1.lm)
#Perhaps there's some heterogeneity, with the variance increasing with female size
plot(Number_of_Offspring ~ Age, data=gamb2) # Variance looks larger with Old but maybe it's driven by size
#Linear model with Female Size and Age as the factors
M2.lm <- lm(Number_of_Offspring ~ Female_Size*Age, data=gamb2)
summary(M2.lm)
plot(M2.lm)
#This model might be better fit with a model that includes heterogeneity, so it calls for a GLS.
M.gls <- gls(Number_of_Offspring ~ Female_Size*Age, data=gamb2)
#Fit different variance structures based on Female_Size
vf1 <- varPower(form = ~Female_Size)
M.gls1 <- gls(Number_of_Offspring ~ Female_Size*Age,weights=vf1,data=gamb2)
vf2 <- varExp(form = ~Female_Size)
M.gls2 <- gls(Number_of_Offspring ~ Female_Size*Age,weights=vf2,data=gamb2)
vf3 <- varConstPower(form = ~Female_Size)
M.gls3 <- gls(Number_of_Offspring ~ Female_Size*Age,weights=vf3,data=gamb2)
anova(M.gls, M.gls1, M.gls2, M.gls3) #looks like the best is gls2, confirm with LRtest
anova(M.gls,M.gls2) #p < .0001
plot(M.gls2) #looks like the heterogeneity problem is solved
#Does it help to also model variance by level of Age
vf4 <- varComb(varIdent(form=~1|Age),varExp(form = ~Female_Size))
M.gls4 <- gls(Number_of_Offspring ~ Female_Size*Age,weights=vf4,data=gamb2)
anova(M.gls2,M.gls4) #p = 0.3528 so it doesn't help
#So M.gls2 is my model. Now look at the results
summary(M.gls2)
#The interaction is non-significant, so remove it
# (Again using ML fits for fixed-structure comparisons, REML for the final fit.)
vf2 <- varExp(form = ~Female_Size)
M.gls5 <- gls(Number_of_Offspring ~ Female_Size + Age,weights=vf2, method="ML", data=gamb2)
M.gls6 <- gls(Number_of_Offspring ~ Female_Size * Age,weights=vf2, method="ML", data=gamb2)
M.gls7 <- gls(Number_of_Offspring ~ Female_Size,weights=vf2, method="ML", data=gamb2)
anova(M.gls6,M.gls5,M.gls7)
#Refit the final model with REML
M.gls8 <- gls(Number_of_Offspring ~ Female_Size,weights=vf2, method="REML", data=gamb2)
summary(M.gls8)
#At the end of the day, only female size predicts offspring number
plot(M.gls8)
boxplot(Number_of_Offspring ~ Size * Age, data=gamb2)
#However, their experimental design included Size as a categorical variable
#What happens if we do that?
#It looks like big females have a greater variance in offspring number.
#We can construct models with and without taking that into account
M1.cat <- gls(Number_of_Offspring ~ Size * Age, data=gamb2)
plot(M1.cat)
summary(M1.cat)
vf10 <- varIdent(form = ~1 | Size)
M2.cat <- gls(Number_of_Offspring ~ Size*Age, weights=vf10, data=gamb2)
summary(M2.cat)
anova(M1.cat, M2.cat)
plot(M2.cat)
#Modeling the residuals does fix the heterogeneity. Now the interaction is
#no longer significant, but age and size are both significant.
M3.cat <- gls(Number_of_Offspring ~ Size + Age, weights=vf10, method="ML", data=gamb2)
M4.cat <- gls(Number_of_Offspring ~ Size*Age, weights=vf10, method="ML", data=gamb2)
M5.cat <- gls(Number_of_Offspring ~ Size, weights=vf10, method="ML", data=gamb2)
anova(M4.cat,M3.cat,M5.cat)
summary(M5.cat)
#The best model includes only Size -- bigger females have more offspring
M6.cat <- gls(Number_of_Offspring ~ Size, weights=vf10, method="REML", data=gamb2)
summary(M6.cat)
#Size definitely has an effect -- there may be an interaction between size and age.
#The effect of age is not very convincing
#Look at the boxplots to see:
boxplot(Number_of_Offspring ~ Size + Age, data=gamb2)
boxplot(Number_of_Offspring ~ Age, data=gamb2)
boxplot(Number_of_Offspring ~ Size, data=gamb2)
|
94403a6906157afc40bfdfdc5c81b07a7e41405e
|
fef1aaea89907036cb1f524a98dfb1139af95447
|
/ecs132/final/ProbB.R
|
df8451ff3fa878f5b50e15ad1643c33228c3ed43
|
[] |
no_license
|
seanjedi/Old_Code
|
2998d3e67d3afed969b86ece682c690ffd6479b5
|
f0788faa73b001167e32f7091db5a3004c73f71a
|
refs/heads/master
| 2020-04-05T06:02:23.013771
| 2018-11-09T01:36:16
| 2018-11-09T01:36:16
| 156,622,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,686
|
r
|
ProbB.R
|
# wrap all problem B into one function
probB <- function()
{
  # Orchestrates the whole problem-B pipeline: load the data, split it
  # once, then run each of the four analyses on that same partition.
  split_data <- getTrainingAndTest(getData())
  meanAnalyze(split_data$training, split_data$test)
  print(regressionAnalyze(split_data$training, split_data$test))
  print(logisticAnalyze(split_data$training, split_data$test))
  principalAnalyze(split_data$training, split_data$test)
}
# reads the data in from the .txt file
getData <- function()
{
  # BUG FIX: library() errors immediately if data.table is missing, whereas
  # the original require() only returned FALSE and let fread() fail later
  # with a confusing "could not find function" error.
  library(data.table)
  # fread is used because it is significantly faster than read.csv for this
  # large file; note it returns a data.table rather than a data.frame.
  fread("YearPredictionMSD.txt")
}
# get the training and test data sets from all data
getTrainingAndTest <- function(data)
{
  # Deterministic 2/3 : 1/3 row split. The seed is fixed so repeated runs
  # produce the same partition.
  set.seed(101)
  idx <- sample.int(nrow(data), floor(2 / 3 * nrow(data)), replace = FALSE)
  list(training = data[idx, ], test = data[-idx, ])
}
#COMPARISON OF MEANS
meanAnalyze <- function(training, test)
{
  # Split the training rows at 1996 (pre/post autotune) and compare the
  # means of column 77 between the two groups with a Welch t-test.
  pre_mask <- (training$V1 < 1996)
  pre_autotune <- training[pre_mask, ]
  post_autotune <- training[!pre_mask, ]
  comparison <- t.test(pre_autotune[, 77], post_autotune[, 77])
  # print() returns its argument invisibly, so the htest object is still
  # available to callers that capture the result.
  print(comparison)
}
# find the mean square error for using ALL variables in the training set
regressionAnalyze <- function(training, test)
{
  # OLS of year (V1) on every other column, scored on the held-out test
  # set via the shared MSE helper.
  fit <- lm(V1 ~ ., training)
  computeAverageSquaredPredictionError(test, fit$coefficients)
}
logisticAnalyze <- function(training, test)
{
  # Binary response: 1 when the song year is 1996 or later.
  is_post_1996 <- ifelse(training[, 1] >= 1996, 1, 0)
  # Fit the logistic model on every remaining column.
  model <- glm(is_post_1996 ~ ., data = training[, -1], family = binomial)
  # Predicted probabilities on the test set via the inverse-logit link.
  linear_pred <- as.matrix(cbind(1, test[, 2:ncol(test)])) %*% model$coefficients
  probs <- 1 / (1 + exp(-1 * linear_pred))
  # Rows we classify as >= 1996.
  flagged <- (probs > 0.5)
  # Among those, the proportion whose true year is actually before 1996,
  # i.e. the false-positive proportion of the predicted-post-1996 group.
  mean(as.matrix(test)[flagged, 1] < 1996)
}
# PCA regression sweep: for k = 1..#components, regress year on the first k
# principal components of the training predictors, score each fit on the
# test set, and plot mean squared error against k (requires ggplot2).
principalAnalyze <- function(training, test)
{
  require(ggplot2)
  # Rotation (loadings) matrix of the training predictors (column 1 = year
  # is excluded). NOTE(review): prcomp centers by default, but the raw (un-
  # centered) matrices are projected below -- confirm this is intentional.
  P <- prcomp(training[,-1])$rotation # principal components matrix
  # get the components from the training matrix and test matrix
  components <- as.matrix(training[,2:ncol(training)]) %*% P
  testComponents <- as.matrix(test[,2:ncol(test)]) %*% P
  # so the computeAvSqPrErr function works
  testComponents <- cbind(test[,1], testComponents)
  # establish first colum of data as the song years
  data <- as.data.frame(components)
  errors <- c() # to store errors as we process them
  # this takes a really long time to run, because it is not optimized
  # but I am unsure of how to optimize it any better. We have to do the lm 90 times
  for(i in 1:ncol(components))
  {
    # find the mean square errors for each component
    # Build the formula "training$V1 ~ components[,1] + ... + components[,i]"
    # as text; the component matrix is looked up in the calling environment.
    PredictorVariables <- paste("components[,", 1:i, "]", sep="")
    Formula <- formula(paste("training$V1 ~ ", paste(PredictorVariables, collapse=" + ")))
    coeffs <- lm(Formula, data)$coefficients
    errors <- append(errors, computeAverageSquaredPredictionError(testComponents[,1:(i+1)], coeffs))
  }
  # now plot the results
  plotData <- data.frame(components = 1:ncol(components), meanSquareErrors = errors)
  plot <- ggplot(plotData, aes(x=components, y=meanSquareErrors)) + geom_line()
  plot + labs(title="Principal Component Analysis", x = "Number of Principal Components", y = "Mean Squared Error")
}
# returns the mean squared error between the test set and a prediction
computeAverageSquaredPredictionError <- function(test, coeffs)
{
  # Args:
  #   test:   data set whose FIRST column is the response; the remaining
  #           columns are the predictors, in the order used to fit coeffs.
  #   coeffs: coefficient vector with the intercept first, e.g. from
  #           lm(...)$coefficients.
  # Returns: mean squared prediction error over all test rows.
  predictions <- as.matrix(cbind(1, test[, -1])) %*% coeffs
  # BUG FIX: the original ended with an assignment, which made the return
  # value invisible; the value is now returned directly (and visibly).
  mean((predictions - test[, 1])^2)
}
|
0b2c314990fc11bc5ac866d56be514aa2468cf0e
|
b5a1bd1654ad240813aec25a16a236b198558095
|
/Archive/paper/results/archive/fields.R
|
b134c611a6035e0469c9a84605bf8bad0717e6e2
|
[] |
no_license
|
trevor-harris/assimilation-cfr
|
af4d6125dff636f0edd338f0bd4fbbd57a176aca
|
60a311dccafc4d524136e6400a41118a4ec2ff58
|
refs/heads/master
| 2021-03-22T00:23:45.788770
| 2020-06-18T19:06:09
| 2020-06-18T19:06:09
| 118,714,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,261
|
r
|
fields.R
|
rm(list = ls())
gc()
years = 851:1848
ens = 100
library(extdepth)
library(ncdf4)
library(dplyr)
library(reshape2)
library(ggplot2)
library(OpenImageR)
library(future)
library(future.apply)
source("../research/assimilation-cfr/code/depth_tests.R")
source("../research/assimilation-cfr/code/depths.R")
source("../research/assimilation-cfr/code/simulation.R")
plan(multiprocess)
# prepare data
# Read the full prior ensemble out of an open ncdf4 file, reorder it to
# (lat, lon, ensemble), and apply sqrt(|cos(latitude)|) area weighting to
# every member so high-latitude cells contribute proportionally less.
prep_prior = function(nc.prior) {
  n.lon = nc.prior$dim$lon$len
  n.lat = nc.prior$dim$lat$len
  n.ens = nc.prior$dim$time2$len
  # extract data from the ncdf4 objects (first declared variable, all slices)
  prior = ncvar_get(nc.prior, attributes(nc.prior$var)$names[1], start = c(1, 1, 1), count = c(-1, -1, -1))
  # transpose for intuitive (to me) layout
  prior = aperm(prior, c(2, 1, 3))
  # remove lat means (deliberately disabled; kept for reference)
  # prior = vapply(1:n.ens, function(x) prior[,,x] - rowMeans(prior[,,x]), FUN.VALUE = matrix(0, nrow = n.lat, ncol = n.lon))
  # normalize: per-cell latitude weight sqrt(|cos(lat)|)
  lats = as.vector(nc.prior$dim$lat$vals)
  latmat = matrix(rep(lats, n.lon), n.lat, n.lon)
  latmat = sqrt(abs(cos(latmat*pi/180)))
  prior = vapply(1:n.ens, function(x) prior[,,x]*latmat, FUN.VALUE = matrix(0, nrow = n.lat, ncol = n.lon))
  return(prior)
}
# Same preparation as prep_prior, but for a single time slice `t` of the
# posterior ensemble (dimension sub_ens): read, reorder to (lat, lon,
# ensemble), and apply the sqrt(|cos(lat)|) area weighting.
prep_post = function(nc.post, t) {
  n.lon = nc.post$dim$lon$len
  n.lat = nc.post$dim$lat$len
  n.ens = nc.post$dim$sub_ens$len
  # extract data from the ncdf4 objects (only the t-th time slice)
  ens = ncvar_get(nc.post, attributes(nc.post$var)$names[1], start = c(1, 1, t, 1), count = c(-1, -1, 1, -1))
  # transpose for intuitive (to me) layout
  ens = aperm(ens, c(2, 1, 3))
  # remove lat means (deliberately disabled; kept for reference)
  # ens = vapply(1:n.ens, function(x) ens[,,x] - rowMeans(ens[,,x]), FUN.VALUE = matrix(0, nrow = n.lat, ncol = n.lon))
  # normalize: per-cell latitude weight sqrt(|cos(lat)|)
  lats = as.vector(nc.post$dim$lat$vals)
  latmat = matrix(rep(lats, n.lon), n.lat, n.lon)
  latmat = sqrt(abs(cos(latmat*pi/180)))
  ens = vapply(1:n.ens, function(x) ens[,,x]*latmat, FUN.VALUE = matrix(0, nrow = n.lat, ncol = n.lon))
  return(ens)
}
# Collapse the first two (lat, lon) dimensions of a 3-d array into one,
# yielding a (lat*lon) x ensemble matrix in column-major order.
flatten = function(mat) {
  spatial_size <- dim(mat)[1] * dim(mat)[2]
  matrix(mat, spatial_size, dim(mat)[3])
}
# plots
# Per-cell exceedance of an ensemble outside a central region: for every
# grid cell (row of the flattened field), average how far each ensemble
# member falls below `cr$lower` or above `cr$upper` (0 when inside), and
# reshape the result back onto the lat x lon grid.
#   cr:         list with `lower`/`upper` bound vectors (one per grid cell)
#   gmat:       flattened (cells x members) field matrix
#   downsamp:   downsampling factor applied to the grid
#   nlat, nlon: grid dimensions. GENERALIZED: previously hard-coded to the
#               96 x 144 CCSM grid; the defaults keep that behaviour.
remove_cr = function(cr, gmat, downsamp = 1, nlat = 96, nlon = 144) {
  lower = cr$lower
  upper = cr$upper
  excess = rowMeans((gmat - lower) * (lower > gmat) + (gmat - upper) * (upper < gmat))
  matrix(excess, nlat / downsamp, nlon / downsamp)
}
# Plot a lat/lon climate field as a filled raster with a world-map overlay.
#   field:    (lat x lon) matrix of values, possibly downsampled
#   nc:       ncdf4 object supplying the lat/lon coordinate vectors
#   main:     plot title
#   downsamp: downsampling factor used when `field` was produced
#   zlim:     symmetric colour limits (default: symmetric about 0 over the
#             data range)
# BUG FIX: the original body immediately overwrote `field`, `nc` and
# `downsamp` with leftover debugging globals (prior[,,1], nc.prior, 1) and
# recomputed `zlim` without ever using it, so all four arguments were dead.
# The overwrites are removed, `zlim` now sets the colour-scale limits, and
# `main` is applied as the title.
field_plot <- function(field, nc, main = "", downsamp = 1, zlim = c(-max(abs(field)), max(abs(field)))) {
  lats = as.vector(nc$dim$lat$vals)[seq(1, 96, by = downsamp)]
  lons = as.vector(nc$dim$lon$vals)[seq(1, 144, by = downsamp)]
  # Longitudes are stored 0..360; shift to -180..180 for plotting.
  dimnames(field) = list(lats, ifelse(lons >= 180, lons - 360, lons))
  field.gg = melt(field)
  colnames(field.gg) = c("lat", "lon", "Temp")
  world = map_data("world")
  world = world[world$long <= 178, ]
  ggplot() +
    geom_raster(data = field.gg, aes(x = lon, y = lat, fill = Temp), interpolate = TRUE) +
    geom_polygon(data = world, aes(x = long, y = lat, group = group), fill = NA, color = "black") +
    coord_cartesian() +
    scale_fill_gradient2(midpoint = 0, low = "blue", mid = "white", high = "red", limits = zlim) +
    theme_void() +
    ggtitle(main) +
    theme(plot.title = element_text(hjust = 0.5))
}
##### Actual data
# Open the posterior and prior temperature reconstructions (NetCDF).
nc.post = nc_open('../research/assimilation-cfr/data/tas_ens_da_hydro_r.1000-2000_d.16-Feb-2018.nc')
nc.prior = nc_open('../research/assimilation-cfr/data/tas_prior_da_hydro_r.1000-2000_d.16-Feb-2018.nc')
prior_ind = read.csv("../research/assimilation-cfr/data/prior_ens.txt", header = F)$V1
prior = prep_prior(nc.prior)
prior = flatten(prior[,,prior_ind])
# Extremal depth of each prior member with respect to the prior ensemble.
prior.depths = xdepth(prior, prior)
# also pull in K values
dir = "../research/assimilation-cfr/paper/results/results/"
files = list.files(dir)
# Read one saved result file, tagging rows with the era number parsed
# from the digits in the file name.
read_era = function(dir, file) {
  cbind(readRDS(paste0(dir, file)), era = as.numeric(strsplit(file, "\\D+")[[1]][-1]))
}
temperature = read_era(dir, files[1])
# NOTE(review): files[2] is deliberately appended after files 3..n --
# presumably to keep the eras in chronological order given the file
# naming; confirm against the directory contents.
for(f in 3:length(files)) {
  temperature = rbind(temperature, read_era(dir, files[f]))
}
temperature = rbind(temperature, read_era(dir, files[2]))
temperature[["time"]] = years
##### Exceedence plots
save_dir = "research/assimilation-cfr/paper/results/"
# CDF of prior to get central regions
prior.ranks = rank(prior.depths) / length(prior.depths)
cr = central_region(prior, prior.ranks, 0.05)
# Import post years of interest and remove the cr
post = data.frame()
times = c(2, 300, 600, 998)
kt = numeric(0)
for(t in times) {
  post.t = melt(remove_cr(cr, flatten(prep_post(nc.post, t))))
  post.t[["time"]] = t
  names(post.t) = c("lat", "lon", "val", "time")
  post = rbind(post, post.t)
  # Standardised K statistic for this year, rounded for the facet label.
  kt = c(kt, formatC(temperature$stat[t] / sqrt(100*100 / 200), digits = 2))
}
# format into lats and lons properly (grid is 96 lat x 144 lon; longitudes
# are shifted from 0..360 to -180..180)
nc = nc.prior
lats = as.vector(nc$dim$lat$vals)
lons = as.vector(nc$dim$lon$vals)
lats = rep(lats, 144*length(times))
lons = rep(rep(lons, each = 96), 4)
lons = ifelse(lons >= 180, lons - 360, lons)
post[["lat"]] = lats
post[["lon"]] = lons
# Re-label the time factor with "<year>CE (K = <value>)" facet titles.
post[["time"]] = factor(post[["time"]], levels = c("2", "300", "600", "998"),
                        labels = c(paste0(years[2], "CE ","(K = ", kt[1], ")"),
                                   paste0(years[300], "CE ","(K = ", kt[2], ")"),
                                   paste0(years[600], "CE ","(K = ", kt[3], ")"),
                                   paste0(years[998], "CE ","(K = ", kt[4], ")")))
# get world "underlay"
world = map_data("world")
world = world[world$long <= 178, ]
# Faceted exceedance maps, one panel per selected year.
ggplot() +
  geom_raster(data = post, aes(x=lon, y=lat, fill=val), interpolate = TRUE) +
  geom_polygon(data = world, aes(x=long, y=lat, group=group), fill = NA, color="black") +
  coord_cartesian() +
  scale_fill_gradient2(midpoint=0, low="blue", mid="white", high="red") +
  theme_void() +
  facet_wrap(. ~ time, nrow = 2) +
  theme(plot.title = element_text(hjust = 0.5))
ggsave("../research/assimilation-cfr/paper/results/multiyear_fields.png")
|
bb9d1759d48e515512b979fe48e39177abcdaebb
|
0b1a775c53a028843617d6f6fb36b456564a9915
|
/LibraryCheck.R
|
01635a2efb23247cdf90e22214d350f496b2e703
|
[] |
no_license
|
GMRI-SEL/LabFunctionsandCode
|
f566c33cd2255713c316354a463e748eb67b64d3
|
594ec0c10670d6fdcccafb7a25b2b511923952f0
|
refs/heads/master
| 2020-06-03T13:30:53.045366
| 2019-12-10T16:35:52
| 2019-12-10T16:35:52
| 191,587,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
LibraryCheck.R
|
# Library check helper function -------------------------------------------
library_check <- function(libraries) {
  ## Details
  # Loads every package named in `libraries`, installing any that are not
  # already available before loading them.
  # Args:
  #   libraries = character vector of required package names
  # Returns: a list (one element per package, from lapply); the function is
  #          called for its side effect of attaching the packages.
  ## Start function
  lapply(libraries, FUN = function(pkg) {
    already_available <- require(pkg, character.only = TRUE)
    if (!already_available) {
      install.packages(pkg, dependencies = TRUE)
      library(pkg, character.only = TRUE)
    }
  })
  ## End function
}
|
da51eaf1ee246f694391545f83b5d71c879353db
|
910d76aba43a33729b07d254b4d3c5b99c21ea03
|
/ui.R
|
379bf4dfee50ac99f8d4ccd1905869f9b18c9da0
|
[] |
no_license
|
tobrom/share-price-analysis
|
f531ec119516ddb1b9fc3a9bade36edb3c03bdfe
|
b7e533b353be03a82455dee06a287940c25f0f9e
|
refs/heads/master
| 2021-01-18T09:04:57.952535
| 2017-03-21T13:05:58
| 2017-03-21T13:05:58
| 84,309,534
| 0
| 0
| null | 2017-03-21T12:53:56
| 2017-03-08T10:43:18
|
R
|
UTF-8
|
R
| false
| false
| 3,439
|
r
|
ui.R
|
# Shiny UI for "The Share Price Analyser": OMXS30 share selection plus
# moving-average, decomposition and forecasting panels.
library(shiny)
library(xlsx)
# Ticker universe: OMXS30 constituents read from the bundled spreadsheet.
symbols <- read.xlsx("OMXS30.xlsx", sheetName = 1)
# Yahoo Finance symbol = security symbol with spaces replaced by dashes
# plus the ".ST" (Stockholm) suffix.
# NOTE(review): yahooTicker is not referenced anywhere in this UI --
# presumably consumed by the matching server.R; confirm.
symbols$yahooTicker <- paste0(gsub(" ", "-", symbols$SecuritySymbol), ".ST")
shinyUI(fluidPage(
  titlePanel("The Share Price Analyser"),
  hr(),
  sidebarLayout(
    # Left column: explanation text, user inputs and the textual analysis.
    sidebarPanel(
      helpText("This application allows you to select a OMXS30 share and analyse it.
               The analysis starts by smoothing the entire chosen history with a LOESS regression.
               After that, the long term trend represented by a simple moving average
               for 50 and 200 days is analysed followed by the user defined short time trend.
               After the moving averages have been calculated, monthly closing prices are
               decomposed into a trend, a seasonal and a remaining
               irregular component by LOESS smoothing. Finally, the monthly closing
               prices are modelled with a variety of both exponential and ARIMA models. For
               each of these methods, the best model based on historic data is
               chosen and used for future predictions. The best exponential model is
               chosen by minimizing the log-likelihood and the best ARIMA model
               is picked based on the lowest AIC."),
      hr(),
      selectInput("tickers", "Ticker:", choices = symbols$CompanyName),
      dateInput('startDate', label = 'Start Date:', value = "2005-01-15"),
      hr(),
      # User-defined short-term moving-average windows (in days).
      sliderInput("shortMA",
                  "Moving Average (Short):",
                  min = 5,
                  max = 15,
                  value = 10),
      sliderInput("longMA",
                  "Moving Average (Long):",
                  min = 20,
                  max = 50,
                  value = 25),
      hr(),
      sliderInput("period",
                  "Months to forecast:",
                  min = 1,
                  max = 24,
                  value = 12),
      hr(),
      # Inputs are only sent to the server when the button is pressed.
      submitButton("Analyse"),
      hr(),
      h3("Analysis"),
      hr(),
      h4("Overview"),
      textOutput("text1"),
      hr(),
      h4("Current Trends"),
      textOutput("text2"),
      hr(),
      h4("Seasonality"),
      textOutput("text3"),
      hr(),
      h4("Prediction"),
      textOutput("text4"),
      hr(),
      h4("Summary"),
      textOutput("text5")
    ),
    # Right column: the six plots rendered by the server.
    mainPanel(
      h3("Historical Price Development"),
      h6("Historical daily closing prices smoothed with LOESS"),
      plotOutput("plot1"),
      h3("Long Term Trend - MA(50) vs. MA(200)"),
      h6("Current share price compared to long term moving averages"),
      plotOutput("plot12"),
      h3("Short Term Trend - Chosen Period"),
      h6("Current share price compared to short term (user defined) moving averages"),
      plotOutput("plot2"),
      h3("Price Decomposition"),
      h6("Monthly closing prices have been decomposed into a trend, a seasonal component and the remaining residuals"),
      plotOutput("plot3"),
      h3("Prediction - Exponential Model"),
      h6("Based on the monthly history an optimal exponential model has been used for prediction"),
      plotOutput("plot4"),
      h3("Prediction - ARIMA Model"),
      h6("Based on the monthly history an optimal ARIMA model has been used for prediction"),
      plotOutput("plot5")
    )
  )
)
)
|
382846c3d4db77a69f787df4e5e7e875aa3c7552
|
dcaadda89752dbb7fcc0b19268db5df9dd74352e
|
/man/by_s_m3.Rd
|
ee8b22a1417e4e577d92605574ee3011b1be0239
|
[] |
no_license
|
boshek/limnotools
|
12f9266ea47a60979dcc9629b725aabfe7816c36
|
c64340d19a39715d13babe9b24d200cedb87cb2c
|
refs/heads/master
| 2021-01-12T03:39:00.773099
| 2017-06-12T21:04:57
| 2017-06-12T21:04:57
| 78,247,279
| 3
| 0
| null | 2017-08-31T15:27:00
| 2017-01-06T23:44:53
|
R
|
UTF-8
|
R
| false
| true
| 1,045
|
rd
|
by_s_m3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/by_s_m.R
\name{by_s_m3}
\alias{by_s_m3}
\title{Service subroutine for determining Mixed Layer Depth for a SPECIFIED NUMBER OF SEGMENTS}
\usage{
by_s_m3(nr, z0, zmax, z, sigma)
}
\arguments{
\item{nr}{fixed number of segments}
\item{z0}{initial depth: use to omit data above z0}
\item{zmax}{maximum depth: use to omit data below zmax}
\item{z}{input x data array, should be increasing function of index}
\item{sigma}{input y data array}
}
\value{
list(eps=s_mNresults$eps, cline=cline, by_s_m=ss,smz=smz,sms=sms)
\itemize{
\item eps: the maximum error over all intervals.
\item smz: final z array of segmented data
\item sms: final sigma array of segmented data
\item by_s_m: position of MLD = smz(2); or -99 if something is not right
\item cline: Cline depth is defined as the midpoint of the segment connecting inflection points that has the maximum slope
}
}
\description{
Service subroutine for determining Mixed Layer Depth for a SPECIFIED NUMBER OF SEGMENTS
}
|
bfda0401a0c2baa1574484c6546a7fad80d3a2fb
|
99593b8dd68e64afcc5761d832c3eccf982ed275
|
/analysis/increasing-data.R
|
76824064e5d9e96a33607787ae8a0356845ab27a
|
[] |
no_license
|
vcerqueira/forecasting_experiments
|
3de99974bdd843b688b4977673861a4f7fb0d9ff
|
1a406e969f0c4357fdd63c50c599f13ce95fa5dc
|
refs/heads/master
| 2020-03-25T07:00:31.449705
| 2018-12-18T14:46:58
| 2018-12-18T14:46:58
| 143,536,625
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
increasing-data.R
|
# Rolling average-rank comparison of forecasting models as the amount of
# training data increases (one results file per time series).
library(tsensembler)
library(reshape2)
library(ggplot2)
setwd("/Users/vcerqueira/Desktop/ADETK2/ID/")
# Load every results file in the directory. Each load() is expected to
# define `adddata_analysis` inside the anonymous function's environment.
# NOTE(review): rm.null presumably comes from tsensembler -- confirm.
x <- lapply(list.files(),
            function(x) {
              load(x)
              rm.null(adddata_analysis)
            })
adddata_analysis <- sapply(x, rm.null)
length(adddata_analysis)
names(adddata_analysis) <- paste0("ts_",1:14)
# Per series: stack the per-step results, rank the four selected models
# (columns 1,2,3,5) within each row, smooth the ranks with a 100-step
# rolling mean, and keep the first 2800 steps.
res <-
  lapply(adddata_analysis,
         function(x) {
           ds <- Reduce(rbind.data.frame,
                        lapply(x, as.data.frame))
           ds_by_arima <- as.data.frame(ds)
           ds_rank <- as.data.frame(t(apply(ds_by_arima[,c(1,2,3,5)],1,rank)))
           ds_rank <- roll_mean_matrix(ds_rank,100)
           ds_rank <- round(ds_rank,3)
           as.matrix(head(ds_rank,2800))
         })
# Average the rank trajectories over all 14 series.
resf<-apply(simplify2array(res), 1:2, mean)
colnames(resf) <-
  c("ADE","ARIMA","Naive","SimpleTrim")
# Long format for ggplot: one line per model over time.
df <- melt(resf)
colnames(df) <- c("Time","Model","AvgRank")
ggplot(df, aes(x=Time,
               y=AvgRank,
               color=Model)) +
  geom_line(lwd=.8) +
  theme_minimal() +
  geom_vline(xintercept = 150) +
  theme(legend.position = "top")
|
0b37dcb22affbb209e8d76cec546c22cad4ed6f4
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.business.applications/man/alexaforbusiness_update_device.Rd
|
34c2a803d3756b2b48783c9ec082dfc65e8642e9
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 638
|
rd
|
alexaforbusiness_update_device.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alexaforbusiness_operations.R
\name{alexaforbusiness_update_device}
\alias{alexaforbusiness_update_device}
\title{Updates the device name by device ARN}
\usage{
alexaforbusiness_update_device(DeviceArn, DeviceName)
}
\arguments{
\item{DeviceArn}{The ARN of the device to update. Required.}
\item{DeviceName}{The updated device name. Required.}
}
\value{
An empty list.
}
\description{
Updates the device name by device ARN.
}
\section{Request syntax}{
\preformatted{svc$update_device(
DeviceArn = "string",
DeviceName = "string"
)
}
}
\keyword{internal}
|
3a0992be50b879fc24650cae8853a38d6b27c3a5
|
387eb7f986d66b82012fc2f5d47cacfec57a689c
|
/scripts/final_analysis.R
|
637bd5df66067b941f9376b85689444b6ae64626
|
[] |
no_license
|
jinxbuddy/Genetic-Diversity-Analysis-1KGP
|
f3d8520462a5cbbd28b102ded9e3b787b88039ba
|
2dea0d451c2cd9093094eba4bf40cd514fb404a7
|
refs/heads/main
| 2023-07-15T09:54:23.811968
| 2021-09-03T08:19:34
| 2021-09-03T08:19:34
| 401,106,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,586
|
r
|
final_analysis.R
|
# final_analysis.R -- population-structure analysis of 12 populations from
# the 1000 Genomes Project: PLINK-based QC filtering, PCA, and sNMF
# admixture analysis via the LEA package.
#
# NOTE(review): the script shells out to `plink`/`plink2`, which must be on
# the system PATH, and hard-codes a Windows working directory below.
#setwd
setwd("F:\\r_projects\\thesis\\1000G_snpr")
#load dependencies
library(ggplot2)
library(LEA)
library(mapplots)
library(fields)
library(tidyverse)
# selection of a subset of data by families: keep 12 populations by family
# ID (column X1) and write the (FID, IID) pairs for `plink --keep`
read_tsv("1000G_phase3_common_norel.fam", col_names = F) %>%
select(X1,X2) %>%
filter(X1 == "GBR" | X1 == "FIN" | X1 == "PUR" | X1 == "PJL" | X1 == "CDX" | X1 == "ACB" | X1 == "ESN" | X1 == "BEB" | X1 == "STU" | X1 == "ITU" | X1 == "MSL" | X1 == "CLM") %>%
write_delim("1kg_hapmap3_12pop.txt", col_names = FALSE)
# subset the binary fileset to the selected samples
system("plink --bfile 1000G_phase3_common_norel --keep 1kg_hapmap3_12pop.txt --make-bed --out 12pop_1kg_hapmap3")
#missingness filters: drop variants missing in >10% of samples and samples
#missing >25% of genotypes
system("plink --bfile 12pop_1kg_hapmap3 --geno 0.1 --mind 0.25 --threads 2 --make-bed --out missingness_filtered_data ")
#minor allele frequency reporting (plain and genotype-count reports, plus
#an allele-frequency spectrum binned by plink2)
system("plink --bfile missingness_filtered_data --freq --out alle_frequency_plink1.9")
system("plink --bfile missingness_filtered_data --threads 2 --freqx --out allele_frequency_plink1.9")
system("plink2 --bfile missingness_filtered_data --freq alt1bins=0.01,0.02,0.03,0.04,0.05,0.1,0.2,0.3,0.4 --threads 2 --out allele_spectrum")
#Selectinga Sample Subset Without Very Close Relatives
#(KING kinship cutoff 0.177 ~ first-degree relatives)
system("plink2 --bfile missingness_filtered_data --king-cutoff 0.177 --threads 2 --make-bed --out relpruned_data")
#hardy-weinberg equilibrium filter
#NOTE(review): this filters `missingness_filtered_data`, not
#`relpruned_data` — confirm whether the relatedness pruning above was
#meant to feed into the rest of the pipeline.
system("plink2 --bfile missingness_filtered_data --hwe 1e-25 keep-fewhet --make-bed --out hwe_filtered_data")
# Selecting a SNP Subset in Approximate Linkage Equilibrium
# (window 200, step 50, r^2 threshold 0.2)
system("plink --bfile hwe_filtered_data --indep-pairwise 200 50 0.2 --threads 2 --out ldpruned_snplist")
system("plink --bfile hwe_filtered_data --extract ldpruned_snplist.prune.in --threads 2 --make-bed --out ldpruned_data")
#maf filtered data (keep variants with MAF >= 0.1)
system("plink --bfile ldpruned_data --maf 0.1 --threads 2 --make-bed --out maf_filtered_data")
#PCA
system("plink2 --bfile maf_filtered_data --pca --threads 2 --out pca_results")
#loading the eigenvector table; `comment.char=""` keeps the leading "#FID"
#header column readable
pca_table <- read.table("pca_results.eigenvec", header=TRUE, comment.char="")
#loading the ggplot library (already attached above; harmless repeat)
library(ggplot2)
# PCA plot of PC2 vs PC3, colored by population (X.FID)
ggplot(data = pca_table) +
geom_point(mapping = aes(x = PC2, y = PC3, color = X.FID, ), size = 2, show.legend = TRUE ) +
geom_hline(yintercept = 0, linetype="dotted") +
geom_vline(xintercept = 0, linetype="dotted") +
labs(title = "PCA Plot") +
theme_minimal()
##### the admixture analysis
#converting ped to vcf
system("plink --bfile maf_filtered_data --recode vcf --threads 2 --out 12_pop_1kg")
#converting vcf to lfmm
vcf2lfmm("12_pop_1kg.vcf")
# converting lfmm to geno
lfmm2geno("12_pop_1kg.lfmm")
# running admixture (sNMF) for K = 13..15, 10 repetitions each
project3.snmf = snmf("12_pop_1kg.geno",
K = 13:15,
entropy = TRUE,
repetitions = 10,
project = "new")
# plot cross-entropy criterion of all runs of the project
plot(project3.snmf, cex = 2, col = "red3", pch = 19)
# get the cross-entropy of the 10 runs
# NOTE(review): K = 12 is outside the fitted range (K = 13:15 above), so
# this call appears inconsistent — confirm the intended K.
ce3 = cross.entropy(project3.snmf, K =12 )
# select the run with the lowest cross-entropy
best2 = which.min(ce3)
# display the Q-matrix as a stacked ancestry bar chart (one color per
# ancestral component; 12 colors for K = 12)
my.colors <- c("red4", "slateblue4",
"mediumblue", "yellow", "seagreen2", "sienna","green", "indianred1", "dimgray", "darkgreen","cyan","darkgoldenrod1")
barchart(project3.snmf, K = 12, run = best2,
border = NA, space = 0, col = my.colors,
xlab = "Individuals", ylab = "Ancestry proportions",
main = "Ancestry matrix") -> bp
axis(1, at = 1:length(bp$order),
labels = bp$order, las = 3, cex.axis = .4)
###For further data exploration
# NOTE(review): `project2.snmf` is never created in this script (only
# `project3.snmf` above), and the K values used below (7, 4) are outside
# the fitted range 13:15.  This section presumably comes from an earlier
# analysis session — confirm or update before running it.
# show the project
show(project2.snmf)
# summary of the project
summary(project2.snmf)
# get the cross-entropy for all runs for K = 7
ce7 = cross.entropy(project2.snmf, K = 7)
# get the cross-entropy for the 2nd run for K = 4
ce4 = cross.entropy(project2.snmf, K = 4, run = 2)
# get the ancestral genotype frequency matrix, G, for the 2nd run for K = 7.
freq = G(project2.snmf, K = 7, run = 2)
# display the Q-matrix
# NOTE(review): `as.qmatrix` is not provided by any package attached in
# this script (it looks like tess3r's) — verify the dependency.
Q.matrix <- as.qmatrix(Q(project2.snmf, K = 7, run = best2))
my.colors <- c("red4", "slateblue4",
"mediumblue", "yellow", "seagreen2", "sienna","green", "indianred1", "dimgray", "darkgreen")
barplot(Q.matrix,
border = NA,
space = 0,
col = my.colors,
xlab = "Individuals",
ylab = "Ancestry proportions",
main = "Ancestry matrix") -> bp
axis(1, at = 1:nrow(Q.matrix), labels = bp$order, las = 3, cex.axis = .4)
|
c122fdfc384ede4cbbddcb80e0910f0278fdfd6c
|
3ab0f950f50f50e9207b2917ca6bd867217433a9
|
/main.R
|
1313b594d95fa3e032c41392fbc399079aa97ad5
|
[] |
no_license
|
iamchetry/AutoDataAnalysis
|
03c9afba33415518db85cc53353928ed5d3610cb
|
fdaac963803abb1f6d3b11fbfe357b6f591f086c
|
refs/heads/main
| 2023-03-01T00:30:25.343183
| 2021-02-04T18:55:11
| 2021-02-04T18:55:11
| 334,736,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,999
|
r
|
main.R
|
# main.R -- exploratory data analysis for two datasets:
#   Q1/Q2: ISLR `Auto` data — scatter/density/box plots, sequential outlier
#          removal, and linear models of mpg (with and without interactions).
#   Q3:    MASS `Boston` housing data — pairwise plots, correlations, and
#          comparisons of suburbs by rooms per dwelling.
# NOTE(review): the outlier-removal steps are order-dependent; each subset()
# call filters the result of the previous one.
#install.packages('ISLR')
#install.packages('MASS')
library(ISLR)
library(lattice)
data_ = Auto
dim(data_)
# attach() puts the Auto columns on the search path for the plot formulas
# below (generally discouraged; kept as-is).
attach(data_)
# 1st Question---------------------------------------------------
#------------Pairwise scatter plots only across the numeric variables-----------
pairs(data_[, 1:7], main='Pair wise Scatter Plots for Auto data')
#-------------Separate Scatter Plots between mpg (response variable) and other features-------------
par(mfrow = c(6, 1))
par(mar=c(1,1,1,1))
plot(mpg~cylinders, data = data_, main='mpg vs cylinders')
plot(mpg~displacement, data = data_, main='mpg vs displacement')
plot(mpg~horsepower, data = data_, main='mpg vs horsepower')
plot(mpg~weight, data = data_, main='mpg vs weight')
plot(mpg~acceleration, data = data_, main='mpg vs acceleration')
plot(mpg~year, data = data_, main='mpg vs year')
#-----------Density plots for mpg and weight for distinct labels of origin------------
par(mfrow = c(2, 1))
densityplot(~mpg | origin, data = data_, auto.key = list(space = "right"),
groups = origin, main='MPG vs Origin') # Used for Outlier Removal too
densityplot(~weight | origin, data = data_, auto.key = list(space = "right"),
groups = origin, main='Weight vs Origin')
#-----------Density plots for mpg, weight, acceleration and horsepower for different values of cylinders------------
densityplot(~mpg | cylinders, data = data_, auto.key = list(space = "right"),
groups = cylinders, main='MPG vs Cylinders') # Used for Outlier Removal too
densityplot(~weight | cylinders, data = data_, auto.key = list(space = "right"),
groups = cylinders, main='Weight vs Cylinders')
densityplot(~acceleration | cylinders, data = data_, auto.key = list(space = "right"),
groups = cylinders, main='Acceleration vs Cylinders')
densityplot(~horsepower | cylinders, data = data_, auto.key = list(space = "right"),
groups = cylinders, main='Horsepower vs Cylinders')
#----------Correlation matrix across the numeric variables------------
cor_data_auto = cor(data_[, 1:7])
#----------Additional Scatter plots----------
par(mfrow = c(2, 1))
plot(weight~year, data = data_, main='weight vs year')
plot(acceleration~horsepower, data = data_, main='acceleration vs horsepower')
#------------Box Plots Analysis------------
par(mfrow = c(3, 1))
boxplot(mpg, horizontal = TRUE, main='MPG Boxplot')
boxplot(horsepower, horizontal = TRUE, main='Horsepower Boxplot') # Outlier found at horsepower > 200
boxplot(acceleration, horizontal = TRUE, main='Acceleration Boxplot') # Outlier found at acceleration (>=22 and <=8)
#---------Outliers Identified Below (inspection only; filtering follows)-----------
subset(data_, horsepower > 200)
subset(data_, acceleration <= 8 | acceleration >= 22)
subset(data_, mpg>40 & origin==2)
subset(data_, mpg>41 & origin==3)
subset(data_, mpg>=38 & cylinders==6)
print(dim(data_)[1]) # 392 rows
#----------Removal of Outliers (sequential; each step filters the last)-----------
data_ = subset(data_, horsepower <= 200)
print(dim(data_)[1])
data_ = subset(data_, acceleration > 8 & acceleration < 22)
print(dim(data_)[1])
data_ = subset(data_, mpg<=40 | origin!=2)
print(dim(data_)[1])
data_ = subset(data_, mpg<=41 | origin!=3)
print(dim(data_)[1])
data_ = subset(data_, mpg<38 | cylinders!=6)
print(dim(data_)[1]) # 368 rows
#--------------Writing the cleaned Pre-Processed Data--------------
# NOTE(review): absolute, machine-specific output path.
write.table(data_, file='/Users/iamchetry/Documents/UB_files/506/hw_1/auto_pre_processed.RData')
#-----------Please Use read.table to read the RData--------------
# 2nd Question -----------Model Training------------
# Treat origin as categorical before modelling.
data_$origin = as.factor(data_)$origin = as.factor(data_$origin)
model_ = lm(mpg~., data = data_[1:8]) # Multiple Regression
summary_model = summary(model_)
print(summary_model)
model_interaction = lm(mpg~(cylinders+displacement+horsepower+weight+acceleration+year+
origin)^2, data = data_[1:8]) # Multiple Regression by incorporating the interactions
summary_model_interaction = summary(model_interaction)
print(summary_model_interaction)
# 3rd Question -------------------------------------------------------
library('MASS')
dim(Boston)
head(Boston)
attach(Boston)
#----------Univariate Scatter Plots---------
par(mfrow = c(9, 1))
par(mar=c(1,1,1,1))
plot(crim, main='Crime Rate') #Skewed near 400th index
plot(zn, main='zn') #Skewed at 0
plot(indus, main='indus') # Skewed at 18.16
plot(chas, main='chas') # Large population at Label 0 only
plot(age, main='age')
plot(dis, main='dis')
plot(ptratio, main='ptratio') # Skewed at 20.2
plot(black, main='black') # Skewed at 400
plot(lstat, main='lstat')
#-----------Pairwise Scatter Plots------------
pairs(Boston, main='Pair wise Scatter Plots fo Boston data')
#----------Correlation matrix across variables------------
cor_data_boston = cor(Boston)
print(cor_data_boston)
#-----------Rows 400-450 have high values for Crime Rate, Tax Rate and Pupil-Teacher Ratio----------
par(mfrow = c(3, 1))
plot(crim, main='Crime Rate Scatter Plot')
plot(tax, main='Tax rate Scatter Plot')
plot(ptratio, main='Pupil-Teacher Ratio Scatter Plot')
#-----------Calculated Ranges-----------
range(crim)
range(tax)
range(ptratio)
#----------Suburbs Counts having average number of rooms per dwelling more than 7 or 8----------
dim(subset(Boston, rm>7)) # 64 rows (12.6% of total)
dim(subset(Boston, rm>8)) # 13 rows (2.5% of total)
#---------Comparison of overall data vs data with rm > 8----------
par(mfrow = c(4, 2))
hist(Boston$crim, main='Overall Crime Rate')
hist(subset(Boston, rm>8)$crim, main='Crime Rate for rm > 8')
# Crime rate is significantly low with range from 0.02009 to 3.47428
hist(Boston$zn, main='Overall Zn') # mean is 11.3
hist(subset(Boston, rm>8)$zn, main='Zn for rm > 8') # mean is 13.6
hist(Boston$age, main='Overall Age') # mean is 68.5
hist(subset(Boston, rm>8)$age, main='Age for rm > 8') # mean is 71.5
hist(Boston$medv, main='Overall Median Value of homes') # median is 21.2
hist(subset(Boston, rm>8)$medv,main='Median Value of homes for rm > 8') # median is 48.3
|
591d5de700ac8005286b48f8fd73ddc4c6bf026f
|
7b9b0edde58cbb2ed9fb63a8def80935697a084b
|
/res.function.R
|
a3aae94716362ddb012f54699f614f57acb4d876
|
[] |
no_license
|
ariel32/GodvilleMonitor
|
6e6908877e9b707fa6a7c5113df8c911fec232d3
|
113d7d13d4950ccd48b3a7a671426f390d2141c8
|
refs/heads/master
| 2020-12-13T14:17:58.688849
| 2016-08-04T21:05:43
| 2016-08-04T21:05:43
| 36,492,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,456
|
r
|
res.function.R
|
# SUMMARIZATION BY 888 GODS
#
# Summarise activity and median attributes for every god in the Godville
# monitoring log.  Reads "DungeonsDB.csv" (semicolon-separated) from the
# working directory, fits a linear model wood_cnt ~ time per god, converts
# the slope into an estimated number of logs per day, and attaches per-god
# median statistics for the remaining tracked columns.
#
# Returns a data frame with one row per god, sorted by descending estimated
# daily wood rate; `active` is 1 for gods above the median rate.
#
# Changes vs. the original: the accidental "FUN =" local assignments inside
# the anonymous functions were removed (no-ops; values are unchanged),
# "stringsAsFactor" was spelled out fully (it only worked via partial
# argument matching), T/F were replaced with TRUE/FALSE, and a discarded
# head(res) call was dropped.
res.function = function() {
  d = read.csv("DungeonsDB.csv", sep = ";", stringsAsFactors = FALSE)
  godnames = unique(d$godname)
  #save(godnames, file = "godnames")
  # Slope of wood_cnt over time (wood per second) for each god.
  coeff <- sapply(godnames, FUN = function(x) {
    data = d[d$godname == x, ]
    as.numeric(lm(wood_cnt ~ time, data = data)$coefficients[2])
  })
  # Convert the slope coefficient into an estimated number of logs per day.
  res = data.frame(name = godnames,
                   woods = round(as.numeric(coeff * 60 * 60 * 24), 2),
                   stringsAsFactors = FALSE)
  res <- res[order(-res$woods), ]
  # summarization: a god is "active" when above the median wood rate
  res$active = ifelse(res$woods > median(res$woods, na.rm = TRUE), 1, 0)
  # Per-god medians of the remaining tracked columns.
  res$alignment <- sapply(res$name, function(x) median(d$alignment[d$godname == x]))
  res$arena.wins <- sapply(res$name, function(x) median(d$arena.wins[d$godname == x]))
  res$arena.loses <- sapply(res$name, function(x) median(d$arena.loses[d$godname == x]))
  res$arena.rate <- sapply(res$name, function(x) median(d$arena.wins[d$godname == x]) / median(d$arena.loses[d$godname == x]))
  res$gold_approx <- sapply(res$name, function(x) median(d$gold_approx[d$godname == x]))
  res$level <- sapply(res$name, function(x) median(d$level[d$godname == x]))
  res$age <- sapply(res$name, function(x) median(d$age[d$godname == x]))
  res$monsters_killed <- sapply(res$name, function(x) median(as.numeric(d$monsters_killed[d$godname == x])))
  res$pet_level <- sapply(res$name, function(x) median(as.numeric(d$pet_level[d$godname == x]), na.rm = TRUE))
  # Equipment level relative to character level.
  res$equip.rate <- sapply(res$name, function(x) median(d$equip.level[d$godname == x], na.rm = TRUE) / median(d$level[d$godname == x], na.rm = TRUE))
  # Pantheon ("p.") standings.
  res$might <- sapply(res$name, function(x) median(d$p.might[d$godname == x]))
  res$templehood <- sapply(res$name, function(x) median(d$p.templehood[d$godname == x]))
  res$gladiatorship <- sapply(res$name, function(x) median(d$p.gladiatorship[d$godname == x]))
  res$mastery <- sapply(res$name, function(x) median(d$p.mastery[d$godname == x]))
  res$taming <- sapply(res$name, function(x) median(d$p.taming[d$godname == x]))
  res$survival <- sapply(res$name, function(x) median(d$p.survival[d$godname == x]))
  res$savings <- sapply(res$name, function(x) median(d$p.savings[d$godname == x]))
  res$alignment.p <- sapply(res$name, function(x) median(d$p.alignment[d$godname == x]))
  # Achievement ("a.") medals.
  res$a.lamb <- sapply(res$name, function(x) as.numeric(median(d$a.lamb[d$godname == x], na.rm = TRUE)))
  res$a.imp <- sapply(res$name, function(x) as.numeric(median(d$a.imp[d$godname == x], na.rm = TRUE)))
  res$a.martyr <- sapply(res$name, function(x) as.numeric(median(d$a.martyr[d$godname == x], na.rm = TRUE)))
  res$a.favorite <- sapply(res$name, function(x) as.numeric(median(d$a.favorite[d$godname == x], na.rm = TRUE)))
  res$a.scoffer <- sapply(res$name, function(x) as.numeric(median(d$a.scoffer[d$godname == x], na.rm = TRUE)))
  res$a.warrior <- sapply(res$name, function(x) as.numeric(median(d$a.warrior[d$godname == x], na.rm = TRUE)))
  res$a.maniac <- sapply(res$name, function(x) as.numeric(median(d$a.maniac[d$godname == x], na.rm = TRUE)))
  res$a.champion <- sapply(res$name, function(x) as.numeric(median(d$a.champion[d$godname == x], na.rm = TRUE)))
  res$a.tutor <- sapply(res$name, function(x) as.numeric(median(d$a.tutor[d$godname == x], na.rm = TRUE)))
  res$a.hunter <- sapply(res$name, function(x) as.numeric(median(d$a.hunter[d$godname == x], na.rm = TRUE)))
  res$a.plunderer <- sapply(res$name, function(x) as.numeric(median(d$a.plunderer[d$godname == x], na.rm = TRUE)))
  res$a.careerist <- sapply(res$name, function(x) as.numeric(median(d$a.careerist[d$godname == x], na.rm = TRUE)))
  res$a.breeder <- sapply(res$name, function(x) as.numeric(median(d$a.breeder[d$godname == x], na.rm = TRUE)))
  #res$a.architect <- sapply(res$name, function(x) as.numeric(median(d$a.architect[d$godname == x], na.rm = TRUE)))
  res$a.shipbuilder <- sapply(res$name, function(x) as.numeric(median(d$a.shipbuilder[d$godname == x], na.rm = TRUE)))
  res$a.sailor <- sapply(res$name, function(x) as.numeric(median(d$a.sailor[d$godname == x], na.rm = TRUE)))
  #res$a.fowler <- sapply(res$name, function(x) as.numeric(median(d$a.fowler[d$godname == x], na.rm = TRUE)))
  return(res)
}
|
fb7b056a84d726d2902326fe7011058575f850af
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gdkPixbufNewFromFileAtScale.Rd
|
7b75f21c89f25e53c28f6ec829073a4cf36baa1c
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 1,921
|
rd
|
gdkPixbufNewFromFileAtScale.Rd
|
\alias{gdkPixbufNewFromFileAtScale}
\name{gdkPixbufNewFromFileAtScale}
\title{gdkPixbufNewFromFileAtScale}
\description{Creates a new pixbuf by loading an image from a file. The file format is
detected automatically. If \code{NULL} is returned, then \code{error} will be set.
Possible errors are in the \verb{GDK_PIXBUF_ERROR} and \verb{G_FILE_ERROR} domains.
The image will be scaled to fit in the requested size, optionally preserving
the image's aspect ratio. }
\usage{gdkPixbufNewFromFileAtScale(filename, width, height, preserve.aspect.ratio,
.errwarn = TRUE)}
\arguments{
\item{\verb{filename}}{Name of file to load, in the GLib file name encoding}
\item{\verb{width}}{The width the image should have or -1 to not constrain the width}
\item{\verb{height}}{The height the image should have or -1 to not constrain the height}
\item{\verb{preserve.aspect.ratio}}{\code{TRUE} to preserve the image's aspect ratio}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{When preserving the aspect ratio, a \code{width} of -1 will cause the image
to be scaled to the exact given height, and a \code{height} of -1 will cause
the image to be scaled to the exact given width. When not preserving
aspect ratio, a \code{width} or \code{height} of -1 means to not scale the image
at all in that dimension. Negative values for \code{width} and \code{height} are
allowed since 2.8.
Since 2.6}
\value{
A list containing the following elements:
\item{retval}{[\code{\link{GdkPixbuf}}] A newly-created pixbuf with a reference count of 1, or \code{NULL}
if any of several error conditions occurred: the file could not be opened,
there was no loader for the file's format, there was not enough memory to
allocate the image buffer, or the image file contained invalid data.}
\item{\verb{error}}{Return location for an error}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
f0e9f4e9e67505ef42ebdbeab7618b4c98570eb7
|
7a6625b46ba88e21be3d03aa3cb6934d40329867
|
/man/seq2func.Rd
|
98acb5e71a11342b9b61a8641b235e10ad4f5702
|
[] |
no_license
|
syuoni/NonParameterEstimation
|
09c8f07b561884775b0836ee047843dcf364ab58
|
e2575471233045f71b64b41c5c3eb2eae3a6e75d
|
refs/heads/master
| 2021-01-10T23:57:29.785059
| 2016-10-22T06:27:22
| 2016-10-22T06:27:22
| 70,776,946
| 0
| 0
| null | 2016-10-21T15:51:16
| 2016-10-13T06:42:41
|
R
|
UTF-8
|
R
| false
| true
| 510
|
rd
|
seq2func.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Seq2Func.R
\name{seq2func}
\alias{seq2func}
\title{Transform the sequence of two variables to a mapping function, with linear interpolation.}
\usage{
seq2func(x.seq, y.seq)
}
\arguments{
\item{x.seq}{sequence for explanatory variable}
\item{y.seq}{sequence for explained variable}
}
\value{
a function
}
\description{
Transform the sequence of two variables to a mapping function, with linear interpolation.
}
|
b0b80a8f6a38097345766528a9bd494d689f03fe
|
e4755d1e2207edc616f4f20eb2d4e5fb65a71c42
|
/man/createMrBayesTipCalibrations.Rd
|
5e8a0adc7ef6a25b0f41b25b93b2f41591ef6680
|
[
"CC0-1.0"
] |
permissive
|
dwbapst/paleotree
|
14bbfd5b312848c109a5fc539a1e82978a760538
|
95c2f57e91c4204c04cd59d9662ba94c43c87a60
|
refs/heads/master
| 2022-09-23T03:57:35.959138
| 2022-08-25T18:29:50
| 2022-08-25T18:29:50
| 3,827,289
| 20
| 11
| null | 2022-08-25T18:30:48
| 2012-03-25T21:01:38
|
R
|
UTF-8
|
R
| false
| true
| 7,995
|
rd
|
createMrBayesTipCalibrations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createMrBayesTipCalibrations.R
\name{createMrBayesTipCalibrations}
\alias{createMrBayesTipCalibrations}
\title{Construct A Block of Tip Age Calibrations for Use with Tip-Dating Analyses in MrBayes}
\usage{
createMrBayesTipCalibrations(
tipTimes,
ageCalibrationType,
whichAppearance = "first",
treeAgeOffset,
minTreeAge = NULL,
collapseUniform = TRUE,
anchorTaxon = TRUE,
file = NULL
)
}
\arguments{
\item{tipTimes}{This input may be either: (a) a \code{timeList} object,
consisting of a \code{list} of \code{length = 2}, composed of a table of interval
upper and lower time boundaries (i.e., the earlier and latter bounds of the intervals) and
a table of first and last intervals for taxa, or (b) a matrix with row names
corresponding to taxon names, matching those names listed in the MrBayes block,
with either one, two or four columns containing ages (respectively) for point occurrences with
precise dates (for a single column), uncertainty bounds on a point occurrence
(for two columns), or uncertainty bounds on the first and
last occurrence (for four columns). Note that precise first and last occurrence
dates should not be entered as a two column matrix, as this will instead be interpreted
as uncertainty bounds on a single occurrence. Instead, either select which you want to
use for tip-dates and give a one-column matrix, or repeat (and collate) the columns, so that
the first and last appearances has uncertainty bounds of zero.}
\item{ageCalibrationType}{This argument decides how age calibrations are defined,
and currently allows for four options: \code{"fixedDateEarlier"} which fixes tip
ages at the earlier (lower) bound for the selected age of appearance (see argument
\code{whichAppearance} for how that selection is made), \code{"fixedDateLatter"}
which fixes the date to the latter (upper) bound of the selected age of appearance,
\code{"fixedDateRandom"} which fixes tips to a date that is randomly drawn from a
uniform distribution bounded by the upper and lower bounds on the selected age of
appearance, or (the recommended option) \code{"uniformRange"} which places a uniform
prior on the age of the tip, bounded by the latest and earliest (upper and lower)
bounds on the the selected age.}
\item{whichAppearance}{Which appearance date of the taxa should be used:
their \code{'first'} or their \code{'last'} appearance date? The default
option is to use the 'first' appearance date. Note that use of the last
appearance date means that tips will be constrained to occur before their
last occurrence, and thus could occur long after their first occurrence (!).}
\item{treeAgeOffset}{A parameter given by the user controlling the offset
between the minimum and expected tree age prior. mean tree age for the
offset exponential prior on tree age will be set to the minimum tree age,
plus this offset value. Thus, an offset of 10 million years would equate to a prior
assuming that the expected tree age is around 10 million years before the minimum age.}
\item{minTreeAge}{if \code{NULL} (the default), then \code{minTreeAge} will
be set as the oldest date among the tip age used (those used being
determine by user choices (or oldest bound on a tip age). Otherwise,
the user can supply their own minimum tree, which must be greater than
whatever the oldest tip age used is.}
\item{collapseUniform}{MrBayes won't accept uniform age priors where the maximum and
minimum age are identical (i.e. its actually a fixed age). Thus, if this argument
is \code{TRUE} (the default), this function
will treat any taxon ages where the maximum and minimum are identical as a fixed age, and
will override setting \code{ageCalibrationType = "uniformRange"} for those dates.
All taxa with their ages set to fixed by the behavior of \code{anchorTaxon} or \code{collapseUniform}
are returned as a list within a commented line of the returned MrBayes block.}
\item{anchorTaxon}{This argument may be a logical (default is \code{TRUE},
or a character string of length = 1.
This argument has no effect if \code{ageCalibrationType} is not set to
\code{"uniformRange"}, but the argument may still be evaluated.
If \code{ageCalibrationType = "uniformRange"},
MrBayes will do a tip-dating analysis with uniform age uncertainties on
all taxa (if such uncertainties exist; see \code{collapseUniform}).
However, MrBayes does not record how each tree sits on an absolute time-scale,
so if the placement of \emph{every} tip is uncertain, lining up multiple dated trees
sampled from the posterior (where each tip's true age might
differ) could be a nightmare to back-calculate, if not impossible.
Thus, if \code{ageCalibrationType = "uniformRange"}, and there are no tip taxa given
fixed dates due to \code{collapseUniform} (i.e. all of the tip ages have a range of uncertainty on them),
then a particular taxon will be selected and given a fixed date equal to its
earliest appearance time for its respective \code{whichAppearance}.
This taxon can either be indicated by the user or instead the first taxon listed
in \code{tipTimes} will be arbitrary selected. All taxa with their ages set
to fixed by the behavior of \code{anchorTaxon} or \code{collapseUniform}
are returned as a list within a commented line of the returned MrBayes block.}
\item{file}{Filename (possibly with path) as a character string
to a file which will be overwritten with the output tip age calibrations.
If \code{NULL}, tip calibration commands are output to the console.}
}
\value{
If argument \code{file} is \code{NULL}, then the tip age commands
are output as a series of character strings.
All taxa with their ages set to fixed by the behavior of \code{anchorTaxon} or \code{collapseUniform}
are returned as a list within a commented line of the returned MrBayes block.
}
\description{
Takes a set of tip ages (in several possible forms, see below),
and outputs a set of tip age calibrations
for use with tip-dating analyses (sensu Zhang et al., 2016)
in the popular phylogenetics program \emph{MrBayes}.
These calibrations are printed as a set of character strings, as well as a
line placing an offset exponential prior on the tree age, either
printed in the R console or in a named text file, which can be used as
commands in the \emph{MrBayes} block of a NEXUS file for use with
(you guessed it!) \emph{MrBayes}.
}
\details{
Beware: some combinations of arguments might not make sense for your data.
(But that's always true, is it not?)
}
\examples{
# load retiolitid dataset
data(retiolitinae)
# uniform prior, with a 10 million year offset for
# the expected tree age from the earliest first appearance
createMrBayesTipCalibrations(
tipTimes = retioRanges,
whichAppearance = "first",
ageCalibrationType = "uniformRange",
treeAgeOffset = 10)
# fixed prior, at the earliest bound for the first appearance
createMrBayesTipCalibrations(
tipTimes = retioRanges,
whichAppearance = "first",
ageCalibrationType = "fixedDateEarlier",
treeAgeOffset = 10
)
# fixed prior, sampled from between the bounds on the last appearance
# you should probably never do this, fyi
createMrBayesTipCalibrations(
tipTimes = retioRanges,
whichAppearance = "first",
ageCalibrationType = "fixedDateRandom",
treeAgeOffset = 10
)
\dontrun{
createMrBayesTipCalibrations(
tipTimes = retioRanges,
whichAppearance = "first",
ageCalibrationType = "uniformRange",
treeAgeOffset = 10,
file = "tipCalibrations.txt"
)
}
}
\references{
Zhang, C., T. Stadler, S. Klopfstein, T. A. Heath, and F. Ronquist. 2016.
Total-Evidence Dating under the Fossilized Birth-Death Process.
\emph{Systematic Biology} 65(2):228-249.
}
\seealso{
\code{\link{createMrBayesConstraints}}, \code{\link{createMrBayesTipDatingNexus}}
}
\author{
David W. Bapst. This code was produced as part of a project
funded by National Science Foundation grant EAR-1147537 to S. J. Carlson.
}
|
e7fce8822ef3834d0162e7fd30fd4bfb96661bb4
|
ba00b1509adcf0f099ecda35cd570cfe2f53a7af
|
/forest plot.R
|
440797eee9a6ab5c40ec552234b3a2b1eaaf5321
|
[] |
no_license
|
chnjqh/IRGPI_ESCC
|
ec736eff1af16d0282060a3fc653dbff08281f8d
|
1017559a51995032b08e900d7872c9e736b2758a
|
refs/heads/main
| 2023-07-28T07:00:49.586030
| 2021-09-17T15:32:11
| 2021-09-17T15:32:11
| 407,485,661
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,222
|
r
|
forest plot.R
|
# Fit a univariate Cox proportional-hazards model for one covariate.
#
# Builds the formula Surv(OS.time, OS) ~ x, fits it with coxph(), and
# returns a data frame with the hazard ratio, its 95% confidence interval
# and the Wald-test p-value (one row per estimated coefficient).
#
# x:    name of the covariate column (character scalar).
# data: data set containing OS.time, OS and the covariate.  Defaults to
#       the global `clinical_ESCC_expr`, so existing callers that relied
#       on the hard-coded data set are unchanged; new callers can pass
#       their own data explicitly.
#
# NOTE(review): requires the 'survival' package (Surv, coxph) to be
# attached by the calling session; this file does not load it.
Unicox <- function(x, data = clinical_ESCC_expr) {
  form <- as.formula(paste0("Surv(OS.time,OS)~", x))
  fit <- coxph(form, data = data)
  fit_sum <- summary(fit)
  data.frame("Characteristics" = x,
             "HR" = fit_sum$conf.int[, "exp(coef)"],
             "HR.95L" = fit_sum$conf.int[, "lower .95"],
             "HR.95H" = fit_sum$conf.int[, "upper .95"],
             "pvalue" = fit_sum$coefficients[, "Pr(>|z|)"])
}
# Build a forest plot of univariate Cox results for the candidate genes.
# NOTE(review): `candidate_genes_for_cox2` and `Unicox` are defined
# elsewhere in the project; `Unicox` returns one row per gene with columns
# Characteristics / HR / HR.95L / HR.95H / pvalue.
library(plyr)
Univar_gene <- lapply(candidate_genes_for_cox2, Unicox)
Univar_gene <- ldply(Univar_gene,data.frame)
# Coerce all statistic columns to numeric; the single vector is recycled
# back into the data frame column-wise.
Univar_gene[,2:ncol(Univar_gene)] <- as.numeric(unlist(Univar_gene[,2:ncol(Univar_gene)]))
# Format "HR(low-high)" labels for the table column.
hz <- paste(round(Univar_gene$HR,3),
"(",round(Univar_gene$HR.95L,3),
"-",round(Univar_gene$HR.95H,3),")",sep = "")
# Label matrix: a blank spacer row, a header row, then one row per gene.
tabletext <- cbind(c(NA,"Gene",Univar_gene$Characteristics),
c(NA,"P value",round(Univar_gene$pvalue,3)),
c(NA,"Hazard Ratio(95% CI)",hz))
library(forestplot)
# Forest plot: graph drawn in column 2, diamond CI markers, reference
# (zero-effect) line at HR = 1.
# NOTE(review): hrzl_lines draws rules under rows 2, 3 and 7, which
# presumably assumes exactly four genes in the table — confirm.
forestplot(labeltext=tabletext,
graph.pos=2,
col=fpColors(box="#4DBBD5B2", lines="#3C5488B2", zero = "gray50"),
mean=c(NA,NA,Univar_gene$HR),
lower=c(NA,NA,Univar_gene$HR.95L),
upper=c(NA,NA,Univar_gene$HR.95H),
boxsize=0.3,lwd.ci=2,
ci.vertices.height = 0.1,ci.vertices=TRUE,
zero=1,lwd.zero=1,
colgap=unit(20,"mm"),
xticks = c(0.5, 1,1.5),
lwd.xaxis=1,
lineheight = unit(2,"cm"),
graphwidth = unit(.3,"npc"),
cex=0.9, fn.ci_norm = fpDrawDiamondCI,
hrzl_lines=list("2" = gpar(lwd=2, col="black"),
"3" = gpar(lwd=2, col="black"),
"7" = gpar(lwd=2, col="black")),
mar=unit(rep(0.5, times = 4), "cm"),
txt_gp=fpTxtGp(label=gpar(cex=1),
ticks=gpar(cex=1),
xlab=gpar(cex = 1.25),
title=gpar(cex = 1.2)),
xlab="Hazard Ratio")
|
6e32cd29bca90b510f3d890c7c0702db7a507108
|
0221655c47793c841dcaa19846d89d97a383ca7b
|
/Scripts/E-MAP/Figure 4 scripts/old/Fig4B_Sina_mutant_groups.R
|
2d85a1782de779fd8adab9d94ccc5144640d22a7
|
[] |
no_license
|
tinaperica/Gsp1_manuscript
|
47482665da7a9d7b3194e422d8ddf758980735ab
|
eecd615e160422fcb141a1dcd4a8d5e49bbf3e55
|
refs/heads/master
| 2021-11-14T18:59:50.596318
| 2021-10-13T15:47:05
| 2021-10-13T15:47:05
| 216,316,976
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,373
|
r
|
Fig4B_Sina_mutant_groups.R
|
##### load libraries
library(tidyverse)
library(ggforce)
source('ucsf_colors.R')
##### load datafiles
# load('Data/spitzemapko_correlations_and_bonferroni_fdr_all.RData')
load('Data/filtered_v6_correlations.RData')
name2ORF <- read_delim('Data/spitzemap_name2ORF_index.txt', delim = '\t', col_types = cols())
# set mutant groups based on clustering
mutant_group_order = c('hyd', 'inter', 'exch')
mutant_group_labels = c(hyd = 'Hydrolysis mutants\n(reduced GAP, WT-like GEF)\n(D79S, T34Q, T34E,\nT34G, T34A, Q147E)',
exch = 'Exchange mutants\n(increased GAP, reduced GEF)\n(Y157A, H141R, R108Y,\nR108Q, R108G, K101R)',
inter = 'Intermediate mutants\n(WT-like GAP, reduced GEF)\n(R108I, R108L, G80A, R78K)')
mutant_groups <-
list('hyd' = c('D79S', 'T34Q', 'T34E', 'T34G', 'T34A', 'Q147E'),
'inter' = c('R108I', 'R108L', 'G80A', 'R78K'),
'exch' = c('Y157A', 'H141R', 'R108Y', 'R108Q', 'R108G', 'K101R')) %>%
stack() %>%
rename('mutant' = 'values', 'mutant_group' = 'ind') %>%
mutate(mutant_group = factor(mutant_group, levels = mutant_group_order))
# clean correlations dataset, so each row is a correlation between a mutant and a strain
corr_for_sina <-
filtered_correlations %>%
filter(grepl('GSP1', query_uniq1), !grepl('GSP1', query_uniq2)) %>%
separate(query_uniq1, sep = ' - ', into = c('GSP1','query_uniq1')) %>%
filter(query_uniq1 %in% unlist(mutant_groups)) %>%
select(query_uniq1, query_uniq2, pearson, greater_fdr) %>%
rename('mutant' = query_uniq1, 'strain' = query_uniq2) %>%
left_join(mutant_groups, by = 'mutant')
##### SINA PLOT ONLY HIGHLIGHTING THREE COLORS, BUT WITH DIFFERENT MUTANT GROUPS
# annotate strains based on gene set
# Display order for gene sets and their matching colors (same order):
# green = NPC, pink = SAC, blue = tRNA modification, gray = everything else.
gene_sets_order <- c('nuclear pore complex', 'spindle assembly checkpoint', 'tRNA modification', 'other')
gene_sets_colors <- c(ucsf_colors$green1, ucsf_colors$pink1, ucsf_colors$blue1, ucsf_colors$gray3)
# Strain -> gene-set mapping, kept only for the three highlighted sets.
gene_sets <-
'Supplementary_Data_Tables/Excel_files/gene_sets_final.txt' %>%
read_delim(delim = '\t', col_types = cols()) %>%
select(query, gene_set) %>%
rename('strain' = query) %>%
filter(gene_set %in% gene_sets_order)
# Plotting data: significant correlations only (FDR < 0.05); strains without
# a highlighted gene set are binned into 'other'.
data <-
corr_for_sina %>%
left_join(gene_sets, by = 'strain') %>%
filter(greater_fdr < 0.05) %>%
mutate(gene_set = ifelse(is.na(gene_set), 'other', gene_set) %>%
factor(levels = gene_sets_order))
# Sina-style plot for Figure 4: per-mutant-group distributions of significant
# (FDR < 0.05) mutant-vs-gene Pearson correlations. Genes outside the three
# highlighted gene sets form a gray violin per group; the highlighted sets are
# drawn as jittered points at x offsets -0.3, 0 and +0.3 so they sit side by
# side within each mutant-group column.
# FIX: removed a stray trailing comma inside aes().
ggplot(data, aes(x = mutant_group, y = pearson, size = greater_fdr)) +
geom_violin(data = filter(data, gene_set == 'other'),
fill = ucsf_colors$gray3, color=NA,
width = 0.8, alpha = 0.2) +
geom_jitter(data = filter(data, gene_set == 'nuclear pore complex'),
mapping = aes(x = as.numeric(mutant_group) - 0.3, size = greater_fdr, color = gene_set),
width = 0.1) +
geom_jitter(data = filter(data, gene_set == 'spindle assembly checkpoint'),
mapping = aes(size = greater_fdr, color = gene_set),
width = 0.1) +
geom_jitter(data = filter(data, gene_set == 'tRNA modification'),
mapping = aes(x = as.numeric(mutant_group) + 0.3, size = greater_fdr, color = gene_set),
width = 0.1) +
scale_x_discrete(breaks = c('hyd', 'inter', 'exch'), labels = mutant_group_labels) +
scale_color_manual(name='Gene set', values=gene_sets_colors) +
# Point size encodes the FDR: range is reversed so smaller p -> larger point.
scale_size_continuous(name = 'P-value', range = c(0.5, 0.0),
breaks = c(0.001, 0.01, 0.03, 0.05), limits = c(0, 0.06)) +
ylim(c(0.05, 0.45)) + xlab('Gsp1 point mutant group') +
ylab('Pearson correlation\nbetween Gsp1 mutant\nand S. cerevisiae gene') +
guides(size=guide_legend(nrow=4,byrow=TRUE,
override.aes = list(fill=NA)),
color=guide_legend(nrow=3,byrow=TRUE)) +
theme_classic() +
theme(
text = element_text(family = "Helvetica", size = 6),
axis.title = element_text(size = 6), axis.text = element_text(size = 6),
axis.ticks = element_line(size = 0.05), axis.ticks.length = unit(0.05, 'cm'),
legend.position = 'top',
# legend.position = 'none',
legend.spacing.y = unit(0.01, 'cm'),
legend.box = 'horizontal', legend.box.just = 'left',
legend.text = element_text(size = 6), legend.title = element_text(size = 6),
legend.margin = margin(t = 0, unit='cm'),
plot.margin = margin(t = 0, unit='cm'),
axis.line = element_line(size = 0.1),
strip.text.x = element_text(size = 6)
)
# ggsave() with no plot argument saves the last plot displayed and opens/closes
# its own graphics device. FIX: dropped the trailing dev.off(), which errors
# ("cannot shut down device 1") when no device is open.
ggsave('Revisions/Main Figures/Figure4/Fig4_Sina_with_kinetics_groups.pdf', height = 3.5, width = 4.5)
##### BIG plot showing three groups for all gene sets:
# Supplementary version: one facet per gene set (all sets this time, not just
# the three highlighted ones), points colored by mutant group.
gene_sets <-
'Supplementary_Data_Tables/Excel_files/gene_sets_final.txt' %>%
read_delim(delim = '\t', col_types = cols()) %>%
select(query, gene_set) %>%
rename('strain' = query)
corr_for_sina %>%
left_join(gene_sets, by = 'strain') %>%
# significant correlations only; unannotated strains fall into 'other'
filter(greater_fdr < 0.05) %>%
mutate(gene_set = ifelse(is.na(gene_set), 'other', gene_set)) %>%
ggplot(aes(x = mutant_group, y = pearson,
size = greater_fdr, color = mutant_group)) +
facet_wrap(vars(gene_set)) +
geom_jitter() +
# Colors follow the mutant_group factor level order (hyd, inter, exch).
scale_color_manual(name='Mutant group',
values=c(ucsf_colors$pink1,
ucsf_colors$green1,
ucsf_colors$blue1)) +
# Point size encodes the FDR: range is reversed so smaller p -> larger point.
scale_size_continuous(name = 'P-value', range = c(0.5, 0.0),
breaks = c(0.001, 0.01, 0.03, 0.05),
limits = c(0, 0.06)) +
ylim(c(0.05, 0.45)) + xlab('Gsp1 point mutant group') +
ylab('Pearson correlation between Gsp1 mutant and S. cerevisiae gene') +
theme_classic() +
theme(
text = element_text(family = "Helvetica", size = 6),
axis.title = element_text(size = 6), axis.text = element_text(size = 6),
axis.ticks = element_line(size = 0.05), axis.ticks.length = unit(0.05, 'cm'),
legend.position = 'top', legend.spacing.y = unit(0.01, 'cm'),
legend.box = 'horizontal', legend.box.just = 'left',
legend.text = element_text(size = 6), legend.title = element_text(size = 6),
legend.margin = margin(t = 0, unit='cm'),
plot.margin = margin(t = 0, unit='cm'),
axis.line = element_line(size = 0.1),
strip.text.x = element_text(size = 6)
)
# FIX: dropped the trailing dev.off() — ggsave() manages its own device and
# calling dev.off() with no device open raises an error.
ggsave('Revisions/Main Figures/Figure4/Fig4_alt_Sina_of_genesets.pdf', height = 7.5, width = 7.5)
|
74567a04029a8f858721e54a6f4b3d699ab73208
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/RoughSets/man/BC.IND.relation.RST.Rd
|
fd54fe6522155bf88d4e4e946d2d6785af0ece24
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,788
|
rd
|
BC.IND.relation.RST.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BasicRoughSets.R
\name{BC.IND.relation.RST}
\alias{BC.IND.relation.RST}
\title{Computation of indiscernibility classes based on the rough set theory}
\usage{
BC.IND.relation.RST(decision.table, feature.set = NULL)
}
\arguments{
\item{decision.table}{an object inheriting from the \code{"DecisionTable"} class, which represents a decision system.
See \code{\link{SF.asDecisionTable}}.}
\item{feature.set}{an integer vector indicating indexes of attributes which should be used or an object inheriting from
the \code{FeatureSubset} class.
The computed indiscernibility classes will be relative to this attribute set.
The default value is \code{NULL} which means that
all conditional attributes should be considered. It is usually reasonable
to discretize numeric attributes before the computation of indiscernibility classes.}
}
\value{
An object of the class \code{"IndiscernibilityRelation"}, which is a list with the following components:
\itemize{
\item \code{IND.relation}: a list of indiscernibility classes in the data. Each class is represented by indices
of data instances which belong to that class
\item \code{type.relation}: a character vector representing a type of relation used in computations. Currently,
only \code{"equivalence"} is provided.
\item \code{type.model}: a character vector identifying the type of model which is used.
In this case, it is \code{"RST"} which means the rough set theory.
}
}
\description{
This function implements a fundamental part of RST: the indiscernibility relation.
This binary relation indicates whether it is possible to discriminate any given pair of objects from an information system.
This function can be used as a basic building block for development of other RST-based methods.
A more detailed explanation of the notion of indiscernibility relation can be found in \code{\link{A.Introduction-RoughSets}}.
}
\examples{
#############################################
data(RoughSetData)
hiring.data <- RoughSetData$hiring.dt
## In this case, we only consider the second and third attribute:
A <- c(2,3)
## We can also compute a decision reduct:
B <- FS.reduct.computation(hiring.data)
## Compute the indiscernibility classes:
IND.A <- BC.IND.relation.RST(hiring.data, feature.set = A)
IND.A
IND.B <- BC.IND.relation.RST(hiring.data, feature.set = B)
IND.B
}
\references{
Z. Pawlak, "Rough Sets", International Journal of Computer and Information Sciences,
vol. 11, no. 5, p. 341 - 356 (1982).
}
\seealso{
\code{\link{BC.LU.approximation.RST}}, \code{\link{FS.reduct.computation}}, \code{\link{FS.feature.subset.computation}}
}
\author{
Andrzej Janusz
}
|
bb686738daa9d6b4d2b33ee0a805c6fa6cc7f667
|
1f56e1a1d7405e71d3c34edd1d3262ac44c27ecd
|
/man/loadGCT.Rd
|
792f92f41c2e8158ba31651e5df795bd5f48cbdd
|
[] |
no_license
|
cran/tempoR
|
0676e0b1088249913186c9b0ac3d1970f1b02d3b
|
3f768b0042424855bde7df5de3c009ab54b6cbea
|
refs/heads/master
| 2020-12-22T22:34:37.450614
| 2019-05-27T07:30:03
| 2019-05-27T07:30:03
| 236,949,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 851
|
rd
|
loadGCT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tempoInput.R
\name{loadGCT}
\alias{loadGCT}
\title{Load a Gene Cluster Text formatted file}
\usage{
loadGCT(target)
}
\arguments{
\item{target}{a string indicating the location of the .gct file}
}
\value{
a matrix with sample ids as row names and gene ids as column names
}
\description{
\code{loadGCT} loads a Gene Cluster Text formatted file from a text file to the data structure used by TEMPO. A .gct
file is organized as described at \href{http://www.broadinstitute.org/cancer/software/gsea/wiki/index.php/Data_formats}{the BROAD site}.
}
\examples{
# An example gene expression data set is included in the package in .gct format
exampleDataPath = file.path(path.package("tempoR"),"gse32472Example.gct")
exampleData = loadGCT(exampleDataPath)
}
|
19c1dd9c7c00b67a0d9faa3255a7c108956740c2
|
9417844014186fe4be4d893843b0fbc4d13133df
|
/setup.R
|
f77a413d2da44c65dd40264d6d9e0b8760bf95bc
|
[
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
jsta/glatos-spatial_workshop_materials
|
e884e76c02d4dd7915b5dca5f2346a0b26e809b7
|
2a54b7f36f6558247e3254582a8b8eb7a04d5482
|
refs/heads/master
| 2023-07-24T15:41:31.057061
| 2023-07-13T14:10:17
| 2023-07-13T14:10:17
| 233,695,391
| 1
| 0
|
NOASSERTION
| 2023-07-13T14:12:42
| 2020-01-13T21:17:28
|
CSS
|
UTF-8
|
R
| false
| false
| 2,793
|
r
|
setup.R
|
# setup.R — prepare Lake Erie spatial layers for the workshop, re-projecting
# everything to UTM zone 17N (EPSG:26917) and caching the processed files on
# disk so the expensive conversions only run once.
library(raster)
library(sf)
# aggregate and re-project bathymetry raster file(s)
if(!file.exists("data/Lake_Erie_bathymetry (raster)/erie_lld_agg.tif")){
  erie_bathy <-
    raster("data/Lake_Erie_bathymetry (raster)/erie_lld.tif")
  # Coarsen by a factor of 4 to shrink the raster before re-projecting.
  erie_bathy <- aggregate(erie_bathy, fact = 4)
  erie_bathy <- raster::projectRaster(erie_bathy, crs = sf::st_crs(26917)$proj4string)
  writeRaster(erie_bathy, "data/Lake_Erie_bathymetry (raster)/erie_lld_agg.tif",
              overwrite = TRUE)
}else{
  erie_bathy <- raster("data/Lake_Erie_bathymetry (raster)/erie_lld_agg.tif")
}
# walleye management unit polygons
if(!file.exists("data/Lake_Erie_Walleye_Management_Units/Lake_Erie_Walleye_Management_Units_utm.shp")){
  erie_zones <- st_read("data/Lake_Erie_Walleye_Management_Units/Lake_Erie_Walleye_Management_Units.shp")
  erie_zones <- sf::st_transform(erie_zones, 26917)
  sf::st_write(erie_zones, "data/Lake_Erie_Walleye_Management_Units/Lake_Erie_Walleye_Management_Units_utm.shp")
}else{
  erie_zones <- sf::st_read("data/Lake_Erie_Walleye_Management_Units/Lake_Erie_Walleye_Management_Units_utm.shp")
}
# lake shoreline outline
if(!file.exists("data/Lake_Erie_Shoreline/Lake_Erie_Shoreline_utm.shp")){
  erie_outline <- st_read("data/Lake_Erie_Shoreline/Lake_Erie_Shoreline.shp")
  # BUG FIX: this previously transformed `erie_zones` and assigned the result
  # to `erie_outline`, so the cached "shoreline" shapefile actually contained
  # the management-unit polygons. Transform the shoreline itself.
  erie_outline <- sf::st_transform(erie_outline, 26917)
  sf::st_write(erie_outline, "data/Lake_Erie_Shoreline/Lake_Erie_Shoreline_utm.shp")
}else{
  erie_outline <- sf::st_read("data/Lake_Erie_Shoreline/Lake_Erie_Shoreline_utm.shp")
}
# interpolated fish tracks: convert lon/lat (EPSG:4326) points to UTM 17N
# coordinates stored as plain X/Y columns in a CSV
if(!file.exists("data/Two_Interpolated_Fish_Tracks_utm.csv")){
  fish_tracks <-
    read.csv("data/Two_Interpolated_Fish_Tracks.csv")
  # drop the leftover row-number column written by a previous write.csv
  fish_tracks <- fish_tracks[,-which(names(fish_tracks) == "X")]
  fish_tracks <- st_as_sf(fish_tracks,
                          coords = c("longitude", "latitude"),
                          crs = 4326)
  fish_tracks <- st_transform(fish_tracks, 26917)
  coords <- st_coordinates(fish_tracks)
  fish_tracks <- cbind(st_drop_geometry(fish_tracks), coords)
  fish_tracks$utmZone <- 17
  write.csv(fish_tracks, "data/Two_Interpolated_Fish_Tracks_utm.csv",
            row.names = FALSE)
}else{
  fish_tracks <- read.csv("data/Two_Interpolated_Fish_Tracks_utm.csv",
                          stringsAsFactors = FALSE)
}
erie_contours <- sf::st_read("data/Lake_Erie_bathymetric_contours/bathymetry_lake_erie.shp")
## move files to learner repo release
# dir.create("data/learner_data")
# sf::st_write(erie_outline, "data/learner_data/erie_outline.shp")
# sf::st_write(erie_contours, "data/learner_data/erie_contours.shp")
# sf::st_write(erie_zones, "data/learner_data/erie_zones.shp")
# write.csv(fish_tracks, "data/learner_data/fish_tracks.csv", row.names = FALSE)
# raster::writeRaster(erie_bathy, "data/learner_data/erie_bathy.tif")
|
f062603d8ac62cd95a3f3fc4ca99a8c18be26d98
|
fb57207c08c19ba2683f8c0cbc981c6dd5f79374
|
/man/assign_colnames.Rd
|
93cedc34a08d52d71a90743ee4fd1e84c9763d14
|
[] |
no_license
|
muschellij2/docxtractr
|
56bfb5c8a36fb4b968b6feb2e690cda5eefdf4d9
|
c6b5a0524340ce545580cee88c410771f09c292f
|
refs/heads/master
| 2020-06-19T07:42:20.168013
| 2020-06-16T22:40:18
| 2020-06-16T22:40:18
| 196,622,286
| 0
| 0
| null | 2019-07-12T17:52:39
| 2019-07-12T17:52:39
| null |
UTF-8
|
R
| false
| true
| 1,607
|
rd
|
assign_colnames.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_colnames.r
\name{assign_colnames}
\alias{assign_colnames}
\title{Make a specific row the column names for the specified data.frame}
\usage{
assign_colnames(dat, row, remove = TRUE, remove_previous = remove)
}
\arguments{
\item{dat}{can be any \code{data.frame} but is intended for use with
ones returned by this package}
\item{row}{numeric value indicating the row number that is to become
the column names}
\item{remove}{remove row specified by \code{row} after making it
the column names? (Default: \code{TRUE})}
\item{remove_previous}{remove any rows preceding \code{row}? (Default:
\code{TRUE} but will be assigned whatever is given for
\code{remove}).}
}
\value{
\code{data.frame}
}
\description{
Many tables in Word documents are in twisted formats where there may be
labels or other oddities mixed in that make it difficult to work with the
underlying data. This function makes it easy to identify a particular row
in a scraped \code{data.frame} as the one containing column names and
have it become the column names, removing it and (optionally) all of the
rows before it (since that's usually what needs to be done).
}
\examples{
# a "real" Word doc
real_world <- read_docx(system.file("examples/realworld.docx", package="docxtractr"))
docx_tbl_count(real_world)
# get all the tables
tbls <- docx_extract_all_tbls(real_world)
# make table 1 better
assign_colnames(tbls[[1]], 2)
# make table 5 better
assign_colnames(tbls[[5]], 2)
}
\seealso{
\code{\link{docx_extract_all}}, \code{\link{docx_extract_tbl}}
}
|
42406e46acd7daea51beb2423c1ea736da0c4efd
|
3fa1b23746232975b3b014db2f525007a3b49991
|
/anna_code/6_minute_walk_analysis/classify_survey_sex.r
|
c960e2973614ff1fe2ba77abb25a9f2fd274d955
|
[] |
no_license
|
AshleyLab/myheartcounts
|
ba879e10abbde085b5c9550f0c13ab3f730d7d03
|
0f80492f7d3fc53d25bdb2c69f14961326450edf
|
refs/heads/master
| 2021-06-17T05:41:58.405061
| 2021-02-28T05:33:08
| 2021-02-28T05:33:08
| 32,551,526
| 7
| 1
| null | 2020-08-17T22:37:43
| 2015-03-19T23:25:01
|
OpenEdge ABL
|
UTF-8
|
R
| false
| false
| 5,415
|
r
|
classify_survey_sex.r
|
library(tree)
library(data.table)
library(gbm)
rm(list=ls()) #REMOVE OLD VARIABLES FROM ENVIRONMENT
source('/home/anna/r_scripts/split_data.R')
setwd('/home/anna/r_scripts/')
metadata_file<-'/home/anna/r_scripts/Non_timeseries_filtered2.tsv'
meta<-read.table(metadata_file,sep="\t",header=TRUE,row.names=1)
merged<-as.data.frame(meta)
#DROP TIMESTAMP COLUMNS FOR NOW -- NOT SURE HOW TO USE THEM
merged$patientWakeUpTime<-NULL
merged$patientGoSleepTime<-NULL
#COERCE NUMERICAL VALUES TO FACTORS FOR CATEGORICAL RESPONSES
merged$atwork<-factor(merged$atwork)
merged$phys_activity<-factor(merged$phys_activity)
merged$sleep_time_ActivitySleep<-factor(merged$sleep_time_ActivitySleep)
merged$vascular<-factor(merged$vascular)
merged$medications_to_treat<-factor(merged$medications_to_treat)
merged$heartAgeDataHypertension<-factor(merged$heartAgeDataHypertension)
merged$family_history<-factor(merged$family_history)
merged$sodium<-factor(merged$sodium)
merged$phone_on_user<-factor(merged$phone_on_user)
#TRAINING DATA (Sex/Sex known)
Sex.train<-subset(merged,is.na(merged$Sex)==FALSE)
Sex.test<-subset(merged,is.na(merged$Sex))
#RANDOMLY SPLIT TRAINING DATA FOR 2-FOLD CROSS-VALIDATION
Sex.train.split<-split_data(Sex.train)
Sex.train.fold1<-Sex.train.split$"1"
Sex.train.fold2<-Sex.train.split$"2"
#TRAIN A DECISION TREE ON SURVEY DATA
tree.survey.Sex=randomForest(formula('Sex~
bloodPressureInstruction+
heartAgeDataBloodGlucose+
heartAgeDataDiabetes +
heartAgeDataEthnicity +
heartAgeDataHdl+
heartAgeDataHypertension+
heartAgeDataLdl +
smokingHistory +
heartAgeDataSystolicBloodPressure+
heartAgeDataTotalCholesterol +
patientWeightPoints +
patientHeightInches +
atwork+
moderate_act +
phys_activity +
sleep_diagnosis1 +
sleep_time_ActivitySleep+
sleep_time1 +
vigorous_act +
work +
activity1_intensity+
activity1_option +
activity1_time +
activity1_type +
activity2_intensity+
activity2_option +
activity2_time +
activity2_type +
phone_on_user+
sleep_time_DailyCheck +
fish +
fruit +
grains+
sodium +
sugar_drinks +
vegetable +
chestPain+
chestPainInLastMonth +
dizziness +
heartCondition +
jointProblem+
physicallyCapable +
prescriptionDrugs +
family_history +
heart_disease+
medications_to_treat +
vascular +
feel_worthwhile1 +
feel_worthwhile2+
feel_worthwhile3 +
feel_worthwhile4 +
riskfactors1 +
riskfactors2+
riskfactors3 +
riskfactors4 +
satisfiedwith_life +
zip'),data=Sex.train.fold1,importance=TRUE,na.action=na.tree.replace)
browser()
#par(mfrow=c(1,1))rt
#plot(tree.survey.Sex)
#text(tree.survey.Sex,pretty=0)
#tree.survey.pred=predict(tree.survey.Sex,Sex.train.fold2,type="class")
tree.survey.pred=predict(tree.survey.Sex,Sex.train.fold2,n.trees=500)
table(tree.survey.pred,Sex.train.fold2$Sex)
plot(Sex.train.fold2$Sex,tree.survey.pred)
cor(Sex.train.fold2$Sex,tree.survey.pred,use="complete.obs")
|
c9c494a2c1cf83e07d41883318226890e1ff6fe7
|
3076790177b055d06f79b5626f79056c3f947a68
|
/R/set_plot_format.R
|
fec9e825fcb5eaa997112fda449b3caf3de901d6
|
[] |
no_license
|
genomewalker/oligo4fun
|
44d71a0e02bdaab371d8fa9d3f317ccc3169e323
|
775c32b15a61229c07b428041e3124cbb888a21b
|
refs/heads/master
| 2020-04-06T07:01:41.241217
| 2016-11-02T11:04:30
| 2016-11-02T11:04:30
| 52,823,649
| 0
| 1
| null | 2016-02-29T22:35:03
| 2016-02-29T20:49:05
|
R
|
UTF-8
|
R
| false
| false
| 444
|
r
|
set_plot_format.R
|
#' Open a graphics device sized for an A4-landscape plot (11.69 x 8.27 in).
#'
#' @param format One of \code{"pdf"}, \code{"png"} or \code{"jpg"}.
#' @param file_name Path of the output file to create.
#' @return Invisibly, the value of the device-opening call.
#' @export
.set_plot_format <- function(format, file_name){
  # Shared page dimensions (A4 landscape, inches).
  page_w <- 11.69
  page_h <- 8.27
  # Dispatch on the exact format string; unknown formats hit the default arm.
  switch(format,
         pdf = pdf(file_name, height = page_h, width = page_w),
         png = png(file_name, height = page_h, width = page_w,
                   units = "in", res = 300),
         jpg = jpeg(file_name, height = page_h, width = page_w,
                    units = "in", res = 300),
         stop("Format not valid. Please choose 'png', 'pdf' or 'jpg'\n", call. = FALSE))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.