blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1835c71218b92908bbba2821120f7a93a273c6a8
|
2e45444684b19b9796604d8038f0a2e8abeed1ba
|
/src/supportingInfo_domains.R
|
ae13a3a2f9a4689dcc15bde86159788c63679f2e
|
[
"CC-BY-3.0"
] |
permissive
|
girke-lab/targetSelectivity
|
d430e76586c4acccdd1876c8ad265568d707cf8a
|
9a475936eccf3aed4d039ec8131e3b70530b19a7
|
refs/heads/master
| 2021-01-15T23:46:07.590908
| 2017-02-16T16:39:02
| 2017-02-16T16:39:02
| 65,675,614
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,175
|
r
|
supportingInfo_domains.R
|
#!/usr/bin/env Rscript
# (C) 2016 Tyler William H Backman
# Purpose: produce list of Pfam domains including median target, cluster, and domain selectivities for FDA approved and non-FDA compounds
library(R.utils)

# Parse input options once rather than re-reading commandArgs() three times.
args <- commandArgs(trailingOnly = TRUE)
targetSelectivityByDomainFile <- args[1]
outputFilename <- args[2]

# Fallback paths for running interactively (i.e. without make supplying args).
if (is.na(args[1])) {
    targetSelectivityByDomainFile <- "working/targetSelectivityByDomain.tab"
    outputFilename <- "working/supportingInfo_domains.tab"
}

# Parse input file: one row per Pfam domain, eight summary columns.
targetSelectivityByDomains <- read.table(targetSelectivityByDomainFile)
colnames(targetSelectivityByDomains) <- c("totalFDA", "totalNonFDA", "medianFDAtargetSelectivity",
    "medianNonFDAtargetSelectivity", "medianFDAclusterSelectivity", "medianNonFDAclusterSelectivity",
    "medianFDAdomainSelectivity", "medianNonFDAdomainSelectivity")

# Keep only domains with at least one FDA or non-FDA compound
# (columns 1:2 are the compound counts).
targetSelectivityByDomains <- targetSelectivityByDomains[rowSums(targetSelectivityByDomains[,1:2]) > 0,]

# Write out table. TRUE/FALSE are spelled out instead of T/F, which are
# ordinary (reassignable) variables in R.
write.table(targetSelectivityByDomains, outputFilename, quote = FALSE, sep = "\t",
    row.names = TRUE, col.names = TRUE)
|
daed92a441b42826057c144161e89f2ed9b57632
|
9c643d0399d433d89b30af0335a953740706d241
|
/cropnames.R
|
ea20c697baf806542b711dfc1979c703f8e7ec79
|
[] |
no_license
|
AramburuMerlos/cropdiv_usa
|
e17c40151f6b3f3e641c4ec25c10ffb4942d37bc
|
2eb9e5045183f5cade1feae28075f72077cc9027
|
refs/heads/master
| 2023-03-17T22:54:36.600516
| 2021-03-06T01:39:12
| 2021-03-06T01:39:12
| 272,756,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 676
|
r
|
cropnames.R
|
# Build a cleaned crop-name lookup table from CropCode.csv and write it out.
cropcode <- read.csv("CropCode.csv", stringsAsFactors = FALSE)
cropcode <- cropcode[c("ID", "CLASS_NAME", "A_N")]
cropcode <- cropcode[cropcode$ID != 0, ]
head(cropcode)

# Replace the class names for codes that appear with inconsistent spellings,
# using one canonical name per ID (same replacements as before, table-driven).
canonical_names <- c("1" = "Maize", "5" = "Wheat", "41" = "Canola",
                     "32" = "Peach/Nectarine", "46" = "Cabbage",
                     "53" = "Cantaloupe", "55" = "Squash")
for (id_chr in names(canonical_names)) {
  cropcode[cropcode$ID == as.integer(id_chr), 2] <- canonical_names[[id_chr]]
}

# Collapse duplicates created by the renaming, and keep crop codes only (< 200).
cropcode <- unique(cropcode)
cropcode <- cropcode[cropcode$ID < 200, ]
cropcode
names(cropcode) <- c("ID", "Name", "A_N")
write.csv(cropcode, "cropnames.csv", row.names = FALSE)
|
07d8f96c86f64de797a46b1b9ed4d91c16530451
|
630cf29a8e10d79dac84b157a15320522e48656f
|
/workout03/binomial/man/bin_cumulative.Rd
|
e8438e058de1ba13f3e8ba75eecc02d828adee1b
|
[] |
no_license
|
stat133-sp19/hw-stat133-luzhangberkeley
|
d64896866ac2e4d5746002124a71319965aab13a
|
36af51be0a4678a38f193656c256c2cea451224c
|
refs/heads/master
| 2020-04-28T13:27:32.602752
| 2019-05-03T23:59:26
| 2019-05-03T23:59:26
| 175,307,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 548
|
rd
|
bin_cumulative.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{Function bin_cumulative()}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{the probability of success}
}
\value{
the probability and cumulative probability distribution data frame
}
\description{
display the probability and cumulative probability for each number of successes
}
\examples{
# binomial cumulative distribution
bin_cumulative(trials = 5, prob = 0.5)
}
|
6c575581f87fb8a318c08c585342c24d2888f5c2
|
db462c413fa63bd14c529e45f1ea732c8216f3ee
|
/code/ui.R
|
7cdd1a4580d1a2b7edd31407161ccb950cfae906
|
[] |
no_license
|
priyadarsanshankar/Voice-Integrated-Visual-Analytics
|
2decfd888c09151979da9748e5d97377cf37fa5c
|
b8e9cbc425215d4b8694f3c715068ddbd4f69a8d
|
refs/heads/master
| 2020-03-26T18:35:11.731814
| 2019-03-22T07:00:18
| 2019-03-22T07:00:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,337
|
r
|
ui.R
|
# Shiny UI for the "Global migration flows" dashboard (shinydashboard layout:
# header + sidebar menu + five tabbed body panels).
shinyUI(dashboardPage(skin = "purple",
# Header: title plus a clickable home icon. The inline JS defines openTab(),
# which programmatically clicks the matching sidebar entry.
dashboardHeader(title = "Global migration flows",
tags$li(a(onclick = "openTab('home')",
href = NULL,
icon("home"),
title = "Homepage",
style = "cursor: pointer;"),
class = "dropdown",
tags$script(HTML("
var openTab = function(tabName){
$('a', $('.sidebar')).each(function() {
if(this.getAttribute('data-value') == tabName) {
this.click()
};
});
}")))
),
# Sidebar: one menu entry per analysis tab; tabName links to tabItem below.
dashboardSidebar(sidebarMenu(id="tabs",
menuItem("Exploring migration", tabName = "home", icon = icon("dashboard")),
menuItem("Migration flows", tabName = "trend", icon = icon("dashboard")),
menuItem("Mapping migration", tabName = "map", icon = icon("dashboard")),
menuItem("Unilateral migration", tabName = "unilateral", icon = icon("dashboard")),
menuItem("Bilateral migration", tabName = "bilateral", icon = icon("dashboard"))
)),
# Body: custom CSS, voice-control scripts (annyang + SpeechKITT), tab content.
dashboardBody(
tags$head(includeCSS("styles.css")),
singleton(tags$head(
tags$script(src="//cdnjs.cloudflare.com/ajax/libs/annyang/2.4.0/annyang.min.js"),
#tags$script(src="//cdnjs.cloudflare.com/ajax/libs/SpeechKITT/1.0.0/speechkitt.min.js"),
tags$script(src="speechkitt.min.js"),
includeScript('init.js')
)),
tabItems(
# First tab content: overview plots by income level, country type, remittance.
tabItem(tabName = "home",
fluidRow(
box(plotlyOutput("dest_income")),
box(plotlyOutput("orig_income"))
),
fluidRow(
box(plotlyOutput("dest_type")),
box(plotlyOutput("orig_type"))
),
fluidRow(
box(plotlyOutput("remit_years")),
box(align = "center", title = "What happened in 2009?",plotlyOutput("remit_change"))
)
),
# Second tab content: flow aggregation controls, chord diagrams, gender plots.
tabItem(tabName = "trend",
fluidRow(
column(width = 2,
box(width = 200, height = 100,radioButtons("radio",
"Select flow aggregation type:",
choices = c("continent", "region"),
selected = "continent"))),
# column(width = 3,box(width = 250, height = 100,selectInput("year",
# "Year:",
# choices = c(1990,1995,2000,2005,2010,2015,2017),
# selected = 2010))),
column(width = 2, box(width = 200, height = 100,
#setSliderColor(c("SkyBlue", "#FF4500", "", "Teal"), c(1, 2, 4)),
setSliderColor(c("rgb(236,236,236)", "rgb(236,236,236)", "", "rgb(236,236,236)"), c(1, 2, 4)),
sliderInput("year",
"Select a year:",
min = 1990,
max = 2015,
value = 2015,
ticks = T,
animate = T,
step = 5,format="####",sep = ""))),
column(3,valueBoxOutput("cnty_flow",width = 300)),
column(5,valueBoxOutput("conti_flow",width = 600))
# ,
#column(width = 3,box(width = 250, height = 100,uiOutput("ui1"))),
#column(width = 3,box(width = 250, height = 100,uiOutput("ui2")))
),
# Region- and country-level chord diagrams; country view updates on selection.
fluidRow(box(title="Region level flows",collapsible = T,
column(width = 6,chorddiagOutput("chorddiag", height = 650, width = 650))),
box(column(width = 6,chorddiagOutput("chorddiag_country", height = 650, width = 650)),collapsible = T,title="Country level flows (Select a region flow to update)")),
# fluidRow(
# column(width = 6,verbatimTextOutput("shiny_return"))),
fluidRow(column(width = 2,plotlyOutput("new",height = 300)),
(column(width = 9, align = "center", box(title="Bidirectional migration flow trend (Select a country flow to update)",width=10,plotlyOutput("gender_line", height = 300, width = 800))))
),
fluidRow(
box(column(width = 6, plotlyOutput("gender_bar1",height = 300, width = 600))),
box(column(width = 6, plotlyOutput("gender_bar2",height = 300, width = 600)))
)
),
# Third tab: leaflet maps (migration + remittance) with a draggable options panel.
tabItem(tabName = "map",
fluidRow(
column(width = 8, leafletOutput("map",width = 800)),
box(width = 4, column(width = 3, plotlyOutput("map_bar1", width = 300)))
),
fluidRow(
column(width = 8, leafletOutput("map_remit",width = 800)),
box(width = 4, column(width = 3, plotlyOutput("map_bar2", width = 300)))
),
# Floating, draggable control panel overlaid on the maps.
absolutePanel(id = "controlmap", class = "panel panel-default", fixed = TRUE,
draggable = TRUE, top = 30, left ="auto" , right = 30, bottom = "auto",
width = 300, height = 430,
h5('MAP OPTIONS'),
radioButtons("gender_direction_map",
"Migration direction:",
choices = c("Inflow", "Outflow"),
selected = "Outflow"),
uiOutput("selectize_countries_map"),
selectInput("map_year", "Year:", choices = c(2010,2015,2017), multiple = FALSE, selected = T),
setSliderColor(c("rgb(236,236,236)", "rgb(236,236,236)", "", "rgb(236,236,236)"), c(1, 2, 4)),
sliderInput("rank",
"Top countries # to show:",
min = 5,
max = 15,
value = 5,
ticks = T,
animate = F,
step = 5,sep = ""),
actionButton(inputId = "mapButton", label = "Update Map"),style = "opacity: 0.65; z-index: 1000;"
)
),
# Fourth tab: unilateral migration vs remittance quadrant analysis.
tabItem(tabName = "unilateral",
fluidRow(
column(width = 3,
box(width = 250, height = 100,selectInput("remittance_country",
"Select an Origin country:",
choices = migrant_remittance$`Origin country`,
selected = T))),
column(width = 7,box(width = 350, height = 100,radioButtons("remittance_color",inline = T,
"Color by",
choices = c("Destination Region", "Destination Income level","Destination Country type"),
selected = "Destination Region")))
),
fluidRow(
column(width = 12, box(title = "Migration vs Remittance quadrant analysis",width=12,collapsible = T,plotlyOutput("remittance_plot")))
),
fluidRow(
column(width = 12, box(title = "Migrant stock in absolute numbers (Select data from quadrants to update)",collapsible = T,width=12,plotOutput("bar_quadrant")))
),
fluidRow(
column(width = 12, box(title = "Remittance amounts in thousand USD (Select data from quadrants to update)",collapsible = T,width=12,plotOutput("bar_quadrant_remit")))
)
),
# Fifth tab: bilateral balances, correlation plot controls, and zoom modals.
tabItem(tabName = "bilateral",
fluidRow(
column(width = 3,box(width = 200, height = 100,selectInput("bilat_year",
"Year:",
choices = c(1990,1995,2000,2005,2010,2015,2017),
selected = 2010))),
column(width = 3,box(width = 200, height = 100,selectInput("corrmethod",
"Correlation plot method:",
choices = c("color","circle","square","ellipse","shade","number","pie"),
selected = "color")
)),
column(width = 3,box(width = 200, height = 100,selectInput("corrorder",
"Correlation plot order by:",
choices = c("alphabet","AOE","FPC","hclust"),
selected = "alphabet"))
),
column(width = 3,box(width = 200, height = 100,selectInput("corrclustmethod",
"Hclustering algorithm:",
choices = c("ward", "single", "complete", "average", "mcquitty", "median", "centroid"),
selected = "single")
))
),
fluidRow(box(title = paste0("Yearly average cross continental route bilateral balances for selected year"),
valueBoxOutput("avg_bilat_least_cross"),
valueBoxOutput("avg_bilat_less_cross"),
valueBoxOutput("avg_bilat_more_cross")
),
box(title = paste0("Yearly average within continental route bilateral balances for selected year"),
valueBoxOutput("avg_bilat_least_within"),
valueBoxOutput("avg_bilat_less_within"),
valueBoxOutput("avg_bilat_more_within")
)),
# One correlation plot per development group (least / less / more developed).
fluidRow(
box(width = 4,title = paste0("Bilateral balance between Least developed countries"),
#plotlyOutput("bilat_plot",height =600)
plotOutput("bilat_corr_plot3",height =400)),
box(width = 4,title = paste0("Bilateral balance between Less developed countries"),
#plotlyOutput("bilat_plot",height =600)
plotOutput("bilat_corr_plot2",height =400)),
box(width = 4,title = paste0("Bilateral balance between More developed countries"),
#plotlyOutput("bilat_plot",height =600)
plotOutput("bilat_corr_plot1",height =400))
),
fluidRow(
column(align="center",width = 4,actionButton("zoom1", "Zoom")),
column(align="center",width = 4,actionButton("zoom2", "Zoom")),
column(align="center",width = 4,actionButton("zoom3", "Zoom"))
),
# Modal dialogs that show enlarged versions of the three correlation plots.
bsModal("leastcorrmod", "Bilateral balance between Least developed countries", "zoom1", size = "large",
plotOutput("leastcorr",width = 800,height = 800)),
bsModal("lesscorrmod", "Bilateral balance between Less developed countries", "zoom2", size = "large",
plotOutput("lesscorr",width = 800,height = 800)),
bsModal("morecorrmod", "Bilateral balance between More developed countries", "zoom3", size = "large",
plotOutput("morecorr",width = 800,height = 800))
)
))))
|
6abdeb1c3b3824b94c3e915740a2443c7104b2c3
|
8b73830ae1b558df462e9766b0ef69f149942448
|
/p2_plot3.R
|
25588b9c626855b4d405a54f88c3ce53fd5268c0
|
[] |
no_license
|
pattareeya/Expo_Data_Project2
|
e9d68902e1aae7d6657961bc6cd8546a774e708c
|
8f25f9e755d6373c4376a0905afa7b041e434416
|
refs/heads/master
| 2016-09-06T02:53:41.476319
| 2014-07-26T04:25:05
| 2014-07-26T04:25:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
p2_plot3.R
|
# Explore Data Course Project No. 2, plot 3
NEI <- readRDS("summarySCC_PM25.rds")
library(reshape2)
# 3. Of the four types of sources indicated by the type
# (point, nonpoint, onroad, nonroad) variable,
# which of these four sources have seen decreases in emissions
# from 1999-2008 for Baltimore City?
# Which have seen increases in emissions from 1999-2008?
# Use the ggplot2 plotting system to make a plot answer this question.

# Select only rows for Baltimore City (fips == "24510"). Exact comparison is
# used instead of grep(), which would also match any fips code merely
# containing "24510" as a substring.
Bal_NEI <- NEI[NEI$fips == "24510", ]

# Sum emissions per (year, type), then reshape to long format for ggplot.
sum_nei3 <- melt(tapply(Bal_NEI$Emissions, list(Bal_NEI$year, Bal_NEI$type), sum))
colnames(sum_nei3) <- c("year", "type", "Emissions")
library(ggplot2)
sum_nei3 <- transform(sum_nei3, type = factor(type))

# One line+point series per source type, written to a 480x480 PNG.
png("p2_plot3.png", width = 480, height = 480)
ggplot(data = sum_nei3, aes(x = year, y = Emissions, group = type, colour = type, shape = type)) +
    geom_line() + geom_point() +
    labs(title = "Total Emissions in Baltimore City, Maryland")
dev.off()
# All of them except POINT showed that total emissions have
# decreased from year 1999-2008.
|
e5e714bc4bd519c728791548c29ed602d39a9841
|
4e99ee270ca1ad9c6fd2112be7d6581ae8539707
|
/ASR_Diversitree_Pollinators.R
|
e4097904a96eaf0ca89243fa542cd9d59fe997a8
|
[] |
no_license
|
jblandis/Polemoniaceae_family
|
b5029ca00b78a25cf520fd07d725027389a7e1fb
|
a91401cb188f44ef58a0bb6f2874bde1f60a57e7
|
refs/heads/master
| 2020-03-19T04:35:28.396257
| 2018-06-02T23:53:11
| 2018-06-02T23:53:11
| 135,844,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,798
|
r
|
ASR_Diversitree_Pollinators.R
|
library(phytools)
library(geiger)
library(nlme)
library(diversitree)

# Starting files: the trait table (row names = taxa) and the MCC tree.
mydata <- read.csv("Autogamy_and_out.csv",row.names=1)
mytree <- read.nexus("MCC.fixed.nex")

# Compare names between the tree and the data to list any discrepancies.
comparison <- name.check(phy=mytree,data=mydata)
# Prune taxa that don't have data but are present in the tree.
mytree <- drop.tip(mytree,comparison$tree_not_data)
# Double check to make sure that taxa all match between tree and data.
name.check(phy=mytree,data=mydata)
comparison <- name.check(phy=mytree,data=mydata)

# Create a named state vector from the first trait column.
# If the vector names are changed, the commands below must be altered to match.
states <- mydata[,1]
names(states) <- row.names(mydata)

# Ancestral state reconstruction under BiSSE.
# Sampling fraction: 192 of 471 known species are present in the tree.
sampling.f <- 192 / 471
# Fix: spell out the full argument name 'sampling.f' instead of the previous
# 'sampling=', which only worked through R's partial argument matching.
lik <- make.bisse(mytree,states,sampling.f=sampling.f)
p <- starting.point.bisse(mytree)
fit <- find.mle(lik,p)
st <- asr.marginal(lik, coef(fit))

# Fan (circle) tree with marginal-probability pies at the nodes; this layout
# is easy to read and is also the form needed for stochastic character mapping.
pdf("Pollinator_ASR.pdf")
state.colors <- c("gray", "black")
plot(mytree, type = "fan", label.offset=1.5, cex = 0.3, no.margin=TRUE)
nodelabels(pie = t(st), frame ="circle", piecol = state.colors, cex = 0.35)
dev.off()

# Mk2 ancestral state reconstruction, ignoring shifts in diversification.
pdf("Pollination_MK2.pdf")
lik.mk2 <- make.mk2(mytree, states)
p <- c(.1,.1)
fit.mk2 <- find.mle(lik.mk2, p)
coef(fit.mk2)
logLik(fit.mk2)
st.mk2 <- asr.marginal(lik.mk2, coef(fit.mk2))
plot(mytree, type = "fan", label.offset=1.5, cex = 0.3, no.margin=TRUE)
state.colors <- c("gray", "black")
nodelabels(pie = t(st.mk2), frame ="circle", piecol = state.colors, cex = 0.35, adj = -0.5)
dev.off()
|
5fd86e6e1166ad2bb6fc0fcdc9581428edb83539
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/LogicOpt/tests/testthat.R
|
27070646dc6ab366546e387b964c8722d89fb373
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Test runner: executes all testthat tests for the LogicOpt package
# (the tests themselves live under tests/testthat/).
library(testthat)
library(LogicOpt)
test_check("LogicOpt")
|
9ba8b3252a6059f2b0e4122439947988a4db886d
|
3b2476a25c1ed2d07eb91994215840604567dc54
|
/Chris_experiment_analysis_v3.R
|
0ac11ccc0c4dcb744a4bfe2c59189ffd7a92a657
|
[] |
no_license
|
GuptonLab/Microtubule-Comet-Analyzer
|
fd0110c1d7c12f9faef80eb73576fb06902dc7b6
|
548f0e8691c62486ef273c0031a29ecf88d7e489
|
refs/heads/master
| 2023-01-01T04:18:21.352848
| 2020-10-06T16:08:58
| 2020-10-06T16:08:58
| 288,845,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,670
|
r
|
Chris_experiment_analysis_v3.R
|
library(reshape2)
library(tidyverse)
library(cowplot)

# Read in the dataset.
# NOTE(review): hard-coded absolute Windows path — consider a relative path or
# command-line argument for portability.
dataset <- read.csv("C:/Users/chard/OneDrive/Desktop/New_Analysis_1.Csv")

# Change experiment and cell numbers into factors.
dataset$exp_num <- as.factor(dataset$exp_num)
dataset$cell_num <- as.factor(dataset$cell_num)
#dataset$identifier <- as.factor(dataset$identifier)

# Melt to long format and remove the NAs.
dataset_melt <- melt(dataset)
dataset_melt <- na.omit(dataset_melt)
dataset_melt$condition <- factor(dataset_melt$condition , levels = rev(levels(dataset_melt$condition)))

# Subset out the speed, lifetime, and density measurements.
comet_speed <- subset(dataset_melt, variable == "speed")
comet_lifetime <- subset(dataset_melt, variable == "lifetime")
comet_density <- subset(dataset_melt, variable == "density")

########
comet_speed_log <- comet_speed
comet_speed_log$value <- log(comet_speed_log$value)

# Means of everything:
# mean per (experiment, condition) and per (experiment, condition, cell).
average_comet_speed <- aggregate(comet_speed$value, list(comet_speed$exp_num, comet_speed$condition),mean)
average_comet_speed_percell <- aggregate(comet_speed$value, list(comet_speed$exp_num, comet_speed$condition,comet_speed$cell_num),mean)
# Bug fix: average_comet_lifetime was plotted and written to CSV below but was
# never computed; build it the same way as average_comet_speed.
average_comet_lifetime <- aggregate(comet_lifetime$value, list(comet_lifetime$exp_num, comet_lifetime$condition), mean)
average_comet_speed_percell$key_val <- paste(average_comet_speed_percell$Group.1, average_comet_speed_percell$Group.3)
wt_n_dat <- subset(average_comet_speed_percell, Group.2 == "WT" | Group.2 == "WT_Netrin")

# ggplot out the stuffs
# WT vs WT+Netrin, paired per cell (paths connect the same cell).
ggplot(wt_n_dat,aes(x = Group.2, y = x)) + geom_boxplot() + geom_point() +
geom_path(aes(group = key_val)) + geom_text(
aes(label = key_val),
nudge_x = 0,
nudge_y = 0.1
)
# Comet speeds per condition.
ggplot(comet_speed, aes(x = condition, y = value)) +
geom_point(aes(alpha = 0.1), position = position_jitterdodge(dodge.width = 0.75))+
geom_boxplot(aes(alpha = 0.4)) + theme_cowplot()
# Average comet speeds.
ggplot(average_comet_speed_percell, aes(x = Group.2, y = x)) +
geom_boxplot() + geom_point()
# NOTE(review): average_comet_speed only has Group.1/Group.2/x; coloring by
# Group.3 here looks wrong — confirm whether cell_num should be a third
# grouping variable in the aggregate above.
ggplot(average_comet_speed, aes(x = Group.2,y=x)) + geom_boxplot() + geom_point(aes(color = Group.3), size = 2) + theme_cowplot()
ggplot(average_comet_lifetime, aes(x = Group.2,y=x)) + geom_boxplot() + geom_point()
ggplot(comet_speed,aes(y = value)) + geom_boxplot() + geom_point()

# Log-transformed speed distributions for WT vs WT+Netrin.
comet_one <- subset(comet_speed, condition == "WT" | condition == "WT_Netrin")
comet_one_log <- comet_one
comet_one_log$value <- log(comet_one_log$value)
ggplot(comet_one_log, aes(x = value, group = identifier, fill = condition))+ geom_density(alpha = 0.1) +
geom_vline(aes(group = identifier, xintercept = median(comet_one_log$value), color = "blue")) +
geom_vline(aes(group = identifier, xintercept = mean(comet_one_log$value)))
write.csv(average_comet_speed,"comet_speed.csv")
write.csv(average_comet_lifetime,"comet_lifetime.csv")

########
# NOTE(review): the plots below use Genotype/Treatment/ROI/Cell.num columns,
# a different schema from condition/exp_num used above — presumably a second
# input file layout; confirm which dataset this section is meant to run on.
# All ROIs put together.
ggplot(comet_speed, aes(x = Genotype, y = value, fill = Treatment)) + geom_boxplot()
ggplot(comet_speed, aes(x = Genotype, y = value)) +
geom_violin(aes(color = Treatment), position = position_dodge(0.9)) +
geom_boxplot(aes(color = Treatment),outlier.shape = NA,position = position_dodge(0.9),width=0.1)
# Split by ROI.
comet_speed_gc = subset(comet_speed, ROI == "growth cone")
comet_speed_axon = subset(comet_speed, ROI == "axon")
comet_speed_both = subset(comet_speed, ROI == "both")
ggplot(comet_speed_gc, aes(x = Genotype, y = value, fill = Treatment)) + geom_boxplot()
ggplot(comet_speed_axon, aes(x = Genotype, y = value, fill = Treatment)) + geom_boxplot()
ggplot(comet_speed_both, aes(x = Genotype, y = value, fill = Treatment)) + geom_boxplot()
ggplot(comet_speed_gc, aes(x = Genotype, y = value)) +
geom_violin(aes(color = Treatment), position = position_dodge(0.9)) +
geom_boxplot(aes(color = Treatment),outlier.shape = NA,position = position_dodge(0.9),width=0.1)
ggplot(comet_speed_axon, aes(x = Genotype, y = value)) +
geom_violin(aes(color = Treatment), position = position_dodge(0.9)) +
geom_boxplot(aes(color = Treatment),outlier.shape = NA,position = position_dodge(0.9),width=0.1)
ggplot(comet_speed_both, aes(x = Genotype, y = value)) +
geom_violin(aes(color = Treatment), position = position_dodge(0.9)) +
geom_boxplot(aes(color = Treatment),outlier.shape = NA,position = position_dodge(0.9),width=0.1)
# Split by genotype.
comet_speed_wt = subset(comet_speed, Genotype == "wt")
comet_speed_t67d = subset(comet_speed, Genotype == "t67d")
comet_speed_t67r = subset(comet_speed, Genotype == "t67r")
ggplot(comet_speed_wt, aes(x = ROI, y = value, fill = Treatment)) + geom_boxplot()
ggplot(comet_speed_t67d, aes(x = ROI, y = value, fill = Treatment)) + geom_boxplot()
ggplot(comet_speed_t67r, aes(x = ROI, y = value, fill = Treatment)) + geom_boxplot()
ggplot(comet_speed_wt, aes(x = ROI, y = value)) +
geom_violin(aes(color = Treatment), position = position_dodge(0.9)) +
geom_boxplot(aes(color = Treatment),outlier.shape = NA,position = position_dodge(0.9),width=0.1)
ggplot(comet_speed_t67d, aes(x = ROI, y = value)) +
geom_violin(aes(color = Treatment), position = position_dodge(0.9)) +
geom_boxplot(aes(color = Treatment),outlier.shape = NA,position = position_dodge(0.9),width=0.1)
ggplot(comet_speed_t67r, aes(x = ROI, y = value)) +
geom_violin(aes(color = Treatment), position = position_dodge(0.9)) +
geom_boxplot(aes(color = Treatment),outlier.shape = NA,position = position_dodge(0.9),width=0.1)

########### WT, untreated vs netrin, paired t test
average_speeds <- comet_speed %>%
group_by(ROI,Genotype,Cell.num,Treatment) %>%
summarize(mean_speed = mean(value, na.rm = TRUE))
WT_both_average_speeds <- average_speeds %>%
filter(Genotype == "wt" & ROI == "both")
t.test(WT_both_average_speeds$mean_speed ~ WT_both_average_speeds$Treatment, paired = TRUE)
############ t67, untreated vs netrin, paired t test, only 3 neurons
average_speeds <- comet_speed %>%
group_by(ROI,Genotype,Cell.num,Treatment) %>%
summarize(mean_speed = mean(value, na.rm = TRUE))
t67_both_average_speeds <- average_speeds %>%
filter(Genotype == "t67d" & ROI == "both" & Cell.num != 4)
t.test(t67_both_average_speeds$mean_speed ~ t67_both_average_speeds$Treatment, paired = TRUE)
############# t test between WT and T67
average_speeds <- comet_speed %>%
group_by(ROI,Genotype,Cell.num,Treatment) %>%
summarize(mean_speed = mean(value, na.rm = TRUE))
wt_t67_untreated_both <- average_speeds %>%
filter(Treatment == "none" & ROI == "both" & Cell.num != 4)
t.test(wt_t67_untreated_both$mean_speed ~ wt_t67_untreated_both$Genotype, paired = FALSE)
|
0c8869b623f827fa42c79a4b688afafd74687cd2
|
873968fcec50323933c5f3a70b35f52b470d1580
|
/NLP_algTest_v6.R
|
775a00951631164ee3a17898afdf9dae94da1dea
|
[] |
no_license
|
cootem/DataScienceCapstone
|
364a9312d75ffa8742b471b323cfbecb7372b20e
|
176d896257377863f299b79f6637ee99a53d196c
|
refs/heads/master
| 2020-04-23T18:41:07.109193
| 2019-04-08T03:53:29
| 2019-04-08T03:53:29
| 171,376,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,501
|
r
|
NLP_algTest_v6.R
|
#### NLP - algorithm testing ####
# Michael Coote
# 3/27/2019
# Interactive scratch script for exercising the nextWord* prediction functions
# against pre-built n-gram tables.
library(data.table)
source("nextWord.R")
# load ngrams (data.tables: unigrams, bigrams, ..., hexagrams)
load("unigrams.RData")
load("bigrams_sm.RData")
load("trigrams_sm.RData")
load("quadgrams_sm.RData")
load("quintgrams_sm.RData")
load("hexagrams_sm.RData")
# NOTE(review): the just-loaded tables are immediately re-saved with
# compress = FALSE — presumably to rewrite them uncompressed for faster
# subsequent loads; confirm this is intentional on every run.
save(unigrams, file = "unigrams.RData", compress = FALSE)
save(bigrams, file = "bigrams_sm.RData", compress = FALSE)
save(trigrams, file = "trigrams_sm.RData", compress = FALSE)
save(quadgrams, file = "quadgrams_sm.RData", compress = FALSE)
save(quintgrams, file = "quintgrams_sm.RData", compress = FALSE)
save(hexagrams, file = "hexagrams_sm.RData", compress = FALSE)
# setkey(unigrams, nextWord)
# setkey(bigrams, ngram_start)
# setkey(trigrams, ngram_start)
# setkey(quadgrams, ngram_start)
# setkey(quintgrams, ngram_start)
# test pulling next word
# Each assignment below overwrites the previous one; only the last phrase
# ("test of jjkjklj") is actually used by the nextWord* calls that follow.
phrase <- "of the"
phrase <- "a big thank you to"
phrase <- "big thank you to"
phrase <- "thank you to"
phrase <- "you to"
phrase <- "i'd"
phrase <- "the baseball"
phrase <- "at the end of the"
phrase <- "sarah likes to have"
phrase <- "test of jjkjklj"
# Compare predictions using up to 4-, 5-, and 6-gram backoff.
nw <- nextWord4(unigrams, bigrams, trigrams, quadgrams, phrase)
nw <- nextWord5(unigrams, bigrams, trigrams, quadgrams, quintgrams, phrase)
nw <- nextWord6(unigrams, bigrams, trigrams, quadgrams, quintgrams, hexagrams,
phrase)
nw
# q1_phrase/q2_phrase/q10_phrase are expected in the workspace (quiz inputs).
phrase <- q1_phrase
phrase <- q2_phrase
phrase <- q10_phrase
phrase <- "adam sandler"
phrase <- 'you been way, way too'
phrase <- "might"
# Keyed data.table lookup: rows of `bigrams` whose key matches `phrase`.
bigrams[phrase, nextWord]
|
7168c3f1c41fca67a0f4c1cd5fe5e8348a4fe694
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/spsurv/man/print.summary.bpph.bayes.Rd
|
01a0b3419d4df522d91df92e908c6c73a38107c6
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 463
|
rd
|
print.summary.bpph.bayes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.summary.bpph.bayes.R
\name{print.summary.bpph.bayes}
\alias{print.summary.bpph.bayes}
\title{Bernstein Polynomial Based Regression Object Summary BPPH Bayes}
\usage{
\method{print}{summary.bpph.bayes}(...)
}
\arguments{
\item{...}{further arguments passed to or from other methods}
}
\value{
none
}
\description{
Bernstein Polynomial Based Regression Object Summary BPPH Bayes
}
|
824660daa5e8f2da9dee57b2c079bc77829aa5e3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bPeaks/examples/peakDetection.Rd.R
|
ceef07729b7e34bdf03eb1e1fa198088c6ddb5b5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,121
|
r
|
peakDetection.Rd.R
|
# Extracted example code for bPeaks::peakDetection (from peakDetection.Rd).
# Runs peak calling on a 10 kb slice of the bundled PDR1 ChIP-seq dataset.
library(bPeaks)
### Name: peakDetection
### Title: Peak calling method, i. e. identification of genomic regions
### with a high density of sequences (reads)
### Aliases: peakDetection
### Keywords: peak calling ChIP-seq protein binding sites protein-DNA
### interactions deep sequencing
### ** Examples
# get library
library(bPeaks)
# get PDR1 data (bundled example dataset with IP and control coverage)
data(dataPDR1)
# combine IP and control data side by side
allData = cbind(dataPDR1$IPdata, dataPDR1$controlData)
colnames(allData) = c("chr", "pos", "IPsignal", "chr", "pos", "controlSignal")
print("**********************************************")
# calculate baseline (genome-wide average) IP and control coverage values
lineIP = baseLineCalc(allData$IPsignal)
print(paste("Baseline coverage value in IP sample : ", round(lineIP, 3)))
lineControl = baseLineCalc(allData$controlSignal)
print(paste("Baseline coverage value in control sample : ", round(lineControl, 3)))
print("**********************************************")
print("")
# get list of chromosomes
chromNames = unique(allData[,1])
# start peak detection on the first chromosome
print("**********************************************")
print(paste("Starting analysis of chromosome ", chromNames[1]))
# information for one chromosome
subData = allData[allData[,1] == chromNames[1],]
# only 10 kb are analyzed here (as an illustration)
vecIP = subData[40000:50000,3]
vecControl = subData[40000:50000,6]
# smooth the coverage signals (sliding window of width 20)
smoothedIP = dataSmoothing(vecData = vecIP, widthValue = 20)
smoothedControl = dataSmoothing(vecData = vecControl, widthValue = 20)
# peak detection (sliding 150 bp windows with 50 bp overlap; thresholds are
# relative to the baseline coverage computed above)
detectedPeaks = peakDetection(IPdata = smoothedIP, controlData = smoothedControl,
chrName = as.character(chromNames[1]),
windowSize = 150, windowOverlap = 50,
outputName = paste("bPeaks_example_", chromNames[1], sep = ""),
baseLineIP = lineIP, baseLineControl = lineControl,
IPthreshold = 4, controlThreshold = 2,
ratioThreshold = 1, averageThreshold = 0.5,
peakDrawing = TRUE)
# print detected genomic positions
print(detectedPeaks)
|
157149d3e70e7f9922e32698506702319d42a1e9
|
1746b596d91b1e9f7e46acc9dd155591b58ba50b
|
/R/req.R
|
abea2cf66ae9e741785d31227b66d26c849a961d
|
[
"MIT"
] |
permissive
|
rodrigoesborges/siconfir-1
|
6edb4fa86e81d0e576ae6676fc2755df971188d0
|
b4bfe615639e1b1164459fed0b107ca0c57980e3
|
refs/heads/master
| 2023-04-13T07:13:19.721225
| 2021-04-20T13:35:08
| 2021-04-20T13:35:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,526
|
r
|
req.R
|
#' Perform one GET request against the API and unpack the paged response.
#'
#' @param url Endpoint URL.
#' @param query Named list of query parameters, or NULL for none.
#' @param verbose If TRUE, print the raw response object.
#' @return A list with `items` (parsed records from this page), `hasMore`
#'   (flag reported by the API), and `offset` (URL of the next page, or FALSE
#'   when there is no further page).
#' @importFrom purrr map
#' @importFrom tibble tibble
#' @importFrom tidyr unnest_wider
#' @importFrom httr GET status_code http_status content
http_request <- function(url, query, verbose) {
  # Human-readable rendering of the query (drops each parameter's 3-char
  # prefix), used only in the "no data" message below.
  alert <- function() {
    paste(
      purrr::map(
        names(query), ~ paste0(substring(., 4), ": ", query[[.]], "\n")
      )
    )
  }
  # Throttle: wait one second before every call to stay under rate limits.
  Sys.sleep(1)
  if (!is.null(query)) {
    response <- httr::GET(
      url = url,
      query = query
    )
  } else {
    response <- httr::GET(url = url)
  }
  if (httr::status_code(response) != 200) {
    stop(httr::http_status(response)$message)
  }
  if (verbose) {
    print(response)
  }
  content <- httr::content(response)
  if (length(content$items) == 0 && !is.null(query)) {
    message("Not found data for:\n", alert())
  }
  # Pagination: when the API reports more pages, pull the "next" link out of
  # content$links. A plain if/else replaces the previous scalar ifelse(),
  # which is meant for vectors; isTRUE() also tolerates a missing/NULL
  # hasMore field instead of erroring.
  if (isTRUE(content$hasMore)) {
    has_offset <- purrr::map(content$links, ~ .[["href"]][.[[1]] == "next"]) %>%
      .[purrr::map_lgl(., ~ length(.) > 0)] %>%
      .[[1]]
  } else {
    has_offset <- FALSE
  }
  list(items = content$items, hasMore = content$hasMore, offset = has_offset)
}
# Recursively fetch every page for a request, accumulating the items.
# `old` carries items gathered on earlier pages; the newest page's items are
# prepended, matching the original accumulation order.
fetch_rec <- function(url, query = NULL, verbose, old = NULL) {
  page <- http_request(url = url, query = query, verbose = verbose)
  accumulated <- c(page$items, old)
  if (!page$hasMore) {
    return(accumulated)
  }
  # More pages remain: follow the offset URL returned by http_request().
  fetch_rec(url = page$offset, old = accumulated, verbose = verbose)
}
# Top-level request helper: fetch all pages for endpoint `type` and return
# the records as a tibble with one column per response field.
req <- function(type, query, verbose) {
  endpoint <- utils::URLencode(api(type))
  records <- fetch_rec(url = endpoint, query = query, verbose = verbose)
  wrapped <- tibble::tibble(data = records)
  tidyr::unnest_wider(wrapped, col = "data")
}
|
63d109faa2a39fff76baac30deb056d8dd27452e
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/pracma/R/polyApprox.R
|
86eede4ebf7f03f538788b589117d9553b7d8f81
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
polyApprox.R
|
##
## p o l y A p p r o x . R Polynomial Approximation
##
polyApprox <- function(f, a, b, n, ...) {
if (!is.numeric(a) || !is.numeric(b) || !is.numeric(n) ||
length(a) != 1 || length(b) != 1 || length(n) != 1 ||
a >= b || n <= 0)
stop("One of arguments 'a', 'b', or 'n' incorrectly chosen.")
f1 <- match.fun(f)
f <- function(x) f1(x, ...)
# Compute the Chebyshev coefficients
cP <- chebPoly(n)
cC <- chebCoeff(sin, a, b, n)
p <- drop(cC %*% cP)
c0 <- cC[1]
# Compute the corresponding polynomial
q <- c(2, -(b+a))/(b-a)
r <- polytrans(p, q)
r <- polyadd(r, c(-c0/2))
rf <- function(x) polyval(r, x)
ep <- fnorm(f, rf, a, b, p = Inf)
return(list(p = p, f = rf, estim.prec = ep))
}
|
5c3a13ed2f750b1876c5b5c7aa42a3019260187c
|
26455464b8fd0bc5516f5af293dcbb9d35509295
|
/quiz-results/Arjun Gopinath/Quiz2_ArjG.R
|
09af9ac029b5965b0b1da6476817f6dfff62de89
|
[
"MIT"
] |
permissive
|
PercyUBC/r-tutorial
|
dd1dd5f03dea414ea8ce38409032e7cadde98370
|
cb2b6d8f8ae8d1d2cbe1f667f93e5f997ebb4732
|
refs/heads/master
| 2022-01-26T12:13:39.214680
| 2018-08-02T21:05:12
| 2018-08-02T21:05:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,225
|
r
|
Quiz2_ArjG.R
|
# Arjun Gopinath - Aug 2 2018
# R Tutorial Quiz 2
library(data.table)
library(ggplot2)
library(AER)
library(lfe)
# Ensure reproducibility of random numbers generated.
set.seed(101)
num_ind <- 1000 # Number of individuals
num_time <- 5 # Number of time periods
# Provide parameters in a list
param <- list(
"alp" = 8, "bet" = 1, "kap" = 1 # Providing parameter values for alpha, beta and kappa.
)
# Creating the data table
ddata_panel <- data.table(expand.grid(id = 1:num_ind, time = 1:num_time))
# Adding fixed effect error term for each individual
ddata_panel[, mu := rnorm(n = 1), by = "id"]
# Generating shock for each id, time
ddata_panel[, eps := rnorm(n = nrow(ddata_panel))]
# Generating Z independent of eps
ddata_panel[, Z := time * 0.75 + rnorm(n = nrow(ddata_panel))]
# Generating X based on mu, eps, Z and time for each individual in each time period
ddata_panel[, X := -mu + 0.25 * eps - time * 0.5 + 2.5 * Z + rnorm(n = nrow(ddata_panel))]
# Computing Y based on the DGP
ddata_panel[, Y := param$alp + param$bet * X + time * param$kap + mu + eps]
# Obtaining observed variables and store in a new dataset
observed_data <- ddata_panel [, .(id, time, Y, X, Z)]
# Running an OLS regression, ignoring Z
OLS_formula <- as.formula("Y ~ X + time + as.factor(id)")
OLS_result <- lm(formula = OLS_formula, data = observed_data) # regression
OLS_coef <- coef(summary(OLS_result))
# Running an IV regression
IV_formula <- as.formula("Y ~ X + time + as.factor(id) | time + as.factor(id) + Z")
IV_result <- ivreg(formula = IV_formula, data = observed_data)
IV_coef <- coef(summary(IV_result))
# Running a FE-LM regression accounting for Z
FELM_formula <- as.formula("Y ~ time | id | (X ~ Z)")
FELM_result <- felm(FELM_formula, data = observed_data)
FELM_coef <- coef(summary(FELM_result))
# Copying the results into a .CSV file
OLS_beta <- OLS_coef[2, ]
IV_beta <- IV_coef[2, ]
FELM_beta <- FELM_coef[2, ]
OLS_kappa <- OLS_coef[3, ]
IV_kappa <- IV_coef[3, ]
FELM_kappa <- FELM_coef[1, ]
results_beta <- rbind(OLS_beta, IV_beta, FELM_beta)
results_kappa <- rbind(OLS_kappa, IV_kappa, FELM_kappa)
write.csv(rbind(results_beta, results_kappa),
file = "IVresults.csv", row.names = T, na = " ")
|
67acec8a32e47ea7af17922678e25c840a21c1c1
|
86184eee1f66063b5bef943c3d30bf3128279fd8
|
/Scripts/constant_test.R
|
67864eb760d083d59154687a52c3c90ba0078197
|
[] |
no_license
|
GustafRydevik/Chapter3
|
04fc804fc071b601712d513aa6ed2b030d3bafe5
|
c00123cfbb2b3ba289deb6178286b69d912fb98e
|
refs/heads/master
| 2021-01-01T06:50:16.579188
| 2015-04-26T20:49:27
| 2015-04-26T20:49:27
| 24,935,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,544
|
r
|
constant_test.R
|
#proj.path<-"/Users/gustafrydevik/Dropbox/PhD folder/Chapter3"
end.time<-30
incidence<-1/10
Pop<-1:1000
InfTime<-rep(NA,length(Pop))
kinetic.fun=LotkaVolterra.Fun(diseaseType1)
iteration.data<-
LabdataGeneratorGeneric(
testkinetic=kinetic.fun,
timeFun=EndemicConstant,
timeFun.args=list(n.infection=1000,
start.time=1,
end.time=30,
incidence=1/10
),
errorFun=errorFun.lognorm,
errorFun.args=list(standard.deviation=log(c(1.2,1.1)))#log(measurement.sd))
)
library(rjags)
tmp<-jags.model(file.path(script.path,"bugs/hindcast_constant.txt"),
data=list(N=nrow(iteration.data$test.obsvalues),
Test.data=iteration.data$test.obsvalue,
is.naive=as.numeric(is.na(iteration.data$infection.times)),
censorLimit=end.time,ntest=2,
time.lookup=seq(0.5,99.5,by=0.5),
test.lookup=kinetic.fun(c(seq(0.5,99.5,by=0.5)))),
inits=list(InfTime=ifelse(is.na(iteration.data$infection.times),end.time+1,NA),lambda=1/10),
n.chains=5,n.adapt=100)
tmp2<-coda.samples(tmp,c("InfTime","incidence","sd"),n.iter=1000,n.adapt=500)
#diagnostics
plot(tmp2[,c("incidence","sd[1]","sd[2]")])
hist(unlist(tmp2[[1]][,grep("InfTime",colnames(tmp2[[1]]))]),freq=FALSE)
lines(dexp(seq(0,end.time,by=0.5),mean(tmp2[[1]][,"incidence"])))
plot(iteration.data$infection.times,colMeans(tmp2[[1]][,grep("InfTime",colnames(tmp2[[1]]))]))
|
149a5722756372a903a01043da3ccb997203c214
|
b52f2b6df3ca4e70fb308b852887fd114a8be3de
|
/p_3_bsb_seasonal_lda.R
|
94f6be727e5514ccf4553d774542836f5af2f0ab
|
[] |
no_license
|
wajra/fish-habitat-discrimination
|
315dfbd0f21c96114bdcd9a6b129bef094161a92
|
e135a5c58f4c17ec0b2a950608b44c7cfa47bd9c
|
refs/heads/master
| 2022-02-08T20:38:25.358959
| 2022-01-22T15:53:29
| 2022-01-22T15:53:29
| 220,505,079
| 0
| 0
| null | 2022-01-22T15:43:32
| 2019-11-08T16:21:16
|
HTML
|
UTF-8
|
R
| false
| false
| 1,196
|
r
|
p_3_bsb_seasonal_lda.R
|
# In this file we'll perform an exploratory analysis on the
# geofiltered value
# Written for R Version 3.6.1
library(tidyverse)
library(MASS)
# Read in the data
bsb_data <- read.csv("data/black_sea_bass_nw_stock_filtered.csv")
# bsb_data <- bsb_data[!(bsb_data$season=='Summer'), ]
# bsb_data$season <- factor(bsb_data$season)
# bsb_data <- bsb_data %>% filter(season %in% c("Spring","Fall"))
lda_formula <- formula(season ~ SBT.seasonal + SST.seasonal.mean + SBT.min + SBT.max + SST.max + rugosity + GRAINSIZE)
sp_lda <- lda(lda_formula, data = bsb_data)
lda_values <- predict(sp_lda)
lda_df <- data.frame(lda_values$x[,1], lda_values$x[,2], bsb_data$season)
# Rename the columns
columns <- c("LD1", "LD2","season")
colnames(lda_df) <- columns
# The actual plotting
ggplot(lda_df, aes(x=LD1, y=LD2)) +
geom_point(aes(color = factor(season)))
# Performing a Bartlett's test on the data
# First let's test for SBT.seasonal
# bartlett.test(count ~ spray, data = InsectSprays)
plot(SBT.seasonal ~ season, data = bsb_data)
bsb_data <- bsb_data %>% filter(season %in% c("Spring","Fall"))
bsb_data$season <- factor(bsb_data$season)
bartlett.test(SBT.seasonal ~ season, data = bsb_data)
|
9751ec34e2ecf316937f0fdff0e82a29076b2d5c
|
0ee081efd2a418cc9d940c3adb7484a2735785bb
|
/R/plotPEstopVSptrue.R
|
38e0199e6727fab224ce4f8bafff8a95aedfe465
|
[] |
no_license
|
biostata/BDP2
|
9ec6b5be0026b26e13801b820dc2282319b50c32
|
f674d39ef8686ab7a7cfe7e2c969ace4e2dbdb76
|
refs/heads/master
| 2023-04-03T08:34:42.841460
| 2021-04-07T19:47:32
| 2021-04-07T19:47:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,374
|
r
|
plotPEstopVSptrue.R
|
plotPEstopVSptrue=function(n,vn.int,pF,cF,pE,cE,pvec,shape1F,shape2F,shape1E=NULL,shape2E=NULL, col=1, add=FALSE,show=TRUE,...){
if (is.null(shape1E)) shape1E=shape1F
if (is.null(shape2E)) shape2E=shape2F
v.critE <- critEF(n=n, vn.int=vn.int, crit=cE, pE=pE,pF=pF, EF="E",
shape1=shape1E,shape2=shape2E)
v.critF <- critEF(n=n, vn.int=vn.int, crit=cF, pE=pE,pF=pF, EF="F",
shape1=shape1F,shape2=shape2F)
out=lapply(pvec, function(p) {pFstopEstop(p,c(vn.int,n),v.critE,v.critF)})
names(out)=pvec
summ=sapply(out, function(x) x$P.effic.cum)
rownames(summ)= paste0("Pat.",c(vn.int,n))
res=list(summary=summ, all=out)
if (!add) plot(as.numeric(colnames(res$summary)),res$summary[length(vn.int)+1,],xlab=expression(p["true"]),
ylab="Cumulative Probability of Efficacy at Final",
# main=paste("Analyses at",paste(c(vn.int,n),collapse=", "),"\npF =",pF,", cF=",cF,", pE =",pE,", cE=",cE ),
sub=paste("Interim analyses at",paste(vn.int,collapse=", "),", pF =",pF,", cF=",cF,", pE =",pE,", cE=",cE ),
ylim=c(0,1),pch=20,xlim=c(min(pvec),max(pvec)), type="n",las=1,...) #final
lines(as.numeric(colnames(res$summary)),res$summary[length(vn.int)+1,],lwd=2,col=col)
class(res)="ptrue_vs_pEstop"
invisible(res)
}
|
98e7b628a63baea3ff382d55b74653f800e93336
|
7019c612f7a673efe4fa91c374c333f78919dd8e
|
/code/sol-hw1.R
|
8596c31cf357ba47de28943e62f4757c27cbb1a8
|
[] |
no_license
|
math445-LU/2016
|
5223e32e01c08ebd5d3586dbebf3113f6011c6d2
|
d65794b1bfbb86437b926e77c6cc79fbe4448e79
|
refs/heads/master
| 2021-01-21T13:43:57.373656
| 2016-05-26T05:45:58
| 2016-05-26T05:45:58
| 54,741,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,621
|
r
|
sol-hw1.R
|
## ----include=FALSE-------------------------------------------------------
library(ggplot2)
library(dplyr)
library(knitr)
opts_chunk$set(
tidy=FALSE, # display code as typed
size="small", # slightly smaller font for code
fig.width=5,
fig.height=3
)
## ------------------------------------------------------------------------
gss <- read.csv("https://raw.githubusercontent.com/math445-LU/2016/master/data/GSS2002.csv")
## ----eval=FALSE, fig.height=2, fig.width = 3.5---------------------------
## library(dplyr)
## gss %>%
## group_by(DeathPenalty) %>%
## summarise(count = n())
##
## library(ggplot2)
## ggplot(data = gss) +
## geom_bar(mapping = aes(x = DeathPenalty))
## ----echo=FALSE----------------------------------------------------------
gss %>%
group_by(DeathPenalty) %>%
summarise(count = n()) %>%
kable
## ----echo=FALSE,fig.height=2, fig.width=2--------------------------------
ggplot(data = gss) +
geom_bar(mapping = aes(x = DeathPenalty))
## ----eval=FALSE----------------------------------------------------------
## library(dplyr)
## gss %>%
## group_by(OwnGun) %>%
## summarise(count = n())
##
## library(ggplot2)
## ggplot(data = gss) +
## geom_bar(mapping = aes(x = OwnGun))
## ----echo=FALSE----------------------------------------------------------
gss %>%
group_by(OwnGun) %>%
summarise(count = n()) %>%
kable
## ----echo=FALSE,fig.height=2, fig.width=2--------------------------------
ggplot(data = gss) +
geom_bar(mapping = aes(x = OwnGun))
## ------------------------------------------------------------------------
views_tbl <-
gss %>%
group_by(DeathPenalty, OwnGun) %>%
summarise(count = n())
views_tbl
## ----message=FALSE-------------------------------------------------------
library(tidyr)
views_tbl %>%
na.omit %>%
spread(DeathPenalty, count)
## ------------------------------------------------------------------------
spruce <- read.csv("https://raw.githubusercontent.com/math445-LU/2016/master/data/Spruce.csv")
## ------------------------------------------------------------------------
summary(spruce$Ht.change)
sd(spruce$Ht.change)
length(spruce$Ht.change)
## ------------------------------------------------------------------------
spruce %>%
summarise(min = min(Ht.change),
Q1 = quantile(Ht.change, probs = .25),
median = median(Ht.change),
Q3 = quantile(Ht.change, probs = .75),
mean = mean(Ht.change),
sd = sd(Ht.change),
n = n())
## ------------------------------------------------------------------------
ggplot(data = spruce) +
geom_histogram(mapping = aes(x = Ht.change), binwidth = 5, colour = "gray")
ggplot(data = spruce) +
stat_qq(mapping = aes(sample = Ht.change))
## ------------------------------------------------------------------------
spruce %>%
group_by(Fertilizer) %>%
summarise(min = min(Ht.change),
Q1 = quantile(Ht.change, probs = .25),
median = median(Ht.change),
Q3 = quantile(Ht.change, probs = .75),
mean = mean(Ht.change),
sd = sd(Ht.change),
n = n())
## ------------------------------------------------------------------------
ggplot(data = spruce) +
geom_density(mapping = aes(x = Di.change, fill = Fertilizer), alpha = 0.4)
ggplot(data = spruce) +
geom_boxplot(mapping = aes(x = Fertilizer, y = Di.change))
ggplot(data = spruce) +
geom_histogram(mapping = aes(x = Di.change), colour = "gray", binwidth = .5) +
facet_wrap(~ Fertilizer, ncol = 1)
## ------------------------------------------------------------------------
spruce %>%
group_by(Fertilizer, Competition) %>%
summarise(min = min(Ht.change),
Q1 = quantile(Ht.change, probs = .25),
median = median(Ht.change),
Q3 = quantile(Ht.change, probs = .75),
mean = mean(Ht.change),
sd = sd(Ht.change),
n = n())
## ------------------------------------------------------------------------
ggplot(data = spruce) +
geom_density(mapping = aes(x = Di.change, fill = Fertilizer:Competition), alpha = 0.4)
ggplot(data = spruce) +
geom_boxplot(mapping = aes(x = Fertilizer:Competition, y = Di.change))
ggplot(data = spruce) +
geom_histogram(mapping = aes(x = Di.change), colour = "gray", binwidth = .5) +
facet_grid(Fertilizer ~ Competition)
## ------------------------------------------------------------------------
ggplot(data = spruce) +
geom_point(mapping = aes(x = Di.change, y = Ht.change)) +
geom_smooth(mapping = aes(x = Di.change, y = Ht.change), method = "lm")
|
437556e9c085953f030f19dbb3f5cfe1ed43a6dc
|
2573b2b226e922302accf53ae5b7f055c2951d6e
|
/R/sites_by_species.R
|
59a5d59023ad67bcf7aeacdf916bee8abac56198
|
[] |
no_license
|
hieuqtran/ALA4R
|
442c32e9b5d2c750b748856d6dc7aaa4723e2529
|
ec240a434d8ba86c0c5c0a14961629ed313d705e
|
refs/heads/master
| 2020-08-24T01:51:35.699192
| 2019-08-01T04:48:08
| 2019-08-01T04:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,017
|
r
|
sites_by_species.R
|
#' Sites by species
#'
#' A data.frame is returned as grid cells by species with values in each cell being the number of occurrences of each species. No null (all zero) species should be returned. The coordinates returned are the TOP-LEFT corner of the grid cell.
#'
#' @references Associated web services: \url{https://spatial.ala.org.au/ws/capabilities} see PointsToGrid/sitesBySpecies
#' @references \url{http://www.geoapi.org/3.0/javadoc/org/opengis/referencing/doc-files/WKT.html}
#'
#' @param taxon string: the identifier to get the species data from the ala biocache. E.g. "genus:Heleioporus". See \code{ala_fields("occurrence_stored")} for valid field names
#' @param wkt string: Bounding area in Well Known Text (WKT) format. E.g. "POLYGON((118 -30,146 -30,146 -11,118 -11,118 -30))".
#' @param gridsize numeric: size of output grid cells in decimal degrees. E.g. 0.1 (=~10km)
#' @param SPdata.frame logical: should the output be returned as a SpatialPointsDataFrame of the sp package?
#' @param verbose logical: show additional progress information? [default is set by ala_config()]
#' @return A dataframe or a SpatialPointsDataFrame containing the species by sites data. Columns will include longitude, latitude, and each species present. Values for species are record counts (i.e. number of recorded occurrences of that taxon in each grid cell). The \code{guid} attribute of the data frame gives the guids of the species (columns) as a named character vector
#'
#' @examples
#' \dontrun{
#' # Eucalyptus in Tasmania based on a 0.1 degree grid
#' ss <- sites_by_species(taxon="genus:Eucalyptus", wkt="POLYGON((144 -43,148 -43,148 -40,
#' 144 -40,144 -43))", gridsize=0.1, verbose=TRUE)
#' head(ss[, 1:6])
#'
#' ## get the guid of the first species
#' attr(ss,"guid")[1]
#'
#' # Steps: 1. POST webservice creates a task (use single quotes around data-binary argument)
#' curl 'https://spatial.ala.org.au/ws/tasks/create?userId=0' --data-binary name=PointsToGrid
#' &input={"area":[{"name":"Current extent"
#' ,"wkt":"POLYGON((144 -43,148 -43,148 -40,144 -40,144 -43))"}]
#' ,"occurrenceDensity":false,"sitesBySpecies":true,"speciesRichness":false
#' ,"species":{"q":["genus:Eucalyptus"]
#' ,"bs":"https://biocache-ws.ala.org.au/ws/","name":"genus:Eucalyptus"}
#' ,"gridCellSize":0.1,"resolution":0.01,"movingAverage":"1x1 (no moving average)"}'
#' #resp eg '{"name":"PointsToGrid","created":1552881125953,"email":"null","history":{}
#' ,"tag":"null","userId":"0","sessionId":"null","status":0,"id":<id>}
#' # 2. check status values: 0 = in_queue, 1 = running, 2 = cancelled, 3 = error, 4 = finished
#' curl 'https://spatial.ala.org.au/ws/tasks/status/<id>'
#' waiting: {"status":1,"message":"getting species data","id":<id>,"name":"PointsToGrid"}
#' complete:{"status":4,"message":"finished","id":<id>,"name":"PointsToGrid"
#' ,"history":{"1552879452131":"finished","1552879452155":"finished (id:<id>)"}
#' ,"output":[{"name":"files","file":"SitesBySpecies.csv","taskId":<id>,"id":33111}
#' ,{"name":"sxs_metadata.html","taskId":<id>,"file":"sxs_metadata.html","id":33109}
#' ,{"file":"download.zip","taskId":<id>,"name":"download.zip","id":33110}]}
#' failed: {"status":4,"message":"finished","id":<id>,"name":"PointsToGrid"
#' ,"history":{"1552881921817":"failed (id:<id>)"}}
#' # 3. download the zip and extract the file
#' https://spatial.ala.org.au/ws/tasks/output/<id>/download.zip
#' https://spatial.ala.org.au/ws/tasks/output/<id>/SitesBySpecies.csv
#' }
#'
#'
# TODO need way to better check input species query. If the query is incorrect, the call will fail with message along the lines of: "Error in sites_by_species(taxon = "gen:Eucalyptus", wkt = "POLYGON((144 -43,148 -43,148 -40,144 -40,144 -43))", : Error processing your Sites By Species request. Please try again or if problem persists, contact the Administrator."
## fails: ss <- sites_by_species(taxon="rk_genus:Eucalyptus",wkt="POLYGON((144 -43,148 -43,148 -40,144 -40,144 -43))",verbose=TRUE)
## fails: ss <- sites_by_species(taxon="scientificNameAuthorship:Maiden",wkt="POLYGON((144 -43,148 -43,148 -40,144 -40,144 -43))",verbose=TRUE)
## fails: ss <- sites_by_species(taxon="parentGuid:http://id.biodiversity.org.au/node/apni/6337078",wkt="POLYGON((144 -43,148 -43,148 -40,144 -40,144 -43))",verbose=TRUE)
#' @export
sites_by_species <- function(taxon, wkt, gridsize=0.1, SPdata.frame=FALSE, verbose=ala_config()$verbose) {
## check input parms are sensible
assert_that(is.notempty.string(taxon))
assert_that(is.notempty.string(wkt))
assert_that(is.numeric(gridsize), gridsize>0)
assert_that(is.flag(SPdata.frame))
assert_that(is.flag(verbose))
if (SPdata.frame && !requireNamespace("sp", quietly=TRUE)) {
## sp package not available
warning("sp package required for SpatialPointsDataFrame output")
SPdata.frame <- FALSE
}
## create the task
tasks_url <- paste(getOption("ALA4R_server_config")$base_url_spatial, "tasks/", sep="") #get the base url, append /tasks/
url_str <- paste(tasks_url,"create?userId=0&sessionId=",ala_sourcetypeid(), sep="")
# --data-binary values go into cached_post(...body...) and subsequently curlPeform(postfields=body)
resolution=0.01
body <- paste('name=PointsToGrid&input={"area":[{"name":"Current extent","wkt":"', wkt, '"}]', sep="")
body <- paste(body,',"occurrenceDensity":false', sep="")
body <- paste(body,',"sitesBySpecies":true', sep="")
body <- paste(body,',"speciesRichness":false', sep="")
body <- paste(body,',"species":{"q":["', taxon, '"]', sep="")
body <- paste(body,',"bs":"', getOption("ALA4R_server_config")$base_url_biocache,'"', sep="")
body <- paste(body,',"name":"',taxon,'"}', sep="")
body <- paste(body,',"gridCellSize":',gridsize, sep="")
body <- paste(body,',"resolution":',resolution, sep="")
body <- paste(body,',"movingAverage":"1x1 (no moving average)"}', sep="")
# ## somehow this doesn't work: not sure why. Leave for now
# #this_url <- build_url_from_parts(getOption("ALA4R_server_config")$base_url_alaspatial, "sitesbyspecies", list(speciesq=taxon, qname="data", area=wkt, bs=getOption("ALA4R_server_config")$base_url_biocache, movingaveragesize=1, gridsize=gridsize, sitesbyspecies=1))
# ##moving window average value (1 = 1 cell, which means that no moving average applied)
# #url_str <- build_url(this_url)
this_cache_file <- ala_cache_filename(paste(url_str,body,sep="")) ## the file that will ultimately hold the results (even if we are not caching, it still gets saved to file)
if ((ala_config()$caching %in% c("off", "refresh")) || (! file.exists(this_cache_file))) {
create_response <- cached_post(URLencode(url_str), body=body, encoding="json", type="json", content_type="application/x-www-form-urlencoded", caching="off", verbose=verbose) #returns json
id <- create_response$id
if (is.null(id) || id=="") {
## error - but note that we may still get a STATUS 200 from the server in this case
## Check the WKT string, maybe that was the problem
if (!missing(wkt) && !isTRUE(check_wkt(wkt))) warning("WKT string may not be valid: ", wkt)
## NOTE May 2018: in fact it seems that the server can fail to parse the WKT string even if (apparently) valid
## see https://github.com/AtlasOfLivingAustralia/biocache-service/issues/225
stop("there has been an issue with this service. ", getOption("ALA4R_server_config")$notify) } #catch for these missing pid issues
status_url <- paste(tasks_url, "status/", id, sep="")
if (verbose) message(paste("Waiting for sites-by-species results to become available: ",status_url,sep=""), appendLF=FALSE)
status_response <- cached_get(URLencode(status_url), type="json", caching="off", verbose=verbose) #get the status
while (status_response$status <= 1) {
if (verbose) message(".", appendLF=FALSE) ## keep checking the status until finished
status_response <- cached_get(URLencode(status_url), type="json", caching="off", verbose=verbose) #get the status
if ((status_response$status >= 2) && (is.null(status_response$output))) {
## stop if there was an error
## first check the wkt string: if it was invalid (or unrecognized by our checker) then warn the user
if (!missing(wkt) && !isTRUE(check_wkt(wkt))) warning("WKT string may not be valid: ", wkt)
stop(status_response$history)
}
Sys.sleep(2)
}
message("") ## to get LF
output_url <- paste(tasks_url,"output/",id,"/download.zip",sep="")
download_to_file(output_url, outfile=this_cache_file, binary_file=TRUE, verbose=verbose)
} else {
## we are using the existing cached file
if (verbose) message(sprintf("Using cached file %s", this_cache_file))
}
out <- read_csv_quietly(unz(this_cache_file, "SitesBySpecies.csv"), as.is=TRUE, skip=4) ## read in the csv data from the zip file; omit the first 4 header rows. use read_csv_quietly to avoid warnings about incomplete final line
## drop the "Species" column, which appears to be a site identifier (but just constructed from the longitude and latitude, so is not particularly helpful
out <- out[, names(out)!="Species"]
##deal with SpatialPointsDataFrame
if (SPdata.frame) { #if output is requested as a SpatialPointsDataFrame
## coerce to SpatialPointsDataFrame class
if (nrow(out)>0) {
out <- SpatialPointsDataFrame(coords=out[, c("Longitude", "Latitude")], proj4string=CRS("+proj=longlat +ellps=WGS84"), data=out)
}
}
## rename variables
names(out) <- rename_variables(names(out), type="other")
## also read the species guids
guids <- read.csv(unz(this_cache_file, "SitesBySpecies.csv"), stringsAsFactors=FALSE, nrows=1, header=FALSE)
guids <- guids[-1:-3] ## first 3 cols will be species lon lat
guids <- as.character(guids)
names(guids) <- names(out)[-2:-1]
attr(out, "guid") <- guids
## warn about empty results if appropriate
if (nrow(out)<1 && ala_config()$warn_on_empty) {
warning("no occurrences found")
}
out
}
|
bf26307045e2d780953f0262cb9b5cf3d852c82f
|
179e7f45e8002a2fc408e69534d2545c26eed53c
|
/problem34.R
|
e77d5f736b3fdb56d568ab9eb513d64db2fcc24d
|
[] |
no_license
|
shannonrush/euler
|
45a4f7360af2671ecdea1e110c2e88bd2227ccbf
|
acfb8e57bb557574a46d475d12561237d68273fa
|
refs/heads/master
| 2021-01-22T04:41:32.670810
| 2015-05-09T20:43:49
| 2015-05-09T20:43:49
| 32,261,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
problem34.R
|
# 145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
#
# Find the sum of all numbers which are equal to the sum of the factorial of their digits.
#
# Note: as 1! = 1 and 2! = 2 are not sums they are not included.
DigitFactorials <- function() {
sum <- 0
for (i in 3:99999) if (SumFacts(i)) sum <- sum + i
sum
}
SumFacts <- function(n) {
digits <- as.integer(unlist(strsplit(toString(n),"")))
ifelse(sum(factorial(digits))==n, TRUE, FALSE)
}
# test
# SumFacts(145)==TRUE
# DigitFactorials()
# Unit: seconds
# expr min lq mean median uq max neval
# DigitFactorials() 2.184715 2.217862 2.252604 2.241981 2.303971 2.319154 10
|
a68ad21b41da75e854459b3859c759593e09dbca
|
2a2c97a3c0a10a543ed7884d8cfb94d493c3c537
|
/R/current-supply.R
|
774da59783d150525a8b3eacdd4bd797cccbeecc
|
[
"MIT"
] |
permissive
|
CryptocurrencyDevelopers/stablecoin
|
f688d51e78623d295d5e95af06db61d048d6e5ac
|
4d6b4a811cdce4dd1d03668469c6221b2c1e7798
|
refs/heads/master
| 2023-06-20T01:59:22.211144
| 2021-07-19T02:21:05
| 2021-07-19T02:21:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,927
|
r
|
current-supply.R
|
#' Retrieve current supply balances on supported blockchains
#'
#' @description
#' `current_supply` retrieves the current circulating supply of a stablecoin on
#' a single blockchain
#'
#' `current_supply_all` retrieves the current circulating supply of a stablecoin
#' or multiple stablecoins on all underlying blockchains
#'
#' @export
#'
#' @param network the blockchain network
#' @param token the stablecoin token
#' @param selected_tokens a single or multiple tokens
#'
#' @rdname current_supply
#'
#' @examples
#' \donttest{
#' current_supply(network = "Ethereum", token = "USDC")
#' current_supply_all()
#' current_supply_all("UDSC")
#' current_supply_all(selected_tokens = c("USDC", "USDT"))
#' }
current_supply <- function(network, token) {
switch (network,
"Algorand" = current_supply_algorand(token),
"Ethereum" = current_supply_ethereum(token),
"Solana" = current_supply_solana(token),
"Stellar" = current_supply_stellar(token),
"TRON" = current_supply_tron(token),
stop("Invalid or unsupported blockchain network specified")
)
}
#' @export
#'
#' @rdname current_supply
current_supply_all <- function(selected_tokens = NULL) {
if(is.null(selected_tokens)) {
selected_tokens <- tokens %>% dplyr::select(token) %>% unique() %>% purrr::as_vector()
}
result <- tokens %>%
filter(token %in% selected_tokens) %>%
mutate(current_supply = purrr::map2_dbl(network, token, current_supply)) %>%
arrange(token, network)
return(result)
}
current_supply_algorand <- function(token) {
algorand_token <- cached_token_address("Algorand", token)
r <- cached_webservice_call(base_url = algorand_service,
verb = 'GET',
path = paste0('v1/asset/', algorand_token))
t <- r$content
supply <- as.numeric(t$circulatingsupply) / 10**t$decimal
return(supply)
}
current_supply_ethereum <- function(token) {
ethereum_token <- cached_token_address("Ethereum", token)
r <- cached_webservice_call(base_url = ethereum_service,
verb = 'GET',
path = paste0('ethereum/erc-20/', ethereum_token,'/stats'))
t <- r$content$data
supply <- as.numeric(t$circulation) / 10**t$decimals
return(supply)
}
current_supply_stellar <- function(token) {
stellar_token <- cached_token_address("Stellar", token)
r <- cached_webservice_call(base_url = stellar_service,
verb = 'GET',
path = 'assets',
query = list(
asset_code = 'USDC',
asset_issuer = stellar_token
))
t <- r$content$`_embedded`$records[[1]]$amount
supply <- as.numeric(t)
return(supply)
}
current_supply_solana <- function(token) {
solana_token <- cached_token_address("Solana", token)
params <- list(
jsonrpc = jsonlite::unbox('2.0'),
id = jsonlite::unbox(1),
method = jsonlite::unbox('getTokenSupply'),
params = solana_token
)
body <- jsonlite::toJSON(params)
r <- cached_webservice_call(base_url = solana_service,
verb = 'POST',
body = body)
t <- r$content$result$value
supply <- as.numeric(t$amount) / 10**t$decimals
return(supply)
}
current_supply_tron <- function(token) {
tron_token <- cached_token_address("TRON", token)
r <- cached_webservice_call(base_url = tron_service,
verb = 'GET',
path = 'api/token_trc20',
query = list(
contract = tron_token,
showAll = 1)
)
t <- r$content$trc20_tokens
supply <- as.numeric(t[[1]]$total_supply_with_decimals) / 10**6
return(supply)
}
|
031c9bff634d24a42efc4e70dcc0d1e0b17faf39
|
892a3354107c4508a79e86181269201275642a04
|
/make.R
|
fa9ad9cc898544890726ad863929b5e3552aa530
|
[] |
no_license
|
mkearney/NCA18
|
b32afe4344c0aa7aeb40e13b7f07782144b20ccc
|
3bb396265883dd68186095c3a90fb1d7fc2e7204
|
refs/heads/master
| 2020-04-05T14:28:45.867847
| 2018-11-11T14:08:17
| 2018-11-11T14:08:17
| 156,931,045
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 487
|
r
|
make.R
|
## load packages
library(mwk)
library(rtweet)
## read data
nca <- readRDS("data/nca18.rds")
## search for NCA tweets
nca_new <- search_tweets(
"nca18 OR nca2018 OR natcomm OR \"national communication association\"",
n = 45000,
token = bearer_token(),
since_id = since_id(nca)
)
## merge data
nca <- bind_rows(nca_new, nca)
nca <- filter(nca, !duplicated(status_id))
## save dat
saveRDS(nca, "data/nca18.rds")
## save IDs
save_as_csv(select(nca, status_id), "status_id.csv")
|
6a267dc5e7294e280c7135cad3023d410015192a
|
fff1983213472577d171b22700db4cde42e23101
|
/R/modeltime_forecast.R
|
9bb08ef0ceba61e32733ecb3c1d8ad7c130cf884
|
[
"MIT"
] |
permissive
|
shizelong1985/modeltime.ensemble
|
dad37a7c161dbc97d95e136f2af199e2a5c9b618
|
1ba373cf79490da89ef26f08f8bd903097c51d04
|
refs/heads/master
| 2023-04-30T13:35:33.209217
| 2021-05-25T18:06:15
| 2021-05-25T18:06:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,035
|
r
|
modeltime_forecast.R
|
# ENSEMBLE FORECAST DISPATCH ----

# 1.0 AVERAGE ENSEMBLE ----

#' Forecast method for simple averaging ensembles.
#'
#' Forecasts every sub-model and combines the predictions point-by-point
#' using the aggregation type ("mean" or "median") stored on the ensemble.
#'
#' @export
#' @importFrom modeltime mdl_time_forecast
mdl_time_forecast.mdl_time_ensemble_avg <- function(object, calibration_data,
                                                    new_data = NULL, h = NULL, actual_data = NULL, bind_actual = TRUE,
                                                    keep_data = FALSE, arrange_index = FALSE, ...) {

  # SUB-MODELS ----
  # The ensemble stores its member models and the averaging type chosen at
  # fit time ("mean" or anything else, which falls back to median below).
  model_tbl <- object$model_tbl
  type      <- object$parameters$type

  # Get the raw forecast results for each of the models; the model
  # description column is dropped since only .model_id is used downstream.
  modeltime_fcast <- modeltime::modeltime_forecast(
    object        = model_tbl,
    new_data      = new_data,
    h             = h,
    actual_data   = actual_data,
    conf_interval = NULL,
    keep_data     = FALSE
  ) %>%
    dplyr::select(-.model_desc)

  # CONVERT CALIBRATION or H TO NEW DATA ----
  # Resolve 'new_data' from calibration data / horizon so an unprocessed
  # copy can be re-attached to the output when keep_data = TRUE.
  new_data <- convert_calib_h_to_new_data(new_data, h, calibration_data, actual_data)

  # For combining new data (untouched copies, bound back in finalize step)
  actual_data_unprocessed <- actual_data
  new_data_unprocessed    <- new_data

  # SEPARATE ACTUAL & FORECAST ----
  # .key contains "actual" when actual_data was supplied; split it off so
  # only prediction rows are averaged.
  contains_actual <- "actual" %in% unique(modeltime_fcast$.key)
  if (contains_actual) {
    actual_data <- modeltime_fcast %>%
      dplyr::filter(.key == "actual") %>%
      dplyr::select(-.model_id)
    modeltime_fcast <- modeltime_fcast %>%
      dplyr::filter(.key != "actual")
  }

  # ENSEMBLE CALCULATION ----
  # Select correct summary function for the ensemble type.
  if (type == "mean") {
    summary_fun <- mean
  } else {
    summary_fun <- stats::median
  }

  # Calculate Ensemble: tag each model's rows with a row id, pivot to one
  # column per model, then aggregate across model columns within each row.
  # na.rm = FALSE, so a missing prediction from any sub-model yields a
  # missing ensemble value at that time point.
  modeltime_fcast <- modeltime_fcast %>%
    # Add row id's
    dplyr::group_by(.model_id) %>%
    dplyr::group_split() %>%
    purrr::map(.f = function(df) {
      df %>%
        tibble::rowid_to_column(var = ".row_id")
    }) %>%
    dplyr::bind_rows() %>%
    # Pivot to Wide
    tidyr::pivot_wider(
      names_from   = .model_id,
      names_prefix = ".model_id_",
      values_from  = .value
    ) %>%
    dplyr::rowwise() %>%
    dplyr::mutate(.value = summary_fun( dplyr::c_across( dplyr::starts_with(".model_id_") ), na.rm = FALSE )) %>%
    dplyr::ungroup() %>%
    dplyr::select(-dplyr::starts_with(".model_id_"), -.row_id)

  # FINALIZE ----
  # Recombine with actual observations when requested.
  if (contains_actual && bind_actual) {
    modeltime_fcast <- actual_data %>%
      dplyr::bind_rows(modeltime_fcast)
  }

  ret <- finalize_mdl_time_forecast(modeltime_fcast, keep_data,
                                    actual_data_unprocessed, new_data_unprocessed,
                                    contains_actual, bind_actual,
                                    arrange_index)

  return(ret)
}
# 2.0 WEIGHTED ENSEMBLE ----

#' Forecast method for weighted ensembles.
#'
#' Forecasts every sub-model, scales each model's predictions by its
#' loading (weight), and sums the scaled predictions point-by-point.
#'
#' @export
mdl_time_forecast.mdl_time_ensemble_wt <- function(object, calibration_data,
                                                   new_data = NULL, h = NULL, actual_data = NULL, bind_actual = TRUE,
                                                   keep_data = FALSE, arrange_index = FALSE, ...) {

  # SUB-MODELS -----
  # loadings_tbl maps .model_id -> .loadings (the per-model weight).
  model_tbl    <- object$model_tbl
  loadings_tbl <- object$fit$loadings_tbl

  # Get the raw forecast results for each of the sub-models.
  modeltime_fcast <- modeltime::modeltime_forecast(
    object        = model_tbl,
    new_data      = new_data,
    h             = h,
    actual_data   = actual_data,
    conf_interval = NULL,
    keep_data     = FALSE
  ) %>%
    dplyr::select(-.model_desc)

  # CONVERT CALIBRATION or H TO NEW DATA ----
  new_data <- convert_calib_h_to_new_data(new_data, h, calibration_data, actual_data)

  # For combining new data (untouched copies, bound back in finalize step)
  actual_data_unprocessed <- actual_data
  new_data_unprocessed    <- new_data

  # SEPARATE ACTUAL & FORECAST ----
  # .key contains "actual" when actual_data was supplied.
  contains_actual <- "actual" %in% unique(modeltime_fcast$.key)
  if (contains_actual) {
    actual_data <- modeltime_fcast %>%
      dplyr::filter(.key == "actual") %>%
      dplyr::select(-.model_id)
    modeltime_fcast <- modeltime_fcast %>%
      dplyr::filter(.key != "actual")
  }

  # ENSEMBLE CALCULATION -----
  # Calculate Ensemble: weight each model's predictions, pivot wide
  # (one column per model), then sum the weighted columns per row.
  # na.rm = FALSE, so a missing sub-model prediction propagates to NA.
  modeltime_fcast <- modeltime_fcast %>%
    # Add row id's
    dplyr::group_by(.model_id) %>%
    dplyr::group_split() %>%
    purrr::map(.f = function(df) {
      df %>%
        tibble::rowid_to_column(var = ".row_id")
    }) %>%
    dplyr::bind_rows() %>%
    # Apply loadings: scale each prediction by its model's weight.
    dplyr::left_join(loadings_tbl, by = ".model_id") %>%
    dplyr::mutate(.value = .value * .loadings) %>%
    dplyr::select(-.loadings) %>%
    # Pivot to Wide
    tidyr::pivot_wider(
      names_from   = .model_id,
      names_prefix = ".model_id_",
      values_from  = .value
    ) %>%
    dplyr::rowwise() %>%
    dplyr::mutate(.value = sum( dplyr::c_across( dplyr::starts_with(".model_id_") ), na.rm = FALSE )) %>%
    dplyr::ungroup() %>%
    dplyr::select(-dplyr::starts_with(".model_id_"), -.row_id)

  # FINALIZE -----
  # Recombine with actual observations when requested.
  if (contains_actual && bind_actual) {
    modeltime_fcast <- actual_data %>%
      dplyr::bind_rows(modeltime_fcast)
  }

  ret <- finalize_mdl_time_forecast(modeltime_fcast, keep_data,
                                    actual_data_unprocessed, new_data_unprocessed,
                                    contains_actual, bind_actual,
                                    arrange_index)

  return(ret)
}
# 3.0 MODEL SPEC ENSEMBLE ----

#' Forecast method for stacked (meta-learner) ensembles.
#'
#' Forecasts every sub-model, arranges the sub-model predictions as
#' features (one column per model), and feeds them to the fitted
#' meta-learner workflow to produce the final forecast.
#'
#' @export
mdl_time_forecast.mdl_time_ensemble_model_spec <- function(object, calibration_data,
                                                           new_data = NULL, h = NULL, actual_data = NULL, bind_actual = TRUE,
                                                           keep_data = FALSE, arrange_index = FALSE, ...) {

  # SUB-MODELS ----
  # wflw_fit is the fitted meta-learner (stacking) workflow.
  model_tbl <- object$model_tbl
  wflw_fit  <- object$fit$fit

  # Get the raw forecast results for each of the sub-models.
  modeltime_fcast <- modeltime::modeltime_forecast(
    object        = model_tbl,
    new_data      = new_data,
    h             = h,
    actual_data   = actual_data,
    conf_interval = NULL,
    keep_data     = FALSE
  ) %>%
    dplyr::select(-.model_desc)

  # CONVERT CALIBRATION or H TO NEW DATA ----
  new_data <- convert_calib_h_to_new_data(new_data, h, calibration_data, actual_data)

  # For combining new data (untouched copies, bound back in finalize step)
  actual_data_unprocessed <- actual_data
  new_data_unprocessed    <- new_data

  # SEPARATE ACTUAL & FORECAST ----
  # .key contains "actual" when actual_data was supplied.
  contains_actual <- "actual" %in% unique(modeltime_fcast$.key)
  if (contains_actual) {
    actual_data <- modeltime_fcast %>%
      dplyr::filter(.key == "actual") %>%
      dplyr::select(-.model_id)
    modeltime_fcast <- modeltime_fcast %>%
      dplyr::filter(.key != "actual")
  }

  # ENSEMBLE CALCULATION ----
  # Build the meta-learner's feature table: one ".model_id_*" column per
  # sub-model, rows aligned by forecast position.
  data_prepared_tbl <- modeltime_fcast %>%
    # Add row id's
    dplyr::group_by(.model_id) %>%
    dplyr::group_split() %>%
    purrr::map(.f = function(df) {
      df %>%
        tibble::rowid_to_column(var = ".row_id")
    }) %>%
    dplyr::bind_rows() %>%
    # Pivot to Wide
    tidyr::pivot_wider(
      names_from   = .model_id,
      names_prefix = ".model_id_",
      values_from  = .value
    ) %>%
    dplyr::select(-.row_id)

  # The meta-learner consumes the sub-model predictions as predictors.
  pred_vec <- stats::predict(wflw_fit, new_data = data_prepared_tbl) %>%
    dplyr::pull(.pred)

  modeltime_fcast <- data_prepared_tbl %>%
    dplyr::select(.key, .index) %>%
    dplyr::mutate(.value = pred_vec)

  # FINALIZE ----
  # Recombine with actual observations when requested.
  if (contains_actual && bind_actual) {
    modeltime_fcast <- actual_data %>%
      dplyr::bind_rows(modeltime_fcast)
  }

  ret <- finalize_mdl_time_forecast(modeltime_fcast, keep_data,
                                    actual_data_unprocessed, new_data_unprocessed,
                                    contains_actual, bind_actual,
                                    arrange_index)

  return(ret)
}
# UTILITIES ----

# Resolve the data frame to forecast over.
#
# Precedence: when a horizon 'h' is given, the calibration data (or,
# failing that, the actual data) is extended 'h' periods into the future
# and that extension is returned -- even if 'new_data' was also supplied.
# When neither 'new_data' nor 'h' is given, the calibration data (or
# actual data) itself is returned. Aborts when no usable source exists.
convert_calib_h_to_new_data <- function(new_data, h, calibration_data, actual_data, ...) {

  calib_is_df  <- is.data.frame(calibration_data)
  actual_is_df <- is.data.frame(actual_data)

  # No explicit data and no horizon: fall back to an existing data frame.
  if (is.null(new_data) && is.null(h)) {
    if (calib_is_df) {
      new_data <- calibration_data
    } else if (actual_is_df) {
      new_data <- actual_data
    } else {
      rlang::abort("Forecast requires 'new_data', 'calibration_data', or 'actual_data'.")
    }
  }

  # A horizon was given: extend an existing data frame 'h' periods ahead.
  # suppressMessages() silences timetk's automatic date-column selection.
  if (!is.null(h)) {
    if (calib_is_df) {
      new_data <- tryCatch(
        suppressMessages(timetk::future_frame(calibration_data, .length_out = h, ...)),
        error = function(e) {
          rlang::abort("Attempt to extend '.calibration_data' into the future using 'h' has failed.")
        }
      )
    } else if (actual_is_df) {
      new_data <- tryCatch(
        suppressMessages(timetk::future_frame(actual_data, .length_out = h, ...)),
        error = function(e) {
          rlang::abort("Attempt to extend 'actual_data' into the future using 'h' has failed.")
        }
      )
    } else {
      rlang::abort("Forecast requires 'new_data', '.calibration_data', or 'actual_data'.")
    }
  }

  return(new_data)
}
# Shape the combined forecast into the standard modeltime output.
#
# Keeps only (.key, .index, .value), coerces .key into a factor with the
# levels "actual"/"prediction", optionally binds the unprocessed input
# columns back on (keep_data), and optionally sorts by key then index.
finalize_mdl_time_forecast <- function(modeltime_fcast, keep_data,
                                       actual_data_unprocessed, new_data_unprocessed,
                                       contains_actual, bind_actual,
                                       arrange_index) {

  out <- dplyr::select(modeltime_fcast, .key, .index, .value)
  out <- dplyr::mutate(out, .key = factor(.key, levels = c("actual", "prediction")))

  # Re-attach the original (unprocessed) columns row-wise. Actual rows get
  # the actual-data columns; prediction rows get the new-data columns.
  if (keep_data) {
    actual_part <- NULL
    if (contains_actual && bind_actual) {
      actual_part <- dplyr::bind_cols(
        dplyr::filter(out, .key == "actual"),
        actual_data_unprocessed
      )
    }
    prediction_part <- dplyr::bind_cols(
      dplyr::filter(out, .key == "prediction"),
      new_data_unprocessed
    )
    out <- dplyr::bind_rows(actual_part, prediction_part)
  }

  if (arrange_index) {
    out <- dplyr::arrange(out, .key, .index)
  }

  return(out)
}
# 4.0 RECURSIVE ----

#' Forecast dispatcher for recursive ensembles.
#'
#' Routes to the single-series or the panel recursive forecaster based on
#' the object's class.
#'
#' @export
mdl_time_forecast.recursive_ensemble <- function(object, calibration_data,
                                                 new_data = NULL, h = NULL, actual_data = NULL, bind_actual = TRUE,
                                                 keep_data = FALSE, arrange_index = FALSE, ...) {

  if (inherits(object, "recursive")) {
    # Single time series: predict one step at a time, feeding each
    # prediction back in as a lagged feature.
    ret <- mdl_time_forecast_recursive_ensemble(
      object = object, calibration_data = calibration_data,
      new_data = new_data, h = h, actual_data = actual_data,
      bind_actual = bind_actual, keep_data = keep_data,
      arrange_index = arrange_index, ...
    )
  } else if (inherits(object, "recursive_panel")) {
    # Panel data: recurse across all groups simultaneously.
    ret <- mdl_time_forecast_recursive_ensemble_panel(
      object = object, calibration_data = calibration_data,
      new_data = new_data, h = h, actual_data = actual_data,
      bind_actual = bind_actual, keep_data = keep_data,
      arrange_index = arrange_index, ...
    )
  } else {
    # BUG FIX: previously neither branch matching fell through to
    # return(ret) and raised the obscure "object 'ret' not found" error.
    stop(
      "mdl_time_forecast.recursive_ensemble(): 'object' must inherit from ",
      "'recursive' or 'recursive_panel'.",
      call. = FALSE
    )
  }

  return(ret)
}
# Recursive forecast for a single time series.
#
# Forecasts one row of 'new_data' at a time. After each step, the
# prediction is written back into 'new_data' (column 'y_var') so the
# spec's transform can derive lagged/rolling features for the next step.
mdl_time_forecast_recursive_ensemble <- function(object, calibration_data,
                                                 new_data = NULL, h = NULL, actual_data = NULL, bind_actual = TRUE,
                                                 keep_data = FALSE, arrange_index = FALSE, ...) {

  # SETUP ----
  # Recursive spec: target column name, the feature transform, and the
  # tail of the training data needed to seed lagged features.
  y_var <- object$spec$y_var
  # Drop the recursive wrapper classes so the next mdl_time_forecast()
  # call dispatches to the underlying ensemble method.
  class(object) <- class(object)[3:length(class(object))]
  .transform <- object$spec[["transform"]]
  train_tail <- object$spec$train_tail

  # LOOP LOGIC ----
  # Forecast the first row directly, then write its prediction back into
  # new_data so subsequent steps can lag it.
  .first_slice <- new_data %>%
    dplyr::slice_head(n = 1)

  .forecasts <- modeltime::mdl_time_forecast(
    object,
    new_data      = .first_slice,
    h             = h,
    actual_data   = actual_data,
    keep_data     = keep_data,
    arrange_index = arrange_index,
    ...
  )

  .forecast_from_model <- .forecasts %>%
    dplyr::filter(.key == "prediction")

  new_data[1, y_var] <- .forecast_from_model$.value

  # NOTE(review): 2:nrow(new_data) assumes at least two forecast rows;
  # with a single row this would iterate c(2, 1) -- confirm callers
  # always pass nrow(new_data) >= 2.
  for (i in 2:nrow(new_data)) {

    # Prepend the training tail so the transform can compute lags that
    # reach back before the forecast window.
    .temp_new_data <- dplyr::bind_rows(
      train_tail,
      new_data
    )

    # Recompute features for step i, then forecast just that slice.
    .nth_slice <- .transform(.temp_new_data, nrow(new_data), i)

    .nth_forecast <- modeltime::mdl_time_forecast(
      object,
      new_data      = .nth_slice,
      h             = h,
      actual_data   = actual_data,
      keep_data     = keep_data,
      arrange_index = arrange_index,
      ...
    )

    # Keep only the first prediction row for this step.
    .nth_forecast_from_model <-
      .nth_forecast %>%
      dplyr::filter(.key == "prediction") %>%
      .[1,]

    .forecasts <- dplyr::bind_rows(
      .forecasts, .nth_forecast_from_model
    )

    # Feed the step-i prediction back into new_data for later lags.
    new_data[i, y_var] <- .nth_forecast_from_model$.value

  }

  return(.forecasts)
}
# Recursive forecast for panel (grouped) data.
#
# Like the single-series version, but advances all groups one time step
# per iteration: step i forecasts the i-th row of every group at once,
# then feeds those predictions back into 'new_data' for the next step.
mdl_time_forecast_recursive_ensemble_panel <- function(object, calibration_data,
                                                       new_data = NULL, h = NULL, actual_data = NULL, bind_actual = TRUE,
                                                       keep_data = FALSE, arrange_index = FALSE, ...) {

  # SETUP ----
  y_var <- object$spec$y_var
  # Drop the recursive wrapper classes so the next mdl_time_forecast()
  # call dispatches to the underlying ensemble method.
  class(object) <- class(object)[3:length(class(object))]
  .transform <- object$spec[["transform"]]
  train_tail <- object$spec$train_tail
  # Grouping column, captured as a symbol for tidy evaluation.
  id  <- object$spec$id
  .id <- dplyr::ensym(id)

  # LOOP LOGIC ----
  # Scratch table of predictions, with a within-group row counter
  # ('rowid..') used to address "the i-th row of every group" at once.
  .preds <- tibble::tibble(.id = new_data %>% dplyr::pull(!! .id),
                           .pred = numeric(nrow(new_data))) %>%
    dplyr::group_by(.id) %>%
    dplyr::mutate(rowid.. = dplyr::row_number()) %>%
    dplyr::ungroup()

  # First row of each group, forecast directly (no fed-back lags needed).
  .first_slice <- new_data %>%
    dplyr::group_by(!! .id) %>%
    dplyr::slice_head(n = 1) %>%
    dplyr::ungroup()

  # Same within-group row counter on the forecast frame itself.
  new_data <- new_data %>%
    dplyr::group_by(!! .id) %>%
    dplyr::mutate(rowid.. = dplyr::row_number()) %>%
    dplyr::ungroup()

  # Strip the helper column before handing data to the model.
  if ("rowid.." %in% names(.first_slice)) {
    .first_slice <- .first_slice %>% dplyr::select(-rowid..)
  }

  .forecasts <- modeltime::mdl_time_forecast(
    object,
    new_data      = .first_slice,
    h             = h,
    actual_data   = actual_data,
    keep_data     = keep_data,
    arrange_index = arrange_index,
    ...
  ) %>%
    dplyr::filter(!is.na(.value))

  .forecast_from_model <- .forecasts %>%
    dplyr::filter(.key == "prediction")

  # Write the step-1 predictions of all groups back in one assignment
  # (relies on row order matching between .preds, new_data, and the
  # forecast output).
  .preds[.preds$rowid.. == 1, 2] <- new_data[new_data$rowid.. == 1, y_var] <- .forecast_from_model$.value

  # Number of groups = number of distinct ids (row count of the count table).
  .groups <- new_data %>%
    dplyr::group_by(!! .id) %>%
    dplyr::count(!! .id) %>%
    dim() %>%
    .[1]

  # Steps per group; assumes every group has the same number of rows --
  # TODO confirm upstream guarantees balanced panels.
  new_data_size <- nrow(.preds)/.groups

  # NOTE(review): 2:new_data_size assumes at least two steps per group.
  for (i in 2:new_data_size) {

    # Prepend the training tail so lagged features can reach back before
    # the forecast window.
    .temp_new_data <- dplyr::bind_rows(
      train_tail,
      new_data
    )

    # Recompute features for step i across all groups.
    .nth_slice <- .transform(.temp_new_data, new_data_size, i, id)

    # NOTE(review): this re-checks .first_slice, which already had
    # 'rowid..' removed before the loop -- looks redundant; possibly
    # .nth_slice was intended. Confirm before changing.
    if ("rowid.." %in% names(.first_slice)) {
      .first_slice <- .first_slice %>% dplyr::select(-rowid..)
    }

    # Align the slice's columns (and order) with what the model saw.
    .nth_slice <- .nth_slice[names(.first_slice)]

    .nth_forecast <- modeltime::mdl_time_forecast(
      object,
      new_data      = .nth_slice,
      h             = h,
      actual_data   = actual_data,
      keep_data     = keep_data,
      arrange_index = arrange_index,
      ...
    ) %>%
      dplyr::filter(!is.na(.value))

    .nth_forecast_from_model <- .nth_forecast %>%
      dplyr::filter(.key == "prediction")

    .forecasts <- dplyr::bind_rows(
      .forecasts, .nth_forecast_from_model
    )

    # Feed the step-i predictions of all groups back in.
    .preds[.preds$rowid.. == i, 2] <- new_data[new_data$rowid.. == i, y_var] <- .nth_forecast_from_model$.value

  }

  return(.forecasts)
}
|
02b8349ab1047738faa380a89da86922b8e929d1
|
a380fc9c34297805106ca9e7ea164b3a6d763468
|
/data-analysis/1-localRegression/src/1.R
|
55202b557a7be84e5276e33b3996e43f5306c749
|
[] |
no_license
|
cameronbracken/classy
|
a87c92194230baf46b5363ad058710d0c93964cb
|
5099b6a9ecacd32bfb59ad69245f5d244cfeeb88
|
refs/heads/master
| 2020-04-05T23:16:13.524536
| 2011-04-07T16:19:33
| 2011-04-07T16:19:33
| 293,346
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 292
|
r
|
1.R
|
#!/usr/bin/env Rscript

# Local-regression exercise, step 1: load the simple 1-D test data and
# cache the workspace for the later steps in the pipeline.
#
# Changes from the original:
#  * dropped rm(list = ls()) -- a non-interactive Rscript session starts
#    empty, and clearing the workspace inside a script is discouraged;
#  * library() instead of require() so a missing locfit fails loudly
#    instead of returning FALSE silently;
#  * TRUE instead of the reassignable shorthand T.

suppressPackageStartupMessages(library(locfit))
source('libblocfit.R')

# Ensure the output directory exists before writing into it.
if (!file.exists('output')) dir.create('output')

# Read simple test data (tab-separated with a header row).
oneD <- read.table('data/oneD.tab', header = TRUE)
x <- oneD$x
y <- oneD$y

# Persist everything for the downstream scripts.
save(list = ls(), file = 'output/1.Rdata')
|
b6c06e61685029007f3b5fa7e4e7b489c94948a8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fsthet/examples/calc.actual.fst.Rd.R
|
d08268c6f2fca281ad19d0aea1cc2e6749340908
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 526
|
r
|
calc.actual.fst.Rd.R
|
# Auto-extracted example code for fsthet::calc.actual.fst (pulled from the
# package's Rd documentation). Computes global Fst values on a small
# simulated genepop-style data frame.
library(fsthet)

### Name: calc.actual.fst
### Title: This calculates global Fsts from a genepop dataframe.
### Aliases: calc.actual.fst

### ** Examples

# Toy genepop data frame: two populations of 20 individuals each, one
# locus ("loc0") with genotypes sampled at random.
# NOTE(review): sample() without set.seed() makes the example
# non-reproducible between runs.
gpop<-data.frame(popinfo=c(rep("POP 1", 20),rep("POP 2", 20)),ind.names=c(1:20,1:20),
loc0=sample(c("0101","0102","0202"),40,replace=TRUE))
fsts<-calc.actual.fst(gpop)

## Not run:
##D gfile<-system.file("extdata", "example.genepop.txt",package = 'fsthet')
##D gpop<-my.read.genepop(gfile)
##D fsts<-calc.actual.fst(gpop)
##D
## End(Not run)
|
9da06467cf799a074bcd0eda5f139955bc293a60
|
5a7f7ebee0e458863e1da9d2a0fcc93b600d1786
|
/man/Package.Rd
|
d94b81dc99de2f31c2b117d8304ae1536231b406
|
[] |
no_license
|
HenrikBengtsson/R.oo
|
68071bacb43afe2a46201aea0350a3597ee19e6c
|
4101a141b2fa49a43a10df99f56c180ba2c662e6
|
refs/heads/master
| 2023-01-06T23:48:54.872999
| 2022-06-12T18:04:23
| 2022-06-12T18:04:23
| 19,437,907
| 20
| 1
| null | 2018-05-02T04:51:57
| 2014-05-04T22:47:54
|
R
|
UTF-8
|
R
| false
| false
| 7,392
|
rd
|
Package.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% Package.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{Package}
\docType{class}
\alias{Package}
\title{The Package class provides methods for accessing package information}
\description{
Package: R.oo \cr
\bold{Class Package}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{Package}\cr
\bold{Directly known subclasses:}\cr
\cr
public class \bold{Package}\cr
extends \link[R.oo]{Object}\cr
Creates a Package that can be thrown and caught. The \code{Package}
class is the root class of all other \code{Package} classes.
}
\usage{
Package(name=NULL)
}
\arguments{
\item{name}{Name of the package.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{\link[R.oo:as.character.Package]{as.character}} \tab Gets a string representation of this package.\cr
\tab \code{\link[R.oo:getAuthor.Package]{getAuthor}} \tab Gets the Author of this package.\cr
\tab \code{\link[R.oo:getBundle.Package]{getBundle}} \tab Gets the Bundle that this package might belong to.\cr
\tab \code{\link[R.oo:getBundlePackages.Package]{getBundlePackages}} \tab Gets the names of the other packages that is in the same bundle as this package.\cr
\tab \code{\link[R.oo:getChangeLog.Package]{getChangeLog}} \tab Gets the change log of this package.\cr
\tab \code{\link[R.oo:getClasses.Package]{getClasses}} \tab Gets all classes of a package.\cr
\tab \code{\link[R.oo:getContents.Package]{getContents}} \tab Gets the contents of this package.\cr
\tab \code{\link[R.oo:getContribUrl.Package]{getContribUrl}} \tab Gets the URL(s) from where this package can be installed.\cr
\tab \code{\link[R.oo:getDataPath.Package]{getDataPath}} \tab Gets the path to the data (data/) directory of this package.\cr
\tab \code{\link[R.oo:getDate.Package]{getDate}} \tab Gets the date when package was build.\cr
\tab \code{\link[R.oo:getDescription.Package]{getDescription}} \tab Gets the description of the package.\cr
\tab \code{\link[R.oo:getDescriptionFile.Package]{getDescriptionFile}} \tab Gets the description file of this package.\cr
\tab \code{\link[R.oo:getDevelUrl.Package]{getDevelUrl}} \tab Gets the URL(s) from where the developers version of this package can be installed.\cr
\tab \code{\link[R.oo:getDocPath.Package]{getDocPath}} \tab Gets the path to the accompanying documentation (doc/) directory of this package.\cr
\tab \code{\link[R.oo:getEnvironment.Package]{getEnvironment}} \tab Gets the environment of a loaded package.\cr
\tab \code{\link[R.oo:getExamplePath.Package]{getExamplePath}} \tab Gets the path to the example (R-ex/) directory of this package.\cr
\tab \code{getHistory} \tab -\cr
\tab \code{\link[R.oo:getHowToCite.Package]{getHowToCite}} \tab Gets the citation of this package.\cr
\tab \code{\link[R.oo:getLicense.Package]{getLicense}} \tab Gets the License of this package.\cr
\tab \code{\link[R.oo:getMaintainer.Package]{getMaintainer}} \tab Gets the Maintainer of this package.\cr
\tab \code{\link[R.oo:getName.Package]{getName}} \tab Gets the name of this package.\cr
\tab \code{getNews} \tab -\cr
\tab \code{\link[R.oo:getPath.Package]{getPath}} \tab Gets the library (system) path to this package.\cr
\tab \code{\link[R.oo:getPosition.Package]{getPosition}} \tab Gets the search path position of the package.\cr
\tab \code{\link[R.oo:getTitle.Package]{getTitle}} \tab Gets the Title of this package.\cr
\tab \code{\link[R.oo:getUrl.Package]{getUrl}} \tab Gets the URL of this package.\cr
\tab \code{\link[R.oo:getVersion.Package]{getVersion}} \tab Gets the version of this package.\cr
\tab \code{\link[R.oo:isLoaded.Package]{isLoaded}} \tab Checks if the package is installed on the search path or not.\cr
\tab \code{\link[R.oo:isOlderThan.Package]{isOlderThan}} \tab Checks if the package is older than a given version.\cr
\tab \code{\link[R.oo:ll.Package]{ll}} \tab Generates a list of informative properties of all members of the package.\cr
\tab \code{\link[R.oo:load.Package]{load}} \tab Loads a package.\cr
\tab \code{\link[R.oo:showChangeLog.Package]{showChangeLog}} \tab Show the change log of this package.\cr
\tab \code{\link[R.oo:showContents.Package]{showContents}} \tab Show the CONTENTS file of this package.\cr
\tab \code{\link[R.oo:showDescriptionFile.Package]{showDescriptionFile}} \tab Show the DESCRIPTION file of this package.\cr
\tab \code{showHistory} \tab -\cr
\tab \code{\link[R.oo:showHowToCite.Package]{showHowToCite}} \tab Show the HOWTOCITE file of this package.\cr
\tab \code{showNews} \tab -\cr
\tab \code{\link[R.oo:startupMessage.Package]{startupMessage}} \tab Generates a 'package successfully loaded' package startup message.\cr
\tab \code{\link[R.oo:unload.Package]{unload}} \tab Unloads a package.\cr
}
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save
}
\examples{\dontrun{# By defining .onAttach() as follows in zzz.R for a package, an
# instance of class Package with the same name as the package will
# be made available on the search path. More over, the code below
# will also inform the user that the package has been loaded:
#
# > library(R.oo)
# R.oo v0.52 (2003/04/13) was successfully loaded.
#
.onAttach <- function(libname, pkgname) {
pkg <- Package(pkgname)
assign(pkgname, pkg, pos=getPosition(pkg))
cat(getName(pkg), " v", getVersion(pkg), " (", getDate(pkg), ")",
" was successfully loaded.\n", sep="")
}
# The Package class works for any packages, loaded or not.
# Some information about the base package
pkg <- Package("base")
print(pkg)
# [1] "Package: base v3.6.2 is loaded (pos=14). Title: The R Base Package.
# The official webpage is NA and the maintainer is R Core Team <R-core@
# r-project.org>. The package is installed in /usr/lib/R/library/base/.
# License: Part of R 3.6.2. Description: Base R functions. Type
# showNews(base) for package history, and ?base for help."
print(list.files(Package("base")$dataPath))
# Some information about the R.oo package
print(R.oo::R.oo)
# [1] "Package: R.oo v1.23.0-9000 . Title: R Object-Oriented Programming
# with or without References. The official webpage is https://github.com/
# HenrikBengtsson/R.oo and the maintainer is Henrik Bengtsson. The package
# is installed in /home/alice/R/x86_64-pc-linux-gnu-library/3.6/R.oo/.
# License: LGPL (>= 2.1). Description: Methods and classes for object-
# oriented programming in R with or without references. Large effort has
# been made on making definition of methods as simple as possible with a
# minimum of maintenance for package developers. The package has been
# developed since 2001 and is now considered very stable. This is a
# cross-platform package implemented in pure R that defines standard S3
# classes without any tricks. Type showNews(R.oo) for package history,
# and ?R.oo for help."
}}
\author{Henrik Bengtsson}
\keyword{programming}
\keyword{methods}
\keyword{classes}
|
16c49448383f248ff69d7410ff88dbc2d8c8e34c
|
84108ae54987955233447c20e1dc4ddba272a42f
|
/R/Rexam/day6/data_processing2.R
|
0701815d59fe4c141dc8ded159a080b952a8c639
|
[] |
no_license
|
MyChoYS/K_TIL
|
bb82554a6b68f9d3fcd39c9031331ea4df0d8716
|
8417a6f22addcfe08bcb708d61c91091f702b2cb
|
refs/heads/master
| 2023-04-24T05:58:29.253920
| 2021-05-07T06:13:34
| 2021-05-07T06:13:34
| 325,244,373
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 759
|
r
|
data_processing2.R
|
# Problem 2: line-by-line text-cleaning exercises on memo.txt.
# Each numbered task cleans exactly one line of the file with a regex.
# (Comments translated from Korean; behavior change noted at task #6.)

#1 Remove punctuation from line 1
memo <- readLines("data/memo.txt", encoding = "UTF-8")  # read file line by line
memo[1] <- gsub("[[:punct:]]", "", memo[1])
memo[1]

#2 Replace "e" with "E" in line 2
memo[2] <- gsub("e", "E", memo[2])
memo[2]

#3 Remove digits from line 3
memo[3] <- gsub("[[:digit:]]", "", memo[3])
memo[3]

#4 Remove alphabet characters from line 4
# (the original used two passes, [a-z] then [A-Z]; one combined class
# produces the identical result)
memo[4] <- gsub("[a-zA-Z]", "", memo[4])
memo[4]

#5 Remove punctuation and digits from line 5
memo[5] <- gsub("[[:punct:][:digit:]]", "", memo[5])
memo[5]

#6 Remove whitespace from line 6
# BUG FIX: the original applied gsub to memo[5], overwriting line 6 with
# a de-spaced copy of line 5; the task comment ("remove whitespace")
# and the 1-task-per-line pattern indicate memo[6] was intended.
memo[6] <- gsub("[[:space:]]", "", memo[6])
memo[6]

#7 Convert line 7 to lowercase
memo[7] <- tolower(memo[7])
memo[7]

memo
write(memo, file = "memo_new.txt")  # export as a .txt file
|
3a505af4e32a8ed6d339e797fa9a2aa4dc538c25
|
34b2dbfbe9fecf3e17e719a8778c87e585b9c298
|
/man/stat_pde_density.Rd
|
48e26f763c6fa164cc30f7fde17a0bde6b994183
|
[] |
no_license
|
Mthrun/DataVisualizations
|
512713fd1f236386e4423c323b2921675890be15
|
bfbb79557050e2eae8571bdebfb2e9d16f7fc20a
|
refs/heads/master
| 2023-08-23T13:45:38.465468
| 2023-08-07T14:23:41
| 2023-08-07T14:23:41
| 127,918,063
| 7
| 3
| null | 2019-01-30T15:32:16
| 2018-04-03T14:08:04
|
R
|
UTF-8
|
R
| false
| false
| 4,440
|
rd
|
stat_pde_density.Rd
|
\name{stat_pde_density}
\alias{stat_pde_density}
\title{
Calculate Pareto density estimation for ggplot2 plots
}
\description{
This function enables to replace the default density estimation for ggplot2 plots with the Pareto density estimation [Ultsch, 2005].
It is used for the PDE-Optimized violin plot published in [Thrun et al, 2018].
}
\usage{
stat_pde_density(mapping = NULL,
data = NULL,
geom = "violin",
position = "dodge",
...,
trim = TRUE,
scale = "area",
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[=aes]{aes()}} or
\code{\link[=aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[=ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[=fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame.}, and
will be used as the layer data.}
\item{geom}{The geometric object to use to display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{Other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{color = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{trim}{This parameter only matters if you are displaying multiple
densities in one plot. If `FALSE`, the default, each density is
computed on the full range of the data. If `TRUE`, each density
is computed over the range of that group: this typically means the
estimated x values will not line-up, and hence you won't be able to
stack density values.}
\item{scale}{When used with geom_violin: if "area" (default), all violins have the same area (before trimming
the tails). If "count", areas are scaled proportionally to the number of
observations. If "width", all violins have the same maximum width.}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[=borders]{borders()}}.}
}
\details{
Pareto Density Estimation (PDE) is a method for the estimation of probability density functions using hyperspheres. The Pareto-radius of the hyperspheres is derived from the optimization of information for minimal set size. It is shown that Pareto Density is the best estimate for clusters of Gaussian structure. The method is shown to be robust when clusters overlap and when the variances differ across clusters.
}
\references{
Ultsch, A.: Pareto density estimation: A density estimation for knowledge discovery, in Baier, D.; Werrnecke, K. D., (Eds), Innovations in classification, data science, and information systems, Proc Gfkl 2003, pp 91-100, Springer, Berlin, 2005.
[Thrun et al, 2018] Thrun, M. C., Pape, F., & Ultsch, A. : Benchmarking Cluster Analysis Methods using PDE-Optimized Violin Plots, Proc. European Conference on Data Analysis (ECDA), accepted, Paderborn, Germany, 2018.
}
\author{
Felix Pape
}
\seealso{
\code{[ggplot2]stat_density}
}
\examples{
miris <- reshape2::melt(iris)
ggplot2::ggplot(miris,
mapping = ggplot2::aes_string(y = 'value', x = 'variable')) +
ggplot2::geom_violin(stat = "PDEdensity")
}
|
60a807bf7f6dc541b66a34b75b26bdf324596fb2
|
40ecae36bc31ed0619c94e3efde8ac4e13129f1a
|
/2017/07.R
|
fb0e12993a15a9bc605f4ff9dca2cfa18780d23d
|
[] |
no_license
|
epson121/adventofcode
|
d5ece712395fadf4aa939b7b3971556b3ca70449
|
febd4a060d1c23dc068ae7553193c8282ff1bef7
|
refs/heads/master
| 2023-04-06T20:37:54.767182
| 2023-04-05T08:27:52
| 2023-04-05T08:27:52
| 48,589,288
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,275
|
r
|
07.R
|
# Read a text file and split every line on `delimiter` (a regex).
# Returns a list with one character vector per line.
getLines <- function(filename, delimiter = " ") {
  content <- readLines(filename)
  strsplit(content, delimiter)
}
# Advent of Code 2017, day 7.
# Part 1: parse the program tower ("name (weight) -> child, child") and
# find the root -- the one program never listed as anyone's child.

lines <- getLines("07.txt", "->")

programs <- NULL
children <- NULL
childrenPerProgram <- NULL
weightPerProgram <- NULL

for (i in seq_along(lines)) {
  cp <- lines[[i]]
  parts <- unlist(strsplit(cp[[1]], " "))
  pName <- parts[[1]]
  # Weight appears as "(NN)": strip the parentheses, then convert.
  # BUG FIX: the original called toint(), which is not a base R
  # function and would error at runtime; as.integer() is the intended
  # conversion.
  pWeight <- as.integer(gsub("[()]", "", parts[[2]]))

  # A second "->" part means this program lists children.
  if (length(cp) > 1) {
    chs <- trimws(cp[[2]])
    chs <- unlist(strsplit(chs, ", "))
    children <- c(children, chs)
  }

  programs <- c(programs, pName)
  # Leaf programs assign NULL here, so they simply get no entry;
  # lookups for them return NULL (length 0) downstream.
  childrenPerProgram[[pName]] <- chs
  weightPerProgram[[pName]] <- pWeight
  chs <- NULL
}

# Every program except the root appears in `children`. After sorting
# both lists, the first index where they disagree is the root's name.
programs <- sort(programs)
children <- sort(children)

root <- NULL
# BUG FIX: iterate over the actual number of programs instead of the
# hard-coded 1:10000 (which errors past the end when no mismatch is
# found early).
for (i in seq_along(programs)) {
  if (programs[[i]] != children[[i]]) {
    root <- programs[[i]]
    break
  }
}
# Total weight of the subtree rooted at `child`: the program's own
# weight plus the full subtree weights of all its children.
# Relies on the global lookup lists `weightPerProgram` and
# `childrenPerProgram` built by the parsing loop above; leaf programs
# have no childrenPerProgram entry (NULL), which the loop skips.
weightPP <- function(child) {
  total <- weightPerProgram[[child]]
  for (kid in childrenPerProgram[[child]]) {
    total <- total + weightPP(kid)
  }
  total
}
# Part 2: manually check which program is wrong by going through the
# children of "boropxd" (a node found by hand while walking down from the
# root) and comparing each child's own weight against its full subtree
# weight -- the child whose subtree total differs from its siblings' is
# the unbalanced one.
for (i in childrenPerProgram[["boropxd"]]) {
  print(i)
  print(weightPerProgram[[i]])
  print(weightPP(i))
}
#print(weightPP("qjvtm"))
|
412d14d53ef87ed1c32b29dbeeb77799bddc0d87
|
1608e6e652b53ee796af232551ba42cc560be9c7
|
/R/get_urban_summary_for_each_species.R
|
9dee8ec8b92f7f6f0a2904bcf3aaf10d945f776f
|
[] |
no_license
|
coreytcallaghan/BIOC_108753
|
bc0e23ba2bffd3fdccbf7ac92d69e36bae2a6639
|
70f2420713387d6953bf19bfb88d5adbf863227d
|
refs/heads/master
| 2022-12-04T15:12:45.047130
| 2020-08-26T23:48:39
| 2020-08-26T23:48:39
| 251,501,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,460
|
r
|
get_urban_summary_for_each_species.R
|
## an R script to get the urban score for every species
## by taking the median of their night-time lights values
## first will start with an initial exploratory figure though

# packages
library(ggplot2)
library(dplyr)
library(tidyr)

# read in iNaturalist observation data
inat_dat <- readRDS("data/lvl2_terra.rds")

# read in lights data
# where each record is assigned a lights value
lights <- readRDS("data/viirs_lights_obs.RDS")

# join the two on the shared record identifier and drop observations
# that did not receive a light value
data <- inat_dat %>%
  left_join(., lights, by="catalogNumber") %>%
  # Filters out observations without light pixel values.
  filter(!is.na(lights))

# number of species
length(unique(data$species))

# exploratory figure: histogram of records per species (log10 x-axis,
# since record counts are heavily right-skewed)
data %>%
  group_by(species) %>%
  summarize(N=n()) %>%
  ggplot(., aes(x=N))+
  geom_histogram(bins=50, color="black", fill="orange")+
  scale_x_log10()+
  theme_bw()+
  theme(axis.text=element_text(color="black"))+
  xlab("Number of occurrences")+
  ylab("Count")

ggsave("outputs/graphics/number_records_by_species.png")

# calculate statistics of night-time lights value for every species
# (median is the headline "urban score"; the others give spread/context)
# and the number of records for each species into a dataframe
urban_scores <- data %>%
  group_by(species) %>%
  summarize(median_lights=median(lights),
            mean_lights=mean(lights),
            min_lights=min(lights),
            max_lights=max(lights),
            sd_lights=sd(lights),
            number_of_records=n())

saveRDS(urban_scores, "data/urbanness_summaries.RDS")
|
4c95ab92532f6b9a7b58c0b00218e1c1dce02fda
|
3fa396ad04038e9b586e30fb3ed0e528ac59fcc7
|
/projectA/porject_5.R
|
4f1cc4adada95ef4d164e98f63b4424ae2de778d
|
[] |
no_license
|
fionisme/datascience
|
f889f4dc719b4f04a89a8771f68ccf76fa25e077
|
5ee87a97efb55ee60b3f175be66a9061a1477da2
|
refs/heads/master
| 2021-04-09T15:15:28.275329
| 2018-11-06T13:31:10
| 2018-11-06T13:31:10
| 125,801,879
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,039
|
r
|
porject_5.R
|
# Wine classification with a single-hidden-layer neural network (nnet)
# on the UCI Wine dataset.
# NOTE(review): install.packages() calls inside a script re-install on
# every run; prefer installing once interactively and keeping only the
# library() calls here.
library(httr)
library(devtools)
install.packages("DMwR")
library(DMwR)
library(nnet)
install.packages("reshape")
library(reshape)
library(scales)
library(ggplot2)
install.packages("NeuralNetTools")
library(NeuralNetTools)

# NOTE(review): clearing the workspace inside a script is discouraged;
# run the script in a fresh session instead.
rm(list = ls())

# Download the UCI Wine data; the first column is the class label.
wine <- read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data" , header = T)
str(wine)
colnames(wine) <- c("class", "Alcohol", "Malic acid", "Ash", "Alcalinity of ash", "Magnesium", "Total phenols", "Flavanoids", "Nonflavanoid phenols", "Proanthocyanins", "Color intensity", "Hue", "OD280/OD315 of diluted wines", "Proline")

# 70/30 random train/test split.
# NOTE(review): no set.seed() call, so the split (and thus the results)
# is not reproducible between runs.
n <- nrow(wine)
t_size = round(0.7 * n)
t_idx <- sample(seq_len(n), size = t_size)
traindata <- wine[t_idx,]
testdata <- wine[ - t_idx,]

# Fit a neural net: 3 hidden units, linear output, weight decay 0.001.
nnetM <- nnet(formula = class ~ ., linout = T, size = 3, decay = 0.001, maxit = 1000, trace = T, data = traindata)
NeuralNetTools::plotnet(nnetM, wts.only = F)

# Predict on the held-out set and cross-tabulate actual vs predicted.
prediction <- predict(nnetM, testdata, type = 'raw')
cm <- table(x = testdata$class, y = prediction, dnn = c("實際", "預測"))
cm
|
0324888458169043f03183bfb7b5ea6f958fc878
|
df1c4feee3da7e39f233fa45ec4dc34338d1a300
|
/man/fmRsq.Rd
|
d196a5e7ace6dccaefab30f09684fab3eca69404
|
[] |
no_license
|
AvinashAcharya/factorAnalytics
|
1abf7e436417f63f938504733feb791186805e91
|
dd3572c5454e4f1691bbf76009144931383983a6
|
refs/heads/master
| 2020-04-05T15:17:00.784572
| 2018-10-01T22:14:56
| 2018-10-01T22:14:56
| 59,917,661
| 25
| 20
| null | 2016-05-28T22:55:24
| 2016-05-28T22:55:24
| null |
UTF-8
|
R
| false
| true
| 2,995
|
rd
|
fmRsq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fmRsq.R
\name{fmRsq}
\alias{fmRsq}
\alias{fmRsq.ffm}
\title{Factor Model R-Squared and Adj R-Squared Values}
\usage{
fmRsq(ffmObj, ...)
\method{fmRsq}{ffm}(ffmObj, rsq = T, rsqAdj = F, plt.type = 2,
digits = 2, isPrint = T, isPlot = T, lwd = 2, stripText.cex = 1,
axis.cex = 1, title = TRUE, ...)
}
\arguments{
\item{ffmObj}{an object of class \code{ffm} produced by \code{fitFfm}}
\item{...}{potentially further arguments passed.}
\item{rsq}{logical; if \code{TRUE}, Factor Model R-squared values are computed for the portfolio. Default is \code{TRUE}.}
\item{rsqAdj}{logical; if \code{TRUE}, Adjusted R-squared values are computed for the portfolio. Default is \code{FALSE}.}
\item{plt.type}{a number to indicate the type of plot for plotting Factor Model R-squared/Adj. R-squared values.
1 indicates barplot, 2 indicates time series xy plot. Default is 2.}
\item{digits}{an integer indicating the number of decimal places to be used for rounding. Default is 2.}
\item{isPrint}{logical. if \code{TRUE}, the time series of the computed factor model values is printed along with their mean values.
Else, only the mean values are printed. Default is \code{TRUE}.}
\item{isPlot}{logical. if \code{TRUE}, the time series of the output is plotted. Default is \code{TRUE}.}
\item{lwd}{line width relative to the default. Default is 2.}
\item{stripText.cex}{a number indicating the amount by which strip text in the plot(s) should be scaled relative to the default. 1=default, 1.5 is 50\% larger, 0.5 is 50\% smaller, etc.}
\item{axis.cex}{a number indicating the amount by which axis in the plot(s) should be scaled relative to the default. 1=default, 1.5 is 50\% larger, 0.5 is 50\% smaller, etc.}
\item{title}{logical. If \code{TRUE}, the plots will have the main title. Default is \code{TRUE}.}
}
\value{
\code{fmRsq} returns the sample mean values and plots the time series of corresponding R squared values
and the Variance Inflation factors depending on the values of \code{rsq}, \code{rsqAdj} and \code{VIF}.
The time series of the output values are also printed if \code{isPrint} is \code{TRUE}
}
\description{
Calculate and plot the Factor Model R-Squared and Adjusted R-Squared for a portfolio of assets
}
\examples{
#Load the data
data("factorDataSetDjia5Yrs")
#Fit a Ffm
require(factorAnalytics)
fit <- fitFfm(data=factorDataSetDjia5Yrs, asset.var="TICKER", ret.var="RETURN",
date.var="DATE", exposure.vars="SECTOR")
#Calculate and plot the portfolio R-squared values
fmRsq(fit)
fit1 <- fitFfm(data=factorDataSetDjia5Yrs, asset.var="TICKER", ret.var="RETURN",
date.var="DATE", exposure.vars=c("SECTOR", "P2B", "EV2S", "MKTCAP"), addIntercept=TRUE)
#Plot and print the time series of Adj R-squared and VIF values
fmRsq(fit1, rsqAdj=TRUE, isPrint=TRUE, plt.type = 2)
}
\author{
Avinash Acharya and Doug Martin
}
|
e9c358a002029f26f3109cf5de4ec692bf35d882
|
e25513cd273b541a743d5eaff45a4d800a14c36c
|
/group3 lab.R
|
cbd0e89d8b8abe35ad08ac5bcd4c7558ecf6eb29
|
[] |
no_license
|
YueYuan717/DataScience
|
695078abea229c872abfb8258d2bdbec907bfac4
|
02fb18fb4d276d81e5fd62d53cbd6648ca2b890d
|
refs/heads/master
| 2022-05-28T17:39:38.306055
| 2020-05-04T23:56:09
| 2020-05-04T23:56:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,154
|
r
|
group3 lab.R
|
# Load the UCI Wine data set. The raw file is comma-separated with no header
# row, so read.table's default header = FALSE is correct here.
wine_data <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data", sep = ",")
head(wine_data)
nrow(wine_data)
dim(wine_data)
# Adding the variable names: "Cvs" is the cultivar class label; the remaining
# 13 columns are the chemical measurements.
colnames(wine_data) <- c("Cvs", "Alcohol",
"Malic_Acid", "Ash", "Alkalinity_of_Ash",
"Magnesium", "Total_Phenols", "Flavanoids", "NonFlavanoid_Phenols",
"Proanthocyanins", "Color_Intensity", "Hue", "OD280/OD315_of_Diluted_Wine",
"Proline")
head(wine_data) # Now you can see the header names.
# Using the heatmap() function, we can check the correlations.
# With the default color scale, darker cells indicate stronger correlation
# between variable pairs and lighter cells indicate weaker correlation.
help("heatmap") # Read the heatmap() function documentation in RStudio.
# Rowv = NA / Colv = NA suppress the row/column dendrograms and reordering,
# so the correlation matrix is shown in its original variable order.
heatmap(cor(wine_data),Rowv = NA, Colv = NA)
help("factor")
# Treat the cultivar label as a categorical factor rather than a number.
cultivar_classes <- factor(wine_data$Cvs)
cultivar_classes
help("prcomp")
help("scale")
# PCA on the standardized measurements; column 1 (the class label) is
# excluded so only the 13 chemical variables enter the decomposition.
wine_data_PCA <- prcomp(scale(wine_data[,-1]))
summary(wine_data_PCA)
|
fd6c53f5a507b61442784f7fc596c6ab022bf877
|
6e4f004782186082b73025cda95f31bcae76afcf
|
/man/gl.pcoa.Rd
|
3948d48bc2383f6a6495047a998b773f4b5c3d36
|
[] |
no_license
|
carlopacioni/dartR
|
319fbff40a385ca74ab7490b07857b0b027c93a8
|
06614b3a328329d00ae836b27616227152360473
|
refs/heads/master
| 2023-08-23T00:32:10.850006
| 2021-09-08T06:52:44
| 2021-09-08T06:52:44
| 262,468,788
| 0
| 0
| null | 2020-05-09T02:07:08
| 2020-05-09T02:07:07
| null |
UTF-8
|
R
| false
| true
| 7,267
|
rd
|
gl.pcoa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gl.pcoa.r
\name{gl.pcoa}
\alias{gl.pcoa}
\title{Ordination applied to genotypes in a genlight object (PCA), in an fd object, or to a distance matrix (PCoA)}
\usage{
gl.pcoa(
x,
nfactors = 5,
correction = NULL,
parallel = FALSE,
n.cores = 16,
verbose = NULL
)
}
\arguments{
\item{x}{-- name of the genlight object or fd object containing the SNP data, or a distance matrix of type dist [required]}
\item{nfactors}{-- number of axes to retain in the output of factor scores.}
\item{correction}{Method applied to correct for negative eigenvalues, either 'lingoes' or 'cailliez' [Default NULL]}
\item{parallel}{TRUE if parallel processing is required (does fail under Windows) [default FALSE]}
\item{n.cores}{Number of cores to use if parallel processing is requested [default 16]}
\item{verbose}{-- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2 or as specified using gl.set.verbosity]}
}
\value{
An object of class pcoa containing the eigenvalues and factor scores
}
\description{
This function takes the genotypes for individuals and undertakes a Pearson Principal Component analysis (PCA) on SNP or Tag P/A (SilicoDArT)
data; it undertakes a Gower Principal Coordinate analysis (PCoA) if supplied with a distance matrix. Technically, any distance matrix can
be represented in an ordinated space using PCoA.
}
\details{
The function is essentially a wrapper for glPca {adegenet} or pcoa \{ape\} with default settings apart from those specified as parameters in this
function.
While, technically, any distance matrix can be represented in an ordinated space, the representation will not typically be exact.There are three
major sources of stress in a reduced representation of distances or dissimilarities among entities using PCA or PCoA. By far the greatest
source comes from the decision to select only the top two or three axes from the ordinated set of axes derived from the PCA or PCoA. The representation of
the entities in such a heavily reduced space will not faithfully represent the distances in the input distance matrix simply because of the loss of information
in deeper informative dimensions. For this reason, it is not sensible to be too precious about managing the other two sources of stress in
the visual representation.
The measure of distance between entities in a PCA is the Pearson Correlation Coefficient, essentially a standardized Euclidean distance. This is both a
metric distance and a Euclidean distance. In PCoA, the second source of stress is the choice of distance measure or dissimilarity measure. While any
distance or dissimilarity matrix can be represented in an ordinated space, the distances between entities can be faithfully represented
in that space (that is, without stress) only if the distances are metric. Furthermore, for distances between entities to be faithfully
represented in a rigid Cartesian space, the distance measure needs to be Euclidean. If this is not the case,
the distances between the entities in the ordinated visualized space will not exactly represent the distances in the input matrix
(stress will be non-zero). This source of stress will be evident as negative eigenvalues in the deeper dimensions.
A third source of stress arises from having a sparse dataset, one with missing values. This affects both PCA and PCoA. If the original data matrix
is not fully populated, that is, if there are missing values, then even a Euclidean distance matrix will not necessarily be 'positive definite'.
It follows that some of the eigenvalues may be negative, even though the distance metric is Euclidean. This issue is exacerbated when the number
of loci greatly exceeds the number of individuals, as is typically the case when working with SNP data. The impact of missing values can be minimized
by stringently filtering on Call Rate, albeit with loss of data. An alternative is given in a paper "Honey, I shrunk the sample covariance matrix"
and more recently by Ledoit and Wolf (2018), but their approach has not been implemented here.
The good news is that, unless the sum of the negative eigenvalues, arising from a non-Euclidean distance measure or from missing values, approaches those
of the final PCA or PCoA axes to be displayed, the distortion is probably of no practical consequence and certainly not comparable to the stress arising from
selecting only two or three final dimensions out of several informative dimensions for the visual representation.
Two diagnostic plots are produced. The first is a Scree Plot, showing the percentage variation explained by each of the PCA or PCoA axes, for those axes that
explain more than the original variables (loci) on average. That is, only informative axes are displayed. The scree plot informs the number of dimensions
to be retained in the visual summaries. As a rule of thumb, axes with more than 10% of variation explained should be included.
The second graph shows the distribution of eigenvalues for the remaining uninformative (noise) axes, including those with negative eigenvalues.
Action is recommended (verbose >= 2) if the negative eigenvalues are dominant, their sum approaching in magnitude the eigenvalues for axes selected for
the final visual solution.
Output is a glPca object conforming to adegenet::glPca but with only the following retained.
$call
The call that generated the PCA/PCoA
$eig
Eigenvalues -- All eigenvalues (positive, null, negative).
$scores
Scores (coefficients) for each individual
$loadings
Loadings of each SNP for each principal component
PCA was developed by Pearson (1901) and Hotelling (1933), whilst the best modern reference is Jolliffe (2002). PCoA was developed by Gower (1966) while the
best modern reference is Legendre & Legendre (1998).
}
\examples{
fd <- gl.fixed.diff(testset.gl)
fd <- gl.collapse(fd)
pca <- gl.pcoa(fd)
gl.pcoa.plot(pca,fd)
}
\references{
Cailliez, F. (1983) The analytical solution of the additive constant problem. Psychometrika, 48, 305-308.
Gower, J. C. (1966) Some distance properties of latent root and vector methods used in multivariate analysis. Biometrika, 53, 325-338.
Hotelling, H., 1933. Analysis of a complex of statistical variables into Principal Components. Journal of Educational Psychology 24:417-441, 498-520.
Jolliffe, I. (2002) Principal Component Analysis. 2nd Edition, Springer, New York.
Ledoit, O. and Wolf, M. (2018). Analytical nonlinear shrinkage of large-dimensional covariance matrices. University of Zurich, Department of Economics, Working Paper No. 264, Revised version. Available at SSRN: https://ssrn.com/abstract=3047302 or http://dx.doi.org/10.2139/ssrn.3047302
Legendre, P. and Legendre, L. (1998). Numerical Ecology, Volume 24, 2nd Edition. Elsevier Science, NY.
Lingoes, J. C. (1971) Some boundary conditions for a monotone analysis of symmetric matrices. Psychometrika, 36, 195-203.
Pearson, K. (1901). On lines and planes of closest fit to systems of points in space. Philosophical Magazine. Series 6, vol. 2, no. 11, pp. 559-572.
}
\author{
Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
}
|
118c4a67100abcebfa2fc6b6d4cfd77e96cc561d
|
0e9c3db76085be6fde8604fbc6b99544ce759531
|
/man/stsubpop-class.Rd
|
8079daaa6ce96ac1026f191a7e350f8de10334af
|
[] |
no_license
|
cran/stepp
|
17fdfcf41185fd4c4daa16ac9a373755b7d55e16
|
f3e3283c9514ff9fd3edccbab47dae8bf580233c
|
refs/heads/master
| 2022-07-06T05:50:33.186438
| 2022-06-18T10:50:01
| 2022-06-18T10:50:01
| 17,700,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,841
|
rd
|
stsubpop-class.Rd
|
\name{stsubpop-class}
\Rdversion{1.3}
\docType{class}
\alias{stsubpop-class}
\alias{generate,stsubpop-method}
\alias{summary,stsubpop-method}
\title{Class \code{"stsubpop"}}
\description{
This is the S4 class for stepp subpopulation object. The subpopulations are generated
based on the stepp windows and the covariate of interest.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("stsubpop")} or the constructor method stepp.subpop.
}
\section{Slots}{
\describe{
\item{\code{win}:}{Object of class \code{"stwin"} \cr
the stepp window set up for the analysis }
\item{\code{covar}:}{Object of class \code{"numeric"} \cr
the covariate of interest }
\item{\code{nsubpop}:}{Object of class \code{"numeric"} \cr
the number of subpopulations generated }
\item{\code{subpop}:}{Object of class \code{"ANY"} \cr
a matrix of subpopulations generated based on the stepp window and the specified covariate of interest }
\item{\code{npatsub}:}{Object of class \code{"numeric"}
a vector of size of each subpopulation }
\item{\code{medianz}:}{Object of class \code{"numeric"} \cr
a vector of median value of the covariate of interest for each subpopulation }
\item{\code{minc}:}{Object of class \code{"numeric"} \cr
a vector of the minimum value of the covariate of interest for each subpopulation }
\item{\code{maxc}:}{Object of class \code{"numeric"} \cr
a vector of the maximum value of the covariate of interest for each subpopulation }
\item{\code{neventsubTrt0}:}{Object of class \code{"numeric"} or \code{NULL} \cr
a vector containing the number of events in each subpopulation for the baseline treatment group}
\item{\code{neventsubTrt1}:}{Object of class \code{"numeric"} or \code{NULL} \cr
a vector containing the number of events in each subpopulation for the active treatment group}
\item{\code{init}:}{Object of class \code{"logical"} \cr
a logical value indicating if the subpopulations have already been generated or not }
}
}
\section{Methods}{
\describe{
\item{generate}{\code{signature(.Object = "stsubpop", win, covariate, coltype, coltrt, trts, minsubpops)}: \cr
a method to generate the subpopulations based on the stepp window object and the specified covariate of interest. For event-based windows, also the event type (\code{coltype}), treatment indicator (\code{coltrt}), treatments list (\code{trts}) and minimum number of subpopulations (\code{minsubpops}) must be provided }
\item{summary}{\code{signature(.Object = "stsubpop")}: \cr
a method to display the summary of the subpopulations generated }
}
}
\author{
Wai-Ki Yip
}
\seealso{
\code{\linkS4class{stwin}}, \code{\linkS4class{stmodelKM}},
\code{\linkS4class{stmodelCI}}, \code{\linkS4class{stmodelGLM}},
\code{\linkS4class{steppes}}, \code{\linkS4class{stmodel}},
\code{\link{stepp.win}}, \code{\link{stepp.subpop}}, \code{\link{stepp.KM}},
\code{\link{stepp.CI}}, \code{\link{stepp.GLM}},
\code{\link{stepp.test}}, \code{\link{estimate}}, \code{\link{generate}}
}
\examples{
showClass("stsubpop")
# create a stepp window
win1 <- stepp.win(type="sliding", r1=5,r2=10)
# generate the covariate of interest
Y <- rnorm(100)
# create and generate the stepp subpopulation
sp <- new("stsubpop")
sp <- generate(sp, win=win1, cov=Y)
summary(sp)
# event-based windows using the BIG data set
data(bigKM)
rxgroup <- bigKM$trt
time <- bigKM$time
evt <- bigKM$event
cov <- bigKM$ki67
swin_e <- new("stwin", type = "sliding_events", e1 = 10, e2 = 20)
subp_e <- new("stsubpop")
subp_e <- generate(subp_e, win = swin_e, covariate = cov, coltype = evt,
coltrt = rxgroup, trts = c(1, 2), minsubpops = 5)
summary(subp_e)
}
\keyword{classes}
|
84c1ec3bda4dd290a273cc03820d4c58938bd5dd
|
4fda4291ed5f30c9110866db54d966766dd27a56
|
/R/hat.R
|
698a1dbb13186be8084ed9ce6e5eed37c0a4130e
|
[] |
no_license
|
cran/mrdrc
|
ad24d0609eecf34b926c70415a3125453425958b
|
3dc543f1ebc5a45c676cb748c003fe4c2ce60484
|
refs/heads/master
| 2021-01-22T07:32:24.973345
| 2011-04-12T00:00:00
| 2011-04-12T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,473
|
r
|
hat.R
|
# Hat matrix for a linear regression model at design points or subset thereof
# Observe that zero-weight obs drop out from design matrix X due to the
# internals of lm. Therefore NAs are put into design points of weight zero.
# This is perhaps not the most intuitive function.
# lm (class lm, object): lm fit object
# rows (integer, vector): rows (design points) at which the hat matrix is evaluated
hat.design <- function (lm, rows=1:length(lm$residuals)) {
  QR <- lm$qr
  weights <- weights(lm)
  if ( !is.null(weights) ) {
    # lm drops zero-weight observations from its internal design matrix, so:
    #  - K re-expands columns from the reduced set back to all observations,
    #  - W.sqrt holds sqrt of the non-zero weights (a VECTOR here),
    #  - requested rows that carry zero weight are remapped/dropped, since
    #    they have no corresponding row in the reduced fit.
    zero.weights <- (weights==0)
    K <- diag(!zero.weights)[!zero.weights,]
    W.sqrt <- sqrt(weights[!zero.weights])
    # Here trickery to fix rows for zero weights:
    # map[i] is i's position after zero-weight rows are removed (NA if dropped).
    map <- 1:length(weights) - cumsum (zero.weights)
    map[zero.weights] <- NA
    rows <- map[rows]
    rows <- rows[!is.na(rows)]
  }
  if ( QR["rank"]==1 ) {
    # Rank 1 (intercept-only fit): every hat row is constant, 1/n in the
    # unweighted case or w_j / sum(w) per column in the weighted case.
    n <- length (lm$residuals)
    if ( is.null(weights) ) {
      matrix (data=1/n, nrow=length(rows), ncol=n)
    } else {
      sum.w <- sum(weights)
      matrix (data=weights/sum.w, nrow=length(rows), ncol=n, byrow = TRUE)
    }
  } else if ( QR["rank"]==2 ) {
    # Rank 2 (intercept + one covariate): H = Q Q' unweighted; the weighted
    # case rescales the selected rows by 1/sqrt(w), the columns by sqrt(w),
    # and re-expands via K to include the zero-weight columns.
    Q <- qr.Q(QR)
    if ( is.null(weights) ) {
      Q[rows,] %*% t(Q)
    } else {
      t(
        apply (
          cbind( Q[rows,] , 1.0/(W.sqrt[rows]) ) ,
          1 ,
          function (x) {x[1:2] * x[3] }
        )
      ) %*% t(Q) %*% diag(W.sqrt) %*% K
    }
  } else {
    stop ("hat.design: rank of QR decomposition not 1 or 2")
  }
}
# Hat matrix for a linear regression model evaluated at arbitrary covariate
# values (not necessarily design points).
# lm (class lm, object): lm fit object
# xs (numeric, vector): covariate values at which the hat matrix is evaluated
hat.point <- function (lm, xs) {
  QR <- lm$qr
  weights <- weights(lm)
  if ( !is.null(weights) ) {
    # As in hat.design: lm drops zero-weight obs internally, and K re-expands
    # columns to the full observation set. Note that W.sqrt is a diagonal
    # MATRIX here (it is a plain vector in hat.design).
    zero.weights <- (weights==0)
    K <- diag(!zero.weights)[!zero.weights,]
    W.sqrt <- diag(sqrt(weights[!zero.weights]))
  }
  if ( QR["rank"]==1 ) {
    # Rank 1 (intercept-only): constant hat rows, as in hat.design.
    n <- length (lm$residuals)
    if ( is.null(weights) ) {
      matrix (data=1/n, nrow=length(xs), ncol=n)
    } else {
      sum.w <- sum(weights)
      matrix (data=weights/sum.w, nrow=length(xs), ncol=n, byrow = TRUE)
    }
  } else if ( QR["rank"]==2 ) {
    # Rank 2: reconstruct the hat rows at new points via the QR factors,
    # [1, xs] R^{-1} Q' (with the sqrt-weight and K re-expansion applied in
    # the weighted case).
    Q.t <- t(qr.Q(QR))
    R.inv <- solve(qr.R(QR))
    if ( is.null(weights) ) {
      cbind(1,xs) %*% R.inv %*% Q.t
    } else {
      cbind(1,xs) %*% R.inv %*% Q.t %*% W.sqrt %*% K
    }
  } else {
    stop ("hat.point: rank of QR decomposition not 1 or 2")
  }
}
|
ef64a1d6985a62350be796bd5c22c24e25419a3a
|
5c4c58456a2a0f1d80c5eb783d6cbc8f846f8414
|
/man/geo_geometry_type.Rd
|
7a0cbd3bf3d99df01f9eba519c2a5f435239b8af
|
[
"MIT"
] |
permissive
|
paleolimbot/geovctrs
|
6b9882efe4a05d2d3614932feae68c0c6b9a37f6
|
12de2c88d29df5c5e770405b0d772906a2e18533
|
refs/heads/master
| 2021-05-18T01:28:15.343654
| 2020-07-24T18:53:27
| 2020-07-24T18:53:27
| 251,045,555
| 24
| 2
| null | 2020-06-27T02:37:54
| 2020-03-29T13:59:38
|
R
|
UTF-8
|
R
| false
| true
| 812
|
rd
|
geo_geometry_type.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geo-geometry-type.R
\name{geo_geometry_type}
\alias{geo_geometry_type}
\alias{geo_geometry_type.default}
\alias{geo_geometry_type.wk_wkt}
\alias{geo_geometry_type.wk_wkb}
\alias{geo_geometry_type.wk_wksxp}
\title{Extract feature geometry type}
\usage{
geo_geometry_type(x)
\method{geo_geometry_type}{default}(x)
\method{geo_geometry_type}{wk_wkt}(x)
\method{geo_geometry_type}{wk_wkb}(x)
\method{geo_geometry_type}{wk_wksxp}(x)
}
\arguments{
\item{x}{A geometry-like object, or one that can be
coerced to a geometry-like object using \code{\link[=as_geovctr]{as_geovctr()}}.}
}
\value{
A character vector with lowercase geometry types
}
\description{
Extract feature geometry type
}
\examples{
geo_geometry_type("POINT EMPTY")
}
|
e4f8d4f71929f07b8aa4e3c8e6b68726acd496f5
|
c764e8494c52ecd32a17d24d32c7fcbdb0f9a510
|
/R/RobustSVD.R
|
d9c04e63ee4e4a71a1076f33d52eeb8136626242
|
[] |
no_license
|
ericaponzi/RaJIVE
|
a5dbc6c78c65c4e726c98a91f840ab80909cdca9
|
7d494d11029e538587ee9b27688adacf7f1f9841
|
refs/heads/master
| 2023-02-26T18:55:26.323379
| 2021-01-28T12:54:39
| 2021-01-28T12:54:39
| 305,993,799
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,111
|
r
|
RobustSVD.R
|
#' Computes the robust SVD of a matrix
#'
#' Extracts up to \code{nrank} robust singular triplets sequentially: the
#' first triplet is fit to \code{data}, and each subsequent triplet is fit to
#' the residual left after subtracting the reconstruction so far (deflation).
#'
#' @param data Matrix. X matrix.
#' @param nrank Integer. Rank of SVD decomposition.
#' @param svdinit List. The standard SVD, used to initialize every robust fit.
#' @importFrom stats median
#' @return List with components \code{d} (singular values), \code{u}, \code{v}.
RobRSVD.all <- function(data, nrank = min(dim(data)), svdinit = svd(data))
{
  # First robust rank-1 layer, initialized from the classical SVD.
  data.svd1 <- RobRSVD1(data, sinit = svdinit$d[1],
                        uinit = svdinit$u[, 1], vinit = svdinit$v[, 1])
  d <- data.svd1$s
  u <- data.svd1$u
  v <- data.svd1$v
  Red <- d * u %*% t(v)
  Rm <- min(min(dim(data)), nrank)
  # BUG FIX: the original loop header `for (i in 1:(Rm - 1))` expands to
  # c(1, 0) when Rm == 1, so requesting nrank = 1 ran the deflation loop
  # twice and returned a rank-2 result. seq_len(Rm - 1) is empty in that case.
  for (i in seq_len(Rm - 1)) {
    # Fit the next layer to the residual; initialization reuses the leading
    # classical triplet, as in the original implementation.
    data.svd1 <- RobRSVD1((data - Red), sinit = svdinit$d[1],
                          uinit = svdinit$u[, 1], vinit = svdinit$v[, 1])
    d <- c(d, data.svd1$s)
    u <- cbind(u, data.svd1$u)
    v <- cbind(v, data.svd1$v)
    Red <- (u %*% diag(d) %*% t(v))
  }
  # Build the named result directly instead of assigning names() afterwards.
  list(d = d, u = u, v = v)
}
# One robust rank-1 SVD layer via iteratively reweighted least squares (IRLS)
# with Huber weights.
#
# data   : numeric matrix to approximate by s * u v'
# huberk : Huber tuning constant (1.345 is the conventional choice)
# niter  : maximum number of IRLS iterations
# tol    : convergence tolerance on the max absolute change of the fitted matrix
# sinit, uinit, vinit : initial singular value / vectors (e.g. from svd())
#
# Returns list(s = scalar, v = unit right vector, u = unit left vector).
RobRSVD1<- function (data, huberk = 1.345, niter = 1000,
tol = 1e-05, sinit, uinit, vinit)
{
size_data = c(dim(data))
m = size_data[1]
n = size_data[2]
sold = sinit
vold = vinit
# Fold the scale into u so the current fit is simply uold %*% t(vold).
uold = sold * uinit
Appold = uold %*% t(vold)
Rmat = data - Appold
Rvec = c(Rmat)
# Robust residual scale: MAD-type estimate (median absolute residual / 0.675).
mysigma = median(abs(Rvec))/0.675
iter = 1
localdiff = 9999
while (localdiff > tol & iter < niter) {
# Huber weights: 1 for residuals within huberk*sigma, shrinking as
# huberk*sigma/|r| beyond that cutoff.
Wmat = huberk/abs(Rmat/mysigma)
Wmat[Wmat > 1] = 1
# Weighted least-squares update of u given v.
# NOTE(review): the 2*mysigma^2 terms look like a regularization/penalty
# component of the RobRSVD formulation — confirm against the reference.
uterm1 = diag(colSums(diag(c(vold^2)) %*% t(Wmat))) +
(2 * mysigma^2) * (c(t(vold) %*% (diag(n)) %*% vold) * (diag(m)) - diag(sum(vold^2), m))
uterm2 = (Wmat * data) %*% vold
unew = solve(uterm1) %*% uterm2
# Symmetric update of v given the new u.
vterm1 = diag(colSums(diag(c(unew^2)) %*% Wmat)) +
(2 * mysigma^2) * (c(t(unew) %*% (diag(m)) %*% unew) * (diag(n)) - diag(sum(unew^2), n))
vterm2 = t(Wmat * data) %*% unew
vnew = solve(vterm1) %*% vterm2
Appnew = unew %*% t(vnew)
Rmat = data - Appnew
# Convergence check on the change of the fitted low-rank approximation.
localdiff = max(abs(Appnew - Appold))
Appold = Appnew
# Renormalize: push the scale into uold, keep vold as a unit vector.
uold = sqrt(sum(vnew^2)) * unew
vold = vnew/sqrt(sum(vnew^2))
iter = iter + 1
}
# Final split into unit vectors u, v and the scale s.
v = vold
s = sqrt(sum(uold^2))
u = uold/sqrt(sum(uold^2))
return(list(s = s, v = v, u = u))
}
|
0a7df54e93e61d8f7f35850476740e69bc3e39e3
|
d0d849058ad9c94fb7fcc4a49e2d50d990132e9e
|
/Gene.expression.R
|
b3e3687b4c627d2cf0ecf5cf480dab6bf55b215f
|
[] |
no_license
|
huajiahen/rosette-RNA-Seq
|
862019bf3b22addfb1553d4f260d72d7839e3cea
|
ab6e37a064794ed21cfaa6529e4451bd7391b1ea
|
refs/heads/master
| 2020-12-25T05:07:37.535429
| 2016-05-23T05:51:25
| 2016-05-23T05:51:25
| 59,454,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,846
|
r
|
Gene.expression.R
|
# Gene expression plotting helpers.
# Plot log2(value + 1) expression profiles for an exact set of gene symbols
# from All.gene.expression (columns 2:7 are the expression values + Symbol).
#
# Genes: character vector, matched exactly against the Symbol column.
# title: plot title; defaults to "" to preserve the original untitled output.
#
# BUG FIX: callers later in this file pass a second argument, e.g.
# drawGenes(c("CDH1", "CDH2"), "Cadherins"), which errored with the old
# single-parameter signature ("unused argument"). Adding `title` with a
# default keeps existing one-argument calls working and matches drawGene.like.
drawGenes <- function(Genes, title = "") {
  All.gene.expression %>%
    filter(Symbol %in% Genes) %>%
    subset(select = 2:7) %>%
    melt(measure.vars = 1:5) %>%
    mutate(value = log2(value + 1)) %>%
    ggplot(aes(x=variable, y=value, group=Symbol)) +
    geom_line(aes(color=Symbol)) +
    geom_point(aes(color=Symbol)) +
    ylim(0, 10) +
    labs(x = "", y = "", title = title) +
    theme(legend.title = element_blank())
}
# Plot log2(value + 1) expression profiles for every gene whose Symbol matches
# the regular expression `Gene` (via grepl). Same contract as the original:
# columns 2:7 of All.gene.expression are reshaped long, log-transformed, and
# drawn as one colored line+point series per Symbol, y-axis fixed to [0, 8].
drawGene.like <- function(Gene, title = "") {
  # Select the matching rows and the expression columns first.
  matched <- All.gene.expression %>%
    filter(grepl(Gene, Symbol)) %>%
    subset(select = 2:7)
  # Reshape to long form and apply the log2(x + 1) transform.
  long_form <- matched %>%
    melt(measure.vars = 1:5) %>%
    mutate(value = log2(value + 1))
  ggplot(long_form, aes(x = variable, y = value, group = Symbol)) +
    geom_line(aes(color = Symbol)) +
    geom_point(aes(color = Symbol)) +
    ylim(0, 8) +
    labs(x = "", y = "", title = title) +
    theme(legend.title = element_blank())
}
# Exploratory plotting of pathway / marker genes using the helpers above.
# NOTE(review): drawGeneID() is not defined in this file — presumably sourced
# elsewhere; verify it is loaded before running this section.
##Wnt
#R3 WNT8B
#R5 WNT1 WNT3 WNT3A WNT8B
drawGene.like("WNT")
#Frizzled
drawGene.like("FZD")
drawGene.like("^SMO$")
#DVL
drawGene.like("^DVL")
drawGene.like("^APC")
# Individual marker genes by Entrez-style ID; outer parentheses force the
# assigned plot object to print (display) immediately.
(Nestin <- drawGeneID("10763"))
(NCAM <- drawGeneID("4684"))
(NCAD <- drawGeneID("1000"))#aka. CDH2 Gene
(PAX6 <- drawGeneID("5080"))
(SOX1 <- drawGeneID("6656"))
(ZO1 <- drawGeneID("7082"))#aka. TJP1
(PH3 <- drawGeneID("80012"))#aka. PHC3
(BLBP <- drawGeneID("2173"))#aka. FABP7
(GLAST <- drawGeneID("6507"))#aka. SLC1A3
(SLC9A3R2 <- drawGeneID("9351"))
(SLC1A1 <- drawGeneID("6505"))
(SLC1A5 <- drawGeneID("6510"))
(FOXG1 <- drawGeneID("2290")) #!!!!
(THY1 <- drawGeneID("7070"))
# no significant change for notch signal pathway block gene: NUMB, DVL1, DVL2, DVL3
# change for MAPK signal pathway Key component
#RAS no change
# MAP4K no change
# MAP3K no change
# MAP2K no change
#MAPK ~ JNK
drawGenes("MAPK10") #!
#TNF?
drawGenes(c("TNF", "TNFAIP8L2-SCNM1", "TNFRSF12A"))
# NOTE(review): the calls below pass a second `title` argument; drawGenes()
# must accept a title parameter for these to run — confirm its signature.
#Cadherins
p1 = drawGenes(c("CDH1", "CDH2"), "Cadherins")
#Neuroepithelial/NSC
drawGenes(c("PAX6", "SOX1", "SOX2", "HES5"), "Neuroepithelial/NSC")
#Radial gilal/NSC
#drawGenes(c("FABP7", "SLC1A3", "S100B"), "Radial gilal/NSC")
#Polarity
#drawGenes(c("PARD6B", "CDC42", "PRKCI"), "Polarity")
#R-NSC Marker
p3 = drawGenes(c("PLAGL1", "DACH1", "ZBTB16"), "R-NSC Marker")
#ESC/Reprograming
p2 = drawGenes(c("POU5F1", "KLF4", "MYC", "NANOG"), "ESC/Reprograming")
#Anterior CNS
p4 = drawGenes(c("FOXG1", "EMX2"), "Anterior CNS")
#Posterior CNS
p5 = drawGenes(c("HOXB2", "HOXB3", "HOXB4"), "Posterior CNS")
#Neuron
drawGenes(c("MAP1B", ""), "Neuron")
#Neural blast
drawGenes(c("CD24"), "Neural Blast")
#Neural crest
drawGenes(c("NGFR", "SOX10", "TFAP2B"), "Neural Crest")
#NSC/ Gilal
drawGenes(c("OLIG1", "OLIG2", "AQP4", "S100B", "EGFR"), "NSC/ Gilal")
#DV Axes
p6 = drawGenes(c("BMP4", "TBX5"), "DV Axes")
# Arrange the six saved panels in a 2-column grid via the external multiplot
# helper (sourced from multiplot.R).
source("multiplot.R")
multiplot(p1, p2, p3, p4, p5, p6, layout = matrix(c(1,2,3,4,5,6), ncol = 2, byrow=T))
|
1d3da00f6e80ff0e11ecb9d056329bd70618b7f0
|
50c50f418dfc31455ab41a857687f11e90161fda
|
/Rsrc/settings.r
|
530f02891ba0e0a8477c412fa424e1d6604ddd94
|
[] |
no_license
|
ficusvirens/satRuns
|
1ef93fb633a45a93080f52a0dc209a66b2bf2a9e
|
97975435f7be76a5b7900b4b25b30037cd375c53
|
refs/heads/master
| 2022-11-13T13:01:52.509705
| 2020-06-23T06:25:15
| 2020-06-23T06:25:15
| 272,621,364
| 0
| 0
| null | 2020-06-23T06:25:17
| 2020-06-16T05:53:17
|
R
|
UTF-8
|
R
| false
| false
| 2,950
|
r
|
settings.r
|
# Configuration script for a PREBAS forest-model run: run flags, file paths,
# simulation horizon, raster inputs, NA codes / unit conversions, and output
# selections. All values are plain assignments read by downstream scripts.
# Idiom fixes vs. the original: TRUE/FALSE instead of T/F (T and F are
# reassignable), and `<-` used consistently for assignment.

### Choose PREBAS version
vPREBAS <- "v0.2.x" #### choose PREBAS version to run the model ("master")

##### Settings ####
testRun <- TRUE  #### set to TRUE to test the code on a small raster proportion
CSCrun <- FALSE  ### set to TRUE if you are running on CSC
fracTest <- 0.2  ### fraction of test area
maxSitesRun <- 20000
maxSitesRunTest <- 1000
saveVars <- c(1, 11:13, 17, 30, 43, 44) #### select variables to save

### Library path in CSC project_2000994
if (CSCrun) {
  .libPaths(c("/projappl/project_2000994/project_rpackages", .libPaths()))
  libpath <- .libPaths()[1]
}

#### Indicate rasterPath and climID path
generalPath <- "C:/Users/minunno/Documents/research/assessCarbon/data/Finland/AC_training_FI_34VEQ/"
rasterPath <- paste0(generalPath, "rasters/")
procDataPath <- paste0(generalPath, "procData/")
outPath <- paste0(generalPath, "output/")
initPrebasPath <- paste0(generalPath, "initPrebas/")
climatepath <- "C:/Users/minunno/Documents/research/extarctWeather/inputs/" #### local fm
# climatepath <- "/scratch/project_2000994/RCP/" #### on CSC
climIDpath <- "C:/Users/minunno/Documents/research/FinSeg/some stuff/climID10km.tif"
# climIDpath <- "/scratch/project_2000994/PREBASruns/metadata/" #### on CSC

startingYear <- 2016
yearEnd <- 2024
nYears <- yearEnd - startingYear ## number of simulation years
domSPrun <- 0
resX <- 10 ### pixel resolution in meters

### Define weather inputs (CurrClim, or climate models)
weather <- "CurrClim"

### Set harvests
defaultThin <- 0
ClCut <- 0
harvscen <- "NoHarv"

#### Indicate raster files
baRast <- paste0(rasterPath, "FI_34VEQ-2016_BA_10M_1CHS_8BITS.tif")
blPerRast <- paste0(rasterPath, "FI_34VEQ-2016_BLP_10M_1CHS_8BITS.tif")
dbhRast <- paste0(rasterPath, "FI_34VEQ-2016_DIA_10M_1CHS_8BITS.tif")
vRast <- paste0(rasterPath, "FI_34VEQ-2016_GSV_10M_1CHS_16BITS.tif")
hRast <- paste0(rasterPath, "FI_34VEQ-2016_HGT_10M_1CHS_16BITS.tif")
pinePerRast <- paste0(rasterPath, "FI_34VEQ-2016_P_pine_10M_1CHS_8BITS.tif")
sprucePerRast <- paste0(rasterPath, "FI_34VEQ-2016_P_spruce_10M_1CHS_8BITS.tif")
siteTypeRast <- paste0(rasterPath, "FI_34VEQ-2016_SITE_10M_1CHS_8BITS.tif")

#### Set values for NAs and conversion factors to PREBAS units
baNA <- c(253:255); baConv <- 1
blPerNA <- c(253:255); blPerConv <- 1
dbhNA <- c(253:255); dbhConv <- 1
vNA <- c(65533:65535); vConv <- 1
hNA <- c(65533:65535); hConv <- 0.1   # heights stored in dm; convert to m
pinePerNA <- c(253:255); pinePerConv <- 1
sprucePerNA <- c(253:255); sprucePerConv <- 1
siteTypeNA <- c(254:255); siteTypeConv <- 1

#### Thresholds for variables to reset stand from plantation
maxDens <- 10000
initH <- 1.5
initDBH <- 0.5
initN <- 2200
# Basal area (m^2) implied by initN stems of diameter initDBH cm.
initBA <- pi * (initDBH / 200)^2 * initN

##### Settings for data extraction
varDT <- c(44, 30) #### variables to extract in DT
layerDT <- "tot" ### layerID to report in data.tables; if layerDT == "tot" the total over all layers is provided

##### Settings for raster creation
varRast <- c(44, 30) #### variables to extract in DT
yearOut <- startingYear + 1:3
##### end Settings ####
|
faa1b2dc3fc115a8e2b4bce945bed02f3fd4f9ff
|
76e67672932655cc635798d00620cdf98881edf1
|
/R可视化/stock.R
|
d506171ff2cd4d271187a69ee678b633454b23c6
|
[] |
no_license
|
rogerjms/R_data_mining
|
b2b4ad8213ced0f2a5ea97c8a4e27ce8ca726e34
|
a336980a13b1f1b72b5cba0dcda0d2aebb26d2b8
|
refs/heads/master
| 2016-09-09T19:03:21.190427
| 2016-01-11T15:32:33
| 2016-01-11T15:32:33
| 19,964,972
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,032
|
r
|
stock.R
|
# Download the Shanghai Composite index (^SSEC) from Yahoo Finance and plot
# the daily closing price, shading two political eras and annotating five
# regulatory/market events on the curve.
# NOTE(review): requires network access; getSymbols() assigns SSEC into the
# calling environment as a side effect.
library(quantmod)
library(ggplot2)
getSymbols('^SSEC',src='yahoo',from = '1997-01-01')
# Extract closing prices and split the xts series into a time axis and values.
close <- (Cl(SSEC))
time <- index(close)
value <- as.vector(close)
yrng <- range(value)
xrng <- range(time)
# Two shaded background periods (era boundaries) with a label per period.
data <- data.frame(start=as.Date(c('1997-01-01','2003-01-01')),end=as.Date(c('2002-12-30','2012-01-20')),core=c('jiang','hu'))
# Five annotated event dates; `stock` looks up the closing price on each date.
timepoint <- as.Date(c('1999-07-02','2001-07-26','2005-04-29','2008-01-10','2010-03-31'))
events <- c('证券法实施','国有股减持','股权分置改革','次贷危机爆发','融资融券试点')
data2 <- data.frame(timepoint,events,stock=value[time %in% timepoint])
# Line plot + era shading + event labels/points.
p <- ggplot(data.frame(time,value),aes(time,value))
p + geom_line(size=1,colour='turquoise4')+
geom_rect(alpha=0.2,aes(NULL,NULL,xmin = start, xmax = end, fill = core),ymin = yrng[1],ymax=yrng[2],data = data)+
scale_fill_manual(values = c('blue','red'))+
geom_text(aes(timepoint, stock, label = events),data = data2,vjust = -2,size = 5)+
geom_point(aes(timepoint, stock),data = data2,size = 5,colour = 'red',alpha=0.5)
|
7109f5445703a43ec39e2f29c7d059f803d9e0a4
|
796c7162fb14d87e074e4421dc05dd89e1d34ed0
|
/script.R
|
a058a84449c2e3817366e719168a3b08273d9a9a
|
[] |
no_license
|
ODelibalta/firstShinyApp
|
5445e801ee8f8b94674219f5fbfff2e854355a8f
|
8de38457ae644e86d4d70bfa10b7f7f8ed707196
|
refs/heads/master
| 2020-07-14T01:06:04.430636
| 2016-08-27T17:43:44
| 2016-08-27T17:43:44
| 66,727,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 983
|
r
|
script.R
|
# Plot average life expectancy at birth over time for one country
# (both sexes / female / male) from a pre-processed CSV.
# NOTE(review): the hard-coded setwd() ties this script to one machine;
# prefer an explicit path in read.csv() instead.
library(ggplot2)
setwd("/home/odelibalta/Study/Coursera/9_DevelopingDataProducts/ShinyApp/YearsLeft/data")
allData <- read.csv("data_adjusted.csv")
selectedCountry <- "Turkey"
# Columns: lebfm = life expectancy at birth both sexes, lebf = female, lebm = male.
countryData <- subset( allData, country == selectedCountry, select = c(year, lebfm, lebf, lebm) )
# Linear fit of both-sexes expectancy on female expectancy.
# NOTE(review): predFM is computed but never used in the plot below.
countryData$predFM <- predict( lm( lebfm ~ lebf, data = countryData ) )
countryDataPlot <- ggplot() +
geom_point(data = countryData, aes(x = year, y = lebfm , color = "Both Sexes")) +
geom_point(data = countryData, aes(x = year , y = lebf , color = "Female")) +
geom_point(data = countryData, aes(x = year , y = lebm , color = "Male")) +
xlab('Years') +
ylab('Average life span from birth years')
# countryData <- subset( allData, country == selectedCountry, select = c(year, lesixtyfm, lesixtyf, lesixtym) )
print(countryDataPlot)
# birthLifeMale <- qplot( ggplot(countryData, aes(x=lebfm, y=year, fill=variable)) + geom_bar(stat='identity') )
# print(birthLifeMale)
|
ad481dc5684da6bdeb0a562d207df034c8826c67
|
062d88fcd5c65b34ef8e8b195ac1712972e0ca8a
|
/R/dist.3col.r
|
9697d06204ae505a4a7fc677bdbff31593f8aa98
|
[] |
no_license
|
cran/NST
|
5f5a7a49c2b6f8b55b31a17c32aba2c691e95c28
|
aa0dc93c89e2fb7d5e1033407a3119719a84110e
|
refs/heads/master
| 2022-06-23T11:49:55.012107
| 2022-06-05T15:00:09
| 2022-06-05T15:00:09
| 213,753,906
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 351
|
r
|
dist.3col.r
|
# Convert a distance matrix (or 'dist' object) into long ("3-column") format:
# one row per unique unordered pair, with the pair's labels and distance.
# Pairs are emitted in column-major lower-triangle order, matching as.dist().
#
# Args:
#   dist: a square distance matrix with dimnames, or a 'dist' object.
# Returns:
#   data.frame with columns name1, name2, dis.
dist.3col <- function(dist)
{
  m <- as.matrix(dist)
  # Row/column index of every lower-triangle cell, column-major order.
  pair_idx <- which(lower.tri(m), arr.ind = TRUE)
  data.frame(name1 = rownames(m)[pair_idx[, 1]],
             name2 = colnames(m)[pair_idx[, 2]],
             dis = m[pair_idx])
}
|
d855384e5f0965c0dfdfa5f5ddf75b417c08742a
|
7a062dd29d3392ef15043d35ad09a67c205904f5
|
/man/rMVhyper.Rd
|
3cc9b9d2cd2fb3eb912c559a38af269166d1b8e4
|
[] |
no_license
|
benaug/Mbmisscap
|
68b0bb83b52f8ef64b45a63a65f8eab8cf4e03a6
|
3340ff0752d71c9d3b64470f57692cc8c55fcae9
|
refs/heads/master
| 2021-01-10T14:08:44.503350
| 2018-10-08T16:53:55
| 2018-10-08T16:53:55
| 51,013,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 346
|
rd
|
rMVhyper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MVHyper.R
\name{rMVhyper}
\alias{rMVhyper}
\title{Random generator for the multivariate hypergeometric distribution}
\usage{
rMVhyper(n, K)
}
\arguments{
\item{n}{Number of random vectors to draw.}
\item{K}{Vector giving the number of items in each category of the population being sampled.}
}
\description{
Random generator for the multivariate hypergeometric distribution
}
|
639777b42a7e17bfc40bc9cc3c745237a7f4b720
|
6c37b3af3e8379222b238cb67b877a563a6f3dd4
|
/R/gui.feature_picker.r
|
63723d84f1bb005c8fb755bda721e7c99fd2e353
|
[] |
no_license
|
ChristopherBarrington/seuratvis
|
e809fefabb9f6125d649558b2b860a0c6fe55772
|
413ddca360790eb4c277d0cdc2b14ec2791f1c04
|
refs/heads/master
| 2021-09-08T09:44:05.645790
| 2021-09-01T07:10:17
| 2021-09-01T07:10:17
| 242,530,342
| 0
| 0
| null | 2020-06-19T06:40:05
| 2020-02-23T14:21:23
|
R
|
UTF-8
|
R
| false
| false
| 9,807
|
r
|
gui.feature_picker.r
|
#' UI module for picking a feature, metadata column, or gene-module score
#' from a loaded Seurat dataset. Builds a radio selector for the source type,
#' a conditional input per source, a colour-range slider, a log/linear toggle,
#' and a hidden text input that mirrors the current selection for the server.
#'
#' @param id module namespace id.
#' @param seurat list with $object (Seurat object), $metadata (data frame)
#'   and $gene_module_scores (data frame of module scores per cell).
#' @param label label shown above the source-type radio buttons.
#' @param selected source type selected initially ('features', 'metadata'
#'   or 'gene_modules').
#' @param include_feature_type,include_values_range hide the corresponding
#'   controls when FALSE.
#' @param choices named list mapping display names to source-type keys;
#'   entries with no available options are dropped.
#' @param features_opts,metadata_opts,gene_modules_opts lists of overrides
#'   merged into the corresponding input widget's arguments via modifyList().
#'   NOTE(review): features_opts is accepted but never used in this body.
#' @param features_regex,metadata_regex,gene_modules_regex case-insensitive
#'   regexes restricting the selectable names.
#'   NOTE(review): gene_modules_regex is accepted but never applied below.
#' @param metadata_filter function applied to seurat$metadata before taking
#'   its column names (e.g. to drop non-displayable columns).
feature_picker.ui <- function(id, seurat, label='Feature selection', selected='features', include_feature_type=TRUE, include_values_range=TRUE,
choices=list(`Features`='features', `Metadata`='metadata', `Gene modules`='gene_modules'),
features_opts=list(), metadata_opts=list(), gene_modules_opts=list(),
features_regex='.*', metadata_regex='.*', gene_modules_regex='.*',
metadata_filter=function(x) x) {
  ns <- NS(id)
  # get the possible features and values
  ## get names of features and metadata
  list(features=rownames(seurat$object),
       metadata=seurat$metadata %>% metadata_filter() %>% colnames(),
       gene_modules=colnames(seurat$gene_module_scores)) -> feature_picker_options
  ## filter the list for non-empty sets
  feature_picker_options <- feature_picker_options[sapply(feature_picker_options, length)>0]
  ## only use choices with non-empty option sets
  choices <- choices[unlist(choices) %in% names(feature_picker_options)]
  ## filter the options using the regex (features and metadata only)
  feature_picker_options$features %<>% str_subset(pattern=regex(pattern=features_regex, ignore_case=TRUE))
  feature_picker_options$metadata %<>% str_subset(pattern=regex(pattern=metadata_regex, ignore_case=TRUE))
  ## pick a random feature and metadata and gene module column as defaults
  feature_picker_options %>%
    lapply(sample, size=1) -> feature_picker_selected
  # pick a feature to display: features or metadata or gene_module
  picked_feature <- feature_picker_selected$features
  # make ui elements
  ## feature names autocomplete box (shown only when source type == 'features')
  autocomplete_input(id=ns(id='feature_picker_feature_names'), label=NULL, placeholder='Feature',
                     options=feature_picker_options$features, value=feature_picker_selected$features) %>%
    conditionalPanel(condition=sprintf('input["%s"]=="features"', ns(id='feature_type'))) -> feature_names_picker_conditional
  ## metadata names drop down box (shown only when source type == 'metadata')
  # selectizeInput(inputId=ns(id='feature_picker_metadata'), label=NULL,
  #                choices=feature_picker_options$metadata, selected=feature_picker_selected$metadata, multiple=FALSE) %>%
  #   conditionalPanel(condition=sprintf('input["%s"]=="metadata"', ns(id='feature_type'))) -> metadata_picker_conditional
  list(inputId=ns(id='feature_picker_metadata'), label=NULL,
       choices=feature_picker_options$metadata, selected=feature_picker_selected$metadata, multiple=FALSE) %>%
    modifyList(val=metadata_opts) %>%
    do.call(what=selectizeInput) %>%
    conditionalPanel(condition=sprintf('input["%s"]=="metadata"', ns(id='feature_type'))) -> metadata_picker_conditional
  ## gene modules drop down box (shown only when source type == 'gene_modules')
  # list(inputId=ns(id='feature_picker_gene_module'), label=NULL,
  #      choices=feature_picker_options$gene_modules, selected=feature_picker_options$gene_modules, multiple=FALSE,
  #      options=list(`actions-box`=TRUE, header='Gene module(s) selection', title='Gene module selection',
  #                   `selected-text-format`='count', `count-selected-text`='{0} module(s)')) %>%
  #   modifyList(val=gene_modules_opts) %>%
  #   do.call(what=pickerInput) %>%
  #   conditionalPanel(condition=sprintf('input["%s"]=="gene_modules"', ns(id='feature_type'))) -> gene_module_picker_conditional
  # NOTE(review): `selected` below passes ALL module names while multiple=FALSE;
  # feature_picker_selected$gene_modules was likely intended -- confirm.
  list(inputId=ns(id='feature_picker_gene_module'), label=NULL,
       choices=feature_picker_options$gene_modules, selected=feature_picker_options$gene_modules, multiple=FALSE) %>%
    modifyList(val=gene_modules_opts) %>%
    do.call(what=selectizeInput) %>%
    conditionalPanel(condition=sprintf('input["%s"]=="gene_modules"', ns(id='feature_type'))) -> gene_module_picker_conditional
  ## slider to limit colour range; real min/max are set later by the server
  sliderInput(inputId=ns(id='value_range'), label='Colour range limits',
              min=0, max=1, step=0.1, value=c(-Inf,Inf)) -> value_range
  ## toggle between log and linear colour scale
  prettyToggle(inputId=ns(id='log_scale_toggle'),
               label_on='Log', icon_on=icon('tree'), status_on='success',
               label_off='Linear', icon_off=icon('signal'), status_off='success',
               outline=TRUE, plain=TRUE) -> log_scale_toggle
  if(!include_values_range)
    log_scale_toggle %<>% hidden()
  ## radio buttons for the source type (features / metadata / gene modules)
  prettyRadioButtons(inputId=ns(id='feature_type'), status='primary', label=label,
                     choices=choices, selected=selected,
                     icon=icon('check'), bigger=TRUE, animation='jelly') -> feature_type_picker
  if(!include_feature_type)
    feature_type_picker %<>% hidden()
  ## hidden text box mirroring the current selection for the server module
  textInput(inputId=ns('picked_feature'), label='picked feature', value=picked_feature) %>% hidden() -> picked_feature_text_input
  # return ui element(s)
  tagList(feature_type_picker,
          feature_names_picker_conditional,
          metadata_picker_conditional,
          gene_module_picker_conditional,
          if(include_values_range) value_range,
          log_scale_toggle,
          picked_feature_text_input)
}
#' Server module backing feature_picker.ui: tracks the currently selected
#' feature/metadata/gene-module, fetches its per-cell values from the Seurat
#' object, keeps the colour-range slider in sync, and exposes everything as a
#' reactiveValues list ($name, $values, $values_range, $is_divergent,
#' $refreshed).
#'
#' @param input,output,session standard shiny module server arguments.
#' @param seurat reactiveValues with $object (Seurat), $gene_module_scores.
#' @param features_regex,metadata_regex NOTE(review): accepted but never used
#'   in this body -- confirm whether they can be dropped.
#' @param ... unused.
#' @return the reactiveValues list describing the current selection.
feature_picker.server <- function(input, output, session, seurat, features_regex='.*', metadata_regex='.*', ...) {
  # previously_picked_feature <- reactiveValues()
  picked_feature <- reactiveValues()
  # react to the feature selection
  ## if a feature is selected, copy it to the reactive
  observeEvent(eventExpr=input$feature_picker_feature_names, handlerExpr={
    # make sure these elements are defined
    req(input$feature_picker_feature_names)
    # only honour the input that matches the active source type
    if(input$feature_type=='features')
      picked_feature$name <- input$feature_picker_feature_names})
  ## if a metadata column is selected, copy it to the reactive
  observeEvent(eventExpr=input$feature_picker_metadata, handlerExpr={
    # make sure these elements are defined
    req(input$feature_picker_metadata)
    # update hidden ui element
    if(input$feature_type=='metadata')
      picked_feature$name <- input$feature_picker_metadata})
  ## if a gene module column is selected, copy it to the reactive
  observeEvent(eventExpr=input$feature_picker_gene_module, handlerExpr={
    # make sure these elements are defined
    req(input$feature_picker_gene_module)
    # update hidden ui element
    if(input$feature_type=='gene_modules')
      picked_feature$name <- input$feature_picker_gene_module})
  ## when the source type changes, revert to that source's current input value
  observeEvent(eventExpr=input$feature_type, handlerExpr={
    # pick the feature to revert to
    input_name <- switch(input$feature_type, features='feature_picker_feature_names', metadata='feature_picker_metadata', gene_modules='feature_picker_gene_module')
    # update hidden ui element
    picked_feature$name <- input[[input_name]]})
  ## fetch the values for the selected feature (feature, metadata or modules)
  observe(label='feature_picker/fetch', x={
    # make sure these elements are defined
    req(seurat$object)
    req(input$feature_type)
    req(picked_feature$name)
    if(is.null(input$log_scale_toggle))
      return(NULL)
    # create variables for shorthand
    picked <- picked_feature$name
    # get the values for the selected feature(s) from the loaded Seurat
    #! TODO: need to deal with missing feature request; eg switching between species
    if(input$feature_type=='gene_modules') {
      # a comma-separated selection is split into multiple module names
      picked %<>% str_split(pattern=',') %>% unlist()
      picked_feature_values <- dplyr::select(seurat$gene_module_scores, any_of(picked))
      list(rep(0, times=nrow(picked_feature_values))) %>% # any missing `picked` variables are zero-filled
        rep(times=length(picked)) %>%
        set_names(picked) %>%
        modifyList(val=picked_feature_values) %>%
        as.data.frame() -> picked_feature_values
    } else {
      picked_feature_values <- FetchData(object=seurat$object, vars=picked) #! TODO: need to catch this if it errors
    }
    if(length(picked)==1) {
      # single selection: standardise the column name and rescale the slider
      picked_feature_values %<>% set_names('value')
      # update the ui element(s)
      ## slider to limit colour range, padded by 0.05 and rounded to 1 dp
      min_value <- 0
      max_value <- 1
      if(!is.null(picked_feature_values$value) && is.numeric(picked_feature_values$value)) {
        min_value <- min(picked_feature_values$value) %>% subtract(0.05) %>% round(digits=1)
        max_value <- max(picked_feature_values$value) %>% add(0.05) %>% round(digits=1)
      }
      # slider operates in log10 units when the log toggle is on
      if(input$log_scale_toggle) {
        min_value %<>% log(base=10)
        max_value %<>% log(base=10)
      }
      updateSliderInput(session=session, inputId='value_range',
                        min=min_value, max=max_value, value=c(-Inf,Inf))
    } else {
      # multiple selections: collapse the slider (no single value range)
      updateSliderInput(session=session, inputId='value_range',
                        min=0, max=0, value=c(-Inf,Inf))
    }
    # save feature information in the reactive
    picked_feature$values <- picked_feature_values
    # invalidate the reactive when both data and slider are updated
    picked_feature$refreshed <- rnorm(1)})
  # invalidate the reactive value when slider is changed but not after initialisation
  observeEvent(eventExpr=input$value_range, ignoreInit=TRUE, handlerExpr={
    picked_feature$refreshed <- rnorm(1)
    value_range <- input$value_range
    # if the scale is logged, unlog it (slider values are log10 units then)
    if(input$log_scale_toggle)
      value_range %<>% raise_to_power(e1=10, e2=.)
    # a range spanning zero (one negative, one positive endpoint) is divergent
    value_range %>% sign() %>% Reduce(f='*') %>% magrittr::equals(-1) -> picked_feature$is_divergent
    picked_feature$values_range <- value_range})
  # reset the reactive when the seurat is (re)loaded
  observeEvent(eventExpr=seurat$object, handlerExpr={
    for(i in names(picked_feature))
      picked_feature[[i]] <- NULL})
  # return the reactiveValues list
  return(picked_feature)
}
|
ee6babde649e78726e6414e270046adc1b258315
|
7af6caa67cef08323b14007179a68ca0f1df6f16
|
/practice/generate-keywords.R
|
3c3026b62fa375ac48099a52827cf24e3141334b
|
[] |
no_license
|
zhou-dong/r-study
|
5f57e10e049568f895764896bc66968eafbeebf2
|
4c9976e74f9fd9d79306dfbb8eb02e0ff722dff5
|
refs/heads/master
| 2021-01-13T01:44:03.078373
| 2016-01-26T19:06:00
| 2016-01-26T19:06:00
| 30,156,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
generate-keywords.R
|
# Connect to a local MongoDB, scan documents in linkedin.employee, and insert
# each listed skill as its own document into linkedin.skills.
# BUG FIXES vs. original:
#   - mongo.count()/mongo.find() referenced an undefined variable DBNS;
#     the intended collection name is the `employee` variable.
#   - error() is not a base R function; stop() is used instead.
#   - locals no longer shadow base::list / base::length.
library(rmongodb)
library(plyr)

mongo <- mongo.create()
if (!mongo.is.connected(mongo))
  stop("No connection to MongoDB", call. = FALSE)

# DB name: linkedin; Collection name: employee
employee <- "linkedin.employee"
employee_count <- mongo.count(mongo, employee)
message("Count of Employee: ", employee_count)

# Empty query document: match every employee.
query <- mongo.bson.from.buffer(mongo.bson.buffer.create())

property <- "skills"
skills <- "linkedin.skills"

# Projection: return only the skills field of each document.
fields_buf <- mongo.bson.buffer.create()
mongo.bson.buffer.append(fields_buf, property, TRUE)
fields <- mongo.bson.from.buffer(fields_buf)
print(fields)

cursor <- mongo.find(mongo, ns = employee, query = query, fields = fields, limit = 1000L)
while (mongo.cursor.next(cursor)) {
  doc <- mongo.bson.to.list(mongo.cursor.value(cursor))
  # A single-element document carries only "_id", i.e. it has no skills field.
  if (length(doc) == 1)
    next
  skill_set <- doc[2]
  for (skill in skill_set) {
    # One document per skill name, value 1L.
    obj_buf <- mongo.bson.buffer.create()
    mongo.bson.buffer.append(obj_buf, skill, 1L)
    obj <- mongo.bson.from.buffer(obj_buf)
    mongo.insert(mongo, skills, obj)
  }
}

err <- mongo.cursor.destroy(cursor)
mongo.disconnect(mongo)
mongo.destroy(mongo)
|
7de8abd26a74617d6ead1a45b1c150ea08655f1f
|
12700d79bb9a790cdc5dc2f0327932e738901139
|
/ui.R
|
9976ccc9f39c84d27d2b1a7487f2a930b452b567
|
[] |
no_license
|
tijoseymathew/ddpCourseProject
|
71126662e41fc8492295953d184d9bb003323d70
|
3d17a7e98a7799d3757adb0d71df1129d34e332f
|
refs/heads/master
| 2021-01-19T10:59:32.618716
| 2014-06-23T12:14:15
| 2014-06-23T12:14:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 875
|
r
|
ui.R
|
library(shiny)
# Shiny UI: k-means clustering demo.
# Sidebar collects the sample size and number of cluster centers (and shows
# the fitted centers); main panel shows the raw-sample and clustered plots,
# which the server renders as output$genPlot / output$clustPlot and
# output$centers.
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("k-Means Visulaization"),
  # Sidebar with inputs for sample size and cluster count
  sidebarPanel(
    print("App to generate random samples and cluster them using k-Means clustering"),
    writeLines("Select the parameters below:"),
    # input$noSamples: how many random points to generate
    numericInput("noSamples",
                 "Number of random samples to be generated:",
                 value = 30,
                 min = 0
    ),
    # input$noCenters: k for the k-means fit
    sliderInput("noCenters",
                "Number of centers:",
                min = 1,
                max = 10,
                value = 5),
    print("Identified cluster centers:"),
    tableOutput("centers")
  ),
  # Show a plot of the generated distribution and the clustering result
  mainPanel(
    plotOutput("genPlot"),
    plotOutput("clustPlot")
  )
))
|
7c5c22c2d2f069017e30c264e66497edc75707c3
|
5bce0396d622ce493e83f063995a972440742a24
|
/man/GelbeKarten_Spielminute_Mannschaft.Rd
|
7f69359302e7f2a6524689d2e8f1b20880a74725
|
[] |
no_license
|
maxxthur/handball.analytica
|
5b3303c7eb3c34a720e040d44367cbe2be78572d
|
9f149cf92f36de1f30558be131853f9f03397160
|
refs/heads/master
| 2020-04-14T03:47:48.035338
| 2019-01-21T13:09:30
| 2019-01-21T13:09:30
| 163,612,573
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 328
|
rd
|
GelbeKarten_Spielminute_Mannschaft.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GelbeKarten_Spielverlauf_Mannschaft.R
\name{GelbeKarten_Spielminute_Mannschaft}
\alias{GelbeKarten_Spielminute_Mannschaft}
\title{Yellow cards per match minute for a team}
\usage{
GelbeKarten_Spielminute_Mannschaft(Mannschaft, Data)
}
\arguments{
\item{Mannschaft}{Name of the team whose yellow cards are evaluated.}

\item{Data}{Data set of match events from which the yellow cards are counted.}
}
\description{
Summarises the yellow cards (GelbeKarten) received by a team by match minute.
}
|
0349c8be88ea113135ae5acd99b8056d7637fa33
|
e68be20f89b5b098d72c641129d2f8c1e209a81f
|
/man/diagnosticPlot.Rd
|
da41b10cb5dd815ef424358e9bf5c8d2502da2a2
|
[] |
no_license
|
cran/rMEA
|
46cbff5c8fd55cffd3bef08c3716a484c1072386
|
22e8f87a35c3a7b271e327538f5931a5968d80c6
|
refs/heads/master
| 2022-03-24T06:19:45.274794
| 2022-02-17T17:12:07
| 2022-02-17T17:12:07
| 125,495,577
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,374
|
rd
|
diagnosticPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rMEA_graphics.R
\name{diagnosticPlot}
\alias{diagnosticPlot}
\title{Plots the initial, middle and ending part of a MEA object}
\usage{
diagnosticPlot(mea, width = 60, ...)
}
\arguments{
\item{mea}{an object of class \code{MEA} (see function \code{\link{readMEA}}).}
\item{width}{integer. The number of seconds to be plotted for each panel}
\item{...}{further arguments passed to \code{plot}}
}
\description{
This is typically useful to check if the motion energy time-series are good.
The middle section is chosen randomly among possible middle sections.
}
\details{
Motion energy time-series should always be visually inspected for possible artifacts. Periodic peaks or drops in time-series are indicators of e.g. key-frames or duplicated video-frames.
For further information regarding the program MEA, please refer to the documentation available at \code{http://www.psync.ch}.
}
\examples{
## read a single file
path_normal <- system.file("extdata/normal/200_01.txt", package = "rMEA")
mea_normal <- readMEA(path_normal, sampRate = 25, s1Col = 1, s2Col = 2,
s1Name = "Patient", s2Name = "Therapist", skip=1,
idOrder = c("id","session"), idSep="_")
## Visual inspection of the data
diagnosticPlot(mea_normal[[1]])
}
|
341feb0ee3d691ff723ec7087a6172353afb3ab1
|
8e72f7609d9dac0dab575acc87671ccbaae8b16e
|
/shift_model.R
|
13ad29718368e4e46b89cc0509ea6d0ae9ea4c2b
|
[] |
no_license
|
gcgibson/cfr_model
|
aecb2725a46de7605854b88f539fa1d13e8092df
|
4d4aed0e9c7b9cb9c499762530440f0faebd4e13
|
refs/heads/main
| 2023-03-02T03:50:44.661589
| 2021-02-14T18:51:49
| 2021-02-14T18:51:49
| 327,323,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,579
|
r
|
shift_model.R
|
library("splines")
library("rstan")
library(covidcast)
library(ggplot2)
library(cowplot)

# Build a log-scale comparison plot of 8-day-lagged incident cases vs deaths
# for one US state, pulling JHU CSSE data through the covidcast API.
# The original script copy-pasted this block four times; it is now one helper.
# NOTE(review): `Lag` is not provided by any package loaded here (it comes
# from Hmisc/quantmod) -- confirm the intended package is attached upstream.
make_case_death_panel <- function(region,
                                  start_day = "2020-03-15",
                                  timezero_day = "2021-01-01") {
  deaths <- covidcast_signal(data_source = "jhu-csse",
                             signal = "deaths_incidence_num",
                             start_day = start_day, end_day = timezero_day,
                             geo_type = "state", geo_values = region)
  # Treat non-positive death counts as missing and linearly interpolate them;
  # anything still NA (leading/trailing gaps) is set to zero.
  deaths$value[deaths$value <= 0] <- NA
  interp_obj <- approx(seq_len(nrow(deaths)), deaths$value,
                       xout = which(is.na(deaths$value)))
  deaths[interp_obj$x, ]$value <- interp_obj$y
  deaths$value[is.na(deaths$value)] <- 0

  cases <- covidcast_signal(data_source = "jhu-csse",
                            signal = "confirmed_incidence_num",
                            start_day = start_day, end_day = timezero_day,
                            geo_type = "state", geo_values = region)

  ggplot(cases[cases$time_value >= start_day, ],
         aes(x = time_value, y = log(Lag(pmax(0, value) + 1, 8)), col = "Cases")) +
    geom_line() +
    geom_line(data = deaths[deaths$time_value >= start_day, ],
              aes(x = time_value, y = log(Lag(pmax(0, value) + 1, 8)), col = "Deaths")) +
    xlab("Date") + ylab("Log") + theme_bw()
}

# One panel per state; variable names p1..p4 kept for downstream compatibility.
p1 <- make_case_death_panel("tx")
p2 <- make_case_death_panel("ny")
p3 <- make_case_death_panel("fl")
p4 <- make_case_death_panel("ca")

# BUG FIX: labels previously read c("NY","CA","TX","FL") and did not match the
# panel order (p1 = TX, p2 = NY, p3 = FL, p4 = CA).
fig1 <- cowplot::plot_grid(p1 + theme(legend.position = "none"),
                           p2 + theme(legend.position = "none"),
                           p3 + theme(legend.position = "none"),
                           p4 + theme(legend.position = "none"),
                           labels = c("TX", "NY", "FL", "CA"),
                           ncol = 2, align = "v")
# BUG FIX: ggsave() requires a filename as its first argument; the original
# ggsave(fig1) passed the plot object as the filename and would error.
ggsave("fig1.png", fig1)
|
214f5c31b84f934006e85cc387590599b7588f33
|
5f90cc0c9c8eb6b993d228cfe75277ba50d6d048
|
/R/runmcmc.R
|
30e9d94e268e09df14aec0a689eed9b12b2e70e1
|
[] |
no_license
|
MJAlexander/distortr
|
b812640281590615edd97e22d1df779508710ae3
|
324746567bcb5c7251d911688bca8dfd51223403
|
refs/heads/master
| 2020-12-24T05:37:52.184782
| 2020-07-13T15:42:40
| 2020-07-13T15:42:40
| 73,495,046
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,461
|
r
|
runmcmc.R
|
#' Run MCMC estimation
#'
#' Run MCMC estimation for time series using JAGS. Can either be run on a single time series of a set of time series.
#'
#' @param input.data Input data to JAGS.
#' If single country, this is a dataframe of x and y observations, and standard errors around ys.
#' If a global run, this is a list of required input data. See \code{processData}, \code{getSplinesData} and \code{getGPData} to get required data in compatible form.
#' @param method The method of smoothing to implement (choices: ar, arma, splines, gp)
#' @param nyears For single country runs: number of years of observations
#' @param obs.err For single country runs: is TRUE if standard errors are observed
#' @param measurement.err For single country runs: is TRUE if there is assumed to be measurement error
#' @param cs.arma For global runs: whether ARMA parameter(s) are country specific. If `FALSE`, parameter is global.
#' @param cs.smoothing For global runs: whether smoothing paramter is country specific. If `FALSE`, smoothing parameter is global.
#' @param time.trend For global runs: if `TRUE` a linear time trend is estimated.
#' @param nserror.estimated For global runs: whether to estimate non-sampling error. IF `FALSE`, fixed sampling error is inputted.
#' @param order The order of splines penalization (either 1 or 2)
#' @param I Knot spacing for splines
#' @param matern.cov Whether or not to use Matern covariance function if \code{method=="gp"}. Default is \code{TRUE}.
#' @param nchains Number of MCMC chains
#' @param nburnin Number of iterations to throw away as burn in.
#' @param niter Number of total iterations.
#' @param nthin Degree of thinning of MCMC chains
#' @param model.file.path Text file which contains the model to be fitted. If \code{NULL}, the text file is drawn from the \code{models} folder.
#' @param model.save.file.path For global runs: path to save model, if written.
#' @export
#' @return A JAGS model object
#' @seealso \code{\link{getResults}, \link{plotResults}}
#' @examples
#' nyears <- 100
#' prop.sample <- 0.7
#' obs.err <- TRUE
#' sigma.y <- 0.5
#' seed <- 123
#' method <- 'splines'
#' params <- list(sigma.alpha = 1, order = 1)
#' df <- simulateFluctuations(nyears, prop.sample, method, params, obs.err, sigma.y)
#' df$se <- 1
#' mod <- runMCMC(input.data = df, nyears = 100, method = "splines", order = 1,nchains = 4, nburnin = 100, niter = 100+3000, nthin = 3)
runMCMC <- function(input.data,
                    method,
                    nyears = NULL,
                    obs.err = TRUE,
                    measurement.err = TRUE,
                    cs.arma = NULL,
                    cs.smoothing = TRUE,
                    time.trend = FALSE,
                    nserror.estimated = TRUE,
                    order = NULL,
                    I = 2.5,
                    matern.cov = TRUE,
                    nchains = 3,
                    nburnin = 1000,
                    niter = 2000,
                    nthin = 1,
                    model.file.path = NULL,
                    model.save.file.path = "R/model.txt") {
  # Dispatch on the shape of input.data: a list (no dim attribute) means a
  # global run; a 2-d data frame means a single-country run. The original
  # fell through for any other input and failed later with the cryptic
  # "object 'mod' not found"; we now stop immediately with a clear message.
  if (is.null(dim(input.data))) {
    mod <- runMCMCGlobal(method = method,
                         input.data = input.data,
                         order = order,
                         matern.cov = matern.cov,
                         cs.arma = cs.arma,
                         cs.smoothing = cs.smoothing,
                         time.trend = time.trend,
                         nserror.estimated = nserror.estimated,
                         nchains = nchains,
                         nburnin = nburnin,
                         niter = niter,
                         nthin = nthin,
                         model.file.path = model.file.path,
                         model.save.file.path = model.save.file.path)
  } else if (length(dim(input.data)) == 2) {
    mod <- runMCMCCountry(df = input.data,
                          nyears = nyears,
                          method = method,
                          order = order,
                          I = I,
                          matern.cov = matern.cov,
                          obs.err = obs.err,
                          measurement.err = measurement.err,
                          nchains = nchains,
                          nburnin = nburnin,
                          niter = niter,
                          nthin = nthin,
                          model.file.path = model.file.path)
  } else {
    stop("`input.data` must be either a list (global run) or a two-dimensional data frame (single-country run).",
         call. = FALSE)
  }
  mod
}
|
49c2beecb7fe20a3bc69c04dc8968d86bbfc7b92
|
03b58eced475bed141a0e81b2b687fd72d42769b
|
/man/get_p_Salje.Rd
|
49655bd65c0bf63a14b389c778bfee5d795b7876
|
[] |
no_license
|
epicentre-msf/covidestim
|
a86a14697bac09e27cee2d802dc12489da5fbe91
|
93785a265a495d9388add08913aba3ab14d96c59
|
refs/heads/master
| 2023-01-04T21:46:17.651857
| 2020-11-02T16:21:49
| 2020-11-02T16:21:49
| 263,291,260
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,302
|
rd
|
get_p_Salje.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_p_Salje.R
\name{get_p_Salje}
\alias{get_p_Salje}
\title{Estimate Covid-19 outcome probabilities for a population given its age
distribution, and age-severity estimates from Salje et al. (2020)}
\source{
Salje, H., Kiem, C.T., Lefrancq, N., Courtejoie, N., Bosetti, P., Paireau,
J., Andronico, A., Hoze, N., Richet, J., Dubost, C.L., and Le Strat, Y.
(2020) Estimating the burden of SARS-CoV-2 in France. Science.
\url{https://doi.org/10.1126/science.abc3517}
}
\usage{
get_p_Salje(x, p_type = c("p_hosp_inf", "p_icu_hosp", "p_dead_hosp",
"p_dead_inf"), p_stat = c("mean", "low_95", "up_95"),
p_sex = c("total", "male", "female"))
}
\arguments{
\item{x}{Either an ISO3 country code used to extract age-specific population
estimates from the UN World Population Prospects 2019 dataset, \emph{or}, a
data.frame containing age categories in the first column and population
counts (or proportions) in the second column}
\item{p_type}{Outcome to estimate (either "p_hosp_inf", "p_icu_hosp",
"p_dead_hosp", or "p_dead_inf")}
\item{p_stat}{Statistic of the severity estimates to use (either "mean",
"low_95", or "up_95")}
\item{p_sex}{Use severity estimate for which sex (either "female", "male", or
"total")}
}
\value{
Estimated outcome probability (scalar)
}
\description{
Estimate Covid-19 outcome probabilities including hospitalization|infection,
ICU|hospitalization, death|hospitalization, and death|infection, using
age-severity estimates from Salje et al. (2020), and the population age
distribution for a given country, either taken from the UN World Population
Prospects 2019 (WPP2019) or directly supplied by the user.
}
\examples{
# mean Pr(hospitalization|infection) for Canada (ISO3 code "CAN"), taking age
# distribution from WPP2019
get_p_Salje(x = "CAN", p_type = "p_hosp_inf", p_stat = "mean", p_sex = "total")
# use custom age-distribution
age_df <- data.frame(
age = c("0-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "70-79", "80+"),
pop = c(1023, 1720, 2422, 3456, 3866, 4104, 4003, 3576, 1210),
stringsAsFactors = FALSE
)
get_p_Salje(x = age_df, p_type = "p_hosp_inf", p_stat = "mean", p_sex = "total")
}
\author{
Anton Camacho
Patrick Barks <patrick.barks@epicentre.msf.org>
}
|
f19e2ce922d5b75aeb5efc9ed3297e3cfea1f6da
|
06b0ec2195aaeb38dbba3efe626c83dedb0483ec
|
/Statistical Graphics with Big Data/team project/wordcloud.R
|
998b879eab6245a51d3d58eca01facce3b759f69
|
[] |
no_license
|
chanmeee/lecture
|
9ced5550216d9200e7ed66a5ce4a97128d60dacc
|
a0473506f7e8b7d9366661bee3a9a4b88e66e870
|
refs/heads/master
| 2021-07-16T04:32:22.309169
| 2020-09-24T12:57:17
| 2020-09-24T12:57:17
| 210,207,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,715
|
r
|
wordcloud.R
|
# ---- Setup ----
# Load libraries for data wrangling (tidyverse) and fast CSV reading (data.table)
library(tidyverse)
library(data.table)
# Load Data
# NOTE(review): setwd() with a machine-specific absolute path makes the script
# non-portable; consider relative paths or here::here().
setwd("C:/Users/Chanmi Yoo/Desktop/빅데/팀프로젝트/data")
spotify <- fread("SpotifyFeatures.csv", encoding = "UTF-8")
# Preprocessing
# 1) Check for NA values
table(is.na(spotify))
## [Note] There are no missing values at all.
# 2) Remove duplicate tracks (drop repeated track_id rows)
# NOTE(review): if there happened to be no duplicates, -which(...) would select
# zero rows; spotify[!duplicated(spotify$track_id), ] is the safer idiom -- verify.
spotify.uniq <- spotify[-which(duplicated(spotify$track_id)),]
dim(spotify.uniq)
## [Note] There are 176774 unique tracks in total.
# Creating Word Cloud - Track Title
# Load libraries for text mining (tm) and word-cloud plotting
library(tm)
library(wordcloud)
library(RColorBrewer)
# Expand memory limit (Windows-only function)
memory.limit(9999999999999)
# Create a text1 vector
# The data set is too large to process in one pass, so it is split into three
# chunks by popularity rank -- text1 (60000 rows), text2 (60000 rows), and
# text3 (56774 rows) -- and each chunk is processed into a Corpus separately.
text1 <- spotify.uniq %>%
  mutate(rank=row_number(desc(popularity))) %>%
  filter(rank %in% 1:60000) %>%
  select(track_name)
text2 <- spotify.uniq %>%
  mutate(rank=row_number(desc(popularity))) %>%
  filter(rank %in% 60001:120000) %>%
  select(track_name)
text3 <- spotify.uniq %>%
  mutate(rank=row_number(desc(popularity))) %>%
  filter(rank %in% 120001:176774) %>%
  select(track_name)
######################################################
# ---- Chunk 1 (ranks 1-60000): clean track titles ----
# FIX: the original applied str_replace() to the whole data.frame
# (`text1 <- text1 %>% str_replace(...)`), which coerces the entire
# data.frame to a single string instead of cleaning each title. The
# text2 section below correctly targets the track_name column, so the
# same pattern is applied here for consistency.
# 1) Featuring credits
#tail(str_subset(text1$track_name, regex('feat', ignore_case = T)), 10)
## [Note] Featuring credits mostly appear as "(feat. [artist])", "(Feat. [artist])", or "[FEAT. [artist]]".
# Remove featuring credits
text1$track_name <- text1$track_name %>%
  str_replace("[[:blank:]]{1}[fF]{1}(eat)[[:print:]]+", "") %>%
  str_replace_all("[[:blank:]]{1}(FEAT.)[[:print:]]+", "")
# 2) Remaster info
#tail(str_subset(text1$track_name, regex('remaster', ignore_case = T)), 10)
## [Note] Remaster info mostly appears as "- Remastered" or "- Remasterizado [year]".
# Remove remaster info
text1$track_name <- text1$track_name %>%
  str_replace_all("(Remastered) +", "") %>%
  str_replace_all("[0-9]{4}[[:space:]]{1}(Remaster)", "") %>%
  str_replace_all("(-)[[:space:]]{1}(Remasterizado)[[:space:]]{1}[0-9]{4}", "") %>%
  str_replace_all("(- Remastered)", "") %>%
  str_replace_all("(Remastered)", "") %>%
  str_replace_all("(- Remaster)", "") %>%
  str_replace_all("[0-9]{4}[[:print:]](Remaster)", "") %>%
  str_replace_all("[0-9]{4}( Digital Remaster)+", "") %>%
  str_replace_all("[-(]{1}[[:print:]]+", "")
# 3) Version info
#tail(str_subset(text1$track_name, regex('version', ignore_case = T)), 10)
## [Note] Version info appears as " - [text] Version" or "([text] version)".
text1$track_name <- text1$track_name %>%
  str_replace_all("(-)[[:space:]][[:print:]]+[Vv]{1}(ersion)", "") %>%
  str_replace_all("(- Version)", "") %>%
  str_replace_all("(()[[:print:]][Vv]{1}(ersion)())", "")
# Build a tm Corpus from the text1 titles
#docs <- Corpus(VectorSource(get(paste("text", i, sep=""))))
docs1 <- Corpus(VectorSource(text1))
# Clean the text1 corpus
docs1 <- docs1 %>%
  #tm_map(removeNumbers) %>% # remove digits (disabled)
  tm_map(removePunctuation) %>% # remove punctuation
  tm_map(stripWhitespace) %>% # collapse extra whitespace
  tm_map(content_transformer(tolower)) %>% # lower-case everything
  tm_map(removeWords, c(stopwords("english"), 'dont', 'cant', 'major', 'minor')) # remove stopwords (English list plus ad-hoc noise words)
# tm_map(removeWords, stopwords(c('it','its','itself','this','that','these','those',
#                  'am','is','are,','been','being','would','did','doing','would','should',
#                  'a','an','and','but','if','or','because','as','by'))) # alternative stopword removal (disabled)
# Build a term-document matrix and tally word frequencies for chunk 1
dtm1 <- TermDocumentMatrix(docs1)
matrix1 <- as.matrix(dtm1)
words1 <- sort(rowSums(matrix1), decreasing = T)
df1 <- data.frame(word = names(words1), freq1 = words1)
# ---- Chunk 2 (ranks 60001-120000): clean track titles ----
# 1) Featuring credits
#tail(str_subset(get(paste("text", i, sep=""))$track_name, regex('feat', ignore_case = T)), 10)
## [Note] Featuring credits mostly appear as "(feat. [artist])", "(Feat. [artist])", or "[FEAT. [artist]]".
# Remove featuring credits
text2$track_name <- text2$track_name %>%
  str_replace("[[:blank:]]{1}[fF]{1}(eat)[[:print:]]+", "") %>%
  str_replace_all("[[:blank:]]{1}(FEAT.)[[:print:]]+", "")
# 2) Remaster info
#tail(str_subset(text2$track_name, regex('remaster', ignore_case = T)), 10)
## [Note] Remaster info mostly appears as "- Remastered" or "- Remasterizado [year]".
# Remove remaster info
text2$track_name <- text2$track_name %>%
  str_replace_all("(Remastered) +", "") %>%
  str_replace_all("[0-9]{4}[[:space:]]{1}(Remaster)", "") %>%
  str_replace_all("(-)[[:space:]]{1}(Remasterizado)[[:space:]]{1}[0-9]{4}", "") %>%
  str_replace_all("(- Remastered)", "") %>%
  str_replace_all("(Remastered)", "") %>%
  str_replace_all("(- Remaster)", "") %>%
  str_replace_all("[0-9]{4}[[:print:]](Remaster)", "") %>%
  str_replace_all("[0-9]{4}( Digital Remaster)+", "") %>%
  str_replace_all("[-(]{1}[[:print:]]+", "")
# 3) Version info
#tail(str_subset(text2$track_name, regex('version', ignore_case = T)), 10)
## [Note] Version info appears as " - [text] Version" or "([text] version)".
text2$track_name <- text2$track_name %>%
  str_replace_all("(-)[[:space:]][[:print:]]+[Vv]{1}(ersion)", "") %>%
  str_replace_all("(- Version)", "") %>%
  str_replace_all("(()[[:print:]][Vv]{1}(ersion)())", "")
# Build a tm Corpus from the cleaned titles
docs2 <- Corpus(VectorSource(text2))
# Clean the text2 corpus
docs2 <- docs2 %>%
  #tm_map(removeNumbers) %>% # remove digits (disabled)
  tm_map(removePunctuation) %>% # remove punctuation
  tm_map(stripWhitespace) %>% # collapse extra whitespace
  tm_map(content_transformer(tolower)) %>% # lower-case everything
  tm_map(removeWords, c(stopwords("english"), 'dont', 'cant', 'major', 'minor')) # remove stopwords (English list plus ad-hoc noise words)
# Build a term-document matrix and tally word frequencies for chunk 2
dtm2 <- TermDocumentMatrix(docs2)
matrix2 <- as.matrix(dtm2)
words2 <- sort(rowSums(matrix2), decreasing = T)
df2 <- data.frame(word = names(words2), freq2 = words2)
# ---- Chunk 3 (ranks 120001-176774): clean track titles ----
# FIX: like the text1 section, the original applied str_replace() to the
# whole data.frame (`text3 <- text3 %>% ...`); cleaning now targets the
# track_name column, matching the text2 section.
# 1) Featuring credits
#tail(str_subset(text3$track_name, regex('feat', ignore_case = T)), 10)
## [Note] Featuring credits mostly appear as "(feat. [artist])", "(Feat. [artist])", or "[FEAT. [artist]]".
text3$track_name <- text3$track_name %>%
  str_replace("[[:blank:]]{1}[fF]{1}(eat)[[:print:]]+", "") %>%
  str_replace_all("[[:blank:]]{1}(FEAT.)[[:print:]]+", "")
# 2) Remaster info
#tail(str_subset(text3$track_name, regex('remaster', ignore_case = T)), 10)
## [Note] Remaster info mostly appears as "- Remastered" or "- Remasterizado [year]".
text3$track_name <- text3$track_name %>%
  str_replace_all("(Remastered) +", "") %>%
  str_replace_all("[0-9]{4}[[:space:]]{1}(Remaster)", "") %>%
  str_replace_all("(-)[[:space:]]{1}(Remasterizado)[[:space:]]{1}[0-9]{4}", "") %>%
  str_replace_all("(- Remastered)", "") %>%
  str_replace_all("(Remastered)", "") %>%
  str_replace_all("(- Remaster)", "") %>%
  str_replace_all("[0-9]{4}[[:print:]](Remaster)", "") %>%
  str_replace_all("[0-9]{4}( Digital Remaster)+", "") %>%
  str_replace_all("[-(]{1}[[:print:]]+", "")
# 3) Version info
#tail(str_subset(text3$track_name, regex('version', ignore_case = T)), 10)
## [Note] Version info appears as " - [text] Version" or "([text] version)".
text3$track_name <- text3$track_name %>%
  str_replace_all("(-)[[:space:]][[:print:]]+[Vv]{1}(ersion)", "") %>%
  str_replace_all("(- Version)", "") %>%
  str_replace_all("(()[[:print:]][Vv]{1}(ersion)())", "")
# Build a tm Corpus from the cleaned titles
docs3 <- Corpus(VectorSource(text3))
# Clean the text3 corpus
docs3 <- docs3 %>%
  #tm_map(removeNumbers) %>% # remove digits (disabled)
  tm_map(removePunctuation) %>% # remove punctuation
  tm_map(stripWhitespace) %>% # collapse extra whitespace
  tm_map(content_transformer(tolower)) %>% # lower-case everything
  tm_map(removeWords, c(stopwords("english"), 'dont', 'cant', 'major', 'minor')) # remove stopwords (English list plus ad-hoc noise words)
# Build a term-document matrix and tally word frequencies for chunk 3
dtm3 <- TermDocumentMatrix(docs3)
matrix3 <- as.matrix(dtm3)
words3 <- sort(rowSums(matrix3), decreasing = T)
df3 <- data.frame(word = names(words3), freq3 = words3)
######################################################
# Merge the three per-chunk frequency tables into one data frame
df1$word <- as.character(df1$word)
df2$word <- as.character(df2$word)
df3$word <- as.character(df3$word)
df <- full_join(df1, df2, by="word") %>%
  full_join(df3, by="word")
# Sum the per-chunk counts (NA means the word was absent in that chunk)
df <- df %>%
  mutate(freq = rowSums(subset(df, select=c(freq1,freq2,freq3)), na.rm = T)) %>%
  select(word, freq) %>%
  arrange(desc(freq))
# Top 10 most frequent words across all track names
head(df,10)
# Draw wordcloud
set.seed(1234) # for reproducibility
# FIX: the original passed `max.freq = Inf`, which is not a wordcloud()
# argument and was silently swallowed by `...`; the intended argument is
# `max.words` (maximum number of words to plot).
wordcloud(words = df$word, freq = df$freq,
          min.freq = 300, max.words = Inf, # keep only words appearing at least 300 times
          random.order=FALSE, rot.per=0.35,
          colors=brewer.pal(8, "Dark2"))
######################################################
# [Result] "love" was the most frequent word (2299 occurrences), followed by
# "act" (1318), then one, time, song, like, man, little, good, life.
######################################################
|
ec00ddb5faca9f4b0c74c6b99ba9dd666c23ad6f
|
d847ef908656b3584235ecd026c9c4c6bef76f9e
|
/man/select_matches.Rd
|
64e5c30bc8e7892d53bb8db297d5da2eed75057b
|
[] |
no_license
|
reaganmozer/textmatch
|
3407539aaed8678a16c1fa97b451d500f4076546
|
8a09a6568faed2f4067cbf02ee6979d87f5b3060
|
refs/heads/master
| 2021-07-24T15:35:35.089175
| 2021-07-21T16:16:02
| 2021-07-21T16:16:02
| 178,749,075
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 938
|
rd
|
select_matches.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_matches.R
\name{select_matches}
\alias{select_matches}
\title{Given a model for match quality and a corpus of documents, calculate the
estimated match quality of each potential pairing of treated and control documents
and return a matched dataset containing all pairs of documents with estimated quality
above a specified threshold}
\usage{
select_matches(corpus, Z, mod, threshold)
}
\arguments{
\item{corpus}{A corpus of treated and control documents from which matched pairs are drawn}

\item{Z}{Treatment indicator for the documents in \code{corpus}}

\item{mod}{A model for match quality used to score each candidate pairing of treated and control documents}

\item{threshold}{Threshold on estimated match quality; only pairs of documents scoring above it are returned}
}
\value{
A \link{data.frame} of matched pairs of documents
}
\description{
Given a model for match quality and a corpus of documents, calculate the
estimated match quality of each potential pairing of treated and control documents
and return a matched dataset containing all pairs of documents with estimated quality
above a specified threshold
}
|
a17f56357965a933d9b9b527f754c9eca25861de
|
b45d456566c9ae65cb3f3fb4e26ff9e054adbf1f
|
/MedidasTendenciaMedia.R
|
08543b71cd0b47d0edd148a2865c4774fcf8c6a4
|
[] |
no_license
|
SindyPin/ExercisesR
|
86e66c072cfc21c49bd9f57d535cec50ac897b45
|
cf181f4e568a53b381316282290dff1289acaba0
|
refs/heads/main
| 2022-12-26T02:06:24.584419
| 2020-10-15T21:17:34
| 2020-10-15T21:17:34
| 304,448,624
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,199
|
r
|
MedidasTendenciaMedia.R
|
# Load skewness/kurtosis helpers
# NOTE(review): require() only warns on failure; library() is preferred for
# hard dependencies, and install.packages() inside a script reinstalls on
# every run -- consider guarding with requireNamespace().
require(e1071)
install.packages ("moments")
library(moments)
# 1. Create a vector with the values to be analysed:
c0 = c(2,3,6,9)
# 2. Compute the mean of the vector:
mean(c0)
# 3. Compute the median:
median(c0)
# 4. Show a summary of the computations
# (gives the min, max, mean, median, and the 1st and 3rd quartiles):
summary(c0)
# 5. Open a graphics layout for the plots that follow:
# 2x2 (4 plots), 3x3 (9 plots) and so on; the first number is rows and
# the second is columns, e.g. c(3,1) is one column of 3 plots -- the same
# idea as a matrix layout.
par(mfrow=c(2,2))
# 6. Plot the bar chart:
barplot(c0)
# 7. Plot the histograms:
# a) relative frequency:
hist(c0)
# b) relative density (ranges from 0 to 1):
hist(c0,probability = T)
# 8. Add the density line:
lines(density(c0))
# 9. Compute the skewness:
# Skewness is a measure of symmetry, or more precisely, the lack of symmetry.
# A distribution is symmetric if it looks the same to the left and right of the center point.
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35b.htm
skewness(c0)
# 10. Compute the kurtosis value:
# Kurtosis is a measure of whether the data are heavy-tailed or light-tailed relative to
# a normal distribution. That is, data sets with high kurtosis tend to have heavy tails,
# or outliers. Data sets with low kurtosis tend to have light tails, or lack of outliers.
# A uniform distribution would be the extreme case.
# https://www.itl.nist.gov/div898/handbook/eda/section3/eda35b.htm
kurtosis(c0)
# REMEMBER!
# In R:
# k>0, leptokurtic
# k=0, mesokurtic
# k<0, platykurtic
# Sample 1:
c1 <- c(7, 1, 5, 2, 3, 1, 6)
# Mean of sample 1:
mean(c1)
# Median of sample 1:
# Note that the mean is greater than the median
median(c1)
# Mode of sample 1 (base R has no built-in mode function):
as.numeric(names(table(c1))[table(c1) == max(table(c1))])
# Note that the median is greater than the mode:
# right-skewed distribution (see the histogram)
barplot(c1)
hist(c1)
hist(c1, probability = TRUE)
lines(density(c1))
skewness(c1)
kurtosis(c1)
# FIX: the original called `sumary(c1)`, which fails with
# "could not find function" -- the intended call is summary().
summary(c1)
|
88f5223f769804b04789935662cdae0a01434b98
|
5da36ba3773c76292a6fee6c941ddf579c5ee1c2
|
/R/logit_irls.R
|
7a1836204b99c7383ca20918cb6776b37d16cc82
|
[
"CC0-1.0"
] |
permissive
|
pat-alt/fromScratch
|
3ea1d1b5b0b87ce02829681504212f8332e675c3
|
f4df04504c0f07610744b47631d9cdfacd9e0b2a
|
refs/heads/master
| 2023-04-22T18:23:30.594616
| 2021-04-20T08:02:15
| 2021-04-20T08:02:15
| 303,605,836
| 0
| 0
|
CC0-1.0
| 2021-04-20T08:02:15
| 2020-10-13T06:14:37
|
TeX
|
UTF-8
|
R
| false
| false
| 841
|
r
|
logit_irls.R
|
#' Fit a logistic regression by iteratively reweighted least squares (IRLS).
#'
#' @param X Design matrix; a column of 1s is prepended unless the first
#'   column is already all 1s (intercept).
#' @param y Binary (0/1) response, vector or one-column matrix.
#' @param beta_0 Optional starting coefficient values (length ncol(X) after
#'   the intercept column is added); defaults to all zeros.
#' @param tau Convergence tolerance on the mean absolute gradient.
#' @param max_iter Maximum number of IRLS iterations.
#' @return A list with `fitted` (fitted probabilities, n x 1) and
#'   `coeff` (coefficient estimates, intercept first).
logit_irls <- function(X, y, beta_0 = NULL, tau = 1e-9, max_iter = 10000) {
  # Prepend an intercept column when one is not already present.
  if (!all(X[, 1] == 1)) {
    X <- cbind(1, X)
  }
  p <- ncol(X)
  n <- nrow(X)

  # Initialization: ----
  # FIX: the original ignored a supplied `beta_0`, leaving `beta_latest`
  # undefined (and the first iteration erroring) whenever it was provided.
  if (is.null(beta_0)) {
    beta_latest <- matrix(rep(0, p)) # naive first guess
  } else {
    beta_latest <- matrix(beta_0, ncol = 1)
  }

  can_still_improve <- TRUE
  iter <- 1

  # Iteratively reweighted least squares (IRLS):
  while (can_still_improve && iter < max_iter) {
    y_hat <- X %*% beta_latest
    p_y <- exp(y_hat) / (1 + exp(y_hat))   # fitted probabilities
    df_latest <- crossprod(X, y - p_y)     # score (gradient)
    w <- as.vector(p_y * (1 - p_y))        # IRLS weights = diag(W)
    # Working response Z = X beta + W^{-1} (y - p_y). W is diagonal, so the
    # inverse is just elementwise division -- the original materialised a
    # dense n x n matrix and called qr.solve() on it (O(n^3)).
    Z <- X %*% beta_latest + (y - p_y) / w
    # Weighted least squares step: solve (X' W X) beta = X' W Z.
    beta_latest <- qr.solve(crossprod(X, w * X), crossprod(X, w * Z))
    can_still_improve <- mean(abs(df_latest)) > tau # convergence reached?
    iter <- iter + 1
  }

  return(
    list(
      fitted = p_y,
      coeff = beta_latest
    )
  )
}
|
73aedbc7936c21ac732375a0dbf39c1946856244
|
215ca85a9ff709d1e221b3165e7e10eb783cf29c
|
/R/color.R
|
3cbeb40465078cbdc18ab14d832e8b43517a01b1
|
[] |
no_license
|
dpcarballo/coloR
|
2b845eb3493038d5458db4c76ac25d449c92429c
|
2e1b2b3808d784cbf27a5d464fa8189e5faf4197
|
refs/heads/master
| 2020-05-04T17:06:06.094781
| 2019-04-25T11:38:31
| 2019-04-25T11:38:31
| 179,273,901
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,581
|
r
|
color.R
|
#' Creates an object of class 'color'
#' @param ... A character containing a color name, a hexcode, a c(r,g,b) vector or
#' three single numbers representing r,g,b respectively
#' @param maxVal Value assigned to represent maximum amount of light. Defaults to 255
#' @return An object of class "color" made from specifications given in the arguments
#' @examples
#' color(255,0,0)
#' color("magenta")
#' color(c(0,255,0))
#' color("#0000ff")
color <- function(..., maxVal = 255) {
  col <- list()
  class(col) <- "color"
  input <- list(...)

  # The function behaves differently depending on the number of arguments.
  if (length(input) == 3) {
    # color(255, 0, 0): three separate channel values
    r <- input[[1]]
    g <- input[[2]]
    b <- input[[3]]
  } else if (length(input) == 1) {
    input <- input[[1]]
    if (is_color(input)) {
      # Already a color object: return it unchanged (idempotent).
      return(input)
    } else if (is.numeric(input)) {
      # color(c(255, 0, 0)): a single numeric vector of channels
      r <- input[1]
      g <- input[2]
      b <- input[3]
    } else if (is.character(input)) {
      # Scalar string: decide between a hex code and a color name.
      # FIX: the original used the vectorized `&` here; for a scalar
      # condition in `if` the short-circuiting `&&` is the correct operator.
      if (nchar(input) == 7 && substr(input, 1, 1) == "#") {
        # color("#FF0000"): parse each pair of hex digits
        r <- strtoi(paste0("0x", substr(input, 2, 3)))
        g <- strtoi(paste0("0x", substr(input, 4, 5)))
        b <- strtoi(paste0("0x", substr(input, 6, 7)))
      } else {
        # color("red"): translate a color name to its hex code and recurse
        return(color(gplots::col2hex(input)))
      }
    }
  }

  # Clamp each channel into [0, maxVal], then rescale to the 0-255 range.
  r <- fit_into_interval(r, 0, maxVal)
  g <- fit_into_interval(g, 0, maxVal)
  b <- fit_into_interval(b, 0, maxVal)
  coef <- 255 / maxVal
  col$red <- round(r * coef)
  col$green <- round(g * coef)
  col$blue <- round(b * coef)
  return(col)
}
|
fb8df62a78d63502cf522fab4632f68850032b49
|
e31ab151872ddc8b2ce40c13ad6ac4a4a22c1c7d
|
/functions/0_LoadConfig.R
|
4e8ec5ee1caee9474fd75f2b45242ce76f0e490b
|
[
"MIT"
] |
permissive
|
Alowis/postloss-Spattern
|
8e03a19d1f2e961797081de098cdd8e79a94f2e5
|
8ac9ba126714ffd882ac2e23a50bbcaadd636eb7
|
refs/heads/master
| 2020-09-21T19:00:14.135498
| 2019-11-23T06:57:18
| 2019-11-23T06:57:18
| 224,892,160
| 0
| 0
|
MIT
| 2019-11-29T16:51:03
| 2019-11-29T16:51:03
| null |
UTF-8
|
R
| false
| false
| 2,954
|
r
|
0_LoadConfig.R
|
##############################################################################
# title : grid files (inputs) to extract fragstat and fractal metrics;
# purpose : create separated grids (unit of analysis) from detection grid for fragstat and
# fractal analyses;
# producer : prepared by A. Coca;
# last update : in London, UK June 2015 / Updated in September 2015;
# inputs : deforestation grid by year, fishnet (windows) shapefile;
# outputs : split detection grid using in GeoTIFF format (FRAGSTAT/FRACTAL INPUT);
# remarks 1 : detection grid must be in projected projection (i.e IGH or LAE);
###############################################################################
# Parse a whitespace-delimited configuration file into a list of settings.
#
# Each line of the file at path `x` is split on single spaces. For every
# known key (located with grep over the parsed lines) the LAST token on the
# matching line is taken as the value, except proj.CRS, where everything
# from the third token onward is re-joined because a PROJ/CRS string itself
# contains spaces. Semicolon-separated values (fishnet sizes and the two
# metric lists) are split into character vectors.
#
# NOTE(review): the grep patterns such as "*path" start with a literal `*`,
# which is an invalid regex quantifier position; R appears to tolerate it
# here, but the behavior should be confirmed (fixed = TRUE or "path$" would
# be unambiguous).
#
# Returns an unnamed list, in order: root path, detection start date,
# detection end date, projection acronym, projection CRS string, fishnet
# sizes, FRAGSTATS executable, FRAGSTATS .fca file, class-metric list,
# landscape-metric list, CV repeat count, CV resampling count.
LoadConfig = function(x)
{
  # Read the file and split every line into space-delimited tokens.
  conf.list <- lapply(strsplit(readLines(x)," "), as.character)
  #read target lines
  root.index <- grep("*path",conf.list)
  root.path = conf.list[[root.index]][[length(conf.list[[root.index]])]]
  date.ini.index <- grep("*det.ini",conf.list)
  date.ini = conf.list[[date.ini.index]][[length(conf.list[[date.ini.index]])]]
  date.end.index <- grep("*det.end",conf.list)
  date.end = conf.list[[date.end.index]][[length(conf.list[[date.end.index]])]]
  proj.acronym.index <- grep("*proj.acronym",conf.list)
  proj.acronym = conf.list[[proj.acronym.index]][[length(conf.list[[proj.acronym.index]])]]
  # The CRS value spans multiple tokens, so re-join tokens 3..n with spaces.
  proj.CRS.index <- grep("*proj.CRS",conf.list)
  proj.CRS = paste(conf.list[[proj.CRS.index]][3:length(conf.list[[proj.CRS.index]])],collapse = " ")
  # Fishnet (window) sizes: semicolon-separated list.
  fishnet.sizes.index <- grep("*.sizes",conf.list)
  fishnet.sizes = conf.list[[fishnet.sizes.index]][[length(conf.list[[fishnet.sizes.index]])]]
  fishnet.list <- unlist(strsplit(fishnet.sizes, ";"))
  # FRAGSTATS executable and model (.fca) file paths.
  fragstat.exe.index <- grep("*fragstat.exe",conf.list)
  fragstat.exe = conf.list[[fragstat.exe.index]][[length(conf.list[[fragstat.exe.index]])]]
  fragstat.fca.index <- grep("*fragstat.fca.file",conf.list)
  fragstat.fca = conf.list[[fragstat.fca.index]][[length(conf.list[[fragstat.fca.index]])]]
  # Class-level metrics: semicolon-separated list.
  fragstat.class.index <- grep("*class.metrics",conf.list)
  fragstat.class = conf.list[[fragstat.class.index]][[length(conf.list[[fragstat.class.index]])]]
  class.list <- unlist(strsplit(fragstat.class, ";"))
  # Landscape-level metrics: semicolon-separated list.
  fragstat.land.index <- grep("*land.metrics",conf.list)
  fragstat.land = conf.list[[fragstat.land.index]][[length(conf.list[[fragstat.land.index]])]]
  land.list <- unlist(strsplit(fragstat.land, ";"))
  # Cross-validation settings for the modelling step.
  model.CV.repeats.index <- grep("*CV.n.repeats",conf.list)
  model.CV.repeats = conf.list[[model.CV.repeats.index]][[length(conf.list[[model.CV.repeats.index]])]]
  model.CV.resampling.index <- grep("*CV.n.resampling",conf.list)
  model.CV.resampling = conf.list[[model.CV.resampling.index]][[length(conf.list[[model.CV.resampling.index]])]]
  newlist = list(root.path,date.ini,date.end,proj.acronym,proj.CRS, fishnet.list,
                 fragstat.exe,fragstat.fca,class.list,land.list,model.CV.repeats,model.CV.resampling)
  return(newlist)
}
# NOTE(review): `conf.file` must be defined by the calling script before this
# file is sourced; otherwise this top-level call errors.
LoadConfig(conf.file)
|
8fd80857bc7769222bebc2ea3e52386c5c03b5c9
|
ecfbce1b650c26ed5144291895681f81482b1a29
|
/R/SS_optim_scale.R
|
a4801052252745da40101f7d6803913e2de67b39
|
[] |
no_license
|
wpeterman/ResistanceGA
|
2777371e78f740512ed7c7a606d9e1f8d95c1fa3
|
9897bf48fe017b3b47b22f72d1472d20f05ab3c6
|
refs/heads/master
| 2023-04-29T05:59:14.785140
| 2023-04-18T02:21:44
| 2023-04-18T02:21:44
| 19,353,382
| 32
| 18
| null | 2021-03-17T01:21:38
| 2014-05-01T18:22:03
|
R
|
UTF-8
|
R
| false
| false
| 90,154
|
r
|
SS_optim_scale.R
|
#' Single surface optimization with kernel smoothing
#'
#' Optimize all binary and/or continuous surfaces contained in a directory using a genetic algorithm executed with the \code{\link[GA]{ga}} function in the Genetic Algorithms package \pkg{GA}. Optimizes a kernel smoothing parameter with all surfaces.
#'
#' @param CS.inputs Object created from running \code{\link[ResistanceGA]{CS.prep}} function. Defined if optimizing using CIRCUITSCAPE
#' @param gdist.inputs Object created from running \code{\link[ResistanceGA]{gdist.prep}} function. Defined if optimizing using gdistance
#' @param jl.inputs Object created from running \code{\link[ResistanceGA]{jl.prep}} function. Defined if optimizing using CIRCUITSCAPE run in Julia
#' @param GA.inputs Object created from running \code{\link[ResistanceGA]{GA.prep}} function
#' @param dist_mod Logical, if TRUE, a Distance model will be calculated and added to the output table (default = TRUE)
#' @param null_mod Logical, if TRUE, an intercept-only model will be calculated and added to the output table (default = TRUE)
#' @return This function optimizes resistance surfaces in isolation. Following optimization of all surfaces, several summary objects are created.\cr
#' \enumerate{
#' \item Diagnostic plots of model fit are output to the "Results/Plots" directory that is automatically generated within the folder containing the optimized ASCII files.
#' \item A .csv file with the Maximum Likelihood Population Effects mixed effects model coefficient estimates (MLPE_coeff_Table.csv)
#' \item Three summary .csv files are generated: CategoricalResults.csv, ContinuousResults.csv, & All_Results_AICc.csv. These tables contain AICc values and optimization summaries for each surface.
#' }
#' All results tables are also summarized in a named list ($ContinuousResults, $CategoricalResults, $AICc, $MLPE, $MLPE.list)\cr
#' The \code{lmer} model objects stored $MLPE.list are fit using Restricted Maximum Likelihood
#' @usage SS_optim.scale(CS.inputs,
#' gdist.inputs,
#' jl.inputs,
#' GA.inputs,
#' dist_mod,
#' null_mod)
#' @author Bill Peterman <Peterman.73@@osu.edu>
#' @noRd
SS_optim.scale <- function(CS.inputs = NULL,
gdist.inputs = NULL,
jl.inputs = NULL,
GA.inputs,
dist_mod = TRUE,
null_mod = TRUE) {
if (is.null(GA.inputs$scale)) {
stop(
"`SS_optim.scale` should only be used if you intend to apply kernel smoothing to your resistance surfaces"
)
}
if (!is.null(GA.inputs$scale) & any(GA.inputs$scale.surfaces == 0)) {
stop(
"It is not currently possible to selectively scale surfaces while using 'SS_optim scale'. Either remove surfaces you do not wish to scale and/or do not specify values for `scale.surfaces` in GA.prep."
)
}
t1 <- proc.time()[3]
RESULTS.cat <- list() # List to store categorical results within
RESULTS.cont <- list() # List to store continuous results within
cnt1 <- 0
cnt2 <- 0
k.value <- GA.inputs$k.value
MLPE.list <- list()
cd.list <- list()
k.list <- list()
ga.list <- list()
# Optimize each surface in turn
for (i in 1:GA.inputs$n.layers) {
r <- GA.inputs$Resistance.stack[[i]]
names(r) <- GA.inputs$layer.names[i]
R.orig <- r
# Processing of categorical surfaces
if (!is.null(CS.inputs)) {
# if (CS.inputs$platform == 'pc') {
# if (GA.inputs$parallel != FALSE) {
# warning(
# "\n CIRCUITSCAPE cannot be run in parallel on a Windows machine. \n Ignoring parallel arguement. \n If you want to optimize in parallel, use least cost paths and gdistance.",
# immediate. = TRUE
# )
# }
# }
cnt1 <- cnt1 + 1
names(r) <- GA.inputs$layer.names[i]
# Scaled optimization: CS -----------------------------------------------------
if(GA.inputs$scale.surfaces[i] == 1) {
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single.scale,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
CS.inputs = CS.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
start.vals <- single.GA@solution[-1]
EQ <- get.EQ(single.GA@solution[1])
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0.000123456543210
}
## Adjust unused transformations
if(single.GA@fitnessValue == -99999 | dim(single.GA@solution)[1] > 1) {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
} else {
EQ <- get.EQ(single.GA@solution[1])
}
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
scale = single.GA@solution[4],
r = R.orig
)
NAME <- GA.inputs$layer.names[i]
names(r.tran) <- GA.inputs$layer.names[i]
cd <- Run_CS(CS.inputs,
r.tran,
full.mat = TRUE,
EXPORT.dir = GA.inputs$Results.dir)
write.table(
cd,
file = paste0(GA.inputs$Results.dir, NAME, "_csResistMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r.tran,
paste0(GA.inputs$Results.dir, NAME, ".asc"),
overwrite = TRUE)
Diagnostic.Plots(
resistance.mat = lower(cd),
genetic.dist = CS.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "continuous",
name = NAME,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = single.GA@solution[1],
print.dir = GA.inputs$Plots.dir,
scale = single.GA@solution[4]
)
single.GA@solution[single.GA@solution == 0.000123456543210] <- 0
} else {
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
CS.inputs = CS.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
start.vals <- single.GA@solution[-1]
if(single.GA@fitnessValue == -99999 | dim(single.GA@solution)[1] > 1) {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
} else {
EQ <- get.EQ(single.GA@solution[1])
}
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
r = R.orig
)
names(r.tran) <- GA.inputs$layer.names[i]
cd <- Run_CS(CS.inputs,
r.tran,
full.mat = TRUE,
EXPORT.dir = GA.inputs$Results.dir)
write.table(
cd,
file = paste0(GA.inputs$Results.dir, NAME, "_csResistMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r.tran,
paste0(GA.inputs$Results.dir, NAME, ".asc"),
overwrite = TRUE)
Diagnostic.Plots(
resistance.mat = lower(cd),
genetic.dist = CS.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "continuous",
name = NAME,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = single.GA@solution[1],
print.dir = GA.inputs$Plots.dir
)
}
fit.stats <-
r.squaredGLMM(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = F,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
)
aic <-
AIC(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = F,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
)
LL <-
logLik(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = F,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
)
MLPE.list[[i]] <- MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = TRUE,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
cd.list[[i]] <- cd
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
names(cd.list)[i] <- GA.inputs$layer.names[i]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- CS.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0
}
if(GA.inputs$scale.surfaces[i] == 1) {
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
single.GA@solution[4]
)
} else {
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
NA
)
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
"k",
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
"Equation",
"shape",
"max",
"scale"
)
RESULTS.cont[[cnt1]] <- RS
if (dist_mod == TRUE) {
# r <- reclassify(r, c(-Inf, Inf, 1))
r <- (r * 0) + 1
names(r) <- "dist"
cd <- Run_CS(CS.inputs, r, full.mat = T)
write.table(
cd,
file = paste0(GA.inputs$Results.dir, "Distance", "_csResistMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
Dist.AIC <-
AIC(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = FALSE,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
)
fit.stats <-
r.squaredGLMM(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = FALSE,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
)
LL <-
logLik(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = FALSE,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
)
# Select the objective-function value to report for the Distance model.
# The GA maximizes its objective, so AIC must be negated (lower AIC ->
# higher fitness) to be comparable with the fitness values reported for
# optimized surfaces; the gdistance/Julia branches negate AIC here as well.
if (GA.inputs$method == "AIC") {
  dist.obj <- -Dist.AIC
} else if (GA.inputs$method == "R2") {
  dist.obj <- fit.stats[[1]]
} else {
  dist.obj <- LL[[1]]
}
k <- 2
n <- CS.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Dist.AICc <-
data.frame("Distance",
dist.obj,
k,
Dist.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Dist.AICc) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
"k",
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
MLPE.list[[i + 1]] <- MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = CS.inputs$response,
REML = TRUE,
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
cd.list[[i + 1]] <- cd
# (read.table(paste0(
# GA.inputs$Results.dir,
# "dist_resistances.out"))[-1, -1])
names(MLPE.list)[i + 1] <- 'Distance'
names(cd.list)[i + 1] <- 'Distance'
}
if (null_mod == TRUE) {
response = CS.inputs$response
dat <- data.frame(CS.inputs$ID, response = CS.inputs$response)
colnames(dat) <- c("pop1", "pop2", "response")
# Fit model
mod <- lFormula(response ~ 1 + (1 | pop1), data = dat, REML = FALSE)
mod$reTrms$Zt <- CS.inputs$ZZ
dfun <- do.call(mkLmerDevfun, mod)
opt <- optimizeLmer(dfun)
Null.AIC <-
AIC(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
fit.stats <-
r.squaredGLMM(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
LL <-
logLik(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
# Objective-function value for the intercept-only (Null) model. Negate AIC
# so it is directly comparable with GA fitness values (which maximize -AIC),
# matching how the null model is handled in the gdistance branch.
if (GA.inputs$method == "AIC") {
  null.obj <- -Null.AIC
} else if (GA.inputs$method == "R2") {
  null.obj <- fit.stats[[1]]
} else {
  null.obj <- LL[[1]]
}
k <- 1
n <- CS.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Null.AICc <-
data.frame("Null",
null.obj,
k,
Null.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Null.AICc) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
"k",
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
}
}
#### gdistance ####
if (!is.null(gdist.inputs)) {
# Island GA ----------------
if(isTRUE(GA.inputs$gaisl)) {
cnt2 <- cnt2 + 1
names(r) <- GA.inputs$layer.names[i]
# Scaled optimization -----------------------------------------------------
if(GA.inputs$scale.surfaces[i] == 1) {
single.GA <- gaisl(
type = "real-valued",
fitness = Resistance.Opt_single.scale,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
gdist.inputs = gdist.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
numIslands = GA.inputs$numIslands,
migrationRate = GA.inputs$migrationRate,
migrationInterval = GA.inputs$migrationInterval,
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
# keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
} else { # Not Scaled optimization ---------------
# * Categorical ----------------
if (GA.inputs$surface.type[i] == 'cat') {
cnt1 <- cnt1 + 1
names(r) <- GA.inputs$layer.names[i]
single.GA <- gaisl(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
gdist.inputs = gdist.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
numIslands = GA.inputs$numIslands,
migrationRate = GA.inputs$migrationRate,
migrationInterval = GA.inputs$migrationInterval,
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
# keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
if(dim(single.GA@solution)[1] > 1) {
single.GA@solution <- t(as.matrix(single.GA@solution[1,]))
}
single.GA@solution <-
single.GA@solution / min(single.GA@solution)
df <- data.frame(id = unique(r), t(single.GA@solution))
r <- subs(r, df)
NAME <- GA.inputs$layer.names[i]
names(r) <- NAME
cd <- Run_gdistance(gdist.inputs, r)
# save(cd, file = paste0(GA.inputs$Write.dir, NAME, ".rda"))
write.table(
as.matrix(cd),
file = paste0(GA.inputs$Results.dir, NAME, "_", gdist.inputs$method, "_distMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r,
paste0(GA.inputs$Results.dir, NAME, ".asc"),
overwrite = TRUE)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, NAME, ".rds"))
ga.list[[i]] <- single.GA
names(ga.list[i]) <- NAME
Diagnostic.Plots(
resistance.mat = cd,
genetic.dist = gdist.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "categorical",
name = NAME,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
fit.stats <- r.squaredGLMM(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
aic <- AIC(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
LL <- logLik(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)[[1]]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- gdist.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
single.GA@solution
)
k <- GA.inputs$parm.type$n.parm[i]
Features <- matrix()
for (z in 1:(k)) {
feature <- paste0("Feature", z)
Features[z] <- feature
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
"k",
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
Features
)
RESULTS.cat[[cnt1]] <- RS
MLPE.list[[i]] <- MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i]] <- as.matrix(cd)
names(cd.list)[i] <- GA.inputs$layer.names[i]
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
} else { # * Continuous ---------------
# Processing of unscaled continuous surface
cnt2 <- cnt2 + 1
r <- SCALE(r, 0, 10)
names(r) <- GA.inputs$layer.names[i]
single.GA <- gaisl(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
gdist.inputs = gdist.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
numIslands = GA.inputs$numIslands,
migrationRate = GA.inputs$migrationRate,
migrationInterval = GA.inputs$migrationInterval,
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
# keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
}
} # End island
# Post-optimization summary for continuous / scaled surfaces (island GA, gdistance)
if(GA.inputs$surface.type[i] != 'cat'){
if(single.GA@fitnessValue == -99999 | dim(single.GA@solution)[1] > 1) {
if(GA.inputs$scale.surfaces[i] == 1) {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
} else {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
}
} else {
start.vals <- single.GA@solution[-1]
EQ <- get.EQ(single.GA@solution[1])
}
if(GA.inputs$scale.surfaces[i] == 1) {
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0.000123456543210
}
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
scale = single.GA@solution[4],
r = R.orig
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = single.GA@solution[1],
print.dir = GA.inputs$Plots.dir,
scale = single.GA@solution[4]
)
} else {
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
r = R.orig
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = EQ,
print.dir = GA.inputs$Plots.dir
)
}
names(r.tran) <- GA.inputs$layer.names[i]
NAME <- GA.inputs$layer.names[i]
cd <- Run_gdistance(gdist.inputs, r.tran)
write.table(
as.matrix(cd),
file = paste0(GA.inputs$Results.dir, NAME, "_", gdist.inputs$method,"_distMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r.tran, paste0(GA.inputs$Results.dir, NAME, ".asc"), overwrite =
TRUE)
# save(single.GA,
# file = paste0(GA.inputs$Results.dir, NAME, ".rda"))
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, NAME, ".rds"))
ga.list[[i]] <- single.GA
names(ga.list[i]) <- NAME
Diagnostic.Plots(
resistance.mat = cd,
genetic.dist = gdist.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "continuous",
name = NAME,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
fit.stats <-
r.squaredGLMM(
MLPE.lmm(
resistance = cd,
pairwise.genetic = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
aic <-
AIC(
MLPE.lmm(
resistance = cd,
pairwise.genetic = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
LL <-
logLik(
MLPE.lmm(
resistance = cd,
pairwise.genetic = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
MLPE.list[[i]] <- MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i]] <- as.matrix(cd)
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
names(cd.list)[i] <- GA.inputs$layer.names[i]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- gdist.inputs$n.Pops
AICc <- (-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
if(GA.inputs$scale.surfaces[i] == 1) {
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0
}
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
single.GA@solution[4]
)
} else {
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
NA
)
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
"Equation",
"shape",
"max",
"scale"
)
RESULTS.cont[[cnt2]] <- RS
} # Continuous / scaled processing
if (dist_mod == TRUE) {
r <- reclassify(r, c(-Inf, Inf, 1))
names(r) <- "dist"
cd <- Run_gdistance(gdist.inputs, r)
Dist.AIC <- suppressWarnings(AIC(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ,
REML = FALSE
)
))
fit.stats <- r.squaredGLMM(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ,
REML = FALSE
)
)
LL <- logLik(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ,
REML = FALSE
)
)[[1]]
MLPE.list[[i + 1]] <- MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i + 1]] <- as.matrix(cd)
names(MLPE.list)[i + 1] <- 'Distance'
names(cd.list)[i + 1] <- 'Distance'
ROW <- nrow(gdist.inputs$ID)
k <- 2
if (GA.inputs$method == "AIC") {
dist.obj <- -Dist.AIC
} else if (GA.inputs$method == "R2") {
dist.obj <- fit.stats[[1]]
} else {
dist.obj <- LL[[1]]
}
k.list[[i + 1]] <- k
names(k.list)[i + 1] <- 'Distance'
n <- gdist.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Dist.AICc <- data.frame("Distance",
dist.obj,
k,
Dist.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Dist.AICc) <- c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
}
if (null_mod == TRUE) {
dat <- data.frame(gdist.inputs$ID, response = gdist.inputs$response)
colnames(dat) <- c("pop1", "pop2", "response")
# Fit model
mod <- lFormula(response ~ 1 + (1 | pop1), data = dat, REML = FALSE)
mod$reTrms$Zt <- gdist.inputs$ZZ
dfun <- do.call(mkLmerDevfun, mod)
opt <- optimizeLmer(dfun)
Null.AIC <-
AIC(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
fit.stats <-
r.squaredGLMM(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
LL <-
logLik(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
ROW <- nrow(gdist.inputs$ID)
k <- 1
if (GA.inputs$method == "AIC") {
null.obj <- -Null.AIC
} else if (GA.inputs$method == "R2") {
null.obj <- fit.stats[[1]]
} else {
null.obj <- LL[[1]]
}
n <- gdist.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Null.AICc <-
data.frame("Null",
null.obj,
k,
Null.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Null.AICc) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
}
}
# Standard GA ----------------
else {
cnt2 <- cnt2 + 1
names(r) <- GA.inputs$layer.names[i]
# Scaled optimization -----------------------------------------------------
if(GA.inputs$scale.surfaces[i] == 1) {
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single.scale,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
gdist.inputs = gdist.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
# suggestions = GA.inputs$SUGGESTS,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
} else { # Not Scaled optimization: ------------------
# * Categorical ----------------
if (GA.inputs$surface.type[i] == 'cat') {
cnt1 <- cnt1 + 1
names(r) <- GA.inputs$layer.names[i]
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
gdist.inputs = gdist.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
# suggestions = GA.inputs$SUGGESTS,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
if(dim(single.GA@solution)[1] > 1) {
single.GA@solution <- t(as.matrix(single.GA@solution[1,]))
}
single.GA@solution <-
single.GA@solution / min(single.GA@solution)
df <- data.frame(id = unique(r), t(single.GA@solution))
r <- subs(r, df)
NAME <- GA.inputs$layer.names[i]
names(r) <- NAME
cd <- Run_gdistance(gdist.inputs, r)
# save(cd, file = paste0(GA.inputs$Write.dir, NAME, ".rda"))
write.table(
as.matrix(cd),
file = paste0(GA.inputs$Results.dir, NAME, "_", gdist.inputs$method, "_distMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r,
paste0(GA.inputs$Results.dir, NAME, ".asc"),
overwrite = TRUE)
# save(single.GA,
# file = paste0(GA.inputs$Results.dir, NAME, ".rda"))
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, NAME, ".rds"))
ga.list[[i]] <- single.GA
names(ga.list[i]) <- NAME
Diagnostic.Plots(
resistance.mat = cd,
genetic.dist = gdist.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "categorical",
name = NAME,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
fit.stats <- r.squaredGLMM(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
aic <- AIC(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
LL <- logLik(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)[[1]]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- gdist.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
single.GA@solution
)
k <- GA.inputs$parm.type$n.parm[i]
Features <- matrix()
for (z in 1:(k)) {
feature <- paste0("Feature", z)
Features[z] <- feature
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
"k",
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
Features
)
RESULTS.cat[[cnt1]] <- RS
MLPE.list[[i]] <- MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i]] <- as.matrix(cd)
names(cd.list)[i] <- GA.inputs$layer.names[i]
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
} else { # * Continuous ---------------
# Processing of unscaled continuous surface
cnt2 <- cnt2 + 1
r <- SCALE(r, 0, 10)
names(r) <- GA.inputs$layer.names[i]
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
gdist.inputs = gdist.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
suggestions = GA.inputs$SUGGESTS,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
}
} # End gdist
# *** Optimization summary ----------------------------------------------------
# Post-optimization summary for continuous / scaled surfaces (standard GA, gdistance)
if(GA.inputs$surface.type[i] != 'cat'){
if(single.GA@fitnessValue == -99999 | dim(single.GA@solution)[1] > 1) {
if(GA.inputs$scale.surfaces[i] == 1) {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
} else {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
}
} else {
start.vals <- single.GA@solution[-1]
EQ <- get.EQ(single.GA@solution[1])
}
if(GA.inputs$scale.surfaces[i] == 1) {
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0.000123456543210
}
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
scale = single.GA@solution[4],
r = R.orig
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = single.GA@solution[1],
print.dir = GA.inputs$Plots.dir,
scale = single.GA@solution[4]
)
} else {
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
r = R.orig
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = EQ,
print.dir = GA.inputs$Plots.dir
)
}
names(r.tran) <- GA.inputs$layer.names[i]
NAME <- GA.inputs$layer.names[i]
cd <- Run_gdistance(gdist.inputs, r.tran)
write.table(
as.matrix(cd),
file = paste0(GA.inputs$Results.dir, NAME, "_", gdist.inputs$method,"_distMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r.tran, paste0(GA.inputs$Results.dir, NAME, ".asc"), overwrite =
TRUE)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, NAME, ".rds"))
ga.list[[i]] <- single.GA
names(ga.list[i]) <- NAME
Diagnostic.Plots(
resistance.mat = cd,
genetic.dist = gdist.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "continuous",
name = NAME,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
fit.stats <-
r.squaredGLMM(
MLPE.lmm(
resistance = cd,
pairwise.genetic = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
aic <-
AIC(
MLPE.lmm(
resistance = cd,
pairwise.genetic = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
LL <-
logLik(
MLPE.lmm(
resistance = cd,
pairwise.genetic = gdist.inputs$response,
REML = F,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
)
MLPE.list[[i]] <- MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i]] <- as.matrix(cd)
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
names(cd.list)[i] <- GA.inputs$layer.names[i]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- gdist.inputs$n.Pops
AICc <- (-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
if(GA.inputs$scale.surfaces[i] == 1) {
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0
}
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
single.GA@solution[4]
)
} else {
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
NA
)
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
"Equation",
"shape",
"max",
"scale"
)
RESULTS.cont[[cnt2]] <- RS
} # Continuous / scaled processing
if (dist_mod == TRUE) {
r <- reclassify(r, c(-Inf, Inf, 1))
names(r) <- "dist"
cd <- Run_gdistance(gdist.inputs, r)
Dist.AIC <- suppressWarnings(AIC(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ,
REML = FALSE
)
))
fit.stats <- r.squaredGLMM(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ,
REML = FALSE
)
)
LL <- logLik(
MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ,
REML = FALSE
)
)[[1]]
MLPE.list[[i + 1]] <- MLPE.lmm2(
resistance = cd,
response = gdist.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i + 1]] <- as.matrix(cd)
names(MLPE.list)[i + 1] <- 'Distance'
names(cd.list)[i + 1] <- 'Distance'
ROW <- nrow(gdist.inputs$ID)
k <- 2
if (GA.inputs$method == "AIC") {
dist.obj <- -Dist.AIC
} else if (GA.inputs$method == "R2") {
dist.obj <- fit.stats[[1]]
} else {
dist.obj <- LL[[1]]
}
k.list[[i + 1]] <- k
names(k.list)[i + 1] <- 'Distance'
n <- gdist.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Dist.AICc <- data.frame("Distance",
dist.obj,
k,
Dist.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Dist.AICc) <- c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
}
if (null_mod == TRUE) {
dat <- data.frame(gdist.inputs$ID, response = gdist.inputs$response)
colnames(dat) <- c("pop1", "pop2", "response")
# Fit model
mod <- lFormula(response ~ 1 + (1 | pop1), data = dat, REML = FALSE)
mod$reTrms$Zt <- gdist.inputs$ZZ
dfun <- do.call(mkLmerDevfun, mod)
opt <- optimizeLmer(dfun)
Null.AIC <-
AIC(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
fit.stats <-
r.squaredGLMM(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
LL <-
logLik(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
ROW <- nrow(gdist.inputs$ID)
k <- 1
if (GA.inputs$method == "AIC") {
null.obj <- -Null.AIC
} else if (GA.inputs$method == "R2") {
null.obj <- fit.stats[[1]]
} else {
null.obj <- LL[[1]]
}
n <- gdist.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Null.AICc <-
data.frame("Null",
null.obj,
k,
Null.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Null.AICc) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
}
} # End gaisl if else
} # End gdistance
#### Julia ####
if (!is.null(jl.inputs)) {
# Island GA ----------------
if(isTRUE(GA.inputs$gaisl)) {
cnt2 <- cnt2 + 1
names(r) <- GA.inputs$layer.names[i]
# * Scaled optimization: Julia -----------------------------------------------------
if(GA.inputs$scale.surfaces[i] == 1) {
single.GA <- gaisl(
type = "real-valued",
fitness = Resistance.Opt_single.scale,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
jl.inputs = jl.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
numIslands = GA.inputs$numIslands,
migrationRate = GA.inputs$migrationRate,
migrationInterval = GA.inputs$migrationInterval,
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
# keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
# * Not scaled --------------------------------------------------------------
}
else { # Surface not to be scaled
# *-* Categorical ---------------------------------------------------------
if (GA.inputs$surface.type[i] == 'cat') {
cnt1 <- cnt1 + 1
names(r) <- GA.inputs$layer.names[i]
single.GA <- gaisl(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
jl.inputs = jl.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
numIslands = GA.inputs$numIslands,
migrationRate = GA.inputs$migrationRate,
migrationInterval = GA.inputs$migrationInterval,
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
# keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
if(dim(single.GA@solution)[1] > 1) {
single.GA@solution <- t(as.matrix(single.GA@solution[1,]))
}
single.GA@solution <-
single.GA@solution / min(single.GA@solution)
df <- data.frame(id = unique(r), t(single.GA@solution))
r <- subs(r, df)
NAME <- GA.inputs$layer.names[i]
names(r) <- NAME
cd <- Run_CS.jl(jl.inputs, r, full.mat = TRUE)
write.table(
cd,
file = paste0(GA.inputs$Results.dir, NAME, "_jlResistMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r,
paste0(GA.inputs$Results.dir, NAME, ".asc"),
overwrite = TRUE)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, NAME, ".rds"))
ga.list[[i]] <- single.GA
names(ga.list[i]) <- NAME
Diagnostic.Plots(
resistance.mat = lower(cd),
genetic.dist = jl.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "categorical",
name = NAME,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
fit.stats <- r.squaredGLMM(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)
aic <- AIC(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)
LL <- logLik(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)[[1]]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- jl.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
single.GA@solution
)
k <- GA.inputs$parm.type$n.parm[i]
Features <- matrix()
for (z in 1:(k)) {
feature <- paste0("Feature", z)
Features[z] <- feature
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
"k",
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
Features
)
RESULTS.cat[[cnt1]] <- RS
MLPE.list[[i]] <- MLPE.lmm2(
resistance = cd,
response = jl.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i]] <- cd
names(cd.list)[i] <- GA.inputs$layer.names[i]
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
}
else {
# *-* Continuous ----------------------------------------------------------
cnt2 <- cnt2 + 1
r <- SCALE(r, 0, 10)
names(r) <- GA.inputs$layer.names[i]
single.GA <- gaisl(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
jl.inputs = jl.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
numIslands = GA.inputs$numIslands,
migrationRate = GA.inputs$migrationRate,
migrationInterval = GA.inputs$migrationInterval,
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
# keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
} # End unscaled cat cont ifelse
} # Close scale-unscale ifelse
# Standard GA ----------------
} else {
cnt2 <- cnt2 + 1
names(r) <- GA.inputs$layer.names[i]
# * Scaled optimization: Julia -----------------------------------------------------
if(GA.inputs$scale.surfaces[i] == 1) {
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single.scale,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
jl.inputs = jl.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
# suggestions = GA.inputs$SUGGESTS,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
# * Not scaled --------------------------------------------------------------
}
else { # Surface not to be scaled
# *-* Categorical ---------------------------------------------------------
if (GA.inputs$surface.type[i] == 'cat') {
cnt1 <- cnt1 + 1
names(r) <- GA.inputs$layer.names[i]
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
jl.inputs = jl.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
# suggestions = GA.inputs$SUGGESTS,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
if(dim(single.GA@solution)[1] > 1) {
single.GA@solution <- t(as.matrix(single.GA@solution[1,]))
}
single.GA@solution <-
single.GA@solution / min(single.GA@solution)
df <- data.frame(id = unique(r), t(single.GA@solution))
r <- subs(r, df)
NAME <- GA.inputs$layer.names[i]
names(r) <- NAME
cd <- Run_CS.jl(jl.inputs, r, full.mat = TRUE)
write.table(
cd,
file = paste0(GA.inputs$Results.dir, NAME, "_jlResistMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r,
paste0(GA.inputs$Results.dir, NAME, ".asc"),
overwrite = TRUE)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, NAME, ".rds"))
ga.list[[i]] <- single.GA
names(ga.list[i]) <- NAME
Diagnostic.Plots(
resistance.mat = lower(cd),
genetic.dist = jl.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "categorical",
name = NAME,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
fit.stats <- r.squaredGLMM(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)
aic <- AIC(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)
LL <- logLik(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)[[1]]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- jl.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
single.GA@solution
)
k <- GA.inputs$parm.type$n.parm[i]
Features <- matrix()
for (z in 1:(k)) {
feature <- paste0("Feature", z)
Features[z] <- feature
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
"k",
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
Features
)
RESULTS.cat[[cnt1]] <- RS
MLPE.list[[i]] <- MLPE.lmm2(
resistance = cd,
response = jl.inputs$response,
REML = TRUE,
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
cd.list[[i]] <- cd
names(cd.list)[i] <- GA.inputs$layer.names[i]
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
}
else {
# *-* Continuous ----------------------------------------------------------
cnt2 <- cnt2 + 1
r <- SCALE(r, 0, 10)
names(r) <- GA.inputs$layer.names[i]
single.GA <- ga(
type = "real-valued",
fitness = Resistance.Opt_single,
Resistance = r,
population = GA.inputs$population,
selection = GA.inputs$selection,
pcrossover = GA.inputs$pcrossover,
pmutation = GA.inputs$pmutation,
crossover = GA.inputs$crossover,
Min.Max = GA.inputs$Min.Max,
GA.inputs = GA.inputs,
jl.inputs = jl.inputs,
lower = GA.inputs$min.list[[i]],
upper = GA.inputs$max.list[[i]],
optim = GA.inputs$optim,
optimArgs = GA.inputs$optimArgs,
parallel = GA.inputs$parallel,
popSize = GA.inputs$pop.size,
maxiter = GA.inputs$maxiter,
run = GA.inputs$run,
keepBest = GA.inputs$keepBest,
elitism = GA.inputs$percent.elite,
mutation = GA.inputs$mutation,
# suggestions = GA.inputs$SUGGESTS,
seed = GA.inputs$seed,
monitor = GA.inputs$monitor,
iter = i,
quiet = GA.inputs$quiet
)
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, GA.inputs$layer.names[i], "_full.rds"))
} # End unscaled cat cont ifelse
} # Close scale-unscale ifelse
} # Close Island-Standard ifelse
# ***Optimization summary ----------------------------------------------------
#!#!#!
if(GA.inputs$surface.type[i] != 'cat'){
if(single.GA@fitnessValue == -99999 | dim(single.GA@solution)[1] > 1) {
if(GA.inputs$scale.surfaces[i] == 1) {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
} else {
EQ <- get.EQ(9)
c.names <- dimnames(single.GA@solution)
single.GA@solution <- t(as.matrix(rep(9, length(dimnames(single.GA@solution)[[2]]))))
dimnames(single.GA@solution) <- c.names
}
} else {
start.vals <- single.GA@solution[-1]
EQ <- get.EQ(single.GA@solution[1])
}
if(GA.inputs$scale.surfaces[i] == 1) {
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0.000123456543210
}
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
scale = single.GA@solution[4],
r = R.orig
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = single.GA@solution[1],
print.dir = GA.inputs$Plots.dir,
scale = single.GA@solution[4]
)
} else {
r.tran <-
Resistance.tran(
transformation = single.GA@solution[1],
shape = single.GA@solution[2],
max = single.GA@solution[3],
r = R.orig
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = EQ,
print.dir = GA.inputs$Plots.dir
)
}
names(r.tran) <- GA.inputs$layer.names[i]
NAME <- GA.inputs$layer.names[i]
cd <- Run_CS.jl(jl.inputs, r, full.mat = TRUE)
write.table(
as.matrix(cd),
file = paste0(GA.inputs$Results.dir, NAME, "_jlResistMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
writeRaster(r.tran, paste0(GA.inputs$Results.dir, NAME, ".asc"), overwrite =
TRUE)
# save(single.GA,
# file = paste0(GA.inputs$Results.dir, NAME, ".rda"))
saveRDS(single.GA,
file = paste0(GA.inputs$Results.dir, NAME, ".rds"))
ga.list[[i]] <- single.GA
names(ga.list[i]) <- NAME
Diagnostic.Plots(
resistance.mat = lower(cd),
genetic.dist = jl.inputs$response,
plot.dir = GA.inputs$Plots.dir,
type = "continuous",
name = NAME,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
Plot.trans(
PARM = single.GA@solution[-1],
Resistance = GA.inputs$Resistance.stack[[i]],
transformation = EQ,
print.dir = GA.inputs$Plots.dir
)
fit.stats <-
r.squaredGLMM(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)
aic <-
AIC(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)
LL <-
logLik(
MLPE.lmm(
resistance = lower(cd),
pairwise.genetic = jl.inputs$response,
REML = F,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
)
MLPE.list[[i]] <- MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = TRUE,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
cd.list[[i]] <- cd
names(MLPE.list)[i] <- GA.inputs$layer.names[i]
names(cd.list)[i] <- GA.inputs$layer.names[i]
if (k.value == 1) {
k <- 2
} else if (k.value == 2) {
k <- GA.inputs$parm.type$n.parm[i] + 1
} else if (k.value == 3) {
k <- GA.inputs$parm.type$n.parm[i] + length(GA.inputs$layer.names) + 1
} else {
k <- length(GA.inputs$layer.names[i]) + 1
}
k.list[[i]] <- k
names(k.list)[i] <- GA.inputs$layer.names[i]
n <- jl.inputs$n.Pops
AICc <- (-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
if(GA.inputs$scale.surfaces[i] == 1) {
if(single.GA@solution[4] < 0.5) {
single.GA@solution[4] <- 0
}
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
single.GA@solution[4]
)
} else {
RS <- data.frame(
GA.inputs$layer.names[i],
single.GA@fitnessValue,
k,
aic,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]],
get.EQ(single.GA@solution[1]),
single.GA@solution[2],
single.GA@solution[3],
NA
)
}
colnames(RS) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
"Equation",
"shape",
"max",
"scale"
)
RESULTS.cont[[cnt2]] <- RS
} # Continuous / scaled processing
if (dist_mod == TRUE) {
r <- reclassify(r, c(-Inf, Inf, 1))
names(r) <- "dist"
cd <- Run_CS.jl(jl.inputs, r, full.mat = TRUE)
write.table(
cd,
file = paste0(GA.inputs$Results.dir, "Distance", "_jlResistMat.csv"),
sep = ",",
row.names = F,
col.names = F
)
Dist.AIC <- suppressWarnings(AIC(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ,
REML = FALSE
)
))
fit.stats <- r.squaredGLMM(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ,
REML = FALSE
)
)
LL <- logLik(
MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ,
REML = FALSE
)
)[[1]]
MLPE.list[[i + 1]] <- MLPE.lmm2(
resistance = lower(cd),
response = jl.inputs$response,
REML = TRUE,
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
cd.list[[i + 1]] <- cd
names(cd.list)[i + 1] <- "Distance"
names(MLPE.list)[i + 1] <- "Distance"
ROW <- nrow(jl.inputs$ID)
k <- 2
k.list[[i + 1]] <- k
names(k.list)[i + 1] <- 'Distance'
if (GA.inputs$method == "AIC") {
dist.obj <- -Dist.AIC
} else if (GA.inputs$method == "R2") {
dist.obj <- fit.stats[[1]]
} else {
dist.obj <- LL[[1]]
}
n <- jl.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Dist.AICc <- data.frame("Distance",
dist.obj,
k,
Dist.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Dist.AICc) <- c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
}
if (null_mod == TRUE) {
dat <- data.frame(jl.inputs$ID, response = jl.inputs$response)
colnames(dat) <- c("pop1", "pop2", "response")
# Fit model
mod <-
lFormula(response ~ 1 + (1 | pop1),
data = dat,
REML = FALSE)
mod$reTrms$Zt <- jl.inputs$ZZ
dfun <- do.call(mkLmerDevfun, mod)
opt <- optimizeLmer(dfun)
Null.AIC <-
AIC(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
fit.stats <-
r.squaredGLMM(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
LL <-
logLik(mkMerMod(environment(dfun), opt, mod$reTrms, fr = mod$fr))
ROW <- nrow(jl.inputs$ID)
k <- 1
if (GA.inputs$method == "AIC") {
null.obj <- -Null.AIC
} else if (GA.inputs$method == "R2") {
null.obj <- fit.stats[[1]]
} else {
null.obj <- LL[[1]]
}
n <- jl.inputs$n.Pops
AICc <-
(-2 * LL) + (2 * k) + (((2 * k) * (k + 1)) / (n - k - 1))
Null.AICc <-
data.frame("Null",
null.obj,
k,
Null.AIC,
AICc,
fit.stats[[1]],
fit.stats[[2]],
LL[[1]])
colnames(Null.AICc) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL"
)
}
} # End Julia
} # Close ascii loop
# Final summary -----------------------------------------------------------
# Make results data frame
Results.cont <- data.frame()
for (i in 1:GA.inputs$n.layers) {
Results.cont <- do.call(rbind, RESULTS.cont)
}
# Compile results into tables
cat("\n")
cat("\n")
colnames(Results.cont) <-
c(
"Surface",
paste0("obj.func_", GA.inputs$method),
'k',
"AIC",
"AICc",
"R2m",
"R2c",
"LL",
"Equation",
"shape",
"max",
"scale"
)
Results.cont <- Results.cont[order(Results.cont$AICc), ]
write.table(
Results.cont,
paste0(GA.inputs$Results.dir, "Smooth_Optim_Results.csv"),
sep = ",",
col.names = T,
row.names = F
)
# Full Results
Results.All <- (Results.cont[, c(1:8)])
if (dist_mod == TRUE)
Results.All <- rbind(Results.All, Dist.AICc)
if (null_mod == TRUE)
Results.All <- rbind(Results.All, Null.AICc)
Results.All <- Results.All[order(Results.All$AICc), ]
cat("\n")
cat("\n")
write.table(
Results.All,
paste0(GA.inputs$Results.dir, "All_Results_Table_smooth.csv"),
sep = ",",
col.names = T,
row.names = F
)
# Get parameter estimates
if (!is.null(CS.inputs)) {
MLPE.results <- MLPE.lmm_coef(
resistance = GA.inputs$Results.dir,
genetic.dist = CS.inputs$response,
out.dir = GA.inputs$Results.dir,
method = "cs",
ID = CS.inputs$ID,
ZZ = CS.inputs$ZZ
)
} else if(!is.null(jl.inputs)) {
MLPE.results <- MLPE.lmm_coef(
resistance = GA.inputs$Results.dir,
genetic.dist = jl.inputs$response,
out.dir = GA.inputs$Results.dir,
method = "jl",
ID = jl.inputs$ID,
ZZ = jl.inputs$ZZ
)
} else {
MLPE.results <- MLPE.lmm_coef(
resistance = GA.inputs$Results.dir,
genetic.dist = gdist.inputs$response,
out.dir = GA.inputs$Results.dir,
method = "gd",
ID = gdist.inputs$ID,
ZZ = gdist.inputs$ZZ
)
}
rt <- proc.time()[3] - t1
k.list <- plyr::ldply(k.list)
colnames(k.list) <- c("surface", "k")
RESULTS <-
list(
ContinuousResults = Results.cont,
CategoricalResults = NULL,
AICc = Results.All,
MLPE = MLPE.results,
Run.Time = rt,
MLPE.list = MLPE.list,
cd = cd.list,
k = k.list,
ga = ga.list
)
# file.remove(list.files(GA.inputs$Write.dir, full.names = TRUE))
# unlink(GA.inputs$Write.dir, recursive = T, force = T)
return(RESULTS)
}
|
75d62ddefc718048951affa6fbd4b43746dd9fa5
|
8f2f7844354a8b68ad1afe5e5fd322b6bd1b1b8e
|
/lab4/3&4.R
|
5ce70606f7bf35f11180df61de9dd68280d3a497
|
[] |
no_license
|
KUGDev/EMMvSA
|
0f6f97df07ce5e4c0e50272863b5b8481a730853
|
08bfdeab9f84c333774aefb0ed341a444dce2656
|
refs/heads/master
| 2023-04-13T22:53:15.367460
| 2021-05-02T23:10:44
| 2021-05-02T23:10:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,398
|
r
|
3&4.R
|
library(e1071)
# Build a colour palette for plot backgrounds/decision regions: the first
# three colours are fixed pastels; any additional colours (n > 3) come from
# rainbow(n).
area.pallete <- function(n = 3) {
  palette_cols <- rainbow(n)
  palette_cols[1:3] <- c("PaleGreen", "PaleTurquoise", "Pink")
  palette_cols
}
# Colour palette for observation symbols (not referenced elsewhere in this
# script, like area.pallete above).
symbols.pallete = c("SeaGreen", "Blue", "Red")
# Fix the RNG so the train/test split below is reproducible.
set.seed(0)
# Initial SVM hyper-parameters (these globals are overwritten by the grid
# search loop variables later in the script).
C = 1
kernel = "polynomial"
degree = 1
# Keep only the two petal measurements and the class label from iris.
data = iris [c("Petal.Width", "Petal.Length", "Species")]
# Random 50/50 split of the rows into training and test sets.
trainIdx = sample(nrow(data), nrow(data) / 2, replace = FALSE)
train = data[trainIdx,]
dataTest = data[-trainIdx,]
# Predictor-only views of the two splits, used as newdata for predict().
objects = data[trainIdx, c("Petal.Width", "Petal.Length")]
testObjects = data[-trainIdx, c("Petal.Width", "Petal.Length")]
# Fit a C-classification SVM with the given hyper-parameters and report its
# misclassification counts on the training and test splits.
#
# Relies on the script globals `train`, `objects`, `dataTest` and
# `testObjects` prepared above, and on svm()/predict() from e1071.
#
# Args:
#   C:      SVM cost parameter.
#   kernel: kernel name ("polynomial", "radial", "sigmoid", ...).
#   degree: polynomial degree (ignored by non-polynomial kernels).
#   gamma:  kernel coefficient.
#
# Returns a length-2 numeric vector: c(train errors, test errors).
test_model <- function(C, kernel, degree, gamma)
{
  cat(paste("C = ", C, "; kernel = ", kernel, "; degree = ", degree, "; gamma = ", gamma, sep = ""), "\n")
  model <- svm(Species ~ ., data = train, type = "C-classification",
               cost = C, kernel = kernel, degree = degree, gamma = gamma)
  # Number of misclassified observations: everything off the diagonal of the
  # (square) confusion table.  Generalizes the former hard-coded 3x3
  # indexing t[c(2, 3, 4, 6, 7, 8)] to any number of classes.
  error_count <- function(t)
  {
    sum(t) - sum(diag(t))
  }
  train_table <- table(train$Species, predict(model, objects))
  test_table <- table(dataTest$Species, predict(model, testObjects))
  c(error_count(train_table), error_count(test_table))
}
# Hyper-parameter grid for the exhaustive search below.
kernels = c("polynomial", "radial", "sigmoid")
values = c(0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50)          # candidate cost values C
degrees = 1:15                                           # polynomial degrees
gammas = c(0.1, 0.25, 0.5, 0.75, 1, 2, 5, 10, 25, 50)   # kernel coefficients
# Result accumulator, initialised with a sentinel row whose error counts of
# 999999 keep it out of the minimum search at the end of the script.
# NOTE(review): growing this frame row-by-row via rbind() is O(n^2);
# pre-allocating (or collecting rows in a list) would be faster — left as-is.
dataFrame = data.frame("none", 0.0, 0, 0.0, 999999, 999999, 999999)
names(dataFrame)<-c("kernel", "C", "degree", "gamma", "errors", "train_errors", "test_errors")
# Evaluate one hyper-parameter combination via test_model() and append the
# result (with its total, train and test error counts) as a new row to the
# accumulator data frame; returns the extended frame.
add_row <- function(dataFrame, C, kernel, degree, gamma)
{
  err <- test_model(C, kernel, degree, gamma)
  new_row <- setNames(
    data.frame(kernel, C, degree, gamma, sum(err), err[1], err[2]),
    c("kernel", "C", "degree", "gamma", "errors", "train_errors", "test_errors")
  )
  rbind(dataFrame, new_row)
}
# Exhaustive grid search: every kernel/C/gamma combination, plus every
# polynomial degree when the kernel is polynomial.  For the other kernels
# degree is fixed to 1 and only a single row is added per (C, gamma) pair.
for (kernel in kernels)
{
  for (C in values)
  {
    for (gamma in gammas)
    {
      if (kernel != "polynomial")
      {
        degree = 1
        dataFrame = add_row(dataFrame, C, kernel, degree, gamma)
        next
      }
      for (degree in degrees)
      {
        dataFrame = add_row(dataFrame, C, kernel, degree, gamma)
      }
    }
  }
}
# Report every configuration that achieves the fewest test-set errors
# (the 999999 sentinel row can never win this comparison).
min_error_count = min(dataFrame$test_errors)
print(dataFrame[dataFrame$test_errors == min_error_count,])
|
0dd7925d4f2ca257a4f5c3c0d29c566f93f2dfcc
|
1040a5d6ef334e4fa259138bcaa815c4f1c1bbb8
|
/ui.R
|
2d68f7c454f2e4e250e6763a386bb6f5411e9f12
|
[] |
no_license
|
jimbrig/shiny_react_fluent_app
|
d2d51f14bacf8da1bcf080feafa6ca44af68c402
|
54bb9c1359d972ec99785495cbe0f5d88c7f50a1
|
refs/heads/main
| 2023-04-01T21:12:08.756089
| 2021-04-13T21:00:46
| 2021-04-13T21:00:46
| 352,163,451
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,730
|
r
|
ui.R
|
# Top bar of the app: company logo, page title, and a Fluent UI CommandBar
# with primary actions on the left and icon-only actions on the right.
header <- tagList(
  img(src = "images/placeholder_company_logo.jpg", class = "logo"),
  div(Text(variant = "xLarge", "Sales Reps Analysis"), class = "title"),
  CommandBar(
    items = list(
      # "New" opens a submenu with two entries; the mail entry is a mailto link.
      CommandBarItem("New", "Add", subitems = list(
        CommandBarItem("Email message", "Mail", key = "emailMessage", href = "mailto:me@example.com"),
        CommandBarItem("Calendar event", "Calendar", key = "calendarEvent")
      )),
      CommandBarItem("Upload sales plan", "Upload"),
      CommandBarItem("Share analysis", "Share"),
      CommandBarItem("Download report", "Download")
    ),
    # Right-aligned, icon-only items.
    farItems = list(
      CommandBarItem("Grid view", "Tiles", iconOnly = TRUE),
      CommandBarItem("Info", "Info", iconOnly = TRUE)
    ),
    style = list(width = "100%")
  )
)
# Left-hand navigation menu.  The first two links use shiny.router hash
# routes ('#!/...'); the remaining links point to external sites.
navigation <- Nav(
  groups = list(
    list(
      links = list(
        list(
          name = 'Home',
          url = '#!/',
          key = 'home',
          icon = 'Home'
        ),
        list(
          name = 'Analysis',
          url = '#!/other',
          key = 'analysis',
          icon = 'AnalyticsReport'
        ),
        list(
          name = 'shiny.fluent',
          url = 'http://github.com/Appsilon/shiny.fluent',
          key = 'repo',
          icon = 'GitGraph'
        ),
        list(
          name = 'shiny.react',
          url = 'http://github.com/Appsilon/shiny.react',
          key = 'shinyreact',
          icon = 'GitGraph'
        ),
        list(
          name = 'Appsilon',
          url = 'http://appsilon.com',
          key = 'appsilon',
          icon = 'WebAppBuilderFragment'
        )
      )
    )
  ),
  initialSelectedKey = 'home',
  # Make the nav fill its grid cell and scroll vertically when it overflows.
  styles = list(
    root = list(
      height = '100%',
      boxSizing = 'border-box',
      overflowY = 'auto'
    )
  )
)
# App footer: three medium Text elements laid out horizontally with
# space-between alignment and a 20px gap between children.
footer_credit <- Text(variant = "medium", "Built by Jimmy Briggs 2021", block = TRUE)
footer_contact <- Text(variant = "medium", nowrap = FALSE, "If you'd like to learn more, reach out to me at jimbrig2011@outlook.com")
footer_rights <- Text(variant = "medium", nowrap = FALSE, "All rights reserved.")
footer <- Stack(
  footer_credit,
  footer_contact,
  footer_rights,
  horizontal = TRUE,
  horizontalAlign = 'space-between',
  tokens = list(childrenGap = 20)
)
# Arrange the app regions (header, side navigation, main content, footer)
# inside the CSS grid container defined in styles.css.
layout <- function(mainUI) {
  header_cell <- div(class = "header", header)
  nav_cell <- div(class = "sidenav", navigation)
  main_cell <- div(class = "main", mainUI)
  footer_cell <- div(class = "footer", footer)
  div(class = "grid-container", header_cell, nav_cell, main_cell, footer_cell)
}
# Content cards for the home page.  make_card()/make_page() are helpers not
# defined in this file — presumably in global.R or similar; TODO confirm.
card1 <- make_card(
  "Welcome to shiny.fluent demo!",
  div(
    Text("shiny.fluent is a package that allows you to build Shiny apps using Microsoft's Fluent UI."),
    Text("Use the menu on the left to explore live demos of all available components.")
  ))
card2 <- make_card(
  "shiny.react makes it easy to use React libraries in Shiny apps.",
  div(
    Text("To make a React library convenient to use from Shiny, we need to write an R package that wraps it - for example, a shiny.fluent package for Microsoft's Fluent UI, or shiny.blueprint for Palantir's Blueprint.js."),
    Text("Communication and other issues in integrating Shiny and React are solved and standardized in shiny.react package."),
    Text("shiny.react strives to do as much as possible automatically, but there's no free lunch here, so in all cases except trivial ones you'll need to do some amount of manual work. The more work you put into a wrapper package, the less work your users will have to do while using it.")
  ))
# Landing page shown for the '/' route: the two cards stacked vertically.
home_page <- make_page(
  "This is a Fluent UI app built in Shiny",
  "shiny.react + Fluent UI = shiny.fluent",
  div(card1, card2)
)
# Analysis page (route 'other'): a filters card and a plotly chart side by
# side, with a server-rendered section ('analysis' output) below.
# `filters` is not defined in this file — presumably built alongside
# make_card(); TODO confirm.
analysis_page <- make_page(
  "Sales representatives",
  "Best performing reps",
  div(
    Stack(
      horizontal = TRUE,
      tokens = list(childrenGap = 10),
      make_card("Filters", filters, size = 4, style = "max-height: 320px"),
      make_card("Deals count", plotlyOutput("plot"), size = 8, style = "max-height: 320px")
    ),
    uiOutput("analysis")
  )
)
# router ------------------------------------------------------------------
# Client-side routing via shiny.router: '/' shows the home page and
# 'other' shows the analysis page (matching the nav URLs '#!/' and '#!/other').
router <- make_router(
  route("/", home_page),
  route("other", analysis_page)
)
# Add shiny.router dependencies manually ----------------------------------
# They are not picked up automatically because they are added in a
# non-standard way.
# Expose shiny.router's bundled www/ directory and build a <script> tag for
# its JavaScript, since the dependency is not attached automatically.
shiny::addResourcePath("shiny.router", system.file("www", package = "shiny.router"))
shiny_router_js_src <- file.path("shiny.router", "shiny.router.js")
shiny_router_script_tag <- shiny::tags$script(type = "text/javascript", src = shiny_router_js_src)
# Top-level UI: custom stylesheet + router script in <head>, and the routed
# page content rendered inside the grid layout, wrapped for shiny.react.
ui <- fluentPage(
  tags$head(
    tags$link(href = "styles.css", rel = "stylesheet", type = "text/css"),
    shiny_router_script_tag
  ),
  withReact(layout(router$ui))
)
|
123559c0464805140c0666433684328096763e26
|
1d4c729a11381851e0b5c8578bf5cd7289fc082f
|
/man/xEnricherSNPs.Rd
|
f9c70460d1f3d260dc11b3bb0155969cb688f8c4
|
[] |
no_license
|
hfang-bristol/XGR
|
95b484a0350e14ad59fa170ead902689a34be89a
|
7b947080b310363e2b82c24c82d3394335906f54
|
refs/heads/master
| 2023-02-05T12:35:24.074365
| 2023-01-28T05:49:33
| 2023-01-28T05:49:33
| 52,982,296
| 9
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 12,180
|
rd
|
xEnricherSNPs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xEnricherSNPs.r
\name{xEnricherSNPs}
\alias{xEnricherSNPs}
\title{Function to conduct enrichment analysis given a list of SNPs and the ontology in query}
\usage{
xEnricherSNPs(
data,
background = NULL,
ontology = c("EF", "EF_disease", "EF_phenotype", "EF_bp"),
include.LD = NA,
LD.r2 = 0.8,
size.range = c(10, 2000),
min.overlap = 5,
which.distance = NULL,
test = c("fisher", "hypergeo", "binomial"),
background.annotatable.only = NULL,
p.tail = c("one-tail", "two-tails"),
p.adjust.method = c("BH", "BY", "bonferroni", "holm", "hochberg",
"hommel"),
ontology.algorithm = c("none", "pc", "elim", "lea"),
elim.pvalue = 0.01,
lea.depth = 2,
path.mode = c("all_paths", "shortest_paths", "all_shortest_paths"),
true.path.rule = T,
verbose = T,
silent = FALSE,
RData.location = "http://galahad.well.ox.ac.uk/bigdata",
guid = NULL
)
}
\arguments{
\item{data}{an input vector. It contains a list of SNPs of interest}
\item{background}{a background vector. It contains a list of SNPs as
the test background. If NULL, by default all annotatable are used as
background}
\item{ontology}{the ontology supported currently. Now it is only "EF"
for Experimental Factor Ontology (used to annotate GWAS Catalog SNPs).
However, there are several subparts of this ontology to choose:
'EF_disease' for the subpart under the term 'disease' (EFO:0000408),
'EF_phenotype' for the subpart under the term 'phenotype'
(EFO:0000651), 'EF_bp' for the subpart under the term 'biological
process' (GO:0008150)}
\item{include.LD}{additional SNPs in LD with Lead SNPs are also
included. By default, it is 'NA' to disable this option. Otherwise, LD
SNPs will be included based on one or more of 26 populations and 5
super populations from 1000 Genomics Project data (phase 3). The
population can be one of 5 super populations ("AFR", "AMR", "EAS",
"EUR", "SAS"), or one of 26 populations ("ACB", "ASW", "BEB", "CDX",
"CEU", "CHB", "CHS", "CLM", "ESN", "FIN", "GBR", "GIH", "GWD", "IBS",
"ITU", "JPT", "KHV", "LWK", "MSL", "MXL", "PEL", "PJL", "PUR", "STU",
"TSI", "YRI"). Explanations for population code can be found at
\url{http://www.1000genomes.org/faq/which-populations-are-part-your-study}}
\item{LD.r2}{the LD r2 value. By default, it is 0.8, meaning that SNPs
in LD (r2>=0.8) with input SNPs will be considered as LD SNPs. It can
be any value from 0.8 to 1}
\item{size.range}{the minimum and maximum size of members of each term
in consideration. By default, it sets to a minimum of 10 but no more
than 2000}
\item{min.overlap}{the minimum number of overlaps. Only those terms
with members that overlap with input data at least min.overlap (5 by
default) will be processed}
\item{which.distance}{which terms with the distance away from the
ontology root (if any) is used to restrict terms in consideration. By
default, it sets to 'NULL' to consider all distances}
\item{test}{the test statistic used. It can be "fisher" for using
fisher's exact test, "hypergeo" for using hypergeometric test, or
"binomial" for using binomial test. Fisher's exact test is to test the
independence between gene group (genes belonging to a group or not) and
gene annotation (genes annotated by a term or not), and thus compare
sampling to the left part of background (after sampling without
replacement). Hypergeometric test is to sample at random (without
replacement) from the background containing annotated and non-annotated
genes, and thus compare sampling to background. Unlike hypergeometric
test, binomial test is to sample at random (with replacement) from the
background with the constant probability. In terms of the ease of
finding the significance, they are in order: hypergeometric test >
fisher's exact test > binomial test. In other words, in terms of the
calculated p-value, hypergeometric test < fisher's exact test <
binomial test}
\item{background.annotatable.only}{logical to indicate whether the
background is further restricted to the annotatable. By default, it is
NULL: if ontology.algorithm is not 'none', it is always TRUE;
otherwise, it depends on the background (if not provided, it will be
TRUE; otherwise FALSE). Surely, it can be explicitly stated}
\item{p.tail}{the tail used to calculate p-values. It can be either
"two-tails" for the significance based on two-tails (ie both over- and
under-overrepresentation) or "one-tail" (by default) for the
significance based on one tail (ie only over-representation)}
\item{p.adjust.method}{the method used to adjust p-values. It can be
one of "BH", "BY", "bonferroni", "holm", "hochberg" and "hommel". The
first two methods "BH" (widely used) and "BY" control the false
discovery rate (FDR: the expected proportion of false discoveries
amongst the rejected hypotheses); the last four methods "bonferroni",
"holm", "hochberg" and "hommel" are designed to give strong control of
the family-wise error rate (FWER). Notes: FDR is a less stringent
condition than FWER}
\item{ontology.algorithm}{the algorithm used to account for the
hierarchy of the ontology. It can be one of "none", "pc", "elim" and
"lea". For details, please see 'Note' below}
\item{elim.pvalue}{the parameter only used when "ontology.algorithm" is
"elim". It is used to control how to declare a significantly enriched
term (and subsequently all genes in this term are eliminated from all
its ancestors)}
\item{lea.depth}{the parameter only used when "ontology.algorithm" is
"lea". It is used to control the maximum depth used to consider
the children of a term (and subsequently all genes in these children
terms are eliminated from the use for the recalculation of the
significance at this term)}
\item{path.mode}{the mode of paths induced by vertices/nodes with input
annotation data. It can be "all_paths" for all possible paths to the
root, "shortest_paths" for only one path to the root (for each node in
query), "all_shortest_paths" for all shortest paths to the root (i.e.
for each node, find all shortest paths with the equal lengths)}
\item{true.path.rule}{logical to indicate whether the true-path rule
should be applied to propagate annotations. By default, it sets to
true}
\item{verbose}{logical to indicate whether the messages will be
displayed in the screen. By default, it sets to false for no display}
\item{silent}{logical to indicate whether the messages will be silent
completely. By default, it sets to false. If true, verbose will be
forced to be false}
\item{RData.location}{the characters to tell the location of built-in
RData files. See \code{\link{xRDataLoader}} for details}
\item{guid}{a valid (5-character) Global Unique IDentifier for an OSF
project. See \code{\link{xRDataLoader}} for details}
}
\value{
an object of class "eTerm", a list with following components:
\itemize{
\item{\code{term_info}: a matrix of nTerm X 4 containing snp/gene set
information, where nTerm is the number of terms, and the 4 columns are
"id" (i.e. "Term ID"), "name" (i.e. "Term Name"), "namespace" and
"distance"}
\item{\code{annotation}: a list of terms containing annotations, each
term storing its annotations. Always, terms are identified by "id"}
\item{\code{g}: an igraph object to represent DAG}
\item{\code{data}: a vector containing input data in consideration. It
is not always the same as the input data as only those mappable are
retained}
\item{\code{background}: a vector containing the background data. It is
not always the same as the input data as only those mappable are
retained}
\item{\code{overlap}: a list of overlapped snp/gene sets, each storing
snps overlapped between a snp/gene set and the given input data (i.e.
the snps of interest). Always, gene sets are identified by "id"}
\item{\code{fc}: a vector containing fold changes}
\item{\code{zscore}: a vector containing z-scores}
\item{\code{pvalue}: a vector containing p-values}
\item{\code{adjp}: a vector containing adjusted p-values. It is the p
value but after being adjusted for multiple comparisons}
\item{\code{or}: a vector containing odds ratio}
\item{\code{CIl}: a vector containing lower bound confidence interval
for the odds ratio}
\item{\code{CIu}: a vector containing upper bound confidence interval
for the odds ratio}
\item{\code{cross}: a matrix of nTerm X nTerm, with an on-diagnal cell
for the overlapped-members observed in an individaul term, and
off-diagnal cell for the overlapped-members shared betwene two terms}
\item{\code{call}: the call that produced this result}
}
}
\description{
\code{xEnricherSNPs} is supposed to conduct enrichment analysis given
the input data and the ontology in query. It returns an object of class
"eTerm". Enrichment analysis is based on either Fisher's exact test or
Hypergeometric test. The test can respect the hierarchy of the
ontology. Now it supports enrichment analysis for SNPs using GWAS
Catalog traits mapped to Experimental Factor Ontology. If required,
additional SNPs that are in linkage disequilibrium (LD) with input SNPs
are also be used for test.
}
\note{
The interpretation of the algorithms used to account for the hierarchy
of the ontology is:
\itemize{
\item{"none": does not consider the ontology hierarchy at all.}
\item{"lea": computes the significance of a term in terms of the
significance of its children at the maximum depth (e.g. 2). Precisely,
once snps are already annotated to any children terms with more
significance than itself, then all these snps are eliminated from the
use for the recalculation of the significance at that term. The final
p-value takes the maximum of the original p-value and the recalculated
p-value.}
\item{"elim": computers the significance of a term in terms of the
significance of its all children. Precisely, once snps are already
annotated to a signficantly enriched term under the cutoff of e.g.
pvalue<1e-2, all these snps are eliminated from the ancestors of that
term).}
\item{"pc": requires the significance of a term not only using the
whole snps as background but also using snps annotated to all its
direct parents/ancestors as background. The final p-value takes the
maximum of both p-values in these two calculations.}
\item{"Notes": the order of the number of significant terms is: "none"
> "lea" > "elim" > "pc".}
}
}
\examples{
\dontrun{
# Load the library
library(XGR)
RData.location <- "http://galahad.well.ox.ac.uk/bigdata/"
# SNP-based enrichment analysis using GWAS Catalog traits (mapped to EF)
# a) provide the input SNPs of interest (eg 'EFO:0002690' for 'systemic lupus erythematosus')
## load GWAS SNPs annotated by EF (an object of class "dgCMatrix" storing a spare matrix)
anno <- xRDataLoader(RData='GWAS2EF', RData.location=RData.location)
ind <- which(colnames(anno)=='EFO:0002690')
data <- rownames(anno)[anno[,ind]!=0]
data
# optionally, provide the test background (if not provided, all annotatable SNPs)
#background <- rownames(anno)
# b) perform enrichment analysis
eTerm <- xEnricherSNPs(data=data, ontology="EF",
path.mode=c("all_paths"), RData.location=RData.location)
# b') optionally, enrichment analysis for input SNPs plus their LD SNPs
## LD based on European population (EUR) with r2>=0.8
#eTerm <- xEnricherSNPs(data=data, include.LD="EUR", LD.r2=0.8, RData.location=RData.location)
# c) view enrichment results for the top significant terms
xEnrichViewer(eTerm)
# d) save enrichment results to the file called 'EF_enrichments.txt'
res <- xEnrichViewer(eTerm, top_num=length(eTerm$adjp), sortBy="adjp",
details=TRUE)
output <- data.frame(term=rownames(res), res)
utils::write.table(output, file="EF_enrichments.txt", sep="\t",
row.names=FALSE)
# e) barplot of significant enrichment results
bp <- xEnrichBarplot(eTerm, top_num="auto", displayBy="adjp")
print(bp)
# f) visualise the top 10 significant terms in the ontology hierarchy
# color-code terms according to the adjust p-values (taking the form of 10-based negative logarithm)
xEnrichDAGplot(eTerm, top_num=10, displayBy="adjp",
node.info=c("full_term_name"))
# color-code terms according to the z-scores
xEnrichDAGplot(eTerm, top_num=10, displayBy="zscore",
node.info=c("full_term_name"))
}
}
\seealso{
\code{\link{xRDataLoader}}, \code{\link{xEnricher}}
}
|
877f1e145c567199ca462ece1c94f3c4ce020938
|
2016b18e19dce214fa3362eca26c4d23e75750ee
|
/R/corpus.R
|
800ea2b2feade371ee07148cbd702514d784ed02
|
[
"MIT"
] |
permissive
|
news-r/nltk4r
|
828967a4c787f4bbf52fade8d08a723f602ba35b
|
ceb0b8097723dc1729a9e79f12f2923c7282c626
|
refs/heads/master
| 2022-02-21T19:57:06.914225
| 2019-08-20T08:23:56
| 2019-08-20T08:23:56
| 198,101,782
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 577
|
r
|
corpus.R
|
#' Corpus Reader
#'
#' Read a collection of text files as a corpus.
#'
#' @param root The root directory.
#' @param pattern Pattern to match file names against.
#'
#' @return A Python corpus-reader object from the \code{nltk} module
#'   (presumably proxied by reticulate -- confirm against the package's
#'   \code{nltk} binding).
#'
#' @name corpus_reader
#'
#' @export
plain_text_corpus_reader <- function(root, pattern = ".*"){
  # `root` has no default; fail fast with a clear message instead of a
  # lazy-evaluation error deep inside the nltk call.
  assert_that(!missing(root), msg = "Missing root")
  # Delegate to NLTK's PlaintextCorpusReader for the given directory.
  nltk$corpus$PlaintextCorpusReader(root, pattern)
}
#' @rdname corpus_reader
#'
#' @export
bracket_parse_corpus_reader <- function(root, pattern = ".*"){
  # Same fail-fast guard as plain_text_corpus_reader: `root` is required.
  assert_that(!missing(root), msg = "Missing root")
  # Delegate to NLTK's BracketParseCorpusReader (bracketed parse trees).
  nltk$corpus$BracketParseCorpusReader(root, pattern)
}
|
0ef4f6788ce2ff658efa6033a1c4bfcfb27d6a44
|
4a2c6f223ff6063640475840209927bf85a9f33b
|
/writeup/Fig2_all_PCA_plots_for_3peaks_Chr2L_byMDS.R
|
a7cbd6e96966d55575f8dd7e432bc8318a7dc3b4
|
[] |
no_license
|
petrelharp/local_pca
|
d69cc4122c381bf981af65a8beb8914fabede4d5
|
abf0c31da5cd74a1de62083580d482f5bd08d7de
|
refs/heads/master
| 2023-06-25T18:12:39.355780
| 2023-06-14T04:39:12
| 2023-06-14T04:39:12
| 47,361,457
| 61
| 13
| null | 2021-02-25T17:20:18
| 2015-12-03T21:23:41
|
HTML
|
UTF-8
|
R
| false
| false
| 1,486
|
r
|
Fig2_all_PCA_plots_for_3peaks_Chr2L_byMDS.R
|
setwd("~/Documents/Drosophila/Chr2L")
c1=as.matrix(read.table("Chr2L_recomchunk_win103_cov1_byMDS_ordered.txt"))
c2=as.matrix(read.table("Chr2L_recomchunk_win103_cov2_byMDS_ordered.txt"))
c3=as.matrix(read.table("Chr2L_recomchunk_win103_cov3_byMDS_ordered.txt"))
c=as.matrix(read.table("cov_data_for_all_samples_seqs_both_low_NAs_Chr2L.txt"))
PCA1=eigen(c1)
Vec1=PCA1$vectors
lam1=PCA1$values
PC11=as.matrix(Vec1[,1])
PC12=as.matrix(Vec1[,2])
PCA2=eigen(c2)
Vec2=PCA2$vectors
lam2=PCA2$values
PC21=as.matrix(Vec2[,1])
PC22=as.matrix(Vec2[,2])
PCA3=eigen(c3)
Vec3=PCA3$vectors
lam3=PCA3$values
PC31=as.matrix(Vec3[,1])
PC32=as.matrix(Vec3[,2])
#PCA=eigen(c)
#Vec=PCA$vectors
#lam=PCA$values
#PC1=as.matrix(Vec[,1])
#PC2=as.matrix(Vec[,2])
origin=colnames(c)
origin1=substring(origin,1,2)
countrys=as.matrix(read.table("population_country.txt"))
for(i in 1:nrow(countrys)){origin1[which(origin1==countrys[i,1])]=countrys[i,2]}
rownames(PC31)=origin1
rownames(PC32)=origin1
pdf(file="Fig2_all_pca_plots_for_Chr2L_3peaks_byMDS.pdf",width=12,height=4)
layout(matrix(c(1,2,3), nrow=1,byrow=TRUE))
group=as.numeric(as.factor(origin1))
par(mar=c(5,4,3,7),xpd=TRUE)
plot(PC11,PC12,pch=group,col=rainbow(16)[group])
plot(PC21,PC22,pch=group,col=rainbow(16)[group],main="Drosophila chromosome 2L")
plot(PC31,PC32,pch=group,col=rainbow(16)[group])
#plot(PC1,PC2,pch=group,col=rainbow(16)[group])
legend(0.068,0.05,pch=1:16,col=rainbow(16),legend=levels(factor(origin1)))
dev.off()
|
883b22a17f2fdccc66d0e0397edda6348953fe3c
|
07aa21831674b145d4e501a85bb329fb37b6a677
|
/R/zzz.R
|
6a54566d082b534784d3b775572cf231ae79e1bd
|
[] |
no_license
|
ifellows/DeducerRDSAnalyst
|
883f598b2ee5216d5016bff55079044c3f8295e9
|
f0ce730ea2a841308760a665451d48b4b9cb34d7
|
refs/heads/master
| 2021-01-10T10:18:55.008911
| 2015-10-13T05:08:12
| 2015-10-13T05:08:12
| 44,152,188
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,022
|
r
|
zzz.R
|
.onLoad <- function(libname, pkgname){
# Package load hook: wires the RDS Analyst Java GUI into Deducer/JGR menus
# and registers the dialog constructors used by those menu items.
# Warnings are silenced for the duration of loading; restored on exit.
ops <- options(warn = -1)
on.exit(options(ops))
# Bail out quietly if Deducer's Java bridge is not initialised
# (.deducer is jnull, or the comparison itself errors).
deducerNotLoaded <- try(.deducer == .jnull(),silent=TRUE)
if(inherits(deducerNotLoaded,"try-error") || deducerNotLoaded)
return(NULL)
# Attach this package's jars and start the Java-side application.
.jpackage(pkgname, lib.loc=libname)
RDSAnalyst <- J("RDSAnalyst.RDSAnalyst")
RDSAnalyst$startUp()
# ---- Deducer menus (available regardless of JGR) ----
deducer.addMenu("RDS Data")
deducer.addMenuItem("Load .rdsat File",,"DeducerRDSAnalyst:::.loadRDSATDialog()","RDS Data")
deducer.addMenuItem("Load .rdsobj File",,"DeducerRDSAnalyst:::.loadRDSOBJDialog()","RDS Data")
deducer.addMenuItem("Save RDS Data",,".getDialog('Save RDS Data')$run()","RDS Data")
deducer.addMenuItem("Save RDS NetDraw",,".getDialog('Save RDS NetDraw')$run()","RDS Data")
deducer.addMenuItem("Save RDS GraphViz",,".getDialog('Save RDS GraphViz')$run()","RDS Data")
deducer.addMenuItem("Save RDS Gephi",,".getDialog('Save RDS Gephi')$run()","RDS Data")
deducer.addMenuItem("Convert: Coupon --> RDS",,".getDialog('Convert: Coupon --> RDS')$run()","RDS Data")
deducer.addMenuItem("Convert: Recruiter ID --> RDS",,".getDialog('Convert: Recruiter ID --> RDS')$run()","RDS Data")
deducer.addMenuItem("Edit Meta Data",,".getDialog('Edit Meta Data')$run()","RDS Data")
deducer.addMenuItem("Compute Weights",,".getDialog('Compute Weights')$run()","RDS Data")
deducer.addMenuItem("Compute Weighted Degree",,".getDialog('Compute Weighted Degree')$run()","RDS Data")
deducer.addMenu("RDS Sample")
deducer.addMenuItem("Plot Recruitment Tree",,".getDialog('Plot Recruitment Tree')$run()","RDS Sample")
deducer.addMenuItem("Diagnostic Plots",,".getDialog('Diagnostic Plots')$run()","RDS Sample")
deducer.addMenuItem("Recruitment Homophily",,".getDialog('Recruitment Homophily')$run()","RDS Sample")
deducer.addMenu("RDS Population")
deducer.addMenuItem("Population Homophily",,".getDialog('Population Homophily')$run()","RDS Population")
deducer.addMenuItem("Differential Activity",,".getDialog('Differential Activity')$run()","RDS Population")
deducer.addMenuItem("Frequency Estimates",,".getDialog('Frequency Estimates')$run()","RDS Population")
deducer.addMenuItem("Descriptive Estimates",,".getDialog('Descriptive Estimates')$run()","RDS Population")
deducer.addMenuItem("Population Crosstabs",,".getDialog('Population Crosstabs')$run()","RDS Population")
deducer.addMenuItem("Test Difference in Proportions",,".getDialog('Test Difference in Proportions')$run()","RDS Population")
deducer.addMenuItem("Test Trend in Proportions",,".getDialog('Test Trend in Proportions')$run()","RDS Population")
deducer.addMenuItem("Prior Distribution",,".getDialog('Prior Distribution')$run()","RDS Population")
deducer.addMenuItem("Posterior Distribution",,".getDialog('Posterior Distribution')$run()","RDS Population")
# ---- JGR menus (only when running inside JGR) ----
if(.jgr){
menus <- jgr.getMenuNames()
# Insert the RDS menus next to "Packages & Data" (fall back to slot 1).
index <- which(menus=="Packages & Data")
if(length(index)==0)
index <- 1
if(RDSAnalyst$isPro()){
# Pro build: add dedicated RDS menus alongside the stock JGR menus.
jgr.insertMenu("RDS Data",index)
jgr.insertMenuItem("RDS Data", "Import RDS Data", "J('RDSAnalyst.DataLoader')$run()",1)
jgr.insertSubMenu("RDS Data","Export RDS Data",
c("RDS Analyst (.rdsobj)","Flat File","Netdraw (.dl, .vna)","GraphViz (.gv)", "Gephi (.gexf)"),
c("J('RDSAnalyst.RDSAnalyst')$runSave('rdsobj')","J('RDSAnalyst.RDSAnalyst')$runSaveFlatDialog()",
"J('RDSAnalyst.RDSAnalyst')$runSave('netdraw')",
"J('RDSAnalyst.RDSAnalyst')$runSave('graphviz')",
"J('RDSAnalyst.RDSAnalyst')$runSave('gephi')"),
3)
jgr.addSubMenu("RDS Data", "Convert to RDS",
c("Coupon Format","Recruiter ID Format"),
c("deducer('Convert: Coupon --> RDS')","deducer('Convert: Recruiter ID --> RDS')"))
jgr.addMenuSeparator("RDS Data")
jgr.addMenuItem("RDS Data",'Edit Meta Data',"deducer('Edit Meta Data')")
jgr.addMenuItem("RDS Data",'Compute Weights',"deducer('Compute Weights')")
jgr.addMenuItem("RDS Data",'Compute Weighted Degree',"deducer('Compute Weighted Degree')")
jgr.insertMenu("RDS Sample",index+1)
jgr.addMenuItem("RDS Sample",'Plot Recruitment Tree',"deducer('Plot Recruitment Tree')")
jgr.addMenuItem("RDS Sample",'Diagnostic Plots',"deducer('Diagnostic Plots')")
jgr.addMenuItem("RDS Sample",'Recruitment Homophily',"deducer('Recruitment Homophily')")
jgr.insertMenu("RDS Population",index+2)
jgr.addMenuItem("RDS Population",'Frequency Estimates',"deducer('Frequency Estimates')")
jgr.addMenuItem("RDS Population",'Descriptive Estimates',"deducer('Descriptive Estimates')")
jgr.addMenuItem("RDS Population",'Population Crosstabs',"deducer('Population Crosstabs')")
jgr.addMenuItem("RDS Population",'Test Difference in Proportions',"deducer('Test Difference in Proportions')")
jgr.addMenuItem("RDS Population",'Test Trend in Proportions',"deducer('Test Trend in Proportions')")
jgr.addMenuSeparator("RDS Population")
jgr.addMenuItem("RDS Population",'Population Homophily',"deducer('Population Homophily')")
jgr.addMenuItem("RDS Population",'Differential Activity',"deducer('Differential Activity')")
jgr.addMenuSeparator("RDS Population")
jgr.addSubMenu("RDS Population","Population Size",c("Prior Knowlege","Posterior Prediction"),
c("deducer('Prior Distribution')","deducer('Posterior Distribution')"))
jgr.addMenuSeparator("Packages & Data")
jgr.addMenuItem("Packages & Data", "Example: faux", "data(faux)", silent=FALSE)
jgr.addMenuItem("Packages & Data", "Example: fauxmadrona", "data(fauxmadrona)", silent=FALSE)
jgr.addMenuItem("Packages & Data", "Example: fauxsycamore", "data(fauxsycamore)", silent=FALSE)
}else{
# Non-pro build: repurpose JGR's stock File/Data menus for RDS.
# NOTE(review): items are removed by numeric position, which is fragile
# against upstream JGR menu changes -- verify the indices still match.
jgr.removeMenuItem("File",7)
jgr.removeMenuItem("File",3)
jgr.removeMenuItem("File",2)
jgr.removeMenuItem("File",1)
jgr.insertMenuItem("File", "Import RDS Data", "J('RDSAnalyst.DataLoader')$run()",1)
jgr.insertSubMenu("File","Export RDS Data",
c("RDS Analyst (.rdsobj)","Flat File","Netdraw (.dl, .vna)","GraphViz (.gv)","Gephi (.gexf)"),
c("J('RDSAnalyst.RDSAnalyst')$runSave('rdsobj')","J('RDSAnalyst.RDSAnalyst')$runSaveFlatDialog()",
"J('RDSAnalyst.RDSAnalyst')$runSave('netdraw')",
"J('RDSAnalyst.RDSAnalyst')$runSave('graphviz')",
"J('RDSAnalyst.RDSAnalyst')$runSave('gephi')"),
3)
jgr.removeMenuItem("Data",10)
jgr.removeMenuItem("Data",9)
jgr.removeMenuItem("Data",8)
jgr.removeMenuItem("Data",7)
jgr.insertMenuItem("Data",'Compute Weights',"deducer('Compute Weights')",4)
jgr.insertMenuItem("Data",'Compute Weighted Degree',"deducer('Compute Weighted Degree')",4)
jgr.addMenuSeparator("Data")
jgr.addMenuItem("Data",'Edit Meta Data',"deducer('Edit Meta Data')")
jgr.addSubMenu("Data", "Convert to RDS",
c("Coupon Format","Recruiter ID Format"),
c("deducer('Convert: Coupon --> RDS')","deducer('Convert: Recruiter ID --> RDS')"))
jgr.removeMenu(5)
# NOTE(review): the 'Frequencies', 'Descriptives' and 'Contingency
# Tables' dialogs referenced below are not registered in this function
# -- confirm they are registered elsewhere (e.g. by Deducer itself).
jgr.insertMenu("Sample",index-2)
jgr.addMenuItem("Sample",'Frequencies',"deducer('Frequencies')")
jgr.addMenuItem("Sample",'Descriptives',"deducer('Descriptives')")
jgr.addMenuItem("Sample",'Contingency Tables',"deducer('Contingency Tables')")
jgr.addMenuSeparator("Sample")
jgr.addMenuItem("Sample",'Recruitment Homophily',"deducer('Recruitment Homophily')")
jgr.insertMenu("Population",index-1)
jgr.addMenuItem("Population",'Frequency Estimates',"deducer('Frequency Estimates')")
jgr.addMenuItem("Population",'Descriptive Estimates',"deducer('Descriptive Estimates')")
jgr.addMenuItem("Population",'Population Crosstabs',"deducer('Population Crosstabs')")
jgr.addMenuItem("Population",'Test Difference in Proportions',"deducer('Test Difference in Proportions')")
jgr.addMenuItem("Population",'Test Trend in Proportions',"deducer('Test Trend in Proportions')")
jgr.addMenuSeparator("Population")
jgr.addMenuItem("Population",'Population Homophily',"deducer('Population Homophily')")
jgr.addMenuItem("Population",'Differential Activity',"deducer('Differential Activity')")
jgr.addMenuSeparator("Population")
jgr.addSubMenu("Population","Population Size",c("Prior Knowlege","Posterior Prediction"),
c("deducer('Prior Distribution')","deducer('Posterior Distribution')"))
jgr.insertMenuSeparator("Plots",1)
jgr.insertMenuItem("Plots",'Plot Recruitment Tree',"deducer('Plot Recruitment Tree')",1)
jgr.insertMenuItem("Plots",'Recruitment Diagnostics',"deducer('Diagnostic Plots')",2)
jgr.addMenuSeparator("Packages & Data")
jgr.addMenuItem("Packages & Data", "Example: faux", "data(faux)", silent=FALSE)
jgr.addMenuItem("Packages & Data", "Example: fauxmadrona", "data(fauxmadrona)", silent=FALSE)
jgr.addMenuItem("Packages & Data", "Example: fauxsycamore", "data(fauxsycamore)",silent=FALSE)
}
jgr.addMenuItem("Help", "RDS Analyst User Manual", "J('org.rosuda.deducer.toolkit.HelpButton')$showInBrowser('http://www.deducer.org/pmwiki/pmwiki.php?n=Main.RDSAnalyst')")
jgr.addMenuItem("Help", "RDS Analyst Reference Manual", "help(package='RDS')")
jgr.addMenuItem("Help", "Citation information", "citation('DeducerRDSAnalyst')",silent=FALSE)
}
# ---- Dialog registrations (looked up by name via .getDialog above) ----
# NOTE(review): the 'Save RDS *' menu items reference dialogs whose
# registrations are commented out below -- confirm whether those menu
# entries are expected to work.
# .registerDialog("Save RDS Data", .makeSaveRDSDataDialog)
# .registerDialog("Save RDS NetDraw", .makeSaveNetDrawDialog)
# .registerDialog("Save RDS GraphViz", .makeSaveGraphVizDialog)
.registerDialog("Convert: Coupon --> RDS", function() .makeConvertDataDialog(TRUE))
.registerDialog("Convert: Recruiter ID --> RDS", function() .makeConvertDataDialog(FALSE))
.registerDialog("Edit Meta Data", .makeMetaDataDialog)
.registerDialog("Compute Weights", .makeComputeWeightsDialog)
.registerDialog("Compute Weighted Degree", .makeComputeWeightedDegreeDialog)
.registerDialog("Population Homophily", function() .makeHomophilyDialog(TRUE))
.registerDialog("Differential Activity", .makeDifferentialActivityDialog)
.registerDialog("Recruitment Homophily", function() .makeHomophilyDialog(FALSE))
.registerDialog("Plot Recruitment Tree", .makePlotRecruitmentTreeDialog)
.registerDialog("Diagnostic Plots", .makePlotDialog)
.registerDialog("Frequency Estimates", .makeFrequencyDialog)
.registerDialog("Descriptive Estimates", .makeDescriptivesDialog)
.registerDialog("Population Crosstabs", .makeContingencyDialog)
.registerDialog("Test Difference in Proportions", .makeTestDiffProportionsDialog)
.registerDialog("Test Trend in Proportions", .makeTestTrendProportionsDialog)
.registerDialog("Prior Distribution", .makePriorDistribution)
.registerDialog("Posterior Distribution", .makePosteriorDistribution)
}
.onAttach <- function(libname, pkgname){
# Attach hook: emit the startup banner (copyright notice, then pointers
# to the citation info and the user manual).
# The first paste() uses sep="" so the copyright lines join exactly as
# written; the two later paste() calls use the default sep=" ".
msg<-paste("copyright (c) 2012, Mark S. Handcock, University of California - Los Angeles\n",
" Ian E. Fellows, Fellows Statistics\n",
" Krista J. Gile, University of Massachusetts - Amherst\n",sep="")
msg<-paste(msg,'For citation information, click "Help > Citation information".\n')
msg<-paste(msg,'To get started, click "Help > RDS Analyst User Manual".\n')
# packageStartupMessage() (not message/cat) so users can suppress it.
packageStartupMessage(msg)
}
|
22d81b1d3d7bbb78c0fe792c6bfd6764911a92d4
|
82d379f9fc52fccdbcc3b27b13137bf4753770f0
|
/R/adapter_content.R
|
084f4548f897c7102ac7af1b9ac4185182a0bfce
|
[] |
no_license
|
compbiocore/qckitfastq
|
8070616f055fb0c69cd65ba32361ba5d9cd93a9a
|
2c7366d5bb81a815134d242d2c4c0371bbb208fd
|
refs/heads/master
| 2020-03-09T21:00:10.217105
| 2019-09-17T20:00:19
| 2019-09-17T20:00:19
| 128,998,398
| 1
| 1
| null | 2018-06-05T21:35:52
| 2018-04-10T21:43:27
|
C++
|
UTF-8
|
R
| false
| false
| 1,684
|
r
|
adapter_content.R
|
#' Creates a sorted from most frequent to least frequent abundance table
#' of adapters that are found to be present in the reads at greater than
#' a given fraction of the reads (default 0.1\%). If output_file is
#' selected then will save the entire set of adapters and counts. Only
#' available for macOS/Linux due to dependency on C++14.
#' @param infile the path to a gzipped FASTQ file
#' @param adapter_file Path to adapters.txt file. Default from package.
#' @param output_file File to save data frame to. Default NA.
#' @param threshold Minimum fraction of reads an adapter must appear in
#'     to be included in the returned table. Default 0.001 (0.1\%),
#'     matching the previously hard-coded behavior.
#' @return Sorted table of adapters and counts.
#'
#' @examples
#' if(.Platform$OS.type != "windows") {
#' infile <- system.file("extdata","test.fq.gz",
#'     package = "qckitfastq")
#' adapter_content(infile)[1:5]
#' }
#' @importFrom utils write.csv
#' @export
adapter_content <- function(infile,
                            adapter_file=system.file("extdata",
                                                     "adapters.txt",
                                                     package = "qckitfastq"),
                            output_file=NA,
                            threshold=0.001){
    # The underlying calc_adapter_content() requires C++14, which this
    # package's Windows toolchain does not provide.
    if(.Platform$OS.type == "windows") {
        stop("This function is not available on Windows due to the lack of C++14 support, sorry.")
    }
    ac <- calc_adapter_content(infile, adapter_file)
    nr <- as.numeric(ac["num_reads"]) # "num_reads" entry carries the read count
    ac <- ac[!names(ac)=="num_reads"] # remove num_reads from named vector
    # Optionally persist the full, unfiltered adapter/count table.
    if (!is.na(output_file)){
        ac_df <- data.frame(adapter=names(ac),count=ac)
        # seq_len() (not seq(1, n)) so a zero-row frame is handled safely.
        rownames(ac_df) <- seq_len(nrow(ac_df))
        write.csv(file=output_file,ac_df)
    }
    # Keep only adapters seen in more than `threshold` of all reads,
    # most frequent first.
    ac_table <- ac[ac > threshold*nr]
    sort(ac_table,decreasing=TRUE)
}
|
33fa0095c3eda424f8e3c437adb02929d03aa00a
|
c88b0cbeda0edf9e745e324ef942a504e27d4f87
|
/longevity/eLife revision/2_wildImport.R
|
80e8f11136ac74d06f2ade7918f89324a514c50a
|
[] |
no_license
|
Diapadion/R
|
5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27
|
1485c43c0e565a947fdc058a1019a74bdd97f265
|
refs/heads/master
| 2023-05-12T04:21:15.761115
| 2023-04-27T16:26:35
| 2023-04-27T16:26:35
| 28,046,921
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
r
|
2_wildImport.R
|
### Wild chimp data
### Builds a combined captive/wild chimpanzee survival dataset and the
### Surv object `y.wild`. Depends on `datX` (captive data) and the
### survival package's Surv() already being available in the session.
library(lubridate)
wch <- read.csv(file = 'WildChimp_biography.csv')
# Parse birth/departure dates; age in years = weeks / 52.25.
wch$birthdate = as.Date(wch$birthdate,format='%Y-%m-%d')
wch$departdate = as.Date(wch$departdate,format='%Y-%m-%d')
wch$age = difftime(wch$departdate,wch$birthdate, units = 'weeks')/52.25
# Drop zero-age records (presumably placeholder/identical dates -- verify).
wch = wch[wch$age!=0,]
# departtype 'D' marks a death; convert to a logical event indicator.
wch$departtype = (wch$departtype=='D')
wch$sample = 'Wild'
# NOTE(review): magic column index -- column 18 is assumed to be
# departtype; verify if the CSV layout ever changes.
colnames(wch)[18] = 'status'
# Keep only observed deaths.
wch = wch[wch$status==TRUE,]
# Stack captive (datX) and wild records on the shared columns.
wch = rbind(datX[,c('age','status','sample')],wch[,c('age','status','sample')])
wch$sample[wch$sample!= 'Wild'] = '6'
# NOTE(review): droplevels()/levels() below assume `sample` is a factor,
# but the wild rows were assigned as character above -- confirm that
# datX$sample is a factor and that rbind yields a factor column here.
wch$sample = droplevels(wch$sample)
levels(wch$sample) = c('Captive','Wild')
colnames(wch)[3] = 'Sample'
# Survival object: age in years with the death indicator as the event.
y.wild <- Surv(as.numeric(wch$age),wch$status)
|
00f8c8fe7d2fef33ac9b95952d196808f99db1aa
|
af5fdc8473585759439e661c55a83628f1a87359
|
/R/print.kResults.R
|
3586bd32c9e6b6501ff7b61b73eccb0d382573df
|
[] |
no_license
|
ljzhao007/mnspc
|
f3790be940d0d15c49565eea1eb9c1f62726cc6a
|
25313d150768afcb692bbf0d0eda20ceef8dce6d
|
refs/heads/master
| 2021-03-28T18:27:59.900273
| 2011-02-21T00:00:00
| 2011-02-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 498
|
r
|
print.kResults.R
|
# S3-style print routine for kResults: hands two sets of (k, h, ARL0,
# ARL1) design values to the compiled C routine "printkResults", which
# does the actual formatting/printing.
print.kResults<-function(all.results.sarl1, k.results)
{
# Values from the all.results.sarl1 object (".1" suffixed fields).
p.k<-all.results.sarl1$k.1
p.h<-all.results.sarl1$h.1
p.arl0<-all.results.sarl1$arl0.1
p.arl1<-all.results.sarl1$arl1.1
# Values from the k.results object.
h<-k.results$h
k<-k.results$k
arl1<-k.results$arl1
arl0<-k.results$arl0.actual
# Number of rows to print, taken from the length of p.h.
nrowin<-length(p.h)
# NOTE(review): in the call below, `arl0=as.double(arl1)` and
# `arl1=as.double(arl0)` pass the two ARL vectors swapped relative to
# their argument names. .C matches by position, so this may be a latent
# bug -- verify against the C signature of printkResults before fixing.
.C("printkResults", p.k=as.double(p.k),p.h=as.double(p.h), p.arl0=as.double(p.arl0), pr.arl1=as.double(p.arl1), k=as.double(k), h=as.double(h), arl0=as.double(arl1), arl1=as.double(arl0), nrowin=as.integer(nrowin))
}
|
23a6238e2db4c1c4060b8c1416a9b781cbbbf034
|
4bf18671ead81f5355aeed1a9e481bda8d708b80
|
/Course2/Week 4/rankhospital.R
|
982b0883d332828adb52bbd7896708d161f78625
|
[] |
no_license
|
erjicles/datasciencecoursera
|
2bf23c76528eb770f54a26b7a485e5e74244a716
|
42092b3669137261f03709bb0f9a1db9ecb4c45f
|
refs/heads/master
| 2021-06-26T03:46:56.797749
| 2019-05-28T19:49:49
| 2019-05-28T19:49:49
| 104,016,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,105
|
r
|
rankhospital.R
|
## Week 4 Programming Assignment
## Takes three arguments: the 2-character abbreviated name of a state (state),
## an outcome (outcome), and the ranking of a hospital in that state for that
## outcome (num).
## Reads the outcome-of-care-measures.csv file and returns a character vector
## with the name of the hospital that has the ranking specified by the num
## argument. Stops with "invalid state" / "invalid outcome" / "invalid num"
## on bad input; returns NA when num exceeds the number of ranked hospitals.
## Test cases:
## rankhospital("TX", "heart failure", 4)
## "DETAR HOSPITAL NAVARRO"
## rankhospital("MD", "heart attack", "worst")
## "HARFORD MEMORIAL HOSPITAL"
## rankhospital("MN", "heart attack", 5000)
## NA
rankhospital <- function(state, outcome, num = "best") {
    ## Normalise arguments so comparisons are case-insensitive
    outcome_lower <- tolower(outcome)
    state_lower <- tolower(state)
    num_lower <- tolower(num)

    ## Read outcome data (column 7 = state, column 2 = hospital name)
    outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

    ## Keep only rows for the requested state; none means invalid state
    state_outcome_data <- outcome_data[tolower(outcome_data[, 7]) == state_lower, ]
    if (nrow(state_outcome_data) == 0) {
        stop("invalid state")
    }

    ## Map the outcome to its 30-day mortality-rate column
    outcome_column <- switch(outcome_lower,
                             "heart attack"  = 11,
                             "heart failure" = 17,
                             "pneumonia"     = 23,
                             stop("invalid outcome"))
    state_outcome_data <- state_outcome_data[, c(2, outcome_column)]

    ## Drop "Not Available", coerce the rate to numeric, and drop NAs
    state_outcome_data <- state_outcome_data[state_outcome_data[, 2] != "Not Available", ]
    state_outcome_data[, 2] <- as.numeric(state_outcome_data[, 2])
    state_outcome_data <- state_outcome_data[!is.na(state_outcome_data[, 2]), ]

    ## Rank by mortality rate, ties broken alphabetically by hospital name
    state_outcome_data <- state_outcome_data[order(state_outcome_data[, 2],
                                                   state_outcome_data[, 1]), ]

    ## Resolve the requested rank.
    ## BUG FIX: the previous check `!is.integer(as.integer(num_lower))` could
    ## never be TRUE (as.integer always returns integer, possibly NA), so a
    ## non-numeric num such as "foo" produced NA and a cryptic error in the
    ## comparison below instead of the intended "invalid num".
    if (num_lower == "best") {
        rank_to_check <- 1L
    } else if (num_lower == "worst") {
        rank_to_check <- nrow(state_outcome_data)
    } else {
        rank_to_check <- suppressWarnings(as.integer(num_lower))
    }
    if (is.na(rank_to_check) || rank_to_check < 1) {
        stop("invalid num")
    }
    if (rank_to_check > nrow(state_outcome_data)) {
        return(NA)
    }

    ## Hospital name at the requested rank
    state_outcome_data[rank_to_check, 1]
}
|
33274f6012819340237d7f67c9f1461d4d10bf88
|
136a8eb2d2a782dda9519b84fe192028813a2a5d
|
/code/sql_cfpb_df.R
|
62836a66fb82bdad422e1cf7152699f1ad775090
|
[] |
no_license
|
zoeang/rulemaking
|
c979593d9b70eade8c0c524ac3b4b7ebc0c3bf6f
|
9acd9a3fbbc1023a8161c352d87d9ec90fecf00f
|
refs/heads/master
| 2022-10-28T15:02:04.163504
| 2022-07-15T22:21:25
| 2022-07-15T22:21:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,331
|
r
|
sql_cfpb_df.R
|
# Pulls CFPB comments from the regs_dot_gov SQLite database, deduplicates
# them (by document_id, resolving conflicting start/due/posted dates),
# normalises the columns, and subsets to Davis Polk Dodd-Frank rules.
# Interactive/exploratory script: many bare expressions (head, nrow, ...)
# exist only to print intermediate results at the console.
# Create RSQLite database
library(DBI)
# install.packages("RSQLite")
# NOTE(review): the bare `1` below is a harmless no-op, presumably a
# leftover answer to an interactive install prompt -- safe to delete.
1
library(RSQLite)
library(tidyverse)
library(magrittr)
# Create RSQLite database
con <- dbConnect(RSQLite::SQLite(), here::here("db", "regs_dot_gov.sqlite"))
# fetch results:
comments_cfpb <- dbGetQuery(con, "SELECT * FROM comments WHERE agency_acronym = 'CFPB'")
head(comments_cfpb)
# every document_id should contain docket_id
sum(str_detect(comments_cfpb$document_id, comments_cfpb$docket_id))
nrow(comments_cfpb)
# Refining for uniqueness
d <- comments_cfpb
names(d)
# duplicates
# NOTE(review): `sort = T` uses the reassignable shortcut T; prefer TRUE.
d %>% distinct(docket_id, comment_start_date) %>%
count(docket_id, sort = T) %>% filter(n>1)
# oddly there are duplicate comments with only different start dates
# fill up and down by docket
d$comment_start_date %<>% as.Date()
d %<>% group_by(docket_id) %>%
fill(comment_start_date, .direction = "downup")
# replace NAs so that we can slice the max date
d$comment_start_date %<>% replace_na(as.Date("2000-01-01"))
# FIXME I use comment_start_date to merge in fr_document_id below; selecting max here may leave some without a match below. Hopefully we can get fr_doc id from the new regulations.gov metatdata
nrow(d)
# Keep one row per document_id: the one with the latest start date.
d %<>% group_by(document_id) %>%
slice_max(comment_start_date) %>%
ungroup()
nrow(d)
# problem observations
c("CFPB-2011-0008", "CFPB-2013-0001") %in% d$docket_id
d %>% filter(docket_id %in% c("CFPB-2011-0008", "CFPB-2013-0001")) %>%
distinct(docket_id, comment_start_date) #%>% slice_max(comment_start_date)
d %>% filter(docket_id %in% c("CFPB-2011-0008", "CFPB-2013-0001")) %>%
distinct(document_id, docket_id)
# some dockets have more than one due date due to comment period extensions
d$comment_due_date %<>% as.Date()
d %<>% group_by(docket_id) %>%
fill(comment_due_date, .direction = "downup")
# replace NAs so that we can slice the max date
d$comment_due_date %<>% replace_na(as.Date("2000-01-01"))
dim(d)
# Same dedup step, now keyed on the latest due date.
d %<>% group_by(document_id) %>%
slice_max(comment_due_date)
dim(d)
c("CFPB-2011-0008", "CFPB-2013-0001") %in% d$docket_id
# oddly, some comments have more than one posted date; maybe they were updated between my scrapes?
d$posted_date %<>% as.Date()
d %<>% group_by(docket_id) %>%
fill(posted_date, .direction = "downup")
# replace NAs so that we can slice the max date
d$posted_date %<>% replace_na(as.Date("2000-01-01"))
# Final dedup pass on the latest posted date.
d %<>% group_by(document_id) %>%
slice_max(posted_date) %>%
ungroup()
c("CFPB-2011-0008", "CFPB-2013-0001") %in% d$docket_id
# check for duplicate urls (primary key to attachments table)
look <- d %>%
add_count(document_id, sort = T) %>%
filter(n >1) %>%
arrange(document_id)
# inspect duplicates
head(look)
comments_cfpb <- d %>% ungroup()
#/dedupe
# Normalise columns: rename, select the kept fields, and derive
# source/comment_url.
comments_cfpb %<>% rename(comment_title = title)
comments_cfpb %<>% select(#fr_document_id,
agency_acronym,
rin,
docket_id,
docket_title,
attachment_count,
document_id,
posted_date,
submitter_name,
comment_title,
organization,
#comment_url,
#late_comment,
comment_start_date,
comment_text
) %>%
mutate(source = "regulations.gov",
comment_url = str_c("https://www.regulations.gov/document/", document_id)
) %>%
distinct()
names(comments_cfpb)
# check for dupes
comments_cfpb %>%
add_count(comment_url) %>%
filter(n > 1) %>%
select(submitter_name, rin, posted_date) %>%
head() %>% knitr::kable()
###################################################
# Subset to Davis Polk Dodd-Frank rules
# Dodd-Frank rules from Davis Polk Data
df <- read_csv(here::here("data", "dockets_to_scrape.csv"))
names(df)
head(df)
# Unique RINs and docket identifiers to match against.
df_rins <- df$RIN %>% na.omit() %>% unique()
df_dockets <- df$identifier %>% na.omit() %>% unique()
# dockets with multiple fr docs
df %>% count(identifier, sort = T)
# Subset to Dodd-Frank rules
# overinclusive subset?
comments_cfpb_df <- comments_cfpb %>% ungroup() %>%
filter(docket_id %in% df_dockets | rin %in% df_rins)
# rins not in dockets to scrape
comments_cfpb_df %>%
filter(!rin %in% df_rins, rin != "Not Assigned") %>%
select(docket_id, rin) %>%
distinct()
# dockets not in dockets to scrape
comments_cfpb_df %>%
filter(!docket_id %in% df_dockets) %>%
select(docket_id, rin) %>%
distinct() %>% knitr::kable()
# FIXME - investigate these
# |docket_id |rin |
# |:--------------|:------------|
# |CFPB-2011-0040 |Not Assigned |
# |CFPB-2014-0014 |7100-AD68 |
# |CFPB-2015-0004 |3170-AA43 |
# |CFPB-2016-0016 |3170-AA49 |
comments_cfpb_df$docket_id %>% unique()
comments_cfpb_df$rin %>% unique()
# look back to see how many we matched
matched <- df %>%
filter(RIN %in% na.omit(comments_cfpb_df$rin) | identifier %in% na.omit(comments_cfpb_df$docket_id))
unmatched <- df %>% anti_join(matched) %>% filter(agency == 'CFPB')
unmatched %>%
select(identifier) %>%
distinct()
# # 0 comments
# RIN identifier
# <chr> <chr>
# 1 NA CFPB-2013-0038
# 2 3170-AA30 CFPB-2012-0040
# 3 NA CFPB-2014-0030
# 4 3170-AA36 CFPB-2013-0006
# 5 NA CFPB-2012-0042
# 6 NA CFPB-2013-0034
# 7 NA CFPB-2017-0026
# 8 NA CFPB-2012-0043
# 9 NA CFPB-2013-0035
# 10 NA CFPB-2017-0027
# THESE TWO HAD MISSING comment_start, end, or posted date, now fixed
# 11 CFPB-2011-0008 26 comments
# 12 CFPB-2013-0001 1 comment
############################
names(all)
all %>% filter(docketId %in% c("CFPB-2011-0008", "CFPB-2013-0001")) %>%
distinct(docketId, agencyAcronym)
c("CFPB-2011-0008", "CFPB-2013-0001") %in% df_dockets
c("CFPB-2011-0008", "CFPB-2013-0001") %in% all$docketId
c("CFPB-2011-0008", "CFPB-2013-0001") %in% d$docket_id
c("CFPB-2011-0008", "CFPB-2013-0001") %in% comments_cfpb$docket_id
unmatched %>% filter(identifier %in% c("CFPB-2011-0008", "CFPB-2013-0001"))
# Create RSQLite database
con <- dbConnect(RSQLite::SQLite(), here::here("db", "comment_metadata_CFPB_df.sqlite"))
# check
list.files("db")
dbListTables(con)
dbWriteTable(con, "comments", comments_cfpb_df, overwrite = T)
dbListTables(con)
# check for unique comment urls
d <- dbGetQuery(con, "SELECT * FROM comments WHERE agency_acronym = 'CFPB'")
nrow(d)
d %>% count(comment_url, sort = T) %>% filter(n>1)
d %>% count(docket_title, sort = T) %>% head(10) %>% knitr::kable()
dbDisconnect(con)
# Create RSQLite database
con <- dbConnect(RSQLite::SQLite(), here::here("db", "metadata_CFPB_df.sqlite"))
# check
list.files("db")
dbListTables(con)
dbWriteTable(con, "comments", comments_cfpb_df, overwrite = T)
dbListTables(con)
dbWriteTable(con, "actions", actions_cfpb %>% select(-fr_document_id_length, -fr_number), overwrite = T)
dbListTables(con)
dbListFields(con, "comments")
dbListFields(con, "actions")
# fetch results:
dbGetQuery(con, "SELECT * FROM actions WHERE docket_id = 'CFPB-2012-0029'") %>% head()
dbGetQuery(con, "SELECT * FROM comments WHERE docket_id = 'CFPB-2012-0029'") %>% head()
dbDisconnect(con)
|
546dbd2f7ced49ad6bde398581593b9076a40b8e
|
e1c85152190571d098cb195b331556e4ca9c1c06
|
/Rpackages/gemtcPlus/man/nma_fit.Rd
|
bea3eaaac92fb16667d37c24eee9e191d00eb2e0
|
[
"Apache-2.0"
] |
permissive
|
Diarmuid78/Global-HTA-Evidence-Open
|
4227ad9be6012a4083aba741827abe4e648fac37
|
050767457dc82da1d5a7f14967c72d626615effb
|
refs/heads/master
| 2023-06-03T23:10:56.353787
| 2021-06-29T11:54:50
| 2021-06-29T11:54:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 811
|
rd
|
nma_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nma_fit.R
\name{nma_fit}
\alias{nma_fit}
\title{Takes input data and a model plan and passes it to the specified model engine.
Currently supported engines are the `gemtc` package (using mtc.model & mtc.run) or `rjags` (using the jags and dic.samples functions)}
\usage{
nma_fit(model_input)
}
\arguments{
\item{model_input}{a list containing named elements fitting_data (data which has been pre-processed) and plan (list object containing all input parameters)}
}
\value{
model object of class `rjags` or `mtc.result`
}
\description{
Takes input data and a model plan and passes it to the specified model engine.
Currently supported engines are the `gemtc` package (using mtc.model & mtc.run) or `rjags` (using the jags and dic.samples functions)
}
|
024e27b171369e2270add8ff8f2cde0bbf77e8dd
|
fad3e5382d23eb599a34b67d7e7f6bdddbe218e7
|
/multidots.R
|
edca044b4b3ec9df9bcb05a2e2aca3daadc919f0
|
[] |
no_license
|
rrbm823/maps
|
aa4a68f62c70658465bf5044ea009f18d999a233
|
5bb7432e21a6d06a2e5f845918cbb4b8e2fcda66
|
refs/heads/master
| 2016-09-10T17:21:53.634437
| 2014-10-25T18:59:26
| 2014-10-25T18:59:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,004
|
r
|
multidots.R
|
# multidots.R
# Reads every CSV in `inputsource` (one point dataset per file), overlays the
# points on a US polygon map (PBSmapping), shades polygons by the point counts
# of the second dataset, and saves the figure as a JPEG in `outputsource`.
# Assumes each CSV's last column is X (longitude) and second-to-last is Y
# (latitude), matching the original column-position convention.
outputsource<-"C:\\Users\\Erik J\\Documents\\R\\output"
inputsource<-"C:\\Users\\Erik J\\Documents\\R\\test3"
shapefilepath<-"C:\\Users\\Erik J\\Documents\\R\\spatial\\shapefiles"
outputfile<-"multidots.jpg"
#########################
library(maptools)
library(PBSmapping)
setwd(inputsource)
# FIX: anchor the pattern -- "." in a regex matches any character, so the old
# pattern ".csv" also matched names such as "dataXcsv.txt".
a <- list.files(path = inputsource, pattern = "\\.csv$")
ae <- length(a)
# lapply() already returns a list; the former do.call("list", ...) wrapper was redundant.
agglistorig <- lapply(a, read.csv, header = TRUE)
shapelist <- agglistorig
# Rebuild each dataset as an X/Y/EID event table (last column = X,
# second-to-last = Y, row number = event ID).
for (i in seq_len(ae))
{
nc <- ncol(shapelist[[i]])
nr <- nrow(shapelist[[i]])
shapelist[[i]] <- cbind(shapelist[[i]][, nc], shapelist[[i]][, nc - 1], matrix(seq_len(nr), nr))
shapelist[[i]] <- data.frame(shapelist[[i]])
colnames(shapelist[[i]]) <- c("X", "Y", "EID")
}
addressEvents <- vector("list", ae)
for (i in seq_len(ae))
{
addressEvents[[i]] <- as.EventData(shapelist[[i]], projection = NA)
}
# FIX: "teal" is not a built-in R colour name (see grDevices::colours()), so
# addPoints() errored whenever 8+ CSVs were present; "turquoise4" is the
# closest valid named colour.
colornames <- c("blue","red","yellow","green","orange","purple","brown","turquoise4","pink")
color <- colornames[seq_len(ae)]
setwd(shapefilepath)
myshpfl <- read.csv("worldshapefileunifiedus.csv")
# Assign each event to the polygon that contains it.
addressPolys <- vector("list", ae)
for (i in seq_len(ae))
{
addressPolys[[i]] <- findPolys(addressEvents[[i]], myshpfl)
}
usmap <- plotPolys(myshpfl, xlim = c(-125, -67), ylim = c(20, 60), axes = FALSE, bg = "white", xlab = "", ylab = "", col = "white")
# Per-polygon event counts for every dataset.
# NOTE(review): the polygon fill below is driven by dataset 2 only
# (myTrtFC[[2]]), so the script still requires at least two CSVs -- confirm
# this is intended.
myTrtFC <- vector("list", ae)
for (i in seq_len(ae))
{
myTrtFC[[i]] <- table(factor(addressPolys[[i]]$PID, levels = levels(as.factor(usmap$PID))))
}
mapColors <- heat.colors(max(myTrtFC[[2]]) + 1, alpha = .6)[max(myTrtFC[[2]]) - myTrtFC[[2]] + 1]
# Remap the two extreme heat-map hex codes to softer named colours.
mapcolors1 <- gsub("#FFFFFE99", "gray75", mapColors)
mapcolors2 <- gsub("#FF000099", "lightgoldenrodyellow", mapcolors1)
usmapcol <- plotPolys(myshpfl, xlim = c(-125, -67), ylim = c(20, 60), axes = FALSE, bg = "skyblue1", xlab = "", ylab = "", col = mapcolors2)
# First pass: draw each dataset full size; second pass: redraw in reverse
# order at shrinking sizes so overlapping datasets stay visible.
for (i in seq_len(ae))
{
addPoints(addressEvents[[i]], pch = 16, col = color[i], cex = .5)
}
for (i in seq_len(ae))
{
addPoints(addressEvents[[ae - i + 1]], pch = 16, col = color[ae - i + 1], cex = (.5 / (i + 1)))
}
legend("bottomright", cex = 0.75, pch = 16, col = colornames[seq_len(ae)], legend = c(a[seq_len(ae)]), ncol = 2, bg = "white")
setwd(outputsource)
savePlot(outputfile, type = "jpg")
|
95105c2bc4200b71dd63919ec6a8f594661a3a0e
|
cb20852e1607f6145194199bcca82b040090edd4
|
/code/helpers/_helpers_big_query_tables.R
|
35b1241f54c97216dcddb17edf2ac348ad4f72f6
|
[
"MIT"
] |
permissive
|
yangchuhua/gtex-gwas-analysis
|
7f2f60950ae362ffd1dc8734804dfe36d230cef6
|
300aa123dd769ab175fbf7bb9b26c9e3e4439fa0
|
refs/heads/master
| 2023-07-04T16:50:41.747961
| 2021-08-06T17:35:09
| 2021-08-06T17:35:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,250
|
r
|
_helpers_big_query_tables.R
|
# derived from Rodrigo's code
# This script defines some objects containing the information
# necessary to connect to each of the BQ tables.
# Build a connection descriptor for a single BigQuery table.
#
# Args:
#   dataset: BigQuery dataset name.
#   table:   table name within the dataset.
#   project: Google Cloud project that owns the dataset.
# Returns:
#   A named list with elements `project`, `dataset_name`, `table_name`
#   (no connection is opened here; this is metadata only).
tableInfo <- function(dataset = "GTEx_V8_ElasticNet_EUR_2018_07_05",
                      table = "predixcan_results",
                      project = "gtex-awg-im") {
  list(
    project = project,
    dataset_name = dataset,
    table_name = table
  )
}
########################## DEFINITION OF BigQuery TABLES ########################
# Every object below is a lightweight descriptor list (project / dataset_name /
# table_name) built by tableInfo(); nothing here opens a BigQuery connection.
# Naming convention: <analysis>_<model>_tbl_<qtl type>, with "_count" suffixes
# for the row-count companion tables.
# elastic net models and gene-level associations
gwas_tbl <- tableInfo("GWAS_all", "gwas")
gwas_tbl_count <- tableInfo("GWAS_all", "gwas_results_count")
gwas_formatted_tbl <- tableInfo("GWAS_all", "formatted_gwas")
gwas_imputation_verification_tbl <- tableInfo("GWAS_all", "gwas_imputation_verification")
#elastic net predixcan GTEX v7
v7_prediction_en_models_tbl_eqtl <- tableInfo("GTEx_V7_HapMap_2017_11_29", "weights")
v7_prediction_en_models_extra_tbl_eqtl <- tableInfo("GTEx_V7_HapMap_2017_11_29", "extra")
#elastic net predixcan
prediction_en_models_tbl_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "weights_eqtl")
prediction_en_models_extra_tbl_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "extra_eqtl")
prediction_en_models_tbl_sqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "weights_sqtl")
prediction_en_models_extra_tbl_sqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "extra_sqtl")
predixcan_en_tbl_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "spredixcan_eqtl")
predixcan_en_tbl_count_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "spredixcan_eqtl_count")
predixcan_en_tbl_sqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "spredixcan_sqtl")
predixcan_en_tbl_count_sqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "spredixcan_sqtl_count")
multixcan_en_tbl_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "smultixcan_eqtl")
multixcan_en_tbl_count_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "smultixcan_eqtl_count")
multixcan_en_tbl_sqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "smultixcan_sqtl")
multixcan_en_tbl_count_sqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "smultixcan_sqtl_count")
#elastic net predixcan without palindromic
predixcan_en_np_tbl_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "spredixcan_eqtl_np")
predixcan_en_np_tbl_count_eqtl <- tableInfo("GTEx_V8_ElasticNet_EUR_v1", "spredixcan_eqtl_np_count")
#mashr predixcan
prediction_mashr_models_tbl_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "weights_eqtl")
prediction_mashr_models_extra_tbl_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "extra_eqtl")
prediction_mashr_models_tbl_sqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "weights_sqtl")
prediction_mashr_models_extra_tbl_sqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "extra_sqtl")
predixcan_mashr_tbl_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_eqtl")
predixcan_mashr_tbl_count_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_eqtl_count")
predixcan_mashr_tbl_sqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_sqtl")
predixcan_mashr_tbl_count_sqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_sqtl_count")
multixcan_mashr_tbl_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "smultixcan_eqtl")
multixcan_mashr_tbl_count_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "smultixcan_eqtl_count")
multixcan_mashr_tbl_sqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "smultixcan_sqtl")
multixcan_mashr_tbl_count_sqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "smultixcan_sqtl_count")
#mashr with harmonized(unimputed) gwas
predixcan_mashr_hq_tbl_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_eqtl_hq")
predixcan_mashr_hq_tbl_count_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_eqtl_hq_count")
predixcan_mashr_hn_tbl_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_eqtl_hn")
predixcan_mashr_hn_tbl_count_eqtl <- tableInfo("GTEx_V8_PF_MASHR_EUR_v1", "spredixcan_eqtl_hn_count")
#EN-DAPGW predixcan
prediction_en_dapgw_models_tbl_eqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "weights_eqtl")
prediction_en_dapgw_models_extra_tbl_eqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "extra_eqtl")
prediction_en_dapgw_models_tbl_sqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "weights_sqtl")
prediction_en_dapgw_models_extra_tbl_sqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "extra_sqtl")
predixcan_en_dapgw_tbl_eqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "spredixcan_eqtl")
predixcan_en_dapgw_tbl_count_eqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "spredixcan_eqtl_count")
predixcan_en_dapgw_tbl_sqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "spredixcan_sqtl")
predixcan_en_dapgw_tbl_count_sqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "spredixcan_sqtl_count")
multixcan_en_dapgw_tbl_eqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "smultixcan_eqtl")
multixcan_en_dapgw_tbl_count_eqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "smultixcan_eqtl_count")
multixcan_en_dapgw_tbl_sqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "smultixcan_sqtl")
multixcan_en_dapgw_tbl_count_sqtl <- tableInfo("GTEx_V8_ENDAPGW_EUR_v1", "smultixcan_sqtl_count")
#CTIMP
prediction_ctimp_models_tbl_eqtl <- tableInfo("GTEx_V8_PF_CTIMP_EUR_v1", "weights_eqtl")
prediction_ctimp_models_extra_tbl_eqtl <- tableInfo("GTEx_V8_PF_CTIMP_EUR_v1", "extra_eqtl")
predixcan_ctimp_tbl_eqtl <- tableInfo("GTEx_V8_PF_CTIMP_EUR_v1", "spredixcan_eqtl")
predixcan_ctimp_tbl_count_eqtl <- tableInfo("GTEx_V8_PF_CTIMP_EUR_v1", "spredixcan_eqtl_count")
#CTIMP without palindromic
predixcan_ctimp_np_tbl_eqtl <- tableInfo("GTEx_V8_PF_CTIMP_EUR_v1", "spredixcan_eqtl_np")
predixcan_ctimp_np_tbl_count_eqtl <- tableInfo("GTEx_V8_PF_CTIMP_EUR_v1", "spredixcan_eqtl_np_count")
# conditional analysis (LDACC)
CA_eqtl_tbl <- tableInfo("GTEx_V8_ConditionalAnalysis_2018_10_05", "eqtl_analysis")
CA_gwas_tbl <- tableInfo("GTEx_V8_ConditionalAnalysis_2018_10_05", "gwas_results")
CA_eqtl_and_gwas_tbl <- tableInfo("GTEx_V8_ConditionalAnalysis_2018_10_05", "gwas_and_eqtl")
# DAPG
#DAPG_eqtl_tbl <- tableInfo("GTEx_V8_DAPG_2018_10_05", "eqtl_analysis")
DAPG_eqtl_tbl <- tableInfo("GTEx_V8_DAPG_2018_10_05", "eqtl_analysis")
DAPG_gwas_tbl <- tableInfo("GTEx_V8_DAPG_2018_10_05", "gwas_results")
DAPG_eqtl_and_gwas_tbl <- tableInfo("GTEx_V8_DAPG_2018_10_05", "gwas_and_eqtl")
# NOTE(review): the eqtl clusters point at dataset "GTEx_V8_DAPG_EUR_v1" but the
# sqtl clusters at "GTEx_V8_DAPG" -- confirm the asymmetry is intentional.
DAPG_eqtl_clusters <- tableInfo("GTEx_V8_DAPG_EUR_v1", "clusters_eqtl")
DAPG_sqtl_clusters <- tableInfo("GTEx_V8_DAPG", "clusters_sqtl")
# colocalization results
coloc_tbl_eqtl <- tableInfo("GTEx_V8_COLOC", "coloc_with_enloc_priors")
enloc_tbl_eqtl <- tableInfo("GTEx_V8_ENLOC", "enloc_all_results")
enloc_tbl_eqtl_eur <- tableInfo("GTEx_V8_ENLOC_v1", "enloc_eqtl_eur")
enloc_tbl_sqtl_eur <- tableInfo("GTEx_V8_ENLOC_v1", "enloc_sqtl_eur")
# annotations and other metadata
ensembl_collapsed_annotations_tbl <- tableInfo("annotations", "ensembl_collapsed")
gene_essentiality_annotation_tbl <- tableInfo("annotations", "human_gene_essentiality_scores")
gencode_all_annotation_tbl <- tableInfo("annotations", "gencode_v26_all")
gencode_annotation_tbl <- tableInfo("annotations", "gencode_v26")
intron_annotation_tbl <- tableInfo("annotations", "introns")
gtex_sample_size_tbl <- tableInfo("annotations", "sample_size")
intron_gene_mapping_tbl <- tableInfo("annotations", "intron_gene_map")
gwas_metadata_tbl <- tableInfo("GTEx_V8_metadata", "gwas_metadata")
trait_metadata_tbl <- tableInfo("GTEx_V8_metadata", "phenotype_classes_colors")
gtex_tissue_metadata_tbl <- tableInfo("GTEx_V8_metadata", "gtex_tissue_metadata")
# miscellaneous
ld_independent_regions_tbl <- tableInfo("miscellaneous", "ld_independent_regions")
ld_independent_regions_2_tbl <- tableInfo("annotations", "ld_independent_regions_2")
gwas_catalog_tbl <- tableInfo("miscellaneous", "gwas_catalog_v102")
|
75094346c0b6e26d55dd3280832c899c268772ec
|
61c091c21d06b7c61f35a24d4fe3d8882e9fb254
|
/man/PlotSexRatioStrata.fn.Rd
|
e06197b46ef1fb9516d5b286a55b8325d61aca50
|
[] |
no_license
|
pfmc-assessments/nwfscSurvey
|
b3be76b410bdc5dae168e84d2ee1a2c64c98e098
|
423800ecb91137cba1587ac19226a3ebb8d50c2d
|
refs/heads/main
| 2023-07-28T08:35:55.810331
| 2023-07-20T17:10:25
| 2023-07-20T18:17:33
| 26,344,817
| 4
| 2
| null | 2023-07-20T17:31:58
| 2014-11-08T00:38:17
|
R
|
UTF-8
|
R
| false
| true
| 1,228
|
rd
|
PlotSexRatioStrata.fn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotSexRatioStrata.fn.R
\name{PlotSexRatioStrata.fn}
\alias{PlotSexRatioStrata.fn}
\title{Function to plot sex ratio by strata}
\usage{
PlotSexRatioStrata.fn(
dir = NULL,
dat,
type = "length",
strat.vars = c("Depth_m", "Latitude_dd"),
strat.df = NULL,
circleSize = 0.05,
dopng = lifecycle::deprecated(),
...
)
}
\arguments{
\item{dir}{directory location for saving the png}
\item{dat}{data object}
\item{type}{length/age which data type to use}
\item{strat.vars}{the parameters to stratify the data}
\item{strat.df}{the created strata matrix with the calculated areas by the createStrataDF.fn function}
\item{circleSize}{circle size}
\item{dopng}{Deprecated with \pkg{nwfscSurvey} 2.1 because providing a non-NULL
value to \code{dir} can serve the same purpose as \code{dopng = TRUE} without the
potential for errors when \code{dopng = TRUE} and \code{dir = NULL}. Thus, users
no longer have to specify \code{dopng} to save the plot as a png.}
\item{...}{Additional arguments for the plots}
}
\description{
Function to plot sex ratio by strata
}
\seealso{
\code{\link{StrataFactors.fn}}
}
\author{
Allan Hicks and Chantel Wetzel
}
|
b502968b47730ed775715fc9add34975b30ec63c
|
404ad3e1c95f0b503d3ed259f3c364ec120d233d
|
/R/archive_extract.R
|
56127fa895b2dfe5bc079a9c4a122d62ad3d5983
|
[
"MIT"
] |
permissive
|
minghao2016/archive
|
e2b6f0743a06d4395fc63ba1483ddfd878abc2d2
|
2442b899881475e8d60fcbee27aeb187d835e7e5
|
refs/heads/master
| 2023-06-28T10:51:21.011216
| 2021-07-29T18:55:52
| 2021-07-29T18:55:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,688
|
r
|
archive_extract.R
|
#' Extract contents of an archive to a directory
#'
#' @inheritParams archive_read
#' @param files `character() || integer() || NULL` One or more files within the archive,
#'   specified either by filename or by position.
#' @param dir `character(1)` Directory location to extract archive contents, will be created
#'   (including any missing parent directories) if it does not exist.
#' @param strip_components Remove the specified number of leading path
#'   elements. Pathnames with fewer elements will be silently skipped.
#' @details
#' If `files` is `NULL` (the default) all files will be extracted.
#' @returns `NULL`, invisibly; the function is called for its side effect of
#'   writing the extracted files into `dir`.
#' @examples
#' a <- system.file(package = "archive", "extdata", "data.zip")
#' d <- tempfile()
#'
#' # When called with default arguments extracts all files in the archive.
#' archive_extract(a, d)
#' list.files(d)
#' unlink(d)
#'
#' # Can also specify one or more files to extract
#' d <- tempfile()
#' archive_extract(a, d, c("iris.csv", "airquality.csv"))
#' list.files(d)
#' unlink(d)
#' @export
archive_extract <- function(archive, dir = ".", files = NULL, options = character(), strip_components = 0L) {
  assert("`files` must be a character or numeric vector or `NULL`",
    is.null(files) || is.numeric(files) || is.character(files))

  # Coerce a path to a binary-mode file connection, and make sure it is open.
  if (!inherits(archive, "connection")) {
    archive <- file(archive, "rb")
  }
  if (!isOpen(archive)) {
    open(archive, "rb")
  }

  if (!identical(dir, ".")) {
    if (!dir.exists(dir)) {
      # FIX: create intermediate directories too, so that extracting into a
      # nested path such as "a/b/c" no longer fails when "a/b" is missing.
      dir.create(dir, recursive = TRUE)
    }
    # Extraction happens relative to the working directory, so switch to `dir`
    # and restore the previous directory on exit (including on error).
    old <- setwd(dir)
    on.exit(setwd(old), add = TRUE)
  }

  options <- validate_options(options)
  # archive_extract_() is the compiled backend; sz is the read-buffer size.
  archive_extract_(archive, files, as.integer(strip_components), options, sz = 2^14)

  invisible()
}
|
d55c9c7fa667f291d3a60759f6d7b4ac1d4ca30a
|
018c421a17aecd4bf2c452bc88232b0ffb127e25
|
/scripts/1_data preparation.R
|
3b091d9693d76af4c1df8a9856c9b25cdb4e7fdc
|
[] |
no_license
|
samfranks/eu_meadow_birds
|
2dafb955c69a36e62c9243d1efe773529713ad43
|
9126946f27c381e0b8d2051872203ee9306a0fe9
|
refs/heads/master
| 2020-03-26T23:41:05.239515
| 2018-11-20T15:18:43
| 2018-11-20T15:18:43
| 145,559,672
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,085
|
r
|
1_data preparation.R
|
############################################################################################
#
# Step 1: EU meadow birds meta-analysis - DATA PREPARATION FROM EXTRACTED DATABASE
#
############################################################################################
# Samantha Franks
# 11 March 2016
# 22 Dec 2016
#
# Reads the raw data-extraction spreadsheet, renames/recodes variables,
# derives grouped management-intervention variables and success/failure/
# outcome responses, and writes the definitive analysis dataset (d1) to
# disk as .rds, .txt and .csv.
#================================= SET LOGIC STATEMENTS ====================
#================================= LOAD PACKAGES =================================
# Install any missing packages, then attach them all.
list.of.packages <- c("MASS","reshape","raster","sp","rgeos","rgdal","dplyr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, library, character.only=TRUE)
#================================= SET DIRECTORY STRUCTURE ================================
# Machine detection: Windows = local PC, unix + user 'samf' = BTO cluster,
# unix + user 'samantha' = Mac. Sets cluster/Mac/Wales flags accordingly.
# LOCAL
if(.Platform$OS =='windows') {
cluster <- FALSE
Mac <- FALSE
}
# HPCBTO
if(.Platform$OS=='unix' & Sys.getenv('USER')=='samf') {
cluster <- TRUE
Mac <- FALSE
Wales <- FALSE
}
# Mac
if(.Platform$OS=='unix' & Sys.getenv('USER')=='samantha') {
cluster <- FALSE
Mac <- TRUE
Wales <- FALSE
}
#### SET DIRECTORY PATHS
# # Wales HPC cluster
# if (cluster) parentwd <- c("/home/samantha.franks/")
if (cluster) parentwd <- c("/users1/samf") # BTO cluster
if (!cluster) {
if (!Mac) parentwd <- c("C:/Users/samf/Documents/Git/eu_meadow_birds")
if (Mac) parentwd <- c("/Volumes/SAM250GB/BTO PC Documents/Git/eu_meadow_birds")
}
scriptswd <- paste(parentwd, "scripts", sep="/")
datawd <- paste(parentwd, "data", sep="/")
outputwd <- paste(parentwd, "output/revision Dec 2016", sep="/")
workspacewd <- paste(parentwd, "workspaces", sep="/")
options(digits=6)
#================================= LOAD & CLEAN DATA ===============================
# d0 <- read.csv(paste(datawd, "meadow birds data extraction template_final_primary.csv", sep="/"), header=TRUE, skip=1)
d0 <- read.csv(paste(datawd, "Meadow birds data extraction template_primary and grey_standardized_FINAL.csv", sep="/"), header=TRUE)
#------- Meta-data reference for studies -------------
# create a meta-data reference file for studies with reference numbers, reference name, summary, country, region
metadat0 <- unique(d0[,c("reference.number","reference","literature.type","one.sentence.summary","score","country","region1","region2")])
#------- Clean dataset -----------
# columns required
cols.required <- c("reference.number","record.number","literature.type","score","country","region1","habitat","habitat1","habitat2","start.year","end.year","type.of.study","species","assemblage","agri.environment","basic.agri.environment", "targeted.agri.environment..wader.specific.or.higher.level.", "site.protection...nature.reserve","site.protection...designation", "mowing","grazing","fertilizer","herbicides...pesticides","nest.protection...agricultural.activities","nest.protection...predation..enclosures.or.exclosures.", "ground.water.management..drainage.inhibited.","wet.features...surface.water.management","predator.control","other.mgmt", "management.notes","overall.metric","specific.metric","reference.metric.before.management","metric.after.management","standardized.metric","standardisation.calculation","stand..reference.metric.before.management","stand..metric.after.management", "stand..effect.size","sample.size.before","sample.size.after", "uncertainty.measure.before","uncertainty.measure.after","uncertainty.measure.type","significant.effect..Y.N..U.","direction.of.effect..positive...negative...none...no.data.","unit.of.analysis","sample.size","analysis.type.1","analysis.type.2","analysis.type.details","values.obtained.from.plot.")
d0.1 <- subset(d0, select=cols.required)
# rename to easier variables (order must match cols.required above)
d0.2 <- d0.1
names(d0.2) <- c("reference","record","lit.type","score","country","region1","habitat","habitat1","habitat2","start.year","end.year","study.type","species","assemblage","AE","basic.AE","higher.AE","reserve","designation","mowing","grazing","fertilizer","pesticide","nest.protect.ag","nest.protect.predation","groundwater.drainage","surface.water","predator.control","other.mgmt","mgmt.notes","overall.metric","specific.metric","metric.before","metric.after","stan.metric","stan.calc","stan.metric.before","stan.metric.after","stan.effect.size","n.before","n.after","var.before","var.after","var.type","sig","effect.dir","analysis.unit","sample.size","analysis1","analysis2","analysis3","values.from.plot")
# management intervention variables
mgmtvars <- c("AE","basic.AE","higher.AE","reserve","designation","mowing","grazing","fertilizer","pesticide","nest.protect.ag","nest.protect.predation","groundwater.drainage","surface.water","predator.control","other.mgmt")
### exlude studies 2 and 36
# 2: remove this reference (Kruk et al. 1997) as it doesn't really measure a population or demographic metric
# 36: remove this reference (Kleijn et al. 2004) as it pools an assessment of conservation across multiple species
# NOTE(review): the inline comments on the next two lines are swapped relative
# to the list above (36 labelled Kruk, 2 labelled Kleijn) -- confirm which
# reference number corresponds to which study.
d0.2 <- subset(d0.2, reference!=36) # remove this reference (Kruk et al. 1997) as it doesn't really measure a population or demographic metric
d0.2 <- subset(d0.2, reference!=2) # remove this reference (Kleijn et al. 2004) as it pools an assessment of conservation across multiple species
d0.2 <- droplevels(d0.2)
d0.3 <- d0.2
# recode certain factor variable classes to more sensible classes
recode.as.char <- c("region1","mgmt.notes","specific.metric","stan.metric","stan.calc","var.before","var.after","analysis3")
d0.3[,recode.as.char] <- apply(d0.3[,recode.as.char], 2, as.character)
d0.3$stan.effect.size <- as.numeric(as.character(d0.3$stan.effect.size))
# recode management vars as characters to be able to use string substitution find and replace to create generic applied, restricted, removed levels for all management types
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, as.character)
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("applied site scale", "applied", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("applied landscape scale", "applied", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("restricted site scale", "restricted", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("restricted landscape scale", "restricted", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("removed site scale", "removed", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("removed landscape scale", "removed", x)
})
# plug 'none' into all the blanks where management intervention not used
for (i in 1:length(mgmtvars)) {
d0.3[d0.3[,mgmtvars[i]]=="",mgmtvars[i]] <- "none"
}
# recode sample size as small, medium, large
d0.3$sample.size <- ifelse(d0.3$sample.size=="small (< 30)", "small", ifelse(d0.3$sample.size=="medium (30-100)", "medium", "large"))
# redefine dataset
d0.4 <- d0.3
# # change management vars back to factors for analysis
# # d0.4[,mgmtvars] <- apply(d0.4[,mgmtvars], 2, function(x) as.factor(x)) # this line won't convert back to factors for some reason!
# for (i in 1:length(mgmtvars)) {
#   d0.4[,mgmtvars[i]] <- as.factor(d0.4[,mgmtvars[i]])
# }
# summary(d0.4)
#---------- Add some additional grouping variables -----------
# group fertilizer and pesticides into single variable
d0.4$fertpest <- ifelse(d0.4$fertilizer=="applied" | d0.4$pesticide=="applied", "applied", ifelse(d0.4$fertilizer=="restricted" | d0.4$pesticide=="restricted", "restricted", ifelse(d0.4$fertilizer=="removed" | d0.4$pesticide=="removed", "removed", "none")))
# group groundwater.drainage and surface.water into single variable meaning 'more water'
# restricted/removed groundwater drainage equates to more water (same as applying surface water)
# combinations of drainage/surface water in dataset
unique(d0.4[,c("groundwater.drainage","surface.water")])
d0.4$water <- ifelse(d0.4$groundwater.drainage=="restricted" | d0.4$groundwater.drainage=="removed" & d0.4$surface.water=="applied", "applied", ifelse(d0.4$groundwater.drainage=="restricted" | d0.4$groundwater.drainage=="removed", "applied", ifelse(d0.4$surface.water=="applied", "applied", ifelse(d0.4$groundwater.drainage=="applied","restricted","none"))))
# group nest protection (predation and agricultural) variables together
unique(d0.4[,c("nest.protect.ag","nest.protect.predation")])
d0.4$nest.protect <- ifelse(d0.4$nest.protect.predation=="applied" | d0.4$nest.protect.ag=="applied", "applied","none")
# # group nest protection (predation) with predator control (more sensible than grouping it with nest protection for agriculture given predation measures are more likely to go together)
# unique(d0.4[,c("nest.protect.ag","nest.protect.predation","predator.control")])
# d0.4$predation.reduction <- ifelse(d0.4$nest.protect.predation=="applied" | d0.4$predator.control=="applied", "applied", ifelse(d0.4$predator.control=="restricted", "restricted", ifelse(d0.4$predator.control=="removed", "removed","none")))
# group reserves and site designations
d0.4$reserve.desig <- ifelse(d0.4$reserve=="applied" | d0.4$designation=="applied", "applied", "none")
# create a AE-level variable (with basic and higher as levels) for analysis 1a
# if no info was provided on type of AES, then assume it was basic rather than higher-level or targetted
d0.4$AE.level <- ifelse(d0.4$higher.AE=="applied", "higher", ifelse(d0.4$AE=="none", "none", "basic"))
# calculate study duration variable
d0.4$study.length <- d0.4$end.year - d0.4$start.year + 1
# add some overall metrics which lump all productivity metrics, all abundance metrics, all occupancy metrics
d0.4$metric <- ifelse(grepl("productivity", d0.4$overall.metric), "productivity", ifelse(grepl("abundance", d0.4$overall.metric), "abundance", ifelse(grepl("recruitment", d0.4$overall.metric), "recruitment", ifelse(grepl("survival", d0.4$overall.metric), "survival", "occupancy"))))
#------------- Change the predator.control level for studies 5 & 10 ---------------
# these 2 studies both deal with the effects of a halt in predator control/game-keepering on grouse moors and the impacts on wader populations
# kind of a reverse of what the conservation measure would normally be (control applied), so reverse the level of predator control to 'applied' and change the direction of the effect (but obviously leave the significance)
# create 5 new records for these studies (2 and 3 each), then add them to the dataset WITH THEIR EFFECT SIZES REMOVED so there is no confusion
temp <- d0.4[d0.4$reference=="5" | d0.4$reference=="10",]
newtemp <- temp
# change predator control to applied
newtemp$predator.control <- "applied"
# change positives to negatives and vice versa
newtemp$effect.dir <- ifelse(newtemp$effect.dir=="positive","negative","positive")
newtemp$metric.before <- temp$metric.after
newtemp$metric.after <- temp$metric.before
newtemp$stan.metric.before <- temp$stan.metric.after
newtemp$stan.metric.after <- temp$stan.metric.before
newtemp$stan.effect.size <- (newtemp$stan.metric.after - newtemp$stan.metric.before)/abs(newtemp$stan.metric.before)
# remove the original records from the dataset and add these new ones in
d0.4 <- d0.4[-which(d0.4$reference %in% c("5","10")),]
d0.5 <- rbind(d0.4, newtemp)
# NOTE(review): d0.5 (the dataset WITH the reversed predator-control records)
# is never used downstream -- the success/failure/outcome variables and the
# final dataset d1 are built from d0.4, so the re-coded records from studies
# 5 & 10 are dropped entirely. Confirm whether d1 should be built from d0.5.
#------------ Add the success/failure/outcome variables --------------
# success variable defined as 1 = significant positive effect, 0 = neutral or negative effect
d0.4$success <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="positive", 1, 0) # success variable
# failure variable defined as 1 = significant negative effect, 0 = neutral or positive effect
d0.4$failure <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="negative", 1, 0) # failure variable
# outcome variable: -1 = significant negative, 0 = no effect, 1 = significant positive
d0.4$outcome <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="positive", 1, ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="negative", -1, 0)) # outcome variable
#------------- Recode removed/restricted as single level=reduced --------------
# final dataset for analysis
d1 <- d0.4
# new set of management variables (mgmtvars is deliberately redefined here to
# the grouped/derived variables used in the analysis scripts)
mgmtvars <- c("AE","AE.level","reserve.desig","mowing","grazing","fertpest","nest.protect","predator.control","water")
# convert removed or restricted levels of the management vars (all but AE.level) to a single level = reduced
# use find and replace with gsub
d1[,mgmtvars] <- apply(d1[,mgmtvars], 2, function(x) {
gsub("removed", "reduced", x)
})
d1[,mgmtvars] <- apply(d1[,mgmtvars], 2, function(x) {
gsub("restricted", "reduced", x)
})
#------------- Definitive dataset --------------
### Save definitive dataset
# NOTE(review): the first path below combines a leading "/" in the file string
# with sep="/", producing a double slash ("workspaces//revision Dec 2016/...");
# harmless on most filesystems but worth tidying.
saveRDS(d1, file=paste(workspacewd, "/revision Dec 2016/meadow birds analysis dataset_full.rds", sep="/"))
write.table(d1, file=paste(datawd, "meadow birds analysis dataset_full.txt", sep="/"), row.names=FALSE, quote=FALSE, sep="\t")
write.csv(d1, file=paste(datawd, "meadow birds analysis dataset_full.csv", sep="/"), row.names=FALSE)
|
b01e6ffc86e0c398857a6ff841d4dd2395d5942b
|
e1d4cceb6474ac0d600136ffb4fcbdad59762236
|
/man/get_superstructures.Rd
|
3577760157c9458a696260be98e7c433946eb7ca
|
[] |
no_license
|
sgrote/ABAEnrichment
|
64a3f24cdefa2b39d2c365e57ea83d81328146c6
|
6f9fb17b300e831900a252c78f3f3efa7b39582d
|
refs/heads/master
| 2022-01-05T16:55:38.319400
| 2019-07-14T14:14:59
| 2019-07-14T14:14:59
| 107,267,872
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,789
|
rd
|
get_superstructures.Rd
|
\name{get_superstructures}
\alias{get_superstructures}
\title{
Returns all superstructures of a brain region using the Allen Brain Atlas ontology
}
\description{
Returns all superstructures of a brain region and the brain region itself given a structure ID, e.g. 'Allen:10657' as used throughout the ABAEnrichment package.
The output vector contains the superstructures according to the hierarchy provided by the Allen Brain Atlas ontology [1,2] beginning with the root ('brain' or 'neural plate') and ending with the requested brain region.
}
\usage{
get_superstructures(structure_id)
}
\arguments{
\item{structure_id}{
a brain structure ID, e.g. 'Allen:10657' or '10657'
}
}
\value{
vector of brain structure IDs that contains all superstructures of the requested brain region and the brain region itself.
The order of the brain regions follows the hierarchical organization of the brain.
}
\references{
[1] Allen Institute for Brain Science. Allen Human Brain Atlas.
Available from: \url{http://human.brain-map.org/} \cr
[2] Allen Institute for Brain Science. BrainSpan Atlas of the Developing Human Brain.
Available from: \url{http://brainspan.org/}
}
\author{
Steffi Grote
}
\note{
The ontologies for the adult and the developing human brain are different.
}
\seealso{
\code{\link{get_name}}\cr
\code{\link{get_id}}\cr
\code{\link{get_sampled_substructures}}\cr
}
\examples{
## Get the IDs of the superstructures of the precentral gyrus
## (adult brain ontology)
get_superstructures('Allen:4010')
## Get the IDs and the names of the superstructures
## of the dorsolateral prefrontal cortex
## (developing brain ontology)
data.frame(superstructure=get_name(get_superstructures("Allen:10173")))
}
|
e4db41f98a4ea244f8d261db826d0660ac242c97
|
ae4ec2824fd201bb731e7b1379eadfc092d1ab25
|
/functions/sublist.R
|
bb00af65438e5f6bf9247ee16fe93f6f7baaa574
|
[] |
no_license
|
embubiz/getdataProject
|
24a87d14aa5b930465241e50f29d82b3e8288621
|
95a342896186d8c8c3538cfda515b7ea9107b1d7
|
refs/heads/master
| 2021-01-22T07:13:39.420373
| 2014-10-26T23:42:06
| 2014-10-26T23:42:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
sublist.R
|
#' Return the names of features that measure a mean or standard deviation.
#'
#' Selects from the character vector `feat` (defined in the environment where
#' sublist() was defined, typically the global environment) every feature name
#' containing the literal substring "mean()" or "std()". Matching is literal
#' (fixed = TRUE), so names such as "meanFreq()" are deliberately excluded.
#'
#' Returns a character vector of matching feature names (duplicates removed
#' by union(), mean-matches first, then std-matches not already present).
sublist <- function() {
  # grep(..., value = TRUE) returns the matching names directly, replacing
  # the original names(unlist(sapply(...))) construction; T/F abbreviations
  # replaced by TRUE/FALSE per style guide.
  union(
    grep("mean()", feat, fixed = TRUE, value = TRUE),
    grep("std()", feat, fixed = TRUE, value = TRUE)
  )
}
|
df3854b4c575154fd99b66274f3c7c2bc2fbd714
|
54cc718d91d90a151c94833be049b95198ee3556
|
/atlas/solve.R
|
678b35c7af8ec0a8ea218f20117a646cec6044b1
|
[
"Apache-2.0"
] |
permissive
|
rzo1/effective-debugging
|
1411d3e1030160e3d7fb74a3f5bcecdd8a7262c8
|
8a1faae5028d382a59b71073450a8634e10fc972
|
refs/heads/master
| 2023-03-22T21:49:49.511805
| 2019-01-01T22:57:46
| 2019-01-01T22:57:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
solve.R
|
#!/usr/bin/env Rscript
# Benchmark-style script: build a large random matrix and invert it.
# Matrix size
n <- 10000
# Create a square matrix of random numbers
# (replicate() draws rnorm(n) once per column, filling column-major)
m <- replicate(n, rnorm(n))
# Calculate the matrix inverse
# NOTE(review): solve() on a 10000x10000 dense matrix is O(n^3) and the
# input alone needs ~800 MB; no set.seed() is called, so the result is not
# reproducible between runs — confirm both are acceptable for this benchmark.
r <- solve(m)
|
f49420e80903bef468dbd57b371c5a0d36152734
|
d3da3172e2164d35f2c0841a98aed16dfebb6351
|
/Part4/RDExP4nalinkum.R
|
34215f55861416365dba2efa6dcf6cf31a1d4aa2
|
[] |
no_license
|
nalin-iitd/EDA-RealDirect
|
1c000c34411f9b4de4dbf3e44ec7e26da54dd90d
|
a96f2da493dbe425b5f9791c3ebed08b2fa1151e
|
refs/heads/master
| 2020-04-06T06:35:02.903834
| 2016-11-13T04:19:22
| 2016-11-13T04:19:22
| 73,594,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,899
|
r
|
RDExP4nalinkum.R
|
# Exploratory text-mining script: load tweets from a local JSON dump, clean
# them, and summarise term frequencies via a bar chart, word cloud and a
# hierarchical-cluster dendrogram.
#install the required packages
# NOTE(review): install.packages() calls inside a script re-install on every
# run; consider guarding with requireNamespace() or removing.
install.packages("twitteR")
install.packages("wordcloud")
install.packages("tm")
install.packages("ggplot2")
install.packages("Rgraphviz")
#load the required libraries
library("twitteR")
library("wordcloud")
library("tm")
library("ggplot2")
library("Rgraphviz")
#perform real estate tweet analysis for Real Direct
#Read twitter data in json format from a file on disk and extract the text portion
# NOTE(review): jsonlite is used via :: below but is neither installed nor
# loaded above — it must already be available; twitteR and Rgraphviz are
# loaded but not used in the visible code.
json_data1 <- readLines('Mar1001.json', warn = FALSE)
json_df <- jsonlite::fromJSON(json_data1)
tweets_text <- json_df$text
#remove unnecessary garbage characters from tweets text
rent_tweets_text <- iconv(tweets_text, 'UTF-8', 'ASCII')
#create a corpus using tm package and remove stopwords, punctuations, whitespace and other unnecessary characters from tweets text
rent_clean_text <- Corpus(VectorSource(rent_tweets_text))
rent_clean_text <- tm_map(rent_clean_text, content_transformer(function (x , pattern ) gsub(pattern, " ", x)), "/")
rent_clean_text <- tm_map(rent_clean_text, content_transformer(function (x , pattern ) gsub(pattern, " ", x)), "@")
rent_clean_text <- tm_map(rent_clean_text, content_transformer(function (x , pattern ) gsub(pattern, " ", x)), "\\|")
rent_clean_text <- tm_map(rent_clean_text, removePunctuation)
rent_clean_text <- tm_map(rent_clean_text, content_transformer(tolower))
rent_clean_text <- tm_map(rent_clean_text, removeWords, stopwords("english"))
rent_clean_text <- tm_map(rent_clean_text, stripWhitespace)
rent_clean_text <- tm_map(rent_clean_text, removeWords, c("http","https", "tco"))
#construct a term document matrix and make data frame which consists of terms and their respective term frequencies
tdm <- TermDocumentMatrix(rent_clean_text)
tmatrix <- as.matrix(tdm)
term_freqs <- sort(rowSums(tmatrix),decreasing=TRUE)
# keep only terms appearing at least 100 times
term_freqs <- subset(term_freqs, term_freqs >= 100)
termfreq_df <- data.frame(term = names(term_freqs),freq=term_freqs)
termfreq_df[1:10, ]
#visualize term frequencies using a term freq ggplot and constructing a wordcloud
ggplot(termfreq_df, aes(x = term, y = freq)) + geom_bar(stat = "identity") + xlab("Terms") + ylab("Frequency") + coord_flip()
set.seed(1234)
wordcloud(words = termfreq_df$term, freq = termfreq_df$freq, min.freq = 1,
          max.words=200, random.order=FALSE, rot.per=0.35,
          colors=brewer.pal(8, "Dark2"))
#use some functions such as findAssociations and findFreqTerms for analysis of most frequent terms
findAssocs(tdm, "rent", 0.2)
frequent_terms <- findFreqTerms(tdm, lowfreq = 25)
print(frequent_terms)
#construct a cluster dendrogram to perform a final correlation among most frequent terms at this stage
#remove sparse terms
tdm2 <- removeSparseTerms(tdm, sparse = 0.95)
tmatrix2 <- as.matrix(tdm2)
#cluster terms using Ward's method on euclidean distances of scaled counts
distMatrix <- dist(scale(tmatrix2))
fit <- hclust(distMatrix, method = "ward.D2")
plot(fit)
rect.hclust(fit, k=6) #cut tree into 6 clusters
|
4ef53918b41303afa2073182951476e5713f1311
|
e725e28b66bf0a13793f172b51c0111a068bcfbe
|
/tests/testthat/test-generate_typos.R
|
efade47f7f2c3d77977535a950f38f2d75555c7e
|
[
"Apache-2.0"
] |
permissive
|
mikemahoney218/typogenerator
|
86aa47c3450654df6ef2de4ac2c44a227671c82b
|
23cf8a5ed05f10adf2029c4a6cb482a6f6e376c2
|
refs/heads/main
| 2023-03-18T09:23:43.327024
| 2021-03-03T00:12:48
| 2021-03-03T00:12:48
| 343,555,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 428
|
r
|
test-generate_typos.R
|
# Regression tests pinning generate_typos() output sizes and its equivalence
# to calling the underlying typo_* helper directly.
test_that("generate_typos is stable", {
  # "*" selects every typo strategy; 351 is the known total for "Mike"
  # (a snapshot value — update deliberately if strategies change).
  expect_equal(
    length(generate_typos("Mike", "*")[[1]]),
    351
  )
  # A single named strategy must match calling the helper directly.
  expect_equal(
    generate_typos("Michael", "typo_repetition"),
    typo_repetition("Michael")
  )
  # Two character-set strategies with the GitHub-allowed alphabet should
  # each contribute length(gh_allowed()) variants.
  expect_equal(
    length(
      generate_typos(
        "Michael",
        c("typo_addition", "typo_prefix"),
        c(gh_allowed(), gh_allowed())
      )[[1]]
    ),
    length(gh_allowed()) * 2
  )
})
|
7cf7a31b52cff1a5027d5f99d1be9de1767cb5fd
|
a2324da916cef0ea41d0c935640d5e4f551940da
|
/dofiles/analysis/tab2-read-by-age-sex.R
|
d85d62c1aa40d43f381bf4b82c97cc9662910c5c
|
[] |
no_license
|
hendersonad/2020_multimorbidity
|
d4339ef15e76b00f651968ef2dc212b8dc3eca89
|
32db3bab262fb55e3e2ac0eb615e415680faeb83
|
refs/heads/main
| 2023-04-09T17:40:25.511958
| 2022-11-09T10:45:04
| 2022-11-09T10:45:04
| 327,689,240
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,922
|
r
|
tab2-read-by-age-sex.R
|
library(arrow)
library(tidyr)
library(dplyr)
library(readr)
library(haven)
library(ggplot2)
library(stringr)
library(data.table)
library(here)
library(grid)
library(gridExtra)
source(here::here("mm-filepaths.R"))
# ggplot themes used throughout: theme_ali for flipped (horizontal) bar
# charts, theme_ali_noFlip for vertical ones.
theme_ali <- theme_bw() %+replace%
  theme(legend.position = "top",
        strip.background = element_blank(),
        panel.grid.major.y = element_blank(),
        panel.grid.minor.y = element_blank(),
        panel.grid.minor.x = element_blank(),
        axis.text.y = element_text(hjust = 1, angle = 0),
        axis.text.x = element_text(hjust = 1, angle = 0))
theme_ali_noFlip <- theme_bw() %+replace%
  theme(legend.position = "top",
        strip.background = element_blank(),
        panel.grid.major.y = element_blank(),
        panel.grid.minor.y = element_blank(),
        panel.grid.minor.x = element_blank(),
        axis.text.y = element_text(hjust = 0, angle = 0),
        axis.text.x = element_text(angle=0, hjust = 0.5))
theme_set(theme_ali)
# Lookup table mapping Read chapter codes ("var") to human-readable names.
# NOTE(review): assigning to `names` shadows base::names() when used as a
# value (e.g. left_join(names, ...)); consider renaming to chapter_names.
names <- read_csv(here("codelists/chapter_names.csv"))
asthma_CC <- read_parquet(paste0(datapath,"asthma_case_control_set.gz.parquet"))
eczema_CC <- read_parquet(paste0(datapath,"eczema_case_control_set.gz.parquet"))
## Slightly bodgy way of finding max and min event date
# readcodes <- write_parquet(readcodes, sink = paste0(datapath,"asthma_read_chapter.gz.parquet"))
# ## date range of events
# min(readcodes$eventdate, na.rm = T)
# max(readcodes$eventdate, na.rm = T)
# eventdates <- readcodes$eventdate
# eventdates[eventdates >= as.Date("2020-07-01") & !is.na(eventdates)] ## any non-NA eventdates that are greater than July 2020
#
# ## 1 event in 2052, assume it is 2012
# readcodes$eventdate[readcodes$eventdate >= as.Date("2020-07-01") & !is.na(readcodes$eventdate)] <- as.Date("2012-01-01")
#
# min(readcodes$eventdate, na.rm = T) #1916-08-01
# max(readcodes$eventdate, na.rm = T) #2020-06-26
# read summary by age/sex -------------------------------------------------
## Build a Table-2 style summary for one study cohort ("asthma" or "eczema").
##
## Reads three parquet files from `datapath` (patient info, case/control set,
## Read-chapter records), flags each patient as case (exp = 1) or control
## (exp = 0), then summarises:
##   * cohort totals, age (median/SD vs 2015), age bands (vs 2018) and sex;
##   * per-Read-chapter counts of patients with at least one record, overall
##     and at two age points (18 +/- 5 and 50 +/- 5 years, vs 2016).
## Returns a tibble with columns var, gender, age, control_n, control_pc,
## case_n, case_pc.
##
## NOTE(review): age is computed against three different reference years
## (2015, 2016, 2018) in different summaries — confirm this is intentional.
summ_read_agesex <- function(study = "asthma"){
  study_info <- read_parquet(paste0(datapath,study,"_patient_info.gz.parquet"))
  case_control <- read_parquet(paste0(datapath,study,"_case_control_set.gz.parquet"))
  readcodes <- read_parquet(paste0(datapath,study,"_read_chapter.gz.parquet"))
  # distinct case / control patient ids, tagged with an exposure flag
  cases <- case_control %>%
    dplyr::select(caseid) %>%
    distinct() %>%
    mutate(exposed = 1)
  controls <- case_control %>%
    dplyr::select(contid) %>%
    distinct() %>%
    mutate(exposed = 0)
  # one row per patient with gender, year of birth and exp (case = 1)
  patid_CC <- study_info %>%
    dplyr::select(patid, gender, realyob) %>%
    left_join(cases, by = c("patid" = "caseid")) %>%
    mutate(exp = replace_na(exposed, 0)) %>%
    dplyr::select(-exposed) %>%
    left_join(controls, by =c("patid" = "contid", "exp" = "exposed")) %>%
    mutate_at(c("exp", "gender"), ~as.factor(.))
  # summary stats -----------------------------------------------------------
  n_summ <- patid_CC %>%
    group_by(exp) %>%
    summarise(n = n()) %>%
    pivot_wider(names_from = exp, values_from = n) %>%
    mutate(var = "TOTAL", control_pc = NA, case_pc = NA) %>%
    dplyr::select(var, control_n = `0`, control_pc, case_n = `1`, case_pc)
  # median/SD age (reference year 2015); _pc columns reused to carry the SD
  age_summ <- patid_CC %>%
    mutate(age = 2015-realyob) %>%
    group_by(exp) %>%
    summarise(med = median(age), sd = sd(age)) %>%
    ungroup() %>%
    pivot_wider(names_from = exp, values_from = c(med, sd)) %>%
    mutate(var = "age") %>%
    dplyr::select(var, control_n = med_0, control_pc = sd_0, case_n = med_1, case_pc = sd_1)
  age_summ
  # age-band counts and proportions (reference year 2018)
  agecut_summ <- patid_CC %>%
    mutate(age = 2018-realyob,
           agecut = cut(age, breaks = c(0,18,seq(20,100,20)))) %>%
    group_by(exp, agecut) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    pivot_wider(id_cols = agecut, names_from = exp, values_from = n) %>%
    rename(control_n = `0`, case_n = `1`) %>%
    mutate(control_pc = (control_n / sum(control_n)),
           case_pc = (case_n / sum(case_n))) %>%
    dplyr::select(var = agecut, control_n, control_pc , case_n , case_pc )
  agecut_summ
  # sanity check: age bands must partition the whole cohort
  if(sum(agecut_summ$case_n) != n_summ$case_n){stop("Sum age groups don't match total")}
  (t1 <- table(patid_CC$gender, patid_CC$exp))
  (t1p <- prop.table(t1, margin=2))
  sex_summ <- rbind(t1, t1p)
  sex_summ_df <- as.data.frame(apply(sex_summ, 2, unlist))
  sex_summ_df$gender <- rownames(sex_summ)
  sex_summ_df <- sex_summ_df %>%
    rename(control = `0`, case = `1`) %>%
    mutate(var = c("n","n","pc","pc")) %>%
    pivot_wider(id_cols = gender, names_from = var, values_from = c(case, control)) %>%
    dplyr::select(var = gender, control_n , control_pc, case_n , case_pc)
  if(sum(sex_summ_df$case_n) != n_summ$case_n){stop("Sum age groups don't match total")}
  out1 <- bind_rows(
    n_summ,
    age_summ,
    agecut_summ,
    sex_summ_df
  )
  out1$gender = "All"
  out1$age = NA
  # READ chapter ------------------------------------------------------------
  DT <- data.table(readcodes)
  PTD <- data.table(patid_CC)
  ## summarise PATID
  fullPTDcount <- PTD[, list(count = uniqueN(patid)), by=list(exp)]
  ## merge Read records with PATID info
  fullDT <- merge(DT, PTD, by = "patid")
  ## get n records per patid per chapter
  fullDT_nobreakdown <- fullDT
  # keep only the first event per (patid, chapter) so counts are "patients
  # with >=1 record in chapter", not raw record counts
  fullfirsteventDT <- fullDT_nobreakdown[ fullDT_nobreakdown[, .I[1] , by = list(patid,readchapter)]$V1]
  ## Count number of Read codes by chapter, exposure
  fullreadcollapse <- fullfirsteventDT[, list(sum_read = .N), by=list(exp,readchapter)]
  ## merge in denominator
  fullread <- merge(fullreadcollapse, fullPTDcount, by = c("exp"))
  fullread[,pc_read:=sum_read/count]
  fullread[,gender:="All"]
  fullread[,age:=NA]
  ### Do the same at two age points
  ## count number of unique Patids with a record
  # NOTE: := modifies PTD by reference, so PTD itself gains an age column here
  agePTD <- PTD[,age:=2016-realyob]
  agePTD <- agePTD[age<=23&age>=13 | age<=55&age>=45,] ## filter age = 18 or 50 \pm 5
  agePTD[age<=23, age:=18]
  agePTD[age>=45, age:=50]
  agePTDcount <- agePTD[, list(count = uniqueN(patid)), by=list(exp,gender,age)]
  # calculate age
  fullDT <- fullDT[,age:=2016-realyob]
  # filter age = 18 or 50 \pm 5
  fullDT <- fullDT[age<=23&age>=13 | age<=55&age>=45,]
  # replace age == 18 if 18\pm5 or 50 if age == 50\pm5
  fullDT[age<=23, age:=18]
  fullDT[age>=45, age:=50]
  # first event per (patid, chapter) within the age-restricted data
  firsteventDT <- fullDT[ fullDT[, .I[1] , by = list(patid,readchapter)]$V1]
  ## Count number of Read codes by chapter, exposure, gender and age
  readcollapse_agesex <- firsteventDT[, list(sum_read = .N), by=list(exp,readchapter,gender,age)]
  ## get denominator - total number of patids by group
  #readdenomDT <- fullDT[,.(total = uniqueN(patid)), by=.(exp,gender,age)]
  ## total number of codes by gender age and exp
  #chapter_sum <- fullDT[, .(total = sum(.N)), by=.(exp,gender,age)]
  ## merge in denominator by exposure, age and gender
  read_agesex <- merge(readcollapse_agesex, agePTDcount, by = c("exp","gender","age"))
  ## calculate total percentage
  read_agesex[,pc_read:=sum_read/count]
  ## bit of formatting of fullPTD and chapter_sum so we can bind_rows later
  ## (denominator tables become pseudo-rows tagged "_AgeCount"/"_FullCount")
  setnames(agePTDcount, "count", "sum_read")
  agePTDcount[, readchapter:="_AgeCount"]
  agePTDcount[, pc_read:=NA]
  setnames(fullPTDcount, "count", "sum_read")
  fullPTDcount[, readchapter:="_FullCount"]
  fullPTDcount[, pc_read:=NA]
  fullPTDcount[,gender:="All"]
  fullPTDcount[,age:=NA]
  #setnames(chapter_sum, "total", "sum_read")
  #chapter_sum[, readchapter:="_Total"]
  #chapter_sum[, pc_read:=NA]
  DF <- tibble(fullread) %>%
    bind_rows(read_agesex,fullPTDcount, agePTDcount) %>%
    arrange(exp, gender, age, readchapter) %>%
    mutate_at("exp", ~ifelse(.==0, "control", "case")) %>%
    dplyr::select(-count) %>%
    pivot_wider(names_from = exp, values_from = c(sum_read, pc_read))
  #mutate_at(c("total_control", "total_case",
  #            "sum_read_control", "sum_read_case",
  #            "pc_read_control", "pc_read_case"), ~replace_na(., 0))
  # stack the demographic summary (out1) on top of the Read-chapter summary
  DF_out <- out1 %>%
    dplyr::select(readchapter = var,
                  sum_read_control = control_n,
                  pc_read_control = control_pc,
                  sum_read_case = case_n,
                  pc_read_case = case_pc,
                  gender, age) %>%
    bind_rows(DF) %>%
    dplyr::select(var = readchapter,
                  gender, age,
                  control_n = sum_read_control, control_pc = pc_read_control,
                  case_n = sum_read_case, case_pc = pc_read_case) %>%
    arrange(var, gender, age)
  DF_out
}
# run table extract -------------------------------------------------------
# Run the per-cohort summary for both studies and join into one wide table
# (one row per var/gender/age, Asthma and Eczema columns side by side).
# NOTE(review): T/F abbreviations (scientific = F, na.rm = T) appear
# throughout this script; prefer TRUE/FALSE since T/F can be reassigned.
asthma_tab2 <- summ_read_agesex("asthma")
eczema_tab2 <- summ_read_agesex("eczema")
asthma_out <- asthma_tab2 %>%
  mutate(exposure = "Asthma")
eczema_out <- eczema_tab2 %>%
  mutate(exposure = "Eczema")
DF_out <- bind_rows(
  asthma_out,
  eczema_out
  ) %>%
  pivot_wider(names_from = exposure,
              id_cols = c(var, gender, age),
              values_from = c(control_n, control_pc, case_n, case_pc)) %>%
  dplyr::select(var, gender, age, ends_with("Asthma"), ends_with("Eczema")) %>%
  mutate_if(is.numeric, ~ifelse(.<1, signif(.,2), round(.,2))) %>%
  mutate(asthma_dif = case_pc_Asthma-control_pc_Asthma,
         eczema_dif = case_pc_Eczema-control_pc_Eczema)
DF_out_all <- DF_out
# AGE and Gender breakdown ------------------------------------------------
# Keep only per-chapter rows (drop "_AgeCount"/"_FullCount" pseudo-rows and
# the all-ages summaries), then plot counts by chapter, gender and age.
DF_out <- DF_out %>%
  filter(!grepl("_",var)) %>%
  filter(!is.na(age))
fig1 <- DF_out %>%
  dplyr::select(var, gender, age, control_n_Asthma, control_n_Eczema,
                case_n_Asthma, case_n_Eczema) %>%
  pivot_longer(names_to = "cohort", cols = -c(var,gender,age)) %>%
  separate(col = cohort, into = c("exp", "n", "condition"), sep = "_")
fig1 <- fig1 %>%
  left_join(names, by = "var") %>%
  mutate_at(c("exp", "condition"), ~stringr::str_to_title(.)) %>%
  mutate(prettyval = prettyNum(value, big.mark = ",", scientific = F))
label <- paste0(unique(fig1$name), collapse = ", ")
write_lines(label, here::here("out/Fig2_caption.txt"))
plot2 <- ggplot(fig1, aes(x = reorder(name, -value), y = value, colour = exp, fill = exp, group = exp)) +
  geom_col(data = filter(fig1, !grepl("_", var)), position = position_dodge(), alpha = 0.2) +
  #geom_text(data = filter(fig1, grepl("_Count", var) & exp == "Case"), aes(x = "U", y = 5e5, label = prettyNum(value, big.mark = ",", scientific = F)), hjust = 0.5) +
  #geom_text(data = filter(fig1, grepl("_Total", var) & exp == "Case"), aes(x = "T", y = 5e5, label = prettyNum(value, big.mark = ",", scientific = F)), hjust = 0.5) +
  #geom_text(data = filter(fig1, grepl("_Count", var) & exp == "Control"), aes(x = "S", y = 5e5, label = prettyNum(value, big.mark = ",", scientific = F)), hjust = 0.5) +
  #geom_text(data = filter(fig1, grepl("_Total", var) & exp == "Control"), aes(x = "R", y = 5e5, label = prettyNum(value, big.mark = ",", scientific = F)), hjust = 0.5) +
  facet_grid(cols = vars(gender, age),
             rows = vars(condition),
             scales = "fixed") +
  labs(x = "Read Chapter", y = "No. of primary care records", colour = "Exposed", fill = "Exposed") +
  coord_flip() +
  scale_fill_manual(values = c("Control" = "tomato", "Case" = "darkblue")) +
  scale_colour_manual(values = c("Control" = "tomato", "Case" = "darkblue"))
plot2
dev.copy(pdf, here::here("out/Fig2.pdf"), width = 10, height = 11)
dev.off()
# Same breakdown but as percentages of records per chapter.
fig2 <- DF_out %>%
  dplyr::select(var, gender, age, control_pc_Asthma, control_pc_Eczema,
                case_pc_Asthma, case_pc_Eczema) %>%
  pivot_longer(names_to = "cohort", cols = -c(var,gender,age)) %>%
  separate(col = cohort, into = c("exp", "n", "condition"), sep = "_")
fig2 <- fig2 %>%
  left_join(names, by = "var") %>%
  mutate_at(c("exp", "condition"), ~stringr::str_to_title(.))
plot2_pc <- ggplot(fig2, aes(x = reorder(name, -value), y = value*100, colour = exp, fill = exp, group = exp)) +
  geom_col(data = filter(fig2, !grepl("_", var)), position = position_dodge(), alpha = 0.2) +
  facet_grid(cols = vars(gender, age),
             rows = vars(condition)) +
  labs(x = "Read Chapter", y = "Percentage of all primary care records by Read chapter", colour = "Exposed", fill = "Exposed") +
  scale_fill_manual(values = c("Control" = "tomato", "Case" = "darkblue")) +
  scale_colour_manual(values = c("Control" = "tomato", "Case" = "darkblue")) +
  coord_flip() +
  theme(text = element_text(size = 12),
        strip.text.y = element_text(angle = 0))
plot2_pc
dev.copy(pdf, here::here("out/Fig2_pc.pdf"), width = 10, height = 11)
dev.off()
# Plot chart and table into one object
# Build the small patient-count table ("No. patients" by gender/age) shown
# underneath Fig2, then write combined chart+table PDFs/PNG via gridExtra.
SummaryTable_age <- DF_out_all %>%
  #filter(grepl("_", var)) %>%
  filter(var == "_AgeCount") %>%
  dplyr::select(!contains("pc")) %>%
  dplyr::select(!contains("dif")) %>%
  pivot_longer(cols = -c(var, gender, age)) %>%
  tidyr::separate(name, into=paste("V",1:3,sep="_")) %>%
  rename(exp = V_1, name = V_3) %>%
  dplyr::select(-V_2) %>%
  mutate_at("value", ~prettyNum(., big.mark = ",", scientific = F)) %>%
  pivot_wider(id_cols = c("var", "name", "exp"),
              names_from = c("gender", "age"),
              names_glue = "{gender} ({age})",
              values_from = value) %>%
  arrange(name, var, exp) %>%
  mutate_at("var", ~str_remove(., "_")) %>%
  mutate_at("var", ~ifelse(.=="AgeCount", "No. patients", "All records")) %>%
  mutate_at("exp", ~str_to_title(.)) %>%
  #mutate_at(vars("name", "var") , ~ifelse(duplicated(.),"",.)) %>%
  dplyr::select(Condition = name, Variable = var,Exposed = exp, everything())
# Set theme to allow for plotmath expressions
tt <- ttheme_minimal(core = list(fg_params = list(hjust = 0,
                                                  x = 0.1,
                                                  fontsize = 9)),
                     colhead=list(fg_params = list(hjust = 0,
                                                   x = 0.1,
                                                   fontsize = 10,
                                                   fontface = "bold",
                                                   parse=TRUE)))
# Blank repeated labels so the rendered table reads like a grouped table.
SummaryTable_age[duplicated(SummaryTable_age[, c('Condition', 'Variable')]),
                 c('Condition', 'Variable')] <- ""
SummaryTable_age[duplicated(SummaryTable_age[,'Condition']), 'Condition'] <- ""
SummaryTable_age
tbl <- tableGrob(SummaryTable_age, rows=NULL, theme=tt)
pdf(here::here("out/Fig2_table.pdf"), width = 10, height = 10)
grid.arrange(plot2, tbl,
             nrow=2,
             as.table=TRUE,
             heights=c(5,1))
dev.off()
pdf(here::here("out/Fig2_table_pc.pdf"), width = 10, height = 10)
grid.arrange(plot2_pc, tbl,
             nrow=2,
             as.table=TRUE,
             heights=c(5,1))
dev.off()
## output as png for google docs purposes
png(here::here("out/Fig2_table_pc.png"), width = 600, height = 650)
grid.arrange(plot2_pc, tbl,
             nrow=2,
             as.table=TRUE,
             heights=c(5,1))
dev.off()
# who is in both? ----------------------------------------------------------
# Cross-tabulate cohort membership: how many cases (and controls) appear in
# the asthma cohort only, the eczema cohort only, or both. Overlap is found
# with a full join on patient id; unmatched rows get NA in the other cohort.
asthma_cases <- asthma_CC %>%
  dplyr::select(caseid) %>%
  distinct() %>%
  mutate(cohort = "asthma")
asthma_conts <- asthma_CC %>%
  dplyr::select(contid) %>%
  distinct() %>%
  mutate(cohort = "asthma")
eczema_cases <- eczema_CC %>%
  dplyr::select(caseid) %>%
  distinct() %>%
  mutate(cohort = "eczema")
eczema_conts <- eczema_CC %>%
  dplyr::select(contid) %>%
  distinct() %>%
  mutate(cohort = "eczema")
head(eczema_cases); head(asthma_cases)
both_surveys <- full_join(eczema_cases, asthma_cases, by = c("caseid")) %>%
  arrange(caseid)
summ_cohorts <- both_surveys %>%
  count(cohort.x, cohort.y) %>%
  mutate(name = ifelse(!is.na(cohort.x) & !is.na(cohort.y), "Both",
                       ifelse(!is.na(cohort.x) & is.na(cohort.y), "Eczema only",
                              ifelse(is.na(cohort.x) & !is.na(cohort.y), "Asthma only",NA)))) %>%
  dplyr::select(-starts_with("cohort"))
summ_cohorts
# Duplicate the "Both" row so each condition's column can include the overlap
# in its own denominator; id 0 = eczema column, id 1 = asthma column.
# NOTE(review): mutate(id = c(0,0,1,1)) hard-codes a 4-row ordering — it
# breaks silently if any of the three categories is absent.
summ_full_case <- summ_cohorts %>%
  bind_rows(
    filter(summ_cohorts, name == "Both")
  ) %>%
  mutate(id = c(0,0,1,1)) %>%
  pivot_wider(values_from = n, names_from = id) %>%
  rename(eczema_n = `0`, asthma_n = `1`) %>%
  mutate(eczema_pc = eczema_n / sum(eczema_n, na.rm = T),
         asthma_pc = asthma_n / sum(asthma_n, na.rm = T)) %>%
  dplyr::select(var = name, eczema_n, eczema_pc , asthma_n , asthma_pc)
summ_full_case
both_controls <- full_join(eczema_conts, asthma_conts, by = c("contid")) %>%
  arrange(contid)
summ_controls <- both_controls %>%
  count(cohort.x, cohort.y) %>%
  mutate(name = ifelse(!is.na(cohort.x) & !is.na(cohort.y), "Both",
                       ifelse(!is.na(cohort.x) & is.na(cohort.y), "Eczema only",
                              ifelse(is.na(cohort.x) & !is.na(cohort.y), "Asthma only",NA)))) %>%
  dplyr::select(-starts_with("cohort"))
summ_full_cont <- summ_controls %>%
  bind_rows(
    filter(summ_controls, name == "Both")
  ) %>%
  mutate(id = c(0,0,1,1)) %>%
  pivot_wider(values_from = n, names_from = id) %>%
  rename(eczema_n = `0`, asthma_n = `1`) %>%
  mutate(eczema_pc = eczema_n / sum(eczema_n, na.rm = T),
         asthma_pc = asthma_n / sum(asthma_n, na.rm = T)) %>%
  dplyr::select(var = name, eczema_n, eczema_pc , asthma_n , asthma_pc)
summ_full_cont
# full table 1 -------------------------------------------------------------
# Assemble the manuscript's Table 1: "n (%)" strings per variable for eczema
# and asthma cases/controls, plus cohort-overlap rows, written to CSV in a
# hand-specified row order.
new_names <- c("var", "AcontN", "AcontPC", "AcaseN", "AcasePC", "EcontN", "EcontPC", "EcaseN", "EcasePC")
tab1 <- DF_out_all %>%
  dplyr::select(!contains("dif")) %>%
  filter(is.na(age)) %>%
  dplyr::select(-gender, -age)
names(tab1) <- new_names
# totals and age rows are formatted as "n (SD)" (no *100 scaling) ...
tab1_sd <- tab1 %>%
  filter(var %in% c("_FullCount", "age")) %>%
  mutate(
    Ecase = paste0(prettyNum(EcaseN, big.mark = ",", scientific = F), " (", signif(EcasePC,3),")"),
    Econt = paste0(prettyNum(EcontN, big.mark = ",", scientific = F), " (", signif(EcontPC,3),")"),
    Acase = paste0(prettyNum(AcaseN, big.mark = ",", scientific = F), " (", signif(AcasePC,3),")"),
    Acont = paste0(prettyNum(AcontN, big.mark = ",", scientific = F), " (", signif(AcontPC,3),")")
  )
# ... all other rows as "n (%)" with proportions scaled to percentages
tab1_prop <- tab1 %>%
  filter(!var %in% c("_FullCount","TOTAL","age")) %>%
  mutate(
    Ecase = paste0(prettyNum(EcaseN, big.mark = ",", scientific = F), " (", signif(EcasePC*100,3),")"),
    Econt = paste0(prettyNum(EcontN, big.mark = ",", scientific = F), " (", signif(EcontPC*100,3),")"),
    Acase = paste0(prettyNum(AcaseN, big.mark = ",", scientific = F), " (", signif(AcasePC*100,3),")"),
    Acont = paste0(prettyNum(AcontN, big.mark = ",", scientific = F), " (", signif(AcontPC*100,3),")")
  )
# swap chapter codes for readable names (rows without a match keep their code)
tab1 <- tab1_sd %>%
  bind_rows(tab1_prop) %>%
  filter(!var %in% c("W", "V")) %>%
  left_join(names, by = c("var")) %>%
  mutate_at("name", ~ifelse(is.na(.), var, .)) %>%
  dplyr::select(-var, var = name)
summ_full_case <- summ_full_case %>%
  rename(AcaseN = asthma_n, AcasePC = asthma_pc,
         EcaseN = eczema_n, EcasePC = eczema_pc)
summ_full_cont <- summ_full_cont %>%
  rename(AcontN = asthma_n, AcontPC = asthma_pc,
         EcontN = eczema_n, EcontPC = eczema_pc)
# collapse the overlap tables into two rows ("both" / "one") and format
summ_full2 <- summ_full_case %>%
  bind_cols(dplyr::select(summ_full_cont, -var)) %>%
  mutate(arrange = c("both", "one", "one")) %>%
  group_by(arrange) %>%
  summarise_all(~max(., na.rm = T)) %>%
  dplyr::select(-var) %>%
  rename(var = arrange) %>%
  mutate(
    Ecase = paste0(prettyNum(EcaseN, big.mark = ",", scientific = F), " (", signif(EcasePC*100,3),")"),
    Econt = paste0(prettyNum(EcontN, big.mark = ",", scientific = F), " (", signif(EcontPC*100,3),")"),
    Acase = paste0(prettyNum(AcaseN, big.mark = ",", scientific = F), " (", signif(AcasePC*100,3),")"),
    Acont = paste0(prettyNum(AcontN, big.mark = ",", scientific = F), " (", signif(AcontPC*100,3),")")
  )
# spacer rows for visual grouping in the output CSV
blank_rows <- slice(tab1, 1:3) %>%
  mutate_all(~NA)
# NOTE(review): the hard-coded `order` vector must match the exact number
# and order of rows produced above — revisit if any row set changes.
tab1_out <- bind_rows(tab1, summ_full2, blank_rows) %>%
  dplyr::select("var", "Ecase", "Econt", "Acase", "Acont") %>%
  mutate(order = c(1, ## total
                   5:10, ## age groups
                   16:21, ## first 6 chapters
                   13, ## female
                   22:27, ## next 6 chapters
                   14, ## male
                   28:34, ## last chapters
                   11, ## NA age group
                   2:4, ## both cohorts
                   12,15 ## NA rows
                   )) %>%
  arrange(order) %>%
  dplyr::select(-order)
write.csv(tab1_out, here::here("out/table1_v2.csv"))
# Read chapter bar charts -------------------------------------------------
# Supplementary figures: all-ages bar charts of records per Read chapter
# (counts and percentages), plus combined chart+table outputs.
theme_set(theme_ali_noFlip)
figure_df <- DF_out_all %>%
  filter(is.na(age),
         !is.na(var),
         !var %in% c("Female", "Male", "_FullCount", "age", "TOTAL"),
         !grepl("[0-9]]", var))
fig1 <- figure_df %>%
  dplyr::select(var, control_n_Asthma, control_n_Eczema,
                case_n_Asthma, case_n_Eczema) %>%
  pivot_longer(names_to = "cohort", cols = -var) %>%
  separate(col = cohort, into = c("exp", "n", "condition"), sep = "_")
fig1 <- fig1 %>%
  left_join(names, by = "var") %>%
  mutate_at(c("exp", "condition"), ~stringr::str_to_title(.))%>%
  filter(!is.na(name))
plot1_n_full <- ggplot(fig1, aes(x = reorder(name, -value), y = value, colour = exp, fill = exp, group = exp)) +
  geom_col(position = position_dodge(), alpha = 0.2) +
  facet_wrap(~condition) +
  labs(x = "Read Chapter", y = "No. of primary care records", colour = "Exposed", fill = "Exposed") +
  coord_flip() +
  scale_fill_manual(values = c("Control" = "tomato", "Case" = "darkblue")) +
  scale_colour_manual(values = c("Control" = "tomato", "Case" = "darkblue"))
plot1_n_full
dev.copy(pdf, here::here("out/Fig1.pdf"), width = 8, height = 5)
dev.off()
# Same chart using percentage columns.
fig2 <- figure_df %>%
  dplyr::select(var, control_pc_Asthma, control_pc_Eczema,
                case_pc_Asthma, case_pc_Eczema) %>%
  pivot_longer(names_to = "cohort", cols = -var) %>%
  separate(col = cohort, into = c("exp", "n", "condition"), sep = "_")
fig2 <- fig2 %>%
  left_join(names, by = "var") %>%
  mutate_at(c("exp", "condition"), ~stringr::str_to_title(.)) %>%
  filter(!is.na(name))
plot1_pc_full <- ggplot(fig2, aes(x = reorder(name, -value), y = value, colour = exp, fill = exp, group = exp)) +
  geom_col(position = position_dodge(), alpha = 0.2) +
  facet_wrap(~condition) +
  labs(x = "Read Chapter", y = "Percentage of all primary care records by Read chapter", colour = "Exposed", fill = "Exposed") +
  coord_flip() +
  scale_fill_manual(values = c("Control" = "tomato", "Case" = "darkblue")) +
  scale_colour_manual(values = c("Control" = "tomato", "Case" = "darkblue"))
plot1_pc_full
dev.copy(pdf, here::here("out/Fig1_pc.pdf"), width = 8, height = 5)
dev.off()
# Plot chart and table into one object
# Small "N per cohort" table shown under the supplementary bar charts.
SummaryTable <- DF_out_all %>%
  filter(var == "_FullCount") %>%
  dplyr::select(!contains("pc")) %>%
  dplyr::select(!contains("dif")) %>%
  dplyr::select(-gender, -age) %>%
  pivot_longer(cols = -c(var)) %>%
  tidyr::separate(name, into=paste("V",1:3,sep="_")) %>%
  rename(exp = V_1, name = V_3) %>%
  dplyr::select(-V_2) %>%
  mutate_at("value", ~prettyNum(., big.mark = ",", scientific = F)) %>%
  pivot_wider(id_cols = c(var, exp),
              names_from = c(name),
              values_from = value) %>%
  mutate_at("var", ~"N") %>%
  mutate_at("exp", ~str_to_title(.)) %>%
  dplyr::select(Variable = var, Exposed = exp, everything())
# Set theme to allow for plotmath expressions
tt <- ttheme_minimal(core = list(fg_params = list(hjust = 0,
                                                  x = 0.1,
                                                  fontsize = 9)),
                     colhead=list(fg_params = list(hjust = 0,
                                                   x = 0.1,
                                                   fontsize = 10,
                                                   fontface = "bold",
                                                   parse=TRUE)))
SummaryTable[duplicated(SummaryTable[, c('Variable')]),
             c('Variable')] <- ""
SummaryTable
tbl <- tableGrob(SummaryTable, rows=NULL, theme=tt)
pdf(here::here("out/Supp_barchart_full.pdf"), width = 6, height = 6)
grid.arrange(plot1_n_full, tbl,
             nrow=2,
             as.table=TRUE,
             heights=c(5,1))
dev.off()
pdf(here::here("out/Supp_barchart_full_pc.pdf"), width = 6, height = 6)
grid.arrange(plot1_pc_full, tbl,
             nrow=2,
             as.table=TRUE,
             heights=c(5,1))
dev.off()
png(here::here("out/Supp_barchart_full_pc.png"), width = 450, height = 480)
grid.arrange(plot1_pc_full, tbl,
             nrow=2,
             as.table=TRUE,
             heights=c(5,1))
dev.off()
# Layout matrix: counts chart (1) and percentage chart (2) stacked, with the
# summary table (3) in a short bottom strip.
lay <- rbind(
  c(1,1,1),
  c(1,1,1),
  c(1,1,1),
  c(2,2,2),
  c(2,2,2),
  c(2,2,2),
  c(3,3,3)
)
pdf(here::here("out/Supp_barchart_full_both.pdf"), width = 8, height = 8)
grid.arrange(plot1_n_full,plot1_pc_full, tbl,
             nrow=2,
             as.table=TRUE,
             #heights=c(5,1),
             layout_matrix = lay)
dev.off()
|
91c09773a46b749bd4e11b5c0490c37f0204b584
|
d6cf97021abf02e87f87ce7f2509c516d7474950
|
/man/sbc_compare_df_var_perc.Rd
|
d1c8e3c940f698fdf3420ffd0a68ffa0ec07c188
|
[] |
no_license
|
mygeorgyboy/SbcRepTables
|
89ad9cef6a736db670a1de881fff17570e330dc6
|
ced4ac9dc889e43d5d94df39c3917516eac604dc
|
refs/heads/main
| 2023-03-17T01:55:58.467512
| 2023-02-13T22:13:56
| 2023-02-13T22:13:56
| 142,940,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 622
|
rd
|
sbc_compare_df_var_perc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sbc_functions.R
\name{sbc_compare_df_var_perc}
\alias{sbc_compare_df_var_perc}
\title{Compare one column on different data frames}
\usage{
sbc_compare_df_var_perc(
x,
var_name,
transpos_ind = F,
incidence_var = NULL,
showCount = F
)
}
\arguments{
\item{x}{A list contaning the dataframes, names are used as name columns.}
\item{var_name}{variable to be compared}
\item{incidence_var}{If not NULL, it is used to calculate incidence (divide by length(unique(incidence_var)))}
}
\description{
Used to compare several years of a variable
}
|
c67a6ff2d73a323713c40d7231c8578094f3e34a
|
9231e6508f109c8be584041c5678247d9a90daa9
|
/R/rmf-create-dis.R
|
502da65fe6d38fd6ef06b317c144c5081771f2a6
|
[] |
no_license
|
CasillasMX/RMODFLOW
|
ba6c030d2c6c259d3d7aecddec9572e4c57c424e
|
94055e120da756b4ab66c64a257774a8a7d08f09
|
refs/heads/master
| 2022-05-21T10:25:04.130795
| 2022-05-04T09:12:37
| 2022-05-04T09:12:37
| 174,107,033
| 0
| 0
| null | 2019-03-06T08:48:35
| 2019-03-06T08:48:35
| null |
UTF-8
|
R
| false
| false
| 3,085
|
r
|
rmf-create-dis.R
|
#' Create an \code{RMODFLOW} dis object
#'
#' \code{rmf_create_dis} creates an \code{RMODFLOW} dis object.
#'
#' @param nlay number of layers; defaults to 3
#' @param nrow number of rows; defaults to 10
#' @param ncol number of columns; defaults to 10
#' @param nper number of stress periods; defaults to 1
#' @param itmuni time unit; defaults to 1 (seconds)
#' @param lenuni length unit; defaults to 2 (metres)
#' @param laycbd vector of quasi-3D confining bed flags; defaults to 0 for all layers
#' @param delr vector of cell widths along rows; defaults to 100 for all columns
#' @param delc vector of cell widths along columns; defaults to 100 for all rows
#' @param top matrix with the top elevation of layer 1; defaults to 0 for all nrow x ncol cells
#' @param botm 3D array with the bottom elevations of all layers; defaults to nlay layers, equally spaced between 0 (top first layer) and -100 (bottom last layer)
#' @param perlen vector of stress period lengths
#' @param nstp vector of stress period time steps
#' @param tsmult vector of successive time step length multipliers
#' @param sstr character vector with steady state ('SS') or transient ('TS') stress period indicator
#' @return Object of class dis
#' @export
#' @seealso \code{\link{rmf_read_dis}}, \code{\link{rmf_write_dis}} and \url{http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?dis.htm}
rmf_create_dis <- function(nlay = 3,
                           nrow = 10,
                           ncol = 10,
                           nper = 1,
                           itmuni = 1,
                           lenuni = 2,
                           laycbd = rep(0, nlay),
                           delr = rep(100, ncol),
                           delc = rep(100, nrow),
                           top = matrix(0, nrow = nrow, ncol = ncol),
                           botm = array(rep(seq(0,-10 * nlay,length = nlay + 1)[2:(nlay + 1)], each = nrow * ncol), dim = c(nrow, ncol, nlay)),
                           perlen = rep(1, nper),
                           nstp = rep(1, nper),
                           tsmult = rep(1, nper),
                           sstr = c('SS', rep('TS', nper - 1))) {
  # Data set 0 (comments) is not stored here; attach notes to the returned
  # object with comment() if needed.
  # Tag the top elevation (data set 5) and the layer bottoms (data set 6)
  # with the RMODFLOW array classes the rest of the package expects.
  class(top) <- 'rmf_2d_array'
  class(botm) <- 'rmf_3d_array'
  # Assemble the object in MODFLOW DIS-file order (data sets 1 through 7).
  dis <- list(nlay = nlay,     # data set 1: grid dimensions and units
              nrow = nrow,
              ncol = ncol,
              nper = nper,
              itmuni = itmuni,
              lenuni = lenuni,
              laycbd = laycbd, # data set 2: quasi-3D confining bed flags
              delr = delr,     # data set 3: cell widths along rows
              delc = delc,     # data set 4: cell widths along columns
              top = top,       # data set 5: top elevation of layer 1
              botm = botm,     # data set 6: bottom elevations of all layers
              perlen = perlen, # data set 7: stress period definitions
              nstp = nstp,
              tsmult = tsmult,
              sstr = sstr)
  class(dis) <- c('dis', 'rmf_package')
  dis
}
#' @describeIn rmf_create_dis Deprecated function name
#' @export
create_dis <- function(...) {
  # Emit a deprecation warning, then forward all arguments unchanged to the
  # renamed implementation so existing callers keep working.
  .Deprecated(new = "rmf_create_dis", old = "create_dis")
  rmf_create_dis(...)
}
|
800cfa472ec2a0ca429d64904526688d17c3d0bf
|
b08b7e3160ae9947b6046123acad8f59152375c3
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/extend-your-language-3.r
|
f275a8eea13e7adde4b8daec612a89a8aceb825f
|
[] |
no_license
|
dlaststark/machine-learning-projects
|
efb0a28c664419275e87eb612c89054164fe1eb0
|
eaa0c96d4d1c15934d63035b837636a6d11736e3
|
refs/heads/master
| 2022-12-06T08:36:09.867677
| 2022-11-20T13:17:25
| 2022-11-20T13:17:25
| 246,379,103
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 282
|
r
|
extend-your-language-3.r
|
# Four-way conditional ("if2"): expr_list supplies the branch expressions.
# The unnamed element is the both-TRUE branch; elements named "else1",
# "else2" and "else" cover "only condition1", "only condition2" and
# "neither", respectively. Only the selected branch is evaluated.
if2 <- function(condition1, condition2, expr_list = NULL)
{
# Turn the list of (quoted) branch expressions into a call so individual
# branches can be extracted and evaluated lazily.
cl <- as.call(expr_list)
# Map the two conditions to the branch name:
#   both TRUE -> "" (the unnamed element), c1 only -> "else1",
#   c2 only -> "else2", neither -> "else".
cl_name <- if(condition1)
{
if(condition2) "" else "else1"
} else if(condition2) "else2" else "else"
# "" cannot be used for name-based extraction, so resolve the unnamed
# element to its positional index instead.
if(!nzchar(cl_name)) cl_name <- which(!nzchar(names(cl)))
# Evaluate the selected branch and return its value.
eval(cl[[cl_name]])
}
|
67bf082d722f26fb321ecbb1294093705696cff3
|
a58436d809eb1715d6d1b35ca1ba2564ce6082c0
|
/Practical Machine Learning/quiz3.R
|
73addf5269778dd073a18fbb3d95d7fefe692ccb
|
[] |
no_license
|
donelianc/coursera-data-science
|
7801066849b7cf16f96d554139e07c70433228f7
|
62cb135167e9d67ffa900c1c6af15cf7bc814365
|
refs/heads/master
| 2022-09-08T17:34:40.703864
| 2020-06-02T00:33:20
| 2020-06-02T00:33:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,289
|
r
|
quiz3.R
|
# Ian Castillo Rosales
# Quiz 3 - Practical Machine Learning
# 19/01/2015

library(caret)
library(rattle)
library(AppliedPredictiveModeling)
library(randomForest)  # was missing: randomForest() is called in Question 5

# ===== Question 1 =====
# Fit a CART model on the Case == "Train" subset of the segmentation data
# and read predictions for hypothetical cells off the plotted tree.
data(segmentationOriginal)
data <- data.frame(segmentationOriginal)

trainIndex <- data$Case == levels(data$Case)[2]
trainIndex
training <- data[trainIndex, ]
testing <- data[!trainIndex, ]

set.seed(125)
fitModel <- train(Class ~ ., method = "rpart", data = training)
fancyRpartPlot(fitModel$finalModel)

# a. PS
# b. WS
# c. PS
# d. Not possible to predict

# ===== Question 2 =====
# The bias is larger and the variance is smaller.
# Under leave one out cross validation K is equal to the sample size.

# ===== Question 3 =====
# Regression tree on the olive oil data; predict for the column means.
set.seed(123)
library(pgmm)
data(olive)
olive <- olive[,-1]

trainIndex <- createDataPartition(olive$Area, p = 0.7, list = FALSE)
train <- olive[trainIndex, ]
test <- olive[-trainIndex, ]

getModelInfo()

treeModel <- train(Area ~ ., method = "rpart", data = train)
newdata = as.data.frame(t(colMeans(olive)))
fancyRpartPlot(treeModel$finalModel)

# 2.875. It is strange because Area should be a qualitative variable -
# but tree is reporting the average value of Area as a numeric variable
# in the leaf predicted for newdata

# ===== Question 4 =====
# Logistic regression for coronary heart disease; report misclassification
# on a random half split.
library(ElemStatLearn)
data(SAheart)
set.seed(8484)
train = sample(1:dim(SAheart)[1],size=dim(SAheart)[1]/2,replace=FALSE)
trainSA = SAheart[train,]
testSA = SAheart[-train,]

set.seed(13234)
fitModel <- train(chd ~ age + alcohol + obesity + tobacco + typea + ldl,
                  method = "glm", family = "binomial", data = trainSA)

# Fraction of observations whose predicted probability, thresholded at 0.5,
# disagrees with the observed outcome.
missClass = function(values,prediction){sum(((prediction > 0.5)*1) != values)/length(values)}

# Test Set
values <- testSA$chd
predictions <- predict(fitModel, testSA)
missClass(values, predictions)

# Train Set
values <- trainSA$chd
predictions <- predict(fitModel, trainSA)
missClass(values, predictions)

# Test Set Misclassification: 0.31
# Training Set: 0.27

# ===== Question 5 =====
# Random forest on the vowel data; rank predictors by variable importance.
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
vowel.train$y <- factor(vowel.train$y)
vowel.test$y <- factor(vowel.test$y)

set.seed(33833)
rdmforest <- randomForest(y ~ ., data = vowel.train, importance = FALSE)
order(varImp(rdmforest))
# x.2, x.1, x.5, x.6, x.8, x.4, x.9, x.3, x.7,x.10
|
f7510579e8c20d03d0060076935d9c4847be3da3
|
8c775ab452277ecfb111b1e4bce1af6f65b5508c
|
/plot1.R
|
654278f76ab617d9f8860bda0c4ef4fb07a79051
|
[] |
no_license
|
dchow2201/ExData_Plotting1
|
cc6af2a85d4ee2d6f78663fdc2d95764bfded510
|
0e3f9b2e4323cfb798117d512926b150ac476ce2
|
refs/heads/master
| 2021-01-18T05:20:26.182278
| 2014-05-11T20:04:16
| 2014-05-11T20:04:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
plot1.R
|
## Plot 1: histogram of Global Active Power for 1/2/2007 and 2/2/2007.
## Reads "household_power_consumption.txt" (';'-separated, '?' = missing)
## from the hard-coded data directory and writes Plot1.png (480x480) there.
## NOTE(review): fread() comes from data.table, which is presumably loaded
## elsewhere in the project -- confirm before running standalone.
plot1 <- function(){
  # Change into the data directory only for the duration of this call and
  # restore the caller's working directory on exit (even on error), instead
  # of leaking the setwd() side effect as the original did.
  old_wd <- setwd("C:/Users/David/Dropbox/Coursea/Exploratory Data Analysis")
  on.exit(setwd(old_wd), add = TRUE)
  # Read data set as data table, sep by ';', missing values coded as '?',
  # header included.
  DT <- fread("household_power_consumption.txt", sep = ";", header = TRUE,
              na.strings = "?")
  # Keep only the two target dates (d/m/yyyy format in the raw file).
  DT <- DT[DT$Date == "1/2/2007" | DT$Date == "2/2/2007", ]
  # Create histogram and write it to Plot1.png in the data directory.
  png("Plot1.png", width = 480, height = 480)
  hist(as.numeric(DT$Global_active_power),
       main = "Global Active Power",
       xlab = "Global Active power (kilowatts)",
       col = "red")
  dev.off()
}
|
bb0002686cd360eed21486cb003a8374c706fd16
|
03c1325893b502b7855f83287e02e7f14af4f1c7
|
/projects/R/chapter7/fooC2.R
|
92d386410a3f5bd68e24c9270f8d231cc5528ebb
|
[] |
no_license
|
elgeish/Computing-with-Data
|
8562a15a74df6f379296b84e393a358eebf3d3fc
|
5547dc28c027e023783238be78eab216ec5204f4
|
refs/heads/master
| 2023-07-29T06:00:26.625191
| 2023-07-16T00:32:38
| 2023-07-16T00:32:38
| 145,339,359
| 15
| 24
| null | 2023-07-16T00:32:40
| 2018-08-19T21:38:09
|
Java
|
UTF-8
|
R
| false
| false
| 133
|
r
|
fooC2.R
|
# Load the compiled C routine fooC2 from the shared object (path relative to
# the current working directory), then call it on two numeric vectors.
dyn.load("chapter7/fooC2.so") # load the compiled C code
# Two equal-length numeric vectors spanning [0, 1] to pass to the C code.
a = seq(0, 1, length = 10)
b = seq(0, 1, length = 10)
# Invoke the registered C function via the .Call interface.
.Call("fooC2", a, b)
|
6da21970af0359a49431f3ea97037f4d25ebb0de
|
979fd9ba2ebd923cc5a8e84810081da7dfc8e9e6
|
/Lab9/dotchart.r
|
e23b66128317eea29d2366cfa43d09faa2616e67
|
[] |
no_license
|
shivam-raj-4/DSR-LAB
|
b3198b06701905dbd0ccff06f7ef48cfdf1a0a18
|
d6fd8fa23f1044bbfee0552c97eb2d5a6078ed9f
|
refs/heads/master
| 2023-01-30T05:59:54.323531
| 2020-12-12T18:27:11
| 2020-12-12T18:27:11
| 299,533,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 92
|
r
|
dotchart.r
|
# Print the built-in mtcars data set, then draw a Cleveland dot chart of
# miles-per-gallon with one labelled point per car model.
mtcars
dotchart(mtcars$mpg,labels=row.names(mtcars),main="Miles/gallon",cex=0.6,xlab="mpg")
|
faa8201a86eddb6c5330e3065877c2eb1e32b508
|
34289a04a4dd4088079d4598faee0d3d4e41fea0
|
/Script/2-2-Tanimoto_accuracy.r
|
5da83a3a3135b49f4cf89eee4cbc06e6dac4e0db
|
[
"MIT"
] |
permissive
|
david-beauchesne/Predict_interactions
|
6034004897860ced2ed47d4ba503330a5374a7b0
|
bcddde0b04325a7c8a64467d4adcf8f13d7208c5
|
refs/heads/master
| 2020-05-21T20:44:17.918693
| 2018-06-27T18:40:12
| 2018-06-27T18:40:12
| 65,501,383
| 7
| 1
| null | 2016-08-12T14:35:17
| 2016-08-11T21:03:01
|
R
|
UTF-8
|
R
| false
| false
| 3,609
|
r
|
2-2-Tanimoto_accuracy.r
|
# -----------------------------------------------------------------------------
# PROJECT:
# Evaluating the structure of the communities of the estuary
# and gulf of St.Lawrence
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# STEP:
# 2. Evaluation of analysis accuracy + tables and figures
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# FILES:
# Script <- file = 'Script/2-2-Tanimoto_accuracy.r'
# RData <- file = 'RData/Tanimoto_accuracy.RData'
# Figures <- file = ''
# Tables <- file = ''
# -----------------------------------------------------------------------------
load("./RData/Tanimoto_analysis.RData")
Tanimoto_accuracy <- tanimoto_accuracy(Tanimoto_analysis = Tanimoto_analysis)
# Creating an empty plot: draws a frame with no box, annotations, axes or
# points, covering slightly more than the unit square, so the caller can
# layer arrows and custom axes on top. x and y only set up the coordinates.
eplot <- function(x, y) {
plot(x = x, y = y, bty = "n",ann = FALSE,xaxt = "n",yaxt = "n",type = "n",bg = "grey", ylim = c(-0.09,1.09), xlim = c(-0.09,1.09))
}
pdf("./Article/results4.pdf",width=7,height=7)
# Plots
par(mfrow=c(2,2))
# Graph
for(j in 8:11) {
eplot(x = accuracy[, 'wt'], y = accuracy[, j])
par(pch = 21, xaxs = "i", yaxs = "i", family = "serif")
# foodwebs <- to.verify
# col <- c('blue','green','black','red','yellow','darkgrey','orange','brown','grey','green','darkgreen','darkblue')
col <- gray.colors(11, start = 0, end = 0.8, gamma = 2.2, alpha = NULL)
names <- c('TSS','Score y', 'Score -y', 'Accuracy score')
# sample(colours(), length(foodwebs))
# cols <- c("#FF000088","#00FF0088","#0000FF88")
# cols2 <- c("#FF0000","#00FF00","#0000FF")
# Axes
# rect(0, 0, 1, 1, col = "#eeeeee", border = NA)
axis(side = 1, at = seq(0, 1, by = 0.2), labels = seq(0, 1, by = 0.2), las = 1, pos = 0)
axis(side = 2, at = seq(0, 1, by = 0.2), labels = seq(0, 1, by = 0.2), las = 1, pos = -0.09)
axis(side = 3, at = seq(0, 1, by = 0.2), labels = seq(0, 1, by = 0.2), las = 1, pos = 1)
axis(side = 4, at = seq(0, 1, by = 0.2), labels = seq(0, 1, by = 0.2), las = 1, pos = 1.09)
# abline(v = seq(0,6,by = 2), col = "white", lty = 2)
# abline(h = seq(1,2,by = 1), col = "white", lty = 2)
#
mtext(text = names[j-7], side = 2, line = 2, at = 0.5, font = 2, cex = 1)
mtext(text = "Similarity weight", side = 1, line = 2, at = 0.5, font = 2, cex = 1)
for(i in 1:12) {
x <- as.numeric(names(Tanimoto_analysis[[1]]))
y <- numeric()
for(k in 1:6) {
y <- c(y,mean(as.numeric(accuracy[which(accuracy[, 'K'] == i & accuracy[, 'wt'] == names(Tanimoto_analysis[[1]])[k]), j][-5])))
# mean(as.numeric(accuracy[which(accuracy[, 'K'] == i & accuracy[, 'wt'] == names(Tanimoto_analysis[[1]])[k]), j][-5]))
}
points(x = x, y = y, bg = col[i], cex = 1.25, pch = 18, col = col[i])
lines(x = x, y = y, col = col[i], lwd = 0.5)
# points(x = accuracy[which(accuracy[, 'K'] == i), 'wt'], y = accuracy[which(accuracy[, 'K'] == i), j], bg = col[i], cex = 1.25, pch = 18, col = col[i])
# lines(x = accuracy[which(accuracy[, 'K'] == i), 'wt'], y = accuracy[which(accuracy[, 'K'] == i), j], col = col[i], lwd = 0.5)
}
# boxplot(formula = as.numeric(accuracy[, j]) ~ accuracy[, 'wt'],
# data = accuracy,
# boxwex = 0.075,
# axes = FALSE,
# add = TRUE,
# at = seq(0, 1, by = 0.1))
}
dev.off()
|
54d6c12072452c84a75272b035718bef882c59af
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/plsRglm/examples/plots.confints.bootpls.Rd.R
|
fbf2b7c6ef94dd539cd6152feb2e52f528f86cea
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,709
|
r
|
plots.confints.bootpls.Rd.R
|
library(plsRglm)
### Name: plots.confints.bootpls
### Title: Plot bootstrap confidence intervals
### Aliases: plots.confints.bootpls
### Keywords: regression models
### ** Examples
data(Cornell)
modpls <- plsR(Y~.,data=Cornell,3)
# Lazraq-Cleroux PLS (Y,X) bootstrap
set.seed(250)
Cornell.bootYX <- bootpls(modpls, R=250)
temp.ci <- confints.bootpls(Cornell.bootYX,2:8)
plots.confints.bootpls(temp.ci)
plots.confints.bootpls(temp.ci,prednames=FALSE)
plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
main="Bootstrap confidence intervals for the bj")
plots.confints.bootpls(temp.ci,indices=1:3,prednames=FALSE)
plots.confints.bootpls(temp.ci,c(2,4,6),"bottomright")
plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
main="Bootstrap confidence intervals for some of the bj")
temp.ci <- confints.bootpls(Cornell.bootYX,typeBCa=FALSE)
plots.confints.bootpls(temp.ci)
plots.confints.bootpls(temp.ci,2:8)
plots.confints.bootpls(temp.ci,prednames=FALSE)
# Bastien CSDA 2005 (Y,T) bootstrap
Cornell.boot <- bootpls(modpls, typeboot="fmodel_np", R=250)
temp.ci <- confints.bootpls(Cornell.boot,2:8)
plots.confints.bootpls(temp.ci)
plots.confints.bootpls(temp.ci,prednames=FALSE)
plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
main="Bootstrap confidence intervals for the bj")
plots.confints.bootpls(temp.ci,indices=1:3,prednames=FALSE)
plots.confints.bootpls(temp.ci,c(2,4,6),"bottomright")
plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
main="Bootstrap confidence intervals for some of the bj")
temp.ci <- confints.bootpls(Cornell.boot,typeBCa=FALSE)
plots.confints.bootpls(temp.ci)
plots.confints.bootpls(temp.ci,2:8)
plots.confints.bootpls(temp.ci,prednames=FALSE)
## No test:
data(aze_compl)
modplsglm <- plsRglm(y~.,data=aze_compl,3,modele="pls-glm-logistic")
# Lazraq-Cleroux PLS (Y,X) bootstrap
# should be run with R=1000 but takes much longer time
aze_compl.bootYX3 <- bootplsglm(modplsglm, typeboot="plsmodel", R=250)
temp.ci <- confints.bootpls(aze_compl.bootYX3)
plots.confints.bootpls(temp.ci)
plots.confints.bootpls(temp.ci,prednames=FALSE)
plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
main="Bootstrap confidence intervals for the bj")
plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE)
plots.confints.bootpls(temp.ci,c(2,4,6),"bottomleft")
plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
main="Bootstrap confidence intervals for some of the bj")
plots.confints.bootpls(temp.ci,indices=1:34,prednames=FALSE)
plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE,ltyIC=1,colIC=c(1,2))
temp.ci <- confints.bootpls(aze_compl.bootYX3,1:34,typeBCa=FALSE)
plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE)
# Bastien CSDA 2005 (Y,T) Bootstrap
# much faster
aze_compl.bootYT3 <- bootplsglm(modplsglm, R=1000)
temp.ci <- confints.bootpls(aze_compl.bootYT3)
plots.confints.bootpls(temp.ci)
plots.confints.bootpls(temp.ci,typeIC="Normal")
plots.confints.bootpls(temp.ci,typeIC=c("Normal","Basic"))
plots.confints.bootpls(temp.ci,typeIC="BCa",legendpos="bottomleft")
plots.confints.bootpls(temp.ci,prednames=FALSE)
plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
main="Bootstrap confidence intervals for the bj")
plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE)
plots.confints.bootpls(temp.ci,c(2,4,6),"bottomleft")
plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
main="Bootstrap confidence intervals for some of the bj")
plots.confints.bootpls(temp.ci,prednames=FALSE,ltyIC=c(2,1),colIC=c(1,2))
temp.ci <- confints.bootpls(aze_compl.bootYT3,1:33,typeBCa=FALSE)
plots.confints.bootpls(temp.ci,prednames=FALSE)
## End(No test)
|
7d5eba6c9d1ee07175d83914b9570c78c4aed84c
|
ee8862eecd3fb5eb95146fa8cc9f49caf10975d8
|
/cachematrix.R
|
3d8774eb00a7cca655a42a8d970bbcd9164769c2
|
[] |
no_license
|
mikesn922/ProgrammingAssignment2
|
cc6e36a18303ee892121f26938867f9395b16d70
|
9372be8324c48378f751bce310af06e885866858
|
refs/heads/master
| 2021-01-18T07:24:04.378942
| 2015-09-24T03:20:12
| 2015-09-24T03:20:12
| 42,757,957
| 0
| 0
| null | 2015-09-19T03:27:36
| 2015-09-19T03:27:36
| null |
UTF-8
|
R
| false
| false
| 1,795
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Create a matrix object that can cache its inverse
# Build a cache-aware matrix wrapper: a list of closures sharing a matrix
# and its lazily computed inverse. The cached inverse is cleared only when
# a genuinely different matrix is stored via set().
makeCacheMatrix <- function(x = matrix()) {
  # NULL marks "no inverse cached yet" (or cache invalidated).
  cached_inverse <- NULL
  # Store a new matrix; skip entirely (keeping the cache) when the new
  # matrix is identical to the one already held.
  set <- function(y) {
    if (!identical(x, y)) {
      x <<- y
      cached_inverse <<- NULL
    }
  }
  # Return the stored matrix.
  getMatrix <- function() {
    x
  }
  # Record a freshly computed inverse in the cache.
  setInverse <- function(newInverse) {
    cached_inverse <<- newInverse
  }
  # Return the cached inverse (NULL when none is stored).
  getInverse <- function() {
    cached_inverse
  }
  # The returned list of closures is the object's public interface.
  list(set = set,
       getMatrix = getMatrix,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
#This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed),
#then cacheSolve should retrieve the inverse from the cache.
# Return the inverse of the CacheMatrix x, computing it with solve() only
# when no cached value is available. Extra arguments in ... are forwarded
# to solve(). A cache hit is announced with a message and reused directly.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss (fresh object or matrix changed): compute, store, return.
    mat <- x$getMatrix()
    cached <- solve(mat, ...)
    x$setInverse(cached)
    return(cached)
  }
  # Cache hit: reuse the stored inverse.
  message("getting cached data")
  cached
}
|
82ff8b47f23d1d040e439761feb5d1f0b80ba62b
|
4ebdddb600a4468652936161471cc06beee05873
|
/man/CircResidual.Rd
|
6bbe6296f0997f209d284ef62fae16d95a2189ba
|
[] |
no_license
|
cran/CircSpatial
|
52962e4a2d3a2cd4b4a4d5ccd51a38aba7c5ff0d
|
84ed28c8e5378255c0520e2b669f4ddb8dcbfc72
|
refs/heads/master
| 2016-09-05T17:00:21.112703
| 2009-10-27T00:00:00
| 2009-10-27T00:00:00
| 17,717,327
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,935
|
rd
|
CircResidual.Rd
|
\name{CircResidual}
\alias{CircResidual}
\alias{Circular Residuals}
\title{Compute or Plot Circular Residuals}
\description{
Returns the residuals or plots data, model, and residuals with black, tan, and dashed black arrows, respectively.
}
\usage{CircResidual(X, Y, Raw, Trend, Plot=FALSE, AdjArrowLength=1, \dots)}
\arguments{
\item{X}{Vector of horizontal coordinates of observations and trend locations}
\item{Y}{Vector of vertical coordinates of observations and trend locations}
\item{Raw}{Vector of direction of observations in radians}
\item{Trend}{Vector of fitted model direction in radians, NAs not allowed}
\item{Plot}{If FALSE return value. If TRUE, plot data (black), model(tan), and residuals(dashed black) with asp=1.}
\item{AdjArrowLength}{Multiplies length of arrows in plots}
\item{\dots}{Additional parameters for plotting}
}
\note{
To characterize the spatial cosine structure, the first order trend, if any, must be removed via an appropriate fitted model (See Examples).
}
\details{
In the installed folder doc, Section J.3 in Appendices.J.PackageDocumentation provides additional detail and illustrations.\cr\cr
Spatial dependence is encoded in the residual rotations=the rotation in radians from the fitted mean direction to the data direction. The first order trend, if any, must be removed from the data via an appropriate fitted model. Separately fit the cosine and sine components of direction to functions of the spatial coordinates to avoid the cross over problem (direction of 0 deg equals direction of 360 deg). Then, the fitted direction is obtained using R function atan2(fitted sines, fitted cosines). A positive residual rotation indicates that counter clockwise (CCW) rotation is required to rotate the fitted model direction to the data direction. A negative residual rotation indicates that clockwise (CW) rotation is required. CircResidual returns the residuals or plots data, model, and residuals with black, thick tan, and dashed red arrows, respectively.
}
\value{
If Plot=FALSE, value is list of
\item{x}{Vector of horizontal coordinates of residuals}
\item{y}{Vector of vertical coordinates of residuals}
\item{direction}{Vector of direction residuals in radians}
}
\author{Bill Morphet}
\seealso{
\code{\link{CosinePlots}}\cr
\code{\link{KrigCRF}}
}
\examples{
## Model
x1<- 1:11; y1 <- 1:11; y1 <- rep(y1, 11); x1 <- rep(x1, each=11)
model.direction1 <- matrix(data=c(
157, 141, 126, 113, 101, 90, 79, 67, 54, 40, 25, 152, 137, 123, 111,
100, 90, 80, 69, 57, 44, 30, 147, 133, 120, 109, 99, 90, 81, 71, 60,
48, 35, 142, 129, 117, 107, 98, 90, 82, 73, 63, 52, 40, 137, 125,
114, 105, 97, 90, 83, 75, 66, 56, 45, 132, 121, 111, 103, 96, 90,
84, 77, 69, 60, 50, 127, 117, 108, 101, 95, 90, 85, 79, 72, 64, 55,
122, 113, 105, 99, 94, 90, 86, 81, 75, 68, 60, 117, 109, 102, 97,
93, 90, 87, 83, 78, 72, 65, 112, 105, 99, 95, 92, 90, 88, 85, 81,
76, 70, 107, 101, 96, 93, 91, 90, 89, 87, 84, 80, 75), ncol=11,
byrow=TRUE)
model.direction1 <- as.vector(model.direction1)*pi/180
## Plot Trend Model
plot(x1, y1, type="n", xlab="", ylab="", asp=1)
arrow.plot(x1, y1, u=cos(model.direction1), v=sin(model.direction1),
arrow.ex=0.1, xpd=TRUE, true.angle=TRUE, length=.1)
## Compute vM CRF of 121 observations, Rho=sqrt(0.5) so sill about 0.5,
## from GRF (Range=4, spherical covariance).
set.seed(666)
crf1<- SimulateCRF(CircDistr="vM", Rho=sqrt(0.5), Range=4, CovModel=
"spherical", Grid=cbind(x1, y1), OverFit=TRUE)
## Plot CRF
par(mai=c(0.4, 0.35, .25, 0.25))
plot(crf1$x, crf1$y, type="n", xlab="", ylab="", asp=1)
arrow.plot(a1=crf1$x, a2=crf1$y, u=cos(crf1$direction), v=
sin(crf1$direction), arrow.ex=0.1, xpd=TRUE, true.angle=TRUE,
length=.1)
# Make sample
sample.direction1 <- model.direction1 + crf1$direction
## Plot Sample
sample.direction1 <- model.direction1 + crf1$direction
plot(x1, y1, type="n", asp=1)
arrow.plot(a1=x1, a2=y1, u=cos(sample.direction1), v=
sin(sample.direction1), arrow.ex=0.125, xpd=TRUE, true.angle=TRUE,
length=.1)
## Fit An Appropriate Model
FitHoriz1 <- lm(cos(sample.direction1) ~ (x1 + y1))
FitVert1 <- lm(sin(sample.direction1) ~ (x1 + y1))
fitted.direction1 <- atan2(FitVert1$fitted.values,
FitHoriz1$fitted.values)
## Plot Fitted Model
plot(x1, y1, type="n", asp=1, xlab="", ylab="")
arrow.plot(x1, y1, u=cos(fitted.direction1), v=sin(fitted.direction1),
arrow.ex=0.1, xpd=TRUE, true.angle=TRUE, length=.1)
## Compute Residuals
resids1 <- CircResidual(X=x1, Y=y1, Raw=sample.direction1,
Trend=fitted.direction1, Plot=FALSE)
## Plot Sample, Fitted Model, and Residual Rotations
CircResidual(X=x1, Y=y1, Raw=sample.direction1, Trend=fitted.direction1,
Plot=TRUE, xlim=c(3,7), ylim=c(3,7))
}
\keyword{hplot}
\keyword{graphs}
\keyword{spatial}
|
3e17df4f44b3d4833aec831009a086f6662fdc84
|
73d67b4e0e9a461d4fe01a2476d16216b8f0eb76
|
/tests/registry.R
|
1d00e399af9d242e5e1033f4b097bc373643e8bf
|
[] |
no_license
|
cran/proxy
|
ad2d7d787be62bf93a2e22acc9f0e78d4a9df9c7
|
311a8569a534460ef04473ffa442dc7b72ba9a41
|
refs/heads/master
| 2022-06-20T10:04:04.795015
| 2022-06-09T05:15:32
| 2022-06-09T05:15:32
| 17,698,758
| 3
| 4
| null | 2018-05-15T20:44:52
| 2014-03-13T05:53:09
|
R
|
UTF-8
|
R
| false
| false
| 1,882
|
r
|
registry.R
|
##########################
### registry test instances
library(proxy)
.my_check_fun <- function(x) if (x$Z == 999 && x$New2 == 999) stop("No evil allowed!")
## create registry
R <- proxy:::registry(entry_class = "simple.list",
validity_FUN = .my_check_fun)
R
## set fields
R$set_field("X", type = TRUE, is_mandatory = TRUE)
R$set_field("Y", type = "character")
R$set_field("Z", default = 123)
R$get_fields()
## add entries
R$set_entry(names = "test", X = TRUE, Y = "bla")
R$set_entry(names = "test2", X = FALSE, Y = "foo", Z = 99)
R$set_entry(names = "test3", X = FALSE, Y = "bar", Z = "chars")
R$get_entry("test")
R[["test2"]]
R[["test3"]]
## add new field
R$set_field("New")
R$get_field("New")
## change entries
R$modify_entry(names = "test", New = 123)
R$modify_entry(names = "test2", New = "test")
## field check function (checks for strict positive values)
R$set_field("New2", type = "numeric", validity_FUN = function(x) stopifnot(x > 0))
R$set_entry(names = "test5", X = TRUE, New2 = 2)
## add field with fixed alternatives
R$set_field("New3", type = c("A", "B"))
R$get_field("New")
R$set_entry(names = "test6", X = TRUE, New3 = "A")
## print/summary = as.data.frame
R
summary(R)
## seal entries
R$seal_entries()
R$set_field("New4")
R$set_entry(names = "test7", X = TRUE, Y = "bla")
R$delete_entry("test7")
R$modify_entry(names = "test", New4 = "test")
## error cases:
TRY <- function(...) stopifnot(inherits(try(..., silent = TRUE), "try-error"))
TRY(R$set_field("bla", type = "character", default = 123))
TRY(R$set_entry("err1", Y = "bla"))
TRY(R$set_entry("err2", X = "bla"))
TRY(R$set_entry("err3", X = TRUE, New2 = -2))
TRY(R$set_entry("err4", X = TRUE, Z = 999, New2 = 999))
TRY(R$set_entry("err5", X = TRUE, New3 = "C"))
TRY(R$modify_entry("Bla", "New", 123))
TRY(R$modify_entry("X", "Bla", 123))
TRY(R$modify_entry("test","X",TRUE))
|
a2d4d2d560f4c17cfb63e997b903ea37d35f841d
|
fea4cdbc6a13800cba7a68196dc98bdcdaeec8a0
|
/cachematrix.R
|
b403234708cab8264fb9f5bd9489cf78fbff0911
|
[] |
no_license
|
markgreenwood/ProgrammingAssignment2
|
1b00936cb8e1dfe624b96ad0c969de5368bdca92
|
504c7613ad1af09357492e511ff48672f6fd365c
|
refs/heads/master
| 2021-01-16T19:52:03.629356
| 2014-05-23T23:14:00
| 2014-05-23T23:14:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,898
|
r
|
cachematrix.R
|
## makeCacheMatrix creates a special matrix object that can cache its inverse using cacheSolve().
##
## Example:
##
## >>> M <- makeCacheMatrix(matrix(c(1,2,3,2,1,2,3,2,1),c(3,3)))
## >>> cacheSolve(M) # returns the computed value of the inverse
## ...
## ...do some other stuff that leaves M unchanged...
## ...
## >>> cacheSolve(M) # returns the cached value of the inverse
## makeCacheMatrix builds a special matrix object that can cache its
## inverse (computed and stored by cacheSolve). The object is a list of
## closures sharing the matrix x and its cached inverse; any call to set()
## replaces the matrix and drops the stale cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # NULL means "no inverse computed yet" / cache invalidated.
  cached_inv <- NULL
  # Replace the stored matrix and always invalidate the cache, forcing
  # cacheSolve to recompute on its next call.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  # get() is the only way to read the internal matrix.
  get <- function() x
  # setinv()/getinv() write and read the cached inverse.
  setinv <- function(minv) cached_inv <<- minv
  getinv <- function() cached_inv
  # Expose the closures; this list limits access to the defined methods.
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the CacheMatrix x.
## If the inverse has previously been cached and the matrix has not changed
## (via the set() method), it returns the cached value.
## If the inverse has not been cached or the matrix has changed
## (via the set() method), it calculates the inverse, caches it, and returns it.
## Note that additional arguments to the solve() function can be passed to cacheSolve()
## in addition to the CacheMatrix argument x.
## cacheSolve returns the inverse of the CacheMatrix x. When a cached value
## exists (and the matrix has not been replaced via set()), it is returned
## directly; otherwise the inverse is computed with solve(), cached, and
## returned. Additional arguments in ... are passed through to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (is.null(inv)) {
    # No cached value (fresh object or matrix was reset): compute the
    # inverse, store it for next time, and return it.
    inv <- solve(x$get(), ...)
    x$setinv(inv)
    return(inv)
  }
  # Cached value is available and still valid: reuse it.
  message("getting cached inverse")
  inv
}
|
a617ec2226dec83ee4fbb9e5b05fed6afd08f4a3
|
2771dbd8f9e59522f1a61db5a0c505adc4b2a6de
|
/src/viz.R
|
77dda694daf92c4d5765c71ce7dec2b6f73139b2
|
[
"MIT"
] |
permissive
|
susan-fung/tertiary_edu_GDP
|
d7fa7ad631d7c66f7ed76a7ad29e70fdeb391e16
|
c2c36298628e6181d6f1a3e5eba7c56ee76236e4
|
refs/heads/master
| 2021-08-28T16:28:37.998299
| 2017-12-12T18:50:21
| 2017-12-12T18:50:21
| 112,645,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,392
|
r
|
viz.R
|
#! /usr/bin/env Rscript
# Susan Fung, Dec 2017
# This script reads in data from the wrangled data set that contains tertiary
# education, ISO country code, GDP per capita and country latitude and
# longitude data. It makes two visualizations and saves them as png.
# Usage: Rscript viz.R "path to gap_map_edu.csv" "path to save the visualizations"

library(ggplot2)

# Command-line arguments: [1] input CSV path, [2] output directory.
args <- commandArgs(trailingOnly = TRUE)
from <- args[1]
path1 <- args[2]

# Read in data from the wrangled data set
gap_map_edu <- read.csv(from, strip.white = TRUE)

# Scatter plot of GDP per capita vs tertiary education attainment with a
# linear trend line, coloured by continent.
# (Typo "Attaintment" in the displayed titles corrected to "Attainment".)
scatter <- ggplot(gap_map_edu, aes(gdpPercap, tertiary)) +
  geom_point(aes(color = continent), size = 3) +
  geom_smooth(method = "lm", color = "grey") +
  labs(title = "World Tertiary Education Attainment and GDP")
ggsave(filename = "scatter.png", plot = scatter, path = path1)

# Choropleth world map of tertiary education attainment by country, with
# all axis decoration suppressed.
map <- ggplot(gap_map_edu) +
  geom_polygon(aes(x = long, y = lat, group = group, fill = tertiary)) +
  scale_fill_gradient(low = "white", high = "blue",
                      name = "Tertiary Education Attainment %") +
  theme(axis.line = element_blank(),
        axis.text.x = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks = element_blank(),
        axis.title.x = element_blank(),
        axis.title.y = element_blank()) +
  labs(title = "World Tertiary Education Attainment by Country")
ggsave(filename = "map.png", plot = map, path = path1)
|
e719e521ab98a49091a2a66864a90ba6c566b9bb
|
ef6681b93278cbba78b89d5679c88517cdf70305
|
/update_data.R
|
1b954a36cecfc8a8989931a58facb5874daa4088
|
[] |
no_license
|
lehmkudc/mtg-database
|
966f48f28023b828a738c81b065356ea934e59e3
|
43cb3a51df48cac8f2ba7e55ad1fd55ec1ad5ea2
|
refs/heads/master
| 2020-03-09T06:39:04.878364
| 2018-09-01T19:08:31
| 2018-09-01T19:08:31
| 128,599,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,698
|
r
|
update_data.R
|
## update_data.R -- top-of-script setup: load JSON support, project helper
## functions and DB credentials, then close any stale database connections.
rm( list=ls() )  # NOTE(review): wiping the global env in a script is discouraged
library(rjson)
options(stringsAsFactors = FALSE)
source('mtg-database/transaction_functions.R')
source('C:/Users/Dustin/Desktop/config.R')  # NOTE(review): absolute, machine-specific path
source('mtg-database/app_functions.R')
kill_connections()  # defined in a sourced file; presumably closes leftover DB connections -- confirm
## Insert a single card name into the all_cards table.
##
## conn      -- an open DBI database connection
## card_name -- the card's name (plain string)
##
## Uses the driver's string quoting (DBI::dbQuoteString) instead of
## hand-pasting double quotes around the value, so names containing quote
## characters can no longer break the SQL (the original string-built
## query was injectable / fragile).
add_card_name <- function(conn, card_name){
  quoted_name <- dbQuoteString(conn, card_name)
  q <- paste('INSERT INTO all_cards (CardName) VALUES',
             '(', quoted_name, ');')
  invisible(dbSendQuery(conn, q))
}
## Insert a set (name + code) into all_sets unless a set with the same
## name or the same code already exists.
## NOTE(review): the query is built by pasting quoted strings; values
## containing double quotes would break the SQL -- consider dbQuoteString().
add_set_name <- function( conn, set_name, set_code){
# Wrap values in double quotes for direct interpolation into the SQL text
set_name <- paste0( '"', set_name, '"' )
set_code <- paste0( '"', set_code, '"' )
# INSERT ... SELECT ... WHERE NOT EXISTS keeps the insert idempotent
q <- paste( 'INSERT INTO all_sets (SetName, SetCode)',
'SELECT', set_name,',', set_code,
'WHERE NOT EXISTS ( SELECT * FROM all_sets',
'WHERE SetName =', set_name,
'OR SetCode = ', set_code,
') LIMIT 1;' )
dbSendQuery( conn, q)
}
## Record a single printing (card x set x collector number) in all_prints,
## skipping exact duplicates.
##
## conn      -- an open DBI database connection
## card_name -- card name as stored in all_cards
## set_code  -- set code as stored in all_sets
## cnum      -- numeric part of the collector number
## promo     -- 0/1 flag: collector number had a non-numeric suffix
##
## The original version also ran a preliminary SELECT (q1) whose fetched
## result was never used; that dead query (and its dangling result set)
## has been removed. The INSERT text is unchanged.
## NOTE(review): the SQL is still string-built; callers strip embedded
## double quotes upstream, but parameterized queries would be safer.
add_print <- function( conn, card_name, set_code, cnum, promo ){
  # Quote values for direct interpolation into the SQL text
  card_name <- paste0( '"', card_name, '"' )
  set_code <- paste0( '"', set_code, '"')
  # INSERT ... SELECT ... WHERE NOT EXISTS keeps the insert idempotent
  q2 <- paste( 'INSERT INTO all_prints ( SetID, CardID, CNumber, Promo)',
               'SELECT',
               '(SELECT SetID from all_sets WHERE SetCode = ', set_code,
               '),(SELECT CardID from all_cards WHERE CardName =', card_name,
               '),', cnum,',', promo,
               'WHERE NOT EXISTS ( SELECT * FROM all_prints',
               'WHERE SetID = (SELECT SetID from all_sets WHERE SetCode =', set_code,
               ') AND CardID = (SELECT CardID from all_cards WHERE CardName =', card_name,
               ') AND CNumber =', cnum, 'AND Promo =', promo, ');' )
  dbSendQuery( conn, q2 )
}
## Import every printing of the set identified by `set_code` from the
## Scryfall search API into the all_prints table, following pagination.
add_set_prints <- function( conn, set_code ){
# Scryfall search endpoint: all prints belonging to this set
api <- paste0('https://api.scryfall.com/cards/search?q=e:',
set_code, '&unique=prints' )
page <- fromJSON( file = api )
end <- F
while ( end == F ){ # Per Page
N_cards <- length( page$data )
# NOTE(review): 1:N_cards iterates over c(1, 0) if a page is ever
# empty -- seq_len(N_cards) would be safer
for (i in 1:N_cards){ # For Each Card
card <- page$data[[i]]
# x collects: [1] card name, [2] set code, [3] collector number, [4] promo flag
x <- rep( 0, 4 )
if ( length(card$card_faces) == 0 ){
# Single-faced card: normalize the name (ASCII transliteration,
# strip double quotes, keep only the front half of "A // B" names)
x[1] <- iconv( card$name, to = "ASCII//TRANSLIT")
x[1] <- gsub( '"', '', x[1])
x[1] <- gsub( ' //.+', '', x[1])
# Extremely long names are mapped to a short fixed label
if (nchar(x[1]) > 100){
x[1] <- 'Our Market Research'
}
} else {
# Multi-faced card: use the first face's name, normalized the same way
cf <- card$card_faces[[1]]
x[1] <- iconv( cf$name, to = "ASCII//TRANSLIT")
x[1] <- gsub( '"', '', x[1])
x[1] <- gsub( ' //.+', '', x[1])
}
x[2] <- set_code
cn <- card$collector_number
# Digits only -> numeric collector number; any non-digit marks a promo
x[3] <- gsub( '\\D', '', cn, perl=F)
x[4] <- as.integer(length(grep( '\\D', cn, perl=F)) >0)
add_print( conn, x[1], x[2], x[3], x[4] )
}
# Follow Scryfall's pagination until has_more is FALSE
if ( page$has_more == F ){
end <- T
} else {
page <- fromJSON(file= as.character(page$next_page) )
}
}
}
## Sync the local card-name list (card_names.txt) and the all_cards table
## with the full Scryfall card-name catalog. Names are normalized the same
## way as in add_set_prints(): ASCII transliteration, double quotes
## stripped, and only the front face of "A // B" names kept.
##
## Fixes over the original:
##  - seq_along(y) instead of 1:length(y): when there are no new names the
##    old loop ran over c(1, 0) and inserted a bogus "NA" row
##  - removed the unused `yq` variable
##  - appends all new names at once instead of growing `x` inside the loop
update_card_names <- function(){
  all <- fromJSON(file= 'https://api.scryfall.com/catalog/card-names')
  t <- unlist(all$data,use.names = F)
  # Normalize: transliterate to ASCII, strip double quotes, keep only
  # the front face of split-card names
  t1 <- iconv(t, to = "ASCII//TRANSLIT")
  t2 <- gsub( '"', '', t1)
  t3 <- gsub( ' //.+', '', t2)
  t3[ grep( 'Our Market Research', t3) ] <- 'Our Market Research'
  x <- readLines('mtg-database/data_prep/card_names.txt' )
  # Names present on Scryfall but missing locally
  y <- setdiff(t3,x)
  conn <- connect()
  for ( i in seq_along(y) ){
    add_card_name( conn, y[i] )
  }
  x <- c( x, y )
  write.table( x, 'mtg-database/data_prep/card_names.txt', row.names = F, quote = F, col.names = F)
  dbDisconnect( conn )
}
## Sync set metadata with Scryfall: fetch all sets from the API, diff them
## against the local set_names.csv, and for each new set register it in
## the database, import all of its printings, and append it to the file.
##
## Fix over the original: seq_along(new_set_names) instead of
## 1:length(new_set_names) -- when there are no new sets the old loop ran
## over c(1, 0) and processed a bogus NA entry.
update_sets <- function(){
  full_json <- fromJSON(file= 'https://api.scryfall.com/sets')
  data_json <- full_json$data
  # Collect (code, name) for every set reported by the API
  api_sets <- list()
  for ( i in seq_along(data_json) ){
    aset <- data_json[[i]]
    api_sets[[i]] <- c( aset$code, aset$name )
  }
  api_sets <- data.frame( matrix( unlist(api_sets), nrow=length(data_json), byrow =T) )
  colnames(api_sets) <- c( 'SetCode','SetName')
  local_sets <- read.csv( 'mtg-database/data_prep/set_names.csv' )
  # Sets present on Scryfall but missing locally
  new_set_names <- setdiff( api_sets$SetName, local_sets$SetName )
  print( new_set_names )
  for ( i in seq_along(new_set_names) ){
    new_set <- api_sets[ api_sets$SetName == new_set_names[i], ]
    # Add set to database
    conn <- connect()
    add_set_name( conn, new_set$SetName, new_set$SetCode )
    dbDisconnect( conn )
    # Add the set's printings to the database (fresh connection, since
    # the import can be long-running)
    conn <- connect()
    add_set_prints(conn, new_set$SetCode)
    dbDisconnect(conn )
    # Persist progress after every set so an interrupted run can resume
    local_sets <- rbind( local_sets, new_set )
    write.csv( local_sets, 'mtg-database/data_prep/set_names.csv',
               row.names = F, quote = F)
  }
}
#=====================================================================
# update_sets()
# SELECT statement isnt
# conn <- connect()
#
# add_set_name( conn, 'unh' )
#
# dbDisconnect( conn )
#nchar("Duel Decks Anthology: Divine vs. Demonic Tokens")
#write.csv( df, 'mtg-database/data_prep/set_names.csv', row.names = F, quote = F)
#update_card_names()
|
145dfe9e5a5be7e8f1896456c7560e349af57475
|
573934c36f13c6dc1ca579d66cf046ceae5e5cae
|
/man/Detroit.Rd
|
fe2fa96f2b1c93b0af6cfc82fac282b15100190a
|
[] |
no_license
|
cran/bestglm
|
293997ebb536ba44c2c2acff18fca7a2f1a8e6f6
|
d022e904cc278bb87c43b6e3a9fe35d938cd4214
|
refs/heads/master
| 2020-05-30T14:53:52.305806
| 2020-03-13T09:10:02
| 2020-03-13T09:10:02
| 17,694,695
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,875
|
rd
|
Detroit.Rd
|
\name{Detroit}
\alias{Detroit}
\docType{data}
\title{Detroit homicide data for 1961-73 used in the book Subset Regression by
A.J. Miller}
\description{
For convenience we have labelled the input variables 1 through 11 to be
consistent with the notation used in Miller (2002).
Only the first 11 variables were used in Miller's analyses.
The best fitting subset regression with these 11 variables, uses only 3 inputs
and has a residual sum of squares of
6.77 while using forward selection produces a best fit with 3 inputs with
residual sum of squares 21.19.
Backward selection and stagewise methods produce similar results.
It is remarkable that there is such a big difference.
Note that the usual forward and backward selection algorithms may fail since
the linear regression using 11 variables gives essentially a perfect fit.
}
\usage{data(Detroit)}
\format{
A data frame with 13 observations on the following 14 variables.
\describe{
\item{\code{FTP.1}}{Full-time police per 100,000 population}
\item{\code{UEMP.2}}{Percent unemployed in the population}
\item{\code{MAN.3}}{Number of manufacturing workers in thousands}
\item{\code{LIC.4}}{Number of handgun licences per 100,000 population}
\item{\code{GR.5}}{Number of handgun registrations per 100,000 population}
\item{\code{CLEAR.6}}{Percent homicides cleared by arrests}
\item{\code{WM.7}}{Number of white males in the population}
\item{\code{NMAN.8}}{Number of non-manufacturing workers in thousands}
\item{\code{GOV.9}}{Number of government workers in thousands}
\item{\code{HE.10}}{Average hourly earnings}
\item{\code{WE.11}}{Average weekly earnings}
\item{\code{ACC}}{Death rate in accidents per 100,000 population}
\item{\code{ASR}}{Number of assaults per 100,000 population}
\item{\code{HOM}}{Number of homicides per 100,000 of population}
}
}
\details{
The data were orginally collected and discussed by Fisher (1976) but
the complete dataset first appeared in Gunst and Mason (1980, Appendix A).
Miller (2002) discusses this dataset throughout his book.
The data were obtained from StatLib.
}
\source{
\url{http://lib.stat.cmu.edu/datasets/detroit}
}
\references{
Fisher, J.C. (1976). Homicide in Detroit: The Role of Firearms. Criminology,
vol.14, 387-400.
Gunst, R.F. and Mason, R.L. (1980).
Regression analysis and its application: A data-oriented approach.
Marcel Dekker.
Miller, A. J. (2002). Subset Selection in Regression. 2nd Ed.
Chapman & Hall/CRC. Boca Raton.
}
\examples{
#Detroit data example
data(Detroit)
#As in Miller (2002) columns 1-11 are used as inputs
p<-11
#For possible comparison with other algorithms such as LARS
# it is preferable to work with the scaled inputs.
#From Miller (2002, Table 3.14), we see that the
#best six inputs are: 1, 2, 4, 6, 7, 11
X<-as.data.frame(scale(Detroit[,c(1,2,4,6,7,11)]))
y<-Detroit[,ncol(Detroit)]
Xy<-cbind(X,HOM=y)
#Use backward stepwise regression with BIC selects full model
out <- lm(HOM~., data=Xy)
step(out, k=log(nrow(Xy)))
#
#Same story with exhaustive search algorithm
out<-bestglm(Xy, IC="BIC")
out
#But many coefficients have p-values that are quite large considering
# the selection bias. Note: 1, 6 and 7 are all about 5% only.
#We can use BICq to reduce the number of variables.
#The qTable lets you choose q for other possible models,
out$qTable
#This suggests we try q=0.05 or q=0.0005
bestglm(Xy,IC="BICq", t=0.05)
bestglm(Xy,IC="BICq", t=0.00005)
#It is interesting that the subset model of size 2 is not a subset
# itself of the size 3 model. These results agree with
#Miller (2002, Table 3.14).
#
#Using delete-d CV with d=4 suggests variables 2,4,6,11
set.seed(1233211)
bestglm(Xy, IC="CV", CVArgs=list(Method="d", K=4, REP=50))
}
\keyword{datasets}
|
1fddb750da354b276561a736d3b498f9e79e75a2
|
bd9aaf55e5640718b97d4e40ada44271489789ac
|
/How to code.R
|
ca0bff0202163fcdc9f69992bcb48435ef0fbfd4
|
[] |
no_license
|
helenesandsten/BIO302-Biostat-II
|
bb42008a8e0b51be7e2551a6cdc35bcd8ae52ba8
|
8b286c6327e6bb545616de76f9d32328faa051d2
|
refs/heads/master
| 2023-05-08T14:52:07.297153
| 2021-06-04T19:32:03
| 2021-06-04T19:32:03
| 369,514,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,685
|
r
|
How to code.R
|
### BIO302 BIOSTATS II
### SESSION 03
### ABOUT SCRIPTS - DO'S AND DON'TS
#### HOW TO BEGIN WITH YOUR DATA ####
# 1. Open you project where you have all your data
# TIP: Don't use absolute paths (your computer/your specific folder/your
# specific folder 2/etc)
# Use PROJECTS instead (will work on any computer)
# 2. Run the following to extract and name the datafile you want to use:
# datafile <- "datafile.xls" ---> put " " and use tab to browse the files in your project
# Example:
# mtgonga <- "biomass2015.xls"
# 3. If importing several datasheets
# 'excel_sheets(datafile)' will give you the names of your sheets in you datafile
# You dont want to use sheet numbers as they can change
# If you copy your code more than 2 or 3 times when you import your data you should use 'map()'
# dfr_map(functionname) -> will gather/merge the datasheets for you
#### CLEAN STUFF ####
# SECTION HEADINGS ARE IMPORTANT
# Section headings are gathered in a list so it is easy to navigate in your script
# Lines should not have more than 80 characters
#### IMPORTING DATA ####
### .csv files
csvfile <- read_delim(file = "data/filename.csv", delim = ",",
locale = locale(decimal_mark = "."),
skip = 2) %>% # skips rows that are not useful
clean_names() # cleans messy names and makes regular, nice R-names
# you can read csv-files with any program, and they are always easy to import
# use read_delim() and delim = "" when you have a csv-file
# read_delim() better at guessing you columns
### .xls files
# use --- when you have an excel-file
### .txt files
# use read.table() and sep = "" when you have a txt-file
#
#
|
a7effb48fce9abf9be497c873bac1de8db9f8c54
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610382704-test.R
|
9772f920dfb466c0dc42dff63cee136b23d43581
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,073
|
r
|
1610382704-test.R
|
testlist <- list(rates = c(4.65661287307739e-10, NaN, 2.18007543808417e-106, 9.41512232947306e-307, -5.49989590899971e+303, 5.95750278497579e+228, 5.95750278984877e+228, 5.95718054236814e+228, 5.95750278984877e+228, 2.78923464737621e+228, 2.08809742756317e-53, -Inf, 5.95750278984886e+228, 5.95750278984879e+228, 9.16794260529633e-302, 6.64033212419108e-07, 2.68838251105223e-299, 2.78134232313304e-309, NaN, 3.24260049547299e+178, 1.04291228749732e+40, 2.39524526325109e-08, -2.39536098123135e+260, 7.56881123708566e-304, 2.78350675884078e-309, 1.68815768077876e+69, 2.45258486785002e-106, NaN, 4.65661287307739e-10, -4.28076304470408e+221, NaN, 4.77830972972642e-299, -1.50465302948754e-43, NaN, NaN, -1.10232624616122e+45, NaN, NaN, -1.50465358798835e-43, NaN, 1.39217565661517e-308, -1.10232624616122e+45, NaN, -3.29659155073893e+118, NaN, -1.45094702117037e+128, 1.63335743813925e+40, NaN, NaN, 0, NaN, NaN, NaN, -5.17512497239813e+245, NaN, NaN, NaN, NaN, NaN, NaN, 0), thresholds = c(6.02669610142975e+175, 3.67142983950248e+228, NaN, 2.75744471482275e-10, NaN, 3.04660497931763e-05, NaN, NaN, -1.1821097904989e-125, 4.77045810117177e-299, 5.54095540936847e-310, -1.1255454651159e-256, 2.48084742099826e-265, 2.78134232318839e-309, -3.45507950618034e+304, -Inf, 3.23785921002061e-319, NaN, NaN, NaN, -3.23465380139002e+244, -Inf, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -4.95668081957698e+245, -5.82900682308922e+303, -8.52438965099359e+245, -8.72554259640141e+245, -6.17189479381467e+303, 2.70150264931866e-307, 1.71193373937516e-260, 3.94212982853171e-259, 2.82820116936561e-110, 1.36845553155725e-48, -5.17538999909499e+245, NaN, 2.78134231816057e-307, NA, NaN, NaN, 0), x = c(2.33589462432972e-310, 6.21370296905965e-313, NaN, NaN, NaN, NaN, NaN, 1.25986739689518e-321, 0, 0, 0, 0, 0, 3.92660099062145e-310, NaN, 1.25986739689518e-321, 0, 0, 0, 0, 0, 0, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 2.12199579047121e-314, NaN, 1.25986739689518e-321, NaN, NaN, NaN, -2.27610495947272e-159 ))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.