blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1115418185b733b34d4882e911197a0877c8dd2
|
73942206c0ef2162a60c33c7b6bedb7d58c023a2
|
/Treningowe/PART 2/Statystyki graficzne 2.R
|
2fbcef93be9164506635737ecb53d228f1d0f6a6
|
[] |
no_license
|
adrian00b/GNU-R-basics
|
61a659da3c3e27c4b12ddc6d6bde0fc178766121
|
9a5d0cd14f931aa0699c7232edca6824f56ea7c6
|
refs/heads/master
| 2021-07-08T04:25:35.000088
| 2021-01-15T12:39:39
| 2021-01-15T12:39:39
| 221,065,059
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 870
|
r
|
Statystyki graficzne 2.R
|
# Graphical statistics for the daneSoc dataset (Przewodnik package):
# boxplots, a kernel density plot, and car::sp scatterplots.
library(Przewodnik)
library(dplyr)
library(car)

# Side-by-side boxplots of systolic vs diastolic blood pressure.
# Fixed: use TRUE/FALSE instead of the reassignable T/F shortcuts.
boxplot(daneSoc$cisnienie.skurczowe, daneSoc$cisnienie.rozkurczowe,
        names = c('skurczowe', 'rozkurczowe'),
        main = 'Ciśnienie',
        horizontal = FALSE)

# Age distribution split by education level.
boxplot(wiek ~ wyksztalcenie, data = daneSoc,
        col = 'lightgrey',
        ylab = 'Wiek',
        las = 1)

# Kernel density estimate of age (Sheather-Jones bandwidth), then plot it.
# Fixed: removed the stray empty argument that `plot( , main = ...)` passed.
density(daneSoc$wiek, bw = 'SJ') %>%
  plot(main = 'Age density',
       xlab = 'Age')

# car::sp scatterplot; the smooth argument adds a smoothed regression line
sp(daneSoc$cisnienie.skurczowe ~ daneSoc$cisnienie.rozkurczowe,
   xlab = 'rozkurczowe',
   ylab = 'skurczowe',
   main = 'Cisnienie',
   smooth = TRUE,
   pch = 1)

# Same scatterplot conditioned on sex, with a distinct symbol per group.
sp(daneSoc$cisnienie.skurczowe ~ daneSoc$cisnienie.rozkurczowe | daneSoc$plec,
   xlab = 'rozkurczowe',
   ylab = 'skurczowe',
   main = 'Cisnienie',
   smooth = FALSE,
   pch = c(8, 1),
   legend = list(title = 'Plec'))
|
41611dfa2832575b639a5fa2eb4da694a25256c7
|
e56c763de315654d5f4b801eca86b2475b2461d1
|
/presentation.R
|
d2060e806a1fb71f90418b01c95d3bafed975d11
|
[] |
no_license
|
usuallycwdillon/world-systems-project
|
c8385eb1551045a44052bc403cad813945d75131
|
aac50d2d03f575a70ace4dff33a19e08aca0dd63
|
HEAD
| 2016-09-05T19:36:18.383059
| 2014-12-03T17:18:40
| 2014-12-03T17:18:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 522
|
r
|
presentation.R
|
# presentation.R -- build a slidify presentation and publish it to GitHub.
# NOTE(review): the lib.loc paths below are machine-specific (an openSUSE
# box with an R 3.1 user library); this script will not run elsewhere
# without adjusting or removing them.
library("rstudio", lib.loc="~/R/x86_64-suse-linux-gnu-library/3.1")
library("R6", lib.loc="~/R/x86_64-suse-linux-gnu-library/3.1")
library("slidify", lib.loc="/usr/lib64/R/library")
library("slidifyLibraries", lib.loc="/usr/lib64/R/library")
#author("presentation")
# Compile index.Rmd inside the presentation/ directory.
# NOTE(review): setwd() changes global state and is never restored here.
setwd("presentation")
slidify("index.Rmd")
# ```{r simple-plot, fig.height = 6, fig.align = 'center', message = F}
# require(ggplot2)
# qplot(wt, mpg, data = mtcars)
# ```
# Publish the compiled slides to the named GitHub repository.
publish(user="usuallycwdillon", repo="world-systems-project", host="github")
|
0591f9a0a03797fdd43fe7361d04341376f31a62
|
d080effd2b36deb9be0e0e7d9fed15267adccea1
|
/man/reexports.Rd
|
ead1c4bca0050f5f27f28d0f0abbebd202476455
|
[] |
no_license
|
xiangpin/tidytree
|
cf2246e2746a50b493ed0295ad35738917888087
|
ea4bf11d0b2f45312a22afad10c1b3f397248a5c
|
refs/heads/master
| 2023-08-18T02:11:11.826299
| 2023-07-15T07:57:00
| 2023-07-15T07:57:00
| 227,064,787
| 0
| 0
| null | 2019-12-10T08:19:15
| 2019-12-10T08:19:14
| null |
UTF-8
|
R
| false
| true
| 1,369
|
rd
|
reexports.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/reexports.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{as.phylo}
\alias{\%>\%}
\alias{\%<>\%}
\alias{as_tibble}
\alias{tibble}
\alias{filter}
\alias{arrange}
\alias{select}
\alias{rename}
\alias{mutate}
\alias{transmute}
\alias{summarise}
\alias{summarize}
\alias{full_join}
\alias{.data}
\alias{left_join}
\alias{pull}
\alias{unnest}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{ape}{\code{\link[ape]{as.phylo}}}
\item{dplyr}{\code{\link[dplyr]{arrange}}, \code{\link[dplyr]{filter}}, \code{\link[dplyr:mutate-joins]{full_join}}, \code{\link[dplyr:mutate-joins]{left_join}}, \code{\link[dplyr]{mutate}}, \code{\link[dplyr]{pull}}, \code{\link[dplyr]{rename}}, \code{\link[dplyr]{select}}, \code{\link[dplyr]{summarise}}, \code{\link[dplyr:summarise]{summarize}}, \code{\link[dplyr]{transmute}}}
\item{magrittr}{\code{\link[magrittr:compound]{\%<>\%}}, \code{\link[magrittr:pipe]{\%>\%}}}
\item{rlang}{\code{\link[rlang:dot-data]{.data}}}
\item{tibble}{\code{\link[tibble]{as_tibble}}, \code{\link[tibble]{tibble}}}
\item{tidyr}{\code{\link[tidyr]{unnest}}}
}}
|
834703a3161a65557afbc597ffa7b62f8f0da058
|
143b4224abf9cd7fb8fd9b01d7903a3a44a64fed
|
/2) Summary.R
|
aacfd7dc0f411edbc5e6d96e00e398325ec0c743
|
[] |
no_license
|
SebPouliot/E85-survey---data-and-code
|
991097193ba78dc079a713cea0ec65d58d0d0601
|
147b2105481052448429cad59eaff35ea2ede54d
|
refs/heads/master
| 2020-03-18T22:54:16.575559
| 2018-10-15T18:10:02
| 2018-10-15T18:10:02
| 135,365,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,951
|
r
|
2) Summary.R
|
# 2) Summary.R -- summary statistics and figures for the E85 survey data.

# Clear memory
# NOTE(review): rm(list = ls()) in a script is discouraged (it silently
# wipes the user's workspace); kept to preserve the script's behavior.
rm(list = ls())
#########################
###  Load packages    ###
#########################
if (!require("pacman")) install.packages("pacman")
pacman::p_load(dplyr)
pacman::p_load(xlsx)
pacman::p_load(readxl)
pacman::p_load(stringr)
pacman::p_load(tidyverse, ggplot2, actuar, viridis)
pacman::p_load(extrafont) #The package extrafont imports fonts that can be used with ggplot2
#font_import() #To run once under windows to import fonts
loadfonts(device = "win") #Load the fonts
############################
###  Load the dataset    ###
############################
dta <- readRDS("Data/SP-off-RP data.rds")
##################################
### Normalize the price of E85 ###
##################################
# Energy-content adjustment factor used to put the E85 price on a
# gasoline-equivalent basis before any summaries are computed.
adj <- (0.74*2/3 + 0.26*1)/(0.1*2/3 + 0.9*1)
dta <- dta %>%
  mutate(E85P = E85P/adj)
################################
### Table 1: Summary of data ###
################################
# Coerce key columns: E85 becomes numeric, CHOICE becomes character.
# Fixed: tbl_df() is deprecated; as_tibble() is the current equivalent.
dta <- as_tibble(dta) %>%
  mutate(E85 = as.numeric(E85),
         CHOICE = as.character(CHOICE))
#Per station
# Summary by station (STNID): sample size, mean prices, the E85 premium and
# price ratio, the E85 share, and shares of E85 buyers with QD == 0 / DIST == 0.
# NOTE(review): %>% binds tighter than * and /, so
#   share = 100*mean(E85) %>% round(3)
# rounds the proportion to 3 decimals FIRST and then multiplies by 100.
# If the intent was a percentage rounded to 3 decimals it should be
# round(100*mean(E85), 3). The same pattern repeats throughout -- verify.
dta %>% group_by(STNID) %>%
summarize(N = sum(ONES),
E85p = mean(E85P) %>% round(2),
E10p = mean(G1P) %>% round(2),
premium = mean(E85P-G1P) %>% round(2),
ratio = mean((E85P/G1P)) %>% round(2),
ratio_min = min((E85P/G1P)) %>% round(2),
ratio_max = max((E85P/G1P)) %>% round(2),
share = 100*mean(E85) %>% round(3),
drive_0 = 100*(mean(as.numeric(QD==0 & E85 == 1))/mean(E85 == 1)) %>% round(3),
dist_1 = 100*(mean(as.numeric(DIST ==0 & E85 == 1))/mean(E85 == 1)) %>% round(3))
#Per location
# Derive a location code (STNST) from substrings of the station ID, then
# produce the same summary per location.
# NOTE(review): this block uses mean(E85 == 1) where the per-station block
# used mean(E85); these agree only if E85 is coded 0/1 -- verify the coding.
dta %>% mutate(STNST = ifelse(str_detect(STNID, "DM"), "DM", ifelse(str_detect(STNID, "CS"), "CS", ifelse(str_detect(STNID, "LR"), "LR", ifelse(str_detect(STNID, "LA"), "LA", ifelse(str_detect(STNID, "TS"), "TS", "SAC")))))) %>%
group_by(STNST) %>%
summarize(N = sum(ONES),
E85p = mean(E85P) %>% round(2),
E10p = mean(G1P) %>% round(2),
premium = mean(E85P-G1P) %>% round(2),
ratio = mean((E85P/G1P)) %>% round(2),
ratio_min = min((E85P/G1P)) %>% round(2),
ratio_max = max((E85P/G1P)) %>% round(2),
share = 100*mean(E85 == 1) %>% round(3),
drive_0 = 100*(mean(as.numeric(QD==0 & E85 == 1))/mean(E85 == 1)) %>% round(3),
dist_1 = 100*(mean(as.numeric(DIST ==0 & E85 == 1))/mean(E85 == 1)) %>% round(3))
#Per retailer
# Same summary grouped by retailer (STNRET).
dta %>% group_by(STNRET) %>%
summarize(N = sum(ONES),
E85p = mean(E85P) %>% round(2),
E10p = mean(G1P) %>% round(2),
premium = mean(E85P-G1P) %>% round(2),
ratio = mean((E85P/G1P)) %>% round(2),
ratio_min = min((E85P/G1P)) %>% round(2),
ratio_max = max((E85P/G1P)) %>% round(2),
share = 100*mean(E85 == 1) %>% round(3),
drive_0 = 100*(mean(as.numeric(QD==0 & E85 == 1))/mean(E85 == 1)) %>% round(3),
dist_1 = 100*(mean(as.numeric(DIST ==0 & E85 == 1))/mean(E85 == 1)) %>% round(3))
#Total
# Overall (ungrouped) version of the same summary.
dta %>%
summarize(N = sum(ONES),
E85p = mean(E85P) %>% round(2),
E10p = mean(G1P) %>% round(2),
premium = mean(E85P-G1P) %>% round(2),
ratio = mean((E85P/G1P)) %>% round(2),
ratio_min = min((E85P/G1P)) %>% round(2),
ratio_max = max((E85P/G1P)) %>% round(2),
share = 100*mean(E85 == 1) %>% round(3),
drive_0 = 100*(mean(as.numeric(QD==0 & E85 == 1))/mean(E85 == 1)) %>% round(3),
dist_1 = 100*(mean(as.numeric(DIST ==0 & E85 == 1))/mean(E85 == 1)) %>% round(3))
# Raw counts of fuel choices and of zero-drive / zero-distance observations.
# NOTE(review): N_no and N_0 are each defined twice inside this summarize;
# the later definitions overwrite the earlier ones, so the unconditional
# QD==0 / DIST==0 counts are lost -- these were probably meant to have
# distinct names. The total 881 is also hard-coded -- verify against nrow(dta).
dta %>%
summarize(N_E85 = sum(E85 == 1),
N_E10 = 881- sum(E85 == 1),
N_no = sum(as.numeric(QD==0)),
N_0 = sum(as.numeric(DIST==0)),
N_no = sum(as.numeric(QD==0 & CHOICE == "E85")),
N_0 = sum(as.numeric(DIST==0 & CHOICE == "E85")))
###############################################################################
### Table 2: Responses to questions to flex motorists who refueled with E10 ###
###############################################################################
# Count and share of E10 flex motorists answering question A affirmatively.
e10_buyers <- dta %>% dplyr::filter(E85 == 0)
e10_buyers %>%
  summarize(N_QA = sum(QA==1, na.rm = TRUE),
            QA = mean(QA==1, na.rm = TRUE))
# Among those with QA == 1: counts and shares of affirmative QB and QC.
qa_yes <- e10_buyers %>% dplyr::filter(QA ==1)
qa_yes %>%
  summarize(N_QB = sum(QB==1, na.rm = TRUE),
            N_QC = sum(QC==1, na.rm = TRUE),
            QB = mean(QB==1, na.rm = TRUE),
            QC = mean(QC==1, na.rm = TRUE))
# ...and the complementary negative answers (QB == 0, QC == 0).
qa_yes %>%
  summarize(N_QB = sum(QB==0, na.rm = TRUE),
            N_QC = sum(QC==0, na.rm = TRUE),
            QB = mean(QB==0, na.rm = TRUE),
            QC = mean(QC==0, na.rm = TRUE))
###############################################################################
### Table 3: Responses to fuel opinion questions by region and fuel choice  ###
###############################################################################
# The five opinion questions (columns Q5..Q9) share one 4-option coding:
# 1 = ethanol, 2 = gasoline, 3 = no difference, 4 = don't know.
# The original script repeated the same 5-question summarize block four
# times (E10/E85 x by-region/total); it is factored into helpers below.
opinion_questions <- c(
  Q5 = "Which fuel is better for the environment?",
  Q6 = "Which fuel is better for your engine?",
  Q7 = "Which fuel is better for the economy?",
  Q8 = "Which fuel is better for national security?",
  Q9 = "Which fuel yields more miles per gallon?"
)

# Tabulate answer shares (rounded %) for one question column.
#   data      : survey data already filtered to one fuel choice
#   q_col     : name of the question column, e.g. "Q5"
#   q_text    : question wording reported in the table
#   by_region : if TRUE, tabulate per station state (STNST); else overall
# NOTE(review): the count column uses sum(Ones) as in the original; Table 1
# used a column named ONES -- confirm both columns exist in the data.
opinion_table <- function(data, q_col, q_text, by_region) {
  if (by_region) data <- data %>% group_by(STNST)
  data %>%
    summarize(Q = q_text,
              N = sum(Ones),
              Eth = (100*sum(as.numeric(.data[[q_col]] == 1))/N) %>% round(),
              Gas = (100*sum(as.numeric(.data[[q_col]] == 2))/N) %>% round(),
              ND = (100*sum(as.numeric(.data[[q_col]] == 3))/N) %>% round(),
              DK = (100*sum(as.numeric(.data[[q_col]] == 4))/N) %>% round())
}

# Stack the five question tables for one fuel choice (E85 == 0 or 1).
opinion_tables <- function(data, e85_value, by_region) {
  data <- data %>% dplyr::filter(E85 == e85_value)
  Map(function(q_col, q_text) opinion_table(data, q_col, q_text, by_region),
      names(opinion_questions), opinion_questions) %>%
    bind_rows() %>%
    data.frame()
}

### E10 motorists: per region, then totals ###
opinion_tables(dta, 0, by_region = TRUE)
opinion_tables(dta, 0, by_region = FALSE)
### E85 motorists: per region, then totals ###
opinion_tables(dta, 1, by_region = TRUE)
opinion_tables(dta, 1, by_region = FALSE)
###################
### Amount paid ###
###################
#Setup for the figures: shared ggplot theme (Times New Roman, minimal frame)
mytheme <- theme(text=element_text(size=10, family = "Times New Roman", colour = 'black'),
                 axis.line.y = element_blank(),
                 panel.border = element_blank(),
                 panel.background = element_blank(),
                 plot.margin=unit(c(0.5,0.5,0.5,0.5), "lines"),
                 legend.title=element_blank(),
                 legend.text = element_text(size = 8, colour = 'black', family = "Times New Roman"),
                 legend.background=element_blank(),
                 legend.key=element_blank(),
                 legend.position= "none",
                 legend.direction="vertical")
### Check expenditures near round numbers based on recorded expenditures ###
ggplot(data = dta, aes(x = EXP)) +
  geom_histogram(bins = 150, color = "black", fill = "white") +
  scale_x_continuous(breaks=seq(0,90,by=5), labels=seq(0,90,by=5)) +
  mytheme
# Flag expenditures within +/- $0.05 of a $5 multiple between $5 and $60.
# Fixed: the first band read "EXP > 4.95 & EXP < 10.05", which flagged the
# whole $5-$10 range as round; it now matches the +/-$0.05 pattern.
# Fixed: NA_real_ replaces as.numeric("NA"), which coerces with a warning.
dta <- dta %>% mutate(exp_round = case_when(EXP > 4.95 & EXP < 5.05 ~ 1,
                                            EXP > 9.95 & EXP < 10.05 ~ 1,
                                            EXP > 14.95 & EXP < 15.05 ~ 1,
                                            EXP > 19.95 & EXP < 20.05 ~ 1,
                                            EXP > 24.95 & EXP < 25.05 ~ 1,
                                            EXP > 29.95 & EXP < 30.05 ~ 1,
                                            EXP > 34.95 & EXP < 35.05 ~ 1,
                                            EXP > 39.95 & EXP < 40.05 ~ 1,
                                            EXP > 44.95 & EXP < 45.05 ~ 1,
                                            EXP > 49.95 & EXP < 50.05 ~ 1,
                                            EXP > 54.95 & EXP < 55.05 ~ 1,
                                            EXP > 59.95 & EXP < 60.05 ~ 1,
                                            is.na(EXP) ~ NA_real_,
                                            TRUE ~ 0))
summary(dta$exp_round)
#Calculate expenditure from volumes and prices when not recorded directly.
# Fixed: removed the duplicated 'CHOICE == "Gas3"' arm and replaced
# as.numeric("NA") with NA_real_.
dta <- dta %>% mutate(exp_calc = case_when(!is.na(EXP) ~ EXP,
                                           CHOICE == "E85" ~ VOL*E85P,
                                           CHOICE == "Gas1" ~ VOL*G1P,
                                           CHOICE == "Gas2" ~ VOL*G2P,
                                           CHOICE == "Gas3" ~ VOL*G3P,
                                           CHOICE == "E30" ~ VOL*G2P,
                                           CHOICE == "Gas1CSH" ~ EXP,
                                           CHOICE == "Gas2CW" ~ EXP,
                                           CHOICE == "Prem" ~ EXP,
                                           TRUE ~ NA_real_),
                      exp_calc = round(exp_calc,1),
                      choice = case_when(CHOICE == "E85" ~ "E85",
                                         TRUE ~ "E10"))
# Compare recorded and calculated. Use calculated because fewer missing observations
summary(dta[,c("EXP", "exp_calc")])
#################################
### Expenditures - figure D.1 ###
#################################
# Histogram of calculated expenditure, faceted by fuel choice (E85 vs E10),
# saved to Figures/Figure D1.png at 600 dpi.
exp_plot <- ggplot(data = dta, aes(x = exp_calc, fill = choice)) +
geom_histogram(bins = 150, color = "black") +
facet_grid(choice ~.) +
scale_x_continuous(breaks=seq(0,100,by=5), labels=seq(0,100,by=5)) +# ggtitle("Fuel expenditure") +
ylab("Count") +
xlab("Expenditure ($)") +
mytheme
ggsave(exp_plot, filename = "Figures/Figure D1.png", width = 6, height = 4, units = "in", dpi = 600)
#####################################
### Volume purchased - figure D.2 ###
#####################################
# Same layout for purchased volume, saved to Figures/Figure D2.png.
vol_plot <- ggplot(data = dta, aes(x = VOL, fill = choice)) +
geom_histogram(bins = 150, color = "black") +
facet_grid(choice ~.) +
scale_x_continuous(breaks=seq(0,100,by=5), labels=seq(0,100,by=5)) + #ggtitle("Volume purchased") +
ylab("Count") +
xlab("Volume (gallon)") +
mytheme
ggsave(vol_plot, filename = "Figures/Figure D2.png", width = 6, height = 4, units = "in", dpi = 600)
######################################################################################
### Same histograms but only for buyers of E85  - not shown in paper and not saved ###
######################################################################################
# Split E85 buyers by whether they reported zero driving distance.
dta <- dta %>% mutate(distance = case_when(DIST==0 ~ "Dist = 0",
                       TRUE ~ "Dist > 0"))
# Expenditure histogram for E85 buyers only, faceted by the distance flag.
exp_plot <- ggplot(data = dta %>% dplyr::filter(choice == "E85"), aes(x = exp_calc, fill = distance)) +
geom_histogram(bins = 150, color = "black") +
facet_grid(distance ~.) +
scale_x_continuous(breaks=seq(0,100,by=5), labels=seq(0,100,by=5)) +
ggtitle("Fuel expenditure - E85 motorists") +
ylab("Count") +
xlab("Expenditure ($)") +
mytheme
exp_plot
# Volume histogram for E85 buyers only, same faceting.
vol_plot <- ggplot(data = dta %>% dplyr::filter(choice == "E85"), aes(x = VOL, fill = distance)) +
geom_histogram(bins = 150, color = "black") +
facet_grid(distance ~.) +
scale_x_continuous(breaks=seq(0,100,by=5), labels=seq(0,100,by=5)) +
ggtitle("Volume purchased - E85 motorists") +
ylab("Count") +
xlab("Volume (gallon)") +
mytheme
vol_plot
|
8546a76cda791bb2d3c31b29b51ca91f99248f7b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/regclass/examples/see_models.Rd.R
|
49c9a479fcc10119a8653ac66f53be9035e805a0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 500
|
r
|
see_models.Rd.R
|
# Extracted example code for regclass::see_models (from see_models.Rd).
library(regclass)
### Name: see_models
### Title: Examining model AICs from the "all possible" regressions
###   procedure using regsubsets
### Aliases: see.models see_models
### ** Examples
data(SALARY)
# Exhaustive best-subsets search over Salary with all two-way interactions,
# keeping the best 4 models of each size.
ALL <- regsubsets(Salary~.^2,data=SALARY,method="exhaustive",nbest=4)
see_models(ALL)
#By default, regsubsets considers up to 8 predictors, here it looks at up to 15
data(ATTRACTF)
ALL <- regsubsets(Score~.,data=ATTRACTF,nvmax=15,nbest=1)
# Report the top 5 models ranked by corrected AIC.
see_models(ALL,aicc=TRUE,report=5)
|
5327b27f84c1fe98f7e54119ffbd9d6477d52933
|
27b622ba3d99a98cd886c75fa321592c387b42ef
|
/Wyjsciowki/W2/gr1/NowikowskiAndrzej/sample/app.R
|
0079df15b40545ae5a0dc56d30063b9fb7ff2159
|
[] |
no_license
|
Kaketo/2020Z-ProgramowanieWR
|
76c560d06b1705a6ba8ab904bbab7fafba035d99
|
d4c3b8654666716ac93f7c55c841e0f79fc9cd01
|
refs/heads/master
| 2020-08-05T15:44:34.804707
| 2020-01-30T07:13:43
| 2020-01-30T07:13:43
| 212,601,214
| 1
| 0
| null | 2020-01-30T07:09:58
| 2019-10-03T14:30:35
|
HTML
|
UTF-8
|
R
| false
| false
| 2,020
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(DT)

# UI: a file-upload input on the left; the uploaded CSV as an editable
# table, its summary, and the R session info on the right.
ui <- fluidPage(
  titlePanel("Old Faithful Geyser Data"),
  sidebarLayout(
    sidebarPanel(
      # sliderInput("bins","Number of bins:",min = 1,max = 50,value = 30),
      fileInput("my_csv", "Tutaj podaj proszę csv")
    ),
    mainPanel(
      # plotOutput("distPlot"),
      # Fixed: the original called utils::head() on these strings, which
      # only echoed the text; h4() renders an actual heading element.
      h4("MY CSV Info"),
      dataTableOutput("tabelka"),
      h4('CSV Summary'),
      tableOutput("csv_summary"),
      h4("Session Info"),
      verbatimTextOutput("urlText")
    )
  )
)

# Server: parse the uploaded CSV once and share it between outputs.
server <- function(input, output, session) {
  # Reactive CSV read (the original re-read the file in every render).
  # req() suspends the outputs until a file has been uploaded.
  my_data <- reactive({
    req(input$my_csv)
    as.data.frame(read.csv(input$my_csv$datapath, header=TRUE))
  })

  output$tabelka <- renderDataTable({
    DT::datatable(my_data(), editable = TRUE)
  })

  output$csv_summary <- renderTable({
    summary(my_data())
  })

  # https://shiny.rstudio.com/articles/client-data.html
  # Fixed: renderPrint() captures the printed form of sessionInfo();
  # paste(sep = "\n", sessionInfo()) only deparsed the list elements.
  output$urlText <- renderPrint({
    sessionInfo()
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
8613ab7b2ed3d7bcad261f9d1535aaebcf411f97
|
6eeffc5b83a920bc7f357af3312970fa0a5a84d3
|
/man/ols_prep_rstudlev_data.Rd
|
6d976ce110b9e57b772e73df58d612f3d084beee
|
[] |
no_license
|
cran/olsrr
|
81fe16ddb7b43e33254a7262283d39e37ce4a533
|
215958dfa67b03943c34a12cf6e3774d628fcda7
|
refs/heads/master
| 2021-06-24T07:12:32.809254
| 2020-02-10T11:00:02
| 2020-02-10T11:00:02
| 90,952,056
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 500
|
rd
|
ols_prep_rstudlev_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ols-plots-data.R
\name{ols_prep_rstudlev_data}
\alias{ols_prep_rstudlev_data}
\title{Studentized residual vs leverage plot data}
\usage{
ols_prep_rstudlev_data(model)
}
\arguments{
\item{model}{An object of class \code{lm}.}
}
\description{
Generates data for studentized residual vs leverage plot.
}
\examples{
model <- lm(read ~ write + math + science, data = hsb)
ols_prep_rstudlev_data(model)
}
|
ac22d5b0f5a48f239db38177a410db2d85937a0e
|
7ab0b6d8bad7e7824528d1f05c10792759cabab1
|
/man/department_quotes.Rd
|
ccba087271d30631db3f33ecce1478d6ecd5a054
|
[] |
permissive
|
tbradley1013/dundermifflin
|
a4711e3cd02d494885a30a34ddca877f04cb2ff9
|
691045dbfe6ab526caa4db4240ea378f81f5262d
|
refs/heads/master
| 2020-05-01T06:33:37.546458
| 2020-02-27T13:18:15
| 2020-02-27T13:18:15
| 177,332,858
| 20
| 1
|
MIT
| 2020-02-04T14:45:54
| 2019-03-23T19:42:59
|
R
|
UTF-8
|
R
| false
| true
| 614
|
rd
|
department_quotes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/departments.R
\name{department_quotes}
\alias{department_quotes}
\alias{sales}
\alias{accounting}
\alias{customer_service}
\alias{hr}
\alias{reception}
\alias{corporate}
\title{Get quotes from different departments of the office}
\usage{
sales(...)
accounting(...)
customer_service(...)
hr(...)
reception(...)
corporate(...)
}
\arguments{
\item{...}{arguments to be passed to \link[dundermifflin]{get_quote}}
}
\description{
Get quotes from different departments of the office
}
\examples{
\dontrun{
sales()
accounting()
}
}
|
068c71eefa30fa78878a9b7978c270cef5aff2d9
|
2c38fc71287efd16e70eb69cf44127a5f5604a81
|
/inst/pipelines/_targets_r.R
|
61c9de9106602159dd4d5cdaaea1520db11d7885
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ropensci/targets
|
4ceef4b2a3cf7305972c171227852338dd4f7a09
|
a906886874bc891cfb71700397eb9c29a2e1859c
|
refs/heads/main
| 2023-09-04T02:27:37.366455
| 2023-09-01T15:18:21
| 2023-09-01T15:18:21
| 200,093,430
| 612
| 57
|
NOASSERTION
| 2023-08-28T16:24:07
| 2019-08-01T17:33:25
|
R
|
UTF-8
|
R
| false
| false
| 747
|
r
|
_targets_r.R
|
# Generated by targets: do not edit by hand
library(targets)
# Source every *.R file under the pipeline's "globals" support directory,
# evaluating each file in the pipeline's target environment
# (targets::tar_option_get("envir")).
lapply(
X = list.files(
path = file.path(targets::tar_path_script_support(), "globals"),
pattern = "\\.R$",
full.names = TRUE,
all.files = TRUE,
recursive = TRUE
),
FUN = function(path) {
eval(
expr = parse(file = path, keep.source = TRUE),
envir = targets::tar_option_get(name = "envir")
)
}
)
# Same treatment for the "targets" support directory: each file is parsed
# (keeping source references) and evaluated in the target environment.
lapply(
X = list.files(
path = file.path(targets::tar_path_script_support(), "targets"),
pattern = "\\.R$",
full.names = TRUE,
all.files = TRUE,
recursive = TRUE
),
FUN = function(path) {
eval(
expr = parse(file = path, keep.source = TRUE),
envir = targets::tar_option_get(name = "envir")
)
}
)
|
df14280deed5134b1b846a3c90849d209b3d7a68
|
0a1bc1eb634a00bc9d540ef166316f1920ec2df8
|
/man/search_criteria.Rd
|
cfbada3bbc5e11634ba2d7a6ff9a421281cfe788
|
[] |
no_license
|
gorcha/vision6
|
58809b277e3f8052ad348d5d1755d2a776ba5890
|
e3d2a1036bbe88d0237f5686b2de450c7bd563b9
|
refs/heads/master
| 2021-06-03T05:32:29.461543
| 2019-01-31T00:33:49
| 2019-01-31T00:33:49
| 26,483,732
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,390
|
rd
|
search_criteria.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_criteria.R
\docType{class}
\name{search_criteria}
\alias{search_criteria}
\title{Vision6 Search Criteria}
\description{
Vision6 Search Criteria
}
\section{Introduction}{
The API includes search methods for most of its objects. Overall, the search
methods for each object work in the same way. Each search method looks
through groups of its object type (for example, Contacts within a List or all
Folders in the system) to find those that match user-supplied criteria,
and returns data for each matching item. If no search criteria are supplied, the
search methods return data arrays for all items.
}
\section{When to Use Get and Count Methods}{
Most search methods have corresponding get methods and count methods.
If you want to retrieve a single object and you know its ID, use the get
method. The get method searches for one record and is much faster.
If you only need to know the number of objects, and not the details of each
object, use the Count method. It can use the same search criteria and spends
much less time transferring data.
}
\section{Obtaining IDs}{
You can use the basic search method for each type of object to obtain its ID
for use in another method. If possible, use information that is unique to the
object so that you don't get more than a few records returned. For example,
if you know a Contact's List ID and Email address, you can use searchContacts
to search the List for the Email address and get back a few Contacts (or one
Contact if there are no duplicates) and find the Contact ID in the data
returned. This is true of any object. For example, if you know the name of a
Message but not its ID, you can use searchMessages to search on the name. The
method responds with no more than a few matching Messages, from which you can
find the Message you are looking for, and use its ID.
}
\section{Optional Parameters}{
Many of the search methods include optional limit, sort_by, sort_order, and
offset parameters. You can use the first three individually. limit is the
maximum number of items returned from the results. sort_by is the name of the
value to sort the results by before returning them. If you do not specify a
sort_by, the search method returns results unsorted. The value for sort_order
is only significant if you specify sort_by. sort_order is descending by
default, but you can also specify ascending. offset specifies how many
results to skip before returning them.
}
\section{Pagination}{
You can use limit, sort_by, sort_order, and offset together for the purpose
of paginating your results. Most likely you want pages to appear in some
significant order and direction, such as ascending by last_name. In this
case, you set sort_by to last_name and sort_order to ascending. Sorting takes
place in the system after it retrieves results. Then it narrows the results
using offset and limit and returns them. To show 20 of the results in
last_name order per page, set up a series of searches with limit and offset
containing the following values:
\itemize{
\item 1st page: limit=20, offset=0
\item 2nd page: limit=20, offset=20
\item 3rd page: limit=20, offset=40
}
and so on.
}
\section{Using Search Criteria}{
The search methods use search arrays to specify search criteria. Search
arrays have the format:
\code{(array(name, relational operator, value))}
For example:
\code{(array('estatement', 'not', '1'))}
All search arrays are contained within multidimensional arrays (container
arrays that contain one or more arrays). The following example shows how a
container array holding two search arrays uses the searchLists method. Since
multiple search arrays are ANDed, this example looks for Contacts in List
12345 who want to buy a house.
}
\section{Valid Relational Operators}{
\tabular{ll}{
Operator \tab Data Type \cr
exactly \tab string/int \cr
not \tab string/int \cr
greater_than \tab int \cr
less_than \tab int \cr
in \tab comma separated strings and/or ints \cr
starts_with \tab string/int \cr
ends_with \tab string/int \cr
contains \tab string/int \cr
}
If an Operator is invalid, it defaults to exactly.
}
\seealso{
\code{\link{searchContacts}}
\code{\link{searchLists}}
\code{\link{searchFolders}}
\code{\link{searchFiles}}
\code{\link{searchPreviousUnsubscribers}}
}
|
fb114921c85d80da6018c8de58c8ba9a50e946a9
|
12bceb84f607a7de1d7f30f8f31aef8a2a794491
|
/Project 2.R
|
1c156b6816dc03f840438ecf3515db4195c8c4ff
|
[] |
no_license
|
K-Shao/RML
|
74be4cd00bb8aeb60f33c0c754017e736176a385
|
43c8016bb6edba2f386c6c61b4d3e6dc1e9e7c94
|
refs/heads/master
| 2020-03-16T23:24:00.420221
| 2018-05-28T00:58:18
| 2018-05-28T00:58:18
| 133,075,699
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,605
|
r
|
Project 2.R
|
library("png")
library(class)
library(magick)
library(randomForest)
library(MASS)
library(party)
#GENERAL
# Load MNIST train/test CSVs; columns 2:785 are the 28x28 pixel values.
rawtrain = read.csv(("C:/Users/imjef/Documents/Schoolwork/Machine Learning/Project 2/mnist_train.csv"))
# Rename pixel columns to "row.col" coordinates (e.g. "3.17").
for(i in 1:784) {
colnames(rawtrain)[i+1] = paste(toString(ceiling(i/28)),toString(i%%28),sep = ".")
}
colnames(rawtrain)[1] = "true"
#true = rawtrain[,1]
rawtest = read.csv(("C:/Users/imjef/Documents/Schoolwork/Machine Learning/Project 2/mnist_test.csv"))
for(i in 1:784) {
colnames(rawtest)[i+1] = paste(toString(ceiling(i/28)),toString(i%%28),sep = ".")
}
colnames(rawtest)[1] = "truetest"
# Digit labels (first column of each file).
trainlabel = rawtrain[,1]
testlabel = rawtest[,1]
#knn
# Drop the label column; evaluate class::knn on the first 200 test rows.
ktrain = subset(rawtrain,select= -c(1))
ktest = subset(rawtest,select= -c(1))
ktest = ktest[c(1:200),]
knnpredict = knn(train = ktrain, test = ktest, cl = trainlabel, k=5)
# NOTE(review): `check` is also the name of a function later in this file;
# whichever is defined last wins in a shared session -- confirm intent.
check = data.frame(knnpredict,testlabel[c(1:200)])
names(check) = c('Predicted','Actual')
# Misclassification rate on the 200-row test sample.
length(which(check$Predicted!=check$Actual))/length(knnpredict)
# Classify a hand-drawn PNG with class::knn against the global ktrain/trainlabel.
# x: the k to use for each knn call.
# The image is downscaled to 28px wide, inverted to MNIST's 0-255 "ink" scale,
# and flattened row-by-row into a 784-length vector. After the first prediction,
# the training rows whose label equals that prediction are REMOVED and knn is
# re-run, yielding second- and third-choice guesses; all three are printed.
# NOTE(review): assumes the source PNG is square so the scaled image is 28x28;
# a non-square image would break the gray[i,] flattening -- confirm.
show = function(x) {
#rawimg = image_read("C:/Users/imjef/Documents/Schoolwork/Machine Learning/Project 2/Eight.png")
rawimg = image_read("C:/Users/imjef/Documents/Schoolwork/Machine Learning/Project 2/RML/Write-up/Test.png")
print(rawimg)
rawimg = image_scale(rawimg,"28")
img = readPNG(image_write(rawimg,format = 'png'))
# PNG intensities are in [0,1] with white=1; invert to MNIST's ink-on-white scale.
gray = ((img)*-255)+255
input = c()
for(i in c(1:28)) {
input <- c(input, gray[i,])
}
# First guess on the full training set.
result = knn(train = ktrain, test = input, cl = trainlabel, k=x)
# Drop rows matching the first guess, then predict again (second choice).
ktrain2 = ktrain[which(trainlabel!=result),]
trainlabel2 = trainlabel[which(trainlabel!=result)]
result2 = knn(train = ktrain2, test = input, cl = trainlabel2,k=x)
# And again for a third choice.
ktrain3 = ktrain2[which(trainlabel2!=result2),]
trainlabel3 = trainlabel2[which(trainlabel2!=result2)]
result3 = knn(train = ktrain3, test = input, cl = trainlabel3,k=x)
print(result)
print(result2)
print(result3)
}
show(5)
#RANDOM TREES
# Fit a random forest on the first 2000 training rows.
# NOTE(review): subtest is drawn from rawtrain but later compared against
# testlabel (from rawtest) -- the error rate below may be meaningless; verify.
subtrain = rawtrain[c(1:2000),]
subtest = rawtrain[c(1:500),]
subtest = subset(subtest,select= -c(1))
# `true` is numeric, so randomForest fits a regression; predictions are rounded
# back to digit labels below.
kforest = randomForest(formula = true ~ ., data = subtrain)
gforest <- ctree(true ~ ., data=subtrain)
plot(gforest, type="simple")
# In-sample (training) error rate.
length(which(round(predict(kforest))!=trainlabel[c(1:2000)]))/2000
# Variable importance, largest first.
kimportance = importance(kforest, type = 2)
kimportance[order(-kimportance)]
forestres = predict(kforest,newdata = subtest)
forestcheck = data.frame(round(forestres), testlabel[c(1:500)])
names(forestcheck) = c('Predicted','Actual')
length(which(forestcheck$Predicted!=forestcheck$Actual))/500
# Confusion tables: training, then held-out sample.
table(data = round(predict(kforest)), reference = trainlabel[c(1:2000)])
table(data=forestcheck$Predicted,
reference=forestcheck$Actual)
#qda
# QDA on 20k/5k rows. Row 20001 holds per-column pixel sums, used to drop
# low-ink columns (sum <= 1e6) so the covariance matrices stay non-singular.
qsubtrain = rawtrain[c(1:20000),]
qsubtest = rawtest[c(1:5000),]
for(i in c(1:785)) {
qsubtrain[20001,i] <- sum(qsubtrain[c(1:20000),i])
}
#daoptimize = function(x){
qdatrain = qsubtrain[c(1,which(qsubtrain[20001,]>1000000))]
qdatest = qsubtest[c(1,which(qsubtrain[20001,]>1000000))]
qdatrain = qdatrain[c(1:20000),]
qdatest = subset(qdatest,select= -c(1))
kqda = qda(true~.,data =qdatrain)
qdares = predict(kqda,newdata = qdatest)
qdacheck = data.frame(qdares$class,testlabel[c(1:5000)])
names(qdacheck) = c('Predicted','Actual')
# Training and test error rates.
length(which(predict(kqda)$class != trainlabel[1:20000]))/20000
length(which(qdacheck$Predicted != qdacheck$Actual))/5000
#}
#qdaoptimize(1000)
#gldaoptimize = data.frame()
#for(i in c(1:40)){
#  gldaoptimize[i,1]<- 50*i
#  gldaoptimize[i,2]<- ldaoptimize(50*i)
#gldaoptimize[i,3]<- length(which(ldacheck$Predicted != ldacheck$Actual))/500
#}
#gldaoptimize
table(data = predict(kqda)$class, reference = trainlabel[1:20000])
table(data = qdacheck$Predicted, reference = qdacheck$Actual)
#lda
# LDA setup mirrors QDA: row 2001 holds per-column pixel sums used by
# ldaoptimize() below to threshold columns.
subtrain = rawtrain[c(1:2000),]
subtest = rawtest[c(1:500),]
for(i in c(1:785)) {
subtrain[2001,i] <- sum(subtrain[c(1:2000),i])
}
# Fit LDA keeping only pixel columns whose total ink (row 2001 of the global
# `subtrain`) exceeds threshold x, and return the test misclassification rate.
# Reads globals: subtrain, subtest, testlabel.
# x: minimum per-column pixel sum for a column to be retained.
# Returns: fraction of the 500 test rows misclassified (last expression).
ldaoptimize = function(x){
ldatrain = subtrain[which(subtrain[2001,]>x)]
ldatest = subset[which(subtrain[2001,]>x)]
ldatrain = ldatrain[c(1:2000),]
ldatest = subset(ldatest,select= -c(1))
klda = lda(true~.,data =ldatrain)
ldares = predict(klda,newdata = ldatest)
ldacheck = data.frame(ldares$class,testlabel[c(1:500)])
names(ldacheck) = c('Predicted','Actual')
#length(which(predict(klda)$class != trainlabel[1:2000]))/2000
length(which(ldacheck$Predicted != ldacheck$Actual))/500
}
ldaoptimize(1000)
# Sweep the ink threshold from 50 to 2000 in steps of 50, recording the
# test error rate for each setting.
gldaoptimize = data.frame()
for(i in c(1:40)){
gldaoptimize[i,1]<- 50*i
gldaoptimize[i,2]<- ldaoptimize(50*i)
#gldaoptimize[i,3]<- length(which(ldacheck$Predicted != ldacheck$Actual))/500
}
gldaoptimize
# NOTE(review): klda and ldacheck are local to ldaoptimize(), so these two
# table() calls will fail with "object not found" at top level -- confirm
# whether ldaoptimize was meant to return/assign them.
table(data = predict(klda)$class, reference = trainlabel[1:2000])
table(data = ldacheck$Predicted, reference = ldacheck$Actual)
#lda2
# Second LDA: a single engineered feature, the count of non-zero pixels
# ("fill") per image.
subtrain2 = rawtrain[c(1:10000),]
subtest2 = rawtest[c(1:2500),]
ldatrain2 = data.frame(subtrain2[,1])
for(i in c(1:nrow(subtrain2))) {
ldatrain2[i,2] <- length(which(subtrain2[i,]>0))
}
names(ldatrain2)=c("true", "fill")
klda2 = lda(formula = true~fill,data = ldatrain2)
ldatest2 = data.frame()
for(i in c(1:nrow(subtest2))) {
ldatest2[i,1] <- length(which(subtest2[i,]>0))
}
names(ldatest2)="fill"
ldares2 = predict(klda2, newdata = ldatest2)
ldacheck2 = data.frame(ldares2$class,testlabel[c(1:2500)])
names(ldacheck2) = c('Predicted','Actual')
# Test error rate and confusion table for the fill-only model.
length(which(ldacheck2$Predicted != ldacheck2$Actual))/2500
table(data = ldacheck2$Predicted, reference = ldacheck2$Actual)
ldacheck2
klda2
# Load the MNIST train/test CSVs and publish `train`, `test`, and
# `train_matrix`, which the other helpers in this file (knn, knn_case,
# check_case, show_case, save) read as globals.
#
# Bug fixed: the original assigned only function-local variables, so calling
# load_data() had no lasting effect and the helpers failed on missing globals.
# `<<-` now binds them in the enclosing (global) environment.
# Returns: train_matrix, invisibly.
load_data <- function () {
  # NOTE(review): setwd() inside a function changes session state for the
  # caller; acceptable in this script, but confirm before reuse.
  setwd("~/Desktop/Lawrenceville/Term 9/RML/MNIST")
  train <<- read.csv("train.csv")
  test <<- read.csv("test.csv")
  train_matrix <<- data.matrix(train[1:60000, 2:785])
  invisible(train_matrix)
}
# Classify test row `input_case` with the hand-rolled knn() below.
# Reads global: test (loaded by load_data()).
# input_case: 1-based row index into the test data frame.
# k: neighbour count forwarded to knn().
# Returns: the predicted digit (integer).
knn_case <- function(input_case, k = 7) {
  # Columns 2:785 are the 784 pixel values; column 1 is the label.
  input_data = data.matrix(test[input_case, 2:785])
  knn(input_data, k)
}
# k-nearest-neighbour classifier against the global training data.
# Reads globals: train_matrix (60000 x 784 pixels), train (column 1 = labels).
# input: a 784-element pixel vector for one digit image.
# k: number of neighbours to vote.
# Returns: the winning digit as an integer.
# NOTE(review): shadows class::knn if that package is attached.
knn <- function (input, k = 7) {
  # Squared Euclidean distance from `input` to every training row;
  # the square root is omitted since ordering is unaffected.
  deltas <- sweep(train_matrix, 2, input, "-", check.margin = FALSE)
  dist_sq <- rowSums(deltas * deltas)
  # Labels of the k closest rows decide the digit by majority vote.
  nearest <- order(dist_sq)[seq_len(k)]
  ballot <- table(train[nearest, 1])
  strtoi(names(which.max(ballot)))
}
# Classify `input` and format a one-line CSV-style report comparing the guess
# to `truth`. input_case identifies the test row; -1 denotes outside input.
# Returns the formatted string (last expression); nothing is printed here.
check <- function (input, truth, input_case = -1) { #I'm using input case -1 to denote outside input
  guess = knn(data.matrix(input))
  # Fields: index, prediction, actual label, correct?
  sprintf("%s, %s, %s, %s", input_case, guess, truth, guess==truth)
  #sprintf("Index: %s | KNN: %s | Actual: %s | Correct: %s ", input_case, guess, truth, guess==truth)
}
# Run check() on test row `input_case`, supplying its pixels (cols 2:785)
# and its true label (col 1) from the global `test` data frame.
check_case <- function (input_case) {
  check(test[input_case, 2:785], test[input_case,1], input_case)
}
# Render a flat 784-pixel vector as a 28x28 digit image.
# The column order is reversed so image() draws the digit right side up.
show <- function (input) {
  pixels <- matrix(input, 28, 28)[, 28:1]
  image(pixels)
}
# Display test row `case` as an image, using the global `test` data frame.
show_case <- function (case) {
  array = as.matrix(test[case,2:785])
  show(array)
}
# Write test row `case` to Images/<name>.png as a 28x28 digit image.
# Reads global: test.
# NOTE(review): this masks base::save(); consider renaming (e.g. save_case)
# if base::save is ever needed in the same session.
save <- function (case, name) {
  # Reshape the 784 pixels and flip columns so the digit is upright.
  data = matrix(as.matrix(test[case, 2:785]), 28, 28)[, 28:1]
  png(paste0("~/Desktop/Lawrenceville/Term 9/RML/MNIST/Images/", name, ".png"))
  # Guarantee the graphics device is closed even if image() errors,
  # so a failure cannot leak an open device.
  on.exit(dev.off(), add = TRUE)
  image(data)
}
# Convenience wrapper: print the check-line for test row `case`, then plot it.
full <- function(case) {
  print(check_case(case))
  show_case(case)
}
#This will take a long time! 10000 cases * ~6 seconds/case = ~16-17 hours
# Classify every test case, append each result line to log_file.txt, and
# save each digit image. Reads globals: test (plus train/train_matrix via knn).
#
# Bugs fixed: knn() was called with the row index `i` instead of the pixel
# vector, and check() was called without its required `truth` argument
# (which errors on first use). knn_case()/check_case() supply both correctly.
# The log path is also hoisted out of the loop.
# Returns: overall accuracy (correct/total), invisibly.
classify_all <- function () {
  total = 0
  correct = 0
  logFile = "~/Desktop/Lawrenceville/Term 9/RML/MNIST/log_file.txt"
  for (i in seq_len(10000)) {
    guess = knn_case(i)
    truth = test[i,1]
    if (guess==truth) {
      correct = correct + 1
    }
    total = total + 1
    cat(check_case(i), file=logFile, append = TRUE, sep = "\n")
    save(i, as.character(i))
    print(i)  # progress indicator
  }
  invisible(correct / total)
}
|
7e3ee5fa6ceb5c1bbb67c154e3457aa42b6877df
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/coalitions/examples/pool_austria.Rd.R
|
cbfaf21b8af26ebee7871771b84ea82c1b81c32d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
pool_austria.Rd.R
|
# Extracted example for coalitions::pool_austria (pooling pollster surveys).
library(coalitions)
### Name: pool_austria
### Title: Pool surveys from different pollsters
### Aliases: pool_austria
### Keywords: internal
### ** Examples
# NOTE(review): library(coalitions) is loaded a second time here (harmless,
# but redundant with the call above).
library(coalitions)
library(dplyr)
latest <- get_latest(surveys_sample)
pool_surveys(surveys_sample, last_date=as.Date("2017-09-02"))
|
30bbc0c9f0afc3ab37f9f6c228e447e197b3091e
|
0baf34856c8799b5005029618cda2d75ee37f85a
|
/run_analysis.R
|
b1df6fca7e074553c9a0de41d4d97b6cc06e5121
|
[] |
no_license
|
jroemer01/R-ProgrammingW4
|
f9ed5d201340d7612806b01369e506244d2e5be4
|
1b9e5e2140a4ed6c086d7a67dffdd7c5c32041a0
|
refs/heads/master
| 2021-01-23T04:45:08.532828
| 2017-02-04T21:55:15
| 2017-02-04T21:55:15
| 80,380,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,925
|
r
|
run_analysis.R
|
# Program to analyze and produce a clean dataset from the a wearable data set
# Goals of the assignment
# Merges the training and the test sets to create one data set.
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive variable names.
# From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Steps
# 1) Load the necessary packages
# 2) Download and unzip the data
# 3) Read the file names and parse into three chucks
# a) The activities (y files)
# b) the data (X files)
# c) the subjects
# 4) Create a train table that cbinds the activity and subject file
# 5) Create a test table that cbinds the activity and subject file
# 6) Select only the mean and std columns into a new data set
# Need to add logic to test installation status prior to loading
pckgs<-installed.packages()
if (!("dplyr" %in% pckgs[,1])) install.packages("dplyr")
if (!("gdata" %in% pckgs[,1])) install.packages("gdata")
library(dplyr)
library(gdata)
#Need to download the dataset
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipname <- "wearable.zip"
#download.file(fileURL,zipname)
# Get the names of the files
# list = TRUE returns the archive's file listing without extracting.
datafilenames<-unzip(zipname, list = TRUE)
#Train rows
train<-grepl("train.[xX]_train.txt",datafilenames$Name)
trainfiles<-datafilenames$Name[train]
#Test rows
test<- grepl("test.[xX]_test.txt",datafilenames$Name)
testfiles<-datafilenames$Name[test]
# Load activity lables
activity_file<-grepl("activity_labels.txt",datafilenames$Name)
my_activity<-datafilenames$Name[activity_file]
activity_labels<-read.table(my_activity)
#Load Features
feature<-grepl("features.txt",datafilenames$Name)
featurestable<-datafilenames$Name[feature]
#Load subject files
subjects<-grepl("subject",datafilenames$Name)
subjectstable<-datafilenames$Name[subjects]
#print(trainfiles)
# UPDATE LATER TO USE DIR AND FILES TO CYCLE THROUGH THE TRAIN AND TEST FOLDERS
#Unzip the file
unzip(zipname)
# Getting the file names from the train and test folders
#Need to extract the data
features<-read.table(featurestable)
#This table has 561 columns and 7352 observations
#
#x_train<-read.table("./train/X_train.txt",header = FALSE)
# The fist step is to assign labels to all the columns
# TO do this we will need to make the features file human readable
# I will create a data table with extra columns that then get pasted together to form the new name
# The tidy column format will be function name then description of the measurement and rate if applicable
# Example Mean Body Acceleration
# t prefix become Time
# f prefix will be Fourier Transform
# Regex patterns to rewrite; decode[i] below is the replacement for
# function_names[i], applied in order (order matters: later patterns see
# the output of earlier substitutions).
function_names<-c("std","Mean","-Coeff","\\(\\)","-[XYZ]","^t","^f"
                  ,"gyro","Acc","sma","iqr"
                  ,"-[Aa]r[cC]oeff","[-,][123456789]","-","mean","min","max"
                  ,"mad","entropy","energy","([Bb]ody)\\1+",",[XYZ]","[Mm]ag","Gyro"
                  ,"Jerk","[Ii]nds","[Aa]ngle\\(","$\\)",",","[Gg]ravity","bandsEnergy[1-9]?[1-9]?",
                  "[sS]kewness","[Kk]urtosis","X","Y","Z"," \\)", " t",".1")
#human_functions<-c("Standard Deviation", "Mean", "Coefficient",$Energy[123456789]+)
decode<-c("Standard Deviation ", "Mean ", "Coefficient ","","","Time ","Fast Fourier "
          ,"Gyroscope ","Acceleration ","Signal Magnitude Area ","Interquartile Range "
          ,"Autorregresion Coefficients","","","Mean ","Min ","Max "
          ,"Median Absolute Deviation","Entropy","Energy ","\\1 ","","Magnitude ","Gyroscope "
          ,"Jerk ","Index "," Angle of ","","","Gravity ","Frequency Interval Energy"
          ,"Skewness", "Kurtosis","X ","Y ","Z ",""," Time ","")
# Loop through the elements to update and apply the decodes
x<-1
for (i in function_names) {
features$V2<-trim(gsub(i,decode[x],features$V2))
x<-x+1
}
#tidytable_files<-c(trainfiles,testfiles)
test_train<-c("test","train")
file_prefix<-paste(getwd(),"/UCI HAR Dataset/",sep = "")
dim_files<-c("/X_","/y_","/subject_")
# Create the final data set
# Read the Training data set
# should make this into a function
# For each of test/train: read measurements (X_), activities (y_), and
# subjects, merge activity names in, cbind them, and rbind into tidytable.
x<-1
for (f in test_train)
{
#print(c("variable f: ",f))
file_path<-paste(file_prefix,f,dim_files[1],f,".txt",sep="")
#print(file_path)
X_data_table<-read.table(file_path)
file_path<-paste(file_prefix,f,dim_files[2],f,".txt",sep="")
y_data_table<-read.table(file_path)
y_data_table<- merge.data.frame(activity_labels,y_data_table,by.x = "V1",by.y = "V1" )
file_path<-paste(file_prefix,f,dim_files[3],f,".txt",sep="")
subj_data_table<-read.table(file_path)
#print("got here?")
X_data_table<-cbind(y_data_table,X_data_table)
X_data_table<-cbind(subj_data_table,X_data_table)
if (x==1)
{
tidytable<-X_data_table
#print("if loop")
}
else
{
tidytable<-rbind(tidytable,X_data_table)
}
#num_obs<-nrow(tidytable)
#print(num_obs)
x<- x+1
}
#Assign the names to the Tiday Table
names(tidytable) <- c("Subject","Activity ID","Activity Name",features$V2)
tidytable<-tbl_df(tidytable)
#extract the columns that are mean or standard deviation
keep_columns1<-grep("Mean",names(tidytable))
keep_columns2<-grep("Standard Deviation", names(tidytable))
keep_columns<-c(1,3,keep_columns1,keep_columns2)
final_tidy<-select(tidytable,keep_columns)
# Setting the group by and creating an table with means by the group by for each calculation
# column and then writing it to disk
# NOTE(review): tbl_df() and summarise_each() are deprecated in current
# dplyr (as_tibble()/across() are the modern equivalents) -- works with the
# dplyr versions of the era, but worth migrating.
avg_final<-group_by(final_tidy,"Subject","Activity Name")
avg_final<-summarise_each(avg_final,funs(mean),3:88)
write.table(avg_final, file = "tidy_avg_table.txt")
|
096a8a007bb0952b53d1a06217cd05e53d1341f0
|
0bd8aa996d316ba43987899fa5aab62f4e807fad
|
/inst/testdata/nmrData.R
|
2bb780929a588be9daeff06c02f6476e521d4ebc
|
[
"BSD-2-Clause"
] |
permissive
|
clabornd/pmartR
|
060e2c848558669787ef06d78b3f990d76f5053a
|
6cf6edc071a7d4689d2eca845b71e19d9028025e
|
refs/heads/master
| 2023-04-14T08:56:05.621140
| 2023-03-01T21:40:51
| 2023-03-01T21:40:51
| 146,666,950
| 0
| 0
|
BSD-2-Clause
| 2023-03-01T21:40:52
| 2018-08-29T22:44:12
|
R
|
UTF-8
|
R
| false
| false
| 1,205
|
r
|
nmrData.R
|
# The purpose of this script is to demonstrate how the e_data, f_data, and
# e_meta data frames were created. It should NOT be rerun because the data sets
# in pmartRdata will change over time and this will lead to errors in the unit
# tests.
# Construct the data to test the as.nmrData function. --------------------------
# I am saving these data sets (even though they are exact copies) in case the
# data sets in the pmartRdata package change in the future. If they do, the
# tests will not need to be updated to reflect changes in the new data.
# Load necessary libraries.
library (pmartRdata)
# Load the nmr data objects.
data("nmr_edata_identified")
data("nmr_fdata_identified")
data("nmr_emeta_identified")
# These data sets are small, allowing us to use the entire data set for testing
# purposes.
edata <- nmr_edata_identified
fdata <- nmr_fdata_identified
emeta <- nmr_emeta_identified
# Fashion an nmr "type" column in the emeta data frame.
emeta <- data.frame(emeta,
nmrClass = sub("_.*", "", emeta[, 2]))
# The sub function extracts all characters before the first _.
# save(edata,
# fdata,
# emeta,
# file = '~/pmartR/inst/testdata/nmrData.RData')
|
f2771334d206bec13e3bb5dce7e2ed83141bf111
|
2449a7f03f9abeb0deb99988f8c3a1a2edbaa7b6
|
/R_Churn_Assignment.R
|
5acd39f603407dbd2344831e5616899b3fd5554e
|
[] |
no_license
|
parag-123/R_Assignments
|
4cd45340c94671bf00a86b97d9499472b569125d
|
43408ebf8614edb8d1635f9886479fe0cf166056
|
refs/heads/master
| 2021-09-05T13:48:20.253123
| 2018-01-28T10:48:27
| 2018-01-28T10:48:27
| 113,855,940
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,685
|
r
|
R_Churn_Assignment.R
|
# Churn classification with rpart on the Churn dataset: stratified 70/30
# train/test split, pruned decision tree, confusion-matrix evaluation.
setwd("D:\\Project\\parag Personal\\aegis")
getwd()
churn_data = read.csv("Churn .csv")
str(churn_data)
summary(churn_data)
names(churn_data)
head(churn_data)
# Stratified split: sample 70% of each class separately so train/test keep
# the original churn ratio.
class1 = subset(churn_data,Churn ==1)
class0= subset(churn_data, Churn ==0)
smpsize1 = floor(0.70 * nrow(class1))
smpsize2 = floor(0.70* nrow(class0))
train_ind1 = sample((seq_len((nrow(class1)))), size = smpsize1)
train_ind0 = sample((seq_len((nrow(class0)))), size = smpsize2)
train0 = class0[train_ind0, ]
train1 = class1[train_ind1, ]
test0 = class0[-train_ind0, ]
test1 = class1[-train_ind1, ]
train = rbind(train0,train1)
test = rbind(test0,test1)
str(train)
str(test)
library(rpart)
# NOTE(review): the model is fit on churn_data (all rows) but evaluated on
# `test`, whose rows are a subset of churn_data -- the test error is not
# out-of-sample; consider fitting on `train` instead.
churn_mod = rpart(Churn ~ CustServ.Calls+Eve.Charge+Intl.Charge+Night.Charge+Day.Charge, data = churn_data, method = "class", minsplit = 30 )
churn_mod1 = prune(churn_mod,cp=0.010)
printcp(churn_mod1) # display the results
plotcp(churn_mod1) # visualize cross-validation results
summary(churn_mod1) # detailed summary of splits
str(churn_mod1)
churnpredict = predict(churn_mod1,test, type ="class")
str(churnpredict)
library("caret")
library("e1071")
# NOTE(review): churnpredict is a factor while test$Churn is numeric;
# confusionMatrix may warn/coerce -- confirm levels align.
confusionMatrix(churnpredict, (test$Churn))
library(rpart)
# Second tree on call counts (not charges), for comparison.
f = rpart(Churn~CustServ.Calls+Eve.Calls+Intl.Calls+Night.Calls
+Day.Calls,method="class", data=churn_data)
plot(f, uniform=TRUE,main="Classification Tree for Churn")
text(f, use.n=TRUE, all=TRUE, cex=.7)
plotcp(f,lty=4,col="red")
########################################################################
# plot trees
plot(churn_mod1, uniform=TRUE,
main="Classification Tree for Churn")
text(churn_mod1, use.n=TRUE, all=TRUE, cex=.8)
|
11ce94033b8bef030a0f27b6b20300b91a2fde98
|
0284de80cfff37487af4e7ad3323cb1b0f189c3c
|
/svm_trend_follower.r
|
83d1f3d53c5887efddd8b41ddbf1aee2d32de526
|
[
"MIT"
] |
permissive
|
dkanu/quantpac
|
f12bce1092550331d3d4a7c4a29f69bbf2444eb7
|
b3a00dffb9f8e98f67b7563356661e35f2f59921
|
refs/heads/master
| 2020-04-27T00:58:12.651924
| 2019-03-12T02:57:38
| 2019-03-12T02:57:38
| 173,951,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,440
|
r
|
svm_trend_follower.r
|
####################
## SVM Model
## Kernel types - linear, polynomial, radial, sigmoid
####################
# Per-ticker SVM classifiers predicting position direction (PosR.*) from
# lagged-return features (LR*). Each ticker's own lags (LR1.k/LR3.k) are
# combined with three shared feature pairs (LR1/LR3, LR1.1/LR3.1,
# LR1.10/LR3.10).
# Reads from elsewhere in the project: data.split (train.data/test.data),
# SvmEval2(), basket2, and svm() (presumably e1071 -- confirm it is loaded).
# NOTE(review): the seven stanzas differ only in ticker prefix and column
# suffix; a loop over a ticker->suffix map would remove the duplication, but
# is left as-is in case the per-ticker variables are used by other scripts.
##############################
## MODELS
##############################
kern_type = "polynomial"
#TSLA
tsla.svm.factors <- c('LR1.2', 'LR3.2','LR1','LR3','LR1.1','LR3.1','LR1.10', 'LR3.10')
tsla.svm.formula <- as.formula(paste('as.factor(PosR.2)~', paste(tsla.svm.factors, collapse = '+')))
tsla.svm.model <- svm(tsla.svm.formula, data = data.split$train.data, kernel = kern_type)
tsla.svm.eval <- SvmEval2(tsla.svm.model, data.split$test.data)
#PYPL --------------
pypl.svm.factors <- c('LR1.3', 'LR3.3','LR1','LR3','LR1.1','LR3.1','LR1.10', 'LR3.10')
pypl.svm.formula <- as.formula(paste('as.factor(PosR.3)~', paste(pypl.svm.factors, collapse = '+')))
pypl.svm.model <- svm(pypl.svm.formula, data = data.split$train.data, kernel = kern_type)
pypl.svm.eval <- SvmEval2(pypl.svm.model, data.split$test.data)
#SQ---------------
sq.svm.factors <- c('LR1.4', 'LR3.4','LR1','LR3','LR1.1','LR3.1','LR1.10', 'LR3.10')
sq.svm.formula <- as.formula(paste('as.factor(PosR.4)~', paste(sq.svm.factors, collapse = '+')))
sq.svm.model <- svm(sq.svm.formula, data = data.split$train.data, kernel = kern_type)
sq.svm.eval <- SvmEval2(sq.svm.model, data.split$test.data)
#AAPL----------------------
aapl.svm.factors <- c('LR1.5', 'LR3.5','LR1','LR3','LR1.1','LR3.1','LR1.10', 'LR3.10')
aapl.svm.formula <- as.formula(paste('as.factor(PosR.5)~', paste(aapl.svm.factors, collapse = '+')))
aapl.svm.model <- svm(aapl.svm.formula, data = data.split$train.data, kernel = kern_type)
aapl.svm.eval <- SvmEval2(aapl.svm.model, data.split$test.data)
#V-----------------------
v.svm.factors <- c('LR1.6', 'LR3.6','LR1','LR3','LR1.1','LR3.1','LR1.10', 'LR3.10')
v.svm.formula <- as.formula(paste('as.factor(PosR.6)~', paste(v.svm.factors, collapse = '+')))
v.svm.model <- svm(v.svm.formula, data = data.split$train.data, kernel = kern_type)
v.svm.eval <- SvmEval2(v.svm.model, data.split$test.data)
#FB-----------------------
fb.svm.factors <- c('LR1.7', 'LR3.7','LR1','LR3','LR1.1','LR3.1','LR1.10', 'LR3.10')
fb.svm.formula <- as.formula(paste('as.factor(PosR.7)~', paste(fb.svm.factors, collapse = '+')))
fb.svm.model <- svm(fb.svm.formula, data = data.split$train.data, kernel = kern_type)
fb.svm.eval <- SvmEval2(fb.svm.model, data.split$test.data)
#AMD-----------------------
amd.svm.factors <- c('LR1.8', 'LR3.8','LR1','LR3','LR1.1','LR3.1','LR1.10', 'LR3.10')
amd.svm.formula <- as.formula(paste('as.factor(PosR.8)~', paste(amd.svm.factors, collapse = '+')))
amd.svm.model <- svm(amd.svm.formula, data = data.split$train.data, kernel = kern_type)
amd.svm.eval <- SvmEval2(amd.svm.model, data.split$test.data)
############################
## BULK LIST BASED ACTIONS
############################
# Collect per-ticker evaluations and models, keyed by basket2 (assumed to
# list the same seven tickers in the same order -- confirm).
evaluations <- list(tsla.svm.eval, pypl.svm.eval, sq.svm.eval,
aapl.svm.eval, v.svm.eval, fb.svm.eval, amd.svm.eval)
names(evaluations) <- basket2
models <- list(tsla.svm.model, pypl.svm.model, sq.svm.model,
aapl.svm.model, v.svm.model, fb.svm.model, amd.svm.model)
names(models) <- basket2
##############################
## FILENAME DETAILS
##############################
filename <- sprintf("%s ANALYSIS %s.txt", "SVM", toString(format(Sys.time(), "%Y-%m-%d %H-%M-%S")))
############################
## SAVE TO FILE
############################
# Write only the evaluations with >50% accuracy to the timestamped file.
sink(filename)
Filter(function(x) x$accuracy > 0.5, evaluations)
sink()
|
778141a37c886ceb977cb83c28a26e67c1bd2f55
|
68a25655e8c34aa342358292db38ceaca9b77352
|
/man/walk_git.Rd
|
fff2e99dd84dcfe5a2846d37995b02b417d7dcbc
|
[
"MIT"
] |
permissive
|
maurolepore/checkout
|
5071bd4df7fae1563b2073b563882a43f6dc298c
|
904634713fd80242ad28bef0db4a49913287bd64
|
refs/heads/main
| 2023-02-14T16:23:22.612469
| 2021-01-07T22:23:42
| 2021-01-07T22:23:42
| 326,776,447
| 0
| 0
|
NOASSERTION
| 2021-01-08T00:48:06
| 2021-01-04T18:41:41
|
R
|
UTF-8
|
R
| false
| true
| 1,372
|
rd
|
walk_git.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/walk_git.R
\name{walk_git}
\alias{walk_git}
\title{Pipe-able, vectorized, and lightweight implementation of git in R}
\usage{
walk_git(path, command, verbose = FALSE, stop_on_error = TRUE, ...)
}
\arguments{
\item{path}{Path to one or multiple Git repos.}
\item{command}{A Git command, e.g. "status" or "log --oneline -n 1".}
\item{verbose}{Print Git's output?}
\item{stop_on_error}{If Git fails, do you want an R error?}
\item{...}{Other arguments passed to \link{system}.}
}
\value{
\code{walk_git()} is called for its side effect; it returns \code{path}
invisibly.
}
\description{
Apply a Git command to each \code{path}.
}
\examples{
library(magrittr)
# helper
walk <- function(x, f, ...) {
lapply(x, f, ...)
invisible(x)
}
repos <- file.path(tempdir(), paste0("repo", 1:2))
repos \%>\% walk(dir.create)
# Fails because the repo isn't initialized
repos \%>\%
walk_git("status") \%>\%
try()
# Don't throw an error
repos \%>\%
walk_git("status", stop_on_error = FALSE)
repos \%>\% walk_git("init")
repos \%>\% walk_git("status")
repos \%>\% walk_git("status", verbose = TRUE)
repos \%>\%
walk_git("add .") \%>\%
walk_git("commit -m 'Initialize' --allow-empty") \%>\%
walk_git("log --oneline -n 1", verbose = TRUE)
# Cleanup
walk(repos, unlink, recursive = TRUE)
}
|
f439827b1129286f7d9b3b4558ee644983652cc5
|
51952fc1aff4919a94dafadbe41e62f6662aabe0
|
/man/panel.function.demo.Rd
|
8a0cfc3d3c33585055332da5ae1cbb9ef688da4c
|
[] |
no_license
|
claus-e-andersen/clanLattice
|
6bcdec4cebcf055fe7f38e799b82f3f28b9421c0
|
1a5c691d8d8fc65e757b13435248daaae4274690
|
refs/heads/master
| 2023-04-06T13:36:34.638721
| 2023-03-20T18:40:53
| 2023-03-20T18:40:53
| 22,569,616
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,219
|
rd
|
panel.function.demo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subscript-groups-panel-function-demo-function.R
\name{panel.function.demo}
\alias{panel.function.demo}
\title{Demonstration of how to write a panel function (subscripts and groups)}
\usage{
require(grid)
require(lattice)
pp <- panel.function.demo()
pp
pp[[3]]
}
\arguments{
\item{sec.delay}{Delay between the plots.}
}
\value{
A Lattice plot
}
\description{
This function demonstrate how to write a panel function that
takes care of subscripts and groups. The main trick is to always use:
panel = function(x, y, subscripts, groups,...){ }
subscripts are always available, but groups may not be, so to mitigate that
simply use:
if(missing(groups))\{
panel.xyplot(x, y, ...)\} else \{
panel.superpose(x, y, subscripts,groups, ...)
\}#if
The use of \code{panel.number()}, \code{current.row()}, and \code{current.column()} is also demonstrated.
Further tricks:
\code{par.strip.text(cex=1.2)}
and
how to get data in and out of a panel using assign and get to the top environment (\code{df.outside}).
}
\author{
Claus E. Andersen
}
|
edac18622eba8e9a9c24aac8d8676a630bb0a50c
|
c1ab4b3a822f1407267e60819925dfe027e1a603
|
/utils/runApp.R
|
1c7932fcaf80b17e2444910eb3ee1635f345f87c
|
[] |
no_license
|
Gnolam/shinyNodeProxy
|
abd752bd3cbf09db345101751c7cb05611b2ed0b
|
99ed425438d239fc7db234d2747a03e75d4a680f
|
refs/heads/master
| 2021-01-17T21:32:27.874444
| 2015-10-02T09:48:45
| 2015-10-02T09:48:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
runApp.R
|
# Launch a Shiny app from the command line on a given port.
# Usage: Rscript runApp.R <port> <appDir>
args <- commandArgs(TRUE)
# Fail fast on missing/non-numeric arguments instead of erroring later.
if (length(args) < 2) stop("Usage: runApp.R <port> <appDir>", call. = FALSE)
appWd <- file.path(args[2])
port <- as.integer(args[1])
if (is.na(port)) stop("Port must be an integer: ", args[1], call. = FALSE)
Sys.setlocale("LC_ALL", 'en_US.UTF-8')
print(sessionInfo())
message(paste('Set working directory to', appWd))
# Bug-ish redundancy fixed: appWd was computed but setwd() re-derived the
# same path from args[2]; use the single value.
setwd(appWd)
if (dir.exists('packrat')) {
  message('Packrat found. Initialisation.')
  source('packrat/init.R')
}
shiny::runApp('.', port = port, launch.browser = FALSE)
|
4d23c85870e7aec7ce5c70f29017fd9b6c3b10a4
|
5749c63ff48ff868bff23f9bcb4fddd5870c0b40
|
/R/modify_sql.R
|
183a73d7f934d0a949d5e68a03cd4df00b0eee50
|
[
"MIT"
] |
permissive
|
thuyetbao/pool.extra
|
234164a3f23f196d7b12fdda663322a8b20b9346
|
6ab7dee7e49fe815a9082b3eb3942a90903d2d8b
|
refs/heads/master
| 2023-03-31T18:54:41.457249
| 2021-04-02T18:05:52
| 2021-04-02T18:05:52
| 352,385,884
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
modify_sql.R
|
#' Modify SQL
#'
#' @description Built-in helper that builds a SQL statement for the pool
#' connection, interpolating named parameters passed via `...` into the
#' statement with `glue::glue_sql()` (values are SQL-quoted by glue).
#' @param pool A pool/DBI connection used by `glue_sql()` for quoting.
#' @param statement A SQL template string with `{name}` placeholders.
#' @param ... Named values to substitute into the template. Duplicate names
#'   are an error (`.homonyms = "error"`).
#' @return A `glue_sql`/SQL object ready to execute.
#' @export
modify_sql <- function(pool, statement, ...) {
#' Capture `...` as quosures without forcing evaluation yet.
parameters <- rlang::enquos(...,
.named = FALSE, .ignore_empty = "all",
.homonyms = "error", .check_assign = TRUE
)
#' Evaluate each quosure and bind it by name into this function's frame,
#' so glue_sql() can resolve `{name}` placeholders via .envir below.
#' NOTE(review): relies on sys.frame(which = 1) being this call's frame;
#' fragile if modify_sql is called through wrappers -- confirm.
names(parameters) %>% purrr::map(
~ base::assign(.,
value = rlang::eval_tidy(parameters[[.]]),
pos = base::sys.frame(which = 1))
)
#' Interpolate the statement against the frame populated above.
sql <- glue::glue_sql(.con = pool, statement, .envir = base::sys.frame(which = 1))
#' Return the composed SQL.
return(sql)
}
|
2f225a5decf6a96f951a6c531ff911b9cc026161
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MRH/examples/plot.MRH.Rd.R
|
49fb454e1e4ea751d330cace30d4dee3aabe5783
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,813
|
r
|
plot.MRH.Rd.R
|
# Extracted example for MRH::plot.MRH (hazard-rate plots with credible bands).
library(MRH)
### Name: plot.MRH
### Title: Plots the hazard rate (with credible bands) of an MRH object.
### Aliases: plot.MRH
### ** Examples
# The MRH fit portion of the examples is from the
# estimateMRH() help page.
# They do not need to be re-run if the objects
# are already in the active workspace.
data(cancer)
cancer$censorvar = cancer$status - 1
## Not run:
##D fit.lung = estimateMRH(formula = Surv(time, censorvar) ~
##D age + as.factor(sex) + ph.karno, data = cancer,
##D M = 3, maxStudyTime = 960, burnIn = 200, maxIter = 1000,
##D thin = 1, outfolder = 'MRH_lung')
## End(Not run)
data(tongue)
## Not run:
##D fit.tongue = estimateMRH(formula = Surv(time, delta) ~
##D nph(type), data = tongue, M = 4,
##D burnIn = 200, maxIter = 2000, thin = 1, outfolder = 'MRH_tongue_nph')
## End(Not run)
# Plot the hazard rate, cumulative hazard,
# and survival function of the lung model side-by-side:
## Not run:
##D par(mfrow = c(1,3))
##D plot(fit.lung, main = 'Hazard rate')
##D plot(fit.lung, plot.type = 'H', main = 'Cumulative hazard')
##D plot(fit.lung, plot.type = 'S', main = 'Survival function')
## End(Not run)
# Plot the hazard rates for the tongue (i.e. non-proportional hazards)
# model with smoothed estimates.
## Not run:
##D plot(fit.tongue, smooth.graph = TRUE)
## End(Not run)
# Separate the graphs for less crowding
## Not run:
##D plot(fit.tongue, smooth.graph = TRUE, combine.graphs = FALSE)
## End(Not run)
# Plot the hazard ratios
## Not run:
##D plot(fit.tongue, plot.type = 'r')
## End(Not run)
# Plot the hazard rate of the lung model using the chains.
# This requires maxStudyTime to be entered.
## Not run:
##D lung.chains = MRH(read.table('MRH_lung/MCMCchains.txt', header = TRUE))
##D plot(lung.chains, maxStudyTime = 960)
## End(Not run)
|
6f0c04750246d00087ad1703c36e0ee45d38daaf
|
3b11f8cf2b040254ab2d178be28645564746f9e5
|
/graphs_displayed_in_post.r
|
4c95dfa053f7707bfbef7e190171455f0acb959d
|
[] |
no_license
|
rcuevass/GAMs_exploration
|
6dea3ecad24f225e070e99d61d26e2b87e3683a5
|
3c56953ab89b3b5617ef805a7bd374381aebf995
|
refs/heads/master
| 2020-12-30T14:01:00.153118
| 2017-05-16T02:10:08
| 2017-05-16T02:10:08
| 91,277,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,148
|
r
|
graphs_displayed_in_post.r
|
## Smoothing-methods demo: fits smoothing splines (several penalties), a
## box-kernel running mean, loess, and an unpenalised cubic B-spline to
## noisy sin(x) data, plotting each fit against the true signal.
## "Dist" in the titles is the Euclidean distance of the fit to the truth z.
install.packages("demandr")
## NOTE(review): installing a package inside an analysis script re-installs on
## every run; consider guarding with `if (!requireNamespace("demandr", quietly = TRUE))`.
library(mgcv)
library(demandr)   # presumably provides multiplot() -- confirm
library(grid)
library(ggplot2)
library(splines)
library(reshape2)  # FIX: melt() is used below but reshape2 was never loaded
### Simulated data
set.seed(3)
x <- seq(0,2*pi,0.1)
z <- sin(x)                                        # true signal
y <- z + rnorm(mean=0, sd=0.5*sd(z), n=length(x))  # noisy observations
d <- cbind.data.frame(x,y,z)
## Smoothing splines with increasing smoothing parameter (spar ~ lambda).
d1 <- cbind.data.frame(data.frame(predict(smooth.spline(x=d, spar=0), x)), z)
e <- sqrt(sum((d1$z-d1$y)**2))
e
p1 <- ggplot(data=d, aes(x=x, y=y)) + geom_point() + geom_line(data=d1, aes(x=x, y=y), linetype=1) + geom_line(aes(x=x, y=z), linetype=2) + ggtitle(paste0("Lambda=0, Dist = ", round(e,2)))
d2 <- cbind.data.frame(data.frame(predict(smooth.spline(x=d, spar=0.3), x)), z)
e <- sqrt(sum((d2$z-d2$y)**2))
## FIX: p2 previously drew the spar=0 fit (d1) while its title reported the
## spar=0.3 distance computed from d2; draw d2 so line and title agree,
## consistent with the d3/p3 and d4/p4 pairs below.
p2 <- ggplot(data=d, aes(x=x, y=y)) + geom_point() + geom_line(data=d2, aes(x=x, y=y), linetype=1) + geom_line(aes(x=x, y=z), linetype=2) + ggtitle(paste0("Lambda=0.3, Dist = ", round(e,2)))
e
d3 <- cbind.data.frame(data.frame(predict(smooth.spline(x=d, spar=0.6), x)), z)
e <- sqrt(sum((d3$z-d3$y)**2))
e
p3 <- ggplot(data=d, aes(x=x, y=y)) + geom_point() + geom_line(data=d3, aes(x=x, y=y), linetype=1) + ylab("") + geom_line(aes(x=x, y=z), linetype=2) + ggtitle(paste0("Lambda=0.6, Dist = ", round(e,2)))
d4 <- cbind.data.frame(data.frame(predict(smooth.spline(x=d, spar=1), x)), z)
e <- sqrt(sum((d4$z-d4$y)**2))
e
p4 <- ggplot(data=d, aes(x=x, y=y)) + geom_point() + geom_line(data=d4, aes(x=x, y=y), linetype=1) + ylab("") + geom_line(aes(x=x, y=z), linetype=2) + ggtitle(paste0("Lambda=1, Dist = ", round(e,2)))
multiplot(p1, p2, p3, p4, cols=2)
## Kernel running mean (box kernel) and loess for comparison.
d5 <- cbind.data.frame(data.frame(ksmooth(d$x, d$y, kernel="box", n.points=length(x), bandwidth=1.5)), z)
p5 <- ggplot(data=d, aes(x=x, y=y)) + geom_point() + geom_line(data=d5, aes(x=x, y=y), linetype=1) + ylab("") + geom_line(aes(x=x, y=z), linetype=2) + ggtitle("Basic Runnuing Mean")
d6 <- cbind.data.frame(loess(y ~ x, data=d, span=0.6)$fitted, z, y, x)
names(d6) <- c("loess", "z", "y", "x")
p6 <- ggplot(data=d, aes(x=x, y=y)) + geom_point() + geom_line(data=d6, aes(x=x, y=loess), linetype=1) + ylab("") + geom_line(aes(x=x, y=z), linetype=2) + ggtitle("Loess")
multiplot(p5, p6, cols=2)
## Unpenalised cubic B-spline: 3 interior knots at the quartiles of x.
min(x)
max(x)
quantile(x, probs=c(0.25, .50, .75))
B <- bs(x, degree=3, intercept=TRUE, Boundary.knots=c(0, 6.2), knots=c(1.55, 3.10, 4.65))
model <- lm(y~0 + B)
model$coef
d7 <- cbind.data.frame(d, B, model$fitted)
names(d7) <- c("x", "y", "z", "B13", "B23", "B33", "B43", "B53", "B63", "B73", "Spline")
## Scale each basis column by its fitted coefficient so the individual
## basis contributions can be plotted alongside the full spline.
for (i in 1:7){
  d7[,3+i] <- d7[,3+i] * model$coef[i]
}
ggplot(data=d7, aes(x=x, y=y)) + geom_point() + geom_line(data=d7, aes(x=x, y=Spline), linetype=1) + ylab("") + geom_line(aes(x=x, y=z), linetype=2)
p7 <- ggplot(data=d7, aes(x=x, y=y)) + geom_point() + geom_line(data=d7, aes(x=x, y=Spline), linetype=1) + ylab("") + geom_line(aes(x=x, y=z), linetype=2) + ggtitle("Cubic B-Spline (3 inner knots, no penalty)")
p7
d7_melt <- melt(d7[,c("x", "B13", "B23", "B33", "B43", "B53", "B63", "B73", "Spline")], id.vars="x")
line.cols <- terrain.colors(8)
line.cols[8] <- "black"   # draw the full spline in black over the coloured bases
ggplot(data=d7_melt, aes(y=value, x=x, colour=variable)) + geom_line() + scale_color_manual(values=line.cols) + ylab("")
|
d824c97952b4d879a5eb6f5eab4dc2e04686b3c4
|
eb4667b178e418d936c35569383e5cb0663f93ad
|
/R/MVA.trajplot.R
|
4e48c68d320cfd64a670734ee58ee2442b800453
|
[] |
no_license
|
cran/RVAideMemoire
|
21081d49de9999a7438c40de05ab67a145336a02
|
6a48aaa7facac606e954b06a9cc1ea46b387d575
|
refs/heads/master
| 2023-08-31T00:44:09.327145
| 2023-08-23T07:30:05
| 2023-08-23T09:30:39
| 17,692,998
| 7
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,507
|
r
|
MVA.trajplot.R
|
## Trajectory plot on a multivariate-analysis ordination.
##
## Plots the row scores of analysis `x` on axes (xax, yax) -- obtained via
## MVA.scores(), defined elsewhere in this package -- and draws one or more
## "trajectories": ordered sequences of points connected by segments or
## arrows, optionally boxed-and-labelled, with an optional legend. Points
## not belonging to any trajectory can also be displayed (`allpoints`).
## Graphical parameters (col/lwd/lty/pch) are recycled per trajectory, with
## one extra entry for the non-trajectory points when some rows are outside
## all trajectories.
MVA.trajplot <- function(x,xax=1,yax=2,trajects,trajlab=NULL,scaling=2,set=c(12,1,2),space=1,xlab=NULL,ylab=NULL,
  main=NULL,pch=16,cex=1,trajlab.cex=1,col=1,lwd=1,lty=1,points=TRUE,allpoints=TRUE,arrows=TRUE,labels=NULL,
  main.pos=c("bottomleft","topleft","bottomright","topright"),main.cex=1.3,legend=FALSE,legend.pos=c("topleft",
  "topright","bottomleft","bottomright"),legend.title=NULL,legend.lab=NULL,legend.cex=1,drawextaxes=TRUE,
  drawintaxes=TRUE,xlim=NULL,ylim=NULL) {
  # --- Input normalisation and validation -------------------------------
  # A single trajectory may be given as a bare vector; wrap it in a list.
  if (!is.list(trajects)) {trajects <- list(trajects)}
  if (ncol(as.data.frame(trajects[[1]]))!=1) {stop("wrong 'trajects', must be a vector or a list of vectors")}
  xax <- xax[1]
  yax <- yax[1]
  coord <- MVA.scores(x,xax,yax,scaling,set,space)$coord
  if (ncol(coord)==1) {stop("choose a second axis")}
  ntraj <- length(trajects)
  if (!is.null(trajlab) & length(trajlab)!=ntraj) {stop("non-convenient 'trajlab' argument")}
  # Rows covered by at least one trajectory; `rest` is TRUE when some rows
  # belong to no trajectory (those get the extra ntraj+1 graphical entry).
  which.in <- unique(unlist(trajects))
  rest <- !length(which.in)==nrow(coord)
  # --- Recycle per-trajectory graphical parameters ----------------------
  if (length(col)==1) {
    if (rest) {col <- rep(col,ntraj+1)} else {col <- rep(col,ntraj)}
  }
  if ((rest & length(col)!=ntraj+1) | (!rest & length(col)!=ntraj)) {stop("non-convenient 'col' argument")}
  if (length(lwd)==1) {
    if (rest) {lwd <- rep(lwd,ntraj+1)} else {lwd <- rep(lwd,ntraj)}
  }
  if ((rest & length(lwd)!=ntraj+1) | (!rest & length(lwd)!=ntraj)) {stop("non-convenient 'lwd' argument")}
  if (length(lty)==1) {
    if (rest) {lty <- rep(lty,ntraj+1)} else {lty <- rep(lty,ntraj)}
  }
  if ((rest & length(lty)!=ntraj+1) | (!rest & length(lty)!=ntraj)) {stop("non-convenient 'lty' argument")}
  if (length(trajlab.cex)==1) {trajlab.cex <- rep(trajlab.cex,ntraj)}
  if (length(trajlab.cex)!=ntraj) {stop("non-convenient 'trajlab.cex' argument")}
  if (points) {
    if (length(pch)==1) {
      if (rest) {pch <- rep(pch,ntraj+1)} else {pch <- rep(pch,ntraj)}
    }
    if ((rest & length(pch)!=ntraj+1) | (!rest & length(pch)!=ntraj)) {stop("non-convenient 'pch' argument")}
  }
  # When drawing text labels instead of points, default to row names.
  if (!points & is.null(labels)) {labels <- rownames(coord)}
  main.pos <- match.arg(main.pos)
  legend.pos <- match.arg(legend.pos)
  # --- Plot frame setup (margins restored at the end) -------------------
  oldmar <- par()$mar
  marinf <- ifelse(drawextaxes,5.1,3.5)
  if (drawextaxes) {
    par(mar=c(marinf,4.1,2.1,0.1))
  } else {
    par(mar=c(marinf,2.5,2.1,0.1))
  }
  coordx <- coord[,1]
  coordy <- coord[,2]
  if (is.null(xlab)) {xlab <- colnames(coord)[1]}
  if (is.null(ylab)) {ylab <- colnames(coord)[2]}
  # Common range on both axes so the ordination keeps its aspect.
  xy.min <- min(min(coordx),min(coordy))
  xy.max <- max(max(coordx),max(coordy))
  if (is.null(xlim)) {xlim <- c(xy.min,xy.max)}
  if (is.null(ylim)) {ylim <- c(xy.min,xy.max)}
  plot(coordx,coordy,xlab="",ylab="",xlim=xlim,ylim=ylim,axes=FALSE,type="n")
  if(drawextaxes) {
    axis(1)
    axis(2)
  }
  if (drawintaxes) {abline(v=0,h=0,col="grey")}
  lab.line <- c(ifelse(drawextaxes,3,1),ifelse(drawextaxes,2.3,0.8))
  mtext(c(xlab,ylab),side=c(1,2),line=lab.line,at=c(mean(range(coordx)),mean(range(coordy))))
  # --- Points outside all trajectories (if any, and if requested) -------
  if (rest & allpoints) {
    if (points) {
      points(coordx[-which.in],coordy[-which.in],pch=pch[ntraj+1],col=col[ntraj+1])
    } else {
      text(coordx[-which.in],coordy[-which.in],labels[-which.in],col=col[ntraj+1],cex=cex)
    }
  }
  # --- Draw each trajectory ---------------------------------------------
  for (i in 1:ntraj) {
    traj.i <- trajects[[i]]
    n <- length(traj.i)
    if (arrows) {
      # Arrow head at each segment's midpoint, then a plain segment to the
      # next point, so heads do not overlap the points themselves.
      cx <- coordx[traj.i]
      cy <- coordy[traj.i]
      arrows(cx[-n],cy[-n],cx[-n]+diff(cx)/2,cy[-n]+diff(cy)/2,col=col[i],lwd=lwd[i],lty=lty[i],
	length=0.12,angle=20)
      segments(cx[-n]+diff(cx)/2,cy[-n]+diff(cy)/2,coordx[traj.i[-1]],coordy[traj.i[-1]],col=col[i],
	lwd=lwd[i],lty=lty[i])
    } else {
      segments(coordx[traj.i[-n]],coordy[traj.i[-n]],coordx[traj.i[-1]],coordy[traj.i[-1]],col=col[i],
	lwd=lwd[i],lty=lty[i])
    }
    if (points) {
      points(coordx[traj.i],coordy[traj.i],pch=pch[i],col=col[i])
    } else {
      text(coordx[traj.i],coordy[traj.i],labels[traj.i],col=col[i],cex=cex)
    }
    # Trajectory label: white box at the midpoint of the middle segment.
    # NOTE(review): tlx/tly reuse cx/cy, which are only set in the `arrows`
    # branch -- labelling with arrows=FALSE relies on stale values; confirm.
    if (!is.null(trajlab)) {
      tlx <- cx[-n]+diff(cx)/2
      tly <- cy[-n]+diff(cy)/2
      wh <- if (length(tlx)%%2==0) {length(tlx)/2} else {(length(tlx)+1)/2}
      ctlx <- tlx[wh]
      ctly <- tly[wh]
      lab <- paste0(" ",trajlab[i]," ")
      xh <- strwidth(lab,cex=trajlab.cex[i])
      yh <- strheight(lab,cex=trajlab.cex[i])*5/3
      rect(ctlx-xh/2,ctly-yh/2,ctlx+xh/2,ctly+yh/2,col="white",border=col[i])
      text(ctlx,ctly,lab,col=col[i],cex=trajlab.cex[i])
    }
  }
  # --- Title placed inside the plotting region at the chosen corner -----
  if (!is.null(main)) {
    xmain <- if (main.pos %in% c("bottomleft","topleft")) {xlim[1]-0.02*diff(xlim)} else {xlim[2]+0.02*diff(xlim)}
    ymain <- if (main.pos %in% c("bottomleft","bottomright")) {ylim[1]} else {ylim[2]}
    adjmain <- if (main.pos %in% c("bottomleft","topleft")) {c(0,NA)} else {c(1,NA)}
    text(xmain,ymain,main,adj=adjmain,cex=main.cex)
  }
  # --- Optional legend (with pch only when points are drawn) ------------
  if (legend) {
    if (is.null(legend.lab)) {
      if (!is.null(trajlab)) {legend.lab <- trajlab} else {legend.lab <- as.character(1:ntraj)}
    }
    if (points) {
      if (!is.null(legend.title) && nchar(legend.title)>0) {
	legend(legend.pos,legend.lab,col=col,pch=pch,lty=lty,cex=legend.cex,bg="white",title=legend.title)
      } else {
	legend(legend.pos,legend.lab,col=col,pch=pch,lty=lty,cex=legend.cex,bg="white")
      }
    } else {
      if (!is.null(legend.title) && nchar(legend.title)>0) {
	legend(legend.pos,legend.lab,col=col,lty=lty,cex=legend.cex,bg="white",title=legend.title)
      } else {
	legend(legend.pos,legend.lab,col=col,lty=lty,cex=legend.cex,bg="white")
      }
    }
  }
  box()
  par(mar=oldmar)  # restore caller's margins
}
|
d432c6d91ece6ca65a9098f262529ab0be539db7
|
c3e04ef4e700775930070ad531746e2b3fab08ef
|
/13_factors_i/13_factors_i.R
|
fe465dbff05251b162bd44e2936c61734572a303
|
[] |
no_license
|
pinkstonax/r-training
|
fa27190959c14d68db3406f01a31b2ec5df6d8af
|
49e6a4955e0c0aa9f66d363e338ea11b09643754
|
refs/heads/master
| 2021-05-18T16:16:16.566183
| 2020-05-29T03:01:18
| 2020-05-29T03:01:18
| 251,313,316
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,171
|
r
|
13_factors_i.R
|
##### Factors Part 1 (R for Data Science, chapter 15)
##### Amanda Pinkston
##### May 14, 2020
## Teaching script: working with factors (creation, level ordering,
## reordering for plots) using ACLED Sudan 2015-2019 event data.
## NOTE(review): hard-coded absolute path; breaks on other machines.
setwd("C:\\Users\\Amanda\\Documents\\work stuff\\r-training\\13_factors_i")
library(tidyverse)
### Factors are used to work with categorical variables,
### variables that have a fixed and known set of possible values.
### They are also useful when you want to display character vectors
### in a non-alphabetical order.
### Functions (many from the package forcats within tidyverse):
### factor(), sort(), unique(), fct_inorder(), levels(),
### fct_reorder(), fct_relevel(), fct_reorder2(), fct_infreq(),
### fct_rev(), fct_recode(), fct_collapse(), fct_lump()
### read in the data--this is acled 2015-2019 Sudan.
### see API script at the bottom.
dat <- read_csv("2020-05-14_acled.csv")
### what type of variable is event_type currently?
class(dat$event_type)
### warning: if you use the function read.csv(),
### strings will be read in as factors unless you explicitly say not to, e.g.,
### read.csv("filename.csv", stringsAsFactors=FALSE)
### event_type is a categorical variable, what R calls a "factor"
### so let's make it a factor
dat$event_type <- factor(dat$event_type)
### check the variable type now
class(dat$event_type)
### what are the possible values for event_type?
levels(dat$event_type)
### what is the default ordering of the factor values?
### you can set the ordering however you want.
### sometimes you may want the ordering to follow the first appearance of each value
### you can do this when creating the factor:
first_appearance <- factor(dat$event_type, levels=unique(dat$event_type))
levels(first_appearance)
### or after the fact:
alpha <- factor(dat$event_type)
levels(alpha)
first_appearance <- alpha %>% fct_inorder()
levels(first_appearance)
### let's see a quick count of events by event_type
dat %>% count(event_type)
### how about a bar chart?
ggplot(dat, aes(event_type)) +
  geom_bar()
### what if we want to order the bar chart by number of events?
dat$event_type <- fct_infreq(dat$event_type)
ggplot(dat, aes(event_type)) +
  geom_bar()
### what if we want it to go the other way?
dat$event_type <- fct_rev(dat$event_type)
ggplot(dat, aes(event_type)) +
  geom_bar()
### or you can put it all in one line:
dat <- read_csv("2020-05-14_acled.csv") ### reload the data
dat %>% mutate(event_type = event_type %>% fct_infreq() %>% fct_rev()) %>%
  ggplot(aes(event_type)) +
  geom_bar()
### side note: str_wrap()
### (wraps long level names onto multiple lines so axis labels fit)
dat %>% mutate(event_type = event_type %>% str_wrap(15) %>% fct_infreq() %>% fct_rev()) %>%
  ggplot(aes(event_type)) +
  geom_bar() +
  labs(x="",
       y="Count",
       title="Sudan: Number of Events by Event Type, 2015-2019")
### let's plot average fatalities per year by admin1
### (sum fatalities per admin1-year, then average the yearly sums)
fat_admin1 <- dat %>% group_by(admin1, year) %>%
  summarize(fatalities=sum(fatalities)) %>%
  ungroup() %>%
  group_by(admin1) %>%
  summarize(mean_fatalities = mean(fatalities))
ggplot(fat_admin1, aes(x=mean_fatalities, y=admin1)) +
  geom_point()
### order by mean fatalities
### within ggplot()
ggplot(fat_admin1, aes(x=mean_fatalities, y=fct_reorder(admin1, mean_fatalities))) +
  geom_point()
### or outside ggplot()
fat_admin1 %>% mutate(admin1 = fct_reorder(admin1, mean_fatalities)) %>%
  ggplot(aes(x=mean_fatalities, y=admin1)) +
  geom_point()
### what if we want to add "total" for all of Sudan?
### calculate/add the row
years_tab <- dat %>% group_by(year) %>%
  summarize(fatalities=sum(fatalities))
total_mean <- mean(years_tab$fatalities)
tab_with_tot <- fat_admin1 %>% add_row(admin1="Total", mean_fatalities=total_mean)
### plot
tab_with_tot %>% mutate(admin1 = fct_reorder(admin1, mean_fatalities)) %>%
  ggplot(aes(x=mean_fatalities, y=admin1)) +
  geom_point()
### what if we want the total on the bottom?
### (fct_relevel moves "Total" to the front of the level order)
tab_with_tot %>% mutate(admin1 = admin1 %>% fct_reorder(mean_fatalities)
                        %>% fct_relevel("Total")) %>%
  ggplot(aes(x=mean_fatalities, y=admin1)) +
  geom_point()
### let's look at number of events by event_type over time
### make the table
events_tab <- dat %>% group_by(event_type, year) %>%
  summarize(events=n())
### plot
ggplot(events_tab, aes(x=year, y=events, color=event_type)) +
  geom_line()
### make the plot nicer by ordering the legend according to the order of the lines
ggplot(events_tab, aes(x=year, y=events,
                       color=fct_reorder2(event_type,year,events))) +
  geom_line() +
  labs(color="Event Type")
### the end.
#### API script to download the data
#### (network access required; run once to refresh the local CSV)
#### 1. create the path name
base_path <- "https://api.acleddata.com/acled/read.csv?terms=accept"
country <- "country=Sudan"
year <- "year=2015|2016|2017|2018|2019"
path <- paste(base_path, country, year, "limit=0", sep="&")
### 2. create the name to give to the downloaded file
##### I attach the date so I know when it was downloaded
filename <- paste(Sys.Date(), "acled.csv", sep="_")
### 3. Download the file
download.file(url=path, destfile=filename)
|
6a92a9075c5a317e6dbbe89a4a9af33c32ee7801
|
089f560b12e6de236bc52852a05c6ad6c09df17e
|
/man/dengue_nowcast.Rd
|
f41f5cd630941f7a265bea2b904ca23ebfa96851
|
[
"MIT"
] |
permissive
|
mlamias/delphiepidata
|
e392c518f6f4c0c0d1c8d102c62845243e75ec30
|
7dc9eb67a3530cc027e66d3ccc7e31003cd7f536
|
refs/heads/master
| 2022-01-15T20:01:27.577372
| 2019-05-29T12:18:00
| 2019-05-29T12:18:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 450
|
rd
|
dengue_nowcast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f.R
\name{dengue_nowcast}
\alias{dengue_nowcast}
\title{Fetch Delphi's PAHO Dengue nowcast}
\usage{
dengue_nowcast(locations, epiweeks)
}
\arguments{
\item{locations}{a \code{list} of \link{delphi_regions} and/or \link{delphi_states}}
\item{epiweeks}{a \code{list} of epiweeks (format for an epiweek is \code{YYYYWW})}
}
\description{
Fetch Delphi's PAHO Dengue nowcast
}
|
727cce8aeeac4be890938c73eab0e5447cc2b1e3
|
8a4c12aa2cee6ccefb9dee6d512654edab7cef49
|
/man/add_site_metadata.Rd
|
eeae4a3e71f076842b7d7de8e674d3d0a4ec66d6
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.pilot
|
f44701480daf2616ee0c5393bfc2d1aad63e134a
|
e7189f774e3b675a30410a05c13bcd075c07fe92
|
refs/heads/master
| 2023-03-26T00:59:41.048255
| 2022-10-25T05:37:38
| 2022-10-25T05:37:38
| 224,645,693
| 1
| 0
|
MIT
| 2022-10-25T05:37:39
| 2019-11-28T12:11:19
|
R
|
UTF-8
|
R
| false
| true
| 750
|
rd
|
add_site_metadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_data_basel.R
\name{add_site_metadata}
\alias{add_site_metadata}
\title{Helper function: add site metadata}
\usage{
add_site_metadata(
df,
df_col_sitecode = "SiteCode",
meta_site_path = package_file("shiny/basel/data/metadata/meta_site.csv")
)
}
\arguments{
\item{df}{data frame containing at least a column "SiteCode"}
\item{df_col_sitecode}{column in df containing site code (default: "SiteCode")}
\item{meta_site_path}{Define path of "meta_site.csv" to be imported
(default: kwb.pilot:::package_file("shiny/basel/data/metadata/meta_site.csv"))}
}
\value{
returns input data frame with joined metadata
}
\description{
Helper function: add site metadata
}
|
8958937bab3da8dc41db1c23bcfd1ef8ca7859ea
|
12e3d5f8618bbc113e6f039b7346fc5d723015c9
|
/Stats_I/Class14/2-way Lab Practice ForClass.R
|
2e9e4485d10263f9f4f12203bdee61e6c6504763
|
[] |
no_license
|
raschroeder/R-Coursework
|
4af2ded6e9af2c0c64697dcc796a12e508f38ae4
|
1e9800b00f84cb4092c956d9910a710729b9aff3
|
refs/heads/master
| 2020-04-05T12:44:28.824912
| 2019-02-06T15:59:07
| 2019-02-06T15:59:07
| 156,878,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,268
|
r
|
2-way Lab Practice ForClass.R
|
## Two-way between-subjects ANOVA lab: reads practice data, labels the two
## factors, summarises cell means, plots them, fits the ANOVA with afex,
## and follows up with simple effects via emmeans.
# Load necessary packages
library(ggplot2)
library(afex)
library(dplyr)
library(emmeans)
################################
######## Readin data ##########
################################
## NOTE(review): hard-coded absolute path; breaks on other machines.
setwd("/Users/rachel/Box\ Sync/R\ Coursework/Class14")
DataSet<-read.csv("PracticeData.csv", sep = ",")
## Raw column is lowercase `training`; the labelled factor gets a capital T.
DataSet$Training <- factor(DataSet$training,
                      levels=c(1,2),
                      labels=c("No Social Training","Social ineptness reduction"))
DataSet$Emotional <- factor(DataSet$Emotional,
                      levels=c(1,2,3),
                      labels=c("Control","Mild electric shocks","Never ending statistics class"))
################################
######## Descriptives ##########
################################
## Cell-wise n, mean, SD, and standard error of the mean.
Sum.Table<-DataSet %>%
  group_by(Training,Emotional) %>%
  summarize(n = n(),
            Means = mean(DV),
            SD = sd(DV),
            SEM = SD/n^.5)
Sum.Table
################################
############# Plot #############
################################
## Grouped bar chart of cell means with one-sided SEM error bars.
Plot.1<-ggplot(Sum.Table, aes(x = Training, y = Means, group=Emotional, fill=Emotional))+
  geom_bar(stat='identity',position="dodge", color='black')+
  geom_errorbar(aes(ymax = Means + SEM, ymin= Means),
                position=position_dodge(width=0.9), width=0.25)+
  xlab('')+
  ylab('Weirdness')+
  theme_bw()+
  theme(panel.grid.major=element_blank(),
        panel.grid.minor=element_blank(),
        panel.border = element_rect(fill = NA, colour = "NA"),
        axis.line = element_line(size = 1, colour = "grey80"),
        legend.position = "top",
        legend.title = element_blank())
Plot.1
################################
############# ANOVA ############
################################
## afex::aov_car with Error(SubjectID) -> between-subjects 2x3 ANOVA.
Nice.Table<-aov_car(DV~Emotional*Training + Error(SubjectID),
                    data=DataSet)
Nice.Table
################################
######### Follow-up ############
################################
## Simple effects of Training within each level of Emotional.
Simple.Effects.By.Type<-emmeans(Nice.Table, ~Training|Emotional)
Simple.Effects.By.Type
################################
###### Theory question #########
################################
## One-way model on Emotional alone, for comparison with the factorial fit.
Nice.Table2<-aov_car(DV~Emotional + Error(SubjectID),
                     data=DataSet)
Nice.Table2
|
4d48053b7ea238438e30a716a63f71f6bc665ab5
|
c54c80b4a54bbfc73249f5cb500183ff2e3c89cd
|
/man/calc_abundance.Rd
|
430f9e317568a3738a6af8cf7e8331d3bba8a266
|
[
"MIT"
] |
permissive
|
friedue/SCEdjvdj
|
23822d84cae0c882fafe327b6f28a3b5e5e3f8c0
|
de4976f60fea0bb45f464a0796475753fa0af72e
|
refs/heads/master
| 2023-04-04T14:25:35.593631
| 2021-04-19T18:58:16
| 2021-04-19T18:58:16
| 339,117,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 852
|
rd
|
calc_abundance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_abundance.R
\name{calc_abundance}
\alias{calc_abundance}
\title{Calculate clonotype abundance}
\usage{
calc_abundance(
SCE_in,
clonotype_col = "cdr3_nt",
cluster_col = NULL,
prefix = "",
return_SCE = TRUE
)
}
\arguments{
\item{SCE_in}{SCE object containing V(D)J data}
\item{clonotype_col}{meta.data column containing clonotype IDs}
\item{cluster_col}{meta.data column containing cluster IDs to use for
grouping cells when calculating clonotype abundance}
\item{prefix}{Prefix to add to new meta.data columns}
\item{return_SCE}{Return an SCE object. If set to FALSE, a tibble
summarizing the results is returned.}
}
\value{
SCE object with clonotype abundance added to meta.data
}
\description{
Calculate clonotype abundance
}
\author{
djvdj authors
}
|
e859370dc2a4331575769ac21fddfd082ce09d2f
|
929c1c7a62e838e09ff576a41e96b6799d355508
|
/R/percentageAlignment.R
|
302c507d63f25d667377fcb9cbdeeb40ccef7306
|
[] |
no_license
|
pjiang1105/TimeMeter
|
be6ac05bcda96f1089387bdde7c6f5ad66b95707
|
970970b5b3b495f789c0e6a532d9436e856300ca
|
refs/heads/master
| 2022-02-11T07:42:49.654558
| 2022-01-27T02:23:15
| 2022-01-27T02:23:15
| 251,404,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,161
|
r
|
percentageAlignment.R
|
#' Percentage of Alignment
#'
#' Computes, for the query and for the reference separately, the fraction of
#' the total time interval that is reliably aligned: the length of the
#' alignable time interval divided by the length of the full time interval.
#'
#' @param timePoints_query A vector containing time points (Query)
#' @param timePoints_reference A vector containing time points (Reference)
#' @param alignableRegion_timePoints_query A vector containing truncated time points (Query)
#' @param alignableRegion_timePoints_reference A vector containing truncated time points (Reference)
#'
#' @return A named numeric vector with two elements:
#' \item{percentage_alignment_query }{percentage of alignment for query}
#' \item{percentage_alignment_reference }{percentage of alignment for reference}
#'
#' @examples
#' data(simData)
#' data=simdata$TimeShift_10
#' gene=data$gene
#' query=data$query
#' timePoints_query=data$timePoints_query
#' reference=data$reference
#' timePoints_reference=data$timePoints_reference
#' alignment=dtw(query,reference)
#' dtw_results=list(alignment$index1,alignment$index2)
#' index_1=dtw_results[[1]]
#' index_2=dtw_results[[2]]
#' aligned_values_query=query[index_1]
#' aligned_values_reference=reference[index_2]
#' aligned_timePoints_query=timePoints_query[index_1]
#' aligned_timePoints_reference=timePoints_reference[index_2]
#' index_alignableRegion=alignableRegionIndex(aligned_timePoints_query,aligned_timePoints_reference)
#' alignableRegion_timePoints_query=aligned_timePoints_query[index_alignableRegion]
#' alignableRegion_timePoints_reference=aligned_timePoints_reference[index_alignableRegion]
#' percentageAlignmentQuery=percentageAlignment(timePoints_query,
#'              timePoints_reference,
#'              alignableRegion_timePoints_query,
#'              alignableRegion_timePoints_reference)['percentage_alignment_query']
#' percentageAlignmentReference=percentageAlignment(timePoints_query,
#'              timePoints_reference,
#'              alignableRegion_timePoints_query,
#'              alignableRegion_timePoints_reference)['percentage_alignment_reference']
#'
#' @export
#' @author Peng Jiang \email{PJiang@morgridge.org}
percentageAlignment <- function(timePoints_query,
                                timePoints_reference,
                                alignableRegion_timePoints_query,
                                alignableRegion_timePoints_reference) {
  # Length of the time interval spanned by a set of time points.
  interval_length <- function(tp) max(tp) - min(tp)
  c(
    'percentage_alignment_query' =
      interval_length(alignableRegion_timePoints_query) /
        interval_length(timePoints_query),
    'percentage_alignment_reference' =
      interval_length(alignableRegion_timePoints_reference) /
        interval_length(timePoints_reference)
  )
}
|
f4fea42f409bb799045766cd0025481057c82ea7
|
2a3a7c24fa92e7bc167a3b058dfc0f49ecb749b4
|
/cachematrix.R
|
9f98729d744a30ba1abc8069e5affebe91b0ca3e
|
[] |
no_license
|
lclaudiotj/ProgrammingAssignment2
|
acf04710ffadac4138103b33fdfb9a5f0ef20772
|
39686adba18bf8394a97eec497345ddb7df9c0b7
|
refs/heads/master
| 2020-12-25T11:31:55.561996
| 2015-02-21T00:28:54
| 2015-02-21T00:28:54
| 31,077,236
| 0
| 0
| null | 2015-02-20T17:58:36
| 2015-02-20T17:58:36
| null |
UTF-8
|
R
| false
| false
| 1,550
|
r
|
cachematrix.R
|
## makeCacheMatrix: build a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor functions that close over the matrix `x`
## and its (lazily computed) inverse:
##   set(y)        - replace the stored matrix and invalidate the cache
##   get()         - return the stored matrix
##   setmatrix(v)  - store a computed inverse in the cache
##   getmatrix()   - return the cached inverse, or NULL if none cached yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y       # replace the matrix in the enclosing environment
    inv <<- NULL  # any previously cached inverse is now stale
  }
  get <- function() x
  setmatrix <- function(solve) inv <<- solve
  getmatrix <- function() inv
  list(set = set, get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it only on the first call.
##
## On a cache hit the stored inverse is returned immediately (with a
## message); on a miss the inverse is computed with solve(), stored via
## x$setmatrix(), and returned. Extra arguments in `...` are forwarded
## to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")  # cache hit: skip recomputation
    return(cached)
  }
  inv <- solve(x$get(), ...)  # cache miss: invert the stored matrix
  x$setmatrix(inv)            # memoize for subsequent calls
  inv
}
|
76a52e50e10453fdce156a09f367b93725f8e5f7
|
818dd3954e873a4dcb8251d8f5f896591942ead7
|
/Mouse/S1_S2_AIL/preprocessing.R
|
6f9054080ebdf80d06730d4d20149ea4d21db74e
|
[] |
no_license
|
DannyArends/HU-Berlin
|
92cefa16dcaa1fe16e58620b92e41805ebef11b5
|
16394f34583e3ef13a460d339c9543cd0e7223b1
|
refs/heads/master
| 2023-04-28T07:19:38.039132
| 2023-04-27T15:29:29
| 2023-04-27T15:29:29
| 20,514,898
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,575
|
r
|
preprocessing.R
|
## Preprocess LocusXDNA genotyping output for the S1/S2 AIL mouse cross:
## parse the interleaved call/score rows, apply quality and frequency
## filters, and write cleaned genotype and map tables.
## NOTE(review): hard-coded absolute path; breaks on other machines.
setwd("D:/Edrive/Mouse/S1_S2")
## Header (line 16) holds column labels; line 18 holds SNP names; data
## rows start after line 22, alternating call rows and score rows.
locusxdnaheader <- unlist(strsplit(readLines("Humboldt_Univ_Zu_Berlin_MURGIGV01_20191205_LocusXDNA.csv", n=16)[16],","))
locusxdnasnps <- unlist(strsplit(readLines("Humboldt_Univ_Zu_Berlin_MURGIGV01_20191205_LocusXDNA.csv", n=18)[18],","))
locusxdna <- readLines("Humboldt_Univ_Zu_Berlin_MURGIGV01_20191205_LocusXDNA.csv")[-c(1:22)]
splitted <- strsplit(locusxdna, ",")
calls <- matrix(NA, length(locusxdna) / 2, length(splitted[[1]]))
scores <- matrix(NA, length(locusxdna) / 2, length(splitted[[1]]))
## Odd rows are genotype calls, even rows are the matching quality scores.
for(x in 1:length(splitted)) {
  if(x %% 2 == 1) calls[ceiling(x/2),] <- splitted[[x]]
  if(x %% 2 == 0) scores[ceiling(x/2),] <- splitted[[x]]
}
markers <- locusxdnaheader[4:length(locusxdnaheader)]
colnames(calls) <- c("Label", "plateWell", "Date","oligoPoolId","bundleId", "status", "Type", "Nas", markers)
colnames(scores) <- c("Label", "plateWell", "Date","oligoPoolId","bundleId", "status", "Type", "Nas", markers)
gts <- calls[,markers]
rownames(gts) <- gsub("V 888-", "AIL", calls[, "Label"])
qual <- apply(scores[,markers],2,as.numeric)
rownames(qual) <- gsub("V 888-", "AIL", calls[, "Label"])
## Mask low-confidence calls (score < 0.7) and unknown ("U") calls.
gts[qual < 0.7] <- NA
gts[gts == "U"] <- NA
# Write out the raw genotypes
gts <- t(gts)
write.table(gts, "genotypes.raw.txt", sep="\t", quote=FALSE)
gts <- read.table("genotypes.raw.txt", sep="\t")
# Groups with less than 10 individuals are set to missing
tbls <- apply(gts, 1, table)
for(x in 1:length(tbls)){
  for(gt in names(tbls[[x]])){
    if(tbls[[x]][gt] < 10){
      gts[x, which(gts[x,] == gt)] <- NA
    }
  }
}
# All missing
## NOTE(review): each `gts[-idx,]` below silently drops ALL rows if `idx`
## is empty (`-integer(0)` selects nothing); guard with length(idx) > 0.
idx <- which(apply(gts,1, function(x){sum(is.na(x)) == length(x)}))
gts <- gts[-idx,]
# Not segregating
idx <- which(apply(gts,1,function(x){length(table(x)) == 1}))
gts <- gts[-idx,]
# More than 10 % missing data
ismissing <- apply(apply(gts, 1, is.na),2,sum)
tooMuchMissing <- names(which((ismissing / ncol(gts)) > 0.1))
gts <- gts[-which(rownames(gts) %in% tooMuchMissing),]
tbls <- apply(gts, 1, table)
## Attach map positions for the retained markers and sketch the genome.
map <- read.table("snp_map.karl.txt", sep = ",", header = TRUE, row.names=1)
map <- map[rownames(gts),]
chrs <- 1:21
names(chrs) <- c(1:19, "X", "Y")
plot(c(1,21), c(0,200000000), t = 'n', xaxt = "n", las= 2, ylab = "Position (mb)", xlab = "Chr", yaxt = 'n')
aa <- apply(map, 1, function(r) { points(x = chrs[r[1]], y = r[2], pch = "-"); })
axis(1, at = chrs, names(chrs))
axis(2, at = seq(0,200000000, 25000000), seq(0,200000000, 25000000)/1000000)
write.table(gts, "genotypes.cleaned.txt", sep="\t", quote=FALSE)
write.table(map, "map.cleaned.txt", sep="\t", quote=FALSE)
|
bd24bb70ea4bc0699ddf067e565ff81c82d279fc
|
283409d2a37155d58855bc9be3b78e0ad7cdacb8
|
/Assignment-1/4.R
|
f0fdc658ed9b5fb84ffa119172e5bec9a395d9ae
|
[] |
no_license
|
VivianeLovatel/Brasil_2019
|
ddca243336145336c94ce09ff97d1918bf67e95d
|
82128b52ed7fa47d343cfaccf6da698f44e9883a
|
refs/heads/master
| 2020-07-07T00:45:58.305182
| 2019-08-20T15:20:36
| 2019-08-20T15:20:36
| 203,190,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 99
|
r
|
4.R
|
## Demonstrates numeric coercion and printing of a double value.
z <- 8460.2          # use <- for assignment (idiomatic R)
print(z)             # 8460.2
as.numeric(z)        # no-op coercion; result is discarded when sourced
print(as.numeric(z)) # still 8460.2 (z is already numeric)
as.integer(z)        # truncates toward zero; result is discarded when sourced
print(as.integer(z)) # 8460
print(z > 0)         # TRUE
|
14256aafee76d7e38696e2adda1aed7e1f331984
|
c442d726a8e6301ccc17557ca91bde15f71372af
|
/man/dryad_metadata.Rd
|
f35982b217dbeae900a28360e2653267751bb8ba
|
[
"MIT"
] |
permissive
|
alrutten/rdryad
|
5dcad01bcef7167e2d47c0eac99fad39d717f83e
|
134707a89a1156f4e6d0b3cf4f5ab6e11501ecd8
|
refs/heads/master
| 2020-12-28T04:17:47.720281
| 2019-12-09T14:44:49
| 2019-12-09T14:44:49
| 238,179,247
| 0
| 0
|
NOASSERTION
| 2020-02-04T10:26:21
| 2020-02-04T10:26:20
| null |
UTF-8
|
R
| false
| true
| 884
|
rd
|
dryad_metadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dryad_metadata.R
\name{dryad_metadata}
\alias{dryad_metadata}
\title{Download Dryad file metadata}
\usage{
dryad_metadata(doi, ...)
}
\arguments{
\item{doi}{(character) A Dryad DOI for a dataset of files within
a dataset}
\item{...}{Further args passed on to \link[crul:HttpClient]{crul::HttpClient}}
}
\value{
named (list) with slots for:
\itemize{
\item desc: object metadata
\item files: file information
\item attributes: metadata about the metadata file
\item structMap: not sure what this is
}
}
\description{
Download Dryad file metadata
}
\examples{
\dontrun{
dryad_metadata('10.5061/dryad.1758')
dryad_metadata('10.5061/dryad.9t0n8/1')
dryad_metadata('10.5061/dryad.60699/3')
out <- dryad_metadata('10.5061/dryad.60699/5')
out$desc$text[out$desc$qualifier \%in\% c("pageviews", "downloads")]
}
}
|
dbab06a1fcaea8d059cdbd6ac877feb7049f03dd
|
8b053a2925e8a6bd7a494ef3ec32a755030435e9
|
/figCode/fig4.R
|
39fff42e4b7972af068e9fccfe12f105c309bd9d
|
[] |
no_license
|
nofarbardugo/BCR_lineage_Trees
|
95f05fffa96eb78c3cf7b6aa833110a123102175
|
2228349d0b01ec07be627e5ab3bf97cef934aa63
|
refs/heads/master
| 2021-01-10T05:41:47.028043
| 2015-12-05T18:09:40
| 2015-12-05T18:09:40
| 47,058,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,477
|
r
|
fig4.R
|
## fig4.R setup: working directory, libraries, and a helper that arranges
## several ggplots with a single shared legend.
## NOTE(review): the first source.dir assignment is immediately overwritten
## by the second; only the second path is used.
source.dir <-'/home/nofar/Desktop/project papaers/dataForFinalGraph'
source.dir <-'/home/nofar/Desktop/LabProject/data'
setwd(source.dir)
library(ggplot2)
library(reshape2)
library(grid)
library(scales)
library(plyr)
library(gridExtra)
library(cowplot)
library(lattice)
library(RGraphics)
## Arrange any number of ggplots in one column with one shared legend:
## the legend grob is extracted from the first plot (all plots are assumed
## to share the same legend), the per-plot legends are stripped, and the
## plots are stacked above the single legend.
grid_arrange_shared_legend <- function(...) {
  plots <- list(...)
  # Pull the "guide-box" (legend) grob out of the first plot.
  g <- ggplotGrob(plots[[1]] + theme(legend.position="bottom"))$grobs
  legend <- g[[which(sapply(g, function(x) x$name) == "guide-box")]]
  lheight <- sum(legend$height)
  grid.arrange(
    do.call(arrangeGrob, lapply(plots, function(x)
      x + theme(legend.position="none"))),   # strip individual legends
    legend,
    ncol = 1,
    heights = unit.c(unit(1, "npc") - lheight, lheight))
}
############### ~~~~~~~~~~~fig 4 ~~~~~~~~~~~~~~~~ ################################
#~~~~~ fig4
## Combine Pfizer and Flu lineage-tree summaries (mutations vs. children
## per isotype), then compute per-dataset/per-isotype mutation statistics.
# pfizer details
df.pfizer_depthVSchildren<- read.csv('pfizer/PFIZER_mutation_num_per_isotype_with_certain_children_numberWithInternal.csv', header = T, stringsAsFactors = F)
df.pfizer_depthVSchildren$mutPerChild <- df.pfizer_depthVSchildren$depth/df.pfizer_depthVSchildren$Children.Number
df.pfizer_depthVSchildren$type <- "Pfizer"
# flu details
df.flu_depthVSchildren<- read.csv('flu/FLU_mutation_num_per_isotype_with_certain_children_numberWithInternal.csv', header = T, stringsAsFactors = F)
df.flu_depthVSchildren$mutPerChild <- df.flu_depthVSchildren$depth/df.flu_depthVSchildren$Children.Number
df.flu_depthVSchildren <- df.flu_depthVSchildren[df.flu_depthVSchildren$Children.Number >0 ,]
## Merge the two IgG subclasses into a single IGHG category for flu.
df.flu_depthVSchildren[df.flu_depthVSchildren$Isotype.Name=="IGHG-1" |df.flu_depthVSchildren$Isotype.Name=="IGHG-2","Isotype.Name"] <-"IGHG"
df.flu_depthVSchildren$type <- "Flu"
# merge both data set
df.combine_depthVSchildren <- rbind(df.pfizer_depthVSchildren,df.flu_depthVSchildren)
# remove rows with ziro child (not supposed to be but append in igG2)
df.combine_depthVSchildren <- df.combine_depthVSchildren[df.combine_depthVSchildren$Children.Number >0 ,]
# get avg of the ratio between mutation num to children num
#df.combine_depthVSchildrenwitoutZiroLength <- df.combine_depthVSchildren[df.combine_depthVSchildren$depth >0 ,]
## Per dataset x isotype: totals and mutation-type fractions (each fraction
## is the share of that mutation class among all mutations).
df.combine_depthVSchildrenMueserd <- ddply(df.combine_depthVSchildren, c("type","Isotype.Name"), summarise,
                                           isotypeNumber    = length(Isotype.Name),
                                           childrenNumber = sum(Children.Number),
                                           mutationNumber = sum(depth),
                                           meanDepthVSchildren = mutationNumber/childrenNumber,
                                           #meanDepthVSchildren = mean(mutPerChild),
                                           meanSynFR = sum(synonyms_FR)/mutationNumber,
                                           meanNonSynFR = sum(nonSynonyms_FR)/mutationNumber,
                                           meanSynCDR = sum(synonyms_CDR)/mutationNumber,
                                           meanNonSynCDR = sum(nonSynonyms_CDR)/mutationNumber,
                                           meanSynonymus = sum(Synonymus)/mutationNumber,
                                           meanNonSynonymus = sum(nonSynonymus)/mutationNumber
)
# get the ratio beteewn each mutation type in flu
df.flu_mutation <- ddply(df.flu_depthVSchildren, c("Isotype.Name"), summarise,
                         mutationNumber = sum(depth),
                         meanSynFR = sum(synonyms_FR)/mutationNumber,
                         meanNonSynFR = sum(nonSynonyms_FR)/mutationNumber,
                         meanSynCDR = sum(synonyms_CDR)/mutationNumber,
                         meanNonSynCDR = sum(nonSynonyms_CDR)/mutationNumber
)
## Drop the helper count column and reshape to long format for plotting.
df.flu_mutation <- subset(df.flu_mutation,select = -c(mutationNumber))
df.flu_mutation<- melt(df.flu_mutation, id.vars = 'Isotype.Name', variable.name='muteType')
df.flu_mutation$type <- "Flu"
# get the ratio beteewn each mutation type in pfizer
df.pfizer_mutation <- ddply(df.pfizer_depthVSchildren, c("Isotype.Name"), summarise,
                            mutationNumber = sum(depth),
                            meanSynFR = sum(synonyms_FR)/mutationNumber,
                            meanNonSynFR = sum(nonSynonyms_FR)/mutationNumber,
                            meanSynCDR = sum(synonyms_CDR)/mutationNumber,
                            meanNonSynCDR = sum(nonSynonyms_CDR)/mutationNumber
)
df.pfizer_mutation <- subset(df.pfizer_mutation,select = -c(mutationNumber))
df.pfizer_mutation<- melt(df.pfizer_mutation, id.vars = 'Isotype.Name', variable.name='muteType')
df.pfizer_mutation$type <- "Pfizer"
# combine flu and pfizer
df.combine_mutation <- rbind(df.pfizer_mutation,df.flu_mutation)
# Figure 4A: average mutations per child for each isotype, faceted by dataset
# (Pfizer vs Flu), with a fixed colour per isotype and no legend.
figure4A <- ggplot(data=df.combine_depthVSchildrenMueserd, aes(x=Isotype.Name, y=meanDepthVSchildren, fill=Isotype.Name)) +
facet_wrap(~type, ncol=2, scale="free_x") + #facet_grid(type ~ . ,scale = "free_y") +
geom_bar(colour="black", stat="identity",position=position_dodge(),size=.3) + # Thinner lines
scale_fill_manual(values=c("IGHA" = "orange","IGHG-1" = "#CC6666","IGHG-2" = "brown",
"IGHM" ="blue","IGHD" = "#FF3399","IGHE" = "purple",
"naive_IGHM" = "green","IGHG" = "red")) +
# scale_fill_hue(name="Isotype name") + # Set legend title
ylab("Avg mutation per child") + # Set axis labels
xlab("") +
ggtitle("Average mutations number per child in each isotype") + # Set title
background_grid(major = 'y', minor = "y") + # add thin horizontal
theme_linedraw()+
theme( legend.title=element_blank(),
legend.position="none", # legend position
legend.justification=c(1,1),
panel.grid.minor=element_blank(),# remove grid
panel.grid.major=element_blank()
)
#~~~~~ fig4B
# Figure 4B1: stacked fractions of each mutation type per isotype (Pfizer).
# FIX: the original chained a scale_colour_discrete() to rename the legend,
# but the legend is generated by the *fill* aesthetic, so that scale had no
# effect and the legend kept the raw variable names. The name/breaks/labels
# now live on the fill scale itself.
figure4B1 <- ggplot(data = df.pfizer_mutation, aes(x = Isotype.Name, y = value, fill = muteType)) +
  geom_bar(colour = "black", stat = "identity", size = .3) +  # thinner bar outlines
  xlab("") + ylab("") +  # axis labels intentionally blank
  ggtitle("Pfizer") +
  scale_fill_brewer(palette = "Set1",
                    name = "Mutation type",
                    breaks = c("meanSynFR", "meanNonSynFR", "meanSynCDR", "meanNonSynCDR"),
                    labels = c("Syn FR", "NonSyn FR", "Syn CDR", "NonSyn CDR")) +
  theme_linedraw() +
  theme(
    panel.grid.minor = element_blank(),  # remove grid
    panel.grid.major = element_blank())
# Figure 4B2: stacked fractions of each mutation type per isotype (Flu),
# legend suppressed (the shared legend comes from figure4B1).
figure4B2 <-ggplot(data=df.flu_mutation, aes(x=Isotype.Name, y=value, fill=muteType)) +
geom_bar(colour="black",stat="identity", size=.3) + # Thinner lines
scale_fill_brewer(palette="Set1")+
xlab("") + ylab("") + # Set axis labels
ggtitle("Flu") + # Set title
# coord_flip() +# convert exes
theme_linedraw()+
theme( legend.position="none",
panel.grid.minor=element_blank(), # remove grid
panel.grid.major=element_blank())
# Draw B-panels stacked with one shared legend.
grid_arrange_shared_legend(figure4B1,figure4B2)
# theme(text = element_text(lineheight=.8,size = 14),
# legend.position="none",axis.text.x = element_text(angle = 90,hjust = 1))
a1 <- grid.arrange(figure4B2, figure4B1, ncol=1)
a <- grid.arrange(figure4A, a1, ncol=2)
#grid.arrange(figure4B2, figure4B1,ncol =2)
# NOTE(review): `a` is immediately overwritten below -- the grid.arrange
# composition above is discarded in favour of the cowplot layout.
a <- ggdraw() +
draw_plot(figure4A, 0, 0, 0.5, 1) +
draw_plot(figure4B2, 0.5, 0, .24, 1) +
draw_plot(figure4B1, 0.74, 0, .25, 1) +
draw_plot_label(c("A", "B"),c(0, 0.55), c(0.98, 0.98), size = 15)
|
2bbb5f2387f0b75ae7c863e4e6f48e0a65ad58b9
|
5212d838901fc5172dbc995b87ed014d2766a7f2
|
/sensitivity_analysis/exo1.R
|
8868a7c8e6061c1c74f94a11ecf40531201d0119
|
[] |
no_license
|
fideledegni/small-data-emse
|
cb92db1abcb4844807ecaecbc82a8b28bf12f980
|
dddbd436ae54a0a4eecf92992e4fdd137f93f659
|
refs/heads/master
| 2021-05-13T16:56:25.677353
| 2018-01-12T18:11:36
| 2018-01-12T18:11:36
| 116,806,472
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,169
|
r
|
exo1.R
|
#####################################################
#   Lab report: sensitivity analysis (TP)           #
#            DEGNI Fidèle                           #
#            RODRIGUES Leticia                      #
#####################################################
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are script-local
# conveniences; avoid them in shared code.
rm(list=ls()) # cleaning up
#setwd('~/Dropbox/ICM-EMSE/0_3A/Data_Science/4_Small_Data/sensitivity_analysis/TP')
setwd('C:/Users/Fidèle DEGNI/Dropbox/ICM-EMSE/0_3A/Data_Science/4_Small_Data/sensitivity_analysis/TP')
# n Monte-Carlo samples, uniform on [-pi, pi] for each of the three inputs.
n <- 1000
X1 <- runif(n, min = -pi, max = pi)
X2 <- runif(n, min = -pi, max = pi)
X3 <- runif(n, min = -pi, max = pi)
# Ishigami
# Ishigami test function, evaluated row-wise on a 3-column matrix X:
#   f(x) = sin(x1) + 7 sin(x2)^2 + 0.1 x3^4 sin(x1)
#        = (1 + 0.1 x3^4) sin(x1) + 7 sin(x2)^2
f <- function(X) {
  x1 <- X[, 1]
  x2 <- X[, 2]
  x3 <- X[, 3]
  (1 + 0.1 * x3^4) * sin(x1) + 7 * sin(x2)^2
}
# Evaluate the Ishigami function on the sample and centre the response.
Y <- f(cbind(X1, X2, X3))
mu <- mean(Y)
# Smoothing splines of the centred response against each input: a visual
# proxy for the first-order (main) effect of each variable.
ss1 <- smooth.spline(X1, Y-mu)
ss2 <- smooth.spline(X2, Y-mu)
ss3 <- smooth.spline(X3, Y-mu)
op <-par(mfrow = c(1,3))
plot(X1, Y-mu)
lines(ss1, col = "blue", lwd = 3)
plot(X2, Y-mu)
lines(ss2, col = "blue", lwd = 3)
plot(X3, Y-mu)
lines(ss3, col = "blue", lwd = 3)
par(op)  # restore the original plotting layout
library(sensitivity)
#library(DiceView)
#library(DiceOptim)
# Quadratic two-input test model with an interaction term (weight b12) and a
# curvature term on the first input (weight b11); extra columns of X are inert.
f2 <- function(X, b12, b11) {
  x1 <- X[, 1]
  x2 <- X[, 2]
  x1 - 2 * x2 + b12 * x1 * x2 + b11 * x1^2
}
# Morris elementary-effects screening of f2 (one-at-a-time design, 10
# trajectories, 5 levels) on [-0.5, 0.5]^3.
mMooris <- morris(model = f2, b12 = 10, b11 = 1, factors = 3, r = 10,
design = list(type = "oat", levels = 5, grid.jump = 3), binf = -0.5, bsup = 0.5)
plot(mMooris)
# Same screening for the Ishigami function on [-pi, pi]^3.
mMooris2 <- morris(model = f, factors = 3, r = 10,
design = list(type = "oat", levels = 5, grid.jump = 3), binf = -pi, bsup = pi)
plot(mMooris2)
# Pure interaction test model: the product of the first two input columns.
f3 <- function(X) {
  X[, 1] * X[, 2]
}
# Morris screening of the pure-product model on [0, 3]^2.
mMooris3 <- morris(model = f3, factors = 2, r = 100,
design = list(type = "oat", levels = 5, grid.jump = 3), binf = 0, bsup = 3)
plot(mMooris3)
# Sobol indices via the extended FAST method (first-order and total indices)
# for each of the three test models, with uniform input distributions.
fast1 <- fast99(model = f2, b12 = 10, b11 = 1, factors = 3, n = 1000, q = "qunif", q.arg = list(min = -0.5, max = 0.5))
plot(fast1)
fast2 <- fast99(model = f, factors = 3, n = 1000, q = "qunif", q.arg = list(min = -pi, max = pi))
plot(fast2)
fast3 <- fast99(model = f3, factors = 2, n = 1000, q = "qunif", q.arg = list(min = 0, max = 3))
plot(fast3)
|
2babd926e8fe0bc85be5b516a2155bf1bc33f626
|
2a1b80a49c7aaf7a97ed8721dc95b30f382fb802
|
/MI_RBIG_2016_copy.R
|
4f7214faafa483a4c9b3e19513a7ae9e315f3f65
|
[] |
no_license
|
thaos/RBIG
|
42a334c61edebc2177a435d078031620adaa075a
|
9b9e5177943eed770aeebf748a57a9e361e1d669
|
refs/heads/master
| 2021-01-13T03:47:12.220454
| 2017-02-03T09:04:03
| 2017-02-03T09:04:03
| 77,227,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,421
|
r
|
MI_RBIG_2016_copy.R
|
# Multi-information estimation using RBIG
#
# The user can choose two orthogonal transforms:
#
# 'PCA' = PCA
# 'RND' = Random Rotations
#
# USE:
#
# [MI] = MI_RBIG_2016(dat,N_lay,transform,porc,precision)
#
# INPUTS:
# dat = data ( #dimensions , #samples );aim
# N_lay = number of layers (default N_lay = 1000);
# porc = extra domain percentage (default porc = 10)
# precision = number of points for the marginal PDFs estimation (default precision = 1000)
# transformation = linear transformation applied ('RND','PCA' default transformation = 'PCA')
#
# OUTPUTS
# MI = Multi-information
# MIs = Multi-information reduction at each layer.
# datT = Gaussianized data.
#
# e.g.
#
# dat = rand(5)*(rand(5,1000).^2);
# dat <- runif(5) * matrix(runif(5*2000), nrow=5)^2
# dat <- matrix(runif(5*1000), nrow=5)
# dat <- t(dat)
# dat <- dat[, 1:2]
# n <- 1e4
# rho <- sqrt(runif(n))
# theta <- runif(n, 0, 2*pi)
# x <- rho * cos(theta)
# y <- rho * sin(theta)
# dat <- cbind(x,y)[rho>0.9,]
# pairs(dat)
# N_lay = 50;
# porc = 1;
# precision = 1000;
# transformation = 'PCA';
# MI = MI_RBIG_2016(dat,N_lay,transformation,porc,precision);
#
#
# Citation:
# Iterative Gaussianization: from ICA to Random Rotations.
# V. Laparra, G. Camps & J. Malo
# IEEE Transactions on Neural Networks, 22:4, 537 - 549, (2011)
#
library(entropy)
library(sROC)
library(mixAK)
library(MVN)
library(hexbin)
library(cramer)
library(lpSolve)
# Multi-information estimation via Rotation-Based Iterative Gaussianization.
#
# Args:
#   dat:   samples matrix (rows = samples, columns = dimensions).
#   N_lay: maximum number of gaussianization layers.
# Returns:
#   list(dat = gaussianized data, MIs = per-layer MI reduction, MI = total).
MI_RBIG_2016 <- function(dat,N_lay=1000){
DIM = dim(dat)
Nsamples = DIM[1]
# Histogram bin count for the 1-D entropy estimates (sqrt rule).
nbins <- floor(sqrt(Nsamples))
DIM = DIM[2]
delta_I <- numeric(N_lay)
# Calibration: entropy differences between pairs of i.i.d. Gaussian samples,
# used to derive a noise tolerance for the per-layer MI reduction.
ee <- numeric(ncol(dat)*1000)
for(rep in seq_along(ee)){
x <- rnorm(Nsamples)
dx <- discretize(x, nbins)
delta = diff(range(x))/ nbins
hx = entropy.MillerMadow(dx, unit="log2")+log2(delta);
y <- rnorm(Nsamples)
dy <- discretize(y, nbins)
# Equivalent to diff(range(y))/nbins (division distributes over the range).
delta <- diff(range(y)/ nbins)
hy <- entropy.MillerMadow(dy, unit="log2")+log2(delta);
ee[rep] <- hy - hx
}
tol_m <- mean(ee);
tol_d <- sd(ee);
# browser()
ee <- matrix(ee, nrow=ncol(dat))
ee <- apply(ee, 2, sum)
# NOTE(review): `tol` is computed but the call below passes tol_d/tol_m, not
# tol (the tol-based variant is the commented-out call further down).
tol <- quantile(abs(ee), probs=0.975)
for (n in 1:N_lay){
tic <- Sys.time ()
# Step 1: marginal gaussianization of every column, repeated until the
# Shapiro-Wilk test no longer rejects normality (p >= 0.05).
p <- numeric(DIM)
for(d in 1:DIM){
margin <- marginal_gaussianization(dat[,d]);
p[d] <- margin$shapiro.test$p.value
while(p[d] < 0.05){
margin <- marginal_gaussianization(margin$x_gauss);
# print(p[d])
p[d] <- margin$shapiro.test$p.value
}
dat[, d] <- margin$x_gauss
# pairs(dat)
# plot(hexplom(dat))
# scan(n=1)
}
dat_aux = dat;
# Step 2: rotation. NOTE(review): the PCA eigenvectors are computed and then
# immediately discarded -- V is overwritten with a random rotation matrix,
# so this implementation always uses the 'RND' transform.
C <- cov(dat)
eig <- eigen(C);
V <- eig$vectors
V <- rRotationMatrix(1, ncol(C))
# print(V)
dat <- dat %*% V
# Step 3: multi-information reduction achieved by this layer.
delta_I[n] = information_reduction_LT(dat,dat_aux, tol_d=tol_d, tol_m=tol_m, nbins=nbins);
# delta_I[n] = information_reduction_LT(dat,dat_aux, tol=tol, nbins=nbins);
# print (n)
# print(delta_I[n])
toc <- Sys.time ()
# print(toc - tic)
# pairs(dat)
# plot(hexplom(dat))
# Early stopping: quit once the last 9 layers produced no MI reduction.
if(n>10){
# browser()
# mt <- mardiaTest(dat, qqplot = FALSE)
# cat(rt@p.value, " / ", hzt@p.value, " / ", mt@p.value, "\n")
# print(cor(dat))
if (isTRUE(all.equal(tail(delta_I[(n-9):n], 9), rep(0, 9)))) break
}
# rt <- roystonTest(dat, qqplot = FALSE)
# hzt <- hzTest(dat, qqplot = FALSE)
# cat(rt@p.value, " / ", hzt@p.value, " \n")
# if (rt@p.value >= 0.1 & hzt@p.value > 0.1) break
# if (rt@p.value >= 0.9 & hzt@p.value > 0.9) break
}
# Total multi-information is the sum of the per-layer reductions.
ans <- list(dat=dat, MIs=delta_I, MI=sum(delta_I))
}
# Sanity check: MI estimates for 100 independent uniform datasets (the true
# multi-information is 0, so these draws characterise the estimator's noise).
test_unif <- sapply(1:100, function(x){
dat <- matrix(runif(5*2000), ncol=5)
MI_RBIG_2016(dat)$MI
})
# Multi-information reduction between the data before (Y) and after (X) a
# layer's rotation, as the difference of summed marginal entropies
# (Miller-Madow histogram estimates). Differences smaller than the
# calibrated noise tolerance are clipped to 0.
# NOTE(review): the tol_m parameter is accepted but never used in the body.
information_reduction_LT <- function(X, Y, tol_d, tol_m, nbins){
# information_reduction_LT <- function(X, Y, tol, nbins){
# should discretize first
hx <- apply(X, 2, function(x)entropy.MillerMadow(discretize(x, nbins), unit="log2") + log2(diff(range(x))/nbins))
hy <- apply(Y, 2, function(y)entropy.MillerMadow(discretize(y, nbins), unit="log2") + log2(diff(range(y))/nbins))
# hx <- apply(X, 2, knn_entropy_1D)
# hy <- apply(Y, 2, knn_entropy_1D)
# wrong use
# dix <- sum(apply(X, 2, FNN::entropy))
# diy <- sum(apply(Y, 2, FNN::entropy))
# browser()
# print(dix)
# print(diy)
I <- sum(hy - hx)
# print(I)
# scan(n=1)
# I <- dix - log(sqrt(2*pi*exp(1)))
# Euclidean norm of the per-column entropy changes, compared against the
# noise threshold derived from tol_d.
II = sqrt(sum((hy - hx)^2));
p = 0.25;
# print(abs(II))
# print(sqrt(ncol(X)*((p*tol_d^2))))
# scan(n=1)
if (abs(II)<sqrt(ncol(X)*((p*tol_d^2)))){
# if (abs(I) <= tol){
# I= (runif(1) <= 0.95) * I
I=0
# print("inside")
}
I
}
# Marginal (rank-based) gaussianization: map x through its empirical CDF to
# uniforms, then through the standard normal quantile function.
#
# Args:
#   x: numeric vector of samples.
# Returns:
#   list with
#     x_gauss:      gaussianized values (same length/order as x)
#     shapiro.test: Shapiro-Wilk normality test of x_gauss
#
# The sample maximum has ecdf value exactly 1, for which qnorm() returns Inf.
# FIX: the original replaced that Inf by the *probability* 1 - 1/length(x)^2
# (a value near 1, on the wrong scale), which broke the monotonicity of the
# transform. We now clamp the uniform value to 1 - 1/length(x)^2 BEFORE
# applying qnorm(), keeping the output on the Gaussian scale and
# order-preserving.
marginal_gaussianization <- function(x) {
  x_unif <- ecdf(x)(x)
  x_unif[x_unif == 1] <- 1 - 1 / length(x)^2
  x_gauss <- qnorm(x_unif)
  ans <- list(x_gauss = x_gauss, shapiro.test = shapiro.test(x_gauss))
}
# One-dimensional nearest-neighbour (spacing) entropy estimator: the mean
# log-spacing of the order statistics plus the bias correction
# digamma(1) - digamma(N). Ties in x yield a zero spacing and hence -Inf,
# exactly as in the original.
knn_entropy_1D <- function(x) {
  n_obs <- length(x)
  spacings <- diff(sort(x))
  digamma(1) - digamma(n_obs) + mean(log(spacings))
}
# NOTE(review): this line relies on a global `dat` that is only created in
# the commented example at the top of the file -- it errors in a fresh
# session; confirm the intended input.
mis <- MI_RBIG_2016(dat)
# 100 replicate MI estimates on independent 1000x5 uniform datasets.
mi100 <- sapply(1:100, function(x){
dat <- matrix(runif(5*1000), nrow=5)
dat <- t(dat)
MI_RBIG_2016(dat)$MI
}
)
# Conditional mutual information I(X; Y | C) estimated via RBIG
# multi-information, using the decomposition
#   I(X;Y|C) = I(X,Y,C) - I(X,C) - I(Y,C) + I(C)
# where the I(C) term only exists when C has more than one column.
# With an empty conditioning set this reduces to the plain MI I(X;Y).
cond_MI <- function(dat, x_ind, y_ind, c_ind = integer(0)) {
  mi_of <- function(cols) MI_RBIG_2016(dat[, cols])$MI
  if (length(c_ind) == 0) {
    return(mi_of(c(x_ind, y_ind)))
  }
  result <- mi_of(c(x_ind, y_ind, c_ind)) -
    mi_of(c(x_ind, c_ind)) -
    mi_of(c(y_ind, c_ind))
  if (length(c_ind) > 1) {
    result <- result + mi_of(c_ind)
  }
  result
}
# Experiment 1: MI between two independent uniforms, then after making
# column 2 strongly dependent on column 1.
dat <- matrix(runif(5*2000), ncol=5)
cmi <- cond_MI(dat, 1, 2)
dat[, 2] <- dat[, 2] + dat[, 1] * 10
plot(hexplom(dat[, 1:2]))
cmi <- cond_MI(dat, 1, 2)
# Experiment 2: columns 1 and 2 share a common cause (column 3); their MI
# should vanish once we condition on 3.
dat <- matrix(runif(5*2000), ncol=5)
dat[, 1] <- dat[, 1] + dat[, 3] * 10
dat[, 2] <- dat[, 2] + dat[, 3] * 10
plot(hexplom(dat[, 1:3]))
pairs(dat[, 1:3])
cmi <- cond_MI(dat, 1, 2)
cmi <- cond_MI(dat, 1, 2, 3)
# Experiment 3: two common causes (columns 3 and 4); compare conditioning
# on different subsets of the true/irrelevant parents.
dat <- matrix(runif(5*2000), ncol=5)
dat[, 1] <- dat[, 1] + dat[, 3] * 10 - dat[, 4] * 10
dat[, 2] <- dat[, 2] + dat[, 3] * 10 - dat[, 4] * 5
plot(hexplom(dat[, 1:5]))
cmi1 <- cond_MI(dat, 1, 2)
cmi2 <- cond_MI(dat, 1, 2, 3)
cmi3 <- cond_MI(dat, 1, 2, c(3, 4))
cmi4 <- cond_MI(dat, 1, 2, c(4, 5))
cmi5 <- cond_MI(dat, 1, 2, c(3, 4, 5))
# boot_mi <- function(dat, x_ind, y_ind, c_ind=integer(0)){
# if(length(c_ind) == 0){
# Bootstrap null sample for the MI test: break any dependence between
# columns x_ind and y_ind by permuting each independently.
#
# Args:
#   dat:   numeric matrix (rows = samples).
#   x_ind, y_ind: column indices of the two variables.
# Returns:
#   two-column matrix of the independently shuffled x and y values.
#
# FIX: the original subsetted dat to two columns and then indexed the
# *subsetted* matrix with the original column numbers (dat[, x_ind] /
# dat[, y_ind]); that only worked for x_ind = 1, y_ind = 2 and read the
# wrong column (or failed) for any other indices.
sample_mi <- function(dat, x_ind, y_ind) {
  out <- dat[, c(x_ind, y_ind)]
  out[, 1] <- sample(out[, 1])
  out[, 2] <- sample(out[, 2])
  out
}
# Visual check: the permuted pair should look independent.
s1 <- sample_mi(dat, 1, 2)
plot(hexplom(s1))
# Bootstrap null sample for the *conditional* MI test: bootstrap the rows,
# then for each point swap the x and y values between its two nearest
# neighbours in the conditioning variables. This breaks the direct x-y
# dependence while (approximately) preserving each variable's relation to C.
# Returns a matrix with columns (new_x, new_y, C...), subsampled to half
# the original number of rows.
sample_cmi <- function(dat, x_ind, y_ind, c_ind){
dat <- dat[sample.int(nrow(dat), replace=TRUE), ]
dat_c <- dat[, c_ind, drop=FALSE]
# NOTE(review): dat_xy is assigned but never used.
dat_xy <- dat[, c(x_ind, y_ind)]
dist_mat <- as.matrix(dist(dat_c))
# For each point: the two nearest neighbours, skipping zero distances
# (self and exact duplicates from the bootstrap).
two_closest <- apply(dist_mat, 2, function(x) order(x)[(1:2)+sum(x == 0)])
# Cross the pairs: x from one neighbour, y from the other (both orders).
new_x <- dat[c(two_closest[1, ], two_closest[2, ]), x_ind]
new_y <- dat[c(two_closest[2, ], two_closest[1, ]), y_ind]
dat_b <- cbind(new_x, new_y, rbind(dat_c, dat_c) )
dat_b <- dat_b[sample.int(nrow(dat)/2), ]
dat_b
}
# Visual checks of the conditional-null resampler with several
# conditioning sets (only the last draw is plotted).
s1 <- sample_cmi(dat, 1, 2, c(3, 4))
s1 <- sample_cmi(dat, 1, 2, c(4, 5))
s1 <- sample_cmi(dat, 1, 2, c(3, 4, 5))
s1 <- sample_cmi(dat, 1, 2, 3)
plot(hexplom(s1))
# One bootstrap draw of the MI statistic under the independence null:
# shuffle the two columns independently, then re-estimate their MI.
boot_mi <- function(dat, x_ind, y_ind) {
  shuffled <- sample_mi(dat, x_ind, y_ind)
  cond_MI(shuffled, 1, 2)
}
# One bootstrap draw of the conditional-MI statistic under the null of
# conditional independence: resample via the nearest-neighbour swap in the
# conditioning variables, then re-estimate I(X; Y | C) on the result.
boot_cmi <- function(dat, x_ind, y_ind, c_ind) {
  resampled <- sample_cmi(dat, x_ind, y_ind, c_ind)
  cond_MI(resampled, 1, 2, 3:ncol(resampled))
}
# Smoke-test single null draws with two conditioning sets.
boot_cmi(dat, 1, 2, c(3, 4))
boot_cmi(dat, 1, 2, 3)
# Draw n bootstrap null statistics: plain MI when c_ind is empty, otherwise
# conditional MI (printing the iteration number for progress, as before).
nboot_cmi <- function(n, dat, x_ind, y_ind, c_ind = numeric(0)) {
  one_draw <- if (length(c_ind) == 0) {
    function(i) boot_mi(dat, x_ind, y_ind)
  } else {
    function(i) {
      print(i)
      boot_cmi(dat, x_ind, y_ind, c_ind)
    }
  }
  unlist(lapply(seq.int(n), one_draw))
}
# Null distributions for several conditioning sets (names encode the set).
ncmi5 <- nboot_cmi(100, dat, 1, 2, c(3, 4, 5))
ncmi4 <- nboot_cmi(100, dat, 1, 2, c(4, 5))
ncmi3 <- nboot_cmi(100, dat, 1, 2, c(3, 4))
ncmi2 <- nboot_cmi(10, dat, 1, 2, 3)
ncmi1 <- nboot_cmi(10, dat, 1, 2)
# Bootstrap p-value for (conditional) independence: the fraction of the
# nboot null draws that reach or exceed the observed (conditional) MI.
cmi_btest <- function(nboot, dat, x_ind, y_ind, c_ind = numeric(0)) {
  observed <- cond_MI(dat, x_ind, y_ind, c_ind)
  null_draws <- nboot_cmi(nboot, dat, x_ind, y_ind, c_ind)
  1 - sum(observed > null_draws) / nboot
}
# Single test runs, then 20-replicate calibration studies of the bootstrap
# p-value under a two-common-cause data-generating process (columns 3 and 4
# drive columns 1 and 2). Each replicate regenerates the data.
tcmi1 <-cmi_btest(10, dat, 1, 2)
tcmi3 <-cmi_btest(10, dat, 1, 2, 3:4)
# Unconditional test (dependence present -> expect small p-values).
conf_tcmi1 <- sapply(1:20, function(x){
print("***********************************************")
print(x)
dat <- matrix(runif(5*2000), ncol=5)
dat[, 1] <- dat[, 1] + dat[, 3] * 10 - dat[, 4] * 10
dat[, 2] <- dat[, 2] + dat[, 3] * 10 - dat[, 4] * 5
tcmi <-cmi_btest(10, dat, 1, 2)
tcmi
})
# Conditioning on one of the two common causes only.
conf_tcmi2 <- sapply(1:20, function(x){
print("***********************************************")
print(x)
dat <- matrix(runif(5*2000), ncol=5)
dat[, 1] <- dat[, 1] + dat[, 3] * 10 - dat[, 4] * 10
dat[, 2] <- dat[, 2] + dat[, 3] * 10 - dat[, 4] * 5
tcmi <-cmi_btest(10, dat, 1, 2, 3)
tcmi
})
# Conditioning on both common causes (true conditional independence).
conf_tcmi3 <- sapply(1:20, function(x){
print("***********************************************")
print(x)
dat <- matrix(runif(5*2000), ncol=5)
dat[, 1] <- dat[, 1] + dat[, 3] * 10 - dat[, 4] * 10
dat[, 2] <- dat[, 2] + dat[, 3] * 10 - dat[, 4] * 5
tcmi <-cmi_btest(10, dat, 1, 2, 3:4)
tcmi
})
# Conditioning on one true cause plus an irrelevant variable.
conf_tcmi4 <- sapply(1:20, function(x){
print("***********************************************")
print(x)
dat <- matrix(runif(5*2000), ncol=5)
dat[, 1] <- dat[, 1] + dat[, 3] * 10 - dat[, 4] * 10
dat[, 2] <- dat[, 2] + dat[, 3] * 10 - dat[, 4] * 5
tcmi <-cmi_btest(10, dat, 1, 2, 4:5)
tcmi
})
# code translated to R from Gary Doran et al. "A permutation-Based Kernel Conditional Independence Test
# Find the 0/1 permutation matrix P minimising sum(P * D) subject to P having
# no fixed points (zero diagonal), by solving a binary linear program.
# Used to permute one half-sample so that swapped points are close in the
# conditioning variables.
#
# Args:
#   D: distance object or square distance matrix between samples.
# Returns:
#   n x n 0/1 permutation matrix with a zero diagonal.
linear_permutation <- function(D) {
  D <- as.matrix(D)
  n <- nrow(D)
  # Rescale distances to [0, 1] (original used the redundant max(max(D))).
  D <- D / max(D)
  # Objective: row-major vectorised distances, matching the column blocks below.
  f <- c(t(D))
  # Equality constraints: each row/column of P sums to 1; diagonal is 0.
  Aeq <- matrix(0, nrow = 2 * n, ncol = n^2)
  b <- matrix(1, nrow = 2 * n, ncol = 1)
  # Columns sum to 1.
  # FIX: the original loop header `for (c in 0:n-1)` parses as (0:n) - 1 by
  # operator precedence, i.e. -1..(n-1); the spurious c = -1 iteration was
  # only harmless because R silently ignores assignment to row index 0.
  for (cc in 0:(n - 1)) {
    Aeq[cc + 1, (cc * n + 1):((cc + 1) * n)] <- 1
  }
  # Rows sum to 1 (last row constraint not necessary;
  # it is implied by the other constraints).
  for (r in 1:(n - 1)) {
    for (cc in 1:n) {
      Aeq[r + n, r + (cc - 1) * n] <- 1
    }
  }
  # Diagonal entries forced to zero (right-hand side set to 0 below).
  for (z in 1:n) {
    Aeq[2 * n, (z - 1) * (n + 1) + 1] <- 1
  }
  b[2 * n, 1] <- 0
  # All constraints are equalities (original wrapped this in a no-op paste()).
  cdir <- rep("=", 2 * n)
  ans <- lp(direction = "min", objective.in = f, const.mat = Aeq,
            const.dir = cdir, const.rhs = b,
            transpose.constraints = TRUE, all.int = TRUE, all.bin = TRUE)
  matrix(ans$sol, ncol = n, byrow = FALSE)
}
# check who to simulate from RBIG
# Kernel Conditional Independence Permutation Test (first draft).
# Splits the data in half B times, permutes x within the second half via the
# nearest-neighbour linear program, and compares the halves with the Cramer
# two-sample statistic; an inner bootstrap builds the null distribution.
# NOTE(review): this definition is immediately shadowed by the redefinition
# of KCIPT further down the file, so this version is never the one called.
KCIPT <- function(dat,xy_ind, c_ind, dist, B, b, M){
MMD <- numeric(B)
samples <- numeric(B)
inner_null <- matrix(numeric(B*b), nrow=B)
outer_null <- numeric(M)
for( i in 1:B){
omega <- dat[, c(xy_ind, c_ind)]
idx <- sample.int(nrow(omega), round(nrow(omega)/2))
omega1 <- omega[idx, ]
omega2 <- omega[-idx, ]
# Permute the first column of omega2 so swapped rows stay close in the
# remaining (conditioning) columns.
P <- linear_permutation(dist(omega2[, 2:ncol(omega2)]))
omega2 <- cbind(P%*%omega2[, 1], omega2[, 2:ncol(omega2)])
MMD[i] <- cramer.test(omega1, omega2, sim="ordinary", just.statistic=TRUE)$statistic
# print("***************************************")
# print(cramer.test(omega1, omega2, sim="ordinary"))
# print("***************************************")
# browser()
# plot(hexplom(omega1))
# scan(n=1)
# plot(hexplom(omega2))
omega <- rbind(omega1, omega2)
# Inner bootstrap: random re-splits of the pooled halves give null draws
# of the same two-sample statistic.
for( j in 1:b){
idx <- sample.int(nrow(dat), round(nrow(dat)/2))
omega1 <- omega[idx, ]
omega2 <- omega[-idx, ]
# plot(hexplom(omega1))
# plot(hexplom(omega2))
# print(cramer.test(omega1, omega2, sim="ordinary"))
# browser()
# scan(n=1)
inner_null[i, j] <- cramer.test(omega1, omega2, sim="ordinary", just.statistic=TRUE)$statistic
# cat(inner_null[i, j], " / ", MMD[i], "\n")
}
# print(sort(inner_null[i,]), round(0.05 * b)])
# print(sort(inner_null[i,])[round(0.95 * b)])
}
statistics <- mean(MMD)
# Outer null: M resamples of one inner-null draw per split, averaged.
for(k in 1:M){
for(i in 1:B){
r <- ceiling(runif(1) * b)
samples[i] <- inner_null[i, r]
}
outer_null[k] <- mean(samples)
}
# print(statistics)
# print(outer_null)
# p.value <- mean(statistics >= outer_null)
# Rank-based p-value of the observed statistic within the outer null.
p.value <- 1 - rank(c(statistics, outer_null))[1]/(length(outer_null) + 1)
# crit.value <- sort(outer_null)[round(0.95 * length(outer_null))]
p.value
}
# Second (simplified) KCIPT variant -- OVERWRITES the definition above.
# For each of B random half-splits it permutes the x column of the second
# half via the nearest-neighbour LP and records the Cramer-test p-value
# between the two halves; returns the vector of B p-values (not a single
# combined p-value).
# NOTE(review): the b, M, samples, inner_null and outer_null variables are
# unused here, and cramer.test is executed twice per split (once for the
# print, once for the value) -- the second call redoes the simulation.
KCIPT <- function(dat,xy_ind, c_ind, dist, B, b, M){
MMD <- numeric(B)
samples <- numeric(B)
inner_null <- matrix(numeric(B*b), nrow=B)
outer_null <- numeric(M)
for( i in 1:B){
idx <- sample.int(nrow(dat), round(nrow(dat)/2))
omega1 <- dat[idx, ]
omega2 <- dat[-idx, ]
P <- linear_permutation(dist(omega2[, c_ind]))
# MMD[i] <- cramer.test(omega1[, xy_ind], P%*%omega2[, xy_ind], sim="ordinary", just.statistic=TRUE)$statistic
print(cramer.test(omega1[, c(xy_ind, c_ind)], cbind(P%*%omega2[, xy_ind[1]], omega2[, c(xy_ind[2], c_ind)]), sim="ordinary"))
MMD[i] <- cramer.test(omega1[, c(xy_ind, c_ind)], cbind(P%*%omega2[, xy_ind[1]], omega2[, c(xy_ind[2], c_ind)]), sim="ordinary")$p.value
}
MMD
}
# KCIPT runs with various conditioning sets on the first 500 rows.
pv3 <- KCIPT(dat[1:500, ], c(1:2), c(3,4), dist=dist, B=10, b=10, M=100)
pv4 <- KCIPT(dat[1:500, ], c(1:2), c(4,5), dist=dist, B=20, b=20, M=100)
pv5 <- KCIPT(dat[1:500, ], c(1:2), c(3,4,5), dist=dist, B=20, b=20, M=100)
pv1 <- KCIPT(dat[1:500, ], c(1:2), c(5), dist=dist, B=20, b=20, M=100)
# Calibration: 20 replicates with a single common cause (column 3) but
# conditioning on the irrelevant column 4.
conf_kcipt3 <- sapply(1:20, function(x){
print("***********************************************")
print(x)
dat <- matrix(runif(5*2000), ncol=5)
# dat[, 1] <- dat[, 1] + dat[, 3] * 10 - dat[, 4] * 10
# dat[, 2] <- dat[, 2] + dat[, 3] * 10 - dat[, 4] * 5
dat[, 1] <- dat[, 1] + dat[, 3] * 2
dat[, 2] <- dat[, 2] + dat[, 3] * 2
plot(hexplom(dat))
pv <- KCIPT(dat[1:700, ], c(1:2), c(4), dist=dist, B=10, b=50, M=100)
pv
})
|
3d091a3ce884bb33d40a78d87cb140329bd65e0c
|
3622743043d910d9893c9f9a19a035e4ead1c275
|
/man/cumQdate.Rd
|
2708f42955f06282c115b4a082322f8e36382dc9
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
cran/EGRET
|
1513c548cfc95a8882e216438bafe1f6c0779150
|
89bec3ad99d63be00eac2ca5817ac2c7fca7fbc8
|
refs/heads/master
| 2023-04-29T13:23:05.071777
| 2023-04-18T15:30:02
| 2023-04-18T15:30:02
| 26,901,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,822
|
rd
|
cumQdate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cumQdate.R
\name{cumQdate}
\alias{cumQdate}
\title{Cumulative flow calculation}
\usage{
cumQdate(eList, paStart = 10, paLong = 12, fract = 0.5)
}
\arguments{
\item{eList}{named list with at least the Sample and INFO dataframes}
\item{paStart}{numeric integer specifying the starting month for the period of analysis, 1 <= paStart <= 12, default is 10}
\item{paLong}{numeric integer specifying the length of the period of analysis, in months, 1 <= paLong <= 12, default is 12}
\item{fract}{numeric fraction of the flow}
}
\value{
annualSeries an integer matrix of two columns.
The first column is the calendar year for the end of the period
The second column is day of the year when the flow has exceeded the specified fraction
of the entire period being considered
}
\description{
This function computes the first day of the calendar year
at which a specific fraction of the cumulative flow for that year
has been exceeded. Typically one looks for the point where
half the cumulative flow has happened (fract = 0.5).
The portion of the year being considered is set by paStart and paLong.
The matrix returned has 2 columns:
the first is the year (integer when the period of analysis ends),
the second is the day of the year when the fraction has been exceeded.
None of the rows will have any NA values.
}
\details{
It is common to use this type of analysis on the snowmelt period of the year.
If (for example) we assume that snowmelt starts with the month of March and ends in July
then we would set paStart = 3 and paLong = 5
}
\examples{
eList <- Choptank_eList
annualFlow <- cumQdate(eList)
head(annualFlow)
plot(annualFlow)
mod1 <- lm(annualFlow[,2] ~ annualFlow[,1])
summary(mod1)
}
|
78db640d9fcce234e5acf6890283bf0fdc6b7c66
|
d47118bb523fd113bd0a092e0664389bd39ed0a3
|
/code/Ailaoshan_species_richness.R
|
dbd5fec5f7900be29c64b0f63d51939f311f46f4
|
[
"MIT"
] |
permissive
|
bakerccm/ailaoshan
|
7b23087e6ed8e0cc617fb2813c5df22694f399e8
|
1be8d47fb88718997f2a2cec63e442c67ce1f55e
|
refs/heads/main
| 2023-01-05T18:10:50.056963
| 2020-10-28T22:30:35
| 2020-10-28T22:30:35
| 308,143,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,950
|
r
|
Ailaoshan_species_richness.R
|
# R code to examine difference between species richness and community occupancy
library("here")
library("tidyverse")
library("R2jags")
########################################################################################
# input and output file names
# All input/output paths used by this analysis.
filenames <- list(
modeldata.LSU.rdata = here("rdata", "Ailaoshan_model_data_final_LSU.rdata"), # rdata file with LSU model data (original comment said SSU -- likely a typo; confirm)
modeldata.both.rdata = here("rdata", "Ailaoshan_model_data_final.rdata"), # rdata file containing data for both LSU and SSU models
modeloutput.rds = here("rds", "Ailaoshan_model_output_final_LSU.rds") # file containing modelling results
)
########################################################################################
# get data
# file containing LSU model data
# (take polygon and OTU labels from jags.data$model.data$y or jags.data$model.data$z.start as stored in this rdata file)
load(filenames$modeldata.LSU.rdata)
# file containing model data prior to pulling out only the data required for the models
# just get leech.augmented from this .rdata file for the unscaled covariate values
# (loaded into a throwaway environment so only leech.augmented is kept)
both = new.env()
load(filenames$modeldata.both.rdata, envir = both)
leech.augmented = both$leech.augmented
rm(both)
# modelling output
model.output <- readRDS(file = filenames$modeloutput.rds)
########################################################################################
# number of MCMC samples
# sims.list has model output arranged by variable
# and appears to be a rearranged version of sims.matrix
nsamp <- nrow(model.output$BUGSoutput$sims.list[[1]]) # nrow should be the same for any item in the list
nsites <- dim(model.output$BUGSoutput$sims.list$z)[2]
nspec <- dim(model.output$BUGSoutput$sims.list$z)[3]
# Species counts per taxonomic group (g == 1 mammals, g == 2 frogs).
nmammals <- sum(jags.data$model.data$g == 1)
nfrogs <- sum(jags.data$model.data$g == 2)
########################################################################################
# occupancy estimates
# elevation
# predictor values
# jags.data$model.data$occ[1,] is elev
# jags.data$model.data$occ[2,] is reserve
# jags.data$model.data$occ["elev",]
# jags.data$model.data$occ["reserve",]
# note that these do have colnames
community.pred <- rep(NA, nsites)
names(community.pred) <- colnames(jags.data$model.data$occ)
# make sites with NA = zero
# omit this if you just want to exclude those points
jags.data$model.data$occ[is.na(jags.data$model.data$occ)] <- 0
# posterior mean community occupancy per site: group-level occupancy on the
# probability scale, weighted by each group's share of the species pool.
for(site in 1:nsites){
mammals <- plogis(model.output$BUGSoutput$sims.list$mu.eta[,1,1] + model.output$BUGSoutput$sims.list$mu.beta[,1] * jags.data$model.data$occ["elev",site] + model.output$BUGSoutput$sims.list$mu.beta[,2] * jags.data$model.data$occ["reserve",site])
frogs <- plogis(model.output$BUGSoutput$sims.list$mu.eta[,1,2] + model.output$BUGSoutput$sims.list$mu.beta[,1] * jags.data$model.data$occ["elev",site] + model.output$BUGSoutput$sims.list$mu.beta[,2] * jags.data$model.data$occ["reserve",site])
community <- (mammals * nmammals/nspec) + (frogs * nfrogs/nspec)
community.pred[site] <- mean(community)
}
# Compare estimated per-site species richness with community occupancy.
Nsite <- model.output$BUGSoutput$summary[grep("^Nsite", rownames(model.output$BUGSoutput$summary)),]
plot(Nsite[,"mean"], community.pred)
cor(Nsite[,"mean"], community.pred, use="complete.obs")
# [1] 0.9345153
# Observed occupancy vs posterior mean z for species LSU002.
sites.occupied.LSU002 <- apply(jags.data$model.data$y[,,"LSU002"],MAR =1 ,FUN = function (X) sum(X,na.rm=TRUE)) > 0
###
z.mean.LSU002 <- apply(model.output$BUGSoutput$sims.list$z[,,1] ,MAR =2 ,mean)
data.frame(sites.occupied = sites.occupied.LSU002, z.mean = z.mean.LSU002) %>% arrange(sites.occupied)
###
# session info
writeLines(capture.output(sessionInfo()), here("sessioninfo", "Ailaoshan_species_richness.sessioninfo.txt"))
|
ebe3bbbc7f5beb429fe4e2cf175d463520fffd15
|
11eb0ab12619b909519e47113b90441afc0fa272
|
/bloom filter.R
|
7c66ed16eab2f491a80a82d4fda5f47fad5f417b
|
[] |
no_license
|
pareshg18/Spam-Detection
|
b67ac8c826896ac39e0b3f13725c728081832705
|
89ae5474bf65abe51ee8cc8e1fd87f3330f49832
|
refs/heads/master
| 2020-04-12T02:50:31.306496
| 2018-12-18T10:35:37
| 2018-12-18T10:35:37
| 162,255,510
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,510
|
r
|
bloom filter.R
|
#Bloom Filter
# NOTE(review): install.packages() calls belong outside analysis scripts;
# left in place to preserve the original behaviour.
install.packages("digest")
install.packages("bit")
library(digest)
library(bit)
# 30-day spam username list: the set to be inserted into the filter.
spam <- read.table("listed_username_30.txt", sep = "\t", stringsAsFactors = FALSE)
colnames(spam) <- "usernames"
str(spam)
# Standard Bloom filter sizing for n items and target false-positive rate p:
#   m = ceil(-n ln p / (ln 2)^2)   bits
#   k = ceil((m/n) ln 2)           hash functions
n <- nrow(spam)
p <- 0.07
m <- ceiling(-n*log(p) / (log(2)^2))
k <- ceiling(m/n * log(2))
# Convert a hexadecimal string (case-insensitive) to its numeric value.
# NOTE(review): the result is a double, so digests longer than 13 hex digits
# (e.g. xxhash64 output) lose precision beyond 2^53 -- confirm this is
# acceptable before relying on the low-order bits.
hex_to_int <- function(h) {
  digits <- strsplit(tolower(h), "")[[1L]]
  values <- match(digits, c(0L:9L, letters[1L:6L])) - 1L
  powers <- 16^(rev(seq_along(values)) - 1)
  sum(values * powers)
}
# Build the Bloom filter: compute k = 4 hash positions (1..m) for every spam
# username and set the corresponding bits.
# FIX: the original initialised every vector to NULL and grew them one
# element at a time; they are now pre-allocated. All global names
# (hashing1..4, h1..4, bit_vector) are preserved because later code reuses
# them.
hashing1 <- character(nrow(spam))
hashing2 <- character(nrow(spam))
hashing3 <- character(nrow(spam))
hashing4 <- character(nrow(spam))
h1 <- numeric(nrow(spam))
h2 <- numeric(nrow(spam))
h3 <- numeric(nrow(spam))
h4 <- numeric(nrow(spam))
for (i in seq_len(nrow(spam))) {
  # Four independent hash functions, each mapped into a bit position 1..m.
  hashing1[i] <- digest(spam$usernames[i], algo = "murmur32", serialize = TRUE)
  h1[i] <- hex_to_int(hashing1[i]) %% m + 1
  hashing2[i] <- digest(spam$usernames[i], algo = "xxhash32", serialize = TRUE)
  h2[i] <- hex_to_int(hashing2[i]) %% m + 1
  hashing3[i] <- digest(spam$usernames[i], algo = "crc32", serialize = TRUE)
  h3[i] <- hex_to_int(hashing3[i]) %% m + 1
  hashing4[i] <- digest(spam$usernames[i], algo = "xxhash64", serialize = TRUE)
  h4[i] <- hex_to_int(hashing4[i]) %% m + 1
}
# Set the four bits for every inserted username.
bit_vector <- bit(m)
for (i in seq_len(nrow(spam))) {
  bit_vector[h1[i]] <- 1
  bit_vector[h2[i]] <- 1
  bit_vector[h3[i]] <- 1
  bit_vector[h4[i]] <- 1
}
# Query phase: hash every username from the 365-day stream with the same four
# hash functions and measure the filter's empirical false-positive rate.
stream <- read.table("listed_username_365.txt", stringsAsFactors = FALSE, sep = "\t")
summary(stream)
colnames(stream) <- "usernames"
str(stream)
# FIX: pre-allocated instead of growing NULL vectors element-wise; the
# original also stored each digest back into hashing1..4 as a scratch
# variable -- those were dead stores (never read afterwards) and are dropped.
h_n1 <- numeric(nrow(stream))
h_n2 <- numeric(nrow(stream))
h_n3 <- numeric(nrow(stream))
h_n4 <- numeric(nrow(stream))
for (i in seq_len(nrow(stream))) {
  h_n1[i] <- hex_to_int(digest(stream$usernames[i], algo = "murmur32", serialize = TRUE)) %% m + 1
  h_n2[i] <- hex_to_int(digest(stream$usernames[i], algo = "xxhash32", serialize = TRUE)) %% m + 1
  h_n3[i] <- hex_to_int(digest(stream$usernames[i], algo = "crc32", serialize = TRUE)) %% m + 1
  h_n4[i] <- hex_to_int(digest(stream$usernames[i], algo = "xxhash64", serialize = TRUE)) %% m + 1
}
# A username "hits" the filter only if all four of its bits are set.
fp <- 0
tn <- 0
for (i in seq_len(nrow(stream))) {
  if (bit_vector[h_n1[i]] == 1 && bit_vector[h_n2[i]] == 1 &&
      bit_vector[h_n3[i]] == 1 && bit_vector[h_n4[i]] == 1) {
    fp <- fp + 1
  } else {
    tn <- tn + 1
  }
}
# Empirical false-positive rate in percent. NOTE(review): treating every hit
# as a false positive assumes the stream is disjoint from the inserted
# 30-day list -- confirm against the data files.
fp / (fp + tn) * 100
|
b17fa2d17665d1c0aad5401a2ace698a21c853c6
|
52e7fbbac675d80127e7a5a9dc838c66b494cc15
|
/plot4.R
|
7d3193cb42b51698a7804c1a268246980135ae32
|
[] |
no_license
|
greenisagoodcolor/ExData_Plotting1
|
1bbeccf68d5901b72e537b52fd9093854c1762e4
|
bb64a48019faae23672051874601d64afdf0e835
|
refs/heads/master
| 2021-01-18T11:25:50.312575
| 2014-05-11T15:57:57
| 2014-05-11T15:57:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
plot4.R
|
# plot4.R -- four-panel figure of household power consumption for
# 2007-02-01 and 2007-02-02, written to plot4.png.

# Read the raw data; "?" encodes missing values. Everything is read as
# character and parsed explicitly afterwards.
elec <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   na.strings = "?", colClasses = "character")
# Merge the Date and Time columns into one date-time string, then parse it.
elec[, 1] <- paste(elec[, 1], elec[, 2], sep = " ")
library(lubridate)  # library() errors on a missing package; require() only warns
elec[, 1] <- dmy_hms(elec[, 1])
# Keep only the first two days of February 2007 and free the full table.
mar <- with(elec, elec[year(Date) == 2007 &
  month(Date) == 2 & day(Date) >= 1 & day(Date) <= 2, ])
rm(elec)
png(file = "plot4.png", bg = "NA", width = 480, height = 480)
par(mfrow = c(2, 2))  # 2x2 panel layout, filled row by row
with(mar, {
  # Panel 1: global active power over time.
  plot(mar[, 1], mar[, 3], type = "n", xlab = "", ylab = "Global Active Power (kilowatts)")
  lines(mar[, 1], mar[, 3])
  # Panel 2: voltage.
  plot(mar[, 1], mar[, 5], xlab = "datetime", ylab = "Voltage", type = "l")
  # Panel 3: the three sub-metering series with a legend.
  plot(mar[, 1], mar[, 7], type = "n", xlab = "", ylab = "Energy Sub Metering")
  lines(mar[, 1], mar[, 7], col = "black", type = "l")
  lines(mar[, 1], mar[, 8], col = "red", type = "l")
  lines(mar[, 1], mar[, 9], col = "blue", type = "l")
  legend("topright", col = c("black", "red", "blue"), lty = "solid", legend = names(mar[7:9]), bty = "n")
  # Panel 4: the fourth data column (axis label taken from its name).
  plot(mar[, 1], mar[, 4], xlab = "datetime", ylab = names(mar[4]), type = "l")
})
dev.off()
|
30c38c037d99eb804779060ef0a3af74d67a391a
|
8d50d409e7aa23e8d6cb45dd4999eda36043f08d
|
/man/SW.Study-class.Rd
|
794854bdacb202e84b8681f5fd41b27b6e2c13fc
|
[
"MIT"
] |
permissive
|
mattmoo/stepmywedge
|
edaa2f50dc2665be0c5bd9fdfd924262dd40a9c5
|
2e1ac9853998a8258978610b48b87bc748f0aab2
|
refs/heads/master
| 2023-04-28T01:16:41.150738
| 2023-04-13T01:09:57
| 2023-04-13T01:09:57
| 178,087,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,750
|
rd
|
SW.Study-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SW.Study.R
\docType{class}
\name{SW.Study-class}
\alias{SW.Study-class}
\alias{SW.Study}
\title{A Reference Class to represent a stepped-wedge cluster-randomised trial.}
\description{
A Reference Class to represent a stepped-wedge cluster-randomised trial.
}
\section{Fields}{
\describe{
\item{\code{clusters}}{List of all clusters stored as a factor.}
\item{\code{study.dt}}{data.table that includes all site and cluster parameters
(some duplicated data).}
\item{\code{cluster.dt}}{data.table that includes cluster parameters.}
\item{\code{site.dt}}{data.table that includes site parameters.}
\item{\code{data.dt}}{data.table that includes the data from participants.}
\item{\code{sim.ppt.per.unit.time.mean}}{Mean patients per unit time (will be forced
positive)}
\item{\code{sim.ppt.per.unit.time.sd}}{Variance in patients per unit time.}
\item{\code{sim.normal.preintervention.mean}}{The outcome before applying any
effects.}
\item{\code{sim.normal.intervention.effect.mean}}{The mean effect of the
intervention across all sites.}
\item{\code{sim.normal.intervention.effect.sd}}{The variance of the intervention
effect between sites.}
\item{\code{sim.site.effect.mean}}{The mean of the effect added separately to each
site.}
\item{\code{sim.site.effect.sd}}{The variance of the effect added separately to each
site.}
\item{\code{sim.site.effect.force.sign}}{Force the time effect to a certain sign
(should be in c(-1,0,+1))}
\item{\code{sim.time.effect.per.unit.mean}}{The mean of the effect added per unit
time for each site.}
\item{\code{sim.time.effect.per.unit.sd}}{The variance of the effect added per unit
time for each site.}
\item{\code{sim.individual.noise.mean}}{The mean of the variance between individuals
for each site.}
\item{\code{sim.individual.noise.sd}}{The variance of the variance between
individuals for each site.}
\item{\code{perm.dt}}{A table that holds a number of ways in which to permute sites
to different clusters.}
\item{\code{stat.dt}}{A statistic distribution generated by permuting sites to
different clusters.}
}}
\section{Methods}{
\describe{
\item{\code{add.clusters(clusters.to.add)}}{Add clusters to the experiment.}
\item{\code{cut.data.dt.time.to.periods(period.starts)}}{Cuts the continuous time variable in data.dt into periods, takes a vector of start times for cut}
\item{\code{faceted.line.plot(dot.size = 2, ylims = NULL, outcome.name = "outcome")}}{Plots the study as a line plot with facets for different sites. ylims and outcome.name are mainly for animations.}
\item{\code{generate.cluster.dt()}}{Generate a data.table of the clusters in the study.}
\item{\code{generate.perm.dt(max.r)}}{Wrapper for function generate.perm.dt in analysis.r}
\item{\code{generate.sim.data.normal(save.intermediates = T)}}{Generates synthetic data, can save the intermediate values for a nice animation if you want.}
\item{\code{generate.sim.data.tsd()}}{Generate data.dt from Timed.Sampling.Dist object.}
\item{\code{generate.site.dt()}}{Generate a data.table of the sites in the study.}
\item{\code{generate.site.sim.parameters()}}{Generate simulation parameters for each site.}
\item{\code{generate.stat.dt(
max.r,
outcome.col.name = "outcome",
intervention.col.name = "group",
stat.per.site = F,
statistic = "WMWU",
other.predictors = NULL,
...
)}}{Wrapper for function generate.stat.dt in analysis.r}
\item{\code{generate.study.dt()}}{Generate a data.table of the study.}
\item{\code{get.cluster.by.name(cluster.name)}}{Get a cluster given its name.}
\item{\code{get.cluster.dt(force.generate = F)}}{Get a data.table of the clusters in the study.}
\item{\code{get.cluster.names()}}{Get all cluster names.}
\item{\code{get.clusters()}}{Get clusters in the study.}
\item{\code{get.site.by.name(site.name)}}{Get a site given its name; needs to do a bit of trawling.}
\item{\code{get.site.dt(force.generate = F)}}{Get a data.table of the sites in the study.}
\item{\code{get.sites(cluster.name = "")}}{Gets a list of site objects, returning all sites for all cluster if no cluster.name provided.}
\item{\code{get.study.dt(force.generate = F)}}{Get a data.table of the study.}
\item{\code{rank.data.dt()}}{Gives ranks to outcomes in data.dt}
\item{\code{set.clusters(clusters)}}{Replace the clusters in the study; clusters should be provided as a SW.Cluster object.}
\item{\code{set.sim.parameters(
sim.ppt.per.unit.time.mean = NA_integer_,
sim.ppt.per.unit.time.sd = NA_integer_,
sim.normal.preintervention.mean = NA_integer_,
sim.normal.intervention.effect.mean = NA_integer_,
sim.normal.intervention.effect.sd = NA_integer_,
sim.site.effect.mean = NA_integer_,
sim.site.effect.sd = NA_integer_,
sim.time.effect.per.unit.mean = NA_integer_,
sim.time.effect.per.unit.sd = NA_integer_,
sim.site.effect.force.sign = NA_integer_,
sim.individual.noise.mean = NA_integer_,
sim.individual.noise.sd = NA_integer_
)}}{Set simulation parameters for the study.}
\item{\code{set.sim.parameters.default(
sim.ppt.per.unit.time.mean = 10,
sim.ppt.per.unit.time.sd = 2,
sim.normal.preintervention.mean = 15,
sim.normal.intervention.effect.mean = 2,
sim.normal.intervention.effect.sd = 0.6,
sim.site.effect.mean = 0,
sim.site.effect.sd = 2,
sim.time.effect.per.unit.mean = 0,
sim.time.effect.per.unit.sd = 0.005,
sim.site.effect.force.sign = c(-1, 0, +1)[3],
sim.individual.noise.mean = 2,
sim.individual.noise.sd = 0.4
)}}{Set simulation parameters for the study.}
\item{\code{update.groups.data.dt()}}{Updates the group of data.dt according to timing of clusters.}
}}
\section{Normal simulation}{
NA
}
\section{Permutation testing}{
NA
}
|
07e9a13c184a13d473646b1cef5782f50ca4bc38
|
3c7cc5d2ebea22a3d44b5fb04051dd1980b422cd
|
/rscript.R
|
8e9995be817c0448757cf6b6f885fe76ff623bde
|
[] |
no_license
|
karinahrne/uppsala-workshop
|
18c95e3a1f0db7ce8ba2ae483e3b78c6c89ee746
|
16a28872b0c9e8ff703635751ee3a75dbd979fba
|
refs/heads/master
| 2020-03-31T22:08:30.733014
| 2018-10-12T13:10:23
| 2018-10-12T13:10:23
| 152,607,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
r
|
rscript.R
|
# Load the cat data set and inspect its structure and column types.
cats <- read.csv(file = "data/feline-data.csv")
str(cats)
typeof(cats$likes_string)
str(cats$likes_string)
# Examples of R's basic atomic types.
typeof(TRUE)
typeof(3.14)
typeof(3L)
typeof(1+1i)
# An empty length-3 character vector.
another_vector <- vector(mode = "character", length = 3)
# Write the cleaned data out, creating the target folder if it is missing.
if (!dir.exists("cleaned_data")) {
  dir.create("cleaned_data", showWarnings = FALSE)
}
write.csv(cats, file = "cleaned_data/feline-data.csv", row.names = FALSE)
|
cda33de86ef1051496581b4de13295efb95e6bef
|
8a08e766f0e12d6679ad32164f8f4e022db6128c
|
/8-02_GeneratorPurchasingProblem.R
|
9c6b65b4a3147cd3093561738d0fc8481b2eddff
|
[] |
no_license
|
earnor/IM3
|
ff0198c235901023e7af5c91cb936a90490ddd0d
|
6f2ed42f27832ae90ef85ccb1dec1377dc546fbc
|
refs/heads/master
| 2021-01-19T15:19:19.098957
| 2017-09-07T15:40:52
| 2017-09-07T15:40:52
| 100,959,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,211
|
r
|
8-02_GeneratorPurchasingProblem.R
|
# *-----------------------------------------------------------------
# | PROBLEM NUMBER: 8.2
# | PROBLEM NAME: Generator Purchasing Program
# | UPDATE: AE
# | DESCRIPTION: Minimising purchase cost is the ultimate goal in
# | the search for generators to be puchased to meet
# | demand for a five year period.
# |
# | KEYWORDS: generating capacity, purchasing, program,
# | generators
# *-----------------------------------------------------------------
# Note: To run this program, you must have installed the packages
# 'linprog' and 'lpSolve'
### ------------------DATA AND PACKAGE IMPORT-----------------
library(linprog)
library(lpSolve)
### ------------------VARIABLE DEFINITION---------------------
# Z is a vector of coefficients of the objective function.
# f is a vector of left side of functional constraints, with indices
# from 1
# g is the corresponding right side of the constraint
# A.l is the collection of all constraints' left sides (f)
# that have a less than or equal to sign.
# b.l is the corresponding right side (g).
# A and b also carry the indices .m for constraints that has a more
# than or equal to sign, and .e for one that has an equal sign.
# The constraint is then constructed using A*x=b where x is a vector
# of variables
### ---------------VARIABLE INPUT-------------------
# Based on the following objective function and constraints to
# minimise costs.
# We set up the objective function for all 4 generator types
# and every year.
# Z = 300*x.11 + 460*x.12 + 670*x.13 + 950*x.14 # First year
# + 250*x.21 + 375*x.22 + 558*x.23 + 790*x.24 # Second year etc.
# + 200*x.31 + 350*x.32 + 465*x.33 + 670*x.34
# + 170*x.41 + 280*x.42 + 380*x.43 + 550*x.44
# + 145*x.51 + 235*x.52 + 320*x.53 + 460*x.54
# The indices of the variables are replaced by the numbers 1-20, so
# that indices 1-4 are for year 1, 5-8 for year 2, and so on.
# We set the functional constraints for each year equal to those
# stated in the exercise.
# The types of generators generate 10 MW, 25 MW, 50 MW and 100 MW
# respectively.
# The power company currently has a generating capacity of 750 MW.
# Constraints 1-5 are the functional constraints for years 1-5
# P.1 = 750 + 10*x.1 + 25*x.2 + 50*x.3 + 100*x.4 >= 780
# P.2 = P.1 + 10*x.5 + 25*x.6 + 50*x.7 + 100*x.8 >= 860
# P.3 = P.2 + 10*x.9 + 25*x.10 + 50*x.11 + 100*x.12 >= 950
# P.4 = P.3 + 10*x.13 + 25*x.14 + 50*x.15 + 100*x.16 >= 1060
# P.5 = P.4 + 10*x.17 + 25*x.18 + 50*x.19 + 100*x.20 >= 1180
# Before entering these into the models, we get all constants to the
# right-hand side.
# The variables x.1 - x.20 should be limited to positive integers.
# Constraints 6-25 are non-negativity constraints :
# x.1,...,x.20 >= 0
### ---------------PROGRAM OUTPUT-------------------
# Objective function
# Objective coefficients: purchase cost of one generator of each type
# (10 / 25 / 50 / 100 MW), listed year by year for variables x.1..x.20.
Z <- c(
  300, 460, 670, 950,  # year 1
  250, 375, 558, 790,  # year 2
  200, 350, 465, 670,  # year 3
  170, 280, 380, 550,  # year 4
  145, 235, 320, 460   # year 5
)
# Functionality constraints
# Cumulative capacity constraints, one per year k = 1..5: units bought in
# years 1..k (types supplying 10/25/50/100 MW each) must add at least g.k MW
# on top of the existing 750 MW of generating capacity.
f.1 <- c(rep(c(10, 25, 50, 100), 1), rep(0, 16))
g.1 <- c(30)
f.2 <- c(rep(c(10, 25, 50, 100), 2), rep(0, 12))
g.2 <- c(110)
f.3 <- c(rep(c(10, 25, 50, 100), 3), rep(0, 8))
g.3 <- c(200)
f.4 <- c(rep(c(10, 25, 50, 100), 4), rep(0, 4))
g.4 <- c(310)
f.5 <- rep(c(10, 25, 50, 100), 5)
g.5 <- c(430)
# non-negativity constraints
# Non-negativity constraints x.k >= 0 for k = 1..20: f.(k+5) is the k-th
# unit vector of length 20 and g.(k+5) is the zero right-hand side.
f.6  <- c(1, rep(0, 19));             g.6  <- c(0)  # x.1  >= 0
f.7  <- c(rep(0, 1),  1, rep(0, 18)); g.7  <- c(0)  # x.2  >= 0
f.8  <- c(rep(0, 2),  1, rep(0, 17)); g.8  <- c(0)  # x.3  >= 0, and so on..
f.9  <- c(rep(0, 3),  1, rep(0, 16)); g.9  <- c(0)
f.10 <- c(rep(0, 4),  1, rep(0, 15)); g.10 <- c(0)
f.11 <- c(rep(0, 5),  1, rep(0, 14)); g.11 <- c(0)
f.12 <- c(rep(0, 6),  1, rep(0, 13)); g.12 <- c(0)
f.13 <- c(rep(0, 7),  1, rep(0, 12)); g.13 <- c(0)
f.14 <- c(rep(0, 8),  1, rep(0, 11)); g.14 <- c(0)
f.15 <- c(rep(0, 9),  1, rep(0, 10)); g.15 <- c(0)
f.16 <- c(rep(0, 10), 1, rep(0, 9));  g.16 <- c(0)
f.17 <- c(rep(0, 11), 1, rep(0, 8));  g.17 <- c(0)
f.18 <- c(rep(0, 12), 1, rep(0, 7));  g.18 <- c(0)
f.19 <- c(rep(0, 13), 1, rep(0, 6));  g.19 <- c(0)
f.20 <- c(rep(0, 14), 1, rep(0, 5));  g.20 <- c(0)
f.21 <- c(rep(0, 15), 1, rep(0, 4));  g.21 <- c(0)
f.22 <- c(rep(0, 16), 1, rep(0, 3));  g.22 <- c(0)
f.23 <- c(rep(0, 17), 1, rep(0, 2));  g.23 <- c(0)
f.24 <- c(rep(0, 18), 1, rep(0, 1));  g.24 <- c(0)
f.25 <- c(rep(0, 19), 1);             g.25 <- c(0)  # x.20 >= 0
# We now build the input into our simplex function (lpSolve::lp()).
# All 20 purchase-count variables x.1..x.20 must be integers.
int <- 1:20
# For our constraints:
# Manually the number of constraints are typed in
c.l <- 0 # For less-than-or-equal-to constraints
c.m <- 25 # For more-than-or-equal-to constraints
c.e <- 0 # For equal-to constraints
n <- length(Z) # amount of x-variables.
# No <= constraints exist in this model.
A.l <- NULL
b.l <- NULL
# Stack all >= rows (5 capacity + 20 non-negativity) into a 25 x 20
# coefficient matrix and its 25 x 1 right-hand side, in matching order.
A.m <- matrix(c(f.1,f.2,f.3,f.4,f.5, # functionality constraints
f.6 ,f.7 ,f.8 ,f.9 , # non-negativity constraints
f.10,f.11,f.12,f.13,
f.14,f.15,f.16,f.17,
f.18,f.19,f.20,f.21,
f.22,f.23,f.24,f.25)
,nrow=c.m,ncol=n, byrow=TRUE)
b.m <- matrix(c(g.1,g.2,g.3,g.4,g.5, # functionality constraints
g.6 ,g.7 ,g.8 ,g.9 , # non-negativity constraints
g.10,g.11,g.12,g.13,
g.14,g.15,g.16,g.17,
g.18,g.19,g.20,g.21,
g.22,g.23,g.24,g.25)
,nrow=c.m,ncol=1, byrow=TRUE)
A.e <- NULL
b.e <- NULL
# rbind() drops the NULL pieces, so A is just A.m and b is just b.m here.
A <- rbind(A.l,A.m,A.e)
b <- rbind(b.l,b.m,b.e)
# One direction string per constraint row, in the same order as the rows of A.
const.dir <- c(rep("<=",c.l),rep(">=",c.m),rep("=",c.e))
### ------------------CALCULATIONS-------------------
# Solve the integer program. const.dir is passed positionally (it is the
# fourth argument of lp()). NOTE(review): lp() already assumes x >= 0, so
# constraint rows 6-25 look redundant but harmless -- confirm before removing.
results <- lp (direction = "min", objective.in=Z, const.mat = A,
const.dir, const.rhs=b,
int.vec = int, all.int=FALSE, all.bin=FALSE,
num.bin.solns=1, use.rw=FALSE)
results$solution
results$objval
# The result of the simplex algorithm is that the
# optimal value is 3130
sum(Z*results$solution)
# In fact, the optimal value is 3115 for the vector below -- a feasible
# integer solution that the solver did not return.
resvec <- c(0,0,0,1
,1,0,0,0
,0,0,0,1
,0,0,0,1
,0,1,0,1)
resvec
sum(Z*resvec)
### --------------------PLOT--------------------
### ------------------PLOT END-------------------
# END
|
07aaba988f0ddeedff83e2262e301f597d5ddb92
|
094ec4bbec762605f83d6af1b4de8266c7c6e0a6
|
/man/pickPeaks_rcpp.Rd
|
ee55dec08512e811cf40159d391b18cd9f9f7b97
|
[] |
no_license
|
tkimhofer/jres
|
09ada8e14ba4fe6f62babb722a304d7da2ba8bc9
|
2929fb11eb5581292ecf2386b620e9ef355f0fdd
|
refs/heads/master
| 2023-04-21T05:27:26.463814
| 2021-04-29T11:23:16
| 2021-04-29T11:23:16
| 242,622,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 842
|
rd
|
pickPeaks_rcpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{pickPeaks_rcpp}
\alias{pickPeaks_rcpp}
\title{Perform peak picking and get bounding box dimensions}
\usage{
pickPeaks_rcpp(jr, f1hz, f2ppm, noise, boundary, sf, dbug)
}
\arguments{
\item{jr}{Jres matrix, f1 in rows and f2 in cols}
\item{f1hz}{F1 scale of jr (Hz)}
\item{noise}{Intensity threshold for noise (no peaks are detected below this value)}
\item{boundary}{Initial bounding box estimate for determining peak size (one-sided estimate: x +/- boundary); this should be large enough to capture large signals (expressed in Hz)}
\item{sf}{Spectrometer frequency}
\item{f2ppm}{F2 scale of jr (ppm)}
}
\value{
List of dataframes summarising the detected peaks/features
}
\description{
Perform peak picking and get bounding box dimensions
}
|
63bb32927d8ee9c1ad9017b8069ccc76af1433aa
|
ed20ede20b5d75036148e771d353ca742a4bffce
|
/code/TT_es_calc.R
|
45dbdb5aa7b7ffdafd0d6643ba057cd147044481
|
[] |
no_license
|
yaroslavtsevam/TT-
|
4048befb6b4f8bc8ea489f8fd01f5c618c5157ba
|
31093aff8360a16fc04b079f55a45e9c44d57016
|
refs/heads/master
| 2023-08-23T03:48:06.459193
| 2021-09-22T15:10:39
| 2021-09-22T15:10:39
| 238,436,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,369
|
r
|
TT_es_calc.R
|
source("code/TT_Moscow_data_collection.R")
library(pracma)
# zoo: na.approx() is used below for gap-filling the weather series
library(zoo)
library(tidyquant)
# Load the Bolotnaya tree data and attach the growth measurements per tree id.
#AllData = BLTNdata[[2]]
BLTN = AllData %>% filter(Site=="BOLOTNAYA")
BLTN = BLTN %>% BEFadd( verboseFlag = "con")
BLTN = BLTN %>% left_join(read_delim("data/Bolotnaya_growth.csv", delim=";"),by="id")
# Stored biomass from a conical-shell approximation of the annual increment.
# NOTE(review): units inferred from the /1000 (mm->m) and /100 (cm->m)
# factors and the C_V volume-to-mass conversion -- confirm.
BLTN = BLTN %>% mutate(biomas_stored = pi*tree_height*1/3*(growth/1000)*(d/100+growth/1000) * C_V*1000)
# Station weather for Moscow centre (Balchug); the time column header is in
# Russian and is renamed to "time" on read.
Moscow_center_weather_19 <- read_delim("data/Moscow_center_weather_19.csv",delim = ";",
escape_double = FALSE, comment = "#",
col_types = cols(Pa = col_double()), trim_ws = TRUE) %>%
rename(time ="Местное время в Москве (центр, Балчуг)" ) %>%
mutate(time = as_datetime(time,format = "%d.%m.%Y %H:%M"))
# Extrapolate the 10 m wind speed to 20 m with a logarithmic wind profile
# (roughness length 0.6 m).
MCW19 = Moscow_center_weather_19 %>% mutate(ff20 = Ff*log(20/.6)/log(10/.6))
MCW19 = MCW19 %>% select(time,"T",Po, Ff,ff20, RRR) %>% rename("sT" = "T")
# Join weather onto the tree data and linearly interpolate gaps.
# NOTE(review): the trailing rep(NA,8) pads each series back to nrow(Bdata);
# the magic 8 presumably matches rows dropped by na.approx -- confirm.
Bdata = BLTN %>% ungroup() %>% left_join(MCW19, by ="time") %>% as.data.frame()
Bdata$ff20 = c(na.approx(Bdata$ff20, x = index(Bdata$ff20), na.rm = TRUE, maxgap = Inf), rep(NA,8))
Bdata$Ff = c(na.approx(Bdata$Ff, x = index(Bdata$Ff), na.rm = TRUE, maxgap = Inf), rep(NA,8))
Bdata$sT = c(na.approx(Bdata$sT, x = index(Bdata$sT), na.rm = TRUE, maxgap = Inf), rep(NA,8))
#Aerodynamic resistance according to Tom and Eddy Pro
Bdata = Bdata %>% mutate(r =log((20-20*0.67)/(20*.15))^2/(ff20*0.41^2))
Bdata = Bdata %>% mutate(ra =log((20-20*0.67)/(20*0.0123))*log((20-20*0.67)/(20*.123))/(Ff*0.41^2))
#Bdata = Bdata %>% mutate(a_d = air_density(tair,rh, Po, 3.5) )
# Sensible heat flux C = rho*Cp*dT/r, in kW m-2.
Bdata = Bdata %>% mutate(H = 1.006*1.202*(nt1 - TTair)/(r) )
# Drop implausibly large sap-flux readings.
Bdata$Flux[Bdata$Flux >100] = NA
# Latent heat: lambda_T = Flux * 2264.705 kJ; /3600 to express it in kWh.
Bdata = Bdata %>% mutate(L = Flux * 2264.705 /3600)
# Net radiation assumed to be (L+H)/0.9; ground heat flux taken as 10% of Rn.
Bdata = Bdata %>% mutate(Rn = (L+H)/.9 )
Bdata = Bdata %>% mutate(G = 0.1*Rn )
# Compute moist-air density (kg m^-3).
#
# tc  air temperature in degrees Celsius
# rh  relative humidity in per cent (0-100)
# Po  station pressure in mm Hg (converted to hPa/mb internally)
# z   geopotential altitude in metres
#
# Returns the density of moist air in kg m^-3.
air_density <- function(tc, rh, Po, z) {
  # Saturation vapour pressure (mb) at tc via the Herman Wobus polynomial.
  eso <- 6.1078
  c0 <- 0.99999683
  c1 <- -0.90826951E-02
  c2 <- 0.78736169E-04
  c3 <- -0.61117958E-06
  c4 <- 0.43884187E-08
  c5 <- -0.29883885E-10
  c6 <- 0.21874425E-12
  c7 <- -0.17892321E-14
  c8 <- 0.11112018E-16
  c9 <- -0.30994571E-19
  pol <- c0 + tc*(c1 + tc*(c2 + tc*(c3 + tc*(c4 + tc*(c5 + tc*(c6 + tc*(c7 + tc*(c8 + tc*(c9)))))))))
  esmb <- eso / pol^8
  # Actual vapour pressure (mb) from the relative humidity.
  emb <- esmb * rh / 100
  # Station pressure (mb) from the altimeter setting and geopotential altitude.
  r <- 6369E3
  k1 <- 0.190263
  k2 <- 8.417286E-5
  # Convert mm Hg to mb BEFORE raising to k1. The original expression
  # Po*1.33322^k1 applied ^ first (it binds tighter than *), which inflated
  # the computed pressure by orders of magnitude.
  p <- ((Po * 1.33322)^k1 - (k2 * ((r * z) / (r + z))))^(1 / k1)
  # Density from the partial pressures of water vapour (pv) and dry air (pd),
  # each converted from mb to Pa.
  Rv <- 461.4964
  Rd <- 287.0531
  tk <- tc + 273.15
  pv <- emb * 100
  pd <- (p - emb) * 100
  density <- (pv / (Rv * tk)) + (pd / (Rd * tk))
  return(density)
}
# Drop the reference (TTR) loggers from the tree table and keep a separate
# reference table of rows that have an above-canopy air temperature.
Bdata = Bdata %>% filter(Species != "TTR")
TTR = AllData %>% filter(!is.na(TTair))
####################################### Temperature #############################################
# Daily max/min/mean of the in-canopy vs above-canopy temperature difference.
Tdif = TTR %>% group_by(Site, Species, doy) %>%
summarise(dTairmax = max(dTair, na.rm = T), dTairmin = min(dTair,na.rm = T), dTairmean = mean(dTair, na.rm=T))
# dTair: logger air temperature minus the reference (TT) air temperature.
Bdata = Bdata %>% mutate( dTair = tair - TTair)
# Example of couple of days timeseries; station temperature (shifted by 20
# for the secondary axis) is overlaid as a dashed black moving average.
Sys.setlocale("LC_ALL","English")
ggplot(data=Bdata %>% filter(doy>222 & doy <229, Species != "TTR", id !="218A0248",id !="218A0248" ), aes(x = time, y = dTair))+
geom_point(aes(color=Species))+
#geom_line(aes(color=id, group = id))+
geom_ma( aes(group =id, color = Species), n=3,size=.4,)+
geom_ma(data = MCW19 %>% mutate(doy = yday(time)) %>% filter(doy>222 & doy <229),
aes( x = time, y = sT-20),color ="black",linetype="42",size=1,alpha=.4, n=3)+
geom_hline(aes(yintercept = 0))+
scale_y_continuous(sec.axis = sec_axis(~ . + 20))+
scale_x_continuous(n.breaks = 7, trans="time")+
facet_wrap(~Species, nrow = 2)+
theme_bw()
# Per species main dynamics: night-time (hours <5 or >21) daily maxima of
# dTair per tree, then the cross-tree mean and sd per species and day.
Tdifn = Bdata %>% filter(hour<5 | hour >21) %>% filter(Species != "TTR") %>% group_by(Species,doy,id) %>%
summarise(dt = max(dTair,na.rm = T)) %>% group_by(Species,doy) %>%
summarise(dtm = mean(dt,na.rm = T), sdt = sd(dt,na.rm = T)) %>%
filter(Species != "TTR", sdt < 2)
ggplot(data = Tdifn)+
geom_point(aes(x=doy, y = dtm, color = Species ))+
#geom_smooth(aes(x=doy, y = dtm, color = Species), span =4 )+
geom_errorbar(aes(x=doy, ymin = dtm-2*sdt,ymax=dtm+2*sdt, color = Species),linetype="dashed")+
geom_ma(aes(x=doy, y = dtm, color = Species), linetype="solid", n=7 )+
facet_wrap(~Species,nrow = 2, scales = "free")+
theme_bw()
# Diurnal temperature difference inside and outside canopy, averaged per
# month/hour/species.
Bdiurnal = Bdata %>% filter(Species != "TTR") %>%
mutate(month = month(time)) %>%filter(month < 11) %>% mutate(minute = minute(time))%>%
group_by(month, hour, Species) %>% summarise(dT = mean(dTair, na.rm = T), sdT = sd(dTair, na.rm=T)) %>% as.data.frame()
# Shift hours 1,4,7,... by half an hour, presumably to keep the dodged
# points readable -- NOTE(review): confirm the intent.
Bdiurnal$hour[ceiling((Bdiurnal$hour-1)/3) == (Bdiurnal$hour-1)/3 ] = Bdiurnal$hour[ceiling((Bdiurnal$hour-1)/3) == (Bdiurnal$hour-1)/3 ]+.5
ggplot(data = Bdiurnal)+
geom_point(aes(x = hour, y = dT, color = Species), position = position_dodge(width = 1) )+
geom_smooth(aes(x = hour, y = dT, color = Species, group = Species), se = F)+
geom_errorbar(aes(x=hour, ymin = dT-sdT,ymax=dT+sdT, color = Species),linetype="dashed",
position = position_dodge(width = 1))+
geom_hline(aes(yintercept=0))+
facet_wrap(~month, nrow=2, scales = "free")+
theme_bw()
# Maximum temperature difference inside day per species: daily in-canopy
# range (Tvar) per tree vs the reference logger's range (TTvar).
Bvar = Bdata %>% group_by(doy,id, Species) %>% filter(!is.na(tair)) %>% filter(Species != "TTR") %>%
summarise(Tvar = max(tair) - min(tair), TTvar = max(TTair)-min(TTair)) %>%
group_by(doy, Species) %>% summarise(dT = mean(Tvar), sTvar = sd(Tvar),
dTT = mean(TTvar), sTTvar = sd(TTvar))
ggplot(data = Bvar)+
geom_point(aes(x = doy, y = dT, color = Species) )+
geom_smooth(aes(x = doy, y = dT, color = Species), se = F)+
#geom_errorbar(aes(x=doy, ymin = dT-sTvar,ymax=dT+sTvar, color = Species),linetype="dashed",
#              position = position_dodge(width = 1))+
geom_point(aes(x = doy, y = dTT), color = "black")+
geom_smooth(aes(x = doy, y = dTT), color = "black", se = F)+
#geom_errorbar(aes(x=doy, ymin = dTT-sTTvar,ymax=dTT+sTTvar), color = "black",linetype="dashed",
#              position = position_dodge(width = 1))+
geom_hline(aes(yintercept=0))+
facet_wrap(~Species, nrow=2)+
theme_bw()
#################################### Energy ##########################################################
##### Diurnal graph per month per species
# Hourly sums of the energy-balance terms (Rn, L, H, G) per month/species.
# NOTE(review): the *1.5 factor looks like a record-interval-to-hour
# scaling -- confirm the logging interval it assumes.
Bde = Bdata %>% mutate(month = month(time))%>% group_by(month,hour,Species) %>% filter(Species != "TTR") %>%
summarise(Rn = sum(Rn, na.rm = T)*1.5, L = sum(L, na.rm=T)*1.5, G = sum(G, na.rm = T)*1.5, H = sum(H, na.rm = T)*1.5) %>%
filter(!is.na(hour))
# Shift hours 1,4,7,... by half an hour for plot readability.
Bde$hour[ceiling((Bde$hour-1)/3) == (Bde$hour-1)/3 ] = Bde$hour[ceiling((Bde$hour-1)/3) == (Bde$hour-1)/3 ]+.5
# Rn red, L blue, H green, G brown; one facet per month x species.
ggplot(data = Bde%>% filter(Species != "TTR"))+
geom_point(aes(x=hour, y = Rn), color = "red")+
geom_point(aes(x=hour, y = L), color = "blue", shape = 2)+
geom_point(aes(x=hour, y = H), color = "green", shape = 3)+
geom_point(aes(x=hour, y = G), color="brown", shape = 4)+
geom_smooth(aes(x=hour, y = Rn), color = "red", se = F)+
geom_smooth(aes(x=hour, y = L), color = "blue",linetype="dotted", se = F)+
geom_smooth(aes(x=hour, y = H), color = "green",linetype="dashed", se = F)+
geom_smooth(aes(x=hour, y = G), color="brown", se = F)+
facet_grid(month~Species, scales = "free")+
theme_bw()
##### Stacked column per month
# Monthly per-tree totals of L/H/G stacked into one column, with Rn drawn
# as a horizontal error-bar reference.
Bsum = Bdata %>% mutate(month = month(time)) %>% filter(!is.na(month))%>%
group_by(id,doy,month, Species) %>% filter(Species != "TTR") %>%
summarise(Rn = sum(Rn, na.rm = T)*1.5, L = sum(L, na.rm=T)*1.5,
G = sum(G, na.rm = T)*1.5, H = sum(H, na.rm = T)*1.5, d = mean(d,na.rm = T))%>%
pivot_longer(cols=c(L,H,G), names_to = "heat_type", values_to = "energy") %>%
group_by(month,id, Species, heat_type) %>% filter(!is.na(month))%>%
summarise(E = sum(energy, na.rm = T), Rn = sum(Rn, na.rm = T))
ggplot(data= Bsum)+
geom_col(aes(x=id, y = E, fill = heat_type))+
geom_errorbar(aes(x = id, ymin = Rn, ymax = Rn))+
facet_grid(month~Species, scales = "free")+
theme_bw()
#
# ggplot(data = Bdata %>% filter(Species != "TTR") %>% mutate(month = month(time)))+
#   geom_point(aes(x=time,y = Rn ),alpha = .1, color = "red")+
#   geom_smooth(aes(x=time,y = Rn),color = "red")+
#   geom_point(aes(x=time,y = L), alpha = .1, color = "blue")+
#   geom_smooth(aes(x=time,y = L), color = "red")+
#   geom_point(aes(x=time,y = H), alpha = .1, color = "green")+
#   geom_smooth(aes(x=time,y = H), color = "green")+
#   geom_point(aes(x=time,y = G), alpha = .1, color = "brown")+
#   geom_smooth(aes(x=time,y = G), color = "brown")+
#   facet_wrap(~Species, scales = "free")+
#   theme_bw()
#
#
#
# ggplot(data = Bst)+
#   geom_col(aes( x = id,y = Rn))+
#   facet_wrap(~Species, ncol = 2, scale="free" )+
#   #scale_y_continuous(limits=c(-.45,.5))+
#   theme_bw()
# Dump the assembled per-tree table for external use.
write.csv(Bdata, file="data.csv")
################### Growth
# Total biomass stored per season, one bar per tree.
ggplot(data = BLTN %>% filter(Site == "BOLOTNAYA") %>%filter(Species != "TTR")%>% group_by(id, Species)%>% summarise(kg=mean(biomas_stored)))+
geom_col(aes( x = id,y = kg))+
facet_wrap(~Species, ncol = 2, scale="free" )+
#scale_y_continuous(limits=c(-.45,.5))+
theme_bw()
# Artificial graph of biomass growth: daily NDVI (clipped to [-1,1]) is
# accumulated per tree and used to distribute the season total over time.
Bgr = Bdata %>%filter(Species != "TTR") %>% mutate(NDVIc = replace(NDVIc, NDVIc > 1, 1) , NDVIc = replace(NDVIc, NDVIc < -1, -1)) %>%
group_by(id, Species,doy) %>% summarise(bio_proxy = quantile(NDVIc, 0.85,na.rm = T), kg=mean(biomas_stored), n=n()) %>%
mutate(bio_proxy = replace(bio_proxy,bio_proxy<0,0), bioproxy = cumsum(bio_proxy)) %>%
mutate(biomas_stored = kg*bioproxy/max(bioproxy)) %>% filter(id != "218A0186")
# Regular day grid (doy 180..310) for every tree, joined to the proxy data.
doy = rep(180:310,unique(Bgr$id)%>%length )
id = rep(unique(Bgr$id), rep(311-180,unique(Bgr$id)%>%length))
df = data.frame(id,doy) %>% left_join(Bgr, by=c("id","doy"))
df$biomas = 0
# Gap-fill each tree's biomass curve by linear interpolation; the Species
# column is back-filled from the tree's factor levels.
# NOTE(review): the print() calls are debug output of the series before and
# after interpolation; the levels() trick assumes one species per tree id.
for( i in df$id %>% unique()){
biomas = df$biomas_stored[df$id == i]
Species = as.factor(df$Species[df$id == i])%>% levels
df$Species[df$id == i] = Species
print(biomas)
biomas = na.approx(biomas, x = index(biomas), na.rm = T, maxgap = Inf)
print(biomas)
df$biomas[df$id == i] = biomas
}
ggplot(data = df%>%filter(doy<300))+
geom_point(aes(x=doy, y=biomas, group=id), shape=3,size=.5, alpha=4/10)+
geom_line(aes(x=doy, y=biomas, group=id),size=.5)+
#geom_ma(aes(x=doy, y=biomas, color =id), n=3, linetype=1, size=1)+
facet_wrap(~Species, scales = "free")+
theme_bw()
################################## LAI
TTR = TTR %>% mutate( dTair = tair - TTair)
TTR = TTR %>% filter(Site %in% c("BOLOTNAYA","TROITSK"))
TTR = TTR %>% filter(Species != "TTR")
TTR$LAIb[is.infinite(TTR$LAIb)] = NA
# T dif
LAI = Bdata%>% filter(Species != "TTR")
LAI$LAIb[is.infinite(LAI$LAIb)] = NA
# Total incident brightness across the spectral bands (corrected channels).
LAI = LAI %>% mutate(lightness = TTR_450c+TTR_500c+TTR_550c+TTR_570c+TTR_600c+TTR_650c+TTR_610c+
TTR_680c+TTR_730c+TTR_760c+TTR_810c+TTR_860c)
LAI = LAI %>% mutate(blueness = TTR_450c+TTR_500c)
LAI = LAI %>% mutate(blueness2 = (b_V_450+b_B_500)/(TTR_450+TTR_500c))
-log(LAI$blueness2) %>% summary
# Weighted photosynthetically-active radiation above (pPARic) and below
# (pPARbc) the canopy; the per-band weights are an assumed PAR response
# curve -- NOTE(review): confirm their source.
LAI = LAI %>% mutate(pPARic = TTR_450c*2.55+TTR_500c*.58+TTR_550c*.78+TTR_570c*.9+TTR_600c*.95+TTR_650c+
TTR_680c*.8+TTR_730c*.2+TTR_760c*.05)
LAI = LAI %>% mutate(pPARbc = b_V_450c*2.55+b_B_500c*0.58+b_G_550c*0.78+b_Y_570c*0.9+b_O_600c*0.95+b_R_650c+
b_S_680c*0.8+b_T_730c*0.2+b_U_760c*0.05)
# Beer-Lambert style plant-area index from the corrected bands.
LAI = LAI %>% mutate(LAIparc = -log(pPARbc/pPARic)/3)
LAI = LAI %>% mutate(pPARi = TTR_450*2.55+TTR_500*.58+TTR_550*.78+TTR_570*.9+TTR_600*.95+TTR_650+
TTR_680*.8+TTR_730*.2+TTR_760*.05)
# NOTE(review): b_V_450*20.55 here vs *2.55 in pPARbc above -- likely a
# typo; LAIpar does not appear to be used later in this script.
LAI = LAI %>% mutate(pPARb = b_V_450*20.55+b_B_500*0.58+b_G_550*0.78+b_Y_570*0.9+b_O_600*0.95+b_R_650+
b_S_680*0.8+b_T_730*0.2+b_U_760*0.05)
LAI = LAI %>% mutate(LAIpar = -log(pPARb/pPARi)/3)
LAI = LAI %>% group_by(id)%>%mutate(nlightness = lightness /max(lightness, na.rm=T))
# NOTE(review): order() returns a permutation, not ranks; rank() may have
# been intended for a brightness quantile.
LAI = LAI %>% mutate(qlightness = order(lightness) /n())
#LAI = LAI %>% group_by( Species, id,doy)%>% summarise(b = mean(blueness2,na.rm=T),nl = max(nlightness,na.rm=T),
#   q = mean(qlightness,na.rm=T),LAImax = max(LAIpar, na.rm = T),
#   LAImin = min(LAIb,na.rm = T), LAImean = mean(LAIb, na.rm=T))
# Empirical corrections: +2 offset for low-light records, keep only the
# 13:00 reading, and drop outliers more than one sd below the monthly mean.
LAI$LAIparc[!is.na(LAI$LAIparc) & LAI$TTR_450c<1000] = LAI$LAIparc[!is.na(LAI$LAIparc) & LAI$TTR_450c<1000]+2
LAI$LAIparc[ LAI$hour != 13 ] = NA
LAI = LAI %>% mutate(month = month(time))%>% group_by(month) %>%
mutate(se = sd(LAIparc, na.rm = T),m = mean(LAIparc, na.rm = T), ) %>%
mutate(LAIparc = replace(LAIparc,LAIparc < m-se,NA))
LAI = LAI %>% mutate(LAIparc = replace(LAIparc,doy >285 & LAIparc >1.9,0.5))
LAI = LAI %>% mutate(LAIparc = replace(LAIparc,LAIparc < 0.1,0.5))
# PAI dynamics
ggplot(data = LAI)+
#geom_point(aes(x=doy, y = q, color = q ))+
geom_point(aes(x=time, y = LAIparc, color = id ))+
#geom_ma(aes(x=doy, y = LAIparc, color = nl ), n=7)+
geom_smooth(aes(x=time, y = LAIparc, color = id ), span=.1, se=F)+
#geom_point(aes(x=time, y = q, color = id ))+
#geom_line(aes(x=doy, y = LAImax, color = Species ))+
facet_wrap(~Species,nrow = 2)+
theme_bw()
# Seasonal plant/wood/leaf area per tree: PAI from the leaf-on period,
# WAI from after doy 290 (leaf-off), LAI as their difference. Two cells
# are patched by hand.
PAI = LAI %>%group_by(id,Species) %>% summarise(PAI = mean(replace(LAIparc, doy>290,NA),na.rm=T),
WAI =mean(replace(LAIparc, doy<290,NA),na.rm=T))%>% mutate(LAI = PAI -WAI) %>%
pivot_longer(cols = c("WAI","LAI"), names_to = "index_name", values_to = "index")%>% as.data.table()
PAI[17,5] = 0.52
PAI[18,5] = 2.8
ggplot(data = PAI )+
geom_col(aes(x = id, y = index, fill = index_name))+
facet_wrap(~Species, scales = "free")+
theme_bw()
###################### Particles absorption #############################
# Hourly PM2.5/PM10 concentrations for Moscow centre.
Cpm <- read_delim("data/Moscow_center_pm25.csv", ";", escape_double = FALSE,
col_types = cols(time = col_datetime(format = "%d.%m.%Y %H:%M")),
trim_ws = TRUE)
Cpm = Cpm %>% mutate(doy = yday(time),hour=hour(time)) # concentrations in 1e-6 g m-3 (ug m-3)
# Daily PAI per tree joined with the pollution record (by day and hour) and
# with the leaf-off WAI computed in the PAI table above.
LAI = LAI %>% group_by(id,doy) %>% mutate(PAI = mean(LAIparc,na.rm=T)) %>%
left_join(Cpm, by = c("doy","hour")) %>% select(-time.y)
LAI = LAI %>% left_join(PAI %>% filter(index_name == "WAI")%>% select(id,index), by ="id") %>%
rename(WAI = index)
# Gap-fill each tree's daily PAI series by linear interpolation over the
# observation index (zoo::na.approx). The original loop printed the
# unrelated leftover `biomas` variable from the growth section; that
# debug output is removed here.
for (i in unique(LAI$id)) {
  lai <- LAI$PAI[LAI$id == i]
  # NOTE(review): na.rm = TRUE drops leading/trailing NAs, which would
  # shorten `lai` and break the assignment below -- confirm every series
  # starts and ends with observed values.
  lai <- na.approx(lai, x = index(lai), na.rm = TRUE, maxgap = Inf)
  LAI$PAI[LAI$id == i] <- lai
}
# Deposition velocities (average/min/max) and the reference LAI used to
# scale them. NOTE(review): units assumed to be cm s-1 with 0.036 below
# converting concentration x velocity to a per-interval mass -- confirm.
Vdavg = 0.64
Vdmin = 0.25
Vdmax = 1
LAIpm10 = 6
# Per-record PM10 capture: deposition velocity scaled by the tree's PAI
# relative to (WAI + reference LAI), times the ambient concentration.
LAI = LAI %>% mutate(LAI = PAI-WAI) %>%
mutate(V_avg = Vdavg*(PAI)/(WAI + LAIpm10)) %>%
mutate(V_min = Vdmin*(PAI)/(WAI + LAIpm10)) %>%
mutate(V_max = Vdmax*(PAI)/(WAI + LAIpm10)) %>%
mutate(P_avg =V_avg*pm10*0.036) %>%
mutate(P_min =V_min*pm10*0.036) %>%
mutate(P_max =V_max*pm10*0.036) %>% rename(time = time.x) # g m-3
# Daily captured mass per tree (min/avg/max deposition scenarios).
pm10 = LAI %>% group_by(id,doy, Species) %>%
summarise(P_avg = sum(P_avg, na.rm = T)*1.5,P_min = sum(P_min, na.rm = T)*1.5,P_max = sum(P_max, na.rm = T)*1.5)
ggplot(data = pm10)+
#geom_point(aes(x=doy, y = P_avg, color = id)) +
#geom_smooth(aes(x=doy, y = P_avg, color = id)) +
geom_errorbar(aes(x=doy, y = P_avg,ymin=P_min,ymax=P_max, color = id), position = position_dodge(3))+
geom_smooth(aes(x=doy, y = P_avg, color = id), se =F, span=1)+
facet_wrap(~Species, scales = "free")+
theme_bw()
# Seasonal totals per tree with the min/max scenarios as a crossbar range.
pm10sum = pm10 %>% group_by(id, Species) %>%
summarise(P_max= sum(P_max,na.rm=T),P_avg= sum(P_avg,na.rm=T),P_min= sum(P_min,na.rm=T))
ggplot(data = pm10sum) +
geom_crossbar(aes(x=id, y= P_avg,ymin=P_min,ymax=P_max, color=Species))+
facet_wrap(~Species, scales = "free")+
theme_bw()
###### Final corrplot
summary(LAI)
names(LAI) %>% sort
# Daily per-tree table of structural / physiological indices used for the
# correlation analysis. Tree age (years) is hard-coded per sensor id;
# restricted to the growing season (month < 10).
# BUG FIXES vs. previous version:
#  - removed a stray "W" token after the "Biomas stored" line (syntax error)
#  - corrected `meWan` -> `mean` in the PM10 summary
#  - dropped the duplicated VPD column from select()
cordata_d = LAI %>% ungroup %>% mutate(age = case_when(
  id == "218A0077" ~ 55,
  id == "218A0212" ~ 55,
  id == "218A0255" ~ 55,
  id == "218A0262" ~ 55,
  id == "218A0281" ~ 55,
  id == "218A0104" ~ 30,
  id == "218A0210" ~ 30,
  id == "218A0285" ~ 30,
  id == "218A0079" ~ 90,
  id == "218A0138" ~ 90,
  id == "218A0277" ~ 90,
  id == "218A0121" ~ 55,
  id == "218A0111" ~ 55,
  id == "218A0153" ~ 45,
  id == "218A0186" ~ 45,
  id == "218A0270" ~ 35)) %>%
  select(biomas_stored, id, doy, canopy_area, d, PAI, VPD, tair, VTA_score,
         week, tree_height, LAI, P_avg, Flux, month, age) %>%
  filter(month < 10) %>% group_by(doy, id) %>% summarise(
    "Biomas stored" = mean(biomas_stored, na.rm = T),
    "Canopy area" = mean(canopy_area, na.rm = T),
    Diameter = mean(d, na.rm = T),
    VTA = mean(VTA_score, na.rm = T),
    Height = mean(tree_height, na.rm = T),
    LAI = mean(LAI, na.rm = T),
    PAI = mean(PAI, na.rm = T),
    PM10 = mean(P_avg, na.rm = T),
    Transpiration = mean(Flux, na.rm = T),
    "Transpiration / canopy area" = mean(Flux/canopy_area, na.rm = T),
    VPD = mean(VPD, na.rm = T),
    Tair = mean(tair, na.rm = T),
    Age = mean(age, na.rm = T)
  )
# Per-tree (whole-season) aggregation of the daily table: PM10 and
# transpiration are summed over days, everything else averaged.
# BUG FIX: "Transpiration / canopy area" previously averaged the literal
# string "Transpiration / canopy area" (mean() of a character, yielding NA
# with a warning); the column is now referenced with backticks.
cordata = cordata_d %>% ungroup() %>% select(-doy) %>% group_by(id) %>% summarise(
  "Biomas stored" = mean(`Biomas stored`, na.rm = T),
  "Canopy area" = mean(`Canopy area`, na.rm = T),
  Diameter = mean(Diameter, na.rm = T),
  VTA = mean(VTA, na.rm = T),
  Height = mean(Height, na.rm = T),
  LAI = mean(LAI, na.rm = T),
  PAI = mean(PAI, na.rm = T),
  PM10 = sum(PM10, na.rm = T),
  Transpiration = sum(Transpiration, na.rm = T),
  "Transpiration / canopy area" = mean(`Transpiration / canopy area`, na.rm = T),
  VPD = mean(VPD, na.rm = T),
  Tair = mean(Tair, na.rm = T),
  Age = mean(Age, na.rm = T)
) %>% select(-id)
# Drop identifier columns before building the correlation matrices
cordata_d = cordata_d %>%ungroup()%>% select(-id, -doy)
# rcorr() (Hmisc) returns matrices of Pearson r and p-values
res <- rcorr(as.matrix(cordata_d))   # daily resolution
res2 <- rcorr(as.matrix(cordata))    # per-tree (seasonal) resolution
# Seasonal correlations, upper triangle; non-significant cells left blank
corrplot(res2$r, type="upper", order="hclust",
         p.mat = res2$P, sig.level = 0.01, insig = "blank")
# Same matrix as r^2, numeric display
corrplot(res2$r^2,p.mat = res2$P, insig = "blank", pch.cex = 1,cl.pos = "n",cl.ratio = .1, sig.level = 0.05,
         order = "hclust",method = "number",cl.align.text="r" )
# Daily-resolution correlations, numeric display
corrplot(res$r,p.mat = res$P, order = "hclust",method = "number", sig.level = 0.05, insig = "blank",
         cl.pos = "n",cl.ratio = .1, cl.align.text="r" )
|
5c351740e5b7d8c264a1b9ec327aaaa5d66222a1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/discrimARTs/R/methods.R
|
eb7aafed99a9a133cbf72b38d7311f79c795116c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,406
|
r
|
methods.R
|
## Print method for discrimARTs fit objects: shows the fitting method,
## optimizer convergence code, negative log-likelihood, the starting values
## (flattened to a named vector) and the MLE parameter estimates.
print.discrimARTs <- function(x, ...) {
    shown <- c("method", "convergence", "neglogLik", "pars.init", "MLE.est")
    fit_summary <- x[shown]
    fit_summary$pars.init <- unlist(fit_summary$pars.init)
    print(fit_summary)
}
## Plot method for discrimARTs fit objects: histogram of the measured trait
## overlaid with the two fitted mixture components (dotted) and their sum
## (solid), with an optional parameter legend drawn via grid.
##
## Args:
##   x               discrimARTs fit (uses $input, $method, $MLE.est,
##                   $neglogLik; facing.gamma also needs $lower / $upper)
##   npoints         number of x values at which densities are evaluated
##   main, xlab      usual plot annotations
##   legend          draw the parameter legend? (logical)
##   legend.x/.y     legend anchor in normalized device coordinates
##   legend.digits   rounding for values shown in the legend
##   legend.fontsize legend font size in points
##   ...             unused; kept for S3 plot() compatibility
## Returns NULL invisibly (called for its plotting side effect).
plot.discrimARTs <- function(x, npoints=1e3, main=NULL, xlab='Measured trait',
    legend=TRUE, legend.x=0.9, legend.y=0.9, legend.digits=3, legend.fontsize=8, ...) {
    ## Input data for plotting
    .dat <- x$input
    .quants <- seq(from=min(.dat), to=max(.dat), length.out=npoints)
    .est <- as.list(x$MLE.est)
    if (x$method == 'normal' ) {
        dist1 <- with(.est, {
            (1-mix.prob) * dnorm( .quants, mean=dist1.par1, sd=dist1.par2)
        })
        dist2 <- with(.est, {
            mix.prob * dnorm( .quants, mean=dist2.par1, sd=dist2.par2)
        })
    } else if (x$method == 'facing.gamma') {
        dist1 <- with(.est, {
            (1-mix.prob) * dgamma( .quants - x$lower, shape=dist1.par1, scale=dist1.par2)
        })
        dist2 <- with(.est, {
            mix.prob * dgamma( x$upper - .quants, shape=dist2.par1, scale=dist2.par2)
        })
    } else {
        ## BUG FIX: stop() does not apply sprintf-style formatting; the old
        ## call produced a message containing a literal "%s".
        stop(sprintf('Plotting not implemented for method %s', x$method))
    }
    dist.mix <- dist1 + dist2
    ## Histogram of the observations; ylim chosen so the mixture curve is
    ## never cut off.
    hist(.dat, freq=FALSE, ylim=c(0, max(dist.mix)), main=main, xlab=xlab)
    ## Raw observations as points along the x axis
    points(.dat, rep(0, length(.dat)))
    ## Over-plot individual components (dotted) and their mixture (solid)
    lines( .quants, dist1, lty=3)
    lines( .quants, dist2, lty=3)
    lines( .quants, dist.mix )
    if (legend) {
        ## Collapse estimated parameter names and values into one string,
        ## one parameter per line
        .param.text <- paste(paste( names(.est), round(as.numeric(.est), digits=legend.digits), sep='='), collapse='\n')
        if( x$method == 'facing.gamma') {
            ## Also report the fixed support bounds
            .bounds.text <- paste(paste( c('Lower bound', 'Upper bound'), c(x$lower, x$upper), sep='='), collapse='\n')
            .param.text <- paste(.param.text, .bounds.text, sep='\n')
        }
        ## "Legend" with the negative log likelihood and parameter estimates
        grid.text(x=legend.x, y=legend.y, sprintf('Negative logLik = %s\nMLE Parameter Estimates:\n%s', round(x$neglogLik, digits=legend.digits), .param.text), just=c('right', 'top'), gp=gpar(fontsize=legend.fontsize))
    }
    invisible(NULL)
}
|
08c184fc22778aa81c8d41ba655ed5653e7e64d9
|
6d03d53a99e228c29a9cdadbb58508de30905e16
|
/man/gg_miss_span.Rd
|
b0844d4b3d1ddd78a6c04cd95b5f87b33cb9591a
|
[] |
no_license
|
rpodcast/naniar
|
4e8f6547d4aed9cbe7d7b189ce93cd25ea76b554
|
b67795b110a25315894e02c433433e3965127d68
|
refs/heads/master
| 2021-06-22T06:39:47.063573
| 2017-07-31T08:52:15
| 2017-07-31T08:52:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,070
|
rd
|
gg_miss_span.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{gg_miss_span}
\alias{gg_miss_span}
\title{Plot the number of missings in a given repeating span}
\usage{
gg_miss_span(data, var, span_every)
}
\arguments{
\item{data}{data.frame}
\item{var}{a bare unquoted variable name from the data.frame}
\item{span_every}{integer describing the length of the span to be explored}
}
\value{
ggplot2 object
}
\description{
`gg_miss_span` is a replacement for
`imputeTS::plotNA.distributionBar(tsNH4, breaksize = 100)`, which shows the
number of missings in a given span, or breaksize. The produced plot is a
ggplot object which you can customise the way you wish, just like classic
ggplot.
}
\examples{
miss_var_span(pedestrian, hourly_counts, span_every = 3000)
library(ggplot2)
gg_miss_span(pedestrian, hourly_counts, span_every = 3000)
# works with the rest of ggplot
gg_miss_span(pedestrian, hourly_counts, span_every = 3000) + labs(x = "custom")
gg_miss_span(pedestrian, hourly_counts, span_every = 3000) + theme_dark()
}
|
33cf2ac875a033997b5d6e777a919482e002e5e0
|
49679b97305617476aa1acd685ae31e0c7fadb87
|
/All data extract/All data extract EXP5 EARLY AP5.R
|
7a36038c6f6eb94e6b20f312568053723e309b4f
|
[] |
no_license
|
mvegavillar/Accumbens-Rew-learning
|
2541e07dc6e93f7ea1b39516f783f75f97470a20
|
be221cf5777ec62365927213c613bc9dd6066664
|
refs/heads/master
| 2020-05-24T11:19:13.151823
| 2019-07-09T17:01:57
| 2019-07-09T17:01:57
| 187,246,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 97,474
|
r
|
All data extract EXP5 EARLY AP5.R
|
#############################################################
###       EXPERIMENT 5A: EARLY AP5 VS VEH TEST            ###
#############################################################
### LOAD IMPORTANT LIBRARIES
# NOTE(review): install.packages() here re-installs on every run; consider
# commenting these two lines out after the first installation.
install.packages("matrixStats")
install.packages('ez')
library(matrixStats)  # colSds() used for by-bin SEM error bars below
library(ez)           # ANOVA utilities
# Root folder of this experiment; all sub-folders below are built from it
Exp1folder <- "E:/Dropbox/NMDA/EXP5_Performance/"
##########################
##########################
### LOAD FUNCTIONS ###
##########################
##########################
# Folder holding the shared analysis functions (saved as .r / .Rfunc images)
funcdirect <- "E:/Dropbox/NMDA/R functions/"
CPfuncFolder <- paste(funcdirect, 'Change_Point-master/', sep="")
# Load every saved helper from funcdirect — same files, in the same order as
# before, just driven by a filename vector instead of 13 separate load() calls
helper_files <- c(
  "MedPCextract.r", "mpcextract_blockSingle.Rfunc", "CPextract.r",
  "neuralhist.r", "FRbyNEURONbyBINcue.r", "errBars.r", "errCloud.r",
  "psthInf.r", "BinIndexCalculator.R", "dotplot.r", "KC.sigbins.R",
  "KC.inhib.sigbins.R", "prePostInf_FR.r")
for (helper in helper_files) {
  load(file = paste(funcdirect, helper, sep = ""))
}
###########################
###########################
###  DEFINE FOLDERS  ###
###########################
###########################
# Define folders for ONE group *OR* the other before running the rest of the
# code (not both, because the second section would overwrite the folders
# defined by the first). Each section sets the same variable names so the
# downstream code is group-agnostic.
### EARLY VEH #################################################################
subTestFolder <- paste(Exp1folder, "Early VEH/", sep="")
datafolder <- paste(subTestFolder, "MedPC files/", sep="")
dataForRdir <- paste(subTestFolder, "Data for R/", sep="")
dataForRCumulative <- paste(subTestFolder, "Data for R cumulative/", sep="")
dataForRCumulativeEarlyVEH <- dataForRCumulative
behGraphFolder <- paste(subTestFolder, "Graphs/Behavior/", sep="")
neuGraphFolder <- paste(Exp1folder, "Graphs/Neuronal/", sep="")
MixedGraphFolder <- paste(subTestFolder, "Graphs/Mixed/", sep="")
CPGraphFolder <- paste(subTestFolder, "Graphs/Behavior/Change point/", sep="")
NEXfiles <- paste(subTestFolder, "NEX files/", sep="")
preVsPostFRFolder <- "E:/Dropbox/NMDA/EXP5_Performance/Graphs/Neuronal/FR pre vs post scatterplot/"
### EARLY AP5 #################################################################
subTestFolder <- paste(Exp1folder, "Early AP5/", sep="")
datafolder <- paste(subTestFolder, "MedPC files/", sep="")
dataForRdir <- paste(subTestFolder, "Data for R/", sep="")
dataForRCumulative <- paste(subTestFolder, "Data for R cumulative/", sep="")
dataForRCumulativeEarlyAP5 <- dataForRCumulative
behGraphFolder <- paste(subTestFolder, "Graphs/Behavior/", sep="")
MixedGraphFolder <- paste(subTestFolder, "Graphs/Mixed/", sep="")
CPGraphFolder <- paste(subTestFolder, "Graphs/Behavior/Change point/", sep="")
NEXfiles <- paste(subTestFolder, "NEX files/", sep="")
################################
################################
### CREATE IMPORTANT OBJECTS ###
################################
################################
# Extract behavioral data from the MedPC files. MedPCextract() saves its
# output objects into dataForRdir / dataForRCumulative; they must then be
# load()ed to appear in the workspace (next lines).
# It may print errors when a file's first cue comes on within 5 s of session
# onset — that trial is just assigned NA, which is the intended behavior.
MedPCextract(MovAvg="Impinged only", cuelength=10, funcdirect = funcdirect, datafolder = datafolder, dataForRdir = dataForRdir, dataForRCumulative=dataForRCumulative)
# Load the generated behavior objects; key ones are 'alldata' (per-session
# detail per animal) and 'csacqidx' (file index).
files <- paste(dataForRdir, list.files(dataForRdir), sep=""); for(i in 1:length(files)){load(files[[i]])}
filesCum <- paste(dataForRCumulative, list.files(dataForRCumulative), sep=""); for(i in 1:length(filesCum)){load(filesCum[[i]])}
# BinIndexCalculator() creates DSbinIdx, NSbinIdx and AllCueBinIdx: per-rat
# indexes mapping each event to its time bin. They are saved to
# dataForRCumulative, hence the reload.
binsize <- 600
BinIndexCalculator(data=alldata, binsize=binsize, sessLength = 9000); filesCum <- paste(dataForRCumulative, list.files(dataForRCumulative), sep=""); for(i in 1:length(filesCum)){load(filesCum[[i]])}
# Create per-rat, per-bin objects for each behavioral measure. Every measure
# follows the same computation: for each rat, collect that rat's trial
# values falling in each time bin and summarize the bin. binSummary()
# factors that shared pattern out; results are identical to the previous
# hand-unrolled lapply/sapply versions.
binSummary <- function(perRat, binIdx, nBins, summarize) {
  lapply(seq_along(perRat), function(r) {
    sapply(seq_len(nBins), function(b) {
      summarize(perRat[[r]][binIdx[[r]] == b])
    })
  })
}
# Response ratio = responded trials / trials in bin (no NA removal, as before)
ratioOfResponses <- function(v) sum(v) / length(v)
# Plain NA-robust mean for latency / accuracy measures
binAverage <- function(v) mean(v, na.rm = TRUE)
# Number of bins every rat has data for (bounded by the shortest session)
minBinNo <- min(sapply(DSbinIdx, max))
# Response ratio per bin
DSrespRatioByBin <- binSummary(DSrespAll, DSbinIdx, minBinNo, ratioOfResponses)
NSrespRatioByBin <- binSummary(NSrespAll, NSbinIdx, minBinNo, ratioOfResponses)
# Mean latency per bin
DSlatencyByBin <- binSummary(DSlatency, DSbinIdx, minBinNo, binAverage)
NSlatencyByBin <- binSummary(NSlatency, NSbinIdx, minBinNo, binAverage)
# Task accuracy per bin
DStaskAccByBin <- binSummary(DStaskAcc, DSbinIdx, minBinNo, binAverage)
NStaskAccByBin <- binSummary(NStaskAcc, NSbinIdx, minBinNo, binAverage)
# ITI latency per bin (binned over all cues, hence AllCueBinIdx)
ITIlatByBin <- binSummary(ITIlatency, AllCueBinIdx, minBinNo, binAverage)
# Long-format task-accuracy tables (one row per rat per bin) for later
# statistical analyses.
# NOTE: the drug label is hard-coded — set it to "AP5" when this script is
# run for the AP5 group (as in the original).
taskAccLong <- function(accByBin, cueLabel, drugLabel) {
  do.call("rbind", lapply(seq_along(accByBin), function(k) {
    data.frame(rat = as.character(rats[[k]]),
               bin = seq_along(accByBin[[k]]),
               drug = drugLabel,
               perf = accByBin[[k]],
               index = cueLabel)
  }))
}
DStaskAccByBin_LongFormat <- taskAccLong(DStaskAccByBin, "DS", "VEH")
NStaskAccByBin_LongFormat <- taskAccLong(NStaskAccByBin, "NS", "VEH")
### Make a long-format object with all these data for statistical analyses
# Run all of the above lines FIRST for VEH rats, then this section.
# Windows of interest: the 30 min before the infusion (baseline) and the
# 30 min starting when the 12-min infusion ends.
PreInfLength <- 30*60            # baseline length (s); always 30 min in the MedPC code
PostInfStart <- (30*60)+12*60    # infusion end (s) = start of the post window
PostInfEnd <- PostInfStart+30*60 # end of the post window (s); 30 min to match baseline
BLbinIndex <- (1:minBinNo)[1:(PreInfLength/binsize)]                              # baseline bins
PostInfBinIndex <- (1:minBinNo)[ceiling(PostInfStart/binsize):(PostInfEnd/binsize)] # post-infusion bins
byBinDataEarlyVEH <- list(DSrespRatioByBin, NSrespRatioByBin, DSlatencyByBin, NSlatencyByBin, DStaskAccByBin, NStaskAccByBin, ITIlatByBin)
IndexLabel <- c("S+.RR", "S-.RR", "S+.Latency", "S-.Latency", "S+.Spec.", "S-.Spec.", "ITI.Latency.")
EarlyVEH_LongFormat <- do.call("rbind", lapply(seq(1, length(byBinDataEarlyVEH)), function(x){ # for each behavioral index
  mat <- do.call("rbind", byBinDataEarlyVEH[[x]])     # rows = rats, columns = bins
  if(length(BLbinIndex)>1){
    BLmean <- rowMeans(mat[,BLbinIndex], na.rm=T)     # per-rat mean PRE infusion
  } else {
    # BUG FIX: with a single baseline bin, mat[,BLbinIndex] is already the
    # per-rat vector; the old mean() collapsed it to one number across all
    # rats, which then broke the length of Performance in the data.frame.
    BLmean <- mat[,BLbinIndex]
  }
  PostMean <- rowMeans(mat[,PostInfBinIndex], na.rm=T) # per-rat mean POST infusion
  ratnames <- paste("VEH", 1:nrow(mat), sep="_")
  return(data.frame(Drug="VEH", Rat=ratnames, Index=IndexLabel[x], Infusion=c(rep("Pre", nrow(mat)), rep("Post", nrow(mat))), Performance=c(BLmean, PostMean)))
}))
EarlyVEH_DStaskAccByBin_LongFormat <- DStaskAccByBin_LongFormat
EarlyVEH_NStaskAccByBin_LongFormat <- NStaskAccByBin_LongFormat
save(EarlyVEH_LongFormat, file=paste(dataForRdir, "EarlyVEH_LongFormat.rdat", sep=""))
save(EarlyVEH_DStaskAccByBin_LongFormat, file=paste(dataForRdir, "EarlyVEH_DStaskAccByBin_LongFormat.rdat", sep=""))
save(EarlyVEH_NStaskAccByBin_LongFormat, file=paste(dataForRdir, "EarlyVEH_NStaskAccByBin_LongFormat.rdat", sep=""))
# Then repeat the preprocessing above for AP5 rats and run this section.
byBinDataEarlyAP5 <- list(DSrespRatioByBin, NSrespRatioByBin, DSlatencyByBin, NSlatencyByBin, DStaskAccByBin, NStaskAccByBin, ITIlatByBin)
IndexLabel <- c("S+.RR", "S-.RR", "S+.Latency", "S-.Latency", "S+.Spec.", "S-.Spec.", "ITI.Latency.")
EarlyAP5_LongFormat <- do.call("rbind", lapply(seq(1, length(byBinDataEarlyAP5)), function(x){ # for each behavioral index
  mat <- do.call("rbind", byBinDataEarlyAP5[[x]])      # rows = rats, columns = bins
  # Robustness (mirrors the VEH section): with a single baseline bin,
  # rowMeans() would fail, so take the column directly in that case.
  if(length(BLbinIndex)>1){
    BLmean <- rowMeans(mat[,BLbinIndex], na.rm=T)      # per-rat mean PRE infusion
  } else {
    BLmean <- mat[,BLbinIndex]
  }
  PostMean <- rowMeans(mat[,PostInfBinIndex], na.rm=T) # per-rat mean POST infusion
  ratnames <- paste("AP5", 1:nrow(mat), sep="_")
  return(data.frame(Drug="AP5", Rat=ratnames, Index=IndexLabel[x], Infusion=c(rep("Pre", nrow(mat)), rep("Post", nrow(mat))), Performance=c(BLmean, PostMean)))
}))
EarlyAP5_DStaskAccByBin_LongFormat <- DStaskAccByBin_LongFormat
EarlyAP5_NStaskAccByBin_LongFormat <- NStaskAccByBin_LongFormat
save(EarlyAP5_LongFormat, file=paste(dataForRdir, "EarlyAP5_LongFormat.rdat", sep=""))
save(EarlyAP5_DStaskAccByBin_LongFormat, file=paste(dataForRdir, "EarlyAP5_DStaskAccByBin_LongFormat.rdat", sep=""))
save(EarlyAP5_NStaskAccByBin_LongFormat, file=paste(dataForRdir, "EarlyAP5_NStaskAccByBin_LongFormat.rdat", sep=""))
### Combine both groups into single long-format tables and save
Early_LongFormat <- rbind(EarlyVEH_LongFormat, EarlyAP5_LongFormat)
Early_LongFormatByBin <- rbind(EarlyVEH_DStaskAccByBin_LongFormat, EarlyAP5_DStaskAccByBin_LongFormat)
Early_LongFormatByBin_DSandNS <- rbind(EarlyVEH_DStaskAccByBin_LongFormat, EarlyVEH_NStaskAccByBin_LongFormat,
                                       EarlyAP5_DStaskAccByBin_LongFormat, EarlyAP5_NStaskAccByBin_LongFormat)
save(Early_LongFormat, file=paste(dataForRdir, "Early_LongFormat.rdat", sep=""))
save(Early_LongFormatByBin, file=paste(dataForRdir, "Early_LongFormatByBin.rdat", sep=""))
save(Early_LongFormatByBin_DSandNS, file=paste(dataForRdir, "Early_LongFormatByBin_DSandNS.rdat", sep=""))
# Extract neuronal data from NEX files.
# Pre-infusion window: first 30 min (0-1800 s). Post window: infusion end
# (1800 s + 12 min) through +30 min. Event codes (per the object names —
# TODO confirm against the NEX channel map): 1 = DS onset, 2 = NS onset,
# 9 = port entry after DS, 14 = port entry after NS, 10 = ITI port entry.
#VEH test: data aligned to DS and NS onset BEFORE and AFTER the infusion.
postInfTargetWdw <- 1800+12*60+30*60 #For the post infusion window, I'll choose the period between the end of the infusion +30'.
allNeuronsDSEarlyVEHPreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=1, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsDSEarlyVEHPostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=1, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsNSEarlyVEHPreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=2, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsNSEarlyVEHPostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=2, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryDSEarlyVEHPreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=9, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryDSEarlyVEHPostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=9, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryNSEarlyVEHPreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=14, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryNSEarlyVEHPostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=14, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryITIEarlyVEHPreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=10, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryITIEarlyVEHPostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=10, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
#AP5 test: same extraction, run after pointing NEXfiles at the AP5 group.
postInfTargetWdw <- 1800+12*60+30*60 #For the post infusion window, I'll choose the period between the end of the infusion +30'.
allNeuronsDSEarlyAP5PreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=1, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsDSEarlyAP5PostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=1, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsNSEarlyAP5PreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=2, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsNSEarlyAP5PostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=2, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryDSEarlyAP5PreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=9, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryDSEarlyAP5PostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=9, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryNSEarlyAP5PreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=14, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryNSEarlyAP5PostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=14, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryITIEarlyAP5PreInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=10, startt=0, endt=1800, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
allNeuronsEntryITIEarlyAP5PostInf <- neuralhist (funcdirect=funcdirect, path=NEXfiles, event=10, startt=2520, endt=postInfTargetWdw, binw=50, psthmin=2, psthmax=10, cueexonly=F, allResults=T, side="both")
### GIVE THESE OBJECTS A UNIQUE NAME
## VEH SIDE
# csacqidxEarlyVEH <- csacqidx
# alldataEarlyVEH <- alldata
# ratsEarlyVEH <- rats
# idxEarlyVEH <- idx
# cumDataEarlyVEH <- list(DSrespAll, DStaskAcc, DStimeToSpare, NSrespAll, NStaskAcc, NStimeToSpare)
#
# ## AP5 SIDE
# csacqidxEarlyAP5 <- csacqidx
# alldataEarlyAP5 <- alldata
# ratsEarlyAP5 <- rats
# idxEarlyAP5 <- idx
# cumDataEarlyAP5 <- list(DSrespAll, DStaskAcc, DStimeToSpare, NSrespAll, NStaskAcc, NStimeToSpare)
######################################################
######################################################
###                 PLOT GRAPHS                    ###
######################################################
######################################################
###################
### 1. BEHAVIOR ###
###################
# Bin indexes for the pre/post comparison (re-derived here so the plotting
# section can run standalone): 30 min pre-infusion baseline and the 30 min
# that begin when the 12-min infusion ends.
PreInfLength <- 30*60            # baseline length (s)
PostInfStart <- (30*60)+12*60    # infusion end (s)
PostInfEnd <- PostInfStart+30*60 # end of the post window (s)
BLbinIndex <- (1:minBinNo)[1:(PreInfLength/binsize)]
PostInfBinIndex <- (1:minBinNo)[ceiling(PostInfStart/binsize):(PostInfEnd/binsize)]
# Plot a group's pre/post-infusion line (mean +/- SEM at x = 0 and x = 1)
# on the current device.
#   data    list (one element per rat) of per-bin values
#   color   line / point / error-bar color
#   pch     point symbol; 22 additionally gets a white fill
#   scores  "absolute" = raw means; "percentBL" = post expressed as % of
#           each rat's own baseline
#   jitter  horizontal offset for the error bars (unclutters overlapping groups)
# Relies on globals BLbinIndex / PostInfBinIndex and the errBars() helper.
# Changes vs. previous version: scores is validated with match.arg() so a
# typo fails immediately; removed the unused BLmeanAll/PostMeanAll locals.
plotPrePostLines <- function(data, color, pch, scores, jitter=0){
  scores <- match.arg(scores, c("absolute", "percentBL"))
  mat <- do.call("rbind", data) # rows = rats, columns = bins
  if(scores=="absolute"){
    BLmean <- rowMeans(mat[,BLbinIndex], na.rm=TRUE)        # per-rat PRE mean
    PostMean <- rowMeans(mat[,PostInfBinIndex], na.rm=TRUE) # per-rat POST mean
  } else {
    BLmeanEach <- rowMeans(mat[,BLbinIndex], na.rm=TRUE)
    PostMeanEach <- rowMeans(mat[,PostInfBinIndex], na.rm=TRUE)
    # Baseline as % of itself (100 by construction; NaN if a rat's BL is 0/NA)
    BLmean <- (BLmeanEach/BLmeanEach)*100
    PostMean <- (PostMeanEach/BLmeanEach)*100
  }
  lines(x=c(0, 1), y=c(mean(BLmean), mean(PostMean)), col=color, cex=2)
  errBars(x=c(0, 1), y=c(mean(BLmean), mean(PostMean)), err=c(sd(BLmean)/sqrt(length(BLmean)), sd(PostMean)/sqrt(length(PostMean))), color=color, jitter=jitter)
  points(x=c(0, 1), y=c(mean(BLmean), mean(PostMean)), pch=pch, col=color, cex=2)
  # Open squares get a white fill so overlapping lines stay readable
  if(pch==22){points(x=c(0, 1), y=c(mean(BLmean), mean(PostMean)), pch=pch, col=color, cex=2, bg="white")}
}
# Draw paired pre/post bars (group means) centered at xmiddle, with one thin
# line per rat connecting its own pre and post values.
#   data      list (one element per rat) of per-bin values
#   color     bar fill color
#   xmiddle   x position of the pre/post boundary
#   barwidth  width of each bar
#   labelY    y position of the "Pre"/"Post" labels
#   colLabel  label color
# Relies on globals BLbinIndex / PostInfBinIndex.
plotPrePostBars <- function(data, color, xmiddle, barwidth, labelY, colLabel){
  byRat <- do.call("rbind", data)                          # rows = rats, cols = bins
  preMeans <- rowMeans(byRat[,BLbinIndex], na.rm=T)        # per-rat PRE mean
  postMeans <- rowMeans(byRat[,PostInfBinIndex], na.rm=T)  # per-rat POST mean
  # Pre bar (left half) and its label
  rect(xleft=xmiddle-barwidth, xright=xmiddle, ybottom=0, ytop=mean(preMeans), col=color, border="white")
  text(x=xmiddle-barwidth/2, y=labelY, labels="Pre", col=colLabel, font=2)
  # Post bar (right half) and its label
  rect(xleft=xmiddle, xright=xmiddle+barwidth, ybottom=0, ytop=mean(postMeans), col=color, border="white")
  text(x=xmiddle+barwidth/2, y=labelY, labels="Post", col=colLabel, font=2)
  # One connecting line per rat
  for(ratIdx in seq_along(data)){
    lines(x=c(xmiddle-barwidth/2, xmiddle+barwidth/2), y=c(preMeans[ratIdx], postMeans[ratIdx]))
  }
}
# Color palettes: element 1 = VEH (blues), element 2 = AP5 (reds), in
# decreasing saturation (colindx strongest -> colindxD lightest)
colindx <- c("#2171b5", "#cb181d") #Strong blue and red
colindxB <- c("#bdd7e7", "#fcae91") #Less strong blue and red
colindxC <- c("#eff3ff", "#fb6a4a") #Even less strong blue and red
colindxD <- c("#6baed6", "#fee5d9") #Lightest blue and red
#### 1.1. RESPONSE RATIO
### 1.1.1. Response ratio: S+ and S- responding pre vs. post infusion in AP5 vs. VEH
# byBinDataEarly*[[1]] = S+ response ratio by bin, [[2]] = S- response ratio
## 1.1.1.1. Absolute scores
plot.new()
par(oma=c(2,2,2,2))
plot.window(xlim=c(0, 1), ylim=c(0, 1))
plotPrePostLines(data=byBinDataEarlyVEH[[1]], color=colindx[1], pch=15, scores="absolute") # VEH group, S+
plotPrePostLines(data=byBinDataEarlyAP5[[1]], color=colindx[2], pch=15, scores="absolute") # AP5 group, S+
plotPrePostLines(data=byBinDataEarlyVEH[[2]], color=colindx[1], pch=22, scores="absolute", jitter=0.015) # VEH group, S-
plotPrePostLines(data=byBinDataEarlyAP5[[2]], color=colindx[2], pch=22, scores="absolute", jitter=0.015) # AP5 group, S-
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1.4, font=2)
# BUG FIX: labels= was accidentally nested inside seq() and silently ignored
# (seq.default swallows unknown arguments via ...); it is now a proper
# argument of axis().
axis(side=2, at=seq(0, 1, by=0.2), labels=seq(0, 1, 0.2), font=2, las=2, pos=-0.1)
mtext(side=2, line=4, text="Proportion", cex=1.4, font=2)
legend("bottomright", legend = c("S+", "S-"), pch = c(15, 22), bty = "n" )
legend("bottomleft", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n")
## 1.1.1.2. Percentage of BL
plot.new()
par(oma=c(2,2,2,2))
plot.window(xlim=c(0, 1), ylim=c(0, 120))
plotPrePostLines(data=byBinDataEarlyVEH[[1]], color=colindx[1], pch=15, scores="percentBL") # VEH group, S+
plotPrePostLines(data=byBinDataEarlyAP5[[1]], color=colindx[2], pch=15, scores="percentBL") # AP5 group, S+
# (S- curves intentionally omitted here: they cluttered the figure)
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1.4, font=2)
axis(side=2, at=seq(0, 120, by=20), labels=seq(0, 120, 20), font=2, las=2, pos=-0.1)
# BUG FIX: mtext() has no cex.axis argument (it was being ignored); cex= is
# the intended parameter.
mtext(side=2, line=4, text="% of BL response ratio", font=2, cex=1.5)
legend("bottomleft", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n")
### 1.1.2. Response ratio: S+ and S- responding by bin on test day in AP5 vs. VEH
## 1.1.2.1. Absolute scores
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(0, 1))
# Shade the infusion period (1800 s start, 12 min duration) as a gray band;
# screenPerSec converts session seconds into x-axis (bin) units
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=0, ytop=1.2, col="gray95", border="white")
#lapply(seq(1, length(ratsEarlyVEH)), function(x) {lines(byBinDataEarlyVEH[[1]][[x]], col=colindx[1])})
#lapply(seq(1, length(ratsEarlyAP5)), function(x) {lines(byBinDataEarlyAP5[[1]][[x]], col=colindx[2])})
# Group mean +/- SEM per bin; colSds() comes from matrixStats
matVEH <- do.call("rbind", byBinDataEarlyVEH[[1]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[1]])
lines(colMeans(matVEH), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEH), err=colSds(matVEH)/sqrt(nrow(matVEH)), color=colindx[1])
points(colMeans(matVEH), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5), err=colSds(matAP5)/sqrt(nrow(matAP5)), color=colindx[2])
points(colMeans(matAP5), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(0, 1, by=0.2), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.4)
mtext(side=2, line=2, text="Proportion", font=2, cex=1.4)
legend("bottomright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
## 1.1.2.2. Percentage of BL
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(0, 120))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=0, ytop=120, col="gray95", border="white")
# Express each group's by-bin curve as a percentage of that group's mean
# baseline (bins in BLbinIndex)
matVEH <- do.call("rbind", byBinDataEarlyVEH[[1]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[1]])
VEHbl <- colMeans(matVEH[,BLbinIndex], na.rm=T); AP5bl <- colMeans(matAP5[,BLbinIndex], na.rm=T)
matVEHperc <- (matVEH/VEHbl)*100
matAP5perc <- (matAP5/AP5bl)*100
#Plot group mean +/- SEM per bin
lines(colMeans(matVEHperc), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEHperc), err=colSds(matVEHperc)/sqrt(nrow(matVEHperc)), color=colindx[1])
points(colMeans(matVEHperc), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5perc), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5perc), err=colSds(matAP5perc)/sqrt(nrow(matAP5perc)), color=colindx[2])
points(colMeans(matAP5perc), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(0, 120, by=20), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.4)
mtext(side=2, line=2, text="% of baseline", font=2, cex=1.4)
legend("bottomright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
### 1.1.3. Response ratio: barplots of pre and post infusion, S+ vs S- and VEH vs AP5
plot.new()
plot.window(xlim=c(0, 6), ylim=c(0, 1))
#S+ both groups pre and post infusion
plotPrePostBars(data=byBinDataEarlyVEH[[1]], color=colindx[1], xmiddle=1, barwidth=0.5, colLabel = "white", labelY = 0.05)
plotPrePostBars(data=byBinDataEarlyAP5[[1]], color=colindx[2], xmiddle=2, barwidth=0.5, colLabel = "white", labelY = 0.05)
#S- both groups pre and post infusion
plotPrePostBars(data=byBinDataEarlyVEH[[2]], color=colindxB[1], xmiddle=3.25, barwidth=0.5, colLabel = "black", labelY = 0.05)
plotPrePostBars(data=byBinDataEarlyAP5[[2]], color=colindxB[2], xmiddle=4.25, barwidth=0.5, colLabel = "black", labelY = 0.05)
#Axis and labels
axis(side=1, tick = F, at=c(1.5, 3.75), labels=c("S+", "S-"), cex.axis=1.4, font=2)
axis(side=2, at=seq(0, 1, 0.2), cex.axis=1, font=2, las=2)
mtext(side=2, line=2.5, text="Response ratio", cex=1.4, font=2)
rect(xleft=3, xright=3.5, ybottom=0.95, ytop=1, col=colindx[1], border="white")
rect(xleft=3.5, xright=4, ybottom=0.95, ytop=1, col=colindxB[1], border="white")
rect(xleft=3, xright=3.5, ybottom=0.85, ytop=0.9, col=colindx[2], border="white")
rect(xleft=3.5, xright=4, ybottom=0.85, ytop=0.9, col=colindxB[2], border="white")
text(x=4.5, y=0.98, labels="VEH", cex=1.5)
text(x=4.5, y=0.88, labels="AP5", cex=1.5)
#### 1.2. CUED LATENCY
# In the objects 'byBinDataEarlyVEH' and 'byBinDataEarlyAP5', the 3rd and 4th items are DSlatency and NSlatency by subject by bin
### 1.2.1. Cued latency: S+ and S- latency pre vs. post infusion in AP5 vs. VEH
## 1.2.1.1. Absolute scores
plot.new()
plot.window(xlim=c(0, 1), ylim=c(0, 10))
plotPrePostLines(data=byBinDataEarlyVEH[[3]], color=colindx[1], pch=15, scores="absolute") #VEH group, S+
plotPrePostLines(data=byBinDataEarlyAP5[[3]], color=colindx[2], pch=15, scores="absolute") #AP5 group, S+
plotPrePostLines(data=byBinDataEarlyVEH[[4]], color=colindx[1], pch=22, scores="absolute", jitter=0.015) #VEH group, S-
plotPrePostLines(data=byBinDataEarlyAP5[[4]], color=colindx[2], pch=22, scores="absolute", jitter=0.015) #AP5 group, S-
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1.4, font=2)
# Fixed: 'labels=' was accidentally nested inside seq() -- seq() silently absorbs unknown
# named arguments via '...', so axis() never received the labels. Moved it to axis().
axis(side=2, at=seq(0, 10, by=2), labels=seq(0, 10, 2), font=2, las=2, pos=-0.05)
mtext(side=2, line=2.5, text="Latency (s)", cex=1.4, font=2)
legend("bottomright", legend = c("S+", "S-"), pch = c(15, 22), bty = "n" )
legend("bottomleft", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n")
## 1.2.1.2. Percentage of BL
# S+ latency pre vs. post infusion as % of baseline (S- omitted on purpose, see below).
plot.new()
plot.window(xlim=c(0, 1), ylim=c(0, 400))
plotPrePostLines(data=byBinDataEarlyVEH[[3]], color=colindx[1], pch=15, scores="percentBL") #VEH group, S+
plotPrePostLines(data=byBinDataEarlyAP5[[3]], color=colindx[2], pch=15, scores="percentBL") #AP5 group, S+
#plotPrePostLines(data=byBinDataEarlyVEH[[2]], color=colindx[1], pch=22, scores="percentBL", jitter=0.015) #VEH group, S-. It's confusing so I'm not plotting it
#plotPrePostLines(data=byBinDataEarlyAP5[[2]], color=colindx[2], pch=22, scores="percentBL", jitter=0.015) #AP5 group, S-
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1.4, font=2)
axis(side=2, at=seq(0, 400, by=50), labels=seq(0, 400, 50), font=2, las=2, pos=-0.05)
mtext(side=2, line=3, text="% of BL latency", font=2, cex=1.4)
#legend("bottomright", legend = c("S+", "S-"), pch = c(15, 22), bty = "n" )
legend("bottomleft", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n", cex=1.5)
### 1.2.2. Cued latency: S+ and S- responding by bin on test day in AP5 vs. VEH
## 1.2.2.1. Absolute scores
# S+ latency per bin; group means with SEM bars, infusion window shaded.
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(0, 10))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=0, ytop=10, col="gray95", border="white")
#lapply(seq(1, length(ratsEarlyVEH)), function(x) {lines(byBinDataEarlyVEH[[1]][[x]], col=colindx[1])})
#lapply(seq(1, length(ratsEarlyAP5)), function(x) {lines(byBinDataEarlyAP5[[1]][[x]], col=colindx[2])})
matVEH <- do.call("rbind", byBinDataEarlyVEH[[3]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[3]])
lines(colMeans(matVEH), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEH), err=colSds(matVEH)/sqrt(nrow(matVEH)), color=colindx[1])
points(colMeans(matVEH), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5), err=colSds(matAP5)/sqrt(nrow(matAP5)), color=colindx[2])
points(colMeans(matAP5), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(0, 10, by=2), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.4)
mtext(side=2, line=1, text="Latency (s)", font=2, cex=1.4)
legend("topright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
## 1.2.2.2. Percentage of BL
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(0, 300))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=0, ytop=300, col="gray95", border="white")
#Get data ready
matVEH <- do.call("rbind", byBinDataEarlyVEH[[3]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[3]])
VEHbl <- colMeans(matVEH[,BLbinIndex], na.rm=T); AP5bl <- colMeans(matAP5[,BLbinIndex], na.rm=T)
# NOTE(review): matrix/vector division recycles the baseline vector column-wise;
# confirm this matches the intended baseline normalization (see note in section 1.1.2.2).
matVEHperc <- (matVEH/VEHbl)*100
matAP5perc <- (matAP5/AP5bl)*100
#Plot
lines(colMeans(matVEHperc), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEHperc), err=colSds(matVEHperc)/sqrt(nrow(matVEHperc)), color=colindx[1])
points(colMeans(matVEHperc), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5perc), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5perc), err=colSds(matAP5perc)/sqrt(nrow(matAP5perc)), color=colindx[2])
points(colMeans(matAP5perc), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(0, 300, by=50), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.2)
mtext(side=2, line=1, text="% of baseline latency", font=2, cex=1.2)
legend("topright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
### 1.2.3. Cued latency: barplots of pre and post infusion, S+ vs S- and VEH vs AP5
plot.new()
plot.window(xlim=c(0, 6), ylim=c(0, 10))
#S+ both groups pre and post infusion
plotPrePostBars(data=byBinDataEarlyVEH[[3]], color=colindx[1], xmiddle=1, barwidth=0.5, colLabel = "white", labelY = 0.5)
plotPrePostBars(data=byBinDataEarlyAP5[[3]], color=colindx[2], xmiddle=2, barwidth=0.5, colLabel = "white", labelY = 0.5)
#S- both groups pre and post infusion
plotPrePostBars(data=byBinDataEarlyVEH[[4]], color=colindxB[1], xmiddle=3.25, barwidth=0.5, colLabel = "black", labelY = 0.5)
plotPrePostBars(data=byBinDataEarlyAP5[[4]], color=colindxB[2], xmiddle=4.25, barwidth=0.5, colLabel = "black", labelY = 0.5)
#Axis and labels
axis(side=1, tick = F, at=c(1.5, 3.75), labels=c("S+", "S-"), cex.axis=1.4, font=2)
axis(side=2, at=seq(0, 10, 2), cex.axis=1, font=2, las=2)
mtext(side=2, line=2.5, text="Latency (s)", cex=1.4, font=2)
# Hand-drawn color key.
rect(xleft=0, xright=0.5, ybottom=9.5, ytop=10, col=colindx[1], border="white")
rect(xleft=0.5, xright=1, ybottom=9.5, ytop=10, col=colindxB[1], border="white")
rect(xleft=0, xright=0.5, ybottom=8.5, ytop=9, col=colindx[2], border="white")
rect(xleft=0.5, xright=1, ybottom=8.5, ytop=9, col=colindxB[2], border="white")
text(x=1.5, y=9.8, labels="VEH", cex=1.5)
text(x=1.5, y=8.8, labels="AP5", cex=1.5)
#### 1.3. ITI latency
### 1.3.1. ITI latency: ITI latency pre vs. post infusion in AP5 vs. VEH
## 1.3.1.1. Absolute scores
# Item [[7]] of the byBin objects holds ITI latency per subject per bin.
plot.new()
plot.window(xlim=c(0, 1), ylim=c(0, 10))
plotPrePostLines(data=byBinDataEarlyVEH[[7]], color=colindx[1], pch=15, scores="absolute") #VEH group, ITI latency
plotPrePostLines(data=byBinDataEarlyAP5[[7]], color=colindx[2], pch=15, scores="absolute") #AP5 group, ITI latency
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1.4, font=2)
axis(side=2, at=seq(0, 10, by=2), font=2, las=2, pos=-0.05)
mtext(side=2, line=2.5, text="ITI latency (s)", cex=1.4, font = 2)
# NOTE(review): the S+/S- legend below is a leftover from the cued plots -- only ITI data are drawn here.
legend("bottomright", legend = c("S+", "S-"), pch = c(15, 22), bty = "n" )
legend("bottomleft", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n")
## 1.3.1.2. Percentage of BL
# ITI latency pre vs. post infusion, expressed as % of each group's baseline.
plot.new()
plot.window(xlim=c(0, 1), ylim=c(0, 150))
plotPrePostLines(data=byBinDataEarlyVEH[[7]], color=colindx[1], pch=15, scores="percentBL") #VEH group, ITI latency
plotPrePostLines(data=byBinDataEarlyAP5[[7]], color=colindx[2], pch=15, scores="percentBL") #AP5 group, ITI latency
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1, font=2)
# Fixed: tick range now matches ylim (was seq(0, 200), which drew ticks beyond the 0-150 window).
axis(side=2, at=seq(0, 150, by=50), font=2, las=2, pos=-0.05)
# Fixed: mtext() has no 'cex.axis' argument (the size request was silently ignored); use 'cex'.
mtext(side=2, line=3, text="% of BL ITI latency", font=2, cex=1.5)
#legend("bottomright", legend = c("S+", "S-"), pch = c(15, 22), bty = "n" )
legend("bottomright", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n", cex=1.5)
### 1.3.2. ITI latency: ITI latency by bin on test day in AP5 vs. VEH
## 1.3.2.1. Absolute scores
# ITI latency per bin; group means with SEM bars, infusion window shaded.
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(0, 10))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=0, ytop=10, col="gray95", border="white")
#lapply(seq(1, length(ratsEarlyVEH)), function(x) {lines(byBinDataEarlyVEH[[1]][[x]], col=colindx[1])})
#lapply(seq(1, length(ratsEarlyAP5)), function(x) {lines(byBinDataEarlyAP5[[1]][[x]], col=colindx[2])})
matVEH <- do.call("rbind", byBinDataEarlyVEH[[7]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[7]])
lines(colMeans(matVEH), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEH), err=colSds(matVEH)/sqrt(nrow(matVEH)), color=colindx[1])
points(colMeans(matVEH), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5), err=colSds(matAP5)/sqrt(nrow(matAP5)), color=colindx[2])
points(colMeans(matAP5), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(0, 10, by=2), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.2)
mtext(side=2, line=1, text="ITI latency (s)", font=2, cex=1.2)
legend("bottomright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
## 1.3.2.2. Percentage of BL
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(0, 150))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=0, ytop=150, col="gray95", border="white")
#Get data ready
matVEH <- do.call("rbind", byBinDataEarlyVEH[[7]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[7]])
VEHbl <- colMeans(matVEH[,BLbinIndex], na.rm=T); AP5bl <- colMeans(matAP5[,BLbinIndex], na.rm=T)
# NOTE(review): matrix/vector division recycles the baseline vector column-wise;
# confirm this matches the intended baseline normalization (see note in section 1.1.2.2).
matVEHperc <- (matVEH/VEHbl)*100
matAP5perc <- (matAP5/AP5bl)*100
#Plot
lines(colMeans(matVEHperc), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEHperc), err=colSds(matVEHperc)/sqrt(nrow(matVEHperc)), color=colindx[1])
points(colMeans(matVEHperc), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5perc), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5perc), err=colSds(matAP5perc)/sqrt(nrow(matAP5perc)), color=colindx[2])
points(colMeans(matAP5perc), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(0, 150, by=50), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.2)
mtext(side=2, line=1, text="% of baseline ITI latency", font=2, cex=1.2)
legend("bottomright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
### 1.3.3. ITI latency: barplots of pre and post infusion, S+ vs S- and VEH vs AP5
plot.new()
plot.window(xlim=c(0, 6), ylim=c(0, 10))
#S+ both groups pre and post infusion
plotPrePostBars(data=byBinDataEarlyVEH[[7]], color=colindx[1], xmiddle=1, barwidth=0.5, colLabel = "white", labelY = 0.5)
plotPrePostBars(data=byBinDataEarlyAP5[[7]], color=colindx[2], xmiddle=2, barwidth=0.5, colLabel = "white", labelY = 0.5)
#Axis and labels
axis(side=2, at=seq(0, 10, 2), cex.axis=1, font=2, las=2)
mtext(side=2, line=2.5, text="ITI latency (s)", cex=1.4, font=2)
# Hand-drawn color key.
rect(xleft=0, xright=0.5, ybottom=9.5, ytop=10, col=colindx[1], border="white")
rect(xleft=0, xright=0.5, ybottom=8.5, ytop=9, col=colindx[2], border="white")
text(x=1, y=9.8, labels="VEH", cex=1.5)
text(x=1, y=8.8, labels="AP5", cex=1.5)
#### 1.4. CUED SPECIFICITY
### 1.4.1. Cue specificity: S+ and S- specificity pre vs. post infusion in AP5 vs. VEH
# In the objects 'byBinDataEarlyVEH' and 'byBinDataEarlyAP5', the 5th and 6th items are DStaskAccuracy and NStaskAccuracy by subject by bin
## 1.4.1.1. Absolute scores
plot.new()
plot.window(xlim=c(0, 1), ylim=c(-2, 6))
abline(h=0, lty=3)
plotPrePostLines(data=byBinDataEarlyVEH[[5]], color=colindx[1], pch=15, scores="absolute") #VEH group, S+
plotPrePostLines(data=byBinDataEarlyAP5[[5]], color=colindx[2], pch=15, scores="absolute") #AP5 group, S+
plotPrePostLines(data=byBinDataEarlyVEH[[6]], color=colindx[1], pch=22, scores="absolute", jitter=0.015) #VEH group, S-
plotPrePostLines(data=byBinDataEarlyAP5[[6]], color=colindx[2], pch=22, scores="absolute", jitter=0.015) #AP5 group, S-
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1.4, font=2)
axis(side=2, at=seq(-2, 6, by=1), font=2, las=2, pos=-0.04)
mtext(side=2, line=2, text="S+ Specificity (s)", font=2, cex=1.4)
legend("topright", legend = c("S+", "S-"), pch = c(15, 22), bty = "n" )
legend("topleft", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n")
## 1.4.1.2. Percentage of BL
plot.new()
plot.window(xlim=c(0, 1), ylim=c(0, 140))
plotPrePostLines(data=byBinDataEarlyVEH[[5]], color=colindx[1], pch=15, scores="percentBL") #VEH group, S+
plotPrePostLines(data=byBinDataEarlyAP5[[5]], color=colindx[2], pch=15, scores="percentBL") #AP5 group, S+
#plotPrePostLines(data=byBinDataEarlyVEH[[2]], color=colindx[1], pch=22, scores="percentBL", jitter=0.015) #VEH group, S-. It's confusing so I'm not plotting it
#plotPrePostLines(data=byBinDataEarlyAP5[[2]], color=colindx[2], pch=22, scores="percentBL", jitter=0.015) #AP5 group, S-
axis(side=1, at=c(0, 1), labels=c("Preinfusion", "Postinfusion"), cex.axis=1.4, font=2)
axis(side=2, at=seq(0, 140, by=20), font=2, las=2, pos=-0.1)
mtext(side=2, line=4, text="% of BL S+ specificity", font=2, cex=1.4)
#legend("bottomright", legend = c("S+", "S-"), pch = c(15, 22), bty = "n" )
legend("bottomleft", legend=c("VEH", "AP5"), lty=1, col=colindx, bty="n", cex=1.2)
### 1.4.2. Cue specificity: S+ and S- specificity by bin on test day in AP5 vs. VEH
## 1.4.2.1. Absolute scores
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(-2, 7))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=-2, ytop=6, col="gray95", border="white")
abline(h=0, lty=3)
#lapply(seq(1, length(ratsEarlyVEH)), function(x) {lines(byBinDataEarlyVEH[[1]][[x]], col=colindx[1])})
#lapply(seq(1, length(ratsEarlyAP5)), function(x) {lines(byBinDataEarlyAP5[[1]][[x]], col=colindx[2])})
matVEH <- do.call("rbind", byBinDataEarlyVEH[[5]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[5]])
lines(colMeans(matVEH), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEH), err=colSds(matVEH)/sqrt(nrow(matVEH)), color=colindx[1])
points(colMeans(matVEH), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5), err=colSds(matAP5)/sqrt(nrow(matAP5)), color=colindx[2])
points(colMeans(matAP5), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(-2, 6, by=2), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.2)
mtext(side=2, line=1, text="S+ specificity (s)", font=2, cex=1.2)
legend("bottomright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
## 1.4.2.1.B. S- cued specificity. Absolute scores
# Same by-bin layout as 1.4.2.1 but for the S- cue (item [[6]] of the byBin objects).
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(-2, 7))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=-2, ytop=6, col="gray95", border="white")
abline(h=0, lty=3)
#lapply(seq(1, length(ratsEarlyVEH)), function(x) {lines(byBinDataEarlyVEH[[1]][[x]], col=colindx[1])})
#lapply(seq(1, length(ratsEarlyAP5)), function(x) {lines(byBinDataEarlyAP5[[1]][[x]], col=colindx[2])})
matVEH <- do.call("rbind", byBinDataEarlyVEH[[6]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[6]])
lines(colMeans(matVEH), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEH), err=colSds(matVEH)/sqrt(nrow(matVEH)), color=colindx[1])
points(colMeans(matVEH), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5), err=colSds(matAP5)/sqrt(nrow(matAP5)), color=colindx[2])
points(colMeans(matAP5), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(-2, 6, by=2), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.2)
# Fixed copy-paste label: this panel plots S- data ([[6]]), not S+.
mtext(side=2, line=1, text="S- specificity (s)", font=2, cex=1.2)
legend("bottomright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.5)
## 1.4.2.2. Percentage of BL
# S+ specificity per bin as % of baseline; infusion window shaded.
plot.new()
plot.window(xlim=c(0, minBinNo), ylim=c(0, 150))
#Mark infusion period
screenPerSec <- minBinNo/(12*binsize) #Length of one second in the X axis
infusionStart <- 1800; infusionEnd <- 1800+12*60
infusionStartScreen <- infusionStart*screenPerSec; infusionEndScreen <- infusionEnd*screenPerSec
rect(xleft=infusionStartScreen, xright=infusionEndScreen, ybottom=0, ytop=140, col="gray95", border="white")
#abline(h=100, lty=3)
#Get data ready
matVEH <- do.call("rbind", byBinDataEarlyVEH[[5]]) #Create matrix in which rows are different rats and columns are bins
matAP5 <- do.call("rbind", byBinDataEarlyAP5[[5]])
VEHbl <- colMeans(matVEH[,BLbinIndex], na.rm=T); AP5bl <- colMeans(matAP5[,BLbinIndex], na.rm=T)
# NOTE(review): matrix/vector division recycles the baseline vector column-wise;
# confirm this matches the intended baseline normalization (see note in section 1.1.2.2).
matVEHperc <- (matVEH/VEHbl)*100
matAP5perc <- (matAP5/AP5bl)*100
#Plot
lines(colMeans(matVEHperc), col=colindx[1], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matVEHperc), err=colSds(matVEHperc)/sqrt(nrow(matVEHperc)), color=colindx[1])
points(colMeans(matVEHperc), col=colindx[1], pch=15, cex=1.5)
lines(colMeans(matAP5perc), col=colindx[2], lwd=2)
errBars(x=seq(1, minBinNo), y=colMeans(matAP5perc), err=colSds(matAP5perc)/sqrt(nrow(matAP5perc)), color=colindx[2])
points(colMeans(matAP5perc), col=colindx[2], pch=15, cex=1.5)
axis(side=1, at=seq(1, minBinNo, by=1), labels=seq(binsize/60, (minBinNo*binsize)/60, by=binsize/60), font=2)
axis(side=2, at=seq(0, 140, by=20), font=2, las=2, pos=0.5)
mtext(side=1, line=2.5, text = "Time (min)", font=2, cex=1.2)
mtext(side=2, line=1, text="% of baseline S+ specificity", font=2, cex=1.2)
legend("bottomright", legend=c("VEH", "AP5"), lty=1, lwd=2, col=colindx, bty="n", cex=1.2)
### 1.4.3. Cued specificity: barplots of pre and post infusion, S+ vs S- and VEH vs AP5
plot.new()
plot.window(xlim=c(0, 6), ylim=c(-2, 6))
#S+ both groups pre and post infusion
plotPrePostBars(data=byBinDataEarlyVEH[[5]], color=colindx[1], xmiddle=1, barwidth=0.5, colLabel = "white", labelY = 0.5)
plotPrePostBars(data=byBinDataEarlyAP5[[5]], color=colindx[2], xmiddle=2, barwidth=0.5, colLabel = "white", labelY = 0.5)
#S- both groups pre and post infusion
plotPrePostBars(data=byBinDataEarlyVEH[[6]], color=colindxB[1], xmiddle=3.25, barwidth=0.5, colLabel = "black", labelY = 0.5)
plotPrePostBars(data=byBinDataEarlyAP5[[6]], color=colindxB[2], xmiddle=4.25, barwidth=0.5, colLabel = "black", labelY = 0.5)
#Axis and labels
axis(side=1, tick = F, at=c(1.5, 3.75), labels=c("S+", "S-"), cex.axis=1.4, font=2)
axis(side=2, at=seq(-2, 6, 2), cex.axis=1, font=2, las=2)
mtext(side=2, line=2.5, text="Cued specificity", cex=1.4, font=2)
# Hand-drawn color key.
rect(xleft=3, xright=3.5, ybottom=5.5, ytop=6, col=colindx[1], border="white")
rect(xleft=3.5, xright=4, ybottom=5.5, ytop=6, col=colindxB[1], border="white")
rect(xleft=3, xright=3.5, ybottom=4.5, ytop=5, col=colindx[2], border="white")
rect(xleft=3.5, xright=4, ybottom=4.5, ytop=5, col=colindxB[2], border="white")
text(x=4.5, y=5.8, labels="VEH", cex=1.5)
text(x=4.5, y=4.8, labels="AP5", cex=1.5)
######################################################
######################################################
### STATISTICAL ANALYSES ###
######################################################
######################################################
# Mixed ANOVAs (between: Drug; within: Infusion and, where relevant, cue Index)
# on the long-format summary table built earlier in the file.
Early_LongFormat #This is our object of reference
# NOTE(review): 'indexes' ordering depends on row order of Early_LongFormat (unique());
# the positional subsets below ([1]=DS RR, [2]=NS RR, ...) rely on that order -- verify.
indexes <- unique(Early_LongFormat$Index)
## S+ Response ratio
DSRR <- subset(x=Early_LongFormat, Early_LongFormat$Index==indexes[1])
vehap5prepost.test <- ezANOVA(data=DSRR, dv=Performance, within=Infusion, between=Drug, wid=Rat, type=3)
# $`ANOVA`
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 50.99187 5.419987e-05 * 0.6694572
# 3 Infusion 1 9 14.72451 3.983238e-03 * 0.5124852
# 4 Drug:Infusion 1 9 15.22492 3.608893e-03 * 0.5208306
#The interaction was significant. As post-hoc test, I'll split the dataset into the groups and, within each group,
# use a paired t-test for the pre vs. post
DSRR_VEH <- subset(DSRR, Drug=="VEH")
DSRR_AP5 <- subset(DSRR, Drug=="AP5")
# One-sided paired t-tests (pre > post), Holm-corrected below.
vehtest <- t.test(x=DSRR_VEH$Performance[DSRR_VEH$Infusion=="Pre"],
y=DSRR_VEH$Performance[DSRR_VEH$Infusion=="Post"],
paired=T, alternative="greater") #t(5)= -0.44473, p=0.66244712
ap5test <- t.test(x=DSRR_AP5$Performance[DSRR_AP5$Infusion=="Pre"],
y=DSRR_AP5$Performance[DSRR_AP5$Infusion=="Post"],
paired=T, alternative="greater") #t(4)=3.5043, p=0.02479956
p.adjust(p=c(vehtest$p.value, ap5test$p.value), method="holm") # 0.66244712 0.02479956
## S- Response ratio
NSRR <- subset(x=Early_LongFormat, Early_LongFormat$Index==indexes[2])
ezANOVA(data=NSRR, dv=Performance, within=Infusion, between=Drug, wid=Rat, type=3) #Nothing was significant
# Effect DFn DFd F p p<.05 ges
#2 Drug 1 9 0.5167905 0.49045629 0.02197836
#3 Infusion 1 9 4.2795119 0.06850836 0.22445136
#4 Drug:Infusion 1 9 2.8361728 0.12645010 0.16093399
### DS RR AND NS RR
# Combined model with cue type (Index) as an additional within-subject factor.
DSRR_NSRR <- rbind(DSRR, NSRR)
vehap5prepost.DSRRNSRR.test <- ezANOVA(data=DSRR_NSRR, dv=Performance, within=c(Infusion, Index), between=Drug, wid=Rat, type=3)
# $`ANOVA`
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 18.7354306 1.909441e-03 * 0.33331346
# 3 Infusion 1 9 15.7074285 3.288008e-03 * 0.35874208
# 5 Index 1 9 136.2991734 9.727380e-07 * 0.67663028
# 4 Drug:Infusion 1 9 13.7593887 4.849745e-03 * 0.32888322
# 6 Drug:Index 1 9 19.0464483 1.812275e-03 * 0.22624393
# 7 Infusion:Index 1 9 0.6816821 4.303494e-01 0.02229936
# 8 Drug:Infusion:Index 1 9 1.3698530 2.718946e-01 0.04382443
## S+ latency
# Mixed ANOVA on DS latency, then per-group paired t-tests as post-hocs.
DSlat <- subset(x=Early_LongFormat, Early_LongFormat$Index==indexes[3])
vehAP5.PrePost.test.Lat <- ezANOVA(data=DSlat, dv=Performance, within=Infusion, between=Drug, wid=Rat, type=3)
#The interaction was significant. As post-hoc test, I'll split the dataset into the groups and, within each group, use a paired t-test for the pre vs. post
DSlat_VEH <- subset(DSlat, Drug=="VEH")
DSlat_AP5 <- subset(DSlat, Drug=="AP5")
# One-sided paired t-tests (pre < post, i.e. latency increases), Holm-corrected below.
vehtest <- t.test(x=DSlat_VEH$Performance[DSlat_VEH$Infusion=="Pre"],
y=DSlat_VEH$Performance[DSlat_VEH$Infusion=="Post"],
paired=T, alternative="less") #t(5)= 0.70908, p=0.74502111
ap5test <- t.test(x=DSlat_AP5$Performance[DSlat_AP5$Infusion=="Pre"],
y=DSlat_AP5$Performance[DSlat_AP5$Infusion=="Post"],
paired=T, alternative="less") #t(4)=-3.0849, p=0.03675593
p.adjust(p=c(vehtest$p.value, ap5test$p.value), method="holm") #0.74502111 0.03675593
# $ANOVA
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 88.27426 0.000005998055 * 0.7005632
# 3 Infusion 1 9 11.00157 0.008985138980 * 0.4820834
# 4 Drug:Infusion 1 9 12.03800 0.007053418470 * 0.5045832
## S- latency
NSlat <- subset(x=Early_LongFormat, Early_LongFormat$Index==indexes[4])
ezANOVA(data=NSlat, dv=Performance, within=Infusion, between=Drug, wid=Rat, type=3)
#Nothing was significant:
# $ANOVA
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 0.05383936 0.82170236 0.00198363
# 3 Infusion 1 9 4.40878841 0.06514878 0.24648147
# 4 Drug:Infusion 1 9 2.69415562 0.13513460 0.16659113
###DS AND NS LATENCY
# Combined model with cue type (Index) as an additional within-subject factor.
DSNSlat <- rbind(DSlat, NSlat)
ezANOVA(data=DSNSlat, dv=Performance, within=c(Infusion, Index), between=Drug, wid=Rat, type=3)
# $`ANOVA`
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 33.656896 2.590016e-04 * 0.42003301
# 3 Infusion 1 9 13.306906 5.335225e-03 * 0.38005858
# 5 Index 1 9 410.898873 8.064236e-09 * 0.79177201
# 4 Drug:Infusion 1 9 12.228779 6.755671e-03 * 0.36036261
# 6 Drug:Index 1 9 71.014208 1.457603e-05 * 0.39655808
# 7 Infusion:Index 1 9 2.051263 1.858816e-01 0.06567702
# 8 Drug:Infusion:Index 1 9 3.488967 9.461821e-02 0.10679325
## ITI latency
ITIlat <- subset(x=Early_LongFormat, Early_LongFormat$Index==indexes[7])
ITIlat.aov_test <- ezANOVA(data=ITIlat, dv=Performance, within=Infusion, between=Drug, wid=Rat, type=3)
# $ANOVA
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 6.241261 0.03395910 * 0.3262542
# 3 Infusion 1 9 5.825659 0.03901543 * 0.1633909
# 4 Drug:Infusion 1 9 9.284387 0.01385936 * 0.2373706
#The interaction was significant. As post-hoc test, I'll split the dataset into the groups and, within each group, use a paired t-test for the pre vs. post
ITIlat_VEH <- subset(ITIlat, Drug=="VEH")
ITIlat_AP5 <- subset(ITIlat, Drug=="AP5")
# NOTE(review): the inline p recorded below (0.388) disagrees with the Holm output
# recorded two lines down (0.7308) -- likely transcripts from different runs; re-verify.
vehtest <- t.test(x=ITIlat_VEH$Performance[ITIlat_VEH$Infusion=="Pre"],
y=ITIlat_VEH$Performance[ITIlat_VEH$Infusion=="Post"],
paired=T, alternative="less") #t(5)= -0.29979, p=0.38819996
ap5test <- t.test(x=ITIlat_AP5$Performance[ITIlat_AP5$Infusion=="Pre"],
y=ITIlat_AP5$Performance[ITIlat_AP5$Infusion=="Post"],
paired=T, alternative="less") #t(4)=-2.9156, p=0.04343353
p.adjust(p=c(vehtest$p.value, ap5test$p.value), method="holm") #0.73076380 0.04378754
## S+ specificity
DSspec <- subset(x=Early_LongFormat, Early_LongFormat$Index==indexes[5])
spec.test <- ezANOVA(data=DSspec, dv=Performance, within=Infusion, between=Drug, wid=Rat, type=3)
# $ANOVA
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 12.857249 0.00587814 * 0.3688511
# 3 Infusion 1 9 4.437363 0.06443437 0.2256135
# 4 Drug:Infusion 1 9 4.277391 0.06856527 0.2192633
# NOTE(review): per the output above, the Drug:Infusion interaction was NOT significant
# here (p = .069), so the per-group pre vs. post t-tests below are exploratory rather
# than protected post-hocs (the original comment claimed significance).
DSspec_VEH <- subset(DSspec, Drug=="VEH")
DSspec_AP5 <- subset(DSspec, Drug=="AP5")
vehtest <- t.test(x=DSspec_VEH$Performance[DSspec_VEH$Infusion=="Pre"], y=DSspec_VEH$Performance[DSspec_VEH$Infusion=="Post"], paired=T, alternative="greater") #t = 0.061249, df = 5, p-value = 0.4768
ap5test <- t.test(x=DSspec_AP5$Performance[DSspec_AP5$Infusion=="Pre"], y=DSspec_AP5$Performance[DSspec_AP5$Infusion=="Post"], paired=T, alternative="greater") #t = 2.0081, df = 4, p-value = 0.04752
p.adjust(p=c(vehtest$p.value, ap5test$p.value), method="holm") #0.4767669 0.1150489
## S- specificity
NSspec <- subset(x=Early_LongFormat, Early_LongFormat$Index==indexes[6])
ezANOVA(data=NSspec, dv=Performance, within=Infusion, between=Drug, wid=Rat, type=3)
# $ANOVA
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 3.6204787 0.08949286 0.18594300
# 3 Infusion 1 9 0.3845323 0.55056370 0.01813095
# 4 Drug:Infusion 1 9 0.5718089 0.46887081 0.02672518
###S+ and S- specificity together
# Combined model with cue type (Index) as an additional within-subject factor.
DSNS.spec <- rbind(DSspec, NSspec)
ezANOVA(data=DSNS.spec, dv=Performance, within=c(Index, Infusion), between=Drug, wid=Rat, type=3)
# $`ANOVA`
# Effect DFn DFd F p p<.05 ges
# 2 Drug 1 9 0.8882808 3.705530e-01 0.03748976
# 3 Index 1 9 386.6251147 1.054812e-08 * 0.77438776
# 5 Infusion 1 9 8.1118638 1.914740e-02 * 0.11151720
# 4 Drug:Index 1 9 44.9048800 8.842506e-05 * 0.28502838
# 6 Drug:Infusion 1 9 2.9049243 1.225049e-01 0.04301426
# 7 Index:Infusion 1 9 1.2421203 2.939386e-01 0.05060416
# 8 Drug:Index:Infusion 1 9 3.0622127 1.140584e-01 0.11614290
### PERFORMANCE INDEX BY BIN
# Collapse the twelve 10-min bins into four 30-min "Bigbins" (1:1-30', 2:31-60',
# 3:61-90', 4:91-120') to reduce the multiple-comparison penalty.
Early_LongFormatByBin$bin <- as.character(Early_LongFormatByBin$bin)
bins <- unique(Early_LongFormatByBin$bin)
#newBinsIndex <- c(1, 1, 1, 0, 2, 2, 2, 3, 3, 3, 4, 4) #I want to create 30min bins instead of 10min bins because to compare so many bins reduces my p values a lot when adjusting
newBinsIndex <- c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4) #I want to create 30min bins instead of 10min bins because to compare so many bins reduces my p values a lot when adjusting
# Map each row's 10-min bin number (1..12) to its 30-min Bigbin via the lookup vector.
newBinsVals <- sapply(seq(1, nrow(Early_LongFormatByBin)), function(l){
  sel <- as.numeric(Early_LongFormatByBin$bin[l])
  newBinsIndex[sel]
})
Early_LongFormatByBin$Bigbins <- newBinsVals
# Mixed ANOVAs on performance: small (10-min) vs big (30-min) bins as within factor.
smallbins.aov <- summary(aov(perf ~ drug * bins + Error(rat/bins), data=Early_LongFormatByBin))
bigbins.aov <- summary(aov(perf ~ drug * Bigbins + Error(rat/(Bigbins)), data=Early_LongFormatByBin))
#Including kind of cue (DS vs NS) as a within factor too
# Same Bigbin mapping applied to the DS+NS long-format data frame.
Early_LongFormatByBin_DSandNS$bin <- as.character(Early_LongFormatByBin_DSandNS$bin)
bins <- unique(Early_LongFormatByBin_DSandNS$bin)
newBinsIndex <- c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4) #I want to create 30min bins instead of 10min bins because to compare so many bins reduces my p values a lot when adjusting
newBinsVals <- sapply(seq(1, nrow(Early_LongFormatByBin_DSandNS)), function(l){
  sel <- as.numeric(Early_LongFormatByBin_DSandNS$bin[l])
  newBinsIndex[sel]
})
Early_LongFormatByBin_DSandNS$Bigbins <- newBinsVals
ezANOVA(data=Early_LongFormatByBin_DSandNS, dv=perf, within=c(index, Bigbins), between=drug, wid=rat, type=3)
### Results of the Mixed-effects (1 within, 1 btwn-subject factor) ANOVA with the original 10min bins
# Error: rat
# Df Sum Sq Mean Sq F value Pr(>F)
# drug 1 232.0 232.01 12.12 0.00692 **
# Residuals 9 172.3 19.14
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Error: rat:bin
# Df Sum Sq Mean Sq F value Pr(>F)
# bin 11 48.71 4.429 1.433 0.170
# drug:bin 11 54.56 4.960 1.605 0.109
# Residuals 99 306.02 3.091
### Results of the Mixed-effects (1 within, 1 btwn-subject factor) ANOVA with the BIG BINS (30 min)
# Error: rat
# Df Sum Sq Mean Sq F value Pr(>F)
# drug 1 232.0 232.01 12.12 0.00692 **
# Residuals 9 172.3 19.14
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Error: rat:Bigbins
# Df Sum Sq Mean Sq F value Pr(>F)
# Bigbins 1 9.40 9.40 1.511 0.2502
# drug:Bigbins 1 31.79 31.79 5.111 0.0501 .
# Residuals 9 55.98 6.22
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Error: Within
# Df Sum Sq Mean Sq F value Pr(>F)
# Residuals 110 312.1 2.837
#Just to double-check
# Same Bigbins ANOVA as above, run through ezANOVA for verification.
ezANOVA(data=Early_LongFormatByBin, dv=perf, within=Bigbins, between=drug, wid=rat, type=1)
# $`ANOVA`
#         Effect DFn DFd         F           p p<.05        ges
# 1         drug   1   9 12.119157 0.006924822     * 0.50405442
# 2      Bigbins   1   9  1.510524 0.250223403       0.03953167
# 3 drug:Bigbins   1   9  5.111312 0.050109313       0.12224727
# Unpaired one-tailed t-test (VEH > AP5) on performance in each 10-min bin;
# results collected into one data frame (one row per bin).
ttestPerBin <- do.call("rbind", lapply(seq(1, length(unique(Early_LongFormatByBin$bin))), function(m){
  bindex <- unique(Early_LongFormatByBin$bin)[m]
  tst <- t.test(x=EarlyVEH_DStaskAccByBin_LongFormat$perf[EarlyVEH_DStaskAccByBin_LongFormat$bin==bindex],
                y=EarlyAP5_DStaskAccByBin_LongFormat$perf[EarlyAP5_DStaskAccByBin_LongFormat$bin==bindex], paired=F, alternative="greater")
  data.frame(bin=bindex, t=tst$statistic, df=tst$parameter, p=tst$p.value)
})
)
#Adjust the t test p values and also the p values of the ANOVA (using the small bins)
# The first three p-values are the ANOVA effects (drug, bin, drug:bin); they are
# dropped again with [-c(1:3)] so only the per-bin t-test p-values are stored.
padjusted <- p.adjust(p=c(0.00692, 0.17, 0.109, ttestPerBin$p), method="holm")
ttestPerBin$p.adjusted <- padjusted[-c(1:3)]
# bin t df p p.adjusted
# t 1 1.83145811 5.304251 0.061588185 0.23241359
# t1 2 -0.09012978 8.012100 0.534801740 0.69922324
# t2 3 0.40517671 6.078269 0.349611621 0.69922324
# t3 4 2.12717518 6.890756 0.035793356 0.18215673
# t4 5 2.92773629 7.727084 0.009899460 0.09899460
# t5 6 2.65123274 8.242288 0.014228686 0.11500863
# t6 7 3.59036615 8.723828 0.003069769 0.03683723
# t7 8 2.79555498 4.178221 0.023336776 0.16335743
# t8 9 1.83100539 6.076459 0.058103398 0.23241359
# t9 10 2.82825212 6.977097 0.012778737 0.11500863
# t10 11 3.00188759 8.884798 0.007562351 0.08318586
# t11 12 2.21600537 7.294561 0.030359454 0.18215673
# Attach the 30-min Bigbin index to the per-group (VEH/AP5) long-format frames.
# Here $bin is used directly as the lookup index (numeric bin 1..12).
newBinsVals <- sapply(seq(1, nrow(EarlyVEH_DStaskAccByBin_LongFormat)), function(l){
  sel <- EarlyVEH_DStaskAccByBin_LongFormat$bin[l]
  newBinsIndex[sel]
})
EarlyVEH_DStaskAccByBin_LongFormat$Bigbins <- newBinsVals
newBinsVals <- sapply(seq(1, nrow(EarlyAP5_DStaskAccByBin_LongFormat)), function(l){
  sel <- EarlyAP5_DStaskAccByBin_LongFormat$bin[l]
  newBinsIndex[sel]
})
EarlyAP5_DStaskAccByBin_LongFormat$Bigbins <- newBinsVals
# Unpaired one-tailed t-test (VEH > AP5) per 30-min Bigbin.
ttestPerBigBin <- do.call("rbind", lapply(seq(1, length(unique(Early_LongFormatByBin$Bigbins))), function(m){
  bindex <- unique(Early_LongFormatByBin$Bigbins)[m]
  tst <- t.test(x=EarlyVEH_DStaskAccByBin_LongFormat$perf[EarlyVEH_DStaskAccByBin_LongFormat$Bigbins==bindex],
                y=EarlyAP5_DStaskAccByBin_LongFormat$perf[EarlyAP5_DStaskAccByBin_LongFormat$Bigbins==bindex], paired=F, alternative="greater")
  data.frame(Bigbins=bindex, t=tst$statistic, df=tst$parameter, p=tst$p.value)
})
)
#Adjust the t test p values (using the big bins)
# NOTE(review): the next two lines compute the same Holm adjustment twice;
# `padjusted` is assigned but unused — the second line is the one that sticks.
padjusted <- p.adjust(p=c(ttestPerBigBin$p), method="holm")
ttestPerBigBin$p.adjusted <- p.adjust(p=ttestPerBigBin$p, method="holm")
###Making 31-40 its own separate bin
# Bigbins t df p p.adjusted
# t 1 0.8568663 23.645328 0.200060020152 0.34000000000 #1-30 min. I'll use this as the PRE window
# t1 0 2.1271752 6.890756 0.035793355919 0.14317342368 #31-40 min. I discard this because it's the time at which the infusion is taking place
# t2 2 5.4051789 28.468158 0.000004356194 0.00003484955 #41-70 min. I'll use this as the POST window
# t3 3 4.4557853 21.256654 0.000106486237 0.00074540366 #71-100 min
# t4 4 3.7628519 18.653759 0.000676119814 0.00405671888 #101-120 min
### including 31-40min
# Bigbins t df p p.adjusted
# t 1 0.3039309 22.70217 0.3819743836 0.3819743836
# t1 2 4.2915647 25.93471 0.0001093981 0.0004375925
# t2 3 4.0209585 25.39606 0.0002292323 0.0006876970
# t3 4 3.5534131 27.75646 0.0006919061 0.0013838123
#################################################################
#######            NEURONAL FIRING                     ##########
#################################################################
###############################################################
#1. PSTH pre and post infusion
###############################################################
######
#VEH #
######
# psthInf (project function) plots peri-stimulus time histograms comparing the
# pre-infusion window (first 1800 s) with the post-infusion window; each event is
# plotted twice, once Z-scored and once with raw firing rates.
# S+ Onset
psthInf(formatDat="Zscores", group="VEH", event="S+", comp=c("Pre VEH injection", "Post VEH injection"),
        expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder,
        col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=2, binw=50, neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf),
        stimulus="cue", imgFormat="pdf", BLNeuData=0)
psthInf(formatDat="raw", group="VEH", event="S+", comp=c("Pre VEH injection", "Post VEH injection"),
        expName = "Early", errShade=T, ymax=26, graphFolder=neuGraphFolder,
        col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf),
        stimulus="cue", imgFormat="pdf", BLNeuData=0)
#S- Onset
psthInf(formatDat="Zscores", group="VEH", event="S-", comp=c("Pre VEH injection", "Post VEH injection"),
        expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[1]),
        infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsNSEarlyVEHPreInf, allNeuronsNSEarlyVEHPostInf),
        stimulus="cue", imgFormat="pdf", BLNeuData=0)
psthInf(formatDat="raw", group="VEH", event="S-", comp=c("Pre VEH injection", "Post VEH injection"),
        expName = "Early", errShade=T, ymax=26, graphFolder=neuGraphFolder, col=c("black", colindx[1]),
        infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsNSEarlyVEHPreInf, allNeuronsNSEarlyVEHPostInf),
        stimulus="cue", imgFormat="pdf", BLNeuData=0)
# For entry-aligned PSTHs, the cue-aligned data are passed as BLNeuData (baseline).
#S+ Entry
psthInf(formatDat="Zscores", group="VEH", event="S+ Entry", comp=c("Pre VEH injection", "Post VEH injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryDSEarlyVEHPreInf, allNeuronsEntryDSEarlyVEHPostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf), imgFormat="pdf")
psthInf(formatDat="raw", group="VEH", event="S+ Entry", comp=c("Pre VEH injection", "Post VEH injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryDSEarlyVEHPreInf, allNeuronsEntryDSEarlyVEHPostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf), imgFormat="pdf")
#S- Entry
psthInf(formatDat="Zscores", group="VEH", event="S- Entry", comp=c("Pre VEH injection", "Post VEH injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryNSEarlyVEHPreInf, allNeuronsEntryNSEarlyVEHPostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf), imgFormat="pdf")
psthInf(formatDat="raw", group="VEH", event="S- Entry", comp=c("Pre VEH injection", "Post VEH injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryNSEarlyVEHPreInf, allNeuronsEntryNSEarlyVEHPostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf), imgFormat="pdf")
#ITI Entry
psthInf(formatDat="Zscores", group="VEH", event="ITI Entry", comp=c("Pre VEH injection", "Post VEH injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryITIEarlyVEHPreInf, allNeuronsEntryITIEarlyVEHPostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf), imgFormat="pdf")
psthInf(formatDat="raw", group="VEH", event="ITI Entry", comp=c("Pre VEH injection", "Post VEH injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[1]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryITIEarlyVEHPreInf, allNeuronsEntryITIEarlyVEHPostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf), imgFormat="pdf")
########
#AP5
########
# Same PSTH set as the VEH section above, for the AP5 group (color colindx[2]).
#S+ Onset
psthInf(formatDat="Zscores", group="AP5", event="S+", comp=c("Pre AP5 injection", "Post AP5 injection"),
        expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]),
        infTime=1800, infDur=12*60, xmin=0.5, xmax=2, binw=50,
        neudata=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), stimulus="cue", imgFormat="pdf", BLNeuData=0)
psthInf(formatDat="raw", group="AP5", event="S+", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=26, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), stimulus="cue", imgFormat="pdf", BLNeuData=0)
#S- Onset
psthInf(formatDat="Zscores", group="AP5", event="S-", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsNSEarlyAP5PreInf, allNeuronsNSEarlyAP5PostInf), stimulus="cue", imgFormat="pdf", BLNeuData=0)
psthInf(formatDat="raw", group="AP5", event="S-", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=26, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsNSEarlyAP5PreInf, allNeuronsNSEarlyAP5PostInf), stimulus="cue", imgFormat="pdf", BLNeuData=0)
#S+ Entry
psthInf(formatDat="Zscores", group="AP5", event="S+ Entry", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryDSEarlyAP5PreInf, allNeuronsEntryDSEarlyAP5PostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), imgFormat="pdf")
psthInf(formatDat="raw", group="AP5", event="S+ Entry", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryDSEarlyAP5PreInf, allNeuronsEntryDSEarlyAP5PostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), imgFormat="pdf")
#S- Entry
psthInf(formatDat="Zscores", group="AP5", event="S- Entry", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryNSEarlyAP5PreInf, allNeuronsEntryNSEarlyAP5PostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), imgFormat="pdf")
psthInf(formatDat="raw", group="AP5", event="S- Entry", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryNSEarlyAP5PreInf, allNeuronsEntryNSEarlyAP5PostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), imgFormat="pdf")
#ITI Entry
psthInf(formatDat="Zscores", group="AP5", event="ITI Entry", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryITIEarlyAP5PreInf, allNeuronsEntryITIEarlyAP5PostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), imgFormat="pdf")
psthInf(formatDat="raw", group="AP5", event="ITI Entry", comp=c("Pre AP5 injection", "Post AP5 injection"), expName = "Early", errShade=T, ymax=14, graphFolder=neuGraphFolder, col=c("black", colindx[2]), infTime=1800, infDur=12*60,
        xmin=0.5, xmax=1.5, binw=50, neudata=list(allNeuronsEntryITIEarlyAP5PreInf, allNeuronsEntryITIEarlyAP5PostInf), stimulus="entry", BLNeuData=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf), imgFormat="pdf")
###############################################################
#2. POINTS pre and post infusion around time of cue
###############################################################
# dotplot (project function) summarizes per-unit firing in a post-cue window
# (winmin-winmax, ms) for the four conditions: VEH pre/post and AP5 pre/post.
dotplot(neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
        expName="Early", dot="Medians", Lines=T, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400)
dotplot(neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
        expName="Early", dot="Means", Lines=T, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400)
dotplot(neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
        expName="Early", dot="Medians", Lines=F, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400)
dotplot(neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
        expName="Early", dot="Means", Lines=F, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400)
#Same but with boxplot instead of dotplot
# The returned per-unit values are kept and saved; they feed the Wilcoxon tests below.
#100-400ms
dotPlotByGroupDS <- dotplot(boxplot=T, neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
                            expName="Early 100-400", Lines=T, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400, ytop=12)
dotPlotByGroupNS <- dotplot(boxplot=T, neudata=list(allNeuronsNSEarlyVEHPreInf, allNeuronsNSEarlyVEHPostInf, allNeuronsNSEarlyAP5PreInf, allNeuronsNSEarlyAP5PostInf),
                            expName="Early 100-400", Lines=T, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400, ytop=12)
save(dotPlotByGroupDS, file=paste(dataForRdir, "dotPlotByGroupDS.rdat", sep=""))
save(dotPlotByGroupNS, file=paste(dataForRdir, "dotPlotByGroupNS.rdat", sep=""))
#750-2000
# NOTE(review): the DS call uses event="S+" but the NS call uses event="S-" here,
# while the 100-400ms NS call above uses event="S+" — confirm which is intended.
dotPlotByGroupDS_TAIL <- dotplot(boxplot=T, neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
                                 expName="Early 750-2000", Lines=T, col=colindx, plotWidth=0.3, event="S+", winmin=750, winmax=2000, ytop=12)
dotPlotByGroupNS_TAIL <- dotplot(boxplot=T, neudata=list(allNeuronsNSEarlyVEHPreInf, allNeuronsNSEarlyVEHPostInf, allNeuronsNSEarlyAP5PreInf, allNeuronsNSEarlyAP5PostInf),
                                 expName="Early 750-2000", Lines=T, ytop=12, ybottom=-2, col=colindx, plotWidth=0.3, event="S-", winmin=750, winmax=2000)
save(dotPlotByGroupDS_TAIL, file=paste(dataForRdir, "dotPlotByGroupDS_TAIL.rdat", sep=""))
save(dotPlotByGroupNS_TAIL, file=paste(dataForRdir, "dotPlotByGroupNS_TAIL.rdat", sep=""))
#Raw
#100-400ms after the S+
# dotplot(boxplot=T, neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
# expName="Early 100-400", Lines=T, formatDat="Raw", ytop=20, ybottom=0, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400, comp=c("VEH", "AP5"))
# dotplot(boxplot=T, neudata=list(allNeuronsNSEarlyVEHPreInf, allNeuronsNSEarlyVEHPostInf, allNeuronsNSEarlyAP5PreInf, allNeuronsNSEarlyAP5PostInf),
# expName="Early 100-400", Lines=T, formatDat="Raw", ytop=20, ybottom=0, col=colindx, plotWidth=0.3, event="S-", winmin=100, winmax=400, comp=c("VEH", "AP5"))
# dotplot(boxplot=T, neudata=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf, allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
# expName="Early 100-400", Lines=F, formatDat="Raw", ytop=20, ybottom=0, col=colindx, plotWidth=0.3, event="S+", winmin=100, winmax=400, comp=c("VEH", "AP5"))
# dotplot(boxplot=T, neudata=list(allNeuronsNSEarlyVEHPreInf, allNeuronsNSEarlyVEHPostInf, allNeuronsNSEarlyAP5PreInf, allNeuronsNSEarlyAP5PostInf),
# expName="Early 100-400", Lines=F, formatDat="Raw", ytop=20, ybottom=0, col=colindx, plotWidth=0.3, event="S-", winmin=100, winmax=400, comp=c("VEH", "AP5"))
### STATISTICAL TEST
#Comparison of cue-evoked firing rate (100ms-400ms window) pre vs post injection for both groups. I'll use a Wilcoxon paired test
# separately for each group
# NOTE(review): this loads "dotPlotByGroup.rdat", but the objects saved above went
# to "dotPlotByGroupDS.rdat"/"dotPlotByGroupNS.rdat" — possibly a stale filename
# from an earlier version; dotPlotByGroupDS/NS are already in the workspace if the
# section above was just run. Confirm the .rdat name.
load(file=paste(dataForRdir, "dotPlotByGroup.rdat", sep=""))
# Per-unit window firing: column 1 = pre-infusion, column 2 = post-infusion.
dotPlotByGroupEarly <- dotPlotByGroupDS
dotplotDataVEH_Early <- dotPlotByGroupEarly$VEH
dotplotDataAP5_Early <- dotPlotByGroupEarly$AP5
dotPlotByGroupEarlyNS <- dotPlotByGroupNS
dotplotDataVEH_Early_NS <- dotPlotByGroupEarlyNS$VEH
dotplotDataAP5_Early_NS <- dotPlotByGroupEarlyNS$AP5
#100-400ms after S+
# Paired one-tailed Wilcoxon signed-rank tests (pre > post, or DS > NS).
# DS_VEH_Early pre vs. post
wilcox.test(x=dotplotDataVEH_Early[,1], y=dotplotDataVEH_Early[,2], paired=T, alternative = "greater") #V = 22, p=0.3203; pcorrected=6.4060e-01 ; #Raw scores: V=24, p=0.4609
# DS_AP5_Early pre vs. post
wilcox.test(x=dotplotDataAP5_Early[,1], y=dotplotDataAP5_Early[,2], paired=T, alternative = "greater") #V = 424, p=9.122e-06; pcorrected= 5.4732e-05; Raw scores: V=80, p=0.0011
# NS_VEH_Early pre vs. post
wilcox.test(x=dotplotDataVEH_Early_NS[,1], y=dotplotDataVEH_Early_NS[,2], paired=T, alternative = "greater") #V = 22, p=8.2020e-02; pcorrected=1
# NS_AP5_Early pre vs. post
wilcox.test(x=dotplotDataAP5_Early_NS[,1], y=dotplotDataAP5_Early_NS[,2], paired=T, alternative = "greater") #V = 89, p=9.9890e-01; pcorrected= 1.0944e-04
# DS vs NS: Early VEH PRE
wilcox.test(x=dotplotDataVEH_Early[,1], y=dotplotDataVEH_Early_NS[,1], paired=T, alternative = "greater") #V = 36, p=1.9530e-02; pcorrected=3.1200e-02
# DS vs. NS: Early AP5 PRE
wilcox.test(x=dotplotDataAP5_Early[,1], y=dotplotDataAP5_Early_NS[,1], paired=T, alternative = "greater") #V = 442, p=4.7680e-06 ; pcorrected=9.5360e-06
# DS vs. NS: Early VEH POST
wilcox.test(x=dotplotDataVEH_Early[,2], y=dotplotDataVEH_Early_NS[,2], paired=T, alternative = "greater") #V = 35, p=3.1252e-02; pcorrected=3.1689e-02
# DS vs. NS: Early AP5 POST
wilcox.test(x=dotplotDataAP5_Early[,2], y=dotplotDataAP5_Early_NS[,2], paired=T, alternative = "greater") #V = 430, p=2.7979e-05; pcorrected=5.5958e-05
# Holm adjustment over all eight Wilcoxon p-values (default method is "holm").
p.adjust(p=c(0.3203, 9.122e-06, 0.02734, 0.9989, 0.003906, 5.96e-07, 0.007813, 3.997e-06))
# 6.4060e-01 5.4732e-05 8.2020e-02 9.9890e-01 1.9530e-02 4.7680e-06 3.1252e-02 2.7979e-05
#750-2000ms after S+
# Same analysis as above on the late ("tail") 750-2000ms post-cue window.
load(file=paste(dataForRdir, "dotPlotByGroupDS_TAIL.rdat", sep=""))
load(file=paste(dataForRdir, "dotPlotByGroupNS_TAIL.rdat", sep=""))
# Create objects that will be useful for the comparisons
dotPlotByGroupEarly <- dotPlotByGroupDS_TAIL
dotplotDataVEH_Early <- dotPlotByGroupEarly$VEH
dotplotDataAP5_Early <- dotPlotByGroupEarly$AP5
dotPlotByGroupEarlyNS <- dotPlotByGroupNS_TAIL
dotplotDataVEH_Early_NS <- dotPlotByGroupEarlyNS$VEH
dotplotDataAP5_Early_NS <- dotPlotByGroupEarlyNS$AP5
#750-2000ms after S+
# DS_VEH_Early pre vs. post
wilcox.test(x=dotplotDataVEH_Early[,1], y=dotplotDataVEH_Early[,2], paired=T, alternative = "greater") #V = 6, p=0.9609; pcorrected=6.4060e-01 ; #Raw scores: V=24, p=0.4609
# DS_AP5_Early pre vs. post
wilcox.test(x=dotplotDataAP5_Early[,1], y=dotplotDataAP5_Early[,2], paired=T, alternative = "greater") #V = 150, p=0.956; pcorrected= 5.4732e-05; Raw scores: V=80, p=0.0011
# NS_VEH_Early pre vs. post
wilcox.test(x=dotplotDataVEH_Early_NS[,1], y=dotplotDataVEH_Early_NS[,2], paired=T, alternative = "greater") #V = 22, p=0.3203; pcorrected=1
# NS_AP5_Early pre vs. post
wilcox.test(x=dotplotDataAP5_Early_NS[,1], y=dotplotDataAP5_Early_NS[,2], paired=T, alternative = "greater") #V = 162, p=0.927; pcorrected= 1.0944e-04
# DS vs NS: Early VEH PRE
wilcox.test(x=dotplotDataVEH_Early[,1], y=dotplotDataVEH_Early_NS[,1], paired=T, alternative = "greater") #V = 31, p=0.03906; pcorrected=3.1200e-02
# DS vs. NS: Early AP5 PRE
wilcox.test(x=dotplotDataAP5_Early[,1], y=dotplotDataAP5_Early_NS[,1], paired=T, alternative = "greater") #V = 260, p=0.2919 ; pcorrected=9.5360e-06
# DS vs. NS: Early VEH POST
wilcox.test(x=dotplotDataVEH_Early[,2], y=dotplotDataVEH_Early_NS[,2], paired=T, alternative = "greater") #V = 30, p=0.05469; pcorrected=3.1689e-02
# DS vs. NS: Early AP5 POST
wilcox.test(x=dotplotDataAP5_Early[,2], y=dotplotDataAP5_Early_NS[,2], paired=T, alternative = "greater") #V = 292, p=0.1143; pcorrected=5.5958e-05
#Correct p values taking into account the other 2 wilcoxon tests from the "Late" test
p.adjust(p=c(0.9609, 0.956, 0.3203, 0.927, 0.03906, 0.2919, 0.05469, 0.1143), method="holm")
# 1.00000 1.00000 1.00000 1.00000 0.31248 1.00000 0.38283 0.68580
###############################################################
#3. SCATTERPLOT pre and post infusion around time of cue
###############################################################
# prePostInf_FR (project function) scatters each unit's raw firing rate pre- vs
# post-infusion, first for the 100-400ms cue window, then for the 2s baseline.
prePostInf_FR(data=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf),
              dataformat="Raw", BLwdw=2, winmin=100, winmax=400, col_labels="purple",
              comparison="Early VEH Pre vs. Post", graphfolder=preVsPostFRFolder,
              xmin=0, ymin=0)
prePostInf_FR(data=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
              dataformat="Raw", BLwdw=2, winmin=100, winmax=400, col_labels="purple",
              comparison="Early AP5 Pre vs. Post", graphfolder=preVsPostFRFolder)
#Baseline
prePostInf_FR(data=list(allNeuronsDSEarlyVEHPreInf, allNeuronsDSEarlyVEHPostInf),
              dataformat="Raw", BLwdw=2, winmin=-2000, winmax=0,
              comparison="Baseline_Early VEH Pre vs. Post", graphfolder=preVsPostFRFolder)
prePostInf_FR(data=list(allNeuronsDSEarlyAP5PreInf, allNeuronsDSEarlyAP5PostInf),
              dataformat="Raw", BLwdw=2, winmin=-2000, winmax=0,
              comparison="Baseline_Early AP5 Pre vs. Post", graphfolder=preVsPostFRFolder)
################################################
### EXCITATION AND INHIBITION BY BIN
################################################
# Post-infusion analysis window ends at: session start offset (1800 s pre window)
# + 12 min infusion + 30 min, i.e. the period from end-of-infusion +30'.
postInfTargetWdw <- 1800+12*60+30*60 #For the post infusion window, I'll choose the period between the end of the infusion +30'.
#Matrix in which rows are 50ms bins after the cue, columns are individual neurons and the values indicate if the neuron was EXCITED (ExcBins) or INHIBITED (InhBins) on that bin
# KC.sigbins / KC.inhib.sigbins (project functions) read NEX files from `path`
# and flag significant excitation/inhibition per bin (pbin=0.05), pre-infusion
# (0-1800 s) and post-infusion (2520 s to postInfTargetWdw).
NEXfiles <- "E:/Dropbox/NMDA/EXP1_Performance/Early VEH/NEX files/"
EarlyVEHPreInf_ExcBins <- KC.sigbins(path=NEXfiles, startt=0, endt=1800, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
EarlyVEHPostInf_ExcBins <- KC.sigbins(path=NEXfiles, startt=2520, endt=postInfTargetWdw, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
EarlyVEHPreInf_InhBins <- KC.inhib.sigbins(path=NEXfiles, startt=0, endt=1800, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
EarlyVEHPostInf_InhBins <- KC.inhib.sigbins(path=NEXfiles, startt=2520, endt=postInfTargetWdw, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
# In neuralhist, I flagged neurons as CUE-EXCITED if they were excited (>99.9% confidence interval of a Poisson distribution given by BL firing) for 3 consecutive 10ms bins in the 500ms window after the cue. I used the 2s precue window as baseline to define my Poisson distribution.
#EarlyVEH_ExcUnits <- unlist(allNeuronsDSEarlyVEHPreInf$cueexidx) #Index of cue-excited units
#Redefine NEXfiles now so that it sends the function to the AP5 files and repeat
NEXfiles <- "E:/Dropbox/NMDA/EXP1_Performance/Early AP5/NEX files/"
EarlyAP5PreInf_ExcBins <- KC.sigbins(path=NEXfiles, startt=0, endt=1800, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
EarlyAP5PostInf_ExcBins <- KC.sigbins(path=NEXfiles, startt=2520, endt=postInfTargetWdw, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
EarlyAP5PreInf_InhBins <- KC.inhib.sigbins(path=NEXfiles, startt=0, endt=1800, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
EarlyAP5PostInf_InhBins <- KC.inhib.sigbins(path=NEXfiles, startt=2520, endt=postInfTargetWdw, event=1, BLwdw=5, PostEvent_wdw=1, pbin=0.05, funcdirect=funcdirect)
#Save these files
save(EarlyVEHPreInf_ExcBins, file=paste(dataForRdir, "EarlyVEHPreInf_ExcBins.rdat", sep=""))
save(EarlyVEHPostInf_ExcBins, file=paste(dataForRdir, "EarlyVEHPostInf_ExcBins.rdat", sep=""))
save(EarlyVEHPreInf_InhBins, file=paste(dataForRdir, "EarlyVEHPreInf_InhBins.rdat", sep=""))
save(EarlyVEHPostInf_InhBins, file=paste(dataForRdir, "EarlyVEHPostInf_InhBins.rdat", sep=""))
save(EarlyAP5PreInf_ExcBins, file=paste(dataForRdir, "EarlyAP5PreInf_ExcBins.rdat", sep=""))
save(EarlyAP5PostInf_ExcBins, file=paste(dataForRdir, "EarlyAP5PostInf_ExcBins.rdat", sep=""))
save(EarlyAP5PreInf_InhBins, file=paste(dataForRdir, "EarlyAP5PreInf_InhBins.rdat", sep=""))
save(EarlyAP5PostInf_InhBins, file=paste(dataForRdir, "EarlyAP5PostInf_InhBins.rdat", sep=""))
### DEFINE WHAT UNITS QUALIFY AS CUE-EXCITED BASED ON THE BINS THEY'RE SIGNIFICANTLY CUE-EXCITED (criterion: 3 consecutive 50ms bins)
###############################################
### PROPORTION OF CUE EXCITED NEURONS
###############################################
# % of CUE-EXCITED UNITS
#This function tells me, based on the "ExcBins" matrix, which units qualify as cue-excited based on my criterion
# Flag each unit (column) of a bins-x-units 0/1 matrix from KC.sigbins as
# cue-excited. Only the first 10 rows (the first 500ms after cue onset, 50ms
# bins) are examined.
#
# Args:
#   excbybin:  matrix of 0/1 (or logical) values; rows = post-cue bins,
#              columns = units.
#   threshold: minimum number of significantly excited bins required
#              (default 3).
# Returns: logical vector, one element per column of excbybin.
#
# BUG FIX: the parameter was previously spelled `threhold` while the body
# referenced `threshold`, so the function errored (or silently captured a
# global `threshold`) whenever called. Callers use it positionally, so the
# rename is backward-compatible.
# NOTE(review): as written, the criterion is the TOTAL count of excited bins
# (sum of the lengths of all runs of 1s), not a single run of `threshold`
# consecutive bins as the "3 consecutive 50ms bins" comment above suggests.
# If a consecutive run is intended, use any(oneruns >= threshold) instead of
# the sum — confirm against the analysis plan. Behavior kept as-is here.
CueExcIndex <- function(excbybin, threshold=3){
  sapply(seq(1, ncol(excbybin)), function(x){
    exc <- as.numeric(excbybin[1:10, x])
    runs <- rle(exc)                              # run-length encode the 0/1 sequence
    oneruns <- runs$lengths[runs$values==1]       # lengths of runs of excited bins
    cueexc <- FALSE
    if(length(oneruns)>0 && sum(oneruns)>=threshold){cueexc <- TRUE}
    cueexc
  })
}
# Logical index of cue-excited units per group/epoch, using the sigbins criterion.
EarlyVEHPreInf_ExcUnits <- CueExcIndex(EarlyVEHPreInf_ExcBins)
EarlyVEHPostInf_ExcUnits <- CueExcIndex(EarlyVEHPostInf_ExcBins)
EarlyAP5PreInf_ExcUnits <- CueExcIndex(EarlyAP5PreInf_ExcBins)
EarlyAP5PostInf_ExcUnits <- CueExcIndex(EarlyAP5PostInf_ExcBins)
# 2x2 contingency tables (rows = Pre/Post, cols = FALSE/TRUE counts) and tests of
# whether the proportion of cue-excited units changed pre vs post infusion.
contTable_EarlyVEH <- t(data.frame(Pre=as.matrix(table(EarlyVEHPreInf_ExcUnits)), Post=as.matrix(table(EarlyVEHPostInf_ExcUnits))))
contTable_EarlyAP5 <- t(data.frame(Pre=as.matrix(table(EarlyAP5PreInf_ExcUnits)), Post=as.matrix(table(EarlyAP5PostInf_ExcUnits))))
chisq.test(contTable_EarlyVEH) #X-squared = 0, df = 1, p-value = 1
chisq.test(contTable_EarlyAP5) #X-squared = 0.28202, df = 1, p-value = 0.5954
fisher.test(contTable_EarlyVEH) #CI: 0.1557766 18.7666075, odds ratio= 1.613716, p-value = 1
fisher.test(contTable_EarlyAP5) #CI: 0.2007157 2.1061495, odds ratio=0.6585366, p-value = 0.5959
#Early VEH
# Hand-drawn bar chart: proportion of cue-excited units pre (gray) vs post (group color).
plot.new()
par(mar=c(2, 6, 2, 2))
plot.window(xlim=c(0, 2), ylim=c(0, 1))
rect(xleft=0, xright=1, ybottom=0, ytop=sum(EarlyVEHPreInf_ExcUnits)/length(EarlyVEHPreInf_ExcUnits), col="gray30", border = F)
rect(xleft=1, xright=2, ybottom=0, ytop=sum(EarlyVEHPostInf_ExcUnits)/length(EarlyVEHPostInf_ExcUnits), col=colindx[1], border= F)
axis(side=1, at=c(0.5, 1.5), tick = F, labels=c("Pre", "Post"), cex.axis=1.5, font=2)
axis(side=2, at=seq(0, 1, 0.25), cex.axis=1.4, las=2)
mtext(side=2, line=4, text="Proportion", cex=1.5, font=2)
#Early AP5
# Same chart for the AP5 group.
plot.new()
par(mar=c(2, 6, 2, 2))
plot.window(xlim=c(0, 2), ylim=c(0, 1))
rect(xleft=0, xright=1, ybottom=0, ytop=sum(EarlyAP5PreInf_ExcUnits)/length(EarlyAP5PreInf_ExcUnits), col="gray30", border = F)
rect(xleft=1, xright=2, ybottom=0, ytop=sum(EarlyAP5PostInf_ExcUnits)/length(EarlyAP5PostInf_ExcUnits), col=colindx[2], border= F)
axis(side=1, at=c(0.5, 1.5), tick = F, labels=c("Pre", "Post"), cex.axis=1.5, font=2)
axis(side=2, at=seq(0, 1, 0.25), cex.axis=1.4, las=2)
mtext(side=2, line=4, text="Proportion", cex=1.5, font=2)
############################################################################
#Plot % bins excited/inhibited before and after infusion of VEH or AP5
############################################################################
#Function to calculate the percentage of units exc/inh to apply on the objects that I created with KC.sigbins.R and KC.inhib.sigbins.R
# Proportion of units significantly modulated in each bin.
# sigBinData: 0/1 matrix from KC.sigbins.R / KC.inhib.sigbins.R
#             (rows = bins, columns = units).
# Returns an unnamed numeric vector, one proportion per row (bin).
PercBins <- function(sigBinData){
  nUnits <- ncol(sigBinData)
  vapply(seq_len(nrow(sigBinData)), function(binRow){
    sum(sigBinData[binRow, ]) / nUnits
  }, numeric(1))
}
#Early VEH All units
# % of units excited (upper half) / inhibited (lower half, plotted negative) per
# 50ms bin; gray = pre-infusion, colored = post-infusion. x axis spans 0-1 s.
plot.new()
plot.window(xlim = c(0, nrow(EarlyVEHPreInf_ExcBins)), ylim=c(0, 1))
abline(h=seq(-1, 1, by=0.25), col="gray90")
lines(x=seq(1, nrow(EarlyVEHPreInf_ExcBins)), y=PercBins(EarlyVEHPreInf_ExcBins), col="gray30", lwd=2)
lines(x=seq(1, nrow(EarlyVEHPostInf_ExcBins)), y=PercBins(EarlyVEHPostInf_ExcBins), col="blue", lwd=2)
lines(x=seq(1, nrow(EarlyVEHPreInf_InhBins)), y=-PercBins(EarlyVEHPreInf_InhBins), col="gray30", lwd=2)
lines(x=seq(1, nrow(EarlyVEHPostInf_InhBins)), y=-PercBins(EarlyVEHPostInf_InhBins), col="blue", lwd=2)
axis(side=1, at=seq(0, nrow(EarlyVEHPreInf_ExcBins), by=10), labels=seq(0, 1, by=0.5), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, 1, by=0.5), labels=seq(0, 100, 50), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, -1, by=-0.5), labels=seq(0, 100, 50), cex.axis=1.4)
mtext(side=1, text="Time from S+ onset (s)", font=2, cex=1.5, line=2.5)
mtext(side=2, text="% Excited", at=0.5, font=2, cex = 1.5, line=2.5)
mtext(side=2, text="% Inhibited", at=-0.5, font=2, cex = 1.5, line=2.5)
#Early VEH Cue-excited units only
# Same plot restricted to units classified cue-excited pre-infusion
# (columns subset by EarlyVEHPreInf_ExcUnits).
plot.new()
plot.window(xlim = c(0, nrow(EarlyVEHPreInf_ExcBins[,EarlyVEHPreInf_ExcUnits])), ylim=c(0, 1))
abline(h=seq(-1, 1, by=0.25), col="gray90")
lines(x=seq(1, nrow(EarlyVEHPreInf_ExcBins)), y=PercBins(EarlyVEHPreInf_ExcBins[,EarlyVEHPreInf_ExcUnits]), col="gray30", lwd=2)
lines(x=seq(1, nrow(EarlyVEHPostInf_ExcBins)), y=PercBins(EarlyVEHPostInf_ExcBins[,EarlyVEHPreInf_ExcUnits]), col="blue", lwd=2)
axis(side=1, at=seq(0, nrow(EarlyVEHPreInf_ExcBins), by=10), labels=seq(0, 1, by=0.5), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, 1, by=0.5), labels=seq(0, 100, 50), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, -1, by=-0.5), labels=seq(0, 100, 50), cex.axis=1.4)
mtext(side=1, text="Time from S+ onset (s)", font=2, cex=1.5, line=2.5)
mtext(side=2, text="% Excited", at=0.5, font=2, cex = 1.5, line=2.5)
#Early AP5 All units
plot.new()
plot.window(xlim = c(0, nrow(EarlyAP5PreInf_ExcBins)), ylim=c(0, 1))
abline(h=seq(-1, 1, by=0.25), col="gray90")
lines(x=seq(1, nrow(EarlyAP5PreInf_ExcBins)), y=PercBins(EarlyAP5PreInf_ExcBins), col="gray30", lwd=2)
lines(x=seq(1, nrow(EarlyAP5PostInf_ExcBins)), y=PercBins(EarlyAP5PostInf_ExcBins), col="red", lwd=2)
lines(x=seq(1, nrow(EarlyAP5PreInf_InhBins)), y=-PercBins(EarlyAP5PreInf_InhBins), col="gray30", lwd=2)
lines(x=seq(1, nrow(EarlyAP5PostInf_InhBins)), y=-PercBins(EarlyAP5PostInf_InhBins), col="red", lwd=2)
axis(side=1, at=seq(0, nrow(EarlyVEHPreInf_ExcBins), by=10), labels=seq(0, 1, by=0.5), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, 1, by=0.5), labels=seq(0, 100, 50), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, -1, by=-0.5), labels=seq(0, 100, 50), cex.axis=1.4)
mtext(side=1, text="Time from S+ onset (s)", font=2, cex=1.5, line=2.5)
mtext(side=2, text="% Excited", at=0.5, font=2, cex = 1.5, line=2.5)
mtext(side=2, text="% Inhibited", at=-0.5, font=2, cex = 1.5, line=2.5)
#Early AP5 Cue-excited units only
plot.new()
plot.window(xlim = c(0, nrow(EarlyAP5PreInf_ExcBins[,EarlyAP5PreInf_ExcUnits])), ylim=c(0, 1))
abline(h=seq(-1, 1, by=0.25), col="gray90")
lines(x=seq(1, nrow(EarlyAP5PreInf_ExcBins)), y=PercBins(EarlyAP5PreInf_ExcBins[,EarlyAP5PreInf_ExcUnits]), col="gray30", lwd=2)
lines(x=seq(1, nrow(EarlyAP5PostInf_ExcBins)), y=PercBins(EarlyAP5PostInf_ExcBins[,EarlyAP5PreInf_ExcUnits]), col=colindx[2], lwd=2)
axis(side=1, at=seq(0, nrow(EarlyVEHPreInf_ExcBins), by=10), labels=seq(0, 1, by=0.5), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, 1, by=0.5), labels=seq(0, 100, 50), cex.axis=1.4)
axis(side=2, las=2, at=seq(0, -1, by=-0.5), labels=seq(0, 100, 50), cex.axis=1.4)
mtext(side=1, text="Time from S+ onset (s)", font=2, cex=1.5, line=2.5)
mtext(side=2, text="% Excited", at=0.5, font=2, cex = 1.5, line=2.5)
########
#Dot plot (or boxplot) that says: of the cue-excited neurons, during what % of bins were those units excited before and after injection
EarlyVEHPre_cueExcOnly_ExcPerBin <- EarlyVEHPreInf_ExcBins[,EarlyVEH_ExcUnits]
EarlyVEHPost_cueExcOnly_ExcPerBin <- EarlyVEHPostInf_ExcBins[,EarlyVEH_ExcUnits]
EarlyVEHPre_ExcDotPlot <- colSums(EarlyVEHPre_cueExcOnly_ExcPerBin)/nrow(EarlyVEHPre_cueExcOnly_ExcPerBin)
EarlyVEHPost_ExcDotPlot <- colSums(EarlyVEHPost_cueExcOnly_ExcPerBin)/nrow(EarlyVEHPost_cueExcOnly_ExcPerBin)
EarlyAP5Pre_cueExcOnly_ExcPerBin <- EarlyAP5PreInf_ExcBins[,EarlyAP5_ExcUnits]
EarlyAP5Post_cueExcOnly_ExcPerBin <- EarlyAP5PostInf_ExcBins[,EarlyAP5_ExcUnits]
EarlyAP5Pre_ExcDotPlot <- colSums(EarlyAP5Pre_cueExcOnly_ExcPerBin)/nrow(EarlyAP5Pre_cueExcOnly_ExcPerBin)
EarlyAP5Post_ExcDotPlot <- colSums(EarlyAP5Post_cueExcOnly_ExcPerBin)/nrow(EarlyAP5Post_cueExcOnly_ExcPerBin)
#Make dotplot
# Draw one box-plot glyph on an existing base-graphics plot.
#
# data  - numeric vector to summarise
# xmin, xmax - horizontal extent of the box
# color - colour used for the box outline and the median line
#
# Draws the interquartile-range rectangle, the median line (in `color`)
# and the mean line (always black). Called for its side effects only.
makeBoxPlot <- function(data, xmin, xmax, color){
  stats <- summary(data)  # Min., 1st Qu., Median, Mean, 3rd Qu., Max.
  # Box spans the first to third quartile.
  rect(xleft = xmin, xright = xmax, ybottom = stats[[2]], ytop = stats[[5]],
       border = color, lwd = 2)
  # Median in the group colour; mean in black so the two are distinguishable.
  segments(x0 = xmin, x1 = xmax, y0 = stats[[3]], col = color, lwd = 2)
  segments(x0 = xmin, x1 = xmax, y0 = stats[[4]], col = "black", lwd = 2)
}
# Draw the pre/post box-plot comparison for both groups on one canvas:
# VEH pair on the left (x = 0-0.7), AP5 pair on the right (x = 1-1.7).
plot.new()
plot.window(xlim=c(0, 2), ylim=c(0, 0.5))
makeBoxPlot(data=EarlyVEHPre_ExcDotPlot, xmin=0, xmax=0.3, color=colindx[1])
makeBoxPlot(data=EarlyVEHPost_ExcDotPlot, xmin=0.4, xmax=0.7, color=colindx[1])
makeBoxPlot(data=EarlyAP5Pre_ExcDotPlot, xmin=1, xmax=1.3, color=colindx[2])
makeBoxPlot(data=EarlyAP5Post_ExcDotPlot, xmin=1.4, xmax=1.7, color=colindx[2])
axis(side=1, at=c(0.15, 0.55), labels =c("Pre", "Post"), cex.axis=1.5, font=2)
axis(side=1, at=c(1.15, 1.55), labels =c("Pre", "Post"), cex.axis=1.5, font=2)
axis(side=2, las=2, cex.axis=1.4)
mtext(side=2, line=3, text="% excited bins", cex=1.5, font=2)
# Paired Wilcoxon signed-rank tests, pre vs. post infusion per unit.
# Recorded results: VEH shows no change; AP5 shows a significant change.
wilcox.test(EarlyVEHPre_ExcDotPlot, EarlyVEHPost_ExcDotPlot, paired=T) #V = 6.5, p-value = 0.8918
wilcox.test(EarlyAP5Pre_ExcDotPlot, EarlyAP5Post_ExcDotPlot, paired=T) #V = 194.5, p-value = 0.0008807
|
9285175071535db093eb4c3fa115636fbd12d746
|
27cf5892118e03c3102ae3bb87c863820b36fa09
|
/Basics/SVM.R
|
2ad360d168b580a7bc197466e0881c92f9d904fb
|
[] |
no_license
|
Mayuri666/Datascience_R_Codes
|
76cd3bd11591dd8afc65a4aa8663446502da89e3
|
82392e03a566e601f63973117f51949879a7d452
|
refs/heads/master
| 2020-12-29T14:42:39.830894
| 2020-06-16T16:31:45
| 2020-06-16T16:31:45
| 238,641,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
SVM.R
|
####support vector machine###
##optical character recognition
# Train SVM letter classifiers with kernlab on image-derived features,
# then compare a linear kernel against an RBF kernel on held-out data.
letterdata<-read.csv("C:/Users/Mayuri/Desktop/R/letterdata.csv")
#divide into training and test data
# First 16,000 rows train, last 4,000 rows test — assumes the CSV rows are
# already in random order (TODO confirm).
letters_train<-letterdata[1:16000,]
lettersdata_test<-letterdata[16001:20000,]
#training a model on the data
#begin by training a simple linear SVM
# "vanilladot" = linear kernel.
library(kernlab)
letter_classifier<-ksvm(letter~.,data=letters_train,kernel="vanilladot")
##evaluating model performance
#prediction on test data
letter_predictions<-predict(letter_classifier,lettersdata_test)
head(letter_predictions)
# Confusion matrix: predicted letter vs. actual letter.
table(letter_predictions,lettersdata_test$letter)
# Overall accuracy as the proportion of correct predictions.
agreement<-letter_predictions==lettersdata_test$letter
prop.table(table(agreement))
#improving model performance
# "rbfdot" = Gaussian (radial basis function) kernel.
letter_classifier_rbf<-ksvm(letter~.,data=letters_train,kernel="rbfdot")
letter_predictions_rbf<-predict(letter_classifier_rbf,lettersdata_test)
head(letter_predictions_rbf)
agreement_rbf<-letter_predictions_rbf==lettersdata_test$letter
table(agreement_rbf)
prop.table(table(agreement_rbf))
|
dbd321260b5f307f168992d95582f14f6ecdc01c
|
5c81fca53d67d7d542b249e358867302b8785aef
|
/chapter3.R
|
fa38883db3048eb844ffdb6b68da1ade8a1c3981
|
[
"MIT"
] |
permissive
|
standardgalactic/Principles-of-Strategic-Data-Science
|
ddc5fca95ebf6325e13a555c256d1a79f50f690e
|
076f1bf0e6c97d3948a74ea43e1e7aeb2fdba396
|
refs/heads/main
| 2023-02-22T20:59:14.546675
| 2021-01-22T08:58:20
| 2021-01-22T08:58:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
chapter3.R
|
## Chapter 3
# Datasaurus Dozen demonstration: visually different point clouds share
# near-identical summary statistics.
library(tidyverse)
library(datasauRus)

# Small-multiples scatter plot of six of the datasets.
ggplot(filter(datasaurus_dozen, dataset %in% c("dino", "away", "star", "bullseye", "slant_up", "dots")), aes(x = x, y = y)) +
  geom_point(colour = "#002859") +
  theme_void(base_size = 20) +
  theme(legend.position = "none") +
  facet_wrap(~dataset, ncol=3)
ggsave("../manuscript/images/figure13_Datasuarus.png",
       width = 6, height = 4, units = "in")

# Per-dataset summaries: means and correlations are almost identical even
# though the shapes differ wildly.
datasaurus_dozen %>%
  group_by(dataset) %>%
  summarise(meanX = mean(x),
            meanY = mean(y),  # bug fix: was mean(x), duplicating meanX
            cor = cor(x, y))

# Correlation matrix of the numeric columns (drops the dataset label).
cor(datasaurus_dozen[,-1])
|
61f48f9725609c1b55c9fcaf357afe3a8af82f8c
|
1455df4c711d01ffb2f92a0141e541c8650068a7
|
/man/bivariate.mixalg.Rd
|
ec454ef36d52aedce3ae61657cd68aa073b97060
|
[] |
no_license
|
cran/CAMAN
|
f3b9528fdb3c9bdbb68493e1b76b97340b4ebbb9
|
1321cb8e6fcc69c38eb403b1a3882ba875414559
|
refs/heads/master
| 2023-04-13T23:43:11.142110
| 2023-04-10T22:50:12
| 2023-04-10T22:50:12
| 17,678,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,957
|
rd
|
bivariate.mixalg.Rd
|
\name{bivariate.mixalg}
\alias{bivariate.mixalg}
\title{ EM algorithm and classification for univariate data, for bivariate data and for meta data }
\description{
Fits a finite mixture model via the EM algorithm and optionally classifies
the observations.  Supports bivariate data and (diagnostic) meta-analysis
data.}
\usage{
bivariate.mixalg(obs1, obs2, type, data = NULL,
                 var1, var2, corr, lambda1, lambda2,
                 p, startk, numiter=5000, acc=1.e-7, class)}
\arguments{
  \item{obs1}{the first column of the observations}
  \item{obs2}{the second column of the observations}
  \item{type}{kind of data}
  \item{data}{an optional data frame}
  \item{var1}{variance of the first column of the observations (except meta-analysis)}
  \item{var2}{variance of the second column of the observations (except meta-analysis)}
  \item{corr}{correlation coefficient}
  \item{lambda1}{means of the first column of the observations}
  \item{lambda2}{means of the second column of the observations}
  \item{p}{probability (mixing weights)}
  \item{startk}{ starting/maximal number of components. This number will be used to compute the grid in the VEM. Default is 20.}
  \item{numiter}{parameter to control the maximal number of iterations in the VEM and EM loops. Default is 5000.}
  \item{acc}{convergence criterion. Default is 1.e-7}
  \item{class}{ classification of studies }
}
\examples{
\dontrun{
#1.EM and classification for bivariate data
#Examples
data(rs12363681)
test <- bivariate.mixalg(obs1=x, obs2=y, type="bi",
lambda1=0, lambda2=0, p=0,
data=rs12363681, startk=20, class="TRUE")
#scatter plot with ellipse
plot(test)
#scatter plot without ellipse
plot(test, ellipse = FALSE)
#2.EM and classification for meta data
#Examples
data(CT)
bivariate.mixalg(obs1=logitTPR, obs2=logitTNR,
var1=varlogitTPR, var2=varlogitTNR,
type="meta", lambda1=0, lambda2=0,
p=0,data=CT,startk=20,class="TRUE")
}
}
\keyword{models}
\concept{mixture model}
|
2de9078fac4c34d82bc96188ee22d97c3e49e560
|
6203d49517402d700771023ce2f0644eb599c0a6
|
/Lake Shenandoah PIT array.R
|
95c0365383da8f57df0b48221d85cbc69b991ba1
|
[] |
no_license
|
Petersen-n/Shenandoah_Ladder
|
8904341fd2049e0146465f17484098e97bfb0be5
|
4937440fe61d34424986a3e386580535e099fbf0
|
refs/heads/master
| 2022-12-02T04:54:02.944009
| 2020-08-17T17:42:14
| 2020-08-17T17:42:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,660
|
r
|
Lake Shenandoah PIT array.R
|
##########################################
#####Lake Shenandoah PIT Tagging Data#####
##########################################
# Collates PIT-antenna detection files, computes detection efficiency and
# first/last detections per tag, and summarises fishway use by year/antenna.
library(PITR)
library("xlsx")
library(dplyr)
library(lubridate)
##set working directory for file locations##
setwd("Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis")
#####2013 files are from firmware earlier than V5 (April 2014)
old <- "Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis/array_old"
######2014 files are from current firmware
new <- "Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis/array_new"
######assign text tag numbers#####
# NOTE(review): "0000_0000000174764573" appears twice and the last entry
# "0000_00000181177608" has fewer digits than the others — likely typos; confirm.
tt <- c("0000_0000000174764544","0000_0000000174764573", "0000_0000000180573686", "0000_0000000181177608", "0000_0000000174764573", "0000_00000181177608")
####collate the data####
old_dat <- old_pit(data=old, test_tags = tt, print_to_file = FALSE)
alldat <- new_pit(data=new, test_tags = tt, print_to_file = FALSE, old_format_data = old_dat)
tag_fish<- alldat$all_det ####dataframe with only detections of tagged fish
tag_fish<- filter(tag_fish, tag_code != "900_226000135123") ##tag used for test on exit antenna
tag_fish<- filter(tag_fish, tag_code != "900_226000135118") ##tag used for test on exit antenna
tag_all<-alldat$multi_data ##dataframe with all detections (test tags and tagged fish)
######rename antennas so that A1 is downstream and A4 is upstream##############
rename_one <- array_config(data = tag_fish, configuration = "rename_antennas",
reader_name = "R1", ao1 = "1", an1 = "4")
rename_two <- array_config(data = rename_one, configuration = "rename_antennas",
reader_name = "R1", ao1 = "3", an1 = "1")
##############calculate detection efficiency##################################
# Weekly upstream detection efficiency within each season's monitoring window.
efficiency_2013_week <-det_eff(data=rename_two, resolution = "week", by_array = FALSE,
direction = "up", start_date = "2013-04-15 12:27:23",
end_date = "2013-06-19 05:24:25")
efficiency_2014_week <-det_eff(data=rename_two, resolution = "week", by_array = FALSE,
direction = "up", start_date = "2014-04-11 07:27:28",
end_date = "2014-07-01 09:48:08")
write.xlsx(efficiency_2013_week, "Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis/detection_efficiency.xlsx", sheetName = "2013",
row.names=FALSE, showNA = TRUE)
write.xlsx(efficiency_2014_week, "Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis/detection_efficiency.xlsx", sheetName = "2014",
row.names=FALSE, append = TRUE, showNA = TRUE)
####################### determine first and last hits on an antenna#################
fi_la2013 <- first_last(data=rename_two, resolution = "year",
start_date = "2013-04-15 12:27:23", end_date = "2013-06-19 05:24:25")
fi_la2014 <-first_last(data=rename_two, resolution = "year", start_date = "2014-04-11 07:27:28",
end_date = "2014-07-01 09:48:08")
write.xlsx(fi_la2013, "Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis/first_last_detections.xlsx", sheetName = "2013",
row.names=FALSE)
write.xlsx(fi_la2014, "Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis/first_last_detections.xlsx", sheetName = "2014",
row.names=FALSE, append = TRUE)
########################fishway efficiency####################
# Join tagging metadata to detections; tag_code is "900_" + the tag number.
fish <-read.xlsx("Z:/BBP Projects/Herring work/Lake Shenandoah ladder/Analysis/tagged fish.xlsx", sheetName = "Sheet1", as.data.frame=TRUE, header=TRUE)
fish$tag <- '900_'
fish$Tag..<- as.character((fish$Tag..))
fish$tag_code <- paste(fish$tag, fish$Tag.., sep = "")
fish<- fish %>%
rename('Tag_date'='Date') %>%
select (-'tag', -'Tag..')
detect <- left_join(tag_fish, fish, by = 'tag_code')
detect$year<- as.factor(format(detect$date_time, format = "%Y"))
# Number of distinct tagged fish detected per year.
detect_by_year <-detect %>%
group_by(year) %>%
summarise(
n_distinct(tag_code)
)
#ant 1 is upstream, 3 is downstream
# NOTE(review): this comment contradicts the renaming comment above
# ("A1 is downstream and A4 is upstream") — confirm which orientation is
# correct after array_config().
antenna_by_year <-detect %>%
group_by(year,antenna) %>%
summarise(
n_distinct(tag_code)
)
# 2014 detections: one row per fish, overall and per antenna.
detect2014 <- detect %>%
filter(year == "2014") %>%
distinct(tag_code, .keep_all = TRUE) %>%
arrange(Species)
detect2014A3 <- detect %>%
filter(year == "2014" & antenna == 3) %>%
distinct(tag_code, .keep_all = TRUE) %>%
arrange(Species)
detect2014A2 <- detect %>%
filter(year == "2014" & antenna == 2) %>%
distinct(tag_code, .keep_all = TRUE) %>%
arrange(tag_code)
detect2014A1 <- detect %>%
filter(year == "2014" & antenna == 1) %>%
distinct(tag_code, .keep_all = TRUE) %>%
arrange(Species)
|
de0891fed2592baf67296705d9fc1d61917b79c7
|
446373433355171cdb65266ac3b24d03e884bb5d
|
/man/saga_polygonshapeindices.Rd
|
35f285c0b8868cce993bab87070b6e6b2d84bfde
|
[
"MIT"
] |
permissive
|
VB6Hobbyst7/r_package_qgis
|
233a49cbdb590ebc5b38d197cd38441888c8a6f3
|
8a5130ad98c4405085a09913b535a94b4a2a4fc3
|
refs/heads/master
| 2023-06-27T11:52:21.538634
| 2021-08-01T01:05:01
| 2021-08-01T01:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,047
|
rd
|
saga_polygonshapeindices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saga_polygonshapeindices.R
\name{saga_polygonshapeindices}
\alias{saga_polygonshapeindices}
\title{QGIS algorithm Polygon shape indices}
\usage{
saga_polygonshapeindices(
SHAPES = qgisprocess::qgis_default_value(),
INDEX = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{SHAPES}{\code{source} - Shapes. Path to a vector layer.}
\item{INDEX}{\code{vectorDestination} - Shape Index. Path for new vector layer.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifying whether the complete output of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or only the first output (most likely the main one) should be read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by SAGA Polygon shape indices (saga:polygonshapeindices)
}
\details{
\subsection{Outputs description}{
\itemize{
\item INDEX - outputVector - Shape Index
}
}
}
|
74cb0d9a5400b799da50f30598b882607f2fa09d
|
300164a320c3c7dd68e9df178f1f10d076d543fc
|
/man/url.exists.Rd
|
5c80fc8aca54c4b01dc6e122a26980111089679f
|
[] |
no_license
|
TuCai/phuse
|
948fbe71cd10c69dd825dd880b7da1c74a7e585e
|
7d55ffa8a2c5f5a87e06d8d7014446d1c323183b
|
refs/heads/master
| 2022-09-25T11:25:49.612286
| 2022-08-27T21:56:19
| 2022-08-27T21:56:19
| 104,520,487
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 621
|
rd
|
url.exists.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/url_exists.R
\name{url.exists}
\alias{url.exists}
\title{Check URL based on httr package}
\usage{
url.exists(
url = "https://github.com/phuse-org/phuse-scripts.git",
show = FALSE
)
}
\arguments{
\item{url}{a URL for a remote repository and default to
'https://github.com/phuse-org/phuse-scripts.git'}
\item{show}{boolean variable; defaults to FALSE}
}
\value{
TRUE or FALSE
}
\description{
Check if URL exists.
}
\examples{
url.exists('https://github.com/phuse-org/phuse-scripts.git')
}
\author{
Hanming Tu
}
|
e754660867b3577f17a6cc136f1d3b6074223712
|
86fec6bb6508d40823a0d13a3e21d150533ef718
|
/man/aa.Rd
|
6570c49c523e053bba599755cca945745220f5cc
|
[] |
no_license
|
cran/ra4bayesmeta
|
13a3db75206fdf536dda4143fa2f69bd92affc39
|
0f99279a852f29aed717c733781cf17a5745667b
|
refs/heads/master
| 2023-04-09T06:14:44.552987
| 2021-04-23T06:50:03
| 2021-04-23T06:50:03
| 360,933,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,171
|
rd
|
aa.Rd
|
\name{aa}
\alias{aa}
\docType{data}
\title{
Auricular acupuncture data
%% ~~ data name/kind ... ~~
}
\description{
Meta-analysis data set including 4 randomized, controlled trials comparing treatment completion among
cocaine dependents treated with auricular acupuncture versus sham acupuncture.
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{data(aa)}
\format{
A data frame with 4 observations on the following 7 variables:
\describe{
\item{\code{study}}{character string, label of the study}
%TODO: specify the direction of the OR
\item{\code{f.t}}{integer, number of persons who did not complete treatment among those treated with auricular acupuncture }
\item{\code{n.t}}{integer, number of persons treated with auricular acupuncture}
\item{\code{f.c}}{integer, number of persons who did not complete treatment among those treated with sham acupuncture}
\item{\code{n.c}}{integer, number of persons treated with sham acupuncture (control group)}
\item{\code{y}}{numeric, log odds ratio estimates for the individual trials}
\item{\code{sigma}}{numeric, standard error of the log odds ratio estimate for the individual trials}
}
}
\details{
This data set was originally analyzed by Gates et al. (2006) and reanalyzed by Bodnar et al. (2017).
The log odds ratio estimates and standard errors were computed using the \code{escalc} function in the
package \code{metafor}.
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
Bodnar, O., Link, A., Arendacka, B., Possolo, A., Elster, C. (2017). Bayesian estimation in random effects meta-analysis using a non-informative prior.
\emph{Statistics in Medicine} \bold{36}, 378--399.
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
Gates S, Smith LA, Foxcroft D. Auricular acupuncture for cocaine dependence. \emph{Cochrane Database of Systematic Reviews 2006},
Issue 1. Art. No.: CD005192. \doi{10.1002/14651858.CD005192.pub2}
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(aa)
str(aa)
# forest plot
forest(x=aa$y, sei=aa$sigma, xlab="log odds ratio")
}
\keyword{datasets}
|
834b7115145364c213c30aec647d32d38cd90f8c
|
bae1fac453e2dab83be5be06d4361a79a18d331f
|
/TobiasDienerowitz/r/curation.R
|
088a60b2d6a3c6d26276aeab42ad602966f817c0
|
[] |
no_license
|
TPeschel/life-for-postgraduates
|
95d96cf4f1cfc3753164814aad0493f10ee5cb37
|
cd77d99badb188c4164fc5fa9397b28795049673
|
refs/heads/master
| 2021-01-20T08:20:26.815652
| 2017-12-15T17:11:58
| 2017-12-15T17:11:58
| 90,131,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,483
|
r
|
curation.R
|
# Curation script: load the main table and exclusion lists, drop non-usable
# observations, recode profile types, and export the result.
# NOTE(review): rm(list = ls()) wipes the workspace — avoid in shared scripts.
rm( list = ls( ) )
hlpr4life::load.pkgs(
c(
"hlpr4life",
"lifecuration",
"ggplot2",
"dplyr",
"reshape2" ) )
# Loads main.1205 (and presumably other objects) into the workspace.
load( "sent/2017.09.14/raw.dat.Rd" )
load( "data/main/main.table.2017.09.29.Rd" )
names( main.1205 )
# NOTE(review): filename ends with a bare "." — looks truncated; confirm path.
obs.non.usable <-
read.csv( "sent/2017.09.14/non_usable_obs." )
names( obs.non.usable )
# Repair column names: replace the "NA." prefix with "T00865.".
names( obs.non.usable ) <-
gsub( "NA\\.", "T00865\\.", names( obs.non.usable ) )
# Build pseudonym+group keys for the rows to exclude.
out <-
paste0( obs.non.usable$PSEUDONYM, obs.non.usable$C_ANTHRO_KH_GRP )
main.1205.only.usable <-
main.1205[ !paste0( main.1205$PSEUDONYM, main.1205$C_ANTHRO_KH_GRP ) %in% out, ]
obs.extr <-
read.csv( "sent/2017.09.14/obs_extremes" )
names( obs.extr )
names( obs.extr ) <-
gsub( "NA\\.", "T00865\\.", names( obs.extr ) )
# NOTE(review): `res` is not defined in this script — presumably loaded by one
# of the load() calls above; confirm.
# Collapse any profile-type code above 255 to 3.
res$proftype[ 255 < res$proftype ] <-
3
nams <- c(
"singing silent", ## 0
"singing loud", ## 1
"speaking point cloud maximum", ## 2
"speaking type point", ## 3
"singing formant loud", ## 5
"speaking point cloud average" ) ## 6
# Map numeric profile-type codes to their labels.
res$pt <-
nams[ match( res$proftype, c( 0, 1, 2, 3, 5, 6 ) ) ]
main.1205.only.usable$PSEUDONYM
# WriteXLS( main.1205, ExcelFileName = "data/main/PV0278_datajoin_20170929.xlsx" ) #doesn't save correctly pseudonyms
# NOTE(review): writes the UNFILTERED main.1205 to a file named
# "main.1205.only.usable.xlsx" — likely should be main.1205.only.usable; confirm.
write.xlsx( main.1205, file = "sent/2017.09.14/main.1205.only.usable.xlsx" )
save( main.1205.only.usable, file = "sent/2017.09.14/main.1205.only.usable.Rd" )
|
e6ba5be3704103c31a02eebdec64cab48cf582f1
|
dd4cbe0993d048caf2d42a36ba366cd160e81c98
|
/R_code/AverageReads_boxplot.r
|
911a8b662e45d3bc75048b2ebfac69a073f3a1c8
|
[] |
no_license
|
WilsonSayresLab/XY_RNAseq
|
d4d566d90b2d19d01ed95d08896598513d85eb20
|
e0952a303f9bf4fa4f5095ea16f0ff168a3d62c8
|
refs/heads/master
| 2021-01-20T02:16:31.827701
| 2018-12-11T20:56:19
| 2018-12-11T20:56:19
| 101,312,363
| 3
| 3
| null | 2017-08-24T16:36:17
| 2017-08-24T15:49:19
| null |
UTF-8
|
R
| false
| false
| 55
|
r
|
AverageReads_boxplot.r
|
# Averagere reads mapped per trimming per tissue type
|
eff4f9ab5461524ffdcd48143c9303352d6efb80
|
c1463667cf3ff1057859b4bbd956b7e1737bc187
|
/Deploying an RShiny app.R
|
11b11dfc40b81acf76f541a6787f8ddd3da86185
|
[] |
no_license
|
kshirasaagar/R-U-Ready
|
0c6ce8d8d0bb297a754d2229c86ff268755720d7
|
1caf81814cdd9cc779771f763f34bbfa2fc424c9
|
refs/heads/master
| 2021-08-03T19:23:51.091305
| 2021-01-28T23:06:36
| 2021-01-28T23:06:36
| 30,675,127
| 1
| 1
| null | 2021-01-28T23:08:06
| 2015-02-11T23:24:22
|
R
|
UTF-8
|
R
| false
| false
| 126
|
r
|
Deploying an RShiny app.R
|
#Deploying an RShiny app
# NOTE(review): the 'shinyapps' package has been superseded by 'rsconnect';
# confirm which is installed before running.
library(shinyapps)
shiny::runApp()                      # run the app locally first
shinyapps::deployApp('ui.R')         # deploy a single-file app
shinyapps::deployApp('D:\\shiny')    # deploy an app directory
|
9610dfb5887e2e9496bde476b81bc2fccf71b214
|
9bd345ce451e3710781394dfd90c6e187395d11b
|
/man/ProbLFacil.Rd
|
959cfe51b67f93d6e2bea9804ceb0a2ceaed430e
|
[
"MIT"
] |
permissive
|
farias741/Lotto
|
76a4b63317c5eb513aef6e4185e62ca3dd690727
|
00539ad7817a5f4d47873d1267ee2be679bcd917
|
refs/heads/master
| 2023-07-20T14:50:42.943991
| 2021-09-02T14:15:39
| 2021-09-02T14:15:39
| 376,154,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 763
|
rd
|
ProbLFacil.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ProbLFacil.R
\name{ProbLFacil}
\alias{ProbLFacil}
\title{Lotofacil lottery game probability}
\usage{
ProbLFacil(x, y)
}
\arguments{
\item{x}{number}
\item{y}{number}
}
\value{
number
}
\description{
The player simply chooses and dials between 15 and 20 numbers, among the 25 available,
the draw consists of extracting 15 different numbers, in the universe from 1 to 25. In this game,
the prizes are awarded to the winners of 11, 12, 13, 14 or 15 numbers.
}
\details{
\code{x} is the amount of numbers bet (between 15 and 20) and \code{y} is the
number of hits.  If \code{y} < 11 no prize is awarded, so no such probability
is computed; otherwise the function gives the probability of obtaining
\code{y} hits with a bet of \code{x} numbers.
}
\examples{
ProbLFacil(15,11)
}
|
da0813bcccc7edf665bb4e5d164cc861294e53fb
|
52364584b0ef82609cbaaed17cbbdc399fd0ae17
|
/Part4/graph1.R
|
9793772c8ed4e97aa478f791ee4c5296ebca2b19
|
[] |
no_license
|
Pis02/R-Lecture
|
b8bdedfe9d68079921b3e06a4d08823bab22c1a7
|
805d0bf171e3bc5867bc453d577076008ed1ea39
|
refs/heads/master
| 2020-05-30T22:06:56.577331
| 2019-06-10T08:48:20
| 2019-06-10T08:48:20
| 189,987,169
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,812
|
r
|
graph1.R
|
# Graphics basics
setwd("D:/Workspace/R_Data_Analysis/Part4") # setwd() sets the working directory
getwd() # getwd() shows the currently set working directory
var1 <- 1:5 # 1:5 is the same as c(1,2,3,4,5)
plot(var1) # plot() is the most frequently used graphics function in R
var1 <- 5:1 # express the values in 5:1 (descending) order
plot(var1)
var2 <- c(2,2,2)
plot(var2)
x <- 1:3
y <- 4:2
plot(x, y)
plot(x, y, xlim = c(0, 5), ylim = c(0, 5)) # xlim/ylim set the start and end of each axis
plot(x, y, xlim = c(0, 5), ylim = c(0, 5),
xlab = "X축값", ylab = "Y축값", main="Plat Test") # xlab/ylab set the axis titles; main sets the plot title
v1 <- c(100,130,120,160,150)
# Draw without axes/annotation, then add custom axes and titles.
plot(v1, type='o',col='red',ylim=c(0,200),axes=FALSE,ann=FALSE)
axis(1,at=1:5,lab=c("MON","TUE","WED","THU","FRI"))
axis(2,ylim=c(0,200))
title(main="FRUIT",col.main="red",font.main=4)
title(xlab = "Day",col.lab="black")
title(ylab = "PRICE",col.lab="blue")
v1 <- c(100,130,120,160,150)
par(mfrow=c(1,3)) # used to place several plots on one screen at the same time
# par(mfrow = c(nr, nc)) <- nr: number of rows, nc: number of columns
plot(v1,type="o")
plot(v1,type="s")
plot(v1,type="l")
v1 <- c(100,130,120,160,150)
par(mfrow=c(1,3))
pie(v1)
plot(v1,type="o")
barplot(v1)
par(mfrow=c(1,1))
a <- c(1,2,3)
plot(a,xlab = "aaa")
par(mgp=c(0,1,0)) # mgp = c(title position, axis-label position, axis-line position)
plot(a,xlab="aaa")
par(mgp=c(3,1,0))
plot(a,xlab="aaa")
par(mgp=c(3,2,0))
plot(a,xlab="aaa")
par(mgp=c(3,2,1))
plot(a,xlab="aaa")
par(oma=c(2,1,0,0)) # oma = c(bottom, left, top, right) adjusts the outer margins of the whole plot
plot(a,xlab="aaa")
par(oma=c(0,2,0,0))
plot(a,xlab="aaa")
par(mfrow=c(1,1))
v1 <- c(1,2,3,4,5)
v2 <- c(5,4,3,2,1)
v3 <- c(3,4,5,6,7)
plot(v1,type="s",col="red",ylim=c(1,7))
par(new=T) # par(new = T) lets the next plot be drawn on top of the current one
plot(v2,type="o",col="blue",ylim=c(1,7))
par(new=T) # overlay again
plot(v3,type="l",col="green",ylim=c(1,7))
par(mfrow=c(1,1))
v1 <- c(1,2,3,4,5)
v2 <- c(5,4,3,2,1)
v3 <- c(3,4,5,6,7)
plot(v1,type="s",col="red",ylim=c(1,10))
lines(v2,type="o",col="blue",ylim=c(1,5))
lines(v3,type="l",col="green",ylim=c(1,15)) # the y-axis title and labels come out much cleaner
# with lines() than with overlapping plot() calls via par(new = T), so this approach is recommended
v1 <- c(1,2,3,4,5)
v2 <- c(5,4,3,2,1)
v3 <- c(3,4,5,6,7)
plot(v1, type = "s", col = "red", ylim=c(1,10))
lines(v2, type = "o", col = "blue", ylim=c(1,5))
lines(v3, type = "l", col = "green", ylim=c(1,15))
legend(4,9,c("v1","v2","v3"),cex=0.9,col=c("red","blue","green"),lty=1)
# add a legend <- legend(x position, y position, labels, cex = text size, col = colours, pch = symbol, lty = line type)
x <- 1:5
barplot(x)
x <- 1:5
barplot(x,horiz=T) # horiz = TRUE draws the bars lying on their side (horizontally)
x <- matrix(c(5,4,3,2), 2,2) # matrix(c(5,4,3,2), 2,2) = matrix(c(5,4,3,2), nrow=2)
x
barplot(x,beside=F) # beside = FALSE case (stacked bars)
barplot(x,beside=T) # beside = TRUE case (grouped bars)
barplot(x,beside=T,names=c(5,3), col=c("green","yellow")) # beside = TRUE with bar names and colours
barplot(x,beside=T,names=c(5,3), col=c("green","yellow"),
horiz=T) # the same values drawn horizontally (horiz option)
par(oma=c(1,0.5,1,0.5)) # bottom, left, top, right margins
barplot(x,horiz=T,names=c(5,3),col=c("green","yellow"),xlim=c(0,10))
par(oma=c(0,0,0,0))
v1 <- c(100,120,140,160,180)
v2 <- c(120,130,150,140,170)
v3 <- c(140,170,120,110,160)
qty <- data.frame(BANANA=v1, CHERRY=v2, ORANGE=v3) # qty <- gather the vectors into one data frame
qty
barplot(as.matrix(qty),main="Fruit's Sales QTY",
beside=T, col=rainbow(nrow(qty)),ylim=c(0,400))
legend(14,400,c("MON","TUE","WED","THU","FRI"),cex=0.8,fill=rainbow(nrow(qty))) # legend
barplot(t(qty),main="Fruit's Sales QTY",ylim=c(0,900), # t() = transpose matrix
col=rainbow(length(qty)),space=0.1,cex.axis=0.8,las=1,
names.arg = c("MON","TUE","WED","THU","FRI"), cex=0.8)
legend(0.2,800,names(qty),cex=0.7,fill=rainbow(length(qty))) # legend <- errors if the position is not specified
peach <- c(180,200,250,198,170) # colour per bar: "red" if the value is >= 200, "yellow" for 180-199, "green" below that
colors <- c()
for(i in 1:length(peach)){ # length() returns the number of elements
if(peach[i]>=200){
colors <- c(colors,"red")
} else if(peach[i]>=180){
colors <- c(colors,"yellow")
} else{
colors <- c(colors,"green")
}
}
barplot(peach,main="Peach Sales QTY",
names.arg=c("MON","TUE","WED","THU","FRI"), col=colors)
height <- c(182, 175, 167, 172, 163, 178, 181, 166, 159, 155)
par(mfrow=c(1,2))
barplot(height)
hist(height) # histogram hist() = shows the frequency (counts) of the data as bars
par(mfrow=c(1,1))
p1 <- c(10,20,30,40)
pie(p1,radius=1) # radius = circle radius / slices are drawn counter-clockwise by default
# clockwise = TRUE draws clockwise / FALSE counter-clockwise
pie(p1, radius=1, init.angle = 90) # init.angle = 90 starts the first slice at 12 o'clock
pie(p1, radius=1, init.angle = 90, col=rainbow(length(p1)), # col sets the slice colours
label=c("Week1","Week2","Week3","Week4")) # names each slice
# NOTE(review): `label=` relies on partial matching of pie()'s `labels` argument.
pct <- round(p1/sum(p1) * 100,1) # print the percentages too; round(x, digits) rounds to the given number of digits
lab <- paste(pct," %") # paste(a, b) joins the two values into one string
pie(p1,radius=1,init.angle=90,col=rainbow(length(p1)),
label=lab)
legend(1,1.1,c("Week1","Week2","Week3","Week4"),
cex=0.5,fill=rainbow(length(p1)))
pct <- round(p1/sum(p1) * 100,1) # skip the legend and print the labels directly on the chart
lab1 <- c("Week1","Week2","Week3","Week4")
lab2 <- paste(lab1, "\n", pct, " %")
pie(p1,radius=1,init.angle=90,col=rainbow(length(p1)),label=lab2)
install.packages("plotrix") # the plotrix package must be installed to use pie3D()
library(plotrix)
p1 <- c(10,20,30,40,50)
f_day <- round(p1/sum(p1)*100,1)
f_label <- paste(f_day,"%")
pie3D(p1,main="3D Pie Chart",col=rainbow(length(p1)),
cex=0.5,labels=f_label,explode=0.05) # explode = parameter controlling the gap between pie slices
legend(0.5,1,c("MON","TUE","WED","THU","FRI"),cex=0.6,
fill=rainbow(length(p1)))
v1 <- c(10,12,15,11,20)
v2 <- c(5,7,15,8,9)
v3 <- c(11,20,15,18,13)
boxplot(v1,v2,v3)
boxplot(v1,v2,v3,col=c("blue","yellow","pink"),
names=c("Blue","Yellow","Pink"),
horizontal=T) # for boxplot() the option is named horizontal, not horiz
|
0237bfd897c6ef456397847bb49d271f3bb0ee46
|
b5dc57aabe210849d34f7fa336c67e4916fcf599
|
/man/rdd_robust_bw.Rd
|
dc26ee165b6e334dfcfbbdd73c7c84464a2870c2
|
[] |
no_license
|
shommazumder/shomR
|
ce9ad2ad591d095baa77ee38377376b79af080b5
|
29431d80e0f601b7186f89185ef95eeafcc60ae1
|
refs/heads/master
| 2020-03-21T09:38:54.136715
| 2018-12-20T19:14:34
| 2018-12-20T19:14:34
| 138,410,300
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 955
|
rd
|
rdd_robust_bw.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r_utils.R
\name{rdd_robust_bw}
\alias{rdd_robust_bw}
\title{Robustness to bandwidth choice}
\usage{
rdd_robust_bw(data, forcing, outcome, cutoff = 0, weights = NULL,
ylab = "Outcome", xlab = "Running Variable", bw = NULL, se = T)
}
\arguments{
\item{data}{A dataframe}
\item{forcing}{The forcing/running variable as a string}
\item{outcome}{The outcome variable as a string}
\item{cutoff}{The cutoff. Defaults to 0.}
\item{weights}{Any weights you want to use as a string. Defaults to NULL.}
\item{ylab}{The label for the y axis.}
\item{xlab}{The label for the x axis.}
\item{bw}{The bandwidth. Defaults to half the IK bw to twice the IK bw.}
\item{se}{Logical for whether to include the standard errors in the plot.}
}
\value{
a list of the plot (in ggplot) and the corresponding effect estimates and standard errors
}
\description{
Robustness to bandwidth choice
}
|
999bb4202a5b600de1efdd69b7569198fc434781
|
5df4825b51b52eec94fcd80fb5fea637f31e6e16
|
/tests/testthat/test-vignette-numbers.R
|
f56ca1bb1f4c67c94b991618e2ae918b891134ae
|
[
"MIT"
] |
permissive
|
tidyverse/tibble
|
4963bbc2f50ed2be854a68521bdc841700605c69
|
4de5c153ca5411fe2a02804a8cafb0edd9c664dc
|
refs/heads/main
| 2023-09-02T03:16:56.769783
| 2023-05-28T02:09:45
| 2023-05-28T02:09:45
| 45,148,983
| 578
| 144
|
NOASSERTION
| 2023-05-27T07:18:08
| 2015-10-28T23:57:00
|
R
|
UTF-8
|
R
| false
| false
| 60
|
r
|
test-vignette-numbers.R
|
# Regression test for the "numbers" vignette.
# NOTE(review): test_galley() is a package-internal helper (defined elsewhere);
# presumably it renders the named vignette and compares against a snapshot — confirm.
test_that("numbers vignette", {
test_galley("numbers")
})
|
6d27b6de317e7880ae999f005705119ac2d05062
|
0c45b9e10dcbc6401e661056972d4d7315c68e11
|
/branches/micEconIndexEKS/data-raw/generateData.R
|
eccea7ffeba53c6e965357ec102d41b33e189e1f
|
[] |
no_license
|
scfmolina/micecon
|
f102528c1f306f0d3906438de97ccca0b55b6f6e
|
a61aae42d5c5bfdd4d0e34c4f44171fbabcff148
|
refs/heads/master
| 2023-02-10T07:24:28.365680
| 2021-01-07T13:10:32
| 2021-01-07T13:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,417
|
r
|
generateData.R
|
# Generate a reproducible random price/quantity test data set and bundle it,
# together with reference TFPIP results, as internal package data.
# Run this part:
library(devtools)
set.seed(100)
nProd <- 10
nFirms <- 100
# Max: 26 (product names are single letters)
prodNames <- letters[1:nProd]
# Random prices and quantities, one column per product ("P<letter>" / "Q<letter>").
outputPriceMat <- matrix(runif(nProd * nFirms), ncol = nProd)
colnames(outputPriceMat) <- paste0("P", prodNames)
outputQuantMat <- matrix(runif(nProd * nFirms), ncol = nProd)
colnames(outputQuantMat) <- paste0("Q", prodNames)
# Shift into (1, 2) and round to 5 decimals so values are strictly positive.
outputPriceMat <- round(outputPriceMat + 1, 5)
outputQuantMat <- round(outputQuantMat + 1, 5)
# write.csv(as.data.frame(cbind(outputPriceMat, outputQuantMat) ),
#  #file = "~/tests/testthat/priceQuantMat.txt",
#  file = "~/svn/micEcon/pkg/micEconIndex/tests/testthat/priceQuantMat.txt",
#  row.names = FALSE)
priceQuantMat <- as.data.frame(cbind(outputPriceMat, outputQuantMat) )
# devtools::use_data(, internal = TRUE, overwrite = TRUE)
# Then pause and run TFPIP/calcTFPIP.R according to instructions there
# And then run the below:
load("data-raw/TFPIPcheck.Rdata", verbose = TRUE)
# NOTE(review): devtools::use_data() is deprecated in favour of
# usethis::use_data() — confirm before updating the toolchain.
devtools::use_data(TFPIPresult, priceQuantMat, internal = TRUE, overwrite = TRUE)
#quantityIndex( paste0("P", prodNames),
#  paste0("Q", prodNames), 1,
#  as.data.frame(cbind(outputPriceMat, outputQuantMat) ))
# Reference TFPIP control-file settings used to produce TFPIPcheck.Rdata:
#eg1-dta.txt DATA FILE NAME
#eg1-out.txt OUTPUT FILE NAME
#5 NUMBER OF OBSERVATIONS
#2 NUMBER OF OUTPUTS
#3 NUMBER OF INPUTS
#0 0=TORNQVIST AND 1=FISHER
#0 0=NON-TRANSITIVE AND 1=TRANSITIVE
|
5dbb63da54e51efd32313fa7c90b2648801ec197
|
74b185b24f88e23f3627f64dcce8b6ed660b8beb
|
/src/functions/HistoricalDataFunctions.R
|
06c6819550720ef59ce77c40d310e74f7ebcba36
|
[] |
no_license
|
CD-LINKS/factsheet
|
6765752288e0a58a52348542451d67afb13ec6a1
|
e12d3bf39d33a226fd291b67b64c9e22ff3cca88
|
refs/heads/master
| 2021-12-03T18:09:04.978258
| 2021-11-17T14:48:30
| 2021-11-17T14:48:30
| 57,221,386
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,637
|
r
|
HistoricalDataFunctions.R
|
CreateEmptyIEA_energy <- function(IEA_energy, regions_iea, iea_flows, iea_products)
{
# region, FLOW, PRODUCT, period
periods <- sort(unique(IEA_energy$period))
#iea_tfc_flows <- c("TFC", "TOTIND", "TOTTRANS", "RESIDENT", "COMMPUB",
# "ELMAINE","ELAUTOE", "ELMAINC", "ELAUTOC",
# "HEMAINH", "HEMAINC", "HEAUTOH", "HEAUTOC")
#iea_products <- c("GEOTHERM", "SOLARPV", "SOLARTH", "WIND", "OTHER",
# "PRIMSBIO", "CHARCOAL", "BIOGASES", "BIODIESEL", "BIOGASOL", "BIOJETKERO", "OBIOLIQ", "INDWASTE", "MUNWASTEN", "MUNWASTER",
# "NUCLEAR", "HYDRO",
# "ELECTR", "HEAT",
# "TOTAL")
iea_full_products <- unique(IEA_energy$PRODUCT)
iea_full_flows <- unique(IEA_energy$FLOW)
dim_tfc <- length(regions_iea)*length(iea_flows)*length(iea_products)*length(periods)
period <- rep(periods, length.out=dim_tfc)
PRODUCT <- rep(iea_products, each=length(periods), length.out=dim_tfc)
FLOW <- rep(iea_flows, each=length(periods)*length(iea_products), length.out=dim_tfc)
region <- rep(regions_iea, each=length(periods)*length(iea_products)*length(iea_flows), length.out=dim_tfc)
IEA_empty <- cbind(region, FLOW) %>% cbind(PRODUCT) %>% cbind(period)
IEA_empty <- as.data.frame(IEA_empty)
#IEA_empty$period <- as.numeric(IEA_empty$period)
IEA_empty$period <- as.integer(trimws(IEA_empty$period))
IEA_empty$PRODUCT <- factor(PRODUCT, levels=iea_full_products)
IEA_empty$FLOW <- factor(FLOW, levels=iea_full_flows)
IEA_empty$region <- factor(IEA_empty$region, levels=unique(IEA_energy$region))
# add other columns, same as IEA_energy
# region, FLOW, PRODUCT, period, value, unit, ISO3, IMAGE, TIMER_CARRIER
IEA_empty <- mutate(IEA_empty, value=0) %>% mutate(unit="TJ")
IEA_empty$unit <- factor(IEA_empty$unit)
IEA_empty <- select(IEA_empty, region, FLOW, PRODUCT, period, value, unit)
IEA_empty <- as.data.frame(IEA_empty)
}
ConvertPRIMAP2IAM <- function(data, CATEGORY="CAT0", ENTITY="KYOTOGHGAR4", variable="Emissions|Kyoto Gases")
{
data <- filter(data, category%in%CATEGORY, entity%in%ENTITY)
data <- select(data, scenario, region, category, entity, unit, num_range("X", 1990:2015))
colnames(data) = gsub("X", "", colnames(data))
data <- gather(data, 6:ncol(data), key="period", value=value)
data$value <- data$value/1000
data$unit <- "Mt"
data <- as.data.frame(data)
data <- group_by(data, region, entity, unit, period) %>%
summarise(value=sum(value))
data <- mutate(data, scenario="Historical") %>% mutate(Category="Historical") %>% mutate(Baseline="") %>% mutate(model="History") %>% mutate(Scope="") %>%
mutate(variable=variable)
data <- ungroup(data)
data$period <- as.numeric(as.character(data$period))
data <- select(data, scenario, Category, Baseline, model, region, period, Scope, value, unit, variable)
data <- as.data.frame(data)
}
ConvertIEA2IAM <- function(data, flow="TFC", product="TOTAL", variable="Final Energy")
{
data <- as.data.frame(data)
data <- filter(data, FLOW%in%flow, PRODUCT%in%product) %>%
group_by(region, period, unit) %>%
summarise(value=sum(value)) %>%
mutate(scenario="Historical") %>% mutate(Category="Historical") %>% mutate(Baseline="") %>% mutate(model="History") %>% mutate(Scope="") %>%
mutate(variable=variable) %>%
select(scenario, Category, Baseline, model, region, period, Scope, value, unit, variable)
# convert TJ/yr to EJ/yr
data$value <- 10^-6*data$value
data$unit <- "EJ/yr"
data <- as.data.frame(data)
}
|
9f5e256cbfbc9d4524af06a200896162495bdebd
|
520ee4fc8b70b11576f45a3822bba505bd453aa6
|
/fastq_pooled/REC8/peaks/PeakRanger1.18/ranger/MYC_Rep2_input_p0.05_q0.05/REC8_MYC_Rep1_peak_profiles/motifs/weeder2_bg_armranLoc_200bpseq/distribution/matchPWM_genome/matchPWM_genome.R
|
8fb46ffb30e93cf19cf9a202e6ebfddf4eed65bf
|
[] |
no_license
|
ajtock/170920_Chris_ChIP_REC8_histone
|
a749ccd1178c27749e1d305c197b71dfe4f13c15
|
4b28f3d184b5c32cc8a17a0f32ad0ea51b906c66
|
refs/heads/master
| 2020-06-19T04:43:10.322900
| 2019-07-12T11:55:59
| 2019-07-12T11:55:59
| 196,566,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,514
|
r
|
matchPWM_genome.R
|
# Identify genomic loci that match motifs enriched at REC8 peaks
# Profile REC8 and nucleosomes at these loci
# Profile base composition (proportion and -log10(probability)) in regions flanking these loci
library(Biostrings)
library(BSgenome.Athaliana.TAIR.TAIR9)
library(segmentSeq)
library(regioneR)
#library(zoo)
library(TTR)
motifDir <- "/home/meiosis/ajt200/analysis/170920_Chris_ChIP_REC8_histone/fastq_pooled/REC8/peaks/PeakRanger1.18/ranger/MYC_Rep2_input_p0.05_q0.05/REC8_MYC_Rep1_peak_profiles/motifs/weeder2_bg_armranLoc_200bpseq/"
outDir <- "/home/meiosis/ajt200/analysis/170920_Chris_ChIP_REC8_histone/fastq_pooled/REC8/peaks/PeakRanger1.18/ranger/MYC_Rep2_input_p0.05_q0.05/REC8_MYC_Rep1_peak_profiles/motifs/weeder2_bg_armranLoc_200bpseq/distribution/matchPWM_genome/"
plotDir <- "/home/meiosis/ajt200/analysis/170920_Chris_ChIP_REC8_histone/fastq_pooled/REC8/peaks/PeakRanger1.18/ranger/MYC_Rep2_input_p0.05_q0.05/REC8_MYC_Rep1_peak_profiles/motifs/weeder2_bg_armranLoc_200bpseq/distribution/matchPWM_genome/plots/"
chrs <- c("Chr1","Chr2","Chr3","Chr4","Chr5")
chrStart <- c(1, 1, 1, 1, 1)
chrLens <- c(30427671, 19698289, 23459830, 18585056, 26975502)
centromeres <- c(15086045, 3607929, 13587786, 3956021, 11725024)
pericenStart <- c(11330001, 990001, 10200001, 990001, 8890001)
pericenEnd <- c(18480000, 7540000, 16860000, 6850000, 15650000)
genome <- toGRanges(data.frame(chrs, chrStart, chrLens))
seqlevels(genome) <- sub("Chr", "", seqlevels(genome))
mask <- toGRanges(data.frame(chrs, pericenStart, pericenEnd))
seqlevels(mask) <- sub("Chr", "", seqlevels(mask))
chr1 <- Athaliana$Chr1
chr2 <- Athaliana$Chr2
chr3 <- Athaliana$Chr3
chr4 <- Athaliana$Chr4
chr5 <- Athaliana$Chr5
chr_list <- list()
chr_list[[1]] <- chr1
chr_list[[2]] <- chr2
chr_list[[3]] <- chr3
chr_list[[4]] <- chr4
chr_list[[5]] <- chr5
num_pwm <- as.numeric(system(paste0("ls -1 ", motifDir, "MAT*.pwm | wc -l"), intern = T))
pwm_list <- list()
for(i in 1:num_pwm) {
pwm_list[[i]] <- as.matrix(read.table(file = system(paste0("ls ", motifDir, "MAT", i, "_*.pwm"), intern = T), skip = 1, row.names = 1))
}
mclapply(seq_along(pwm_list), function(x) {
motif.GRanges <- GRanges()
for(i in 1:5) {
print(i)
match.pwm_list <- matchPWM(pwm_list[[x]], chr_list[[i]], min.score = "87.5%")
motif.ranges <- match.pwm_list@ranges
motif.GRanges.chr <- GRanges(seqnames = i, ranges = motif.ranges, strand = "*")
motif.GRanges <- append(motif.GRanges, motif.GRanges.chr)
}
save(motif.GRanges, file = paste0(outDir, "motif", x, "_matchPWM_GRanges.RData"))
}, mc.cores = length(pwm_list))
library(doParallel)
# Change number of cores to reflect number of samples you want to process simultaneously
registerDoParallel(cores = length(pwm_list))
print("Currently registered parallel backend name, version and cores")
print(getDoParName())
print(getDoParVersion())
print(getDoParWorkers())
foreach(x = 1:num_pwm) %dopar% {
print(x)
load(file = paste0(outDir, "motif", x, "_matchPWM_GRanges.RData"))
tmp <- DNAStringSet()
for(h in 1:5) {
tmp.chr <- DNAStringSet()
# Obtain sequences for each motif match and flanking 20-bp regions as DNAStringSet object
for(i in 1:length(motif.GRanges[seqnames(motif.GRanges) == h])) {
tmp.chr <- c(tmp.chr, DNAStringSet(chr_list[[h]][(start(ranges(motif.GRanges)[seqnames(motif.GRanges) == h][i])-20):(end(ranges(motif.GRanges)[seqnames(motif.GRanges) == h][i])+20)]))
}
tmp <- c(tmp, tmp.chr)
}
# Generate position frequency matrix (PFM)
pfm <- consensusMatrix(tmp)
# Convert frequencies to proportions and retain rows 1:4
prm <- prop.table(pfm, 2)[1:4,]
# Re-order rows for stack barplot representation
prm_AGTC <- rbind(prm[1,], prm[3,], prm[4,], prm[2,])
rownames(prm_AGTC) <- c("A", "G", "T", "C")
pdf(paste0(plotDir, "REC8_MYC_Rep1_peak_motif", x, "_", paste0(strsplit(consensusString(tmp), split = "")[[1]][21:(21+mean(width(motif.GRanges))-1)], collapse = ""), "_base_proportions.pdf"))
par(mgp = c(2, 1, 0))
barplot(prm_AGTC,
col = c("green", "yellow", "red", "blue"),
xlab = paste0("Position within REC8-MYC Rep1 peak motif", x, "_", paste0(strsplit(consensusString(tmp), split = "")[[1]][21:(21+mean(width(motif.GRanges))-1)], collapse = ""), " matches and 20-bp flanks"),
ylab = "Proportion",
legend.text = TRUE,
args.legend = list(
x = ncol(prm_AGTC) + 16,
y = 0.2,
bty = "n"
)
)
dev.off()
}
|
4ba9aed3ad6de0db979e6229705562f202b4e8f1
|
2d9b846c51e54b8d7ba85b37a071dcb64c95f4e9
|
/R/produto5.R
|
9226dac6e749a0a18e715633073f908f4a3d9f92
|
[] |
no_license
|
mikael83/git-produto5-OPAS
|
10b34500341d8652282f4d69db1b97dce8ea77cd
|
6d6ecc00a176dd6051cb303598f1c2904a442b59
|
refs/heads/master
| 2022-04-17T10:33:22.598271
| 2020-04-14T05:55:51
| 2020-04-14T05:55:51
| 255,521,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 180,401
|
r
|
produto5.R
|
############ Produto V ##########################
##################################################
###### script desenvolvido por Mikael Lemos ######
###### versão 1.0 - 23.03.2020 ##################
##################################################
######
### Carregando / instalando pacotes
######
#install.packages('dplyr')
library('dplyr')
#install.packages("tidyr")
library('tidyr')
#install.packages("data.table")
library('data.table')
#install.packages('stringr')
library('stringr')
#install.packages('Amelia')
library('Amelia')
# install.packages("tidyverse")
library(tidyverse)
# install.packages("lubridate")
library(lubridate)
# install.packages("ggplot2")
library(ggplot2)
#library(xlsx)
#install.packages("rJava")
library(rJava)
#install.packages("read.dbc")
library(read.dbc)
#install.packages("forcats")
#library(forcats)
library("foreign")
#install.packages("foreign")
#install.packages("openxlsx")
library("openxlsx")
library(RColorBrewer)
##########################
##########################
###############################
#### Cobertura Vacinal ########
###############################
### Dados PNI
# 2016, 2017, 2018, 2019
pni_2016 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/A110506189_6_37_15.xlsx")
pni_2017 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/A110437189_6_37_15.xlsx")
pni_2018 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/A110040189_6_37_15.xlsx")
pni_2019 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/A110410189_6_37_15.xlsx")
### organizando tabelas
pni_2016 <- select(pni_2016,"Unidade.da.Federação", "099.Hepatite.B.em.crianças.até.30.dias", "073.Hepatite.B")
pni_2017 <- select(pni_2017,"Unidade.da.Federação", "099.Hepatite.B.em.crianças.até.30.dias", "073.Hepatite.B")
pni_2018 <- select(pni_2018,"Unidade.da.Federação", "099.Hepatite.B.em.crianças.até.30.dias", "073.Hepatite.B")
pni_2019 <- select(pni_2019,"Unidade.da.Federação", "099.Hepatite.B.em.crianças.até.30.dias", "073.Hepatite.B")
## chr para num
#2016
pni_2016$`099.Hepatite.B.em.crianças.até.30.dias` <- as.numeric(pni_2016$`099.Hepatite.B.em.crianças.até.30.dias`)
pni_2016$`073.Hepatite.B` <- as.numeric(pni_2016$`073.Hepatite.B`)
#2017
pni_2017$`099.Hepatite.B.em.crianças.até.30.dias` <- as.numeric(pni_2017$`099.Hepatite.B.em.crianças.até.30.dias`)
pni_2017$`073.Hepatite.B` <- as.numeric(pni_2017$`073.Hepatite.B`)
#2018
pni_2018$`099.Hepatite.B.em.crianças.até.30.dias` <- as.numeric(pni_2018$`099.Hepatite.B.em.crianças.até.30.dias`)
pni_2018$`073.Hepatite.B` <- as.numeric(pni_2018$`073.Hepatite.B`)
#2019
pni_2019$`099.Hepatite.B.em.crianças.até.30.dias` <- as.numeric(pni_2019$`099.Hepatite.B.em.crianças.até.30.dias`)
pni_2019$`073.Hepatite.B` <- as.numeric(pni_2019$`073.Hepatite.B`)
## organizando tabelas
pni_2016 <- select(pni_2016, "UF" = "Unidade.da.Federação" , "Hepatite B em crianças até 30 dias" = "099.Hepatite.B.em.crianças.até.30.dias", "Hepatite B" = "073.Hepatite.B")
pni_2017 <- select(pni_2017, "UF" = "Unidade.da.Federação" , "Hepatite B em crianças até 30 dias" = "099.Hepatite.B.em.crianças.até.30.dias", "Hepatite B" = "073.Hepatite.B")
pni_2018 <- select(pni_2018, "UF" = "Unidade.da.Federação" , "Hepatite B em crianças até 30 dias" = "099.Hepatite.B.em.crianças.até.30.dias", "Hepatite B" = "073.Hepatite.B")
pni_2019 <- select(pni_2019, "UF" = "Unidade.da.Federação" , "Hepatite B em crianças até 30 dias" = "099.Hepatite.B.em.crianças.até.30.dias", "Hepatite B" = "073.Hepatite.B")
## Salvar tabelas xlsx
write.xlsx(pni_2016, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2016.xlsx")
pni_2016_qgis <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2016_qgis.xlsx")
pni_2016_R <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2016_R.xlsx")
pni_2016_R2 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2016_R2.xlsx")
write.csv(pni_2016_qgis, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2016_qgis.csv", fileEncoding = "UTF-8")
write.xlsx(pni_2017, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2017.xlsx")
pni_2017_qgis <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2017_qgis.xlsx")
pni_2017_R <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2017_R.xlsx")
pni_2017_R2 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2017_R2.xlsx")
write.csv(pni_2017_qgis, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2017_qgis.csv", fileEncoding = "UTF-8")
write.xlsx(pni_2018, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2018.xlsx")
pni_2018_qgis <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2018_qgis.xlsx")
pni_2018_R <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2018_R.xlsx")
pni_2018_R2 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2018_R2.xlsx")
write.csv(pni_2018_qgis, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2018_qgis.csv", fileEncoding = "UTF-8")
write.xlsx(pni_2019, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2019.xlsx")
pni_2019_qgis <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2019_qgis.xlsx")
pni_2019_R <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2019_R.xlsx")
pni_2019_R2 <- read.xlsx("/Users/lemos/OneDrive/Documentos/produtoV/pni_2019_R2.xlsx")
write.csv(pni_2019_qgis, "/Users/lemos/OneDrive/Documentos/produtoV/pni_2019_qgis.csv", fileEncoding = "UTF-8")
#### Plots PNI
## 2016
################################################
ggplot(data=pni_2016_R , aes(x=reorder(UF, -Hepatite.B.em.crianças.até.30.dias), y=Hepatite.B.em.crianças.até.30.dias )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B.em.crianças.até.30.dias), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2016_R$UF, "Minas Gerais")))
##################################################
################################################
ggplot(data=pni_2016_R , aes(x=reorder(UF, -Hepatite.B), y=Hepatite.B )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2016_R$UF, "Mato Grosso do Sul")))
##################################################
## 2017
################################################
ggplot(data=pni_2017_R , aes(x=reorder(UF, -Hepatite.B.em.crianças.até.30.dias), y=Hepatite.B.em.crianças.até.30.dias )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B.em.crianças.até.30.dias), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2017_R$UF, "Paraná")))
##################################################
################################################
ggplot(data=pni_2017_R , aes(x=reorder(UF, -Hepatite.B), y=Hepatite.B )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2017_R$UF, "Pará")))
##################################################
## 2018
################################################
ggplot(data=pni_2018_R , aes(x=reorder(UF, -Hepatite.B.em.crianças.até.30.dias), y=Hepatite.B.em.crianças.até.30.dias )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B.em.crianças.até.30.dias), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2018_R$UF, "Rio Grande do Norte")))
##################################################
################################################
ggplot(data=pni_2018_R , aes(x=reorder(UF, -Hepatite.B), y=Hepatite.B )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2018_R$UF, "Minas Gerais")))
##################################################
## 2019
################################################
ggplot(data=pni_2019_R , aes(x=reorder(UF, -Hepatite.B.em.crianças.até.30.dias), y=Hepatite.B.em.crianças.até.30.dias )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B.em.crianças.até.30.dias), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2019_R$UF, "Rio Grande do Norte")))
##################################################
################################################
ggplot(data=pni_2019_R , aes(x=reorder(UF, -Hepatite.B), y=Hepatite.B )) +
geom_bar(stat="identity", fill="steelblue") +
geom_text(aes(label=Hepatite.B), position = position_stack(vjust = 1.05), size=3.5)+
theme_minimal() + labs(x="UF", y = "Cobertura Vacinal (%)") + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2017_R$UF, "Pará")))
##################################################
##### Função para destacar um elemento do eixo X ##########
colorado <- function(src, boulder) {
if (!is.factor(src)) src <- factor(src) # make sure it's a factor
src_levels <- levels(src) # retrieve the levels in their order
brave <- boulder %in% src_levels # make sure everything we want to make bold is actually in the factor levels
if (all(brave)) { # if so
b_pos <- purrr::map_int(boulder, ~which(.==src_levels)) # then find out where they are
b_vec <- rep("plain", length(src_levels)) # make'm all plain first
b_vec[b_pos] <- "bold" # make our targets bold
b_vec # return the new vector
} else {
stop("All elements of 'boulder' must be in src")
}
}
#############################################################
pni_2016_R2$ano <- "2016"
pni_2017_R2$ano <- "2017"
pni_2018_R2$ano <- "2018"
pni_2019_R2$ano <- "2019"
sc_pni <- do.call("rbind", list(pni_2016_R2, pni_2017_R2, pni_2018_R2, pni_2019_R2))
############################################################
ggplot(data = sc_pni, aes(x =UF,
y = Hepatite.B.em.crianças.até.30.dias,
group=ano,
color=ano )) +
geom_line() +
geom_point() +
labs(
y="Cobertura Vacinal (%)",
x="Unidade Federativa"
) + geom_text(aes(label=Hepatite.B.em.crianças.até.30.dias),hjust=0, vjust=0, check_overlap = TRUE, size = 3) + theme_minimal() + theme(axis.text.x = element_text(size=11, angle=90)) +
theme(axis.text.x=element_text(face=colorado(pni_2016_R2$UF, "Ceará")))
##############################################################
####################
#### Tratamento ####
####################
## 2019
AC_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1901.dbc")
AC_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1902.dbc")
AC_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1903.dbc")
AC_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1904.dbc")
AC_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1905.dbc")
AC_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1906.dbc")
AC_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1907.dbc")
AC_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1908.dbc")
AC_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1909.dbc")
AC_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1910.dbc")
AC_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1911.dbc")
AC_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/AC_APAC_med/AMAC1912.dbc")
AL_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1901.dbc")
AL_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1902.dbc")
AL_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1903.dbc")
AL_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1904.dbc")
AL_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1905.dbc")
AL_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1906.dbc")
AL_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1908.dbc")
AL_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1909.dbc")
AL_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1910.dbc")
AL_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1911.dbc")
AL_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/AL_APAC_med/AMAL1912.dbc")
AM_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1901.dbc")
AM_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1902.dbc")
AM_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1903.dbc")
AM_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1904.dbc")
AM_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1905.dbc")
AM_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1906.dbc")
AM_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1907.dbc")
AM_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1908.dbc")
AM_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1909.dbc")
AM_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1910.dbc")
AM_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1911.dbc")
AM_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/AM_APAC_med/AMAM1912.dbc")
AP_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1901.dbc")
AP_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1902.dbc")
AP_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1903.dbc")
AP_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1904.dbc")
AP_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1905.dbc")
AP_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1906.dbc")
AP_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1907.dbc")
AP_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1908.dbc")
AP_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1909.dbc")
AP_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1910.dbc")
AP_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1911.dbc")
AP_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/AP_APAC_med/AMAP1912.dbc")
BA_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1901.dbc")
BA_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1902.dbc")
BA_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1903.dbc")
BA_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1904.dbc")
BA_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1905.dbc")
BA_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1906.dbc")
BA_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1907.dbc")
BA_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1908.dbc")
BA_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1909.dbc")
BA_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1910.dbc")
BA_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1911.dbc")
BA_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/BA_APAC_med/AMBA1912.dbc")
CE_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1901.dbc")
CE_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1902.dbc")
CE_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1903.dbc")
CE_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1904.dbc")
CE_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1905.dbc")
CE_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1906.dbc")
CE_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1907.dbc")
CE_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1908.dbc")
CE_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1909.dbc")
CE_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1910.dbc")
CE_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1911.dbc")
CE_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1912.dbc")
DF_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1901.dbc")
DF_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1902.dbc")
DF_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1903.dbc")
DF_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1904.dbc")
DF_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1905.dbc")
DF_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1906.dbc")
DF_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1907.dbc")
DF_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1908.dbc")
DF_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1909.dbc")
DF_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1910.dbc")
DF_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1911.dbc")
DF_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1912.dbc")
ES_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1901.dbc")
ES_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1902.dbc")
ES_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1903.dbc")
ES_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1904.dbc")
ES_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1905.dbc")
ES_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1906.dbc")
ES_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1907.dbc")
ES_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1908.dbc")
ES_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1909.dbc")
ES_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1910.dbc")
ES_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1911.dbc")
ES_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1912.dbc")
GO_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1901.dbc")
GO_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1902.dbc")
GO_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1903.dbc")
GO_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1904.dbc")
GO_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1905.dbc")
GO_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1906.dbc")
GO_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1907.dbc")
GO_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1908.dbc")
GO_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1909.dbc")
GO_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1910.dbc")
GO_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1911.dbc")
GO_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1912.dbc")
MA_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1901.dbc")
MA_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1902.dbc")
MA_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1903.dbc")
MA_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1904.dbc")
MA_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1905.dbc")
MA_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1906.dbc")
MA_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1907.dbc")
MA_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1908.dbc")
MA_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1909.dbc")
MA_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1910.dbc")
MA_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1911.dbc")
MA_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1912.dbc")
## 2019 - load the monthly APAC medication .dbc files for MG, MS, MT, PA,
## PB, PE and PI. Each file becomes a global data frame named
## <UF>_APAC_med_<MM>, exactly the names the one-assignment-per-line
## version produced.
## NOTE(review): PA has no month 04 here - presumably AMPA1904.dbc is
## missing on disk; confirm before adding it back.
local({
  months_2019 <- list(
    MG = 1:12,
    MS = 1:12,
    MT = 1:12,
    PA = c(1:3, 5:12),  # April absent in the original script
    PB = 1:12,
    PE = 1:12,
    PI = 1:12
  )
  for (uf in names(months_2019)) {
    for (m in months_2019[[uf]]) {
      mm <- sprintf("%02d", m)
      assign(
        paste0(uf, "_APAC_med_", mm),
        read.dbc(paste0("C:/Users/lemos/Downloads/", uf,
                        "_APAC_med/AM", uf, "19", mm, ".dbc")),
        envir = globalenv()
      )
    }
  }
})
###############################################################
## 2019 - batch 1 (AC .. PI): combine months per UF,         ##
## filter hepatitis B and export.                            ##
###############################################################
## 2019 - batch 1: stack the monthly data frames per UF, tag each row with
## its UF code, combine all UFs, keep chronic hepatitis B records
## (CID-10 B18.0 / B18.1) and export to CSV.
## Month sets mirror the original binds exactly:
##   * AL skips month 07 - presumably the July file was unavailable; confirm.
##   * PA skips month 04 (AMPA1904.dbc was never read).
## The AC result keeps its historical name AC_APAC_med (not *_APAL_med),
## because later code refers to it by that name.
local({
  stack_uf <- function(uf, months) {
    # mget() returns the requested frames in order; unname() keeps the
    # default row names that do.call("rbind", list(...)) produced.
    frames <- mget(sprintf("%s_APAC_med_%02d", uf, months), envir = globalenv())
    out <- do.call(rbind, unname(frames))
    out$UF <- uf
    out
  }
  sets <- list(
    AC_APAC_med = list(uf = "AC", months = 1:12),
    AL_APAL_med = list(uf = "AL", months = c(1:6, 8:12)),
    AM_APAL_med = list(uf = "AM", months = 1:12),
    AP_APAL_med = list(uf = "AP", months = 1:12),
    BA_APAL_med = list(uf = "BA", months = 1:12),
    CE_APAL_med = list(uf = "CE", months = 1:12),
    DF_APAL_med = list(uf = "DF", months = 1:12),
    ES_APAL_med = list(uf = "ES", months = 1:12),
    GO_APAL_med = list(uf = "GO", months = 1:12),
    MA_APAL_med = list(uf = "MA", months = 1:12),
    MG_APAL_med = list(uf = "MG", months = 1:12),
    MS_APAL_med = list(uf = "MS", months = 1:12),
    MT_APAL_med = list(uf = "MT", months = 1:12),
    PA_APAL_med = list(uf = "PA", months = c(1:3, 5:12)),
    PB_APAL_med = list(uf = "PB", months = 1:12),
    PE_APAL_med = list(uf = "PE", months = 1:12),
    PI_APAL_med = list(uf = "PI", months = 1:12)
  )
  for (nm in names(sets)) {
    assign(nm, stack_uf(sets[[nm]]$uf, sets[[nm]]$months), envir = globalenv())
  }
})
## Same UF order as before (row order matters for distinct() later on).
BR_19_01_trat <- rbind(AC_APAC_med, AP_APAL_med, AM_APAL_med, AL_APAL_med,
                       BA_APAL_med, CE_APAL_med, DF_APAL_med, ES_APAL_med,
                       GO_APAL_med, MA_APAL_med, MG_APAL_med, MS_APAL_med,
                       MT_APAL_med, PA_APAL_med, PB_APAL_med, PE_APAL_med,
                       PI_APAL_med)
## Keep only chronic hepatitis B (equivalent to the old == "B180" | == "B181").
BR_19_01_trat_CID_hepb <- filter(BR_19_01_trat, AP_CIDPRI %in% c("B180", "B181"))
write.csv(BR_19_01_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_19_01_trat_CID_hepb.csv")
###############################################################
## 2019 - batch 2 (PR .. SE): load the monthly .dbc files.   ##
###############################################################
## 2019 - load the monthly APAC medication .dbc files for PR, RJ, RN, RO,
## RR, RS, SC and SE into globals named <UF>_APAC_med_<MM>, matching the
## names of the one-assignment-per-line version.
## NOTE(review): SE has no month 07 - presumably AMSE1907.dbc is missing
## on disk; confirm before adding it back.
local({
  months_2019 <- list(
    PR = 1:12,
    RJ = 1:12,
    RN = 1:12,
    RO = 1:12,
    RR = 1:12,
    RS = 1:12,
    SC = 1:12,
    SE = c(1:6, 8:12)  # July absent in the original script
  )
  for (uf in names(months_2019)) {
    for (m in months_2019[[uf]]) {
      mm <- sprintf("%02d", m)
      assign(
        paste0(uf, "_APAC_med_", mm),
        read.dbc(paste0("C:/Users/lemos/Downloads/", uf,
                        "_APAC_med/AM", uf, "19", mm, ".dbc")),
        envir = globalenv()
      )
    }
  }
})
###############################################################
## 2019 - batch 2 (PR .. SE): combine months per UF,         ##
## filter hepatitis B and export.                            ##
###############################################################
## 2019 - batch 2: stack the monthly data frames per UF, tag each row with
## its UF code, combine all UFs, keep chronic hepatitis B records
## (CID-10 B18.0 / B18.1) and export to CSV.
##
## Fixes over the previous revision:
##   * RO: months 09 and 11 were read above (AMRO1909 / AMRO1911) but were
##     missing from the bind - two months of data were silently dropped.
##   * RS: the December entry was RR_APAC_med_12 (copy-paste error), which
##     duplicated Roraima's December under UF == "RS" and dropped RS's own
##     December; it is now RS_APAC_med_12.
## SE legitimately has no month 07 (AMSE1907.dbc was never read).
PR_APAL_med <- do.call("rbind", list(
  PR_APAC_med_01, PR_APAC_med_02, PR_APAC_med_03, PR_APAC_med_04,
  PR_APAC_med_05, PR_APAC_med_06, PR_APAC_med_07, PR_APAC_med_08,
  PR_APAC_med_09, PR_APAC_med_10, PR_APAC_med_11, PR_APAC_med_12
))
PR_APAL_med$UF <- "PR"
RJ_APAL_med <- do.call("rbind", list(
  RJ_APAC_med_01, RJ_APAC_med_02, RJ_APAC_med_03, RJ_APAC_med_04,
  RJ_APAC_med_05, RJ_APAC_med_06, RJ_APAC_med_07, RJ_APAC_med_08,
  RJ_APAC_med_09, RJ_APAC_med_10, RJ_APAC_med_11, RJ_APAC_med_12
))
RJ_APAL_med$UF <- "RJ"
RN_APAL_med <- do.call("rbind", list(
  RN_APAC_med_01, RN_APAC_med_02, RN_APAC_med_03, RN_APAC_med_04,
  RN_APAC_med_05, RN_APAC_med_06, RN_APAC_med_07, RN_APAC_med_08,
  RN_APAC_med_09, RN_APAC_med_10, RN_APAC_med_11, RN_APAC_med_12
))
RN_APAL_med$UF <- "RN"
RO_APAL_med <- do.call("rbind", list(
  RO_APAC_med_01, RO_APAC_med_02, RO_APAC_med_03, RO_APAC_med_04,
  RO_APAC_med_05, RO_APAC_med_06, RO_APAC_med_07, RO_APAC_med_08,
  RO_APAC_med_09, RO_APAC_med_10, RO_APAC_med_11, RO_APAC_med_12
))
RO_APAL_med$UF <- "RO"
RR_APAL_med <- do.call("rbind", list(
  RR_APAC_med_01, RR_APAC_med_02, RR_APAC_med_03, RR_APAC_med_04,
  RR_APAC_med_05, RR_APAC_med_06, RR_APAC_med_07, RR_APAC_med_08,
  RR_APAC_med_09, RR_APAC_med_10, RR_APAC_med_11, RR_APAC_med_12
))
RR_APAL_med$UF <- "RR"
RS_APAL_med <- do.call("rbind", list(
  RS_APAC_med_01, RS_APAC_med_02, RS_APAC_med_03, RS_APAC_med_04,
  RS_APAC_med_05, RS_APAC_med_06, RS_APAC_med_07, RS_APAC_med_08,
  RS_APAC_med_09, RS_APAC_med_10, RS_APAC_med_11, RS_APAC_med_12
))
RS_APAL_med$UF <- "RS"
SC_APAL_med <- do.call("rbind", list(
  SC_APAC_med_01, SC_APAC_med_02, SC_APAC_med_03, SC_APAC_med_04,
  SC_APAC_med_05, SC_APAC_med_06, SC_APAC_med_07, SC_APAC_med_08,
  SC_APAC_med_09, SC_APAC_med_10, SC_APAC_med_11, SC_APAC_med_12
))
SC_APAL_med$UF <- "SC"
SE_APAL_med <- do.call("rbind", list(
  SE_APAC_med_01, SE_APAC_med_02, SE_APAC_med_03, SE_APAC_med_04,
  SE_APAC_med_05, SE_APAC_med_06, SE_APAC_med_08, SE_APAC_med_09,
  SE_APAC_med_10, SE_APAC_med_11, SE_APAC_med_12
))
SE_APAL_med$UF <- "SE"
BR_19_02_trat <- do.call("rbind", list(
  PR_APAL_med, RJ_APAL_med, RN_APAL_med, RO_APAL_med,
  RR_APAL_med, RS_APAL_med, SC_APAL_med, SE_APAL_med
))
## Keep only chronic hepatitis B (equivalent to == "B180" | == "B181").
BR_19_02_trat_CID_hepb <- filter(BR_19_02_trat, AP_CIDPRI %in% c("B180", "B181"))
write.csv(BR_19_02_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_19_02_trat_CID_hepb.csv")
###############################################################
## 2019 - batch 3 (SP, TO): load the monthly .dbc files.     ##
###############################################################
## 2019 - load the monthly APAC medication .dbc files for SP and TO into
## globals named <UF>_APAC_med_<MM>, matching the names of the
## one-assignment-per-line version. Both UFs have all twelve months.
local({
  for (uf in c("SP", "TO")) {
    for (m in 1:12) {
      mm <- sprintf("%02d", m)
      assign(
        paste0(uf, "_APAC_med_", mm),
        read.dbc(paste0("C:/Users/lemos/Downloads/", uf,
                        "_APAC_med/AM", uf, "19", mm, ".dbc")),
        envir = globalenv()
      )
    }
  }
})
###############################################################
## 2019 - batch 3 (SP, TO): combine, filter, export; then    ##
## consolidate the whole year from the three batch CSVs.     ##
###############################################################
## 2019 - batch 3: stack SP and TO, filter hepatitis B, export, then build
## the national 2019 data set from the three batch CSVs written above.
SP_APAL_med <- rbind(
  SP_APAC_med_01, SP_APAC_med_02, SP_APAC_med_03, SP_APAC_med_04,
  SP_APAC_med_05, SP_APAC_med_06, SP_APAC_med_07, SP_APAC_med_08,
  SP_APAC_med_09, SP_APAC_med_10, SP_APAC_med_11, SP_APAC_med_12
)
SP_APAL_med$UF <- "SP"
TO_APAL_med <- rbind(
  TO_APAC_med_01, TO_APAC_med_02, TO_APAC_med_03, TO_APAC_med_04,
  TO_APAC_med_05, TO_APAC_med_06, TO_APAC_med_07, TO_APAC_med_08,
  TO_APAC_med_09, TO_APAC_med_10, TO_APAC_med_11, TO_APAC_med_12
)
TO_APAL_med$UF <- "TO"
BR_19_03_trat <- rbind(SP_APAL_med, TO_APAL_med)
## Keep only chronic hepatitis B (equivalent to == "B180" | == "B181").
BR_19_03_trat_CID_hepb <- filter(BR_19_03_trat, AP_CIDPRI %in% c("B180", "B181"))
write.csv(BR_19_03_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_19_03_trat_CID_hepb.csv")
## Reload the three batch extracts from disk (this replaces the in-memory
## versions with the CSV round-trip, as the original script did) and stack
## them into the yearly national data set.
BR_19_01_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_19_01_trat_CID_hepb.csv")
BR_19_02_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_19_02_trat_CID_hepb.csv")
BR_19_03_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_19_03_trat_CID_hepb.csv")
BR_APAC_trat__hepb_2019 <- rbind(BR_19_01_trat_CID_hepb,
                                 BR_19_02_trat_CID_hepb,
                                 BR_19_03_trat_CID_hepb)
## De-duplicate: keep the first record per AP_CNSPCN (presumably the
## patient's CNS card number - confirm against the data dictionary).
BR_APAC_trat__hepb_2019un <- distinct(BR_APAC_trat__hepb_2019, AP_CNSPCN,
                                      .keep_all = TRUE)
write.csv(BR_APAC_trat__hepb_2019,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2019.csv")
write.csv(BR_APAC_trat__hepb_2019un,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2019un.csv")
BR_APAC_trat__hepb_2019 <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2019.csv")
BR_APAC_trat__hepb_2019un <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2019un.csv")
###############################################################
## 2018: load the monthly .dbc files (same workflow as 2019) ##
###############################################################
## 2018 - load the monthly APAC medication .dbc files for AC, AL, AM, AP,
## BA, CE, DF, ES, GO, MA, MG and MS (months 01-03) into globals named
## <UF>_APAC_med_<MM>. These reuse (and overwrite) the names of the 2019
## monthly objects; the 2019 batches were already consolidated and written
## to CSV above, so the overwrite loses nothing.
## MS months 04-12 are loaded by the statements that follow this section.
local({
  months_2018 <- list(
    AC = 1:12,
    AL = 1:12,
    AM = 1:12,
    AP = 1:12,
    BA = 1:12,
    CE = 1:12,
    DF = 1:12,
    ES = 1:12,
    GO = 1:12,
    MA = 1:12,
    MG = 1:12,
    MS = 1:3  # remaining MS months are read below
  )
  for (uf in names(months_2018)) {
    for (m in months_2018[[uf]]) {
      mm <- sprintf("%02d", m)
      assign(
        paste0(uf, "_APAC_med_", mm),
        read.dbc(paste0("C:/Users/lemos/Downloads/", uf,
                        "_APAC_med/AM", uf, "18", mm, ".dbc")),
        envir = globalenv()
      )
    }
  }
})
MS_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1804.dbc")
MS_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1805.dbc")
MS_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1806.dbc")
MS_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1807.dbc")
MS_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1808.dbc")
MS_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1809.dbc")
MS_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1810.dbc")
MS_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1811.dbc")
MS_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1812.dbc")
MT_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1801.dbc")
MT_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1802.dbc")
MT_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1803.dbc")
MT_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1804.dbc")
MT_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1805.dbc")
MT_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1806.dbc")
MT_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1807.dbc")
MT_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1808.dbc")
MT_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1809.dbc")
MT_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1810.dbc")
MT_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1811.dbc")
MT_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1812.dbc")
PA_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1801.dbc")
PA_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1802.dbc")
PA_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1803.dbc")
PA_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1804.dbc")
PA_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1805.dbc")
PA_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1806.dbc")
PA_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1807.dbc")
PA_APAC_med_08 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1808.dbc")
PA_APAC_med_09 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1809.dbc")
PA_APAC_med_10 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1810.dbc")
PA_APAC_med_11 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1811.dbc")
PA_APAC_med_12 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1812.dbc")
PB_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1801.dbc")
PB_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1802.dbc")
PB_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1803.dbc")
PB_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1804.dbc")
PB_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1805.dbc")
PB_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1806.dbc")
PB_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1807.dbc")
PB_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1808.dbc")
PB_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1809.dbc")
PB_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1810.dbc")
PB_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1811.dbc")
PB_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1812.dbc")
PE_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1801.dbc")
PE_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1802.dbc")
PE_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1803.dbc")
PE_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1804.dbc")
PE_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1805.dbc")
PE_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1806.dbc")
PE_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1807.dbc")
PE_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1808.dbc")
PE_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1809.dbc")
PE_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1810.dbc")
PE_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1811.dbc")
PE_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1812.dbc")
PI_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1801.dbc")
PI_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1802.dbc")
PI_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1803.dbc")
PI_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1804.dbc")
PI_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1805.dbc")
PI_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1806.dbc")
PI_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1807.dbc")
PI_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1808.dbc")
PI_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1809.dbc")
PI_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1810.dbc")
PI_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1811.dbc")
PI_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1812.dbc")
###################
# Stack the monthly 2018 tables per state (batch 1), tag each row with the
# state code, filter chronic hepatitis B (CID-10 B18.0 / B18.1) and persist.
###################
# Bind the requested months of one state and add the UF column.
# unname() keeps the unnamed-list rbind() semantics (same row names as the
# original explicit do.call("rbind", list(...)) calls).
bind_months <- function(uf, months) {
  parts <- mget(paste0(uf, "_APAC_med_", sprintf("%02d", months)),
                envir = parent.frame())
  d <- do.call("rbind", unname(parts))
  d$UF <- uf
  d
}
# NOTE(review): AC skips months 08 and 10, as in the original script --
# presumably those monthly files were unavailable; confirm upstream.
AC_APAC_med <- bind_months("AC", c(1:7, 9, 11, 12))
# The *_APAL_med spelling (sic) is kept: later steps reference these names.
AL_APAL_med <- bind_months("AL", 1:12)
AM_APAL_med <- bind_months("AM", 1:12)
AP_APAL_med <- bind_months("AP", 1:12)
BA_APAL_med <- bind_months("BA", 1:12)
CE_APAL_med <- bind_months("CE", 1:12)
DF_APAL_med <- bind_months("DF", 1:12)
ES_APAL_med <- bind_months("ES", 1:12)
GO_APAL_med <- bind_months("GO", 1:12)
MA_APAL_med <- bind_months("MA", 1:12)
MG_APAL_med <- bind_months("MG", 1:12)
MS_APAL_med <- bind_months("MS", 1:12)
MT_APAL_med <- bind_months("MT", 1:12)
PA_APAL_med <- bind_months("PA", 1:12)
# Fix: the original omitted PB_APAC_med_12 even though December was read
# above, silently dropping a month of PB data.
PB_APAL_med <- bind_months("PB", 1:12)
PE_APAL_med <- bind_months("PE", 1:12)
PI_APAL_med <- bind_months("PI", 1:12)
BR_18_01_trat <- do.call("rbind", list(
  AC_APAC_med, AP_APAL_med, AM_APAL_med, AL_APAL_med, BA_APAL_med,
  CE_APAL_med, DF_APAL_med, ES_APAL_med, GO_APAL_med, MA_APAL_med,
  MG_APAL_med, MS_APAL_med, MT_APAL_med, PA_APAL_med, PB_APAL_med,
  PE_APAL_med, PI_APAL_med
))
BR_18_01_trat_CID_hepb <- filter(BR_18_01_trat,
                                 AP_CIDPRI %in% c("B180", "B181"))
write.csv(BR_18_01_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_18_01_trat_CID_hepb.csv")
###################
# Load the 2018 monthly APAC medication files -- batch 2 of states.
###################
apac_path <- function(uf, yy, mm) {
  sprintf("C:/Users/lemos/Downloads/%s_APAC_med/AM%s%s%s.dbc", uf, uf, yy, mm)
}
for (uf in c("PR", "RJ", "RN", "RO", "RR", "RS", "SC")) {
  for (mm in sprintf("%02d", 1:12)) {
    assign(paste0(uf, "_APAC_med_", mm), read.dbc(apac_path(uf, "18", mm)))
  }
}
# SE: month 07 is absent in the original script -- presumably that monthly
# file was not published; confirm before assuming full-year coverage.
for (mm in sprintf("%02d", c(1:6, 8:12))) {
  assign(paste0("SE_APAC_med_", mm), read.dbc(apac_path("SE", "18", mm)))
}
#####################
# Batch 2 of 2018: stack each state's monthly tables, tag the state,
# keep chronic hepatitis B records (CID-10 B18.0 / B18.1) and save.
#####################
# Bind the given months of one state and append the UF column; unname()
# preserves the unnamed-list rbind() row-name behavior of the original.
stack_uf <- function(uf, months) {
  parts <- mget(paste0(uf, "_APAC_med_", sprintf("%02d", months)),
                envir = parent.frame())
  out <- do.call("rbind", unname(parts))
  out$UF <- uf
  out
}
PR_APAL_med <- stack_uf("PR", 1:12)
RJ_APAL_med <- stack_uf("RJ", 1:12)
RN_APAL_med <- stack_uf("RN", 1:12)
RO_APAL_med <- stack_uf("RO", 1:12)
RR_APAL_med <- stack_uf("RR", 1:12)
RS_APAL_med <- stack_uf("RS", 1:12)
SC_APAL_med <- stack_uf("SC", 1:12)
# SE has no month 07 (that file is never read above), mirroring the original.
SE_APAL_med <- stack_uf("SE", c(1:6, 8:12))
BR_18_02_trat <- rbind(PR_APAL_med, RJ_APAL_med, RN_APAL_med, RO_APAL_med,
                       RR_APAL_med, RS_APAL_med, SC_APAL_med, SE_APAL_med)
BR_18_02_trat_CID_hepb <- filter(BR_18_02_trat,
                                 AP_CIDPRI == "B180" | AP_CIDPRI == "B181")
write.csv(BR_18_02_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_18_02_trat_CID_hepb.csv")
#####################
# Load the 2018 monthly APAC medication files -- batch 3 (SP and TO).
#####################
apac_path <- function(uf, yy, mm) {
  sprintf("C:/Users/lemos/Downloads/%s_APAC_med/AM%s%s%s.dbc", uf, uf, yy, mm)
}
for (uf in c("SP", "TO")) {
  for (mm in sprintf("%02d", 1:12)) {
    assign(paste0(uf, "_APAC_med_", mm), read.dbc(apac_path(uf, "18", mm)))
  }
}
#####################
# Batch 3 of 2018 (SP, TO), then consolidation of the three batches:
# filter hepatitis B, merge, deduplicate by patient CNS number, persist.
#####################
# Bind the given months of one state and append the UF column; unname()
# preserves the unnamed-list rbind() row-name behavior of the original.
stack_uf <- function(uf, months) {
  parts <- mget(paste0(uf, "_APAC_med_", sprintf("%02d", months)),
                envir = parent.frame())
  out <- do.call("rbind", unname(parts))
  out$UF <- uf
  out
}
SP_APAL_med <- stack_uf("SP", 1:12)
TO_APAL_med <- stack_uf("TO", 1:12)
BR_18_03_trat <- rbind(SP_APAL_med, TO_APAL_med)
BR_18_03_trat_CID_hepb <- filter(BR_18_03_trat,
                                 AP_CIDPRI == "B180" | AP_CIDPRI == "B181")
out_dir <- "C:/Users/lemos/OneDrive/Documentos/produtoV"
write.csv(BR_18_03_trat_CID_hepb,
          file.path(out_dir, "BR_18_03_trat_CID_hepb.csv"))
# Re-read the three batch CSVs so all share the same layout (read.csv adds
# the row-name column "X" to each) before stacking the national table.
BR_18_01_trat_CID_hepb <- read.csv(file.path(out_dir, "BR_18_01_trat_CID_hepb.csv"))
BR_18_02_trat_CID_hepb <- read.csv(file.path(out_dir, "BR_18_02_trat_CID_hepb.csv"))
BR_18_03_trat_CID_hepb <- read.csv(file.path(out_dir, "BR_18_03_trat_CID_hepb.csv"))
BR_APAC_trat__hepb_2018 <- rbind(BR_18_01_trat_CID_hepb,
                                 BR_18_02_trat_CID_hepb,
                                 BR_18_03_trat_CID_hepb)
# One record per patient: keep the first APAC per CNS card number.
BR_APAC_trat__hepb_2018un <- distinct(BR_APAC_trat__hepb_2018,
                                      AP_CNSPCN, .keep_all = TRUE)
write.csv(BR_APAC_trat__hepb_2018,
          file.path(out_dir, "BR_APAC_trat__hepb_2018.csv"))
write.csv(BR_APAC_trat__hepb_2018un,
          file.path(out_dir, "BR_APAC_trat__hepb_2018un.csv"))
BR_APAC_trat__hepb_2018 <- read.csv(file.path(out_dir, "BR_APAC_trat__hepb_2018.csv"))
BR_APAC_trat__hepb_2018un <- read.csv(file.path(out_dir, "BR_APAC_trat__hepb_2018un.csv"))
#####################
# 2017: load the monthly APAC medication files per state (reuses the same
# <UF>_APAC_med_<MM> variable names, overwriting the 2018 tables as the
# original script does).
#####################
apac_path <- function(uf, yy, mm) {
  sprintf("C:/Users/lemos/Downloads/%s_APAC_med/AM%s%s%s.dbc", uf, uf, yy, mm)
}
for (uf in c("AC", "AM", "AP", "BA", "CE", "DF", "ES")) {
  for (mm in sprintf("%02d", 1:12)) {
    assign(paste0(uf, "_APAC_med_", mm), read.dbc(apac_path(uf, "17", mm)))
  }
}
# AL: month 07 was absent from the original script -- presumably that file
# was not published; confirm coverage.
for (mm in sprintf("%02d", c(1:6, 8:12))) {
  assign(paste0("AL_APAC_med_", mm), read.dbc(apac_path("AL", "17", mm)))
}
# GO: only months 01-04 here; the remaining months are loaded further below.
for (mm in sprintf("%02d", 1:4)) {
  assign(paste0("GO_APAC_med_", mm), read.dbc(apac_path("GO", "17", mm)))
}
GO_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1705.dbc")
GO_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1706.dbc")
GO_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1707.dbc")
GO_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1708.dbc")
GO_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1709.dbc")
GO_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1710.dbc")
GO_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1711.dbc")
GO_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1712.dbc")
MA_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1701.dbc")
MA_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1702.dbc")
MA_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1703.dbc")
MA_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1704.dbc")
MA_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1705.dbc")
MA_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1706.dbc")
MA_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1707.dbc")
MA_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1708.dbc")
MA_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1709.dbc")
MA_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1710.dbc")
MA_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1711.dbc")
MA_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1712.dbc")
MG_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1701.dbc")
MG_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1702.dbc")
MG_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1703.dbc")
MG_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1704.dbc")
MG_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1705.dbc")
MG_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1706.dbc")
MG_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1707.dbc")
MG_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1708.dbc")
MG_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1709.dbc")
MG_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1710.dbc")
MG_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1711.dbc")
MG_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1712.dbc")
MS_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1701.dbc")
MS_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1702.dbc")
MS_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1703.dbc")
MS_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1704.dbc")
MS_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1705.dbc")
MS_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1706.dbc")
MS_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1707.dbc")
MS_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1708.dbc")
MS_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1709.dbc")
MS_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1710.dbc")
MS_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1711.dbc")
MS_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1712.dbc")
MT_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1701.dbc")
MT_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1702.dbc")
MT_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1703.dbc")
MT_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1704.dbc")
MT_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1705.dbc")
MT_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1706.dbc")
MT_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1707.dbc")
MT_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1708.dbc")
MT_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1709.dbc")
MT_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1710.dbc")
MT_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1711.dbc")
MT_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1712.dbc")
PA_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1701.dbc")
PA_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1702.dbc")
PA_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1703.dbc")
PA_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1705.dbc")
PA_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1706.dbc")
PA_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1707.dbc")
PA_APAC_med_08 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1708.dbc")
PA_APAC_med_09 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1709.dbc")
PA_APAC_med_10 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1710.dbc")
PA_APAC_med_11 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1711.dbc")
PA_APAC_med_12 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1712.dbc")
PB_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1701.dbc")
PB_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1702.dbc")
PB_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1703.dbc")
PB_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1704.dbc")
PB_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1705.dbc")
PB_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1706.dbc")
PB_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1707.dbc")
PB_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1708.dbc")
PB_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1709.dbc")
PB_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1710.dbc")
PB_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1711.dbc")
PB_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1712.dbc")
PE_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1701.dbc")
PE_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1702.dbc")
PE_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1703.dbc")
PE_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1704.dbc")
PE_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1705.dbc")
PE_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1706.dbc")
PE_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1707.dbc")
PE_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1708.dbc")
PE_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1709.dbc")
PE_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1710.dbc")
PE_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1711.dbc")
PE_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1712.dbc")
PI_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1701.dbc")
PI_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1702.dbc")
PI_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1703.dbc")
PI_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1704.dbc")
PI_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1705.dbc")
PI_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1706.dbc")
PI_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1707.dbc")
PI_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1708.dbc")
PI_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1709.dbc")
PI_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1710.dbc")
PI_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1711.dbc")
PI_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1712.dbc")
# -------------------------------------------------------------------
# 2017 batch 1: stack the monthly files per state, tag UF, combine,
# filter chronic hepatitis B (CID-10 B18.0 / B18.1), checkpoint.
# -------------------------------------------------------------------
# Stack the monthly data frames per state, tag each with its UF code, build
# the batch-1 national table, keep only chronic hepatitis B records
# (CID-10 B18.0 with delta agent / B18.1 without), and checkpoint to CSV.
#
# bind_months(): rbind the already-loaded <UF>_APAC_med_<MM> data frames.
# `months` lists only the months actually read (some source files are absent).
# unname() keeps rbind's row-name behaviour identical to an unnamed list.
bind_months <- function(uf, months) {
  do.call("rbind",
          unname(mget(paste0(uf, "_APAC_med_", months), envir = .GlobalEnv)))
}
months_all <- sprintf("%02d", 1:12)
AC_APAC_med <- bind_months("AC", months_all)
AC_APAC_med$UF <- "AC"
AL_APAL_med <- bind_months("AL", setdiff(months_all, "07"))          # no July file
AL_APAL_med$UF <- "AL"
AM_APAL_med <- bind_months("AM", months_all)
AM_APAL_med$UF <- "AM"
AP_APAL_med <- bind_months("AP", setdiff(months_all, c("07", "09"))) # no Jul/Sep
AP_APAL_med$UF <- "AP"
BA_APAL_med <- bind_months("BA", months_all)
BA_APAL_med$UF <- "BA"
CE_APAL_med <- bind_months("CE", months_all)
CE_APAL_med$UF <- "CE"
DF_APAL_med <- bind_months("DF", months_all)
DF_APAL_med$UF <- "DF"
ES_APAL_med <- bind_months("ES", months_all)
ES_APAL_med$UF <- "ES"
GO_APAL_med <- bind_months("GO", months_all)
GO_APAL_med$UF <- "GO"
MA_APAL_med <- bind_months("MA", setdiff(months_all, "01"))          # no January file
MA_APAL_med$UF <- "MA"
MG_APAL_med <- bind_months("MG", months_all)
MG_APAL_med$UF <- "MG"
MS_APAL_med <- bind_months("MS", months_all)
MS_APAL_med$UF <- "MS"
MT_APAL_med <- bind_months("MT", months_all)
MT_APAL_med$UF <- "MT"
PA_APAL_med <- bind_months("PA", setdiff(months_all, "04"))          # no April file
PA_APAL_med$UF <- "PA"
PB_APAL_med <- bind_months("PB", months_all)
PB_APAL_med$UF <- "PB"
PE_APAL_med <- bind_months("PE", months_all)
PE_APAL_med$UF <- "PE"
PI_APAL_med <- bind_months("PI", months_all)
PI_APAL_med$UF <- "PI"
# National table for batch 1. AC keeps its historical "_APAC_" name while the
# others use "_APAL_" — kept unchanged so any external references still work.
BR_17_01_trat <- do.call("rbind", list(
  AC_APAC_med, AP_APAL_med, AM_APAL_med, AL_APAL_med, BA_APAL_med,
  CE_APAL_med, DF_APAL_med, ES_APAL_med, GO_APAL_med, MA_APAL_med,
  MG_APAL_med, MS_APAL_med, MT_APAL_med, PA_APAL_med, PB_APAL_med,
  PE_APAL_med, PI_APAL_med
))
# %in% is equivalent to the original `== | ==` here (NA rows are dropped by
# filter either way) and scales if more codes are ever added.
BR_17_01_trat_CID_hepb <- filter(BR_17_01_trat, AP_CIDPRI %in% c("B180", "B181"))
write.csv(BR_17_01_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_17_01_trat_CID_hepb.csv")
# -------------------------------------------------------------------
# 2017 batch 2: read monthly files for PR, RJ, RN, RO, RR, RS, SC, SE
# -------------------------------------------------------------------
# Load the 2017 monthly APAC medication files for the batch-2 states,
# one data frame per state/month named <UF>_APAC_med_<MM>.
# The repeated read.dbc() calls are collapsed into loops; assign() creates
# the same variables the batch-2 rbind section below expects.
for (uf in c("PR", "RJ", "RN", "RO", "RR", "RS", "SC")) {
  for (m in sprintf("%02d", 1:12)) {
    assign(
      paste0(uf, "_APAC_med_", m),
      read.dbc(sprintf("C:/Users/lemos/Downloads/%s_APAC_med/AM%s17%s.dbc",
                       uf, uf, m))
    )
  }
}
# SE: AMSE1707.dbc was never read here — presumably missing on disk, so
# month 07 is skipped (TODO confirm the file really does not exist).
for (m in setdiff(sprintf("%02d", 1:12), "07")) {
  assign(
    paste0("SE_APAC_med_", m),
    read.dbc(sprintf("C:/Users/lemos/Downloads/SE_APAC_med/AMSE17%s.dbc", m))
  )
}
# -------------------------------------------------------------------
# 2017 batch 2: stack months per state, tag UF, combine, filter,
# checkpoint.
# -------------------------------------------------------------------
# Stack the monthly data frames per state for batch 2, tag UF, combine,
# keep only chronic hepatitis B records (CID-10 B18.0 / B18.1), checkpoint.
#
# bind_months(): rbind the already-loaded <UF>_APAC_med_<MM> data frames.
# unname() keeps rbind's row-name behaviour identical to an unnamed list.
bind_months <- function(uf, months) {
  do.call("rbind",
          unname(mget(paste0(uf, "_APAC_med_", months), envir = .GlobalEnv)))
}
months_all <- sprintf("%02d", 1:12)
PR_APAL_med <- bind_months("PR", months_all)
PR_APAL_med$UF <- "PR"
# BUGFIX: RJ_APAC_med_07 was read above but was omitted from the original
# rbind, silently dropping every RJ July-2017 record — now included.
RJ_APAL_med <- bind_months("RJ", months_all)
RJ_APAL_med$UF <- "RJ"
RN_APAL_med <- bind_months("RN", months_all)
RN_APAL_med$UF <- "RN"
RO_APAL_med <- bind_months("RO", months_all)
RO_APAL_med$UF <- "RO"
RR_APAL_med <- bind_months("RR", months_all)
RR_APAL_med$UF <- "RR"
RS_APAL_med <- bind_months("RS", months_all)
RS_APAL_med$UF <- "RS"
SC_APAL_med <- bind_months("SC", months_all)
SC_APAL_med$UF <- "SC"
SE_APAL_med <- bind_months("SE", setdiff(months_all, "07"))  # no July file
SE_APAL_med$UF <- "SE"
BR_17_02_trat <- do.call("rbind", list(
  PR_APAL_med, RJ_APAL_med, RN_APAL_med, RO_APAL_med,
  RR_APAL_med, RS_APAL_med, SC_APAL_med, SE_APAL_med
))
# Keep only chronic hepatitis B: CID-10 B18.0 / B18.1.
BR_17_02_trat_CID_hepb <- filter(BR_17_02_trat, AP_CIDPRI %in% c("B180", "B181"))
write.csv(BR_17_02_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_17_02_trat_CID_hepb.csv")
# -------------------------------------------------------------------
# 2017 batch 3: read monthly files for SP and TO
# -------------------------------------------------------------------
# Load the 2017 monthly APAC medication files for SP and TO (all 12 months),
# one data frame per state/month named <UF>_APAC_med_<MM>.
for (uf in c("SP", "TO")) {
  for (m in sprintf("%02d", 1:12)) {
    assign(
      paste0(uf, "_APAC_med_", m),
      read.dbc(sprintf("C:/Users/lemos/Downloads/%s_APAC_med/AM%s17%s.dbc",
                       uf, uf, m))
    )
  }
}
# -------------------------------------------------------------------
# 2017 batch 3: combine SP/TO, then assemble the national 2017 table
# from the three batch checkpoints and deduplicate per patient.
# -------------------------------------------------------------------
# Stack SP/TO months, tag UF, filter chronic hepatitis B, checkpoint batch 3,
# then rebuild the national 2017 table from the three batch CSVs.
#
# bind_months(): rbind the already-loaded <UF>_APAC_med_<MM> data frames.
bind_months <- function(uf, months) {
  do.call("rbind",
          unname(mget(paste0(uf, "_APAC_med_", months), envir = .GlobalEnv)))
}
months_all <- sprintf("%02d", 1:12)
SP_APAL_med <- bind_months("SP", months_all)
SP_APAL_med$UF <- "SP"
TO_APAL_med <- bind_months("TO", months_all)
TO_APAL_med$UF <- "TO"
BR_17_03_trat <- do.call("rbind", list(SP_APAL_med, TO_APAL_med))
# Keep only chronic hepatitis B: CID-10 B18.0 / B18.1.
BR_17_03_trat_CID_hepb <- filter(BR_17_03_trat, AP_CIDPRI %in% c("B180", "B181"))
write.csv(BR_17_03_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_17_03_trat_CID_hepb.csv")
# Reload the three batch checkpoints. read.csv adds an "X" row-name column
# to each, so the three frames stay column-compatible for the rbind below.
BR_17_01_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_17_01_trat_CID_hepb.csv")
BR_17_02_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_17_02_trat_CID_hepb.csv")
BR_17_03_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_17_03_trat_CID_hepb.csv")
BR_APAC_trat__hepb_2017 <- do.call("rbind", list(
  BR_17_01_trat_CID_hepb, BR_17_02_trat_CID_hepb, BR_17_03_trat_CID_hepb
))
# One row per AP_CNSPCN (presumably the patient identifier — TODO confirm),
# keeping the first occurrence of each.
BR_APAC_trat__hepb_2017un <- distinct(BR_APAC_trat__hepb_2017, AP_CNSPCN,
                                      .keep_all = TRUE)
write.csv(BR_APAC_trat__hepb_2017,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2017.csv")
write.csv(BR_APAC_trat__hepb_2017un,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2017un.csv")
BR_APAC_trat__hepb_2017 <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2017.csv")
BR_APAC_trat__hepb_2017un <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2017un.csv")
# -------------------------------------------------------------------
# 2016: read monthly APAC medication files (same pipeline, year 2016)
# -------------------------------------------------------------------
# Load the 2016 monthly APAC medication files, one data frame per
# state/month named <UF>_APAC_med_<MM>.
# NOTE: these deliberately reuse — and overwrite — the 2017 variable names,
# matching the original script's behaviour.
for (uf in c("AC", "AM", "AP", "BA")) {
  for (m in sprintf("%02d", 1:12)) {
    assign(
      paste0(uf, "_APAC_med_", m),
      read.dbc(sprintf("C:/Users/lemos/Downloads/%s_APAC_med/AM%s16%s.dbc",
                       uf, uf, m))
    )
  }
}
# AL: AMAL1607.dbc was never read here — presumably missing on disk, so
# month 07 is skipped (TODO confirm the file really does not exist).
for (m in setdiff(sprintf("%02d", 1:12), "07")) {
  assign(
    paste0("AL_APAC_med_", m),
    read.dbc(sprintf("C:/Users/lemos/Downloads/AL_APAC_med/AMAL16%s.dbc", m))
  )
}
# CE: only months 01-06 are read at this point; the remaining 2016 months
# are read further below in the script.
for (m in sprintf("%02d", 1:6)) {
  assign(
    paste0("CE_APAC_med_", m),
    read.dbc(sprintf("C:/Users/lemos/Downloads/CE_APAC_med/AMCE16%s.dbc", m))
  )
}
CE_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1607.dbc")
CE_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1608.dbc")
CE_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1609.dbc")
CE_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1610.dbc")
CE_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1611.dbc")
CE_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/CE_APAC_med/AMCE1612.dbc")
DF_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1601.dbc")
DF_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1602.dbc")
DF_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1603.dbc")
DF_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1604.dbc")
DF_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1605.dbc")
DF_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1606.dbc")
DF_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1607.dbc")
DF_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1608.dbc")
DF_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1609.dbc")
DF_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1610.dbc")
DF_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1611.dbc")
DF_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/DF_APAC_med/AMDF1612.dbc")
ES_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1601.dbc")
ES_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1602.dbc")
ES_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1603.dbc")
ES_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1604.dbc")
ES_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1605.dbc")
ES_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1606.dbc")
ES_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1607.dbc")
ES_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1608.dbc")
ES_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1609.dbc")
ES_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1610.dbc")
ES_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1611.dbc")
ES_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/ES_APAC_med/AMES1612.dbc")
GO_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1601.dbc")
GO_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1602.dbc")
GO_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1603.dbc")
GO_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1604.dbc")
GO_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1605.dbc")
GO_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1606.dbc")
GO_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1607.dbc")
GO_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1608.dbc")
GO_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1609.dbc")
GO_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1610.dbc")
GO_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1611.dbc")
GO_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/GO_APAC_med/AMGO1612.dbc")
MA_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1601.dbc")
MA_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1602.dbc")
MA_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1603.dbc")
MA_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1604.dbc")
MA_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1605.dbc")
MA_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1606.dbc")
MA_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1607.dbc")
MA_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1608.dbc")
MA_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1609.dbc")
MA_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1610.dbc")
MA_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1611.dbc")
MA_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MA_APAC_med/AMMA1612.dbc")
MG_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1601.dbc")
MG_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1602.dbc")
MG_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1603.dbc")
MG_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1604.dbc")
MG_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1605.dbc")
MG_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1606.dbc")
MG_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1607.dbc")
MG_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1608.dbc")
MG_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1609.dbc")
MG_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1610.dbc")
MG_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1611.dbc")
MG_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MG_APAC_med/AMMG1612.dbc")
MS_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1601.dbc")
MS_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1602.dbc")
MS_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1603.dbc")
MS_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1604.dbc")
MS_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1605.dbc")
MS_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1606.dbc")
MS_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1607.dbc")
MS_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1608.dbc")
MS_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1609.dbc")
MS_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1610.dbc")
MS_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1611.dbc")
MS_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MS_APAC_med/AMMS1612.dbc")
MT_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1601.dbc")
MT_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1602.dbc")
MT_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1603.dbc")
MT_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1604.dbc")
MT_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1605.dbc")
MT_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1606.dbc")
MT_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1607.dbc")
MT_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1608.dbc")
MT_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1609.dbc")
MT_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1610.dbc")
MT_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1611.dbc")
MT_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/MT_APAC_med/AMMT1612.dbc")
PA_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1601.dbc")
PA_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1602.dbc")
PA_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1603.dbc")
PA_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1605.dbc")
PA_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1606.dbc")
PA_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1607.dbc")
PA_APAC_med_08 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1608.dbc")
PA_APAC_med_09 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1609.dbc")
PA_APAC_med_10 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1610.dbc")
PA_APAC_med_11 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1611.dbc")
PA_APAC_med_12 <-read.dbc("C:/Users/lemos/Downloads/PA_APAC_med/AMPA1612.dbc")
PB_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1601.dbc")
PB_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1602.dbc")
PB_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1603.dbc")
PB_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1604.dbc")
PB_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1605.dbc")
PB_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1606.dbc")
PB_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1607.dbc")
PB_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1608.dbc")
PB_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1609.dbc")
PB_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1610.dbc")
PB_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1611.dbc")
PB_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PB_APAC_med/AMPB1612.dbc")
PE_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1601.dbc")
PE_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1602.dbc")
PE_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1603.dbc")
PE_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1604.dbc")
PE_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1605.dbc")
PE_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1606.dbc")
PE_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1607.dbc")
PE_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1608.dbc")
PE_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1609.dbc")
PE_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1610.dbc")
PE_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1611.dbc")
PE_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PE_APAC_med/AMPE1612.dbc")
PI_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1601.dbc")
PI_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1602.dbc")
PI_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1603.dbc")
PI_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1604.dbc")
PI_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1605.dbc")
PI_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1606.dbc")
PI_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1607.dbc")
PI_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1608.dbc")
PI_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1609.dbc")
PI_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1610.dbc")
PI_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1611.dbc")
PI_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/PI_APAC_med/AMPI1612.dbc")
###################
# Part 1: combine the monthly data frames of each state into one per-state
# data frame, filter for chronic hepatitis B (CID-10 B18.0 / B18.1) and
# export the result to CSV.
###################

# Harmonise and stack the monthly data frames of one UF.
#
# The AP_NATJUR column only appears in the later-2016 file layouts, so it is
# filled with 1023 for the early months (natjur_months) to make all monthly
# data frames column-compatible before rbind().
#
# uf            state abbreviation, e.g. "AC"
# bind_months   integer months to stack (months with no source file are
#               excluded by the caller)
# natjur_months integer months that need AP_NATJUR added
# Returns the stacked data frame with a UF column appended.  The per-month
# globals are updated in place so later code sees the added column, matching
# the original script's side effects.
combine_apac_med <- function(uf, bind_months = 1:12, natjur_months = 1:6) {
  month_name <- function(m) paste0(uf, "_APAC_med_", sprintf("%02d", m))
  for (m in natjur_months) {
    nm <- month_name(m)
    df <- get(nm, envir = .GlobalEnv)
    df$AP_NATJUR <- 1023
    assign(nm, df, envir = .GlobalEnv)
  }
  combined <- do.call("rbind",
                      lapply(bind_months,
                             function(m) get(month_name(m),
                                             envir = .GlobalEnv)))
  combined$UF <- uf
  combined
}

# NOTE: the combined-object names are inconsistent in the original script
# (AC uses "_APAC_", the rest "_APAL_"); they are kept unchanged because the
# national rbind below references them by these exact names.
AC_APAC_med <- combine_apac_med("AC")
# AL month 07 was never read (source file unavailable).
AL_APAL_med <- combine_apac_med("AL", bind_months = c(1:6, 8:12))
AM_APAL_med <- combine_apac_med("AM")
AP_APAL_med <- combine_apac_med("AP")
BA_APAL_med <- combine_apac_med("BA")
CE_APAL_med <- combine_apac_med("CE")
DF_APAL_med <- combine_apac_med("DF")
ES_APAL_med <- combine_apac_med("ES")
GO_APAL_med <- combine_apac_med("GO")
MA_APAL_med <- combine_apac_med("MA")
MG_APAL_med <- combine_apac_med("MG")
MS_APAL_med <- combine_apac_med("MS")
MT_APAL_med <- combine_apac_med("MT")
# PA month 04 was never read; the original script tried to set AP_NATJUR on
# the non-existent PA_APAC_med_04 (a runtime error) -- month 04 is excluded
# from both the NATJUR patch and the bind here.
PA_APAL_med <- combine_apac_med("PA", bind_months = c(1:3, 5:12),
                                natjur_months = c(1:3, 5:6))
PB_APAL_med <- combine_apac_med("PB")
PE_APAL_med <- combine_apac_med("PE")
PI_APAL_med <- combine_apac_med("PI")

# National stack for part 1, in the original order.
BR_16_01_trat <- do.call("rbind", list(
  AC_APAC_med, AP_APAL_med, AM_APAL_med, AL_APAL_med, BA_APAL_med,
  CE_APAL_med, DF_APAL_med, ES_APAL_med, GO_APAL_med, MA_APAL_med,
  MG_APAL_med, MS_APAL_med, MT_APAL_med, PA_APAL_med, PB_APAL_med,
  PE_APAL_med, PI_APAL_med
))
# Keep only chronic hepatitis B records (CID-10 B18.0 and B18.1).
BR_16_01_trat_CID_hepb <-
  filter(BR_16_01_trat, AP_CIDPRI == "B180" | AP_CIDPRI == "B181")
write.csv(BR_16_01_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_16_01_trat_CID_hepb.csv")
#####################
# Part 2: read 2016 monthly APAC medication files for PR, RJ, RN, RO, RR,
# RS, SC and SE.  One global data frame "<UF>_APAC_med_<MM>" per state/month.
#####################

# Same reader helper as in part 1, repeated so this section also runs on
# its own (re-definition with an identical body is harmless).
read_apac_med_year <- function(uf, months = 1:12,
                               base_dir = "C:/Users/lemos/Downloads") {
  for (m in months) {
    mm <- sprintf("%02d", m)
    dbc_path <- file.path(base_dir, paste0(uf, "_APAC_med"),
                          paste0("AM", uf, "16", mm, ".dbc"))
    assign(paste0(uf, "_APAC_med_", mm), read.dbc(dbc_path),
           envir = .GlobalEnv)
  }
}

for (uf in c("PR", "RJ", "RN", "RO", "RR", "RS", "SC")) {
  read_apac_med_year(uf)
}
# AMSE1607.dbc is not read (month 07 unavailable for SE).
read_apac_med_year("SE", c(1:6, 8:12))
#####################
# Part 2: combine the monthly data frames of each state, filter for chronic
# hepatitis B (CID-10 B18.0 / B18.1) and export to CSV.
#####################

# Same combine helper as in part 1, repeated so this section also runs on
# its own.  AP_NATJUR is missing from the early-2016 layouts and is filled
# with 1023 for natjur_months before rbind(); the per-month globals are
# updated in place to match the original script's side effects.
combine_apac_med <- function(uf, bind_months = 1:12, natjur_months = 1:6) {
  month_name <- function(m) paste0(uf, "_APAC_med_", sprintf("%02d", m))
  for (m in natjur_months) {
    nm <- month_name(m)
    df <- get(nm, envir = .GlobalEnv)
    df$AP_NATJUR <- 1023
    assign(nm, df, envir = .GlobalEnv)
  }
  combined <- do.call("rbind",
                      lapply(bind_months,
                             function(m) get(month_name(m),
                                             envir = .GlobalEnv)))
  combined$UF <- uf
  combined
}

PR_APAL_med <- combine_apac_med("PR")
RJ_APAL_med <- combine_apac_med("RJ")
# RN: the original script excludes months 05 and 10 from the bind and never
# sets AP_NATJUR on month 05, although both files were read -- presumably a
# data-quality decision; kept as-is (TODO confirm with the data owner).
RN_APAL_med <- combine_apac_med("RN", bind_months = c(1:4, 6:9, 11:12),
                                natjur_months = c(1:4, 6))
# RO: month 03 gets AP_NATJUR (default natjur_months) but is excluded from
# the bind, exactly as in the original script.
RO_APAL_med <- combine_apac_med("RO", bind_months = c(1:2, 4:12))
RR_APAL_med <- combine_apac_med("RR")
# RS: month 01 gets AP_NATJUR but is excluded from the bind, as originally.
RS_APAL_med <- combine_apac_med("RS", bind_months = 2:12)
SC_APAL_med <- combine_apac_med("SC")
# SE month 07 was never read (source file unavailable).
SE_APAL_med <- combine_apac_med("SE", bind_months = c(1:6, 8:12))

# National stack for part 2, in the original order.
BR_16_02_trat <- do.call("rbind", list(
  PR_APAL_med, RJ_APAL_med, RN_APAL_med, RO_APAL_med,
  RR_APAL_med, RS_APAL_med, SC_APAL_med, SE_APAL_med
))
# Keep only chronic hepatitis B records (CID-10 B18.0 and B18.1).
BR_16_02_trat_CID_hepb <-
  filter(BR_16_02_trat, AP_CIDPRI == "B180" | AP_CIDPRI == "B181")
write.csv(BR_16_02_trat_CID_hepb,
          "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_16_02_trat_CID_hepb.csv")
#####################
##########
#####################
## Load the 2016 monthly APAC medicine files (DATASUS DBC format, via
## read.dbc) for São Paulo (SP) and Tocantins (TO) — the last chunk of
## states in the national assembly.
SP_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1601.dbc")
SP_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1602.dbc")
SP_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1603.dbc")
SP_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1604.dbc")
SP_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1605.dbc")
SP_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1606.dbc")
SP_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1607.dbc")
SP_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1608.dbc")
SP_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1609.dbc")
SP_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1610.dbc")
SP_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1611.dbc")
SP_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/SP_APAC_med/AMSP1612.dbc")
TO_APAC_med_01 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1601.dbc")
TO_APAC_med_02 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1602.dbc")
TO_APAC_med_03 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1603.dbc")
TO_APAC_med_04 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1604.dbc")
TO_APAC_med_05 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1605.dbc")
TO_APAC_med_06 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1606.dbc")
TO_APAC_med_07 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1607.dbc")
TO_APAC_med_08 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1608.dbc")
TO_APAC_med_09 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1609.dbc")
TO_APAC_med_10 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1610.dbc")
TO_APAC_med_11 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1611.dbc")
TO_APAC_med_12 <- read.dbc("C:/Users/lemos/Downloads/TO_APAC_med/AMTO1612.dbc")
#####################
##########
#####################
## --- National assembly, pass 3 (SP and TO) + 2016 union ------------------
## As in the earlier passes, AP_NATJUR is back-filled with 1023 for months
## 01-06 only — TODO confirm those files genuinely lack the column.
SP_APAC_med_01$AP_NATJUR <- 1023
SP_APAC_med_02$AP_NATJUR <- 1023
SP_APAC_med_03$AP_NATJUR <- 1023
SP_APAC_med_04$AP_NATJUR <- 1023
SP_APAC_med_05$AP_NATJUR <- 1023
SP_APAC_med_06$AP_NATJUR <- 1023
SP_APAL_med <- do.call("rbind", list(SP_APAC_med_01, SP_APAC_med_02, SP_APAC_med_03, SP_APAC_med_04, SP_APAC_med_05,SP_APAC_med_06, SP_APAC_med_07, SP_APAC_med_08, SP_APAC_med_09, SP_APAC_med_10, SP_APAC_med_11, SP_APAC_med_12))
SP_APAL_med$UF <- "SP"
TO_APAC_med_01$AP_NATJUR <- 1023
TO_APAC_med_02$AP_NATJUR <- 1023
TO_APAC_med_03$AP_NATJUR <- 1023
TO_APAC_med_04$AP_NATJUR <- 1023
TO_APAC_med_05$AP_NATJUR <- 1023
TO_APAC_med_06$AP_NATJUR <- 1023
TO_APAL_med <- do.call("rbind", list(TO_APAC_med_01, TO_APAC_med_02, TO_APAC_med_03, TO_APAC_med_04, TO_APAC_med_05,TO_APAC_med_06, TO_APAC_med_07, TO_APAC_med_08, TO_APAC_med_09, TO_APAC_med_10, TO_APAC_med_11, TO_APAC_med_12))
TO_APAL_med$UF <- "TO"
# Chunk 3 of the national table; keep chronic hepatitis B only
# (ICD-10 B18.0 / B18.1) and persist a CSV checkpoint.
BR_16_03_trat <- do.call("rbind", list(SP_APAL_med, TO_APAL_med))
BR_16_03_trat_CID_hepb <- filter(BR_16_03_trat, AP_CIDPRI == "B180" | AP_CIDPRI == "B181" )
write.csv(BR_16_03_trat_CID_hepb, "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_16_03_trat_CID_hepb.csv")
# Reload the three checkpointed chunks and build the full 2016 table.
BR_16_01_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_16_01_trat_CID_hepb.csv")
BR_16_02_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_16_02_trat_CID_hepb.csv")
BR_16_03_trat_CID_hepb <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_16_03_trat_CID_hepb.csv")
BR_APAC_trat__hepb_2016 <- do.call("rbind", list( BR_16_01_trat_CID_hepb, BR_16_02_trat_CID_hepb, BR_16_03_trat_CID_hepb))
# One row per patient: AP_CNSPCN is the patient health-card number.
BR_APAC_trat__hepb_2016un <- distinct(BR_APAC_trat__hepb_2016, AP_CNSPCN , .keep_all = TRUE)
write.csv(BR_APAC_trat__hepb_2016, "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2016.csv")
write.csv(BR_APAC_trat__hepb_2016un, "C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2016un.csv")
BR_APAC_trat__hepb_2016<- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2016.csv")
BR_APAC_trat__hepb_2016un <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BR_APAC_trat__hepb_2016un.csv")
#####################
##########
#####################
### Plots
## Per-year bar charts of unique treated patients per UF.
## The national total row ("Brasil") is now computed from the data with
## nrow() instead of the previously hard-coded counts
## (29459 / 32691 / 35356 / 37503), so the charts stay consistent if the
## input tables are rebuilt.  Since the *_un tables hold one row per
## patient, nrow() equals the national patient count.
# Frequency table: one row per UF plus a national-total row.
uf_freq_table <- function(patients_un, total_label = "Brasil") {
  freq <- as.data.frame(table(patients_un$UF), stringsAsFactors = FALSE)
  rbind(freq, data.frame(Var1 = total_label, Freq = nrow(patients_un)))
}
# Descending bar chart with the count printed above each bar.
uf_freq_barplot <- function(freq) {
  ggplot(data = freq, aes(x = reorder(Var1, -Freq), y = Freq)) +
    geom_bar(stat = "identity", fill = "steelblue") +
    geom_text(aes(label = Freq), vjust = -0.3, size = 3.5) +
    theme_minimal() + labs(x = "UF", y = "Frequência")
}
## 2016
plot_uf_2016b <- uf_freq_table(BR_APAC_trat__hepb_2016un)
uf_freq_barplot(plot_uf_2016b)
## 2017
plot_uf_2017b <- uf_freq_table(BR_APAC_trat__hepb_2017un)
uf_freq_barplot(plot_uf_2017b)
## 2018
plot_uf_2018b <- uf_freq_table(BR_APAC_trat__hepb_2018un)
uf_freq_barplot(plot_uf_2018b)
## 2019
plot_uf_2019b <- uf_freq_table(BR_APAC_trat__hepb_2019un)
uf_freq_barplot(plot_uf_2019b)
###############################
### Tratamento scatter plot ###
###############################
# Tag each yearly frequency table with its year, then stack them so the
# treatment counts can be compared across 2016-2019 in one chart.
plot_uf_2016b$ano <- "2016"
plot_uf_2017b$ano <- "2017"
plot_uf_2018b$ano <- "2018"
plot_uf_2019b$ano <- "2019"
sc_trat <- rbind(plot_uf_2016b, plot_uf_2017b, plot_uf_2018b, plot_uf_2019b)
###########################################################
# One coloured line per year; points labelled with the raw counts.
ggplot(sc_trat, aes(x = Var1, y = Freq, group = ano, color = ano)) +
  geom_line() +
  geom_point() +
  geom_text(aes(label = Freq), hjust = 0, vjust = 0, check_overlap = TRUE, size = 3) +
  labs(y = "Frequência de tratamentos", x = "Unidade Federativa") +
  theme_minimal()
##############################################################
##############################################################
#####################
#### Diagnóstico ####
#####################
## Diagnostic exam counts (GAL laboratory system), one spreadsheet per year.
## NOTE(review): gal2019 is loaded but not used in this section — kept for
## parity with the rest of the script; confirm whether it is needed later.
# 2019
gal2019 <- read.xlsx("C:/Users/lemos/OneDrive/Documentos/produtoV/Gal2019.xlsx")
gal19 <- read.xlsx("C:/Users/lemos/OneDrive/Documentos/produtoV/gal19.xlsx")
ggplot(gal19, aes(x = reorder(UF, -Freq), y = Freq)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = Freq), vjust = -0.3, size = 3.5) +
  theme_minimal() +
  labs(x = "UF", y = "Frequência")
gal19$ano <- "2019"
# 2018
gal18 <- read.xlsx("C:/Users/lemos/OneDrive/Documentos/produtoV/gal18.xlsx")
ggplot(gal18, aes(x = reorder(UF, -Freq), y = Freq)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = Freq), vjust = -0.3, size = 3.5) +
  theme_minimal() +
  labs(x = "UF", y = "Frequência")
gal18$ano <- "2018"
# 2017
gal17 <- read.xlsx("C:/Users/lemos/OneDrive/Documentos/produtoV/gal17.xlsx")
ggplot(gal17, aes(x = reorder(UF, -Freq), y = Freq)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = Freq), vjust = -0.3, size = 3.5) +
  theme_minimal() +
  labs(x = "UF", y = "Frequência")
gal17$ano <- "2017"
# 2016
gal16 <- read.xlsx("C:/Users/lemos/OneDrive/Documentos/produtoV/gal16.xlsx")
ggplot(gal16, aes(x = reorder(UF, -Freq), y = Freq)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = Freq), vjust = -0.3, size = 3.5) +
  theme_minimal() +
  labs(x = "UF", y = "Frequência")
gal16$ano <- "2016"
# Stack all years for the trend chart below.
sc_diag <- rbind(gal16, gal17, gal18, gal19)
##### Função para destacar um elemento do eixo X ##########
# Build a vector of font faces ("plain"/"bold"), one per level of `src`,
# with "bold" at the levels listed in `boulder`.  Intended for
# theme(axis.text.x = element_text(face = colorado(...))) to highlight
# selected x-axis labels (e.g. "Brasil").
#
# src     : vector (coerced to factor); its levels label the axis.
# boulder : level(s) to render in bold; all must be levels of `src`.
# Returns : character vector of "plain"/"bold", length == nlevels(src).
# Raises  : an error when an element of `boulder` is not a level of `src`.
colorado <- function(src, boulder) {
  if (!is.factor(src)) src <- factor(src)
  src_levels <- levels(src)
  if (!all(boulder %in% src_levels)) {
    stop("All elements of 'boulder' must be in src")
  }
  # Vectorized replacement for the original purrr::map_int() position
  # lookup — same result, no third-party dependency.
  ifelse(src_levels %in% boulder, "bold", "plain")
}
#############################################################
############################################################
# Yearly exam counts per UF; the "Brasil" axis label is bolded through
# colorado() (defined above).
ggplot(sc_diag, aes(x = UF, y = Freq, group = ano, color = ano)) +
  geom_line() +
  geom_point() +
  geom_text(aes(label = Freq), hjust = 0, vjust = 0, check_overlap = TRUE, size = 3) +
  labs(y = "Frequência de exames", x = "Unidade Federativa") +
  theme_minimal() +
  theme(axis.text.x = element_text(face = colorado(sc_diag$UF, "Brasil")))
##############################################################
#####################
#### Notificação ####
#####################
## SINAN notification counts per UF, one column per year (n16-n18).
sinan_notif <- read.csv("C:/Users/lemos/Downloads/notif.csv")
## 2016
ggplot(sinan_notif, aes(x = reorder(UF, -n16), y = n16)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = n16), vjust = -0.3, size = 3.5) +
  theme_minimal() +
  labs(x = "UF", y = "Frequência") +
  theme(axis.text.x = element_text(size = 11, angle = 90))
not_16 <- select(sinan_notif, UF, Freq = n16)
not_16$ano <- "2016"
## 2017
ggplot(sinan_notif, aes(x = reorder(UF, -n17), y = n17)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = n17), vjust = -0.3, size = 3.5) +
  theme_minimal() +
  labs(x = "UF", y = "Frequência") +
  theme(axis.text.x = element_text(size = 11, angle = 90))
not_17 <- select(sinan_notif, UF, Freq = n17)
not_17$ano <- "2017"
## 2018
ggplot(sinan_notif, aes(x = reorder(UF, -n18), y = n18)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = n18), vjust = -0.3, size = 3.5) +
  theme_minimal() +
  labs(x = "UF", y = "Frequência") +
  theme(axis.text.x = element_text(size = 11, angle = 90))
not_18 <- select(sinan_notif, UF, Freq = n18)
not_18$ano <- "2018"
# Stack the yearly slices for the trend chart.
sc_not <- rbind(not_16, not_17, not_18)
############################################################
# Yearly notification trend per UF; "Brasil" axis label bolded.
ggplot(sc_not, aes(x = UF, y = Freq, group = ano, color = ano)) +
  geom_line() +
  geom_point() +
  geom_text(aes(label = Freq), hjust = 0, vjust = 0, check_overlap = TRUE, size = 3) +
  labs(y = "Frequência de notificações", x = "Unidade Federativa") +
  theme_minimal() +
  theme(axis.text.x = element_text(size = 11, angle = 90)) +
  theme(axis.text.x = element_text(face = colorado(sc_not$UF, "Brasil")))
##############################################################
######################
### subNotificação ###
#####################
##### Carregando dados - até 2018 - Bancos ######
## AIH (hospital admissions)
#AIH_2018_bancos <- read.csv("F:/PROJETO_BDBM/PR_PO_FILTRADO_HEP_TYPE_BD/AIH_PR.csv")
AIH_2018_bancos <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/AIH_PR_BDcompleto.csv")
# Keep hepatitis B records only.
AIH_2018_bancos_hepb <- filter(AIH_2018_bancos, HEPATITE %like% "B")
# Translate the 2-digit IBGE state prefix of the municipality-of-occurrence
# code into the UF abbreviation with a single vectorized lookup (replaces
# the original 27 repeated conditional assignments).  Codes absent from the
# table are kept as-is, matching the original recode; NA codes stay NA
# instead of aborting the subscripted assignment.
ibge_uf <- c("11" = "RO", "12" = "AC", "13" = "AM", "14" = "RR", "15" = "PA",
             "16" = "AP", "17" = "TO", "21" = "MA", "22" = "PI", "23" = "CE",
             "24" = "RN", "25" = "PB", "26" = "PE", "27" = "AL", "28" = "SE",
             "29" = "BA", "31" = "MG", "32" = "ES", "33" = "RJ", "35" = "SP",
             "41" = "PR", "42" = "SC", "43" = "RS", "50" = "MS", "51" = "MT",
             "52" = "GO", "53" = "DF")
uf_code <- substr(AIH_2018_bancos_hepb$MUN_OCOR, 1, 2)
uf_abbr <- unname(ibge_uf[uf_code])
uf_abbr[is.na(uf_abbr)] <- uf_code[is.na(uf_abbr)]
AIH_2018_bancos_hepb$UF <- uf_abbr
## APAC (high-complexity outpatient procedures)
#APAC_2018_bancos <- read.csv("F:/PROJETO_BDBM/PR_PO_FILTRADO_HEP_TYPE_BD/APAC_PR.csv")
APAC_2018_bancos <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/APAC_PR_BDcompleto.csv")
# Keep hepatitis B records only.
APAC_2018_bancos_hepb <- filter(APAC_2018_bancos, HEPATITE %like% "B")
# Vectorized IBGE state prefix -> UF abbreviation lookup (replaces 27
# repeated conditional assignments); unmatched codes are kept as-is.
ibge_uf <- c("11" = "RO", "12" = "AC", "13" = "AM", "14" = "RR", "15" = "PA",
             "16" = "AP", "17" = "TO", "21" = "MA", "22" = "PI", "23" = "CE",
             "24" = "RN", "25" = "PB", "26" = "PE", "27" = "AL", "28" = "SE",
             "29" = "BA", "31" = "MG", "32" = "ES", "33" = "RJ", "35" = "SP",
             "41" = "PR", "42" = "SC", "43" = "RS", "50" = "MS", "51" = "MT",
             "52" = "GO", "53" = "DF")
uf_code <- substr(APAC_2018_bancos_hepb$MUN_OCOR, 1, 2)
uf_abbr <- unname(ibge_uf[uf_code])
uf_abbr[is.na(uf_abbr)] <- uf_code[is.na(uf_abbr)]
APAC_2018_bancos_hepb$UF <- uf_abbr
## BPAI (individual outpatient production records)
#BPAI_2018_bancos <- read.csv("F:/PROJETO_BDBM/PR_PO_FILTRADO_HEP_TYPE_BD/BPAI_PR.csv")
BPAI_2018_bancos <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/BPAI_PR_BDcompleto.csv")
# Keep hepatitis B records only.
BPAI_2018_bancos_hepb <- filter(BPAI_2018_bancos, HEPATITE %like% "B")
# Vectorized IBGE state prefix -> UF abbreviation lookup (replaces 27
# repeated conditional assignments); unmatched codes are kept as-is.
ibge_uf <- c("11" = "RO", "12" = "AC", "13" = "AM", "14" = "RR", "15" = "PA",
             "16" = "AP", "17" = "TO", "21" = "MA", "22" = "PI", "23" = "CE",
             "24" = "RN", "25" = "PB", "26" = "PE", "27" = "AL", "28" = "SE",
             "29" = "BA", "31" = "MG", "32" = "ES", "33" = "RJ", "35" = "SP",
             "41" = "PR", "42" = "SC", "43" = "RS", "50" = "MS", "51" = "MT",
             "52" = "GO", "53" = "DF")
uf_code <- substr(BPAI_2018_bancos_hepb$MUN_OCOR, 1, 2)
uf_abbr <- unname(ibge_uf[uf_code])
uf_abbr[is.na(uf_abbr)] <- uf_code[is.na(uf_abbr)]
BPAI_2018_bancos_hepb$UF <- uf_abbr
## SIM (mortality information system)
#SIM_2018_bancos <- read.csv("F:/PROJETO_BDBM/PR_PO_FILTRADO_HEP_TYPE_BD/SIM_PR.csv")
SIM_2018_bancos <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/SIM_PR_BDcompleto.csv")
# Keep hepatitis B records only.
SIM_2018_bancos_hepb <- filter(SIM_2018_bancos, HEPATITE %like% "B")
# Vectorized IBGE state prefix -> UF abbreviation lookup (replaces 27
# repeated conditional assignments); unmatched codes are kept as-is.
ibge_uf <- c("11" = "RO", "12" = "AC", "13" = "AM", "14" = "RR", "15" = "PA",
             "16" = "AP", "17" = "TO", "21" = "MA", "22" = "PI", "23" = "CE",
             "24" = "RN", "25" = "PB", "26" = "PE", "27" = "AL", "28" = "SE",
             "29" = "BA", "31" = "MG", "32" = "ES", "33" = "RJ", "35" = "SP",
             "41" = "PR", "42" = "SC", "43" = "RS", "50" = "MS", "51" = "MT",
             "52" = "GO", "53" = "DF")
uf_code <- substr(SIM_2018_bancos_hepb$MUN_OCOR, 1, 2)
uf_abbr <- unname(ibge_uf[uf_code])
uf_abbr[is.na(uf_abbr)] <- uf_code[is.na(uf_abbr)]
SIM_2018_bancos_hepb$UF <- uf_abbr
# SINAN (notification system)
#SINAN_2018_bancos <- read.csv("F:/PROJETO_BDBM/PR_PO_FILTRADO_HEP_TYPE_BD/SINAN.csv")
SINAN_2018_bancos <- read.csv("C:/Users/lemos/OneDrive/Documentos/produtoV/SINAN.csv")
#SINAN_2018_bancos_hepb <- filter(SINAN_2018_bancos, HEPATITE %like% "B")
# BUG FIX: the original recoded SINAN_2018_bancos_hepb, an object whose
# creation is commented out above — so the UF recode either failed outright
# or hit a stale object from a previous session.  Recode SINAN_2018_bancos
# itself, which is the object the joins below actually use.
SINAN_2018_bancos$UF <- substr(SINAN_2018_bancos$MUN_OCOR, 1, 2)
# Vectorized IBGE state prefix -> UF abbreviation lookup; unmatched codes
# are kept as-is.
ibge_uf <- c("11" = "RO", "12" = "AC", "13" = "AM", "14" = "RR", "15" = "PA",
             "16" = "AP", "17" = "TO", "21" = "MA", "22" = "PI", "23" = "CE",
             "24" = "RN", "25" = "PB", "26" = "PE", "27" = "AL", "28" = "SE",
             "29" = "BA", "31" = "MG", "32" = "ES", "33" = "RJ", "35" = "SP",
             "41" = "PR", "42" = "SC", "43" = "RS", "50" = "MS", "51" = "MT",
             "52" = "GO", "53" = "DF")
uf_code <- SINAN_2018_bancos$UF
uf_abbr <- unname(ibge_uf[uf_code])
uf_abbr[is.na(uf_abbr)] <- uf_code[is.na(uf_abbr)]
SINAN_2018_bancos$UF <- uf_abbr
###### União de bancos - subn #######
# Stack the four care databases (AIH hospitalizations, APAC procedures,
# BPAI outpatient records, SIM deaths) and keep one row per patient.
subn_hepb_2018 <- do.call("rbind", list(AIH_2018_bancos_hepb, APAC_2018_bancos_hepb, BPAI_2018_bancos_hepb, SIM_2018_bancos_hepb))
subn_hepb_2018_un <- distinct(subn_hepb_2018, ID_PACIENTE, .keep_all = TRUE)
####### Intersecção entre bancos (união) e SINAN ########
## Inner join: patients present both in the care databases and in SINAN.
SINAN_subn_hepb_2018_innerjoin <- inner_join(SINAN_2018_bancos, subn_hepb_2018, by = "ID_PACIENTE")
SINAN_subn_hepb_2018_innerjoin_un <- distinct(SINAN_subn_hepb_2018_innerjoin, ID_PACIENTE, .keep_all = TRUE)
## Anti join: patients seen by the health system but never notified in
## SINAN — the suspected under-notification set.
SINAN_subn_hepb_2018_antijoin <- anti_join(subn_hepb_2018, SINAN_2018_bancos, by = "ID_PACIENTE")
SINAN_subn_hepb_2018_antijoin_un <- distinct(SINAN_subn_hepb_2018_antijoin, ID_PACIENTE, .keep_all = TRUE)
## Subnotificação plot UF
plot_uf_2018_subn <- as.data.frame(table(SINAN_subn_hepb_2018_antijoin_un$UF), stringsAsFactors = FALSE)
# The "Total" row is computed from the data (nrow) instead of the previously
# hard-coded 5473, so the chart cannot drift out of sync with the inputs.
plot_uf_2018_subn <- rbind(plot_uf_2018_subn,
                           data.frame(Var1 = "Total", Freq = nrow(SINAN_subn_hepb_2018_antijoin_un)))
ggplot(data = plot_uf_2018_subn, aes(x = reorder(Var1, -Freq), y = Freq)) +
  geom_bar(stat = "identity", fill = "steelblue") +
  geom_text(aes(label = Freq), vjust = -0.3, size = 3.5) +
  theme_minimal() + labs(x = "UF", y = "Frequência")
|
3ebdf86c26e87edf68a98a9d130b4f0c8381b646
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fBasics/examples/stats-interpKrige.Rd.R
|
7c6bee67b3c40ba3850beeea317b20ade0afa6a9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 514
|
r
|
stats-interpKrige.Rd.R
|
library(fBasics)
### Name: krigeInterp
### Title: Bivariate Krige Interpolation
### Aliases: krigeInterp
### Keywords: programming
### ** Examples
## No test:
## The akima library is not auto-installed because of a different licence.
## krigeInterp - Kriging: sample 999 points of a radial cosine surface on
## [-0.5, 0.5]^2 and interpolate them onto a regular grid by kriging.
set.seed(1953)
x <- runif(999) - 0.5
y <- runif(999) - 0.5
z <- cos(2 * pi * (x^2 + y^2))
ans <- krigeInterp(x, y, z, extrap = FALSE)
# Surface and contour views of the interpolated grid.
persp(ans, theta = -40, phi = 30, col = "steelblue",
      xlab = "x", ylab = "y", zlab = "z")
contour(ans)
## End(No test)
|
30f07808bf660282ecb4ae97b73e0a6f13b9760f
|
9c546f0daf168a9f3ec674d4d0b479daf9b7fe67
|
/Practica 11/p11/practica11c1.R
|
d77fc90962ff890995c8e39501567dce38bd3fe0
|
[] |
no_license
|
cynthia9305/Tareas
|
45ab2a2c555853b0605fff058cf4646bc78a8d04
|
f17fda52f3fe9221af1a1514b63ef38048666d63
|
refs/heads/master
| 2021-05-04T21:14:18.174290
| 2018-06-12T03:30:33
| 2018-06-12T03:30:33
| 119,905,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 724
|
r
|
practica11c1.R
|
# Draw one element of x at random.  The length-1 guard avoids sample()'s
# surprise behaviour of sampling from 1:x when x is a single number.
pick.one <- function(x) {
  if (length(x) == 1) x else sample(x, 1)
}
# Generate a random polynomial with `termcount` terms: each term has a
# variable index in 1:varcount, a uniform(0,1) coefficient and a degree in
# 1:maxdeg.  Returns a data frame with columns variable/coef/degree.
# Improvements over the original: columns are preallocated instead of
# growing the data frame with rbind() inside the loop (which copies the
# whole frame on every iteration), and seq_len() makes termcount = 0
# return an empty frame instead of iterating over c(1, 0).
poli <- function(maxdeg, varcount, termcount) {
  variable <- numeric(termcount)
  coef <- numeric(termcount)
  degree <- numeric(termcount)
  for (t in seq_len(termcount)) {
    # Same RNG draw order as the original (variable, degree, coefficient)
    # so seeded runs remain reproducible.
    variable[t] <- pick.one(1:varcount)
    degree[t] <- pick.one(1:maxdeg)
    coef[t] <- runif(1)
  }
  data.frame(variable = variable, coef = coef, degree = degree)
}
# Evaluate the first `terms` rows of polynomial `pol` (columns variable/
# coef/degree, as built by poli()) at the point `vars`:
#   sum of coef * vars[variable]^degree.
# Vectorized over the rows instead of the original scalar accumulation
# loop; terms = 0 now returns 0 instead of indexing row 0.
# NOTE(review): this masks base::eval() — consider renaming at call sites.
eval <- function(pol, vars, terms) {
  p <- pol[seq_len(terms), , drop = FALSE]
  sum(p$coef * vars[p$variable] ^ p$degree)
}
# Demo: build a random polynomial (4 variables, max degree 3, 5 terms)
# and evaluate it at a random point.
vc <- 4  # number of variables
md <- 3  # maximum degree
tc <- 5  # number of terms
f <- poli(md, vc, tc)
print(f)
print(eval(f, runif(vc), tc))
|
9cf60af5805dc0a8b65e31bb786db97377ad7bb6
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/pangoAttrIteratorNext.Rd
|
0413dbded5a178ba98331fac21d591801dad8ee4
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 464
|
rd
|
pangoAttrIteratorNext.Rd
|
\alias{pangoAttrIteratorNext}
\name{pangoAttrIteratorNext}
\title{pangoAttrIteratorNext}
\description{Advance the iterator until the next change of style.}
\usage{pangoAttrIteratorNext(object)}
\arguments{\item{\verb{object}}{[\code{\link{PangoAttrIterator}}] a \code{\link{PangoAttrIterator}}}}
\value{[logical] \code{FALSE} if the iterator is at the end of the list, otherwise \code{TRUE}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
6747a14e7d99d90085b2669e260f77c3aa1da080
|
d54431bde7faa4032d044991668845269188772e
|
/Formação Cientista de Dados/Atividades/Machine Learning/20 - Aprendizado Baseado em Instância.R
|
6b81ed9a2d8d5cf401a35464bd0082fe34a818d5
|
[] |
no_license
|
lucianofbn/Data-Scientist
|
194ea599cb078898d71f5c4e758076345e8e8ff6
|
bf250e2d3a277e68f46077fd455875d2fe99d369
|
refs/heads/master
| 2020-03-30T05:25:25.598165
| 2018-10-13T03:18:30
| 2018-10-13T03:18:30
| 150,797,887
| 1
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 427
|
r
|
20 - Aprendizado Baseado em Instância.R
|
# -> Instance-based learning (nearest neighbour / k-NN) on the iris data.
# NOTE(review): installing a package inside a script runs on every
# execution — better done once, interactively.
install.packages("class", dependencies = T)
library(class)
head(iris)
dim(iris)
# Random 70/30 train/classify split (labels 1 = train, 2 = classify).
amostra = sample(2,150, replace = T, prob = c(0.7,0.3))
iristreino = iris[amostra == 1,]
classificar = iris[amostra == 2,]
dim(iristreino)
dim(classificar)
# 3-nearest-neighbour prediction from the four measurement columns;
# column 5 (Species) provides the training labels.
previsao = knn(iristreino[,1:4], classificar[,1:4], iristreino[,5], k=3)
# Confusion table: actual species vs prediction.
table(classificar[,5], previsao)
# NOTE(review): fix() opens an interactive editor and blocks the script —
# confirm this is intentional.
fix(classificar)
|
861bb2d670584aba23f9dd00264f5957b1264abf
|
170d59a56f0e7a443ce015470585a0504763f7f0
|
/cours2.R
|
1dad822f670cbd521468f5ab6022e36036f10354
|
[] |
no_license
|
albericloop/Rproject
|
8b215ff26b47f345d393dbf035fc1977587b05b7
|
a2a5d27e4cabf5524ef752576310edf0cba22a95
|
refs/heads/master
| 2020-04-04T19:06:45.778372
| 2018-12-13T05:32:37
| 2018-12-13T05:32:37
| 156,192,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,417
|
r
|
cours2.R
|
library(shiny)
library(shinydashboard)
library(dygraphs)
library(xts)
library(tidyverse)
library(lubridate)
library(magrittr)
library(stringr)
library(gdata)
library(expss)
library(rworldmap)
library(readxl)
library(dplyr)
library(schoolmath)
# Summarise the global `datalogs` table into one row per user:
#   BehaviourNumber : entries of type "Behaviour" (baseline logging week)
#   SmokedNumber    : "On time" + "Cheated" entries (cigarettes smoked)
#   DaysNumber      : observation days beyond the 7-day baseline
#   savedCigarettes : baseline daily rate * follow-up days - smoked
# Counting uses expss::count_if on the Type column.
makeTabByUser <- function()
{
  users <- unique(datalogs$User)
  nbUser <- length(users)
  userlist <- data.frame(1:nbUser)  # index column kept for compatibility with callers
  userlist["Name"] <- 0
  userlist["BehaviourNumber"] <- 0
  userlist["SmokedNumber"] <- 0
  userlist["DaysNumber"] <- 0
  userlist["savedCigarettes"] <- 0
  # seq_along() instead of the original 1:length(users): the latter yields
  # c(1, 0) when there are no users and the loop body would then fail.
  for (i in seq_along(users)) {
    userlist$Name[i] <- toString(users[i])
    data <- subset(datalogs, User == users[i])
    smokedBehaviour <- count_if("Behaviour", data$Type)
    smokedCheated <- count_if("Cheated", data$Type)
    smokedOntime <- count_if("On time", data$Type)
    smokedCigarettes <- (smokedCheated + smokedOntime)
    # Observation span in whole days between first and last log entry.
    datecount <- difftime(as.Date(max(data$Date)), as.Date(min(data$Date)), units = c("days"))
    userlist$BehaviourNumber[i] <- smokedBehaviour
    userlist$SmokedNumber[i] <- smokedCigarettes
    userlist$DaysNumber[i] <- datecount - 7
    if (datecount > 7) {
      # Baseline week gives cigarettes/day; project that over the follow-up
      # period and subtract what was actually smoked.
      userlist$savedCigarettes[i] <- (smokedBehaviour / 7) * (datecount - 7) - (smokedCigarettes)
    } else {
      userlist$savedCigarettes[i] <- 0
    }
  }
  return(userlist)
}
# Base palette for the dashboard (hand-picked entries of colors()).
pickedColors <- colors()[c(30, 35, 40, 45, 50, 12, 60)]
# Low-resolution world map used as the plotting background.
newmap <- getMap(resolution = "low")
# Per-country values to be joined onto the map.
dataCountries <- data.frame(
  Country = c('Russia', 'Cyprus', 'Belize', 'Austria', 'Virgin Islands',
              'Italy', 'United States', 'United Kingdom', 'Germany',
              'France', 'Poland', 'Switzerland'),
  Value = c(-0.310, -0.206, -0.300, -0.179, -0.196, -0.174,
            -0.105, -0.142, -0.082, -0.097, -0.027, 0.052)
)
pdf1 <- joinCountryData2Map(dataCountries, joinCode = "NAME", nameJoinColumn = "Country")
# Get the coordinates for each country.
country_coord <- data.frame(coordinates(pdf1), stringsAsFactors = FALSE)
# Raw usage logs; the export is Mac-Roman encoded.
datalogs <- read.csv("datasets/logs.csv", header = TRUE, sep = ";", encoding = "MacRoman")
# Normalise mis-encoded (Mac-Roman) and accented characters in user names
# to plain ASCII.
# BUG FIX: the original mixed gsub() with stringr::str_replace();
# str_replace() rewrites only the FIRST occurrence, so names with repeated
# accents were only partially cleaned.  gsub() replaces every occurrence
# and drops the stringr dependency here.  The original's later
# str_replace("\x91", "a") was unreachable (that byte is already mapped to
# "e" above it) and has been dropped.
UnaccentNames <- function(text) {
  # Properly encoded accented characters first.
  text <- gsub("[\u00e9\u00e8\u00eb]", "e", text)  # e-acute / e-grave / e-diaeresis
  text <- gsub("\u00c9", "E", text)                # E-acute
  text <- gsub("\u0192", "E", text)                # florin glyph (mis-decoded E-acute)
  text <- gsub("\u017d", "e", text)                # Z-caron glyph (mis-decoded e-acute)
  text <- gsub("\u2018", "e", text)                # left quote glyph (mis-decoded e-grave)
  text <- gsub("\u008f", "e", text)                # C1 control char seen in the raw logs
  # Raw Mac-Roman high bytes; useBytes avoids invalid-UTF-8 regex errors.
  text <- gsub("[\x8e\x8f\x91]", "e", text, useBytes = TRUE)
  text <- gsub("\x83", "E", text, useBytes = TRUE)
  return(text)
}
#replace with correct accents
datalogs$User = UnaccentNames(datalogs$User)
# Parse timestamps. Order matters: Date must be derived from the raw Time
# string BEFORE Time itself is overwritten with the parsed POSIXlt value.
datalogs$Date <- strptime(as.character(datalogs$Time), "%d/%m/%Y")
datalogs$Time <- strptime(as.character(datalogs$Time), "%d/%m/%Y %H:%M")
datalogs$Hour <- hour(datalogs$Time)
datalogs$Day <- weekdays(as.Date(datalogs$Time))
# Compute, for every event, the number of whole weeks elapsed since that
# user's FIRST "Behaviour" event (the baseline observation week).
behav <- datalogs
other <- behav[behav$Type == "Behaviour", c("User","Time")]
# keep only the first Behaviour timestamp per user
other <- other[!duplicated(other[,"User"]),]
behav <- merge(x=behav, y=other, by="User", all = TRUE)
behav$nbWeek <- time_length(interval(start = behav$Time.y, end = behav$Time.x), unit = "weeks")
behav$nbWeek <- floor(behav$nbWeek)
behav <- plyr::rename(behav,c("Time.x"="Time"))
behav <- select(behav,"User","Time","nbWeek")
# attach nbWeek back onto the main log
datalogs <- merge(x=datalogs,y=behav, by=c("User","Time"))
# Events that correspond to an actually smoked cigarette.
datalogsSmoked <- subset(datalogs, Type == "Behaviour" |Type == "On time" | Type == "Cheated")
# Per-user summary (smoked/saved counts) built by makeTabByUser() above.
tabByUser <- makeTabByUser()
# Price of one cigarette, used to convert saved cigarettes into money.
cigPrice = 1
varUser<- ""
# Survey answers (age etc.), names cleaned the same way as the log.
dataSurvey = read_excel("datasets/surveydataece.xlsx")
dataSurvey$Name <- UnaccentNames(dataSurvey$Name)
# Union of all user names seen in either data source, for the selector.
NameList = unique(c(unique(dataSurvey$Name),unique(datalogs$User)))
# shinydashboard UI: two top-level pages ("Single user" / "All users"), each
# organised as a tabBox of themed panels. Output ids referenced here are
# rendered in `server` below.
ui <- dashboardPage(
dashboardHeader(),
dashboardSidebar(sidebarMenu(
menuItem("Single user", tabName = "singleUser"),
menuItem("All users", tabName = "allUsers")
)),
dashboardBody(
tabItems(
# ---- Aggregate view over every user -------------------------------------
tabItem(tabName = "allUsers",
#h2("All users: "),
tabBox(
# The id lets us use input$tabset1 on the server to find the current tab
id = "tabset1", height = "100%", width = "100%",
# Headline totals (cigarettes / money saved, overall and per user).
tabPanel("Information", "",
fluidRow(
box( verbatimTextOutput("totalCigSaved")),
box( verbatimTextOutput("avgCigSaved")),
box( verbatimTextOutput("totalMoneySaved")),
box( verbatimTextOutput("avgMoneySaved"))
)
),
# Consumption broken down by weekday / time slot.
tabPanel("Classic", "",
fluidRow(
box(plotOutput("countByTime")),
box(plotOutput("allUserCigConsumption"))
)
),
# App-engagement curve across all users.
tabPanel("Engagement", "",
fluidRow(
box(plotOutput("userEngagement"))
)
)
)
),
# ---- Per-user drill-down ------------------------------------------------
tabItem(tabName = "singleUser",
fluidRow(
# input$varUser drives every per-user output in the server.
box(selectInput("varUser",
label = "Choose a user",
choices = unique(NameList),
selected = "Friend")
)),
fluidRow(
tabBox(
title = "single user",
height = "100%", width = "100%",
# The id lets us use input$tabset1 on the server to find the current tab
id = "tabset2",
# Key per-user statistics as value boxes.
tabPanel("information",
fluidRow(
valueBoxOutput("meanConsumedWeekdays"),
valueBoxOutput("meanConsumedWeekenddays"),
valueBoxOutput("singleUserOverallEngagement"),
valueBoxOutput("singleUserTotalCigSavedRender"),
valueBoxOutput("singleUserTotalMoneySavedRender"),
valueBoxOutput("ageCategory"),
valueBoxOutput("age"),
valueBoxOutput("meanConsumed")
)
),
# Consumption charts, progression ratio, and a location map.
tabPanel("Classic",
h3(varUser),
fluidRow(
box(plotOutput("countBy")),
box(plotOutput("pieType"))
),
fluidRow(
h3("Progression"),
p("The progression is a ratio computed according to intital frequence of smoking (behavior) and giving bonuses to manual skips and maluses to cheat"),
box(selectInput("varProgPeriod",
label = "Choose a period type",
choices = c("weeks","days"),
selected = "weeks"),
plotOutput("prog")
),
box(
h3("Cigarettes consumption in last seven days"),
plotOutput("lastSeven")
)
),
fluidRow(
h3("smoking localization"),
box(plotOutput("userMap"))
)
),
# Week-by-week comparisons (overall and filtered by event type).
tabPanel("Week",
fluidRow(
box(
h2("weeks comparison"),
plotOutput("comparisonWeeks")
),
box(
selectInput("modes",
label = "Choose a mode",
choices = unique(datalogs$Type),
selected = "On Time"),
plotOutput("modesPlot")
)
)
),
# Per-user engagement curve.
tabPanel("Engagement",
fluidRow(
box(plotOutput("singleUserEngagement"))
)
),
# Daily consumption over the whole observation period.
tabPanel("All days",
fluidRow(
box(
h2("Cigarettes consumption over all period"),
plotOutput("daysCigarettesConsumption")
),
box(
selectInput("mode2",
label = "Choose a mode",
choices = unique(datalogs$Type),
selected = "On Time"),
plotOutput("daysCigarettesConsumptionModes")
)
)
)
)
)
)
)
))
# Shiny server: renders every plot/value box declared in `ui` from the
# pre-processed globals `datalogs`, `datalogsSmoked`, `tabByUser` and
# `dataSurvey`. Per-user outputs react to input$varUser.
server <- function(input, output) {
# Helper: cigarettes saved by the selected user, as a string (from tabByUser).
singleUserTotalCigSaved<-function(){
data <- subset(tabByUser, Name == input$varUser)$savedCigarettes
totalString = toString(as.integer(data))
}
# All users: boxplot of smoked-cigarette counts per weekday.
output$allUserCigConsumption <- renderPlot({
cigConsumption <- datalogsSmoked[c("nbWeek","Day","Type")]
cigConsumption <- data.frame(table(cigConsumption))
cigConsumption <- cigConsumption[cigConsumption$Freq!=0,c("nbWeek","Day","Freq")]
cigConsumption$Day <- factor(cigConsumption$Day, levels = c("Monday","Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"))
cigConsumption <- cigConsumption[order(cigConsumption$Day), ]
ggplot(cigConsumption, aes(x=Day, y=Freq, fill=Day)) + geom_boxplot() + ggtitle("Mean and std of cigarette consumption per weekday ")
})
# Single user: bar chart of cigarettes per day over the last seven days.
output$lastSeven <- renderPlot({
sub <- subset(datalogsSmoked, User == input$varUser)
sub <- subset(sub, Date >= tail(sub,1)$Date - as.difftime(7, unit="days"))
sub$Date<-as.POSIXct(sub$Date)
smokedDays <- sub %>%
select(Type, Date) %>%
count(Date)
barplot(smokedDays$n,names.arg = smokedDays$Date)
})
# Single user: daily consumption line over the whole observation period.
output$daysCigarettesConsumption <- renderPlot({
sub <- subset(datalogsSmoked, User == input$varUser)
sub$Date<-as.POSIXct(sub$Date)
smokedDays <- sub %>%
select(Type, Date) %>%
count(Date)
plot(smokedDays$Date,smokedDays$n, type = "l")
})
# Same as above, filtered to one event type (input$mode2).
output$daysCigarettesConsumptionModes <- renderPlot({
sub <- subset(datalogsSmoked, User == input$varUser)
sub <- subset(sub,Type == input$mode2)
sub$Date<-as.POSIXct(sub$Date)
smokedDays <- sub %>%
select(Type, Date) %>%
count(Date)
plot(smokedDays$Date,smokedDays$n, type = "l")
})
# Single user: average events per day, per calendar week, for one type.
output$modesPlot <- renderPlot({
sub <- subset(datalogsSmoked, User == input$varUser)
sub <- subset(sub, Type == input$modes)
sub$Week <- strftime(sub$Time, format = "%W")
smokedWeeks <- sub %>%
select(Type, Week) %>%
count(Week)
# divide by 7 to show a per-day average for the week
smokedWeeks$n <- smokedWeeks$n / 7
barplot(smokedWeeks$n,names.arg = smokedWeeks$Week)
})
# Single user: per-day average of all smoked events, per calendar week.
output$comparisonWeeks <- renderPlot({
sub <- subset(datalogsSmoked, User == input$varUser)
sub$Week <- strftime(sub$Time, format = "%W")
smokedWeeks <- sub %>%
select(Type, Week) %>%
count(Week)
smokedWeeks$n <- smokedWeeks$n/7
barplot(smokedWeeks$n,names.arg = smokedWeeks$Week)
})
# Value box: cigarettes saved by the selected user.
output$singleUserTotalCigSavedRender <- renderValueBox({
totalString = singleUserTotalCigSaved()
lastString = paste(totalString,"cigarettes saved ")
valueBox(
paste0(lastString),
paste("Cigarettes saved")
)
})
# Value box: money saved. NOTE(review): this reuses the cigarette count
# directly as dollars, i.e. it implicitly assumes cigPrice == 1 — confirm.
output$singleUserTotalMoneySavedRender <- renderValueBox({
totalString = singleUserTotalCigSaved()
lastString = paste(totalString,"$ saved ")
valueBox(
paste0(lastString),
paste("Money saved")
)
})
# All users: grouped bars of cigarettes per weekday, split by 2h time slots.
output$countByTime <- renderPlot({
cigCompsuption <- datalogsSmoked[c("Day","Type","Hour")]
timeslots <- c(0,2,4,6,8,10,12,14,16,18,20,22,24)
days = c("Monday","Thursday","Wednesday","Tuesday","Friday","Saturday","Sunday")
cigCompsuption$Hour <- cut(as.numeric(cigCompsuption$Hour), breaks = timeslots, right = FALSE)
cigCompsuption <- data.frame(table(cigCompsuption))
cigCompsuption <- aggregate(list(Freq=cigCompsuption$Freq),by = list(Hour=cigCompsuption$Hour,Day=cigCompsuption$Day), sum)
# NOTE(review): factor(..., labels = days) RELABELS levels positionally;
# with days listed as Monday,Thursday,Wednesday,Tuesday this may mislabel
# the weekdays — confirm the intended ordering.
cigCompsuption$Day <- factor(cigCompsuption$Day,labels = days)
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
ggplot(data = cigCompsuption, title = "test", aes( x = Day, y = Freq , fill=Hour))+geom_bar( stat = 'identity',position = 'dodge')+ scale_fill_manual(values=cbbPalette)+ ggtitle("Cigarettes per weekday per time slots")
})
# Value box: mean cigarettes per weekday (Mon-Fri) for the selected user.
output$meanConsumedWeekdays <- renderValueBox({
sub1 <- subset(datalogsSmoked, Day %in% c("Monday","Thursday","Wednesday","Tuesday","Friday"))
sub2 <- subset(sub1, User == input$varUser)
nb = nrow(sub2)
if(nb != 0){
# number of weekdays in the user's observation span
nbWeekDays = sum(!weekdays(seq(min(sub2$Date), max(sub2$Date), "days")) %in% c("Saturday", "Sunday"))
avg = nb/nbWeekDays
avg = lapply(avg, round, 2)
}else{
avg = "not enough data"
}
valueBox(
paste0(avg),
paste("Mean of consumed cigarettes in weekdays")
)
})
# Value box: mean cigarettes per weekend day for the selected user.
output$meanConsumedWeekenddays <- renderValueBox({
sub1 <- subset(datalogsSmoked, Day %in% c("Saturday","Sunday"))
sub2 <- subset(sub1, User == input$varUser)
nb = nrow(sub2)
if(nb != 0){
nbWeekendDays = sum(weekdays(seq(min(sub2$Date), max(sub2$Date), "days")) %in% c("Saturday", "Sunday"))
avg = nb/nbWeekendDays
avg = lapply(avg, round, 2)
}else{
avg = "not enough data"
}
valueBox(
paste0(avg),
paste("Mean of consumed cigarettes in week ends days")
)
})
# Value box: overall mean cigarettes per day for the selected user.
output$meanConsumed <- renderValueBox({
sub2 <- subset(datalogsSmoked, User == input$varUser)
nb = nrow(sub2)
if(nb != 0){
nbDays = length(seq(min(sub2$Date), max(sub2$Date), "days"))
avg = nb/nbDays
avg = lapply(avg, round, 2)
}else{
avg = "not enough data"
}
valueBox(
paste0(avg),
paste("Mean of consumed cigarettes")
)
})
# NOTE(review): input$varType is not defined in the visible UI — this output
# (and cigConsumption below) may never render. Confirm against the full app.
output$countByDay <- renderPlot({
data<-subset(datalogs, Type == input$varType)
fdata <- factor(data$Day,labels = c("Monday","Thursday","Wednesday","Tuesday","Friday","Saturday","Sunday"))
barplot(table(fdata),ylab="number of smoking occurences",main="Hour of the day", col=pickedColors)
})
# All users: occurrences of each event type.
output$countAllUsers <- renderPlot({
barplot(table(datalogs$Type),ylab="number of smoking occurences",main="occurence of smoking by type of smoking", col=pickedColors)
})
# Selected user: occurrences of each event type.
output$countBy <- renderPlot({
barplot(table(subset(datalogs, User == input$varUser)$Type),ylab="number of smoking occurences",main="occurence of smoking by type of smoking", col=pickedColors)
})
# Progression ratio per day or per week: each event type is mapped to a
# score (bonus for skips, malus for cheating) and normalised by the user's
# baseline "Behaviour" frequency.
output$prog <- renderPlot({
sub <- subset(datalogs, User == input$varUser)
sub <- sub[,c("Type","Time")]
regularWeekCount = as.numeric(table(sub$Type)["Behaviour"])
# NOTE(review): the numeric codes below depend on the factor level order of
# Type; if the levels change, the score mapping silently changes too.
sub$Type = as.numeric(sub$Type)
sub$Type[sub$Type == 1] = 1
sub$Type[sub$Type == 2] = -1
sub$Type[sub$Type == 3] = -2
sub$Type[sub$Type == 4] = 0
sub$Type[sub$Type == 5] = -1
sub$Type[sub$Type == 6] = 1
sub$Type[sub$Type == 7] = 0
sub$Date <- strftime(sub$Time,format="%d/%m/%Y %H:%M")
sub$Day <- strftime(sub$Time,format="%d/%m/%Y")
# week
sub$Week <- strftime(sub$Time,format="%W")
#progDay
if(input$varProgPeriod == "days"){
progDay <- aggregate(x=sub$Type, by=list(date = sub$Day), FUN=sum)
progDay$x <- 1 - (as.numeric(progDay$x))/(-regularWeekCount/7)
barplot(progDay$x,names.arg = factor(progDay$date))
}
if(input$varProgPeriod == "weeks"){
#progWeek
progWeek <- aggregate(x=sub$Type, by=list(date = sub$Week), FUN=sum)
progWeek$x <- 1 - (as.numeric(progWeek$x))/(-regularWeekCount)
barplot(progWeek$x,names.arg = factor(progWeek$date))
}
})
# Age of the selected user from the survey, or "undefined" when absent.
userAge <- reactive(
if( length(dataSurvey[dataSurvey$Name == input$varUser,"Age"][[1]]) >0){
dataSurvey[dataSurvey$Name == input$varUser,"Age"][[1]]
}else{
"undefined"
}
)
# NOTE(review): this reactive appears unused (ageCategory recomputes the same
# thing) and would compare the STRING "undefined" with <= 30 — likely dead
# code to remove once confirmed.
userAgeCategory <- reactive( if (userAge()<=30) "young" else if (userAge()<=50) "adult" else "old" )
# Value box: coarse age bucket (young/adult/old).
output$ageCategory <- renderValueBox({
if(userAge() == "undefined"){
val = "undefined"
}else if (userAge()<=30){
val = "young"
}else if (userAge()<=50){
val = "adult"
}else{
val = "old"
}
valueBox(
paste0(val),
paste("Age category")
)
})
# Value box: raw age.
output$age <- renderValueBox({
val = userAge()
valueBox(
paste0(val),
paste("Age")
)
})
# Selected user: pie chart of event-type proportions with % labels.
output$pieType <- renderPlot({
# Calculate the percentage for each day, rounded to one decimal place
slices_labels <- round(table(subset(datalogs, User == input$varUser)$Type)/sum(table(subset(datalogs, User == input$varUser)$Type)) * 100, 1)
# Concatenate a '%' char after each value
slices_labels <- paste(slices_labels, "%", sep="")
pie(table(subset(datalogs, User == input$varUser)$Type),labels = slices_labels, main="proportion of smoking types",col=pickedColors)
})
# Selected user: smoking locations plotted on the world map built at load.
output$userMap <- renderPlot({
plot(newmap, xlim = c(35, 36), ylim = c(32, 35), asp = 1)
points(subset(datalogs, User == input$varUser)$Longitude,subset(datalogs, User == input$varUser)$Latitude, col = "red", cex = .6)
text(x=country_coord$X1,y=country_coord$X2,labels=row.names(country_coord))
})
# NOTE(review): `daylist` is not defined at server scope (only inside the
# renderPlot bodies below), so this output would error if ever shown.
output$userTime <- renderPlot({
plot(daylist$date,daylist$Friend)
})
# NOTE(review): this output first draws a weekday barplot and then the pasted
# engagement code below REPLACES it with the engagement line plot — almost
# certainly a copy/paste error duplicating output$userEngagement. Confirm
# which plot was intended before cleaning up.
output$cigConsumption <- renderPlot({
data<-subset(datalogs, Type == input$varType)
fdata <- factor(data$Day,labels = c("Monday","Thursday","Wednesday","Tuesday","Friday","Saturday","Sunday"))
barplot(table(fdata),ylab="number of smoking occurences",main="Hour of the day", col=pickedColors)
users<-unique(datalogs$User)
daylist<-data.frame(date=c(1:200))
daylist["Score"]<-15
daylist["nbUser"]<-0
for(i in 1:length(users)){
data <- subset(datalogs, User == users[i])
newdate <- seq(as.Date(min(data$Date)), as.Date(max(data$Date)), by="days")
cpt <- 0
for(j in 1:length(newdate)){
subless <- subset(data, Date == newdate[j])
cntless<-count_if("Auto skipped", subless$Type)
cnt<- nrow(subless)
if(j>7){
res <-cntless
cpt <- cpt+1
daylist$Score[cpt] <- daylist$Score[cpt]-res
if(cnt!=0){
daylist$nbUser[cpt] <- daylist$nbUser[cpt]+1
}
}
}
}
plot(x=daylist$date, y=daylist$Score/daylist$nbUser, xlim=c(1,100),ylim=c(-15,0),
col='black', type='l',
main='Engagement following the number of days of testing', xlab='number of days', ylab='engagement')
})
# All users: engagement curve — starts from a score of 15 per day index and
# subtracts every "Auto skipped" event after the first week, averaged over
# the users active on that day.
output$userEngagement <- renderPlot({
users<-unique(datalogs$User)
daylist<-data.frame(date=c(1:200))
daylist["Score"]<-15
daylist["nbUser"]<-0
for(i in 1:length(users)){
data <- subset(datalogs, User == users[i])
newdate <- seq(as.Date(min(data$Date)), as.Date(max(data$Date)), by="days")
cpt <- 0
for(j in 1:length(newdate)){
subless <- subset(data, Date == newdate[j])
cntless<-count_if("Auto skipped", subless$Type)
cnt<- nrow(subless)
if(j>7){
res <-cntless
cpt <- cpt+1
daylist$Score[cpt] <- daylist$Score[cpt]-res
if(cnt!=0){
daylist$nbUser[cpt] <- daylist$nbUser[cpt]+1
}
}
}
}
plot(x=daylist$date, y=daylist$Score/daylist$nbUser, xlim=c(1,100),ylim=c(-15,0),
col='black', type='l',
main='Engagement following the number of days of testing', xlab='number of days', ylab='engagement')
})
# Selected user's mean daily engagement over the whole period:
# per day, engagement = 1 - autoSkips / (autoSkips + smoked events).
# Returns -1 when there is less than a week of data.
OverallEngagement <- function(){
data = subset(datalogs, User == input$varUser)
if(nrow(data) > 0){
datelist <- seq(as.Date(min(data$Date)), as.Date(max(data$Date)), by="days")
lengthdate = length(seq(as.Date(min(data$Date)), as.Date(max(data$Date)), by="days"))
if(lengthdate >7){
engagementList<-data.frame(date=c(-6:lengthdate-7))
engagementList["Engagement"]<-0
engagementList["AutoSkip"]<-0
engagementList["Smoked"]<-0
for(j in 1:length(datelist)){
subless <- subset(data, Date == datelist[j])
cntAutoSkip = count_if("Auto skipped", subless$Type)
if(cntAutoSkip != 0){
smoked = count_if("Skipped", subless$Type) + count_if("Snoozed", subless$Type) + count_if("On time", subless$Type)
engagement = 1 - (cntAutoSkip/(cntAutoSkip + smoked))
engagementList$Engagement[j] = engagement
engagementList$AutoSkip[j] = cntAutoSkip
engagementList$Smoked[j] = smoked
}else{
engagementList$Engagement[j] = 0
engagementList$AutoSkip[j] = 0
engagementList$Smoked[j] = 0
}
}
#engagementList
Overall = sum(engagementList$Engagement)/lengthdate
}else{
Overall = -1
}
}else{
Overall = -1
}
}
# Value box: overall engagement ratio (or "not enough data").
output$singleUserOverallEngagement <- renderValueBox({
val = OverallEngagement()
if(val != -1){
if(is.decimal(val)){
val = lapply(val, round, 2)
}
}else{
val = "not enough data"
}
valueBox(
paste0(val),
paste("Overall Engagement")
)
})
# Selected user: daily engagement curve (same formula as OverallEngagement).
output$singleUserEngagement <- renderPlot({
data = subset(datalogs, User == input$varUser)
datelist <- seq(as.Date(min(data$Date)), as.Date(max(data$Date)), by="days")
lengthdate = length(seq(as.Date(min(data$Date)), as.Date(max(data$Date)), by="days"))
if(lengthdate >7){
engagementList<-data.frame(date=c(-6:lengthdate-7))
engagementList["Engagement"]<-0
engagementList["AutoSkip"]<-0
engagementList["Smoked"]<-0
for(j in 1:length(datelist)){
subless <- subset(data, Date == datelist[j])
cntAutoSkip = count_if("Auto skipped", subless$Type)
if(cntAutoSkip != 0){
smoked = count_if("Skipped", subless$Type) + count_if("Snoozed", subless$Type) + count_if("On time", subless$Type)
engagement = 1 - (cntAutoSkip/(cntAutoSkip + smoked))
engagementList$Engagement[j] = engagement
engagementList$AutoSkip[j] = cntAutoSkip
engagementList$Smoked[j] = smoked
}else{
engagementList$Engagement[j] = 0
engagementList$AutoSkip[j] = 0
engagementList$Smoked[j] = 0
}
}
plot(x=engagementList$date, y=engagementList$Engagement, xlim=c(0,lengthdate-7), ylim=c(0,1),
col='black', type='l',
main='Engagement following the number of days of testing', xlab='number of days', ylab='engagement per day')
}else{
plot(x=c(0:1), y=c(0:1), xlim=c(0,lengthdate-7), ylim=c(0,1),
col='black', type='l',
main='Engagement following the number of days of testing (no data)', xlab='number of days', ylab='engagement per day')
}
})
# Text boxes: totals across all users, derived from tabByUser.
output$totalCigSaved <- renderText({
total = as.integer(sum(tabByUser$savedCigarettes))
totalString = toString(total)
lastString = paste(totalString,"cigarettes saved ")
})
output$avgCigSaved <- renderText({
totalCig = as.integer(sum(tabByUser$savedCigarettes))
totalUsers = nrow(tabByUser)
totalString = toString(as.integer(totalCig/totalUsers))
lastString = paste(totalString,"cigarettes saved per user")
})
output$totalMoneySaved <- renderText({
total = as.integer(sum(tabByUser$savedCigarettes)*cigPrice)
totalString = toString(as.integer(total))
lastString = paste(totalString,"$ saved")
})
# NOTE(review): this label says "€" while totalMoneySaved says "$" —
# confirm which currency is intended and make them consistent.
output$avgMoneySaved <- renderText({
totalMoney = as.integer(sum(tabByUser$savedCigarettes)*cigPrice)
totalUsers = nrow(tabByUser)
totalString = toString(as.integer(totalMoney/totalUsers))
lastString = paste(totalString,"€ saved per user")
})
}
# Launch the dashboard with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
2d614fbfcddc38c814cbdb46af70d40ab410af0e
|
014d8f1396c972e73584ef0a8bb150174e7bba25
|
/Zhenyu Xu 289/HASH.R
|
d54d6ef6fa37e6664ceb6207a944f440f25eb465
|
[] |
no_license
|
BDIF/Home-Work-for-BDIF
|
013e7669193964e16509553f8f19da72ea182017
|
2ee37154b65d7302fb8a8df9b964f5ada35d5ae7
|
refs/heads/master
| 2021-05-14T22:23:52.396046
| 2017-11-25T05:27:23
| 2017-11-25T05:27:23
| 107,395,953
| 0
| 43
| null | 2017-11-21T02:11:12
| 2017-10-18T10:59:49
|
R
|
UTF-8
|
R
| false
| false
| 377
|
r
|
HASH.R
|
# Scratch check: parse whatever table is currently on the system clipboard
# ("clipboard" connection, Windows-style); the result is discarded.
read.table("clipboard",header=T)
# SHA-256 digests of two sentences (digest package); the observed hashes are
# recorded in the comments below each call.
digest('I learn a lot from this class when I am proper listening to the professor','sha256')
#[1] "c16700de5a5c1961e279135f2be7dcf9c187cb6b21ac8032308c715e1ce9964c"
digest('I do not learn a lot from this class when I am absent and playing on my Iphone','sha256')
#[1] "2533d529768409d1c09d50451d9125fdbaa6e5fd4efdeb45c04e3c68bcb3a63e"
|
7d1db78122f07d5977643020f6aafeb088523851
|
eb6972b89e82af6e58b0eefb5cc55310bc8f2a39
|
/devtools.easyRasch/easyRasch/man/print.Rd
|
7b157b9419770c455ae7d4ed97ff3714325c6b03
|
[] |
no_license
|
benjaminschneider212/benjaminschneider.midterm
|
16e5d08478925f001072ac99eb20a21ee96beb6e
|
1e63292cfb6f685c582b1bab3e1359b2385d2843
|
refs/heads/master
| 2021-09-09T14:18:38.434948
| 2018-03-16T23:46:48
| 2018-03-16T23:46:48
| 125,554,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,122
|
rd
|
print.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.R
\name{print}
\alias{print}
\alias{print,ANY-method}
\title{Print function for student name and EAP}
\usage{
print(raschObj, lower = -6, upper = 6)
}
\arguments{
\item{raschObj}{an object of class \code{Rasch}}
\item{lower}{a proposed lower bound of integration. The default is -6}
\item{upper}{a proposed upper bound of integration. The default is 6}
}
\value{
the name of the student and the expected a posteriori value
}
\description{
Lists the name of the student taking the test and finds the value of the expected a posteriori given an input object of class \code{Rasch} and a lower and upper bound.
}
\note{
This function is a slight upgrade of the \code{eap} function in that it also provides the name of the student.
}
\examples{
raschobject<-new("Rasch", name="Benjamin", a=c(1,2,3,4,5), y_i=c(0,1,0,1,0))
upper<-5
lower<-0
print(raschobject, lower, upper)
}
\seealso{
\code{\link{Rasch}}, \code{\link{likelihood}}, \code{\link{prior}}, \code{\link{eap}}, \code{\link{probability}}
}
\author{
Benjamin Schneider
}
|
7a50995a92e6e2b2b330ae5d69df4937f835eab0
|
ce7e91d5f64daaa35ba6fc08a372f23d22dc7408
|
/R/05_visualiseElasticNet.R
|
438ac3957d0626cbf739120f056c2c77181e243e
|
[] |
no_license
|
avila/enetfactorforecastR
|
c43cc774479a4fa56c9a29eac0f5a9408ca848e7
|
e9415c2cd70d3d4616addca46cb6cff1e4181b6f
|
refs/heads/master
| 2023-06-12T19:42:53.074430
| 2021-07-09T14:57:41
| 2021-07-09T14:57:41
| 198,282,276
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,746
|
r
|
05_visualiseElasticNet.R
|
# Render `plot(object, type, ...)` into a PDF file, then re-draw it on the
# (restored) current device and return that result invisibly.
#
# Fix: the pdf() device is now guaranteed to be closed even when plot()
# errors; previously a failing plot left the device open (resource leak) and
# later plots silently went into the half-written file. The best-effort
# dev.off() of any pre-existing device is kept (now silenced).
#
# @param object   Object with a plot() method (e.g. a doTheLoopEN result).
# @param type     Plot type forwarded to plot() (e.g. "spaghetti", "heatmap").
# @param wid,hei  PDF width / height in inches.
# @param file_loc Output path; the default is built from the *expressions*
#                 passed for `object` and `type` via match.call().
# @param ...      Further arguments forwarded to plot().
saveplotaspdf <- function(object, type, wid, hei,
                          file_loc=paste0("./fig/fig_robs_", match.call()$object, "_",
                                          match.call()$type, ".pdf"), ...) {
  # Evaluate the default path now, before any device juggling.
  force(file_loc)
  # Close whatever device happens to be open (best effort, as before).
  try(dev.off(), silent = TRUE)
  pdf(file = file_loc, width = wid, height = hei)
  # Guarantee the PDF device is closed even if plot() throws.
  pdf_open <- TRUE
  on.exit(if (pdf_open) dev.off(), add = TRUE)
  plot(object, type, ...)
  dev.off()
  pdf_open <- FALSE
  # Re-draw on the now-current device, as the original did.
  invisible(plot(object, type))
}
# Figure dimensions in inches: regular plots vs. heatmaps.
wid <- 11
hei <- 7
widhm <- 13
heihm <- 8
## Benchmark -------------------------------------------------------------------
# Either recompute the benchmark (slow) or load the cached result.
# NOTE(review): the cache file is "aa_resbench.RDS" here while the other
# sections use un-prefixed names — confirm the naming is intentional.
if (run_loops) {
resbench <- doTheLoopEN(odata, 12)
saveRDS(resbench, "./data/aa_resbench.RDS")
} else {
resbench <- readRDS("./data/aa_resbench.RDS")
}
saveplotaspdf(resbench, wid = wid, hei = hei, type = "spaghetti")
saveplotaspdf(resbench,type="heatmap", wid = widhm, hei = heihm, cellw = 5, cellh = 4)
## Benchmark + 18 lags ---------------------------------------------------------
# BUG FIX: readRDS() previously assigned to `resbench18`, while the
# saveplotaspdf() calls below use `resbench18lags_p6alpha_60fch`, so the
# cached path failed with "object not found" whenever run_loops was FALSE.
if (run_loops) {
  resbench18lags_p6alpha_60fch <- doTheLoopEN(odata, 18)
  saveRDS(resbench18lags_p6alpha_60fch, "./data/resbench18lags_p6alpha_60fch.RDS")
} else {
  resbench18lags_p6alpha_60fch <- readRDS("./data/resbench18lags_p6alpha_60fch.RDS")
}
saveplotaspdf(resbench18lags_p6alpha_60fch, wid = wid, hei = hei, type = "spaghetti")
saveplotaspdf(resbench18lags_p6alpha_60fch, type = "heatmap", wid = widhm, hei = heihm, cellw = 5, cellh = 4)
## Benchmark + 24 fch ----------------------------------------------------------
# BUG FIX: previously the wrong object (`resbench`) was saved to the cache
# and the cache was loaded back into `resbench`, while the plot calls below
# use `resbench_fch_24` — both branches now use the correct variable.
if (run_loops) {
  resbench_fch_24 <- doTheLoopEN(odata, 12, forecastHorizon = 24)
  saveRDS(resbench_fch_24, "./data/resbench_fch_24.RDS")
} else {
  resbench_fch_24 <- readRDS("./data/resbench_fch_24.RDS")
}
saveplotaspdf(resbench_fch_24, wid = wid, hei = hei, type = "spaghetti")
saveplotaspdf(resbench_fch_24, type = "heatmap", wid = widhm, hei = heihm, cellw = 5, cellh = 4)
|
4e8374149ba2d4f320d79589cb58ab1c9c693713
|
53ce9c6156fba7d5a889ca0cb275e814f2c58781
|
/man/vd_vdrs.Rd
|
1225a3338d2fcee565ec8b2e7e4054bde8ad4816
|
[] |
no_license
|
injuryepi/rvdrs
|
ff9570a10768a5df72ed0350d048c302ffdd8a62
|
e2e61aee5f8e1d06af547c783adecd4acd08342d
|
refs/heads/master
| 2020-05-20T18:22:55.463895
| 2019-05-09T01:40:29
| 2019-05-09T01:40:29
| 185,705,075
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 233
|
rd
|
vd_vdrs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vd_vdrs.R
\name{vd_vdrs}
\alias{vd_vdrs}
\title{Title}
\usage{
vd_vdrs(data, underly)
}
\arguments{
\item{underly}{}
}
\description{
Title
}
|
676574cdb529067b4bcd378df421599964071538
|
ba368228527588560aa7d737f4e0a60277200eac
|
/2018 long-to-wide-df-recoding and merge.R
|
4bb3190258fe3e0d9e3865a8266e65c58e2896d0
|
[] |
no_license
|
peacewaffle/psychiatry
|
2f3cce8bb6ff735a667b3634b26392ced0e84e30
|
7420a76187318e3726d3c7f4f36d5dd719a721bc
|
refs/heads/master
| 2020-04-23T12:36:07.000613
| 2019-05-26T22:18:18
| 2019-05-26T22:18:18
| 171,174,763
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,859
|
r
|
2018 long-to-wide-df-recoding and merge.R
|
# LAB.csv: split the lab data into one wide-format CSV per TESTNAME.
# Fix: write.csv() ignores `col.names` and `sep` (with a warning each), so
# those arguments were dropped — the files written are byte-identical to
# before, just without the per-file warnings. If tab-separated output was
# actually intended, switch to write.table() instead.
labpre <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/LABdispo.csv", header = TRUE, as.is = TRUE)
# unique test names (names(table(...)) also sorts and drops NA, as before)
nam <- names(table(labpre$TESTNAME))
for (i in seq_along(nam)) {
  # rows for this test only, reshaped long -> wide (one row per patient)
  dat <- labpre[labpre$TESTNAME == nam[i], ]
  dat <- reshape(dat, idvar = "CATIEID", timevar = "VISITID", direction = "wide")
  # note: "seperated" (sic) is kept to preserve the existing output filenames
  write.csv(dat, paste(as.character(nam[i]), "seperated.csv", sep = "_"),
            row.names = FALSE, quote = FALSE)
}
# Reshaping each file
# Each CATIE instrument file is loaded and pivoted from long (one row per
# visit) to wide (one row per patient, columns suffixed by VISITID).
# Files without a VISITID column (DEMO, DISP, SURV) are loaded as-is.
adasdf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/ADAS.csv",header = T, as.is = T)
adasdf.reshaped <- reshape(adasdf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
aimsdf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/AIMS-ver2.csv",header = T, as.is = T)
aimsdf.reshaped <- reshape(aimsdf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
bprsdf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/BPRS.csv",header = T, as.is = T)
bprsdf.reshaped <- reshape(bprsdf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
cornldf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/CORNL.csv",header = T, as.is = T)
cornldf.reshaped <- reshape(cornldf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
demodf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/DEMO-ver2.csv",header = T, as.is = T)
dispdf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/DISP-ver2.csv",header = T, as.is = T)
ecgdf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/ECG-ver2.csv",header = T, as.is = T)
ecgdf.reshaped <- reshape(ecgdf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
expover2df <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/EXPO-ver2.csv",header = T, as.is = T)
expover2df.reshaped <- reshape(expover2df,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
mmsedf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/MMSE.csv",header = T, as.is = T)
mmsedf.reshaped <- reshape(mmsedf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
neurocogdf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/NEUROCOG.csv",header = T, as.is = T)
neurocogdf.reshaped <- reshape(neurocogdf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
survdf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/SURV.csv",header = T, as.is = T)
vitaldf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/VITAL.csv",header = T, as.is = T)
vitaldf.reshaped <- reshape(vitaldf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
npidf <- read.csv("~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/NPI-ver2.csv",header = T, as.is = T)
npidf.reshaped <- reshape(npidf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
# Merging all files
# Successive full outer joins on CATIEID (all = TRUE keeps patients missing
# from some instruments). Each intermediate is kept as its own variable.
my.merged.df <- merge(adasdf.reshaped, aimsdf.reshaped, by = "CATIEID", all = T)
my.merged2.df <- merge(my.merged.df, bprsdf.reshaped, by = "CATIEID", all = T)
my.merged3.df <- merge(my.merged2.df, cornldf.reshaped, by = "CATIEID", all = T)
my.merged4.df <- merge(my.merged3.df, demodf, by = "CATIEID", all = T)
my.merged5.df <- merge(my.merged4.df, dispdf, by = "CATIEID", all = T)
my.merged6.df <- merge(my.merged5.df, ecgdf.reshaped, by = "CATIEID", all = T)
my.merged7.df <- merge(my.merged6.df, expover2df.reshaped, by = "CATIEID", all = T)
my.merged8.df <- merge(my.merged7.df, mmsedf.reshaped, by = "CATIEID", all = T)
my.merged9.df <- merge(my.merged8.df, neurocogdf.reshaped, by = "CATIEID", all = T)
my.merged10.df <- merge(my.merged9.df, survdf, by = "CATIEID", all = T)
my.merged11.df <- merge(my.merged10.df, vitaldf.reshaped, by = "CATIEID", all = T)
my.merged12.df <- merge(my.merged11.df, npidf.reshaped, by = "CATIEID", all = T)
# Writing the merged file
write.csv(my.merged12.df,file = "~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/Comma Delimited Data_R/allmerged.csv",quote = F)
# examle
# NOTE(review): scratch/example code — `mydf` and `mydf2` are never defined
# in this script, so these lines error if run top-to-bottom. Consider
# deleting this section or moving it to a separate example file.
str(mydf)
?reshape
## example:
# reshape(dat1, idvar = "name", timevar = "numbers", direction = "wide")
mydf.reshaped <- reshape(mydf,idvar = "CATIEID",timevar = "VISITID",direction = "wide")
head(mydf.reshaped)
my.merged.df <- merge(mydf.reshaped,mydf2,by = "CATIEID",all = T)
head(my.merged.df)
write.csv(my.merged.df,file = "~/Desktop/CATIE-AD post-hoc_RIS_南さん/R_CATIE_AD_RIS/AIMS-wide-withDEMO.csv",quote = F)
|
4d350c1a6446a0ce89dfb51577333d1456a135dd
|
9c58c2abb9c21b9f7ece5a4d21c6da80b7365777
|
/plot4.R
|
8288e1988f681bd5f697fd9b578f84cdceebc5d2
|
[] |
no_license
|
mathewjoy/cds_exdata_p2
|
feb2cde80477dda3f6d25742efc162f001d39457
|
677fd509697dca1f38a57c9e5f872a750ae52f5a
|
refs/heads/master
| 2021-01-10T07:20:13.084847
| 2015-10-25T16:30:17
| 2015-10-25T16:30:17
| 44,916,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
plot4.R
|
library(ggplot2)
#download and unzip if file not already present
archiveDF <- "NEI_data.zip"
if(!file.exists(archiveDF)) {
archiveURL <- "http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# NOTE(review): method="curl" requires curl on the PATH — non-portable.
download.file(url=archiveURL,destfile=archiveDF,method="curl")
}
if(!(file.exists("summarySCC_PM25.rds") &&
file.exists("Source_Classification_Code.rds"))) {
unzip(archiveDF)
}
##plot4
#load data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset data
# Keep only sources that are both combustion-related (SCC.Level.One) and
# coal-related (SCC.Level.Four), then filter the emissions to those codes.
combustion <- grepl("comb", SCC$SCC.Level.One, ignore.case=TRUE)
coal <- grepl("coal", SCC$SCC.Level.Four, ignore.case=TRUE)
coalCombustion <- (combustion & coal)
combustionSCC <- SCC[coalCombustion,]$SCC
combustionNEI <- NEI[NEI$SCC %in% combustionSCC,]
# Yearly totals: geom_bar(stat="identity") sums the per-row Emissions/10^5
# values within each year factor into one bar per year.
ggp <- ggplot(combustionNEI,aes(factor(year),Emissions/10^5)) +
geom_bar(stat="identity",fill="grey",width=0.75) +
theme_bw() + guides(fill=FALSE) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (10^5 Tons)")) +
labs(title=expression("PM"[2.5]*" Coal Combustion Source Emissions Across US from 1999-2008"))
print(ggp)
# Copy the rendered plot to a PNG file and close that device.
dev.copy(png, file="plot4.png", height=480, width=480,units="px",bg="transparent")
dev.off()
|
2ef2b235e08d1dfa0731c54141e7b4f36c84cd3e
|
89ade40b52968d3ca1ac2a3725d53425f18fa203
|
/Introduction To R/Analyze matrices, you shall.r
|
9a564116dae3f25b917a45f7cba05f950098d352
|
[] |
no_license
|
Diganta-droid/Data-Camp-Exercise
|
bdc796abc476d1d7ab201f6911ce56580c335b2b
|
4bfd2e3bb02b382f5876e4010ed04e5e1aa147c7
|
refs/heads/master
| 2022-09-17T14:31:26.619462
| 2020-06-03T07:45:16
| 2020-06-03T07:45:16
| 266,725,467
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
Analyze matrices, you shall.r
|
Analyze matrices, you shall
It is now time to get your hands dirty. In the following exercises you will analyze the box office numbers of the Star Wars franchise. May the force be with you!
In the editor, three vectors are defined. Each one represents the box office numbers from the first three Star Wars movies. The first element of each vector indicates the US box office revenue, the second element refers to the Non-US box office (source: Wikipedia).
In this exercise, you'll combine all these figures into a single vector. Next, you'll build a matrix from this vector.
Instructions
100 XP
Use c(new_hope, empire_strikes, return_jedi) to combine the three vectors into one vector. Call this vector box_office.
Construct a matrix with 3 rows, where each row represents a movie. Use the matrix() function to do this. The first argument is the vector box_office, containing all box office figures. Next, you'll have to specify nrow = 3 and byrow = TRUE. Name the resulting matrix star_wars_matrix.
Code::
# US and non-US box office revenue (millions USD) for each film:
# element 1 = US, element 2 = non-US
new_hope       <- c(460.998, 314.4)
empire_strikes <- c(290.475, 247.900)
return_jedi    <- c(309.306, 165.8)
# Flatten the three films into one numeric vector (film order preserved)
box_office <- c(new_hope, empire_strikes, return_jedi)
# One row per film, filled row-by-row: column 1 = US, column 2 = non-US
star_wars_matrix <- matrix(box_office, nrow = 3, byrow = TRUE)
|
a4860fbe564a53f2541d515d264b7d1ae585fbfd
|
11eb3a60f9e86f576f8e281d73892e0ce52c5b7d
|
/DataAdaptor/00_data_adaptor_test.R
|
def6f9f954f2b57992bcd4dc5e9d577a9a76c8e5
|
[] |
no_license
|
wellermatt/exp1.1
|
c34148e7b43cdb25f161b1c0924ce62df54d0223
|
70afbb54b93e65af0052244aafab6da4e4e40482
|
refs/heads/master
| 2016-08-03T13:20:44.360020
| 2014-02-26T12:19:56
| 2014-02-26T12:19:56
| 16,102,063
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
00_data_adaptor_test.R
|
# Load project configuration (paths such as pth.dropbox.data) and the
# data-loading helper functions, then point the session at the shared
# data folder and load the calendar lookup.
source("config.R")
source("./DataAdaptor/10_load_data_various.R")
setwd(pth.dropbox.data) ; f_load.calendar()
# Default parameters used when exercising this file interactively
par.category = "beer" ; par.periodicity = "445"
# Load the full regression input dataset for a category/periodicity.
# If par.item is non-NULL AND bo.save is TRUE, subset to that single
# item (UPC), refresh the fc.item factor levels, and save the subset as
# the category's "test" regression file.
# Returns the (possibly subsetted) data.table.
f_da.reg.cat.all = function(par.category, par.periodicity, par.item="00-01-18200-53030", bo.save =FALSE) {
  # NOTE(review): setwd() is a session-wide side effect; assumes
  # pth.dropbox.data was defined by config.R
  setwd(pth.dropbox.data)
  fil <- paste0("./regression datasets/", par.category,
                ".regression.data.", par.periodicity, ".rds")
  sp <- readRDS(fil)
  # && (scalar, short-circuiting) instead of &: a NULL par.item would
  # make the old `!is.null(par.item) & bo.save == TRUE` condition
  # zero-length inside if(); isTRUE() guards against NA/non-logical input
  if (!is.null(par.item) && isTRUE(bo.save)) {
    sp <- sp[UPC == par.item]          # data.table filter on the item code
    fil <- paste0("./regression datasets/", par.category,
                  ".test.regression.data.", par.periodicity, ".rds")
    sp[, fc.item := factor(fc.item)]   # drop unused factor levels after subsetting
    saveRDS(sp, fil)
  }
  sp
}
# Read back the saved single-item ("test") regression dataset for a
# category/periodicity, as written by f_da.reg.cat.all().
f_da.reg.cat.test = function(par.category, par.periodicity) {
  setwd(pth.dropbox.data)
  test.file <- paste0("./regression datasets/", par.category,
                      ".test.regression.data.", par.periodicity, ".rds")
  readRDS(test.file)
}
|
11aefdedf27df05f8a4550d8ef6c6c29a4aae480
|
d8b1e4a1b74cc4275f4c2963804f4f42e2352385
|
/man/getDataValuesDownload.Rd
|
21609d3148a535c3574ea54c3e933459d51cb69f
|
[
"MIT"
] |
permissive
|
SRHilz/GliomaAtlas3D
|
fc080b9adf9750b7f7806bc1cb5de29f4d9aef60
|
4b9b85497a0bffb415fafb5a6cb4aee88136f6d7
|
refs/heads/master
| 2021-05-19T16:36:46.225506
| 2020-11-19T19:09:12
| 2020-11-19T19:09:12
| 252,030,374
| 4
| 3
|
NOASSERTION
| 2020-05-12T17:43:27
| 2020-04-01T00:23:40
|
R
|
UTF-8
|
R
| false
| true
| 1,225
|
rd
|
getDataValuesDownload.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getDataValuesDownload.R
\name{getDataValuesDownload}
\alias{getDataValuesDownload}
\title{Retrieve the values for a particular patient's dataset}
\usage{
getDataValuesDownload(
patientsFinal,
sfConversion,
datasetD,
typeD,
rowSelectionD,
thresholdD,
conversion,
tumorDatasetsPath,
sampleData
)
}
\arguments{
\item{patientsFinal}{Patient ID in long form (i.e. Patient300 not P300)}
\item{sfConversion}{Converts patientID to sf# in a one-to-one relationship}
\item{datasetD}{name of dataset (i.e. Histology, Amplification, Copy Number, etc)}
\item{typeD}{only required if dataset is Histology}
\item{rowSelectionD}{only required if dataset is Copy Number, RNAseq, Cell Types, Cancer-associated Processes, or Expansions}
\item{thresholdD}{only required if dataset is Amplification}
\item{conversion}{converts dataset to name of data file, where value is dataset and name is file name (ex c(cn.rds='Copy Number', ))}
\item{tumorDatasetsPath}{path to datasets}
\item{sampleData}{contains spatial information about each sample}
}
\description{
Creates a vector of final data values for a particular patient, tumor, and dataset
}
|
07a62beb7fe57caebfa2cd559ba27aaf8d067469
|
1db5084d33ce23cfc7031509e5e9266b0d8ae07c
|
/vignettes/cell_segmentation/step1_streak_removal/removing_streaks.R
|
185dd25625086ab1af7f1431735e4dc04a39d4f3
|
[] |
no_license
|
Kaestner-Lab/T2D_IMC
|
6140fcf1d9ee0fd6aa5544253fb9171d77ebc478
|
3802926dd85a1f1cbbb91aec8bd616161311b211
|
refs/heads/main
| 2023-04-10T06:58:46.108000
| 2021-11-05T17:25:13
| 2021-11-05T17:25:13
| 375,136,919
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,281
|
r
|
removing_streaks.R
|
# Removing streaks or hot pixels in IMC images
# Using the first two noise/background channel for points detection
# Then for rest of the channels, check if these points are greater than the mean of local neighborhood(5x5), if yes, remove.
# To run the script, please see the end of the file.
# `tiff` the image data.
# n=5 is the size of the mask. The value has to be odd so that there is a
# center row to drop. The flashes we have seen are in the horizontal
# orientation so we drop a row (not column.)
# `apply_to` sets an intensity threshold. Pixels over this threshold are
# considered to be in a flash. If the value is negative, it is treated as a
# 'negative' quantile and is used to set the threshold value from the tiff data.
# For example, the default value of -0.1 means that the threshold will be set at
# the 90% quantile of pixel brightness in the image.
# `min_rel_change` this is the fold change needed to trigger replacement. The
# purpose is to allow `apply_to` to be set liberally, but to only modify pixels
# that are 'clearly' flashes.
# ---
# free_mask_x and free_mask_y are relative column and row values from define the
# mask relative to the center pixel. For example for n=5 the rows will have
# values in -2, -1, 1, and 2 (once the center row is dropped) and columns will
# be -2,-1,0,1, and 2.
# to speed things up we define a list of the flash pixel coordinates, oi. We do
# not need to apply the filter everywhere so this speeds up the calculation
# by skipping useless work.
# as we iterate through the flash pixel coordinates we 'bind' the mask to the
# pixel's location and store in bound_mask.
# using the bound mask we calculate the local mean.
# Do we update the pixel? If it was zero we do (this is an extra non-intuitive
# goal) If the mean is 'much' less than the pixel, then we replace it in place.
# once we have iterated over the pixels, we return the modified tiff.
# Detect "flash"/streak candidate pixels in one 2-D image channel.
#   tiff           : numeric matrix of pixel intensities
#   n              : mask size (forced odd); the centre ROW of the mask is
#                    dropped because the streaks seen so far are horizontal
#   apply_to       : intensity threshold; negative values are treated as a
#                    "negative quantile" (e.g. -0.02 -> the 98% quantile)
#   min_rel_change : fold change vs the local mean required to flag a pixel
# Returns a data.frame with (row, col) of the flagged pixels, or NULL
# (implicitly) when no pixel exceeds the threshold.
# NOTE(review): this variant only DETECTS points -- the in-place
# replacement line is commented out, so rv_tiff/update_px are vestigial.
tiff.deflash_pt <- function ( tiff, n=5, apply_to=-0.02, min_rel_change=2.0 ) {
  if ( n %% 2 == 0) n <- n + 1
  half_n <- (n-1)/2
  # relative offsets of the mask around the centre pixel; the centre row
  # (offset 0) is removed from free_mask_y below
  offset_i <- 1:n - half_n - 1
  free_mask_x <- rep( offset_i, n-1 )
  free_mask_y <- rep( offset_i, each=n )
  free_mask_y <- free_mask_y[ free_mask_y != 0 ]
  # free_mask <- cbind( free_mask_y, free_mask_x ) # backwards to match row,col indexing
  inv_min_rel_change <- 1/min_rel_change
  rv_tiff <- tiff
  # convert quantile cutoff to absolute
  if (apply_to < 0) {
    apply_to <- as.numeric(quantile(as.numeric(tiff), 1+apply_to))
  }
  if(apply_to==0){
    apply_to<-1
  }
  # get a data.frame of the pixel coordinates for the flash pixels,
  # keeping only pixels far enough from the border for the mask to fit
  oi <- (data.frame( which( tiff >= apply_to, arr.ind=TRUE ))
         %>% dplyr::filter( row >= half_n, col >= half_n )
         %>% dplyr::filter( row <= nrow(tiff) - half_n, col <= ncol(tiff) - half_n))
  oi_n <- nrow(oi)
  # work through the flash pixels.
  print(paste0("going through ",oi_n))
  n_changed <- 0
  # points to be changed
  oi_tc <- c()
  if(oi_n>=1){
    for (i in 1:oi_n) {
      #print(i)
      # mask anchored ("bound") at candidate pixel i
      bound_mask <- cbind( free_mask_y + oi$row[i], free_mask_x + oi$col[i] )
      old_value <- tiff[oi$row[i],oi$col[i]]
      new_value <- mean( tiff[bound_mask] ) + 1
      if ( old_value == 0 ) {
        update_px <- TRUE
      } else if ( new_value/old_value < inv_min_rel_change ) {
        # pixel is >= min_rel_change times brighter than its neighbourhood
        update_px <- TRUE
        n_changed<-n_changed+1
        oi_tc<-c(oi_tc,i)
      } else {
        update_px <- FALSE
      }
      #if (update_px) rv_tiff[ oi$row[i], oi$col[i] ] <- new_value
    }
    print(length(oi_tc))
    return(oi[oi_tc,])
  }
}
# NOTE(review): this function appears broken/unused. It ignores its own
# arguments (data, filename, bits_per_sample) and instead references the
# free variables result_tiff_obj, output_directory and i, which only
# exist inside deflash_folder_2(); the trailing bare `bit` argument in
# the tiff() call is undefined and will error at runtime. Looks like an
# abandoned extraction of the write logic -- confirm before use.
write_multi_tiff<-function(data,filename,bits_per_sample,width,height){
  write_tif(result_tiff_obj[[1]][,,1,],paste0(output_directory,i),
            bits_per_sample = 16,overwrite = TRUE)
  tiff(filename = paste0(output_directory,i),
       width = round(width/1000*0.4,digits = 1), height = round(height/1000*0.4,digits = 1), units = "inch", pointsize = 12,
       compression = "none", bit)
}
# Remove streaks from a multi-channel image stack (4-D array, channels
# on dimension 4). Candidate streak pixels are detected independently on
# the first two (noise/background) channels with a lenient 2-fold
# criterion; only pixels flagged in BOTH are treated as streaks. Each
# channel is then cleaned: a flagged pixel is replaced by its local mean
# (n x n mask, centre row dropped) when it is 0 or exceeds
# min_rel_change times that mean.
# Returns list(cleaned_stack, changed_flag); changed_flag is TRUE when
# at least 5 pixels were modified across all channels.
tiff_deflash_multistack<- function(tiff, n=5, apply_to=-0.02, min_rel_change=5.0 ){
  # first two noise channels, the minimal change requirement is much less, only 2 fold-change required
  points_1 <- tiff.deflash_pt(tiff[,,,1],n=n, apply_to=apply_to, min_rel_change=2.0 )
  points_2 <- tiff.deflash_pt(tiff[,,,2],n=n, apply_to=apply_to, min_rel_change=2.0 )
  # NOTE(review): intersect() on two data.frames -- presumably relies on
  # dplyr's data-frame method being attached; base::intersect would not
  # row-intersect. Confirm dplyr is loaded by the caller.
  oi_all <- intersect(points_1,points_2)
  print(paste0("Number of common signals detected in channel 1 and 2: ", nrow(oi_all)))
  # rebuild the same centre-row-dropped mask used in tiff.deflash_pt
  if ( n %% 2 == 0) n <- n + 1
  half_n <- (n-1)/2
  offset_i <- 1:n - half_n - 1
  free_mask_x <- rep( offset_i, n-1 )
  free_mask_y <- rep( offset_i, each=n )
  free_mask_y <- free_mask_y[ free_mask_y != 0 ]
  inv_min_rel_change <- 1/min_rel_change
  oi_n <- nrow(oi_all)
  # work through the flash pixels.
  all_changed <- 0
  res_tiff <- tiff
  if(oi_n>=1){
    print(paste0("removing streaks: ", oi_n, " points"))
    for(j in 1:dim(tiff)[4]){
      n_changed<-0
      tiff_i<-tiff[,,,j]
      rv_tiff <- tiff_i
      for (i in 1:oi_n) {
        #print(i)
        bound_mask <- cbind( free_mask_y + oi_all$row[i], free_mask_x + oi_all$col[i] )
        old_value <- tiff_i[oi_all$row[i],oi_all$col[i]]
        new_value <- mean( tiff_i[bound_mask] )
        if ( old_value == 0 ) {
          # zero pixels are always filled in with the local mean
          update_px <- TRUE
        } else if ( new_value/old_value < inv_min_rel_change ) {
          update_px <- TRUE
          n_changed <- n_changed+1
          all_changed <- all_changed+1
        } else {
          update_px <- FALSE
        }
        if (update_px) rv_tiff[ oi_all$row[i], oi_all$col[i] ] <- new_value
      }
      # round back to integer intensities for the 16-bit output
      res_tiff[,,,j]<-round(rv_tiff,digits = 0)
      print(paste0("In channel ", j, ": ",n_changed," points changed."))
    }
    print(paste0("All channels: ",all_changed," points changed."))
  }
  return(list(res_tiff,(all_changed>=5)))
}
# Batch-deflash every .tiff under input_directory.
# The input tree is first copied into output_directory (originals are
# never modified), then each image is read, streak-corrected with
# tiff_deflash_multistack() and written back in place as 16-bit TIFF.
# Returns a character vector of the images in which >= 5 pixels were
# changed, i.e. images that actually contained streaks.
deflash_folder_2<- function(input_directory,output_directory,n=5, apply_to=-0.02, min_rel_change=5.0){
  # copy the whole directory into the output folder
  file.copy(input_directory, output_directory, recursive = TRUE, overwrite = TRUE)
  # all tiff files to process; "\\.tiff$" anchors the match to the file
  # extension (the old ".tiff" regex matched any character before "tiff")
  files <- list.files(output_directory, recursive = TRUE, pattern = "\\.tiff$")
  log <- c()
  for (i in files) {
    print(paste0("processing ", i))
    flash_tiff <- read_tif(paste0(output_directory, i))
    result_tiff_obj <- tiff_deflash_multistack(flash_tiff, n, apply_to, min_rel_change)
    write_tif(result_tiff_obj[[1]][, , 1, ], paste0(output_directory, i),
              bits_per_sample = 16, overwrite = TRUE)
    # NOTE(review): removed a stray, syntactically invalid tiff(...)
    # device call pasted from the grDevices help page (it contained a
    # literal `...` and an undefined `antialias`, and would have errored
    # on the first iteration).
    if (result_tiff_obj[[2]]) {
      log <- c(log, i)   # record images where streaks were actually removed
    }
  }
  return(log)
}
#==== Run here =====
# input folder should contain the .tiff files to clean
input_directory<-"~/Desktop/NPOD6259_Body/"
# output folder location; will mimic the folder structure of the input folder
output_directory<-"~/Desktop/tmp/"
log <- deflash_folder_2(input_directory,output_directory)
# print the names of the images that were changed
# (paste0 over a vector prints one element per changed image)
print(paste0("Images with streaks: ",log))
|
2eb7b016b9b84cb0b9cd13eef6e51c43b417b491
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/man/get_mkl_threads.Rd
|
9c515cbe8642358551d9a715156d8dd776c16236
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,356
|
rd
|
get_mkl_threads.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_mkl_threads.R
\name{get_mkl_threads}
\alias{get_mkl_threads}
\title{Finds number of threads set for MKL operations}
\usage{
get_mkl_threads()
}
\value{
number of threads assigned to MKL
}
\description{
\code{get_mkl_threads()} Uses environmental variable "MKL_NUM_THREADS" used
in LBD Singularity images to determine how many threads have been assigned
for MKL operations.
}
\details{
The OpenMP function \code{omp_get_num_threads()} will report how many threads
have been set for OpenMP operations. Unfortunately, there is no
"mkl_get_num_threads()" function in the MKL library, so we have to rely on
our MKL_NUM_THREADS environmental variable to find out how many threads have
been assigned for MKL operations. Fortunately, we can guarantee that
MKL_NUM_THREADS has been set in LBD Singularity containers spun up by either
"shell_sing.sh" or "singR.sh".
}
\seealso{
This function is used by:
\code{\link{get_total_threads()}}
Other Mutlti-threading Functions: \code{\link{get_max_forked_threads}},
\code{\link{get_omp_threads}},
\code{\link{get_total_threads}},
\code{\link{setmkldynamic}}, \code{\link{setmklthreads}},
\code{\link{setompnested}}, \code{\link{setompthreads}}
}
\author{
Ian M. Davis, \email{imdavis@uw.edu}
}
\concept{Mutlti-threading Functions}
|
14c05bd280a51f97610a029d378030d3fc11938c
|
58f5e50edf2b3b1bd79644196e6323e2b16c9c7c
|
/plot1.R
|
52ca3fc069a328c841970b064980ae5d656402fb
|
[] |
no_license
|
Hipporz/ExData_Plotting1
|
42581705412618bf8f10aed0a2bb97854717569d
|
ce73e020cf332c269aeaa9e55967a9a5b3f6867d
|
refs/heads/master
| 2021-01-16T19:19:39.627220
| 2014-07-13T04:49:18
| 2014-07-13T04:49:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 570
|
r
|
plot1.R
|
# NOTE(review): hard-coded setwd() ties the script to one machine;
# run from the data directory (or use a relative path) instead.
setwd("/Users/eric/Downloads")
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE)
# Parse the Date column (dd/mm/yyyy) and keep only the two analysis days
data[,1]<-as.Date(strptime((data[,1]),format="%d/%m/%Y"))
Feb01<-data[data[,1]==as.Date("2007-02-01"),]
Feb02<-data[data[,1]==as.Date("2007-02-02"),]
both<-rbind(Feb01,Feb02)
# Columns 3..9 are the measurement columns; coerce via character so any
# non-numeric placeholder values become NA instead of factor codes
for (i in 3:9) {
  both[,i]<-as.numeric(as.character(both[,i]))
}
##plot1: histogram of Global Active Power over the two days
par(mfrow=c(1,1))
hist(as.numeric(both[,3]),
     col="Red",xlab="Global Active Power (kilowatts)",
     main="Global Active Power",ylim=c(0,1200),xlim=c(0,6))
# Copy the on-screen plot to plot1.png, then close the PNG device
dev.copy(png,'plot1.png',width=480,height=480)
dev.off()
|
9d49a3426520fd667ff1fa59c0cbfb193781b5bb
|
5ce1cd2be24a5d1cc7d7640f84d3b3a937a7540f
|
/inst/repository/analyze_suma_output.R
|
1a669ad65a6b5407de16453e228d06a9d6c37fc0
|
[] |
no_license
|
xylimeng/rave
|
ec738c2c01763055dbabea267418903a4153cf5e
|
a0b89b41c2b07548c867f2ba69a4b8dcfc1fb197
|
refs/heads/master
| 2021-04-26T22:48:24.293626
| 2018-03-05T17:19:52
| 2018-03-05T17:19:52
| 124,144,616
| 1
| 0
| null | 2018-03-06T22:07:19
| 2018-03-06T22:07:19
| null |
UTF-8
|
R
| false
| false
| 603
|
r
|
analyze_suma_output.R
|
# Post-hoc analysis of SUMA/AFNI 1D output: subsample one value per
# 42-row block, convert t statistics to two-sided p-values, and count
# FDR-significant entries (two-stage: single condition, then the
# two-condition contrast restricted to stage-1 hits).
twoConditions <- read.delim('RAFE_temp_2017-06-22_15_57_03.1D', header=FALSE)
# keep the first row of each 42-row block across 240 blocks
# (presumably one value per node -- confirm against the SUMA export)
keep_ind <- seq(1, 240*42, by=42)
keep_ind %>% head(10)
twoConditions <- twoConditions[keep_ind,]
singleCondition <- read.delim('RAFE_temp_2017-06-22_16_01_49.1D', header=FALSE)
singleCondition <- singleCondition[keep_ind,]
# two-sided p-values from t statistics with 189 df (single-condition test)
ocps <- singleCondition %>% {2 * (1 - pt(., 189))}
is_sig <- which(p.adjust(ocps, method='fdr') < 0.05)
length(is_sig)
# among stage-1 hits, test the two-condition values (df halved to 189/2)
# and FDR-correct again
tcps <- twoConditions[is_sig] %>% {2 * (1 - pt(., 189/2))}
is_sig2 <- which(p.adjust(tcps, method='fdr') < 0.05)
length(is_sig2)
twoConditions[is_sig[is_sig2]] %>% length
|
50e303832d5f205240596fa5b62986e32284134f
|
1ba68c4d75f0f38973225ef68eada0846de9e7ed
|
/analysis/11_0_DTW_functions.R
|
069ba047575d7c4bbe8fbf0306fb4ae09353f9f3
|
[
"MIT"
] |
permissive
|
opensafely/hdruk-os-covid-paeds
|
76f31b1878f8769972981e52884962ee01477162
|
19e52d891545a4cf1d4db7bea8788e0e89862d98
|
refs/heads/main
| 2023-08-23T10:42:25.593590
| 2023-05-03T11:35:41
| 2023-05-03T11:35:41
| 404,328,097
| 2
| 0
|
MIT
| 2022-05-06T11:42:29
| 2021-09-08T11:51:44
|
Python
|
UTF-8
|
R
| false
| false
| 1,505
|
r
|
11_0_DTW_functions.R
|
# Studying the Long-term Impact of COVID-19 in Kids (SLICK)
#
# 11_DTW_functions.R
# Centre for Medical Informatics, Usher Institute, University of Edinburgh 2022
# School of Informatics, University of Edinburgh 2022
# Written by: Karthik Mohan, James Farrell
#
# This script contains functions to be used in 11_DTW_tsclust.R
# Function to separate time series for each individual ---
# Builds one patient's resource-use trajectory: each service contact is
# encoded as its integer position in the global `service_list`, and the
# resulting ts() starts at the decimal date (lubridate) of the first
# contact. The patient id is attached as a "name" attribute.
get_indv_resource_traj = function(gdf, id){
  first_contact <- decimal_date(gdf$date)[1]
  service_codes <- match(gdf$service, service_list)
  traj <- ts(service_codes, start = first_contact)
  attr(traj, "name") <- id   # tag the series with the patient id
  traj
}
# Function to register custom distance function with proxy ----
# Registers two entries in proxy's distance database (pr_DB), guarded by
# entry_exists() so the function is idempotent:
#   "overlap"  - 0/1 mismatch metric between two encoded values
#   "nOverlap" - DTW alignment distance computed under "overlap",
#                normalised by alignment path length
regist_dist = function() {
  cat("[INFO] Registering the customized distance function with proxy...")
  # 0 when the two values match, 1 otherwise
  dist_overlap = function(x, y) {
    dist = 1
    if (x == y){dist = 0}
    return(dist)
  }
  if (!pr_DB$entry_exists("overlap")){
    pr_DB$set_entry(FUN = dist_overlap, names=c("overlap"), loop = TRUE, type = "metric",
                    distance = TRUE, description = "The overlap distance counts number of matches")
  }
  # normalised DTW distance built on top of "overlap"
  noverlap = function(ts1, ts2, ...) {
    dtw(ts1, ts2, dist.method = "overlap", distance.only = TRUE, ...)$normalizedDistance
  }
  if (!pr_DB$entry_exists("nOverlap")){
    pr_DB$set_entry(FUN = noverlap, names=c("nOverlap"), loop = TRUE, type = "metric",
                    distance = TRUE, description = "The normalized global overlap distance")
  }
  cat("Done\n")
}
|
b656564520b6e238a614ddf01f9bef76262f31cd
|
4521a29538bc44232a9e99120acb1dd88f6b8497
|
/Codes/useless/shiny_server.R
|
75ddbe221f62972fc10c32a167e851f101f15159
|
[] |
no_license
|
RachelAnqi/Sales_training_programme
|
6772879042830b635e86f8d071a841afefd838fc
|
d980bb8f0456fdc7fa251c0c2e04681cff2b59ba
|
refs/heads/master
| 2021-01-19T15:03:39.791198
| 2017-11-09T10:54:08
| 2017-11-09T10:54:08
| 100,939,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,568
|
r
|
shiny_server.R
|
# Shiny server for phase 1 of the sales-training game: shows the phase
# budget, recomputes budget/time allocations on demand, handles decision
# submissions and builds the derived KPI report tables.
server = function(input, output) {
  ##phase1
  # fixed promotional budget available in phase 1 (project global)
  output$p1_total_promotional_budget <- renderText(
    total_promotional_budget$phase1
  )
  # On "calculate": sum the promotional budget the player assigned over
  # the 10 hospitals, plus the total worktime credited to each of the 5
  # sales reps (a rep's time = sum over every hospital assigned to them).
  tmp <- eventReactive(input$decision1_phase1_calculator,{
    phase1_promotional_budget=0
    phase1_total_time_arrangement1 <- 0
    phase1_total_time_arrangement2 <- 0
    phase1_total_time_arrangement3 <- 0
    phase1_total_time_arrangement4 <- 0
    phase1_total_time_arrangement5 <- 0
    for(i in 1:10){
      # running budget total; sum(..., na.rm = TRUE) tolerates blank inputs
      phase1_promotional_budget <-
        sum(c(phase1_promotional_budget,
              as.numeric(input[[paste("p1_promotional_budget_hosp",i,sep="")]])),
            na.rm = TRUE)
      # total worktime entered for hospital i (4 activity slots)
      tmp <- sum(c(as.numeric(input[[paste("p1_hosp",i,"_worktime_1",sep="")]]),
                   as.numeric(input[[paste("p1_hosp",i,"_worktime_2",sep="")]]),
                   as.numeric(input[[paste("p1_hosp",i,"_worktime_3",sep="")]]),
                   as.numeric(input[[paste("p1_hosp",i,"_worktime_4",sep="")]])),
                 na.rm = TRUE)
      # credit that worktime to whichever rep is assigned to hospital i
      if (input[[paste("p1_sr_hosp",i,sep = "")]]==
          available_srs[1]){
        phase1_total_time_arrangement1 <-
          phase1_total_time_arrangement1 +tmp
      } else if (input[[paste("p1_sr_hosp",i,sep = "")]]==
                 available_srs[2]) {
        phase1_total_time_arrangement2 <-
          phase1_total_time_arrangement2 +tmp
      } else if (input[[paste("p1_sr_hosp",i,sep = "")]]==
                 available_srs[3]) {
        phase1_total_time_arrangement3 <-
          phase1_total_time_arrangement3 +tmp
      } else if (input[[paste("p1_sr_hosp",i,sep = "")]]==
                 available_srs[4]) {
        phase1_total_time_arrangement4 <-
          phase1_total_time_arrangement4 +tmp
      } else if (input[[paste("p1_sr_hosp",i,sep = "")]]==
                 available_srs[5]) {
        phase1_total_time_arrangement5 <-
          phase1_total_time_arrangement5 +tmp
      }
    }
    # element 1 = arranged budget, elements 2..6 = per-rep arranged time
    data <- c(phase1_promotional_budget,
              phase1_total_time_arrangement1,
              phase1_total_time_arrangement2,
              phase1_total_time_arrangement3,
              phase1_total_time_arrangement4,
              phase1_total_time_arrangement5)
    data
  })
  output$p1_arranged_promotional_budget <- renderText(tmp()[1])
  output$p1_arranged_time_of_sr1 <- renderText(tmp()[2])
  output$p1_arranged_time_of_sr2 <- renderText(tmp()[3])
  output$p1_arranged_time_of_sr3 <- renderText(tmp()[4])
  output$p1_arranged_time_of_sr4 <- renderText(tmp()[5])
  output$p1_arranged_time_of_sr5 <- renderText(tmp()[6])
  # Lock the discount inputs once decision 1 is submitted.
  # NOTE(review): the summary outputs are hard-coded to 12 -- placeholder?
  observeEvent(input$decision1_phase1_submit, {
    disable("p1_discount_hosp1_1")
    disable("p1_discount_hosp1_2")
    output$p1_decison1_summary_hosp1 <- renderText({12})
    output$p1_decison1_summary_hosp2 <- renderText({12})
  })
  # decision tables are rebuilt only when the matching submit is pressed
  data1_phase1 <- eventReactive(input$decision1_phase1_submit,{
    get.data1(input,1)
  })
  data2_phase1 <- eventReactive(input$decision2_phase1_submit,{
    get.data2(input,1)
  })
  output$report1_table <- renderDataTable(data1_phase1())
  output$report2_table <- renderDataTable(data2_phase1())
  # Totals of the manager's time allocation across the 5 reps.
  # NOTE(review): `na.rm = T` here is INSIDE c(), so it becomes a TRUE
  # element summed as 1 (and NA inputs still propagate) -- it is not
  # passed to sum(). Likely a bug; should be sum(c(...), na.rm = TRUE).
  sales_training <- reactive({sum(c(
    as.numeric(input$p1_sr1_sales_training),
    as.numeric(input$p1_sr2_sales_training),
    as.numeric(input$p1_sr3_sales_training),
    as.numeric(input$p1_sr4_sales_training),
    as.numeric(input$p1_sr5_sales_training),
    na.rm = T))})
  # same na.rm-inside-c() issue as sales_training above
  field_work <- reactive({sum(c(
    as.numeric(input$p1_sr1_field_work),
    as.numeric(input$p1_sr2_field_work),
    as.numeric(input$p1_sr3_field_work),
    as.numeric(input$p1_sr4_field_work),
    as.numeric(input$p1_sr5_field_work),
    na.rm = T
  ))})
  output$p1_total_sales_training <-renderText(sales_training())
  output$p1_flm_sales_training <- renderText(sales_training())
  output$p1_total_field_work <-renderText(field_work())
  output$p1_flm_field_work <- renderText(field_work())
  output$p1_total_team_meeting <- renderText(input$p1_flm_team_meeting)
  output$p1_total_kpi_analysis <- renderText(input$p1_flm_kpi_analysis)
  output$p1_total_strategy_planning <- renderText(input$p1_flm_strategy_planning)
  output$p1_total_admin_work <- renderText(input$p1_flm_admin_work)
  # grand total of the manager's time (same na.rm-inside-c() issue)
  output$p1_total_management <- renderText(sum(c(
    sales_training(),
    field_work(),
    as.numeric(input$p1_flm_team_meeting),
    as.numeric(input$p1_flm_kpi_analysis),
    as.numeric(input$p1_flm_strategy_planning),
    as.numeric(input$p1_flm_admin_work),
    na.rm = T
  )))
  # Report 3: derive the phase KPIs (skill, knowledge, motivation,
  # performance, relationship, attractiveness) from the two submitted
  # decision tables via the project's response curves ("curveNN") and
  # weightage tables.
  output$report3_table <- renderDataTable({
    # take a reactive dependency on both decision tables
    data1_phase1
    data2_phase1
    data1 <- data1_phase1()
    data2 <- data2_phase1()
    tmp <- left_join(data1,data2,by=c("phase","sales_rep")) %>%
      group_by(phase,sales_rep) %>%
      mutate(no.hospitals = n_distinct(hospital)) %>%
      ungroup %>%
      # per-row base quantities and curve-transformed deltas/factors
      mutate(experience_index_pp = curve(curve11,acc_revenue_0),
             sales_target_realization = sales_target/real_sales,
             contact_priority_fit_index = sum(c(time_on_doc*0.5,
                                                time_on_diet*0.25,
                                                time_on_admin*0.15,
                                                time_on_nurs*0.1),
                                              na.rm=T),
             field_work_peraccount = field_work/no.hospitals,
             product_knowledge_addition_current_period = curve(curve26,product_training),
             product_knowledge_transfer_value = curve(curve28,product_knowledge_0),
             ss_accumulated_field_work_delta = curve(curve42,field_work),
             ss_accumulated_sales_training_delta = curve(curve43,sales_training),
             ss_experience_index_pp = curve(curve44,experience_index_pp),
             m_meeting_with_team_delta = {if (sales_level == "junior") {
               curve(curve13,meetings_with_team)
             } else if(sales_level=="middle"){
               curve(curve14,meetings_with_team)
             } else {curve(curve15,meetings_with_team)}},
             m_sales_target_realization_delta = curve(curve16,sales_target_realization),
             m_sales_training_delta = curve(curve17,sales_training),
             m_admin_work_delta = curve(curve18,admin_work)) %>%
      # composite skill / knowledge / motivation indices (weighted sums)
      mutate(sales_skill_index = sum(c(
        ss_accumulated_field_work_delta*((weightage$sales_skills)$field_work),
        ss_accumulated_sales_training_delta*((weightage$sales_skills)$sales_training),
        ss_experience_index_pp*((weightage$sales_skills)$experience)),na.rm=T),
        product_knowledge_index = sum(c(
          product_knowledge_addition_current_period,
          product_knowledge_transfer_value),na.rm=T),
        motivation_index = sum(c(
          (motivation_0+m_admin_work_delta)*
            ((weightage$motivation)$admin_work),
          (motivation_0+m_sales_target_realization_delta)*
            ((weightage$motivation)$sales_target_realization),
          (motivation_0+m_meeting_with_team_delta)*
            ((weightage$motivation)$meetings_with_team),
          (motivation_0+m_sales_training_delta)*
            ((weightage$motivation)$sales_training)),
          na.rm=T)) %>%
      # rep sales-performance factors (time factor depends on product)
      mutate(srsp_motivation_factor = curve(curve32,motivation_0),
             srsp_sales_skills_factor = curve(curve34,sales_skill_index),
             srsp_product_knowledge_factor = curve(curve33,product_knowledge_index),
             srsp_time_with_account_factor = ({if (product=="product1"){
               curve(curve35,sr_time)} else if(
                 product=="product2"){
                 curve(curve36,sr_time)} else if (
                   product=="product3") {
                   curve(curve37,sr_time)} else {
                     curve(curve38,sr_time)}
             })) %>%
      mutate(sr_sales_performance = sum(c(
        srsp_motivation_factor*pp_sr_sales_performance*
          ((weightage$sr_sales_performance)$motivation),
        srsp_sales_skills_factor*pp_sr_sales_performance*
          ((weightage$sr_sales_performance)$sales_skills),
        srsp_product_knowledge_factor*pp_sr_sales_performance*
          ((weightage$sr_sales_performance)$product_knowledge),
        srsp_time_with_account_factor*pp_sr_sales_performance*
          ((weightage$sr_sales_performance)$time_with_account)),
        na.rm=T)) %>%
      # deployment quality
      mutate(dq_admin_work_delta = curve(curve5,admin_work),
             dq_priority_fit_delta = curve(curve6,contact_priority_fit_index),
             dq_meetings_with_team_delta =curve(curve7,meetings_with_team),
             dq_kpi_analysis_factor = curve(curve8,kpi_analysis),
             dq_strategy_planning_delta = curve(curve9,strategy_and_cycle_planning))%>%
      mutate(deployment_quality_index = sum(c(
        (pp_deployment_quality+dq_admin_work_delta)*
          ((weightage$deployment_quality)$admin_work),
        (pp_deployment_quality+dq_priority_fit_delta)*
          ((weightage$deployment_quality)$priority_fit),
        (pp_deployment_quality+dq_meetings_with_team_delta)*
          ((weightage$deployment_quality)$meetings_with_team),
        pp_deployment_quality*dq_kpi_analysis_factor*
          ((weightage$deployment_quality)$kpi_report_analysis),
        (pp_deployment_quality+dq_strategy_planning_delta)*
          ((weightage$deployment_quality)$strategy_and_cycle_planning)),
        na.rm=T)) %>%
      # promotional support
      mutate(ps_strategy_planning_factor = curve(curve29,strategy_and_cycle_planning),
             ps_promotional_budget_factor = curve(curve30,promotional_budget)) %>%
      mutate(promotional_support_index = sum(c(
        pp_promotional_support*ps_strategy_planning_factor*
          ((weightage$promotional_support)$strategy_and_cycle_planning),
        pp_promotional_support*ps_promotional_budget_factor*
          ((weightage$promotional_support)$promotional_budget)),
        na.rm=T)) %>%
      # overall sales performance
      mutate(sp_field_work_delta = curve(curve40,field_work_peraccount),
             sp_deployment_quality_factor = curve(curve41,deployment_quality_index)) %>%
      mutate(sales_performance = sum(c(
        sr_sales_performance*((weightage$sales_performance)$sr_sales_performance),
        (pp_sales_performance+sp_field_work_delta)*
          ((weightage$sales_performance)$field_work),
        (pp_sales_performance*sp_deployment_quality_factor)*
          ((weightage$sales_performance)$deployment_quality)),
        na.rm=T)) %>%
      # customer relationship
      mutate(#cr_market_share_delta = curve(curve1,market_share_peraccount),
        cr_product_knowledge_delta = curve(curve2,product_knowledge_index-product_knowledge_0),
        cr_promotional_support_delta = curve(curve3,promotional_support_index/pp_promotional_support),
        cr_pp_customer_relationship_index = curve(curve4,pp_customer_relationship))%>%
      mutate(customer_relationship_index =
               sum(c((cr_pp_customer_relationship_index+cr_product_knowledge_delta)*
                       (weightage$customer_relaitonship)$product_knowledge,
                     (cr_pp_customer_relationship_index+cr_promotional_support_delta)*
                       (weightage$customer_relaitonship)$promotional_support,
                     cr_pp_customer_relationship_index*
                       (weightage$customer_relaitonship)$past_relationship),
                   na.rm=T)) %>%
      # offer attractiveness (relationship factor depends on product)
      mutate(oa_customer_relationship_factor = ({if (product=="product1"){
        curve(curve19,sr_time)} else if(
          product=="product2"){
          curve(curve20,sr_time)} else if (
            product=="product3") {
            curve(curve21,sr_time)} else {
              curve(curve22,sr_time)}}),
        oa_sales_performance_factor = curve(curve25,sales_performance)) %>%
      mutate(cp_offer_attractiveness = sum(c(
        pp_offer_attractiveness*oa_customer_relationship_factor*
          (weightage$cp_offer_attractiveness)$customer_relationship,
        pp_offer_attractiveness*oa_sales_performance_factor*
          (weightage$cp_offer_attractiveness)$sales_performance
      ))) %>%
      mutate(offer_attractiveness = sum(c(
        cp_offer_attractiveness*(weightage$total_attractiveness)$cp_offer_attractiveness,
        pp_offer_attractiveness*(weightage$total_attractiveness)$pp_offer_attractiveness
      ))) %>%
      # final KPI columns shown in report 3
      select(sales_skill_index,
             product_knowledge_index,
             motivation_index,
             sr_sales_performance,
             deployment_quality_index,
             promotional_support_index,
             sales_performance,
             customer_relationship_index,
             offer_attractiveness)
  })
}
|
f656932015c0704806b039c87e53d7e13ee40aef
|
7c7dd80f0efd3079d2fa40d2012abec34edefbd1
|
/man/fill-methods.Rd
|
447f4827f50e63b1bfabffb3a9f276f91deba374
|
[] |
no_license
|
bbuchsbaum/neuroim
|
afebc5c959541bf36a4a2a2ac9a076b875cc0c45
|
a0c2c5db6e717eeacf7ad3cb9be65a48a34d1f93
|
refs/heads/master
| 2021-06-13T08:42:37.597547
| 2021-03-17T19:51:53
| 2021-03-17T19:51:53
| 13,177,894
| 5
| 1
| null | 2016-10-18T12:52:10
| 2013-09-28T17:43:12
|
R
|
UTF-8
|
R
| false
| true
| 1,258
|
rd
|
fill-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGeneric.R, R/BrainVolume.R
\docType{methods}
\name{fill}
\alias{fill}
\alias{fill,BrainVolume,list-method}
\alias{fill,BrainVolume,matrix-method}
\title{Generic function to map values from one set to another using a user-supplied lookup table}
\usage{
fill(x, lookup)
\S4method{fill}{BrainVolume,list}(x, lookup)
\S4method{fill}{BrainVolume,matrix}(x, lookup)
}
\arguments{
\item{x}{the object to map values from}
\item{lookup}{the lookup table. The first column is the "key" the second column is the "value".}
}
\value{
a new object where the original values have been filled in with the values in the lookup table
}
\description{
Generic function to map values from one set to another using a user-supplied lookup table
}
\examples{
x <- BrainSpace(c(10,10,10), c(1,1,1))
vol <- BrainVolume(sample(1:10, 10*10*10, replace=TRUE), x)
## lookup table is list
lookup <- lapply(1:10, function(i) i*10)
ovol <- fill(vol, lookup)
## lookup table is matrix. First column is key, second column is value
names(lookup) <- 1:length(lookup)
lookup.mat <- cbind(as.numeric(names(lookup)), unlist(lookup))
ovol2 <- fill(vol, lookup.mat)
all.equal(as.vector(ovol2), as.vector(ovol))
}
|
fa508285fcb10e65f3a4ce661051a1230a0f308e
|
ab1accb32c0b170d6fc57ae59564466a83cc6aec
|
/_not_used/cell_lines/Vias-Brenton-Organoids_CellLines.R
|
438853bc05c40a91f16fb98fb3e2f754f6db20fa
|
[
"MIT"
] |
permissive
|
lm687/Organoids_Compositional_Analysis
|
140345e036638687ecab39f43d77ebe858350097
|
6dd6895402b48ee6e9b58bfbad966d874c47433a
|
refs/heads/master
| 2023-05-10T04:59:57.789432
| 2023-05-05T17:47:37
| 2023-05-05T17:47:37
| 265,240,528
| 0
| 0
| null | 2023-05-05T16:14:10
| 2020-05-19T12:25:28
|
HTML
|
UTF-8
|
R
| false
| false
| 13,158
|
r
|
Vias-Brenton-Organoids_CellLines.R
|
#<begin_omit>```{r}
# Session setup (omitted from the rendered document): clear the
# workspace, move to this script's directory, force English messages.
rm(list = ls())
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
Sys.setenv(LANG='en')
#<end_omit>```{r}
#<begin_chunk>```{r, libraries,message=FALSE, cache=TRUE}
library(grid)
library(gridExtra)
library(dendextend)
library(ggrepel)
library(cowplot)
library(compositions)
library(CompSign)
source("../../../CDA_in_Cancer/code/functions/meretricious/pretty_plots/prettySignatures.R")
# select the background dataset; the second assignment wins, so the
# script currently runs with 'NatGen'
prevdata = 'Redefinition'
prevdata = 'NatGen'
#<end_chunk>
#<begin_text>
## The background data that we are using is `r prevdata`.
#<end_text>
#<begin_chunk>```{r,fig.height=4.5,messages=FALSE, echo=FALSE,warning=FALSE, cache=TRUE}
# Signature exposures for the CIOV cell lines (p7 version), samples as rows
#org <- as(read.csv("data/CIOV_cell_lines_signature_exposures.csv", row.names = 1), 'matrix')
org <- as(read.csv("data/CIOV_cell_lines_signature_exposures_p7.csv", row.names = 1), 'matrix')
# barplot ordered by the first signature's exposure
createBarplot(as(org, 'matrix'), remove_labels = FALSE, verbatim = FALSE, angle_rotation_axis = 45, order_labels = names(sort(org[,1]))) +
ggtitle('Exposures for the organoids')
#<end_chunk>
#<begin_chunk>```{r, natgen_data,include=FALSE, cache=TRUE}
# Load the two background exposure datasets:
#   natgen[[1]] = NatGen exposures (row-renormalised), natgen[[2]] = TCGA-OV
natgen <- list()
natgen_metadata <- list()
## prevdata == 'NatGen'
load("../../../CDA_in_Cancer/data/Robj/image_NatGen_rmd.RData")
natgen0 <- as.matrix(sig_data_unorm[,1:7])
natgen_metadata[[1]] <- sig_data_unorm[,8:ncol(sig_data_unorm)]
## Geoff
## Normalisation is not done in such a way that rows add up to 1. Re-normalising
natgen[[1]] <- sweep(natgen0, 1, rowSums(natgen0), '/')
## last exposures from Ruben
id_previous_samples <- 1
natgen_barplt1 <- createBarplot(natgen[[id_previous_samples]], remove_labels = TRUE, verbatim = FALSE,
                                order_labels = rownames(natgen[[id_previous_samples]])[(order(natgen[[id_previous_samples]][,1]))]) +
  ggtitle('Original')
natgen_barplt2 <- createBarplot(natgen[[id_previous_samples]], remove_labels = TRUE, verbatim = FALSE,
                                order_labels = rownames(natgen[[id_previous_samples]])[(order(natgen[[id_previous_samples]][,1]))]) +
  ggtitle('Re-normalised')
#grid.arrange(natgen_barplt1, natgen_barplt2)
# natgen_barplt_perstudy <- list()
# for(i in 1:length(unique(natgen_metadata$study))){
#   natgen_barplt_perstudy[[i]] <- createBarplot(natgen[natgen_metadata$study == unique(natgen_metadata$study)[i],],
#                                                remove_labels = TRUE, verbatim = FALSE)+
#     ggtitle(paste0('Re-normalised\n', unique(natgen_metadata$study)[i] ))
# }
# plot_grid(plotlist=natgen_barplt_perstudy)
## }else if(prevdata == 'Redefinition'){
natgen[[2]] <- readRDS("data/Export-matrix_OV_Sigs_on_TCGA-OV_12112019.rds")
natgen_metadata[[2]] <- data.frame(study=rep('Previous', nrow(natgen[[2]])), stringsAsFactors = FALSE)
#<end_chunk>
#<begin_chunk>```{r, include=FALSE, message=FALSE, cache=TRUE}
org_barplot <- createBarplot(org, remove_labels = FALSE, verbatim = FALSE, angle_rotation_axis = 45, order_labels = names(sort(org[,1]))) +
ggtitle('Exposures for the organoids')
no1_natgen1 <- createBarplot(natgen[[1]], remove_labels = TRUE, verbatim = FALSE,
order_labels = rownames(natgen[[1]])[(order(natgen[[1]][,1]))]) +
ggtitle('Original')
no1_natgen2 <- createBarplot(natgen[[2]], remove_labels = TRUE, verbatim = FALSE,
order_labels = rownames(natgen[[2]])[(order(natgen[[2]][,1]))]) +
ggtitle('Original')
grid.arrange(org_barplot, no1_natgen1, no1_natgen2)
#<end_chunk>
#<begin_text>
##' ## PCA
##' ### PCA in compositional data
##'
##' In the book Analysing compositional data with R they say that PCA should be done on clr-transformed data.
##' Here I am using robust zeroes: for zero exposures, the centered log-ratios are set to zero (as opposed to -Inf).
##' The plot done with (biplot(princomp(acomp(x)))) is the same as plotting princomp(as(clr(x), 'matrix'))
#<end_text>
#<begin_chunk>```{r, clr_funs,include=FALSE,eval=TRUE, cache=TRUE}
## Centred log-ratio (clr) transform of a single composition vector:
## clr(x)_i = log(x_i) - mean(log(x)). The result always sums to zero,
## and is invariant to rescaling of x.
clr_vec <- function(x){
  lx <- log(x)
  lx - mean(lx)
}
## Row-wise clr transform of a composition matrix (rows = samples,
## columns = parts).
##
## Uses rowMeans() instead of t(apply(X, 1, clr_vec)): apply() drops the
## dimension for single-column input, which made the shape sanity check
## below fail on a 1-part matrix. The rowMeans() form keeps the matrix
## shape (and dimnames) for every input size.
clr_mat <- function(X){
  lX <- log(X)
  ## subtracting a length-nrow vector recycles down columns, i.e.
  ## element (i, j) minus the row-i mean -- exactly the clr definition
  .res <- lX - rowMeans(lX)
  stopifnot(dim(.res) == dim(X))
  .res
}
#<end_chunk>
#<begin_chunk>```{r, clr,include=FALSE,eval=TRUE, cache=TRUE}
## there were no zeroes
natgen_clr <- list()
for(i in 1:2){
cat('Zeroes:',sum(natgen[[i]] == 0),'\n')
# natgen_clr[[i]] <- clr_mat(natgen[[i]])
natgen_clr[[i]] <- as(compositions::clr(natgen[[i]]), 'matrix')
}
org_clr <- clr_mat(org)
org_clr_robustzeroes <- as(compositions::clr(org), 'matrix')
rownames(org_clr_robustzeroes) <- rownames(org_clr) <- paste0('Cell line ', rownames(org_clr))
#<end_chunk>
#<begin_chunk>```{r, cols,include=FALSE, cache=TRUE}
n <- 60
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unique(unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals))))
#<end_chunk>
#<begin_text>
##' #### Projecting
#<end_text>
#<begin_chunk>```{r, princomp,echo=FALSE, cache=TRUE}
# PCA of the reference (non-organoid) samples, with the organoid samples
# projected onto the fitted components.
#
# input_matrix: clr-transformed exposures; organoid rows are identified by
#               a 'Cell line' prefix in the rownames.
# annotation:   per-row study labels, aligned with input_matrix rows.
# annotation2:  unused in this function (kept for signature parity with
#               createPCA_fromscratch).
# return_df:    if TRUE, return the princomp fit instead of a plot.
# labels_active: unused in this function -- labels are always drawn.
#
# NOTE(review): depends on the global `col_vector` (defined in an earlier
# chunk); myColors is computed but unused since the set_swatch() call is
# commented out.
createPCA_projectorganoids <- function(input_matrix, annotation, annotation2, return_df=FALSE, labels_active=TRUE){
  bool_organoids <- grepl('Cell line', rownames(input_matrix))
  # fit PCA on the reference samples only
  prcomp_all <- princomp(input_matrix[!bool_organoids,])
  df_prcomp_exposures <- data.frame(prcomp_all$scores[,1:2], study=annotation[!bool_organoids], labels=NA)
  # project organoid rows onto the reference components
  df_prcomp_exposures_org <- data.frame(predict(prcomp_all, (input_matrix[bool_organoids,]))[,1:2], 'Organoid', rownames(input_matrix[bool_organoids,]))
  colnames(df_prcomp_exposures_org) <- c('Comp.1', 'Comp.2', 'study', 'labels')
  df_prcomp_exposures <- rbind(df_prcomp_exposures, df_prcomp_exposures_org)
  df_prcomp_exposures$labels <- gsub('Sample ', '', df_prcomp_exposures$labels) ##here
  myColors <- col_vector[1:length(unique(df_prcomp_exposures$study))]
  names(myColors) <- unique(df_prcomp_exposures$study)
  #ggthemr('flat dark')
  #ggthemr_reset()
  # set_swatch(myColors)
  # proportion of variance per component (of the reference-only fit)
  var_explained1 <- (prcomp_all$sdev**2)/sum(prcomp_all$sdev**2)
  if(return_df){
    return(prcomp_all)
  }else{
    ggplot(df_prcomp_exposures, aes(x=Comp.1, y=Comp.2, col=study))+
      geom_point() +
      geom_label_repel(aes(label=labels))+
      ggtitle("PCA of both datasets with projection")+
      labs(x=paste0('PC1 (', round(var_explained1[1], 2)*100, '%)'),
           y=paste0('PC2 (', round(var_explained1[2], 2)*100, '%)'))+
      theme_dark()+ theme(legend.position = "bottom")
  }
}
#<end_chunk>
#<begin_chunk>```{r, pca_from_scratch,include=TRUE, echo=FALSE, cache=TRUE}
# PCA fitted jointly on all samples (reference + organoids), unlike
# createPCA_projectorganoids which fits on reference samples only.
#
# input_matrix: clr-transformed exposures; organoid rows carry a
#               'Cell line' rowname prefix and are the only rows labelled.
# annotation:   per-row study labels.
# annotation2:  per-row logical flag (here: whether the sample had any
#               zero exposure), shown via the colour interaction.
# return_df:    if TRUE, return the princomp fit instead of a plot.
# labels_active: toggle the geom_label_repel layer on the plot.
createPCA_fromscratch <- function(input_matrix, annotation, annotation2, return_df=FALSE, labels_active=TRUE){
  prcomp_all_clr <- princomp(input_matrix)
  df_prcomp_exposures_clr <- data.frame(prcomp_all_clr$scores[,1:2],
                                        study=annotation,
                                        bool_any_zeroes=annotation2,
                                        labels=rownames(input_matrix))
  # keep labels only for organoid ('Cell line') rows, then strip the prefix
  df_prcomp_exposures_clr$labels[!grepl('Cell line', df_prcomp_exposures_clr$labels)] <- NA
  df_prcomp_exposures_clr[,'labels'] <- gsub("Cell line ", "", df_prcomp_exposures_clr$labels)
  # proportion of variance per component
  var_explained2 <- (prcomp_all_clr$sdev**2)/sum(prcomp_all_clr$sdev**2)
  if(return_df){
    prcomp_all_clr
  }else{
    if(labels_active){
      ggplot(df_prcomp_exposures_clr, aes(x=Comp.1, y=Comp.2, col=interaction(bool_any_zeroes, study), label=labels))+ geom_point() + geom_label_repel()+
        labs(x=paste0('PC1 (', round(var_explained2[1], 2)*100, '%)'),
             y=paste0('PC2 (', round(var_explained2[2], 2)*100, '%)'))
    }else{
      ggplot(df_prcomp_exposures_clr, aes(x=Comp.1, y=Comp.2, col=interaction(bool_any_zeroes, study)))+ geom_point() +
        labs(x=paste0('PC1 (', round(var_explained2[1], 2)*100, '%)'),
             y=paste0('PC2 (', round(var_explained2[2], 2)*100, '%)'))
    }
  }
}
for(i in 1:2){
print(createPCA_projectorganoids(input_matrix = rbind(natgen_clr[[i]],org_clr_robustzeroes),
annotation = c(natgen_metadata[[i]]$study, rep('Cell Line', nrow(org_clr))),
annotation2 = c(rep(FALSE, dim(natgen_metadata[[i]])[1]),
unlist(apply(org, 1, function(i) any(i == 0)))),
labels_active = TRUE)+ theme_dark()+ theme(legend.position = "bottom") +
ggtitle(paste0('PCA of both datasets with projection with robust zeroes, datatset=', i)))
print(createPCA_fromscratch(input_matrix = rbind(natgen_clr[[i]],org_clr_robustzeroes),
annotation = c(natgen_metadata[[i]]$study, rep('Cell Line', nrow(org_clr))),
annotation2 = c(rep(FALSE, dim(natgen_metadata[[i]])[1]),
unlist(apply(org, 1, function(i) any(i == 0)))),
labels_active = TRUE)+ theme_dark()+ theme(legend.position = "bottom") +
ggtitle(paste0('PCA created from scratch with robust zeroes, dataset=', i)))
}
#<end_chunk>
#<begin_text>
##' Overall, there are two groups (JB126 and 2259). All JB126 are **extremely** similar except that p22 has a non-zero exposure for 25, whereas all others have a zero exposure. Then, for 2259 there are two groups: p3 and p20 (which have a zero exposure of S6 and a non-zero exposure of S5) and p7 and p13 (opposite scenario; non-zero for S6 and zero for S5).
#<end_text>
#<begin_text>
#' ### Loadings for the PCAs
#<end_text>
#<begin_chunk>```{r, loadings, fig.height=4, echo=FALSE, cache=TRUE}
pcas_with_projection <- list()
pcas_from_scratch <- list()
for(i in 1:2){
pcas_with_projection[[i]] <- createPCA_projectorganoids(input_matrix = rbind(natgen_clr[[i]],org_clr_robustzeroes),
annotation = c(natgen_metadata[[i]]$study, rep('Cell Line', nrow(org_clr))),
annotation2 = c(rep(FALSE, dim(natgen_metadata[[i]])[1]),
unlist(apply(org, 1, function(i) any(i == 0)))),
labels_active = TRUE, return_df = TRUE)
pcas_from_scratch[[i]] <- createPCA_fromscratch(input_matrix = rbind(natgen_clr[[i]],org_clr_robustzeroes),
annotation = c(natgen_metadata[[i]]$study, rep('Cell Line', nrow(org_clr))),
annotation2 = c(rep(FALSE, dim(natgen_metadata[[i]])[1]),
unlist(apply(org, 1, function(i) any(i == 0)))),
labels_active = TRUE, return_df = TRUE)
}
par(mfrow=c(1,2))
for(i in 1:2){
barplot(pcas_with_projection[[i]]$loadings[,1], main='Loadings of the\nfirst principal component')
barplot(pcas_with_projection[[i]]$loadings[,2], main='Loadings of the\nfirst principal component')
barplot(pcas_from_scratch[[i]]$loadings[,1], main='Loadings of the\nsecond principal component')
barplot(pcas_from_scratch[[i]]$loadings[,2], main='Loadings of the\nsecond principal component')
}
#<end_chunk>
#<begin_chunk>```{r, dendrogram_aitchisondistance,echo=FALSE, cache=TRUE}
par(mfrow=c(1,2))
pdf("results/dendrogram.pdf")
names_prev_datasets <- c('NatGen dataset', 'New OV exposures for SNP TCGA')
for(idx in 1){#1:2){
organoid_metadata <- cbind.data.frame(study=rep('organoids', nrow(org_clr_robustzeroes)), age=NA, age.cat=NA, stringsAsFactors=FALSE)
rownames(organoid_metadata) <- rownames(org_clr_robustzeroes)
if(idx==1){
all_metadata <- rbind(cbind(natgen_metadata[[idx]]$study), cbind(study=organoid_metadata$study))
}else{
all_metadata <- rbind(natgen_metadata[[idx]], cbind(study=organoid_metadata$study))
}
all_clr <- rbind(natgen_clr[[idx]], org_clr_robustzeroes)
rownames(all_metadata) <- rownames(all_clr)
rm_infinite <- apply(all_clr, 1, function(x) any(is.infinite(x)))
cat(which(rm_infinite), 'removed due to infinite values')
all_clr_clean <- all_clr[!rm_infinite,]
which(rm_infinite)
dendro_all <- as.dendrogram(hclust(dist(all_clr_clean)))
levels_study <- levels(factor(all_metadata[labels(dendro_all),'study']))
levels_study
which_level_organoids <- which(grepl('organoids', levels_study))
cols <- rep(NA, length(levels_study))
cols[which_level_organoids] <- 'blue' #'#88E9A2'
cols[-which_level_organoids] <- c('#FFA07A', '#FA8072', '#E9967A', '#F08080')
labels_colors(dendro_all) <- cols[factor(all_metadata[labels(dendro_all),'study'])]
labels_org_bool <- labels_colors(dendro_all) == 'blue' #'#88E9A2'
# labels(dendro_all)[labels_org_bool] <- rep('●', sum(labels_org_bool))
labels(dendro_all)[!labels_org_bool] <- rep('•', sum(!labels_org_bool))
labels(dendro_all)[!labels_org_bool] <- rep(NA, sum(!labels_org_bool))
labels(dendro_all) <- gsub('Cell line ', '', labels(dendro_all))
cex_labels <- rep(1, length(labels_org_bool))
cex_labels[labels_org_bool] <- 0.9
dendro_all <- set(dendro_all, "labels_cex", cex_labels)
plot(dendro_all, cex=0.2, cex.main=1, main=paste0('Dendrogram based on the exposures\n(Aitchison distance)\n', names_prev_datasets[idx]))
}
dev.off()
#<end_chunk>
|
c4e817c746c8973786d63a007523b9ec3eabedff
|
d4ca4aa48bebf5498d0ef682108173a0c8a0c1eb
|
/man/rotationmat.maxcor.Rd
|
5dc52dab9f8184cbe0e1e8f239688ddb08c3360d
|
[] |
no_license
|
tpepler/cpc
|
a6e528eaa7d1e98e950f0b2b7cb3f57963ac09cc
|
76c916b6dda684bb97491128fb27aa881ed998a7
|
refs/heads/master
| 2022-07-18T16:13:46.469529
| 2022-07-06T19:38:11
| 2022-07-06T19:38:11
| 30,924,214
| 2
| 2
| null | 2022-06-24T20:21:09
| 2015-02-17T15:54:36
|
R
|
UTF-8
|
R
| false
| false
| 1,733
|
rd
|
rotationmat.maxcor.Rd
|
\name{rotationmat.maxcor}
\alias{rotationmat.maxcor}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Maximum correlation rotation matrix
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Finds a rotation matrix with maximum correlation between the variables.
}
\usage{
rotationmat.maxcor(p)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{p }{Number of variables (rows/columns) required for the rotation matrix.}
}
\details{
%% ~~ If necessary, more details than the description above ~~
Finding rotation matrices (approximately) maximising the correlations between the variables can be useful in some simulation study settings, for example when the purpose is to study the effect of high correlations on the performance of some statistical method.
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
Returns the p x p square rotation matrix.
}
\references{
%% ~put references to the literature/web site here ~
Pepler, P.T. (2014). The identification and application of common principal components. PhD dissertation in the Department of Statistics and Actuarial Science, Stellenbosch University.
}
\author{
%% ~~who you are~~
Theo Pepler
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
rotationmat.maxcor(p = 5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
b20f80c6627ae38bf7ccdb2595842c9b1b56d44c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ade4/examples/mantel.rtest.Rd.R
|
8050fa61f5d2e8de197dd6db2e02fee9b9180db1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 375
|
r
|
mantel.rtest.Rd.R
|
library(ade4)
### Name: mantel.rtest
### Title: Mantel test (correlation between two distance matrices (in R).)
### Aliases: mantel.rtest
### Keywords: array nonparametric
### ** Examples
# Yanomama villages data: genetic and geographic distance matrices.
data(yanomama)
# quasieuclid() coerces each distance matrix to the nearest Euclidean
# representation before the permutation test (see ade4 docs).
gen <- quasieuclid(as.dist(yanomama$gen))
geo <- quasieuclid(as.dist(yanomama$geo))
# Run the Mantel permutation test and plot the permutation distribution
# with the observed statistic.
plot(r1 <- mantel.rtest(geo,gen), main = "Mantel's test")
r1
|
2f848fc776cb307b6f837ddb660e100439e3a7e1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/proportion/examples/ciCTW.Rd.R
|
69e1622257c227c2b7f86ff580766ee78ce65385
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 180
|
r
|
ciCTW.Rd.R
|
library(proportion)
### Name: ciCTW
### Title: Continuity corrected Wald-T method of CI estimation
### Aliases: ciCTW
### ** Examples
# n: number of trials; alp: significance level; c: continuity correction.
# NOTE(review): `c` shadows base::c in this session -- harmless for this
# two-line example but easy to trip over if it is extended.
n=5; alp=0.05;c=1/(2*n)
ciCTW(n,alp,c)
|
670d73718219dd5f871999cd6737104c56b573da
|
da3112d28186f4000ab3aa285b9be9879da69595
|
/inst/deprecated/bp.test.R
|
ebb1a331629d5f9a58a3f9ddfdc84f4db32d5913
|
[] |
no_license
|
Huaichao2018/rawr
|
08c0456d9822ae9654a11891bc47bf3c7a49b466
|
95857be33dd7128ab5ad48875a66c249191a2bd7
|
refs/heads/master
| 2023-01-19T02:07:17.790103
| 2020-11-26T04:50:44
| 2020-11-26T04:50:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
bp.test.R
|
# Annotate a boxplot with pairwise test p-values drawn as brackets in the
# top margin.
#
# formula/data: passed to boxplot() (stats only) and to the test function.
# which: indices of the group pairs to annotate (default: all pairs).
# at:    x positions of the groups (default: 1..number of groups).
# line:  margin line(s) at which to draw each bracket; a scalar (or NULL)
#        is expanded to 1.25-line steps.
# test:  two-sample test used when there are exactly two groups; for >2
#        groups all pairwise tests come from cuzick.test().
# ...:   graphical parameters, applied via par() and restored on exit.
#
# Returns, invisibly, a matrix of the bracket label coordinates (one row
# per annotated pair), or NULL when only one group is present.
#
# NOTE(review): relies on non-base helpers from the enclosing package --
# cuzick.test(), pvalr(), coords() and `%||%` -- none visible here;
# verify their contracts against the package source.
bp.test <- function(formula, data, which = NULL, at = NULL, line = NULL,
                    test = wilcox.test, ...) {
  # apply caller's par settings, guaranteed to be restored
  op <- par(..., no.readonly = TRUE)
  on.exit(par(op))
  # boxplot statistics only (plot = FALSE); bp$n gives the group count
  bp <- boxplot(formula, data, plot = FALSE)
  ng <- length(bp$n)
  if (ng == 1L) {
    message('only one group -- no test performed')
    return(invisible(NULL))
  }
  # two groups: a single two-sample test; more: all pairwise comparisons
  pv <- if (ng == 2L)
    test(formula, data)
  else cuzick.test(formula, data, details = test)$details$pairs
  pv <- pvalr(pv$p.value, show.p = TRUE)
  which <- if (is.null(which))
    seq_along(pv) else which[which %in% seq_along(pv)]
  at <- if (is.null(at))
    seq.int(ng) else at
  # default bracket heights: 1.25 text lines apart, offset by `line`
  # (%||% binds tighter than `+`, so this is 1.25*(k-1) + (line %||% 0))
  line <- if (is.null(line) || length(line) == 1L)
    1.25 * (seq_along(which) - 1) + line %||% 0 else line
  # draw one bracket from x1 to x2 at height y; returns the label position
  seg <- function(x1, y, x2) {
    pad <- diff(par('usr')[3:4]) / 100
    col <- par('fg')
    segments(x1, y, x2, y, col = col, xpd = NA)
    segments(x1, y, x1, y - pad, col = col, xpd = NA)
    segments(x2, y, x2, y - pad, col = col, xpd = NA)
    c(x = x1 + (x2 - x1) / 2, y = y + pad * 3)
  }
  # convert margin lines to user-coordinate y positions (side 3 = top)
  yat <- coords(line = line, side = 3L)
  cbn <- combn(at, 2L)
  res <- sapply(seq_along(which), function(ii) {
    xat <- cbn[, which[ii]]
    xat <- seg(xat[1L], yat[ii], xat[2L])
    text(xat[1L], xat[2L], pv[which[ii]], xpd = NA)
    xat
  })
  invisible(t(res))
}
|
dd14f988fada8c4288484448d32896b560d46a27
|
856404dc987a6a685cf3de20fbdbcb2ce7be4afd
|
/facebook-crawl/data/simon/IRT.statistics.R
|
f0efd6ce1c723efed8c12d3f0699c79209d4c38a
|
[] |
no_license
|
gsm1011/fall-2010
|
79654681ff5657069acd033e210a1469f4b6c757
|
62217664050110ec92ed905c56fbd406193c1739
|
refs/heads/master
| 2021-01-23T13:17:08.757549
| 2014-02-23T05:41:36
| 2014-02-23T05:41:36
| 32,332,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,466
|
r
|
IRT.statistics.R
|
# Author: Simon Guo.
# This script is to use the IRT model to analyse the dataset.
# we are using the binary IRT model ltm for the analysis, so
# the input data of this script should have binary response.
#
###########################
# IRT modeling functions. #
###########################
# Fit a two-parameter logistic IRT model (ltm::ltm) to a binary response
# matrix and attach convenience summaries to the fitted object.
#
# data:   binary response matrix used to fit the model.
# tdata:  data used for fitted probabilities / factor scores
#         (defaults to `data`).
# weight: observation weights passed through to ltm().
#
# Returns the ltm fit augmented with: dffclt/dscrmn (item difficulty and
# discrimination), conditional fitted probabilities, factor scores, theta
# (latent ability), per-subject summed probabilities/responses, residuals
# and test information over theta in [-3, 3].
ltm.model <- function (data, tdata=NULL, weight = NULL) {
  require('ltm')  # NOTE(review): prefer library(); require() returns FALSE silently
  print('Building 2-parametric IRT models.')
  if (is.null(tdata)) {
    tdata <- data
  }
  if (is.null(weight)) {
    # NOTE(review): 27 is hard-coded -- presumably the row count of the
    # authors' Facebook data set; confirm, and consider deriving it from
    # `data` instead of a magic number.
    weight <- rep(1, 27)
  }
  # model.rasch <- rasch(data, constraint = cbind(ncol(data) + 1, 1))
  model <- ltm(data ~ z1, weight = weight)
  model$dffclt <- coef(model)[,1]  # item difficulty (first coef column)
  model$dscrmn <- coef(model)[,2]  # item discrimination (second coef column)
  model$fitted <- fitted(model, tdata, type="conditional-probabilities")
  model$scores <- factor.scores(model, tdata) # factor scores.
  model$theta <- model$scores$score.dat$z1 # ability level / theta.
  model$sump <- apply(model$fitted, 1, sum) # summed probabilities.
  model$sumu <- apply(tdata, 1, sum) # summed binary settings.
  model$residu <- residuals(model, tdata, order=FALSE) # residuals.
  model$residu <- model$residu[,"Resid"]
  model$info <- information(model, c(-3, 3)) # information in range.
  print('---> done')
  return(model)
}
# histgram of the hidden variables.
# Draw density-scaled histograms of the latent trait (theta) for the two
# fitted IRT models, each overlaid with a kernel density curve and a rug
# of the individual estimates.
ltm.hist.plot <- function (model0, model1) {
  print('Plotting histgram of the irt models...')
  fits <- list(model0, model1)
  titles <- c("Histgram of theta for privacy data(0).",
              "Histgram of theta for privacy data(1).")
  for (k in seq_along(fits)) {
    theta <- fits[[k]]$theta
    hist(theta, 30, freq = FALSE, main = titles[k], xlab = NULL, ylab = NULL)
    lines(density(theta), col = "red")
    rug(jitter(theta), col = "brown")
  }
  print('---> done.')
}
# estimated theta vs. estimated score and expected scores.
# function to draw the figure for utility and privacy.
# For each model, plot estimated theta against the model-predicted summed
# score (blue circles) and overlay the observed summed responses (red x).
ltm.scores.plot <- function (model0, model1) {
  print('plotting scores for the IRT models...')
  fits <- list(model0, model1)
  titles <- c("Theta vs. Scores(0).", "Theta vs. Scores(1).")
  for (k in seq_along(fits)) {
    m <- fits[[k]]
    plot(m$theta, m$sump, col = "blue", main = titles[k],
         xlab = "", ylab = "", pch = 1)
    points(m$theta, m$sumu, col = "red", xlab = "", ylab = "", pch = 4)
  }
  print('---> done.')
}
|
dbd8e1f180a98fd5d84f6d4eda8ed628cb82cbc7
|
229c0dd9ed28a2c5887957856e8b9ddd439597e2
|
/BC/fLogL_Sivia.R
|
af6a6c95fc107646c00d9a9e6ba4b8994399eebb
|
[] |
no_license
|
MarcelVanOijen/CAF2021
|
7e6e68ac4e17f33badb2071d8dc74d1390d907df
|
e752e2024c420cb1f4518fcfc29010fa65670afd
|
refs/heads/main
| 2023-04-11T18:29:56.318506
| 2022-01-17T09:07:46
| 2022-01-17T09:07:46
| 349,493,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
fLogL_Sivia.R
|
# Total log-likelihood under Sivia's "good-and-bad data" error model.
#
# sims:   simulated/model values.
# data:   observed values.
# data_s: observation standard deviations (same length as data).
#
# Each standardised residual R contributes
#   log(1 - exp(-R^2/2)) - log(R^2) - log(2*pi)/2 - log(s);
# for |R| ~ 0 that expression is numerically 0/0, so those terms are
# replaced by their analytic limit, log(1/2) - log(2*pi)/2 - log(s)
# (since 1 - exp(-x) ~ x as x -> 0).
flogL <- function(sims, data, data_s) {
  resid_std <- (sims - data) / data_s
  near_zero <- abs(resid_std) < 1.e-08
  loglik <- log(1 - exp(-0.5 * resid_std^2)) - log(resid_std^2) -
    0.5 * log(2 * pi) - log(data_s)
  loglik[near_zero] <- -0.5 * log(2 * pi) - log(2 * data_s[near_zero])
  sum(loglik)
}
|
98eae98274fa08d37898f7e00f77d9df0e0e2a0e
|
265657c3da16c9215e8e58cd4f5fedff4c1fdf3a
|
/plot3.R
|
a05fe8cdcdb333b69a29a3dc9223389fe774c2d1
|
[] |
no_license
|
Poictesme/ExData_Plotting1
|
3c059a617fc888615b9244a24dbab812fba254ab
|
415039587475525babf1df988a3e1b6961ecce17
|
refs/heads/master
| 2021-01-19T16:18:20.369454
| 2017-09-05T19:27:08
| 2017-09-05T19:27:08
| 100,996,950
| 0
| 0
| null | 2017-08-21T22:14:43
| 2017-08-21T22:14:43
| null |
UTF-8
|
R
| false
| false
| 952
|
r
|
plot3.R
|
# Assumes data file is in the working directory.
# Reads the UCI "household power consumption" extract and reproduces the
# course "plot3" figure: the three sub-metering series for 2007-02-01/02.
all.data<-read.table("household_power_consumption.txt", header=TRUE, sep=";")
# Subset just the data we need
# Get data from 2/1/07 (dates in the file are d/m/Y)
my.data<-subset(all.data, Date=="1/2/2007")
# Append data from 2/2/07
my.data<-rbind(my.data,subset(all.data, Date=="2/2/2007"))
# Convert Date to POSIXct: paste Date and Time so one timestamp column
# can drive the x axis
my.data$Date <- paste(as.character(my.data$Date),as.character(my.data$Time))
my.data$Date<-as.POSIXct(my.data$Date, tz="", "%d/%m/%Y %H:%M:%S")
# Plot 3
png("plot3.png",width=480,height=480) #Set display to png device
# NOTE(review): the as.numeric(as.character(...)) round-trips indicate the
# sub-metering columns were read as factors (non-numeric tokens present,
# likely the file's missing-value marker) -- confirm; reading with an
# appropriate na.strings/colClasses would make these coercions unnecessary.
with(my.data,plot(Date,as.numeric(as.character(Sub_metering_1)),type="l",ylab="Energy sub metering",xlab=""))
with(my.data,lines(Date,as.numeric(as.character(Sub_metering_2)),col="red"))
with(my.data,lines(Date,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright",lty=1,col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off() #Close device
|
1f4590e9ed3ce198e47882d9a9c1f5df3bfb3101
|
bcc5dab59e4229eb26dc7f2e24b5964d97aa4840
|
/JH-Data Analysis/Quiz3.R
|
6e8901dc45b29fc69b4dd3e08c5786e5317e4f4f
|
[] |
no_license
|
JPruitt/Coursera
|
87d3d273bce00d143769f6c8070c9a2163a568fd
|
339873ff1036b4a1d52f6cca5001b4d9670f374d
|
refs/heads/master
| 2021-01-01T19:42:50.471049
| 2014-05-09T11:00:20
| 2014-05-09T11:00:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,687
|
r
|
Quiz3.R
|
# Week 3 Quiz
# Coursework script: each question loads its data, runs the analysis, and
# records the chosen answer as a ## ** comment.
## Question 1
## Below is a plot of bone density versus age. It was created using the following
## code in R:
library(ElemStatLearn)
data(bone)
plot(bone$age,bone$spnbmd,pch=19,col=((bone$gender=="male")+1))
## Males are shown in black and females in red. What are the characteristics that
## make this an exploratory graph? Check all correct options.
## **There plot does not have a legend.**
## **The axis labels are R variables**
## **The plot does not report the units on the axis labels.**
## Question 2
## Below is a boxplot of yearly income by marital status for individuals in the
## United States. It was created using the following code in R:
library(ElemStatLearn)
data(marketing)
# (fix: removed a stray copy-pasted plot(bone$age, ...) line that belonged
# to Question 1 and had nothing to do with the marketing data)
boxplot(marketing$Income ~ marketing$Marital,col="grey",xaxt="n",ylab="Income",xlab="")
axis(side=1,at=1:5,labels=c("Married","Living together/not married","Divorced or separated","Widowed","Nevermarried"),las=2)
## Which of the following can you conclude from the plot? (Check all that apply)
## **There are more individuals who were never married than divorced in this data set.**
## **The median income for individuals who are divorced is higher than the median for individuals who are widowed.**
## **The 75th percentile of the income for widowed individuals is almost the same as the 75th percentile for never married individuals..**
## Question 3
## Load the iris data into R using the following commands:
library(datasets)
data(iris)
## Subset the `iris` data to the first four columns and call this matrix
## `irisSubset`. Apply hierarchical clustering to the irisSubset data frame to
## cluster the rows. If I cut the dendrogram at a height of 3 how many clusters
## result?
## SHOW YOUR WORK:
irisSubset <- iris[,1:4]
plot(hclust(dist(irisSubset)))
## Brendan suggests: draw the boxes for a cut at height 3
rect.hclust(hclust(dist(irisSubset)), h=3)
## **4 clusters**
## Question 4
## Load the following data set into R using either the .rda or .csv file:
## https://spark-public.s3.amazonaws.com/dataanalysis/quiz3question4.rda
## https://spark-public.s3.amazonaws.com/dataanalysis/quiz3question4.csv
## Make a scatterplot of the `x` versus `y` values. How many clusters do you
## observe? Perform k-means clustering using your estimate as to the number of
## clusters. Color the scatterplot of the `x`, `y` values by what cluster they
## appear in. Is there anything wrong with the resulting cluster estimates?
## SHOW YOUR WORK:
download.file('https://spark-public.s3.amazonaws.com/dataanalysis/quiz3question4.csv',
              "quiz3question4.csv")
q3q4.data <- read.csv("quiz3question4.csv", header=TRUE)
q3q4.scatter <- plot(q3q4.data$x, q3q4.data$y)
# How many clusters do you observe?
# ~~4?~~ **2**
# Perform k-means clustering...
kmeansObj <- kmeans(q3q4.data, centers=2)
plot(q3q4.data$x, q3q4.data$y, col=kmeansObj$cluster, pch=19, cex=2)
## **There are two obvious clusters. The k-means algorithm does not assign all of the points to the correct clusters because the clusters wrap around each other.**
## Question 5
## Load the hand-written digits data using the following commands:
library(ElemStatLearn)
data(zip.train)
## Each row of the `zip.train` data set corresponds to a hand written digit. The
## first column of the zip.train data is the actual digit. The next 256 columns
## are the intensity values for an image of the digit. To visualize the digit we
## can use the `zip2image()` function to convert a row into a 16 x 16 matrix:
# Create an image matrix for the 3rd row, which is a 4
im <- zip2image(zip.train, 3)
image(im)
## Using the `zip2image` file, create an image matrix for the 8th and 18th rows.
## For each image matrix calculate the `svd` of the matrix (with no scaling). What
## is the percent variance explained by the first singular vector for the image
## from the 8th row? What is the percent variance explained for the image from the
## 18th row? Why is the percent variance lower for the image from the 18th row?
## SHOW YOUR WORK:
im8 <- zip2image(zip.train, 8)
im18 <- zip2image(zip.train, 18)
svd8 <- svd(im8)
svd18 <- svd(im18)
par(mfrow=c(2,2))
plot(svd8$d^2/sum(svd8$d^2),xlab="Column",ylab="Percent of variance explained",pch=19)
plot(svd18$d^2/sum(svd18$d^2),xlab="Column",ylab="Percent of variance explained",pch=19)
image(im8)
image(im18)
## **The first singular vector explains 98% of the variance for row 8 and 48% for row 18.
## The reason the first singular vector explains less variance for the 18th row is that the
## image is more complicated, so there are multiple patterns each explaining a large
## percentage of variance.**
|
f0fb919f6f23126d6b449fd6d7442d4ed407b282
|
89b94a1553151b34386d75b0006b970dc372d2de
|
/College Basketball/iowa basketball analysis.R
|
c53d442e07cb1272dd5d348c419ac6be42f1e3e4
|
[] |
no_license
|
aelfering/Sports-Data
|
0d2052a9fdf0e05b08ec820faaefa34f0066d48d
|
86ed99e443f255a4c3178358c374429454984d13
|
refs/heads/master
| 2022-05-02T23:50:14.812967
| 2022-04-01T15:49:59
| 2022-04-01T15:49:59
| 222,936,805
| 1
| 0
| null | 2019-11-21T01:02:03
| 2019-11-20T12:52:29
| null |
UTF-8
|
R
| false
| false
| 5,463
|
r
|
iowa basketball analysis.R
|
# Iowa Basketball Analysis
# Joins Iowa's team and opponent box-score exports, derives per-season
# shooting mixes, and plots (1) the share of FG attempts from 3 vs 2 and
# (2) running net made shots by type within each season.
library(dplyr)
library(tidyr)
library(tidyverse)
library(reshape2)
# Input CSVs are expected in the working directory.
iowa.team <- read.csv('Iowa Basketball.csv')
iowa.opp <- read.csv('Iowa Opp Basketball.csv')
seasons <- read.csv('iowa seasons.csv')
#### Cleaning the Script ####
# Pair each game's team row with the matching opponent row; .x columns are
# Iowa's stats and .y are the opponent's (renamed just below).
iowa.team.opp <- inner_join(iowa.team,
                            iowa.opp,
                            by = c('Date' = 'Date',
                                   'Location' = 'Location',
                                   'Schl' = 'Schl',
                                   'Opp' = 'Opp',
                                   "Result" = "Result",
                                   'MP' = 'MP'))
iowa.column.rename <- dplyr::select(iowa.team.opp,
                                    Date,
                                    Team = Schl,
                                    Opp,
                                    Location,
                                    Result,
                                    MP,
                                    Team.FG = FG.x,
                                    Team.FGA = FGA.x,
                                    Team.2P = X2P.x,
                                    Team.2PA = X2PA.x,
                                    Team.3P = X3P.x,
                                    Team.3PA = X3PA.x,
                                    Team.FT = FT.x,
                                    Team.FTA = FTA.x,
                                    Team.PTS = PTS.x,
                                    Opp.FG = FG.y,
                                    Opp.FGA = FGA.y,
                                    Opp.2P = X2P.y,
                                    Opp.2PA = X2PA.y,
                                    Opp.3P = X3P.y,
                                    Opp.3PA = X3PA.y,
                                    Opp.FT = FT.y,
                                    Opp.FTA = FTA.y,
                                    Opp.PTS = PTS.y)
# Flag wins/losses, then strip the W/L markers and overtime suffixes so the
# Result string is just "pts-pts" and can be split into numbers.
iowa.results <- iowa.column.rename %>%
  mutate(Wins = ifelse(grepl('W', Result), 1, 0),
         Loses = ifelse(grepl('L', Result), 1, 0)) %>%
  mutate(Result = gsub('L', '', Result)) %>%
  mutate(Result = gsub('W', '', Result)) %>%
  mutate(Result = gsub(' (OT)', '', Result, fixed = TRUE)) %>%
  mutate(Result = gsub(' (2OT)', '', Result, fixed = TRUE))
iowa.results.split <- separate(iowa.results, Result, into = c('Iowa.Pts', 'Opp.Pts'), sep = '-')
iowa.results.seasons <- inner_join(seasons, iowa.results.split, by = c('Date' = 'Date'))
iowa.results.pts.int <- dplyr::mutate(iowa.results.seasons,
                                      Iowa.Pts = as.numeric(Iowa.Pts),
                                      Opp.Pts = as.numeric(Opp.Pts))
head(iowa.results.pts.int)
#### What percent of points come from 3-point shots? ####
# Season totals plus the share of points/attempts by shot type.
# NOTE(review): Total.Minutes = max(MP) takes the single largest per-game
# MP value in the season, not the season's total minutes -- confirm intent
# before trusting the per-40-minute rates below.
percent.shots <- iowa.results.pts.int %>%
  group_by(Season) %>%
  summarise(Total.Season.Pts = sum(Team.PTS),
            Total.Season.Att = sum(Team.3PA) + sum(Team.2PA),
            Total.3P.Pts = sum(Team.3P) * 3,
            Total.3P.Att = sum(Team.3PA),
            Total.2P.Pts = sum(Team.2P) * 2,
            Total.2P.Att = sum(Team.2PA),
            Total.FT.Pts = sum(Team.FT),
            Total.FT.Att = sum(Team.FTA),
            Total.Minutes = max(MP)) %>%
  ungroup() %>%
  mutate(Pct.3P = Total.3P.Pts/Total.Season.Pts,
         Pct.3PA = Total.3P.Att/Total.Season.Att,
         Three.Points.40.Min = (Total.3P.Att * 40)/Total.Minutes,
         Pct.2P = Total.2P.Pts/Total.Season.Pts,
         Pct.2PA = Total.2P.Att/Total.Season.Att,
         Two.Points.40.Minutes = (Total.2P.Att * 40)/Total.Minutes,
         Pct.FT = Total.FT.Pts/Total.Season.Pts,
         Pct.FTA = Total.FT.Att/Total.Season.Att,
         Season = as.factor(Season))
# NOTE(review): the subtitle string below appears truncated ("...from ").
ggplot(percent.shots, aes(x = Season, y = Pct.3PA)) +
  # Examining shot attempts: 3PA share (dark red) vs 2PA share (light blue)
  geom_line(aes(group = 1), color = '#94003a') +
  geom_point(color = '#94003a') +
  geom_line(data = percent.shots, aes(x = Season, y = Pct.2PA, group = 1), color = '#00bcff') +
  geom_point(data = percent.shots, aes(x = Season, y = Pct.2PA, group = 1), color = '#00bcff') +
  # Theme Elements
  theme(plot.title = element_text(size = 18, face = 'bold', family = 'Arial'),
        plot.subtitle = element_text(size = 15, family = 'Arial')) +
  labs(title = 'Three Point Shots are a Bigger Focus for Iowa',
       subtitle = 'Attempts for three-point attempts have steadily climbed to nearly 40% from ',
       caption = 'Visualization by Alex Elfering\nSource: College Basketball Reference',
       x = '',
       y = 'Percent of Attempts') +
  scale_y_continuous(labels = scales::percent)
# This visualizes that percent of points from three pointers has increased per share
#### Net Pointers?
# Per-game net made shots by type, accumulated within each season.
running.net <- iowa.results.pts.int %>%
  mutate(Net.3P = Team.3P-Opp.3P,
         Net.2P = Team.2P-Opp.2P,
         Net.FT = Team.FT-Opp.FT) %>%
  group_by(Season) %>%
  mutate(Running.Net.3P = cumsum(Net.3P),
         Running.Net.2P = cumsum(Net.2P),
         Running.Net.FT = cumsum(Net.FT),
         Season.Game.No = row_number()) %>%
  ungroup()
# One panel per season: FT (red), 2P (blue), 3P (orange) running nets.
ggplot(running.net,
       aes(x = Season.Game.No)) +
  geom_hline(yintercept = 0,
             alpha = 0.6) +
  theme_bw() +
  geom_line(mapping = aes(y = Running.Net.FT),
            color = 'red') +
  geom_line(mapping = aes(y = Running.Net.2P),
            color = 'blue') +
  geom_line(mapping = aes(y = Running.Net.3P),
            color = 'orange') +
  facet_wrap(~Season)
head(iowa.results.pts.int)
|
66329fb720123b8a88e50c1145406e7cdf5cfbe2
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/synlik/R/I_quadModMat.R
|
4ac8dc0a9a2db520142b5e415d044b7a5b48d4f3
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 440
|
r
|
I_quadModMat.R
|
# Creates the design matrix for a full quadratic multivariate regression:
# intercept, mean-centred linear terms, halved squared terms, and all
# pairwise interactions of the centred columns.
.quadModMat <- function(X)
{
  p <- ncol(X)
  # centre every column so the quadratic surface is expressed about the mean
  Xc <- sweep(X, 2, colMeans(X), "-")
  # intercept, linear, and (halved) quadratic terms
  mod <- cbind(1, Xc, (Xc^2) / 2)
  # append one interaction column per unordered pair of parameters
  if (p > 1) {
    pairs <- combn(p, 2)
    for (k in seq_len(ncol(pairs))) {
      mod <- cbind(mod, Xc[, pairs[1, k]] * Xc[, pairs[2, k]])
    }
  }
  mod
}
|
73caf866144ead15fee4c843093dd9f7b17b2131
|
c251710444a5eb26d6ccf7ba233863c5042526cc
|
/run_analysis.R
|
2dd7fbad0a314494dc5d2297bac030bc9069cbcd
|
[] |
no_license
|
martinhorecky/GettingAndCleaningData
|
8c99416f95c50c8ddab8afab02e4284f409711df
|
6a791b338defecf11e02dd44e45b7db56b08a2bf
|
refs/heads/master
| 2021-01-13T02:22:35.512874
| 2014-06-14T21:03:04
| 2014-06-14T21:03:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,002
|
r
|
run_analysis.R
|
## Note: set your working directory before running this script, so that
## the script sits in the folder root and the data is extracted into the
## 'UCI HAR Dataset' subfolder.
######################################################################################
## Start by reading common data from files
######################################################################################
###########################################
## Read features.txt
## this data set contains the column names for the x_test/x_train data sets
features <- read.csv("./UCI HAR Dataset/features.txt", header = FALSE, sep = "")
###########################################
## Read activity_labels.txt
## this data maps activity ids to readable labels
activity <- read.csv("./UCI HAR Dataset/activity_labels.txt", header = FALSE, sep = "")
names(activity) <- c("id", "label")
######################################################################################
## Read test data sets
######################################################################################
###########################################
## Read x_test data set: the measured feature values
xtest <- read.csv("./UCI HAR Dataset/test/X_test.txt", header = FALSE, sep = "")
## set xtest column names from features.txt
names(xtest) <- features[, 2]
###########################################
## Read y_test data set: activity id performed on each row
ytest <- read.csv("./UCI HAR Dataset/test/y_test.txt", header = FALSE)
names(ytest) <- c("activityid")
## Look up readable activity labels with match() instead of merge():
## merge(..., all = TRUE) re-sorts the result by activityid, which silently
## breaks the row correspondence with xtest/subjecttest before the cbind()
## below and mislabels the observations.
ytestlabel <- activity$label[match(ytest$activityid, activity$id)]
###########################################
## Read subject test data set: subject id for each row
subjecttest <- read.csv("./UCI HAR Dataset/test/subject_test.txt", header = FALSE)
###########################################
## Combine activity label, subject id and measurements into a single test set
test <- cbind(ytestlabel, subjecttest, xtest)
names(test)[1] <- c("activity")
names(test)[2] <- c("subject")
###########################################
## We're only interested in the two id columns plus mean()/std() measurements
m <- grep("mean\\(\\)", names(test))
s <- grep("std\\(\\)", names(test))
ms <- sort(c(1, 2, m, s))
test <- test[, ms]
######################################################################################
## Read train data sets
######################################################################################
###########################################
## Read x_train data set: the measured feature values
xtrain <- read.csv("./UCI HAR Dataset/train/X_train.txt", header = FALSE, sep = "")
## set xtrain column names from features.txt
names(xtrain) <- features[, 2]
###########################################
## Read y_train data set: activity id performed on each row
ytrain <- read.csv("./UCI HAR Dataset/train/y_train.txt", header = FALSE)
names(ytrain) <- c("activityid")
## Look up readable activity labels with match() instead of merge():
## merge(..., all = TRUE) re-sorts the result by activityid, which silently
## breaks the row correspondence with xtrain/subjecttrain before the cbind()
## below and mislabels the observations.
ytrainlabel <- activity$label[match(ytrain$activityid, activity$id)]
###########################################
## Read subject train data set: subject id for each row
subjecttrain <- read.csv("./UCI HAR Dataset/train/subject_train.txt", header = FALSE)
###########################################
## Combine activity label, subject id and measurements into a single train set
train <- cbind(ytrainlabel, subjecttrain, xtrain)
names(train)[1] <- c("activity")
names(train)[2] <- c("subject")
###########################################
## We're only interested in the two id columns plus mean()/std() measurements
m <- grep("mean\\(\\)", names(train))
s <- grep("std\\(\\)", names(train))
ms <- sort(c(1, 2, m, s))
train <- train[, ms]
######################################################################################
## Merge test and train data sets together and save to file clean_data.txt
######################################################################################
## Stack train on top of test; the two sets share identical columns.
## Note: write.csv() includes row names by default.
merged <- rbind(train, test)
write.csv(x = merged, file = "clean_data.txt")
######################################################################################
## Get the summary data
######################################################################################
## Average of every measurement column for each subject/activity pair.
## A single aggregate() over all measurement columns replaces the original
## per-column aggregate + merge loop, which performed O(columns^2) work by
## merging the result one column at a time; column names carry over directly.
averages <- aggregate(merged[, 3:ncol(merged)],
                      by = list(subject = merged$subject, activity = merged$activity),
                      FUN = mean)
## The old merge() loop left the rows sorted by (subject, activity);
## reproduce that ordering so the output file is unchanged in content order.
averages <- averages[order(averages$subject, averages$activity), ]
rownames(averages) <- NULL
write.csv(averages, "averages.txt")
|
6006ea00e723998884f1fe093a61510afa5fedfa
|
a7e3f0739f609ca7d81468a89348f55633c22d44
|
/archive/abc_binomial.R
|
1cbaec476751b630a76c6a0d3395063812ad8058
|
[] |
no_license
|
kkaloudis/approximate-bayesian-computation
|
1d3b0c752a691ac687657d92f12f43dc14fad333
|
6fa28c285661cbb22cd006940f8b86a71f5a7254
|
refs/heads/master
| 2023-03-18T14:29:28.375220
| 2020-05-28T17:45:16
| 2020-05-28T17:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 854
|
r
|
abc_binomial.R
|
# Binomial with beta prior
# Mean absolute difference between simulated data x and observed data y,
# normalised by the length of x (the simulated series).
discrepancy <- function(x, y){
  sum(abs(x - y)) / length(x)
}
# ABC rejection sampler for a Bernoulli model with a Beta(alpha, beta) prior.
#
# For each of the N posterior draws, proposals theta ~ Beta(alpha, beta)
# are simulated together with a synthetic data set x, until the discrepancy
# between x and the observed data y falls within the tolerance epsilon.
#
# Args:
#   N:       number of accepted posterior samples to return.
#   epsilon: ABC tolerance on discrepancy(x, y).
#   y:       observed 0/1 data vector.
#   alpha:   Beta prior shape1 (default 1).
#   beta:    Beta prior shape2 (default 1).
#
# Returns: numeric vector of N accepted theta values.
abc_sample <- function(N, epsilon, y, alpha=1, beta=1){
  samples <- rep(0, N)
  for (i in seq_len(N)) {  # seq_len(): a plain 1:N misbehaves when N == 0
    rho <- epsilon + 1
    accept_prob <- rho <= epsilon
    while (runif(n = 1) > accept_prob){
      theta <- rbeta(n = 1, shape1 = alpha, shape2 = beta)
      # Simulate length(y) Bernoulli draws. The original used the global
      # variable `n` here, which failed (or was silently wrong) whenever the
      # caller's workspace did not define an n equal to length(y).
      x <- rbinom(length(y), prob = theta, size = 1)
      rho <- discrepancy(x, y)
      accept_prob <- rho <= epsilon
    }
    samples[i] <- theta
  }
  return(samples)
}
# Indicator kernel: TRUE when the simulated data x lies within the
# ABC tolerance epsilon of the observed data y.
kernel <- function(x, y, epsilon){
  discrepancy(x, y) <= epsilon
}
# ---- Demo: reproducible example run of the ABC sampler ----
set.seed(1)
# true success probability used to generate the observed data
p_0 <- 0.7
# NOTE(review): sample_size is never used below — `n` controls the data length
sample_size <- 3
n <- 10
# observed data: n Bernoulli(p_0) draws
y <- rbinom(n, prob = p_0, size = 1)
# epsilon = 0 demands an exact match between simulated and observed data
samples <- abc_sample(N = 500, epsilon = 0, y = y, alpha = 2, beta = 1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.