blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
76eb602fc0449b2ef9689e2e2b092b0e3d8fb3bb
|
22de5761ce45456a21ac8c29abe56cbfc76cbfd9
|
/man/compute_turning_angle.Rd
|
176da5e6e3c280aa2f60c71f65829ca880eabdb2
|
[
"MIT"
] |
permissive
|
Ryan-Colin-White/smp-framework-in-r
|
bc0d1abc119cc0a744715cb3e3aec4df0f1a44e7
|
c98f94180f7d23dca1b6f37c818c3a24d6e69087
|
refs/heads/main
| 2023-06-27T09:25:15.567095
| 2021-08-06T17:55:13
| 2021-08-06T17:55:13
| 389,116,120
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 695
|
rd
|
compute_turning_angle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SMP-framework-sub-functions.R
\name{compute_turning_angle}
\alias{compute_turning_angle}
\title{Turning angle computation}
\usage{
compute_turning_angle(bearing_vec_1, bearing_vec_2)
}
\arguments{
\item{bearing_vec_1}{A vector or numeric value of bearing angles.}
\item{bearing_vec_2}{A vector or numeric value of bearing angles, one observation ahead of bearing_vec_1.}
}
\value{
The absolute turning angle or the change in bearing angles of an object in degrees.
}
\description{
Computes the turning angle of an object.
}
\examples{
my_turning_angle <- compute_turning_angle(bearing_vec_1 = 90, bearing_vec_2 = 60)
}
|
a861b426d34c60c286e9fdd86c252bceae36ce7a
|
05ef315c6ebe51aad1107d3e13a2d46598ca1f76
|
/R/getArea.R
|
784c662553da4b3766a937f125e3d95627ca8784
|
[] |
no_license
|
rxlacroix/tilegram
|
c29bcd0aae2efa849435c695b12c621a02300106
|
e448ca9f4942591ec4362d137e4a71daf7e5e2dc
|
refs/heads/master
| 2020-06-03T15:21:12.322899
| 2019-06-17T20:34:12
| 2019-06-17T20:34:12
| 191,626,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 344
|
r
|
getArea.R
|
# Compute the total area of a SpatialPolygons object.
# Hole rings contribute negative area, so they are subtracted from the
# area of their enclosing ring before summing across all polygons.
getArea <- function(x) {
  polygonArea <- function(p) {
    is_hole <- unlist(lapply(p@Polygons, function(ring) ring@hole))
    ring_areas <- unlist(lapply(p@Polygons, function(ring) ring@area))
    # negate areas of hole rings, keep solid rings positive
    ifelse(is_hole, -1, 1) * ring_areas
  }
  sum(unlist(lapply(x@polygons, polygonArea)))
}
|
f91844ceec06e806081bee0ef0be5725a32c3e79
|
7d41cd956e19c3da6604715e4791cb44b21493fa
|
/plot1.R
|
f35e5bf9f676d944b4be59c941366995fabc9738
|
[] |
no_license
|
cawfeeandtea/ExData_Plotting1
|
c79429be5a84aadb0ab0d3add32e5e3fe172297f
|
5637aa214f8eec5df1553fc0690f91ab6cd47a3a
|
refs/heads/master
| 2021-01-24T22:06:27.573899
| 2015-06-06T15:56:59
| 2015-06-06T15:56:59
| 36,894,177
| 0
| 0
| null | 2015-06-04T20:37:58
| 2015-06-04T20:37:58
| null |
UTF-8
|
R
| false
| false
| 706
|
r
|
plot1.R
|
# Plot 1: Global Active Power Histogram
# Reads the household power dataset, keeps the two target days
# (2007-02-01 and 2007-02-02), and saves a histogram of
# Global_active_power to plot1.png.

# Read in data from household_power_consumption.txt file (';'-separated)
info <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
info$Date <- as.Date(info$Date, format = "%d/%m/%Y")
# Subset of data from the specific timeframe
data <- info[(info$Date == "2007-02-01") | (info$Date == "2007-02-02"), ]
# Values are read as text (missing values appear as "?"); coerce to
# numeric — "?" becomes NA with a warning, which is expected here.
data$Global_active_power <- as.numeric(as.character(data$Global_active_power))
# Build a POSIXct timestamp from the Date and Time columns.
# BUG FIX: the format string was previously passed as a stray third
# argument to transform() (not to as.POSIXct), which only added a junk
# column to the data frame. "Date" is already a Date, so the default
# "%Y-%m-%d %H:%M:%S" parsing of paste(Date, Time) is correct.
data <- transform(data, timestamp = as.POSIXct(paste(Date, Time)))
# Create and save histogram as plot1.png
png('plot1.png', width = 480, height = 480)
hist(data$Global_active_power, main = paste("Global Active Power"), col = "red", xlab = "Global Active Power (kilowatts)")
dev.off()
|
f7d17b93d2f944693d39317f5ef8c3b341fbd7df
|
8763344fa9780625bbf4c8a2a6fd60671d41b3fe
|
/magic04.R
|
f80dc16f32899d7a3406c4ed5f90f1b284bde5ec
|
[] |
no_license
|
ldnicolasmay/julia_learning
|
952d17030cefec94a48f3df509d38fce1e56aa83
|
3c006e3ded84cae85286597ed55e74cb7dd965e7
|
refs/heads/master
| 2020-04-26T02:12:27.901327
| 2019-03-11T03:28:13
| 2019-03-11T03:28:13
| 173,227,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 424
|
r
|
magic04.R
|
# Read the MAGIC gamma-telescope dataset (comma-delimited, no header),
# attach the documented column names, and re-export the table as CSV
# with missing values written as empty strings.
magic04 <- readr::read_delim(
  "magic04.data",
  delim = ",",
  col_names = FALSE,
  trim_ws = TRUE
)
magic04_columns <- c(
  "fLength", "fWidth", "fSize", "fConc", "fConc1",
  "fAsym", "fM3Long", "fM3Trans", "fAlpha", "fDist",
  "class"
)
names(magic04) <- magic04_columns
readr::write_csv(magic04, "magic04.csv", na = "")
|
b711a836052f77e27b7730171e4a2fcd8f051e09
|
919771876295a23888d29017293f9ad3c1f9d353
|
/level4_page/00_setup.R
|
4027afdfbb206a2b60b39fecbb04d6076be7d2f3
|
[
"Apache-2.0"
] |
permissive
|
bcgov/air-zone-reports
|
3f0d6de54e4f3bbad0260d76efaf5795d2e15056
|
531d182db498255e222bb012c36a46e65081ec75
|
refs/heads/master
| 2023-09-01T11:24:45.365365
| 2023-08-30T17:09:17
| 2023-08-30T17:09:17
| 170,563,197
| 6
| 4
|
Apache-2.0
| 2020-04-15T00:35:37
| 2019-02-13T19:06:30
|
R
|
UTF-8
|
R
| false
| false
| 6,546
|
r
|
00_setup.R
|
#' CREATE TRENDS of air quality in BC
#'
#' Builds long-term trend summaries of air-quality metrics averaged by air
#' zone, plus ggplot and plotly trend charts relative to the current CAAQS.
#' The files here are duplicated in ./assets.
#'
#' @param dirs_location Directory holding the input CSVs
#'   (caaqs_results.csv, annual_results.csv, liststations.csv).
#' @param reporting_year Final year of the reporting period; defaults to the
#'   most recent year present in caaqs_results.csv.
#' @param airzone_filter Air zone shown in the plots ('BC' = province-wide).
#' @return A list: table (reference-year percent-change summary),
#'   ggplot, plotly, and data (the data frame behind the plots).
get_trends <- function(dirs_location = './data/out', reporting_year = NULL, airzone_filter = 'BC') {
  library(dplyr)
  library(readr)
  library(ggplot2)
  library(plotly)

  # ---- Load inputs ------------------------------------------------------
  df_data_trends_caaqs <- readr::read_csv(paste(dirs_location, 'caaqs_results.csv', sep = '/'))
  # drop missing values and sentinel negatives
  df_data_trends_annual <- readr::read_csv(paste(dirs_location, 'annual_results.csv', sep = '/')) %>%
    filter(!is.na(value), value > -10)
  if (is.null(reporting_year)) {
    reporting_year <- max(df_data_trends_caaqs$year)
  }
  maxyear <- reporting_year
  # station list: drop non-AQMS stations and stations without annual data
  df_stations <- readr::read_csv(paste(dirs_location, 'liststations.csv', sep = '/')) %>%
    mutate(AQMS = ifelse(is.na(AQMS), 'N/A', AQMS)) %>%
    filter(AQMS != 'N') %>%
    filter(site %in% df_data_trends_annual$site)

  # ---- 3-year rolling averages -----------------------------------------
  # rolling 3-year average needed for:
  #   pm25 annual, pm25 24-hour, o3 8-hour, no2 1-hour, so2 1-hour
  # metrics containing "MEAN_1HR" stay as single-year values.
  df_data_trends_annual_3yr <- df_data_trends_annual %>%
    mutate(index = 1:n())
  # replicate each annual record into the 3 reporting years it feeds
  df_data_trends_annual_3yr_ <- NULL
  for (i in 0:2) {
    df_data_trends_annual_3yr_ <- df_data_trends_annual_3yr_ %>%
      bind_rows(
        df_data_trends_annual_3yr %>%
          mutate(year = year + i)
      )
  }
  # average the contributing years; require at least 2 valid observations
  df_data_trends_annual_3yr <- df_data_trends_annual_3yr_ %>%
    mutate(valid_count = ifelse(is.na(value), 0, 1)) %>%
    group_by(parameter, site, instrument, tfee, year, metric) %>%
    dplyr::mutate(value_3yr = sum(value, na.rm = TRUE), valid_n = sum(valid_count)) %>%
    filter(valid_n >= 2) %>%
    ungroup() %>%
    mutate(value = ifelse(grepl('MEAN_1HR', metric, ignore.case = TRUE), value, value_3yr / valid_n)) %>%
    select(-value_3yr, -valid_count) %>%
    filter(year <= maxyear)

  # ---- Summaries by air zone -------------------------------------------
  df_data_trends_annual_airzone <- df_data_trends_annual_3yr %>%
    left_join(df_stations %>%
                select(site, AIRZONE)) %>%
    filter(!is.na(AIRZONE)) %>%
    group_by(parameter, tfee, year, metric, AIRZONE) %>%
    dplyr::summarise(value_avg = mean(value, na.rm = TRUE),
                     value_min = min(value, na.rm = TRUE),
                     value_max = max(value, na.rm = TRUE))
  # province-wide summary appended as pseudo air zone "BC"
  df_data_trends_annual_overall <- df_data_trends_annual_3yr %>%
    left_join(df_stations %>%
                select(site, AIRZONE)) %>%
    filter(!is.na(AIRZONE)) %>%
    group_by(parameter, tfee, year, metric) %>%
    dplyr::summarise(value_avg = mean(value, na.rm = TRUE),
                     value_min = min(value, na.rm = TRUE),
                     value_max = max(value, na.rm = TRUE)) %>%
    mutate(AIRZONE = "BC")
  df_data_trends_annual_airzone <- df_data_trends_annual_airzone %>%
    bind_rows(df_data_trends_annual_overall) %>%
    filter(!tfee) %>%
    filter(grepl('RAW', metric, ignore.case = TRUE))

  # ---- Reference-year comparison table ---------------------------------
  # Percent increase/decrease versus the 1990 and 2000 reference years.
  # BUG FIX: the reporting-year column was hard-coded as `2021`; it now
  # tracks maxyear (pivot_wider names its columns after the year values,
  # so the column name is the year as a string). Identical result when
  # maxyear == 2021.
  ref_year_col <- as.character(maxyear)
  df_BC_summary_ref <- df_data_trends_annual_airzone %>%
    filter(year %in% c(1990, 2000, 2010, maxyear)) %>%
    select(parameter, year, metric, AIRZONE, value_avg) %>%
    tidyr::pivot_wider(names_from = year, values_from = value_avg) %>%
    mutate(perc_2000 = envair::round2((.data[[ref_year_col]] - `2000`) / `2000` * 100),
           perc_1990 = envair::round2((.data[[ref_year_col]] - `1990`) / `1990` * 100))

  # ---- Plots ------------------------------------------------------------
  # Display labels, CAAQS thresholds and legend ordering per metric
  df_parameters <- tribble(
    ~parameter_label, ~label, ~CAAQS, ~order,
    'NO2 RAW_ANNUAL_98P_D1HM', 'NO2 (1-Hour)', 60, 5,
    'NO2 RAW_ANNUAL_MEAN_1HR', 'NO2 (Annual)', 17, 4,
    'O3 RAW_ANNUAL_4TH_D8HM', 'O3 (8-Hour)', 62, 3,
    'PM25 RAW_ANNUAL_98P_24h', 'PM2.5 (24-Hour)', 27, 2,
    'PM25 RAW_ANNUAL_MEAN_24h', 'PM2.5 (Annual)', 8.8, 1,
    'SO2 RAW_ANNUAL_99P_D1HM', 'SO2 (1-Hour)', 70, 7,
    'SO2 RAW_ANNUAL_MEAN_1HR', 'SO2 (Annual)', 5, 6
  )
  # data behind both plots: percent above/below the CAAQS per year
  a <- df_data_trends_annual_airzone %>%
    filter(!tfee) %>%
    filter(AIRZONE == airzone_filter) %>%
    filter(year >= 1990) %>%
    mutate(parameter_label = paste(parameter, metric)) %>%
    left_join(df_parameters) %>%
    mutate(percentAbove = (value_avg - CAAQS) / CAAQS * 100) %>%
    ungroup()
  result_ggplot <- a %>%
    ggplot(aes(x = year, y = percentAbove, colour = reorder(label, order))) +
    geom_line() +
    geom_hline(yintercept = 0, colour = 'red', linetype = 'dashed') +
    annotate("text", x = 2010, y = 10, label = 'Current CAAQS') +
    theme(legend.position = 'bottom', legend.title = element_blank(),
          legend.key = element_blank(),
          panel.background = element_rect(fill = NA, colour = 'black'),
          axis.title.x = element_blank()) +
    ylab('Percent Above/Below Current CAAQS')
  result_plotly <- a %>%
    mutate(percentAbove = envair::round2(percentAbove, n = 1)) %>%
    mutate(hovertext = paste(percentAbove, '%', sep = '')) %>%
    # subscript chemical formulas for the interactive legend
    mutate(label = gsub('PM2.5', 'PM<sub>2.5</sub>', label)) %>%
    mutate(label = gsub('O3', 'O<sub>3</sub>', label)) %>%
    mutate(label = gsub('NO2', 'NO<sub>2</sub>', label)) %>%
    mutate(label = gsub('SO2', 'SO<sub>2</sub>', label)) %>%
    plotly::plot_ly(x = ~year, y = ~percentAbove, color = ~reorder(label, order),
                    type = 'scatter', mode = 'lines+markers', showlegend = T,
                    hoverinfo = 'y',
                    hovertemplate = paste('%{y:.1f}', '%', sep = '')
    ) %>%
    layout(title = 'Trends in Pollutant Levels',
           legend = list(orientation = 'h'),
           yaxis = list(title = 'Percent Above/Below CAAQS'),
           xaxis = list(title = 'Annual Reporting Period')
    ) %>%
    plotly::layout(hovermode = 'x unified')
  return(list(table = df_BC_summary_ref, ggplot = result_ggplot, plotly = result_plotly, data = a))
}
# Format a numeric change as a colored HTML arrow followed by its
# magnitude: positive values get a red up-arrow, zero or negative
# values a blue down-arrow. The sign itself is dropped (abs()).
add_arrow <- function(value) {
  arrow_html <- if (value > 0) {
    '<span style="color:red">↑</span>'
  } else {
    '<span style="color:blue">↓</span>'
  }
  paste(arrow_html, abs(value), sep = '')
}
|
f586ccfda3d52d951de3e6220b491924d4ee947b
|
604286e26e2686c7f74fc189cafb5d2acd2467bc
|
/server.R
|
61a07671ab12658710ea949293771efba8adfd8b
|
[] |
no_license
|
fredericberlioz/appli_conso_espace_doubs
|
01793ec3ed9b03b254a14d63910358d83c30ceeb
|
f885e6a2acd75cbae5bc556b6518848b5cf30dea
|
refs/heads/main
| 2023-05-20T13:58:40.417505
| 2021-06-03T13:51:59
| 2021-06-03T13:51:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,033
|
r
|
server.R
|
# Shiny server function: wires UI inputs to outputs (indicator boxes,
# plotly charts, leaflet maps) for the land-consumption observatory app.
# NOTE(review): relies on globals defined elsewhere (annee_t0, anneesref,
# dcommunes, dindic, dtempo, dzonages, conn, col1..col3, i1..i9, ind1..ind9).
shinyServer(function(input, output, session) {
## Navigation -------------------
# Landing-page buttons jump to the matching navbar tab
observeEvent(input$page1, {
updateNavbarPage(session, "app_navbar", selected = "Cartographie communale")
})
observeEvent(input$page2, {
updateNavbarPage(session, "app_navbar", selected = "Indicateurs territoriaux")
})
observeEvent(input$page3, {
updateNavbarPage(session, "app_navbar", selected = "Comparaison entre commune et territoire")
})
observeEvent(input$methodo, {
updateNavbarPage(session, "app_navbar", selected = "Méthodologie")
})
observeEvent(input$mentions, {
updateNavbarPage(session, "app_navbar", selected = "Mentions légales")
})
## [Top banner] -------------
output$UI_bandeau_visuel <- renderUI({
fluidRow(id="bandeau_visuel",
fluidRow(
column(12, offset=0,
fluidRow(tags$p("Observatoire départemental de la consommation d'espaces", style="font-size:3em;"))
)
)
)
})
# reactive values shared by all observers and outputs below
v <- reactiveValues(
annee0 = annee_t0,
commune_select = NULL,
zone_select = NULL,
annees = anneesref,
boite = NULL,
com_date = NULL,
com_nondate = NULL,
com_bati = NULL,
com_indic = NULL,
zone_date = NULL,
zone_nondate = NULL,
zone_indic = NULL,
pal1 = NULL,
pal2 = NULL,
pal3 = NULL,
pal4 = NULL,
pal5 = NULL,
pal6 = NULL,
pal7 = NULL,
# pal8 = NULL,
pal9 = NULL,
com_tempo = NULL,
zone_tempo = NULL,
zone_indictempo = NULL,
tempo = NULL,
ind2 = NULL,
ind4 = NULL,
# ind8 = NULL,
comind2 = NULL,
comind4 = NULL
# comind8 = NULL
)
# reactive inputs
# react when the selected commune changes
observeEvent(
input$codeinsee, {
# default the zone selector to the EPCI the commune belongs to
updateSelectInput(session, "id_zone", selected = dcommunes[dcommunes$insee_com == input$codeinsee, "code_epci"])
v$commune_select = dcommunes %>% filter(insee_com == input$codeinsee)
# bounding box of the selected commune
b <- st_bbox(v$commune_select)
v$boite <- lapply(split(b, names(b)), unname) # converts the named vector into a list
# fetch the envelope and building layers from the database
# NOTE(review): SQL is built by string concatenation; input$codeinsee
# comes from a selectInput so values should be constrained, but
# parameterized queries would be safer — confirm.
com <- paste(unlist(input$codeinsee), collapse = "', '")
requete_date <- paste("SELECT * FROM env_date WHERE code_insee IN ('", com, "')", sep ="")
requete_nondate <- paste("SELECT * FROM env_nondate WHERE code_insee IN ('", com, "')", sep ="")
requete_bati <- paste("SELECT * FROM bati WHERE insee_com IN ('", com, "')", sep ="")
v$com_date <- st_as_sf(dbGetQuery(conn, requete_date), crs = 4326)
v$com_nondate <- st_as_sf(dbGetQuery(conn, requete_nondate), crs = 4326)
v$com_bati <- st_as_sf(dbGetQuery(conn, requete_bati), crs = 4326)
# select the commune-level indicators
v$com_indic <- dindic %>% filter (insee_com %in% input$codeinsee)
v$com_tempo <- dtempo %>% filter (insee_com %in% input$codeinsee)
})
# react when the territorial zoning changes
observeEvent(
input$id_zone, {
# NOTE(review): ifelse() used here for a scalar branch with assignment
# side effects; a plain if/else would be clearer
ifelse(input$id_zone == 'dept',
v$zone_select <- dcommunes,
v$zone_select <- dcommunes %>%
filter (code_epci %in% input$id_zone | code_scot %in% input$id_zone | code_pnr %in% input$id_zone)
)
# extract the indicators for the communes of the zone
comzone1 <- v$zone_select %>% select(insee_com)
v$zone_indic <- dindic %>% filter(insee_com %in% comzone1$insee_com)
v$zone_tempo <- dtempo %>% filter(insee_com %in% comzone1$insee_com)
# color palettes for the dynamic map
v$pal1 <- colorBin("YlOrRd", domain = v$zone_indic$sartif, bins = classIntervals(v$zone_indic$sartif, style = "jenks", n = 6)$brks)
v$pal3 <- colorBin("YlOrRd", domain = v$zone_indic$partif, bins = 5)
v$pal5 <- colorBin("YlOrRd", domain = v$zone_indic$cos, bins = 5)
v$pal6 <- colorBin("YlOrRd", domain = v$zone_indic$sartif_par_hab, bins = 5)
v$pal7 <- colorBin("YlOrRd", domain = v$zone_indic$sartif_par_op, bins = 5)
v$pal9 <- colorBin("YlOrRd", domain = v$zone_indic$sartif_evo_men, bins = 5)
}
)
# react when the reference year changes
observeEvent(
input$annee, {
v$annees <- anneesref[sapply(anneesref, function(x) x >= input$annee)]
v$tempo <- dtempo %>% filter(annee == input$annee)
}
)
# indicators that depend on both the reference year and the zoning
observeEvent(
c(input$annee, input$id_zone), {
v$zone_indictempo <- v$zone_indic %>%
dplyr::left_join(v$tempo, by = NULL, copy = FALSE)
v$ind2 <- v$zone_indictempo %>%
mutate(sartif_evo = sartif - stot) %>%
select(nom_com, sartif_evo)
v$pal2 <- colorBin("YlOrRd", domain = v$ind2$sartif_evo, bins = classIntervals(v$ind2$sartif_evo, style = "jenks", n = 6)$brks)
v$ind4 <- v$zone_indictempo %>%
mutate(partif_evo = (sartif - stot)/ stot) %>%
select(nom_com, partif_evo)
v$pal4 <- colorBin("YlOrRd", domain = v$ind4$partif_evo, bins = 5)
# v$ind8 <- v$zone_indictempo %>%
# mutate(sartif_evo_par_op = 10000 * (sartif - stot)/(occpot17 - ocpot)) %>%
# mutate(sartif_evo_par_op = ifelse(sartif_evo_par_op > 0, sartif_evo_par_op, NA)) %>%
# select(nom_com, sartif_evo_par_op)
#
# v$pal8 <- colorBin("YlOrRd", domain = v$ind8$sartif_evo_par_op)
}
)
# NOTE(review): 'input$code_insee' below is likely a typo for
# 'input$codeinsee' used everywhere else — this observer may not
# re-fire when the commune changes; verify against the UI ids.
observeEvent(
c(input$annee, input$code_insee), {
v$comind2 <- v$com_indic$sartif - v$com_tempo[v$com_tempo$annee == input$annee, "stot"][1]
v$comind4 <- v$comind2 / v$com_tempo[v$com_tempo$annee == input$annee, "stot"][1]
# v$comind8 <- 10000 * v$comind2 / (v$com_indic$occpot17 - v$com_tempo[v$com_tempo$annee == input$annee, "ocpot"][1])
}
)
## --------------- COMMUNE OUTPUTS -----------------------------------------------
output$nomcom <- renderText({v$commune_select$nom_com})
# indicator: artificialized surface
output$indicateur11 <- renderUI({
infoBox(
value = paste0(round(v$com_indic$senv17 + v$com_indic$senvnd), " ha"),
title = HTML("surface artificialisée <br/> par le bâti en 2017"),
icon = icon("new-window", lib = "glyphicon"),
color = 'orange',
width = 12,
fill = TRUE
)
})
# indicator: share of the commune that is artificialized
output$indicateur12 <- renderUI({
infoBox(
value = paste0(round(1000000 *(v$com_indic$senv17 + v$com_indic$senvnd)/ sum(v$commune_select$surface),1), " %"),
title = HTML("part de la commune <br/> artificialisée par le bâti en 2017"),
icon = icon("new-window", lib = "glyphicon"),
color = 'orange',
width = 12,
fill = TRUE
)
})
# indicator: artificialized surface per potential occupant
output$indicateur13 <- renderUI({
infoBox(
value = HTML(paste0(round(v$com_indic$sartif_par_op, 0), " m<sup>2</sup>")),
title = HTML("surface artificialisée par<br/> occupant potentiel en 2017"),
icon = icon("new-window", lib = "glyphicon"),
color = 'orange',
width = 12,
fill = TRUE
)
})
# barplot: built-up-area (SAB) progression per period, in ha/year
sabcom <- reactive({ v$com_tempo %>%
select(annee, stot) %>%
filter(annee >= input$annee) %>%
arrange(annee) %>%
mutate(stot_prec = dplyr::lag(stot),
annee_prec = dplyr::lag(annee),
annees_ecart = paste0(annee_prec, ' - ', annee),
surface_prog = (stot - stot_prec) / (annee - annee_prec)
) %>%
filter(!is.na(annee_prec)) %>%
select (annees_ecart, surface_prog)
})
output$barres12 <- renderPlotly({
plot_ly(sabcom(),
x = ~annees_ecart,
y = ~surface_prog,
type = 'bar',
marker = list(color = col1)
) %>%
layout(xaxis = list(title = 'années'),
yaxis = list(title = 'ha par an'))
})
# line chart: SAB evolution versus population (index 100 at start year)
popcom <- reactive({ v$com_tempo %>%
select(annee, population, stot) %>%
filter (annee >= input$annee) %>%
arrange(annee)
})
output$lines11 <- renderPlotly({
plot_ly()%>%
add_trace(x = popcom()$annee,
y = 100 * popcom()$stot / popcom()$stot[1],
name = 'surface artificialisée par le bâti',
type = 'scatter',
mode = 'lines+markers',
marker = list(color = col1),
line = list(color = col1)
)%>%
add_trace(x = popcom()$annee,
y = 100 * popcom()$population / popcom()$population[1],
name = 'population',
type = 'scatter',
mode = 'lines+markers',
marker = list(color = col3),
line = list(color = col3)
)%>%
layout(yaxis = list(title = paste('base 100 en ', input$annee, sep = '')),
legend = list(x = 0, y = 1.1))
})
# leaflet map for the commune — layer group names
gr1 <- 'surface artificialisée 2017'
gr2 <- 'surface artificialisée référence'
gr5 <- 'surface artificialisée non datée'
gr3 <- 'commune sélectionnée'
gr4 <- 'bati'
# base map and legend
output$carte <- renderLeaflet({
leaflet() %>%
fitBounds(lat1 = v$boite$ymin, lng1 = v$boite$xmin, lat2 = v$boite$ymax, lng2 = v$boite$xmax) %>%
addTiles(group = "OSM") %>%
# addProviderTiles(providers$GeoportailFrance.ignMaps, group = "IGN")%>%
addProviderTiles(providers$Stamen.Toner, group = "Stamen") %>%
addMapPane("envref", zIndex = 420) %>%
addMapPane("envact", zIndex = 410) %>%
addMapPane("bati", zIndex = 430) %>%
addLayersControl(
baseGroups = c("OSM", "IGN", "Stamen"),
overlayGroups = c(gr1, gr2, gr5, gr3, gr4),
options = layersControlOptions(collapsed = TRUE)
) %>%
addPolygons(data = req(v$commune_select),
color = col1,
fillColor = col1,
fillOpacity = 0.2,
group = gr3
)%>%
# NOTE(review): 'datation == 2016' appears to key the 2017 snapshot
# (datation = year - 1 as below) — confirm against the data model
addPolygons(data = req(v$com_date) %>% filter(datation == 2016),
fillColor = 'red',
fillOpacity = 0.7,
stroke = FALSE,
group = gr1,
options = pathOptions(pane = "envact")
)%>%
addPolygons(data = req(v$com_nondate),
fillColor = 'grey',
fillOpacity = 0.7,
stroke = FALSE,
group = gr5,
options = pathOptions(pane = "envact")
)%>%
addPolygons(data = req(v$com_date) %>% filter(datation == input$annee - 1),
fillColor = 'orange',
fillOpacity = 0.7,
stroke = FALSE,
group = gr2,
options = pathOptions(pane = "envref")
)%>%
addPolygons(data = req(v$com_bati),
stroke = FALSE,
fillColor = 'black',
fillOpacity = 0.7,
group = gr4,
options = pathOptions(pane = "bati")
)
})
# re-center the map and refresh the envelope layers when the selected commune changes
observeEvent(input$codeinsee, {
leafletProxy("carte") %>%
fitBounds(lat1 = v$boite$ymin, lng1 = v$boite$xmin, lat2 = v$boite$ymax, lng2 = v$boite$xmax)
leafletProxy("carte") %>%
clearShapes() %>%
addPolygons(data = req(v$commune_select),
color = col1,
fillColor = col1,
fillOpacity = 0.2,
group = gr3
)%>%
addPolygons(data = req(v$com_date) %>% filter(datation == 2016),
fillColor = 'red',
fillOpacity = 0.7,
stroke = FALSE,
group = gr1,
options = pathOptions(pane = "envact")
)%>%
addPolygons(data = req(v$com_nondate),
fillColor = 'grey',
fillOpacity = 0.7,
stroke = FALSE,
group = gr5,
options = pathOptions(pane = "envact")
)%>%
addPolygons(data = req(v$com_date) %>% filter(datation == input$annee - 1),
fillColor = 'orange',
fillOpacity = 0.7,
stroke = FALSE,
group = gr2,
options = pathOptions(pane = "envref")
)%>%
addPolygons(data = req(v$com_bati),
stroke = FALSE,
fillColor = 'black',
fillOpacity = 0.7,
group = gr4,
options = pathOptions(pane = "bati")
)
})
# refresh the reference envelope layer when the selected year changes
observeEvent(input$annee,{
leafletProxy("carte") %>%
clearGroup(group = gr2)%>%
addPolygons(data = req(v$com_date) %>% filter(datation == input$annee - 1),
fillColor = 'orange',
fillOpacity = 0.7,
stroke = FALSE,
group = gr2,
options = pathOptions(pane = "envref")
)
v$annee0 <- input$annee
})
## --------------- TERRITORY OUTPUTS -----------------------------------------------
output$nomzone <- renderText({dzonages[dzonages$id_zone == input$id_zone, "nom_zone"]})
# indicator: artificialized surface
output$indicateur21 <- renderUI({
infoBox(
value = paste0(round(sum(v$zone_indic$senv17) + sum(v$zone_indic$senvnd)), " ha"),
title = HTML("surface artificialisée <br/> par le bâti en 2017"),
icon = icon("new-window", lib = "glyphicon"),
color = 'olive',
width = 12,
fill = TRUE
)
})
# indicator: share of the territory that is artificialized
output$indicateur22 <- renderUI({
infoBox(
value = paste0(round(1000000 *(sum(v$zone_indic$senv17) + sum(v$zone_indic$senvnd))/ sum(v$zone_select$surface),1), " %"),
title = HTML("part du territoire <br/> artificialisé par le bâti en 2017"),
icon = icon("new-window", lib = "glyphicon"),
color = 'olive',
width = 12,
fill = TRUE
)
})
# indicator: SAB per potential occupant
output$indicateur23 <- renderUI({
infoBox(
value = HTML(paste0(round(10000 *(sum(v$zone_indic$senv17) + sum(v$zone_indic$senvnd))/(sum(v$zone_indic$occpot17)),0), " m<sup>2</sup>")),
title = HTML("surface artificialisée par <br/> occupant potentiel en 2017"),
icon = icon("new-window", lib = "glyphicon"),
color = 'olive',
width = 12,
fill = TRUE
)
})
# barplot: SAB progression per period for the whole zone
sabzone <- reactive({ v$zone_tempo %>%
select(annee, stot) %>%
filter(annee >= input$annee) %>%
group_by(annee) %>%
summarise(surface = sum(stot)) %>%
mutate(surface_prec = dplyr::lag(surface),
annee_prec = dplyr::lag(annee),
annees_ecart = paste0(annee_prec, ' - ', annee),
surface_prog = (surface - surface_prec) / (annee - annee_prec)
) %>%
filter(!is.na(annee_prec)) %>%
select (annees_ecart, surface_prog)
})
output$barres22 <- renderPlotly({
plot_ly(sabzone(),
x = ~annees_ecart,
y = ~surface_prog,
type = 'bar',
marker = list(color = col2)
) %>%
layout(xaxis = list(title = 'années'),
yaxis = list(title = 'ha par an'))
})
# line chart: SAB evolution versus population for the zone
popzone <- reactive({ v$zone_tempo %>%
select(annee, population, stot) %>%
filter(annee >= input$annee) %>%
group_by(annee) %>%
summarise(surface = sum(stot), population = sum(population)) %>%
arrange(annee)
})
output$lines21 <- renderPlotly({
plot_ly()%>%
add_trace(x = popzone()$annee,
y = 100 * popzone()$surface / popzone()$surface[1],
name = 'surface artificialisée par le bâti',
type = 'scatter',
mode = 'lines+markers',
marker = list(color = col2),
line = list(color = col2)
)%>%
# NOTE(review): x here is v$annees while the first trace uses
# popzone()$annee — confirm the two always align
add_trace(x = v$annees,
y = 100 * popzone()$population / popzone()$population[1],
name = 'population',
type = 'scatter',
mode = 'lines+markers',
marker = list(color = col3),
line = list(color = col3)
)%>%
layout(yaxis = list(title = paste('base 100 en ', input$annee, sep = '')),
legend = list(x = 0, y = 1.1))
})
# leaflet map for the reference territory
# base map and legend
gr11 <- "territoire de référence"
output$carteter <- renderLeaflet({
leaflet() %>%
# controls background stacking of the region/department/PNR/EPCI/SCOT layers
addMapPane("tuile_com", zIndex = 300) %>%
# communes layer
addPolygons(
data = v$zone_select,
weight = 1,
color = 'white',
fillColor = 'grey',
fillOpacity = 0,
group = 'communes',
options = pathOptions(pane = "tuile_com")
)%>%
# setView(lng = 6, lat = 47, zoom = 10) %>%
addTiles(group = "OSM") %>%
addLayersControl(
baseGroups = c(i1, i2, i3, i4, i5, i6, i7, i9),
overlayGroups = c("communes", "OSM"),
options = layersControlOptions(collapsed = TRUE)
) %>%
addPolygons(data = req(v$zone_indic),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind1, " : ", round(sartif), " ha"),
fillColor = ~v$pal1(sartif),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i1
) %>%
addPolygons(data = req(v$ind2),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind2, " entre ", input$annee, " et 2017 : ", round(sartif_evo), " ha"),
fillColor = ~v$pal2(sartif_evo),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i2
) %>%
addPolygons(data = req(v$zone_indic),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind3, " : ", round(100 * partif, 1), " %"),
fillColor = ~v$pal3(partif),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i3
) %>%
addPolygons(data = req(v$ind4),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind4, " entre ", input$annee, " et 2017 : ", round(100 * partif_evo, 1), " %"),
fillColor = ~v$pal4(partif_evo),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i4
) %>%
addPolygons(data = req(v$zone_indic),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind5, " : ", round(100 * cos), " %"),
fillColor = ~v$pal5(cos),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i5
) %>%
addPolygons(data = req(v$zone_indic),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind6, " : ", round(sartif_par_hab), " m2"),
fillColor = ~v$pal6(sartif_par_hab),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i6
) %>%
addPolygons(data = req(v$zone_indic),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind7, " : ", round(sartif_par_op), " m2"),
fillColor = ~v$pal7(sartif_par_op),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i7
) %>%
# addPolygons(data = req(v$ind8),
# label = ~nom_com,
# popup = ~paste0("<b>", nom_com, "</b><br>", ind8, " : ", round(sartif_evo_par_op), " m2"),
# fillColor = ~v$pal8(sartif_evo_par_op),
# color = 'white',
# weight = 1,
# fillOpacity = 0.8,
# group = i8
# ) %>%
addPolygons(data = req(v$zone_indic),
label = ~nom_com,
popup = ~paste0("<b>", nom_com, "</b><br>", ind9, " entre 2012 et 2017 : ", round(sartif_evo_men), " ménages"),
fillColor = ~v$pal9(sartif_evo_men),
color = 'white',
weight = 1,
fillOpacity = 0.8,
group = i9
) %>%
addLegend(pal = v$pal1,
values = v$zone_indic$sartif,
position = "bottomright",
title = ind1,
group = i1,
className = paste0("info legend ", i1),
labFormat = labelFormat(suffix = " ha", big.mark = " ")
) %>%
addLegend(pal = v$pal2,
values = v$ind2,
position = "bottomright",
title = ind2,
group = i2,
className = paste0("info legend ", i2),
labFormat = labelFormat(suffix = " ha", big.mark = " ")
) %>%
addLegend(pal = v$pal3,
values = v$zone_indic$partif,
position = "bottomright",
title = ind3,
group = i3,
className = paste0("info legend ", i3),
labFormat = labelFormat(suffix = " %", transform = function(x) 100 * x, big.mark = " ")
) %>%
addLegend(pal = v$pal4,
values = v$ind4,
position = "bottomright",
title = ind4,
group = i4,
className = paste0("info legend ", i4),
labFormat = labelFormat(suffix = " %", transform = function(x) 100 * x, big.mark = " ")
) %>%
addLegend(pal = v$pal5,
values = v$zone_indic$cos,
position = "bottomright",
title = ind5,
group = i5,
className = paste0("info legend ", i5),
labFormat = labelFormat(suffix = " %", transform = function(x) 100 * x, big.mark = " ")
) %>%
addLegend(pal = v$pal6,
values = v$zone_indic$sartif_par_hab,
position = "bottomright",
title = ind6,
group = i6,
className = paste0("info legend ", i6),
labFormat = labelFormat(suffix = " m2", big.mark = " ")
) %>%
addLegend(pal = v$pal7,
values = v$zone_indic$sartif_par_op,
position = "bottomright",
title = ind7,
group = i7,
className = paste0("info legend ", i7),
labFormat = labelFormat(suffix = " m2", big.mark = " ")
) %>%
# addLegend(pal = v$pal8,
# values = v$ind8,
# position = "bottomright",
# title = i8,
# group = i8,
# className = paste0("info legend ", i8),
# labFormat = labelFormat(suffix = " m2", big.mark = " ")
# ) %>%
addLegend(pal = v$pal9,
values = v$zone_indic$sartif_evo_men,
position = "bottomright",
title = ind9,
group = i9,
className = paste0("info legend ", i9),
labFormat = labelFormat(suffix = " ménages", big.mark = " ")
) %>%
hideGroup(c("OSM")) %>%
# helper that shows only the legend of the selected base group: see https://github.com/rstudio/leaflet/issues/477
htmlwidgets::onRender("
function(el, x) {
var updateLegend = function () {
var selectedGroup = document.querySelectorAll('input:checked')[0].nextSibling.innerText.substr(1);
document.querySelectorAll('.legend').forEach(a => a.hidden=true);
document.querySelectorAll('.legend').forEach(l => {
if (l.classList.contains(selectedGroup)) l.hidden=false;
});
};
updateLegend();
this.on('baselayerchange', el => updateLegend());
}"
)
})
# re-attach the legend-toggling JS after any input change redraws the map
observeEvent(c(input$codeinsee, input$annee, input$id_zone), {
leafletProxy("carteter") %>%
htmlwidgets::onRender("
function(el, x) {
var updateLegend = function () {
var selectedGroup = document.querySelectorAll('input:checked')[0].nextSibling.innerText.substr(1);
document.querySelectorAll('.legend').forEach(a => a.hidden=true);
document.querySelectorAll('.legend').forEach(l => {
if (l.classList.contains(selectedGroup)) l.hidden=false;
});
};
updateLegend();
this.on('baselayerchange', el => updateLegend());
}"
)
})
# table of commune-level indicators
output$tableauindic <- renderDataTable(as.data.table(v$zone_indic) %>%
select(c(nom_com, insee_com, sartif, partif, cos, sartif_par_hab, sartif_par_op, sartif_evo_men)))
## COMPARISON OUTPUTS
# Gauge showing the commune value (needle) against the zone's
# distribution: full range in light gray, interquartile range in col2.
gauge_plot <- function(indiv, pop, format = "", suffix = ""){
p <- plot_ly(
domain = list(x = c(0, 1), y = c(0, 1)),
value = indiv,
type = "indicator",
mode = "gauge+number",
gauge = list(
axis = list(
range = list(NULL, max(indiv, max(pop)))
),
bar = list(color = col1),
steps = list(
list(range = c(min(pop), max(pop)), color = 'lightgray'),
list(range = c(quantile(pop, 0.25, na.rm = TRUE), quantile(pop, 0.75, na.rm = TRUE)), color = col2)
)
),
number = list(
valueformat = format,
suffix = suffix
)
) %>%
layout(margin = list(l=20,r=30),
font = list(color = col1))
return(p)
}
output$nomind2 <- renderText({paste0(ind2, " entre ", input$annee, " et 2017")})
output$nomind4 <- renderText({paste0(ind4, " entre ", input$annee, " et 2017")})
output$gauge31 <- renderPlotly({
gauge_plot(indiv = v$com_indic$sartif, pop = v$zone_indic$sartif, suffix = " ha")
})
output$gauge32 <- renderPlotly({
gauge_plot(indiv = as.numeric(v$comind2), pop = v$ind2$sartif_evo, suffix = " ha")
})
output$gauge33 <- renderPlotly({
gauge_plot(indiv = v$com_indic$partif, pop = v$zone_indic$partif, format = ".0%")
})
output$gauge34 <- renderPlotly({
gauge_plot(indiv = as.numeric(v$comind4), pop = v$ind4$partif_evo, format = ".0%")
})
output$gauge35 <- renderPlotly({
gauge_plot(indiv = v$com_indic$cos, pop = v$zone_indic$cos, format = ".0%")
})
output$gauge36 <- renderPlotly({
gauge_plot(indiv = v$com_indic$sartif_par_hab, pop = v$zone_indic$sartif_par_hab, suffix = " m2")
})
output$gauge37 <- renderPlotly({
gauge_plot(indiv = v$com_indic$sartif_par_op, pop = v$zone_indic$sartif_par_op, suffix = " m2")
})
# output$gauge38 <- renderPlotly({
# gauge_plot(indiv = as.numeric(v$comind8), pop = v$ind8$sartif_evo_par_op)
# })
output$gauge39 <- renderPlotly({
gauge_plot(indiv = v$com_indic$sartif_evo_men, pop = v$zone_indic$sartif_evo_men)
})
})
|
ffe26df176388a1e3e43e15e2647007642e19f03
|
fd00641074b6ff353fdda5614cdb58aa9db96569
|
/R/get_bic.R
|
300abcfa475c9d32ac7541e67d0b5e18040ef813
|
[] |
no_license
|
princyparsana/myutils
|
429b665570cf18460af32ffcc9c1cafbf7f25bd4
|
23add18b69919073483aa2aa88641e7b1fbc366d
|
refs/heads/master
| 2022-07-19T12:58:13.326598
| 2020-05-21T13:10:36
| 2020-05-21T13:10:36
| 265,600,494
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 155
|
r
|
get_bic.R
|
# Compute the Bayesian Information Criterion (BIC) of a fitted model.
#
# BIC = k * log(n) - 2 * logLik, where k is the number of estimated
# parameters and n is the number of observations. Lower values indicate
# a better fit/complexity trade-off.
#
# Args:
#   ll:          log-likelihood of the fitted model (numeric scalar).
#   num_params:  number of estimated parameters, k.
#   num_samples: number of observations used in the fit, n.
#
# Returns:
#   The BIC value (numeric).
get_bic <- function(ll, num_params, num_samples) {
  # Complexity penalty grows with both parameter count and sample size.
  penalty <- num_params * log(num_samples)
  penalty - 2 * ll
}
|
4b729c260cf156c487a5ac4d54ca25538ae12e60
|
7561c7a2bda45a2cbcccd9e22cc4135ac1e711bc
|
/try_t_squared_q_statistic.R
|
13804d8a9d81ab0567a3269b2871ff6a90c50b41
|
[] |
no_license
|
a30123/R
|
10cc9e071f75e55a96320e5b078f102d83c86a23
|
6f5280874eb5c0c7eeb9ebaf1299a77bfbc822af
|
refs/heads/master
| 2020-05-17T15:24:01.282712
| 2016-01-29T07:01:22
| 2016-01-29T07:01:22
| 34,438,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,425
|
r
|
try_t_squared_q_statistic.R
|
### created date: July 2 2015
### last modified date:
### author:A30123
### description: PCA followed by Hotelling T-squared / Q statistics.
### Fits a PCA model on the first 200 "normal" rows of a feature table,
### then scores the remaining rows against that model.

# ---- Initializing ----
my_data_path<-"C:/Users/A30123.ITRI/Documents/R scripts/New for event mining/Try_Handmade_Tsquared_Q/features/output.csv"

# ---- Main program ----
#Read in the data
all_data<-read.csv(my_data_path)
# Training set: first 200 rows are treated as normal; everything after
# row 200 is held out for scoring.
my_data<-all_data[1:200,]
my_data_testing<-all_data[201:length(all_data[,1]),]
# Mean and standard deviation of the normal (training) data; these are
# reused so the test rows are standardized on the training scale.
means<-colMeans(my_data)
stds<-apply(my_data,2,sd)
# Quick visual check of the first ten features of the training rows.
my_data2<-all_data[1:200,1:10]
pairs(my_data2)
# Standardize each column (training: own mean/sd; testing: training mean/sd).
standardized_my_data<-scale(my_data,center=TRUE,scale=TRUE)
standardized_my_data_testing<-scale(my_data_testing,center=means,scale=stds)
# Rebuild as a plain matrix with one row per training observation.
coco<-matrix(standardized_my_data,length(my_data[,1]))
#coco<-as.matrix(standardized_my_data)
# NOTE(review): `cov` masks stats::cov here; it holds X'X of the
# standardized data (an unnormalised covariance matrix).
cov<-t(coco)%*%coco
eigs<-eigen(cov,symmetric=TRUE,only.values=FALSE)
#eigs$vectors
#eigs$values
# ---- PCA ----
### watch this:https://www.youtube.com/watch?v=Heh7Nv4qimU
pca_result<-princomp(my_data,scores=TRUE,cor=TRUE)
plot(pca_result)
biplot(pca_result)
pca_result$loadings
pca_result$scores
# PCA on the pre-standardized data gives exactly the same result,
# because cor=TRUE already standardizes internally.
pca_result2<-princomp(standardized_my_data,scores=TRUE,cor=TRUE)
plot(pca_result2)
# ---- Keep the top ten principal components ----
Pmatrix=pca_result$loadings[,1:10]
Tmatrix=pca_result$scores[,1:10]
# ---- Calculate T squared ----
# Project test rows onto the 10-PC subspace, then take column norms.
# NOTE(review): this uses the raw `my_data_testing`, not
# `standardized_my_data_testing` -- presumably the standardized matrix
# was intended; confirm before trusting these scores.
pret=Pmatrix%*%t(Pmatrix)%*%t(my_data_testing)
g<-pret*pret
kit<-apply(g,2,sum)
final<-sqrt(kit)
# ---- Calculate Q statistics (not implemented) ----
cbec5d45260da33a4ce6d74307ad2c7376835542
|
a8d33ed85325b4d55d72f14cf2868906af2a18bf
|
/AnovaAssumptions.R
|
e42cae1cc0e627a1b7fc58670fc66dc52b43dfdc
|
[] |
no_license
|
sciencecasey/coursera_sd_analysisdesign
|
b2d82bafb54601e8204768814861032638fc29c3
|
2d993fa3087e705f94c6f6ea67a9251c4f8f87a9
|
refs/heads/master
| 2020-05-16T20:55:14.101406
| 2019-04-25T19:48:37
| 2019-04-25T19:48:37
| 183,291,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,842
|
r
|
AnovaAssumptions.R
|
# ANOVA assumption checks for the IDE completion-time data.
# Assumptions: independence, normality, homoscedasticity (similar variance).
# ANOVA cares about the normality of the *residuals*: if the residuals are
# normally distributed, the departure is not systematic. If you can't
# manipulate a confound, try to control for it; if you can't control it,
# record/measure it.
# NOTE(review): assumes a data frame `ide2` with columns `IDE`
# ("VStudio"/"Eclipse") and `Time` is already loaded -- confirm upstream.

# Eyeball the per-group distributions.
hist(ide2[ide2$IDE=="VStudio",]$Time)
hist(ide2[ide2$IDE=="Eclipse",]$Time)
# Shapiro-Wilk normality tests per group
# (a significant p-value suggests a significant departure from normal).
shapiro.test(ide2[ide2$IDE=="VStudio",]$Time)
shapiro.test(ide2[ide2$IDE=="Eclipse",]$Time)
# Fit a one-way ANOVA and test the residual assumptions.
m <- aov(Time~IDE, data=ide2)
shapiro.test(residuals(m)) # test the residuals
qqnorm(residuals(m)); qqline(residuals(m)) # plot residuals; look for deviation
# Kolmogorov-Smirnov test against a lognormal, since normality failed:
# estimate meanlog/sdlog via fitdistr(), then test with those parameters.
library(MASS)
# BUG FIX: the original subscripts used a single `=`
# (ide2[ide2$IDE="VStudio",]), which is a syntax error inside `[`;
# they must be equality comparisons `==`.
fit <- fitdistr(ide2[ide2$IDE=="VStudio",]$Time, "lognormal")$estimate
ks.test(ide2[ide2$IDE=="VStudio",]$Time, "plnorm", meanlog=fit[1], sdlog=fit[2], exact=TRUE)
fit <- fitdistr(ide2[ide2$IDE=="Eclipse",]$Time, "lognormal")$estimate
ks.test(ide2[ide2$IDE=="Eclipse",]$Time, "plnorm", meanlog=fit[1], sdlog=fit[2], exact=TRUE)
# Homoscedasticity checks.
library(car)
# Levene test centered on the mean...
leveneTest(Time ~IDE, data=ide2, center=mean)
# ...and the Brown-Forsythe variant (median-centered, more outlier-robust).
leveneTest(Time~IDE, data=ide2, center=median)
# Welch t-test handles unequal variances (var.equal=FALSE),
# but it won't solve a violation of normality.
t.test(Time~IDE, data=ide2, var.equal=FALSE)
## Transform the data so it conforms to the normality assumption
## (the raw times looked log-normally distributed).
ide2$logTime <- log(ide2$Time)
# View the histograms to see if the log scale looks more normal.
hist(ide2[ide2$IDE=="VStudio",]$logTime)
hist(ide2[ide2$IDE=="Eclipse",]$logTime)
# Re-test normality on the log scale
# (a non-significant p-value is consistent with normality).
shapiro.test(ide2[ide2$IDE=="VStudio",]$logTime)
shapiro.test(ide2[ide2$IDE=="Eclipse",]$logTime)
m <- aov(logTime~IDE, data=ide2)
shapiro.test(residuals(m)) # test the residuals
qqnorm(residuals(m)); qqline(residuals(m))
# Homoscedasticity of logTime.
leveneTest(logTime ~IDE, data=ide2, center=mean)
leveneTest(logTime~IDE, data=ide2, center=median)
# Equal-variance t-test on the transformed response.
t.test(logTime~IDE, data=ide2, var.equal=TRUE)
|
6aab5a072f32d19e493d29b6de26347d0265ef36
|
be3b8e6a43724fa902cd769a582f5b53f713ffbe
|
/plot2.R
|
5f10c5ce066e49e63c2b16918dd1bf6283ce0016
|
[] |
no_license
|
amberv0/ExData_Plotting1
|
d5ce0354d77d946a614e5fc4626dfc55c86ed37b
|
e6d1bdc4f8a0f26f5952c3fee86d24a619de70ba
|
refs/heads/master
| 2021-01-18T03:08:01.019204
| 2015-11-05T11:21:45
| 2015-11-05T11:21:45
| 45,569,163
| 0
| 0
| null | 2015-11-04T21:38:16
| 2015-11-04T21:38:15
| null |
UTF-8
|
R
| false
| false
| 558
|
r
|
plot2.R
|
# Plot 2: Global active power over 1-2 Feb 2007, saved to plot2.png
# (480x480 px, transparent background).

# Register a "myDate" class so read.csv can parse the d/m/Y Date column
# directly through colClasses.
setClass('myDate')
setAs("character", "myDate", function(from) as.Date(from, format="%d/%m/%Y"))

# Read the full data set ("?" marks missing values), then keep only the
# two target days.
consumption <- read.csv("./household_power_consumption.txt", sep = ";",
                        na.strings = "?", colClasses = c("Date" = "myDate"))
data <- subset(consumption, Date >= "2007/02/01" & Date <= "2007/02/02")

# Build full timestamps from the separate Date and Time columns.
datetime <- as.POSIXct(paste(data$Date, data$Time),
                       format = "%Y-%m-%d %H:%M:%S")

# Render the line chart.
png(filename = "plot2.png", width = 480, height = 480, units = "px",
    bg = "transparent")
plot(datetime, data$Global_active_power,
     ylab = "Global Active Power (kilowatts)", xlab = "", type = "l")
dev.off()
|
832a236e31756eb926ea937b26279e0487fcded4
|
0bdeee3b1ab83ec1d6cf5e437353a46a7c910292
|
/svgSlices2Fluoview.R
|
a841da0e3815b50e5679a900eae76a61ca00ba48
|
[] |
no_license
|
cmandrycky/2P-Ablation_Fluoview-Conversion
|
ea707cd6a84ab0538f208b863403513d2078e683
|
6740b2fb33f393993ebbf8cb98e9b696379a07e9
|
refs/heads/master
| 2022-11-13T09:01:24.089392
| 2020-07-09T08:02:47
| 2020-07-09T08:02:47
| 267,980,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,343
|
r
|
svgSlices2Fluoview.R
|
# Convert a folder of per-slice SVG files (one <polygon> per ablation ROI)
# into an Olympus Fluoview protocol database (.mdb).
#
# Every polygon found in the SVGs becomes a new TaskID appended to the
# Fluoview imaging tables (clip geometry, timing bar, channel/laser/scan
# settings), cloned from the prototype row (TaskID == 1). All polygons in
# one SVG file share a Z position; Z advances by zStep between files.
#
# Args:
#   zStep:      Z spacing between consecutive SVG slices
#               (multiplied by 1000 before storage -- units per the
#               Fluoview DB convention; TODO confirm).
#   numRepeats: scan repeats per frame; feeds the frame-time estimate
#               and the FilterMode fields.
#   startingZ:  Z position of the first slice (same units as zStep).
#
# Side effects: interactive folder pickers (Windows), copies a template
# DB file, and appends rows over an ODBC connection (MS Access driver).
svgSlices2Fluoview <- function(zStep = 1, numRepeats = 1, startingZ = 0){
library(XML)
library(dplyr)
library(tidyverse)
library(dplyr)
library(RODBC)
# Count every <polygon> across all SVGs in svgDir. One DB task is created
# per polygon, so this bounds the protocol size up front.
polygonEstimate <- function(svgDir){
filenames <- paste(svgDir,"\\", list.files(path = svgDir,pattern = ".svg"), sep="")
counter <- 0
for (file in filenames) {
svg <- readChar(file, file.info(file)$size)
doc <- htmlParse(svg)
p <- xpathSApply(doc, "//polygon", xmlGetAttr, "points")
counter <- counter + length(p)
}
return(counter)
}
# Turn one ROI's 2-column vertex matrix into a data frame of integer
# pixel coordinates tagged with the shape's index.
GetXY <- function(listIndex, listValue) {
#Conversion factor for microscope microns to pixels
micro2Pix <- 2.01574803
data.frame(listValue) %>%
rename(X = X1, Y = X2) %>%
mutate(ShapeNum = listIndex,
X = floor(X*micro2Pix),
Y = floor(Y*micro2Pix)) %>%
select(ShapeNum, X, Y)
}
# Returns estimated frame time in ms for a w x h pixel clip, clamped to
# [100, 3300] and padded by 10% otherwise. Constants are empirical.
frameTimeEstimate <- function(w,h){
#Returns estimated frame time in (ms)
frame <- 20.416 + numRepeats*h*(w*.00197712+1.10997349)
if(frame < 100){
return(100)
} else if(frame > 3300) {
return(3300)
} else {
return(round(frame*1.10))
}
}
#Make a copy of the original db file and place it in user defined directory. Name will be based on svg directory file name
defaultDBpath <- c("C:\\PathToDefaultDB")
svgDirectory <- choose.dir(caption = "Select SVG slices folder")
# Abort early if the polygon count exceeds the single-protocol limit.
numPolygons <- polygonEstimate(svgDirectory)
if(numPolygons > 2999){
stop(paste("Too many polygons for single protocol file. Break into smaller segments:",numPolygons))
} else{
print(paste("Number of polygons:", numPolygons))
}
#second line names the new DB file after the svgDirectory
dbSaveToPath <- paste(choose.dir(caption = "Where do you want to save the DB file to?"),
"\\",sapply(strsplit(svgDirectory, "\\\\"), tail, 1),".mdb",
sep = "")
file.copy(defaultDBpath,
dbSaveToPath,
copy.mode = TRUE,
copy.date = FALSE)
filenames <- paste(svgDirectory,"\\", list.files(path = svgDirectory,pattern = ".svg"), sep="")
rpCounter = 2 #first TaskID is a prototype
zStepSize = zStep*1000 #micron
zHeight = startingZ*1000 #micron
# Open the freshly copied Access DB and pull the existing tables.
# New rows are accumulated in these data frames and written back at the end.
dbName <- paste("Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=",dbSaveToPath,sep = "")
db <- odbcDriverConnect(dbName)
#editing
taskClip <- sqlQuery( db , paste("select * from IMAGING_TASK_CLIP"))
taskInfo <- sqlQuery( db , paste("select * from IMAGING_TASK_INFO"))
#duplicating
channelInfo <- sqlQuery( db , paste("select * from IMAGING_CHANNEL_INFO"))
taskLaser <- sqlQuery( db , paste("select * from IMAGING_TASK_LASER"))
taskMatlinfo <- sqlQuery( db , paste("select * from IMAGING_TASK_MATLINFO"))
taskScaninfo <- sqlQuery( db , paste("select * from IMAGING_TASK_SCANINFO"))
taskScanrange <- sqlQuery( db , paste("select * from IMAGING_TASK_SCANRANGE"))
#different spelling: this table uses TASKID rather than TaskID
taskBar <- sqlQuery( db , paste("select * from IMAGING_TASK_BAR"))
# Main loop: one SVG file per Z slice, one DB task per polygon in it.
for (file in filenames) {
svg <- readChar(file, file.info(file)$size)
doc <- htmlParse(svg)
p <- xpathSApply(doc, "//polygon", xmlGetAttr, "points")
if(is_empty(p)){
#do nothing -- slice has no ROIs, but Z still advances below
} else {
# Convert "x1,y1 x2,y2 ..." point strings into numeric vertex matrices.
roiList <- lapply( strsplit(p, " "), function(u)
matrix(as.numeric(unlist(strsplit(u, ","))),ncol=2,byrow=TRUE) )
Output <- roiList %>%
imap_dfr(~ GetXY(.y, .x))
# Clamp all coordinates into the 1024 x 1024 scan field.
Output$X[Output$X > 1023] <- 1023
Output$Y[Output$Y > 1023] <- 1023
Output$X[Output$X < 0] <- 0
Output$Y[Output$Y < 0] <- 0
for(i in c(1:length(roiList))){
activeROI <- filter(Output, ShapeNum == i)
#Make string of X and Y coordinate separated by commas. Can only be integer values
xStr <- paste(activeROI$X, sep="", collapse=",")
yStr <- paste(activeROI$Y, sep="", collapse=",")
# Bounding box of the polygon (DB stores origin + extent).
polyWidth <- max(activeROI$X) - min(activeROI$X) + 1
polyHeight <- max(activeROI$Y) - min(activeROI$Y) + 1
polyXCoord <- min(activeROI$X)
polyYCoord <- min(activeROI$Y)
#taskClip: clone prototype row, overwrite with this polygon's geometry
taskClipToAdd <- taskClip %>%
filter(TaskID == 1) %>%
mutate(TaskID = rpCounter,
IndexName = 1,
IndexID = '1S',
Shape = 6,
X = polyXCoord,
Y = polyYCoord,
Width = polyWidth,
Height = polyHeight,
XCnt = length(activeROI$X),
YCnt = length(activeROI$Y),
XList = xStr,
YList = yStr)
taskClip <- rbind(taskClip,taskClipToAdd)
#taskInfo: Z position and repeat settings for this task
#repeats enable/disable and set number -
#modecheck = 0 even when enabled?
#Line mode = 2
taskInfoToAdd <- taskInfo %>%
filter(TaskID == 1) %>%
mutate(TaskID = rpCounter,
ZPos = zHeight,
NumberOfClip = 1,
FilterModeLineFrame = if(numRepeats > 1){0} else {2},
FilterModeNum = numRepeats)
taskInfo <- rbind(taskInfo,taskInfoToAdd)
#taskBar: schedule this task after the previous task's terminate phase
#need to figure out minimum time for RPdelay
betweenRPdelay <- 100 #ms
frameTime <- frameTimeEstimate(polyWidth,polyHeight) #full frame = 3300 ms
terminateDuration <- 300 #ms
mts <- taskBar$TerminateEnd[rpCounter]+betweenRPdelay
mte <- mts+frameTime
ps <- taskBar$TerminateEnd[rpCounter]
pe <- mts
ts <- mte
te <- ts+terminateDuration
taskBarToAdd <- taskBar %>%
filter(TASKID == 1) %>%
mutate(TASKID = rpCounter,
MainTimeStart = mts,
MainTimeEnd = mte,
PrepareStart = ps,
PrepareEnd = pe,
TerminateStart = ts,
TerminateEnd = te)
taskBar <- rbind(taskBar,taskBarToAdd)
#channelInfo: straight clone of the prototype row
channelInfoToAdd <- channelInfo %>%
filter(TaskID == 1) %>%
mutate(TaskID = rpCounter)
channelInfo <- rbind(channelInfo,channelInfoToAdd)
#taskLaser: straight clone of the prototype row
taskLaserToAdd <- taskLaser %>%
filter(TaskID == 1) %>%
mutate(TaskID = rpCounter)
taskLaser <- rbind(taskLaser,taskLaserToAdd)
#taskScaninfo: scan size follows the clip's bounding box (with padding),
#capped at the 1024-pixel field
taskScaninfoToAdd <- taskScaninfo %>%
filter(TaskID == 1) %>%
mutate(TaskID = rpCounter,
ClipScanSizeX = if(taskClip$Width[rpCounter] < 1023){taskClip$Width[rpCounter] + 1} else {1024},
ClipScanSizeY = if(taskClip$Height[rpCounter] < 1023){taskClip$Height[rpCounter] + 3} else {1024})
taskScaninfo <- rbind(taskScaninfo,taskScaninfoToAdd)
#taskMatlinfo: indices scale with the scan size (empirical 4.9697 factor)
taskMatlinfoToAdd <- taskMatlinfo %>%
filter(TaskID == 1) %>%
mutate(TaskID = rpCounter,
Num = rpCounter,
XIndex = round(4.9697*taskScaninfo$ClipScanSizeX[rpCounter]),
YIndex = round(4.9697*taskScaninfo$ClipScanSizeY[rpCounter]))
taskMatlinfo <- rbind(taskMatlinfo,taskMatlinfoToAdd)
#taskScanrange: straight clone of the prototype row
taskScanrangeToAdd <- taskScanrange %>%
filter(TaskID == 1) %>%
mutate(TaskID = rpCounter)
taskScanrange <- rbind(taskScanrange,taskScanrangeToAdd)
rpCounter = rpCounter + 1
}
}
# Next SVG file sits one Z step deeper.
zHeight = zHeight + zStepSize
}
#important, updates end time in db
updateString <- paste("UPDATE OTHERITEMS_INITVAL SET DataValue = ",
taskBar$TerminateEnd[dim(taskBar)[1]],
" WHERE PropertyLeafName = 'EndTime'",sep = "")
sqlQuery( db , updateString)
# Append all newly built rows (TaskID > 1) back into the DB tables.
sqlSave(db, filter(taskClip,TaskID > 1), tablename = "IMAGING_TASK_CLIP", append = TRUE, rownames = FALSE)
sqlSave(db, filter(taskInfo,TaskID > 1), tablename = "IMAGING_TASK_INFO", append = TRUE, rownames = FALSE)
sqlSave(db, filter(channelInfo,TaskID > 1), tablename = "IMAGING_CHANNEL_INFO", append = TRUE, rownames = FALSE)
sqlSave(db, filter(taskLaser,TaskID > 1), tablename = "IMAGING_TASK_LASER", append = TRUE, rownames = FALSE)
sqlSave(db, filter(taskMatlinfo,TaskID > 1), tablename = "IMAGING_TASK_MATLINFO", append = TRUE, rownames = FALSE)
sqlSave(db, filter(taskScaninfo,TaskID > 1), tablename = "IMAGING_TASK_SCANINFO", append = TRUE, rownames = FALSE)
sqlSave(db, filter(taskScanrange,TaskID > 1), tablename = "IMAGING_TASK_SCANRANGE", append = TRUE, rownames = FALSE)
sqlSave(db, filter(taskBar,TASKID > 1), tablename = "IMAGING_TASK_BAR", append = TRUE, rownames = FALSE)
odbcCloseAll()
# Report a rough protocol duration: the scheduled end time, or a 2 s/polygon
# floor, whichever is larger.
estimatedTime <- if(taskBar$TerminateEnd[dim(taskBar)[1]] > 2000*numPolygons){
taskBar$TerminateEnd[dim(taskBar)[1]]
} else{
2000*numPolygons
}
print(paste("Estimated protocol time:", estimatedTime/60/1000, "min"))
}
|
2e193657c675bb89bc46ad668a456d9aaef662e3
|
a1fe2793a1d70c1964a3ae67b308e7ff9fff9869
|
/man/datasets.Rd
|
d507f65277531b1b1bcb43d60995c16a87bd188d
|
[] |
no_license
|
cran/Mercator
|
352ed35b93e44c19c5716c4d974396a45894a882
|
43afd21c8ab08019a1699823b85ffd155c1ded42
|
refs/heads/master
| 2022-07-17T22:22:08.557479
| 2022-06-30T15:20:02
| 2022-06-30T15:20:02
| 236,624,878
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,135
|
rd
|
datasets.Rd
|
\name{mercator-data}
\alias{mercator-data}
\alias{CML500}
\alias{CML1000}
\alias{lgfFeatures}
\alias{fakedata}
\alias{fakeclin}
\docType{data}
\title{CML Cytogenetic Data}
\description{
These data sets contain binary versions of subsets of cytogenetic
karyotype data from patients with chronic myelogenous leukemia (CML).
}
\usage{
data("lgfFeatures")
data("CML500")
data("CML1000")
data("fakedata") # includes "fakeclin"
}
\format{
\describe{
\item{\code{lgfFeatures}}{A data matrix with 2748 rows and 6 columns
listing the cytogentic bands produced as output of the CytoGPS
algorithm that converts text-based karyotypes into a binary
Loss-Gain-Fusion (LGF) model. The columns include the \code{Label}
(the Type and Band, joined by an underscore),
\code{Type} (Loss, Gain, or Fusion), \code{Band} (standard name of
the cytogenetic band), \code{Chr} (chromosome), \code{Arm} (the
chromsome arm, of the form #p or #q), and \code{Index} (an integer
that can be used for sorting or indexing).
}
\item{\code{CML500}}{A \code{\link{BinaryMatrix}} object with 770
rows (subset of LGF features) and 511 columns (patients). The
patients were selected using the \code{\link{downsample}} function
from the full set of more than 3000 CML karyotypes. The rows were
selected by removing redundant and non-informative features when
considering the full data set.}
\item{\code{CML1000}}{A \code{\link{BinaryMatrix}} object with 770
rows (subset of LGF features) and 1057 columns (patients). The
patients were selected using the \code{\link{downsample}} function
from the full set of more than 3000 CML karyotypes. The rows were
selected by removing redundant and non-informative features when
considering the full data set.}
\item{\code{fakedata}}{A matrix with 776 rows ("features") and 300
columns ("samples") containing synthetic continuous data.}
\item{\code{fakeclin}}{A data frame with 300 rows ("samples") and 4
columns of synthetic clinical data related to the \code{fakedata}.}
}
}
\source{
The cytogenetic data were obtained from the public Mitelman Database
of Chromosomal Aberrations and Gene Fusions in Cancer on 4 April
2019. The database is currently located at
https://cgap.nci.nih.gov/Chromosomes/Mitelman as part of hte
Cancer Genome Anatomy Project (CGAP). The CGAP web site is expected to
close on 1 October 2019 at which point the Mitelman database will
move to an as-yet-undisclosed location. The data were then converted
from text-based karyotypes into binary vectors using CytoGPS
\url{http://cytogps.org/}.
}
\references{
Abrams ZB, Zhang L, Abruzzo LV, Heerema NA, Li S, Dillon T, Rodriguez
R, Coombes KR, Payne PRO. CytoGPS: A Web-Enabled Karyotype Analysis
Tool for Cytogenetics. Bioinformatics. 2019 Jul 2. pii: btz520. doi:
10.1093/bioinformatics/btz520. [Epub ahead of print]
}
\author{
Kevin R. Coombes <krc@silicovore.com>, Caitlin E. Coombes
}
\keyword{datasets}
|
22318895aecc92cbc87bec2883ea37a41ac0a4db
|
6035aa32d55f399450bc4a094f863727b56b6b75
|
/copy files to new folders HELPER.R
|
2a5d3a844e52b9d02fdca2031f21245d0bcb08a3
|
[] |
no_license
|
Bartesto/helper_functions
|
3401138eaa50e18fecae05c24761c1af44d81d04
|
3abca67eeb801f429f96aa14434dbeb4b263de31
|
refs/heads/master
| 2021-01-17T11:29:51.307363
| 2016-02-26T05:31:39
| 2016-02-26T05:31:39
| 33,292,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,018
|
r
|
copy files to new folders HELPER.R
|
# One-off helper: for every sub-directory of `dir`, create a sibling
# directory named after characters 8-15 of the source directory's name
# and copy the source's files into it.
rm(list = ls(all = TRUE)) # Clears workspace if required (deliberate: ad-hoc script)
dir <- "W:\\cube_imagery\\110078"
setwd(dir)

# List immediate sub-directories of `path`.
# (Intentionally masks base::list.dirs to add pattern/basename options.)
# Args:
#   path:        directory to scan.
#   pattern:     optional regex passed to list.files().
#   all.dirs:    include hidden entries?
#   full.names:  return full paths (TRUE) or bare directory names (FALSE)?
#   ignore.case: case-insensitive pattern matching?
list.dirs <- function(path=".", pattern=NULL, all.dirs=FALSE,
                      full.names=FALSE, ignore.case=FALSE) {
  # use full.names=TRUE internally so file.info() can resolve each entry
  all <- list.files(path, pattern, all.dirs,
                    full.names=TRUE, recursive=FALSE, ignore.case)
  dirs <- all[file.info(all)$isdir]
  # determine whether to return full names or just dir names
  if (isTRUE(full.names)) {
    dirs
  } else {
    basename(dirs)
  }
}

afiles <- list.dirs()
# Target folder name = characters 8-15 of each source folder name.
folds <- substr(afiles, 8, 15)

# seq_along() (not 1:length()) so an empty directory list is a no-op.
for (i in seq_along(folds)) {
  if (!file.exists(folds[i])) {
    dir.create(folds[i])
  }
}

# Copy every file from each source folder into its target folder.
for (j in seq_along(folds)) {
  dir.i <- paste0(dir, "\\", afiles[j])
  from.i <- paste0(afiles[j], "\\", list.files(dir.i))
  to.i <- folds[j]
  file.copy(from = from.i, to = to.i, recursive = FALSE, overwrite = TRUE, copy.mode = TRUE)
}
|
319a0e301f1acc0832df2adf131bd5f7a75e85f6
|
a2140f9355037bda5b04d9776c156482e0c3438a
|
/Data Mining/FinalExam/Q8.R
|
1382b1fa3227d48e1dbe6aefd229369603dc5f87
|
[] |
no_license
|
Manideepnetha/DataScience_2019501111
|
e990b11761185a4945b2b9f9e1bb9bf21fff012a
|
cdac5133fd5fdef765725d889e55eaa0e89d3468
|
refs/heads/master
| 2023-04-08T08:25:56.558274
| 2021-04-23T11:20:27
| 2021-04-23T11:20:27
| 295,368,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 805
|
r
|
Q8.R
|
# Load the BSE Sensex index series; the Date column arrives as m/d/Y text.
data = read.csv("BSE_Sensex_Index.csv", header = TRUE)
# Interactive inspection (viewer + summary printout).
View(data)
summary(data)
# Parse the Date column into Date objects.
data$Date = as.Date(data$Date, format='%m/%d/%Y')
# Convert a numeric series into successive relative differences:
# out[i] = (x[i] - x[i+1]) / x[i+1].
#
# The final element has no successor, so it is imputed as the mean of the
# three preceding growth rates (same final result as the original code,
# which looped one past the end and relied on x[n + 1] returning NA).
#
# Args:
#   x: numeric vector with at least 4 elements.
# Returns:
#   Numeric vector of the same length as x.
successive_difference <- function(x) {
  n <- length(x)
  # Only positions 1..n-1 have a successor; stopping at n - 1 avoids the
  # original out-of-bounds read of x[n + 1].
  for (i in seq_len(n - 1)) {
    x[i] <- (x[i] - x[i + 1]) / x[i + 1]
  }
  # Impute the last growth rate from the three preceding (already
  # converted) growth rates.
  x[n] <- (x[n - 1] + x[n - 2] + x[n - 3]) / 3
  return(x)
}
# Growth rate of the closing price between consecutive rows.
# NOTE(review): successive_difference() compares each value with the *next*
# row, which assumes rows are ordered newest-first -- confirm the CSV order.
data$successive_growth <- successive_difference(data$Close)
# Standardise the growth rates into z-scores.
sgrmean <- mean(data$successive_growth, na.rm=TRUE)
sgrsd <- sd(data$successive_growth,na.rm=TRUE)
z<-(data$successive_growth - sgrmean) / sgrsd
# NOTE(review): sort(z) only prints the ordered z-scores; its result is
# discarded and `z` itself stays in row order for the column bind below.
sort(z)
data$zscores <- z
# Outliers: dates whose growth z-score is at least 3 in magnitude.
dates<-subset(data[,1],data[,"zscores"] >= 3.0 | data[,"zscores"] <= -3.0)
View(dates)
write.csv(dates, "OutliersDatesData_.csv", quote = FALSE, row.names = TRUE)
|
f71d2af1385f1f90f1fbede0693487fadabbe3b6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/huge/examples/huge.select.Rd.R
|
70fa92136e7a69f8552dbc1ffd28d24ded0b55be
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
huge.select.Rd.R
|
# Auto-extracted example for huge::huge.select.
library(huge)
### Name: huge.select
### Title: Model selection for high-dimensional undirected graph estimation
### Aliases: huge.select
### ** Examples
# Generate synthetic hub-structured data (20 variables) and fit three
# estimators: Meinshausen-Buhlmann (default), correlation thresholding,
# and the graphical lasso.
L = huge.generator(d = 20, graph="hub")
out.mb = huge(L$data)
out.ct = huge(L$data, method = "ct")
out.glasso = huge(L$data, method = "glasso")
# Model selection using RIC (the package default criterion).
out.select = huge.select(out.mb)
plot(out.select)
# Model selection using StARS (left commented out: subsampling makes it slow).
#out.select = huge.select(out.ct, criterion = "stars", stars.thresh = 0.05,rep.num=10)
#plot(out.select)
# Model selection using EBIC on the graphical-lasso fit.
out.select = huge.select(out.glasso,criterion = "ebic")
plot(out.select)
|
35f8efe3b0e1722008cb492cd138c8dc1ccb3e8e
|
711ecaf1584eece22a9d64a2ab625eb8ddd5e450
|
/data/Wang2000/dataManipulate.R
|
2065cf916af56271955d7440a78f239efeff0539
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
hrlai/baad
|
c23aac92900805c38a26b543c83c1e94739f59fb
|
9e7c976f3a9df582ee44ce8f48d0a90e94e34094
|
refs/heads/master
| 2020-08-29T23:36:38.214071
| 2019-10-31T03:59:18
| 2019-10-31T03:59:18
| 218,203,662
| 0
| 0
|
NOASSERTION
| 2019-10-29T04:23:21
| 2019-10-29T04:23:20
| null |
UTF-8
|
R
| false
| false
| 342
|
r
|
dataManipulate.R
|
# Clean one raw Wang (2000) table for ingestion:
#  * m.rc         : large + medium root mass; a zero total is treated as
#                   missing rather than a true zero.
#  * "Stem Wood"  : folded together with "Stem Bark" into total stem mass.
#  * "D.B.H.(mm)" : zeros become NA (a zero dbh is possible when the crown
#                   base height h.c is below breast height, ~1.3-1.4 m).
manipulate <- function(raw) {
  coarse_root <- raw[["Lrg. Root"]] + raw[["Med. Root"]]
  coarse_root[coarse_root == 0] <- NA
  raw$m.rc <- coarse_root
  raw[["Stem Wood"]] <- raw[["Stem Wood"]] + raw[["Stem Bark"]]
  zero_dbh <- raw[["D.B.H.(mm)"]] == 0
  raw[["D.B.H.(mm)"]][zero_dbh] <- NA
  raw
}
|
1a45ee3d992abbededd5169da5d946dac8b443d5
|
96fce738d0c420b8556aaa765080b08253f2cc3e
|
/Plot3_1.R
|
ad545c6011c2cc7e949328395e63e025a53865a4
|
[] |
no_license
|
MontseFigueiro/ExData_Plotting1
|
19b746ccbb5e8ebfbdb65f8d6c91fd1f4634e4fd
|
aaeddc5c68930360b191505e87c731ac09f5c30c
|
refs/heads/master
| 2021-01-22T16:18:56.507346
| 2016-05-04T16:45:44
| 2016-05-04T16:45:44
| 57,129,914
| 0
| 0
| null | 2016-04-26T13:21:52
| 2016-04-26T13:21:52
| null |
UTF-8
|
R
| false
| false
| 937
|
r
|
Plot3_1.R
|
# Plot 3: the three energy sub-metering series for 2007-02-01/02, drawn as
# overlaid lines and saved to "Plot3_3.png".
# NOTE(review): the output filename "Plot3_3.png" does not match the script
# name (Plot3_1.R) -- confirm which name the deliverable expects.

# Read the full data set (";"-separated, "." decimal marks).
Data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
                 stringsAsFactors = FALSE, dec = ".")
Data$Date <- as.Date(Data$Date, "%d/%m/%Y")
Data$Global_active_power <- as.numeric(Data$Global_active_power, na.rm = TRUE)

# Keep only the two days of interest, and record the weekday name.
day1 <- Data[Data$Date == "2007-02-01" | Data$Date == "2007-02-02", ]
day1$dayweek <- weekdays(as.Date(day1$Date))

# Combine Date and Time into POSIXct timestamps for the x axis.
datetime <- paste(as.Date(day1$Date), day1$Time)
day1$Datetime <- as.POSIXct(datetime)

submet1 <- as.numeric(day1$Sub_metering_1)
submet2 <- as.numeric(day1$Sub_metering_2)
submet3 <- as.numeric(day1$Sub_metering_3)

png("Plot3_3.png")
plot(day1$Datetime, submet1, type = "l", xlab = "", ylab = "Energy Submetering", main = "")
lines(day1$Datetime, submet2, type = "l", col = "red")
lines(day1$Datetime, submet3, type = "l", col = "blue")
# BUG FIX: the original passed an empty named argument ("lty=,"); make the
# solid line type explicit so the legend keys match the plotted lines
# (legend() defaults a missing lty to 1 when drawing line keys anyway).
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"), bty = "o")
dev.off()
|
3cedfce65fd3081588dbb33f3f854d59fb0269dd
|
cc1e703a2fc5c661de3a03c87063b9527321cbc3
|
/scripts/plot_ebola_obs.R
|
af00d5deddc917fcc3c847b98b62c279abfe8c41
|
[] |
no_license
|
jclifton333/stdmMf_cpp
|
7f6d268ca193300f56efe168885107033c4a92e7
|
d10dbacb80ce1588c9d39a8ae3e3dc41444a37fe
|
refs/heads/master
| 2021-04-09T16:36:00.801764
| 2017-10-11T23:37:10
| 2017-10-11T23:37:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,094
|
r
|
plot_ebola_obs.R
|
# Plot Ebola outbreak observations on West African county polygons
# (choropleth of outbreak dates) plus a population histogram.
# NOTE(review): rm(list=ls()) wipes the workspace -- deliberate for this
# standalone script, but do not source() it into a live session.
rm(list=ls(all=TRUE))
require("rgdal")
require("rgeos")
require("maptools")
require("ggplot2")
require("plyr")
require("gpclib")
require("readr")
require("gganimate")
require("viridis")
gpclibPermit()
# County polygons; nodes 0..289 index the 290 counties in model order.
polygons = readRDS("~/Downloads/DataAndCode/DataAndCode/WestAfricaCountyPolygons.rds")
polygons.data = polygons@data
polygons.data$id = rownames(polygons@data)
polygons.data$node = 0:289
# Simplify geometry for faster plotting, then re-attach the data slot
# that gSimplify() drops.
polygons = gSimplify(polygons, tol=0.01, topologyPreserve=TRUE)
attributes(polygons)$data = polygons.data
# Flatten to a vertex data frame and join the county attributes back on.
polygons.points = fortify(polygons)
polygons.df = join(polygons.points, polygons@data, by="id")
# Outbreak day per node; a negative day encodes "no outbreak" -> NA.
outbreaks = read_csv("../src/data/ebola_outbreaks.txt", col_names = FALSE)
names(outbreaks) = c("day")
outbreaks$day = ifelse(outbreaks$day < 0, NA, outbreaks$day)
outbreaks$node = 0:289
# Days are offsets from the first observation date, 2014-04-26.
outbreaks$date = as.Date("2014-04-26") + outbreaks$day
obs_polygons_data = join(outbreaks, polygons.df, by = "node")
# Legend labels: the fill is mapped as integer days-since-epoch, so
# convert the break values back into Dates for display.
convert_to_date = function(x) {
as.Date(x, origin = "1970-01-01")
}
p = ggplot(obs_polygons_data) +
aes(long,lat,group=group,fill=as.integer(date)) +
geom_polygon() +
geom_path(color="gray", size=0.1) +
scale_fill_viridis("Date of outbreak", labels = convert_to_date) +
theme(legend.position="right",
panel.background = element_blank(),
panel.border = element_blank(),
axis.line = element_blank(),
axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title = element_blank()) +
coord_fixed()
print(p)
ggsave("../data/figures/ebola_obs_outbreaks.pdf", p, width = 7, height = 4)
ggsave("../data/figures/ebola_obs_outbreaks.svg", p, width = 10, height = 6)
## p = ggplot(polygons.df) +
##   aes(long,lat,group=group,fill=id) +
##   geom_polygon() +
##   geom_path(color="gray") +
##   coord_equal() +
##   theme(legend.position="none")
## print(p)
# Per-county population counts; plotted on a log scale.
pops = read_csv("../src/data/ebola_population.txt", col_names = FALSE)
names(pops) = c("pop")
p = ggplot(pops) +
aes(x = log(pop)) +
geom_histogram() +
xlab("log(Population)") +
ylab("Count")
print(p)
ggsave("../data/figures/ebola_obs_population.pdf", p)
|
911b4b13a40d41eee95a8b21a8020253be8e5f55
|
57f54656b24fd75ee6b7d403bb30f74657bedc41
|
/archive/n_calculation/n_model2/n_param_plot.r
|
30fd400f433683876c503fc0ea4d608342535b18
|
[] |
no_license
|
kroppheather/synthesis_database
|
95a78338270fea260ee364f81813e63126858de3
|
b34246d99970152456cbc0fda62b8b4df2e0f08d
|
refs/heads/master
| 2021-01-09T20:36:11.505384
| 2020-09-15T20:33:11
| 2020-09-15T20:33:11
| 60,531,994
| 1
| 0
| null | 2017-05-08T14:57:16
| 2016-06-06T13:54:44
|
R
|
UTF-8
|
R
| false
| false
| 21,414
|
r
|
n_param_plot.r
|
###############################################################
###############################################################
################look at some specific site patterns to help
################deal with interpretation
library(plyr)
#read in data: ID tables, posterior summaries, and temperature series
setwd("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u7\\Tmod1\\output_u7")
datAI<-read.csv("AirIDS.csv")
datSI<-read.csv("SoilIDS.csv")
datM<-read.csv("Temp_mod7_stats.csv")
datQ<-read.csv("Temp_mod7_quant.csv")
datAIS<-read.csv("AirIDS_SD.csv")
datSIS<-read.csv("SoilIDS_SD.csv")
datAM<-read.csv("Tair_model.csv")
datSM<-read.csv("Tsoil_model.csv")
#decimal date relative to 1991
datSM$decdateA<-datSM$decdate-1991
datAM$decdateA<-datAM$decdate-1991
#read in temperature data used in model
#now join means with quantiles
datC<-cbind(datM,datQ)
#make a param vector: strip the "[i]" index from each rowname
dexps<-"\\[*[[:digit:]]*\\]"
datC$parms1<-gsub(dexps,"",rownames(datC))
#now add id number: keep only the digits of each rowname
dexps2<-"\\D"
pnames<-rownames(datC)
parms2<-gsub(dexps2,"",pnames)
datC$parms2<-c(as.numeric(parms2))
#keep mean, 2.5% and 97.5% quantiles, parameter name, and vector index
datC<-data.frame(M=datC[,1],pc2.5=datC[,5],pc97.5=datC[,9],param=as.character(datC[,10]),ID=datC[,11])
#split out freezing/thawing degree days, air (…A) vs soil (…S)
datADDF<-datC[datC$param=="FDDA",]
datADDT<-datC[datC$param=="TDDA",]
datSDDF<-datC[datC$param=="FDDS",]
datSDDT<-datC[datC$param=="TDDS",]
colnames(datADDF)[5]<-"SDWA"
colnames(datADDT)[5]<-"SDWA"
colnames(datSDDF)[5]<-"SDWS"
colnames(datSDDT)[5]<-"SDWS"
#attach site/depth/water-year IDs
datADDF2<-join(datADDF,datAI, by="SDWA", type="left")
datADDT2<-join(datADDT,datAI, by="SDWA", type="left")
datSDDF2<-join(datSDDF,datSI, by="SDWS", type="left")
datSDDT2<-join(datSDDT,datSI, by="SDWS", type="left")
#read in vegetation id info
datVC<-read.csv("c:\\Users\\hkropp\\Google Drive\\raw_data\\analysis_u7\\vegeClass.csv")
colnames(datVC)[1]<-"siteid"
datADDF3<-join(datADDF2,datVC, by="siteid", type="left")
datADDT3<-join(datADDT2,datVC, by="siteid", type="left")
datSDDF3<-join(datSDDF2,datVC, by="siteid", type="left")
datSDDT3<-join(datSDDT2,datVC, by="siteid", type="left")
colnames(datSDDF3)<-c("M.S", "pc2.5S", "pc.97.5S", "paramS",
	"SDWS", "siteid", "depth", "wyear","classS")
colnames(datSDDT3)<-c("M.S", "pc2.5S", "pc.97.5S", "paramS",
	"SDWS", "siteid", "depth", "wyear","classS")
#pair soil and air freezing degree days by site and water year
FreezeJ<-join(datSDDF3, datADDF3, by=c("siteid", "wyear"), type="left")
bareFJ<-FreezeJ[FreezeJ$classS==1,]
#scatter of soil vs air freezing degree days for class 1 (herb barren)
par(mai=c(1,1,1,1))
plot(FreezeJ$M.S[FreezeJ$classS==1],FreezeJ$M[FreezeJ$classS==1],
	xlim=c(-5000,-3000), ylim=c(-5000,-3000),pch=19,
	xlab="Freezing degree days soil",
	ylab="Freezing degree days air", cex.axis=2, cex.lab=2,
	cex=1.5)
#posterior summaries for temperature amplitude (Amp*) and mean (T.ave*)
AmpA<-datC[datC$param=="AmpA",]
AmpS<-datC[datC$param=="AmpS",]
TaA<-datC[datC$param=="T.aveA",]
TaS<-datC[datC$param=="T.aveS",]
colnames(AmpA)<-c("Aa", "Aapc2.5","Aapc97.5","paramAA", "SDWA")
colnames(AmpS)<-c("Sa", "Sapc2.5","Sapc97.5","paramSA", "SDWS")
colnames(TaA)<-c("At", "Atpc2.5","Atpc97.5","paramAT", "SDWA")
colnames(TaS)<-c("st", "Stpc2.5","Stpc97.5","paramST", "SDWS")
#add amplitude and mean temperature columns onto the herb-barren table
bareFJ2<-join(bareFJ, AmpA, by="SDWA", type="left")
bareFJ3<-join(bareFJ2, AmpS, by="SDWS", type="left")
bareFJ4<-join(bareFJ3, TaS, by="SDWS", type="left")
bareFJ5<-join(bareFJ4, TaA, by="SDWA", type="left")
#air vs soil amplitude with 95% credible-interval whiskers and a 1:1 line
par(mai=c(1,1,1,1))
plot(bareFJ5$Aa,bareFJ5$Sa, pch=19, xlim=c(13,20), ylim=c(13,20),
	xlab="Air Temperature Amplitude (C)",
	ylab="Soil Temperature Amplitude (C)", cex.axis=2, cex.lab=2,
	cex=1.5 )
arrows(bareFJ5$Aapc2.5,bareFJ5$Sa,bareFJ5$Aapc97.5,bareFJ5$Sa,
	code=0, lwd=1.5)
arrows(bareFJ5$Aa,bareFJ5$Sapc2.5,bareFJ5$Aa,bareFJ5$Sapc97.5,
	code=0, lwd=1.5)
abline(0,1, lty=4, lwd=2)
#air vs soil average temperature with 95% credible intervals and 1:1 line
par(mai=c(1,1,1,1))
plot(bareFJ5$At,bareFJ5$st, pch=19, xlim=c(-13,-6), ylim=c(-13,-6),
	xlab="Air Temperature Average (C)",
	ylab="Soil Temperature Average (C)", cex.axis=2, cex.lab=2,
	cex=1.5 )
arrows(bareFJ5$Atpc2.5,bareFJ5$st,bareFJ5$Atpc97.5,bareFJ5$st,
	code=0, lwd=1.5)
arrows(bareFJ5$At,bareFJ5$Stpc2.5,bareFJ5$At,bareFJ5$Stpc97.5,
	code=0, lwd=1.5)
abline(0,1, lty=4, lwd=2)
#####################################################################
####look at N factors vs world climate data
####
######################################
#read in data
library(plyr)
#setwd("c:\\Users\\hkropp\\Google Drive\\raw_data\\nmod_out\\u7_n4")
setwd("c:\\Users\\hkropp\\Downloads\\n_u4")
datF<-read.csv("Freezing_n_forMod.csv")
datT<-read.csv("Thawing_n_forMod.csv")
datEA<-read.csv("AVET_forMod.csv")
#read in params
datM<-read.csv("model_variaion_stats.csv")
datQ<-read.csv("model_variaion_quant.csv")
#now join means with quantiles
datC<-cbind(datM,datQ)
#make a param vector
dexps<-"\\[*[[:digit:]]*\\]"
datC$parms1<-gsub(dexps,"",rownames(datC))
#now add id number
dexps2<-"\\D"
#pull out names
pnames<-rownames(datC)
#need to split because there are numbers in param names
psplit<-strsplit(pnames, "\\[")
#pull out vector number
# NOTE(review): 1:dim(datC)[1] assumes datC has at least one row
pEnd<-character(0)
for(i in 1:dim(datC)[1]){
if(length(psplit[[i]])>1){
pEnd[i]<-psplit[[i]][2]
}else{pEnd[i]<-"NA"}
}
#get vector number only and make numeric
parmCN<-ifelse(pEnd=="NA", NA, gsub(dexps2,"", pEnd ))
datC$parms2<-c(as.numeric(parmCN))
#keep mean, 2.5% and 97.5% quantiles, parameter name, and vector index
datCS<-data.frame(M=datC[,1],pc2.5=datC[,5],pc97.5=datC[,9],param=as.character(datC[,10]),ID=datC[,11])
datCT<-datCS[datCS$param=="betaT3",]
datCF<-datCS[datCS$param=="betaF3",]
#panel width/height (cm) used by the layout() calls in the figures below
wb<-40
hb<-40
#regression line centered on the covariate mean: b0 + b3*(x - xave)
covlm<-function(x,b0, xave,b3){
b0+ (b3*(x-xave))
}
#classID code
#1= herb barren
#2 = grasstundra
#3= tussock tundra
#4= shrub tundra
#5= wetland
#6= evergreen boreal
#7= mixed boreal
#make a plot to look at more closely
# Figure: freezing n-factor vs minimum yearly air temperature, one panel
# per vegetation class. Classes with a covariate effect get a fitted line
# from covlm(); the others get a horizontal intercept line (betaF1).
# The dashed vertical line marks the class mean of the covariate (AveMin).
jpeg("c:\\Users\\hkropp\\Google Drive\\raw_data\\nmod_out\\u7_n4\\n_Fmin.jpg", width=9000,height=2200)
ab<-layout(matrix(seq(1,7), ncol=7, byrow=TRUE),
	width=c(lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb)),
	height=c(lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb)))
# panel 1: herb barren (intercept only); also draws the shared axes/labels
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-50,-7), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$Amin[datF$classID==1],datF$NT[datF$classID==1],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][1],
	col="black", lwd=8)
abline(v=datEA$AveMin[1],
	col="plum3", lwd=8, lty=3)
text(-45 + 10, 1.6, "Herb barren", cex=10)
axis(1, seq(-45, -10, by=5), cex.axis=8, padj=1)
axis(2, seq(.2,1.6, by=.2),cex.axis=8,las=2)
mtext("Minimum Yearly Temperature (C)", side=1, cex=7, line=-8,outer =TRUE)
mtext("Freezing n-factor", side=2, cex=7, line=15)
box(which="plot")
# panel 2: grass tundra (fitted slope)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-50, -7), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$Amin[datF$classID==2],datF$NT[datF$classID==2],
	pch=19, cex=12, col="deepskyblue4")
points(seq(-50,-7,by=.5), covlm(seq(-50,-7,by=.5),datCS$M[datCS$param=="betaF1"][2],datEA$AveMin[2],
	datCS$M[datCS$param=="betaF3"][2]),
	type="l", col="black", lwd=8)
text(-45 + 10, 1.6, "Grass tundra", cex=10)
axis(1, seq(-45, -10, by=5), cex.axis=8, padj=1)
abline(v=datEA$AveMin[2],
	col="plum3", lwd=8, lty=3)
box(which="plot")
# panel 3: tussock tundra (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-50, -7), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$Amin[datF$classID==3],datF$NT[datF$classID==3],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][3],
	col="black", lwd=8)
abline(v=datEA$AveMin[3],
	col="plum3", lwd=8, lty=3)
text(-45 + 10, 1.6, "Tussock tundra", cex=10)
axis(1, seq(-45, -10, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 4: shrub tundra (fitted slope)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-50, -7), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$Amin[datF$classID==4],datF$NT[datF$classID==4],
	pch=19, cex=12, col="deepskyblue4")
points(seq(-50,-7,by=.5), covlm(seq(-50,-7,by=.5),datCS$M[datCS$param=="betaF1"][4],datEA$AveMin[4],
	datCS$M[datCS$param=="betaF3"][4]),
	type="l", col="black", lwd=8)
abline(v=datEA$AveMin[4],
	col="plum3", lwd=8, lty=3)
text(-45 + 10, 1.6, "Shrub tunda", cex=10)
axis(1, seq(-45, -5, by=10), cex.axis=8, padj=1)
box(which="plot")
# panel 5: wetland tundra (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-50, -7), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$Amin[datF$classID==5],datF$NT[datF$classID==5],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][5],
	col="black", lwd=8)
abline(v=datEA$AveMin[5],
	col="plum3", lwd=8, lty=3)
text(-45 + 10, 1.6, "wetland tund", cex=10)
axis(1, seq(-45, -5, by=10), cex.axis=8, padj=1)
box(which="plot")
# panel 6: evergreen boreal (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-50, -7), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$Amin[datF$classID==6],datF$NT[datF$classID==6],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][6],
	col="black", lwd=8)
abline(v=datEA$AveMin[6],
	col="plum3", lwd=8, lty=3)
text(-45 + 10, 1.6, "evergreen boreal", cex=10)
axis(1, seq(-45, -5, by=10), cex.axis=8, padj=1)
box(which="plot")
# panel 7: mixed boreal (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-50, -7), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$Amin[datF$classID==7],datF$NT[datF$classID==7],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][7],
	col="black", lwd=8)
abline(v=datEA$AveMin[7],
	col="plum3", lwd=8, lty=3)
text(-45 + 10, 1.6, "mixed boreal", cex=10)
axis(1, seq(-45, -5, by=10), cex.axis=8, padj=1)
box(which="plot")
dev.off()
########################################################################
########################################################################
#make a plot to look at more closely
# Figure: thawing n-factor vs maximum yearly air temperature, one panel
# per vegetation class. Classes 1-5 get a fitted line from covlm();
# classes 6-7 get a horizontal intercept line (betaT1). The dashed
# vertical line marks the class mean of the covariate (AveMax).
jpeg("c:\\Users\\hkropp\\Google Drive\\raw_data\\nmod_out\\u7_n4\\n_Tmax.jpg", width=9000,height=2200)
ab<-layout(matrix(seq(1,7), ncol=7, byrow=TRUE),
	width=c(lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb)),
	height=c(lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb)))
# panel 1: herb barren (fitted slope); also draws the shared axes/labels
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(10,26), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$Amax[datT$classID==1],datT$NT[datT$classID==1],
	pch=19, cex=12, col="seagreen4")
points(seq(10,26,by=.5), covlm(seq(10,26,by=.5),datCS$M[datCS$param=="betaT1"][1],datEA$AveMax[1],
	datCS$M[datCS$param=="betaT3"][1]),
	type="l", col="black", lwd=8)
abline(v=datEA$AveMax[1],
	col="plum3", lwd=8, lty=3)
text( 20, 1.6, "Herb barren", cex=10)
axis(1, seq(10,26, by=5), cex.axis=8, padj=1)
axis(2, seq(.2,1.6, by=.2),cex.axis=8,las=2)
mtext("Maximum Yearly Temperature (C)", side=1, cex=7, line=-8,outer =TRUE)
mtext("Thawing n-factor", side=2, cex=7, line=15)
box(which="plot")
# panel 2: grass tundra (fitted slope)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(10,26), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$Amax[datT$classID==2],datT$NT[datT$classID==2],
	pch=19, cex=12, col="seagreen4")
points(seq(10,26,by=.5), covlm(seq(10,26,by=.5),datCS$M[datCS$param=="betaT1"][2],datEA$AveMax[2],
	datCS$M[datCS$param=="betaT3"][2]),
	type="l", col="black", lwd=8)
text(20, 1.6, "Grass tundra", cex=10)
axis(1, seq(10,26, by=5), cex.axis=8, padj=1)
abline(v=datEA$AveMax[2],
	col="plum3", lwd=8, lty=3)
box(which="plot")
# panel 3: tussock tundra (fitted slope)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(10,26), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$Amax[datT$classID==3],datT$NT[datT$classID==3],
	pch=19, cex=12, col="seagreen4")
points(seq(10,26,by=.5), covlm(seq(10,26,by=.5),datCS$M[datCS$param=="betaT1"][3],datEA$AveMax[3],
	datCS$M[datCS$param=="betaT3"][3]),
	type="l", col="black", lwd=8)
abline(v=datEA$AveMax[3],
	col="plum3", lwd=8, lty=3)
text(20, 1.6, "Tussock tundra", cex=10)
axis(1, seq(10,26, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 4: shrub tundra (fitted slope)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(10,26), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$Amax[datT$classID==4],datT$NT[datT$classID==4],
	pch=19, cex=12, col="seagreen4")
points(seq(10,26,by=.5), covlm(seq(10,26,by=.5),datCS$M[datCS$param=="betaT1"][4],datEA$AveMax[4],
	datCS$M[datCS$param=="betaT3"][4]),
	type="l", col="black", lwd=8)
abline(v=datEA$AveMax[4],
	col="plum3", lwd=8, lty=3)
text(20, 1.6, "Shrub tunda", cex=10)
axis(1, seq(10,26, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 5: wetland tundra (fitted slope)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(10,26), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$Amax[datT$classID==5],datT$NT[datT$classID==5],
	pch=19, cex=12, col="seagreen4")
points(seq(10,26,by=.5), covlm(seq(10,26,by=.5),datCS$M[datCS$param=="betaT1"][5],datEA$AveMax[5],
	datCS$M[datCS$param=="betaT3"][5]),
	type="l", col="black", lwd=8)
abline(v=datEA$AveMax[5],
	col="plum3", lwd=8, lty=3)
text(20, 1.6, "wetland tunda", cex=10)
axis(1, seq(10,26, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 6: evergreen boreal (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(10,26), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$Amax[datT$classID==6],datT$NT[datT$classID==6],
	pch=19, cex=12, col="seagreen4")
abline(h=datCS$M[datCS$param=="betaT1"][6],
	col="black", lwd=8)
abline(v=datEA$AveMax[6],
	col="plum3", lwd=8, lty=3)
text(20, 1.6, "evergreen boreal", cex=10)
axis(1, seq(10,26, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 7: mixed boreal (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(10,26), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$Amax[datT$classID==7],datT$NT[datT$classID==7],
	pch=19, cex=12, col="seagreen4")
abline(h=datCS$M[datCS$param=="betaT1"][7],
	col="black", lwd=8)
abline(v=datEA$AveMax[7],
	col="plum3", lwd=8, lty=3)
text(20, 1.6, "mixed boreal", cex=10)
axis(1, seq(10,26, by=5), cex.axis=8, padj=1)
box(which="plot")
dev.off()
#####################################################################################
#####################################################################################
# Figure: freezing n-factor vs measurement depth, one panel per vegetation
# class. Only the shrub tundra class (panel 4) gets a depth slope (betaF2);
# the rest get a horizontal intercept line (betaF1).
jpeg("c:\\Users\\hkropp\\Google Drive\\raw_data\\nmod_out\\u7_n4\\n_Fdepth.jpg", width=9000,height=2200)
ab<-layout(matrix(seq(1,7), ncol=7, byrow=TRUE),
	width=c(lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb)),
	height=c(lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb)))
# panel 1: herb barren (intercept only); also draws the shared axes/labels
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$depth[datF$classID==1],datF$NT[datF$classID==1],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][1],
	col="black", lwd=8)
text(10, 1.6, "Herb barren", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
axis(2, seq(.2,1.6, by=.2),cex.axis=8,las=2)
mtext("Depth in the soil (cm)", side=1, cex=7, line=-8,outer =TRUE)
mtext("Freezing n-factor", side=2, cex=7, line=15)
box(which="plot")
# panel 2: grass tundra (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$depth[datF$classID==2],datF$NT[datF$classID==2],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][2],
	col="black", lwd=8)
text(10, 1.6, "Grass tundra", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 3: tussock tundra (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$depth[datF$classID==3],datF$NT[datF$classID==3],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][3],
	col="black", lwd=8)
text( 10, 1.6, "Tussock tundra", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 4: shrub tundra (intercept + depth slope betaF2)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$depth[datF$classID==4],datF$NT[datF$classID==4],
	pch=19, cex=12, col="deepskyblue4")
abline(datCS$M[datCS$param=="betaF1"][4],datCS$M[datCS$param=="betaF2"][4],
	col="black", lwd=8)
text( 10, 1.6, "Shrub tunda", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 5: wetland tundra (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$depth[datF$classID==5],datF$NT[datF$classID==5],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][5],
	col="black", lwd=8)
text(10, 1.6, "wetland tund", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 6: evergreen boreal (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$depth[datF$classID==6],datF$NT[datF$classID==6],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][6],
	col="black", lwd=8)
text(10, 1.6, "evergreen boreal", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 7: mixed boreal (intercept only)
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datF$depth[datF$classID==7],datF$NT[datF$classID==7],
	pch=19, cex=12, col="deepskyblue4")
abline(h=datCS$M[datCS$param=="betaF1"][7],
	col="black", lwd=8)
text(10, 1.6, "mixed boreal", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
dev.off()
#####################################################################################
#####################################################################################
# Figure: THAWING n-factor vs measurement depth, one panel per vegetation
# class; every class gets an intercept (betaT1) + depth slope (betaT2) line.
# Fix: the y-axis label previously read "Freezing n-factor", but this figure
# plots the thawing data (datT / betaT parameters, file n_Tdepth.jpg).
jpeg("c:\\Users\\hkropp\\Google Drive\\raw_data\\nmod_out\\u7_n4\\n_Tdepth.jpg", width=9000,height=2200)
ab<-layout(matrix(seq(1,7), ncol=7, byrow=TRUE),
	width=c(lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb),lcm(wb)),
	height=c(lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb),lcm(hb)))
# panel 1: herb barren; also draws the shared axes/labels
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$depth[datT$classID==1],datT$NT[datT$classID==1],
	pch=19, cex=12, col="seagreen4")
abline(datCS$M[datCS$param=="betaT1"][1],datCS$M[datCS$param=="betaT2"][1],
	col="black", lwd=8)
text(10, 1.6, "Herb barren", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
axis(2, seq(.2,1.6, by=.2),cex.axis=8,las=2)
mtext("Depth in the soil (cm)", side=1, cex=7, line=-8,outer =TRUE)
mtext("Thawing n-factor", side=2, cex=7, line=15)
box(which="plot")
# panel 2: grass tundra
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$depth[datT$classID==2],datT$NT[datT$classID==2],
	pch=19, cex=12, col="seagreen4")
abline(datCS$M[datCS$param=="betaT1"][2],datCS$M[datCS$param=="betaT2"][2],
	col="black", lwd=8)
text(10, 1.6, "Grass tundra", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 3: tussock tundra
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$depth[datT$classID==3],datT$NT[datT$classID==3],
	pch=19, cex=12, col="seagreen4")
abline(datCS$M[datCS$param=="betaT1"][3],datCS$M[datCS$param=="betaT2"][3],
	col="black", lwd=8)
text( 10, 1.6, "Tussock tundra", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 4: shrub tundra
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$depth[datT$classID==4],datT$NT[datT$classID==4],
	pch=19, cex=12, col="seagreen4")
abline(datCS$M[datCS$param=="betaT1"][4],datCS$M[datCS$param=="betaT2"][4],
	col="black", lwd=8)
text( 10, 1.6, "Shrub tunda", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 5: wetland tundra
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$depth[datT$classID==5],datT$NT[datT$classID==5],
	pch=19, cex=12, col="seagreen4")
abline(datCS$M[datCS$param=="betaT1"][5],datCS$M[datCS$param=="betaT2"][5],
	col="black", lwd=8)
text(10, 1.6, "wetland tund", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 6: evergreen boreal
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$depth[datT$classID==6],datT$NT[datT$classID==6],
	pch=19, cex=12, col="seagreen4")
abline(datCS$M[datCS$param=="betaT1"][6],datCS$M[datCS$param=="betaT2"][6],
	col="black", lwd=8)
text(10, 1.6, "evergreen boreal", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
# panel 7: mixed boreal
par(mai=c(0,0,0,0))
plot(c(0,1),c(0,1), type="n", axes=FALSE, xlim=c(-1,21), ylim=c(0,1.75), xlab=" ",
	ylab=" ", xaxs="i", yaxs="i")
points(datT$depth[datT$classID==7],datT$NT[datT$classID==7],
	pch=19, cex=12, col="seagreen4")
abline(datCS$M[datCS$param=="betaT1"][7],datCS$M[datCS$param=="betaT2"][7],
	col="black", lwd=8)
text(10, 1.6, "mixed boreal", cex=10)
axis(1, seq(5,20, by=5), cex.axis=8, padj=1)
box(which="plot")
dev.off()
|
10b3817351aec662a58b74869b13a1b6119c72ee
|
1ba5b7c213871eb2b9aa5d194fa403f87d728193
|
/R/dropIndex.R
|
9e755ca85dc4a299bab3ba286cc565d5b6bc42e6
|
[
"MIT"
] |
permissive
|
noelnamai/RNeo4j
|
6a0c42ffe5a6f3f9ffc19d15ad25453696ea3760
|
4af57a9b00593109155e9f2c55108fe8b94c8f0b
|
refs/heads/master
| 2020-04-01T23:02:26.894316
| 2015-04-17T18:03:26
| 2015-04-17T18:03:26
| 34,324,382
| 1
| 0
| null | 2015-04-21T12:02:52
| 2015-04-21T12:02:51
|
R
|
UTF-8
|
R
| false
| false
| 2,249
|
r
|
dropIndex.R
|
# S3 generic: drop a schema index (by label/key) or all indexes from a graph.
dropIndex = function(graph, label = character(), key = character(), all = FALSE) UseMethod("dropIndex")
# Fallback method: anything other than a graph object is rejected.
# Fix: the first formal is renamed from `x` to `graph` so the method's
# arguments are consistent with the generic (S3 methods must start with
# the generic's formals; the mismatch triggers an R CMD check warning).
dropIndex.default = function(graph, ...) {
  stop("Invalid object. Must supply graph object.")
}
# Drop one index (label/key) or all indexes from a Neo4j graph.
# Indexes that back a uniqueness constraint cannot be dropped directly;
# the caller is directed to dropConstraint() instead.
# Fixes: scalar condition now uses `&&` (not elementwise `&`); loops use
# seq_len()/seq_along() (safe for zero-length); removed the unreachable
# return() after stop().
dropIndex.graph = function(graph, label = character(), key = character(), all = FALSE) {
  stopifnot(is.character(label),
            is.character(key),
            is.logical(all))

  headers = setHeaders(graph)
  url = attr(graph, "indexes")
  constraints = suppressMessages(getConstraint(graph))

  # If user sets all=TRUE, drop all indexes from the graph.
  if(all) {
    indexes = suppressMessages(getIndex(graph))

    if(is.null(indexes)) {
      message("No indexes to drop.")
      return(invisible(NULL))
    }

    # NOTE(review): assumes getConstraint() returns something merge()-compatible
    # when there are no constraints -- verify against getConstraint().
    overlap = merge(indexes, constraints)

    if(nrow(overlap) > 0) {
      errors = c()
      for(i in seq_len(nrow(overlap))) {
        errors = c(errors,
                   "There is a uniqueness constraint for label '", overlap[i,'label'], "' on property '", overlap[i,'property_keys'], "'.\n")
      }
      # stop() never returns, so no trailing return() is needed.
      stop(errors,
           "Remove the uniqueness constraint(s) instead using dropConstraint(). This drops the index(es) as well.")
    }

    # One DELETE request per index: <indexes-url>/<label>/<property>.
    urls = apply(indexes, 1, function(x) paste(url, x['label'], x['property_keys'], sep = "/"))

    for(i in seq_along(urls)) {
      http_request(urls[i],
                   "DELETE",
                   "No Content",
                   httpheader=headers)
    }
    return(invisible(NULL))

  # Else, drop the index for the label and key given.
  } else if (length(label) == 1 && length(key) == 1) {
    index = suppressMessages(getIndex(graph, label))
    overlap = merge(index, constraints)

    if(nrow(overlap) > 0) {
      stop("There is a uniqueness constraint on label '", label, "' with property '", key, "'. ",
           "Remove the uniqueness constraint instead using dropConstraint(). This drops the index as well.")
    }

    url = paste(url, label, key, sep = "/")
    http_request(url, "DELETE", "No Content", httpheader=headers)
    return(invisible(NULL))

  # Else, user supplied an invalid combination of arguments.
  } else {
    stop("Arguments supplied are invalid.")
  }
}
|
46ebd6cc1cdc60dc4b53a04394f8d86ec02d764b
|
9262e777f0812773af7c841cd582a63f92d398a4
|
/inst/userguide/figures/CS7--Cs27_plank-model-4.R
|
c5ce5e7e76b36f64d2fd2d119cd3a2bb0fe82217
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
nwfsc-timeseries/MARSS
|
f0124f9ba414a28ecac1f50c4596caaab796fdd2
|
a9d662e880cb6d003ddfbd32d2e1231d132c3b7e
|
refs/heads/master
| 2023-06-07T11:50:43.479197
| 2023-06-02T19:20:17
| 2023-06-02T19:20:17
| 438,764,790
| 1
| 2
|
NOASSERTION
| 2023-06-02T19:17:41
| 2021-12-15T20:32:14
|
R
|
UTF-8
|
R
| false
| false
| 208
|
r
|
CS7--Cs27_plank-model-4.R
|
###################################################
### code chunk number 30: Cs27_plank-model-4
###################################################
# Fit the MARSS state-space model `plank.model.4` to the plankton data.
# Both `d.plank.dat` and `plank.model.4` are built in earlier chunks of
# this case study -- this chunk cannot run standalone.
kem.plank.4 <- MARSS(d.plank.dat, model = plank.model.4)
|
5bd7f0d6d6988082510556773c29b5981a574de8
|
9cb1fe4a501d445b1ed69297f7431f1a1d49eaab
|
/enfa.R
|
9c7258574cb7fa3ba8da486eda6d79d5e73ef01b
|
[] |
no_license
|
SaraVarela/Odonata_corotipos
|
ee39234ea6ab54e0d10ab3f2d369ba47c2140e31
|
662b91c1f0d8fef69b6aa81829daf24e79d4444a
|
refs/heads/master
| 2021-01-25T04:02:13.158296
| 2014-02-12T21:15:19
| 2014-02-12T21:15:19
| 16,782,464
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,077
|
r
|
enfa.R
|
library (adehabitatHS)
##### We need a data frame with the climate data, `tab`,
#### and a 0/1 presence vector, `pr` (built before this script runs).
## We then perform the PCA before the ENFA
pc <- dudi.pca(tab, scannf = FALSE)
## The object 'pc' contains the transformed table (i.e.
## centered so that all columns have a mean of 0
## and scaled so that all columns have a variance of 1
## 'pc' also contains the weights of the habitat variables,
## and the weights of the pixels in the analysis
(enfa1 <- enfa(pc, pr,
scannf = FALSE))
hist(enfa1)
hist(enfa1, scores = FALSE, type = "l")
## scatterplot
scatter(enfa1)
## randomization test
## Not run:
(renfa <- randtest(enfa1))
plot(renfa)
#### Now, to predict, the variables need to be held in a
### SpatialPixelsDataFrame, `map`.
data(meuse.grid)
head (meuse.grid)
map<- SpatialPixelsDataFrame(points = meuse.grid[c("x", "y")],
data = meuse.grid)
# NOTE(review): this calls the S3 method directly and discards the result;
# the generic call on the next line is the supported form.
predict.enfa (enfa1, map)
pred <- predict(enfa1, map)
image(pred)
# NOTE(review): `locs` is not defined anywhere in this script -- verify it
# is created elsewhere before running this line.
points(locs, pch = 16, cex=0.2)
# Interactive help lookups left in the script:
??SpatialPixelDataFrame
?madifa
|
36650f8e9dbefcb056b5c408a572b955968d49bd
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/hutilscpp/R/pmaxC.R
|
8fc41eab882dd546eaaa2a141c0a89e770f135ee
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,425
|
r
|
pmaxC.R
|
#' @title Parallel maximum/minimum
#' @description Faster \code{pmax()} and \code{pmin()}.
#'
#' @name pmaxC
#' @aliases pminC pmaxV pminV pmax0 pmin0
#' @param x \describe{
#' \item{\code{numeric(n)}}{A numeric vector.}
#' }
#' @param y,z \describe{\item{\code{numeric(n)}}{Other numeric vectors the same length as \code{x}}}
#' @param a \describe{\item{\code{numeric(1)}}{A single numeric value.}}
#'
#' @param in_place \describe{
#' \item{\code{TRUE | FALSE}, default: \code{FALSE}}{Should \code{x} be modified in-place? For advanced use only.}
#' }
#'
#'
#' @param keep_nas \describe{
#' \item{\code{TRUE | FALSE}, default: \code{FALSE}}{Should \code{NA}s values be
#' preserved? By default, \code{FALSE}, so the behaviour of the function is
#' dependent on the representation of \code{NA}s at the C++ level.}
#' }
#'
#' @param dbl_ok \describe{
#' \item{\code{TRUE | FALSE}, default: \code{TRUE}}{Is it acceptable to return
#' a non-integer vector if \code{x} is integer? If \code{TRUE}, the default,
#' if \code{x} is an integer vector, a double vector may be returned if
#' \code{a} is not an integer.}
#' }
#'
#' @param sorted \describe{
#' \item{\code{TRUE | FALSE}, default: \code{FALSE}}{
#' Is \code{x} known to be sorted?
#'   If \code{TRUE}, \code{x} is assumed to be sorted, so the position of
#'   the first zero determines where the run of zeroes starts or ends.}
#' }
#'
#' @param nThread \describe{
#' \item{\code{integer(1)}}{The number of threads to use. Combining \code{nThread > 1}
#' and \code{in_place = TRUE} is not supported.}
#' }
#'
#' @return Versions of \code{pmax} and \code{pmin}, designed for performance.
#'
#' When \code{in_place = TRUE}, the values of \code{x} are modified in-place.
#' For advanced users only.
#'
#'
#'
#'
#'
#' The differences are:
#' \describe{
#' \item{\code{pmaxC(x, a)} and \code{pminC(x, a)}}{Both \code{x} and \code{a} must be numeric and
#' \code{a} must be length-one.}
#' }
#'
#'
#'
#'
#' @note This function will always be faster than \code{pmax(x, a)} when \code{a} is
#' a single value, but can be slower than \code{pmax.int(x, a)} when \code{x} is short.
#' Use this function when comparing a numeric vector with a single value.
#'
#' Use \code{in_place = TRUE} only within functions when you are sure it is safe, i.e. not a
#' reference to something outside the environment.
#'
#' By design, the functions first check whether \code{x} will be modified before
#' allocating memory to a new vector. For example, if all values in \code{x} are
#' nonnegative, the vector is returned.
#'
#'
#' @examples
#' pmaxC(-5:5, 2)
#'
#' @export pmaxC pmax0 pmaxV pmax3
#'
#'
pmaxC <- function(x, a,
                  in_place = FALSE,
                  keep_nas = FALSE,
                  dbl_ok = TRUE,
                  nThread = getOption("hutilscpp.nThread", 1L)) {
  # Parallel maximum of a numeric vector `x` and a single number `a`,
  # i.e. a faster pmax(x, a). Validation happens here; the real work is
  # delegated to the C++ dispatcher do_pminpmax().
  check_TF(in_place)
  check_TF(keep_nas)
  if (!is.atomic(x) || !is.numeric(x)) {
    stop("\n`x` was of type ", typeof(x), ", class ", toString(class(x)), " and\n",
         "`a` was of type ", typeof(a), ", class ", toString(class(a)), " and\n",
         "Both `x` and `a` must be atomic numeric vectors.")
  }
  # isnt_number() is falsy for a valid scalar and truthy otherwise, with the
  # message carried in the "ErrorMessage" attribute -- so the assignment
  # inside `if` doubles as the validity check.
  if (amsg <- isnt_number(a, na.bad = TRUE, infinite.bad = TRUE, int.only = !dbl_ok)) {
    stop(attr(amsg, "ErrorMessage"))
  }
  # Match `a`'s storage mode to `x` before handing off to C++.
  if (is.double(x)) {
    a <- as.double(a)
  }
  o <- do_pminpmax(x, a,
                   do_min = FALSE,
                   in_place = in_place,
                   keep_nas = keep_nas,
                   dbl_ok = dbl_ok,
                   swap_xy = FALSE,
                   nThread = nThread)
  return(o)
}
#' @rdname pmaxC
#' @export
pminC <- function(x, a,
                  in_place = FALSE,
                  keep_nas = FALSE,
                  dbl_ok = TRUE,
                  nThread = getOption("hutilscpp.nThread", 1L)) {
  # Parallel minimum of a numeric vector `x` and a single number `a`,
  # i.e. a faster pmin(x, a). Mirrors pmaxC() with do_min = TRUE.
  check_TF(in_place)
  check_TF(keep_nas)
  if (!is.atomic(x) || !is.numeric(x)) {
    stop("\n`x` was of type ", typeof(x), ", class ", toString(class(x)), " and\n",
         "`a` was of type ", typeof(a), ", class ", toString(class(a)), " and\n",
         "Both `x` and `a` must be atomic numeric vectors.")
  }
  if (amsg <- isnt_number(a, na.bad = TRUE, infinite.bad = TRUE, int.only = !dbl_ok)) {
    stop(attr(amsg, "ErrorMessage"))
  }
  # Match `a`'s storage mode to `x` before handing off to C++.
  if (is.double(x)) {
    a <- as.double(a)
  }
  o <- do_pminpmax(x, a,
                   do_min = TRUE,
                   in_place = in_place,
                   # Fix: `keep_nas` was validated above but never forwarded,
                   # so the argument silently had no effect (unlike pmaxC).
                   keep_nas = keep_nas,
                   dbl_ok = dbl_ok,
                   swap_xy = FALSE,
                   nThread = nThread)
  return(o)
}
#' @rdname pmaxC
#' @export
pmax0 <- function(x, in_place = FALSE, sorted = FALSE, keep_nas = FALSE, nThread = getOption("hutilscpp.nThread", 1L)) {
  # pmax(x, 0): clamp negative values to zero, choosing the fastest of
  # several specialised C++ routines based on the input's properties.
  if (!is.atomic(x) || !is.numeric(x)) {
    stop("`x` was a ", class(x), ", but must be numeric.")
  }
  # Zero-length input: nothing to do.
  if (!length(x)) {
    return(x)
  }
  check_TF(in_place)
  check_TF(sorted)
  check_TF(keep_nas)
  check_omp(nThread)
  # ALTREP vectors (e.g. compact sequences) take a dedicated path and
  # cannot be modified in place.
  if (is_altrep(x)) {
    if (in_place) {
      warning("`in_place = TRUE`, but `x` is an ALTREP vector. ",
              "This is unsupported so `in_place` is being set to FALSE.")
    }
    return(do_pminmax0_altrep(x, do_pmin = FALSE, keep_nas = keep_nas, nThread = nThread))
  }
  # Caller promises `x` is sorted: use the sorted-input routines.
  if (sorted) {
    if (is.integer(x)) {
      return(do_pmax0_radix_sorted_int(x, in_place = in_place))
    } else {
      return(do_pmax0_radix_sorted_dbl(x, in_place = in_place))
    }
  }
  # Fast bitwise path for ordinary integer vectors (NA handling differs,
  # hence the !keep_nas guard).
  if (is.integer(x) && !keep_nas && !in_place) {
    return(do_pmax0_bitwise(x, nThread = nThread))
  }
  # General path: compare against a zero of matching storage mode.
  z <- if (is.double(x)) 0 else 0L
  do_pminpmax(x, z, do_min = FALSE, in_place = in_place, keep_nas = keep_nas, nThread = nThread)
}
#' @rdname pmaxC
#' @export
pmin0 <- function(x, in_place = FALSE, sorted = FALSE, keep_nas = FALSE, nThread = getOption("hutilscpp.nThread", 1L)) {
  # pmin(x, 0): clamp positive values to zero. Mirror image of pmax0(),
  # dispatching to the same family of specialised C++ routines.
  if (!is.atomic(x) || !is.numeric(x)) {
    stop("`x` was a ", class(x), ", but must be numeric.")
  }
  # Zero-length input: nothing to do.
  if (!length(x)) {
    return(x)
  }
  check_TF(in_place)
  check_TF(sorted)
  check_TF(keep_nas)
  check_omp(nThread)
  # ALTREP vectors take a dedicated path and cannot be modified in place.
  if (is_altrep(x)) {
    if (in_place) {
      warning("`in_place = TRUE`, but `x` is an ALTREP vector. ",
              "This is unsupported so `in_place` is being set to FALSE.")
    }
    return(do_pminmax0_altrep(x, do_pmin = TRUE, keep_nas = keep_nas, nThread = nThread))
  }
  # Caller promises `x` is sorted: use the sorted-input routines.
  if (sorted) {
    if (is.integer(x)) {
      return(do_pmin0_radix_sorted_int(x, in_place = in_place))
    } else {
      return(do_pmin0_radix_sorted_dbl(x, in_place = in_place))
    }
  }
  # Fast bitwise path for ordinary integer vectors.
  if (is.integer(x) && !keep_nas && !in_place) {
    return(do_pmin0_bitwise(x, nThread = nThread))
  }
  # General path: compare against a zero of matching storage mode.
  z <- if (is.double(x)) 0 else 0L
  do_pminpmax(x, z, do_min = TRUE, in_place = in_place, keep_nas = keep_nas, nThread = nThread)
}
#' @rdname pmaxC
#' @export
pmaxV <- function(x, y, in_place = FALSE, dbl_ok = TRUE, nThread = getOption("hutilscpp.nThread", 1L)) {
  # Parallel maximum of two equal-length numeric vectors (faster pmax(x, y)).
  check_TF(in_place)
  check_TF(dbl_ok)
  if (length(x) != length(y)) {
    stop("`length(x) = ", length(x), "`, yet ",
         "`length(y) = ", length(y), "`. ",
         "`x` and `y` must have the same length.")
  }
  if (!is.atomic(x) || !is.atomic(y) || !is.numeric(x) || !is.numeric(y)) {
    stop("\n`x` was of type ", typeof(x), ", class ", toString(class(x)), " and\n",
         "`y` was of type ", typeof(y), ", class ", toString(class(y)), " and\n",
         "Both `x` and `y` must be atomic numeric vectors.")
  }
  # Normalise the orientation of mixed integer/double inputs before the C++
  # call; `swap_xy` records that the arguments were exchanged so the result
  # is still pmax(x, y). Never swap when writing in place into `x`.
  if (in_place) {
    swap_xy <- FALSE
  } else {
    swap_xy <- is.integer(y) && is.double(x)
  }
  o <- do_pminpmax(if (swap_xy) y else x,
                   if (swap_xy) x else y,
                   do_min = FALSE,
                   in_place = in_place,
                   dbl_ok = dbl_ok,
                   swap_xy = swap_xy,
                   nThread = nThread)
  return(o)
}
#' @rdname pmaxC
#' @export
pminV <- function(x, y, in_place = FALSE, dbl_ok = TRUE, nThread = getOption("hutilscpp.nThread", 1L)) {
  # Parallel minimum of two equal-length numeric vectors (faster pmin(x, y)).
  check_TF(in_place)
  check_TF(dbl_ok)
  if (length(x) != length(y)) {
    stop("`length(x) = ", length(x), "`, yet ",
         "`length(y) = ", length(y), "`. ",
         "`x` and `y` must have the same length.")
  }
  if (!is.atomic(x) || !is.atomic(y) || !is.numeric(x) || !is.numeric(y)) {
    stop("\n`x` was of type ", typeof(x), ", class ", toString(class(x)), " and\n",
         "`y` was of type ", typeof(y), ", class ", toString(class(y)), " and\n",
         "Both `x` and `y` must be atomic numeric vectors.")
  }
  # Normalise the orientation of mixed integer/double inputs before the C++
  # call; `swap_xy` records the exchange. Never swap when writing in place.
  if (in_place) {
    swap_xy <- FALSE
  } else {
    swap_xy <- is.integer(y) && is.double(x)
  }
  o <- do_pminpmax(if (swap_xy) y else x,
                   if (swap_xy) x else y,
                   do_min = TRUE,
                   in_place = in_place,
                   dbl_ok = dbl_ok,
                   swap_xy = swap_xy,
                   nThread = nThread)
  return(o)
}
#' @rdname pmaxC
#' @export
pmax3 <- function(x, y, z, in_place = FALSE) {
  # Parallel maximum of three vectors; validation and dispatch live in the
  # shared worker .pminpmax3().
  .pminpmax3(x, y, z, in_place = in_place, do_max = TRUE)
}
#' @rdname pmaxC
#' @export
pmin3 <- function(x, y, z, in_place = FALSE) {
  # Parallel minimum of three vectors; validation and dispatch live in the
  # shared worker .pminpmax3().
  .pminpmax3(x, y, z, in_place = in_place, do_max = FALSE)
}
# Shared worker for pmax3()/pmin3(): parallel max/min over three vectors.
# Fast-paths homogeneous equal-length inputs straight to the C++ kernels,
# otherwise validates lengths/types, coerces where safe, and finally falls
# back to base pmax/pmin.
.pminpmax3 <- function(x, y, z, in_place = FALSE, do_max) {
  check_TF(in_place)
  lx <- length(x)
  # Fast path: equal lengths and homogeneous storage types.
  if (length(y) == lx && length(z) == lx) {
    if (is.integer(x) && is.integer(y) && is.integer(z)) {
      return(do_summary3_int(x, y, z, in_place, do_max = do_max))
    }
    if (is.double(x) && is.double(y) && is.double(z)) {
      return(do_summary3_dbl(x, y, z, in_place, do_max = do_max))
    }
  }
  if (!is.numeric(x) || !is.numeric(y) || !is.numeric(z)) {
    stop("`x` was of type ", typeof(x),
         "`y` was of type ", typeof(y),
         "`z` was of type ", typeof(z), ". ",
         "All of `x`, `y`, and `z` must be numeric.")
  }
  # lengths differ
  # y and z may be length-one (recycled by the kernels) but nothing else.
  if (length(y) != lx && length(y) != 1L) {
    stop("`y` had length ", length(y), ", yet ",
         "`x` had length ", length(x), ". ",
         "`y` and `z` must be the same length as `x`, (or length-one).")
  }
  if (length(z) != lx && length(z) != 1L) {
    stop("`z` had length ", length(z), ", yet ",
         "`x` had length ", length(x), ". ",
         "`y` and `z` must be the same length as `x`, (or length-one).")
  }
  # Mixed integer x with double y/z: only allowed when the doubles are
  # exact integer values (otherwise the integer kernel would truncate).
  if (is.integer(x) && (is.double(y) || is.double(z))) {
    yi <- y
    zi <- z
    if (is.double(y)) {
      # NOTE(review): the inner is.double(y) test is already guaranteed by
      # the enclosing if; presumably AND() evaluates its second argument
      # lazily so which_isnt_integerish() only runs when needed — confirm
      # against AND()'s definition.
      if (AND(is.double(y),
              wb <- which_isnt_integerish(y))) {
        stop("`x` was type integer and `y` was type double, but entry ", wb,
             " was not equal to the integer equivalent. ")
      }
    }
    if (is.double(z)) {
      if (AND(is.double(z),
              wb <- which_isnt_integerish(z))) {
        stop("`x` was type integer and `z` was type double, but entry ", wb,
             " was not equal to the integer equivalent. ")
      }
    }
    return(do_summary3_int(x, y, z, in_place = in_place, do_max = do_max))
  }
  # Double x with any numeric y/z: promote everything to double.
  if (is.double(x) && is.numeric(y) && is.numeric(z)) {
    return(do_summary3_dbl(x, as.double(y), as.double(z), in_place = in_place, do_max = do_max))
  }
  # nocov begin
  # Last-resort fallback via base R (not expected to be reachable).
  if (do_max) {
    pmax(x, pmax(y, z))
  } else {
    pmin(x, pmin(y, z))
  }
  # nocov end
}
# pmin0/pmax0 for ALTREP sequence vectors (e.g. `a:b`).
# Such vectors are arithmetic sequences, so the clamped result is fully
# described by where the sequence crosses zero (the "root") — no per-element
# work is needed.  Assumes `x` is a compact arithmetic sequence (constant
# step x[2] - x[1]); callers gate on is_altrep(x) — TODO confirm that every
# ALTREP reaching here is in fact a compact sequence.
do_pminmax0_altrep <- function(x,
                               a = 0L,
                               keep_nas = FALSE,
                               do_pmin = FALSE,
                               nThread = getOption("hutilscpp.nThread", 1L)) {
  x1 <- x[1]
  n <- length(x)
  xn <- x[n]
  # A monotone (arithmetic) sequence is sign-constant iff its endpoints are.
  all_nonnegative <- x1 >= 0 && xn >= 0
  all_nonpositive <- x1 <= 0 && xn <= 0
  # pmin with 0 leaves a non-positive sequence unchanged.
  if (do_pmin && all_nonpositive) {
    return(x)
  }
  # pmax with 0 leaves a non-negative sequence unchanged.
  if (!do_pmin && all_nonnegative) {
    return(x)
  }
  # pmax of a non-positive sequence (or pmin of a non-negative one) is all
  # zeros.
  if (!do_pmin && all_nonpositive) {
    return(allocate0_int(n, nThread = nThread))
  }
  if (do_pmin && all_nonnegative) {
    return(allocate0_int(n, nThread = nThread))
  }
  # Mixed signs: locate the zero crossing from the common difference.
  d <- x[2] - x[1]
  # Should zeroes be to the
  # left of the root or
  # 0 0 0 1 2 3 4
  # right?
  # -2 -1 0 0 0 0 0
  #
  zero_left <- XOR(do_pmin, x1 < 0)
  root <- (-x1 / d)
  allocate_with_root(n, a = a, root, zero_left, do_pmin = do_pmin, nThread = nThread)
}
|
d0ac438a86a59f917248b3c74963fa226cc931a2
|
48b86279b723e55b97524d32c12917d2f813cb78
|
/FMOL_VS_LIBCOMPLEXITY/botseq_efficiency.R
|
066951ba886dd8f90d4977325da5c620211c299c
|
[] |
no_license
|
fa8sanger/NanoSeq_Paper_Code
|
74b74b4ba6aca7c121e5d61d742c8f6c4a4e1984
|
f50603370bd95f130872e87af3e30e8581853bf9
|
refs/heads/main
| 2023-03-19T23:09:32.747787
| 2021-03-02T10:56:09
| 2021-03-02T10:56:09
| 311,312,983
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,587
|
r
|
botseq_efficiency.R
|
###########################################################################################
## Simulations to estimate the optimal bottleneck size given an estimate of
## library complexity.
## Equations below are assuming 1 fmol sequenced equates to 1e8 molecules
## The relationship between fmol and molecules was derived empirically (see next section)
###########################################################################################
# Zero-truncated Poisson density (defined only for x >= 1): models the
# duplicate family size distribution.  Renormalises the Poisson pmf after
# removing the zero class; entries with x == 0 are returned as NA.
dpoistrunc = function(x, lambda) {
  dens <- dpois(x, lambda) / (1 - dpois(0, lambda))
  dens[x == 0] <- NA
  dens
}
# The mean sequencing cost of a family is the mean of the zero-truncated Poisson:
# lambda/(1-exp(-lambda))
# Efficiency: The fraction of families of size>=2 divided by the average size (i.e.
# sequencing cost) of a family, squared (as we need coverage from both strands)
ratio = seq(0,20,by=0.1) # Sequencing output / library complexity
# ppois(q = 2 - 0.1, ...) = P(family size >= 2); lambda = ratio/2 because
# each strand receives half the reads.
efficiency = (ppois(q=2-0.1, lambda=ratio/2, lower.tail=F)/(1-dpois(0, ratio/2)))^2 / (ratio/(1-exp(-ratio)))
# Duprate can be simply calculated using the mean of the family size (from the zero-truncated
# Poisson): 1-1/mean = 1-(1-exp(-r))/r
duprate = 1-(1-exp(-ratio))/ratio
# Three-panel figure: duplicate rate vs ratio, efficiency vs duplicate rate,
# efficiency vs ratio.  Solid red lines mark the optimum; dashed red lines
# bound the region achieving >= 80% of the maximum efficiency.
dev.new(width=9, height=3)
par(mfrow=c(1,3))
plot(ratio, duprate, ylim=c(0,1), type="l", xlab="Seq ratio (sequencing yield / lib complexity)", ylab="Duplicate rate", las=1)
abline(h=1, lty=2)
#lines(ratio, pmax(0, 1-1/ratio), col="palegreen3", lty=2) # Simplistic expectation (ratio=10 would tend to yield ~90% duplicate rates)
plot(duprate, efficiency, type="l", xlim=c(0,1), xlab="Duplicate rate", ylab="Efficiency (duplex bp / total bp)", las=1)
opt_duprate = duprate[which.max(efficiency)] # Optimal duplicate rate appears to be 0.805
ind = which(efficiency > max(efficiency, na.rm=T)*0.8)
abline(v=opt_duprate, col="red")
abline(v=duprate[c(min(ind),max(ind))], col="red", lty=2)
semiopt_duprate = duprate[c(min(ind),max(ind))]
plot(ratio, efficiency, type="l", xlab="Seq ratio (sequencing yield / lib complexity)", ylab="Efficiency (duplex bp / total bp)", las=1)
opt_ratio = ratio[which.max(efficiency)] # Optimal seq ratio appears to be 5.1
ind = which(efficiency > max(efficiency, na.rm=T)*0.8)
abline(v=opt_ratio, col="red")
abline(v=ratio[c(min(ind),max(ind))], col="red", lty=2)
semiopt_ratio = ratio[c(min(ind),max(ind))]
# Save the on-screen device to PDF, then close both devices.
dev.copy(pdf, file="Nanoseq_efficiency_plots.pdf", width=9, height=3, useDingbats=F); dev.off(); dev.off()
###########################################################################################
## Linear relationship between library complexity and qPCR quantification
## Our empirical results indicate that 1 fmol ~ 1e8 molecules
## (our size selection aims at 250-500 bps)
###########################################################################################
## MySeq library (Aug 2019)
# Columns used: fmol (qPCR yield) and unique_fragments (observed complexity).
qpcr = read.table("miseq_concentrations_Pete_Ellis_20190809.txt", header=1, sep="\t", stringsAsFactors=F)
plot(qpcr$fmol, qpcr$unique_fragments, xlab="Library yield (fmol)", ylab="Unique fragments", las=1, pch=20, col="cadetblue4")
# Regression through the origin (-1 drops the intercept): fragments per fmol.
model = lm(qpcr$unique_fragments ~ qpcr$fmol -1)
abline(model, col="orange", lty=2)
summary(model)
coefficients(model) # slope (unique fragments per fmol): (9.8e7 unique fragments / fmol)
confint(model) # Confidence intervals for the slope (CI95%: 9.5e7, 10.1e7)
#################################################################
# Using the median of the fragment/fmol relationship
median(qpcr$unique_fragments/qpcr$fmol) # 10.27e7 instead of 9.8e7
#################################################################
# Based on the median and the regression we establish that the number of fragments
# per fmol is ~ 1e8 (with our size selection for fragments between 250-500 bps)
#################################################################
#################################################################
# Calculating the volume required to do a deep NanoSeq experiment (equivalent to a 15x standard)
# The 5.1 factor is the duplicate ratio estimated in the analytical simulation
qpcr$fmol_per_ul = qpcr$fmol / 12.5 # 12.5 ul taken
coverage = 15 # coverage desired in genome fold units (e.g. 15x)
fragmperfmol = 1e8
# Volume (ul) needed: desired genome equivalents * dup factor / concentration.
qpcr$vol = round(coverage * 1e7 / (qpcr$fmol_per_ul * fragmperfmol * 5.1))
qpcr$vol_fromMiSeq = round(coverage * 1e7 * 12.5 / (qpcr$unique_fragments * 5.1), digits = 1)
|
aa47c82f9bab221fbbc4ca2bc48c51fc0b4b5a3f
|
22cc49efa5a5bdb444affb0f2f99a25ff48e6bce
|
/VisualizationLearningRate/program/graphing_true_loop.R
|
8067f8d5e4f5ff5c102f565bef7f40c5ea739ba2
|
[] |
no_license
|
LearnSphere/WorkflowComponents
|
8bfa0aa1880c6c974e16e9a0651fdad4df7aaea2
|
3a79e26a4c520ee20de5dd07b27e775f781ce9f4
|
refs/heads/dev
| 2023-08-17T01:24:45.378740
| 2023-08-11T15:52:18
| 2023-08-11T15:52:18
| 59,806,725
| 31
| 19
| null | 2023-08-10T17:03:46
| 2016-05-27T05:31:01
|
HTML
|
UTF-8
|
R
| false
| false
| 17,030
|
r
|
graphing_true_loop.R
|
#test on command line
#"C:/Program Files/R/R-3.6.1/bin/Rscript.exe" graphing_true_loop.R -programDir "./" -workingDir "./" -learningCurve KC -useMedianMaxOpp Yes -model_nodeIndex 0 -model_fileIndex 0 -model "KC (Area)" -model_nodeIndex 0 -model_fileIndex 0 -model "KC (Circle-Collapse)" -modelingMethod iAFM -node 0 -fileIndex 0 "ds76_student_step_export.txt"
#"C:/Program Files/R/R-3.6.1/bin/Rscript.exe" graphing_true_loop.R -programDir "./" -workingDir "./" -learningCurve KC -useMedianMaxOpp Yes -model_nodeIndex 0 -model_fileIndex 0 -model "KC (LFA_AIC_LIB_Model0)" -modelingMethod iAFM -node 0 -fileIndex 0 "ds99_student_step_All_Data_211_2021_1004_101023.txt"
options(warn = -1)
#load packages
suppressWarnings(suppressMessages(library(ggplot2)))
suppressWarnings(suppressMessages(library(lme4)))
suppressWarnings(suppressMessages(library(data.table)))
suppressWarnings(suppressMessages(library(dplyr)))
options(dplyr.summarise.inform = FALSE)
#rm(list=ls())
#read arguments
args <- commandArgs(trailingOnly = TRUE)
#initialize variables (filled in by the argument-parser loop below)
workingDirectory = NULL
componentDirectory = NULL
inputFile = NULL
learningPlotType = NULL #KC or Student
modelingMethod = NULL
models = list()  # one entry per repeated -model flag
useMedianMaxOpp = NULL
i = 1
#parsing arguments
# Walk the argv vector, consuming flag/value pairs.  `i` is advanced inside
# each branch for multi-token options, then once more at the bottom of the
# loop body.
while (i <= length(args)) {
  #get input file
  if (args[i] == "-node") {
    # Syntax follows: -node m -fileIndex n <infile>
    if (i > length(args) - 4) {
      stop("node and fileIndex must be specified")
    }
    nodeIndex <- args[i+1]
    fileIndex = NULL
    fileIndexParam <- args[i+2]
    if (fileIndexParam == "-fileIndex") {
      fileIndex <- args[i+3]
    }
    # NOTE(review): if "-fileIndex" is absent, fileIndex stays NULL and the
    # `fileIndex == 0` comparison below yields a zero-length logical, which
    # errors inside if()/&& — verify callers always pass -fileIndex.
    if (nodeIndex == 0 && fileIndex == 0) {
      inputFile <- args[i+4]
    }
    i = i+4
  } else if (args[i] == "-workingDir") {
    if (length(args) == i) {
      stop("workingDir name must be specified")
    }
    # This dir is the "working directory" for the component instantiation, e.g. /workflows/<workflowId>/<componentId>/output/.
    workingDirectory = args[i+1]
    i = i+1
  } else if (args[i] == "-programDir") {
    if (length(args) == i) {
      stop("programDir name must be specified")
    }
    # This dir is WorkflowComponents/<ComponentName>/
    componentDirectory = args[i+1]
    i = i+1
  }else if (args[i] == "-learningCurve") {
    if (length(args) == i) {
      stop("learningCurve Must be specified.")
    }
    learningPlotType = args[i+1]
    i = i+1
  } else if (args[i] == "-modelingMethod") {
    if (length(args) == i) {
      stop("modelingMethod Must be specified.")
    }
    modelingMethod = args[i+1]
    i = i+1
  } else if (args[i] == "-useMedianMaxOpp") {
    if (length(args) == i) {
      stop("useMedianMaxOpp Must be specified.")
    }
    useMedianMaxOpp = args[i+1]
    # Normalise the yes/no string to a logical.
    if (tolower(useMedianMaxOpp) == "yes") {
      useMedianMaxOpp = TRUE
    } else {
      useMedianMaxOpp = FALSE
    }
    i = i+1
  }
  else if (args[i] == "-model") {
    if (length(args) == i) {
      stop("groupBy name must be specified")
    }
    # -model may be given multiple times; accumulate each KC model name.
    models <- c(models, args[i+1])
    i = i+1
  }
  i = i+1
}
#for test
# workingDirectory = "."
# componentDirectory = "."
# inputFile = "ds76_student_step_export.txt"
# learningPlotType = "Student" #KC or Student
# modelingMethod = "iAFM"
# model = 'KC (Circle-Collapse)'
# useMedianMaxOpp = TRUE
#set wd
#rm(list=ls())
#use workingdir
#setwd(workingDirectory)
#load functions
#in test folder for development
#source(file="models_function.R")
#in WF environment
# Load the model-fitting helpers (AFM, bAFM, iAFM, ...) shipped with the component.
source(file=paste(componentDirectory, "program/models_function.R", sep=""))
# Output paths: everything is written into the workflow working directory.
prediction_file = paste(workingDirectory, "predictions.txt", sep="")
pdf_file = paste(workingDirectory, "learningRatePlot.pdf", sep="")
result_sum_file = paste(workingDirectory, "analysis-summary.txt", sep="")
#prediction df
# Accumulators filled inside the per-model loop below.
df_pred = NULL
plots = list()
df_result = NULL
plot_cnt = 1
# Main loop: for each requested KC model, fit the chosen AFM variant,
# build predicted learning curves (per student or per KC), and accumulate
# predictions, model summaries, and one ggplot per model.
for(model in models){
  # for testing
  # model = "KC (LFA_AIC_LIB_Model0)"
  # inputFile="ds99_student_step_All_Data_211_2021_1004_101023.txt"
  #
  #get the median of max opp across all KCs
  # Extract the KC model name from inside the parentheses of "KC (<name>)".
  kcm = substr(model, unlist(gregexpr('\\(',model))[1]+1, unlist(gregexpr('\\)',model))[1]-1)
  response = "First Attempt"
  opportunity = paste("Opportunity (", kcm, ")", sep="")
  individual = "Anon Student Id"
  #get median max opp of each student or KC
  df_student_step = suppressWarnings(fread(file=inputFile,verbose = F)) #the file to import
  # Rename the dataset-specific columns to fixed names for the dplyr chain.
  colnames(df_student_step)[which(names(df_student_step) == model)] <- "kcm"
  colnames(df_student_step)[which(names(df_student_step) == opportunity)] <- "opp"
  colnames(df_student_step)[which(names(df_student_step) == individual)] <- "stu_id"
  df_kc_student_max_opp = df_student_step %>%
    dplyr::group_by(stu_id,kcm) %>%
    dplyr::summarise(max_opp = max(opp))
  df_kc_student_max_opp <- na.omit(df_kc_student_max_opp)
  df_student_median_max_opp = df_kc_student_max_opp %>%
    dplyr::group_by(stu_id)%>%
    dplyr::summarise(median_max_opp = as.integer(median(max_opp)))
  df_kc_median_max_opp = df_kc_student_max_opp %>%
    dplyr::group_by(kcm) %>%
    dplyr::summarise(median_max_opp = as.integer(median(max_opp)))
  max_student_median_max_opp = max(df_student_median_max_opp$median_max_opp)
  max_kc_median_max_opp = max(df_kc_median_max_opp$median_max_opp)
  # Fit the requested model family (functions come from models_function.R).
  m = NULL
  if (modelingMethod == "AFM") {
    m <- AFM(inputFile,model,kcm,response,opportunity,individual)
  } else if (modelingMethod == "bAFM") {
    m <- bAFM(inputFile,model,kcm,response,opportunity,individual)
  } else if (modelingMethod == "iAFM") {
    m <- iAFM(inputFile,model,kcm,response,opportunity,individual)
  # } else if (modelingMethod == "iAFM easier") {
  #   m <- iAFM_easier(inputFile,model,kcm,response,opportunity,individual)
  # } else if (modelingMethod == "iAFM harder") {
  #   m <- iAFM_harder(inputFile,model,kcm,response,opportunity,individual)
  # } else if (modelingMethod == "iAFM restrict") {
  #   m <- iAFM_restrict(inputFile,model,kcm,response,opportunity,individual)
  } else if (modelingMethod == "iAFM full") {
    m <- iAFM_full(inputFile,model,kcm,response,opportunity,individual)
  } else if (modelingMethod == "itAFM") {
    m <- itAFM(inputFile,model,kcm,response,opportunity,individual)
  } else if (modelingMethod == "tAFM") {
    m <- tAFM(inputFile,model,kcm,response,opportunity,individual)
  }
  #m <- iAFM(inputFile,model,kcm,response,opportunity,individual)
  # Extract per-student / per-KC / overall parameter tables from the fit.
  ds_student <- m$stud.params
  ds_kc <- m$kc.params
  ds_overall <- m$overall.params
  ds_student$Intercept <- as.numeric(as.character(ds_student$Intercept))
  ds_student$Slope <- as.numeric(as.character(ds_student$Slope))
  ds_kc$Intercept <- as.numeric(as.character(ds_kc$Intercept))
  ds_kc$Slope <- as.numeric(as.character(ds_kc$Slope))
  ds_overall$maineffect_intercept <- as.numeric(as.character(ds_overall$maineffect_intercept))
  ds_overall$maineffect_slope <- as.numeric(as.character(ds_overall$maineffect_slope))
  # Random effects plus main effects give per-unit intercept/slope; the
  # logistic transform turns intercepts into initial success probabilities.
  ds_student$Intercept_corre <- ds_student$Intercept + ds_overall$maineffect_intercept
  ds_student$Slope_corre <- ds_student$Slope + ds_overall$maineffect_slope
  ds_student$initial_performance <- 1/(1+exp(-ds_student$Intercept_corre))
  ds_student$med_initial <- median(ds_student$initial_performance)
  ds_student$x <- median(ds_student$initial_performance)
  ds_kc$Intercept_corre <- ds_kc$Intercept + ds_overall$maineffect_intercept
  ds_kc$Slope_corre <- ds_kc$Slope + ds_overall$maineffect_slope
  ds_kc$initial_performance <- 1/(1+exp(-ds_kc$Intercept_corre))
  ds_kc$med_initial <- median(ds_kc$initial_performance)
  df <- as.data.table(m$df)
  # Observed success rate per opportunity (1-based for plotting).
  sumdf <- df[,.(success=mean(success)),by=.(opportunity)]
  sumdf <- sumdf[-which(is.na(sumdf$opportunity)),]
  sumdf$opportunity <- sumdf$opportunity+1
  if (learningPlotType == "Student") {
    if (useMedianMaxOpp) {
      #use max_student_median_max_opp to decide the x-axis
      # NOTE(review): this builds the prediction grid by string concatenation
      # and eval(parse()); rep(0:(m-1), each = n) appears equivalent —
      # confirm before simplifying.  Also note this loop reuses `i`, which
      # was the argument-parser index (harmless here, but shadows it).
      conc_str = ""
      for (i in 0:(max_student_median_max_opp-1)) {
        this_str = paste("rep(", i, ",length(unique(df$individual)))", sep="")
        if (conc_str == "") {
          conc_str = this_str
        } else {
          conc_str = paste(conc_str, this_str, sep=",")
        }
      }
      #ex: newData <- data.table("opportunity"=c(rep(0,length(unique(df$individual))),rep(1,length(unique(df$individual))),...),"individual"=rep(unique(df$individual),16),"KC"=rep("new",length(unique(df$individual))*16))
      cmd_str = paste('newData <- data.table("opportunity"=c(', conc_str, '),"individual"=rep(unique(df$individual),', max_student_median_max_opp, '),"KC"=rep("new",length(unique(df$individual))*', max_student_median_max_opp, '))', sep="")
      eval(parse(text=cmd_str))
    } else {
      # Fixed 16-opportunity grid (0..15) when not using median max opp.
      newData <- data.table("opportunity"=c(rep(0,length(unique(df$individual))),rep(1,length(unique(df$individual))),rep(2,length(unique(df$individual))),rep(3,length(unique(df$individual))),rep(4,length(unique(df$individual))),rep(5,length(unique(df$individual))),rep(6,length(unique(df$individual))),rep(7,length(unique(df$individual))),rep(8,length(unique(df$individual))),rep(9,length(unique(df$individual))),rep(10,length(unique(df$individual))),rep(11,length(unique(df$individual))),rep(12,length(unique(df$individual))),rep(13,length(unique(df$individual))),rep(14,length(unique(df$individual))),rep(15,length(unique(df$individual)))),"individual"=rep(unique(df$individual),16),"KC"=rep("new",length(unique(df$individual))*16))
    }
    # NOTE(review): the trailing comma in this predict() call passes an
    # empty argument into `...` — apparently tolerated, but worth removing.
    newData$pred <-predict(m$model,newData,type="response",allow.new.levels=TRUE,)
    # lr_plot = ggplot(data=newData,
    #                  aes(opportunity,pred,colour=individual))+
    #   theme_bw() + geom_line() + theme(legend.position = "none")
    newData$pred_lodds <- log(newData$pred/(1-newData$pred))
    newData$KCM = kcm
    if (is.null(df_pred)) {
      df_pred = newData
    } else {
      df_pred <- rbind(df_pred, newData)
    }
    if (is.null(df_result)) {
      df_result = ds_overall
    } else {
      df_result <- rbind(df_result, ds_overall)
    }
    if (useMedianMaxOpp) {
      #merge with df_student_median_max_opp and take only the rows that are less than student's median max opp
      newData_median_max_opp = newData %>%
        inner_join(df_student_median_max_opp, by = c("individual" = "stu_id"))
      newData_median_max_opp = newData_median_max_opp[newData_median_max_opp$opportunity <= newData_median_max_opp$median_max_opp,]
      lr_plot <- ggplot(data=newData_median_max_opp,
                        aes(opportunity,pred_lodds,shape=individual))+
        #theme_bw() + geom_line() + theme(legend.position = "none") + scale_y_continuous(name = "Performance (Log Odds)", limits=c(-3,3), breaks = c(-3,-2,-1,0,1,2,3), labels = c(0.04,0.12,0.27,0.50,0.73,0.88,0.95)) + scale_x_continuous(name = "Opportunity", limits=c(0,10), breaks = c(0,1,2,3,4,5,6,7,8,9,10), labels = c(0,1,2,3,4,5,6,7,8,9,10)) + ggtitle(paste("Student Learning Rate\n"," (",kcm,")",sep="")) + theme(text = element_text(size = 20), plot.title = element_text(hjust = 0.5))
        theme_bw() + geom_line() + theme(legend.position = "none") + scale_y_continuous(name = "Performance (Log Odds)", limits=c(-3,3), breaks = c(-3,-2,-1,0,1,2,3), labels = c(0.04,0.12,0.27,0.50,0.73,0.88,0.95)) + scale_x_continuous(name = "Opportunity") + ggtitle(paste("Student Learning Rate\n"," (",kcm,")",sep="")) + theme(text = element_text(size = 20), plot.title = element_text(hjust = 0.5))
    } else {
      lr_plot <- ggplot(data=newData[newData$opportunity<11,],
                        aes(opportunity,pred_lodds,shape=individual))+
        theme_bw() + geom_line() + theme(legend.position = "none") + scale_y_continuous(name = "Performance (Log Odds)", limits=c(-3,3), breaks = c(-3,-2,-1,0,1,2,3), labels = c(0.04,0.12,0.27,0.50,0.73,0.88,0.95)) + scale_x_continuous(name = "Opportunity", limits=c(0,10), breaks = c(0,1,2,3,4,5,6,7,8,9,10), labels = c(0,1,2,3,4,5,6,7,8,9,10)) + ggtitle(paste("Student Learning Rate\n"," (",kcm,")",sep="")) + theme(text = element_text(size = 20), plot.title = element_text(hjust = 0.5))
    }
    plots[[plot_cnt]]=lr_plot
    #ggsave(filename = paste("student_lr_ds_",datasets$Dataset[i],".png",sep=""))
  } else if (learningPlotType == "KC") {
    if (useMedianMaxOpp) {
      #use max_kc_median_max_opp to decide the x-axis
      conc_str = ""
      for (i in 0:(max_kc_median_max_opp-1)) {
        this_str = paste("rep(", i, ",length(unique(df$KC)))", sep="")
        if (conc_str == "") {
          conc_str = this_str
        } else {
          conc_str = paste(conc_str, this_str, sep=",")
        }
      }
      #ex: newData_kc <- data.table("opportunity"=c(rep(0,length(unique(df$KC))),rep(1,length(unique(df$KC))),...),"KC"=rep(unique(df$KC),16),"individual"=rep("new",length(unique(df$KC))*16))
      cmd_str = paste('newData_kc <- data.table("opportunity"=c(', conc_str, '),"KC"=rep(unique(df$KC),', max_kc_median_max_opp, '),"individual"=rep("new",length(unique(df$KC))*', max_kc_median_max_opp, '))', sep="")
      eval(parse(text=cmd_str))
    } else {
      newData_kc <- data.table("opportunity"=c(rep(0,length(unique(df$KC))),rep(1,length(unique(df$KC))),rep(2,length(unique(df$KC))),rep(3,length(unique(df$KC))),rep(4,length(unique(df$KC))),rep(5,length(unique(df$KC))),rep(6,length(unique(df$KC))),rep(7,length(unique(df$KC))),rep(8,length(unique(df$KC))),rep(9,length(unique(df$KC))),rep(10,length(unique(df$KC))),rep(11,length(unique(df$KC))),rep(12,length(unique(df$KC))),rep(13,length(unique(df$KC))),rep(14,length(unique(df$KC))),rep(15,length(unique(df$KC)))),"KC"=rep(unique(df$KC),16),"individual"=rep("new",length(unique(df$KC))*16))
    }
    # Drop rows with an empty KC label.
    if(""%in%unique(newData_kc$KC)){
      newData_kc <- newData_kc[-which(newData_kc$KC==""),]
    }
    newData_kc$pred <-predict(m$model,newData_kc,type="response",allow.new.levels=TRUE,)
    newData_kc$pred_lodds <- log(newData_kc$pred/(1-newData_kc$pred))
    newData_kc$KCM = kcm
    if (is.null(df_pred)) {
      df_pred = newData_kc
    } else {
      df_pred <- rbind(df_pred, newData_kc)
    }
    if (is.null(df_result)) {
      df_result = ds_overall
    } else {
      df_result <- rbind(df_result, ds_overall)
    }
    #write.csv(newData_kc,"newData_kc.csv", row.names = FALSE)
    if (useMedianMaxOpp) {
      #merge with df_kc_median_max_opp and take only the rows that are less than kc's median max opp
      newData_kc_median_max_opp = newData_kc %>%
        inner_join(df_kc_median_max_opp, by = c("KC" = "kcm"))
      newData_kc_median_max_opp = newData_kc_median_max_opp[newData_kc_median_max_opp$opportunity <= newData_kc_median_max_opp$median_max_opp,]
      lr_plot <- ggplot(data=newData_kc_median_max_opp,
                        aes(opportunity,pred_lodds,shape=KC))+
        theme_bw() + geom_line() + theme(legend.position = "none") + scale_y_continuous(name = "Performance (Log Odds)", breaks = c(-3,-2,-1,0,1,2,3), labels = c(0.04,0.12,0.27,0.50,0.73,0.88,0.95),limits = c(-3,3)) + scale_x_continuous(name = "Opportunity") + ggtitle(paste("KC Learning Rate\n"," (",kcm,")",sep="")) + theme(text = element_text(size = 20), plot.title = element_text(hjust = 0.5))
    } else {
      lr_plot = ggplot(data=newData_kc[newData_kc$opportunity<11,],
                       aes(opportunity,pred_lodds,shape=KC))+
        theme_bw() + geom_line() + theme(legend.position = "none") + scale_y_continuous(name = "Performance (Log Odds)", breaks = c(-3,-2,-1,0,1,2,3), labels = c(0.04,0.12,0.27,0.50,0.73,0.88,0.95),limits = c(-3,3)) + scale_x_continuous(name = "Opportunity", limits=c(0,10), breaks = c(0,1,2,3,4,5,6,7,8,9,10), labels = c(0,1,2,3,4,5,6,7,8,9,10)) + ggtitle(paste("KC Learning Rate\n"," (",kcm,")",sep="")) + theme(text = element_text(size = 20), plot.title = element_text(hjust = 0.5))
    }
    plots[[plot_cnt]]=lr_plot
    #ggsave(filename = paste("kc_lr_ds_",datasets$Dataset[i],".png",sep=""))
  }
  plot_cnt = plot_cnt + 1
}
#write prediction
write.table(df_pred,prediction_file, row.names = FALSE, sep="\t")
#write summary
write.table(df_result,result_sum_file,row.names = FALSE, sep="\t")
#write plot
# Print each plot explicitly: a bare top-level `plots` relies on R's
# auto-printing, which does not happen when this script is source()d,
# leaving the PDF empty.
pdf(pdf_file)
for (p in plots) {
  print(p)
}
dev.off()
|
3e7869d63c4f06df0c1573e1edd5cbbfc4f25d48
|
3f8c68d200144c3ddf4d1855c52355af6bf887bc
|
/Travel_map_maker/server.R
|
b2a2acaa907095db77b91d461261affb35fe2c13
|
[
"MIT"
] |
permissive
|
JakubR12/spatial_project
|
d052fb37aa6d1744c6b04d9969e6a218c543fdb6
|
fae3bd5c23220f57c00b7ba84f87c8865c028851
|
refs/heads/main
| 2023-06-09T05:59:24.304745
| 2021-06-10T11:37:20
| 2021-06-10T11:37:20
| 355,158,716
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,573
|
r
|
server.R
|
library(shiny) # For running the app
library(tidyverse) #for data wrangling
library(shinyalert) #For error pop ups
library(leaflet) #For making the map
library(mapview) #for saving the map
library(colourpicker) #For color selection in the app
library(leafpop) #For image pop ups
library(varhandle) #for checking numeric values
library(sp) #For converting dms values to numeric
library(htmltools) #For renddering labels as html
source("utils.R") #sourcing custom functions
#install phantomjs if not already installed.
# phantomjs is required by webshot/mapview::mapshot() to render the map.
if (!webshot::is_phantomjs_installed()){webshot::install_phantomjs()}
# Allow file uploads up to 30 MB.
options(shiny.maxRequestSize = 30 * 1024 ^ 2)
#define server logic
shinyServer(function(input, output) {
#the csv loader
filedata <- reactive({
infile <- input$file1
if (is.null(infile)) {
return()
}
if (input$dms == T) {
#read data
df <-
read.csv(infile$datapath, sep = input$seperator)
#if it does not have the longitude columns, it can't convert the values
if (is.null(df$long)|is.null(df$lat)) {
return(df)
}
#convert to numeric
else if (is.numeric(df$lat) & is.numeric(df$lat)) {
return(df)
}
else {
#parse dms and convert to decimal degrees
df = df %>% mutate(lat =
as.numeric(char2dms(
df$lat, input$chd, input$chm, input$chs
)),
long =
as.numeric(char2dms(
df$long, input$chd, input$chm, input$chs
)))
} <
return(df)
}
#just load data normally
data <-
read.csv(infile$datapath, sep = input$seperator) #infile$datapath
return(data)
})
  #Diagnose error button
  # Run the shared validator (error_check, from utils.R) against the current
  # data whenever the user presses the diagnose button; it raises shinyalert
  # pop-ups describing format problems — TODO confirm against utils.R.
  observeEvent(input$diagnose, {
    if (is.null(input$file1)) {
      return ()
    }
    df = filedata()
    #label_col = description()
    error_check(df, input$description)
  })
  #Monitor for errors when uploading data
  # Same validation as the diagnose button, triggered automatically on each
  # new upload.
  observeEvent(input$file1, {
    if (is.null(input$file1)) {
      return ()
    }
    df = filedata()
    #label_col = description()
    error_check(df, input$description)
  })
  #reactive value for approving the data format
  # TRUE iff the uploaded table has lat/long plus the chosen description
  # column, all coordinates parse as numbers, and every coordinate lies in
  # the valid longitude [-180, 180] / latitude [-90, 90] range.
  format_approved <- reactive ({
    if (is.null(input$file1)) {
      return ()
    }
    df = filedata()
    len = length(df[, 1])
    #format requirements
    if (sum(c("lat", "long", input$description) %in% colnames(df)) == 3 &
        sum(check.numeric(df$long) + check.numeric(df$lat)) == len * 2 &
        sum(df$long <= 180 &
            df$long >= -180) == len &
        sum(df$lat <= 90 & df$lat >= -90) == len) {
      TRUE
    }
    else {
      FALSE
    }
  })
  #reactive value that can handle the relative file paths for the images
  # Joins the uploaded image files (input$file2, a fileInput data frame)
  # onto the data by matching Image_name, so each marker can show its
  # image's server-side datapath.
  merged_df <- reactive({
    if (is.null(input$file1)) {
      return ()
    }
    df = filedata() #fetch data
    if (!is.null(input$file2)) {
      # fileInput supplies a `name` column; align it with the data's
      # Image_name column before merging.
      df = input$file2 %>%
        rename(Image_name = name) %>%
        merge(df, by = "Image_name")
    }
    df
  })
#The file structure output
output$fileob <- renderPrint({
if (is.null(input$file1)) {
return ("No data has been uploaded yet")
}
df = filedata()
str(df)
})
#creating interactive table for the data
output$contents = renderDataTable({
if (is.null(input$file1)) {
return ()
}
df = filedata()
return(df)
})
  # reactive object for the leaflet map
  # Builds the full leaflet map: Esri base layers, layer control, minimap,
  # measuring tool and title are always added; data markers (and optional
  # image pop-ups) are added only once an upload has passed format_approved().
  main_map <- reactive({
    #call the dataframe with the full image paths
    df <- merged_df()
    #initiate map
    start_map <- leaflet()
    #grab all esri tile layers
    esri <- grep("^Esri", providers, value = TRUE)
    #add esri tile layers
    for (provider in esri) {
      start_map <-
        start_map %>% addProviderTiles(provider,
                                       group = provider,
                                       options = providerTileOptions(noWrap = !input$wrap_map))
    }
    #use html code to format the title
    map_title <-
      paste(
        "<b style='color:",
        input$title_color,
        ";font-size:",
        input$title_size
        ,
        "px;font-family:Comic Sans MS'>",
        #Comic Sans FTW
        input$map_title
        ,
        "<b/>",
        sep = ""
      )
    #create the basic map without data
    plot <- start_map %>%
      #add layer selector
      addLayersControl(baseGroups = names(esri),
                       options = layersControlOptions(collapsed = T)) %>%
      #add minimap
      addMiniMap(tiles = esri[[1]],
                 toggleDisplay = TRUE,
                 position = "bottomright") %>%
      #add measuring tool
      addMeasure(
        position = "bottomright",
        primaryLengthUnit = "meters",
        primaryAreaUnit = "sqmeters",
        activeColor = "#3D535D",
        completedColor = "#7D4479"
      ) %>%
      #add title
      addControl(map_title, "bottomleft")
    #use base map if no data has been uploaded
    if (is.null(input$file1)) {
      return(plot)
    }
    #Create the map with markers if the data has been uploaded and approved
    # if the format has been approved:
    if (req(format_approved()) == TRUE) {
      #name of the label column
      # Marker styling comes entirely from the UI controls.
      icons <- makeAwesomeIcon(
        text = fa(input$icon),
        markerColor = input$markerColor,
        iconColor = input$icon_color,
        spin = input$icon_spin,
        squareMarker = input$square_markers
      )
      #render labels with html
      if (input$html == T) {
        html_labels <- lapply(df[input$description][, 1], HTML)
      }
      else{
        html_labels <- df[input$description][, 1]
      }
      #add markers to plot
      marker_plot <- plot %>%
        addAwesomeMarkers(
          data = df,
          lng = df$long,
          lat = df$lat,
          label = html_labels,
          icon = icons,
          group = "pnts",
          #clusterOptions = markerClusterOptions(),
          options = markerOptions(opacity = 0.8)
        )
    }
    #add images if imges has been uploaded
    # df$datapath exists only when image files were uploaded and merged in.
    if (req(format_approved()) == TRUE & !is.null(df) & !is.null(df$datapath)) {
      return(marker_plot %>%
               addPopupImages(df$datapath, "pnts", 150))
    }
    #else return map just with markers
    else if (req(format_approved()) == TRUE) {
      return(marker_plot)
    } #otherwise just return the base map
    else {
      return(plot)
    }
  })
  #render the map
  # Expose the reactive map to the UI.
  output$map <- leaflet::renderLeaflet({
    main_map()
  })
  #reactive object for saving the state of the map
  # Re-applies the user's current viewport (center, zoom) and visible layer
  # group on top of the base map, so exports match what is on screen.
  mymap <- reactive({
    # call the foundational Leaflet map
    main_map() %>%
      # store the view based on UI
      setView(
        lng = input$map_center$lng,
        lat = input$map_center$lat,
        zoom = input$map_zoom
      ) %>%
      showGroup(group = input$map_groups)
  })
  #download map as png saved as the state of the map
  # Screenshot the current map viewport with mapshot (needs phantomjs).
  output$downloadPlotPNG <- downloadHandler(
    filename = "data.png",
    content = function(file) {
      mapshot(
        x = mymap(),
        file = file,
        cliprect = "viewport",
        selfcontained = T
      )
    }
  )
#save html version of the map. The map should be self contained in the html to work wethink...
output$downloadPlotHTML <- downloadHandler(
filename = paste0(getwd(), "/map.html"),
content = function(file) {
mapshot(
x = mymap(),
cliprect = "viewport",
selfcontained = T,
url = file
)
}
)
})
|
eb7600b4c5697a9c00113ab38584f52559b373aa
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/gaston/R/bm_select.r
|
8b0ec0811b00d3d99802064e0a85425cb7cef369
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,725
|
r
|
bm_select.r
|
#' Keep the SNPs of a bed.matrix satisfying a condition
#'
#' `condition` is evaluated non-standardly (like `subset()`) with the SNP
#' annotation data frame `x@snps` as the data mask and the caller's frame as
#' the enclosure.  SNPs whose condition evaluates to `NA` are dropped, with
#' a warning reporting how many were removed.
select.snps <- function(x, condition) {
  if (!is(x, "bed.matrix")) stop("x is not a bed.matrix")
  # Non-standard evaluation: column names of x@snps are visible to `condition`.
  keep <- eval(substitute(condition), x@snps, parent.frame())
  undefined <- is.na(keep)
  n_undefined <- sum(undefined)
  if (n_undefined > 0) {
    warning(paste(n_undefined, 'SNP(s) with undefined condition are removed from bed.matrix'))
    keep <- keep & !undefined
  }
  # Column subsetting of a bed.matrix selects SNPs.
  x[, keep]
}
#' Keep the individuals of a bed.matrix satisfying a condition
#'
#' Counterpart of `select.snps()` for individuals: `condition` is evaluated
#' non-standardly in the pedigree data frame `x@ped`.  Individuals whose
#' condition evaluates to `NA` are dropped with a warning.
select.inds <- function(x, condition) {
  if (!is(x, "bed.matrix")) stop("x is not a bed.matrix")
  # Non-standard evaluation: column names of x@ped are visible to `condition`.
  keep <- eval(substitute(condition), x@ped, parent.frame())
  undefined <- is.na(keep)
  n_undefined <- sum(undefined)
  if (n_undefined > 0) {
    warning(paste(n_undefined, 'individual(s) with undefined condition are removed from bed.matrix'))
    keep <- keep & !undefined
  }
  # Row subsetting of a bed.matrix selects individuals.
  x[keep, ]
}
#' Evaluate a condition over the SNPs of a bed.matrix
#'
#' Returns the logical vector obtained by evaluating `condition` in `x@snps`
#' (non-standard evaluation, caller's frame as enclosure).  When
#' `na.to.false = TRUE` (default), `NA` results are mapped to `FALSE`.
test.snps <- function(x, condition, na.to.false = TRUE) {
  if (!is(x, "bed.matrix")) stop("x is not a bed.matrix")
  result <- eval(substitute(condition), x@snps, parent.frame())
  if (na.to.false && anyNA(result)) {
    # `v & !is.na(v)` turns every NA into FALSE while keeping TRUE/FALSE.
    result <- result & !is.na(result)
  }
  result
}
#' Evaluate a condition over the individuals of a bed.matrix
#'
#' Returns the logical vector obtained by evaluating `condition` in `x@ped`
#' (non-standard evaluation, caller's frame as enclosure).  When
#' `na.to.false = TRUE` (default), `NA` results are mapped to `FALSE`.
test.inds <- function(x, condition, na.to.false = TRUE) {
  if(!is(x, "bed.matrix")) stop("x is not a bed.matrix")
  w <- eval(substitute(condition), x@ped, parent.frame())
  miss <- is.na(w)
  # BUG FIX: `na.to.false` was accepted but ignored — the original tested
  # only `sum(miss) > 0`, so NAs were always coerced to FALSE even when the
  # caller asked to keep them.  Now mirrors test.snps().
  if(na.to.false && sum(miss) > 0)
    return(w & !miss)
  w
}
#' Indices of the SNPs of a bed.matrix satisfying a condition
#'
#' `condition` is evaluated non-standardly in `x@snps`; `which()` drops NAs,
#' so only indices where the condition is TRUE are returned.
which.snps <- function(x, condition) {
  if (!is(x, "bed.matrix")) stop("x is not a bed.matrix")
  mask <- eval(substitute(condition), x@snps, parent.frame())
  which(mask)
}
#' Indices of the individuals of a bed.matrix satisfying a condition
#'
#' `condition` is evaluated non-standardly in `x@ped`; `which()` drops NAs,
#' so only indices where the condition is TRUE are returned.
which.inds <- function(x, condition) {
  if (!is(x, "bed.matrix")) stop("x is not a bed.matrix")
  mask <- eval(substitute(condition), x@ped, parent.frame())
  which(mask)
}
# Chromosome-type predicates: vectorised membership tests of chromosome
# codes against the sets configured through the "gaston.*" options.
# With an unset option, getOption() returns NULL and %in% yields FALSE.
is.autosome <- function(chr) {
  chr %in% getOption("gaston.autosomes")
}
is.chr.x <- function(chr) {
  chr %in% getOption("gaston.chr.x")
}
is.chr.y <- function(chr) {
  chr %in% getOption("gaston.chr.y")
}
is.chr.mt <- function(chr) {
  chr %in% getOption("gaston.chr.mt")
}
|
3a41687b2cf0a70c794477d9ab302acb46742b5a
|
d82659c050908515ea01c71ef56dcfa143d81785
|
/R/GDS.R
|
e4c7f8b5b3cd09630d261eec16faed7e0a6ef70e
|
[
"MIT"
] |
permissive
|
fmhoeger/psyquest
|
2c882713ac827df5d6755a2508bee8184e22cc88
|
87d984510ec6c3c3278f0491f2ef3dd2c17a2a58
|
refs/heads/master
| 2023-07-22T17:10:08.471168
| 2021-01-21T22:31:22
| 2021-01-21T22:31:22
| 276,203,667
| 7
| 17
|
NOASSERTION
| 2023-03-14T19:09:36
| 2020-06-30T20:38:50
|
R
|
UTF-8
|
R
| false
| false
| 3,689
|
r
|
GDS.R
|
#' GDS
#'
#' Builds the GDS questionnaire as a psychTestR module, for incorporation
#' into a timeline alongside other tests or custom psychTestR pages.
#' For a standalone implementation of the GDS, consider using
#' \code{\link{GDS_standalone}()}.
#'
#' @param label (Character scalar) Three uppercase letter acronym of the
#' questionnaire.  This is also the label given to the results in the
#' output file.
#' @param dict (i18n_dict) The psyquest dictionary used for internationalisation.
#' @param subscales (Character vector) Subscales to include.  Possible
#' subscales are \code{"Body Awareness"}, \code{"Social Dancing"},
#' \code{"Urge to Dance"}, \code{"Dance Training"}, \code{"General"},
#' and \code{"Observational Dance Experience"}.  If no subscales are
#' provided, all subscales for the questionnaire are selected.
#' @param ... Further arguments to be passed to \code{\link{GDS}()}.
#' @export
GDS <- function(label = "GDS",
                dict = psyquest::psyquest_dict,
                subscales = c(),
                ...) {
  stopifnot(purrr::is_scalar_character(label))
  questionnaire_id <- "GDS"
  # Pull the (possibly subscale-filtered) item bank, then delegate to the
  # internal timeline builder.
  item_bank <- get_items(questionnaire_id, subscales = subscales)
  main_test_gds(questionnaire_id = questionnaire_id,
                label = label,
                items = item_bank,
                subscales = subscales)
}
# Internal builder for the GDS questionnaire timeline: one NAFC
# (n-alternative forced choice) page per item, wrapped in a psychTestR
# module together with the scoring step.
#
# questionnaire_id : fixed id ("GDS") used to build prompt text keys.
# label            : module/results label (usually also "GDS").
# items            : item bank rows (must have prompt_id and option_type).
# subscales        : forwarded to scoring().
#
# NOTE(review): the item lookup below keys on `label` while prompt texts key
# on `questionnaire_id`; these diverge if a caller passes a different label —
# confirm that is intended.
main_test_gds <- function(questionnaire_id, label, items, subscales) {
  elts <- c()
  # Dummy binding so the NSE use of `prompt_id` in pull()/filter() below is
  # not flagged as an undefined global by R CMD check.
  prompt_id <- NULL
  prompt_ids <- items %>% pull(prompt_id)
  # Question number = the digits embedded in each prompt id (e.g. "TGDS_0007").
  question_numbers <- as.numeric(gsub("[^0-9]", "", prompt_ids))
  # NOTE(review): seq_along(numeric(length(q))) is a roundabout seq_along(q).
  for (counter in seq_along(numeric(length(question_numbers)))) {
    question_label <- sprintf("q%d", question_numbers[counter])
    # Item bank row for this question, matched by its zero-padded id.
    item_bank_row <-
      items %>%
      filter(stringr::str_detect(prompt_id, sprintf("T%s_%04d", label, question_numbers[counter])))
    # option_type looks like "<n>-...": the leading number is the button count.
    # NOTE(review): num_of_options stays character here; `1:num_of_options`
    # and sprintf rely on implicit coercion — works, but fragile.
    num_of_options <- strsplit(item_bank_row$option_type, "-")[[1]][1]
    choices <- sprintf("btn%d_text", 1:num_of_options)
    choice_ids <- sprintf("T%s_%04d_CHOICE%d", label, question_numbers[counter], 1:num_of_options)
    # Questions 18-20 and 23 lay their answer buttons out horizontally.
    arrange_vertically <- TRUE
    if (question_numbers[counter] %in% c(18, 19, 20, 23)) {
      arrange_vertically <- FALSE
    }
    button_style <- "margin-bottom: 4px"
    min_width <- ''
    if (!question_numbers[counter] %in% c(18, 19, 20, 23)) {
      button_style <- paste(button_style, "min-width: 236px", sep="; ")
    } else {
      # NOTE(review): inside this branch the question number is one of
      # 18/19/20/23, so the membership test against 1..17/21/22 below can
      # never be TRUE; min_width therefore always stays '' and an empty
      # "min-width: " rule is appended.  Looks like dead code — confirm.
      if (question_numbers[counter] %in% c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                                           11, 12, 13, 14, 15, 16, 17, 21, 22)) {
        min_width <- '46px'
      }
      button_style <- paste(button_style,
                            stringr::str_interp("min-width: ${min_width}"),
                            sep="; ")
    }
    # One forced-choice page, wrapped in a timeline so the i18n dictionary
    # applies to its prompt and button labels.
    item_page <- psychTestR::new_timeline(
      psychTestR::NAFC_page(
        label = question_label,
        prompt = get_prompt(
          counter,
          length(question_numbers),
          sprintf("T%s_%04d_PROMPT", questionnaire_id, question_numbers[counter])
        ),
        choices = choices,
        arrange_vertically = arrange_vertically,
        button_style = button_style,
        labels = map(choice_ids, psychTestR::i18n)
      ),
      dict = psyquest::psyquest_dict
    )
    elts <- psychTestR::join(elts, item_page)
  }
  # Assemble the module: pages + scoring, bracketed by begin/end markers.
  psychTestR::join(psychTestR::begin_module(label),
                   elts,
                   scoring(questionnaire_id, label, items, subscales),
                   psychTestR::end_module())
}
|
ca32f39d34032681122ac704819565831dda354c
|
d55554b1739a15e3bef9fb46fc7d6e44af29326d
|
/tests/err1.R
|
73882dd0c60ac4762e6b75ce243eb4f3ae0eddc6
|
[] |
no_license
|
duncantl/Rtesseract
|
82ea53682360f44226444e98521e6d4cf04f609b
|
82376f7fc39ed4447562f125ae20369ba01ff918
|
refs/heads/master
| 2022-04-28T01:23:36.544282
| 2022-03-08T19:12:09
| 2022-03-08T19:12:09
| 34,768,868
| 15
| 3
| null | 2022-03-08T19:13:01
| 2015-04-29T02:52:02
|
R
|
UTF-8
|
R
| false
| false
| 114
|
r
|
err1.R
|
# A "testExit" file in the working directory is taken as evidence that the
# sibling test err.R aborted the R process (presumably libtesseract called
# exit() — see the message below); fail loudly here so the breakage is
# attributed to the right test.
marker_file <- "testExit"
if (file.exists(marker_file)) {
  stop("Test err.R (not this one) failed. libtesseract invoked exit() (probably)")
}
|
99571dc037d2d74dc784807f94fefb7a1aee2c5b
|
ea77df156d9fb138cb2fc1843df20a18b7132294
|
/man/selenium_storm.Rd
|
e9a7945815f80ccce96debbe9739ed8b602905d5
|
[] |
no_license
|
bedantaguru/RSeleniumTools
|
be0c85a7464c340061ee58f90ed436af0bc83377
|
df24ff23f0f6025967744a39f6b24bcb4e86526a
|
refs/heads/master
| 2023-08-14T05:27:28.976800
| 2021-10-06T07:11:43
| 2021-10-06T07:11:43
| 414,105,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,308
|
rd
|
selenium_storm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selenium_storm.R
\name{selenium_storm}
\alias{selenium_storm}
\title{Start a Selenium Storm instance}
\usage{
selenium_storm(
port = 15318L,
Browser,
headless,
fresh_start = F,
check = FALSE,
clean_start = TRUE,
singular_pid_sid = TRUE,
num_sessions
)
}
\arguments{
\item{port}{port to listen for selenium}
\item{Browser}{(optional) The browser to initiate by default.}
\item{headless}{(optional) If set \code{TRUE} it will start a headless browser.}
\item{fresh_start}{Restarts Selenium Storm.}
\item{check}{If set to \code{TRUE}, checks the versions of selenium available and the versions of associated drivers (chromever, geckover, phantomver, iedrver).
If new versions are available they will be downloaded.}
\item{clean_start}{If set to \code{TRUE} it discards old config and starts fresh.}
\item{singular_pid_sid}{If set to \code{TRUE} only one selenium session per process will be allowed. Default is \code{TRUE}.}
\item{num_sessions}{number of sessions to allow. If not mentioned it will take values = \code{parallel::detectCores()}.}
}
\value{
It returns invisibly the selenium handle (as returned by wdman::selenium)
}
\description{
Start a Selenium Storm instance
}
\examples{
selenium_storm()
}
|
98e6072f8cf7742ad3fdd347fd834f28577d8f78
|
dc05b4c26334fecf39b5f6f8acac77e7f76abf83
|
/R/save_slackr.R
|
bcf76488c2f487b2ac6f407e8f861923dbd17aad
|
[] |
no_license
|
siegelsbla/slackr
|
0f05e352694f9297f5ba14b10454ca161a7e66aa
|
aa569bede2911c7b22891aec0862fa2cf17953cc
|
refs/heads/master
| 2023-03-19T06:40:05.143491
| 2021-03-10T23:29:37
| 2021-03-10T23:29:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,609
|
r
|
save_slackr.R
|
#' Save R objects to an RData file on Slack
#'
#' `save_slackr` enables you upload R objects (as an R data file)
#' to Slack and (optionally) post them to one or more channels
#' (if `channels` is not empty).
#'
#' @param ... objects to store in the R data file
#' @param channels Slack channel(s) to save to; must be non-empty
#' @param file filename (without extension) to use
#' @param bot_user_oauth_token Slack bot user OAuth token
#' @param plot_text the plot text to send with the plot (defaults to "")
#' @return `httr` response object from `POST` call
#' @seealso [slackr_setup()], [slackr_dev()], [slackr_upload()]
#' @importFrom httr add_headers upload_file stop_for_status
#' @export
#' @examples \dontrun{
#' slackr_setup()
#' save_slackr(mtcars, channels="#slackr", file="mtcars")
#' }
save_slackr <- function(...,
                        channels=Sys.getenv("SLACK_CHANNEL"),
                        file="slackr",
                        bot_user_oauth_token=Sys.getenv("SLACK_BOT_USER_OAUTH_TOKEN"),
                        plot_text = '') {

  # BUG FIX: `if (channels == '')` is invalid when the caller passes a vector
  # of channels (length > 1 conditions are an error in modern R).  Also fixed
  # the "forget select" typo in the message.
  if (length(channels) == 0 || all(channels == '')) {
    stop("No channels specified. Did you forget to select which channels to post to with the 'channels' argument?")
  }

  # Force the C locale for LC_CTYPE while serialising; restore on exit.
  loc <- Sys.getlocale('LC_CTYPE')
  Sys.setlocale('LC_CTYPE','C')
  on.exit(Sys.setlocale("LC_CTYPE", loc))

  # Serialise the objects to a temporary .Rdata file, removed on exit.
  ftmp <- tempfile(file, fileext=".Rdata")
  save(..., file=ftmp)
  on.exit(unlink(ftmp), add=TRUE)

  # Upload via the package-internal Slack files.upload wrapper.
  res <- files_upload(
    file = ftmp,
    channel = channels,
    txt = plot_text,
    bot_user_oauth_token = bot_user_oauth_token,
    filename = sprintf("%s.Rdata", file)
  )

  stop_for_status(res)

  return(invisible(res))
}
|
80d6631eb83793aa60633c96954ad7ffad4b0300
|
63d8490c19a43fdb3bbf13c25a273fdc1a796f5e
|
/main(yearly).R
|
4683c2d41159e782d99ee4636f79da4c8a8654f7
|
[] |
no_license
|
tpopenfoose/Predict-Stock-Market-Using-ML
|
a50a22efdd68812332834fa37533aad45aa96d5a
|
76c542d3837799c4ea34f2b02edb938247323092
|
refs/heads/master
| 2021-01-11T22:28:45.587504
| 2016-04-24T22:02:19
| 2016-04-24T22:02:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,511
|
r
|
main(yearly).R
|
#header include all program source
#buildDataMain has all data prep part source
# NOTE(review): rm(list=ls()) wipes the user's entire workspace — an
# anti-pattern in scripts; kept only to preserve existing behaviour.
rm(list=ls())
source("header.R")
source("buildDataMain.R")

#=====Neural Network Program =====
# Hyperparameters.  FSD_windows comes from the sourced data-prep scripts;
# its last column holds the class label, the rest are input features.
lambda = 10
num_labels = length(unique(FSD_windows[,ncol(FSD_windows)]))
input_layer_size = ncol(FSD_windows)-1
hidden_layer_size = 15
max_iter = 10000

#save result in table
# Column 1 ("w Month") distinguishes two configurations (0 and 1);
# columns 2-4 record the hyperparameters; columns 5-18 the per-year
# test accuracies.  NOTE(review): only row 1 is ever filled by this
# script — row 2's accuracies stay 0.
result_table = matrix(0, ncol = 18, nrow = 2)
colnames(result_table) = c("w Month", "lambda", "#hidden_node", "iter", "acc_2003", "acc_2004", "acc_2005", "acc_2006", "acc_2007"
                           , "acc_2008", "acc_2009", "acc_2010", "acc_2011", "acc_2012", "acc_2013", "acc_2014", "acc_2015", "acc_2016")
result_table[1:2,1] = c(0,1)
result_table[1, 2:4] = c(lambda, hidden_layer_size, max_iter)
result_table[2, 2:4] = c(lambda, hidden_layer_size, max_iter)
# Accuracies are written starting at column 5 (acc_2003).
result_index = 5

#random initialize theta
# Random starting weights for input->hidden and hidden->output layers,
# flattened into a single parameter vector for the optimiser.
theta1 = randInitialWeight(input_layer_size, hidden_layer_size)
theta2 = randInitialWeight(hidden_layer_size, num_labels)
initial_parameter = append(as.vector(theta1), as.vector(theta2))
#=====Rolling one-year-ahead training & evaluation (2002 -> 2016)=====
# The original script repeated an identical train/predict block 14 times
# (train on year Y, test on year Y+1, warm-starting each fit from the
# previous year's weights).  This loop is the de-duplicated equivalent and
# fills result_table[1, 5:18] with the same accuracies in the same order.
#
# NOTE(review): the original indexed every test set with ncol(FSD2015),
# which only works because all FSD<year> frames share one column layout;
# ncol(test_data) below makes that assumption explicit.
prev_parameter <- initial_parameter
for (train_year in 2002:2015) {
  train_data <- get(paste0("FSD", train_year))
  test_data  <- get(paste0("FSD", train_year + 1))

  # Fit the network on the training year, warm-started from last year's fit.
  theta <- NeuralNetworkMain(train_data, hidden_layer_size, lambda,
                             max_iter, prev_parameter)
  theta_1 <- theta[[1]]
  theta_2 <- theta[[2]]
  prev_parameter <- append(theta_1, theta_2)
  # Preserve the per-year globals (theta_2002, ..., theta_2015) the original
  # script created, in case downstream code inspects them.
  assign(paste0("theta_", train_year), prev_parameter)

  # Evaluate on the following year: last column is the class label.
  X <- test_data[, -ncol(test_data)]
  y <- test_data[,  ncol(test_data)]
  prediction <- predict(theta_1, theta_2, X)
  result <- sum(prediction == y) / length(y)
  result_table[1, result_index] <- result
  result_index <- result_index + 1
}
|
14218f96f6944225fe3c6f8225a8190c679d49da
|
9ecd686648f3f0c1eb2c640719da8b133da1cf24
|
/R/affil_list_to_df.R
|
17e2eaac3334d4d703ec5edc66f720ebe61f1eb0
|
[] |
no_license
|
muschellij2/rscopus
|
55e65a3283d2df25d7611a4ace3f98bd426f4de4
|
bf144768698aaf48cb376bfaf626b01b87a70f73
|
refs/heads/master
| 2022-01-02T15:18:19.295209
| 2021-12-18T00:37:22
| 2021-12-18T00:37:22
| 44,287,888
| 65
| 18
| null | 2021-12-18T00:23:52
| 2015-10-15T02:06:02
|
R
|
UTF-8
|
R
| false
| false
| 2,204
|
r
|
affil_list_to_df.R
|
#' @title List of SCOPUS Entries to List of Affiliations Data Frames
#' @description Take a SCOPUS entry and transform it to a data frame of
#' affiliations
#' @param affils List of affiliations, from \code{\link{entries_to_affil_list}}.
#' Each element is a data.frame with columns \code{seq}, \code{au_id},
#' \code{name}, \code{affil_id} and \code{affilname}.
#' @return A \code{data.frame} of affiliation information in wide format:
#' one row per author, one \code{affil_id_<k>}/\code{affilname_<k>} column
#' pair per affiliation rank.  A column named \code{"index"} denotes the
#' element of the object \code{affils} that the row corresponds to
#' @export
#' @importFrom stats reshape
affil_list_to_df = function(affils) {
  ########################
  # Rank each affiliation within its author (seq): 1st, 2nd, ... entry
  ########################
  affils = lapply(affils, function(x){
    x$seq = as.numeric(as.character(x$seq))
    # tapply iterates groups in sorted order of seq; the rows of x are
    # grouped the same way, so the unlisted ranks align with the rows.
    x$ind = unlist(tapply(x$seq, x$seq, function(y) {
      seq_along(y)
    }))
    x
  })
  # Maximum number of affiliations held by any single author.
  # vapply (rather than sapply) keeps the return type stable.
  n_reps = vapply(affils, function(x){
    as.numeric(max(table(x$seq)))
  }, numeric(1))
  ncols = max(n_reps)
  idvars = c("seq", "au_id", "name")
  all_colnames = c(idvars, "ind", "affil_id", "affilname")
  # Defensive check: upstream column layout must not have drifted.
  check = vapply(affils, function(x) {
    all(colnames(x) %in% all_colnames)
  }, logical(1))
  if (!all(check)) {
    stop(paste0("colnames of affils has changed! ",
                "Must be ", paste0(all_colnames, collapse = ", ")))
  }
  ########################
  # Long -> wide: one affil_id_<k>/affilname_<k> pair per rank k
  ########################
  auths = lapply(affils, function(x){
    reshape(x,
            timevar = "ind",
            idvar = idvars,
            direction = "wide", sep = "_")
  })
  # outer() is unrolled column-major: affil_id_1, affilname_1, affil_id_2, ...
  new_colnames = c(idvars, c(outer(c("affil_id", "affilname"),
                                   seq_len(ncols),
                                   paste, sep = "_")))
  ########################
  # Pad so every data.frame has the same columns, in the same order
  ########################
  auths = lapply(auths, function(x){
    sd = setdiff(new_colnames, colnames(x))
    if (length(sd) > 0) {
      mat = matrix(NA, nrow = nrow(x), ncol = length(sd))
      colnames(mat) = sd
      x = cbind(x, mat)
    }
    x[, new_colnames]
  })
  ######################
  # Record which input element each row came from, then stack
  ######################
  auths = mapply(function(x, y) {
    x$index = y
    x
  }, auths, seq_along(auths), SIMPLIFY = FALSE)
  auths = do.call("rbind", auths)
  return(auths)
}
|
1d5ff01137b911825cc126ca030e42be58ee6d78
|
4163329fadd085e0dcb00a9b47ffb756fc84f94b
|
/code/scratch/dsc_make_data.R
|
906df64eb14a3e1eea48162f3592babfdf63f960
|
[] |
no_license
|
jhsiao999/dsc_conquer_comparison
|
ee5c44f570ae47e578a1653f1302aa39372a637a
|
02ec4978920ee09ba9fa2ce07c60a23cff9adfc9
|
refs/heads/master
| 2020-03-13T00:55:24.583284
| 2018-05-01T15:22:31
| 2018-05-01T15:22:31
| 130,895,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 850
|
r
|
dsc_make_data.R
|
# Scratch extraction of the inner loop body from a larger evaluation
# pipeline.  NOTE(review): this fragment assumes `mae`, `groupid`,
# `keep_samples`, `imposed_condition`, `filt`, `config`, `res`, `demethod`,
# `exts`, `sz` and `i` are already defined in the calling environment —
# the loop that used to define `sz` and `i` is commented out below.
mae <- clean_mae(mae=mae, groupid=groupid)

sizes <- names(keep_samples)

#for (sz in sizes) {
#  for (i in 1:nrow(keep_samples[[as.character(sz)]])) {
#    message(sz, ".", i)
# Build the subsetted experiment for the current sample size / repetition.
L <- subset_mae(mae = mae, keep_samples = keep_samples, sz = sz, i = i,
                imposed_condition = imposed_condition, filt = filt, impute = config$impute)
message(nrow(L$count), " genes, ", ncol(L$count), " samples.")

# Dispatch to the method named by `demethod` (via get("run_<demethod>"))
# and capture any plots it draws into a per-run PDF.
pdf(paste0(config$figfilebase, "_", demethod, exts, "_", sz, "_", i, ".pdf"))
res[[paste0(demethod, exts, ".", sz, ".", i)]] <- get(paste0("run_", demethod))(L)
res[[paste0(demethod, exts, ".", sz, ".", i)]][["nimp"]] <- L$nimp
dev.off()
#  }
#}
#L <- subset_mae(mae = mae, keep_samples = keep_samples, sz = sz, i = i,
#                imposed_condition = imposed_condition, filt = filt, impute = config$impute)
|
d07513f8e710acc6e34c017dbfaa0e2a166ff894
|
13b951f9531af94d60f21568b50503664bf9e139
|
/Skripts/Signal quality.R
|
a517756e08737465241f406e9504751b1f34cf52
|
[] |
no_license
|
Iraleksa/Wifi
|
987eae4710d003ddd820a45674dbd00bbafe9bc9
|
6e76da18b64a24337616d85cc9f05a89a7c2acb2
|
refs/heads/master
| 2020-06-24T13:37:36.219667
| 2019-08-02T08:07:50
| 2019-08-02T08:07:50
| 198,976,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,662
|
r
|
Signal quality.R
|
# Signal-quality buckets (RSSI in dBm; 100 is this dataset's "not detected"
# code):
#   x >= -30           Amazing
#   -67 <= x < -30     Very Good
#   -75 <= x < -67     Okay
#   x < -75            Not Good
#
# BUG FIX: the exploratory counts below were written as `which(x<-75 & ...)`,
# which R parses as the ASSIGNMENT `x <- (75 & x != 100)` instead of the
# comparison `x < -75`.  Spaces added so the comparison is performed.
apply(wifi_data[,1:520], 1, function(x) length(which(x < -75 & x != 100)))
apply(wifi_data[1,1:520], 1, function(x) length(which(x < -75 & x != 100)))
apply(wifi_data[,1:520], 1, function(x) length(which(x >= -30 & x != 100)))

# Per-observation counts of WAPs falling in each quality bucket.
# The original mutate()/as.numeric() round-trip for Amazing was redundant —
# the column was immediately recomputed with an identical expression — so it
# has been dropped; the final column values are unchanged.
wifi_data$Amazing   <- apply(wifi_data[,1:520], 1, function(x) length(which(x >= -30 & x != 100)))
wifi_data$Very_Good <- apply(wifi_data[,1:520], 1, function(x) length(which(x >= -67 & x < -30 & x != 100)))
wifi_data$Okay      <- apply(wifi_data[,1:520], 1, function(x) length(which(x >= -75 & x < -67 & x != 100)))
wifi_data$Not_Good  <- apply(wifi_data[,1:520], 1, function(x) length(which(x < -75 & x != 100)))
wifi_data$No_Signal <- apply(wifi_data[,1:520], 1, function(x) length(which(x == 100)))
# 3D plot for signal's distribution by Quality of signal
#### Amazing signals ####
# Observations with at least one Amazing reading (>= -30 dBm).
Amaz<-wifi_data %>% filter(Amazing !=0)
# Legend font settings for the plotly figure (redefined identically before
# each plot in this script).
l <- list(
  font = list(
    family = "sans-serif",
    size = 30,
    color = "#000"))
write.csv(Amaz,file = "Amazing by user ID.csv", row.names = FALSE)
# 3D scatter (longitude/latitude/floor) coloured by USERID.
plot_ly(Amaz, x = ~LONGITUDE, y = ~LATITUDE, z = ~unclass(FLOOR),marker = list(size = 6)) %>%
  add_markers(color = ~as.factor(USERID),colors = "Set1") %>%
  layout(title = "Amazing signals (more, than -30 dBm) distribution_train data",scene = list(xaxis = list(title = 'LONGITUDE'),
                                                                                             yaxis = list(title = 'LATITUDE'),
                                                                                             zaxis = list(title = 'Floor' ))) %>% layout(legend = l)
#### Very Good signals ####
# BUG FIX: write.csv(Very_G, ...) originally ran BEFORE Very_G was created,
# which errors in a fresh session (object 'Very_G' not found); it now runs
# after the assignment.
# Observations with at least one Very Good AND one Okay reading.
Very_G <- wifi_data %>% filter(Very_Good != 0 & Okay != 0)
write.csv(Very_G, file = "Very Good and OKay signals.csv", row.names = FALSE)
# Legend font settings for the plotly figure.
l <- list(
  font = list(
    family = "sans-serif",
    size = 30,
    color = "#000"))
# 3D scatter (longitude/latitude/floor) coloured by the Very Good count.
plot_ly(Very_G, x = ~LONGITUDE, y = ~LATITUDE, z = ~unclass(FLOOR), marker = list(size = 6)) %>%
  add_markers(color = ~as.factor(Very_Good), colors = "Set1") %>%
  layout(title = "Very Good and OKay signals (between -75 & - 30 dBm) distribution_train data",
         scene = list(xaxis = list(title = 'LONGITUDE'),
                      yaxis = list(title = 'LATITUDE'),
                      zaxis = list(title = 'Floor' ))) %>%
  layout(legend = l)
#### Okay signals ####
# Observations with at least one Okay reading (-75 to -67 dBm).
Ok<-wifi_data %>% filter(Okay !=0)
# Legend font settings for the plotly figure.
l <- list(
  font = list(
    family = "sans-serif",
    size = 30,
    color = "#000"))
# 3D scatter (longitude/latitude/floor) coloured by the Okay count.
plot_ly(Ok, x = ~LONGITUDE, y = ~LATITUDE, z = ~unclass(FLOOR),marker = list(size = 6)) %>%
  add_markers(color = ~as.factor(Okay),colors = "Set1") %>%
  layout(title = "Okay signals (between -75 & -67 dBm) distribution_train data",scene = list(xaxis = list(title = 'LONGITUDE'),
                                                                                            yaxis = list(title = 'LATITUDE'),
                                                                                            zaxis = list(title = 'Floor' ))) %>%
  layout(legend = l)
#### Not_Good signals ####
# Observations with at least one Not Good reading (below -75 dBm).
N_Good <- wifi_data %>% filter(Not_Good != 0)
# Legend font settings for the plotly figure.
l <- list(
  font = list(
    family = "sans-serif",
    size = 30,
    color = "#000"))
# 3D scatter (longitude/latitude/floor) coloured by the Not Good count.
# BUG FIX: the title previously said "Okay signals" (copy-paste from the
# Okay section) although this plot shows the Not Good (< -75 dBm) counts.
plot_ly(N_Good, x = ~LONGITUDE, y = ~LATITUDE, z = ~unclass(FLOOR), marker = list(size = 6)) %>%
  add_markers(color = ~as.factor(Not_Good), colors = "Set1") %>%
  layout(title = "Not Good signals (less than -75 dBm) distribution_train data",
         scene = list(xaxis = list(title = 'LONGITUDE'),
                      yaxis = list(title = 'LATITUDE'),
                      zaxis = list(title = 'Floor' ))) %>%
  layout(legend = l)
# No signals
# Observations where every one of the 520 WAP columns equals 100
# (this dataset's "not detected" code).
No_Sig<-wifi_data %>% filter(No_Signal ==520)
# Legend font settings for the plotly figure.
l <- list(
  font = list(
    family = "sans-serif",
    size = 30,
    color = "#000"))
plot_ly(No_Sig, x = ~LONGITUDE, y = ~LATITUDE, z = ~unclass(FLOOR),marker = list(size = 6)) %>%
  add_markers(color = ~as.factor(No_Signal),colors = "Set1") %>%
  layout(title = "No signals distribution_train data",scene = list(xaxis = list(title = 'LONGITUDE'),
                                                                   yaxis = list(title = 'LATITUDE'),
                                                                   zaxis = list(title = 'Floor' ))) %>%
  layout(legend = l)

#Signal frequency for all phones only Amazing signals
# NOTE(review): require() is for optional dependencies; library(reshape2)
# would fail fast if the package is missing.
require(reshape2)
# Preparing dataset: keep only WAP columns, mask everything outside the
# Amazing band (>= -30 dBm) and the 100 "not detected" code as NA, then
# reattach PHONEID and melt to long format for the histogram.
Amaz<-wifi_data %>% filter(Amazing !=0)
phones_amaz<-Amaz
am<-phones_amaz %>% select(WAP001:WAP520)
am[am==100]<-NA
am[am< -30]<-NA
phones_amaz<-cbind(am,phones_amaz$PHONEID)
colnames(phones_amaz)[521] <-"PHONEID"
phones_melt_am <- melt(phones_amaz, na.rm = TRUE, id.vars = "PHONEID")
colnames(phones_melt_am)[2] <-"WAPs"
# Histogram of RSSI values in the Amazing band across all phones.
all_ph_amaz<-ggplot(phones_melt_am, aes(x=value)) + geom_histogram(color="darkblue", fill="lightblue", stat="count")+
  xlab("Signal intencity") + ylab("Frequency")+ggtitle("Signal frequency for all phones only Amazing signals (more, than -30 dBm)")+
  theme(plot.title = element_text(hjust = 0.5))
ggplotly(all_ph_amaz)
write.csv(phones_melt_am,file = "Amazing signals his.csv", row.names = FALSE)
#Signal frequency for all phones only Very Good signals
require(reshape2)
# Preparing dataset: mask values outside the Very Good band (-67 to -30 dBm)
# and the 100 "not detected" code as NA, then melt to long format.
# NOTE(review): `avg[avg >=- 30]<-NA` parses as `avg >= -30` — correct, but
# the spacing invites the same `<-` pitfall seen earlier in this script.
Very_G<-wifi_data %>% filter(Very_Good !=0)
phones_vg<-Very_G
avg<-phones_vg %>% select(WAP001:WAP520)
avg[avg==100]<-NA
avg[avg < -67]<-NA
avg[avg >=- 30]<-NA
phones_vg<-cbind(avg,phones_vg$PHONEID)
colnames(phones_vg)[521] <-"PHONEID"
phones_melt_avg <- melt(phones_vg, na.rm = TRUE, id.vars = "PHONEID")
colnames(phones_melt_avg)[2] <-"WAPs"
# NOTE(review): "intencity" (sic) is a user-facing label typo; left as-is
# because it is a runtime string.
all_ph_vg<-ggplot(phones_melt_avg, aes(x=value)) + geom_histogram(color="darkblue", fill="lightblue", stat="count")+
  xlab("Signal intencity") + ylab("Frequency")+ggtitle("Signal frequency for all phones only Very Good signals(between -67 & - 30 dBm)")+
  theme(plot.title = element_text(hjust = 0.5))
ggplotly(all_ph_vg)

#Signal frequency for all phones only Okay signals
# Preparing dataset: keep only the Okay band (-75 to -67 dBm).
Ok<-wifi_data %>% filter(Okay !=0)
phones_ok<-Ok
aok<-phones_ok %>% select(WAP001:WAP520)
aok[aok==100]<-NA
aok[aok < -75]<-NA
aok[aok >= -67]<-NA
phones_ok<-cbind(aok,phones_ok$PHONEID)
colnames(phones_ok)[521] <-"PHONEID"
phones_melt_aok <- melt(phones_ok, na.rm = TRUE, id.vars = "PHONEID")
colnames(phones_melt_aok)[2] <-"WAPs"
all_ph_ok<-ggplot(phones_melt_aok, aes(x=value)) + geom_histogram(color="darkblue", fill="lightblue", stat="count")+
  xlab("Signal intencity") + ylab("Frequency")+ggtitle("Signal frequency for all phones only Okay signals (between -75 & -67 dBm)")+
  theme(plot.title = element_text(hjust = 0.5))
ggplotly(all_ph_ok)

#Signal frequency for all phones only Not Good signals
# Preparing dataset: keep only the Not Good band (below -75 dBm).
N_Good<-wifi_data %>% filter(Not_Good !=0)
phones_ng<-N_Good
ang<-phones_ng %>% select(WAP001:WAP520)
ang[ang==100]<-NA
ang[ang >= -75]<-NA
phones_ng<-cbind(ang,phones_ng$PHONEID)
colnames(phones_ng)[521] <-"PHONEID"
phones_melt_ang <- melt(phones_ng, na.rm = TRUE, id.vars = "PHONEID")
colnames(phones_melt_ang)[2] <-"WAPs"
all_ph_ng<-ggplot(phones_melt_ang, aes(x=value)) + geom_histogram(color="darkblue", fill="lightblue", stat="count")+
  xlab("Signal intencity") + ylab("Frequency")+ggtitle("Signal frequency for all phones only Not Good signals (less than -75 dBm) ")+
  theme(plot.title = element_text(hjust = 0.5))
ggplotly(all_ph_ng)
# Count of suspiciously strong readings (>= -3 dBm) per observation.
# The chained assignment creates both the wifi_data$Too_good column and a
# free-standing Too_good vector in the global environment.
Too_good<-wifi_data$Too_good<-apply(wifi_data[,1:520],1,function(x) length(which(x >= -3 & x!=100)))
# NOTE(review): Too_good is a COUNT and therefore never negative, so the
# filters `>= -3` and `>= -30` below keep every row; they look like they
# were meant to be `> 0` (rows with at least one such reading) — confirm.
Too_good_df<- wifi_data %>% filter(Too_good >= -3)
# Rows with MORE than one suspiciously strong reading.
Too_good_susp<- wifi_data %>% filter(Too_good > 1)
TG<-wifi_data %>% filter(Too_good >= -30 )
# Legend font settings for the plotly figure.
l <- list(
  font = list(
    family = "sans-serif",
    size = 30,
    color = "#000"))
# 3D scatter (longitude/latitude/floor) coloured by the Too_good count.
plot_ly(TG, x = ~LONGITUDE, y = ~LATITUDE, z = ~unclass(FLOOR),marker = list(size = 6)) %>%
  add_markers(color = ~as.factor(Too_good),colors = "Set1") %>%
  layout(title = "Too good signals (more, than -3 dBm) distribution_train data",scene = list(xaxis = list(title = 'LONGITUDE'),
                                                                                             yaxis = list(title = 'LATITUDE'),
                                                                                             zaxis = list(title = 'Floor' ))) %>%
  layout(legend = l)
|
c49998d6c6df7f08e32d54b40907500b17f41c58
|
bac9aab5e11cb668702540f506cea1233e561ce3
|
/server.R
|
0d82f8b7d76d2e73a1c04b2ac452987ff0b39777
|
[] |
no_license
|
mfournierca/DataProducts_ShinyApp
|
690aac10310a8ff4fe9c654e6d882d4aedb52a21
|
61c36509bc2b5f9f4f17b7706dcc746e15b81ea0
|
refs/heads/master
| 2021-01-01T06:50:17.483238
| 2014-06-23T02:32:01
| 2014-06-23T02:32:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
server.R
|
shinyServer(function(input, output, session) {

  # Two-column data frame built from the user's selected variables.
  chosen_cols <- reactive({
    swiss[, c(input$xcol, input$ycol)]
  })

  # K-means fit on the selected columns; re-runs when inputs change.
  cluster_fit <- reactive({
    kmeans(chosen_cols(), input$numclusters)
  })

  # Scatter plot of the selection: points coloured by cluster assignment,
  # with open diamonds marking the cluster centres.
  output$plot1 <- renderPlot({
    par(mar = c(5, 5, 1, 0))
    par(mfrow = c(1, 1))
    fit <- cluster_fit()
    plot(chosen_cols(), col = fit$cluster, pch = 20, cex = 2)
    points(fit$centers, pch = 5, cex = 4, lwd = 1)
  })
})
|
52791de662f9112f3556b7c36eb5d9a2a8d27655
|
fcf685da338b4b70b3008700abceb77b0ba28a86
|
/R/plotASEMetrics.R
|
33cbc6c24db7332c1d686213e88f14b4a0a6a028
|
[] |
no_license
|
AClement1990/hap-eQTL
|
f472baccf075ee323023c0c965b26929af1b38c5
|
cc2c24cbdf246f787224e588e532dd75cfef3735
|
refs/heads/master
| 2023-02-18T02:02:31.410416
| 2021-01-07T17:41:57
| 2021-01-07T17:41:57
| 317,462,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,902
|
r
|
plotASEMetrics.R
|
#' Plot ASE metrics generated by Gen.input.
#'
#' This function takes the output of Gen.input and produces various plots regarding allele specific expression in the dataset.
#' @param input The output of the Gen.input function
#' @param individuals The individual(s) to restrict the plots to.
#' @param genes The gene(s) to restrict the plots to.
#' @param variants The variant(s) to restrict the plots to.
#' @export
#' @examples
#' #' #Plot the output of Gen.input stored in aseDat
#' plotASEMetrics(aseDat)
plotASEMetrics<-function(input, individuals=NULL, genes=NULL, variants=NULL)
{
  # Filter the ASE table *cumulatively*, so that all supplied restrictions
  # (individuals AND genes AND variants) apply together.  Previously each
  # filter restarted from input$ASE, silently discarding any filter given
  # earlier in the argument list.
  thisASE<-input$ASE
  if(!is.null(individuals))
  {
    thisASE<-thisASE[which(thisASE$Ind %in% individuals),]
  }
  if(!is.null(genes))
  {
    thisASE<-thisASE[which(thisASE$Gene %in% genes),]
  }
  if(!is.null(variants))
  {
    thisASE<-thisASE[which(thisASE$ID %in% variants),]
  }
  # Check the *filtered* table: the old code tested the unfiltered input,
  # so invalid ids crashed later instead of reaching the error below.
  if(dim(thisASE)[1] > 0)
  {
    numInd<-length(unique(thisASE$Ind))
    numVar<-length(unique(thisASE$ID))
    numGenes<-length(unique(thisASE$Gene[!is.na(thisASE$Gene)]))
    # Variants whose Gene annotation is NA fall outside known genes
    numNA<-length(unique(thisASE$ID[is.na(thisASE$Gene)]))
    cat("Numbers after filtering:\n")
    cat("\t", numInd, "individual(s)\n")
    cat("\t", numVar, "variant(s)\n")
    cat("\t", numGenes, "gene(s)\n")
    title<-paste("ASE metrics across ", numInd, " individuals, ", numGenes, " genes and ", numVar, " variants (", numNA, " outside known genes)", sep="")
    # p1: rank of each variant vs number of heterozygote individuals
    hetCounts<-sort(table(thisASE$ID), decreasing = TRUE)
    #check plotting when only one SNP
    hetCounts<-as.vector(hetCounts[which(hetCounts>0)])
    hetCountsRank<-cbind.data.frame(Individuals=hetCounts, Rank=seq_along(hetCounts))
    colnames(hetCountsRank)<-c("Individuals.Freq", "Rank")
    p1<-ggplot(hetCountsRank, aes(x=Rank, y=Individuals.Freq)) +
      geom_point() + scale_y_continuous(trans='log10') + annotation_logticks(scaled = TRUE, sides = "lr") +
      xlab("Rank of variant") + ylab("Number of heterozygote individuals") + theme_pubr()
    #rather convoluted command to get variant counts by gene
    geneCounts<-table(as.character(unique(thisASE[complete.cases(thisASE[,c("ID","Gene")]),c("ID","Gene")])$Gene))
    #sometimes variants left may not be in genes. If the case dont do this plot.
    if(dim(geneCounts)[1] > 0)
    {
      geneCountsRank<-cbind.data.frame(Genes=sort(as.vector(geneCounts), decreasing = TRUE), Rank=seq_along(geneCounts))
      p2<-ggplot(geneCountsRank, aes(x=Rank, y=Genes)) +
        geom_point() + scale_y_continuous(trans='log10') + annotation_logticks(scaled = TRUE, sides = "lr") +
        xlab("Rank of gene") + ylab("Number of heterozygote variants") + theme_pubr()
    }
    # p3: density of reference-allele read proportions, median as dashed line
    p3<-ggplot(thisASE, aes(x=propRef)) +
      geom_histogram(aes(y=..density..), binwidth=.05, colour="black", fill="white") +
      geom_vline(aes(xintercept=median(propRef, na.rm=TRUE)), # Ignore NA values for median
                 color="red", linetype="dashed", size=1) + xlab("Proportion of reads carrying reference allele") + ylab("Density of sites") +
      geom_density(alpha=.2, fill="#FF6666") + theme_pubr()
    # p4: log ratio vs read depth, coloured by -log10 binomial P.
    # The 1e-300 offset guards against log10(0); maxLogP rescales the
    # colour gradient so the 3rd colour anchors at the largest finite -log10 P.
    logTransBi<--log10(thisASE$binomp)
    maxLogP<-max(logTransBi[is.finite(logTransBi)])
    p4<-ggplot(thisASE, aes(x=logRatio, y=totalReads)) +
      geom_point(alpha = 3/10, aes(colour=-log10(binomp+1e-300))) + theme_pubr() +
      xlab("Log2 ratio ((Ref. reads + 1)/(Alt. reads +1))") + ylab("Total number of reads") +
      scale_y_continuous(trans='log10') +
      scale_colour_gradientn(name = "-log10(Binomial P value)",colors=c("cornflowerblue","orange", "red"), values=c(0,3/maxLogP,1))
    # inherits = FALSE restricts the lookup to this call's frame; otherwise
    # any object named p2 in an enclosing environment (e.g. the global env)
    # would wrongly trigger the four-panel layout.
    if(!exists("p2", inherits = FALSE))
    {
      annotate_figure(ggarrange(p1,p3,p4), top=title)
    } else {
      annotate_figure(ggarrange(p1,p2,p3,p4), top=title)
    }
  } else {
    stop("No data left to plot. Did you specify valid ids?")
  }
}
|
dafb66c20552b8df2d9d0518087e6032656e97b3
|
59a18107a8ff9e9e4532d6a66996c3850ce80eda
|
/UrWeb_With_No_Ni_WithCBONLY.R
|
0ae762d10dcb72bbb50e5883cac6e67248b26428
|
[] |
no_license
|
MilesOtt/FCD_AFCD
|
954763654023adb3d9a20ff513a7f5e811a639da
|
54f4b868e324b1e8f827a2cd9ebf92436ff59b32
|
refs/heads/master
| 2021-07-24T07:40:08.934274
| 2017-11-03T01:55:49
| 2017-11-03T01:55:49
| 108,866,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,646
|
r
|
UrWeb_With_No_Ni_WithCBONLY.R
|
#install.packages("foreign")
library(foreign)
#install.packages("amen")
library(amen)
##----------- Reading in data ----------------#
# NOTE(review): hard-coded absolute Windows path -- this script only runs
# as-is on the original author's machine; consider parameterising it.
setwd("C:/Users/Miles/Dropbox/Miles Backup/crystal networks/updated dta files")
getwd()
# Read the Stata files: peer-nomination matrix and subject covariates.
# (as.matrix() silently ignores the nrow=/ncol= arguments passed here.)
pnom<-as.matrix(read.dta("nomination_matrix_129.dta"), nrow=129, ncol=130)
subj<-as.matrix(read.dta("subjectdata.dta"), nrow=129, ncol=132)
# sna_dorm is loaded but not used below -- TODO confirm it is needed
sna_dorm<-as.matrix(read.dta("sndata_long_dorm_only.dta"))
# Coerce nomination columns 2:130 (column 1 presumably an id -- verify
# against the codebook) into a numeric 129 x 129 adjacency matrix w.
w<-matrix(rep(0,129*129), nrow=129, ncol=129)
y<-pnom[1:129,2:130]
for(i in 1:129){
for(j in 1:129){
w[i,j]<-as.numeric(y[i,j])
}
}
y.matrix.data.full<-w
# Accumulators for posterior-mean AME coefficients, one per fixed-choice
# censoring level (only levels 2, 4, 6 and 8 are filled in the loop below).
keep.track.CB.1<-NULL
keep.track.CB.2<-NULL
keep.track.CB.3<-NULL
keep.track.CB.4<-NULL
keep.track.CB.5<-NULL
keep.track.CB.6<-NULL
keep.track.CB.7<-NULL
keep.track.CB.8<-NULL
keep.track.CB.9<-NULL
n.nodes<-129
# Covariate matrices:
#   x.0 -- all-ones matrix (intercept-style dyadic covariate)
#   x.1[i,j] = drink.days.month[i]   (column-wise fill recycles the vector,
#              so every column equals the full vector)
#   x.2[i,j] = |drink.days.month[i] - drink.days.month[j]|
# drink.days.month is taken from subject-data column 112 -- presumably the
# self-reported drinking days per month; confirm against the codebook.
x.0.matrix.data<-matrix(rep(1, (n.nodes*n.nodes)), nrow=n.nodes)
drink.days.month<-as.numeric(subj[,112])
index<-1:n.nodes
index.matrix<-matrix(index, ncol=n.nodes,nrow=n.nodes)
x.1.matrix.data<-matrix(drink.days.month, ncol=n.nodes,nrow=n.nodes)
x.2.matrix.data<-x.1.matrix.data
for(i in 1:n.nodes){
for(j in 1:n.nodes){
x.2.matrix.data[i,j]<-abs(drink.days.month[i]-drink.days.month[j])
}
}
# Number of Monte Carlo replicates
n.loop<-100
# Main Monte-Carlo loop: n.loop replicates.  In each replicate the full
# nomination network is censored to at most k nominations per sender
# (k = 1..9, keeping a random subset of each row's ties), and conditional
# binomial ("cbin") AME models are fitted to the k = 2, 4, 6, 8 versions.
for(iii in 1:n.loop){
#---- setting up the data--------#
# N.i: true out-degree of each sender in the full network
N.i<-rowSums(y.matrix.data.full)
dim.y<-dim(y.matrix.data.full)[1]
# Start every censored network from a fresh copy of the full network
y.matrix.data.9<-y.matrix.data.full
y.matrix.data.8<-y.matrix.data.full
y.matrix.data.7<-y.matrix.data.full
y.matrix.data.6<-y.matrix.data.full
y.matrix.data.5<-y.matrix.data.full
y.matrix.data.4<-y.matrix.data.full
y.matrix.data.3<-y.matrix.data.full
y.matrix.data.2<-y.matrix.data.full
y.matrix.data.1<-y.matrix.data.full
# y.miss.k: flags zero entries in rows that were actually censored (true
# out-degree > k).  y.miss.no.N.i.k: additionally flags rows whose true
# out-degree equals k exactly.
y.miss.9<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.8<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.7<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.6<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.5<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.4<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.3<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.2<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.1<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.9<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.8<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.7<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.6<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.5<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.4<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.3<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.2<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
y.miss.no.N.i.1<-matrix(rep(0, (n.nodes*n.nodes)), nrow=n.nodes)
index<-1:dim.y
# For each sender: randomly order their nominees, then for each cap k
# zero out all but the first k ties.
# NOTE(review): when a row has exactly one tie, sample(t.c, 1) with a
# length-1 numeric t.c draws from 1:t.c (classic sample() footgun), but
# t.c.o's *values* are only used when n.c > 1, so this is harmless here.
for(i in 1:dim.y){
t.c<-index[y.matrix.data.full[i,]==1]
t.c.o<-sample(t.c, length(t.c))
n.c<-length(t.c.o)
if(n.c>1){
y.matrix.data.1[i,t.c.o[2:n.c]]<-0
y.miss.1[i,y.matrix.data.1[i,]==0]<-1
y.miss.no.N.i.1[i,y.matrix.data.1[i,]==0]<-1
}
if (n.c==1){
y.miss.no.N.i.1[i,y.matrix.data.1[i,]==0]<-1
}
if(n.c>2){
y.matrix.data.2[i,t.c.o[3:n.c]]<-0
y.miss.2[i,y.matrix.data.2[i,]==0]<-1
y.miss.no.N.i.2[i,y.matrix.data.2[i,]==0]<-1
}
if (n.c==2){
y.miss.no.N.i.2[i,y.matrix.data.2[i,]==0]<-1
}
if(n.c>3){
y.matrix.data.3[i,t.c.o[4:n.c]]<-0
y.miss.3[i,y.matrix.data.3[i,]==0]<-1
y.miss.no.N.i.3[i,y.matrix.data.3[i,]==0]<-1
}
if (n.c==3){
y.miss.no.N.i.3[i,y.matrix.data.3[i,]==0]<-1
}
if(n.c>4){
y.matrix.data.4[i,t.c.o[5:n.c]]<-0
y.miss.4[i,y.matrix.data.4[i,]==0]<-1
y.miss.no.N.i.4[i,y.matrix.data.4[i,]==0]<-1
}
if (n.c==4){
y.miss.no.N.i.4[i,y.matrix.data.4[i,]==0]<-1
}
if(n.c>5){
y.matrix.data.5[i,t.c.o[6:n.c]]<-0
y.miss.5[i,y.matrix.data.5[i,]==0]<-1
y.miss.no.N.i.5[i,y.matrix.data.5[i,]==0]<-1
}
if (n.c==5){
y.miss.no.N.i.5[i,y.matrix.data.5[i,]==0]<-1
}
if(n.c>6){
y.matrix.data.6[i,t.c.o[7:n.c]]<-0
y.miss.6[i,y.matrix.data.6[i,]==0]<-1
y.miss.no.N.i.6[i,y.matrix.data.6[i,]==0]<-1
}
if (n.c==6){
y.miss.no.N.i.6[i,y.matrix.data.6[i,]==0]<-1
}
if(n.c>7){
y.matrix.data.7[i,t.c.o[8:n.c]]<-0
y.miss.7[i,y.matrix.data.7[i,]==0]<-1
y.miss.no.N.i.7[i,y.matrix.data.7[i,]==0]<-1
}
if (n.c==7){
y.miss.no.N.i.7[i,y.matrix.data.7[i,]==0]<-1
}
if(n.c>8){
y.matrix.data.8[i,t.c.o[9:n.c]]<-0
y.miss.8[i,y.matrix.data.8[i,]==0]<-1
y.miss.no.N.i.8[i,y.matrix.data.8[i,]==0]<-1
}
if (n.c==8){
y.miss.no.N.i.8[i,y.matrix.data.8[i,]==0]<-1
}
if(n.c>9){
y.matrix.data.9[i,t.c.o[10:n.c]]<-0
y.miss.9[i,y.matrix.data.9[i,]==0]<-1
y.miss.no.N.i.9[i,y.matrix.data.9[i,]==0]<-1
}
if (n.c==9){
y.miss.no.N.i.9[i,y.matrix.data.9[i,]==0]<-1
}
}
# Vectorised (off-diagonal) versions of the covariates and outcomes.
# The triangle masks are taken from y.miss.5, but all matrices share the
# same 129 x 129 shape, so the masks are identical for every k.
# NOTE(review): x.vecs, y.vecs.full, the y.miss.vec.* / y.vec.* vectors
# and index.vec are constructed but never referenced again within this
# script -- possibly leftovers from an earlier analysis; confirm before
# deleting.
vectorized.x.0<-c(x.0.matrix.data[upper.tri(x.0.matrix.data)], x.0.matrix.data[lower.tri(x.0.matrix.data)])
vectorized.x.1<-c(x.1.matrix.data[upper.tri(x.1.matrix.data)], x.1.matrix.data[lower.tri(x.1.matrix.data)])
vectorized.x.2<-c(x.2.matrix.data[upper.tri(x.2.matrix.data)], x.2.matrix.data[lower.tri(x.2.matrix.data)])
x.vecs.a<-cbind((t(vectorized.x.0)),(t(vectorized.x.1)), (t(vectorized.x.2)))
x.vecs<-matrix(x.vecs.a, ncol=3, byrow=FALSE)
y.vecs.full<-c(y.matrix.data.full[upper.tri(y.matrix.data.full)], y.matrix.data.full[lower.tri(y.matrix.data.full)])
y.miss.vec.1<-c(y.miss.1[upper.tri(y.miss.5)], y.miss.1[lower.tri(y.miss.5)])
y.miss.vec.2<-c(y.miss.2[upper.tri(y.miss.5)], y.miss.2[lower.tri(y.miss.5)])
y.miss.vec.3<-c(y.miss.3[upper.tri(y.miss.5)], y.miss.3[lower.tri(y.miss.5)])
y.miss.vec.4<-c(y.miss.4[upper.tri(y.miss.5)], y.miss.4[lower.tri(y.miss.5)])
y.miss.vec.5<-c(y.miss.5[upper.tri(y.miss.5)], y.miss.5[lower.tri(y.miss.5)])
y.miss.vec.6<-c(y.miss.6[upper.tri(y.miss.5)], y.miss.6[lower.tri(y.miss.5)])
y.miss.vec.7<-c(y.miss.7[upper.tri(y.miss.5)], y.miss.7[lower.tri(y.miss.5)])
y.miss.vec.8<-c(y.miss.8[upper.tri(y.miss.5)], y.miss.8[lower.tri(y.miss.5)])
y.miss.vec.9<-c(y.miss.9[upper.tri(y.miss.5)], y.miss.9[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.1<-c(y.miss.no.N.i.1[upper.tri(y.miss.5)], y.miss.no.N.i.1[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.2<-c(y.miss.no.N.i.2[upper.tri(y.miss.5)], y.miss.no.N.i.2[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.3<-c(y.miss.no.N.i.3[upper.tri(y.miss.5)], y.miss.no.N.i.3[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.4<-c(y.miss.no.N.i.4[upper.tri(y.miss.5)], y.miss.no.N.i.4[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.5<-c(y.miss.no.N.i.5[upper.tri(y.miss.5)], y.miss.no.N.i.5[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.6<-c(y.miss.no.N.i.6[upper.tri(y.miss.5)], y.miss.no.N.i.6[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.7<-c(y.miss.no.N.i.7[upper.tri(y.miss.5)], y.miss.no.N.i.7[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.8<-c(y.miss.no.N.i.8[upper.tri(y.miss.5)], y.miss.no.N.i.8[lower.tri(y.miss.5)])
y.miss.no.N.i.vec.9<-c(y.miss.no.N.i.9[upper.tri(y.miss.5)], y.miss.no.N.i.9[lower.tri(y.miss.5)])
y.vec.1<-c(y.matrix.data.1[upper.tri(y.miss.5)], y.matrix.data.1[lower.tri(y.miss.5)])
y.vec.2<-c(y.matrix.data.2[upper.tri(y.miss.5)], y.matrix.data.2[lower.tri(y.miss.5)])
y.vec.3<-c(y.matrix.data.3[upper.tri(y.miss.5)], y.matrix.data.3[lower.tri(y.miss.5)])
y.vec.4<-c(y.matrix.data.4[upper.tri(y.miss.5)], y.matrix.data.4[lower.tri(y.miss.5)])
y.vec.5<-c(y.matrix.data.5[upper.tri(y.miss.5)], y.matrix.data.5[lower.tri(y.miss.5)])
y.vec.6<-c(y.matrix.data.6[upper.tri(y.miss.5)], y.matrix.data.6[lower.tri(y.miss.5)])
y.vec.7<-c(y.matrix.data.7[upper.tri(y.miss.5)], y.matrix.data.7[lower.tri(y.miss.5)])
y.vec.8<-c(y.matrix.data.8[upper.tri(y.miss.5)], y.matrix.data.8[lower.tri(y.miss.5)])
y.vec.9<-c(y.matrix.data.9[upper.tri(y.miss.5)], y.matrix.data.9[lower.tri(y.miss.5)])
index.vec<-c(index.matrix[upper.tri(index.matrix)], index.matrix[lower.tri(index.matrix)])
#--------------- End Creating the Data--------------#
# Reseed for the MCMC fits.  Note the censoring via sample() above has
# already consumed RNG draws before this point in each iteration.
set.seed(iii)
# Fit conditional-binomial AME models (amen::ame, model="cbin") with the
# fixed-choice cap odmax matching the censoring level of each network;
# Xdyad = absolute drinking difference, Xrow = sender's drinking days.
# Keep the posterior means of the regression coefficients per replicate.
#--------- CB 2 ------------------------------#
fit.full<-ame(y.matrix.data.2, Xdyad=x.2.matrix.data, Xrow=drink.days.month,
model="cbin", intercept=TRUE, odmax=2, print=FALSE, plot=FALSE,
burn=5000,nscan=50000, odens=25, seed=iii)
params.full<-colMeans(fit.full$BETA)
keep.track.CB.2<-rbind(keep.track.CB.2,params.full)
#--------- CB 4 ------------------------------#
fit.full<-ame(y.matrix.data.4, Xdyad=x.2.matrix.data, Xrow=drink.days.month,
model="cbin", intercept=TRUE, odmax=4, print=FALSE, plot=FALSE,
burn=5000,nscan=50000, odens=25, seed=iii)
params.full<-colMeans(fit.full$BETA)
keep.track.CB.4<-rbind(keep.track.CB.4,params.full)
#--------- CB 6 ------------------------------#
fit.full<-ame(y.matrix.data.6, Xdyad=x.2.matrix.data, Xrow=drink.days.month,
model="cbin", intercept=TRUE, odmax=6, print=FALSE, plot=FALSE,
burn=5000,nscan=50000, odens=25, seed=iii)
params.full<-colMeans(fit.full$BETA)
keep.track.CB.6<-rbind(keep.track.CB.6,params.full)
#--------- CB 8 ------------------------------#
fit.full<-ame(y.matrix.data.8, Xdyad=x.2.matrix.data, Xrow=drink.days.month,
model="cbin", intercept=TRUE, odmax=8, print=FALSE, plot=FALSE,
burn=5000,nscan=50000, odens=25, seed=iii)
params.full<-colMeans(fit.full$BETA)
keep.track.CB.8<-rbind(keep.track.CB.8,params.full)
# Progress indicator
print(iii)
}
|
f0bff154021e26391f51054279b7e452de36b4f3
|
46f5fd6f58f4ef9d56835b72b62448af0c4c3353
|
/man/prune_site_digests.Rd
|
6010b120990ca18e89fdf7a126cd255e9624fe82
|
[
"MIT"
] |
permissive
|
jonathan-g/blogdownDigest
|
61068d35f3c434b0705523d48990a4b07080d82b
|
3224174980b32625245ad39d3ee8b45aef668ab9
|
refs/heads/main
| 2021-06-19T17:15:34.052435
| 2021-01-20T00:57:01
| 2021-01-20T00:57:01
| 157,514,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 659
|
rd
|
prune_site_digests.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/digest.R
\name{prune_site_digests}
\alias{prune_site_digests}
\title{Delete stored digests for specified source files}
\usage{
prune_site_digests(files)
}
\arguments{
\item{files}{A character vector of paths to the source files to be removed.}
}
\value{
The path to the digest file.
}
\description{
\code{prune_site_digests} removes the lines from the digest file
corresponding to a vector of source files.
}
\details{
Modifies the stored digest file to remove lines corresponding to selected
source files.
}
\seealso{
\code{\link{update_site_digests}()}, \code{\link{digests}}.
}
|
58aeb70aa446ac5b8ea0f64d6673a01ef7668c69
|
df59d2e7f1675e855bbc8f20bcb129fe68f0110c
|
/cachematrix.R
|
279aa836044b241a3074bec696876607d0eb3b45
|
[] |
no_license
|
greatgoal/ProgrammingAssignment2
|
3bfcdb8478f8e14cf14e33c5c9a4b6f5b309af63
|
0f3d021cb5e95029c1c0aed5595336627de2d1c7
|
refs/heads/master
| 2021-01-18T11:33:14.442098
| 2014-06-21T12:45:35
| 2014-06-21T12:45:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
cachematrix.R
|
## This is to complete programming assignment 2. Thank you!
## The pair of functions below will cache the inverse of a matrix
## makeCacheMatrix creates a special "matrix", which is really a list containing a function to
## Set the matrix
## Get the matrix
## Set the inverse of the matrix
## Get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  ## Build a cache-aware matrix wrapper: a list of four accessor
  ## functions closing over the matrix `x` and its cached inverse.
  inv <- NULL
  ## Replace the stored matrix; any previously cached inverse is stale,
  ## so drop it.
  setMatrix <- function(y) {
    x <<- y
    inv <<- NULL
  }
  ## Retrieve the stored matrix
  getMatrix <- function() {
    x
  }
  ## Cache a freshly computed inverse
  setInverse <- function(inverse) {
    inv <<- inverse
  }
  ## Retrieve the cached inverse (NULL if none has been computed yet)
  getInverse <- function() {
    inv
  }
  ## Expose the accessors under the names callers rely on
  list(
    setMatrix = setMatrix,
    getMatrix = getMatrix,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## cacheSolve computes the inverse of the special matrix created with the above function
## It first checks to see if the inverse has already been computed
## If so, it gets inverse and skips the computation
## Otherwise, it computes the inverse of the matrix and set the inverse in the cache
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix
  ## wrapper), reusing the cached inverse when one is available.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: invert the stored matrix, memoise the result, return it.
  ## Extra arguments are forwarded to solve().
  inv <- solve(x$getMatrix(), ...)
  x$setInverse(inv)
  inv
}
|
92331753932a34988064e235d967f17d10955283
|
a78b7151334497392c30cb4ec679826aa7ca1344
|
/tests/testthat/test-labelled_spss.R
|
4043b799197b95081fe95d082907f82689567497
|
[
"MIT"
] |
permissive
|
Displayr/havenOld
|
1a2218f07be4f7fb67fe12b6d070c612b6cab3ed
|
70f4205ba23661fbad41c694eca8986167013c35
|
refs/heads/master
| 2023-04-01T06:43:14.701982
| 2020-09-06T23:06:31
| 2021-04-08T23:14:24
| 293,376,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
test-labelled_spss.R
|
# Unit tests (testthat) for the haven_labelled_spss class: constructor
# validation, printed output, subsetting, and the SPSS-style user-defined
# missing-value semantics (na_values / na_range).
test_that("constructor checks na_value", {
# expect_incompatible_type() is a package-local testthat helper, defined
# in the package's test helpers (not visible in this file).
expect_incompatible_type(labelled_spss(1:10, na_values = "a"))
})
test_that("constructor checks na_range", {
expect_error(labelled_spss(1:10, na_range = "a"), "must be a numeric vector")
expect_error(labelled_spss(1:10, na_range = 1:3), "of length two")
# na_range is rejected outright on non-numeric (character) vectors
expect_error(
labelled_spss("a", c(a = "a"), na_range = 1:2),
"only applicable for labelled numeric"
)
})
# Snapshot test: note that renaming `x` would change the recorded snapshot
test_that("printed output is stable", {
x <- labelled_spss(
1:5, c("Good" = 1, "Bad" = 5),
na_values = c(1, 2),
na_range = c(3, Inf)
)
expect_snapshot(x)
})
test_that("subsetting preserves attributes", {
x <- labelled_spss(
1:5, c("Good" = 1, "Bad" = 5),
na_values = c(1, 2),
na_range = c(3, Inf),
label = "Rating"
)
# x[] must round-trip labels, na_values, na_range and the label attribute
expect_identical(x, x[])
})
test_that("labels must be unique", {
expect_error(
labelled_spss(1, c(female = 1, male = 1), na_values = 9),
"must be unique")
})
# is.na -------------------------------------------------------------------
test_that("values in na_range flagged as missing", {
x <- labelled_spss(1:5, c("a" = 1), na_range = c(1, 3))
expect_equal(is.na(x), c(TRUE, TRUE, TRUE, FALSE, FALSE))
})
test_that("values in na_values flagged as missing", {
x <- labelled_spss(1:5, c("a" = 1), na_values = c(1, 3, 5))
expect_equal(is.na(x), c(TRUE, FALSE, TRUE, FALSE, TRUE))
})
# Types -------------------------------------------------------------------
test_that("combining preserves class", {
# Deliberately skipped: vec_c() support is still to be implemented
skip("todo")
expect_s3_class(vec_c(labelled_spss(), labelled_spss()), "haven_labelled_spss")
})
|
4d6284c859394c905b0a4f8d86fd2ef7716b750f
|
384b417a0dcc38636beb26118a79cd8a88cae043
|
/R/RcppExports.R
|
8318defa4c72cb7cab85b60208a13a138a3851d2
|
[] |
no_license
|
simschul/coopbreed5
|
65ace1470cf5e98ac8eebd333b5ad26877e39daf
|
d4dc7a5634ab791153871882b2fd9f6ab27499df
|
refs/heads/master
| 2021-06-16T11:38:48.475417
| 2017-04-26T09:11:33
| 2017-04-26T09:11:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated thin R wrappers that forward their
# arguments unchanged to compiled C++ routines in the 'coopbreed'
# shared library via .Call().  Argument semantics are defined by the
# C++ sources, not visible here.  Any comments added to this file will
# be lost the next time compileAttributes() is re-run.
Sb <- function(d, k) {
.Call('coopbreed_Sb', PACKAGE = 'coopbreed', d, k)
}
mean_rcpp <- function(x) {
.Call('coopbreed_mean_rcpp', PACKAGE = 'coopbreed', x)
}
coopbreed <- function(paths, n_gener, n_patches, MutStep, n_mates, n_off, par_c, par_k, modify) {
.Call('coopbreed_coopbreed', PACKAGE = 'coopbreed', paths, n_gener, n_patches, MutStep, n_mates, n_off, par_c, par_k, modify)
}
coopbreed2 <- function(paths, n_gener, n_patches, MutStep, n_mates_vec, n_off_vec, par_c_vec, par_k_vec) {
.Call('coopbreed_coopbreed2', PACKAGE = 'coopbreed', paths, n_gener, n_patches, MutStep, n_mates_vec, n_off_vec, par_c_vec, par_k_vec)
}
|
bff28d4ef2f41130cb48ae5a4a64856d977d7eba
|
91d57054c2e6079e4cb48af3c4c8782bf83228d0
|
/man/retrainInception.Rd
|
3b0d230ed0582fae3c5e15c6ae86338163837e97
|
[] |
no_license
|
trafficonese/Rinception
|
b364cf6ca7211a15ffea5c6016f10e8aa8d56f13
|
12a1a5ff93421a5875d471b2ad937d03491b2206
|
refs/heads/master
| 2020-04-18T02:44:37.512747
| 2017-09-19T14:08:11
| 2017-09-19T14:08:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,515
|
rd
|
retrainInception.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrainInception.R
\name{retrainInception}
\alias{retrainInception}
\title{Create an image classifier}
\usage{
retrainInception(imageDir = "images", outputGraph = "output_graph.pb",
outputLabels = "output_labels.txt", summariesDir = "summaries",
trainingSteps = 4000, learningRate = 0.01, testingPercentage = 10,
validationPaercentage = 10, evaluationInterval = 10,
trainingBatchSize = 500, testBatchSize = 500, validationBatchSize = 100,
modelDir = "imagenet", bottleneckDir = "bottleneck",
finalTensorName = "final_result", flipLeftRight = FALSE, randomCrop = 0,
randomScale = 0, randomBrightness = 0)
}
\arguments{
\item{imageDir}{Character Path the directory containing folders of images. Each folder should be named as the class e.g. 'dogs, 'cats', 'birds', and contain training images of those classes}
\item{outputGraph}{Character Path saying where to save the trained graph}
\item{outputLabels}{Character Path saying where to save the trained graph's labels}
\item{summariesDir}{Character Path saying where to save summary logs for TensorBoard.}
\item{trainingSteps}{Numeric how many training steps to run before ending, defaults to 4000. The more the better.}
\item{learningRate}{Numeric How large a learning rate to use when training, defaults to 0.01}
\item{testingPercentage}{Numeric What percentage of images to use as a test set, defaults to 10}
\item{validationPaercentage}{Numeric What percentage of images to use as a validation set, defaults to 10}
\item{evaluationInterval}{Numeric How often to evaluate the training results, defaults to intervals of 10}
\item{trainingBatchSize}{Numeric How many images to train on at a time, defaults to 500}
\item{testBatchSize}{Numeric How many images to test on at a time. This test set is only used infrequently to verify the overall accuracy of the model. Defaults to 500}
\item{validationBatchSize}{Numeric How many images to use in an evaluation batch. This validation set is used much more often than the test set, and is an early indicator of how accurate the model is during training. Defaults to 100}
\item{modelDir}{Character Path to classify_image_graph_def.pb, imagenet_synset_to_human_label_map.txt, and imagenet_2012_challenge_label_map_proto.pbtxt. The model will be automatically downloaded if it hasn't been already.}
\item{bottleneckDir}{Character Path to cache bottleneck layer values as files}
\item{finalTensorName}{Character The name of the output classification layer in the retrained graph}
\item{flipLeftRight}{Logical Whether to randomly flip half of the training images horizontally, defaults to FALSE}
\item{randomCrop}{Numeric A percentage determining how much of a margin to randomly crop off the training images. Defaults to 0}
\item{randomScale}{Numeric A percentage determining how much to randomly scale up the size of the training images by. Defaults to 0}
\item{randomBrightness}{Numeric A percentage determining how much to randomly multiply the training image input pixels up or down by. Defaults to 0}
}
\value{
A list of paths for the model, model labels and the logs directory
}
\description{
Undertakes transfer learning with inception v3 architecture image recognition model.
}
\details{
This code requires tensorflow and python to run. These should be installed first.
Tensorflow can be installed from within R using \code{tensorflow::install_tensorflow()}.
The function \code{retrainInception} is a simple wrapper around pre-existing python scripts which can be found
here \url{https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/image_retraining}. Avoid use of ".." in paths.
}
\examples{
\dontrun{
# Get example images (218MB)
tDir <- tempdir()
download.file(url = "http://download.tensorflow.org/example_images/flower_photos.tgz",
destfile = file.path(tDir, 'images.tgz'))
# Unzip example images
untar(tarfile = file.path(tDir, 'images.tgz'), exdir = file.path(tDir, 'images'))
# Train a model using images shipped with Rinception
# This is quick and dirty
incep <- retrainInception(imageDir = file.path(tDir, 'images/flower_photos'),
trainingSteps = 50,
trainingBatchSize = 10,
testBatchSize = 10,
validationBatchSize = 20)
# Use Tensorboard to examine the model
tensorflow::tensorboard(log_dir = incep$log_dir)
}
}
|
b51cd69922264e433392e07f39a6a351df25464c
|
d8ac4efad65306245e551cfc569f5ab87edd7ba0
|
/man/adj_inflation.Rd
|
fd90abceee197c51f40557952b143af5b88c9db3
|
[
"MIT"
] |
permissive
|
elipousson/cwi
|
c93fd88c46c04e429c86c88c4d29079258127a87
|
11d14158d4995b26fdb11a55d501ff60c08981f9
|
refs/heads/main
| 2023-07-12T10:00:51.545688
| 2021-08-16T02:00:00
| 2021-08-16T02:00:00
| 325,317,743
| 0
| 0
|
NOASSERTION
| 2021-08-15T01:03:02
| 2020-12-29T15:11:43
|
R
|
UTF-8
|
R
| false
| true
| 1,586
|
rd
|
adj_inflation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adj_inflation.R
\name{adj_inflation}
\alias{adj_inflation}
\title{Add inflation-adjusted values to a data frame}
\usage{
adj_inflation(
.data,
value,
year,
base_year = 2019,
key = Sys.getenv("BLS_KEY")
)
}
\arguments{
\item{.data}{A data frame containing monetary values by year.}
\item{value}{Bare column name of monetary values; for safety, has no default.}
\item{year}{Bare column name of years; for safety, has no default.}
\item{base_year}{Year on which to base inflation amounts; defaults to 2019.}
\item{key}{A string giving the BLS API key. Defaults to the value in \code{Sys.getenv("BLS_KEY")}.}
}
\value{
A data frame with two additional columns: adjustment factors, and adjusted values. The adjusted values column is named based on the name supplied as \code{value}; e.g. if \code{value = avg_wage}, the adjusted column is named \code{adj_avg_wage}.
}
\description{
This is modeled after \code{blscrapeR::inflation_adjust} that joins a data frame with an inflation adjustment table from the Bureau of Labor Statistics, then calculates adjusted values. It returns the original data frame with two additional columns for adjustment factors and adjustment values.
}
\details{
\strong{Note:} Because \code{adj_inflation} makes API calls, internet access is required.
}
\examples{
\dontrun{
wages <- data.frame(
fiscal_year = 2010:2016,
wage = c(50000, 51000, 52000, 53000, 54000, 55000, 54000)
)
adj_inflation(wages, value = wage, year = fiscal_year, base_year = 2016)
}
}
|
f90e6acd86f31b0fa692ad410fcfc3fce24503b2
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/s2dverification/R/InsertDim.R
|
ee3e739dcae2426fc35ad49a82af24408a10071c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
InsertDim.R
|
InsertDim <- function(var, posdim, lendim) {
  #
  # Insert a new dimension of length 'lendim' at position 'posdim',
  # replicating the values of 'var' along it.
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  #
  # 'var' may be a plain vector (treated as 1-dimensional) or an array of
  # any rank.  Unlike the previous implementation this version is not
  # limited to outputs of at most 10 dimensions, avoids the element-wise
  # replication loop, and no longer needs the IniListDims() helper.
  #
  dimsvar <- dim(var)
  if (is.null(dimsvar)) {
    dimsvar <- length(var)
  }
  if (posdim < 1 || posdim > length(dimsvar) + 1) {
    stop("posdim must be between 1 and length(dim(var)) + 1")
  }
  #
  # Replicate 'var' along a new trailing dimension (array() recycles its
  # input to fill the requested dimensions), then permute so that the new
  # dimension ends up at position 'posdim'.
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  #
  replicated <- array(var, dim = c(dimsvar, lendim))
  perm <- append(seq_along(dimsvar), length(dimsvar) + 1L, after = posdim - 1)
  #
  # Outputs
  # ~~~~~~~~~
  #
  aperm(replicated, perm)
}
|
e47fb716742933531406d607926bee23934bb39b
|
d03924f56c9f09371d9e381421a2c3ce002eb92c
|
/man/qqbounds.Rd
|
36ae5a25183a60f88d20b50e4bd1d0492af25d82
|
[] |
no_license
|
cran/distr
|
0b0396bbd5661eb117ca54026afc801afaf25251
|
c6565f7fef060f0e7e7a46320a8fef415d35910f
|
refs/heads/master
| 2023-05-25T00:55:19.097550
| 2023-05-08T07:10:06
| 2023-05-08T07:10:06
| 17,695,561
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,053
|
rd
|
qqbounds.Rd
|
\name{qqbounds}
\title{Computation of confidence intervals for qqplot}
\usage{
qqbounds(x,D,alpha,n,withConf.pw, withConf.sim,
exact.sCI=(n<100),exact.pCI=(n<100),
nosym.pCI = FALSE, debug = FALSE)
}
\alias{qqbounds}
\arguments{
\item{x}{data to be checked for compatibility with distribution \code{D}.}
\item{D}{object of class \code{"UnivariateDistribution"}, the assumed data
distribution.}
\item{alpha}{confidence level}
\item{n}{sample size}
\item{withConf.pw}{logical; shall pointwise confidence lines be computed?}
\item{withConf.sim}{logical; shall simultaneous confidence lines be computed?}
\item{exact.pCI}{logical; shall pointwise CIs be determined with exact Binomial distribution?}
\item{exact.sCI}{logical; shall simultaneous CIs be determined with exact kolmogorov distribution?}
\item{nosym.pCI}{logical; shall we use (shortest) asymmetric CIs?}
\item{debug}{logical; if \code{TRUE} additional output to debug confidence bounds.}
}
\description{
We compute confidence intervals for QQ plots.
These can be simultaneous (to check whether the whole data set is compatible)
or pointwise (to check whether each (single) data point is compatible);}
\details{
Both simultaneous and pointwise confidence intervals come in a
finite-sample and an asymptotic version;
the finite sample versions will get quite slow
for large data sets \code{x}, so in these cases the asymptotic version
will be preferrable.\cr
For simultaneous intervals,
the finite sample version is based on C function \code{"pkolmogorov2x"}
from package \pkg{stats}, while the asymptotic one uses
R function \code{pkstwo} again from package \pkg{stats}, both taken
from the code to \code{\link[stats:ks.test]{ks.test}}.
Both finite sample and asymptotic versions use the fact,
that the distribution of the supremal distance between the
empirical distribution \eqn{\hat F_n}{F.emp} and the corresponding theoretical one
\eqn{F} (assuming data from \eqn{F})
does not depend on \eqn{F} for continuous distribution \eqn{F}
and leads to the Kolmogorov distribution (compare, e.g. Durbin(1973)).
In case of \eqn{F} with jumps, the corresponding Kolmogorov distribution
is used to produce conservative intervals.
\cr
For pointwise intervals,
the finite sample version is based on corresponding binomial distributions,
(compare e.g., Fisz(1963)), while the asymptotic one uses a CLT approximation
for this binomial distribution. In fact, this approximation is only valid
for distributions with strictly positive density at the evaluation quantiles.
In the finite sample version, the binomial distributions will in general not
be symmetric, so that, by setting \code{nosym.pCI} to \code{TRUE} we may
produce shortest asymmetric confidence intervals (albeit with a considerable
computational effort).
The symmetric intervals returned by default will
be conservative (which also applies to distributions with jumps in this case).
For distributions with jumps or with density (nearly) equal to 0 at the
corresponding quantile, we use the approximation of \code{(D-E(D))/sd(D)}
by the standard normal at these points; this latter approximation is only
available if package \pkg{distrEx} is installed; otherwise the corresponding
columns will be filled with \code{NA}.
}
\value{
A list with components \code{crit} --- a matrix with the lower and upper confidence
bounds, and \code{err} a logical vector of length 2.
Component \code{crit} is a matrix with \code{length(x)} rows
and four columns \code{c("sim.left","sim.right","pw.left","pw.right")}.
Entries will be set to \code{NA} if the corresponding \code{x} component
is not in \code{support(D)} or if the computation method returned an error
or if the corresponding parts have not been required (if \code{withConf.pw}
or \code{withConf.sim} is \code{FALSE}).
\code{err} has components \code{pw}
---do we have a non-error return value for the computation of pointwise CI's
(\code{FALSE} if \code{withConf.pw} is \code{FALSE})--- and \code{sim}
---do we have a non-error return value for the computation of simultaneous CI's
(\code{FALSE} if \code{withConf.sim} is \code{FALSE}).
}
\references{
Durbin, J. (1973)
\emph{Distribution theory for tests based on the sample distribution
function}. SIAM.
Fisz, M. (1963). \emph{Probability Theory and Mathematical Statistics}.
3rd ed. Wiley, New York.
}
\author{
Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de}
}
\seealso{
\code{\link[stats:qqnorm]{qqplot}} from package \pkg{stats} -- the standard QQ plot
function, \code{\link[stats:ks.test]{ks.test}} again from package \pkg{stats}
for the implementation of the Kolmogorov distributions;
\code{\link[distr]{qqplot}} from package \pkg{distr} for
comparisons of distributions, and
\code{qqplot} from package \pkg{distrMod} for comparisons
of data with models, as well as \code{RobAStBase::qqplot} from package \pkg{RobAStBase} for
checking of corresponding robust estimators.
}
\examples{
qqplot(Norm(15,sqrt(30)), Chisq(df=15))
## uses:
old.digits <- getOption("digits")
on.exit(options(digits = old.digits))
options(digits = 6)
set.seed(20230508)
## IGNORE_RDIFF_BEGIN
qqbounds(x = rnorm(30), Norm(), alpha = 0.95, n = 30,
withConf.pw = TRUE, withConf.sim = TRUE,
exact.sCI = TRUE, exact.pCI = TRUE,
nosym.pCI = FALSE)
## other calls:
qqbounds(x = rchisq(30,df=4), Chisq(df=4), alpha = 0.95, n = 30,
withConf.pw = TRUE, withConf.sim = TRUE,
exact.sCI = FALSE, exact.pCI = FALSE,
nosym.pCI = FALSE)
qqbounds(x = rchisq(30,df=4), Chisq(df=4), alpha = 0.95, n = 30,
withConf.pw = TRUE, withConf.sim = TRUE,
exact.sCI = TRUE, exact.pCI= TRUE,
nosym.pCI = TRUE)
## IGNORE_RDIFF_END
options(digits = old.digits)
}
\keyword{hplot}
\keyword{distribution}
|
eaff0d345c88d8cc7471c28636c206526116ae84
|
8115454bc342f2d493a7eed375533b91a25bb4d6
|
/genome_splitter.R
|
87e8e62dcc7784c68b0ba8c7a5dcbb4634f47d26
|
[] |
no_license
|
Callum-Rakhit/cgh2bed
|
86be288feb7917e4277118f61e07c4ba7eeead90
|
b4fc7ffdc3b54fb9d6f26f2f8474788bb348029f
|
refs/heads/master
| 2020-04-17T18:23:51.895649
| 2019-03-25T11:35:27
| 2019-03-25T11:35:27
| 166,824,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,302
|
r
|
genome_splitter.R
|
#!/usr/bin/Rscript
# (shebang fixed: the original read "#/usr/bin/Rscript", missing the "!")
#
# Usage: genome_splitter.R <input table> <output file>
# Reads a headerless, tab-separated table of genomic regions and (below)
# splits every region into 0.5 Mb bins.
args = commandArgs(trailingOnly=TRUE)
if (length(args) < 2) {
  stop(paste("Supply an input vcf (1) and an output file name/location (2)",
             sep=""),
       call.=FALSE)
}
# Script to pick the 100bp region with the most SNPs for
# each of the 6206 0.5Mb chunks contained in the genome
# NOTE(review): the description above mentions SNP counting, but the code in
# this file only bins regions into 0.5 Mb chunks -- confirm intent.
hg19 <- read.delim(args[1], header = F)   # columns: chromosome, start, end
options(scipen = 999) # Remove scientific notation (keep coordinates plain)
# Split each genomic region of `input` into consecutive 0.5 Mb bins.
#
# input:       data.frame whose first three columns are chromosome, start
#              and end (the start column is ignored, as in the original).
# output_name: kept for backward compatibility with existing callers; its
#              value is never evaluated (the original overwrote it
#              immediately), so an undefined symbol may still be passed.
#
# Returns a data.frame with columns Chr, Start, End: floor(end / 5e5) full
# bins of 500,000 bp plus one final bin holding the remainder, if any.
#
# Fixes vs the original: uses `input` instead of the global `hg19`; no
# longer grows the data.frame one row per iteration (O(n^2)); handles
# regions shorter than one bin (`bins == 0` made `(1:bins)-1` iterate over
# c(0, -1)); skips the degenerate zero-length final row that was emitted
# when `end` was an exact multiple of 500,000.
binmaker <- function(input, output_name = NULL) {
  bin_size <- 500000L
  pieces <- vector("list", nrow(input))          # pre-allocate, bind once
  for (row_idx in seq_len(nrow(input))) {
    chr <- as.character(input[row_idx, 1])
    end <- as.integer(input[row_idx, 3])
    n_full <- end %/% bin_size                   # number of complete bins
    starts <- seq_len(n_full) * bin_size - bin_size + 1L   # 1, 500001, ...
    ends <- seq_len(n_full) * bin_size
    if (end > n_full * bin_size) {               # partial bin at the tail
      starts <- c(starts, n_full * bin_size + 1L)
      ends <- c(ends, end)
    }
    pieces[[row_idx]] <- data.frame(Chr = chr, Start = starts, End = ends,
                                    stringsAsFactors = FALSE)
  }
  output <- do.call(rbind, pieces)
  rownames(output) <- NULL
  output
}
# Build the binned table and write it out as headerless tab-separated text.
# NOTE(review): `output_name` is not defined anywhere before this call; it
# only works because binmaker() never evaluates that argument -- confirm
# and consider passing NULL explicitly.
df <- binmaker(hg19, output_name)
df <- apply(df, 2, as.character) # Flattens the data frame so you can write to file
write.table(x = df, file = args[2],
            sep = "\t", quote = F, row.names = F, col.names = F)
|
7fc3562716bb82c3739dd8fb02583739035a8b1f
|
ea3856377b15b9fcc0b15bee45832b64e6027d96
|
/tests/testthat/test-LM_API.R
|
fb4c0613811f91fb771d695942e1a78ce7665f18
|
[] |
no_license
|
jmaspons/dSep
|
41428c96267895ca8131f4f18348ea275b5f7be2
|
00b899cdbde2aeea14a25780b6c446b4c31a209f
|
refs/heads/master
| 2020-04-22T21:42:16.919194
| 2019-02-14T11:46:59
| 2019-02-14T11:46:59
| 170,681,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,088
|
r
|
test-LM_API.R
|
## Common API for lm, glm, gls, pgls, MCMCglmm and brm ----
## Verifies that the package's generic helpers (pValues, scoef, nobs) work
## for every supported model class stored in the sample-model fixture `M`.
# Packages that produced the fitted models (not needed to run the tests):
# library(caper)
# library(nlme)
# library(MCMCglmm)
# library(brms)

context("lm, glm, gls, pgls, MCMCglmm and brm models")

# load(system.file("extdata/sampleModels.RData", package="dSep"))
## No need to increase the package size for a test. File in .Rbuildignore
load("../../inst/extdata/sampleModels.RData")

# Show the class of each sample model (useful when run interactively).
sapply(M, class)

test_that("models have the right classes", {
  # expect_is() passes if the model inherits any of the listed classes;
  # summary() additionally checks that the method does not error.
  sapply(M, function(model) {
    expect_is(model, c("lm", "glm", "gls", "pgls", "MCMCglmm", "brmsfit"))
    summary(model)
  })
})

## Generic methods for linear models ----
context("Generic methods for linear models")

test_that("pValues() works", {
  lapply(M, function(model) expect_type(try(pValues(model)), "double"))
})

test_that("scoef() works", {
  lapply(M, function(model) expect_type(try(scoef(model)), "double"))
})

test_that("nobs() works", {
  # nobs() fails for MCMCglmm objects, hence the try() wrapper.
  lapply(M, function(model) expect_equal(try(mode(nobs(model))), "numeric"))
})
|
1950483e1a9254bd5262a21730836e6e89cabf14
|
4679cf566e668bfbc6c149ca43936f379bfd893f
|
/R/hi.R
|
2c7c805f2b858938aa225bfbd3b7e634c3547cf6
|
[] |
no_license
|
gomulki/Snowfall
|
253de8bc9a069e12978060ff2173dde0ecfe08d9
|
69935885975d939d2d659fb2a0b6c3f764b8d750
|
refs/heads/master
| 2020-12-31T00:09:50.995687
| 2017-01-31T19:59:30
| 2017-01-31T19:59:30
| 80,551,257
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 747
|
r
|
hi.R
|
#' Start a conversation
#'
#' This function will produce a modulated greeting, useful in starting
#' conversations.
#'
#' @param who character(1) Name of person to be greeted.
#'
#' @param how character(1) (optional) How the person is to be greeted. Options
#'   include "shout", "whisper", or "asis". When omitted, \code{match.arg}
#'   resolves it to the first choice, "shout".
#'
#' @return A character(1) vector containing the appropriate greeting.
#' @examples
#' hi("Dick")
#' hi("Dick", "shout")
#'
#' @export
hi <- function(who, how=c("shout","whisper","asis")) {
  # Validate `who`: a single, non-NA, non-empty string.
  stopifnot(is.character(who),length(who) == 1,
            !anyNA(who),nzchar(who))
  how <- match.arg(how)  # defaults to the first choice, "shout"
  # Map the greeting style to a case-transforming function.  (The original
  # switch() call carried a trailing comma -- an empty argument -- removed.)
  fun <- switch(how,
                shout=toupper, whisper=tolower, asis=identity)
  paste("hi", fun(who), "how are you?")
}
|
bfa9e47b1ee0c36db78f04e609770491bd3b087e
|
f1dd4979186d90cc479c48d7673f4ce4c633cf35
|
/psf/astro/zone110.r
|
3940aa04c846e310df2bf7cb0d7ae877658e1f58
|
[] |
no_license
|
flaviasobreira/DESWL
|
9d93abd3849f28217ae19c41d9b270b9b1bd5909
|
6ba2a7f33196041aa7f34d956535f7a0076ae1f2
|
refs/heads/master
| 2020-03-26T05:50:05.292689
| 2018-08-12T21:43:34
| 2018-08-12T21:43:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 315
|
r
|
zone110.r
|
271659
271662
383298
389585
390291
390292
390293
390294
390628
390662
390665
390672
391033
391055
391057
391064
391430
391432
391434
391437
391803
392118
392125
392127
400462
400848
408720
408721
408723
409126
409127
409454
409456
410187
410191
410195
410578
410581
410584
502117
502131
502134
502806
514224
514226
|
42fa880c7ed66e134fdd41faefd412a5c764bdaa
|
a3b398e34be5d7c41b649f82188cb53b2df80715
|
/man/markersMatrix.Rd
|
db386aa2e7bc36cbcd19afbae895e25b30aba614
|
[] |
no_license
|
kakawill46/TCGAWorkflowData
|
c12dfd82bf519f0550e08d9f282f41174afca8b3
|
8a5da5d0d956d73c559ee43e7303c65af96451cf
|
refs/heads/master
| 2022-04-10T03:59:05.545290
| 2019-05-24T13:58:10
| 2019-05-24T13:58:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 738
|
rd
|
markersMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TCGAWorkflowExampleData.R
\docType{data}
\name{markersMatrix}
\alias{markersMatrix}
\title{Probes meta file from broadinstitute website for Copy Number
Variation Analysis (CNV) analysis
From: ftp://ftp.broadinstitute.org/pub/GISTIC2.0/hg19_support/genome.info.6.0_hg19.na31_minus_frequent_nan_probes_sorted_2.1.txt}
\format{A matrix with 1831228 rows and 3 columns}
\description{
Probes meta file from broadinstitute website for Copy Number
Variation Analysis (CNV) analysis
From: ftp://ftp.broadinstitute.org/pub/GISTIC2.0/hg19_support/genome.info.6.0_hg19.na31_minus_frequent_nan_probes_sorted_2.1.txt
}
\examples{
data("markersMatrix")
}
\keyword{internal}
|
e9abf0f72d231227ad0fd5f35e41dc14441b82ab
|
3edc7e8c4ad8fc48ec87617f75d5cc44a805a23d
|
/man/msarc.plotHeatmap.Rd
|
1f4a2d5555136ff441d011c52bef8891b1f51433
|
[] |
no_license
|
mellomu/msarc
|
4645a2161b40be5e6f67420189f8da98f4b70008
|
bd0f070c4be51620ca75cb951d37a46b43594f92
|
refs/heads/master
| 2020-09-14T23:49:24.652199
| 2015-01-12T00:00:00
| 2015-01-12T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,113
|
rd
|
msarc.plotHeatmap.Rd
|
\name{msarc.plotHeatmap}
\alias{msarc.plotHeatmap}
\title{Correlation Heat Maps for Mass Spec Experiments}
\description{Given 3 or more mass spec experiments as \code{msarc} objects,
this function draws a heat map of the similarity among the experiments.
Similarity may be calculated based on similarity of scores, or simply by
the presence or absence of individual UniProt accession id's.
}
\usage{
msarc.plotHeatmap(msalist, method = "euclidean", useScore = T, ...)
}
\arguments{
\item{msalist}{a list of \code{msarc} objects.}
\item{method}{Correlation method to pass to \code{dist}. Acceptable values
are whatever \code{dist} supports, currently "euclidean",
"maximum", "manhattan", "canberra", "binary" or "minkowski".
See the documentation for \code{stats:dist} for details.}
\item{useScore}{If true (the default), use the score of each peptide in
computing the correlation. If false, just use the presence
or absence of each protein in each experiment.}
\item{\dots}{Other parameters to pass to \code{heatmap.2} from package
\code{gplots}.}
}
\value{the distance matrix calculated by \code{dist}.}
\details{The samples must have distinct filenames, or this function will
fail. You can load the samples from mass spec files, or just explicitly
set object$filename for each \code{msarc} object. These filenames will also
be used as labels in the heat map.
}
\references{
Gregory R. Warnes. Includes R source code and/or documentation
contributed by: Ben Bolker, Lodewijk Bonebakker, Robert Gentleman,
Wolfgang Huber Andy Liaw, Thomas Lumley, Martin Maechler, Arni
Magnusson, Steffen Moeller, Marc Schwartz and Bill Venables (2011).
gplots: Various R programming tools for plotting data. R package
version 2.10.1. http://CRAN.R-project.org/package=gplots
}
\author{Gord Brown}
\seealso{
See also \code{heatmap.2} from package \code{gplots} and
\code{dist} from package \code{stats}.
}
\examples{
data('sample_cluster',package="msarc")
\dontrun{msarc.plotHeatmap(sample_cluster)}
}
\keyword{hplot}
|
4da712a831a2fd47f785f15927465cabd28f8d21
|
38286faee2117eb504a3efb3978331317c05e418
|
/FuzzyTransformFunctions.r
|
b298f17a0a5c602b57088996dd9c09248d999daf
|
[] |
no_license
|
erfactor/FuzzyStructuralBreaks
|
17ba50101e3b12a87d0c72c9d7772aa56f570db3
|
5ceb6747a8007ae040c8bf3d35d28ad48f4316dc
|
refs/heads/master
| 2022-04-25T04:46:38.419506
| 2020-04-29T15:26:21
| 2020-04-29T15:26:21
| 256,716,729
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
FuzzyTransformFunctions.r
|
# Helper functions for computing the Fuzzy Transform.
# (Fixes vs the original: the closures were returned via the invisible value
# of a `result <- function(...)` assignment instead of being returned
# explicitly; `minusck` now forces its argument so each closure captures its
# own node value; the needless lambda wrapper around minusck was removed.)

# Returns the pointwise product g(x) = f1(x) * f2(x) of two functions.
multi <- function(f1, f2) {
  force(f1)
  force(f2)
  function(x) {
    f1(x) * f2(x)
  }
}

# Returns the shifted identity g(x) = x - ck.
# ck: a node, i.e. the centre of a fuzzy set.
minusck <- function(ck) {
  force(ck)  # evaluate now so the closure is bound to this node's value
  function(x) {
    x - ck
  }
}

# Returns a list with one minusck() closure per node.
CreateMinusCks <- function(nodes) {
  lapply(nodes, minusck)
}
|
0180314b81da6e20999ced0c15908bc3e2b78459
|
028867702c97e4bc10021281bfc0cf93d18cf852
|
/cachematrix.R
|
00ee066281c7223f33c84c0c4e8200604b205fdb
|
[] |
no_license
|
satyanarayanarangala/ProgrammingAssignment2
|
77d45add94f6146d3353747a2863a6d51c0f11f9
|
7e4500b3d1e6c61581cec91ba0ab9f0af73b711d
|
refs/heads/master
| 2021-01-16T20:03:26.325259
| 2016-01-21T18:36:48
| 2016-01-21T18:36:48
| 50,125,328
| 1
| 0
| null | 2016-01-21T17:47:09
| 2016-01-21T17:47:09
| null |
UTF-8
|
R
| false
| false
| 1,534
|
r
|
cachematrix.R
|
## Caching wrapper around matrix inversion.  makeCacheMatrix() wraps a
## square invertible matrix in a "special matrix" object able to memoise
## its inverse; cacheSolve() returns that inverse, computing it only the
## first time and serving the cached copy afterwards.

## Build a special "matrix": a list of accessors around the matrix `x`
## plus a slot holding its (initially unknown) inverse.
## - set(y):        replace the matrix and invalidate the cache
## - get():         return the current matrix
## - setInverse(i): store a precomputed inverse
## - getInverse():  return the cached inverse, or NULL if none
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates the old inverse
  }
  get <- function() x
  setInverse <- function(solve) cached_inverse <<- solve
  getInverse <- function() cached_inverse
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## Return the inverse of the special "matrix" `x` built by
## makeCacheMatrix().  If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## cached, and returned.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
|
33c86ad6f9e2d21b45f06210edd0dbf39296d335
|
cda6b2a122ec61251e35a4359be834c2e2285f81
|
/RenissanceTripadvisorCode.R
|
41b30a7354f248de72a3ee8727a6146747ae9b91
|
[] |
no_license
|
ThomasGmumford/Renissance_Tripadvisor
|
af61aca44bb947d28d1f9431a0fd17a68a5445f3
|
1d3f1776e61cd2c78df9ae2e1cdd4f362571d84d
|
refs/heads/master
| 2021-01-17T16:56:37.866251
| 2016-06-15T15:05:05
| 2016-06-15T15:05:05
| 61,217,861
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
r
|
RenissanceTripadvisorCode.R
|
library(dplyr)
library(tidyr)
library(caret)

# Strip the " review"/" reviews" suffix from the review-count column so it
# can later be treated as a number.  Fixed regex: the original alternation
# " review| reviews" matched the shorter branch first and left a stray "s"
# behind on "... reviews"; " reviews?" removes both forms.
RenaissanceSouthPark$numberofreviews <- gsub(pattern = " reviews?", "", x = RenaissanceSouthPark$numberofreviews)

# Keep only the first occurrence of each value in column 7.
# NOTE(review): magic column index -- confirm column 7 is the intended
# de-duplication key and consider referencing it by name.
RenaissanceSouthPark <- subset(RenaissanceSouthPark, !duplicated(RenaissanceSouthPark[, 7]))
|
2239c3eee3b2aebd637d6ec011e4cb4055a47f33
|
68fe900eda00733f3a20312556dbf7e246b277ed
|
/Probability/Ex2_84.R
|
83c8f8c69a26d02b40c6442c68e6c197d5d8d5f2
|
[] |
no_license
|
thesaltree/TBC_R
|
299e6f697294c1ddd51f128cfc46627a9ee4a178
|
e3f35a7209adf77f1eb75421e5f2e80b105ade83
|
refs/heads/master
| 2021-07-14T08:40:57.482143
| 2019-10-15T20:58:40
| 2019-10-15T20:58:40
| 215,079,409
| 0
| 1
| null | 2023-01-02T18:37:52
| 2019-10-14T15:29:37
|
R
|
UTF-8
|
R
| false
| false
| 198
|
r
|
Ex2_84.R
|
## Section: 2.4.4, Page 112
## John's weekly commute consists of 5 independent segments, each with a
## standard deviation of 4 minutes.  Variances (not SDs) add across
## independent parts, so SD(total) = sqrt(5 * 4^2) = sqrt(80) ~ 8.94 min.
## (Fixes vs the original: "deviationn" typo in the printed message;
## `rep(1^2, each = 5)` replaced by the equivalent but clearer
## `rep(1, times = 5)`; local names no longer mask base sd()/var().)
segment_sd <- 4
segment_var <- segment_sd^2        # variance of one commute segment
coeff <- rep(1, times = 5)         # unit weight for each of the 5 segments
SD_total <- sqrt(sum(segment_var * coeff))
SD_total
cat("Standard deviation for John's weekly work commute time is", SD_total, "minutes.")
|
a629c3463292986cf1c2521538609e6b7bdd79d1
|
5a4cbe8227f52ec315ae7351c8f7793aad70b521
|
/test_source/app/app.r
|
12ce1c3e6ee875fcd6125d132a314acfca966915
|
[] |
no_license
|
DrRoad/fdk
|
199584cd3442b3d898efb4722a33dae0a25188d2
|
31db3f727c834b2d0dcc7c03bcaca53ee5775591
|
refs/heads/master
| 2023-05-11T16:18:56.351151
| 2021-05-14T10:11:43
| 2021-05-14T10:11:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,783
|
r
|
app.r
|
# Shiny dashboard for inspecting hindsight forecasts: a main forecast plot,
# seasonality and derivative sub-plots, summary tables, and controls for the
# GAM hyper-parameters.  Relies on helpers supplied by the surrounding
# package/session (get_default_hyperpar, get_insight_data, get_graph_stat,
# get_tables, forecast_item_list, oc_data, fdk:::react_gam_par,
# fdk:::mod_stat_data) -- none of them are defined in this file.

# Default GAM hyper-parameters used to seed the input widgets.
parameter <- get_default_hyperpar()
# JS snippet that reloads the page.
# NOTE(review): defined but never passed to extendShinyjs() in this file --
# confirm it is registered elsewhere or remove it.
jsResetCode <- "shinyjs.reset = function() {history.go(0)}"

# ---- UI ------------------------------------------------------------------
ui <- dashboardPage(
  dashboardHeader(title = "Draft dashboard"),
  dashboardSidebar(),
  dashboardBody(
    tabsetPanel(id = "tabs"
      # Single "HindSight" tab: main plot on the left, parameter box right.
      , tabPanel(title = "HindSight"
        , fluidRow(
          useShinyjs()
          , box(width = 9, withSpinner(plotlyOutput("plot_1"), size = 1, type = 8)) # main graph
          , tabBox(width = 3
            #, useShinyjs()
            # Model hyper-parameter controls (read when "Execute" fires).
            , tabPanel(title = "Model parameters"
              , selectInput(inputId = "exc_features", label = "Excluded features"
                , choices = c("month_seas")
                , selected = NULL
                , multiple = TRUE
                , selectize = TRUE)
              # Start date of the fit window; real bounds are filled in
              # server-side once a model has been fitted.
              , dateInput("date_init"
                , label = paste0("Initial date:")
                , value = as.Date(NA))
              , fluidRow(
                column(
                  numericInput(inputId = "trend_decay"
                    , label = "Trend decay"
                    , value = .75
                    , min = 0.1
                    , max = 1
                    , step = .05), width = 6)
                , column(
                  # -1 presumably means "let the GAM choose the basis
                  # dimension" -- TODO confirm against react_gam_par().
                  numericInput(inputId = "k", label = "Smooth basis"
                    , min = -1, max = 1e5
                    , value = -1, step = 1), width = 6)
                , width = 3)
              , p("Options")
              , radioButtons("link_function"
                , label = "Link function"
                , choices = list("Gaussian" = 1
                  , "Poisson" = 2
                ),
                selected = 1)
              , checkboxInput(inputId = "robust_mse", label = "Robust")
            )
            # Purely cosmetic options for the main plot.
            , tabPanel(title = "Graph options"
              , checkboxInput(inputId = "gam_forecast", label = "Show GAM forecast", value = TRUE)
              , checkboxInput(inputId = 'min_limit_zero', label = "Low limit zero")
            )
          )
        ),
        # Second row: auxiliary plots, summary table, key selector, buttons.
        fluidRow(
          box(withSpinner(plotlyOutput("plot_2", height = 250), size = 1, type = 8), width = 3)
          , box(withSpinner(plotlyOutput("plot_3", height = 250), size = 1, type = 8), width = 3)
          #, valueBoxOutput("diff_mean", width = 2)
          , box(withSpinner(formattableOutput('summary_stat_table', height = 250), size = 1, type = 8), width = 3)
          , box(
            fluidRow(
              column(width = 12
                # Forecast-item selector; choices are supplied server-side.
                , selectizeInput(inputId = "key"
                  , label = "Key"
                  , choices = NULL
                  , multiple = FALSE
                  , options = list(placeholder = "Select key"
                    , onInitialize = I('function() { this.setValue(""); }'))
                )
              )
            )
            , fluidRow(width = 3
              , column(actionButton("execute", "Execute"), width = 6)
              , column(actionButton("reset", "Reset"), width = 6)
            )
          )
        )
        # Third row: yearly aggregation table.
        , fluidRow(
          box(withSpinner(formattableOutput('year_agg', height = 100), size = 1, type = 8), width = 6)
        )
      )
    )
  )
)

# ---- Server --------------------------------------------------------------
server <- function(session, input, output) {
  # Populate the key selector server-side (the choice list can be large).
  updateSelectizeInput(session, 'key', choices = forecast_item_list, server = TRUE, selected = NULL)

  # Fit the model and derive all plot/table data; recomputed only when the
  # "Execute" button is pressed, not on every widget change.
  insight_data <- eventReactive(input$execute
    , {
      # Overlay the user-tweaked hyper-parameters onto the defaults.
      parameter_int <- fdk:::react_gam_par(parameter = get_default_hyperpar()
        , values_list = list(k = input$k
          , exc_features = input$exc_features
          , trend_decay = input$trend_decay
          , link_function = input$link_function))
      tmp <- get_insight_data(oc_data = oc_data
        , key = input$key
        , parameter = parameter_int)
      # Constrain the date picker to the fitted date range.
      updateDateInput(session = session, inputId = "date_init"
        , label = "Initial date:"
        , value = min(tmp$gam_fitted$date_var)
        , min = min(tmp$gam_fitted$date_var)
        , max = max(tmp$gam_fitted$date_var))
      tmp <- fdk:::mod_stat_data(insight_data = tmp, date = input$date_init)
      tmp
    })

  # "Reset" restores the model-parameter widgets to their defaults
  # (shinyjs::reset acts on the input elements by id).
  observeEvent(eventExpr = input$reset
    , {
      reset("exc_features")
      reset("date_init")
      reset("k")
      reset("trend_decay")
    })

  # Main forecast plot.
  output$plot_1 <- renderPlotly({
    get_graph_stat(insight_data = insight_data(), graph_type = "forecast"
      , conf = list(min_limit_zero = input$min_limit_zero
        , gam_forecast = input$gam_forecast))
  })

  # Seasonality plot.
  output$plot_2 <- renderPlotly({
    get_graph_stat(insight_data = insight_data(), graph_type = "seas_me")
  })

  # Trend-derivative plot.
  output$plot_3 <- renderPlotly({
    get_graph_stat(insight_data = insight_data(), graph_type = "derivative")
  })

  # Cumulative-difference summary table.
  output$summary_stat_table <- renderFormattable(
    {
      tmp <- insight_data()
      get_tables(tmp, table_type = "cum_diff")
    }
  )

  # Yearly aggregation table.
  output$year_agg <- renderFormattable(
    {
      tmp <- insight_data()
      get_tables(tmp, table_type = "year_agg")
    }
  )

  # NOTE(review): there is no "screenshot" control in the UI above --
  # confirm the button exists elsewhere or remove this handler.
  observeEvent(input$screenshot
    , {
      screenshot(id = "ss")
    }
  )
}

shinyApp(ui, server)
|
bf7d987e38663fc7bbc1383e915763d81e11f68f
|
2d6418dc719114e785f716b08478089503bc0ab2
|
/r/library/ggplot2/qplot/line.r
|
82c40a374092efdfbb78481243cd5f8d030993a6
|
[] |
no_license
|
jk983294/math
|
9a9343bde5804c8655a897a20ded3e67f528b225
|
f883e281961cd4cf16851de15e64746f59b966c7
|
refs/heads/master
| 2023-09-04T13:04:17.944946
| 2023-09-04T09:25:07
| 2023-09-04T09:25:07
| 139,425,932
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 484
|
r
|
line.r
|
library(ggplot2)
library(data.table)
library(dplyr)

## Line-chart demo: the same data drawn with qplot() and ggplot().

# Sample data on [-1, 1]: a cubic curve (y) and a quadratic curve (y1).
x <- c(-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1)
y <- x^3
y1 <- x^2
dt <- data.table(x, y, y1)

# Quick plots: points first, then the same data joined as a line.
qplot(x, y)
qplot(x, y, geom = "line")

# Several lines, approach 1: one geom_line() layer per column.
ggplot(dt, aes(x)) +
  geom_line(aes(y = y, colour = "y")) +
  geom_line(aes(y = y1, colour = "y1"))

# Several lines, approach 2: melt to long format and map colour/group to
# the former column name.
melted <- melt(dt, id.vars = "x")
ggplot(data = melted, aes(x = x, y = value, group = variable, colour = variable)) +
  geom_line()
|
3e95417eac6e45b93431c2398f64befa24457225
|
05a3778c7064c77ea054ab653e570e9460f85090
|
/Distribuicao de probabilidades.R
|
1dbc0524947930d2f4d8a837927b9a05acbaad42
|
[
"MIT"
] |
permissive
|
nelsonssjunior/r
|
1df0cf4cdb77cceb7fdbfeedc91c525aed7bf31b
|
1ace945fd2fa9b445676603fade6829392db40c0
|
refs/heads/main
| 2023-05-04T04:36:51.404280
| 2021-05-23T00:06:30
| 2021-05-23T00:06:30
| 369,917,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,929
|
r
|
Distribuicao de probabilidades.R
|
## Probability distributions in R -- worked tutorial examples.
## (Reviewed: comments translated to English; the bare string literals that
## served as multi-line "comments" were converted to real # comments; the
## workspace-wiping rm(list = ls(all = TRUE)) call was removed -- a sourced
## script must not clear the caller's environment; a hist() call garbled
## into a comment was restored; the dnbinom() example had its arguments in
## the wrong roles and was corrected.)

#### BINOMIAL DISTRIBUTION ####
# "Success" = a customer buys, with probability 0.5.  Probability of
# exactly 2 sales out of 10 customers: P(X = 2), X ~ Binomial(10, 0.5).
dbinom(x = 2, size = 10, prob = 0.5)

# Simulate 30 repetitions of "10 customers enter the shop"; each draw is
# the number of sales in that batch.
va_binomial <- rbinom(n = 30, size = 10, prob = 0.5)
# The tallest histogram bar sits near the expected number of sales (5).
# Increase n to 1000 and the shape approaches a normal curve.
hist(va_binomial)

# Cumulative probability of at most 2 sales out of 10:
# P(X <= 2) = P(X = 0) + P(X = 1) + P(X = 2) (~5.5%).
pbinom(q = 2, size = 10, prob = 0.5)

#### GEOMETRIC DISTRIBUTION ####
# dgeom(x, p) is the probability of x failures before the first success,
# i.e. the first sale happening with customer number x + 1.
dgeom(x = 5, prob = 0.5)

# Probability of the first success after 1, 2, ..., 10 failures.
va_geometrica <- dgeom(x = 1:10, prob = 0.5)
va_geometrica
# The probabilities shrink quickly: with p = 0.5 the first success is very
# likely to occur within the first few trials.
plot(va_geometrica)

# Cumulative version: P(X <= 3), the first success occurring within the
# first few trials.
va_geometrica_acumulada <- pgeom(0:3, prob = 0.5)
plot(va_geometrica_acumulada)

#### NEGATIVE BINOMIAL DISTRIBUTION ####
# Probability that the 2nd sale happens exactly when the 8th customer
# enters.  dnbinom(x, size, prob) gives the probability of x failures
# before the size-th success, so "8 trials for 2 successes" means
# x = 6 failures, size = 2 successes.
# (Fixed: the original call dnbinom(x = 2, size = 8, prob = 0.5) had the
# roles of x and size swapped for the stated problem.)
dnbinom(x = 6, size = 2, prob = 0.50)

#### POISSON DISTRIBUTION ####
# A shop receives on average lambda = 6 customers per minute.
# Probability that exactly 5 customers arrive in one minute:
dpois(x = 5, lambda = 6)

# Probabilities of 1..15 arrivals; the values spread symmetrically around
# lambda = 6, which is both the mean and the variance of a Poisson.
va_poison <- dpois(x = 1:15, lambda = 6)
plot(va_poison)

# Cumulative probability of at most 1..5 arrivals in a minute: P(X <= k).
va_poison <- ppois(1:5, lambda = 6)
plot(va_poison)

#### NORMAL DISTRIBUTION ####
# Salaries ~ Normal(mean = 2500, sd = 170).  Probability that a randomly
# chosen employee earns between 2400 and 2600:
# P(2400 <= X <= 2600) = P(X <= 2600) - P(X <= 2400).
probabilidade_ate_2600 <- pnorm(q = 2600, mean = 2500, sd = 170)
probabilidade_ate_2400 <- pnorm(q = 2400, mean = 2500, sd = 170)
probabilidade_ate_2600 - probabilidade_ate_2400

# 100 random draws from Normal(2500, 170).
va_normal <- rnorm(n = 100, mean = 2500, sd = 170)
hist(va_normal)

#### STANDARD NORMAL DISTRIBUTION ####
# scale() standardises a variable: mean 0, standard deviation 1.
va_normal_padrao <- scale(va_normal)
hist(va_normal_padrao)

# Standardising a single value: z = (x - mean) / sd.
z <- (2600 - 2500) / 170
# pnorm(z) is P(Z <= z); the probability of a salary ABOVE 2600 is its
# complement, 1 - pnorm(z).
pnorm(z, mean = 0, sd = 1)
# mean = 0 and sd = 1 are the defaults, so this is equivalent:
pnorm(z)

# Visualise where z sits relative to the (standardised) mean.
plot(density(scale(va_normal)))  # density curve of the standardised data
abline(v = 0, col = 'blue')      # the mean, zero after standardisation
abline(v = 0.58, col = 'red')    # the z value computed above (~0.588)

#### F DISTRIBUTION ####
# 1000 random draws from an F distribution (the ratio of two chi-squared
# variables).  Increasing df1/df2 makes the shape approach a normal.
va_f <- rf(n = 1000, df1 = 5, df2 = 33)
hist(va_f)

#### T DISTRIBUTION ####
# 1000 random draws from a t distribution; like the standard normal it is
# centred at zero, approaching the normal as df grows.
va_t <- rt(1000, df = 2)
hist(va_t)

#### CHI-SQUARED DISTRIBUTION ####
# 1000 random draws from a chi-squared distribution with 3 df.
va_QuiQuadrado <- rchisq(1000, df = 3)
hist(va_QuiQuadrado)
|
9c12add730164937bab499883831af60ada344b5
|
91805f708aa835ad22546f5c2b02e2fc1f00f989
|
/R/modelsZoo.R
|
8370e2a1794601e518e7ef411fced37c278f1986
|
[
"MIT"
] |
permissive
|
DataZhukov/animalTrial
|
07f502258dbc8a34b9ba19f334b28b92a8a5817b
|
1d9dab9014aa56d5bff0021878a9de053505a7aa
|
refs/heads/main
| 2023-07-05T05:39:01.567997
| 2021-08-04T10:41:32
| 2021-08-04T10:41:32
| 354,015,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,855
|
r
|
modelsZoo.R
|
#' Run mixed models for each parameter
#'
#' For every variable in \code{VARnames}, fits a linear mixed model
#' (\code{lmerTest::lmer}) starting from \code{effects}, reduces it by
#' backward elimination while always keeping the treatment term, and builds
#' a table of formatted group means and standard deviations. Means are
#' annotated with compact-letter-display significance codes whenever the
#' post-hoc comparison distinguishes groups.
#'
#' @param data data.table with the response variables and design columns
#' @param precision numeric number of digits after the decimal
#' @param VARnames character vector of variables to use from data
#' @param effects right-hand side (character) of the starting model formula
#' @param treatment character name of treatment variable
#' @param group1 character name of second grouping variable, typically faseW;
#'   NULL (the default) for a one-way layout
#'
#' @return table of means and sd with significance codes
#' @export
#'
#' @import lmerTest
#' @import car
#' @import emmeans
#' @import multcomp
#'
#' @examples
#' groeidataLong <- makeLong(groeidata,2)
#' tabel <- modelsZoo(groeidataLong,VARnames = c("geinde","dgtot"),effects = "behandeling1+gbegin+(1|compartiment/ronde)")
modelsZoo <- function(data, precision = 2, VARnames, effects,
                      treatment = "behandeling1", group1 = NULL) {
  # Format a statistic: round to (precision + 1) digits first, then print
  # with `precision` decimals (mirrors the original two-step rounding).
  fmt <- function(x, stat) {
    sprintf(paste("%.", precision, "f", sep = ""),
            round(stat(x, na.rm = TRUE), precision + 1))
  }
  if (is.null(group1)) {
    means <- data[, lapply(.SD, function(x) fmt(x, mean)),
                  .SDcols = VARnames, keyby = get(treatment)]
    names(means)[1] <- c(treatment)
    sds <- data[, lapply(.SD, function(x) fmt(x, sd)),
                .SDcols = VARnames, keyby = get(treatment)]
    names(sds)[1] <- c(treatment)
  } else {
    means <- data[, lapply(.SD, function(x) fmt(x, mean)),
                  .SDcols = VARnames, keyby = list(get(group1), get(treatment))]
    names(means)[1:2] <- c(group1, treatment)
    sds <- data[, lapply(.SD, function(x) fmt(x, sd)),
                .SDcols = VARnames, keyby = list(get(group1), get(treatment))]
    names(sds)[1:2] <- c(group1, treatment)
  }
  for (i in VARnames) {
    # `fml` (not `formula`) to avoid shadowing stats::formula.
    fml <- as.formula(paste(i, "~", effects))
    model <- lmerTest::lmer(fml, data = data)
    # Backward elimination; the treatment term is never dropped.
    selection <- lmerTest::step(model, ddf = "Kenward-Roger",
                                alpha.random = 0.1, alpha.fixed = 0.1,
                                reduce.fixed = TRUE, reduce.random = FALSE,
                                keep = treatment)
    model <- get_model(selection)
    ano <- Anova(model, type = "III", test.statistic = "F")
    print(summary(model))
    print(ano)
    qqPlot(residuals(model))
    # `grp` (not `cld`) to avoid shadowing multcomp::cld.
    if (is.null(group1) || !(group1 %in% row.names(ano))) {
      # One-way comparison: group1 absent or dropped from the final model.
      grp <- cld(emmeans(model, as.formula(paste("~", treatment))),
                 Letters = letters, sort = TRUE, reversed = TRUE)
      grp <- grp[order(grp[, treatment]), ]
      grp <- gsub(" ", "", grp[, 7], fixed = TRUE)
    } else {
      # Comparison of treatment within each level of group1.
      grp <- cld(emmeans(model, as.formula(paste("~", treatment, "|", group1))),
                 Letters = letters, sort = TRUE, reversed = TRUE)
      grp <- grp[order(grp[, group1], grp[, treatment]), ]
      grp <- gsub(" ", "", grp[, 8], fixed = TRUE)
    }
    # Append significance letters only when the groups actually differ.
    if (length(unique(grp)) > 1) {
      means[[i]] <- paste(means[[i]], grp, sep = "")
    }
  }
  if (is.null(group1)) {
    tot <- merge(means, sds, by = treatment)
  } else {
    # Bug fix: the merge keys were hard-coded as c("behandeling1","faseW"),
    # ignoring the `treatment` and `group1` arguments; use the configured
    # column names so non-default arguments work.
    tot <- merge(means, sds, by = c(group1, treatment))
  }
  tot
}
|
0cdcf7badabc60f4360715d3ecb4befa39e5fcd5
|
79b935ef556d5b9748b69690275d929503a90cf6
|
/man/Fest.Rd
|
8bc1b5ad9d7a31f5131b8f205b4ff9741b417c24
|
[] |
no_license
|
spatstat/spatstat.core
|
d0b94ed4f86a10fb0c9893b2d6d497183ece5708
|
6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70
|
refs/heads/master
| 2022-06-26T21:58:46.194519
| 2022-05-24T05:37:16
| 2022-05-24T05:37:16
| 77,811,657
| 6
| 10
| null | 2022-03-09T02:53:21
| 2017-01-02T04:54:22
|
R
|
UTF-8
|
R
| false
| false
| 11,812
|
rd
|
Fest.Rd
|
\name{Fest}
\alias{Fest}
\alias{Fhazard}
\title{Estimate the Empty Space Function or its Hazard Rate}
\description{
Estimates the empty space function \eqn{F(r)}
or its hazard rate \eqn{h(r)} from a point pattern in a
window of arbitrary shape.
}
\usage{
Fest(X, \dots, eps, r=NULL, breaks=NULL,
correction=c("rs", "km", "cs"),
domain=NULL)
Fhazard(X, \dots)
}
\arguments{
\item{X}{The observed point pattern,
from which an estimate of \eqn{F(r)} will be computed.
An object of class \code{ppp}, or data
in any format acceptable to \code{\link{as.ppp}()}.
}
\item{\dots}{
Extra arguments, passed from \code{Fhazard} to \code{Fest}.
Extra arguments to \code{Fest} are ignored.
}
\item{eps}{Optional. A positive number.
The resolution of the discrete approximation to Euclidean
distance (see below). There is a sensible default.
}
\item{r}{Optional. Numeric vector. The values of the argument \eqn{r}
at which \eqn{F(r)} should be evaluated. There is a sensible default.
First-time users are strongly advised not to specify this argument.
See below for important conditions on \eqn{r}.
}
\item{breaks}{
This argument is for internal use only.
}
\item{correction}{
Optional.
The edge correction(s) to be used to estimate \eqn{F(r)}.
A vector of character strings selected from
\code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"}
and \code{"best"}.
Alternatively \code{correction="all"} selects all options.
}
\item{domain}{
Optional. Calculations will be restricted to this subset
of the window. See Details.
}
}
\value{
An object of class \code{"fv"}, see \code{\link{fv.object}},
which can be plotted directly using \code{\link{plot.fv}}.
The result of \code{Fest} is
essentially a data frame containing up to seven columns:
\item{r}{the values of the argument \eqn{r}
at which the function \eqn{F(r)} has been estimated
}
\item{rs}{the ``reduced sample'' or ``border correction''
estimator of \eqn{F(r)}
}
\item{km}{the spatial Kaplan-Meier estimator of \eqn{F(r)}
}
\item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)}
of \eqn{F(r)} by the spatial Kaplan-Meier method
}
\item{cs}{the Chiu-Stoyan estimator of \eqn{F(r)}
}
\item{raw}{the uncorrected estimate of \eqn{F(r)},
i.e. the empirical distribution of the distance from
a random point in the window to the nearest point of
the data pattern \code{X}
}
\item{theo}{the theoretical value of \eqn{F(r)}
for a stationary Poisson process of the same estimated intensity.
}
The result of \code{Fhazard} contains only three columns
\item{r}{the values of the argument \eqn{r}
at which the hazard rate \eqn{h(r)} has been estimated
}
\item{hazard}{the spatial Kaplan-Meier estimate of the
hazard rate \eqn{h(r)}}
\item{theo}{
the theoretical value of \eqn{h(r)}
for a stationary Poisson process of the same estimated intensity.
}
}
\details{
\code{Fest} computes an estimate of the empty space function \eqn{F(r)},
and \code{Fhazard} computes an estimate of its hazard rate \eqn{h(r)}.
The empty space function
(also called the ``\emph{spherical contact distribution}''
or the ``\emph{point-to-nearest-event}'' distribution)
of a stationary point process \eqn{X}
is the cumulative distribution function \eqn{F} of the distance
from a fixed point in space to the nearest point of \eqn{X}.
An estimate of \eqn{F} derived from a spatial point pattern dataset
can be used in exploratory data analysis and formal inference
about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988).
In exploratory analyses, the estimate of \eqn{F} is a useful statistic
summarising the sizes of gaps in the pattern.
For inferential purposes, the estimate of \eqn{F} is usually compared to the
true value of \eqn{F} for a completely random (Poisson) point process,
which is
\deqn{F(r) = 1 - e^{ - \lambda \pi r^2}}{%
F(r) = 1 - exp( - lambda * pi * r^2) %
}
where \eqn{\lambda}{lambda}
is the intensity (expected number of points per unit area).
Deviations between the empirical and theoretical \eqn{F} curves
may suggest spatial clustering or spatial regularity.
This algorithm estimates the empty space function \eqn{F}
from the point pattern \code{X}. It assumes that \code{X} can be treated
as a realisation of a stationary (spatially homogeneous)
random spatial point process in the plane, observed through
a bounded window.
The window (which is specified in \code{X}) may have arbitrary shape.
The argument \code{X} is interpreted as a point pattern object
(of class \code{"ppp"}, see \code{\link{ppp.object}}) and can
be supplied in any of the formats recognised
by \code{\link{as.ppp}}.
The algorithm uses two discrete approximations which are controlled
by the parameter \code{eps} and by the spacing of values of \code{r}
respectively. (See below for details.)
First-time users are strongly advised not to specify these arguments.
The estimation of \eqn{F} is hampered by edge effects arising from
the unobservability of points of the random pattern outside the window.
An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988).
The edge corrections implemented here are the border method or
"\emph{reduced sample}" estimator, the spatial Kaplan-Meier estimator
(Baddeley and Gill, 1997) and the Chiu-Stoyan estimator (Chiu and
Stoyan, 1998).
Our implementation makes essential use of the distance transform algorithm
of image processing (Borgefors, 1986). A fine grid of pixels is
created in the observation window. The Euclidean distance between two pixels
is approximated by the length of the shortest path joining them in the grid,
where a path is a sequence of steps between adjacent pixels, and
horizontal, vertical and diagonal steps have length
\eqn{1}, \eqn{1} and \eqn{\sqrt 2}{sqrt(2)}
respectively in pixel units. If the pixel grid is sufficiently fine then
this is an accurate approximation.
The parameter \code{eps}
is the pixel width of the rectangular raster
used to compute the distance transform (see below). It must not be too
large: the absolute error in distance values due to discretisation is bounded
by \code{eps}.
If \code{eps} is not specified, the function
checks whether the window \code{Window(X)} contains pixel raster
information. If so, then \code{eps} is set equal to the
pixel width of the raster; otherwise, \code{eps}
defaults to 1/100 of the width of the observation window.
The argument \code{r} is the vector of values for the
distance \eqn{r} at which \eqn{F(r)} should be evaluated.
It is also used to determine the breakpoints
(in the sense of \code{\link{hist}})
for the computation of histograms of distances. The
estimators are computed from histogram counts.
This introduces a discretisation
error which is controlled by the fineness of the breakpoints.
First-time users would be strongly advised not to specify \code{r}.
However, if it is specified, \code{r} must satisfy \code{r[1] = 0},
and \code{max(r)} must be larger than the radius of the largest disc
contained in the window. Furthermore, the spacing of successive
\code{r} values must be very fine (ideally not greater than \code{eps/4}).
The algorithm also returns an estimate of the hazard rate function,
\eqn{h(r)} of \eqn{F(r)}. The hazard rate is
defined by
\deqn{h(r) = - \frac{d}{dr} \log(1 - F(r))}{%
h(r) = - (d/dr) log(1 - F(r)) %
}
The hazard rate of \eqn{F} has been proposed as a useful
exploratory statistic (Baddeley and Gill, 1994).
The estimate of \eqn{h(r)} given here
is a discrete approximation to the hazard rate of the
Kaplan-Meier estimator of \eqn{F}. Note that \eqn{F} is
absolutely continuous (for any stationary point process \eqn{X}),
so the hazard function always exists (Baddeley and Gill, 1997).
If the argument \code{domain} is given, the estimate of \eqn{F(r)}
will be based only on the empty space distances
measured from locations inside \code{domain} (although their
nearest data points may lie outside \code{domain}).
This is useful in bootstrap techniques. The argument \code{domain}
should be a window (object of class \code{"owin"}) or something acceptable to
\code{\link{as.owin}}. It must be a subset of the
window of the point pattern \code{X}.
The naive empirical distribution of distances from each location
in the window to the nearest point of the data pattern, is a biased
estimate of \eqn{F}. However this is also returned by the algorithm
(if \code{correction="none"}),
as it is sometimes useful in other contexts.
Care should be taken not to use the uncorrected
empirical \eqn{F} as if it were an unbiased estimator of \eqn{F}.
}
\note{
Sizeable amounts of memory may be needed during the calculation.
}
\references{
Baddeley, A.J. Spatial sampling and censoring.
In O.E. Barndorff-Nielsen, W.S. Kendall and
M.N.M. van Lieshout (eds)
\emph{Stochastic Geometry: Likelihood and Computation}.
Chapman and Hall, 1998.
Chapter 2, pages 37-78.
Baddeley, A.J. and Gill, R.D.
The empty space hazard of a spatial pattern.
Research Report 1994/3, Department of Mathematics,
University of Western Australia, May 1994.
Baddeley, A.J. and Gill, R.D.
Kaplan-Meier estimators of interpoint distance
distributions for spatial point processes.
\emph{Annals of Statistics} \bold{25} (1997) 263-292.
Borgefors, G.
Distance transformations in digital images.
\emph{Computer Vision, Graphics and Image Processing}
\bold{34} (1986) 344-371.
Chiu, S.N. and Stoyan, D. (1998)
Estimators of distance distributions for spatial patterns.
\emph{Statistica Neerlandica} \bold{52}, 239--246.
Cressie, N.A.C. \emph{Statistics for spatial data}.
John Wiley and Sons, 1991.
Diggle, P.J. \emph{Statistical analysis of spatial point patterns}.
Academic Press, 1983.
Ripley, B.D. \emph{Statistical inference for spatial processes}.
Cambridge University Press, 1988.
Stoyan, D, Kendall, W.S. and Mecke, J.
\emph{Stochastic geometry and its applications}.
2nd edition. Springer Verlag, 1995.
}
\section{Warnings}{
The reduced sample (border method)
estimator of \eqn{F} is pointwise approximately
unbiased, but need not be a valid distribution function; it may
not be a nondecreasing function of \eqn{r}. Its range is always
within \eqn{[0,1]}.
The spatial Kaplan-Meier estimator of \eqn{F} is always nondecreasing
but its maximum value may be less than \eqn{1}.
The estimate of hazard rate \eqn{h(r)}
returned by the algorithm is an approximately
unbiased estimate for the integral of \eqn{h()}
over the corresponding histogram cell.
It may exhibit oscillations due to discretisation effects.
We recommend modest smoothing, such as kernel smoothing with
kernel width equal to the width of a histogram cell,
using \code{\link{Smooth.fv}}.
}
\seealso{
\code{\link{Gest}},
\code{\link{Jest}},
\code{\link{Kest}},
\code{\link{km.rs}},
\code{\link{reduced.sample}},
\code{\link{kaplan.meier}}
}
\examples{
Fc <- Fest(cells, 0.01)
# Tip: don't use F for the left hand side!
# That's an abbreviation for FALSE
plot(Fc)
# P-P style plot
plot(Fc, cbind(km, theo) ~ theo)
# The empirical F is above the Poisson F
# indicating an inhibited pattern
if(interactive()) {
plot(Fc, . ~ theo)
plot(Fc, asin(sqrt(.)) ~ asin(sqrt(theo)))
}
\testonly{
Fh <- Fhazard(cells)
}
}
\author{\adrian
and \rolf
}
\keyword{spatial}
\keyword{nonparametric}
|
6f801db1c33f591823c0d9ff1386bae1bb3d36ed
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/TickExec/R/TimeDiff.R
|
93913c4f755b40784d0b545c25eec85076d06c70
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
TimeDiff.R
|
#### Difference in seconds between two HHMMSS timestamps ####
#### Arguments may be numeric or character (e.g. 93005 or "093005"). ####
#### A negative result means time2 is earlier than time1 ####
#### (the original header had the sign condition backwards). ####
TimeDiff <- function(time1, time2) {
  ## Coerce both arguments to numeric HHMMSS values ##
  time1 <- as.numeric(time1)
  time2 <- as.numeric(time2)
  ## Seconds component (last two digits); valid range is 0-59.
  ## Bug fix: the original used `> 60`, which accepted the invalid value 60.
  sec1 <- time1 %% 100
  sec2 <- time2 %% 100
  if (sec1 >= 60 || sec2 >= 60) {
    stop('Wrong time given.')
  }
  ## Minutes component; valid range is 0-59 (same off-by-one fix as above) ##
  min1 <- (time1 %/% 100) %% 100
  min2 <- (time2 %/% 100) %% 100
  if (min1 >= 60 || min2 >= 60) {
    stop('Wrong time given.')
  }
  ## Hours component; 24 is still tolerated so that 240000 can denote
  ## end of day, matching the original behaviour ##
  hr1 <- (time1 %/% 10000) %% 100
  hr2 <- (time2 %/% 10000) %% 100
  if (hr1 > 24 || hr2 > 24) {
    stop('Wrong time given.')
  }
  ## Signed difference in seconds ##
  (hr2 - hr1) * 3600 + (min2 - min1) * 60 + (sec2 - sec1)
}
|
7ccb31304b120c6d1e0ee07b682f5d0f3bb58c37
|
ab7dbe08d895dbb998a9aab6a022a692973e3da3
|
/Programs/lab7.R
|
de6194de790fe11da46fd8713a57fa9ad273f4f8
|
[] |
no_license
|
Manikanta-Bhuvanesh/statistics
|
85b161504bc456de06fdbb4822cd41fbbec19c3e
|
b0389a2acd17259ea5bb84641f3f0b9475e904d5
|
refs/heads/main
| 2023-05-04T07:27:09.780878
| 2021-05-15T07:20:50
| 2021-05-15T07:20:50
| 367,566,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
lab7.R
|
# P(X <= 125) for X ~ N(100, 15)   (replaced T/F with TRUE/FALSE throughout)
pnorm(125, 100, 15, lower.tail = TRUE)
# P(X > 110) for X ~ N(100, 15)
pnorm(110, 100, 15, lower.tail = FALSE)
# Difference of the two probabilities above.
# NOTE(review): this mixes a lower tail with an upper tail; if
# P(110 < X < 125) was intended, both terms should use lower.tail = TRUE.
pnorm(125, 100, 15, lower.tail = TRUE) - pnorm(110, 100, 15, lower.tail = FALSE)
# Quantiles: the value x with P(X <= x) = 0.25
qnorm(0.25)                  # standard normal
qnorm(0.25, 2, 3)            # N(2, 3)
qnorm(0.25, 100, 15, TRUE)   # N(100, 15), lower tail (4th positional arg)
qnorm(0.25, 100, 15, FALSE)  # N(100, 15), upper tail
qnorm(0.25, 100, 15)         # lower tail is the default
# Draw a random sample of 20 values from N(572, 51)
a <- rnorm(n = 20, mean = 572, sd = 51)
a
mean(a)
sd(a)
hist(a)
# Density-scaled histogram with the fitted normal curve overlaid
hist(a, freq = FALSE)
curve(dnorm(x, mean(a), sd(a)), add = TRUE)
|
9d67b0113988477a1746276ab24a3bfded28bd25
|
db359cf922c097b355c9bff856ef3ee4881c6976
|
/Case1/3_Scripts/case1_v1.R
|
3a9fbee8f588270598134cc7b2776852183d2ba1
|
[] |
no_license
|
jmontalvo94/02441_Applied_Statistics
|
c107a26c912f518c5f0325bec8b502cd0a2af53d
|
7e6b2201d8c72d3d6149f0c5f6928177c007e622
|
refs/heads/master
| 2020-11-26T02:57:11.526115
| 2020-01-30T21:23:29
| 2020-01-30T21:23:29
| 228,943,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,618
|
r
|
case1_v1.R
|
#########################################################################
## Case 1: ##
## Effect of hardness and detergent on enzymatic catalysis ##
#########################################################################
# Authors: Begoña Bolos Sierra, Laura Sans Comerma, Jorge Montalvo Arvizu
# Load Data ---------------------------------------------------------------
# NOTE(review): library() is preferred over require() for hard dependencies,
# since require() only warns (returns FALSE) when a package is missing.
require("car")
require("ggplot2")
# Load data and clean: read the tab-separated SPR dataset and drop the
# first two columns.
# NOTE(review): hard-coded absolute path — only works on this author's machine.
data <- read.table("~/Github/02441_Applied_Statistics/Case1/2_Data/SPR.txt", header = TRUE, sep="\t")
data <- data[,-c(1,2)]
# Alternative: copy the data frame and merge detergent and hardness into a
# single combined factor "Stock", then drop the two original columns.
df <- data
df$Stock <- as.factor(paste(as.character(df$DetStock),as.character(df$CaStock)))
df <- df[,-c(4,5)]
# Transformations ---------------------------------------------------------
# Visual check: enzyme concentration levels grow roughly exponentially,
# so a log transform should linearize them.
par(mfrow=c(1,1))
y <- sort(unique(data$EnzymeConc))
x <- 0:3
plot(y~x, pch=19, ylim=c(0,20))
lines(x,exp(x))
par(mfrow=c(1,1))
y <- c(y[1],log(y[y>0]))
plot(x,y, col=2, type="l")
# Renamed from `lm` to avoid shadowing stats::lm.
fit_line <- lm(y~x)
lines(x, predict(fit_line), col=3)
# Transforming the data: log-concentration; log(0) yields -Inf, which is
# mapped back to 0. Using is.infinite() instead of the original comparison
# against the string "-Inf" (which relied on numeric-to-character coercion).
data$EnzymeConc <- log(data$EnzymeConc)
data$EnzymeConc[is.infinite(data$EnzymeConc)] <- 0
df$EnzymeConc <- log(df$EnzymeConc)
df$EnzymeConc[is.infinite(df$EnzymeConc)] <- 0
# Summary Statistics ------------------------------------------------------
# Structure and summary of both data frames
str(data)
summary(data)
str(df)
summary(df)
# Data Visualization ------------------------------------------------------
# setwd("~/Github/02441_Applied_Statistics/Case1/4_Images")
# Pairs plot, colored by enzyme type
pairs(data, col=as.numeric(data$Enzyme)+1, pch=19)
par(mfrow=c(1,2))
plot(data$Response~data$DetStock, ylab="Response", xlab="Detergent")
plot(data$Response~data$CaStock, ylab="Response", xlab="Hardness")
pairs(df, col=as.numeric(df$Enzyme)+1, pch=19)
par(mfrow=c(1,1))
plot(df$Response~df$Stock)
# Color palettes (solid for lines, translucent for box fills)
cols <- c("black","red", "blue", "green")
col_bg <- adjustcolor(cols, alpha = 0.2)
cols2 <- c("black","red", "blue", "green",6)
col_bg2 <- adjustcolor(cols2, alpha = 0.2)
cols3 <- c("black","red")
col_bg3 <- adjustcolor(cols3, alpha = 0.2)
par(mfrow=c(1,1))
# Response - Enzyme
boxplot(Response~Enzyme, data=data, xlab="Enzyme type", ylab="Protein removal (RU)",
col=col_bg2, medcol=cols2, whiskcol=cols2, staplecol=cols2, boxcol=cols2, outcol=cols2, outbg=cols2)
# Response - concentration (EnzymeConc is on the log scale at this point)
boxplot(Response~EnzymeConc, data=data, xlab="Enzyme Concentration log(nM)", ylab="Protein removal (RU)",
col=col_bg, medcol=cols, whiskcol=cols, staplecol=cols, boxcol=cols, outcol=cols,outbg=cols )
# Response - Enzyme - Concentration (grouped boxplot; custom axis labels)
par(mfrow = c(1,1))
b <- boxplot(Response ~ EnzymeConc + Enzyme, data = data, xaxt = "n", xlab="Enzyme type",
col= col_bg, medcol=cols, whiskcol=cols, staplecol=cols, boxcol=cols, outcol=cols,outbg=cols,
names =c("","","A","","","","B","","","","C","","","","D","","","","E",""))
axis(side= 1, at=seq_along(b$names), tick = FALSE, labels = b$names)
legend("topright",title="Enzyme concentration", legend = c(0, 2.5, 7.5, 15), fill =cols, horiz =TRUE, cex=0.8)
# Response - Stock (detergent/hardness combinations)
par(mfrow = c(1,1))
boxplot(Response~Stock , data=df, xlab="Conditions (Detergent and Ca combinations)", ylab="Protein removal (RU)",
col=col_bg3, medcol=cols3, whiskcol=cols3, staplecol=cols3, boxcol=cols3, outcol=cols3,outbg=cols3 )
#png(filename="Response per concentration.png", width=750, height=750)
# One panel per (log) concentration level; `y` comes from the
# Transformations section above
par(mfrow=c(2,2))
for (i in y){
plot(data$Response[data$EnzymeConc==i], pch=as.numeric(data$DetStock[data$EnzymeConc==i])+14, col=as.numeric(data$Enzyme[data$EnzymeConc==i]), ylab="Response", xlab="Observations", main=paste("Enzyme concentration: ",exp(i)))
}
#dev.off()
# Model selection (1) ------------------------------------------------------
# Strategy: build models of increasing complexity on the full data frame
# and compare them with Anova/drop1 and AIC/BIC.
# NOTE(review): formulas use data$col rather than lm(Response ~ ..., data=data);
# this works but makes predict() on new data awkward.
# Testing response given detergent and hardness for data
lm1a <- lm(data$Response~data$DetStock)
Anova(lm1a)
lm1b <- lm(data$Response~data$CaStock)
Anova(lm1b)
lm1c <- lm(data$Response~data$DetStock+data$CaStock)
Anova(lm1c)
lm1d <- lm(data$Response~data$DetStock*data$CaStock)
Anova(lm1d)
step(lm1d)
# Hardness doesn't seem to be significant, thus we keep increasing the complexity of the model without it by adding enzyme type
lm1e <- lm(data$Response~data$DetStock+data$Enzyme)
Anova(lm1e)
lm1f <- lm(data$Response~data$DetStock*data$Enzyme)
Anova(lm1f)
step(lm1f)
# Interaction between detergent and enzyme doesn't seem to be significant, add enzyme concentration
lm1g <- lm(data$Response~data$DetStock+data$Enzyme+data$EnzymeConc)
Anova(lm1g)
lm1h <- lm(data$Response~(data$DetStock+data$Enzyme)*data$EnzymeConc)
Anova(lm1h)
lm1i <- lm(data$Response~data$DetStock*(data$Enzyme+data$EnzymeConc))
Anova(lm1i)
drop1(lm1h, test="F")
drop1(lm1i, test="F")
BIC(lm1h, lm1i) # We prefer lm1h since BIC is better
AIC(lm1h, lm1i) # We prefer lm1h since AIC is better
# Full interactions without hardness
lm1j <- lm(data$Response~data$DetStock*data$Enzyme*data$EnzymeConc)
Anova(lm1j)
drop1(lm1j, test="F")
lm1k <- update(lm1j, ~.-data$DetStock:data$Enzyme:data$EnzymeConc)
Anova(lm1k)
drop1(lm1k, test="F")
# k=3.8 sets a stiffer penalty than the default AIC (k=2) in step()
step(lm1j,k=3.8) # We get the same model by backward selection, thus lm1k is our selected model for the complete dataframe
# Full interactions with hardness
lm1l <- lm(data$Response~data$Enzyme*data$EnzymeConc*data$DetStock*data$CaStock)
step(lm1l, k=3.8)
AIC(lm1a, lm1b, lm1c, lm1d, lm1e, lm1f, lm1g, lm1h, lm1i, lm1k, lm1l)
BIC(lm1a, lm1b, lm1c, lm1d, lm1e, lm1f, lm1g, lm1h, lm1i, lm1k, lm1l)
# Model Selection (2) -----------------------------------------------------
# Same strategy on `df`, where detergent and hardness are merged into the
# combined factor `Stock`.
# Testing response given stock
lm2a <- lm(df$Response~df$Stock, df)
Anova(lm2a)
lm2b <- lm(df$Response~df$Enzyme, df)
Anova(lm2b)
# Testing response given stock and enzyme
lm2c <- lm(df$Response~df$Enzyme+df$Stock, df)
Anova(lm2c)
lm2d <- lm(df$Response~df$Enzyme*df$Stock, df)
Anova(lm2d)
step(lm2d)
# Testing response given stock, enzyme and enzyme concentration (with interactions)
lm2e <- lm(df$Response~df$Enzyme+df$Stock+df$EnzymeConc, df)
Anova(lm2e)
lm2f <- lm(df$Response~(df$Enzyme+df$Stock)*df$EnzymeConc, df)
Anova(lm2f)
lm2g <- lm(df$Response~df$Enzyme*(df$Stock+df$EnzymeConc), df)
Anova(lm2g)
drop1(lm2f, test="F")
drop1(lm2g, test="F")
BIC(lm2f, lm2g) # We prefer lm2f since BIC is better
AIC(lm2f, lm2g) # We prefer lm2f since AIC is better
# Full three-way interaction model, then drop the triple interaction
lm2h <- lm(df$Response~df$Enzyme*df$Stock*df$EnzymeConc, df)
Anova(lm2h)
drop1(lm2h, test="F")
lm2i <- update(lm2h, ~.-df$Enzyme:df$Stock:df$EnzymeConc)
Anova(lm2i)
drop1(lm2i, test="F")
step(lm2h,k=2) # We get the same model by backward selection, thus lm2i is our selected model for the complete dataframe
# Compare every candidate from both selection rounds
AIC(lm1a, lm1b, lm1c, lm1d, lm1e, lm1f, lm1g, lm1h, lm1i, lm1k, lm1l, lm2a, lm2b, lm2c, lm2d, lm2e, lm2f, lm2g, lm2h, lm2i)
BIC(lm1a, lm1b, lm1c, lm1d, lm1e, lm1f, lm1g, lm1h, lm1i, lm1k, lm1l, lm2a, lm2b, lm2c, lm2d, lm2e, lm2f, lm2g, lm2h, lm2i)
AIC(lm1k, lm2i)
BIC(lm1k, lm2i)
# It seems that lm1k is better (without the hardness of the water)
# Outlier detection -------------------------------------------------------
par(mfrow=c(1,1))
qqPlot(lm1k)
# NOTE(review): rows 147 and 159 are hard-coded from visual inspection of
# the QQ plot above; re-check these indices if the input data changes.
data <- data[-c(147,159),]
# Testing the model -------------------------------------------------------
# Refit the selected model (lm1k) on the cleaned data and re-drop the
# three-way interaction, as in Model selection (1).
lm1k <- lm(data$Response~data$DetStock*data$Enzyme*data$EnzymeConc)
lm1k <- update(lm1k, ~.-data$DetStock:data$Enzyme:data$EnzymeConc)
Anova(lm1k)
summary(lm1k, correlation=TRUE)
#png(filename="LinearModel_Transformed.png", width=750, height=750)
# Standard diagnostic plots, colored by enzyme type
par(mfrow=c(2,2))
plot(lm1k, col=as.numeric(data$Enzyme)+1, pch=19)
#dev.off()
|
c2c117b1c5de34a002a51b114cecf5664376d845
|
778958c1e9077cda75f9191068170e0cd6dc8143
|
/R/petemisc.R
|
b951783cc46b2d8d8067918e46983b19c6198b5c
|
[
"MIT"
] |
permissive
|
petestmarie/petemisc
|
fafab2a769962cc5e48314f7bf7c2f6443eb001a
|
1d991bc9e9f3b0cb2c2febbb0640114f3258b64c
|
refs/heads/master
| 2021-01-20T09:49:36.186511
| 2016-02-18T19:42:43
| 2016-02-18T19:42:43
| 51,448,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
petemisc.R
|
#' petemisc: A package of miscellaneous helper functions.
#'
#' @section petemisc functions:
#' \code{\link{fwr}} returns power and omega from Lui (2012).
#' \code{\link{age_years}} returns age in years computed from a birth date
#' and a second (reference) date.
#'
#' @docType package
#' @name petemisc
NULL
|
4ac689b49e91a944ed089f118712ec0828795b62
|
876f5f4de8ccb50bb67a28af9d731f8d94d9988e
|
/man/GO.Rd
|
c1abb6308ceb31832812851b2984925d57c8ba65
|
[] |
no_license
|
jarioksa/GO
|
f9e763b5e3b0e89ada51d588de71ff5f71040a0a
|
42e5f65902d48dfe702127a8b5ef5572405ab380
|
refs/heads/master
| 2016-09-16T15:07:10.117729
| 2015-08-12T08:23:28
| 2015-08-12T08:23:28
| 32,721,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,042
|
rd
|
GO.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/GO.R
\name{GO1}
\alias{GO}
\alias{GO1}
\alias{anova.GO}
\alias{calibrate.GO}
\alias{metaGO}
\alias{plot.GO}
\alias{predict.GO}
\alias{spanodev}
\title{Unconstrained Gaussian Maximum Likelihood Ordination}
\usage{
GO1(comm, tot = max(comm), freqlim = 5, parallel = 1, trace = TRUE, ...)
GO(comm, k = 1, tot = max(comm), freqlim = 5, family = c("poisson",
"binomial"), far = 10, init, trace = TRUE, ...)
metaGO(comm, k = 1, trymax = 3, firstOK = TRUE, trace = FALSE, ...)
\method{plot}{GO}(x, choices = 1, label = FALSE, marginal = FALSE,
cex = 0.7, col = 1:6, ...)
\method{anova}{GO}(object, ...)
spanodev(mod1, mod2 = NULL, ...)
\method{predict}{GO}(object, newdata, type = c("response", "link"), ...)
\method{calibrate}{GO}(object, newdata, ...)
}
\arguments{
\item{comm}{Community data frame.}
\item{tot}{Total abundance used in Binomial models. This can be
either a single value for all data, or a vector with value for each
row of \code{comm}. The default is to use the maximum value in
matrix.}
\item{freqlim}{Minimum number of occurrence for analysed species.}
\item{parallel}{Number of parallel processes.}
\item{trace}{logical; print information on the progress of the analysis.}
\item{k}{Number of estimated gradients (axes).}
\item{family}{Error distribution. Can be either \code{"poisson"}
for quasi-Poisson or \code{"binomial"} for quasi-Binomial (and must
be quoted).}
\item{far}{Threshold distance for species optima regarded as
alien and frozen in fitting.}
\item{init}{Initial configuration to start the iterations. This
should be a matrix, the number of rows should match the community
data, and the number of columns the number of gradients (\code{k}). The
default is to use scores from \code{\link[vegan]{decorana}}.}
\item{trymax}{Maximum number of random starts.}
\item{firstOK}{Do not launch random starts if default start succeeds.}
\item{x}{Fitted model.}
\item{choices}{The axis or axes plotted.}
\item{label}{Label species responses.}
\item{marginal}{Plot marginal responses or slice through origin of
other dimensions.}
\item{cex}{Character size for \code{labels}.}
\item{col}{Colours of response curves.}
\item{object}{Ordination result object.}
\item{mod1,mod2}{Compared result objects}
\item{newdata}{New gradient locations to \code{predict} species
responses or new community data to \code{calibrate} gradient
locations.}
\item{type}{Predictions in the scale of responses or in the scale
of link function.}
\item{\dots}{Other parameters passed to functions. In \code{GO}
these are passed to \code{\link{nlm}} and can include, e.g.,
\code{iterlim} (which often must be set to higher value than the
default 100).}
}
\description{
The functions fit unconstrained maximum likelihood ordination with
unit-width Gaussian response models.
}
\details{
Function is under development and unreleased. It will be released
under different name in \pkg{vegan}. The current version is only
provided for review purposes. The function and its support functions
require \pkg{vegan}, although this requirement is not explicitly
declared. The optimization is based on the \code{\link{nlm}} function, and passes
arguments to this function.
Function \code{anova} can analyse two nested models or a single model
against null model of flat responses using parametric tests based on
quasi-Likelihood. Function \code{spanodev} performs similar test
split by species. Function \code{predict} returns estimated response
curves, and \code{newdata} can be gradient locations. Function
\code{calibrate} returns estimated gradient locations, and \code{newdata}
can be community data.
The \code{plot} function displays fitted response curves against one
ordination axis. In principle, the ordination can be rotated using
\pkg{vegan} function \code{\link[vegan]{MDSrotate}}, but this requires
a version that agrees to analyse \code{GO} results. Traditional
ordination plots of SU scores and species optima can be displayed
with \code{\link[vegan]{ordiplot}} (\pkg{vegan} package). The function
is written so that several other \pkg{vegan} and standard \R functions
can handle results.
}
\section{Functions}{
\itemize{
\item \code{GO1}: Alternating estimation of species parameters and
gradient locations in one dimension.
\item \code{GO}: Simultaneous estimation of species parameters and
gradient locations.
\item \code{metaGO}: Start GO several times from random configurations if
default start fails or optionally always
\item \code{spanodev}: Comparison of goodness of fit for individual species.
}}
\examples{
library(vegan) ## *must* be given before using the function
data(varespec)
mod <- GO(varespec, k=2, far=5, tot=100, family="binomial", iterlim=1000)
plot(mod, label=TRUE)
ordiplot(mod, type="t")
ordiplot(mod, dis="si", type="t")
anova(mod)
mod1 <- update(mod, k=1)
anova(mod1, mod)
spanodev(mod1)
spanodev(mod1, mod)
}
\author{
Jari Oksanen
}
\seealso{
\code{\link[VGAM]{cgo}} in \pkg{VGAM} package.
}
|
ef0b667b4fdf2a82dc56d70e1aebf06d71bcad61
|
dcfa1e9cb6aa24d93f566b42fcdee3d5542bff1c
|
/man/list_stratification_values.Rd
|
4c38a46622a9441b936a4eb7655603a3dee299c8
|
[
"Apache-2.0",
"CC0-1.0",
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-public-domain"
] |
permissive
|
WayneBogart/EPHTrackR
|
ab9b0b62c627dc72ead6ac0a8aebb830d1df7508
|
abc0234566071613abb6bfc202e85959bc69b2d1
|
refs/heads/master
| 2023-05-16T06:15:08.836021
| 2021-05-28T13:54:30
| 2021-05-28T13:54:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,680
|
rd
|
list_stratification_values.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_stratification_values.R
\name{list_stratification_values}
\alias{list_stratification_values}
\title{List stratification values}
\usage{
list_stratification_values(
measure = NA,
geo_type = NA,
geo_type_ID = NA,
format = "ID",
smoothing = 0
)
}
\arguments{
\item{measure}{Specifies the measure of interest as an ID, name, or shortName. IDs should be unquoted; name and shortName entries should be quoted strings.}
\item{geo_type}{An optional argument in which you can specify a geographic type as a quoted string (e.g., "State", "County"). The "geographicType" column in the list_geography_types() output contains a list of geo_types associated with each measure.}
\item{geo_type_ID}{An optional argument in which you can specify a geographic type ID as an unquoted numeric value (e.g., 1, 2). The "geographicTypeId" column in the list_geography_types() output contains a list of geo_types associated with each measure.}
\item{format}{Indicates whether the measure argument contains entries formatted as an ID, name, or shortName as a quoted string (e.g., "name", "shortName"). The default is ID.}
\item{smoothing}{Specifies whether to return stratification values for geographically smoothed versions of a measure (1) or not (0). The default value is 0 because smoothing is not available for most measures.}
}
\value{
This function returns a list with each element containing a data frame corresponding to all combinations of specified measures and geographic types. Within each row of the data frame is a nested data frame containing the stratification values. If the specified measure and associated geography type do not have any "Advanced Options" stratifications, the returned list element will be empty.
}
\description{
Some measures on the Tracking Network have a set of "Advanced Options" that allow the user to access data stratified by variables other than geography or temporal period. For instance, data on asthma hospitalizations can be broken down further by age and/or gender. This function allows the user to list available "Advanced Options" stratification values for specified measures and geographic types. For instance, in the case of the asthma hospitalization data, it would be possible to view the potential gender (e.g., Male, Female), and age (e.g., 0–4 years, >=65 years) values that are available.
The user should not need this function to retrieve data from the Tracking Network Data API because the get_data() function calls it internally. It can, however, be used as a reference to view available stratification values.
}
\examples{
\dontrun{
}
}
|
70c6deffdbdfecf2b54e99dc4bb37bafd97eacb4
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/sourcecode/pani-depth.R
|
b7c7ef3f6fda7733176df7f4a62ef05c8d4b0b2d
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,506
|
r
|
pani-depth.R
|
#
# pani-depth.R, 13 Jan 19
# Data from:
# Loop Patterns in C Programs
# Thomas Pani
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG loop basic-block_depth source-code_loop C_loop
# Setup: assumes ESEUR_config.r defines ESEUR_dir and plot_layout() -- not
# shown here (TODO confirm).
source("ESEUR_config.r")
# Two stacked panels: exponential fit on top, power-law fit below.
plot_layout(2, 1)
pal_col=rainbow(2)
# Loop counts by basic-block depth; keep only rows with positive depth/count
# so that log-scale plotting and log() fits below are well defined.
depth=read.csv(paste0(ESEUR_dir, "sourcecode/pani-depth.csv.xz"), as.is=TRUE)
depth=subset(depth, (num > 0) & (bound > 0))
cbench=subset(depth, project == "cBench")
CU=subset(depth, project == "coreutils")
# plot(cbench$num, cbench$bound, log="y", col=pal_col[1],
# xlim=c(0, 30),
# xlab="Max nesting", ylab="Loops")
# Fit an exponential model (linear regression on log(bound) vs num) to the
# loop counts, plot the data on a log-y axis and overlay the fitted curve.
# Returns the fitted glm object.
# NOTE(review): the inline comment says "to depth 10" but the fit subset is
# num > 2, and lines() hard-codes rows 3:25 (assumes the caller passes
# exactly 25 rows, consecutive in num) -- confirm both against the data.
fit_expo=function(df)
{
plot(df$num, df$bound, log="y", col=pal_col[2],
	xlab="Basic block depth", ylab="Loops\n")
# Fit nesting to depth 10, assuming that after that screen width has an impact
cb_mod=glm(log(bound) ~ num, data=df, subset=(num > 2))
summary(cb_mod)
pred=predict(cb_mod)
lines(df$num[3:25], exp(pred), col=pal_col[1])
return(cb_mod)
}
cb_mod=fit_expo(cbench[1:25, ])
# CU_mod=fit_expo(CU[1:20, ])
# Fit a power-law model (linear regression on log(bound) vs log(num)) to the
# loop counts, plot the data on log-log axes and overlay the fitted curve.
# Returns the fitted glm object.
# NOTE(review): lines() hard-codes rows 1:10, matching subset=(num < 11)
# only if rows 1..10 are exactly the depths 1..10 -- confirm against data.
fit_power=function(df)
{
plot(df$num, df$bound, log="xy", col=pal_col[2],
	xlab="Basic block depth", ylab="Loops\n")
# Fit nesting to depth 10, assuming that after that screen width has an impact
cb_mod=glm(log(bound) ~ log(num), data=df, subset=(num < 11))
summary(cb_mod)
pred=predict(cb_mod)
lines(df$num[1:10], exp(pred), col=pal_col[1])
return(cb_mod)
}
cb_mod=fit_power(cbench[1:25, ])
# CU_mod=fit_power(CU[1:20, ])
|
9b341f7574dd2e9e15038c5b8c57e3b38550c64c
|
cf586320910e8f27fe7e9dbe70c5909d550c3d44
|
/1_Intro_to_Shiny_materials/templates/server_function_rule_1.R
|
00041aa14994c5c19fab9b70854051ba2ba552b1
|
[] |
no_license
|
narensahu13/Shiny-Materials
|
bb757f94e06fe83c466dc169e629a36cbbdf4e87
|
b0ad1d3428126b8af2681446e8ea085eaba619e1
|
refs/heads/master
| 2020-05-16T13:03:32.929982
| 2019-04-23T17:28:05
| 2019-04-23T17:28:05
| 183,063,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
server_function_rule_1.R
|
# Shiny server-function template ("rule 1"): every reactive output is built
# inside the server function by assigning a render* result to
# output$<outputId>.
# NOTE(review): "# some code" is a deliberate placeholder; the file is not
# runnable until a render expression (e.g. renderPlot({...})) is filled in.
server <- function(input, output) {
  output$hist <- # some code
}
|
6d69ebc31931e1e6153e374b36e308b3fd9d0167
|
337fa3213115b40d7fdd534cc993940762499d1e
|
/plotting_functions/plotTrend.R
|
b9cd5c571ae89247354a89ba8b43a16369317b7e
|
[] |
no_license
|
bjonesPew/r-functions
|
cda613d4f68160cbceccf4a2d55e7f75f5284c2c
|
cf9b0ca9f8f7f455cf3cfed1fcb4e5c8f13d8859
|
refs/heads/master
| 2023-07-26T04:41:56.697478
| 2023-07-17T21:41:51
| 2023-07-17T21:41:51
| 189,678,193
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,805
|
r
|
plotTrend.R
|
## Draw a multi-series trend chart: one line per column of `to.plot`, with
## optional dots, value labels, background reference lines/scatter,
## per-series labels, a legend and free-text annotations.  Output goes to a
## new on-screen device or, when write.file != "no", to a temporary
## jpg/pdf whose path is returned.
##
## to.plot          matrix/data.frame; columns = series, rows = time points
## xlim, ylim       plot-region limits (xlim is shifted by xadj)
## x                x coordinate (e.g. year) for each row of to.plot
## axis.control     optional list(at=, labels=) for a custom x axis
## col.vec          one colour per series (indexed by column)
## ord              drawing order of the series; default 1:ncol(to.plot)
## plot.width/plot.height  device size in inches
## labels, lab.pos  per-series label text and list of (x, y) positions
## hollow           logical, scalar or per-row; forwarded to addDot()
## res              magnification applied to the on-screen device only
## write.file       "no" (screen), "jpg" or "pdf"
## addPoints        draw a dot at every observation
## xadj             additive (left, right) adjustment to xlim
## lwd, lty         line widths (per series) and line types (per segment)
## val.lab.adj      per-series x per-label-point y offsets for value labels
## point.size       cex for dots; scalar or one per series
## lab.points, lab.points.pos  row indices where value labels are drawn and
##                  their text() pos codes; default: first and last row
## add.lines        x positions for vertical grey reference lines
## add.scatter      list(coords=, col=, cex=, pch=) background scatter
## add.legend       list(labels=) horizontal legend at the top of the plot
## text.labels.list list of list(x, y, text, fam, col) annotations
## add.value.labels / add.all.value.labels  control value-label printing
##
## NOTE(review): relies on a global helper addDot() (not defined in this
## file) and on the "Franklin Gothic" fonts being installed for non-pdf
## output.
plotTrend <- function(to.plot,
                      xlim,
                      ylim,
                      x = NULL,
                      axis.control = NULL,
                      col.vec = NULL,
                      ord = NULL,
                      plot.width = 3.2,
                      plot.height = 2,
                      labels = NULL,
                      lab.pos = NULL,
                      hollow = TRUE,
                      res = 1,
                      write.file = "no",
                      addPoints = TRUE,
                      xadj = c(0,0),
                      lwd = 2.5,
                      lty = 1,
                      val.lab.adj = NULL,
                      point.size = 1.2,
                      lab.points = NULL,
                      lab.points.pos = NULL,
                      add.lines = NULL,
                      add.scatter = NULL,
                      add.legend = NULL,
                      text.labels.list = NULL,
                      add.value.labels = TRUE,
                      add.all.value.labels = NULL) {
  ##open the plot window (multiplied by the resolution factor)
  dev.new(width=plot.width*res, height=plot.height*res)
  ##Re order value label adjustment columns
  ## (columns are reversed so that column 1 matches the *last* label point)
  if (!is.null(val.lab.adj)) val.lab.adj <-
    val.lab.adj[,ncol(val.lab.adj):1]
  ##save to file
  ## A second device is opened on top of the dev.new() window; both are
  ## closed at the end of the function.
  if (write.file != "no") {
    if (write.file == "jpg") {
      src <- tempfile(fileext = ".jpg")
      jpeg(src, width = plot.width, height = plot.height,
           units = 'in', res = 1000)
    }
    if (write.file == "pdf") {
      src <- tempfile(fileext = ".pdf")
      pdf(src, width = plot.width, height = plot.height)
    }
  }
  if (is.null(ord)) ord <- 1:ncol(to.plot)
  ##Reorder the data if it is in the wrong order
  ## NOTE(review): `sort` shadows base::sort here; the call sort(ord) below
  ## still finds the base *function*, but a different name would be clearer.
  sort <- order(x, decreasing = TRUE)
  x <- x[sort]
  to.plot <- to.plot[sort,]
  ##Open plot window
  par(mar = c(1.7,.1,.1,.1))
  ## pdf devices cannot use the custom font, so fall back to the default
  fam <- ifelse(write.file == "pdf", "", "Franklin Gothic Book")
  ## Empty plot: all elements are layered on manually below.
  plot(0,0, pch = '', xlim = xlim+xadj, ylim = ylim,
       axes = FALSE, xlab = '', ylab = '',
       family = fam)
  ##print axis
  if (is.null(axis.control)) axis(1, family = fam)
  if (!is.null(axis.control)) {
    ## axis line without ticks, then ticks without labels, then labels
    ## without a line -- three passes to control each element separately.
    axis(1, at=xlim, labels=c("",""),
         col = grey(.4), lwd.ticks=0)
    axis(1, at = axis.control[['at']], labels = NA,
         cex.axis = .75, col = grey(.4), tcl = -.4,
         tck = -.02)
    axis(1, at = axis.control[['at']], family = fam,
         labels = axis.control[['labels']],
         cex.axis = .75, lwd = 0, line = -.7)
  }
  ## Recycle scalar arguments to the lengths the loops below expect.
  if (length(lwd) != ncol(to.plot)) lwds <- rep(lwd, ncol(to.plot))
  if (length(lwd) == ncol(to.plot)) lwds <- lwd
  if (is.null(val.lab.adj)) val.lab.adj <- array(0, c(length(ord), 2))
  if ( length(hollow) == 1) hollow = rep(hollow, nrow(to.plot))
  if (length(lty) == 1) lty = rep(lty, nrow(to.plot))
  if (is.null(lab.points)) {
    lab.points = c(1, nrow(to.plot))
    lab.points.pos = c(4, 2)
  }
  ##Add background elements
  if (!is.null(add.lines)) {
    abline(v = add.lines, col = 'grey')
  }
  if (!is.null(add.scatter)) {
    scatter_col <- ifelse(is.null(add.scatter$col), "grey", add.scatter$col)
    scatter_cex <- ifelse(is.null(add.scatter$cex), 1, add.scatter$cex)
    scatter_pch <- ifelse(is.null(add.scatter$pch), 20, add.scatter$pch)
    points(add.scatter$coords[,1],
           add.scatter$coords[,2],
           col = scatter_col, cex = scatter_cex,
           pch = scatter_pch)
  }
  ## Main drawing loop: one pass per series, in the requested order.
  for (j in ord) {
    fam <- ifelse(write.file == "pdf", "", "Franklin Gothic Demi")
    ##Add trend lines
    ## drawn segment by segment so each gap can have its own line type
    for (k in 2:length(to.plot[,j])) {
      segments(y0 = to.plot[k,j],
               y1 = to.plot[k-1,j],
               x0 = x[k], x1 = x[k-1],
               col = col.vec[j],
               lty = lty[k-1],
               lwd = lwds[j])
    }
    ##Add value labels
    ## printed only at the selected lab.points rows (default: endpoints)
    if (add.value.labels) {
      for (k in 1:length(lab.points)) {
        lp <- lab.points[k]
        text(x[lp], to.plot[lp,j]+val.lab.adj[j,k],
             round(abs(to.plot[lp,j])), family = fam,
             col = col.vec[j], pos = lab.points.pos[k], cex = .75)
      }
    }
    ## Optionally label every point of the series listed in
    ## add.all.value.labels (values give the text() pos code per series).
    if (!is.null(add.all.value.labels)) {
      for (k in 1:length(x)) {
        for (i in 1:length(add.all.value.labels)) {
          text(x[k], to.plot[k,i], round(abs(to.plot[k,i])),
               family = fam, col = col.vec[i], pos = add.all.value.labels[i],
               cex = .75)
        }
      }
    }
    if (addPoints) {
      for (k in 1:nrow(to.plot)) {
        if (length(point.size) == 1) ptsz = point.size
        if (length(point.size) > 1) ptsz = point.size[j]
        addDot(x[k], to.plot[k,j], col = col.vec[j],
               hollow = hollow[k], cex = ptsz)
      }
    }
  }
  ## Per-series text labels at caller-supplied positions.
  if (length(lab.pos)>0) {
    for (j in 1:length(lab.pos)) {
      fam <- ifelse(write.file == "pdf", "", "Franklin Gothic Demi")
      pos <- lab.pos[[j]]
      text(pos[1], pos[2], labels[j],
           family = fam,
           col = col.vec[j],
           cex = .75)
    }
  }
  ## Horizontal legend centred at the top of the plot region.
  if (!is.null(add.legend)) {
    leg <- sort(ord)
    fam <- ifelse(write.file == "pdf", "", "Franklin Gothic Book")
    op <- par(family = fam)
    legend(x = sum(xlim/2), y = ylim[2], xjust = .5,
           add.legend$labels, horiz = TRUE,
           lwd = lwd[leg], col = col.vec[leg], bty = 'n',
           cex = .75)
  }
  ## Free-text annotations, each with its own font/colour.
  if (!is.null(text.labels.list)) {
    for (j in 1:length(text.labels.list)) {
      txt <- text.labels.list[[j]]
      text(txt$x, txt$y, txt$text,
           family = txt$fam,
           col = txt$col,
           cex = .75)
    }
  }
  ## Close the file device *and* the dev.new() window opened at the top,
  ## then return the tempfile path.
  if (write.file != "no") {
    dev.off()
    dev.off()
    return(src)
  }
}
|
522668cd3fc079396d8dc40f5631a4b0cbd1c57f
|
9e488c9abaec0dce8c37fa99301293ed37e26042
|
/Script for Github.R
|
32390c17fd7bc9adf7c58652063584d771015b3e
|
[] |
no_license
|
jckridenour/Forecast
|
e14b4ce53f26e85e10dd814ea734af34a4156ead
|
798bf2a2fac25334d44adfb6e4c4179452b5e01a
|
refs/heads/master
| 2021-01-11T07:05:56.124239
| 2016-10-25T16:17:53
| 2016-10-25T16:17:53
| 71,915,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,664
|
r
|
Script for Github.R
|
rm(list=ls())
##### Gross Domestic Product #####
# Bivariate model: Republican two-party vote share regressed on GDP growth.
# NOTE(review): setwd()/rm(list=ls()) make this script machine-specific and
# destructive; kept as-is to preserve the original interactive workflow.
setwd("C:/Users/Joshua/Documents/Data/Election Data/My Data")
load("The Dataset.RData")
load("gdp.RData")
gdp<-gdp$gdpgr
reppct<-d1$reppct
summary(lm(reppct~gdp))
# Point forecast: coefficients hand-copied from the summary above, with
# GDP growth assumed to be 2.43.
y = .5099323 + (-.0000811*(2.43))
y
# 0.51
##### Index of Consumer Sentiment #####
# Model: vote share regressed on the Index of Consumer Sentiment, dropping
# the first four elections (pre-ICS era) from the dataset.
rm(list=ls())
#setwd("C:/Users/Joshua/Documents/Data/Election Data/My Data")
load("The Dataset.RData")
newdata<-d1[-c(1:4),]
# Hand-typed ICS values for cross-checking against the stored column.
# NOTE(review): if newdata already contains an `ics` column (the next line
# prints it), lm(..., data=newdata) resolves `ics` from the data frame and
# this hand-typed vector is ignored -- confirm the two agree.
ics<-c(99.6, 94.2, 93.2, 86, 64.5, 97.5, 93.7, 77.3, 93.6, 107.6, 95.2, 63.7, 76.6)
newdata$ics
summary(lm(reppct~ics, data=newdata))
# Point forecast with hand-copied coefficients; 91.4 is the assumed ICS,
# centred on 100.
y = .5069529 + .0002481*(91.4-100)
y
# 0.505
##### Unemployment Change #####
rm(list=ls())
load("empch.RData")
summary(lm(reppct~empch, data=empmodel))
y = .49995 + -.00154*(4.9-8.2)
y
# 0.505
##### International Conflicts #####
rm(list=ls())
setwd("C:/Users/Joshua/Documents/Data/Election Data/PRIO")
load("prio.RData")
conf<-prio
rm(prio)
conf$conflict<-rep(1, length(conf[,1]))
conf44<-sum(conf$conflict[conf$Year==1944])
conf45<-sum(conf$conflict[conf$Year==1945])
conf46<-sum(conf$conflict[conf$Year==1946])
conf47<-sum(conf$conflict[conf$Year==1947])
conf48<-sum(conf$conflict[conf$Year==1948])
conf49<-sum(conf$conflict[conf$Year==1949])
conf50<-sum(conf$conflict[conf$Year==1950])
conf51<-sum(conf$conflict[conf$Year==1951])
conf52<-sum(conf$conflict[conf$Year==1952])
conf53<-sum(conf$conflict[conf$Year==1953])
conf54<-sum(conf$conflict[conf$Year==1954])
conf55<-sum(conf$conflict[conf$Year==1955])
conf56<-sum(conf$conflict[conf$Year==1956])
conf57<-sum(conf$conflict[conf$Year==1957])
conf58<-sum(conf$conflict[conf$Year==1958])
conf59<-sum(conf$conflict[conf$Year==1959])
conf60<-sum(conf$conflict[conf$Year==1960])
conf61<-sum(conf$conflict[conf$Year==1961])
conf62<-sum(conf$conflict[conf$Year==1962])
conf63<-sum(conf$conflict[conf$Year==1963])
conf64<-sum(conf$conflict[conf$Year==1964])
conf65<-sum(conf$conflict[conf$Year==1965])
conf66<-sum(conf$conflict[conf$Year==1966])
conf67<-sum(conf$conflict[conf$Year==1967])
conf68<-sum(conf$conflict[conf$Year==1968])
conf69<-sum(conf$conflict[conf$Year==1969])
conf70<-sum(conf$conflict[conf$Year==1970])
conf71<-sum(conf$conflict[conf$Year==1971])
conf72<-sum(conf$conflict[conf$Year==1972])
conf73<-sum(conf$conflict[conf$Year==1973])
conf74<-sum(conf$conflict[conf$Year==1974])
conf75<-sum(conf$conflict[conf$Year==1975])
conf76<-sum(conf$conflict[conf$Year==1976])
conf77<-sum(conf$conflict[conf$Year==1977])
conf78<-sum(conf$conflict[conf$Year==1978])
conf79<-sum(conf$conflict[conf$Year==1979])
conf80<-sum(conf$conflict[conf$Year==1980])
conf81<-sum(conf$conflict[conf$Year==1981])
conf82<-sum(conf$conflict[conf$Year==1982])
conf83<-sum(conf$conflict[conf$Year==1983])
conf84<-sum(conf$conflict[conf$Year==1984])
conf85<-sum(conf$conflict[conf$Year==1985])
conf86<-sum(conf$conflict[conf$Year==1986])
conf87<-sum(conf$conflict[conf$Year==1987])
conf88<-sum(conf$conflict[conf$Year==1988])
conf89<-sum(conf$conflict[conf$Year==1989])
conf90<-sum(conf$conflict[conf$Year==1990])
conf91<-sum(conf$conflict[conf$Year==1991])
conf92<-sum(conf$conflict[conf$Year==1992])
conf93<-sum(conf$conflict[conf$Year==1993])
conf94<-sum(conf$conflict[conf$Year==1994])
conf95<-sum(conf$conflict[conf$Year==1995])
conf96<-sum(conf$conflict[conf$Year==1996])
conf97<-sum(conf$conflict[conf$Year==1997])
conf98<-sum(conf$conflict[conf$Year==1998])
conf99<-sum(conf$conflict[conf$Year==1999])
conf00<-sum(conf$conflict[conf$Year==2000])
conf01<-sum(conf$conflict[conf$Year==2001])
conf02<-sum(conf$conflict[conf$Year==2002])
conf03<-sum(conf$conflict[conf$Year==2003])
conf04<-sum(conf$conflict[conf$Year==2004])
conf05<-sum(conf$conflict[conf$Year==2005])
conf06<-sum(conf$conflict[conf$Year==2006])
conf07<-sum(conf$conflict[conf$Year==2007])
conf08<-sum(conf$conflict[conf$Year==2008])
conf09<-sum(conf$conflict[conf$Year==2009])
conf10<-sum(conf$conflict[conf$Year==2010])
conf11<-sum(conf$conflict[conf$Year==2011])
conf12<-sum(conf$conflict[conf$Year==2012])
conf13<-sum(conf$conflict[conf$Year==2013])
conf14<-sum(conf$conflict[conf$Year==2014])
conf15<-sum(conf$conflict[conf$Year==2015])
allpre<-c((conf44+conf45+conf46+conf47), (conf48+conf49+conf50+conf51),
(conf52+conf53+conf54+conf55), (conf56+conf57+conf58+conf59),
(conf60+conf61+conf62+conf63), (conf64+conf65+conf66+conf67),
(conf68+conf69+conf70+conf71), (conf72+conf73+conf74+conf75),
(conf76+conf77+conf78+conf79), (conf80+conf81+conf82+conf83),
(conf84+conf85+conf86+conf87), (conf88+conf89+conf90+conf91),
(conf92+conf93+conf94+conf95), (conf96+conf97+conf98+conf99),
(conf00+conf01+conf02+conf03), (conf04+conf05+conf06+conf07),
(conf08+conf09+conf10+conf11))
latest<-conf12+conf13+conf14+conf15
setwd("C:/Users/Joshua/Documents/Data/Election Data/My Data")
load("The Dataset.RData")
reppct<-d1$reppct
summary(lm(reppct~allpre))
y = .5080 + 0.00001056*(160)
y
# 0.51
##### Voting Eligible Population and Turnout #####
## RECODING PORTION -- DO NOT USE FOR ANALYSIS ##
# Builds the veptoch (col 7) and vepch (col 8) change columns from vepto
# (col 5, turnout) and vep (col 6, eligible population), then extrapolates
# 2016 values as last observation +/- the mean change.  The results were
# saved back into "election data since 1980.RData"; the analysis component
# below reloads that file and recomputes only what it needs.
rm(list=ls())
setwd("C:/Users/Joshua/Dropbox/University of Arizona/2016-2017/Fall 2016/POL 683 - Methods/Midterm Project/Newest")
load("election data since 1980.RData")
mod_vepto<-c(.542, .552, .528, .581, .517, .542, .601, .616, .580) #US Elections Project
mod_vep<-c(159635102, 167701904, 173579281, 179655523, 186347044, 194331436, 203483455, 213313508, 222474111) # US Elections Project
plot(data_mod$year, data_mod$vepto, xlab="Election Year", ylab="VEP Turnout", ylim=c(.4, .7))
lines(data_mod$year, data_mod$vepto)
# Election-to-election change in turnout (column 7).
data_mod$veptoch<-NA
for(i in 2:9){
  data_mod[i,7]<-data_mod[i,5] - data_mod[i-1,5]
}
# 1980's change is measured against the 1976 turnout of .548.
data_mod[1,7]<- data_mod[1,5]-.548
veptoch16<-mean(data_mod$veptoch)
vepto16<-data_mod[9,5] - veptoch16
# Election-to-election change in eligible population (column 8).
data_mod$vepch<-NA
for(i in 2:9){
  data_mod[i,8]<-data_mod[i,6] - data_mod[i-1,6]
}
# 1980's change relative to 1976 -- hard-coded from the source data.
data_mod[1,8] <- 7854876
vep16<-data_mod[9,6] + mean(data_mod$vepch)
## Analysis Component -- START HERE FOR ANALYSIS ##
rm(list=ls())
setwd("C:/Users/Joshua/Dropbox/University of Arizona/2016-2017/Fall 2016/POL 683 - Methods/Midterm Project/Data")
load("election data since 1980.RData")
# Extrapolated 2016 turnout and eligible population (see recoding above).
veptoch16<-mean(data_mod$veptoch)
vepto16<-data_mod[9,5] - veptoch16
vep16<-data_mod[9,6] + mean(data_mod$vepch)
# Interaction model: vote share on eligible population x turnout.
reg<-lm(reppct~vep*vepto, data=data_mod)
# 2016 point forecast from the fitted coefficients.
y = reg$coefficients[1] + (vep16*reg$coefficients[2]) + (vepto16*reg$coefficients[3]) +(vep16*vepto16*reg$coefficients[4])
y
# 0.441
##### Further Testing of the Model #####
# Leave-one-out validation of the vep*vepto model fitted above (`reg` on
# `data_mod`): refit nine times, each time dropping one election, then
# predict the held-out election with the model that never saw it.  This
# replaces ~120 copy-pasted lines (d1..d9, reg1..reg9, a1..a9, y80..y12,
# diff80..diff12, ...) with equivalent loops.
n_elec <- nrow(data_mod)
loo_models <- lapply(seq_len(n_elec), function(i) {
  lm(reppct ~ vep * vepto, data = data_mod[-i, ])
})
# predict() evaluates the same intercept + vep + vepto + interaction
# formula the original spelled out by hand.
loo_preds <- vapply(seq_len(n_elec), function(i) {
  unname(predict(loo_models[[i]], newdata = data_mod[i, ]))
}, numeric(1))
# Average out-of-sample absolute error.
# BUG FIX: the original wrote mean(diff80, diff84, ...), which R parses as
# mean(x = diff80, trim = diff84, na.rm = diff88, ...) and therefore
# returned only the 1980 error; averaging a vector is the intended result.
loo_diff <- abs(loo_preds - data_mod$reppct)
mean(loo_diff)

# In-sample fit of the full-data model ("Good Model"): absolute difference
# between each prediction and the observed vote share.
full_diff <- abs(fitted(reg) - data_mod$reppct)
mean(full_diff)   # same mean(a, b, ...) bug fixed here as well
min(full_diff)
max(full_diff)

# "Consensus" model: average each coefficient across the nine leave-one-out
# fits, then predict every election with the averaged coefficients.
# BUG FIX: a_all <- mean(a1, a2, ..., a9) likewise returned just a1, so the
# original "averaged" model was really model 1 in disguise; rowMeans over
# the coefficient matrix gives the true averages.
coef_avg <- rowMeans(vapply(loo_models, coef, numeric(4)))
avg_preds <- coef_avg[1] +
  coef_avg[2]*data_mod$vep +
  coef_avg[3]*data_mod$vepto +
  coef_avg[4]*data_mod$vep*data_mod$vepto
avg_diff <- abs(avg_preds - data_mod$reppct)
mean(avg_diff)
##### Graphics Portion #####
# Observed vote shares (black) vs in-sample fitted values (purple), with a
# 50% reference line and the 2016 forecast highlighted in blue.
plot(data_mod$year, data_mod$reppct, pch=19, cex=1, main="Republican Share of the Two-Party Vote", ylab="Proportion", xlab="Year", ylim=c(.4, .7), xlim=c(1980, 2016))
points(data_mod$year, reg$fitted.values, col="purple", pch=15)
lines(data_mod$year, data_mod$reppct, lty=1)
lines(data_mod$year, reg$fitted.values, col="purple", lty=2)
# Horizontal line at the 50% two-party threshold.
abline(.5, 0, col="dark green")
# 2016 point forecast from the VEP/turnout analysis above.
points(2016, .4406698, pch=18, col="blue", cex=2)
text(2013, .42, labels="2016: .44", col="blue", cex=1.1)
|
9971a171236d8443645d8da735c3bd7a9b2659e3
|
e4f02406173e1a20ab179a22839cb8307d3a81d9
|
/data-sci/coursera/getdata/011/quizz1/q4_xml.R
|
424772f4efd518ee956a3d9941991485eeb07559
|
[] |
no_license
|
Mr-Nancy/edu
|
c5d9cb51bf9f099ba9171ba9fd1c7a0e21a8989b
|
81b1e94633d1dba622dedd91dcdcecd8be52a9bd
|
refs/heads/master
| 2021-01-10T22:08:02.784141
| 2015-06-23T17:11:25
| 2015-06-23T17:11:25
| 30,701,679
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 538
|
r
|
q4_xml.R
|
# Quiz 4: parse the restaurants XML feed and count Baltimore restaurants
# whose zipcode is 21231.  The result of table() is the value of the
# function when called; intermediate xmlName()/names() calls are left from
# interactive exploration.
getDataFromXML <- function() {
  library(XML)
  fileUrl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
  # Parse the document once.  The original parsed the same URL twice
  # (xmlTreeParse(..., useInternal=TRUE) immediately overwritten by
  # xmlInternalTreeParse), downloading the feed a second time for no gain;
  # both calls produce an internal C-level document, so one suffices.
  doc <- xmlInternalTreeParse(fileUrl)
  rootNode <- xmlRoot(doc)
  xmlName(rootNode)
  names(rootNode)
  rootNode[[1]][[1]]
  # XPath: collect every <zipcode> value, then count matches for 21231
  # (character values are coerced for the comparison).
  zipcode <- xpathSApply(rootNode, "//zipcode", xmlValue)
  table(zipcode == 21231)
  # FALSE TRUE
  # 1200   127
  # Other values one might extract:
  #xpathSApply(rootNode, "//name", xmlValue)
  #xpathSApply(rootNode, "//price", xmlValue)
}
|
35fea6bc4be92fb3bf12483bea906661d82c8f88
|
27c7ebc4fa79ebf598a5aa1c9fed166fed46e33b
|
/viz/3_external_markers_elements.R
|
ff27b993b4a160f5dbf82807892956e38c007e2b
|
[] |
no_license
|
millersan/scrna_seurat_pipeline
|
22e8c1e3fe5892c6d5b642641e242253779763e4
|
d3942c13ced780a8d32e4657dc1381bc86ffcc04
|
refs/heads/master
| 2023-04-29T19:11:11.828041
| 2021-05-09T22:54:55
| 2021-05-09T22:54:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,767
|
r
|
3_external_markers_elements.R
|
## Plot expression of externally curated cell-type marker genes on a Seurat
## object: UMAP feature plots (4 genes per page), one dot plot per cell
## type, and violin plots (9 genes per page), each saved via
## save_ggplot_formats().
##
## scrna  Seurat object; the last entry of scrna@tools$parameter must record
##        the external marker file, organ and species used for this run.
##
## NOTE(review): depends on globals defined elsewhere in the pipeline:
## ext_annot_fp, report_tables_folder, report_plots_folder, cluster,
## base_color, pos_color, cluster_viridis_opt and save_ggplot_formats().
external_markers_elements <- function(scrna){
  len <- length(scrna@tools$parameter)
  # Guard: the object must have been processed with the same external
  # annotation file this run is configured to use.
  assertthat::assert_that(
    basename(scrna@tools$parameter[[len]]['external_file']) == basename(ext_annot_fp)
  )
  ORGAN = scrna@tools$parameter[[len]]['organ']
  SPECIES = scrna@tools$parameter[[len]]['species']
  # Marker table: tab-separated, one row per (gene, cell type, tissue);
  # drop fully empty rows.
  df <- read.csv(file=ext_annot_fp, sep="\t", stringsAsFactors=F)
  df <- df[!apply(df, 1, function(x) all(x=="")), ]
  if(!(ORGAN %in% df$Tissue.of.Origin)){
    stop("exit 1 no ORGAN")
  }
  if(!(SPECIES %in% c("Human", "Mouse"))){
    stop("exit 1 NO SPECIES")
  }
  # Keep only the species-specific gene column and cell type for this organ.
  mdf = df[df$Tissue.of.Origin == ORGAN, c(glue("{SPECIES}.Gene"), "Cell.Type")]
  celltype_names <- unique(mdf$Cell.Type)
  # Persist the cell-type list for the report.
  saveRDS(
    celltype_names,
    file = file.path(report_tables_folder, "ext_annot_celltype_names.RDS")
  )
  ## External markers
  message(paste0("### ","External markers"))
  Idents(scrna) <- cluster
  o_genes <- rownames(scrna)
  for (a_celltype in celltype_names){
    genes <- mdf[mdf$Cell.Type==a_celltype,glue::glue("{SPECIES}.Gene") ]
    message(paste0("### ",a_celltype))
    # Only plot marker genes that are actually present in the object.
    genes <- intersect(genes, o_genes)
    if (length(genes) == 0){
      next
    }
    col_def = c(base_color,pos_color)
    # UMAP feature plots in pages of up to 4 genes.
    for (i in seq(1, length(genes), by=4)){
      ni = min(i+3, length(genes))
      p1 <- FeaturePlot(
        object = scrna,
        pt.size=0.01,
        label=T,
        label.size=2,
        features = genes[i:ni],
        reduction = "DEFAULT_UMAP",
        order = T,
        # cols = c("lightgrey", "red"),
        cols = col_def,
        ncol = 2,
        max.cutoff = 'q95'
      )
      save_ggplot_formats(
        plt=p1,
        base_plot_dir=report_plots_folder,
        plt_name=paste0("extmarkers_inte_umap_featureplot_",a_celltype,"-genes_",i,"-to-",ni),
        width=9, height=7
      )
    }
    # One dot plot per cell type, all its genes, grouped by cluster.
    p2 <- DotPlot(
      object= scrna,
      features = genes,
      group.by = cluster
    ) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1))
    save_ggplot_formats(
      plt=p2,
      base_plot_dir=report_plots_folder,
      plt_name=paste0("extmarkers_dotplot_",a_celltype,"_groupby-",cluster),
      width=9, height=7
    )
    # Violin plots in pages of up to 9 genes, one viridis colour per cluster.
    group_by <- cluster
    col_def <- viridis::viridis_pal(option = cluster_viridis_opt)(length(unique(scrna@meta.data[,group_by])))
    for (i in seq(1, length(genes), by=9)){
      ni = min(i+8, length(genes))
      p3 <- VlnPlot(
        object = scrna,
        pt.size=0,
        features = genes[i:ni],
        cols = col_def,
        group.by = group_by
      )
      save_ggplot_formats(
        plt=p3,
        base_plot_dir=report_plots_folder,
        plt_name=paste0("extmarkers_vlnplot_",a_celltype,"-genes_",i,"-to-",ni),
        width=9, height=7
      )
    }
  }
}
|
ac2d6d3729b7951322745b49478d523f1412e685
|
597a5c9f177db6f86f7c0e28dcae18052159fc8e
|
/man/rowTtest.Rd
|
4e33f904d28187f310689a4008819a07d7f07418
|
[] |
no_license
|
demuellae/muRtools
|
241e3d1bdc25ada69c54d4b088980433bc7ea15d
|
74db0ac00c56bd39d95a44b52e99bbe03c22d871
|
refs/heads/master
| 2023-06-21T15:07:03.928229
| 2023-06-20T08:49:25
| 2023-06-20T08:49:25
| 18,805,524
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,027
|
rd
|
rowTtest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statTest.R
\name{rowTtest}
\alias{rowTtest}
\title{rowTtest}
\usage{
rowTtest(X, idx1, idx2 = -idx1, na.rm = FALSE, alternative = "two.sided")
}
\arguments{
\item{X}{Matrix on which the test is performed for every row}
\item{idx1}{column indices of group 1 members}
\item{idx2}{column indices of group 2 members}
\item{na.rm}{Should NAs be removed (logical)}
\item{alternative}{Testing alternative. Must be one of "two.sided" (default),"less","greater" or "all".
in case of "all" a data frome with corresping alternative variables is returned.
Otherwise the result is a vector.}
}
\value{
vector (or data.frame if alternative=="all") of p-values resulting from the Welch's t-test
}
\description{
performs a Welch's t-test (unequal variances, equal or unequal sample sizes; two-sided by default) on each row of a matrix X, comparing the columns indexed by idx1 (group 1) against those indexed by idx2 (group 2).
}
\note{
Requires \code{matrixStats} package
}
\author{
Fabian Mueller
}
|
fed2096d0799204c08856d075949254fb293b69b
|
73ae3e97865707d963a52b377378e065c3e1c422
|
/plot4.R
|
1c09e3ad3e6e1f0b5a6aeea19e2b1cab28064e0f
|
[] |
no_license
|
pmuojekwu/Exploratory-Data-Analysis-Week-4-Project
|
66f2e9124940c2310062a5be69211f1480b6e6c9
|
f13b14daf181ca242d0cb484149e8160ce40060b
|
refs/heads/master
| 2022-12-16T10:04:04.095550
| 2020-09-11T19:59:13
| 2020-09-11T19:59:13
| 294,684,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,133
|
r
|
plot4.R
|
# Download and unpack the NEI/SCC emissions data on first run only.
if(!file.exists("data")){
  dir.create("data")
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  download.file(fileUrl, destfile = "./data/FNEI_data.zip", method = "curl")
  unzip("./data/FNEI_data.zip")
}
# Cache the two large RDS tables in the session to avoid re-reading them.
if(!exists("NEI")){
  NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("Source_Classification_Code.rds")
}
library(dplyr)
library(ggplot2)
# Identify coal-combustion sources: EI.Sector entries containing
# "coal"/"Coal" (the character-class regex covers both capitalisations),
# then keep the NEI records whose SCC code matches one of those sources.
coal <- grepl("[Cc][Oo][Aa][Ll]", SCC$EI.Sector)
coal_sources <- SCC[coal, ]
coal_combustion <- NEI[(NEI$SCC %in% coal_sources$SCC), ]
# Total coal-combustion emissions per year.
coal_combustion_related <- coal_combustion %>%
  group_by(year) %>%
  summarise(Emissions = sum(Emissions))
# Plot 4
# Bar chart of yearly totals (converted to kilotons), with value labels.
png("plot4.png", width=640, height=480)
ggplot(coal_combustion_related, aes(x=factor(year), y=Emissions/1000,
                                    fill=year, label = round(Emissions/1000,2))) +
  geom_bar(stat="identity") +
  xlab("Year") +
  ylab(expression("PM"[2.5]*" Emissions (Kilotons)")) +
  ggtitle("Emissions from Coal Combustion-Related Sources in the US from 1999-2008") +
  geom_label(aes(fill = year),colour = "white", fontface = "bold")
dev.off()
|
a88b308b0918bc35c124a9cc0a47130ef9d9c562
|
aad7c7b66c10940ab3cb23cb24192b2417e74fef
|
/man/make_presence_absence_table.Rd
|
7f0fe8c3ad06790a40747d57d337a39d8edca38c
|
[] |
no_license
|
TransmissibleCancerGroup/dftdLowCov
|
7b029a3a2b62e359b60343d6579c3a8be9136099
|
2f884d69654b4289ef322932ba5077940c260ebe
|
refs/heads/master
| 2021-01-02T04:49:03.263038
| 2020-05-28T16:15:21
| 2020-05-28T16:15:21
| 239,495,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 879
|
rd
|
make_presence_absence_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{make_presence_absence_table}
\alias{make_presence_absence_table}
\title{Make a presence / absence table for CNVs
Melts the CNV table on sample name. The output table has the columns
"New.CNV_ID_ext", "chr", "start", "end", "Index", "Type", plus any
extra columns passed in the `extra_columns` parameter}
\usage{
make_presence_absence_table(cnv_table, extra_columns = NULL)
}
\arguments{
\item{cnv_table}{data.table; Table of CNV information}
\item{extra_columns}{character vector; vector of additional columns to include in output.}
}
\description{
Make a presence / absence table for CNVs
Melts the CNV table on sample name. The output table has the columns
"New.CNV_ID_ext", "chr", "start", "end", "Index", "Type", plus any
extra columns passed in the `extra_columns` parameter
}
|
74ffd32c622770d2c57ee483cc836bac5eea8f03
|
aed11657f0d63ca4a3a2136484842b43115ee32a
|
/plot1.R
|
97d44dc7aedb8513ba21e1f5a4a1ceb477061609
|
[] |
no_license
|
soroosj/ExData_Plotting1
|
f68a58a73d0a5d53fdbed45b758c30e0765c9837
|
cfd58388890c63a926aee1ed9eb65f7ba4ec0ecf
|
refs/heads/master
| 2021-04-15T05:01:40.320781
| 2018-03-30T12:00:02
| 2018-03-30T12:00:02
| 126,714,024
| 0
| 0
| null | 2018-03-25T15:54:58
| 2018-03-25T15:54:58
| null |
UTF-8
|
R
| false
| false
| 884
|
r
|
plot1.R
|
#1. load package(s) into R
library(tidyverse)
library(lubridate)

#2. download and unzip file(s) to local directory
power_url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(power_url,"power.zip")
unzip("power.zip")
power_txt <- "household_power_consumption.txt"

#3. upload the file to R ("?" marks missing values in this dataset)
power <- read_delim(power_txt, delim= ";", na = "?")

#4. clean up date format and build a combined date-time column
power$Date <- dmy(power$Date)
power$DateTime <- ymd_hms(paste(power$Date, power$Time))
# BUG FIX: the original called select() without assigning the result, so
# the intended move of DateTime to the first column silently did nothing;
# assign it back to `power`.
power <- select(power, DateTime, everything())

#5. filter on required dates
power <- filter(power, Date >= "2007-02-01" & Date <= "2007-02-02")

#6. create chart
png(file="plot1.png",width=480,height=480)
hist(power$Global_active_power, col = "red", main = "Global Active Power",xlab = "Global Active Power (kilowatts)")
dev.off()
|
7adcf5c66a253a4ccc47bb2e3b4a7cd4d7d5f4b6
|
cddd2e88cd7a47805cb08905ee2092f2e9775402
|
/working_files/construction/globals.R
|
44baacec7b9129b5345e756203c598df81b0ce13
|
[] |
no_license
|
saldaihani/quanteda
|
4e773bfc16b17918a105a43ba21883b7e95934b7
|
ed7147abb1b662e9a2df3bdc720d90da7af895ad
|
refs/heads/master
| 2021-01-17T22:14:52.448770
| 2015-02-13T17:50:14
| 2015-02-13T17:50:14
| 30,843,041
| 1
| 0
| null | 2015-02-15T21:33:31
| 2015-02-15T21:33:30
| null |
UTF-8
|
R
| false
| false
| 806
|
r
|
globals.R
|
####
#### PACKAGE GLOBALS
####
#
# Corpus-level settings recognised by the package.  An alternative design
# would be (also) system settings governing the creation of new corpus
# objects.

# Canonical names of the per-corpus settings, in their original order.
SETTINGS_OPTIONS <- c(
  "stopwords", "collocations", "dictionary", "dictionary_regex",
  "stem", "delimiter_word", "delimiter_sentence", "delimiter_paragraph",
  "clean_tolower", "clean_removeDigits", "clean_removePunct"
)

# Default delimiters used when tokenising text.
DEFAULT_DELIM_SENTENCE <- ".!?"
DEFAULT_DELIM_WORD <- " "
DEFAULT_DELIM_PARAGRAPH <- "\n\n"

# Persist the globals so the package can ship them as a data file.
save(SETTINGS_OPTIONS, DEFAULT_DELIM_SENTENCE, DEFAULT_DELIM_WORD,
     DEFAULT_DELIM_PARAGRAPH, file = "../data/globals.RData")
|
e496b45ae5bb9cf6375e24cd468b3a68e5ad63d1
|
4358d68c29080f75986580d3d9b16a7ef1082aa9
|
/setupCO.R
|
4b9f5d21a867156b1e490443a6ed132bde510ec2
|
[] |
no_license
|
dnychka/UrbanTypology
|
a362802614d5c0ca30859bf1fd9455832fbf2971
|
772ef23a3ebbf2f1afaf4b24e80344129a6a4771
|
refs/heads/master
| 2020-04-18T15:42:33.279759
| 2019-01-27T22:56:38
| 2019-01-27T22:56:38
| 167,618,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,145
|
r
|
setupCO.R
|
# rscript to read raw csv files and merge variables
#
# Builds a block-group-level data frame (CODemographic0) for Colorado by
# matching NHGIS census extracts (income / ethnicity / age-gender) against
# CNT housing+transport data on reconstructed block-group IDs, then saves
# the result (plus this script's own source text) to CO0.rda.
library( fields)
setwd("~/Dropbox/Home/Projects/UrbanTypology")
source("GEOID2GISJOIN.R")
INCOME<- read.csv("data/Income.csv" ,stringsAsFactors = FALSE )
ETHNICITY<- read.csv("data/Ethnicity.csv",stringsAsFactors = FALSE )
AGEGENDER<- read.csv("data/AgeGender.csv",stringsAsFactors = FALSE )
CNT<- read.csv("data/CNT_HT_CO for Doug.csv",
               stringsAsFactors = FALSE)
# strip out the quotes from CNT IDS
CNT$blkgrp <- substr(CNT$blkgrp,2,13 )
# sort out the labels
# Reconstruct comparable IDs from each source: block code + tract code.
tract1<- substr( AGEGENDER$GISJOIN,10, 15)
blk1 <- substr( AGEGENDER$GISJOIN, 6,8)
blk2<- substr( CNT$blkgrp,4,6 )
tract2<- substr( CNT$blkgrp, 7, 12)
ID1<- paste0( blk1, tract1)
ID2<- paste0( blk2, tract2)
look21<- match( ID2, ID1)
look12<- match( ID1, ID2)
# Side-by-side view of the unmatched IDs -- interactive inspection only;
# `hold` and `error` are not used later in the script.
hold<- cbind( sort( CNT$blkgrp[is.na(look21)]),
              sort( AGEGENDER$GISJOIN[is.na(look12)]) )
# check
error <- as.numeric(sort(ID1[is.na(look12)]))- as.numeric(sort(ID2[is.na(look21)]) )
sum( is.na( look21))
miss2<- is.na( look21)
# surgery to correct IDs
# The unmatched CNT block codes differ in their third digit; force a "0"
# there and rebuild both ID vectors before re-matching.
substr(blk2[miss2],3,3) <- "0"
ID1<- paste0( blk1, tract1)
ID2<- paste0( blk2, tract2)
index12<- match( ID1, ID2)
cat(" cases where substituting 0 in CNT ID",
    "does not give a match with the GISJOIN ids", fill=TRUE)
cat("This should be zero!", fill=TRUE)
print( sum( is.na( index12)))
# reorder CNT so that it matches the order of NHGIS
CNT <- CNT[index12,]
# Pull out the variables of interest.  Column codes (AF49E001 etc.) are
# NHGIS variable names -- TODO confirm meanings against the NHGIS codebook.
income<- INCOME$AF49E001
total<- ETHNICITY$AF2ME001
white<- ETHNICITY$AF2ME002
ageTotal<- AGEGENDER$AF2AE001
ageMale <- AGEGENDER$AF2AE002
# Sum the narrow age brackets into three broad bands per sex.
ageMale25X49<- rowSums(AGEGENDER[c("AF2AE011",
                                   "AF2AE012",
                                   "AF2AE013",
                                   "AF2AE014",
                                   "AF2AE015")])
ageMale50X64<- rowSums(AGEGENDER[c("AF2AE016",
                                   "AF2AE017",
                                   "AF2AE018",
                                   "AF2AE019")])
ageMale65X<- rowSums(AGEGENDER[c("AF2AE020",
                                 "AF2AE021",
                                 "AF2AE022",
                                 "AF2AE023",
                                 "AF2AE024",
                                 "AF2AE025")])
ageFemale <- AGEGENDER$AF2AE026
ageFemale25X49<- rowSums(AGEGENDER[c("AF2AE035",
                                     "AF2AE036",
                                     "AF2AE037",
                                     "AF2AE038",
                                     "AF2AE039")])
ageFemale50X64<- rowSums(AGEGENDER[c("AF2AE040",
                                     "AF2AE041",
                                     "AF2AE042",
                                     "AF2AE043")])
ageFemale65X<- rowSums(AGEGENDER[c("AF2AE044",
                                   "AF2AE045",
                                   "AF2AE046",
                                   "AF2AE047",
                                   "AF2AE048",
                                   "AF2AE049")])
population <- CNT$population
size <- CNT$land_acres
populationDensity<- population/size
intersection<- CNT$intersection_density
# Convert the age-band counts to proportions of the block-group total.
age<- data.frame( ageMale25X49, ageMale50X64, ageMale65X,
                  ageFemale25X49, ageFemale50X64, ageFemale65X)
age <- age/ageTotal
CODemographic0 <- data.frame( INCOME$GISJOIN, population, populationDensity,
                              intersection,income, white,age )
# Save this script's own source alongside the data for provenance.
thisFile<- scan( "setupCO.R", what="a", sep='\r')
save(CODemographic0,age,population, thisFile, file="CO0.rda" )
#### joined data test IDs Match
# Sanity check that the externally joined table (cbg_cnt_join.csv) carries
# IDs consistent with the NHGIS GISJOIN convention.
CO2<- read.csv("data/cbg_cnt_join.csv",
               stringsAsFactors = FALSE)
CO2$GEOID<- as.character(CO2$GEOID )
CO2$GISJOIN <- GEOID2GISJOIN(CO2$GEOID)
look31<- match(CO2$GISJOIN, INCOME$GISJOIN )
sum( is.na( look31))
look13<- match( INCOME$GISJOIN,CO2$GISJOIN )
sum( is.na( look13))
testID<- ( cbind( sort(CO2$GISJOIN[is.na(look31)] ) [1:10],
                  sort(INCOME$GISJOIN[is.na(look13)] ) [1:10])
)
# The two ID styles should agree apart from a fixed prefix offset.
testTRUE<- all(substr( testID[,1],2,14) == substr(testID[,2],3,15) )
cat("comparision should test true:", testTRUE, fill=TRUE)
|
0177989710198a469983eebcd0c1b4175e5500d1
|
4c75e5176930de5bd356025bdffc3068aa6ecbfa
|
/cw1/zestaw10.R
|
5a9bc5457473cac45f333a9a1a700bb0a4739d96
|
[] |
no_license
|
agnieszkajadczak/ts-r-clone
|
b8f43f4b90f171b908e8a67ef809588b17c5f43d
|
671ae6c65e06da1dbb5af38c291a3bf772e40aaa
|
refs/heads/master
| 2020-04-29T12:30:37.645123
| 2019-03-17T18:02:57
| 2019-03-17T18:02:57
| 176,139,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 190
|
r
|
zestaw10.R
|
## Task
## Write a function that checks whether the parentheses in a given
## algebraic expression are correctly balanced,
## e.g. 3*(2+4*4)-2+(3+4^2)
#
# Implements the previously empty stub.  The new parameter has a default
# so existing zero-argument calls keep working.
#
# @param wyrazenie a single character string holding the expression
# @return TRUE if every "(" has a matching ")" in the correct order,
#         FALSE otherwise
sprawdzacz_nawiasow = function(wyrazenie = ""){
  znaki <- strsplit(wyrazenie, "", fixed = TRUE)[[1]]
  glebokosc <- 0L  # current nesting depth
  for (znak in znaki) {
    if (znak == "(") {
      glebokosc <- glebokosc + 1L
    } else if (znak == ")") {
      glebokosc <- glebokosc - 1L
      # a ")" with no open partner invalidates the expression immediately
      if (glebokosc < 0L) return(FALSE)
    }
  }
  # balanced iff every opened parenthesis was closed
  glebokosc == 0L
}
|
73a4bf7d08c1a3d7051a96ffd5b8062d2beff2c3
|
59be04a77395cde47e19ee81b887fde30d68f9c5
|
/man/get_package_scope_var.Rd
|
e729bee684bcff00eac1c31c0c12f64fbd14716b
|
[
"Apache-2.0"
] |
permissive
|
ctsit/redcapcustodian
|
9f2a12017ea8db8e8dbe97cd4a26afe934e7f769
|
07ebc86a5ef2b6c8d52d9a783a4b33de9e7ebda1
|
refs/heads/master
| 2023-08-30T21:40:44.975651
| 2023-08-24T13:50:51
| 2023-08-24T13:50:51
| 377,900,072
| 14
| 5
|
NOASSERTION
| 2023-08-24T10:52:56
| 2021-06-17T16:45:41
|
R
|
UTF-8
|
R
| false
| true
| 433
|
rd
|
get_package_scope_var.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_package_scope_var}
\alias{get_package_scope_var}
\title{Get the value from the redcapcustodian.env environment}
\usage{
get_package_scope_var(key)
}
\arguments{
\item{key}{The identifying string to lookup}
}
\description{
Get the value from the redcapcustodian.env environment
}
\examples{
\dontrun{
get_package_scope_var("hello")
}
}
|
9e51afabed53470615530e0109812de82369ee37
|
3c8db5dd81f456d454c864dad491e63247a643d2
|
/s01_qc_processing/qc_processing_report.pdf/app.R
|
ef87fbb0dec8d7cb2931c6ae4fe23c1fb4f4108e
|
[] |
no_license
|
elliefewings/scRNA_analysis
|
b8c7a7adb3c936d6df576eaa5ad7defc8bffe2e4
|
11a4f403d37d80a9c8069028c1550dbdc6bf646c
|
refs/heads/master
| 2023-05-01T23:07:24.943284
| 2021-05-27T12:38:59
| 2021-05-27T12:38:59
| 290,720,132
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,178
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
#load(rdata)
# Define UI for application
# UI for the single-sample scRNA-seq QC report.
# Sections, separated by horizontal rules: (1) script options + pre/post
# filtering summary beside the initial QC plots, (2) PC selection and PCA
# plots, (3) clustree plot, (4) demultiplexing (doublet/hashtag) plots.
ui <- shinyUI(fluidPage(
  #Set style for vertical alignment
  tags$head(tags$style(
    HTML('
         #vert {
         display: flex;
         align-items: center;
         margin-top: 50px;
         }
         '))),
  # Application title (rendered server-side so it can carry warnings)
  titlePanel(htmlOutput("title")),
  # NOTE(review): the <a> tag below has empty link text -- confirm intended.
  fluidRow(HTML("<h3>For interactive report, load RData output here: <a href='https://saezlab.shinyapps.io/qc_processing_report'> </a></h3>")),
  fluidRow(tags$hr(style="border-color: black;")),
  #Summary data
  fluidRow(column(4, align="left", h3(tags$b("Script options"))), column(8, align="center", h3(tags$b("Quality Control Plots")))),
  fluidRow(column(4,
                  wellPanel(
                    uiOutput("input"),
                    uiOutput("output"),
                    uiOutput("mincells"),
                    uiOutput("minfeatures"),
                    uiOutput("maxfeatures"),
                    uiOutput("maxpercentmt")
                  ),
                  wellPanel(align="center", tableOutput("sum"))),
           column(8, align="right", id="vert", plotOutput("initQC", width="100%", height="600px"))),
  #PCA plots
  fluidRow(tags$hr(style="border-color: black;")),
  # NOTE(review): "Principle Components" (sic) is user-visible text; fixing
  # the spelling would change the rendered page, so it is left as-is here.
  fluidRow(column(6, align="center", h3(tags$b("Selection of Principle Components"))), column(6, align="center", h3(tags$b("Plotting Principle Components")))),
  fluidRow(column(6, align="center", id="vert", plotOutput("npcs", width="80%", height="400px")), column(6, id="vert", align="center", plotOutput("pca", width="80%", height="400px"))),
  fluidRow(tags$hr(style="border-color: black;")),
  # Clustree plot
  fluidRow(column(12, align="center", h3(tags$b("Selection of K from Clustree")))),
  fluidRow(column(12, align="center", id="vert", plotOutput("clust", width="60%", height="800px"))),
  fluidRow(tags$hr(style="border-color: black;")),
  #Demultiplex plots (titles rendered server-side; only shown for hashtag runs)
  fluidRow(column(5, align="center", h3(tags$b(textOutput("doubtitle")))), column(7, align="center", h3(tags$b(textOutput("hashtitle"))))),
  fluidRow(column(5, align="right", id="vert", plotOutput("doublets", width="80%", height="700px")), column(7, id="vert", align="right", plotOutput("hashtags", width="80%", height="700px")))
))
# Define server logic
# Server for the QC report.  Relies on objects expected to already exist in
# the global environment -- nsinglets, sample, opt, qc1/qc2/qc3, npcs, pca,
# clust, doublet, ridge, data.meta.summ -- presumably loaded from the RData
# file referenced at the top of this script (see commented load()); confirm.
server <- shinyServer(function(input, output, session) {
  #Set text outputs
  #Set title based on data quality: plain title, or an inline-HTML warning
  #when the number of singlets is low.
  headtitle <- NULL
  #Set title and colour if > 1000 cells
  headtitle[nsinglets > 1000] <- paste("QC Report:", sample)
  #Set title and colour if between 500 and 1000 cells
  headtitle[nsinglets > 500 & nsinglets <= 1000] <- paste("QC Report: ", sample, ' <font style=color:orange !important >(WARNING: Fewer than 1000 singlets)</font>', sep="")
  #Set title and colour if < 500 cells
  headtitle[nsinglets <= 500] <- paste("QC Report: ", sample, ' <font color="red">(WARNING: Fewer than 500 singlets)</font>', sep="")
  output$title <- renderText({HTML(headtitle)})
  # Abbreviate long paths to their trailing ~50 characters, then trim the
  # partial leading directory component left by the cut.
  inshort <- ifelse(nchar(opt$input) > 50,
                    substr(opt$input, nchar(opt$input)-50, nchar(opt$input)),
                    opt$input) %>% sub(".*?/", "/", .)
  output$input <- renderText({HTML(paste("<b>","Input Directory:", "</b>", "...", inshort))})
  outshort <- ifelse(nchar(opt$output) > 50,
                     substr(opt$output, nchar(opt$output)-50, nchar(opt$output)),
                     opt$output) %>% sub(".*?/", "/", .)
  output$output <- renderText({HTML(paste("<b>", "Output Directory:", "</b>", "...", outshort))})
  # Echo the filter thresholds the pipeline was run with.
  output$mincells <- renderText({HTML(paste("<b>", "Minimum Cells Filter:", "</b>", opt$mincells))})
  output$minfeatures <- renderText({HTML(paste("<b>", "Minimum Features Filter:", "</b>", opt$minfeatures))})
  output$maxfeatures <- renderText({HTML(paste("<b>", "Maximum Features Filter:", "</b>", opt$maxfeatures))})
  output$maxpercentmt <- renderText({HTML(paste("<b>", "Maximum Percentage MT Filter:", "</b>", opt$maxpercentmt))})
  #Set QC plots: strip the x axis from the three violin panels, retitle the
  #two scatter panels, and arrange all five in a 3-over-2 grid.
  output$initQC <- renderPlot({
    qc1.1 <- qc1[[1]] + theme(axis.title.x=element_blank(), axis.text.x=element_blank())
    qc1.2 <- qc1[[2]] + theme(axis.title.x=element_blank(), axis.text.x=element_blank())
    qc1.3 <- qc1[[3]] + theme(axis.title.x=element_blank(), axis.text.x=element_blank())
    qc2 <- qc2 + theme(legend.position = "none", title = element_text(size=10)) + labs(title = "Percentage mitochondrial features vs number of molecules detected")
    qc3 <- qc3 + theme(legend.position = "none", title = element_text(size=10)) + labs(title = "Number of unique features vs number of molecules detected")
    plot <- grid.arrange(arrangeGrob(qc1.1, qc1.2, qc1.3, ncol=3), arrangeGrob(qc2, qc3, ncol=2), heights=c(2.5/4, 1.5/4), ncol=1)
    plot})
  #Set pre/post filtering table (row labels carry <b> tags; sanitization is
  #disabled in renderTable below so they render as HTML).
  row.names(data.meta.summ) <- c(
    "<b>Number of cells</b>",
    "<b>Median nCount_RNA</b>",
    "<b>Minimum nCount_RNA</b>",
    "<b>Maximum nCount_RNA</b>",
    "<b>Median nFeature_RNA</b>",
    "<b>Minimum nFeature_RNA</b>",
    "<b>Maximum nFeature_RNA</b>",
    "<b>Median percent.mt</b>",
    "<b>Minimum percent.mt</b>",
    "<b>Maximum percent.mt</b>"
  )
  colnames(data.meta.summ)[1:2] <- c("Pre-filtering", "Post-filtering")
  output$sum <- renderTable(data.meta.summ, spacing = "l", rownames=TRUE, digits=0, hover=TRUE, sanitize.text.function=function(x){x})
  #Set npcs plot
  output$npcs <- renderPlot(npcs$plot + theme(text = element_text(size=20)))
  #Set PCA and feature plot event
  output$pca <- renderPlot(pca + theme(text = element_text(size=20), legend.position = "none"))
  # Plot clustree data
  output$clust <- renderPlot(clust)
  #Plot hashtag data -- only populated when the run was demultiplexed
  if (!is.null(opt$hashtag)) {
    output$doubtitle <- renderText("Number of Doublets Identified")
    output$hashtitle <- renderText("Expression Counts Over Hashtags")
    output$doublets <- renderPlot(doublet)
    output$hashtags <- renderPlot(ridge)
  }
})
|
82b20562de11c8aa29a9b4dcf0c4885a4adfd8a3
|
70ada1dd1ad90e5856e43d06de7c97ce25f6a54d
|
/ui.R
|
ea3d4914f9a16e56310097c2dfccb62ec8cd13a5
|
[] |
no_license
|
yucedincer/Visualization_Formula-1_RShiny_App
|
38da95dc9d74dff7f4074c48db50ac874f9f8c7e
|
cefc2ae51182420becbacfc29995414ed96afb39
|
refs/heads/master
| 2021-06-13T01:04:39.306757
| 2021-04-28T20:14:18
| 2021-04-28T20:14:18
| 173,119,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,792
|
r
|
ui.R
|
# shinydashboard UI for the FIA Formula 1 explorer.
# Five menu tabs: About (embedded video), Drivers, Standings, Races, and a
# race animation, plus an external Source Code link.
# NOTE(review): year_vector, year_vector_animation and for_map2 are not
# defined here -- presumably created in global.R; confirm.
shinyUI(
  dashboardPage(skin = "red",
                dashboardHeader(title = "FIA Formula 1"),
                dashboardSidebar(width = 200,
                                 sidebarUserPanel("Rifat DINCER",
                                                  image = "https://images-na.ssl-images-amazon.com/images/I/61Ysu6NeEFL._SY355_.jpg"),
                                 sidebarMenu(
                                   menuItem("About", tabName = "about", icon = icon("book")),
                                   menuItem("Drivers", tabName = "drivers", icon = icon("graduation-cap")),
                                   menuItem("Standings", tabName = "standings", icon = icon("line-chart")),
                                   menuItem("Races", tabName = "races", icon = icon("line-chart")),
                                   menuItem("Let's Race", tabName = "race_animation", icon = icon("line-chart")),
                                   menuItem("Source Code", icon = icon("file-code-o"),
                                            href = "https://github.com/yucedincer/")
                                 )),
                dashboardBody(
                  # Override default dashboard colours (white body, black sidebar).
                  tags$head(
                    tags$style(HTML("
                                    .content-wrapper {
                                    background-color: white !important;
                                    }
                                    .main-sidebar {
                                    background-color: black !important;
                                    }
                                    "))
                  ),
                  tabItems(
                    tabItem(tabName = "about",
                            iframe(width="1120", height="630", url_link = "https://www.youtube.com/embed/k4Pegt-HcI8")),
                    # Driver summary plots plus a searchable table.
                    tabItem(tabName = "drivers",
                            fluidPage(
                              h3("Driver Info"),
                              fluidRow(column(width = 6,plotlyOutput("champ_counts")),
                                       column(width = 6, plotlyOutput("country_wins"))),
                              fluidRow(DT::dataTableOutput("drivers_table"))
                            )),
                    # Animated re-run of a selected Grand Prix.
                    tabItem(tabName = "race_animation",
                            fluidPage(
                              h4("Please Select a F1 Season & Grand Prix Name"),
                              fluidRow(column(width = 4, selectizeInput('year_animation', label = "F1 Season", choices = year_vector_animation)),
                                       column(width = 4, uiOutput("gp_name_animation"))),
                              h5("Even if you see an error, please be patient, animation is loading!"),
                              plotlyOutput("race_animation")
                            )),
                    # Per-race standings with three headline value boxes.
                    tabItem(tabName = "standings",
                            fluidPage(
                              h1("Grand Prix Standings", align = "center"),
                              br(),
                              h6("Please Select a Season & Grand Prix", align = "center"),
                              br(),
                              fluidRow(column(width = 4, selectizeInput('year_table1', label = "F1 Season", choices = year_vector)),
                                       column(width = 4, uiOutput("dynamic_widget"))),
                              br(),
                              fluidRow(
                                column(4,
                                       uiOutput("max_speed")
                                ),
                                column(4,
                                       uiOutput("gps_held")
                                ),
                                column(4,
                                       uiOutput("fastest_lap_time")
                                )),
                              fluidRow(DT::dataTableOutput("race_standings_table"))
                            )),
                    # Season schedule: small selector/table beside a world map.
                    tabItem(tabName = "races",
                            h2('Formula 1 Races Schedule Throughout the History'),
                            fluidRow(column(width = 1),
                                     column(width = 2, title = "Select a Formula 1 Season", solidHeader = TRUE, status = "primary",
                                            selectInput(inputId = "year_races", label = '', choices = sort(unique(for_map2$year)),
                                                        selected = NULL, multiple = FALSE),
                                            DT::dataTableOutput("mini_table")),
                                     column(width = 9, plotlyOutput("race_map", height = 700), solidHeader = TRUE, status = "primary")
                            )
                    )
                  )
                )
  ))
|
f26d63059718184b1f51d6957c80c3e6f432ac14
|
3df5e0d9cd46d3d86fe6879d85c96a3f8829fbd3
|
/man/Binary_Performance_Class.Rd
|
0cdc29e5561c15cdff160148d642e86aff5b8c03
|
[] |
no_license
|
gravesee/rubbish
|
d69e1de8a8c6be9084941eeb0b0d39c391a3fb74
|
fce7aba82bbfead3a28bf16bc2972491d6a1991b
|
refs/heads/master
| 2021-06-12T23:16:47.411331
| 2017-04-10T00:21:33
| 2017-04-10T00:21:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 474
|
rd
|
Binary_Performance_Class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binary_performance_class.R
\docType{class}
\name{Binary_Performance_Class}
\alias{Binary_Performance_Class}
\alias{Binary_Performance}
\title{Bin reference class generator}
\description{
Binary_Performance object generator class that implements
the Performance class methods
}
\section{Fields}{
\describe{
\item{\code{ones}}{sum of \code{y == 1}}
\item{\code{zeros}}{sum of \code{y == 0}}
}}
|
574a11103d12bb4b364ae1ded1f715132073b8f4
|
d580fe730255becf6af4c6cea02ed5a5f3a2398a
|
/treefortbnb/00.R
|
9b5940fadadda787af26e1c0053e5e7bd6807320
|
[] |
no_license
|
jpmarindiaz/playground
|
bdfd78ad30696fb9020f37fe022a7b1c1ecebb28
|
57fc324c4e1fa60cfe4578de51920e6d8aeeb669
|
refs/heads/master
| 2016-09-13T06:26:25.003784
| 2016-05-05T03:20:29
| 2016-05-05T03:20:29
| 58,100,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,260
|
r
|
00.R
|
# TreefortBnB puzzle: median listing price for the 100 cities with the most
# units on the market, ranked from most to least expensive (output.csv).
library(dplyr)
library(tidyr)

## Cleaning
# dictionaryMatch() comes from the sourced helper file.
source("str_helpers.R")

# Start Analysis
d0 <- read.csv("Data+for+TreefortBnB+Puzzle.csv", stringsAsFactors = FALSE)
names(d0) <- c("id","city","state","price","nreviews")
d0$originalCities <- paste(d0$city, d0$state, sep = " - ")

# check if clean (interactive inspection of raw city/state values)
sort((unique(d0$city)))
sort((unique(d0$state)))
length((unique(d0$state)))

# cities and states
# City original from: http://simplemaps.com/resources/us-cities-data (cities.csv)
# with some manual tweaking
cityNames <- read.csv("us-cities.csv")
dict <- cityNames[c('name','alternativeNames')]

originalCities <- d0$originalCities
originalCities <- unique(originalCities)

# Let's make an approximate (fuzzy, max edit distance 2) replace
cleanCities <- dictionaryMatch(originalCities,dict, maxDistance = 2)
cities <- data.frame(originalCities = originalCities, cleanCities = cleanCities)

# "Which Are the Most Expensive Cities in America to Book a Tree Fort?"
d <- merge(d0, cities, by = "originalCities")
names(d)
d <- d[c("id","cleanCities","price","nreviews","state")]
names(d)[2] <- "city"

## Analysis
# median price of a tree fort in each of the top 100 cities that have the most units on the market.
# Just send us back a table with a list of the median price in each city, ranked from most to least expensive.
t <- d %>%
  group_by(city) %>%
  #filter(!is.na(city)) %>%
  summarise(medianPrice = median(price), units = n()) %>%
  # make the ranking variable explicit (top_n() previously defaulted to the
  # last column, which happened to be `units`)
  top_n(100, units) %>%
  # BUG FIX: the spec asks for most-to-least expensive; the original
  # arrange(medianPrice) sorted ascending (least expensive first)
  arrange(desc(medianPrice)) %>%
  select(city,medianPrice)
write.csv(t,"output.csv",row.names = FALSE)
# Get additional data: scrape city/state population tables from Wikipedia,
# join them onto the listings, and build a bivariate choropleth of units
# vs. mean price by state.  getWikipediaTable/removeNotes/trim_spaces come
# from the sourced helper file.
source("scrape_helpers.R")
url <- "https://en.wikipedia.org/wiki/List_of_United_States_cities_by_population"
population <- getWikipediaTable(url)
population <- population[c("City","State[5]","2014 estimate")]
names(population) <- c("city","state","population")
# Strip Wikipedia footnote markers, then coerce "1,234,567" -> 1234567.
population$city <- removeNotes(population$city)
population$state <- removeNotes(population$state)
population$population <- as.numeric(gsub("[^[:digit:]]","",population$population))
url <- "https://en.wikipedia.org/wiki/List_of_states_and_territories_of_the_United_States"
states <- getWikipediaTable(url,1)
states <- states[c("State","Abbr.")]
names(states) <- c("state","abbr")
write.csv(states,"states.csv",row.names = FALSE)
states$state <- trim_spaces(states$state) %>% removeNotes()
population <- merge(population,states, by = "state",all.x = TRUE)
# Rebuild the "City - ST" key used by the listings table `d`.
population$city <- paste(population$city, population$abbr, sep=" - ")
d2 <- merge(d,population[c("population","city")])

# Cities with the most units percapita (top 10)
t2 <- d2 %>%
  select(city,population) %>%
  group_by(city,population) %>%
  summarise(units = n()) %>%
  mutate(unitsPerCapita = units/population) %>%
  ungroup() %>%
  arrange(desc(unitsPerCapita)) %>%
  top_n(10)

# Per-state unit counts and mean prices, saved for the map below.
t3 <- d %>%
  select(state,price) %>%
  group_by(state) %>%
  summarise(units = n(), meanPrice = mean(price))
write.csv(t3,"treeforbnb-states.csv",row.names = FALSE)

library(dmaps)
# Experimental package: htmlwidgets wrapper for datamaps in R
# devtools::install_github("jpmarindiaz/dmaps")

## Sort of hacky bivariate Choropleth
d <- read.csv("treeforbnb-states.csv")
# NOTE(review): cut2() is normally from Hmisc, which is not attached here --
# confirm it is exported by dmaps or add library(Hmisc).
var1 <- cut2(d[,2],g=3)
levels(var1) <- c("x1","x2","x3")
var2 <- cut2(d[,3],g=3)
levels(var2) <- c("y1","y2","y3")
# Combine the two terciles into a 3x3 group code (x1y1 .. x3y3).
d$group <- paste(var1,var2,sep="")
groups2d <- apply(expand.grid(paste0("x",1:3),paste0("y",1:3)),1,
                  function(r)paste0(r[1],r[2]))
# Standard 3x3 bivariate colour scheme, one hex colour per group.
colors2d <- c("#e8e8e8","#e4acac","#c85a5a","#b0d5df","#ad93a5","#985356","#64acbe","#62718c","#574249")
customPalette <- data.frame(group = groups2d, color = colors2d)
opts <- list(
  defaultFill = "#FFFFFF",
  borderColor = "#CCCCCC",
  borderWidth = 0.3,
  highlightFillColor = "#999999",
  highlightBorderWidth = 1,
  palette = "PuBu",
  customPalette = customPalette,
  choroLegend = list(show = FALSE),
  bivariateLegend = list(show = TRUE, var1Label = "Units", var2Label = "Median Price")
)
# First call renders interactively; second call is kept for saving.
dmaps("us_states", data = d,
      groupCol = "group",
      regionCols = "state",
      opts = opts)
map <- dmaps("us_states", data = d,
      groupCol = "group",
      regionCols = "state",
      opts = opts)
# NOTE(review): saveWidget() is from htmlwidgets, which is not attached
# here -- confirm it is on the search path or add library(htmlwidgets).
saveWidget(map,"map.html")
|
b850e3c47daebed0f8b693b331bc827d10b01e61
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/77-TS/55b-ts-case-xxx2.R
|
a0ec1facaa0c03fcea7ebb56bab01f07954d15dc
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
r
|
55b-ts-case-xxx2.R
|
# Aggregating TS
# Exploratory script: load a per-household time-series CSV (expects columns
# timestamp, house_id, household_id, value), build an xts object, and try a
# few aggregation approaches.
data = read.csv(file.choose())
str(data)
head(data)
names(data)
nrow(data)
df= data
# timestamp is assumed to be a Unix epoch in seconds -- TODO confirm.
df$timestamp2 = as.POSIXct(df$timestamp ,origin="1970-01-01")
head(df)
library(zoo)
library(xts)
# Time-indexed view of the first three columns.
ts1 = xts(df[1:3], order.by=df$timestamp2)
class(ts1)
ts1
# use dplyr
library(dplyr)
# NOTE(review): the derived `hour` column is never included in group_by(),
# so this summarises per household over ALL time, not per hour.  Also,
# as.Date()'s format argument does not apply here (timestamp2 is already
# POSIXct) and a Date cannot carry an hour component anyway.
hourly_summary <- df %>% mutate(hour = as.Date(timestamp2, format="%Y-%m-%d %H")) %>% group_by(house_id, household_id) %>% summarise(hoursum=sum(value),hourlsd = sd(value), hourmean=mean(value))
hourly_summary
class(hourly_summary)
# Grouped-only pipeline (no summarise) -- returns the grouped tbl unchanged.
df %>% group_by(house_id, household_id, as.Date(timestamp2, format="%Y-%m-%d") )
# Base-R equivalent of the household totals.
aggregate(value ~ house_id + household_id, data=df, FUN=sum)
# NOTE(review): broken leftover line -- neither `Data` nor `dates` is
# defined anywhere in this script; this errors if executed.
Data$hour <- as.POSIXlt(dates)$hour # extract hour of the day
|
12521443382eb85f2c879c94b49dca1843c00373
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dna/examples/resultsClassTest-class.rd.R
|
57a32ce033a3dfb795f60ee19916460b57219e09
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 865
|
r
|
resultsClassTest-class.rd.R
|
# Auto-extracted example script for the dna package's resultsClassTest class.
library(dna)
### Name: resultsClassTest-class
### Title: Class '"resultsClassTest"'
### Aliases: resultsClassTest-class show,resultsClassTest-method
###   get.results,resultsClassTest-method
### Keywords: classes

### ** Examples

# small example illustrating test procedures
# Two expression matrices: 4 samples x 6 genes (G1..G6) and
# 5 samples x 7 genes (G8 down to G2); gene sets deliberately overlap
# only partially.
X1=rbind(
c(2.5,6.7,4.5,2.3,8.4,3.1),
c(1.2,0.7,4.0,9.1,6.6,7.1),
c(4.3,-1.2,7.5,3.8,1.0,9.3),
c(9.5,7.6,5.4,2.3,1.1,0.2))
colnames(X1)=paste("G",1:6,sep="")
X2=rbind(
c(4.5,2.4,6.8,5.6,4.5,1.2,4.5),
c(7.6,9.0,0.1,3.4,5.6,5.5,1.2),
c(8.3,4.5,7.0,1.2,4.3,3.7,6.8),
c(3.4,1.1,6.9,7.2,3.1,0.9,6.6),
c(3.4,2.2,1.3,5.5,9.8,6.7,0.6))
colnames(X2)=paste("G",8:2,sep="")

# perform a test for differential connectivity of all genes
# with PLS connectivity scores and squared distances
# (wrapped in "Not run" because the test is slow for R CMD check)
## Not run: tcg=test.class.genes(X1,X2)
## Not run: results.tcg=get.results(tcg)
## Not run: results.tcg
|
520ba6f729cad1101b7bbaa7f4fc1563e2d8953b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/darch/R/loadRBMFFWeights.R
|
eacac3e0fbe300bf350f3b19eaa4f8a95700d79c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,580
|
r
|
loadRBMFFWeights.R
|
# Copyright (C) 2013-2015 Martin Drees
#
# This file is part of darch.
#
# darch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# darch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with darch. If not, see <http://www.gnu.org/licenses/>.
#' Loads weights and biases for a RBM network from a ffData file.
#'
#' Loads the weights and the biases for the given RBM object from the filename
#' given through the parameter \code{name}. See \code{\link{ffload}} for more
#' details
#'
#' @param rbm A instance of the class \code{\link{RBM}}.
#' @param name The name of the file without the ending ".net".
#' @return \code{rbm} - The RBM with the loaded weights and biases.
#' @usage loadRBMFFWeights(rbm,name)
#'
#' @seealso \code{\link{ffload}}, \code{\link{saveRBM}}, \code{\link{loadRBM}}, \code{\link{saveRBMFFWeights}}
#'
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname loadRBMFFWeights
loadRBMFFWeights <- function(rbm,name){
  # Placeholder bindings: ffload() below restores objects that were saved
  # under the names "w", "h" and "v" (presumably by saveRBMFFWeights --
  # see @seealso; confirm).  The initial values only ensure the names are
  # bound before the load.  NOTE(review): do NOT rename these locals; they
  # must match the object names stored in the ff file.
  w <- 1
  v <- 1
  h <- 1
  # Load "<name>-WB", overwriting the placeholders above in this frame.
  ffload(paste(name,"-WB",sep=""),overwrite=TRUE)
  # Copy the restored ff objects into the RBM's S4 slots.
  rbm@ffWeights <- w
  rbm@ffHiddenBiases <- h
  rbm@ffVisibleBiases <- v
  return(rbm)
}
|
8c14b5d7a74b48335c2dbe15df1f5fba119f339d
|
fa3f3a781ba0f51b1be2b42d44defb1ed9de65fd
|
/workout03/binomial/man/bin_kurtosis.Rd
|
fadf1fbc38411e193199fdde10ed6eb4c101059a
|
[] |
no_license
|
stat133-sp19/hw-stat133-liusabrina
|
0ca13ee3f75516f93b4283282b68378ac271b638
|
55b9d19b76607dce36963213e611a55624b73f6c
|
refs/heads/master
| 2020-04-28T07:31:19.641362
| 2019-05-03T20:40:35
| 2019-05-03T20:40:35
| 175,095,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 496
|
rd
|
bin_kurtosis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main-functions-and-methods.R
\name{bin_kurtosis}
\alias{bin_kurtosis}
\title{binomial kurtosis}
\usage{
bin_kurtosis(trials, prob)
}
\arguments{
\item{trials}{number of trials, a non-negative integer}
\item{prob}{probability of success, a number between 0 and 1, inclusive}
}
\value{
kurtosis of binomial
}
\description{
calculates the kurtosis of the binomial
}
\examples{
bin_kurtosis(10, 0.3)
# [1] -0.1238095
}
|
46bf9975311a476cd06c3e78e5f2bc067c9c3ac8
|
351b370aa66d3e128bfbf830bf112f9bfe1afb07
|
/man/Layout.Rd
|
50872d493a7e736843f88759bbf60c469ebc6980
|
[
"MIT"
] |
permissive
|
s-fleck/lgr
|
725313826efaefe6fd9be9603954fc7c5db9585a
|
cd21d695008e9adc0061c2e4eb44174388d364ce
|
refs/heads/master
| 2023-08-10T05:56:10.731193
| 2023-03-04T20:02:11
| 2023-03-04T20:02:11
| 154,803,571
| 74
| 10
|
NOASSERTION
| 2023-07-21T13:13:59
| 2018-10-26T08:39:18
|
HTML
|
UTF-8
|
R
| false
| true
| 2,827
|
rd
|
Layout.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Layout.R
\name{Layout}
\alias{Layout}
\alias{Layouts}
\title{Abstract Class for Layouts}
\description{
Abstract Class for Layouts
Abstract Class for Layouts
}
\details{
\link{Appenders} pass \link[=LogEvent]{LogEvents} to a Layout which formats them for
output. For the Layouts included in lgr that means turning the LogEvent
into a \code{character} string.
For each Appender there exist one or more possible Layouts, but not every Layout
will work with every Appender. See the package \pkg{lgrExtra} for examples
for Layouts that return different data types (such as \code{data.frames}) and
Appenders that can handle them.
}
\section{Notes for developers}{
Layouts may have an additional \verb{$read(file, threshold, n)} method that returns
a \code{character} vector, and/or an \verb{$parse(file)} method that
returns a \code{data.frame}. These can be used by Appenders to \verb{$show()} methods
and \verb{$data} active bindings respectively (see source code of \link{AppenderFile}).
}
\seealso{
Other Layouts:
\code{\link{LayoutFormat}},
\code{\link{LayoutGlue}},
\code{\link{LayoutJson}}
}
\concept{Layouts}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-Layout-format_event}{\code{Layout$format_event()}}
\item \href{#method-Layout-toString}{\code{Layout$toString()}}
\item \href{#method-Layout-clone}{\code{Layout$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Layout-format_event"></a>}}
\if{latex}{\out{\hypertarget{method-Layout-format_event}{}}}
\subsection{Method \code{format_event()}}{
Format a log event
Function that the Layout uses to transform a \link{LogEvent} into something
that an \link{Appender} can write to an output destination.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Layout$format_event(event)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{event}}{a \link{LogEvent}}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Layout-toString"></a>}}
\if{latex}{\out{\hypertarget{method-Layout-toString}{}}}
\subsection{Method \code{toString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Layout$toString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-Layout-clone"></a>}}
\if{latex}{\out{\hypertarget{method-Layout-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Layout$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
75bcf4590075c80e145d002827c4437e2f79f69e
|
da04803dd0714434a1e0d458616fd9ecfdecbcce
|
/man/dag_ex4.Rd
|
df9fcb02c41e01e5d55b9c018cf2adbbe5075f38
|
[] |
no_license
|
cran/abn
|
b232e17d29eba356f5b1df5d50c27e17de860422
|
e393f625a9de98adb351ac007b77c87d430cb7bf
|
refs/heads/master
| 2023-05-25T02:14:43.027190
| 2023-05-22T12:50:24
| 2023-05-22T12:50:24
| 17,694,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
rd
|
dag_ex4.Rd
|
\name{ex4.dag.data}
\docType{data}
\alias{ex4.dag.data}
\title{Valdiation data set for use with abn library examples}
\description{2000 observations across with 10 binary variables and one grouping variable. Real (anonymised) data of unknown structure.
}
\usage{ex4.dag.data}
\format{A data frame with eleven columns:
\code{group} factor with 85 levels defining sampling groups;
\code{b1,\dots,b10} binary variables encoded as factors.
}
\keyword{datasets}
|
56b94d8f9893bcbf31cc52087d6e2c1d48e38fd8
|
d6394e20f7850e78b88ef99130ae4d9a7d10d85f
|
/ui.R
|
39ea68f9cf173394304c766c1654754885a7b2d5
|
[] |
no_license
|
pedrobaigorri/DataScienceCapstone
|
ac9288b663786bd2aa759211aad7e0127b7bcc05
|
049f2df10b60aaa18f7176ddebafdcbcb3d23d8c
|
refs/heads/master
| 2020-03-19T09:54:56.015494
| 2018-08-24T07:45:51
| 2018-08-24T07:45:51
| 136,327,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,600
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
# Author Pedro A. Alonso Baigorri
#
library(shiny)
library(markdown)

# Sidebar: free-text sentence input, a result-count slider, and a submit
# button (inputs only reach the server when "Predict!" is pressed).
predictor_sidebar <- sidebarPanel(
  p("Input your sentence. I will predict the next word for you:"),
  textAreaInput("sentence", "Text", ""),
  br(),
  p("Select the maximum number of results to show:"),
  sliderInput("max_results", "Number of Results:", min = 1, max = 20, value = 5),
  br(),
  submitButton(text = "Predict!", icon = NULL, width = NULL)
)

# Main panel: renders the server's prediction table.
predictor_main <- mainPanel(
  h3("Your prediction is:"),
  h3(tableOutput("result"))
)

shinyUI(
  navbarPage(
    "Welcome to my Impressive Text Predictor!",
    tabPanel("Text Predictor",
             sidebarLayout(predictor_sidebar, predictor_main)),
    tabPanel("Documentation", includeMarkdown("about.md"))
  )
)
|
0f66b52b6189f594ec664b540fa12f4df7416eca
|
7d5d635837cc685935dfc06365a13e9471304cc5
|
/R/get_scanbase.R
|
bdbbd7c4aa82809cd62e4f9d3445f22a31fc5c45
|
[
"MIT"
] |
permissive
|
Mouse-Imaging-Centre/scanbaseTools
|
9ff011e27317930eb073c03edd7c61403cb5d9d0
|
5a76d0e9b9e90b5b3422d9c25ed36062172346bb
|
refs/heads/main
| 2022-02-17T12:49:27.565888
| 2022-01-27T14:59:58
| 2022-01-27T14:59:58
| 195,113,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,266
|
r
|
get_scanbase.R
|
#' Read the scanbase google sheet
#'
#' Downloads the sheet as a temporary xlsx workbook and reads each worksheet.
#' The temporary file is removed when the function exits.
#'
#' @param db The google sheet title.
#' @return A named list with elements `scans`, `studies`, `genotypes`,
#'   `treatments`, and `global`, one per worksheet of the workbook.
#' @export
read_scanbase <- function(db = "test_scanbase_40um"){
  tfile <- tempfile(fileext = ".xlsx")
  # Previously the downloaded workbook was left behind in the session's
  # temp directory; clean it up regardless of how we exit.
  on.exit(unlink(tfile), add = TRUE)
  googledrive::drive_download(db, path = tfile)
  list(scans = readxl::read_excel(tfile, "Scans")
     , studies = readxl::read_excel(tfile, "Studies")
     , genotypes = readxl::read_excel(tfile, "Genotypes")
     , treatments = readxl::read_excel(tfile, "Treatments")
     , global = readxl::read_excel(tfile, "Global")
       )
}
#' Load pydpiper pipeline results
#'
#' Collects transforms, determinants, and study-level files from a pydpiper
#' output directory, computing scan-to-global determinants when they are not
#' already present.
#'
#' @param ppd Path to the pydpiper output directory.
#' @param common Path to the common (global) space average file.
#' @param mask Path to the common space mask file.
#' @param fwhm Blur level (fwhm) used to select rows of `determinants.csv`
#'   and passed on when computing scan-to-global determinants.
#' @param clobber Whether to overwrite previously generated files.
#' @param dry Whether to do a dry run (print commands instead of running them).
#' @return A list with elements `scans` (per-scan file paths) and `study`
#'   (study average, mask, study-to-global transform, and pipeline command).
#' @export
load_pydpiper_results <-
  function(ppd
         , common = "/hpf/largeprojects/MICe/scanbase/pipeline-40um/scanbase_second_level_nlin/scanbase_second_level-nlin-3.mnc"
         , mask = sub("\\.mnc$", "_mask.mnc", common)
         , fwhm = 0.2
         , clobber = FALSE
         , dry = FALSE){
    ppd <- normalizePath(ppd, mustWork = TRUE)
    if(!file.exists(common))
      stop("Unable to find your common space file: ", common)
    if(!file.exists(mask))
      stop("Unable to find your common space mask: ", mask)

    transforms <- read.csv(file.path(ppd, "transforms.csv")
                         , stringsAsFactors = FALSE)

    ## Older pipelines only record the inverted transform; recover the
    ## forward transform name by dropping the `_inverted` suffix.
    if(is.null(transforms[["overall_xfm_to_common"]])){
      if(is.null(transforms[["overall_xfm_to_common_inv"]])){
        stop("Neither overall_xfm_to_common nor overall_xfm_to_common_inv found in your transforms.csv, can't compute determinants.\\n Aborting.")
      } else {
        forwards <-
          transforms$overall_xfm_to_common_inv %>%
          sub("_inverted\\.xfm", ".xfm", .)

        forwards_full <-
          forwards %>%
          ifelse(grepl("^/", .), ., file.path(ppd, .))

        if(!all(file.exists(file.path(forwards_full))))
          stop("Only overall_xfm_to_common_inv found in transforms, removing `_inverted` suffix does not produce valid files.\\n Aborting.")

        transforms$overall_xfm_to_common <- forwards
      }
    }

    ## determinants.csv also has a column named `fwhm`; capture the argument
    ## first so filter() compares against it. The previous code filtered on
    ## the literal 0.2, silently ignoring the `fwhm` argument.
    target_fwhm <- fwhm
    determinants <-
      read.csv(file.path(ppd, "determinants.csv")
             , stringsAsFactors = FALSE) %>%
      filter(fwhm == target_fwhm)

    ## scanbase column name -> pydpiper csv column name.
    column_mapping <-
      c(
        Distortion_Corrected_Scan = "native_file"
      , Scan_To_Study_Absolute_Jacobians = "log_full_det"
      , Scan_To_Study_Relative_Jacobians = "log_nlin_det"
      , Scan_To_Global_Absolute_Jacobians = "log_full_overall_det_common"
      , Scan_To_Global_Relative_Jacobians = "log_nlin_overall_det_common"
      , Scan_To_Study_Global_Space_Resampled_Absolute_Jacobians = "log_full_det_common"
      , Scan_To_Study_Global_Space_Resampled_Relative_Jacobians = "log_nlin_det_common"
      , Rigid_Transform = "rigid_xfm"
      , Rigid_Filepath = "lsq6_file"
      , Scan_To_Study_Transform = "lsq12_nlin_xfm"
      , Labels = "label_file"
      )

    known_columns <- keep(column_mapping, ~ . %in% names(transforms) || . %in% names(determinants))

    ## Join transforms to determinants, rename to scanbase conventions, and
    ## absolutize all paths relative to the pipeline directory.
    full_data <-
      inner_join(transforms, determinants
               , by = c("lsq12_nlin_xfm" = "inv_xfm")) %>%
      rename(
        !!! map(known_columns, as.symbol)
      ) %>%
      mutate_at(
        .vars = vars(!!!names(known_columns), overall_xfm_to_common)
      , .funs = funs(
          ifelse(grepl("^/", .), ., file.path(ppd, .)))
      ) %>%
      mutate(Processed_dir = dirname(dirname(Scan_To_Study_Relative_Jacobians))
           , Labels =
               ## Fall back to MAGeT's voted.mnc when no label file column
               ## was recorded; error if a scan was never labelled.
               `if`(is.null(.$Labels)
                  , map_chr(file.path(Processed_dir, "voted.mnc")
                          , function(f){
                              if(file.exists(f)){
                                return(f)
                              } else {
                                stop("these subjects have not been processed with MAGeT")
                              }
                            })
                  , Labels
                    ))

    scans <-
      full_data %>%
      mutate(
        xfm_nlin3_to_global =
          map_chr(overall_xfm_to_common
                , ~ global_from_concat(., "nlin3-to-global.xfm"
                                     , clobber = clobber
                                     , dry = dry)))

    ## Compute scan-to-global determinants if the pipeline didn't; pass the
    ## requested fwhm through so outputs match the selected blur level.
    if(is.null(scans$Scan_To_Global_Absolute_Jacobians)){
      scans <- scans %>%
        mutate(Scan_To_Global_Absolute_Jacobians =
                 future_map2_chr(overall_xfm_to_common
                               , basename(Processed_dir)
                               , ~ compute_inverse_determinants(.x
                                                              , "abs"
                                                              , like = common
                                                              , output = .y
                                                              , mask = mask
                                                              , fwhm = fwhm
                                                              , clobber = clobber
                                                              , dry = dry))
               )
    }

    if(is.null(scans$Scan_To_Global_Relative_Jacobians)){
      scans <- scans %>%
        mutate(Scan_To_Global_Relative_Jacobians =
                 future_map2_chr(overall_xfm_to_common
                               , basename(Processed_dir)
                               , ~ compute_inverse_determinants(.x
                                                              , "rel"
                                                              , like = common
                                                              , output = .y
                                                              , mask = mask
                                                              , fwhm = fwhm
                                                              , clobber = clobber
                                                              , dry = dry))
               )
    }

    scans <- scans %>%
      select(!!! map(names(column_mapping), as.symbol))

    ## Parse the timestamp out of a *command-and-version*.sh filename.
    command_and_version_to_date <-
      function(cav){
        cav %>%
          basename() %>%
          sub(".*-([^-]*-[^-]*-[^-]*-at-[^-]*-[^-]*-[^-]*)\\.sh", "\\1", ., perl = TRUE) %>%
          as.POSIXct(format = "%d-%m-%Y-at-%H-%M-%OS")
      }

    ## Return "<command> v<version>" from the most recent pipeline invocation.
    get_first_command <-
      function(){
        ## Read without the spurious "incomplete final line" warning these
        ## shell snippets tend to trigger.
        quiet_readLines <- function(x){
          withCallingHandlers(
            readLines(x)
          , warning = function(w){
              if(!grepl("incomplete final line", w)){
                warning(w)
              } else {
                invokeRestart("muffleWarning")
              }
            })
        }
        Sys.glob(file.path(ppd, "*command-and-version*")) %>%
          setNames(command_and_version_to_date(.), .) %>%
          sort(decreasing = TRUE) %>%
          head(1) %>%
          names %>%
          quiet_readLines %>%  # previously defined but unused; plain readLines was called
          (function(lines){
            vers <- lines[6] %>% sub(".*is: *", "", .)
            cmd <- lines[8] %>% strsplit(" ") %>% .[[1]] %>% .[1]
            paste0(cmd, " v", vers)
          })
      }

    ## Copy the study->global transform (and its grid files) next to the
    ## pipeline directory and strip it down to the nlin->global piece.
    get_study_to_global_xfm <- function(){
      xfm <- full_data$overall_xfm_to_common[1]
      xfm_dir <- dirname(xfm)
      files <-
        xfm %>%
        scan(what = character(), quiet = TRUE) %>%
        grep("*.mnc", ., value = TRUE) %>%
        file.path(xfm_dir, .) %>%
        c(xfm, .)

      print(files)
      new_files <- file.path(ppd, basename(files))
      walk2(files, new_files, ~ {
        if(!file.exists(.y) || clobber)
          file.copy(.x, .y)
      })

      study_to_global <- "study_to_global.xfm"
      global_from_concat(new_files[1], study_to_global, clobber = clobber, dry = dry)
      file.path(ppd, study_to_global)
    }

    study <-
      data_frame(Study_Average =
                   Sys.glob(file.path(ppd, "*nlin", "/*nlin-3.mnc"))
               , Study_To_Global_Transform =
                   get_study_to_global_xfm()
               , Study_Mask =
                   Sys.glob(file.path(ppd, "*nlin", "*nlin-3_mask.mnc"))
               , Pydpiper_Path =
                   get_first_command()
                 )

    if(nrow(study) != 1)
      stop("Error identifying study mask or average \n"
         , "Should match <pydpiper_dir>/*nlin/*nlin-3(_mask)?.mnc")

    list(scans = scans, study = study)
  }
#' Upload a processed study to scanbase
#'
#' Merges pydpiper outputs with user-provided metadata csvs, validates that
#' each update sheet's columns match the corresponding scanbase sheet, and
#' appends the new rows to the scanbase google sheet via the Sheets v4 API.
#'
#' @param ppd Path to the pydpiper output directory.
#' @param scan Path to a csv of per-scan metadata containing at least
#'   Distortion_Corrected_Scan and Mouse_ID columns.
#' @param study Path to a csv of study-level metadata (one row expected).
#' @param treatments Path to a csv of treatment metadata.
#' @param genotypes Path to a csv of genotype metadata.
#' @param db The google sheet title.
#' @param common Path to the common space average file.
#' @param mask Path to the common space mask file.
#' @param clobber Whether to overwrite previously generated pipeline files.
#' @return A list of HTTP responses, one per uploaded sheet.
#' @export
upload_study <- function(ppd, scan, study, treatments, genotypes
                       , db = "test_scanbase_40um"
                       , common = "/hpf/largeprojects/MICe/scanbase/pipeline-40um/scanbase_second_level_nlin/scanbase_second_level-nlin-3.mnc"
                       , mask = sub("\\.mnc$", "_mask.mnc", common)
                       , clobber = FALSE ){
  ## Current scanbase contents and the pipeline results to append.
  sb <- read_scanbase(db)
  pp <- load_pydpiper_results(ppd, common, mask = mask, clobber = clobber)

  ## User-supplied metadata; study metadata is augmented with the
  ## study-level pipeline files.
  scanf <- read.csv(scan, stringsAsFactors = FALSE) %>% mutate(Mouse_ID = as.character(Mouse_ID))
  studf <- read.csv(study, stringsAsFactors = FALSE) %>% bind_cols(pp$study)
  treatf <- read.csv(treatments, stringsAsFactors = FALSE)
  genef <- read.csv(genotypes, stringsAsFactors = FALSE)

  set_diffs <- function(x,y){
    list(missing = setdiff(x, y)
       , extra = setdiff(y, x))
  }

  ## Abort unless the update sheet's columns exactly match scanbase's.
  ## NOTE(review): "MISSING" lists columns of the update absent from
  ## scanbase and "EXTRA" the reverse — confirm the labels read the way
  ## users expect.
  column_diff_test <- function(f1, f2, name){
    column_diffs <- set_diffs(names(f2), names(f1))
    if(length(c(column_diffs$missing, column_diffs$extra)) != 0)
      stop("Columns of ", name, " sheet update differ from those present in scanbase\n"
         , "MISSING: ", paste0(column_diffs$missing, collapse = ", "), "\n"
         , "EXTRA: ", paste0(column_diffs$extra, collapse = ", "))
  }

  # Check the study doesn't already exist in scanbase
  if(studf$Study_Name %in% sb$studies$Study_Name)
    stop("This study name already appears in scanbase, has it been uploaded already?")

  # Merge the pydpiper results with the scan frame
  scanf_merged <- inner_join(scanf, pp$scans, by = "Distortion_Corrected_Scan")
  if(nrow(scanf_merged) != nrow(scanf)){
    ## The scan sheet may reference symlinked files; retry after resolving
    ## the pipeline paths with readlink.
    cat("Failed to merge pydpiper results with your scan sheet, trying resolving symlinks")
    scanf_merged <-
      pp$scans %>%
      mutate(Distortion_Corrected_Scan =
               map_chr(Distortion_Corrected_Scan, ~ system(paste("readlink -f", .), intern = TRUE))) %>%
      inner_join(scanf, . , by = "Distortion_Corrected_Scan")

    if(nrow(scanf_merged) != nrow(scanf))
      stop("Scans present in the pydpiper directory were not merged successfully, "
         , "this potentially indicates a problem with you scan metadata")
  }

  ## Extra pipeline scans are allowed, but only after interactive
  ## confirmation from the user.
  if(nrow(scanf_merged) != nrow(pp$scans)){
    choice <- readline("There were scans found in your pydpiper directory that aren't in your scan sheet. Proceed? [Y/n]")
    while(! choice %in% c("", "Y", "y")){
      if(choice == "n") stop("Aborting")
      choice <- readline("Please answer y or n")
    }
  }

  ## Default POND_Mouse_ID to the row number when absent.
  if(is.null(scanf_merged$POND_Mouse_ID))
    scanf_merged$POND_Mouse_ID <-
      seq_len(nrow(scanf_merged))

  column_diff_test(sb$scans, scanf_merged, "scans")
  column_diff_test(sb$studies, studf, "studies")
  column_diff_test(sb$treatments, treatf, "treatments")
  column_diff_test(sb$genotypes, genef, "genotypes")

  # Will error if there are type mismatches
  new_scans <- bind_rows(sb$scans, scanf_merged) %>% tail(nrow(scanf_merged))
  new_studies <- bind_rows(sb$studies, studf) %>% tail(nrow(studf))
  new_treatments <- bind_rows(sb$treatments, treatf) %>% tail(nrow(treatf))
  new_genotypes <- bind_rows(sb$genotypes, genef) %>% tail(nrow(genef))

  sheet <- googlesheets4::sheets_find(db) %>% filter(name == db)
  if(nrow(sheet) != 1) stop("Sheet ", db, " not found")

  id <- sheet$id[1]
  token <- googlesheets4::sheets_token()

  ## Append `new` below the existing rows of worksheet `name` with a raw
  ## Sheets v4 values PUT.
  upload_sheet <- function(new, old, name){
    col <- cellranger::num_to_letter(ncol(old))
    old_max <- nrow(old) + 1
    new_max <- old_max + nrow(new)
    range_ref <- glue("{name}!A{old_max + 1}:{col}{new_max}")

    body <-
      list(range = range_ref
         , majorDimension = "ROWS"
         , values = as.matrix(new))

    httr::PUT(
      glue("https://sheets.googleapis.com/v4/spreadsheets/"
         , "{id}/"
         , "values/{range_ref}?valueInputOption=USER_ENTERED")
    , token
    , body = jsonlite::toJSON(body, auto_unbox = TRUE))
  }

  list(upload_sheet(new_scans, sb$scans, "Scans")
     , upload_sheet(new_studies, sb$studies, "Studies")
     , upload_sheet(new_treatments, sb$treatments, "Treatments")
     , upload_sheet(new_genotypes, sb$genotypes, "Genotypes"))
}
#' Split a concatenated MNI xfm file into header and component transforms
#'
#' @param xfm Path to an .xfm file.
#' @return A list with `header` (the lines before the first transform) and
#'   `xfms` (a list of character vectors, one per component transform).
#' @export
unconcat_xfm <- function(xfm){
  lines <- readLines(xfm)
  starts <- grep("Transform_Type", lines)
  # Each component runs from its "Transform_Type" line to just before the
  # next one; the last component runs to the end of the file.
  ends <- c(starts[-1] - 1, length(lines))
  components <- Map(function(from, to) lines[from:to], starts, ends)
  list(header = lines[1:(starts[1] - 1)], xfms = components)
}
#' Reassemble an xfm from selected components of an unconcatenated transform
#'
#' @param unc_xfms A list as returned by `unconcat_xfm` (header + xfms).
#' @param which Indices of the component transforms to keep.
#' @param file Optional path to write the reconstructed transform to.
#' @param clobber Whether to overwrite `file` when it already exists.
#' @param dry Whether to only announce the write instead of performing it.
#' @return The reconstructed xfm lines; invisibly when written (or dry-run).
#' @export
reconstruct_xfm <- function(unc_xfms, which, file = NULL
                          , clobber = TRUE, dry = FALSE){
  nonblank_header <- unc_xfms$header[!grepl("^ *$", unc_xfms$header)]
  # Record provenance of the reassembly in an xfm comment line.
  provenance <- glue("%{Sys.Date()}>>> Unconcat in R and reassembled from pieces:",
                     " {paste0(which, collapse = ', ')}")
  xfm <- c(nonblank_header, provenance, "", unlist(unc_xfms$xfms[which]))

  if(!is.null(file) && (!file.exists(file) || clobber)){
    if(dry){
      cat("Generating reconstructed xfm ", file, "\n")
    } else {
      cat(xfm, file = file, sep = "\n")
    }
    return(invisible(xfm))
  }
  xfm
}
#' ((scan->nlin)->global) -> (nlin->global)
#'
#' Deconstruct a scan to global transform into an nlin to global transform.
#' The output file is created in the same directory as the input file.
#' @param scan_to_global path to an xfm file composed of a nonlinear scan->nlin and a nonlinear
#' nlin->global. The xfm file should have four component transforms linear->grid->linear->grid.
#' The final two are extracted and written out.
#' @param nlin_to_global A filename for the resultant transform, to be written to the directory
#' of `scan_to_global`
#' @param clobber whether to overwrite an existing output file.
#' @param dry whether to do a dry run, meaning not run the command.
#' @return the path to the output file invisibly.
#' @export
global_from_concat <- function(scan_to_global, nlin_to_global
                             , clobber = TRUE, dry = FALSE){
  if(grepl("/", nlin_to_global)) stop("nlin_to_global cannot be a path")
  out_path <- file.path(dirname(scan_to_global), nlin_to_global)
  pieces <- unconcat_xfm(scan_to_global)
  # Components 3:4 are the nlin->global linear + grid pair.
  reconstruct_xfm(pieces, 3:4, file = out_path, clobber = clobber
                , dry = dry)
  invisible(out_path)
}
#' Apply an xfm to a minc file
#'
#' Resamples `input` through `xfm` onto the grid of `like` with mincresample.
#'
#' @param xfm The transform
#' @param input The minc to transform
#' @param output an output file
#' @param like a like file
#' @param clobber whether to overwrite an existing output file.
#' @param dry whether to print the command instead of running it.
#' @return the transformed file invisibly
#' @export
apply_xfm <- function(xfm, input, output, like, clobber = TRUE
                    , dry = FALSE){
  cmd <- glue("mincresample -transform {xfm} {if(clobber) '-clobber' else ''} "
            , " {input} -like {like} {output}")
  # Skip the resample when the output already exists and we may not clobber.
  if(!file.exists(output) || clobber){
    if(dry){
      cat(cmd, "\n")
    } else {
      system(cmd)
    }
  }
  invisible(output)
}
#' Compute the determinants of nonlinear transform
#'
#' Invokes `compute_determinant.py` to compute smoothed, log-transformed
#' jacobian determinants of the inverse of `xfm`.
#'
#' @param xfm Path to the transform whose inverse determinants are wanted.
#' @param type Either "abs" (full determinants) or "rel" (non-linear only).
#' @param like Path to a like file defining the output sampling.
#' @param mask Path to a mask file.
#' @param output Output basename; defaults to `like` with its file extension
#'   stripped. Relative outputs are placed next to `xfm`.
#' @param clobber Whether to recompute when the output already exists.
#' @param fwhm Smoothing kernel applied to the determinants.
#' @param dry Whether to print the command instead of running it.
#' @param tempdir Scratch directory passed to the python tool.
#' @return The output file path, invisibly.
#' @export
compute_inverse_determinants <-
  function(xfm, type, like, mask
         , output = NULL
         , clobber = TRUE
         , fwhm = 0.2
         , dry = FALSE
         , tempdir = tempfile(pattern = "compute_determinants")
           ){
    if(! type %in% c("abs", "rel"))
      stop("You must specify \"abs\"-olute or \"rel\"-ative jacobians with the `type` "
         , "argument. You specified ", type)

    if(is.null(output))
      ## Strip the file extension (e.g. ".mnc"). The previous pattern
      ## "\\.[^.]$" only matched single-character extensions, so ".mnc"
      ## was never removed from the default output name.
      output <- sub("\\.[^.]+$", "", like)

    if(!grepl("^/", output))
      output <- file.path(dirname(xfm), output)

    output_path <- glue("{output}_log_{type}_fwhm{fwhm}.mnc")
    if(file.exists(output_path) && !clobber)
      return(invisible(output_path))

    cmd <-
      glue("compute_determinant.py --transform {xfm} "
         , "--inverse --smooth {fwhm} "
         , "--like {like} --mask {mask} "
         , "{if(type == 'rel') '--non-linear-only' else ''} "
         , "--temp-dir {tempdir} "
         , "--log "
         , "--determinant {output_path}"
           )

    runner <-
      `if`(dry
         , function(cmd) cat(cmd, "\n")
         , system)

    runner(cmd)
    invisible(output_path)
  }
|
761cca51fced9f08109538c9f958356e8e54efbc
|
2a37dde597919e5cf829d9cae8951692837341f6
|
/man/ICSOutlier-package.Rd
|
448d25adfae8069049b1f3d39447cb566feb639b
|
[] |
no_license
|
cran/ICSOutlier
|
6ff37e29dfc64d317a6e97d03762d0acdfdd1f0e
|
76a71530484a5ebcc9490ed809de6f7246776505
|
refs/heads/master
| 2021-01-12T07:24:42.324651
| 2018-02-02T22:07:18
| 2018-02-02T22:07:18
| 76,953,217
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 973
|
rd
|
ICSOutlier-package.Rd
|
\name{ICSOutlier}
\alias{ICSOutlier}
\docType{package}
\title{
Outlier Detection Using Invariant Coordinate Selection
}
\description{
Multivariate outlier detection is
performed using invariant coordinates and the package
offers different methods to choose the appropriate components.
}
\details{
\tabular{ll}{
Package: \tab ICSOutlier\cr
Type: \tab Package\cr
Version: \tab 0.3-0\cr
Date: \tab 2018-02-03\cr
License: \tab GPL (>= 2)\cr
}
ICS is a general multivariate technique with many applications in multivariate analysis. ICSOutlier
offers a selection of functions for automated detection of outliers in the data based on a fitted ics2
object. The current implementation targets data sets with only a small percentage of outliers but future extensions
are under preparation.
}
\author{
Aurore Archimbaud, Klaus Nordhausen, Anne Ruiz-Gazen
Maintainer:
Klaus Nordhausen, \email{klaus.nordhausen@tuwien.ac.at}
}
\keyword{package}
|
dc3a48acf9aa6b37c55e328ab525088153e86a1e
|
14da52d67b25e8187e818978635f7d0034ca936d
|
/man/fb_creatives.Rd
|
0ceab0a6aa87ef1615bf956fafba82244f9fe0c5
|
[] |
no_license
|
romainfrancois/lares
|
72ff2a6ce306a5a84b0080aeed5eadd436d16d7e
|
82c9fb460deb2bc336c590751e47f347bef305cc
|
refs/heads/main
| 2023-08-24T00:55:51.479492
| 2021-10-20T19:04:42
| 2021-10-20T19:04:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,778
|
rd
|
fb_creatives.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/facebook.R
\name{fb_creatives}
\alias{fb_creatives}
\title{Facebook Creatives API}
\usage{
fb_creatives(token, which, api_version = "v11.0", process = TRUE)
}
\arguments{
\item{token}{Character. Valid access token with sufficient privileges. Visit the
\href{https://developers.facebook.com/tools/explorer}{Facebook API Graph Explorer}
to acquire one.}
\item{which}{Character vector. This is the accounts, campaigns, adsets,
or ads IDs to be queried. Remember: if \code{report_level = "account"}, you must
start the ID with \code{act_}.}
\item{api_version}{Character. Facebook API version}
\item{process}{Boolean. Process GET results to a more friendly format?}
}
\value{
data.frame with un-nested processed results if \code{process=TRUE} or
raw API results as list when \code{process=FALSE}.
}
\description{
For more information: \href{https://developers.facebook.com/docs/marketing-api/reference/ad-creative/}{Ad Creative}
}
\examples{
\dontrun{
token <- YOURTOKEN
which <- act_ADACCOUNT
# Query all creatives for "which"
creatives <- fb_creatives(YOURTOKEN, which)
}
}
\seealso{
Other API:
\code{\link{bring_api}()},
\code{\link{fb_accounts}()},
\code{\link{fb_ads}()},
\code{\link{fb_insights}()},
\code{\link{fb_posts}()},
\code{\link{fb_post}()},
\code{\link{fb_process}()},
\code{\link{fb_rf}()},
\code{\link{fb_token}()},
\code{\link{li_auth}()},
\code{\link{li_profile}()},
\code{\link{queryGA}()},
\code{\link{slackSend}()}
Other Facebook:
\code{\link{fb_accounts}()},
\code{\link{fb_ads}()},
\code{\link{fb_insights}()},
\code{\link{fb_posts}()},
\code{\link{fb_post}()},
\code{\link{fb_process}()},
\code{\link{fb_rf}()},
\code{\link{fb_token}()}
}
\concept{API}
\concept{Facebook}
|
95e27f58a3de4d517e73424b174ac9eaa3984364
|
88e2e55f7ac29695ed214b7d52bb85a0bbf658e8
|
/scripts/99_2_BONUS_exploratory_data_analysis_2_aes_group_order.R
|
4aa64dd86556d787ce21150357e3cd5a2057deff
|
[] |
no_license
|
bpolner/introduction_to_R
|
391cc9fc83acd4a937dc741e895170abf50f9732
|
e801e2e03b4fa6d315c6eeb2427522846ded9b9d
|
refs/heads/master
| 2022-10-28T18:03:31.763798
| 2022-10-24T08:46:45
| 2022-10-24T08:46:45
| 168,689,525
| 4
| 5
| null | 2021-03-05T20:18:23
| 2019-02-01T11:44:37
|
HTML
|
UTF-8
|
R
| false
| false
| 1,946
|
r
|
99_2_BONUS_exploratory_data_analysis_2_aes_group_order.R
|
# From the help
# aes_group_order {ggplot2}
# There are three common cases where the default is not enough, and we
# will consider each one below. In the following examples, we will use a simple
# longitudinal dataset, Oxboys, from the nlme package. It records the heights
# (height) and centered ages (age) of 26 boys (Subject), measured on nine
# occasions (Occasion).

# The script previously assumed ggplot2 was already attached; load it
# explicitly so the file runs standalone (nlme is accessed via nlme::).
library(ggplot2)

## A) Multiple groups with one aesthetic ----------------
h <- ggplot(nlme::Oxboys, aes(age, height))

# A single line tries to connect all the observations
h + geom_line()

# The group aesthetic maps a different line for each subject
h + geom_line(aes(group = Subject))

## B) Different groups on different layers ----------------
h <- h + geom_line(aes(group = Subject))

# Using the group aesthetic with both geom_line() and geom_smooth()
# groups the data the same way for both layers
h + geom_smooth(aes(group = Subject), method = "lm", se = FALSE)

# Changing the group aesthetic for the smoother layer
# fits a single line of best fit across all boys
h + geom_smooth(aes(group = 1), size = 2, method = "lm", se = FALSE)

## C) Overriding the default grouping ----------------
# The plot has a discrete scale but you want to draw lines that connect across
# groups. This is the strategy used in interaction plots, profile plots, and parallel
# coordinate plots, among others. For example, we draw boxplots of height at
# each measurement occasion
boysbox <- ggplot(nlme::Oxboys, aes(Occasion, height))
boysbox + geom_boxplot()

# There is no need to specify the group aesthetic here; the default grouping
# works because occasion is a discrete variable. To overlay individual trajectories
# we again need to override the default grouping for that layer with aes(group = Subject)
boysbox <- boysbox + geom_boxplot()
boysbox + geom_line(aes(group = Subject), colour = "blue")

# Q: How can we display a linear trend line for each boy instead? And for the whole sample?
|
b39c49078b17ccf37af2222652bdf3c3f74da57d
|
587bbca7cca601f1281aa66cf9203e77500d07f3
|
/run_analysis.R
|
d28b44197198f8f0b9dc078992181fa6050b26fe
|
[] |
no_license
|
jenna92/Getting-and-Cleaning-Data-Course-Project
|
c9f2c95bf3742366b80ff102663d7d0060bf45d7
|
5ebdedf04a35fc163bfe3484f2d65cdfa806ce44
|
refs/heads/master
| 2020-04-09T20:07:18.369371
| 2018-12-05T19:30:17
| 2018-12-05T19:30:17
| 160,564,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,271
|
r
|
run_analysis.R
|
# Getting and Cleaning Data course project: build a tidy summary of the
# UCI HAR (Human Activity Recognition) dataset.

# Set the working directory to the extracted dataset.
setwd("./UCI HAR Dataset")

# Read the feature (measurement column) names.
features <- read.csv("features.txt", header = FALSE, sep = " ")
feature <- as.character(features[, 2])

# Read the training set: measurements, activity codes, and subject ids.
data_train_x <- read.table("./train/X_train.txt")
data_train_activity <- read.csv("./train/y_train.txt", header = FALSE, sep = " ")
data_train_subject <- read.csv("./train/subject_train.txt", header = FALSE, sep = " ")
data_train <- data.frame(data_train_subject, data_train_activity, data_train_x)
names(data_train) <- c(c("subject", "activity"), feature)

# Read the test set. NOTE: X_test.txt was previously never read, leaving
# data_test_x undefined and the script unrunnable.
data_test_x <- read.table("./test/X_test.txt")
data_test_subject <- read.table("./test/subject_test.txt")
data_test_activity <- read.csv("./test/y_test.txt", header = FALSE, sep = " ")
data_test <- data.frame(data_test_subject, data_test_activity, data_test_x)
names(data_test) <- c(c("subject", "activity"), feature)

# Combine the train and test datasets together.
data_all <- rbind(data_train, data_test)

# Extract only the measurements on the mean and standard deviation
# for each measurement (+2 skips the subject/activity columns).
mean_std_cols <- grep("mean|std", feature)
data_mean_std <- data_all[, c(1, 2, mean_std_cols + 2)]

# Use descriptive activity names to name the activities in the data set.
activity <- read.csv("activity_labels.txt", header = FALSE, sep = " ")
activity <- as.character(activity[, 2])
data_mean_std$activity <- activity[data_mean_std$activity]

# Appropriately label the data set with descriptive variable names.
new_names <- names(data_mean_std)
new_names <- gsub("[(][)]", "", new_names)
new_names <- gsub("-mean-", "_mean_", new_names)
new_names <- gsub("^t", "TimeDomain_", new_names)
new_names <- gsub("^f", "FrequencyDomain_", new_names)
new_names <- gsub("Acc", "Accelerometer", new_names)
new_names <- gsub("Gyro", "Gyroscope", new_names)
new_names <- gsub("Mag", "Magnitude", new_names)
new_names <- gsub("-", "_", new_names)
new_names <- gsub("std", "standardDeviation", new_names)
names(data_mean_std) <- new_names

# Create a tidy data set with the average of each variable for each activity
# and each subject. Use ncol() rather than the previous hard-coded 3:81 so
# the script is robust to a different number of matched columns.
data_tidy <- aggregate(data_mean_std[, 3:ncol(data_mean_std)],
                       by = list(subject = data_mean_std$subject,
                                 activity = data_mean_std$activity),
                       FUN = mean)
write.table(x = data_tidy, file = "data_tidy.txt", row.names = FALSE)
|
9fe8f210b1e62d4e327115f003cb7bd300ae4721
|
bbf5f9cb8ef89c221e4b14518ec0e20114a73161
|
/Kalman.R
|
954494e0441238c5f4ee46feb2cde82620f027c8
|
[] |
no_license
|
ahirwani/MacroClub
|
7d32755b704a04cee0ff89417c51fceb5c5ace8f
|
f54fb9956a5e3c4b743339df8f6ecbc4ec8858ef
|
refs/heads/master
| 2021-08-19T19:38:41.333251
| 2017-11-27T08:24:09
| 2017-11-27T08:24:09
| 109,444,864
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,640
|
r
|
Kalman.R
|
library(mnormt)
xhat <- c(0.2,-0.2)
Sigma <- matrix(c(0.4,0.3,0.3,0.45),ncol=2)
x1 <- seq(-2,4,length=151)
x2 <- seq(-4,2, length=151)
f <- function(x1,x2,mean=xhat, varcov=Sigma)
dmnorm(cbind(x1,x2),mean,varcov) #dmnrom returns a vector of density values (possibly log-transformed)
z <- outer(x1,x2,f)
mycols <- topo.colors(100,0.5)
#Plot the density
image(x1,x2,z,col=mycols,main="Prior density",xlab=expression('x'[1]), ylab=expression('x'[2]))
contour(x1,x2,z,add=T)
points(0.2,-0.2,add=T)
text(0.1,-0.2,labels=expression(hat(x)),adj=1)
###Get Sensor Info
R <- 0.5*Sigma
z2 <- outer(x1,x2,f,mean=c(2.3,-1.9),varcov=R)
image(x1,x2,z2,col=mycols, main="sensor density")#backgrond colours
contour(x1,x2,z2,add=TRUE)## Sensonr Contour
points(2.3,-1.9,pch=19)
text(2.2,-1.9, labels="y",adj=1)
contour(x1,x2,z,add=TRUE) ## Original Actual Contour
points(0.2,-0.2,pch=19)
text(0.1,-0.2,labels=expression(hat(x)),adj=1)
#Using Bayesian Statistics - know robot location given Y. p(x|y) = p(y|x)p(x)/p(y)
#y|x ~ N(Gx,R); x~N(x,E)
#Now because x,y are normal and G is the identity matrix
#xf = (E-1 + R-1)-1 (E-1 x + R-1 y)
#Ef = (E-1 + R-1)-1
#Using Matrix Inversion Identity
#(A-1 + B-1)-1 = A - A(A+B)-1A = A(A+B)-1 B
#xf = (E - E(E+R)-1E)(E-1x + R-1y)
#x-E(E+R)-1x + ER-1y - E(E+R)-1*ER-1y
#x+E(E+R)-1(y-x)
#... Ef = E-E(E+R)-1 E
G= diag(2) #identity matrix
y <- c(2.4,-1.9)
xhatf <- xhat + Sigma %*% t(G) %*% solve(G %*% Sigma %*% t(G)+R) %*% (y- G %*% xhat)
Sigmaf <- Sigma - Sigma %*% t(G) %*% solve(G %*% Sigma %*% t(G)+R) %*% G %*% Sigma
z3 <- outer(x1,x2,f,mean=c(xhatf),varcov=Sigmaf)
#Now Plot
image(x1, x2, z3, col=mycols, xlab=expression('x'[1]), ylab=expression('x'[2]),
main="Filtered density")
contour(x1, x2, z3, add=TRUE)
points(xhatf[1], xhatf[2], pch=19)
text(xhatf[1]-0.1, xhatf[2],
labels = expression(hat(x)[f]), adj = 1)
lb <- adjustcolor("black", alpha=0.5)
contour(x1, x2, z, add=TRUE, col=lb)
points(0.2, -0.2, pch=19, col=lb)
text(0.1, -0.2, labels = expression(hat(x)), adj = 1, col=lb)
contour(x1, x2, z2, add=TRUE, col=lb)
points(2.3, -1.9, pch=19, col=lb)
text(2.2, -1.9,labels = "y", adj = 1, col=lb)
###Now assume robot is moving - have a lienar model that explains how state evolves over time - based on wheel spin.
#xt+1 = Axt + wt+1 where Wt is noise A is (1.2,0,0,-0.2) Q = 0.3Sigma
#E(Axf + w)= AE(xf)+E(w) = Axf = Ax + AEG' (GEG'+R)-1 (y-Gx)
A <- matrix(c(1.2,0,0,-0.2),ncol=2)
Q <- 0.3 * Sigma
K <- A %*% Sigma %*% t(G) %*% solve(G %*% Sigma %*% t(G)+R)
xhatnew <- A %*% xhat + K %*% (y-G %*% xhat)
Sigmanew <- A %*% Sigma %*% t(A) - K %*% G %*% Sigma %*% t(A) + Q
z4 <- outer(x1,x2, f, mean=c(xhatnew), varcov=Sigmanew)
image(x1, x2, z4, col=mycols,
xlab=expression('x'[1]), ylab=expression('x'[2]),
main="Predictive density")
contour(x1, x2, z4, add=TRUE)
points(xhatnew[1], xhatnew[2], pch=19)
text(xhatnew[1]-0.1, xhatnew[2],
labels = expression(hat(x)[new]), adj = 1)
contour(x1, x2, z3, add=TRUE, col=lb)
points(xhatf[1], xhatf[2], pch=19, col=lb)
text(xhatf[1]-0.1, xhatf[2], col=lb,
labels = expression(hat(x)[f]), adj = 1)
contour(x1, x2, z, add=TRUE, col=lb)
points(0.2, -0.2, pch=19, col=lb)
text(0.1, -0.2, labels = expression(hat(x)), adj = 1, col=lb)
contour(x1, x2, z2, add=TRUE, col=lb)
points(2.3, -1.9, pch=19, col=lb)
text(2.2, -1.9,labels = "y", adj = 1, col=lb)
### Image Can also be done with lattice
library(lattice)
grid <- expand.grid(x=x1,y=x2)
grid$Prior <- as.vector(z)
grid$Likelihood <- as.vector(z2)
grid$Posterior <- as.vector(z3)
grid$Predictive <- as.vector(z4)
contourplot(Prior + Likelihood + Posterior + Predictive ~ x*y,
data=grid, col.regions=mycols, region=TRUE,
as.table=TRUE,
xlab=expression(x[1]),
ylab=expression(x[2]),
main="Kalman Filter",
panel=function(x,y,...){
panel.grid(h=-1, v=-1)
panel.contourplot(x,y,...)
})
####################### Kalman filter for Random Walk
#Kalman filter is an example of an adaptive model, dynamic linear model. unlike Simple moving average,
#or FIR that has a fixed set of windowing parameters, the kalman filter constantly updates information to produce adaptive filtering on fly
#xt = A*xt-1 + w #xt is time series, A is state transition matrix, wt is white noise
#zt = H*xt+ v #zt is estimate of actual signal covariance with respect to x, xt is estimated center of time series, v is noise
#zt is value of time series we are trying to capture and model with xt
#With Hidden Markov models - hiddena nd observed state variables are here
###Logistic Growth with noise
logistG <- function(r,p,k,t){
k*p*exp(r*t)/(k+p*exp(r*t)-1)
}
k <- 100
p0 <- 0.1*k
r <-0.2
deltaT <- 0.1
set.seed(12345)
obsVariance <- 25
nObs <- 250
nu <- rnorm(nObs, mean=0,sd=sqrt(obsVariance))
pop <- c(p0, logistG(r,p0,k,(1:(nObs-1))*deltaT))+ nu
plot(pop); lines(c(p0, logistG(r,p0,k,(1:(nObs-1))*deltaT)),col="blue") #with what it looks like without noise
tail(pop); head(pop);
Estimate <- data.frame(Rate=rep(NA,nObs), Population = rep(NA,nObs))
library(numDeriv)
a <- function(x, k, deltaT){c(r=x[1],logistG(r=x[1],p=x[2],k,deltaT))}
G <- t(c(0,1))
Q <- diag(c(0,0))
R <- obsVariance
x <- c(r,p0) ##original vales x0,p0
Sigma <- diag(c(144,25)) #original variance
for(i in 1:nObs){
xobs <- c(0,pop[i]) #x observation of population item i
y <- G %*% xobs
#Filter
SigTermInv <- solve(G %*% Sigma %*% t(G) + R)
xf <- x + Sigma %*% t(G) %*% SigTermInv %*% (y - G %*% x)
Sigma <- Sigma - Sigma %*% t(G) %*% SigTermInv %*% G %*% Sigma
A <- jacobian(a, x=x, k=k, deltaT = deltaT)
K <- A %*% Sigma %*% t(G) %*% solve(G %*% Sigma %*% t(G) + R)
Estimate[i,] <- x
#Predict
x <- a(x = xf, k=k, deltaT = deltaT) + K %*% (y- G %*% xf)
Sigma <- A %*% Sigma %*% t(A) - K %*% G %*% Sigma %*% t(A) + Q
}
#Plot output
op <- par(mfrow= c(2,1))
time <- c(1:nObs)*deltaT
plot(y=pop, x= time, t= 'l',main = "Population growth", xlab="Time",ylab="Population")
curve(logistG(r,p0,k,x),from=0,to=max(time), col=2,add=TRUE, lwd = 1)
lines(y=Estimate$Population, x= time, col="orange",lwd=2)
legend("bottomright",legend=c("Data","Actual","Estimate"),bty='n',col=c("black","red","orange"),lty=1,lwd=2)
plot(y= Estimate$Rate,x=time,t='l',main="Estimated Growth Rate",xlab="Time",ylab="Rate",col="orange",lwd=2)
abline(h=r, col=adjustcolor("red",alpha=0.5),lwd=2)
legend("topright",legend=c("Actual","Estimate"),bty="n",col=c("red","orange"),lty=1,lwd=2)
par(op)
##### Example: Kalman filter estimating a fixed scalar value from noisy
##### measurements, after Welch & Bishop, "An Introduction to the Kalman
##### Filter". The state is a constant, so the time-update model is identity.
count <- 50
true_value <- -0.377727                 # the constant being estimated
# Noisy measurements of the constant (measurement sd = 0.1).
z <- rnorm(count, mean = true_value, sd = 0.1)
Q <- 1e-5                               # process variance (tiny: state ~constant)
# Preallocate the filter traces.
xhat      <- numeric(count)             # a posteriori state estimate
P         <- numeric(count)             # a posteriori error variance
xhatminus <- numeric(count)             # a priori state estimate
Pminus    <- numeric(count)             # a priori error variance
K         <- numeric(count)             # Kalman gain
# Assumed measurement variance (deliberately larger than the true 0.1^2,
# as in the original example).
R <- 1^2
# Initial guesses: state 0 with unit error variance.
xhat[1] <- 0
P[1] <- 1
for (k in 2:count) {
  # Time update (predict): constant-state model, variance grows by Q.
  xhatminus[k] <- xhat[k - 1]
  Pminus[k] <- P[k - 1] + Q
  # Measurement update (correct): blend prediction and measurement by gain K.
  K[k] <- Pminus[k] / (Pminus[k] + R)
  xhat[k] <- xhatminus[k] + K[k] * (z[k] - xhatminus[k])
  P[k] <- (1 - K[k]) * Pminus[k]
}
# Plot filter output: estimate vs. measurements (top), a priori variance
# (bottom). Uses xhat, z, true_value, Pminus and count from the filter above.
par(mfrow = c(2, 1))
# range() returns c(min, max) over all arguments; pad by 5%.
y_limits <- 1.05 * range(z, true_value, xhat)
plot(xhat, type = "l", col = "blue",
     xlab = "Iteration", ylab = "Volts", ylim = y_limits)
points(z, pch = 2)        # raw noisy measurements
abline(true_value, 0)     # true constant value (intercept true_value, slope 0)
plot(Pminus[2:count], type = "l",
     xlab = "Iteration", ylab = substitute(Variance~(Volts^2 )))
|
7b2baf1398688ec6ba971f8306ccabf698c903fd
|
cd901f78760d0856a58e2791d94751b3e3e5c3e8
|
/man/plotExonCNV.Rd
|
2f5b9c9e658eb59845485bd699dc38344889a75b
|
[] |
no_license
|
sanadamakomi/exonCNV
|
4d6056596d2a17df5e56075400441207bf6eb77f
|
92aaeb8ea242aa6965e3910ae5825c68ec30c65b
|
refs/heads/master
| 2022-08-10T09:24:41.165518
| 2022-08-04T07:59:10
| 2022-08-04T07:59:10
| 175,590,331
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 754
|
rd
|
plotExonCNV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotCNV.R
\name{plotExonCNV}
\alias{plotExonCNV}
\title{Plot the uniformity of coverage among samples.}
\usage{
plotExonCNV(
geneCNVFile,
mergedCovFile,
parameterFile,
gene = NULL,
path = NULL,
draw = 10
)
}
\arguments{
\item{geneCNVFile}{Path of gene cnv result file.}
\item{mergedCovFile}{Path of merged coverage file.}
\item{parameterFile}{Path of metrics file.}
\item{gene}{A character string or vector of gene symbol.}
\item{path}{Path to write to.}
\item{draw}{An integer of gene counts to plot.}
}
\description{
Plot exon-level copy number variants for the given genes, combining the
gene CNV result file, the merged coverage file and the metrics file, and
write the resulting plots to \code{path}.
}
\author{
Zhan-Ni Chen
}
|
e275660f80a04aea62124d61be9ed7783fe96b86
|
2dba13252fdfde7be23e111d01fb8cd5321e87ab
|
/man/preprocessTools.Rd
|
15b61ff98dd52c898e6f25e64a974a8a900458b6
|
[] |
no_license
|
benilton/oligo
|
afaee000d45f6f2f238d4c7c4aff5e855beaa30e
|
1124ae1708c8a98263aa953632b686818b7b4bdd
|
refs/heads/master
| 2021-01-25T04:52:49.717316
| 2020-11-03T19:38:03
| 2020-11-03T19:38:03
| 230,304,766
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,761
|
rd
|
preprocessTools.Rd
|
\name{summarize}
\alias{backgroundCorrectionMethods}
\alias{normalizationMethods}
\alias{summarizationMethods}
\alias{summarize}
\alias{summarize-methods}
\alias{summarize,matrix-method}
\alias{summarize,ff_matrix-method}
\alias{backgroundCorrect}
\alias{backgroundCorrect-methods}
\alias{backgroundCorrect,matrix-method}
\alias{backgroundCorrect,ff_matrix-method}
\alias{backgroundCorrect,FeatureSet-method}
\alias{normalize,matrix-method}
\alias{normalize,ff_matrix-method}
\alias{normalize,FeatureSet-method}
\alias{normalizeToTarget}
\alias{normalizeToTarget-methods}
\alias{normalizeToTarget,matrix-method}
\alias{normalizeToTarget,ff_matrix-method}
\title{
Tools for microarray preprocessing.
}
\description{
These are tools to preprocess microarray data. They include background correction,
normalization and summarization methods.
}
\usage{
backgroundCorrectionMethods()
normalizationMethods()
summarizationMethods()
backgroundCorrect(object, method=backgroundCorrectionMethods(), copy=TRUE, extra, subset=NULL, target='core', verbose=TRUE)
summarize(object, probes=rownames(object), method="medianpolish", verbose=TRUE, ...)
\S4method{normalize}{FeatureSet}(object, method=normalizationMethods(), copy=TRUE, subset=NULL,target='core', verbose=TRUE, ...)
\S4method{normalize}{matrix}(object, method=normalizationMethods(), copy=TRUE, verbose=TRUE, ...)
\S4method{normalize}{ff_matrix}(object, method=normalizationMethods(), copy=TRUE, verbose=TRUE, ...)
normalizeToTarget(object, targetDist, method="quantile", copy=TRUE, verbose=TRUE)
}
\arguments{
\item{object}{Object containing probe intensities to be preprocessed.}
\item{method}{String determining which method to use at that
preprocessing step.}
\item{targetDist}{Vector with the target distribution}
\item{probes}{Character vector that identifies the name of the probes represented
by the rows of \code{object}.}
\item{copy}{Logical flag determining if data must be copied before
processing (TRUE), or if data can be overwritten (FALSE).}
\item{subset}{Not yet implemented.}
\item{target}{One of the following values: 'core', 'full', 'extended',
'probeset'. Used only with Gene ST and Exon ST designs.}
\item{extra}{Extra arguments to be passed to other methods.}
\item{verbose}{Logical flag for verbosity.}
\item{\dots}{Arguments to be passed to methods.}
}
\details{
Number of rows of \code{object} must match the length of
\code{probes}.
}
\value{
\code{backgroundCorrectionMethods} and \code{normalizationMethods}
will return a character vector with the methods implemented currently.
\code{backgroundCorrect}, \code{normalize} and
\code{normalizeToTarget} will return a matrix with same dimensions as
the input matrix. If they are applied to a FeatureSet object, the PM
matrix will be used as input.
The \code{summarize} method will return a matrix with
\code{length(unique(probes))} rows and \code{ncol(object)} columns.
}
\examples{
ns <- 100
nps <- 1000
np <- 10
intensities <- matrix(rnorm(ns*nps*np, 8000, 400), nc=ns)
ids <- rep(as.character(1:nps), each=np)
bgCorrected <- backgroundCorrect(intensities)
normalized <- normalize(bgCorrected)
summarizationMethods()
expression <- summarize(normalized, probes=ids)
intensities[1:20, 1:3]
expression[1:20, 1:3]
target <- rnorm(np*nps)
normalizedToTarget <- normalizeToTarget(intensities, target)
if (require(oligoData) & require(pd.hg18.60mer.expr)){
## Example of normalization with real data
data(nimbleExpressionFS)
boxplot(nimbleExpressionFS, main='Original')
for (mtd in normalizationMethods()){
message('Normalizing with ', mtd)
res <- normalize(nimbleExpressionFS, method=mtd, verbose=FALSE)
boxplot(res, main=mtd)
}
}
}
\keyword{manip}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.