blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5cbde3a1b025b6ece34df7731d9d609dfe6f376e | 0a906cf8b1b7da2aea87de958e3662870df49727 | /gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610044870-test.R | bfc02dd6d41d0f2ba4a007be9b5698cc2dfd008e | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 187 | r | 1610044870-test.R | testlist <- list(hi = 1.12780552972647e+45, lo = 1.12780552972646e+45, mu = 1.12780552972647e+45, sig = 1.12780552972647e+45)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
c288a3c95fae86b949b5a14e135d6981b61529ba | fa703db3c7f0621c2c2656d47aec2364eb51a6d8 | /4_ss21/r/P07-3.R | db7afcbfa229966c80e91d7d81577898371feb2c | [] | no_license | JosuaKugler/uni | f49b8e0d246d031c0feb81705f763859446f9b0f | 7f6ae93a5ef180554c463b624ea79e5fbc485d31 | refs/heads/master | 2023-08-15T01:57:53.927548 | 2023-07-22T10:42:37 | 2023-07-22T10:42:37 | 247,952,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,697 | r | P07-3.R | library(R6)
GridPath <- R6Class("GridPath", list(
path = NULL,
dir = "",
rotate_right = function() {
if (self$dir == "N") self$dir <- "E"
else if (self$dir == "E") self$dir <- "S"
else if (self$dir == "S") self$dir <- "W"
else if (self$dir == "W") self$dir <- "N"
invisible(self)
},
rotate_left = function() {
if (self$dir == "N") self$dir <- "W"
else if (self$dir == "E") self$dir <- "N"
else if (self$dir == "S") self$dir <- "E"
else if (self$dir == "W") self$dir <- "S"
invisible(self)
},
move = function(n) {
incr <- c(0,0)
if (self$dir == "N") incr <- c(0,1)
else if (self$dir == "E") incr <- c(1,0)
else if (self$dir == "S") incr <- c(0,-1)
else if (self$dir == "W") incr <- c(-1,0)
for (i in 1:n) {
self$path <- rbind(self$path, self$path[nrow(self$path), ] + incr)
}
invisible(self)
},
initialize = function(dir) {
self$dir <- dir
self$path <- matrix(c(0, 0), nrow=1)
},
print = function() {
par(mar=c(0,0,0,0))
p <- self$path
plot(NA,
xlim=range(p[,1])+c(-1,1),
ylim=range(p[,2])+c(-1,1),
asp=1)
rect(p[,1]-1, p[,2]-1, p[,1], p[,2], col='black')
a <- p[nrow(p), ]
b <- a + dir_to_delta[[self$dir]]
arrows(a[1]-0.5, a[2]-0.5, b[1]-0.5, b[2]-0.5, col='red', lwd=2)
}
))
gpath3 <- GridPath$new('N')
gpath3$move(7)$
rotate_right()$move(8)$
rotate_right()$move(7)$
rotate_right()$move(6)$
rotate_right()$move(5)
gpath3 # calls print(gpath3) calls print.R6(gpath3) calls gpath3$print() |
22efeb8d62c405db26435647abc2e5448b690480 | 38ccbac05de45e94e9e4385a981ef689bdc2e6e5 | /app.R | 2a8dc63370239d549112c0bcbaf6dee397d3f73d | [
"MIT"
] | permissive | antoniorv6/meteorlandsvis | 9cd8dfb7fcad377344da2e1f1c8e3d6b36a50b5d | 28538e545220ae873eb7e82c51bf1d3a4d1476a0 | refs/heads/main | 2023-03-04T11:27:21.054711 | 2021-02-15T10:23:53 | 2021-02-15T10:23:53 | 337,361,986 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,125 | r | app.R | library(shiny)
library(reshape2)
library(dplyr)
library(xts)
library(dygraphs)
library(rbokeh)
library(shinythemes)
library(shinyWidgets)
library(shinydashboard)
library(ggplot2)
library(plotly)
library(scales)
library(leaflet)
library(leaflet.providers)
library(ggmap)
library(leaflet.extras)
library(magrittr)
library(leafsync)
library(knitr)
library(reactable)
# CARGAMOS LOS DATOS
meteor <- read.csv("Data/meteorite-landings.csv", sep = ",", header = TRUE)
# limpiamos
meteor.val <- meteor[!is.na(meteor$year) & !is.na(meteor$mass),]
# obtenemos los cuartiles
meteor.type.range <- c(quantile(meteor.val$mass[is.na(meteor.val$mass) == FALSE], 0.25),
quantile(meteor.val$mass[is.na(meteor.val$mass) == FALSE], 0.50),
quantile(meteor.val$mass[is.na(meteor.val$mass) == FALSE], 0.75))
# clasificamos por tamaño según los cuartiles
meteor.val$size <- ifelse(meteor.val$mass < meteor.type.range[1], "Small",
ifelse(meteor.val$mass < meteor.type.range[2], "Medium",
ifelse(meteor.val$mass < meteor.type.range[3], "Large",
"Very Large"
)
)
)
# USER INTERFACE
ui <- dashboardPage(
dashboardHeader(disable = TRUE),
dashboardSidebar(disable = TRUE),
dashboardBody(
fluidPage(
theme = shinytheme("flatly"),
tags$head(
# Css para ajustar los parámetros por defecto de Bootstrap
tags$style("body{background-color: #ffffff !important} .content{padding:0 !important} .container-fluid{padding: 0 !important;}
.navbar-header{margin-left: 1em !important}
.navbar{margin:0 !important}
.dygraph-legend {left: 70px !important; background-color: transparent !important;}
"),
tags$link(rel = "stylesheet", type = "text/css", href = "css/styles.css")
),
# App title ----
navbarPage("Meteorite Landing Visualization",
tabPanel("Summary", style="margin-top:1em",
sidebarPanel(
pickerInput("recclass","Metorite Classes:",
choices=unique(meteor.val$recclass),
selected=unique(meteor.val$recclass),
options = list(`actions-box` = TRUE),multiple = T),
h5("Click to learn more about meteorite classes:",
a("Meteorite classification", href = "https://en.wikipedia.org/wiki/Meteorite_classification")),
dateRangeInput("daterange1", "Date Range:",
start = "2000-01-01",
end = "2009-12-31")
),
mainPanel(
fluidRow(
valueBoxOutput("totalMeteoritos"),
valueBoxOutput("filtroMeteoritos")
),
fluidRow(
infoBoxOutput("filtroMeteoritoSmall"),
infoBoxOutput("filtroMeteoritoLarge"),
infoBoxOutput("filtroMeteoritoVeryLarge")
)
),
verticalLayout(
splitLayout(
dygraphOutput(outputId = "distPlot"),
plotOutput("plot")
),
reactableOutput("tablaResumen")
)
),
tabPanel("Typology", style="height = 100%",
column(9, style = "background-color: none;",
h3("Mass by class"),
fluidRow(plotlyOutput("classBox")),
column(6,
h3("Class of meteorites found"),
fluidRow(plotlyOutput("percent"))),
column(6, div(img(src="./chon.png", height="80%", width="80%", align="center")),
h4("Chondryte meteor, from Sahara desert")
)
),
column(3,
h3("Classification"),
h4("Ordinary chondrites"),
p("They are classfied by iron cuantity. His origin is from little asteroids. x is the texture number, indicates the evololution of father's body."),
tags$ul(
tags$li("Lx : Low iron"),
tags$li("LLx: Very low iron"),
tags$li("Hx: High iron")
),
h4("Carbonaceus chondrites"),
p("It contains up to 5% of its weight in carbon. Their main metals are olivine and serpentine, along with Fe and Ni. They can contain up to 20% water and up to 7% organic compounds. They come from asteroids and perhaps comets.")
,tags$ul(
tags$li("CM: They contain 15% less chondrules"),
tags$li("CO: They contain 35-40% chondrules")
),
h4("Metallic"),
p("They generally come from large asteroids. They are characterized by being composed of more than 90% metal.")
,tags$ul(
tags$li("IIAB: Medium to coarse octahedrite. They present troilite and graphite nodules, with a rare presence of silicates.")
),
h4("Achondrites"),
p("Achondrites are igneous rocks, like volcanic rocks. Its initial content has been completely transformed due to high heat. They are characterized by having little metal (less than 1%) and are classified according to their origin and calcium level.")
,tags$ul(
tags$li("Ureilite: They are the achondrites poor in calcium. They are the rarest meteorites of all, rich in graphite, clinobronzite, olivines, diamonds and silicates.")
)
)),
tabPanel("Maps",
column(2, class = "futurepanel",style="z-index:10",
fluidRow(style="padding:1em",
selectInput("selClass", h3("Class"),choices = rbind("All",unique(meteor.val$recclass)), selected = 1)),
fluidRow(style="padding:1em",
checkboxGroupInput("checkGroup",
h3("Fall"),
choices = list("Found" = "Found","Fell" = "Fell"),
selected = "Found")),
fluidRow(style="padding:1em",
h3("Discovery year"),
sliderInput("sli1", "",
min = 1800, max = 2021, value = c(1800, 2021))
),
fluidRow(style="padding:1em",
h3("Weight range"),
sliderInput("sli2", "",min = 0, max = 70000, value = c(0, 70000))
)),
leafletOutput("map", height="900")
),
tabPanel("3D Vis",
htmlOutput("show3D"))
)
)
)
)
# CÓDIGO REACTIVO
server <- function(input, output) {
# Histogram of the Old Faithful Geyser Data ----
# with requested number of bins
# This expression that generates a histogram is wrapped in a call
# to renderPlot to indicate that:
#
# 1. It is "reactive" and therefore should be automatically
# re-executed when inputs (input$bins) change
# 2. Its output type is a plot
output$filtros <- renderText(
as.character(input$recclass)
)
output$totalMeteoritos <- renderInfoBox(
valueBox(count(meteor.val), "Total Meteorites:", icon = icon("meteor", lib="font-awesome"), color = "red")
)
output$filtroMeteoritos <- renderValueBox({
meteor.df <- meteor.val[which(meteor.val$recclass %in% input$recclass
& as.Date(paste(meteor.val$year,"-01-01", sep="")) >= as.Date(input$daterange1[1])
& as.Date(paste(meteor.val$year,"-01-01", sep="")) < as.Date(input$daterange1[2])
), c("name","recclass","mass","fall","year")]
valueBox(count(meteor.df), "Filtered meteorites:", icon = icon("meteor", lib="font-awesome"), color = "yellow")
})
output$filtroMeteoritoSmall <- renderValueBox({
meteor.df <- meteor.val[which(meteor.val$recclass %in% input$recclass
& as.Date(paste(meteor.val$year,"-01-01", sep="")) >= as.Date(input$daterange1[1])
& as.Date(paste(meteor.val$year,"-01-01", sep="")) < as.Date(input$daterange1[2])
& meteor.val$size %in% c("Small","Medium")
), c("name","recclass","mass","fall","year")]
infoBox("Small or Medium:", count(meteor.df), icon = icon("thumbs-up", lib="font-awesome"), color = "green")
})
output$filtroMeteoritoLarge <- renderValueBox({
meteor.df <- meteor.val[which(meteor.val$recclass %in% input$recclass
& as.Date(paste(meteor.val$year,"-01-01", sep="")) >= as.Date(input$daterange1[1])
& as.Date(paste(meteor.val$year,"-01-01", sep="")) < as.Date(input$daterange1[2])
& meteor.val$size == "Large"
), c("name","recclass","mass","fall","year")]
infoBox("Large:", count(meteor.df), icon = icon("warning", lib="font-awesome"), color = "orange")
})
output$filtroMeteoritoVeryLarge <- renderValueBox({
meteor.df <- meteor.val[which(meteor.val$recclass %in% input$recclass
& as.Date(paste(meteor.val$year,"-01-01", sep="")) >= as.Date(input$daterange1[1])
& as.Date(paste(meteor.val$year,"-01-01", sep="")) < as.Date(input$daterange1[2])
& meteor.val$size == "Very Large"
), c("name","recclass","mass","fall","year")]
infoBox("Very Large:", count(meteor.df), icon = icon("fire", lib="font-awesome"), color = "red")
})
output$distPlot = renderDygraph({
if (!is.null(input$recclass)) {
# Agrupamos la informacion para crear el xts
meteor.df <- meteor.val[which(meteor.val$recclass %in% input$recclass), c("year","size")]
meteor.df.group <- meteor.df %>% group_by(year, size, .add = TRUE)
# meteor.df.group %>% group_vars()
meteor.df.count <- meteor.df.group %>% summarise(n = n())
# meteor.df.count %>% group_vars()
meteor.df.pivot <- meteor.df.count %>% dcast(year ~ size, fill=0)
meteor.xts <- xts(x = meteor.df.pivot[,-1], order.by = as.Date(paste(meteor.df.pivot$year,"-01-01", sep="")))
meteor.xts.graph <- meteor.xts[index(meteor.xts) >= as.Date(input$daterange1[1]) & index(meteor.xts) < as.Date(input$daterange1[2])]
title <- "Meteorite Time-Series by year"
dygraph(meteor.xts.graph, main = title) %>%
dyAxis("x", label = "Year", drawGrid = FALSE, axisLabelFormatter="function(d) { return d.getFullYear() }") %>%
dyAxis("y", label = "Amount of meteorites") %>%
dyLegend(show = "onmouseover", width = 450) %>%
dyOptions(includeZero = TRUE,
axisLineColor = "navy",
gridLineColor = "lightblue")
}
})
output$show3D=renderUI({includeHTML("www/index.html")})
# Tabla
output$tablaResumen <- renderReactable({
options(reactable.theme = reactableTheme(
color = "hsl(210, 29%, 87%)",
backgroundColor = "hsl(210, 29%, 19%)",
borderColor = "hsl(210, 29%, 22%)",
stripedColor = "hsl(211, 29%, 22%)",
highlightColor = "hsl(211, 29%, 24%)",
inputStyle = list(backgroundColor = "hsl(210, 29%, 25%)"),
selectStyle = list(backgroundColor = "hsl(210, 29%, 25%)"),
pageButtonHoverStyle = list(backgroundColor = "hsl(210, 29%, 25%)"),
pageButtonActiveStyle = list(backgroundColor = "hsl(210, 29%, 28%)")
))
if (!is.null(input$recclass)) {
meteor.df <- meteor.val[which(meteor.val$recclass %in% input$recclass
& as.Date(paste(meteor.val$year,"-01-01", sep="")) >= as.Date(input$daterange1[1])
& as.Date(paste(meteor.val$year,"-01-01", sep="")) < as.Date(input$daterange1[2])
), c("name","recclass","mass","fall","year","size")]
reactable(meteor.df,
columns = list(
mass = colDef(aggregate = "sum",
format = colFormat(separators = TRUE, digits = 2, suffix = " g"))
),
groupBy = c("recclass"),
searchable = TRUE,
defaultPageSize = 1,
sortable = TRUE,
striped = TRUE,
highlight = TRUE
)
}
})
output$plot <- renderPlot({
if (!is.null(input$recclass)) {
meteor.df <- meteor.val[which(meteor.val$recclass %in% input$recclass
& as.Date(paste(meteor.val$year,"-01-01", sep="")) >= as.Date(input$daterange1[1])
& as.Date(paste(meteor.val$year,"-01-01", sep="")) < as.Date(input$daterange1[2])), ]
datos_order<-meteor.df[order(meteor.df[,5]),]
n <- nrow(datos_order)
datos_order<- datos_order[seq(n-10,n),]
hst_mass <- ggplot(datos_order , aes(y = name, x = mass), fill = "transparent")
hst_mass + ggtitle(label = "Top 10 Metorites by mass") +
geom_bar(stat = "identity", fill = "#2C3E50") +
scale_y_discrete(limits=datos_order$name) +
scale_x_continuous(labels = comma_format(big.mark = ".", decimal.mark = ",")) +
xlab("Weight(g)") + ylab("Meteorite") +
theme (
axis.text.y = element_text(size = 12),
axis.title.x = element_text(size = 16, color = "#2C3E50"),
axis.title.y = element_text(size = 16, color = "#2C3E50"),
plot.title = element_text(hjust=0, color="red", size=18, face="bold.italic"),
panel.background = element_rect (fill = 'white'),
plot.background = element_rect (fill = '#ECF0F5')
)
}
})
### Mapa
# define center of map
lat_center <- c(40.48250014304902)
long_center <- c(-28.383444587235175)
df <- meteor
df$mass <- df$mass/1000
pal3 <- colorNumeric(
palette = colorRamp(c("#fff76a", "#ff2828"), interpolate="spline"),
domain = df$total_oc)
output$map <- renderLeaflet({
index_fall<-which(df$fall==input$checkGroup) # if found or fell
df<-df[index_fall,]
if(input$selClass!="All"){
index_class <- which(df$recclass==input$selClass)
df <- df[index_class,]
}
index_year<-which(df$year>=input$sli1[1] & df$year<=input$sli1[2])
df <- df[index_year,]
index_mass <- which(df$mass>=input$sli2[1] & df$mass<=input$sli2[2])
df <- df[index_mass,]
mapa <- leaflet(df,options = leafletOptions(zoomControl = FALSE)) %>% addTiles() %>%
addCircles(lng = ~reclong, lat = ~reclat, weight = 1, color = ~pal3(mass),
radius = ~ mass*3, fillOpacity = 0.5, popup = paste("<strong>Nombre:</strong>", df$name, "<br/>", "<strong>Masa:</strong>", df$mass,"kg","<br/>", "<strong>Tipo:</strong>",df$recclass,"<br/>","<strong>Año:</strong>",df$year)) %>%
addProviderTiles(providers$CartoDB.DarkMatter) %>%
setView(long_center,lat_center,3) %>%
addLegend(pal = pal3, values = ~ mass, opacity = 1, title = "Masa(kg)")
})
# Typology page
## box plt
datos_by_class <- meteor %>% group_by(recclass) %>%
count(recclass, sort = TRUE)
datos_by_class <- datos_by_class[1:15,]
datos_by_class <- transform(datos_by_class, recclass = as.character(recclass))
index <- which(meteor$recclass %in% datos_by_class[,1])
datclass <- meteor[index,]
g <-ggplot(datclass, aes(x=recclass, y= log(mass), fill=recclass)) +
geom_boxplot() +
xlab("Class") + ylab("Log(Mass) Kg") +
scale_fill_manual(values=c('#8b4513','#9f6934','#5421d3','#002b73','#3c2ac5','#0c203d','#002861','#a9a9a9','#6c0de0','#232eb6','#00244f','#092c69','#002e84','#0030a6','#ffc0cb'))
output$classBox <-renderPlotly({ggplotly(g)})
## pie chart
datos_by_class1 <- meteor %>% group_by(recclass) %>%
count(recclass, sort = TRUE)
other <- list("recclass"="Other", "n"=sum(datos_by_class1[-c(1:15), 2]))
datos_by_class2 <- rbind(datos_by_class1[1:15,] , other)
datos_by_class2 <- datos_by_class2 %>%
arrange(desc(recclass)) %>%
mutate(prop = n / sum(datos_by_class2$n) *100) %>%
mutate(ypos = cumsum(prop)- 0.5*prop )
datos_by_class2 <- datos_by_class2[order(datos_by_class2$prop),]
p <- plot_ly(datos_by_class2, labels = ~recclass, values = ~n, type = 'pie',textposition = 'outside',textinfo = 'label+percent', marker = list(colors = c( '#a9a9a9','#ffc0cb','#9f6934','#4f30bf','#7523d6','#8b4513','#622ccb','#2e33a4','#203396','#143187','#0c2f78','#092c69','#0c2959','#10254a','#E7E7D0','#5421d3'))) %>%
layout(
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
output$percent <-renderPlotly({ggplotly(p)})
}
shinyApp(ui = ui, server = server) |
74a5bbcbc31550267cac4db77b9f40dafbb2f673 | 41373e2c057dccbd5243f6fc0da22781964bb905 | /analysis/analysis_pennsylvania.R | ab1445fe1f956aedc068f524038964ffb72e3103 | [] | no_license | Howard-Center-Investigations/Juvenile-lifers- | e27011cf963fd6ac8c2ccaa22b36aa8875d02260 | 5081d6f1c0b44f768185e1dedda7933dd4ac48a9 | refs/heads/master | 2020-08-27T09:04:21.309156 | 2019-11-13T00:35:14 | 2019-11-13T00:35:14 | 217,311,006 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,300 | r | analysis_pennsylvania.R | ###Pennsylvania juvenile analysis
###Data acquired from Maryland DOC by Camilla Velloso (camilaspvelloso@gmail.com)
###Analysis by Riin Aljas (aljasriin@gmail.com)
###GET DATA----
###GET DATA----
library(readxl)
library(tidyverse)
library(readr)
library(lubridate)
library(tidycensus)
library(gridExtra)
lifers <- read_excel("data/source/pennsylvania_091219.xlsx")
population <- get_estimates(geography = "county", "population", variables = NULL, breakdown = NULL,
breakdown_labels = NULL, year = 2018, state = "PA", key = "156fda6326a38745b31480cc7848c55e7f4fcf41")
##create an age column
lifers <- lifers %>%
mutate(age = as.period(interval(DOB, Commitment_Date, "years")))
lifers$age <- as.integer(substr(lifers$age, 1, 2))
###LOOK INTO DATA
### how many unique people?
n_distinct(lifers$dc_number)
#521
###age----
age <- lifers %>%
group_by(dc_number, age) %>%
count()
age %>% group_by(age) %>% count() %>% mutate(pct = n/521*100)
###one guy went to prison only when he was 35.
summary(lifers$age)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 14.00 17.00 18.00 18.24 19.00 35.00
###race----
lifers %>%
group_by(Race) %>%
summarise(people = n()) %>%
mutate(pct = people/nrow(lifers)*100)
# #race people pct
# <chr> <int> <dbl>
# 1 ALL OTHERS 2 0.410
# 2 BLACK 323 66.2
# 3 HISPANIC 14 2.87
# 4 WHITE 149 30.5
lifers %>%
group_by(Gender) %>%
summarise(people = n()) %>%
mutate(pct = people/nrow(lifers)*100)
###gender----
# FEMALE 10 1.92
# 2 MALE 511 98.1
####geography----
by_county <- lifers %>%
group_by(`Committing County`) %>%
summarise(people = n()) %>%
mutate(pct = people/nrow(lifers)*100) %>%
rename(NAME = `Committing County`)
#look how they line up with populations
population$NAME <- gsub(" County, Pennsylvania", "", population$NAME) #<--clean up to line with existing data
population <- population %>%
mutate(NAME = toupper(NAME)) %>%
filter(variable == "POP")
by_county <- by_county %>%
left_join(population, by = "NAME") %>%
select(1:3, 6)
by_county <-
by_county %>%
mutate(juvenile_per_person = people/value)
###sentence time----
lifers <- lifers %>%
mutate(sentence_year = year(Sentencing_Date),
commitment_year = year(Commitment_Date))
# grid.arrange(
# (lifers %>%
# group_by(sentence_year) %>%
# filter(!any(is.na(sentence_year))) %>%
# summarise(people = n()) %>%
# ggplot(lifers, mapping = aes(sentence_year, people))+
# geom_point()+
# geom_line()),
# (lifers %>%
# group_by(commitment_year) %>%
# summarise(people = n()) %>%
# ggplot(lifers, mapping = aes(commitment_year, people))+
# geom_point()+
# geom_line()),
# ncol=2)
###how many are resentenced?----
by_status <- lifers %>%
group_by(Status) %>%
summarise(people = n()) %>%
mutate(pct = people/nrow(lifers)*100)
# Status people pct
# <chr> <int> <dbl>
# 1 Deceased 3 0.576
# 2 Pending 77 14.8
# 3 Released 209 40.1
# 4 Released, then Readmitted 1 0.192
# 5 Resentenced (no longer Life-Life) 216 41.5
# 6 Resentenced (no longer Life-Life)* 6 1.15
# 7 Resentenced to Life-Life 9 1.73
# #
#look at released/resentenced
lifers %>%
filter(Status == "Released") %>%
group_by(Race) %>%
summarise(people = n()) %>%
mutate(pct = people/nrow(lifers%>%
filter(Status == "Released"))*100)
# #race people pct
# <chr> <int> <dbl>
# 1 ALL OTHERS 2 0.410
# 2 BLACK 323 66.2
# 3 HISPANIC 14 2.87
# 4 WHITE 149 30.5
#
# Race people pct
# <chr> <int> <dbl>
# 1 BLACK 160 76.6
# 2 HISPANIC 17 8.13
# 3 WHITE 32 15.3
##more black and hispanic people are getting out compared to white people?
whitesout <- lifers %>%
filter(Race == "WHITE") %>%
group_by(Status) %>%
summarise(people= n()) %>%
mutate(pct = round(people/nrow(lifers %>%
filter(Race == "WHITE")),2))%>%
mutate(Race = "WHITE")
blacksout <- lifers %>%
filter(Race == "BLACK") %>%
group_by(Status) %>%
summarise(people= n()) %>%
mutate(pct = round(people/nrow(lifers %>%
filter(Race == "BLACK")),2)) %>%
mutate(Race = "BLACK")
hispanicsout <- lifers %>%
filter(Race == "HISPANIC") %>%
group_by(Status) %>%
summarise(people= n()) %>%
mutate(pct = round(people/nrow(lifers %>%
filter(Race == "HISPANIC")),2)) %>%
mutate(Race = "HISPANIC")
asiansout <- lifers %>%
filter(Race == "ASIAN") %>%
group_by(Status) %>%
summarise(people= n()) %>%
mutate(pct = round(people/nrow(lifers %>%
filter(Race == "ASIAN")),2)) %>%
mutate(Race = "ASIAN")
by_race_status <- bind_rows(blacksout, whitesout, hispanicsout, asiansout)
by_race_status <- pivot_wider(by_race_status,
names_from = Race,
values_from = c(people,pct))
t <- by_race_status %>%
select(Status,
nr_black_people = people_BLACK,
pct_BLACK,
nr_white_people = people_WHITE,
pct_WHITE,
nr_hispanic_people = people_HISPANIC,
pct_HISPANIC,
nr_asian_people = people_ASIAN,
pct_ASIAN)
#hispanics have been released/resentenced the most, then blacks then whites.
#For blacks it might be also that they started to release them earlier,
#already in 2000s, but that doesn't apply for hispanics
###look at racial differences among counties
county <- lifers %>%
group_by(`Committing County`) %>%
summarise(total_people=n())
county_status <- lifers %>%
group_by(`Committing County`, Status) %>%
summarise(total_status = n()) %>%
left_join(county) %>%
mutate(pct_in_county = round((total_status/total_people)*100,2))
ggplot(county_status,
mapping = aes(Status, pct_in_county, color = Status))+
geom_bar(stat = "identity")+
facet_wrap(~`Committing County`, ncol = 5)
released <- county_status %>%
filter(Status == "Released")
resentenced <- county_status %>%
filter(Status %in%
c("Resentenced (no longer Life-Life)",
"Resentenced (no longer Life-Life)*",
"Resentenced to Life-Life"))
pending <- county_status %>%
filter(Status == "Pending")
#most of counties with high amount of pending people have very little amount
#of people to begin with, exceptions are Dauphin and Delaware
summary(released$pct_in_county)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 7.69 18.34 36.36 43.30 52.96 100.00
#Most of countries where there are high % of released peopel are the ones
#with low numbers of juveniles to begin with, exception being Philadelphia,
#which have a lot of released people. Delaware is the exception, having 26 total
#juveniles but low number of released people. Allegheny has a high % of resentenced
# people.
county_race <- lifers %>%
group_by(`Committing County`, Race) %>%
summarise(total_people=n())
county_released_or_resentenced_race <- lifers %>%
filter(!Status %in% c("Pending", "Resentenced to Life-Life", "Deceased"))%>%
group_by(`Committing County`, Race) %>%
summarise(total_status = n()) %>%
left_join(county_race) %>%
mutate(pct_in_county = round((total_status/total_people)*100,2))
ggplot(county_released_or_resentenced_race,
aes(Race, pct_in_county))+
geom_bar(stat = "identity")+
facet_wrap(~`Committing County`)
lifers %>%
filter(!Status %in% c("Pending", "Resentenced to Life-Life", "Deceased")) %>%
group_by(`Committing County`, Race) %>%
summarise(people = n()) %>%
ggplot(lifers, mapping = aes(sentence_year, people))+
geom_point()+
facet_wrap(~Race)
blacksoutyear <- lifers %>%
filter(Race == "BLACK") %>%
group_by(Status, sentence_year) %>%
summarise(people= n()) %>%
ggplot(lifers, mapping=aes(sentence_year, people))+
geom_point()+
geom_line()+
facet_wrap(~Status)
lifers %>%
filter(Race == "HISPANIC") %>%
group_by(Status, sentence_year) %>%
summarise(people= n()) %>%
ggplot(lifers, mapping=aes(sentence_year, people))+
geom_point()+
geom_line()+
facet_wrap(~Status)
lifers %>%
filter(!Status %in% c("Pending", "Resentenced to Life-Life", "Deceased")) %>%
group_by(sentence_year, `Committing County` ) %>%
summarise(people = n()) %>%
ggplot(lifers, mapping = aes(sentence_year, people))+
geom_point()+
facet_wrap(~`Committing County`)
##doesn't seem a lot of geographical disparity.
###to do list----
### look into let out age
### pull in populations
###
county <- lifers %>%
group_by(`Committing County`) %>%
summarise(total_people=n())
county_race <- lifers %>%
group_by(`Committing County`, Race) %>%
summarise(total_people=n())
county_status <- lifers %>%
group_by(`Committing County`, Status) %>%
summarise(total_status = n()) %>%
left_join(county) %>%
mutate(pct_in_county = round((total_status/total_people)*100,2) )
|
e8612ab39b9c3ec914aab67370ea426a8faa54b9 | 26d3e98123fe1aa73234849bb90be65d0acd4a3d | /Statistical-Analysis-Multiple-Regression-Model-Prostate-Cancer-main/RCodeProject2.R | 617efdf62e86439a7d53674df4238fe031105ac7 | [
"MIT"
] | permissive | augannan/ML-Algorithms-Implementations-Using-R | 9a6e0bf35d87b077114ee9eefa2c8eb940bc0f07 | 93a873eadf8bff3592c186567af5f52a2c450daa | refs/heads/main | 2023-07-26T22:45:34.444119 | 2021-08-19T18:21:23 | 2021-08-19T18:21:23 | 398,035,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,153 | r | RCodeProject2.R | #### Perform an exploratory analysis of data ####
# Goals of this exploratory analysis:
# - Understand the variables
# - Find patterns in the data
# - Suggest modeling strategies
# Clear console
cat("\014")
# Remove all variables from the workspace
# NOTE(review): rm(list = ls()) and setwd() inside a script are generally
# discouraged; kept because this is an interactive analysis script.
rm(list=ls(all=TRUE))
# Set working directory ('FolderPath' is a placeholder -- edit before running)
setwd('FolderPath')
# Load Data
pcancer = read.csv("prostate_cancer.csv")
# Install the package if it doesn't exist, then attach it
if (!require(DataExplorer)) install.packages('DataExplorer')
library(DataExplorer)
# Get high level overview of data
str(pcancer)
summary(pcancer)
# vesinv is a 0/1 indicator (seminal vesicle invasion); treat it as a factor
pcancer[, 'vesinv'] <- as.factor(pcancer[, 'vesinv'])
# Drop the subject id column -- it carries no information for modeling
if (!require(dplyr)) install.packages('dplyr')
library(dplyr)
pcancer = select(pcancer,-subject)
summary(pcancer)
# Visual overview of the data frame structure (DataExplorer)
plot_str(pcancer, "fontSize" = 60)
class(pcancer)
View(pcancer)
head(pcancer)
str(pcancer)
############## UNIVARIATE ANALYSIS ##################
# Draw a boxplot, histogram, and density plot (stacked 3x1) for one numeric
# variable so its central tendency and spread can be inspected in one panel.
# Factored out of seven near-identical copy-pasted sections; the titles and
# labels are built to match the originals exactly.
#   x        numeric vector to plot
#   var_name label used in the three plot titles (e.g. "PSA level")
#   xlab     x-axis label describing the measurement and its units
plot_univariate <- function(x, var_name, xlab) {
  par(mfrow = c(3, 1))
  boxplot(x,
          main = paste(var_name, "Central Tendency and Spread with Box Plot"),
          xlab = xlab,
          col = "gray",
          border = "black",
          horizontal = TRUE)
  hist(x,
       main = paste(var_name, "Central Tendency and Spread with Histogram"),
       xlab = xlab)
  plot(density(x),
       main = paste(var_name, "Central Tendency and Spread with Density Plot"))
}
######## PSA Level ##############
plot_univariate(pcancer$psa, "PSA level",
                "Serum prostate-specific antigen level (mg/ml)")
######## Cancer Volume ##############
plot_univariate(pcancer$cancervol, "Cancer Volume",
                "Estimate of prostate cancer volume (cc)")
######## Weight ##############
plot_univariate(pcancer$weight, "Weight", "prostate weight (gm)")
######## Age ##############
plot_univariate(pcancer$age, "Age", "Age of patient (years)")
######## Benign prostatic hyperplasia ##############
plot_univariate(pcancer$benpros, "Benign prostatic hyperplasia",
                "Amount of benign prostatic hyperplasia (cm^2)")
######## Capsular penetration ##############
plot_univariate(pcancer$capspen, "Capsular penetration",
                "Degree of capsular penetration (cm)")
######## Gleason score ##############
plot_univariate(pcancer$gleason, "Gleason score",
                "Pathologically determined grade of disease (6, 7 or 8)")
########### CATEGORICAL DATA #############
# Bar chart of the two seminal-vesicle-invasion levels, largest count first.
par(mfrow=c(1,1))
vesinv_counts <- table(pcancer$vesinv)
# sort() keeps the level names, so this is equivalent to indexing the
# table by order(..., decreasing = TRUE).
barplot(sort(vesinv_counts, decreasing = TRUE),
        horiz = TRUE,
        main = 'Seminal vesicle invasion count with Bar plot',
        ylab = 'Presence (1) or absence (0)',
        xlab = 'Number of records')
################## MULTIVARIATE ANALYSIS #######################
# Drop the single record with the largest prostate weight (the extreme
# outlier visible in the univariate plots above).
pcancer <- pcancer[-c(which.max(pcancer$weight)),]
str(pcancer)
# Pairwise scatterplot matrix of all variables
plot(pcancer)
# Correlation matrix needs numeric columns only, so drop the vesinv factor
pcancer2 = select(pcancer,-vesinv)
cor(pcancer2)
############### Transformation ##########################
# psa and cancervol are strongly right-skewed, so take natural logs and
# then centre each transformed variable at its mean.
######## PSA Level ##############
psa2 = log(pcancer$psa,exp(1))
psa2 = psa2 - mean(psa2)
par(mfrow=c(3,1))
boxplot(psa2,
main="PSA Level Central Tendency and Spread with Box Plot",
xlab = "Serum prostate-specific antigen level (mg/ml)",
col="gray",
border="black",
horizontal=TRUE
)
hist(psa2,main="PSA Level Central Tendency and Spread with Histogram",
xlab = "Serum prostate-specific antigen level (mg/ml)")
plot(density(psa2),main="PSA Level Central Tendency and Spread with Density Plot")
######## Cancer Volume ##############
cancer2 = log(pcancer$cancervol,exp(1))
cancer2 = cancer2 - mean(cancer2)
par(mfrow=c(3,1))
boxplot(cancer2,
main="Cancer Volume Central Tendency and Spread with Box Plot",
xlab = "Estimate of prostate cancer volume (cc)",
col="gray",
border="black",
horizontal=TRUE
)
hist(cancer2,main="Cancer Volume Central Tendency and Spread with Histogram",
xlab = "Estimate of prostate cancer volume (cc)")
plot(density(cancer2),main="Cancer Volume Central Tendency and Spread with Density Plot")
# Index plot of the transformed cancer volume
plot(cancer2)
############### Q(3) ####################
# Simple linear regression of transformed PSA on transformed cancer volume.
fit1 <- lm(psa2 ~ cancer2)
summary(fit1)
par(mfrow=c(1,1))
# BUG FIX: removed the trailing comma after ylab, which passed an empty
# extra argument into plot().
plot(cancer2,psa2,main='Plot of transformed PSA Level against Cancer Volume',
xlab = 'Cancer Volume after logarithmic transformation',
ylab = 'PSA Level after logarithmic transformation')
abline(fit1)
###
# Simple linear regression: transformed PSA vs prostate weight.
fit2 <- lm(psa2 ~ pcancer$weight)
fit2
summary(fit2)
par(mfrow=c(1,1))
plot(pcancer$weight,psa2,main='Plot of transformed PSA Level against prostate weight',
xlab = 'Prostate weight (gm)',
ylab = 'PSA Level after logarithmic transformation')
abline(fit2)
###
# Simple linear regression: transformed PSA vs age.
fit3 <- lm(psa2 ~ pcancer$age)
fit3
summary(fit3)
par(mfrow=c(1,1))
plot(pcancer$age,psa2,main='Plot of transformed PSA Level against Age',
xlab = 'Age of patient (years)',
ylab = 'PSA Level after logarithmic transformation')
abline(fit3)
###
# Simple linear regression: transformed PSA vs benign prostatic hyperplasia.
fit4 <- lm(psa2 ~ pcancer$benpros)
fit4
summary(fit4)
par(mfrow=c(1,1))
plot(pcancer$benpros,psa2,main='Plot of transformed PSA Level against Benign prostatic hyperplasia',
xlab = 'Amount of benign prostatic hyperplasia (cm^2)',
ylab = 'PSA Level after logarithmic transformation')
abline(fit4)
###
# Simple linear regression: transformed PSA vs capsular penetration.
fit5 <- lm(psa2 ~ pcancer$capspen)
fit5
summary(fit5)
par(mfrow=c(1,1))
# BUG FIX: the original plotted pcancer$benpros on the x-axis (copy-paste
# from the fit4 section) while the title, axis label, and the fitted line
# abline(fit5) all refer to capsular penetration.
plot(pcancer$capspen,psa2,main='Plot of transformed PSA Level against Capsular penetration',
xlab = 'Degree of capsular penetration (cm)',
ylab = 'PSA Level after logarithmic transformation')
abline(fit5)
###
# Simple linear regression: transformed PSA vs Gleason score.
fit6 <- lm(psa2 ~ pcancer$gleason)
fit6
summary(fit6)
par(mfrow=c(1,1))
plot(pcancer$gleason,psa2,main='Plot of transformed PSA Level against Gleason score',
xlab = 'Pathologically determined grade of disease (6, 7 or 8)',
ylab = 'PSA Level after logarithmic transformation')
abline(fit6)
###
# Simple linear regression: transformed PSA vs seminal vesicle invasion.
# NOTE(review): vesinv is a factor, so plot() draws grouped boxplots here;
# abline(fit7) then overlays a line whose coefficients come from the dummy
# coding -- confirm this is the intended visual.
fit7 <- lm(psa2 ~ pcancer$vesinv)
fit7
summary(fit7)
par(mfrow=c(1,1))
plot(pcancer$vesinv,psa2,main='Plot of transformed PSA Level against Seminal vesicle invasion',
xlab = 'Presence (1) or absence (0) of seminal vesicle invasion',
ylab = 'PSA Level after logarithmic transformation')
abline(fit7)
################ Mult Lin Reg (d) ####################
summary(pcancer)
str(pcancer)
# Full multiple regression of transformed PSA on all candidate predictors.
fitm <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$age+pcancer$benpros+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
print(fitm)
summary(fitm)
plot(fitm)
# Backward elimination by partial F-tests: refit without one predictor at a
# time and compare with anova(reduced, full).
# Drop cancer2?
fitcancer2 <- lm(psa2 ~ pcancer$weight+pcancer$age+pcancer$benpros+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
anova(fitcancer2,fitm)
# Drop age?
fitage <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$benpros+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
anova(fitage,fitm)
# age removed; fittemp is now the working model.  Drop weight?
fittemp <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$benpros+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
fitweight <- lm(psa2 ~ cancer2+pcancer$benpros+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
anova(fitweight,fittemp)
# Drop benpros?
fittemp <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$benpros+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
fitbenpros <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
anova(fitbenpros,fittemp)
# benpros removed.  Drop capspen?
fittemp <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$vesinv+pcancer$capspen+pcancer$gleason)
fitcapspen <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$vesinv+pcancer$gleason)
anova(fitcapspen,fittemp)
# capspen removed.  Drop gleason?
fittemp <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$vesinv+pcancer$gleason)
fitgleason <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$vesinv)
anova(fitgleason,fittemp)
# Drop vesinv?
fittemp <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$vesinv+pcancer$gleason)
fitvesinv <- lm(psa2 ~ cancer2+pcancer$weight+pcancer$gleason)
anova(fitvesinv,fittemp)
####### product terms #########
# Retained main effects (cancer2, weight, vesinv, gleason) plus all
# two-way product terms among them.
fitprod <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
cancer2:pcancer$weight+
cancer2*pcancer$vesinv+
cancer2*pcancer$gleason+
pcancer$weight*pcancer$vesinv+
pcancer$weight*pcancer$gleason+
pcancer$vesinv*pcancer$gleason)
print(fitprod)
summary(fitprod)
# Backward elimination of the interactions: fitprodtemp is the current
# model; each candidate drops one product term and is compared with a
# partial F-test via anova().
fitprodtemp <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
cancer2:pcancer$weight+
cancer2*pcancer$vesinv+
cancer2*pcancer$gleason+
pcancer$weight*pcancer$vesinv+
pcancer$weight*pcancer$gleason+
pcancer$vesinv*pcancer$gleason)
summary(fitprodtemp)
# Drop cancer2:weight
fitprodvw <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
cancer2*pcancer$vesinv+
cancer2*pcancer$gleason+
pcancer$weight*pcancer$vesinv+
pcancer$weight*pcancer$gleason+
pcancer$vesinv*pcancer$gleason)
summary(fitprodvw)
anova(fitprodtemp,fitprodvw)
fitprodtemp <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
cancer2*pcancer$vesinv+
cancer2*pcancer$gleason+
pcancer$weight*pcancer$vesinv+
pcancer$weight*pcancer$gleason+
pcancer$vesinv*pcancer$gleason)
summary(fitprodtemp)
# Drop cancer2:gleason
fitprodvg <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
cancer2*pcancer$vesinv+
pcancer$weight*pcancer$vesinv+
pcancer$weight*pcancer$gleason+
pcancer$vesinv*pcancer$gleason)
summary(fitprodvg)
anova(fitprodtemp,fitprodvg)
fitprodtemp <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
cancer2*pcancer$vesinv+
pcancer$weight*pcancer$vesinv+
pcancer$weight*pcancer$gleason+
pcancer$vesinv*pcancer$gleason)
summary(fitprodtemp)
# Drop cancer2:vesinv
fitprodvs <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
pcancer$weight*pcancer$vesinv+
pcancer$weight*pcancer$gleason+
pcancer$vesinv*pcancer$gleason)
summary(fitprodvs)
anova(fitprodtemp,fitprodvs)
# Backward elimination of the remaining product terms.  At each step the
# current model (fitprodtemp) is refitted, one interaction is dropped, and
# the reduced model is compared with a partial F-test via anova().
# BUG FIX: after each reduced fit the original called summary(fitprodvs) --
# a stale model from an earlier step -- instead of the model just fitted;
# the four summary calls below now report the correct models.
fitprodtemp <- lm(psa2 ~ cancer2 +
                    pcancer$weight +
                    pcancer$vesinv +
                    pcancer$gleason +
                    pcancer$weight*pcancer$vesinv +
                    pcancer$weight*pcancer$gleason +
                    pcancer$vesinv*pcancer$gleason)
summary(fitprodtemp)
# Drop weight:gleason
fitprodwg <- lm(psa2 ~ cancer2 +
                  pcancer$weight +
                  pcancer$vesinv +
                  pcancer$gleason +
                  pcancer$weight*pcancer$vesinv +
                  pcancer$vesinv*pcancer$gleason)
summary(fitprodwg)
anova(fitprodtemp,fitprodwg)
fitprodtemp <- lm(psa2 ~ cancer2 +
                    pcancer$weight +
                    pcancer$vesinv +
                    pcancer$gleason +
                    pcancer$weight*pcancer$vesinv +
                    pcancer$vesinv*pcancer$gleason)
summary(fitprodtemp)
# Drop weight:vesinv
fitprodws <- lm(psa2 ~ cancer2 +
                  pcancer$weight +
                  pcancer$vesinv +
                  pcancer$gleason +
                  pcancer$vesinv*pcancer$gleason)
summary(fitprodws)
anova(fitprodtemp,fitprodws)
fitprodtemp <- lm(psa2 ~ cancer2 +
                    pcancer$weight +
                    pcancer$vesinv +
                    pcancer$gleason +
                    pcancer$weight*pcancer$vesinv +
                    pcancer$vesinv*pcancer$gleason)
summary(fitprodtemp)
# Drop vesinv:gleason (keeping weight:vesinv)
fitprodws <- lm(psa2 ~ cancer2 +
                  pcancer$weight +
                  pcancer$vesinv +
                  pcancer$gleason +
                  pcancer$weight*pcancer$vesinv)
summary(fitprodws)
anova(fitprodtemp,fitprodws)
fitprodtemp <- lm(psa2 ~ cancer2 +
                    pcancer$weight +
                    pcancer$vesinv +
                    pcancer$gleason +
                    pcancer$weight*pcancer$vesinv)
summary(fitprodtemp)
# Drop weight:vesinv, leaving main effects only
fitprodws <- lm(psa2 ~ cancer2 +
                  pcancer$weight +
                  pcancer$vesinv +
                  pcancer$gleason)
summary(fitprodws)
anova(fitprodws,fitprodtemp)
####### Assumptions ######
# Final model: the four retained predictors plus the weight:vesinv
# interaction that survived the backward elimination above.
fitFinal <- lm(psa2 ~cancer2+
pcancer$weight+
pcancer$vesinv+
pcancer$gleason+
pcancer$weight:pcancer$vesinv)
summary(fitFinal)
# Residuals vs fitted values: check linearity / constant variance
plot(fitted(fitFinal),resid(fitFinal),main = 'Residual Plot')
abline(h=0)
# Standard lm diagnostic plots (QQ, scale-location, leverage)
plot(fitFinal)
# Shapiro-Wilk test of normality of the residuals
shapiro.test(residuals(fitFinal))
if (!require(lmtest)) install.packages('lmtest')
library(lmtest)
# Breusch-Pagan test for heteroscedasticity
bptest(fitFinal)
summary(fitFinal)
############################
# Counts of patients with/without seminal vesicle invasion
table(pcancer$vesinv)
#################################
# Rebuild the final model with short variable names so predict() can be
# given a newdata frame whose column names match the formula terms.
v <- cancer2          # centred log cancer volume
w <- pcancer$weight   # prostate weight (gm)
s <- pcancer$vesinv   # seminal vesicle invasion (factor, levels "0"/"1")
g <- pcancer$gleason  # Gleason score
# BUG FIX: the original assigned s1 <- pcancer$vesinv while the formula
# (and the intended newdata) use `s`, so the fit failed with
# "object 's' not found".  The name is now `s` throughout.
fitFinal <- lm(psa2 ~ v+w+s+g+w:s)
summary(pcancer)
summary(fitFinal)
str(fitFinal)
# Prediction interval for a "typical" patient: mean cancer volume,
# mean weight, no vesicle invasion, mean Gleason score.
# BUG FIX: cancer2 is log(cancervol) centred on mean(log(cancervol)), but
# the original used log(7.0591)-7.0591, i.e. subtracted the raw-scale mean.
# (7.0591 is presumably mean(pcancer$cancervol) -- TODO confirm.)
predict(fitFinal,data.frame(v = log(7.0591) - mean(log(pcancer$cancervol)),
                            w = 41.27,
                            s = "0",
                            g = 6.885),
        se.fit=TRUE,interval="prediction")
# Side-by-side look at PSA by seminal vesicle invasion status:
# a grouped boxplot and overlaid kernel density estimates (sm package).
par(mfrow=c(1,2))
boxplot(pcancer$psa~pcancer$vesinv,
main="PSA level Central Tendency with Box Plot",
xlab = "Serum prostate-specific antigen level (mg/ml)",
col="gray",
border="black",
horizontal=FALSE
)
if (!require(sm)) install.packages('sm')
library(sm)
sm.density.compare(pcancer$psa,pcancer$vesinv, xlab="PSA Level with Seminal vesicle invasion")
# Add a legend (the color numbers start from 2 and go up)
legend("topright", levels(pcancer$vesinv), fill=2+(0:nlevels(pcancer$vesinv)))
title(main="Distributions of PSA Level - Seminal vesicle
invasion Presence (1) or absence (0)")
# Same side-by-side comparison, but grouped by Gleason score (6, 7 or 8).
par(mfrow=c(1,2))
boxplot(pcancer$psa~pcancer$gleason,
        main="PSA level Central Tendency with Box Plot",
        col="gray",
        border="black",
        horizontal=FALSE
)
if (!require(sm)) install.packages('sm')
library(sm)
# BUG FIX: this panel is grouped by Gleason score (see the boxplot above),
# but the original copy-pasted the vesinv density comparison, legend, and
# title.  Group by Gleason score instead.
gleason_grp <- as.factor(pcancer$gleason)
sm.density.compare(pcancer$psa, gleason_grp, xlab="PSA Level with Gleason score")
# Add a legend (the color numbers start from 2 and go up)
legend("topright", levels(gleason_grp), fill=2+(0:nlevels(gleason_grp)))
title(main="Distributions of PSA Level - Gleason score (6, 7 or 8)")
|
c7c14f1dc750a82df6096ee0ac05ec4f78f9b30a | e28041b40405525591643242b91b9f0edb6fd0e5 | /data/complexportal/Functions.R | ae1424e1076fbac34ca94c9f5aabbf9a51161768 | [] | no_license | moghbaie/ComplexPlus | bd403bd29b6311dbf3980819d3bad3db4b19b96e | e709909af991260f4b52124b8544de99ccf64d92 | refs/heads/master | 2020-09-04T21:46:51.415196 | 2019-11-08T23:18:30 | 2019-11-08T23:18:30 | 201,650,525 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,949 | r | Functions.R | # Mehrnoosh Oghbaie
# 08/30/2018
# Repository for all the functions
######################################################################################
# Either download or install the required library from CRAN or bioconductor
#######################################################################################
# Attach each requested package, installing it first if it is not already
# available.  Bioconductor packages are installed via the (legacy) biocLite
# installer sourced from bioconductor.org; CRAN packages via
# install.packages().
#
#   CRAN.packages          character vector of CRAN package names
#   bioconductor.packages  character vector of Bioconductor package names
#
# Side effects: may download/install packages and attaches them with
# library().
# NOTE(review): biocLite() was retired in favour of BiocManager::install();
# kept here to preserve the original behaviour.
install.packages.if.necessary <- function(CRAN.packages=c(), bioconductor.packages=c()) {
  if (length(bioconductor.packages) > 0) {
    source("http://bioconductor.org/biocLite.R")
  }
  for (p in bioconductor.packages) {
    # require() returns FALSE (instead of erroring) when p is missing,
    # which triggers the install.  TRUE spelled out (T is reassignable).
    if (!require(p, character.only = TRUE)) {
      biocLite(p)
      library(p, character.only = TRUE)
    }
  }
  for (p in CRAN.packages) {
    if (!require(p, character.only = TRUE)) {
      install.packages(p)
      library(p, character.only = TRUE)
    }
  }
}
###########################################################################################################################
# Convert2CSV: parse one Complex Portal PSI-MI XML file and flatten its inferred interactions into a single CSV file
###########################################################################################################################
# Parse a complex XML file and flatten its inferred binary interactions
# into a data frame, which is also written to "<dir><complex name>.csv".
#
#   filename  path to a complex XML file (e.g. ".../CPX-123.xml")
#   dir       output directory/prefix for the per-complex CSV file
#
# Returns a data frame with one row per inferred interaction:
#   id_A, id_B              participant feature ids
#   protein_A, protein_B    interactor short labels
#   protein_fullname_A/_B   interactor full names (NA when absent)
#   complexID               the CPX accession of the complex
# When the file contains no inferred interactions, prints the complex
# accession and returns NULL.
# BUG FIX: the original returned the variable `list` unconditionally; when
# there were no interactions that name was never assigned, so the base
# function `list` itself was returned instead of data.  (The local has
# also been renamed so it no longer shadows base::list.)
Convert2CSV <- function(filename, dir){
  # Complex name from the file name, minus the ".xml" extension
  # (fixed = TRUE so the dot is not treated as a regex wildcard).
  complex <- strsplit(basename(filename), ".xml", fixed = TRUE)[[1]]
  data <- xmlParse(filename)
  xml_data <- xmlToList(data)
  ## reading the interaction list: the CPX accession lives in the xref block
  complexID <- as.list(xml_data[["entry"]][["interactionList"]][["interaction"]][["xref"]])
  complex_name <- complexID[grepl("CPX", complexID)]$secondaryRef[["id"]]
  binding <- as.list(xml_data[["entry"]][["interactionList"]][["interaction"]][["inferredInteractionList"]])
  if (length(binding) == 0) {
    print(complex_name)
    return(NULL)
  }
  # One row per inferred binary interaction (pairs of feature ids)
  interactions <- data.frame(matrix(ncol = 2, nrow = 0))
  for (bind in binding) {
    interact1 <- as.integer(bind[1]$participant$participantFeatureRef)
    interact2 <- as.integer(bind[2]$participant$participantFeatureRef)
    interactions <- rbind(interactions, c(interact1, interact2))
  }
  colnames(interactions) <- c("id_A", "id_B")
  ## reading the interactor list (interactor id -> protein names)
  nodes <- as.list(xml_data[["entry"]][["interactorList"]])
  ref_list <- data.frame(matrix(ncol = 3, nrow = 0))
  for (node in nodes) {
    ref <- as.integer(unlist(node$.attrs["id"]))
    protein <- node$names$shortLabel
    protein_fullname <- if (is.null(node$names$fullName)) NA else node$names$fullName
    nodel <- cbind(as.integer(ref), protein, protein_fullname)
    ref_list <- rbind(ref_list, unname(nodel))
  }
  colnames(ref_list) <- c("ref", "protein", "protein_fullname")
  ## reading the mapping between interactors and featured interactors in the interaction list
  links <- as.list(xml_data[["entry"]][["interactionList"]][["interaction"]][["participantList"]])
  link_list <- data.frame(matrix(ncol = 2, nrow = 0))
  for (link in links) {
    intRef <- as.integer(link$interactorRef)
    featurelist <- link$featureList
    feat <- c()
    if (!is.null(featurelist)) {
      for (feature in featurelist) {
        feat <- rbind(feat, as.integer(feature$.attrs["id"]))
      }
      linkRef <- cbind(feat, intRef)
      link_list <- rbind(link_list, linkRef)
    }
  }
  colnames(link_list) <- c("id", "ref")
  ## matching and merging: attach protein names to each feature id,
  ## then to both sides of every interaction
  link_list$protein <- ref_list$protein[match(link_list$ref, ref_list$ref)]
  link_list$protein_fullname <- ref_list$protein_fullname[match(link_list$ref, ref_list$ref)]
  interactions$protein_A <- link_list$protein[match(interactions$id_A, link_list$id)]
  interactions$protein_B <- link_list$protein[match(interactions$id_B, link_list$id)]
  interactions$protein_fullname_A <- link_list$protein_fullname[match(interactions$id_A, link_list$id)]
  interactions$protein_fullname_B <- link_list$protein_fullname[match(interactions$id_B, link_list$id)]
  interactions$complexID <- complex_name
  ## Saving each complex separately
  write.csv(interactions, paste0(dir, complex, ".csv"))
  ## return the table of interactions for this complex
  return(interactions)
}
|
b9cbbf83250b073e53f8e09f5cf3cdbc99378022 | 2f482df1498f7311c70069da6977bd8c47577a56 | /man/call_api.Rd | 8297d7154738212a0f069de2f783e674c54c41ea | [] | no_license | cran/cometr | 179d837d20ab349dcbb5c70d26002c8f936d03de | 45ae44c25f6640576688fac53a34f1cb48287f77 | refs/heads/master | 2022-11-29T23:52:21.356518 | 2020-08-13T16:40:03 | 2020-08-13T16:40:03 | 255,887,004 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,105 | rd | call_api.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{call_api}
\alias{call_api}
\title{Call a Comet REST API endpoint}
\usage{
call_api(
endpoint,
method = c("GET", "POST"),
params = list(),
response_json = TRUE,
api_key = NULL
)
}
\arguments{
\item{endpoint}{The REST API endpoint.}
\item{method}{The HTTP method to use, either "GET" or "POST".}
\item{params}{A list of parameters. For GET endpoints, the parameters are appended
to the URL; for POST endpoints, the parameters are sent in the body of the request.}
\item{response_json}{If \code{TRUE}, try to parse the response as JSON. If \code{FALSE}, return
the response as raw data (useful when the response is a file).}
\item{api_key}{Comet API key (can also be specified using the \code{COMET_API_KEY}
parameter as an environment variable or in a comet config file).}
}
\value{
The parsed response
}
\description{
This function is only meant for advanced users. If you would like to call any
arbitrary Comet API endpoint that isn't natively supported by \code{cometr}, you can
use this function.
}
|
e33c39a73e3ff41fc0604bc4072b30d271029e74 | 689e60449a13f2eac2cf714ed971f6a224358dfb | /niche_conservatism_test_2019_11_28.R | 73602dd2b6b13d7d836faa313ebe13ab62585f24 | [] | no_license | Eryops1/niche-conservatism | d6833a6251d0520cdaca785fb5de09e520c10408 | 18b6857ab4a41020ec713aef22691fbd6a6fbeca | refs/heads/master | 2020-09-22T22:41:55.275712 | 2019-12-03T14:40:19 | 2019-12-03T14:40:19 | 225,336,586 | 0 | 0 | null | 2019-12-02T09:31:40 | 2019-12-02T09:31:39 | null | UTF-8 | R | false | false | 3,934 | r | niche_conservatism_test_2019_11_28.R | library(BIEN)
library(ape) #Package for working with phylogenies in R
library(maps) #Useful for making quick maps of occurrences
library(sp) #A package for spatial data
library(stringr)
library(rgdal)
library(sp)
test_data<-read.csv("test_data.csv", sep = ";", na.string = "") # species list
map_trf <- readOGR("corlett_shapefiles/corlett_TRF_ext.shp") # TRF map
map_cont <- readOGR("continents/continent.shp") # continent map for testing
# Exclude "Unplaced" taxon_status_description
test_data_1 <- test_data[!is.na(test_data$species) & !is.na(test_data$accepted_name_id),]
target_species <- unique(test_data_1$accepted_name_id)
#ptm <- proc.time() # Start the clock
# set up matrix to fill in results
# for biomes later...
#species_res <- matrix(nrow=length(target_species), ncol=length(grep("FID", names(map_trf))), data=NA)
#species_res <- data.frame(species=target_species, freq_in=rep(NA,length(target_species)))
species_res <- c()
for(i in 1:length(target_species)){
t <- test_data_1[test_data_1$accepted_name_id==target_species[i],] # subset to one species
u <- unique(paste(t$genus, t$species)) # get species binomial name
occurrence_records <- data.frame() # setup empty data frame
for(j in u){
occurrence_records <- rbind(occurrence_records, (BIEN_occurrence_species(j, only.new.world = F)))
}
######## Biome analysis ########
occurrence_records = occurrence_records[!is.na(occurrence_records[,"latitude"]) & !is.na(occurrence_records[,"longitude"]),] # excluding NA coordinates
occurrence_records = occurrence_records[!duplicated(occurrence_records),] # excluding repeated occurrences
# Q: Ignore species with one occurrence point (try both with and without this line)
coord <- occurrence_records[,c("latitude", "longitude")]
coordinates(coord) <- ~ longitude + latitude # convert dataframe to spatial points object
proj4string(coord) <- proj4string(map_trf) # match projection attributes of both objects
# Q: Ignore points close to biome boundaries (perhaps wait with this)
# trf_res <- over(coord, map_trf) # matches coordinates with shapefile polygons
# return the number of points in each polygon:
trf_res <- sapply(over(map_trf, geometry(coord), returnList = TRUE), length)
# store the results for each species:
species_res <- c(species_res, sum(trf_res)/length(coord))
# species_res$freq_in[species_res$species==target_species[i]] <- prop_species_trf
}
# plot(map)
# plot(coord, size=10, add=TRUE, col=c("blue", "red")[as.factor(occurrence_records$scrubbed_species_binomial)])
proc.time() - ptm # Stop the clock
############### Script examples. Do not run ############
# download maps
# read maps
map_trf <- readOGR("C:/Users/Emil/Documents/Aarhus_University/Master's_project/Shapefiles/Corlett and Primack/Archive/corlett_TRF_ext.shp") # Retrieve shapefiles from Wolf
# shapefiles for each of the 3 maps should be read, plotted and analyzed
# plot(world, map_trf) - ask Asger (orig. Danish note: "sporg Asger")
coord <- all_occurrence_records[,c("latitude", "longitude")]
# NOTE(review): the columns selected above are named latitude/longitude, but
# the formula below references lon + lat -- as written this will fail; confirm
# the intended column names before running this example block.
coordinates(coord) <- ~ lon + lat # convert dataframe to spatial points object
proj4string(coord) <- proj4string(map_trf) # match projection attributes of both objects
cont_res <- over(coord, map_trf) # matches coordinates with shapefile polygons
# check results
plot(map_trf)
plot(coord, size=10, add=TRUE, col=c("blue", "red")[as.factor(species)])
# world <- (directory(world_map.shp) #read hypothetical world map
# spTransform(world, crs(+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0)) # transform the world projection to match the TRF map
# plot(world, map) plot TRF-map over world map
### Niche assignment - Coming soon...
# Ratio of occurrence inside/outside biome to determine niche
# Division of tropical rainforest geographic regions (ex. Neotropics, Africa, Southeast-Asia)
############ Assignment of niche determined species on phylogenetic tree + analysis - Coming soon...
679b293f9e86d736dabf6afbc33d1e7e28ea665d | d4b18c2a188f6ad0120e07cbd90f1e6cfce84928 | /R/SimRedVsWhiteNoise.R | fd0da19060e891a1b1dcc62cb9fe540cd39bf0be | [] | no_license | kmanlove/BighornIPM | 6b01606af640eac9bce8f8c214e5bed8e422605c | 1f903391b6f76f52db4020a2689eda90c2bcf0d8 | refs/heads/master | 2016-09-06T19:38:25.349407 | 2015-11-12T02:02:46 | 2015-11-12T02:02:46 | 27,792,923 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,013 | r | SimRedVsWhiteNoise.R | SimRedVsWhiteNoise <- function(timesteps,
reps,
alpha.range,
gamma.range,
alpha.steps,
gamma.steps,
samples.to.use.in,
pop.size,
ages.init.in)
{
ages.init <- pop.size * ages.init.in
intro.cost.in <- .3
popsize.gam.2.alph.05 <- matrix(NA, ncol = timesteps, nrow = reps)
popsize.gam.2.alph.2 <- matrix(NA, ncol = timesteps, nrow = reps)
popsize.gam.2.alph1 <- matrix(NA, ncol = timesteps, nrow = reps)
unstr.popsize.gam.2.alph.05 <- matrix(NA, ncol = timesteps, nrow = reps)
unstr.popsize.gam.2.alph.2 <- matrix(NA, ncol = timesteps, nrow = reps)
unstr.popsize.gam.2.alph1 <- matrix(NA, ncol = timesteps, nrow = reps)
unstr.intros.popsize.gam.2.alph.05 <- matrix(NA, ncol = timesteps, nrow = reps)
unstr.intros.popsize.gam.2.alph.2 <- matrix(NA, ncol = timesteps, nrow = reps)
unstr.intros.popsize.gam.2.alph1 <- matrix(NA, ncol = timesteps, nrow = reps)
loglambda.gam.2.alph.05 <- matrix(NA, ncol = timesteps, nrow = reps)
loglambda.gam.2.alph.2 <- matrix(NA, ncol = timesteps, nrow = reps)
loglambda.gam.2.alph1 <- matrix(NA, ncol = timesteps, nrow = reps)
disstat.gam.2.alph.05 <- matrix(NA, ncol = timesteps, nrow = reps)
disstat.gam.2.alph.2 <- matrix(NA, ncol = timesteps, nrow = reps)
disstat.gam.2.alph1 <- matrix(NA, ncol = timesteps, nrow = reps)
disstat.intros.gam.2.alph.05 <- matrix(NA, ncol = timesteps, nrow = reps)
disstat.intros.gam.2.alph.2 <- matrix(NA, ncol = timesteps, nrow = reps)
disstat.intros.gam.2.alph1 <- matrix(NA, ncol = timesteps, nrow = reps)
for(i in 1:reps){
project.gam.2.alph.05 <- ProjectFun(johnson = F, timesteps = timesteps, intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init, alpha = .05, gamma = .05, samples.to.draw = samples.to.use.in, tot.chains = 3, joint.posterior.coda = ipm11.coda, posterior.names = posterior.names, fixed.start.time = T)
project.gam.2.alph.2 <- ProjectFun(johnson = F, timesteps = timesteps, intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init, alpha = .2, gamma = .05, samples.to.draw = samples.to.use.in, tot.chains = 3, joint.posterior.coda = ipm11.coda, posterior.names = posterior.names, fixed.start.time = T)
project.gam.2.alph1 <- ProjectFun(johnson = F, timesteps = timesteps, intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init, alpha = .9, gamma = .05, samples.to.draw = samples.to.use.in, tot.chains = 3, joint.posterior.coda = ipm11.coda, posterior.names = posterior.names, fixed.start.time = T)
popsize.gam.2.alph.05[i, ] <- project.gam.2.alph.05$tot.pop.size
popsize.gam.2.alph.2[i, ] <- project.gam.2.alph.2$tot.pop.size
popsize.gam.2.alph1[i, ] <- project.gam.2.alph1$tot.pop.size
# loglambda.gam.2.alph.05[i, ] <- project.gam.2.alph.05$log.lambda.s
# loglambda.gam.2.alph.2[i, ] <- project.gam.2.alph.2$log.lambda.s
# loglambda.gam.2.alph1[i, ] <- project.gam.2.alph1$log.lambda.s
# extract vector of disease statuses for each sim
disstat.gam.2.alph.05[i, ] <- sample(project.gam.2.alph.05$disease.status, length(project.gam.2.alph.05$disease.status), replace = F)
disstat.gam.2.alph.2[i, ] <- sample(project.gam.2.alph.2$disease.status, length(project.gam.2.alph.2$disease.status), replace = F)
disstat.gam.2.alph1[i, ] <- sample(project.gam.2.alph1$disease.status, length(project.gam.2.alph1$disease.status), replace = F)
unstr.project.gam.2.alph.05 <- UnstrProjectFun(johnson = F, timesteps = timesteps,
intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init,
alpha = .05, gamma = .05, samples.to.draw = samples.to.use.in,
tot.chains = 3, joint.posterior.coda = ipm11.coda,
posterior.names = posterior.names, fixed.start.time = T, states = disstat.gam.2.alph.05[i, ])
unstr.project.gam.2.alph.2 <- UnstrProjectFun(johnson = F, timesteps = timesteps,
intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init,
alpha = .2, gamma = .05, samples.to.draw = samples.to.use.in,
tot.chains = 3, joint.posterior.coda = ipm11.coda,
posterior.names = posterior.names, fixed.start.time = T, states = disstat.gam.2.alph.2[i, ])
unstr.project.gam.2.alph1 <- UnstrProjectFun(johnson = F, timesteps = timesteps,
intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init,
alpha = .05, gamma = .05, samples.to.draw = samples.to.use.in,
tot.chains = 3, joint.posterior.coda = ipm11.coda,
posterior.names = posterior.names, fixed.start.time = T, states = disstat.gam.2.alph1[i, ])
unstr.popsize.gam.2.alph.05[i, ] <- unstr.project.gam.2.alph.05$tot.pop.size
unstr.popsize.gam.2.alph.2[i, ] <- unstr.project.gam.2.alph.2$tot.pop.size
unstr.popsize.gam.2.alph1[i, ] <- unstr.project.gam.2.alph1$tot.pop.size
disstat.intros.gam.2.alph.05[i, ] <- ifelse(sample(project.gam.2.alph.05$disease.status, length(project.gam.2.alph.05$disease.status), replace = F) == "spillover", "spillover", "healthy")
disstat.intros.gam.2.alph.2[i, ] <- ifelse(sample(project.gam.2.alph.2$disease.status, length(project.gam.2.alph.2$disease.status), replace = F) == "spillover", "spillover", "healthy")
disstat.intros.gam.2.alph1[i, ] <- ifelse(sample(project.gam.2.alph1$disease.status, length(project.gam.2.alph1$disease.status), replace = F) == "spillover", "spillover", "healthy")
unstr.intros.project.gam.2.alph.05 <- UnstrProjectFun(johnson = F, timesteps = timesteps,
intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init,
alpha = .05, gamma = .05, samples.to.draw = samples.to.use.in,
tot.chains = 3, joint.posterior.coda = ipm11.coda,
posterior.names = posterior.names, fixed.start.time = T, states = disstat.intros.gam.2.alph.05[i, ])
unstr.intros.project.gam.2.alph.2 <- UnstrProjectFun(johnson = F, timesteps = timesteps,
intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init,
alpha = .2, gamma = .05, samples.to.draw = samples.to.use.in,
tot.chains = 3, joint.posterior.coda = ipm11.coda,
posterior.names = posterior.names, fixed.start.time = T, states = disstat.intros.gam.2.alph.2[i, ])
unstr.intros.project.gam.2.alph1 <- UnstrProjectFun(johnson = F, timesteps = timesteps,
intro.cost = intro.cost.in, sex.ratio = .5, ages.init = ages.init,
alpha = .05, gamma = .05, samples.to.draw = samples.to.use.in,
tot.chains = 3, joint.posterior.coda = ipm11.coda,
posterior.names = posterior.names, fixed.start.time = T, states = disstat.intros.gam.2.alph1[i, ])
unstr.intros.popsize.gam.2.alph.05[i, ] <- unstr.intros.project.gam.2.alph.05$tot.pop.size
unstr.intros.popsize.gam.2.alph.2[i, ] <- unstr.intros.project.gam.2.alph.2$tot.pop.size
unstr.intros.popsize.gam.2.alph1[i, ] <- unstr.intros.project.gam.2.alph1$tot.pop.size
print(i)
}
list.out <- list(popsize.gam.2.alph.05 = popsize.gam.2.alph.05,
popsize.gam.2.alph.2 = popsize.gam.2.alph.2,
popsize.gam.2.alph1 = popsize.gam.2.alph1,
unstr.popsize.gam.2.alph.05 = unstr.popsize.gam.2.alph.05,
unstr.popsize.gam.2.alph.2 = unstr.popsize.gam.2.alph.2,
unstr.popsize.gam.2.alph1 = unstr.popsize.gam.2.alph1,
unstr.intros.popsize.gam.2.alph.05 = unstr.intros.popsize.gam.2.alph.05,
unstr.intros.popsize.gam.2.alph.2 = unstr.intros.popsize.gam.2.alph.2,
unstr.intros.popsize.gam.2.alph1 = unstr.intros.popsize.gam.2.alph1
)
return(list.out)
}
|
280c6273ce80a43fbbd691eec64d4e2b764a69bc | 5404b977568c52fb729d59223eeebaf2e99dc497 | /R/collapse.R | 52e86c31c45d1b132bafa04ac3e18ee597ac3906 | [
"MIT"
] | permissive | AlexaBennett/featuretable | 6a9d8c1c58d729cd135e74fd03565376615b5c11 | 083f3f506a68762c288fca85a6718eae4d1b2568 | refs/heads/master | 2023-04-18T07:43:20.369518 | 2021-05-04T04:19:31 | 2021-05-04T04:19:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,662 | r | collapse.R | #' Collapse samples/observations/rows or features/columns based on metadata.
#'
#' @description
#' Collapse samples/observations/rows or features/columns based on metadata. For
#' features/samples, any feature/sample with the same metadata for selected
#' category will be grouped.
#'
#' @details
#' Grouping is done by summing counts for each category.
#'
#' If you to keep features/samples with \code{NA} for the \code{by} category,
#' pass \code{keep_na = TRUE}. Then the NA will become a new factor in the
#' collapsed data.
#'
#' Currently, you can only collapse on one metadata column at a time :(
#'
#' @examples
#' data(ft)
#'
#' # You can direcctly access variables in the metadata.
#' ft$collapse("features", Color)
#' ft$collapse_features(Color)
#'
#' # Or refer to them by strings.
#' ft$collapse("features", "Color")
#' ft$collapse_features("Color")
#'
#' # You can collapse samples on metadata as well.
#' ft$collapse("samples", Location)
#' ft$collapse_samples(Location)
#'
#' # And you can use the s3 style functions.
#' collapse(ft, "samples", Location)
#' collapse_samples(ft, Location)
#'
#' collapse(ft, "features", Shape)
#' collapse_features(ft, Shape)
#'
#' # For now, you can't do more than one variable at a time. Sorry!
#' \dontrun{
#' ft$collapse_samples(c("Location", "Season"))
#' }
#'
#' @param ft A FeatureTable. (Only used in the \code{S3} version.)
#' @param margin Margin to collapse. E.g., \code{1} or \code{"samples"}
#' indicates rows, \code{2} or \code{"features"} indicates columns.
#' @param by The data column to collapse by.
#' @param keep_na Do you want to group all NAs together (TRUE) or drop them
#' (FALSE, the defult)?
#' @param keep_hierarchy Do you want to keep all data above the level specified
#' with the \code{by} argument? Pass \code{TRUE} to this parameter if you
#' know some of your data is hierarchical and you want to treat it as such.
#' See vignettes for details.
#'
#' @return A new FeatureTable with the specified margin collapsed on the
#' specified metadata column.
#'
#' @export
collapse <- function(ft, ...) UseMethod("collapse")  # S3 dispatch on class(ft)
#' @rdname collapse
#' @export
collapse.FeatureTable <- function(ft, ...) ft$collapse(...)  # delegate to object's own method
#' @rdname collapse
#' @export
collapse_features <- function(ft, ...) UseMethod("collapse_features")  # S3 generic
#' @rdname collapse
#' @export
collapse_features.FeatureTable <- function(ft, ...) ft$collapse_features(...)  # delegate
#' @rdname collapse
#' @export
collapse_samples <- function(ft, ...) UseMethod("collapse_samples")  # S3 generic
#' @rdname collapse
#' @export
collapse_samples.FeatureTable <- function(ft, ...) ft$collapse_samples(...)  # delegate
|
11c7b8cd2ea363dd88eaba3397b2e2b3d10bcfb1 | 0d4c1d4a347fbf9202d21aa1710a3b056711cedf | /man/stub.Rd | 211e892bd9ce9e59fb705a71b652d47242160285 | [] | no_license | armenic/reporter | 6a5756977da13340f7bf80cd63d13d340d97d8f9 | 00dc496ca93afef4b6e05f0f24a74dc935e91123 | refs/heads/master | 2023-02-18T06:49:22.454126 | 2021-01-19T14:55:15 | 2021-01-19T14:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,174 | rd | stub.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_spec.R
\name{stub}
\alias{stub}
\title{Defines a report stub}
\usage{
stub(x, vars, label = "", label_align = NULL, align = "left", width = NULL)
}
\arguments{
\item{x}{The table spec.}
\item{vars}{A vector of quoted or unquoted variable names from
which to create the stub. If you want to pass an R variable of names,
escape the values with double curly braces, i.e. \code{vars = {{myvar}}}.
The curly brace escape is useful when writing functions that construct
reports dynamically.}
\item{label}{The label for the report stub. The default label is an empty
string.}
\item{label_align}{The alignment for the stub column label.
Valid values are 'left', 'right', 'center', and 'centre'. Default follows
the \code{align} parameter.}
\item{align}{How to align the stub column. Valid values are 'left',
'right', 'center', and 'centre'. Default is 'left'.}
\item{width}{The width of the stub, in report units of measure.}
}
\value{
The modified table spec.
}
\description{
Combine columns into a nested report stub. The report stub
is a common feature of statistical reports. The stub is created with
the \code{stub} function, and frequently appears in combination with the
\code{label_row} and \code{indent} parameters from the
\code{\link{define}} function. These elements work together to define
the appearance of the stub.
}
\details{
The table stub is a nested set of labels that identify rows
on the table. The stub is created by combining two or more columns into
a single stub column. The relationship between the columns is typically
visualized as a hierarchy, with lower level concepts indented under
higher level concepts.
A typical stub is created with the following steps:
\itemize{
\item Prepare the data.
\item Create the table object.
\item Define the stub on the table using the \code{stub} function,
and identify the variables to be combined.
\item Identify higher level concepts with the \code{label_row} parameter
on the \code{\link{define}} function.
\item Identify lower level concepts using the \code{indent} parameter
on the \code{\link{define}} function.
}
The stub will be automatically added as an identity variable on the report,
and will always appear as the leftmost column. There can only be one stub
defined on a report.
If you wish to create multiple levels of nested labels, use
an NA value to prevent lower level labels from overwriting
higher level labels.
For example, the following data:
\preformatted{
continent country state_province
"North America" NA NA
"North America" "Canada" NA
"North America" "Canada" "Ontario"
"North America" "USA" NA
"North America" "USA" "New York"
"South America" NA NA
"South America" "Brazil" NA
"South America" "Brazil" "Amazonas"
"South America" "Brazil" "Bahia"
}
Will produce the following stub:
\preformatted{
North America
Canada
Ontario
USA
New York
South America
Brazil
Amazonas
Bahia
}
With the following code:
\preformatted{
tbl <- create_table(dat) \%>\%
stub(c(continent, country, state_province)) \%>\%
define(country, indent = .25) \%>\%
define(state_province, indent = .5)
}
}
\examples{
library(reporter)
library(magrittr)
# Create temporary path
tmp <- file.path(tempdir(), "stub.txt")
# Read in prepared data
df <- read.table(header = TRUE, text = '
var label A B
"ampg" "N" "19" "13"
"ampg" "Mean" "18.8 (6.5)" "22.0 (4.9)"
"ampg" "Median" "16.4" "21.4"
"ampg" "Q1 - Q3" "15.1 - 21.2" "19.2 - 22.8"
"ampg" "Range" "10.4 - 33.9" "14.7 - 32.4"
"cyl" "8 Cylinder" "10 ( 52.6\%)" "4 ( 30.8\%)"
"cyl" "6 Cylinder" "4 ( 21.1\%)" "3 ( 23.1\%)"
"cyl" "4 Cylinder" "5 ( 26.3\%)" "6 ( 46.2\%)"')
# Create table
tbl <- create_table(df, first_row_blank = TRUE) \%>\%
stub(c(var, label)) \%>\%
define(var, blank_after = TRUE, label_row = TRUE,
format = c(ampg = "Miles Per Gallon", cyl = "Cylinders")) \%>\%
define(label, indent = .25) \%>\%
define(A, label = "Group A", align = "center", n = 19) \%>\%
define(B, label = "Group B", align = "center", n = 13)
# Create report and add content
rpt <- create_report(tmp, orientation = "portrait") \%>\%
page_header(left = "Client: Motor Trend", right = "Study: Cars") \%>\%
titles("Table 1.0", "MTCARS Summary Table") \%>\%
add_content(tbl) \%>\%
footnotes("* Motor Trend, 1974") \%>\%
page_footer(left = Sys.time(),
center = "Confidential",
right = "Page [pg] of [tpg]")
# Write out report
write_report(rpt)
# View report in console
writeLines(readLines(tmp, encoding = "UTF-8"))
# Client: Motor Trend Study: Cars
# Table 1.0
# MTCARS Summary Table
#
# Group A Group B
# (N=19) (N=13)
# -------------------------------------------
#
# Miles Per Gallon
# N 19 13
# Mean 18.8 (6.5) 22.0 (4.9)
# Median 16.4 21.4
# Q1 - Q3 15.1 - 21.2 19.2 - 22.8
# Range 10.4 - 33.9 14.7 - 32.4
#
# Cylinders
# 8 Cylinder 10 ( 52.6\%) 4 ( 30.8\%)
# 6 Cylinder 4 ( 21.1\%) 3 ( 23.1\%)
# 4 Cylinder 5 ( 26.3\%) 6 ( 46.2\%)
#
# ...
#
#
# * Motor Trend, 1974
#
# 2020-08-30 03:50:02 Confidential Page 1 of 1
#
}
\seealso{
Other table:
\code{\link{column_defaults}()},
\code{\link{create_table}()},
\code{\link{define}()},
\code{\link{print.table_spec}()},
\code{\link{spanning_header}()}
}
\concept{table}
|
1d2c1d6ad8578382f383116e68f8d7dee8a7ea0a | 09efa70ba24c57fa26432d744b6a07fd217d60d5 | /notar.2.3.r | 8c38fafc68d4b54a3f1255dc9719ca15f83072db | [] | no_license | diogro/ex-R | 84412d8e6226c9c9467a728a5dfc65e2aca24402 | 7e9c3d1d56e38d869340c1a764eae66bbd796d1e | refs/heads/master | 2022-04-30T14:59:11.829918 | 2022-04-11T18:49:57 | 2022-04-11T18:49:57 | 8,480,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 219 | r | notar.2.3.r | luz = c( 9839,10149,10486,10746,11264,11684,12082,12599,13004,13350,13717,14052)
# Summary statistics of per-interval electricity consumption.
# `luz` (defined above) holds cumulative meter readings; successive
# differences give the consumption in each interval.
luz.cons <- diff(luz)       # consumption per interval
luz.m <- mean(luz.cons)     # average consumption
luz.md <- median(luz.cons)  # median consumption
luz.v <- var(luz.cons)      # sample variance
# range() returns c(min, max) in one call -- same value as c(min(.), max(.))
luz.range <- range(luz.cons)
|
bba1eadd89c86e047da0022b0ee64a0408bcc8b9 | 6d197fdc3050a6f5305b57a20e7e19f8c3fbe757 | /Project.R | dd9b64f97dcc18785ca3416d5edb34f949655f1e | [] | no_license | akshat0905/Classification-algorithm-with-R | 719cb5492e14ccde156faa10dce8f36b31413af9 | 448fc2b1bf8c398931d82370a84e407a88cc3c39 | refs/heads/master | 2020-05-03T05:38:52.051524 | 2019-03-29T18:08:18 | 2019-03-29T18:08:18 | 178,452,744 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,048 | r | Project.R | #For finding out the best predictions in a multi-class classification in such a case when
#where we want to build a model for predicting people who were readmitted in less than 30 days
# --- Packages ---------------------------------------------------------------
library(class)         # knn(), knn.cv()
library(caret)         # confusionMatrix()
library(ggplot2)
library(rpart)         # decision trees
library(rpart.plot)
library(randomForest)
library(e1071)         # svm()
set.seed(123)
# Hospital-readmission data; the response column is `readmitted`.
path="C:\\Users\\Admin\\Documents\\R Programming\\Project_mock_r\\dataset_diabetes\\diabetic_data.csv"
diab=read.csv(path,header=T)
# Quick look at three drug columns that are dropped later in the script.
table(diab$glimepiride.pioglitazone)
table(diab$metformin.rosiglitazone)
table(diab$metformin.pioglitazone)
# Performing EDA
# Columns containing literal NA values.
col_name = colnames(diab) [apply(diab, 2, function(n) any(is.na(n)))]
if(length(col_name) > 0) print("NA's present") else print("No NA's")
print(col_name)
# Columns containing empty strings.
col_name = colnames(diab) [apply(diab, 2, function(n) any(n == ""))]
if(length(col_name) > 0) print("Blanks present") else print("No Blanks")
# Columns using '?' as the missing-value marker (this dataset's convention).
col_name = colnames(diab) [apply(diab, 2, function(n) any(n=='?'))]
print(col_name)
plot(diab$gender, main = "gender distribution")
plot(diab$race, main="race")
#fixing missing values
levels(diab$race)[levels(diab$race)=="?"] <- "Unk" # converted missing values to other
levels(diab$gender)[levels(diab$gender)=="Unknown/Invalid"] = "Female" # low number of unknown/Invalid so converted to mode
#reducing level based on IDS mapping
# Admission type: merge sparse/synonymous numeric codes into broader groups.
diab$admission_type_id=as.factor(diab$admission_type_id)
levels(diab$admission_type_id)[levels(diab$admission_type_id)=='6' | levels(diab$admission_type_id)=='8']= '5'
levels(diab$admission_type_id)[levels(diab$admission_type_id)=='1' | levels(diab$admission_type_id)=='2' | levels(diab$admission_type_id)=='4']= '7'
# Admission source: same idea -- collapse many source codes to a few groups.
diab$admission_source_id=as.factor(diab$admission_source_id)
#diab$time_in_hospital=as.factor(diab$time_in_hospital) # converted it to factor variable because of only 14 values present
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='15' | levels(diab$admission_source_id)=='17' | levels(diab$admission_source_id)=='20' | levels(diab$admission_source_id)=='21']='9'
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='2' | levels(diab$admission_source_id)=='3']='1'
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='11' | levels(diab$admission_source_id)=='23' | levels(diab$admission_source_id)=='24']='8'
levels(diab$admission_source_id)[levels(diab$admission_source_id)=='12' | levels(diab$admission_source_id)=='13' | levels(diab$admission_source_id)=='14']='7'
# Everything not already merged into 1/7/8/9 becomes group '4'.
levels(diab$admission_source_id)[levels(diab$admission_source_id)!='1' & levels(diab$admission_source_id)!='8' & levels(diab$admission_source_id)!='7'& levels(diab$admission_source_id)!='9']='4'
table(diab$admission_source_id)
# Discharge disposition: collapse codes; note '19','20','21' merge into '11',
# and rows with id '11' are removed further down before modelling.
diab$discharge_disposition_id=as.factor(diab$discharge_disposition_id)
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id)=='13']='1'
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id) %in% c('19','20','21')]='11'
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id) %in% c('25','26')]='18'
levels(diab$discharge_disposition_id)[levels(diab$discharge_disposition_id) %in% c('3','4','5','6','8','12','15','10','14','16','17','22','23','24','30','27','28','29')]='2'
table(diab$discharge_disposition_id)
# Inspect a few columns before further cleaning.
levels(diab$medical_specialty)
str(diab$num_lab_procedures)
str(diab$num_medications)
str(diab$num_procedures)
100*prop.table(table(diab$medical_specialty))
str(diab)
str(diab$num_medications)
# removing columns which are not required
# Bug fix: the table() sanity checks originally ran AFTER the columns were
# NULLed, so table(diab$citoglipton) was table(NULL), which errors with
# "nothing to tabulate".  Tabulate first, then drop.
table(diab$citoglipton)  # inspect before dropping
table(diab$examide)      # inspect before dropping
diab$encounter_id <- NULL  # pure identifier, no predictive value
diab$patient_nbr <- NULL   # pure identifier, no predictive value
#diab$weight = NULL
#diab$payer_code = NULL
#diab$medical_specialty = NULL
diab$citoglipton <- NULL
diab$examide <- NULL
ncol(diab)
str(diab)
# NOTE(review): columns 8:13 are addressed by position -- confirm these are
# still the intended numeric columns if the column set changes upstream.
cor(diab[8:13])
# Diagnosis recoding (diag_1 .. diag_3).
# Collapse raw ICD-9 codes (stored as factor levels) into broad diagnosis
# groups.  The remapping works purely on levels(), so all rows update at once.
# The identical grouping was previously copy-pasted three times; it is now a
# single helper applied to each diagnosis column, with the same result.
group_icd9 <- function(diag) {
  levels(diag)[levels(diag) %in% c(390:459, 785)] <- "Circulatory"
  levels(diag)[levels(diag) %in% c(460:519, 786)] <- "Respiratory"
  levels(diag)[levels(diag) %in% c(520:579, 787)] <- "Digestive"
  # 250.xx sub-codes; the numeric sequence is coerced to character for matching
  levels(diag)[levels(diag) %in% seq(from = 250, to = 250.99, by = 0.01)] <- "Diabetes"
  levels(diag)[levels(diag) %in% c(800:999)] <- "Injury"
  levels(diag)[levels(diag) %in% c(710:739)] <- "Musculoskeletal"
  levels(diag)[levels(diag) %in% c(580:629, 788)] <- "Genitourinary"
  levels(diag)[levels(diag) %in% c(140:239, 780, 781, 784, 790:799, 240:249,
                                   251:279, 680:709, 782, 001:139)] <- "Neoplasms"
  Defined <- c("Circulatory", "Respiratory", "Digestive", "Diabetes",
               "Injury", "Musculoskeletal", "Genitourinary", "Neoplasms")
  # Anything not captured above (e.g. E/V codes) becomes "Other".
  levels(diag)[!(levels(diag) %in% Defined)] <- "Other"
  diag
}
#Diagnosis 1
table(diab$diag_1)  # raw ICD-9 codes: many levels before grouping
diab$diag_1 <- group_icd9(diab$diag_1)
table(diab$diag_1)  # Grouped levels by ICD9 codes
#Diagnosis 2
table(diab$diag_2)
diab$diag_2 <- group_icd9(diab$diag_2)
table(diab$diag_2)  # Grouped levels by ICD9 codes
#Diagnosis 3
table(diab$diag_3)
diab$diag_3 <- group_icd9(diab$diag_3)
table(diab$diag_3)  # Grouped levels by ICD9 codes
# Replace this dataset's '?' missing-value marker with an explicit "Unk" level
# for the remaining high-missingness factor columns.
table(diab$payer_code)
levels(diab$payer_code)[levels(diab$payer_code) == '?'] <- 'Unk'
table(diab$weight)
levels(diab$weight)[levels(diab$weight) == '?'] <- "Unk"
# Bug fix: the original tabulated train$medical_specialty, but `train` is not
# created until the train/test split further down; inspect `diab` instead.
table(diab$medical_specialty)
levels(diab$medical_specialty)[levels(diab$medical_specialty) == '?'] <- 'Unk'
# Keep these ten specialty levels (incl. 'Unk'); lump everything else into
# "Other".  Equivalent to the original chain of ten != comparisons.
keep_specialties <- c('Nephrology', 'Unk', 'Orthopedics',
                      'Orthopedics-Reconstructive', 'Radiologist',
                      'Family/GeneralPractice', 'Surgery-General',
                      'Emergency/Trauma', 'Cardiology', 'InternalMedicine')
levels(diab$medical_specialty)[!(levels(diab$medical_specialty) %in% keep_specialties)] <- "Other"
#reducing levels
levels(diab$A1Cresult)
table(diab$A1Cresult)
#Removing Rows
# Drop encounters whose discharge code is '11' (into which '19','20','21'
# were merged above).  NOTE(review): presumably these are patients who cannot
# be readmitted (e.g. deceased) -- confirm against the IDS mapping.
diab = diab[diab$discharge_disposition_id != '11',]
ncol(diab)
nrow(diab)
#Building Training and Testing models
set.seed(123)
# Shuffle rows, then take a 70/30 train/test split.
grp = runif(nrow(diab))
diab = diab[order(grp),]
ind = sample(seq_len(nrow(diab)), floor(nrow(diab)*0.7) )
train = diab[ind,]
test = diab[-ind,]
# Predictors are columns 1:45; column 46 is the response.
# NOTE(review): positional indexing -- breaks silently if the column set
# changes upstream; confirm column 46 is `readmitted`.
train_x = train[,1:45]
train_y = train[,46]
head(train_x,3)
head(train_y,3)
str(diab)
ncol(diab)
# Model 1: random forest on all remaining predictors.
rf1 = randomForest(train_x, factor(train_y) )
summary(rf1)
str(diab)
pdct_rf1 = predict(rf1, test)
pdct_rf1
table(predicted=pdct_rf1,actual=test$readmitted)
# NOTE(review): `positive = "positive"` does not name a level of readmitted;
# caret ignores `positive` for multi-class outcomes -- confirm intent.
confusionMatrix(pdct_rf1,test$readmitted,positive = "positive")
ncol(diab)
colnames(diab)
#Feature selection
importance(rf1)
varImpPlot(rf1)
#model number 2
# Drop two drug columns (tabulated at the top of the script) from the
# predictors -- and from train/test for consistency -- before refitting.
train_x$metformin.pioglitazone=NULL
train$metformin.pioglitazone=NULL
test$metformin.pioglitazone=NULL
train_x$metformin.rosiglitazone=NULL
train$metformin.rosiglitazone=NULL
test$metformin.rosiglitazone=NULL
ncol(train)
ncol(train_x)
ncol(test)
head(train_x,3)
head(train_y,3)
# Model 2: random forest on the reduced predictor set.
rf2 = randomForest(train_x, factor(train_y) )
summary(rf2)
pdct_rf2 = predict(rf2, test)
pdct_rf2
table(predicted=pdct_rf2,actual=test$readmitted)
confusionMatrix(pdct_rf2,test$readmitted,
                positive = "positive")
# it gives slightly better accuracy, better sensitivity for specifically class <30
importance(rf2)
# NOTE(review): the two lines below inspect rf1, not rf2 -- looks like a
# copy-paste slip; confirm whether rf2 was intended.
varImpPlot(rf1)
varUsed(rf1, by.tree = F, count=F)
#Feature selection
# Drop four more drug columns after inspecting the importance output above.
train_x$glimepiride.pioglitazone=NULL
train$glimepiride.pioglitazone=NULL
test$glimepiride.pioglitazone=NULL
train_x$acetohexamide=NULL
train$acetohexamide=NULL
test$acetohexamide=NULL
train_x$troglitazone=NULL
train$troglitazone=NULL
test$troglitazone=NULL
train_x$glipizide.metformin=NULL
train$glipizide.metformin=NULL
test$glipizide.metformin=NULL
ncol(train)
# Model 3: random forest after the second round of column pruning.
rf3 = randomForest(train_x, factor(train_y) )
summary(rf3)
pdct_rf3 = predict(rf3, test)
pdct_rf3
table(predicted=pdct_rf3,actual=test$readmitted)
confusionMatrix(pdct_rf3,test$readmitted,
                positive = "positive")
#model 3 the accuracy increases
importance(rf3)
# NOTE(review): `trogl` is not a column name (troglitazone was already removed
# above), so the next three lines are no-ops; the refit below then overwrites
# rf1 with what the trailing comments call "model 4".
train_x$trogl=NULL
train$troglitazone=NULL
test$troglitazone=NULL
rf1 = randomForest(train_x, factor(train_y) )
summary(rf1)
pdct_rf1 = predict(rf1, test)
pdct_rf1
table(predicted=pdct_rf1,actual=test$readmitted)
confusionMatrix(pdct_rf1,test$readmitted,
                positive = "positive")
# NOTE(review): importance() is called on rf2 here while the plots below use
# the refit rf1 -- confirm which model was meant.
importance(rf2)
varImpPlot(rf1)
varUsed(rf1, by.tree = F, count=F)
ncol(diab)
#model 4 accuracy decreases significantly by 10%
#NO
# Fresh 70/30 split (no new seed is set, so it differs from the first split).
ind = sample(seq_len(nrow(diab)), floor(nrow(diab)*0.7) )
train = diab[ind,]
test = diab[-ind,]
ncol(diab)
ncol(train_x)
# NOTE(review): train_x / train_y are NOT rebuilt from the new split, so rf3
# is trained on the OLD training rows but evaluated on the NEW test rows,
# which can overlap -- likely unintended leakage; confirm.
rf3 = randomForest(train_x, factor(train_y) )
summary(rf3)
pdct_rf3 = predict(rf3, test)
pdct_rf3
table(predicted=pdct_rf3,actual=test$readmitted)
confusionMatrix(pdct_rf3,test$readmitted,
                positive = "positive")
#--------------------------------------
#KNN
# error in model regarding NAs in coercion: knn() needs numeric predictors;
# the many factor columns in `diab` are coerced with NAs, which is the error
# the original note refers to.
ncol(diab)
# Fresh 70/30 split (overwrites the earlier train/test objects).
sample_size <- floor(0.7*nrow(diab))
sample_ind <- sample(seq_len(nrow(diab)), sample_size)
train <- diab[sample_ind,]
test <- diab[-sample_ind,]
ncol(train)
ncol(test)
# Separate the response from the predictors.
traintarget <- train$readmitted
testtarget <- test$readmitted
train$readmitted <- NULL
test$readmitted <- NULL
str(diab)
# Leave-one-out cross-validated 3-NN on the training set.
model_knncv <- knn.cv(train, traintarget, k=3)
# Bug fix: the original assigned into cv_accuracy[i] with neither
# `cv_accuracy` nor `i` defined (leftover from a removed loop), which errors.
# Compute the scalar CV accuracy directly.
cv_accuracy <- length(which(model_knncv == traintarget)) / length(model_knncv)
cv_accuracy
predict_target <- knn(train, test, traintarget, k=3)
#decision tree
# poorest model with no True Positives
# NOTE(review): the KNN section above removed `readmitted` from both `train`
# and `test` (train$readmitted=NULL), so the two table() calls below fail and
# rpart(readmitted ~ .) cannot find the response in `train`.  Rebuild
# train/test from `diab` before this section, or run this section first.
table(test$readmitted)
table(train$readmitted)
dt_readmitted = rpart(readmitted ~., method="class", data=train)
pdct = predict(dt_readmitted, test, type="class")
ncol(test)
# NOTE(review): test[,40] addresses the response by position -- fragile after
# the repeated column drops above; confirm it is still the readmitted column.
confusionMatrix(test[,40],pdct)
#SVM model
# using radial kernel
model_lin = svm(readmitted~., data=train, kernel="radial")
summary(model_lin)
#unable to process the model using this algorithm
4cea159e909612e7c45b08c8e71139001dd44183 | ca6ee7379ce9c2dcd7249d372d3b08ed3964fcf0 | /sourceScripts/createMutationMatrix.R | 1b6a2f1cafe076f2ceb47f48fa3f031ba7c4da0c | [] | no_license | weiyi-bitw/synapseCCLE | 5457600933de0163c0437af6e0fe241dac8fac92 | ed9f7c17f15e515401566a093936f4778b22c015 | refs/heads/master | 2020-04-12T20:06:34.715798 | 2013-06-07T16:47:02 | 2013-06-07T16:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 565 | r | createMutationMatrix.R |
# Build a binary gene-by-sample mutation matrix (1 = gene mutated in
# sample) from a CCLE MAF table. loadClin() is defined elsewhere.
maf = loadClin("CCLE_hybrid_capture1650_hg19_NoCommonSNPs_NoNeutralVariants_CDS_2012.05.07.maf", sep='\t')
mutSamples = sort(unique(maf[,"Tumor_Sample_Barcode"]))
# One row per mutation event: (gene, sample).
mutEvent = cbind(rownames(maf), maf[,"Tumor_Sample_Barcode"])
mutGenes = sort(unique(mutEvent[,1]))
mutmat = matrix(0, ncol=length(mutSamples), nrow=length(mutGenes), dimnames=list(mutGenes, mutSamples))
m = nrow(mutEvent)
# seq_len() is safe on an empty MAF (1:m would yield c(1, 0)).
for(j in seq_len(m)){
	if(j %% 100 == 0) message(j, " / ", m, "...")
	mutmat[mutEvent[j, 1], mutEvent[j,2]] = 1
}
# paste() around a single literal was a no-op; write the name directly.
save(mutmat, file="ccle.mutmat.rda")
|
ece4c2850f50c4aec0f343580c66c2b277ffd919 | 189d693b09a7576bec04ea3e336450d01d6fe237 | /diversityAnalyses/windowsOfOverlap.R | 4aaf47793022a467580de974bbbd0f8bcd43bc26 | [] | no_license | AbigailShockey/sputum | 80c1a0daa8f2328cf7a1ce67d035c774364e78a3 | d297c7a391f1a527cf22e4a6b0f6045d6a7eb4b9 | refs/heads/master | 2020-04-23T17:16:13.237640 | 2019-11-05T22:48:56 | 2019-11-05T22:48:56 | 171,325,218 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,264 | r | windowsOfOverlap.R | ######### Overlap in windows of extreme nucleotide diversity
library(ggplot2)
library(magrittr)
library(dplyr)
library(gplots)
library(ggthemes)
library(reshape2)
library(tidyverse)
library(ggsci)
# NOTE(review): loading plyr after dplyr masks several dplyr verbs; this
# script only uses base functions afterwards, but keep the order in mind.
library(plyr)
### Read in sliding-window tables (long format)
# Spell out TRUE/FALSE: T and F are reassignable and unsafe in scripts.
inter.intra.df.l <- read.table("/Users/abbas/Documents/analysis_05.07.18/190212_interIntra_windowsLong.txt",
                               header = TRUE,
                               sep = "\t",
                               na.strings = NA,
                               stringsAsFactors = FALSE)
# as.tibble() is deprecated; as_tibble() is the supported spelling.
inter.intra.df.l <- as_tibble(inter.intra.df.l)
### stat as character
inter.intra.df.l$Stat <- as.character(inter.intra.df.l$Stat)
### patient as factor
inter.intra.df.l$Patient <- factor(inter.intra.df.l$Patient,
                                   levels=c("Patient 2","Patient 3","Patient 4","Patient 5",
                                            "Patient 7","Patient 8","Patient 9","Patient 10",
                                            "Patient 11","Patient 14","Patient 21","Patient 22",
                                            "Patient 23","Patient 35-Sample 1","Patient 35-Sample 2","Patient 35-Sample 3"))
### list of patients
patients <- c(unique(as.character(inter.intra.df.l$Patient)))
### inter-host samples
mtb.patients <- patients[1:13]
### intra-host samples
mbovis.patients <- patients[14:16]
### finite values only: drop rows with NA/Inf in either diversity column
inter.intra.df.l <- inter.intra.df.l[apply(inter.intra.df.l[,c("sput.val", "cult.val")], 1, function(x) all(is.finite(x))),]
### inter-host analysis, sputum
sig.win.sput <- NULL
### calculate z-score for nucleotide diversity in each window per patient
# For each M. tuberculosis patient: z-score the sputum pi values across
# windows, convert to normal-tail p-values, FDR-adjust, and keep windows
# significant at 0.05.
for (x in mtb.patients) {
  df.patient <- inter.intra.df.l[which(as.character(inter.intra.df.l$Patient) == x & inter.intra.df.l$Stat == "pi"),]
  z <- scale(df.patient$sput.val, center = TRUE, scale = TRUE)
  # NOTE(review): pnorm(-abs(z)) is a one-tailed p-value; confirm that a
  # two-tailed test (2 * pnorm(-abs(z))) was not intended.
  pval = pnorm(-abs(z))
  pval.adj <- p.adjust(pval, method = "fdr", n = length(pval))
  pval.adj <- as.vector(pval.adj)
  if (any(pval.adj < 0.05, na.rm = T)) {
    sig.val <- data.frame(window = df.patient$window[which(pval.adj < 0.05)], sput.val = df.patient$sput.val[which(pval.adj < 0.05)],pval.adj = pval.adj[which(pval.adj < 0.05)])
    sig.val$patient <- x
    sig.win.sput <- rbind(sig.win.sput, sig.val)
  }
}
# Tally how many patients share each significant window; keep shared ones.
n.occur.s <- data.frame(table(as.numeric(sig.win.sput$window)))
n.occur.sig.s <- n.occur.s[n.occur.s$Freq > 1,]
n.occur.sig.s <- n.occur.sig.s[order(n.occur.sig.s$Var1),]
### inter-host analysis, culture
# Same procedure as the sputum block above, applied to the culture values.
sig.win.cult <- NULL
for (x in mtb.patients) {
  df.patient <- inter.intra.df.l[which(as.character(inter.intra.df.l$Patient) == x & inter.intra.df.l$Stat == "pi"),]
  z <- scale(df.patient$cult.val, center = TRUE, scale = TRUE)
  pval = pnorm(-abs(z))
  pval.adj <- p.adjust(pval, method = "fdr", n = length(pval))
  pval.adj <- as.vector(pval.adj)
  if (any(pval.adj < 0.05, na.rm = T)) {
    sig.val <- data.frame(window = df.patient$window[which(pval.adj < 0.05)],
                          cult.val = df.patient$cult.val[which(pval.adj < 0.05)],
                          pval.adj = pval.adj[which(pval.adj < 0.05)])
    sig.val$patient <- x
    sig.win.cult <- rbind(sig.win.cult, sig.val)
  }
}
# Windows significant in more than one patient.
n.occur.c <- data.frame(table(as.numeric(sig.win.cult$window)))
n.occur.sig.c <- n.occur.c[n.occur.c$Freq > 1,]
n.occur.sig.c <- n.occur.sig.c[order(n.occur.sig.c$Var1),]
# Count of windows shared by at least two patients.
length(n.occur.c$Var1[n.occur.c$Freq > 1])
### intra-host analysis, sputum
# Same z-score / FDR procedure as the inter-host blocks, restricted to the
# M. bovis (intra-host) samples.
n.occur.s <- NULL
sig.win.sput <- NULL
for (x in mbovis.patients) {
  df.patient <- inter.intra.df.l[which(as.character(inter.intra.df.l$Patient) == x & inter.intra.df.l$Stat == "pi"),]
  z <- scale(df.patient$sput.val, center = TRUE, scale = TRUE)
  pval = pnorm(-abs(z))
  pval.adj <- p.adjust(pval, method = "fdr", n = length(pval))
  pval.adj <- as.vector(pval.adj)
  if (any(pval.adj < 0.05, na.rm = TRUE)) {
    sig.val <- data.frame(window = df.patient$window[which(pval.adj < 0.05)], sput.val = df.patient$sput.val[which(pval.adj < 0.05)],pval.adj = pval.adj[which(pval.adj < 0.05)])
    sig.val$patient <- x
    sig.win.sput <- rbind(sig.win.sput, sig.val)
  }
}
n.occur.s <- data.frame(table(as.character(sig.win.sput$window)))
n.occur.sig.s <- n.occur.s[n.occur.s$Freq > 1,]
# BUG FIX: was `n.occur.sig[...]`, an object that is never defined in this
# script; the intended object is n.occur.sig.s (cf. the inter-host blocks).
n.occur.sig.s <- n.occur.sig.s[order(n.occur.sig.s$Var1),]
### intra-host analysis, culture
n.occur.c <- NULL
sig.win.cult <- NULL
for (x in mbovis.patients) {
  # BUG FIX: this loop filtered `log.df`, which is never created in this
  # script; use inter.intra.df.l like the three parallel blocks above.
  df.patient <- inter.intra.df.l[which(as.character(inter.intra.df.l$Patient) == x & inter.intra.df.l$Stat == "pi"),]
  z <- scale(df.patient$cult.val, center = TRUE, scale = TRUE)
  pval = pnorm(-abs(z))
  pval.adj <- p.adjust(pval, method = "fdr", n = length(pval))
  pval.adj <- as.vector(pval.adj)
  if (any(pval.adj < 0.05, na.rm = TRUE)) {
    sig.val <- data.frame(window = df.patient$window[which(pval.adj < 0.05)], cult.val = df.patient$cult.val[which(pval.adj < 0.05)],pval.adj = pval.adj[which(pval.adj < 0.05)])
    sig.val$patient <- x
    sig.win.cult <- rbind(sig.win.cult, sig.val)
  }
}
n.occur.c <- data.frame(table(as.character(sig.win.cult$window)))
n.occur.sig.c <- n.occur.c[n.occur.c$Freq > 1,]
n.occur.sig.c <- n.occur.sig.c[order(n.occur.sig.c$Var1),]
|
a459cca7abe7d4b5765a18011ba1f960ef9dc819 | ed8c18f106bc266d1a14d5c38d0657aafcfee809 | /tests/testthat.R | 48da8d9cf1d10e817b502a343ee546ec0d5833ab | [] | no_license | mdsumner/wmts | 02abcf357c70fe96e62a0e89b448f5ed9ab267cc | c23d2c114d40a8e0b6856f4deae44894a4199db8 | refs/heads/master | 2021-07-30T05:04:54.069605 | 2021-07-21T03:56:55 | 2021-07-21T03:56:55 | 220,982,958 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 52 | r | testthat.R | library(testthat)
library(wmts)
# Discover and run every test under tests/testthat/ for the wmts package.
test_check("wmts")
|
3e3cef1e9997fe40c36db1f8b4b39a1c79b7dddf | 592ff5e324891b94e18686ed96371b170c33481a | /patents/patent_analysis.R | 48c10b1bd0d1e24c77816807e6d0f6f4ba3f4f49 | [] | no_license | fabiorcampos/Innovation-Analysis | fc7965bab49d01ef5bc313764d079b9c523965c7 | 0cfb45f0c100a465b0d6e60840e550eeaf03ed0d | refs/heads/master | 2021-01-01T16:01:58.120243 | 2017-09-13T18:54:11 | 2017-09-13T18:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,258 | r | patent_analysis.R | # Load libraries
library(dplyr)
library(stringr)
library(tm)
library(ggplot2)
library(wordcloud)
library(readr)
library(SnowballC)
# Load dataset
pat_072017 <- read_csv("pat_072017.csv")
df <- as.data.frame(pat_072017)
# Analyse Inventors: clean the assignee strings, tabulate frequencies,
# and keep assignees with at least 9 patents.
ae <- as.character(df$AE)
ae <- str_replace(ae, " \\(.*\\)", "") ### Remove unnecessary parenthesised text
ae <- as.data.frame(table(ae)) ### combine all elements and make a freq
ae <- ae[order(ae$Freq, decreasing = TRUE), ] ### put in order decreasing
ae <- subset(ae, Freq >= 9, select = c(ae, Freq)) ### create a subset
print(ae)
# Dendrogram Inventors
# NOTE(review): dist() coerces the factor `ae` column to numeric codes, so
# the clustering mixes label codes with counts -- confirm this is intended.
hc <- hclust(dist(ae)) ### Create
plot(hc, labels = ae$ae, main = "Cluster Dendogram", xlab = "Companies",
     ylab = "Height", hang=-50, cex=.40)
# Keywords: clean the keyword strings, tabulate frequencies, keep terms
# appearing at least 20 times.
dc <- as.character(df$DC)
dc <- str_replace(dc, " \\(.*\\)", "")
dc <- as.data.frame(table(dc))
dc <- dc[order(dc$Freq, decreasing = TRUE), ]
dc <- subset(dc, Freq >= 20, select = c(dc, Freq)) ### create a subset
# BUG FIX: `mutate(cummean(dc$Freq))` called mutate() on a bare vector
# (a runtime error) and the later mutate() result was discarded. Compute
# the running mean and each keyword's share of the total explicitly,
# leaving `dc` itself untouched for the dendrogram below.
sumdc <- as.numeric(dc$Freq)
sdc <- sum(sumdc)
dc_stats <- mutate(dc, cum_mean = cummean(Freq), prop = Freq / sdc)
print(dc_stats)
# Dendrogram Keywords
dchc <- hclust(dist(dc)) ### Create
# BUG FIX: x-axis label typo "Kewys" -> "Keywords".
plot(dchc, labels = dc$dc, main = "Cluster Dendogram", xlab = "Keywords",
     ylab = "Height", hang=-50, cex=.60)
|
3f82db09c68f032688b7bc17f8470133a9ed7362 | e001240ee58783ccde42861e8d80f643996eb70b | /devstats.R | 8c5af4833ab80d13330ae79d40374730adfc8f10 | [] | no_license | pedrovelho/high-five | 908a799105b030f503a2eb015cee3402bee085f7 | c61e1600bca1a8aba669a0c697827dcc58086e70 | refs/heads/master | 2020-03-11T13:51:57.744919 | 2018-04-18T09:20:29 | 2018-04-18T09:20:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,653 | r | devstats.R | #!/usr/bin/env Rscript
##sudo apt-get install r-base
##sudo apt-get install pandoc pandoc-citeproc
##install.packages("highcharter")
##install.packages("htmlwidgets")
# Load developer issue counts and derive a per-developer total.
devs <- read.table('sample.data', header=TRUE)
devstats <- data.frame(devs)
devstats$total <- devstats$minor + devstats$major + devstats$critical

library("highcharter")

# Build one stacked-column issues chart and save it as an HTML widget.
#   stats    -- data frame with a `name` column (x-axis categories).
#   title    -- chart title.
#   minor/major/critical -- per-series data vectors (a scalar 0 leaves the
#                           series empty, matching the original charts).
#   file     -- output HTML path.
save_issue_chart <- function(stats, title, minor, major, critical, file) {
  chart <- highchart() %>%
    hc_chart(type = "column") %>%
    hc_title(text = title) %>%
    hc_xAxis(categories = stats$name) %>%
    hc_yAxis(title = list(text = "Issues")) %>%
    hc_plotOptions(column = list(
      stacking = "normal")) %>%
    hc_series(list(name="minor", data=minor),
              list(name="major", data=major),
              list(name="critical", data=critical))
  htmlwidgets::saveWidget(widget = chart, file = file)
}

## Summary: all three severities stacked, developers sorted by total.
devstats <- devstats[order(devstats$total),]
save_issue_chart(devstats, "SUMMARY of issues per developer",
                 devstats$minor, devstats$major, devstats$critical,
                 "./summary.html")

## Critical issues only, sorted by critical count.
devstats <- devstats[order(devstats$critical),]
save_issue_chart(devstats, "CRITICAL",
                 0, 0, devstats$critical, "./critical.html")

## Major issues only, sorted by major count.
devstats <- devstats[order(devstats$major),]
save_issue_chart(devstats, "MAJOR",
                 0, devstats$major, 0, "./major.html")

## Minor issues only, sorted by minor count.
devstats <- devstats[order(devstats$minor),]
save_issue_chart(devstats, "MINOR",
                 devstats$minor, 0, 0, "./minor.html")
|
cd34cbf29edd1532b3fb568373c319db67e87b0c | 4d216630e99eda5974b2655baf8928ca7da754bd | /drake/functions.R | 9a647910692707b72fc9ace1ddbe18788c214df5 | [] | no_license | ashiklom/edr-da | 467861ec61cd8953eb272e2844414a522db7268f | b092600954b73fa064300c6e7b21d0413d115b94 | refs/heads/master | 2021-07-12T18:59:20.190169 | 2021-04-12T14:00:17 | 2021-04-12T14:00:17 | 71,824,349 | 2 | 5 | null | 2018-02-01T13:29:03 | 2016-10-24T19:26:27 | R | UTF-8 | R | false | false | 18,121 | r | functions.R | last_result_file <- function(basedir = "multi_site_pda_results") {
info <- fs::dir_info(basedir, recurse = TRUE, glob = "*.rds")
info %>%
dplyr::arrange(change_time) %>%
tail(1) %>%
dplyr::pull(path)
}
# Draw 2000 samples from the multi-site PDA prior and return them in long
# (tidy) format. Reads the site list and parameter names from disk.
# Depends on file-level helpers create_prior() and tidy_param_matrix().
tidy_prior <- function() {
  sites <- readLines(here::here("other_site_data", "site_list"))
  nsite <- length(sites)
  param_names <- readLines(here::here("param_names.txt"))
  prior <- create_prior(
    nsite = nsite,
    heteroskedastic = FALSE,
    limits = TRUE,
    param_names = param_names
  )
  # One draw per iteration; rbind into a draws x parameters matrix.
  prior_draws <- purrr::map(
    seq_len(2000),
    ~prior$sampler()
  ) %>% purrr::invoke(.f = rbind)
  tidy_param_matrix(prior_draws, "prior")
}
# Convert a draws x parameters matrix to a long tibble with columns id
# (draw index), param, value, and a `type` label (e.g. "prior"), then
# split the parameter names via the file-level split_params() helper.
tidy_param_matrix <- function(mat, type) {
  tibble::as_tibble(mat) %>%
    dplyr::mutate(id = dplyr::row_number()) %>%
    tidyr::pivot_longer(-id, names_to = "param", values_to = "value") %>%
    dplyr::mutate(type = !!type) %>%
    split_params("param")
}
# Violin plot of prior vs. posterior distributions for each PFT-level
# parameter, faceted by parameter (labels parsed as plotmath).
#   tidy_priors / tidy_posteriors -- long data with pft, variable, value.
#   ncol -- number of facet columns.
# Returns a ggplot object.
pft_posterior_plot <- function(tidy_priors, tidy_posteriors, ncol = 2) {
  # Parameter display order and their plotmath facet labels (paired).
  lvls <- c("prospect_N", "prospect_Cab", "prospect_Car",
            "prospect_Cw", "prospect_Cm",
            "SLA",
            "b1Bl", "b1Bw",
            "clumping_factor", "orient_factor")
  lbls <- c("'# mesophyll layers'",
            "Chlorophyll ~ (mu * g ~ cm^-2)",
            "Carotenoids ~ (mu * g ~ cm^-2)",
            "'Water' ~ (g ~ cm^-2)",
            "'Dry matter' ~ (g ~ cm^-2)",
            "'Specific leaf area' ~ (kg ~ m^-2)",
            "'Leaf biomass allometry'", "'Wood biomass allometry'",
            "'Canopy clumping' ~ ('0, 1')", "'Leaf orientation' ~ ('-1, 1')")
  tidy_prior_sub <- tidy_priors %>%
    dplyr::filter(
      # Clipped because priors are much wider than posteriors
      !(variable == "b1Bl" & value > 0.2),
      !(variable == "b1Bw" & value > 0.1),
      !is.na(pft)
    ) %>%
    dplyr::mutate(variable = factor(variable, lvls, lbls))
  clrs <- c("prior" = "gray70", "posterior" = "black")
  tidy_posterior2 <- tidy_posteriors %>%
    dplyr::filter(!is.na(pft)) %>%
    dplyr::mutate(variable = factor(variable, lvls, lbls))
  # Priors drawn first so posteriors overlay them.
  ggplot() +
    aes(x = forcats::fct_inorder(pft), y = value,
        fill = type, color = type) +
    geom_violin(data = tidy_prior_sub) +
    geom_violin(data = tidy_posterior2) +
    facet_wrap(vars(variable), scales = "free_y", ncol = ncol,
               labeller = label_parsed) +
    scale_color_manual(values = clrs, aesthetics = c("color", "fill")) +
    theme_bw() +
    theme(
      axis.text.x = element_text(angle = 90, vjust = 0.5),
      axis.title.x = element_blank(),
      axis.title.y = element_blank()
    )
}
# Violin plot of per-site soil moisture posteriors, sites ordered (and
# colored) by weighted evergreen fraction, with group mean (SD) labels on
# either side of the hardwood/evergreen boundary. Returns a ggplot object.
soil_moisture_plot <- function(tidy_posteriors, site_structure_data) {
  site_structure <- site_structure_data %>%
    dplyr::mutate(
      x = forcats::fct_reorder(site_name, frac_evergreen_wtd),
      # Matches the "sitesoil_<site>" parameter names in the posterior.
      site_soil = paste0("sitesoil_", site_name)
    )
  site_posterior <- tidy_posteriors %>%
    dplyr::filter(grepl("sitesoil", variable)) %>%
    dplyr::inner_join(site_structure, c("variable" = "site_soil"))
  # Last site (in the ordered factor) that is still hardwood-dominated;
  # used to place the dashed boundary line.
  last_hw_site <- site_structure %>%
    dplyr::filter(frac_evergreen_wtd <= 0.5) %>%
    dplyr::arrange(dplyr::desc(frac_evergreen_wtd)) %>%
    dplyr::slice(1) %>%
    dplyr::pull(x)
  # Mean (SD) of soil moisture for the hardwood and evergreen groups,
  # positioned 3 sites to either side of the boundary.
  site_posterior_summary <- site_posterior %>%
    dplyr::group_by(EG = frac_evergreen_wtd > 0.5) %>%
    dplyr::summarize(Mean = mean(value),
                     SD = sd(value)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(
      x = as.numeric(last_hw_site) + 0.5 + c(-3, 3),
      lab = sprintf("%.2f (%.2f)", Mean, SD)
    )
  ggplot(site_posterior) +
    aes(x = x, y = value,
        fill = frac_evergreen_wtd, color = frac_evergreen_wtd) +
    geom_violin() +
    geom_vline(xintercept = as.numeric(last_hw_site) + 0.5,
               linetype = "dashed") +
    geom_text(aes(x = x, y = 0, label = lab), data = site_posterior_summary,
              inherit.aes = FALSE) +
    scale_color_paletteer_c(
      palette = "pals::isol",
      aesthetics = c("color", "fill"),
      direction = -1,
      guide = guide_colorbar(title = "Weighted evergreen fraction")
    ) +
    labs(x = "Site code", y = "Soil moisture fraction (0 - 1)") +
    theme_bw() +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
}
# Read the first ED2 .css cohort file for each site listed in
# site_list_file (one subdirectory per site under site_dir) and return a
# tibble of cohorts with derived height, within-site/year cohort rank
# (tallest = 1), and a labeled PFT factor.
# Depends on file-level helpers get_ipft() and dbh2h().
full_site_info <- function(site_list_file, site_dir) {
  selected_sites <- readLines(site_list_file)
  # First css file found in each site directory, named by site.
  site_files <- selected_sites %>% purrr::map_chr(
    ~head(list.files(file.path(site_dir, .x), "css$", full.names = TRUE), 1)
  ) %>%
    setNames(selected_sites)
  site_df <- purrr::map_dfr(site_files, read.table,
                            header = TRUE, .id = "site") %>%
    tibble::as_tibble() %>%
    dplyr::select(site, year = time, dbh, ipft = pft, nplant = n) %>%
    dplyr::mutate(
      ipft = get_ipft(ipft),
      hite = dbh2h(dbh, ipft)
    ) %>%
    dplyr::group_by(site, year) %>%
    dplyr::arrange(desc(hite)) %>%
    dplyr::mutate(cohort = dplyr::row_number()) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(pft = factor(ipft, 1:5, c(
      "Early_Hardwood", "North_Mid_Hardwood", "Late_Hardwood",
      "Northern_Pine", "Late_Conifer"
    )))
  site_df
}
# Propagate posterior allometry/SLA/clumping samples through the cohort
# table to produce per-cohort LAI (and clumping-corrected effective LAI)
# samples. At most max_samples posterior draws are used.
# Depends on file-level objects allom_mu and helper size2bl().
predict_lai <- function(site_details, tidy_posteriors, max_samples = 5000) {
  tidy_params_dt <- tidy_posteriors %>%
    dplyr::filter(variable %in% c("b1Bl", "SLA", "clumping_factor"))
  # Fixed allometry exponents by PFT, stripped of the "temperate." prefix.
  b2Bl <- purrr::map_dbl(allom_mu, "b2Bl")
  names(b2Bl) <- gsub("temperate\\.", "", names(b2Bl))
  params_structure <- tidy_params_dt %>%
    tidyr::pivot_wider(
      names_from = "variable",
      values_from = "value"
    ) %>%
    dplyr::mutate(b2Bl = b2Bl[pft])
  nsamp <- min(max_samples, nrow(params_structure))
  params_structure_sub <- params_structure %>%
    dplyr::sample_n(nsamp, replace = FALSE)
  # LAI = stem density * leaf biomass * SLA; eLAI applies clumping.
  site_lai_samples <- params_structure_sub %>%
    dplyr::left_join(site_details, "pft") %>%
    dplyr::mutate(
      bleaf = size2bl(dbh, b1Bl, b2Bl),
      lai = nplant * bleaf * SLA,
      elai = lai * clumping_factor
    )
  site_lai_samples
}
# Collapse the per-draw LAI samples from predict_lai() to per-cohort
# summaries (mean, SD, 95% quantiles for LAI and eLAI), then accumulate
# cumulative LAI from the shortest cohort upward within each site/year.
summarize_lai_samples <- function(site_lai_samples) {
  site_lai_samples %>%
    dplyr::group_by(site, year, pft, ipft, hite, dbh, nplant, cohort) %>%
    dplyr::summarize(
      lai_mean = mean(lai),
      lai_sd = sd(lai),
      lai_lo = quantile(lai, 0.025),
      lai_hi = quantile(lai, 0.975),
      elai_mean = mean(elai),
      elai_sd = sd(elai),
      elai_lo = quantile(elai, 0.025),
      elai_hi = quantile(elai, 0.975)
    ) %>%
    dplyr::ungroup() %>%
    dplyr::group_by(site, year) %>%
    # Shortest cohort first, so cumsum() gives LAI below each height.
    dplyr::arrange(hite) %>%
    dplyr::mutate(
      cum_lai = cumsum(lai_mean),
      cum_elai = cumsum(elai_mean)
    ) %>%
    dplyr::ungroup()
}
# Faceted spectra figure for all sites: observed AVIRIS reflectance, EDR
# posterior mean with 95% CI/PI ribbons, and a SAIL reference line.
# Sites are ordered by mean EDR bias. Returns a ggplot object.
spec_error_all_f <- function(observed_predicted, sail_predictions, ncol = 6) {
  # Sort sites by aggregate bias
  plot_dat <- observed_predicted %>%
    dplyr::group_by(site) %>%
    dplyr::mutate(site_mean_bias = mean(bias)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(site_f = forcats::fct_reorder(site, site_mean_bias))
  # Restrict SAIL output to the observed wavelengths and reuse the same
  # site ordering for faceting.
  sail_sub <- sail_predictions %>%
    dplyr::semi_join(observed_predicted, "wavelength") %>%
    dplyr::mutate(site_f = factor(site, levels(plot_dat[["site_f"]])))
  sail_avg <- sail_sub %>%
    tidyr::pivot_wider(names_from = "stream", values_from = "value") %>%
    # Same configuration as EDR -- assume incident radiation is 90% direct, 10%
    # diffuse
    dplyr::mutate(value = 0.9 * dhr + 0.1 * bhr)
  ggplot(plot_dat) +
    aes(x = wavelength, group = aviris_id) +
    geom_ribbon(aes(ymin = pmax(albedo_r_q025, 0),
                    ymax = pmin(albedo_r_q975, 1),
                    fill = "95% PI")) +
    geom_ribbon(aes(ymin = pmax(albedo_q025, 0),
                    ymax = pmin(albedo_q975, 1),
                    fill = "95% CI")) +
    geom_line(aes(y = albedo_mean, color = "EDR"), size = 1) +
    geom_line(aes(y = observed, color = "AVIRIS")) +
    geom_line(aes(y = value, color = "SAIL"), data = sail_avg) +
    facet_wrap(vars(site_f), scales = "fixed", ncol = ncol) +
    labs(x = "Wavelength (nm)", y = "Reflectance (0 - 1)") +
    scale_fill_manual(
      name = "",
      values = c("95% PI" = "gray80",
                 "95% CI" = "green3")
    ) +
    scale_color_manual(
      name = "",
      values = c("EDR" = "green4",
                 "AVIRIS" = "black",
                 "SAIL" = "red")
    ) +
    theme_bw()
}
# Side-by-side figure for one site: predicted vs. observed spectra (left)
# and the DBH distribution colored by PFT (right), combined with patchwork.
#   spec_additions / dbh_additions -- optional lists of ggplot layers
#     Reduce()-added to the respective panel.
#   ymax -- upper reflectance limit for the spectra panel.
site_spec_dbh_plot <- function(site, observed_predicted, site_details,
                               spec_additions = NULL,
                               dbh_additions = NULL,
                               ymax = 1.0) {
  # !!site forces the argument value so it is not confused with the
  # data column of the same name inside filter().
  spec_sub <- observed_predicted %>%
    dplyr::filter(site == !!site)
  pspec <- ggplot(spec_sub) +
    aes(x = wavelength, group = aviris_id) +
    geom_ribbon(aes(ymin = pmax(albedo_r_q025, 0), ymax = pmin(albedo_r_q975, 1)),
                fill = "gray70") +
    geom_ribbon(aes(ymin = pmax(albedo_q025, 0),
                    ymax = pmin(albedo_q975, 1)),
                fill = "green3") +
    geom_line(aes(y = albedo_mean), color = "green4", size = 1) +
    geom_line(aes(y = observed)) +
    labs(x = "Wavelength (nm)", y = "Reflectance (0 - 1)",
         title = site) +
    coord_cartesian(ylim = c(0, ymax)) +
    theme_bw()
  if (!is.null(spec_additions)) {
    pspec <- Reduce("+", c(list(pspec), spec_additions))
  }
  dbh_dat <- site_details %>%
    dplyr::filter(site == !!site)
  # One fixed color per PFT level so colors match across sites.
  pft_colors <- RColorBrewer::brewer.pal(5, "Set1")
  names(pft_colors) <- levels(dbh_dat$pft)
  pdbh <- ggplot(dbh_dat) +
    aes(x = dbh, fill = pft) +
    geom_histogram(binwidth = 5) +
    coord_cartesian(xlim = c(0, 100)) +
    labs(x = "DBH (cm)", y = "Count", fill = "PFT") +
    scale_fill_manual(values = pft_colors, drop = FALSE) +
    theme_bw() +
    theme(
      legend.position = c(1, 1),
      legend.justification = c(1, 1),
      legend.background = element_blank()
    )
  if (!is.null(dbh_additions)) {
    pdbh <- Reduce("+", c(list(pdbh), dbh_additions))
  }
  # patchwork composition: spectra panel left, DBH panel right.
  pspec + pdbh
}
# Compute NDVI from a long-format spectra table.
#
#   dat  -- data frame with a `wavelength` column plus the value column.
#   vcol -- name (string) of the column holding the reflectance values.
#
# Returns `dat` restricted to the 690/800 nm bands, widened to `red` and
# `nir` columns, with an `ndvi` column appended.
calc_ndvi <- function(dat, vcol) {
  # Keep only the red (690 nm) and near-infrared (800 nm) bands.
  bands <- dplyr::filter(dat, wavelength %in% c(690, 800))
  # One column per band, named by wavelength.
  wide <- tidyr::pivot_wider(
    bands,
    names_from = "wavelength",
    values_from = all_of(vcol)
  )
  wide <- dplyr::rename(wide, nir = `800`, red = `690`)
  dplyr::mutate(wide, ndvi = (nir - red) / (nir + red))
}
# Join observed and EDR-predicted NDVI by site, then attach the site
# structure covariates. Relies on the sibling calc_ndvi() helper.
calc_ndvi_bysite <- function(observed_spectra, predicted_spectra,
                             site_structure) {
  obs_ndvi <- calc_ndvi(observed_spectra, "observed")
  pred_means <- dplyr::select(predicted_spectra, wavelength, site, albedo_mean)
  pred_ndvi <- calc_ndvi(pred_means, "albedo_mean")
  both <- dplyr::inner_join(obs_ndvi, pred_ndvi, "site",
                            suffix = c("_obs", "_pred"))
  dplyr::left_join(both, site_structure, c("site" = "site_name"))
}
# Scatter of observed and predicted NDVI against mean DBH, each with its
# own linear fit; shape/linetype distinguish the two series.
# Expects the output of calc_ndvi_bysite(). Returns a ggplot object.
ndvi_dbh_plot <- function(both_ndvi) {
  ggplot(both_ndvi) +
    aes(x = mean_dbh) +
    geom_point(aes(y = ndvi_obs, shape = "observed")) +
    geom_smooth(aes(y = ndvi_obs, linetype = "observed"),
                method = "lm", se = FALSE, color = "black") +
    geom_point(aes(y = ndvi_pred, shape = "predicted")) +
    geom_smooth(aes(y = ndvi_pred, linetype = "predicted"),
                method = "lm", se = FALSE, color = "black") +
    labs(x = "Mean DBH (cm)", y = "NDVI") +
    scale_shape_manual(values = c("observed" = 19, predicted = 3)) +
    theme_bw()
}
# Predicted vs. observed LAI with error bars in both dimensions, a 1:1
# line, the fitted regression line, and an annotation giving the fit
# equation, R-squared, and a test of slope != 1. Returns a ggplot object.
lai_predicted_observed_plot <- function(site_lai_total, lai_observed) {
  plot_dat <- dplyr::inner_join(site_lai_total, lai_observed, "site")
  fit <- lm(obs_LAI ~ lai_mean, data = plot_dat)
  sfit <- summary(fit)
  # Test that slope is not equal to 1; this is analogous to seeing whether the
  # slope of the regression with an offset is equal to zero.
  fit2_form <- formula(lai_mean - obs_LAI ~ lai_mean)
  fit2 <- lm(fit2_form, data = plot_dat)
  sfit2 <- summary(fit2)
  pval <- sfit2$coefficients[2, "Pr(>|t|)"]
  # Two-line annotation: fit equation, then R^2 and the slope test.
  eqn <- paste(
    sprintf("y = %.2fx + %.2f", fit$coefficients[2], fit$coefficients[1]),
    sprintf("R² = %.2f, p(m ≠ 1) = %.3f", sfit$r.squared, pval),
    sep = "\n"
  )
  ggplot(plot_dat) +
    aes(x = lai_mean, xmin = lai_lo, xmax = lai_hi,
        y = obs_LAI, ymin = obs_LAI_lo, ymax = obs_LAI_hi) +
    geom_pointrange(aes(color = pft)) +
    geom_errorbarh(aes(color = pft)) +
    geom_abline(aes(linetype = "1:1", intercept = 0, slope = 1)) +
    geom_abline(aes(linetype = "Regression", intercept = fit$coefficients[1], slope = fit$coefficients[2])) +
    scale_linetype_manual(values = c("1:1" = "dashed", "Regression" = "solid"), name = "") +
    scale_color_brewer(palette = "Set1", name = "") +
    annotate("text", x = -Inf, y = Inf, hjust = -0.2, vjust = 1.2,
             label = eqn) +
    labs(x = "Predicted LAI", y = "Observed LAI") +
    theme_bw() +
    theme(legend.position = c(1, 0),
          legend.justification = c(1, 0),
          legend.background = element_blank(),
          legend.box = "horizontal",
          legend.box.just = "bottom")
}
# Run PRO4SAIL once per site using posterior-mean parameters for that
# site's tallest cohort, and return the four reflectance streams in long
# format (site, wavelength, stream, value).
# NOTE(review): reads the global `site_list_file` rather than taking it as
# an argument -- confirm it is defined wherever this is called. Also uses
# unqualified dplyr verbs, unlike the dplyr::-prefixed style elsewhere in
# this file, so dplyr must be attached.
tidy_sail_predictions <- function(site_details, site_lai_total,
                                  tidy_posteriors) {
  # "sitesoil_<i>" tags pair sites with their soil-moisture parameters.
  site_tags <- tibble::tibble(
    site = readLines(site_list_file),
    site_tag = paste0("sitesoil_", seq_along((site)))
  )
  # One row per site: the tallest cohort, joined to total LAI and tag.
  sail_input <- site_details %>%
    group_by(site) %>%
    arrange(desc(hite), .by_group = TRUE) %>%
    slice(1) %>%
    ungroup() %>%
    left_join(site_lai_total, c("site", "year")) %>%
    left_join(site_tags, "site")
  posterior_means <- tidy_posteriors %>%
    group_by(pft, variable) %>%
    summarize(value = mean(value)) %>%
    ungroup()
  soil_means <- posterior_means %>%
    filter(grepl("sitesoil", variable)) %>%
    select(site_tag = variable, soil_moisture = value)
  posterior_params <- posterior_means %>%
    filter(grepl("prospect_", variable) | variable == "orient_factor") %>%
    tidyr::pivot_wider(names_from = "variable", values_from = "value")
  # Convert orientation factor (-1, 1) to a mean leaf angle for SAIL.
  sail_input2 <- sail_input %>%
    left_join(posterior_params, "pft") %>%
    left_join(soil_means, "site_tag") %>%
    mutate(leaf_theta = acos((1 + orient_factor) / 2))
  sail_output <- sail_input2 %>%
    mutate(
      sail_result = purrr::pmap(list(
        prospect_N, prospect_Cab, prospect_Car, prospect_Cw, prospect_Cm,
        leaf_theta, elai_mean, soil_moisture
      ), ~PEcAnRTM::pro4sail(c(..1, ..2, ..3, 0, ..4, ..5, # PROSPECt
                               ..6, 0, 2, # Leaf angle
                               ..7, 0, # LAI, hot spot
                               0, 0, 0, # Angles
                               ..8)))
    )
  # Unpack each pro4sail matrix into the four reflectance streams.
  sail_output_proc <- sail_output %>%
    select(site, sail_result) %>%
    mutate(
      wavelength = purrr::map(sail_result, PEcAnRTM::wavelengths),
      bhr = purrr::map(sail_result, ~.x[, "bi-hemispherical"]) %>% purrr::map(as.numeric),
      dhr = purrr::map(sail_result, ~.x[, "directional_hemispherical"]) %>% purrr::map(as.numeric),
      hdr = purrr::map(sail_result, ~.x[, "hemispherical_directional"]) %>% purrr::map(as.numeric),
      bdr = purrr::map(sail_result, ~.x[, "bi-directional"]) %>% purrr::map(as.numeric)
    ) %>%
    select(-sail_result) %>%
    tidyr::unnest(wavelength:bdr)
  sail_output_proc %>%
    tidyr::pivot_longer(bhr:bdr, names_to = "stream", values_to = "value")
}
# Baseline argument set for the EDR sensitivity analysis (see do_sens /
# sensitivity_plot below): PROSPECT leaf parameters (N, Cab, Car, Cw, Cm),
# canopy structure (lai, cai, wai, clumping_factor, orient_factor), sky
# condition (direct_sky_frac, czen = cosine of solar zenith), the PFT
# index, and soil moisture.
edr_sensitivity_defaults <- list(
  N = 1.4, Cab = 40, Car = 10, Cw = 0.01, Cm = 0.01,
  lai = 3, cai = 1,
  clumping_factor = 1,
  orient_factor = 0,
  direct_sky_frac = 0.8,
  pft = 1,
  czen = 0.85,
  wai = 0,
  soil_moisture = 0.5
)
# SAIL counterpart of edr_sensitivity_defaults: reuses the shared leaf and
# soil parameters, and derives SAIL-specific inputs (effective LAI with
# clumping applied, solar zenith in degrees, LIDF leaf-angle parameters)
# so the two models are run under equivalent conditions.
sail_sensitivity_defaults <- c(
  edr_sensitivity_defaults[c("N", "Cab", "Car", "Cw", "Cm",
                             "soil_moisture")],
  list(
    Cbrown = 0,
    LAI = edr_sensitivity_defaults[["lai"]] * edr_sensitivity_defaults[["clumping_factor"]], #nolint
    hot_spot = 0,
    solar_zenith = acos(edr_sensitivity_defaults[["czen"]]) * 180/pi,
    LIDFa = edr_sensitivity_defaults[["orient_factor"]],
    LIDFb = 0
  )
)
# Evaluate `fun` while sweeping one named argument.
#
#   value    -- value(s) to assign to the swept argument.
#   variable -- name (string) of the argument being swept.
#   fun      -- model function to call (e.g. edr_r).
#   .dots    -- list of baseline arguments; any entry named `variable`
#               is overridden by `value`.
#
# Returns whatever `fun` returns.
do_sens <- function(value, variable, fun, .dots = list()) {
  stopifnot(is.list(.dots))
  # Single-entry list holding the swept argument.
  sweep_arg <- list()
  sweep_arg[[variable]] <- value
  n_values <- length(value)
  # PFT-specific arguments must match the length of the swept value, so
  # recycle them when more than one value was supplied.
  if (n_values > 1) {
    for (pft_var in c("pft", "lai", "wai", "cai")) {
      .dots[[pft_var]] <- rep(.dots[[pft_var]], n_values)
    }
  }
  do.call(fun, modifyList(.dots, sweep_arg))
}
# Combine a list of model results (each with an `albedo` element of
# length 2101, one value per nm from 400-2500) into a long tibble with
# columns wavelength, variable (V1, V2, ...), value, and var_value (the
# swept input value that produced each result).
tidy_albedo <- function(result_list, values) {
  stopifnot(length(result_list) == length(values))
  values_df <- tibble::tibble(
    variable = paste0("V", seq_along(values)),
    var_value = values
  )
  names(result_list) <- values_df[["variable"]]
  # One albedo column per result, then add the wavelength axis.
  albedo_dfw <- purrr::map_dfc(result_list, "albedo")
  albedo_dfw[["wavelength"]] <- seq(400, 2500)
  albedo_long <- tidyr::pivot_longer(albedo_dfw, -wavelength,
                                     names_to = "variable", values_to = "value")
  dplyr::left_join(albedo_long, values_df, by = "variable")
}
# SAIL counterpart of tidy_albedo(): convert each SAIL output to a tibble,
# attach the 400-2500 nm wavelength axis, and nest/unnest them under the
# swept input value (var_value) and a V1, V2, ... variable label.
tidy_sail <- function(result_list, values) {
  stopifnot(length(result_list) == length(values))
  results_dfl <- purrr::map(result_list, tibble::as_tibble) %>%
    purrr::map(function(x) {x$wavelength <- seq(400, 2500); x})
  values_df <- tibble::tibble(
    variable = paste0("V", seq_along(values)),
    var_value = values,
    saildata = results_dfl
  )
  tidyr::unnest(values_df, saildata)
}
# Run the EDR sensitivity sweep for one parameter and save the resulting
# albedo-vs-wavelength figure to <figdir>/edr-sensitivity-<varname>.png.
#   values   -- parameter values to sweep.
#   varname  -- name of the swept parameter (also used in the file name).
#   label    -- color-legend title.
#   defaults -- baseline argument list; `...` overrides individual entries.
# NOTE(review): depends on globals figdir, path() (fs), edr_r, and the
# sibling helpers do_sens()/tidy_albedo(); the plot is only saved, not
# returned (ggsave's value is returned invisibly).
sensitivity_plot <- function(values, varname, label,
                             defaults = edr_sensitivity_defaults,
                             ...) {
  sens <- purrr::map(
    values, do_sens,
    fun = edr_r,
    variable = varname,
    .dots = modifyList(defaults, list(...))
  ) %>%
    tidy_albedo(values)
  plt <- ggplot(sens) +
    aes(x = wavelength, y = value, color = var_value,
        group = var_value) +
    geom_line() +
    scale_color_viridis_c() +
    labs(x = "Wavelength (nm)", y = "Albedo [0,1]",
         color = label) +
    theme_bw() +
    theme(
      legend.position = c(1, 1),
      legend.justification = c(1, 1),
      legend.background = element_blank()
    )
  ggsave(path(figdir, paste0("edr-sensitivity-", varname, ".png")), plt,
         width = 4, height = 4, dpi = 300)
}
# Bin wavelengths (nm) into spectral bands and label each band with its
# interval plus the unit, e.g. "[400,750] nm". dig.lab = 4 keeps 4-digit
# break values unabbreviated in the labels.
band_cut <- function(x, breaks = c(400, 750, 1100, 1300)) {
  bands <- cut(x, breaks, include.lowest = TRUE, dig.lab = 4)
  # Equivalent to forcats::fct_relabel(bands, ~paste(.x, "nm")), using
  # only base R.
  levels(bands) <- paste(levels(bands), "nm")
  bands
}
|
942d6bfe17c6131e407ca60a24f9b74ef17691ec | c6bf3b890f5589eb847493f9d59acf4257c00d08 | /cachematrix.R | 33151fd3561d69688cc3a9ab223d2edcf8897fc9 | [] | no_license | timowlmtn/ProgrammingAssignment2 | 1556c5bbf603e2c5deade4555e7b68af1d757e4f | bb8763fa798d0a195475ee50010dbeb0eb6a3655 | refs/heads/master | 2021-01-09T20:45:43.453926 | 2014-05-25T13:07:25 | 2014-05-25T13:07:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,512 | r | cachematrix.R | # This program demonstrates the use caching in R to create a data structure where the inverse is
# only computed as needed.
#
# To Test:
#
# cachedMatrix <- makeCacheMatrix(matrix(sample(100,16,T),4));
# cacheSolve(cachedMatrix)
#
# Correct result is the identity matrix with ones on diagonal and 0s (with double precision) elsewhere
#
# cachedMatrix$get() %*% cachedMatrix$getinverse()
#
# Caching will only recompute the inverse of the matrix if necessary.
#
# cacheSolve(cachedMatrix)
# (getting cached data)
#
## makeCacheMatrix builds a caching data structure around the matrix.
##
## Wrap a matrix together with a cache slot for its inverse. Returns a
## list of four accessors: set (replace the matrix and invalidate any
## cached inverse), get, setinverse, and getinverse. The cache itself is
## held in this function's closure.
makeCacheMatrix <- function(A = matrix()) {
  Ainv <- NULL
  list(
    set = function(y) {
      A <<- y
      Ainv <<- NULL
    },
    get = function() A,
    setinverse = function(solve) Ainv <<- solve,
    getinverse = function() Ainv
  )
}
## cacheSolve takes a matrix data structure from makeCacheMatrix the as a parameter and will
## compute the inverse of that matrix if it does not already exist.
##
## The matrix A is assumed non-singular. No eigenvalues should be less than a double-precision 0.
## IE: min(abs(eigen(cachedMatrix$get())$values)) > 1e-14
##
## Return the inverse of the matrix wrapped by `A` (an object produced by
## makeCacheMatrix), computing it at most once: a cached inverse is reused
## and reported via a message. `...` is forwarded to solve(). The wrapped
## matrix must be non-singular.
cacheSolve <- function(A, ...) {
  cached <- A$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(A$get(), ...)
  A$setinverse(inverse)
  ## Return a matrix that is the inverse of the wrapped matrix.
  inverse
}
|
2d0d0b71891f0b8ec2779c981ab3f600d84ca1aa | 392fe0cc9fac49f1c101d32288aa3f661f4e5a10 | /run.r | c1fe408e8b22ed23395c4eadec0ff671d4b1aaf2 | [] | no_license | mwinnel/ARC-Project | bf3b8c053131cd22622dd81c4b9d290e972de4ce | 879f04cc419fe39d92ae416dfb5217047b12a5fd | refs/heads/master | 2021-01-24T20:25:40.735664 | 2015-04-08T03:41:20 | 2015-04-08T03:41:20 | 28,791,006 | 0 | 0 | null | 2015-03-24T06:04:47 | 2015-01-05T00:42:06 | R | UTF-8 | R | false | false | 166 | r | run.r | # setwd("C:/SentinelTest")
# NOTE(review): absolute, machine-specific working directory; consider an
# R project or relative paths so the pipeline runs on other machines.
setDir <- "C:/Users/s2783343/Documents/ARC/ARC-Project"
setwd(setDir)
# Pipeline entry point: data collection, spot initialisation, then the
# main loop (each sourced script lives in this repository).
source("data_col.R")
source("start_spot.r")
source("loop.R")
|
3e19a35d21bb5aaf8fae3e1f8f07a5aef92274ef | 28c0bb9cf47bc8a8f629b389ba62c1808fd34691 | /man/lactation.machine.model.Rd | fe61bb48b1784d8041a7c6f899f94d26b87d7bc9 | [] | no_license | gcostaneto/ZeBook | 836e3dc8ab80de9ecce782e809606f4d647f30c0 | b892a7e80a233b1c468526307eb5f7b49d95514d | refs/heads/master | 2020-05-14T08:41:25.584061 | 2018-11-09T16:40:03 | 2018-11-09T16:40:03 | 181,727,649 | 1 | 0 | null | 2019-04-16T16:33:07 | 2019-04-16T16:33:07 | null | UTF-8 | R | false | true | 2,321 | rd | lactation.machine.model.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lactation.machine.model.r
\name{lactation.machine.model}
\alias{lactation.machine.model}
\title{The Lactation model with milking machine}
\usage{
lactation.machine.model(cu, kdiv, kdl, kdh, km, ksl, kr, ks, ksm, mh, mm,
p, mum, rma, t1, t2, t3, t4, t5, t6, duration, dt, CSi, Mi)
}
\arguments{
\item{cu}{: number of undifferentiated cells}
\item{kdiv}{: cell division rate, Michaelis-Menten constant}
\item{kdl}{: constant degradation of milk}
\item{kdh}{: rate of decomposition of the hormone}
\item{km}{: constant secretion of milk}
\item{ksl}{: milk secretion rate, Michaelis-Menten constant}
\item{kr}{: average milk constant}
\item{ks}{: rate of degradation of the basal cells}
\item{ksm}{: constant rate of degradation of milk secreting cells}
\item{mh}{: parameter}
\item{mm}{: storage Capacity milk the animal}
\item{p}{: parameter}
\item{mum}{: setting the maximum rate of cell division}
\item{rma}{: parameter of milk m (t) function}
\item{t1}{: parameter of milk m (t) function}
\item{t2}{: parameter of milk m (t) function}
\item{t3}{: parameter of milk m (t) function}
\item{t4}{: parameter of milk m (t) function}
\item{t5}{: parameter of milk m (t) function}
\item{t6}{: parameter of milk m (t) function}
\item{duration}{: duration of simulation}
\item{dt}{: time step}
\item{CSi}{: initial Number of secretory cells}
\item{Mi}{: initial Quantity of milk in animal (kg)}
}
\value{
matrix with CS,M,Mmoy,RM
}
\description{
\strong{Model description.}
This model is a model of lactating mammary glands of cattle described by Heather et al. (1983). This model was then inspired more complex models based on these principles.
This model simulates the dynamics of the production of cow's milk.
the system is represented by 6 state variables: change in hormone levels (H), the production and loss of milk secreting cells (CS), and removing the secretion of milk (M), the average quantity of milk contained in the animal (Mmean), the amount of milk removed (RM) and yield (Y).
The model has a time step dt = 0.001 for milking machines.
The model is defined by a few equations, with a total of twenty parameters for the described process.
}
|
56555d78896af984e478b4831dfaa01072999467 | 6436da1c54563dc4589693e396dc679691cbcb4d | /main.R | 9ffcaf45239420d70d510cae8f6923caddb2bec6 | [] | no_license | baidurja/kaggle_GA | 00415bbe827d0d2a0706d2e7059f465bf1f1ecba | 65be50493e11c0df94a564ca4ee89b90728c305a | refs/heads/master | 2020-04-01T07:25:32.658147 | 2018-10-30T01:23:27 | 2018-10-30T01:23:27 | 152,989,447 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,487 | r | main.R | library(tidyverse)
library(jsonlite)    # JSON parsing of the packed GA columns
library(data.table)  # rbindlist() used by json_clean()

# Load the Google Analytics (Kaggle) training data.
# NOTE(review): json_clean() is only defined at the bottom of this file, so
# sourcing the script top-to-bottom fails on the calls below -- move the
# function definitions above this point (or source them first); confirm.
theData = read_csv('./data/train.csv')

# Categorical identifier columns -> factors.
theData$channelGrouping = as.factor( theData$channelGrouping )
theData$fullVisitorId = as.factor( theData$fullVisitorId )
theData$socialEngagementType = as.factor( theData$socialEngagementType )

# Expand the JSON-packed columns into flat data.tables of sub-fields.
totals_col = json_clean( theData$totals )
device_col = json_clean( theData$device )
geonet_col = json_clean( theData$geoNetwork )
# traffsrc_col = json_clean( theData$trafficSource )

# Drop the raw JSON columns now that they are unpacked ...
theData$totals = NULL
theData$device = NULL
theData$geoNetwork = NULL
# theData$trafficSource = NULL

# ... and bind the unpacked sub-fields back onto the main table.
theData = cbind( theData, totals_col )
theData = cbind( theData, device_col )
theData = cbind( theData, geonet_col )
# theData = cbind( theData, traffsrc_col )

# Drop constant columns (a single distinct value carries no information).
# NOTE(review): `theData[,..k]` is data.table indexing syntax; it only works
# if theData is a data.table at this point (read_csv returns a tibble) --
# confirm the cbind above actually produced a data.table.
ncols = ncol( theData )
k = 1
while ( k <= ncols ){
  if ( sum(!duplicated(theData[,..k])) == 1 ){
    print( k )                # log which column index is being dropped
    theData[ , k ] = NULL
    ncols = ncols - 1         # k is NOT advanced: columns shifted left
  } else {
    k = k + 1
  }
}

# Missing revenue means no transaction: treat as zero, then coerce the
# character-valued JSON fields to numeric.
theData$transactionRevenue[ is.na( theData$transactionRevenue) ] = 0
theData$pageviews = as.numeric(theData$pageviews)
theData$transactionRevenue = as.numeric(theData$transactionRevenue)

# Quick scatter-matrix sanity check of the two key numeric columns.
pairs(~pageviews+transactionRevenue, data=theData)

# k = theData$totals
# d = data.frame( jsoncol = k, stringsAsFactors = FALSE )
#
# theData = unpackJSONCol( theData, theData$totals )
# theData$totals = NULL
# theData = unpackJSONCol( theData, theData$device )

gc()

# Exploratory plots: channel counts and mean revenue by pageviews / channel.
# NOTE(review): `fun.y` is deprecated in recent ggplot2 (use `fun =`).
ggplot( theData, aes( channelGrouping ) ) + geom_bar()
ggplot( theData, aes(x=pageviews, y=transactionRevenue)) +
  stat_summary(fun.y="mean", geom="bar")
ggplot( theData, aes(x=channelGrouping, y=transactionRevenue)) +
  stat_summary(fun.y="mean", geom="bar")
# Parse a character column of per-row JSON strings into a single data.table.
# Doubled quotes are un-escaped first; rows with differing fields are aligned
# by name and padded with NA via rbindlist(fill = TRUE).
json_clean <- function(column) {
  unescaped <- gsub('""', '"', column)            # turn into valid JSON
  parsed <- lapply(unescaped, jsonlite::fromJSON) # one list per row
  flat <- rbindlist(parsed, fill = TRUE)          # list of rows -> data.table
  gc()  # the intermediate parsed list can be large
  return(flat)
}
# Flatten a character column of JSON objects into one data frame.
#
# column: character vector, one JSON object per element, with quotes escaped
#         as doubled double-quotes ("").
# Returns the parsed data, with nested JSON fields flattened into top-level
# columns (jsonlite::fromJSON(flatten = TRUE)).
#
# Rewritten from a magrittr functional sequence (`. %>% ...`) into an explicit
# function, and the discouraged `T` literal replaced by `TRUE` (`T` can be
# reassigned, silently breaking the call).
flatten_json <- function(column) {
  cleaned <- gsub('""', '"', column)                        # un-escape quotes
  json_array <- str_c("[", str_c(cleaned, collapse = ","), "]")  # rows -> JSON array
  fromJSON(json_array, flatten = TRUE)
}
# Flatten the trafficSource JSON column into its own data frame of columns.
test <- bind_cols(flatten_json(theData$trafficSource))
# Expand a column of JSON strings into new columns of `theDF`, one column per
# JSON field (union of fields over all rows), and return the augmented frame.
unpackJSONCol <- function(theDF, theDFCol) {
  parsed <- lapply(theDFCol, fromJSON)
  # Union of all field names observed across the rows.
  fields <- unique(unlist(lapply(parsed, names)))
  theDF[, fields] <- lapply(fields, function(nm) {
    sapply(parsed, function(row) row[[nm]])
  })
  return(theDF)
}
|
6534a9b6f60efd6da2410b1a8e40f11bb67b398c | 5434a6fc0d011064b575b321e93a3519db5e786a | /man/getAbsSandboxPath.Rd | c6fac9d48f1648c0f6f39a19313d483719190890 | [
"MIT"
] | permissive | cytoscape/RCy3 | 4813de06aacbaa9a3f0269c0ab8824a6e276bad9 | 18d5fac035e1f0701e870150c55231c75309bdb7 | refs/heads/devel | 2023-09-01T18:23:28.246389 | 2023-08-23T07:57:19 | 2023-08-23T07:57:19 | 118,533,442 | 47 | 22 | MIT | 2023-04-03T17:52:34 | 2018-01-23T00:21:43 | R | UTF-8 | R | false | true | 373 | rd | getAbsSandboxPath.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCy3-sandbox.R
\name{getAbsSandboxPath}
\alias{getAbsSandboxPath}
\title{getAbsSandboxPath}
\usage{
getAbsSandboxPath(fileLocation)
}
\arguments{
\item{fileLocation}{fileLocation}
}
\value{
file location
}
\description{
Get absolute sandbox path.
}
\examples{
\donttest{
getAbsSandboxPath("sampleData.csv")
}
}
|
4bf5adb35437899abb3912b81c9e92c43b8f8bb5 | 2ab3220d625574d895ce01eeb6a839d4d195fd5e | /man/Genomic.Instability-package.Rd | 559b7cf6bba0e98ff4d1adcfaac1678137aad5fa | [] | no_license | SilvestriMR/Genomic.Instability | 584ada34386cb43c0c6a9fedf3a2747bcccb99d6 | 2660fb4c42c43bb89d9f271b01fb096611638b48 | refs/heads/master | 2020-04-07T20:28:14.989134 | 2019-01-11T13:10:13 | 2019-01-11T13:10:13 | 158,672,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,232 | rd | Genomic.Instability-package.Rd | \name{Genomic.Instability-package}
\alias{Genomic.Instability-package}
\alias{Genomic.Instability}
\docType{package}
\title{
\packageTitle{Genomic.Instability}
}
\description{
\packageDescription{Genomic.Instability}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{Genomic.Instability}
\packageIndices{Genomic.Instability}
R package for the evaluation of genomic instability starting from copy number alteration data. In particular, it computes large-scale state transitions (LST).
LST is defined as the number of chromosomal breaks between adjacent regions of at least 10 Mb.
}
\author{
\packageAuthor{Genomic.Instability}
Maintainer: \packageMaintainer{Genomic.Instability}
}
\references{
S. B. Greene et al., “Chromosomal Instability Estimation Based on Next Generation Sequencing and Single Cell Genome Wide Copy Number Variation Analysis,” PLoS One, vol. 11, no. 11, p. e0165089, Nov. 2016.
T. Popova et al., “Ploidy and Large-Scale Genomic Instability Consistently Identify Basal-like Breast Carcinomas with BRCA1/2 Inactivation,” Cancer Res November 1 2012 (72) (21) 5454-5462
}
\keyword{ package }
\seealso{
}
\examples{
data(ratio)
MeasureLST(data=ratio, window = 639926, ID = "AA", workflow = "SS")
}
|
d1d30348e9ee0acf8177459ae71c844bb6f94c33 | dd89b14e542e1227b3a63147727be2ec63b4570d | /man/explore.Rd | 4696011e93145aab40ad457bf34f4c3a7c2a8726 | [
"BSD-3-Clause"
] | permissive | MetAnnotate/metannoviz | 79b792432b6b719ba4744c4f252256e24763e46e | d9467dcde72f73edb37ff0149954ca9d1bac8aa0 | refs/heads/master | 2022-11-22T02:44:43.923746 | 2020-07-28T05:41:31 | 2020-07-28T05:41:31 | 264,586,383 | 4 | 0 | BSD-3-Clause | 2020-07-28T05:30:50 | 2020-05-17T04:53:37 | R | UTF-8 | R | false | true | 2,288 | rd | explore.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/explore.R
\name{explore}
\alias{explore}
\alias{explore_metannotate_data}
\title{Explore MetAnnotate data}
\usage{
explore(
metannotate_data,
evalue = 1e-10,
taxon = "Family",
normalizing_HMM = "rpoB",
top_x = 0.02,
percent_mode = "within_sample",
colouring_template_filename = NA,
quietly = FALSE,
...
)
}
\arguments{
\item{metannotate_data}{The mapped metannotate tibble output by \code{\link{map_naming_information}}}
\item{evalue}{E-value cutoff for HMM hits}
\item{taxon}{Character vector (length 1) giving the taxon name to collapse to
Can be: domain, phylum, class, order, family, genus, species (case insensitive)}
\item{normalizing_HMM}{Name of the normalizing HMM (e.g., 'rpoB')]; specify 'auto' to attempt auto-detection}
\item{top_x}{Numeric vector (length 1) giving the subsetting amount you desire.
If top_x >=1, the script will return the "top_x most abundant taxa" for each Dataset/HMM.Family
If top_x <1, the script will return "all taxa of (top_x * 100\%) abundance or greater for each Dataset/HMM.Family -
but see below.}
\item{percent_mode}{If top_x <1, there are two different methods for keeping the most abundant organisms:
\itemize{
\item "within_sample" -- the normalized \% abundance relative to rpoB is used
\item "within_HMM" -- the percent abundance of that taxon within the specific HMM gene hits is used.
You won't notice much of a difference between these modes unless one of your HMMs has very few hits and you want to
show some of the taxa that were hit. This would be a good time to use 'within_HMM'.
}}
\item{colouring_template_filename}{Filename of the colouring template you want to load
If the file does not exist, then this function will write a template to that file
If 'NA' is entered, then the function will auto-generate colours and continue on}
\item{quietly}{logical (TRUE/FALSE); if TRUE, only reports warnings and errors}
\item{...}{Other fine-tuned plotting options controlled by \code{\link{visualize}} and the underlying
\code{\link{generate_ggplot}}. Highlights include plot_type, which can be "bar" or "bubble"}
}
\value{
A ggplot of MetAnnotate data
}
\description{
high-level exploration function for examining MetAnnotate data
}
|
2a72bac8c9dda23c655268b71db3a4672f5f6128 | aca02256163fc81fa16fa1b74f1279a347797a65 | /PRCC_function_test.R | f6fe9941c9e9c96cc0b71ac0229b1bfee98b0671 | [] | no_license | jekableka/Gubbins_Sensitivity | 824cd9129cbd28784195ede09dcde9a5074a0c3c | 280590aa2877280aa5cdbb088698a93ee718b97b | refs/heads/master | 2016-09-06T19:02:17.403820 | 2013-07-30T14:19:36 | 2013-07-30T14:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 650 | r | PRCC_function_test.R | ## Set seed for random number generator
set.seed(1)  # reproducible rnorm draws
##Load Hmisc package for correlation coefficient with p-values
library("Hmisc")
##Generate random input variables
x <- rnorm(10)
y <- rnorm(10)
z <- rnorm(10)
##Generate random output variable
w <-rnorm(10)
##Rank transform variables (PRCC operates on ranks, not raw values)
a <- rank(x)
b <- rank(y)
c <- rank(z)
out <- rank(w)
##PRCC Function
# Partial rank correlation coefficient (PRCC) of input `a` with output `out`,
# controlling for covariates `b` and `c`.
# All four arguments are expected to be rank-transformed vectors of equal
# length. The covariates are partialled out of both `a` and `out` by linear
# regression, and the correlation of the two residual vectors (via
# Hmisc::rcorr) gives the PRCC together with its p-value.
prcc.func <- function(a, b, c, out) {
  input_resid <- residuals(lm(a ~ b + c))
  output_resid <- residuals(lm(out ~ b + c))
  corr_mat <- rcorr(input_resid, output_resid)
  list(prcc = corr_mat$r[1, 2], pvalue = corr_mat$P[1, 2])
}
# Compute the PRCC (and its p-value) of x with w, controlling for y and z,
# using the rank-transformed vectors built above.
prcc.func(a,b,c,out)
55ecc251172cd8b323e95e2c3fa439f0c42bc1d0 | e070f185ceb16706cd691b292352358360e2bc4d | /man/design_matrix.Rd | 7cf90ea09cbe427b594cb0d60732fb5bfe2e1579 | [] | no_license | muschellij2/fmrireg | 39d1a2576c728ec47b6a92e8f239fe767b29cb84 | 634092518a5b647bc0b57ec2368256110daed610 | refs/heads/master | 2021-01-13T09:50:42.755202 | 2016-05-17T11:59:32 | 2016-05-17T11:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 279 | rd | design_matrix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_generic.R
\name{design_matrix}
\alias{design_matrix}
\title{design_matrix}
\usage{
design_matrix(x, ...)
}
\arguments{
\item{x}{the term}
}
\description{
Construct a design matrix from the term.
}
|
d472b44e8c5e648165300b9c183f04de433d64e2 | 996c0e4ae7571e1cb90f02056d7eda404099b9bb | /SummerInern/p-medds-master/pmedds.core/man/mcmc.boxplot.Rd | 5569de2b375336ff9bca38d618ca4e4e40d2e5ee | [] | no_license | yinzhuoding/Internship | 5b351885226a698494fc39b8b3c544b5a445706b | 2389f8b6038916bc807fdf7d33efda1ec51f7531 | refs/heads/master | 2021-01-23T02:24:23.538177 | 2017-03-23T20:14:37 | 2017-03-23T20:14:37 | 85,991,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,535 | rd | mcmc.boxplot.Rd | \name{mcmc.boxplot}
\alias{mcmc.boxplot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Post-processing of MCMC data.
}
\description{
After MCMC optimization, consolidates 'R0' and 'pC' values into the form used for the box plots produced by plot.results.mcmc().
}
\usage{
mcmc.boxplot(tab=NULL,model=NULL,mydata=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tab}{
A vector that contains the value of all parameters for each saved step from the MCMC chain.
}
\item{model}{
A list/data structure containing all MCMC and model parameters. see setup().
}
\item{mydata}{
A list/data structure containing all input data for the flu season. see get.data()
}
}
\value{
%% ~Describe the value returned
No return value
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Riley P, Ben-Nun M, Armenta R, Linker JA, Eick AA, et al. (2013) Multiple Estimates of Transmissibility for the 2009 Influenza Pandemic Based on Influenza-like-Illness Data from Small US Military Populations. PLoS Comput Biol 9: e1003064. doi:10.1371/journal.pcbi.1003064.}
\author{
Predictive Science Inc.
}
\examples{
## This function is specific to results returned during the execution of runPMEDDS() and is not intended for general use
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
e54b457a462d6a674829829f96bfdffbfd859820 | a400213d8441738fa0464fdaff21d39db205aaa4 | /R/graphscan_cluster.R | f762235e47b310c529e4cf3974c847f5cced08fb | [] | no_license | cran/graphscan | 7d57dc13bdc5ba8cdd2222d6cd33d23719dbbce2 | 83036e37d27e5885c70b5b355828d8fbd69674a8 | refs/heads/master | 2020-06-07T09:53:48.244272 | 2016-10-14T16:22:50 | 2016-10-14T16:22:50 | 30,610,972 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,771 | r | graphscan_cluster.R | # ---------------------------------------------------------------
# graphscan : version 1.1
# fonctions cluster, .cluster_1d, .cluster_nd
# lancer les programmes C pour lancer les analyses de détection
# création : 23/10/13
# version du : 18/09/18
# Unité Epidémiologie Animale (UR346)
# Auteurs : Robin Loche, Benoit Giron, David Abrial, Lionel Cucala, Myriam Charras-Garrido, Jocelyn De-Goer
# ---------------------------------------------------------------
# S4 generic: run a cluster-detection analysis on a graphscan object.
setGeneric(name="cluster",
	def=function(gr,n_simulation=NULL,cluster_analysis=NULL,memory_size=2000){standardGeneric("cluster")}
)
# Method for graphscan objects: validates the arguments, updates the
# parameters stored in `gr`, then dispatches to the 1d or nd analysis.
setMethod(f="cluster",signature="graphscan",
definition=function(gr,n_simulation=gr@param$n_simulation,cluster_analysis=gr@param$cluster_analysis,memory_size=2000)
{
	# -----------------------------------------------------
	# validate the arguments of the cluster function
	# -----------------------------------------------------
	# argument n_simulation: number of Monte-Carlo simulations (10..10000)
	if(!is.numeric(n_simulation))
	  stop("argument 'n_simulation' must be 'numeric'",call.=F)
	# NOTE(review): message reads "must be comprise between"; should be
	# "comprised between" (runtime string deliberately left untouched here).
	if(n_simulation<10 | n_simulation>10000)
	  stop("argument 'n_simulation' must be comprise between 10 and 10000",call.=F)
	if(!is.numeric(memory_size) || memory_size<0)
	  stop("argument 'memory_size' must be 'numeric' and >0",call.=F)
	# argument cluster_analysis must be one of the three supported modes
	if(is.na(match(cluster_analysis,c("both","positive","negative"))))
	  stop("argument 'cluster_analysis' must be 'both', 'positive', 'negative'",call.=F)
	# update n_simulation and cluster_analysis stored in gr when they differ
	if(n_simulation!=gr@param$n_simulation) gr@param$n_simulation<-n_simulation
	if(cluster_analysis!=gr@param$cluster_analysis) gr@param$cluster_analysis<-cluster_analysis
	# run the analysis matching the data dimension
	if(gr@param$dimension == "1d")
	{
	  gr<-.cluster_1d(gr,n_simulation,cluster_analysis)
	}
	else
	{
	  gr<-.cluster_nd(gr,n_simulation,cluster_analysis,memory_size)
	}
	return(gr)
})
# ----------------------------------------------------
# fonction pour détection 1d
# ----------------------------------------------------
# 1d cluster detection: runs the C routine 'detection_multiple_dagregat' on
# every event series stored in the graphscan object `gr`, pools the results,
# post-processes them and stores everything in gr@cluster. Returns `gr`.
.cluster_1d<-function(gr,n_simulation=gr@param$n_simulation,cluster_analysis=gr@param$cluster_analysis)
{
	resultat<-NULL
	cat("in progress ...\n")
	for(i in 1:gr@param$n_events_series)
	{
	   # -----------------------------------------------------
	   # retrieve the parameters from the gr object
	   # -----------------------------------------------------
	   # number of events
	   nb_evenement<-gr@param$n_events[i]
	   # bounds used for the normalisation
	   normalisation_debut<-gr@param$normalisation_factor[[i]][1]
	   normalisation_fin<-gr@param$normalisation_factor[[i]][2]
	   # vector of event positions (and normalisation indicator)
	   vecteur_evenement<-as.numeric(gr@data$x[[i]])
	   # significance threshold for the clusters
	   alpha<-gr@param$alpha
	   # cluster-size parameter for the event series
	   theta<-10
	   # number of simulations used to compute significance,
	   # driven by the n_simulation argument of the cluster function
	   if(n_simulation==gr@param$n_simulation) nb_simulation<-gr@param$n_simulation else nb_simulation<-n_simulation
	   # detection mode,
	   # driven by the cluster_analysis argument of the cluster function
	   if(cluster_analysis==gr@param$cluster_analysis) choix_detection_r_string<-gr@param$cluster_analysis else choix_detection_r_string<-cluster_analysis
	   choix_detection<-match(choix_detection_r_string,c("positive","negative","both")) # numeric coding 1, 2, 3.
	   # when positive and negative clusters are detected together
	   # (cluster_analysis='both') and a positive and a negative cluster are
	   # equally significant, this user choice decides which type is kept
	   choix_type_agregat<-match(gr@param$cluster_user_choice,c("negative","positive","random")) # numeric coding 1, 2, 3.
	   # --------------------------------------------------------------------------------
	   # run the cluster search with the C function 'detection_multiple_dagregat'
	   # on 1d data
	   # --------------------------------------------------------------------------------
	   # the C function 'detection_multiple_dagregat' returns results of the form:
	   # start,end,index,pvalue,positivity,id_cluster,id_serie
	   res<-NULL
	   cat("events series : ",i,"/",gr@param$n_events_series,"\n")
	   res<-.Call("detection_multiple_dagregat",nb_evenement,normalisation_debut,
	              normalisation_fin,vecteur_evenement,alpha,theta,
	              nb_simulation,choix_detection,choix_type_agregat)
	   # handle an empty result
	   if(ncol(res)==0) res<-matrix(NA,nrow=6,ncol=1)
	   res<-rbind(res,rep(i,times=dim(res)[2]))
	   resultat<-rbind(resultat,t(res))
	}
	# --------------------------------------------------------------------------------
	# post-process the result table
	# --------------------------------------------------------------------------------
	if(!is.na(resultat[1][1])) # check that at least one result is present
	{
	  res<-.cluster_1d_traitement_tableau(resultat,gr)
	  resultat_brut<-res[[1]]
	  resultat_traite<-res[[2]]
	  description_serie_evenement<-res[[3]]
	} else
	{
	  resultat_brut<-NULL
	  resultat_traite<-NULL
	  # message used when no cluster is detected
	  description_serie_evenement<-paste("No cluster detected at level alpha = ",gr@param$alpha,sep="")
	}
	# store the analysis results in the gr object
	gr@cluster[["cluster_1d"]]<-resultat_traite
	gr@cluster[["cluster_1d_raw"]]<-resultat_brut
	gr@cluster[["cluster_1d_description"]]<-description_serie_evenement
	# return the gr object
	return(gr)
}
# ----------------------------------------------------
# fonction pour détection nd
# ----------------------------------------------------
# Multi-dimensional (2d/3d) cluster detection: runs the C routine
# 'detection_cluster' (Cucala and Kulldorff scan indices) on the point data
# stored in `gr` and writes the significant clusters (if any), plus a textual
# description of each, into gr@cluster. Returns `gr`.
.cluster_nd<-function(gr,n_simulation=gr@param$n_simulation,cluster_analysis=gr@param$cluster_analysis,memory_size=2000)
{
	resultat<-NULL
	# number of simulations used to compute significance,
	# driven by the n_simulation argument of the cluster function
	# NOTE(review): nb_simulation is computed but the .Call below passes
	# n_simulation directly, so this value is never used -- confirm intent.
	if(n_simulation==gr@param$n_simulation)
		nb_simulation<-gr@param$n_simulation else nb_simulation<-n_simulation
	# dimension of the coordinates (numeric coding 1, 2, 3)
	dimension<-match(gr@param$dimension,c("1d","2d","3d"))
	# point coordinates in the format x1,y1,x2,y2,...
	coordonnees_r<-as.vector(t(gr@data$x@coords))
	# set of point identifiers
	id_r<-1:nrow(gr@data$x)
	id_r<-as.integer(id_r)
	# number of cases and controls per point
	cas_r<-as.numeric(gr@data$x@data$cases)
	controle_r<-gr@data$x@data$controls
	# total number of points (cases + controls)
	# nb_point<-gr@param$n_events+gr@param$n_controls
	# modification by David
	nb_point<-nrow(gr@data$x)
	# cluster search
	resultat = .Call("detection_cluster",nb_point,dimension,n_simulation,id_r,coordonnees_r,controle_r,cas_r,as.integer(memory_size))
	# shape the results into SpatialPointsDataFrame format
	# (the Kulldorff index is exponentiated)
	nb_point_cucala<-resultat[3,1]
	if(nb_point_cucala>0)
		cucala_data<-data.frame(id=resultat[7:(6+nb_point_cucala),1], index=rep(resultat[1,1],times =nb_point_cucala),radius=rep(resultat[2,1],times=nb_point_cucala),pvalue=rep(resultat[4,1],times=nb_point_cucala),n_controls=rep(resultat[5,1],times=nb_point_cucala),n_cases=rep(resultat[6,1],times=nb_point_cucala))
	nb_point_kulldorf<-resultat[3,(dimension+2)]
	if(nb_point_kulldorf>0)
		kulldorff_data<-data.frame(id=resultat[7:(6+nb_point_kulldorf),(dimension+2)], index=rep(resultat[1,(dimension+2)],times=nb_point_kulldorf),radius=rep(resultat[2,(dimension+2)],times=nb_point_kulldorf),pvalue=rep(resultat[4,(dimension+2)],times=nb_point_kulldorf),n_controls=rep(resultat[5,(dimension+2)],times=nb_point_kulldorf),n_cases=rep(resultat[6,(dimension+2)],times=nb_point_kulldorf))
	# describe the Cucala result (only kept when significant at alpha)
	if(nb_point_cucala>0 && nrow(cucala_data)>0 && cucala_data[1,4]<=gr@param$alpha)
	{
		cucala<-SpatialPointsDataFrame(coords=resultat[7:(6+nb_point_cucala),2:(dimension+1)],data= cucala_data)
		description_cucala<-paste("Cucala index: ",sprintf(fmt="%10.3e",cucala_data[1,2])," - pvalue: ",round(cucala_data[1,4],3),sep="")
		description_cucala<-paste(description_cucala," \nn_cases: ",cucala_data[1,6]," - n_controls: ",cucala_data[1,5],sep="")
		description_cucala<-paste(description_cucala," - radius: ",round(cucala_data[1,3],2),sep="")
	}else{
		description_cucala<-paste("No cluster detected with Cucala index at level alpha = ",gr@param$alpha,sep="")
		cucala<-NULL
	}
	# describe the Kulldorff result (only kept when significant at alpha)
	if(nb_point_kulldorf>0 && nrow(kulldorff_data)>0 && kulldorff_data[1,4]<=gr@param$alpha)
	{
		kulldorff<-SpatialPointsDataFrame(coords=resultat[7:(6+nb_point_kulldorf),(dimension+3):(2*dimension+2)],data= kulldorff_data)
		description_kulldorff<-paste("Kulldorff index: ",sprintf(fmt="%10.3e",kulldorff_data[1,2])," - pvalue: ",round(kulldorff_data[1,4],3),sep="")
		description_kulldorff<-paste(description_kulldorff," \nn_cases: ",kulldorff_data[1,6]," - n_controls: ",kulldorff_data[1,5],sep="")
		description_kulldorff<-paste(description_kulldorff," - radius: ",round(kulldorff_data[1,3],2),sep="")
	}else{
		description_kulldorff<-paste("No cluster detected with Kulldorff index at level alpha = ",gr@param$alpha,sep="")
		kulldorff<-NULL
	}
	# store the analysis results in the gr object
	gr@cluster<-list()
	gr@cluster[["cluster_nd_cucala"]]<-cucala
	gr@cluster[["cluster_nd_kulldorff"]]<-kulldorff
	gr@cluster[["cluster_nd_description"]]<-c(description_cucala,description_kulldorff)
	return(gr)
}
|
711f5f1fd95143395817b016d6a4a923dd5c0dbc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/eqs2lavaan/examples/eqsDesc.Rd.R | 2a9d84b0b48e004cf25d7c023c6d6e9dd85a1ee5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 364 | r | eqsDesc.Rd.R | library(eqs2lavaan)
### Name: eqsDesc
### Title: Extract Descriptive Statistics from an EQS Output File
### Aliases: eqsDesc
### Keywords: EQS lavaan desc mean sd kurt skew model CFA dev
### ** Examples
# EQS required to get a necessary .out file
# Run for62.eqs from the EQS examples and save .out to R directory location
## Not run: eqsCorr("for62.out")
|
3c3635594d43ca09c674dff6ccdc5e7e62de75a9 | e4e9cb2b0972ae42c4fa439e9359b2cdb8dea3d7 | /utils.R | 2e5a60ec85be5e22f509d0b222f019139a57be13 | [] | no_license | gzu300/Internship1 | d4c4d04f0ba50f2e97645bfad8e67534e015c336 | cb4bcee661e8e46d014a9650438d4aa1c41eb197 | refs/heads/master | 2020-04-06T18:43:03.705060 | 2019-03-18T22:12:48 | 2019-03-18T22:12:48 | 157,709,688 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,506 | r | utils.R | #########################data##################
######
#simulation data
######
library(tidyverse)
library(purrr)  # already attached by tidyverse; harmless but redundant
#source('simulation_util.R')
source('MaSigPro_util.R')
source('ASCA-genes.1.2.1/sourceASCA.R')
# NOTE(review): setwd() inside a sourced script changes state for the whole
# session and makes the script location-dependent; prefer project-relative
# paths instead -- confirm before removing.
setwd('../')
######
#generate data
######
# Simulate a feature-by-sample abundance matrix with known temporal patterns.
#
# pat_names.list   : list/vector of pattern labels (e.g. 'flat', 'up').
# ftrs_in_pat.list : number of features per pattern; same length as
#                    pat_names.list.
# replicates       : number of replicate observations drawn per mean value.
# sd               : common standard deviation of the Gaussian noise.
# ...              : one mean-trend vector per pattern, in the same order as
#                    pat_names.list; all trend vectors must have equal length.
#
# Returns list(df = <features x samples matrix>, groups = <pattern label per
# feature row>); the labels are also attached as the row names of `df`.
create.simulation <- function(pat_names.list,ftrs_in_pat.list,replicates,sd,...){
  # One pattern label per simulated feature; reused as row names / group tags.
  dimnames <- pat_names.list %>%
    map2(.,ftrs_in_pat.list,~rep(.x,.y)) %>%
    flatten_chr() %>%
    list(NULL,.)
  #create the simulation matrix: one rnorm draw of `replicates` values per
  #(feature, mean) pair, assembled column-major (one column per feature) and
  #transposed to features x samples.
  wrap_tre <- function(dimnames,replicates,sd,...){
    trend_list <- list(...)
    trend_list %>%
      map2(ftrs_in_pat.list,.,~rep(.y,.x)) %>%
      flatten() %>%
      map(.,~rnorm(n = replicates,mean = .x,sd = sd)) %>%
      flatten_dbl() %>%
      matrix(.,nrow = length(trend_list[[1]])*replicates,dimnames = dimnames) %>%
      t(.)
  }
  # BUG FIX: the original call passed `replicates = rep`, i.e. the base
  # function `rep` (or whatever global `rep` happened to exist) instead of
  # this function's `replicates` argument, silently ignoring the parameter.
  wrap_tre <- wrap_tre(dimnames = dimnames,replicates = replicates,sd = sd,...)
  output <- list(df=wrap_tre,groups=dimnames[[2]])
  output
}
#######################asca-gene#############
######
#design matrix
#####
##functions
asca.design.matrix <- function(i, j, r, time) {
  # Build the ANOVA design (dummy/indicator) matrices for a balanced
  # two-factor longitudinal design with `i` treatments, `j` time points and
  # `r` replicates per treatment/time cell.
  #
  # Rows index the i*j*r observations ordered treatment-major (all rows of
  # treatment 1 first), then by time point, then replicate. `time` is the
  # vector of time-point labels used for column names; it should have
  # length `j`.
  #
  # Returns a named list:
  #   $i  : (i*j*r) x i     treatment indicator matrix
  #   $j  : (i*j*r) x j     time-point indicator matrix
  #   $ij : (i*j*r) x (i*j) treatment-by-time interaction indicator matrix
  #
  # The original implementation produced these block patterns by adding a
  # deliberately non-conforming vector to a zero matrix, relying on partial
  # recycling; that emitted "not a multiple" warnings and failed outright for
  # i = 1. Kronecker products build the identical patterns explicitly.
  ones <- function(k) matrix(1, nrow = k, ncol = 1)
  mx <- list(
    i  = kronecker(diag(i), ones(j * r)),
    j  = kronecker(ones(i), kronecker(diag(j), ones(r))),
    ij = kronecker(diag(i * j), ones(r))
  )
  colnames(mx$i)  <- paste('treatment', 1:i)
  colnames(mx$j)  <- paste('T', time)
  colnames(mx$ij) <- paste('inter', 1:(i * j))
  mx
}
######
#plot
######
######fitted asca_gene data frame######
# Run the permutation test for the chosen leverage flavour, derive the
# leverage/SPE cut-offs, assemble the per-feature diagnostics and build all
# summary plots for one fitted ASCA model.
# NOTE(review): this function reads `mx`, `Fac` and `type` from the global
# environment and calls leverage.lims / SPE.lims / plot.NullDistribution from
# the sourced ASCA-genes code -- it cannot run standalone; confirm those are
# defined before calling.
wrap.permutation.info <- function(df.final,asca.fit,groups,R,which_leverage,alpha=0.05,...){
  #attention: df.final. rows are features(eg.metabolites), columns are samples.
  #groups is a vector of strings or numbers specifies patterns of variables. for real data, feature names could be filled in
  #it needs to be the same length as variables. same pattern gives the same tag.
  #R is the number of permutations
  #which_leverage argument: improved leverage is 'ASCA.2f_leverage'. original leverage is 'ASCA.2f'. without the quote in argument
  ##original leverage loading normalised to 1
  ##improved leverage scores normalised to 1
  #...are the arguments to be passed into 'labels' layer of ggplot in plot.submodel function and
  #its downstream functions. such as: 'title', 'tag', etc
  permutated.data <- leverage.lims(df.final,R=R,FUN = which_leverage,Designa = mx$j, Designb = mx$i,Fac = Fac,type = type,alpha = alpha,showvar = F, showscree = F)
  permu.data <- permutated.data$NullDistribution$Model.bab
  lev.lim <- permutated.data$Cutoff[[2]]
  spe.lim <- SPE.lims(my.asca = asca.fit,alpha = alpha)[[2]]
  leverage <- asca.fit$Model.bab$leverage
  spe <- asca.fit$Model.bab$SPE
  #assemble dataframe for leverage vs spe as well as stats for prediction accuracy. FP, FN... can be calculated in stats function below
  #`truth` assumes every non-'flat' pattern is a true positive feature.
  lev.spe.toplot <- data.frame(leverage=leverage,
                               spe=spe,
                               metabolites=1:nrow(df.final),
                               patterns=groups,
                               predicted=(leverage>lev.lim),
                               truth=(groups != 'flat')
                               )
  # pattern labels of the features whose leverage exceeds the cut-off
  selected_features <- sort(groups[leverage>lev.lim])
  Nulldist_plot <- plot.NullDistribution(permu.data,asca.fit$Model.bab$leverage,groups,lev.lim,R,alpha,size=6)
  output <- list(lev_limit=lev.lim, spe_lim=spe.lim, stats_for_plot=lev.spe.toplot, selected_features=selected_features, Null_distribution_plot=Nulldist_plot)
  submodel_plots <- plot.submodels(output,asca.fit, groups=groups, Fac=Fac, size=6,...)
  output <- c(output, plots_for_submodel=submodel_plots)
  output
}
########stats###########
# Confusion-matrix counts comparing permutation-test predictions against the
# known ground truth. `permut_wrapped` must carry logical vectors `predicted`
# and `truth` of equal length (e.g. the `stats_for_plot` data frame built by
# wrap.permutation.info). Returns list(TP, TN, FP, FN).
# NOTE(review): the name `stats` shadows the base `stats` package for plain
# function lookup; kept unchanged for interface compatibility.
stats <- function(permut_wrapped) {
  pred <- permut_wrapped$predicted
  real <- permut_wrapped$truth
  list(
    TP = sum(pred & real),
    TN = sum(!pred & !real),
    FP = sum(pred & !real),
    FN = sum(!pred & real)
  )
}
#####plots########
#plot.leverage_spe(df.final,asca.fit, groups)
# Scatter plot of leverage vs SPE for every feature, with the permutation
# cut-off lines overlaid; points are coloured by their known pattern.
# `permut_wrapped` is the list returned by wrap.permutation.info (uses its
# $stats_for_plot, $spe_lim and $lev_limit elements).
# NOTE(review): the `sd` argument is never used, and its default `sd = sd` is
# self-referential (would error if ever forced; harmless only because lazy
# evaluation never touches it). The default `title` reads `Fac` from the
# calling/global environment -- confirm `Fac` exists before relying on it.
plot.leverage_spe <- function(permut_wrapped, sd=sd,title = paste('improved leverage and SPE with',Fac[3],'PCs'),size,...){
  plot <- ggplot(data = permut_wrapped$stats_for_plot,aes(x=leverage,y=spe,color=patterns))+
    geom_point()+
    geom_hline(yintercept = permut_wrapped$spe_lim)+   # SPE significance cut-off
    geom_vline(xintercept = permut_wrapped$lev_limit)+ # leverage significance cut-off
    labs(title = title,...)+
    theme(axis.title = element_text(size=size),panel.background = element_blank(),legend.title = element_text(size=size),legend.text = element_text(size=3),legend.key.size = unit(0.08,'cm'), plot.title = element_text(size=size))
  plot
}
#score plot
# One score-vs-time line plot per principal component of the b.ab submodel
# (time + interaction effects), one line per treatment. Returns a named list
# ('PC1', 'PC2', ...) of ggplot objects.
# NOTE(review): `time` is read from the global environment and must have
# length `j`; `asca.fit$Model.bab$scores` must have i*j rows ordered
# treatment-major -- confirm both before reuse.
plot.submodels_score <- function(asca.fit,i,j,title = paste('score plot for submodel b.ab')){
  #plot score vs time of all the PCs for submodel b.ab
  scores <- asca.fit$Model.bab$scores
  PCs <- 1:ncol(scores)
  # score columns come first in this data.frame, so column `each` below is PC `each`
  bab.toplot <- data.frame(scores=scores,
                           time=rep(time,i),
                           treatments=rep(paste('treatment',1:i),each=j))
  # pre-sized named list that will hold one plot per PC
  output <- as.vector(PCs,mode = 'list') %>%
    set_names(paste('PC',PCs,sep = ''))
  for (each in PCs){
    plot <- ggplot(data = bab.toplot,aes(x=time,y=bab.toplot[[each]],color=treatments))+
      geom_line()+
      ylab(label = paste('PC',each))+
      labs(title = title)
    output[[each]] <- plot
  }
  output
}
#loading plot
# One loading bar chart per principal component of the b.ab submodel: the
# loading of every feature on that PC, coloured by the feature's pattern tag.
# Returns a named list ('PC1', 'PC2', ...) of ggplot objects.
plot.submodels_loading <- function(asca.fit,groups=NULL,title = paste('loading plot for submodel b.ab'),size=10,...){
  #plot loadings for all the PCs
  #groups is a vector of strings or numbers specifies patterns of variables.
  #it needs to be the same length as variables. same pattern gives the same tag.
  bab.loadings <- data.frame(loading=asca.fit$Model.bab$loadings,
                             metabolites=1:length(groups),
                             groups = groups)
  PCs <- 1:ncol(asca.fit$Model.bab$loadings)
  # pre-sized named list that will hold one plot per PC
  output <- as.vector(PCs,mode = 'list') %>%
    set_names(paste('PC',PCs,sep = ''))
  for (each in PCs){
    # loading columns come first in bab.loadings, so column `each` is PC `each`
    plot <- ggplot(bab.loadings,aes(x=metabolites,fill=groups))+
      geom_col(aes_string(y = colnames(bab.loadings)[each]))+
      ylab(paste('PC',each))+
      labs(title = title,...)+
      theme(axis.title = element_text(size=size),panel.background = element_blank(),legend.title = element_text(size=size),legend.text = element_text(size=3),legend.key.size = unit(0.08,'cm'), plot.title = element_text(size=size))
    output[[each]] <- plot
  }
  output
}
plot.submodels <- function(permut_wrapped,asca.fit, Fac=Fac, groups,size=10,...){
output <- list(leverage_spe=plot.leverage_spe(permut_wrapped = permut_wrapped,size = size,...),
scores=plot.submodels_score(asca.fit,i,j),
loadings=plot.submodels_loading(asca.fit,groups=groups,size = size,...))
output
}
plot_metabolites <- function(df,range,formula = y~poly(x,2),...){
df.final <- df
df.toplot <- data.frame(t(df.final[range,]))
df.toplot$time <- rep(rep(time,each=r),i)
df.toplot$treatment <- factor(rep(1:2,each=r*j))
a <- df.toplot %>%
gather(key = metabolites,value = value,1:length(range))
output <- ggplot(a,aes(x=time,y=value,color=treatment))+
geom_point(size=0.5)+
stat_summary(fun.y = mean,geom = 'line')+
theme(legend.title = element_text(size = 5),panel.background = element_blank())+
facet_wrap(metabolites~., scales = 'free')+
geom_smooth(method = 'lm', formula = formula,se = F,linetype = '3313')+
labs(...)
output
}
plot_a_metabolite <- function(df,FUN,which,size=6,...){
df.final <- df
trend.toplot <- data.frame(replicate=rep(1:(i*j),each=r),time=rep(time,each=r),treatment=factor(rep(1:i,each=j*r)),metabolite=df.final[which,])
ggplot(trend.toplot,aes(x=time,y=metabolite,color=treatment))+
geom_point()+
stat_summary(fun.y = FUN,geom = 'line')+
geom_smooth(method = 'lm', formula = y~poly(x,2),se = F,linetype = '3313')+
theme(legend.position=c(0.9,0.1),axis.title = element_text(size=size),legend.title = element_text(size=size),legend.text = element_text(size=3),legend.key.size = unit(0.08,'cm'), plot.title = element_text(size=size),panel.background = element_blank())+
labs(...)
}
plot.NullDistribution <- function(permu.data,model.data,colnames,cutoff,R,alpha,size){
permu.data <- permu.data
model.data <- model.data
model.leverage <- data.frame(metabolites=factor(colnames[1:length(model.data)]),leverage=model.data)
Nulldist <- data.frame(permu.data) %>%
gather(.,key = metabolites,value = leverage) %>%
mutate(.,metabolites=rep(colnames[1:nrow(permu.data)],each=ncol(permu.data))) %>%
ggplot(.,aes(metabolites,leverage,color=metabolites))+
geom_violin(draw_quantiles = c(1-alpha))+
geom_point(data = model.leverage,aes(x = metabolites,y = leverage))+
geom_hline(yintercept = cutoff)+
theme(axis.text.x.bottom = element_text(angle = 90,size = 7,hjust = 1,vjust = 0.5),panel.background = element_blank(),legend.position=c(0.5,0.8),axis.title = element_text(size=size),legend.title = element_text(size=size),legend.text = element_text(size=3),legend.key.size = unit(0.08,'cm'), plot.title = element_text(size=4))+
labs(title = paste('Null distribution by',R,'rounds of permutation. alpha:',alpha))
Nulldist
}
|
6507403fe14f857ac9e210feda97ab6ea4bf3bcd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DMwR/examples/dataset-class.Rd.R | 2437ffea87816d75745b6250972bdbcf21fca8ec | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 186 | r | dataset-class.Rd.R | library(DMwR)
### Name: dataset-class
### Title: Class "dataset"
### Aliases: dataset dataset-class show,dataset-method
### Keywords: classes
### ** Examples
showClass("dataset")
|
b399e10a283c441dcbc1a5338b2858eed4ce0943 | c427a5b3c9c434de9765e9cf4c287a711f744d53 | /man/delete_option_list.Rd | 504a03def096d4f27683810c0a72e8e199b4234b | [] | no_license | bdevoe/iformr | 69ac4b6b1e762d4a855b8cd81bf338c33618da36 | fe6cbc4b1334edff5acf84785fc75308f9a8f405 | refs/heads/master | 2021-06-06T01:31:21.954796 | 2019-06-05T18:50:38 | 2019-06-05T18:50:38 | 140,438,573 | 0 | 0 | null | 2018-08-17T19:36:21 | 2018-07-10T13:45:16 | R | UTF-8 | R | false | true | 1,279 | rd | delete_option_list.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/option_lists.R
\name{delete_option_list}
\alias{delete_option_list}
\title{Delete option list.}
\usage{
delete_option_list(server_name, profile_id, access_token, option_list_id)
}
\arguments{
\item{server_name}{String of the iFormBuilder server name.}
\item{profile_id}{Integer of the iFormBuilder profile ID.}
\item{access_token}{Access token produced by \code{\link{get_iform_access_token}}}
\item{option_list_id}{ID of the option list to be deleted.}
}
\value{
ID of the option list to be deleted.
}
\description{
Deletes an option list from a profile. Deleting options and option lists
should be done with consideration for existing data referencing the
list. As an alternative, options can be disabled by setting their condition
value to 'False'
}
\examples{
\dontrun{
# Get access_token
access_token <- get_iform_access_token(
server_name = "your_server_name",
client_key_name = "your_client_key_name",
client_secret_name = "your_client_secret_name")
# Delete option list
deleted_id <- delete_option_list(
server_name = "your_server_name",
profile_id = "your_profile_id",
access_token = access_token,
option_list_id
}
}
\author{
Bill Devoe, \email{William.DeVoe@maine.gov}
}
|
fbbfae051f0767707855034c1d6aa38958432239 | 5543691d5c4bd29023cf42d036c51e5842d2a0a5 | /WorkingWithJoins.R | 0771968af35eeb4bf7505cd94d814a904dd90463 | [] | no_license | BonduMohanSrinivasaSarma/Data-Analytics-R | 218d819ecf95d7eb9969ab322e9c6c38f827bbb8 | 04d23f76c89bfebdc7a30c2f1aa85b9cfa5774f6 | refs/heads/main | 2023-06-11T05:30:26.549712 | 2021-06-27T13:35:10 | 2021-06-27T13:35:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,138 | r | WorkingWithJoins.R | #Creating Buldings Data Frame
buildings<-data.frame(location=c(1:5),name=c('building1','building2','building3','building4','building5'));
View(buildings)
#Creating details Data Frame
details<-data.frame(survey=c(1,1,1,2,2,2),location=c(1,2,3,2,3,1),efficiency=c(51,64,70,71,80,58),Area=c(210,230,310,320,180,270));
View(details)
#Inner Join of Buildings,Details Data frame and storing in a1
a1<-merge(buildings,details)
View(a1);
#Outer Join of Buildings,Details Data frame and storing in a2
a2<-merge(x=buildings,y=details,by="location",all=TRUE);
View(a2);
#Left Outer Join of Buildings,Details Data frame and storing in a3
a3<-merge(x=buildings,y=details,by="location",all.x=TRUE);
View(a3);
#Right Join of Buildings,Details Data frame and storing in a4
a4<-merge(x=buildings,y=details,by="location",all.y=TRUE);
View(a4);
#Cross Join of Buildings,Details Data frame and storing in a5
a5<-merge(x=buildings,y=details,by=NULL);
View(a5);
#Printing Number of Missing Values in a1
print(paste("No of NA values in a1",sum(is.na(a1))))
#Printing Number of Missing Values in a2
print(paste("No of NA values in a2",sum(is.na(a2))))
#Printing Number of Missing Values in a3
print(paste("No of NA values in a3",sum(is.na(a3))))
#Printing Number of Missing Values in a4
print(paste("No of NA values in a4",sum(is.na(a4))))
#Printing Number of Missing Values in a5
print(paste("No of NA values in a5",sum(is.na(a5))))
#Printing the indices of NA values of a2
print(which(is.na(a2)));
#Printing the indices of NA values of a3
print(which(is.na(a3)));
#Replacing NA values through column mean Imputation
for(i in 1:ncol(a2)) #Iterating through dataframe
{
for(j in 1:nrow(a2))
{
if(is.na(a2[j,i])){#Checking if dataframe value at index is null
a2[j,i]=mean(a2[ ,i],na.rm = TRUE)} #Replacing with Column mean Value
if(is.na(a3[j,i])){#Checking if dataframe value at index is null
a3[j,i]=mean(a3[ ,i],na.rm = TRUE)}#Replacing with Column mean Value
}
}
#Viewing values after replacing it with column mean values
View(a2);
View(a3);
|
c07bebc7ec76b841e1e5d5d4bb05dd543b58efeb | 2ea521c969899b6a3b5820e143170c73c96c7e3e | /man/textProcessor.Rd | 4a97a18b62a3320de0b6e9f2e77f06aa2b5384ef | [
"MIT"
] | permissive | jblumenau/stm | 672205ef225f07ed2271f5756e4fd83de6c7bd7a | 275db44759276d9ef588575cc2c010d66584778e | refs/heads/master | 2020-12-31T02:22:32.495581 | 2014-02-24T01:14:01 | 2014-02-24T01:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,222 | rd | textProcessor.Rd | \name{textProcessor}
\alias{textProcessor}
\title{
Process a vector of raw texts
}
\description{
Function that takes in a vector of raw texts (in a variety of languages) and performs basic operations. This function is essentially a wrapper \code{tm} package where various user specified options can be selected.
}
\usage{
textProcessor(documents, metadata=NULL,
lowercase=TRUE, removestopwords=TRUE, removenumbers=TRUE,
removepunctuation=TRUE, stem=TRUE,
sparselevel=.99, language="en",
verbose=TRUE)
}
\arguments{
\item{documents}{
The documents to be processed. A character vector where each entry is the full text of a document. The \code{tm} package has a variety of extra readers for ingesting other file formats (\code{.doc, .pdf, .txt, .xml}).
}
\item{metadata}{
Additional data about the documents. Specifically a \code{data.frame} or \code{matrix} object with number of rows equal to the number of documents and one column per meta-data type. The column names are used to label the metadata. The metadata do not affect the text processing, but providing the metadata object insures that if documents are dropped the corresponding metadata rows are dropped as well.
}
\item{lowercase}{
Whether all words should be converted to lower case. Defaults to TRUE.
}
\item{removestopwords}{
Whether stop words should be removed using the SMART stopword list (in English) or the snowball stopword lists (for all other languages). Defaults to TRUE.
}
\item{removenumbers}{
Whether numbers should be removed. Defaults to TRUE.
}
\item{removepunctuation}{
whether punctuation should be removed. Defaults to TRUE.
}
\item{stem}{
Whether or not to stem words. Defaults to TRUE}
\item{sparselevel}{
removes terms where at least sparselevel proportion of the entries are 0.
}
\item{language}{
Language used for processing. Defaults to English. \code{tm} uses the \code{SnowballC} stemmer which as of version 0.5 supports "danish dutch english finnish french german hungarian italian norwegian portuguese romanian russian spanish swedish turkish". These can be specified as any on of the above strings or by the three-letter ISO-639 codes. You can also set language to "na" if you want to leave it deliberately unspecified (see documentation in \code{tm})}
\item{verbose}{
If true prints information as it processes.
}
}
\details{
This function is designed to provide a convenient and quick way to process a relatively small volume texts for analysis with the package. It is designed to quickly ingest data in a simple form like a spreadsheet where each document sits in a single cell. Once the text has been processed by \code{tm} the document term matrix is converted to the \code{stm} format using \code{\link{readCorpus}}.
The processor always strips extra white space but all other processing options are optional. Stemming uses the snowball stemmers and supports a wide variety of languages. Words in the vocabulary can be dropped due to sparsity and stop word removal. If a document no longer contains any words it is dropped from the output. Specifying meta-data is a convenient way to make sure the appropriate rows are dropped from the corresponding metadata file.
We emphasize that this function is a convenience wrapper around the excellent \code{tm} package functionality without which it wouldn't be possible.
}
\value{
\item{documents}{A list containing the documents in the stm format.}
\item{vocab }{Character vector of vocabulary.}
\item{meta}{Data frame or matrix containing the user-supplied metadata for the retained documents.}
}
\references{
Ingo Feinerer and Kurt Hornik (2013). tm: Text Mining Package. R package version 0.5-9.1.
Ingo Feinerer, Kurt Hornik, and David Meyer (2008). Text Mining Infrastructure in R. \emph{Journal of Statistical
Software} 25(5): 1-54.
}
\seealso{
\code{\link{readCorpus}}
}
\examples{
head(gadarian)
#Process the data for analysis.
temp<-textProcessor(documents=gadarian$open.ended.response,metadata=gadarian)
meta<-temp$meta
vocab<-temp$vocab
docs<-temp$documents
out <- prepDocuments(docs, vocab, meta)
docs<-out$documents
vocab<-out$vocab
meta <-out$meta
}
|
7dac8da79ee9b2116bbe0c157a9313a1e91d37a9 | 04ab00f952dafa6ab5d0d110af25ac09095db3ea | /R/rscript_NLBELT_2125.R | 5f904b0649853066b941a65aeea63c134f4e0a25 | [] | no_license | mut8/nlbelt_plfa | b8cb323d9a40f0b2e497a1e5744f993b8aca81f4 | b3c0c79d89e33d7451024877f9db2c558b4a68f3 | refs/heads/master | 2020-04-02T07:29:38.542958 | 2013-11-16T12:36:46 | 2013-11-16T12:36:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 73,190 | r | rscript_NLBELT_2125.R | source("/home/lluc/R_functions/functions.R")
source("~/R_functions/plfaplotfunction.R")
setwd("/home/lluc/Copy/MUN/phd/results/NL-BELT/Batches21-25combined/R")
mols<- data.frame(read.csv("mols.csv"))
fames<- read.csv("fames.csv")
samples<- read.csv("samples.csv")
resp245<-read.csv("respiration.csv")
resp<-read.csv("allresp.csv")
d13C<- read.csv("d13C.csv")
amp44<-read.csv("amp44.csv")
nb_C = c(15,15,15,16,16,16,17,18,18,18,19,18,20,NA,22)
#minimum amplitude (44) to accept d13C value
min_amp44 <- 200
#d13C of methanol used for saponification
d13C_meoh <- -50
cond.rel<-samples$trusted_rel==T
cond.abs<-samples$trusted_abs==T
cond.ref <- samples$Region!="Ref"
cond1 <-cond.abs & cond.ref
cond2 <-cond.rel& cond.ref
cond.irms <- samples$trusted_irms==T
cond3 <- cond.irms&cond.ref
sum<-rowSums(mols)
rel<-100*mols/rowSums(mols)
rel.colmeans<-rel
for (i in 1:ncol(rel.colmeans)) {
rel.colmeans[,i]<-rel[,i]/mean(rel[,i])
}
cy17prec<-(rel$cy17.0/rel$X2.4.16.1w7)
cy19prec<-((rel$cy.19.0+rel$cy.19.0.1)/rel$X18.1w9c)
cyprec<-((rel$cy17.0+rel$cy.19.0+rel$cy.19.0.1)/(rel$X2.4.16.1w7+rel$X18.1w9c))
F2B <-rowSums(rel[,fames$microbial.group=="fungi"])/
rowSums(rel[,fames$microbial.group=="Gpos"|fames$microbial.group=="Gneg"|fames$microbial.group=="bac"] )
F2B.18.2<- rel$X18.2w6.9/ rowSums(rel[,fames$microbial.group=="Gpos"|fames$microbial.group=="Gneg"|fames$microbial.group=="bac"] )
euk.conc <-rowSums(mols[,fames$microbial.group=="fungi"|fames$microbial.group=="euk"])
euk.rel <-rowSums(rel[,fames$microbial.group=="fungi"|fames$microbial.group=="euk"])
allbac.conc <-rowSums(mols[,fames$microbial.group=="Gpos"|fames$microbial.group=="Gneg"|fames$microbial.group=="bac"|fames$microbial.group=="act"] )
allbac.rel <-rowSums(rel[,fames$microbial.group=="Gpos"|fames$microbial.group=="Gneg"|fames$microbial.group=="bac"|fames$microbial.group=="act"] )
Euk2bac <- euk.rel/allbac.rel
fungi3.rel<-rowSums(rel[,fames$microbial.group=="fungi"])
fungi2.rel<-rel$X18.2w6.9 + rel$X18.3w3.6.9
fungi1.rel<- rel$X18.2w6.9
fungi3.conc<-rowSums(mols[,fames$microbial.group=="fungi"])
fungi2.conc<- mols$X18.2w6.9 + mols$X18.3w3.6.9
fungi1.conc<- mols$X18.2w6.9
monouns.conc<-mols$X15.1+mols$X2.1.16.1a+mols$X2.3.16.1b+mols$X2.4.16.1w7+mols$X16.1c+mols$X18.1b+mols$X18.1c
monouns.rel<-rel$X15.1+rel$X2.1.16.1a+rel$X2.3.16.1b+rel$X2.4.16.1w7+rel$X16.1c+rel$X18.1b+rel$X18.1c
pufa.conc<-rowSums(mols[,fames$saturation=="PUFA"])
pufa.rel<-rowSums(rel[,fames$saturation=="PUFA"])
longeven.rel <-rel$X20.0+rel$X22.0+rel$X24.0
longpufa.rel <- rel$X20.2+rel$X20.3a+rel$X20.3b+rel$X20.4w6.9.12.15+rel$X20.5w3.6.9.12.15+rel$X22.6w3.6.9.12.15.18
ia.rel <- rel$i.15.0+rel$ai.15.0
act.rel<-rowSums(rel[,fames$microbial.group=="act"])
longeven.conc <-mols$X20.0+mols$X22.0+mols$X24.0
longpufa.conc <- mols$X20.2+mols$X20.3a+mols$X20.3b+mols$X20.4w6.9.12.15+mols$X20.5w3.6.9.12.15
ia.conc <- mols$i.15.0+mols$ai.15.0
act.conc<-rowSums(mols[,fames$microbial.group=="act"])
#remove d13C values with amp44 < min
d13C [amp44 < min_amp44] <- NA
tmp <- data.frame(matrix(ncol=ncol(d13C)-2, nrow=length(unique(d13C[,2]))))
colnames(tmp) <- colnames(d13C)[3:ncol(d13C)]
rownames(tmp) <- names(tapply(d13C[,3], d13C$X.1, mean))
#calculate mean d13C per sample over multiple injections
for (i in 3:ncol(d13C)) {
tmp[,i-2] <-tapply(d13C[,i], d13C$X.1, mean)
}
#correct d13C for methanol
meoh_cor_d13C <- tmp
for (i in 1:ncol(tmp))
meoh_cor_d13C[,i] <- (tmp[,i]*(nb_C[i]+1)-d13C_meoh)/nb_C[i]
#copy d13C for internal standard w/o MeOH correction
meoh_cor_d13C$X20.0EE <- tmp$X20.0EE
#organize d13C values so that they are in the same order as sample info.
d13C_org <- data.frame(matrix(nrow=nrow(samples), ncol=ncol(meoh_cor_d13C)))
colnames(d13C_org) <- colnames(meoh_cor_d13C)
for (i in 1:nrow(samples))
{ if (is.na(samples$sample_nr_irms[i])==F)
d13C_org[i,T] <- meoh_cor_d13C[which(samples$sample_nr_irms[i]==rownames(meoh_cor_d13C)),T]
}
#calculate mass averaged means
# (d13C_org$i15.0*rel$i.15.0+
# d13C_org$a15.0*rel$i.15.0+
# d13C_org$X16.0a*rel$X16.0+
# d13C_org$X16.1w7*rel$X2.4.16.1w7+
# d13C_org$cy17.0*rel$cy17.0+
# d13C_org$X18.1w9*rel$X18.1w9c+
# d13C_org$X18.1w7*rel$X18.1b+
# d13C_org$X18.2w6*rel$X18.2w6.9+
# d13C_org$cy19.0*(rel$cy.19.0+rel$cy.19.0.1)+
# d13C_org$X18.3w3*rel$X18.3w3.6.9+
# d13C_org$X20.0*rel$X20.0+
# d13C_org$X22.0*rel$X22.0)/
# (rel$i.15.0+rel$i.15.0+rel$X16.0+rel$X2.4.16.1w7+rel$cy17.0+rel$X18.1w9c+rel$X18.1b+rel$X18.2w6.9+rel$X18.3w3.6.9+rel$X20.0+rel$X22.0)
# --- Long-format table of Delta13C(PLFA - SOM) --------------------------------
# One row per (trusted IRMS sample, PLFA): the PLFA's methanol-corrected d13C
# minus the sample's bulk-SOM d13C, plus the sample's design variables.
# Built as a list of per-PLFA frames and bound once (avoids the original
# grow-by-rbind loop); content and column order are unchanged.
plfas_reorg <- do.call(rbind, lapply(seq_len(ncol(d13C_org)), function(i)
  data.frame(d13C = d13C_org[cond3, i] - samples$d13C[cond3],
             Region = samples$Region[cond3],
             Horizon = samples$Horizon[cond3],
             Site = samples$Site[cond3],
             Sample_ID = samples$sample_nr_irms[cond3],
             plfa = colnames(d13C_org)[i])))
# Drop the 20:0 ethyl-ester internal standard and the 15:0 / 16:0 columns
plfas_reorg <- plfas_reorg[plfas_reorg$plfa != "X20.0EE", ]
plfas_reorg <- plfas_reorg[plfas_reorg$plfa != "X16.0", ]
plfas_reorg <- plfas_reorg[plfas_reorg$plfa != "X15.0", ]
# Logical mask of rows with a measured d13C, used as `[c]` throughout below.
# NOTE(review): this masks base::c as a variable; c(...) calls still find the
# function, but a more distinctive name would be safer.
c <- !is.na(plfas_reorg$d13C)
# --- Which fixed effects explain Delta13C(PLFA - SOM)? ------------------------
# A ladder of linear models on the long table. Printed output only appears
# when these lines are run interactively / with echo.
# Base model: horizon x PLFA interaction
anova(lm(d13C ~ Horizon * plfa, plfas_reorg))
summary(lm(d13C ~ Horizon * plfa, plfas_reorg))
# + additive region effect
anova(lm(d13C ~ Horizon * plfa + Region, plfas_reorg))
summary(lm(d13C ~ Horizon * plfa + Region, plfas_reorg))
# + region x horizon and region x PLFA two-way interactions
anova(lm(d13C ~ Horizon * plfa + Region*Horizon + Region*plfa, plfas_reorg))
summary(lm(d13C ~ Horizon * plfa + Region*Horizon + Region*plfa, plfas_reorg))
# full three-way interaction
anova(lm(d13C ~ Horizon * plfa * Region, plfas_reorg))
summary(lm(d13C ~ Horizon * plfa *Region, plfas_reorg))
# + region x PLFA interaction only
anova(lm(d13C ~ Horizon * plfa + Region*plfa, plfas_reorg))
summary(lm(d13C ~ Horizon * plfa + Region*plfa, plfas_reorg))
# Refit the candidates once and compare nested models by F-test
l1 <- lm(d13C ~ Horizon * plfa, plfas_reorg)
l2 <- lm(d13C ~ Horizon * plfa + Region, plfas_reorg)
l3 <- lm(d13C ~ Horizon * plfa + Region*Horizon + Region*plfa, plfas_reorg)
l4 <-lm(d13C ~ Horizon * plfa * Region, plfas_reorg)
l5 <- lm(d13C ~ Horizon * plfa + Region*plfa, plfas_reorg)
anova(l1,l2)  # does adding Region improve on the base model?
anova(l2,l3)  # do the region interactions add beyond additive Region?
anova(l2,l4)  # full three-way model vs. additive Region
anova(l3,l5)  # region x plfa only (l5) vs. both region interactions (l3)
# --- Residuals vs. region -----------------------------------------------------
# NOTE(review): x1 (= lm1$residuals) is only assigned further down the script,
# so this hor.plot and the two anova() calls below fail on a clean
# top-to-bottom run; they assume an interactive session where x1 already exists.
hor.plot(x1, hor=plfas_reorg$Horizon[c], horlev=c("L","F","H","B"), fac=plfas_reorg$Region[c], xlim=c(-1,1), er.type="ci", xlab="residual (permil, 95% CI)", ylab="soil horizon", pch=21:22, pt.bg=2:3)
# reference lines at 0 and +/- 1 permil
abline(v=0, col="grey")
abline(v=1, col="grey")
abline(v=-1, col="grey")
# Horizon x PLFA model on complete cases; its residuals are examined by region
lm1<-lm(plfas_reorg$d13C[c] ~ plfas_reorg$Horizon[c] * plfas_reorg$plfa[c])
plot(lm1$residuals~plfas_reorg$Region[c], xlim=c(0.5,2.5), xlab="region", ylab="residual (d13C ~ horizon * plfa)")
anova(lm(x1~plfas_reorg$Region[c]))
anova(lm(x1~plfas_reorg$Region[c]*plfas_reorg$Horizon[c]))
# --- Residual distributions by region (hist_residuals_byregion.pdf) -----------
# Compare the distribution of d13C ~ Horizon * plfa residuals between regions
# (GC / ER region codes -- confirm their meaning against samples.csv).
library(lattice)  # fix: densityplot()/histogram() below need lattice; it was
                  # originally only attached further down the script
pdf("hist_residuals_byregion.pdf", height=8, width=5)
par(mfrow=c(3,1))
x1 <- lm1$residuals                               # all residuals
x2 <- lm1$residuals[plfas_reorg$Region[c]=="GC"]  # GC only
x3 <- lm1$residuals[plfas_reorg$Region[c]=="ER"]  # ER only
t.test(x2,x3)  # result prints only when run interactively / with echo
hist(x1, xlim=c(-3,3), prob=TRUE)
# fix: mean/sd must be arguments of dnorm(), not of curve(); curve() forwards
# unknown named arguments to the plot method, so the original silently drew a
# standard-normal density instead of the fitted one
curve(dnorm(x, mean=mean(x1), sd=sd(x1)), col=2, add=TRUE)
# same fitted density shifted by +1 permil (cf. the abline(v=1) reference above)
curve(
  dnorm(x, mean=mean(x1)+1, sd=sd(x1))
, col=2, add=TRUE)
# ?curve ; ?dnorm   # debugging leftovers, disabled
hist(x2, xlim=c(-3,3), prob=TRUE)
curve(dnorm(x, mean=mean(x2), sd=sd(x2)), col=2, add=TRUE)
hist(x3, xlim=c(-3,3), prob=TRUE)
curve(dnorm(x, mean=mean(x3), sd=sd(x3)), col=2, add=TRUE)
# Lattice density plots of the residuals, grouped by region.
# NOTE(review): lattice objects do not auto-print when this file is source()d;
# wrap in print() if these panels are missing from the pdf.
densityplot(~x1, groups=plfas_reorg$Region[c])
# fix: the conditioning variables must be subset by [c] to match length(x1);
# the full-length vectors made lattice fail with unequal variable lengths
densityplot(~x1 | plfas_reorg$plfa[c], groups=plfas_reorg$Region[c], col=c("blue","green"))
densityplot(~x1 | plfas_reorg$Horizon[c], groups=plfas_reorg$Region[c], col=c("blue","green"))
# Histogram with fitted normal density, per residual subset
histogram(~x1,
          panel = function(x1,...){
            panel.histogram(x1,...)
            panel.mathdensity(dmath = dnorm, col = "red",
                              args = list(mean=mean(x1),sd=sd(x1)))
          },
          type="density", tck=0.01)
histogram(~x2,
          panel = function(x2,...){
            panel.histogram(x2,...)
            panel.mathdensity(dmath = dnorm, col = "red",
                              args = list(mean=mean(x2),sd=sd(x2)))
          },
          type="density", tck=0.01)
histogram(~x3,
          panel = function(x3,...){
            panel.histogram(x3,...)
            panel.mathdensity(dmath = dnorm, col = "red",
                              args = list(mean=mean(x3),sd=sd(x3)))
          },
          type="density", tck=0.01)
dev.off()
# --- Scratch: per-region residual normality checks ----------------------------
library(lattice)
# NOTE(review): the next two lines used `x` before it is assigned below, so a
# clean top-to-bottom run errors on them; disabled. The second also had the
# curve(dnorm, mean=, sd=) bug (args forwarded to plot, not dnorm).
# hist(x, prob=TRUE)
# curve(dnorm(x, mean=mean(x), sd=sd(x)), col=2, add=TRUE)
x <- lm1$residuals[plfas_reorg$Region[c]=="GC"]
histogram(~x,
          panel = function(x,...){
            panel.histogram(x,...)
            panel.mathdensity(dmath = dnorm, col = "red",
                              args = list(mean=mean(x),sd=sd(x)))
          },
          type="density")
# NOTE(review): this repeats the "GC" subset -- should it be "ER"
# (cf. the x2/x3 pair above)? Left unchanged pending confirmation.
x <- lm1$residuals[plfas_reorg$Region[c]=="GC"]
histogram(~x,
          panel = function(x,...){
            panel.histogram(x,...)
            panel.mathdensity(dmath = dnorm, col = "red",
                              args = list(mean=mean(x),sd=sd(x)))
          },
          type="density")
# ?density   # debugging leftover, disabled
# SnowsPenultimateNormalityTest() comes from the source()d helper files
SnowsPenultimateNormalityTest(lm1$residuals)
# Normal Q-Q plots: all residuals, then per region
qqnorm(lm1$residuals)
qqnorm(lm1$residuals[plfas_reorg$Region[c]=="GC"])
qqnorm(lm1$residuals[plfas_reorg$Region[c]=="ER"])
# ?hist   # debugging leftover, disabled
# Final summary of the horizon x PLFA model with additive region effect
summary(lm(plfas_reorg$d13C[c] ~ plfas_reorg$Horizon[c] * plfas_reorg$plfa[c]+plfas_reorg$Region[c]))
# fix: stray token `plf` (no such object) aborted source() here; disabled
# plf
# fix: d13C is the injection-level table, so d13C[c] (a logical index much
# longer than its column count) errors -- presumably plfas_reorg$d13C[c] was
# meant; disabled pending confirmation
# summary(d13C[c])
################thats it... now some test plots of IRMS data...
dev.off()  # NOTE(review): closes whatever device is current; no pdf is open here
# --- Delta13C(PLFA - SOM) depth profiles by compound (comp_d13c.pdf) ----------
# Four stacked panels; within each panel compounds are distinguished by line
# type (lty) and overlaid with add=T. hor.plot() is defined in the source()d
# helper files -- its argument semantics are assumed from usage here.
pdf("comp_d13c.pdf", height=7, width=5)
par(mfrow=c(4,1), mar=c(2.1,5.1,0.1,2.1) )
# Panel 1: 16:0 (dashed) and 18:1w9 (solid)
hor.plot(d13C_org$X16.0a[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7), add=F, lty=2)
hor.plot(d13C_org$X18.1w9[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=T)
legend("bottomright", lty=1:2, c("16:0", expression(paste("18:1", omega, "9" ))))
# Panel 2: i15:0, cy17:0 and 18:2w6,9
hor.plot(d13C_org$i15.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab=expression(paste(Delta, ""^"13", "C" [PLFA - SOM])), ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7))
hor.plot(d13C_org$cy17.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=T, lty=2)
hor.plot(d13C_org$X18.2w6[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=T, lty=4)
legend("bottomright", lty=c(1:2,4), c("i15:0","cy17:0", expression(paste("18:2", omega, "6,9" ))))
# Panel 3: ai15:0 and cy19:0
hor.plot(d13C_org$a15.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab=expression(paste(Delta, ""^"13", "C" [PLFA - SOM])), ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7))
hor.plot(d13C_org$cy19.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=T, lty=2)
legend("bottomright", lty=c(1:2,4), c("ai15:0", "cy19:0"))
# Panel 4: 18:1w7 and 16:1w7
hor.plot(d13C_org$X18.1w7[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7), add=F, lty=1)
hor.plot(d13C_org$X16.1w7[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7), add=T, lty=2)
legend("bottomright", lty=c(1:2,4), c(expression(paste("18:1", omega, "7" )),expression(paste("16:1", omega, "7" ))))
dev.off()
# --- Exploratory single-compound profiles (screen device; pdf closed above) ---
# One hor.plot per compound, same Delta13C(PLFA - SOM) depth-profile layout.
hor.plot(d13C_org$X16.1w7[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=T)
hor.plot(d13C_org$X16.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10))
hor.plot(d13C_org$X16.0a[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=F, lty=2)
hor.plot(d13C_org$i15.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10))
hor.plot(d13C_org$a15.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=F)
hor.plot(d13C_org$X16.1w7[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=F)
hor.plot(d13C_org$cy17.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-10, 10), add=F)
hor.plot(d13C_org$X18.1w7[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7), add=F)
hor.plot(d13C_org$cy19.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7), add=F)
hor.plot(d13C_org$X18.3w3[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7), add=F)
hor.plot(d13C_org$X20.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-8, 7), add=F)
hor.plot(d13C_org$X22.0[cond.irms]-samples$d13C[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=T, er.type.nested="sd", xlim=c(-20, 10), add=F)
# Fungal : bacterial ratio (18:2w6,9 based) profiles for IRMS-trusted vs.
# relative-trusted sample sets
hor.plot(fungi1.rel[cond.irms]/allbac.rel[cond.irms], hor=samples$Horizon[cond.irms], c("L","F","H","B"), fac=samples$Region[cond.irms], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=F, er.type.nested="sd", xlim=c(0, 1))
hor.plot(fungi1.rel[cond2]/allbac.rel[cond2], hor=samples$Horizon[cond2], c("L","F","H","B"), fac=samples$Region[cond2], pt.bg=2:3, legpl="topright", xlab="d13C 16:0", ylab="Soil horizon", oneway=F, er.type.nested="sd", xlim=c(0, 1))
# Interactive inspection leftovers (print only when run with echo)
cond.irms
samples$sample.code[cond.irms]
nrow(d13C_org)
hor.plot(cy19prec, hor=samples$Horizon, c("L","F","H","B"), fac=samples$Region)
# --- Delta13C vs. fungal fraction (d13c16_0_F2B.pdf) --------------------------
pdf("d13c16_0_F2B.pdf", height=5, width=5)
# Delta13C(PLFA - SOM) of 16:0 resp. 18:1w9 against the fungal fraction
# fungi2 / (fungi2 + all bacteria); fill colour = horizon, symbol = region.
# corplot() comes from the source()d helper files.
corplot(d13C_org$X16.0a[cond3]-samples$d13C[cond3], fungi2.rel[cond3]/(fungi2.rel[cond3]+allbac.rel[cond3]), bg=as.numeric(samples$Horizon[cond3]), pch=as.numeric(samples$Region[cond3])+20, ylab="Fungi : (Fungi + Bacteria)", xlab=expression(paste(Delta, ""^"13", "C" [PLFA(16:0) - SOM])))
corplot(d13C_org$X18.1w9[cond3]-samples$d13C[cond3], fungi2.rel[cond3]/(fungi2.rel[cond3]+allbac.rel[cond3]), bg=as.numeric(samples$Horizon[cond3]), pch=as.numeric(samples$Region[cond3])+20, ylab="Fungi : (Fungi + Bacteria)", xlab=expression(paste(Delta, ""^"13", "C" [PLFA(18:1(w9)) - SOM])))
legend("topright", c("L","F","H", "B"), pch=21, pt.bg=c(5,3,4,2))
dev.off()
# Correlation / regression follow-ups.
# NOTE(review): the summaries below use `x` before it is assigned a few lines
# further down, so a clean top-to-bottom run fails here; re-run order matters.
co<-cor.test(d13C_org$X16.0a[cond3]-samples$d13C[cond3], fungi2.rel[cond3]/(fungi2.rel[cond3]+allbac.rel[cond3]))
summary(lm(d13C_org$X16.0a[cond3]-samples$d13C[cond3]~ x))
summary(lm(d13C_org$X16.0[cond3]-samples$d13C[cond3]~ x))
summary(lm(d13C_org$X18.1w9[cond3]-samples$d13C[cond3]~ x))
co<-cor.test(d13C_org$X16.0a[cond3]-samples$d13C[cond3], fungi2.rel[cond3]/(fungi2.rel[cond3]+allbac.rel[cond3]))
lm(d13C_org$X16.0a[cond3]-samples$d13C[cond3]~ x)
x<-(fungi2.rel[cond3]/(fungi2.rel[cond3]+allbac.rel[cond3]))
co
# fix: the bare `$estimate^2` was a parse error that aborted source() of the
# whole file; the intent (squared correlation coefficient, r^2) needs `co`:
co$estimate^2
# text(0.3, -4, "asdfas")   # fix: placeholder debug annotation, disabled
paste(co$estimate, siglev(co$p.value))  # siglev() from the source()d helpers
#####################
# --- Unconstrained ordination (PCA via vegan's rda) of the PLFA profiles ------
# NOTE(review): rda() and scores() are from the vegan package, which is never
# attached in this file -- presumably loaded by the source()d helper scripts;
# confirm. ord.plot() is a custom wrapper from those helpers.
pdf("pca.pdf")
# PCA on column-mean-scaled mol% data, unstandardized (scale=F)
ord<-rda(rel.colmeans[cond2,T], scale=F)
ord.plot(ord,site.sep2=samples$Horizon[cond2], site.sep1=samples$Site[cond2], pch=21:24, pt.bg=c("green","green","green", "blue","blue","blue"), spe.label.type="text", spe.labels=fames$FAME, cex.leg=.5, spe.mult=1.5, site.cex=1)
par(mfrow=(c(2,2)))
dev.off()
svg("pca2.svg")
# PCA on raw mol% (covariance PCA)
ord<-rda(rel[cond2,T], scale=F)
ord.plot(ord,site.sep2=samples$Horizon[cond2], site.sep1=samples$Site[cond2], pch=21:24, pt.bg=c("green","green","green", "blue","blue","blue"), spe.label.type="text", spe.labels=fames$FAME, cex.leg=.5, spe.mult=1.5, site.cex=1)
dev.off()
svg("pca3.svg")
# PCA on standardized mol% (correlation PCA, scale=T)
ord<-rda(rel[cond2,T], scale=T)
ord.plot(ord,site.sep2=samples$Horizon[cond2], site.sep1=samples$Site[cond2], pch=21:24, pt.bg=c("green","green","green", "blue","blue","blue"), spe.label.type="text", spe.labels=fames$FAME, cex.leg=.5, spe.mult=1.5, site.cex=1)
# NOTE(review): `horplot` (no dot) differs from the hor.plot() used elsewhere,
# and the call ends with a trailing empty argument -- looks unfinished; confirm
# it exists in the helper files before relying on this line.
horplot(scores(ord, choices=1, display="sites"), )
dev.off()
# --- Depth profiles per biomarker group (groups.pdf) --------------------------
# 3x4 grid: each row is one biomarker group shown in four units (per the xlab
# strings): mol%, mol per g dry weight, mol per g TOC, and mmol per m^2
# (the 1e-6 * weight / area conversion). The pdf device is not closed within
# this visible section -- the script continues past it.
pdf("groups.pdf", width=12, height=12)
# NOTE(review): names(list) applies names() to the base function `list` and
# returns NULL -- leftover scratch, has no effect here.
names(list)
par(mfrow=c(3,4))
# Row 1: iso + anteiso 15:0
hor.plot(ia.rel[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="topright", xlab="I+A 15:0 (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(ia.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="I+A 15:0 (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(ia.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="I+A 15:0 (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(0.000001*ia.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="i15:0 + a15:0 (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=T)
# Row 2: monounsaturated FAMEs (excluding 18:1w9c/t)
hor.plot( monouns.rel[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="topright", xlab="Monounsaturated w/o 18:1w9c/t (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(monouns.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="Monounsaturated w/o 18:1w9c/t (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(monouns.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="Monounsaturated w/o 18:1w9c/t (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(0.000001*monouns.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="Monounsaturated w/o 18:1w9c/t (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=T)
# Row 3: fungal markers. NOTE(review): the third panel uses fungi3.conc while
# the others use fungi2.* -- confirm which marker set is intended.
hor.plot(fungi2.rel[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="18:2w6 + 18:3w3 (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(fungi2.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="18:2w6 + 18:3w3 (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(fungi3.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="18:2w6 + 18:3w3 (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(0.000001*fungi2.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="18:2w6 + 18:3w3 (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=T)
#page 2
# Long-chain even saturated FAMEs (20:0 + 22:0 + 24:0)
hor.plot(longeven.rel[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="topright", xlab="20:0+22:0+24:0 (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longeven.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="20:0+22:0+24:0 (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longeven.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="20:0+22:0+24:0 (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(0.000001*longeven.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="20:0+22:0+24:0 (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=T)
# Long-chain PUFAs (sequence continues beyond this file chunk)
hor.plot( longpufa.rel[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="PUFA 20+ (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longpufa.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="PUFA 20+ (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longpufa.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="PUFA 20+ (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(0.000001*longpufa.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="PUFA 20+ (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=T)
hor.plot(pufa.rel[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="all PUFA (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(pufa.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="all PUFA (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(pufa.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="all PUFA (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(0.000001*pufa.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="all PUFA (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=T)
dev.off()
# groups2.pdf: one row of four panels per microbial group (mol%, mol g-1 d.w.,
# mol g-1 TOC, mmol m-2), looping over the group labels in fames$microbial.group.
# Fix: use TRUE instead of the reassignable shorthand T.
pdf("groups2.pdf", width=12, height=12)
par(mfrow=c(3,4))
for (i in c("act","euk","fungi","general","Gneg","Gpos" ))
{
hor.plot(rowSums(rel[cond.rel, fames$microbial.group==i]), hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab=paste(i, "(mol%,SD)"), ylab="Soil horizon", oneway=TRUE, er.type.nested="sd")
hor.plot(rowSums(mols[cond.abs, fames$microbial.group==i]), hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab=paste(i, "(mol g-1 d.w.,SD)"), ylab="Soil horizon", oneway=TRUE, er.type.nested="sd")
hor.plot(rowSums(mols[cond.abs, fames$microbial.group==i])/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab=paste(i, "(mol g-1 TOC,SD)"), ylab="Soil horizon", oneway=TRUE, er.type.nested="sd")
hor.plot(0.000001*rowSums(mols[cond.abs, fames$microbial.group==i])*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab=paste(i, "(mmol m-2, SD)"), ylab="Soil horizon", er.type.nested="sd", oneway=TRUE)
}
# sumofplfas.pdf: total PLFA per g d.w., per g TOC (with a respiration overlay
# on a shared axis), and per m2.
# Fixes: detach resp245 after use (attach() otherwise leaves its columns on the
# search path for the rest of the session); removed a stray auto-print of the
# raw resp245 table; TRUE instead of T.
pdf("sumofplfas.pdf", width=7, height=3)
#pdf("sumvsresp.pdf", width=7, height=7)
par(mfrow=c(1,3))
hor.plot(sum[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="Sum of PLFAs (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=TRUE, er.type.nested="sd")
# xlim widened to leave room (x > 60) for the respiration overlay below.
hor.plot(sum[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="Sum of PLFAs (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=TRUE, er.type.nested="sd", xlim=c(0,100))
# Respiration profiles shifted by +60 x-units onto the TOC panel; horizons are
# plotted top-to-bottom as y = 4:1. se*sqrt(3) presumably converts SE back to
# SD for n = 3 -- TODO confirm the replicate number.
attach(resp245)
polygon(
c((ActrespT2_10_mean[Region=="ER"]+ActrespT2_10_se[Region=="ER"]*sqrt(3))+60,
((ActrespT2_10_mean[Region=="ER"]-ActrespT2_10_se[Region=="ER"]*sqrt(3))+60)[4:1]),
c(4:1,1:4),
col=rgb(1, 0, 0,0.25), border=1)
polygon(
c((ActrespT2_10_mean[Region=="GC"]+ActrespT2_10_se[Region=="GC"]*sqrt(3))+60,
((ActrespT2_10_mean[Region=="GC"]-ActrespT2_10_se[Region=="GC"]*sqrt(3))+60)[4:1]),
c(4:1,1:4),
col=rgb(0, 1, 0,0.25), border=1)
lines(acc_resp_10_mean[Region=="ER"]/3+60,4:1, col="red", lwd=2)
lines(acc_resp_10_mean[Region=="GC"]/3+60,4:1, col="green", lwd=2)
lines(ActrespT2_10_mean[Region=="ER"]+60,4:1, col="red", lwd=2)
lines(ActrespT2_10_mean[Region=="GC"]+60,4:1, col="green", lwd=2)
detach(resp245)  # undo attach() so resp245 columns stop masking other objects
abline(v=60)
hor.plot(0.000001*sum[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="Sum of PLFAs (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=TRUE)
dev.off()
# ratios.pdf: fungal:bacterial, eukaryote:bacterial and cy17:0/16:1w7 ratios.
# NOTE(review): "ratios.pdf" is opened again later in this script, so this
# version of the file gets overwritten -- rename one of the two if both outputs
# are wanted.
pdf("ratios.pdf", width=7, height=3)
par(mfrow=c(1,3))
hor.plot(F2B[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="(18:2w6+18:3w3):(Gpos+Gneg+Act)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(Euk2bac[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="Eukaryots:Bacteria", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(cy17prec[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="topright", xlab="cy17:0/16:1w7", ylab="Soil horizon", oneway=T, er.type.nested="sd")
dev.off()
# NOTE(review): the following six plots come after dev.off(), so they are drawn
# on the default on-screen device, not written to a file -- confirm intended.
hor.plot(longeven.rel[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="even SAFA > 20 (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longeven.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="even SAFA >20 (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longeven.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="even SAFA >20 (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
# NOTE(review): this call uses `longpufa`, while the equivalent plot earlier
# uses `longpufa.rel` -- verify which object holds the mol% values.
hor.plot(longpufa[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="PUFA >20 (mol%,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longpufa.conc[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="PUFA >20 (mol g-1 d.w.,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
hor.plot(longpufa.conc[cond.abs]/samples$c_corr[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="PUFA >20 (mol g-1 TOC,SD)", ylab="Soil horizon", oneway=T, er.type.nested="sd")
###########
#PLFA per area
##########
# perarea.pdf: TOC, total PLFA, fungal PLFA and i+a15:0 stocks per m2.
# hor.plot is assumed to return a list whose [[1]] holds the per-horizon means
# (columns 1:3 = L,F,H) and [[2]] the corresponding errors -- the lines after
# each call compute horizon totals, percentage shares, and pooled errors.
# Fixes: tmp2 after the fungal plot now reads fperm2[[2]] (was a copy-paste of
# plfaperm2[[2]]); xlab corrected to "18:2w6 + 18:3w3" to match the other
# fungal panels (was "18:2w3").
pdf("perarea.pdf")
par(mfrow=c(2,2))
# NOTE(review): conversion factor 0.00001 differs from the 0.000001 used for
# the PLFA panels -- presumably %C -> kg TOC m-2; confirm the units.
Cperm2<-
hor.plot(0.00001*samples$percent_C[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="kg TOC m-2", ylab="Soil horizon", er.type.nested="sd", oneway=TRUE)
tmp<-Cperm2[[1]]
100*tmp[,1:3]/rowSums(tmp[,1:3])
plfaperm2<-
hor.plot(0.000001*sum[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="mmol PLFA m-2", ylab="Soil horizon", er.type.nested="sd", oneway=TRUE)
tmp<-plfaperm2[[1]]
rowSums(tmp[,1:3])
100*tmp[,1:3]/rowSums(tmp[,1:3])
tmp2<-plfaperm2[[2]]
sqrt(rowSums(tmp2[,1:3]^2))  # error of the sum: quadrature of per-horizon errors
fperm2<-hor.plot(0.000001*fungi2.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="18:2w6 + 18:3w3 (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=TRUE)
tmp<-fperm2[[1]]
rowSums(tmp[,1:3])
100*tmp[,1:3]/rowSums(tmp[,1:3])
tmp2<-fperm2[[2]]  # FIX: was plfaperm2[[2]] (copy-paste from the block above)
sqrt(rowSums(tmp2[,1:3]^2))
hor.plot(0.000001*ia.conc[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="i15:0 + a15:0 (mmol m-2, SD)", ylab="Soil horizon", er.type.nested="sd", oneway=TRUE)
dev.off()
#####
#Cy:precursor ratios
#####
# Horizon profile plus one-way and two-way ANOVA / Tukey HSD for cy17:0/16:1w7.
# Fixes: the bare console paste "Groups, Treatments and means" was not a
# comment (parse error) -- commented out; the HSD.test result is now captured
# in `hsd`, which the table-reordering lines below require.
hor.plot(cy17prec[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="topright", xlab="cy17:0/16:1w7 (mol:mol,SD)", ylab="Soil horizon", oneway=TRUE, er.type.nested="sd")
anova(lm(cy17prec[cond2]~samples$Horizon[cond2]))
# Analysis of Variance Table
#
# Response: cy17prec[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 1.0272 0.34242 39.466 2.74e-12 ***
# Residuals 42 0.3644 0.00868
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
lm0<-aov(cy17prec[cond2]~samples$Horizon[cond2])
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
# Groups, Treatments and means
# a B 0.6908
# b L 0.4143
# b H 0.3507
# b F 0.3345
lm0<-aov(cy17prec[cond2]~samples$Horizon[cond2])
hsd<-HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)  # FIX: capture result; used below
# Reorder the HSD group table into L,F,H,B horizon order.
tmp<-hsd[[5]]
tmp$trt<-factor(tmp$trt, ordered=TRUE, levels=c("L","F","H","B"))
tmp[order(tmp$trt),TRUE]
anova(lm(cy17prec[cond2]~samples$Horizon[cond2]*samples$Region[cond2]))
# Analysis of Variance Table
#
# Response: cy17prec[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 1.02725 0.34242 57.7853 3.164e-14 ***
# samples$Region[cond2] 1 0.01946 0.01946 3.2843 0.0778534 .
# samples$Horizon[cond2]:samples$Region[cond2] 3 0.11976 0.03992 6.7370 0.0009344 ***
# Residuals 38 0.22518 0.00593
anova(lm(F2B[cond2]~samples$Horizon[cond2]))
lm0<-aov(F2B.18.2[cond2]~samples$Horizon[cond2])
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
anova(lm(cy19prec[cond2]~samples$Horizon[cond2]*samples$Region[cond2]))
# Analysis of Variance Table
#
# Response: cy19prec[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 38.835 12.9449 223.7732 < 2.2e-16 ***
# samples$Region[cond2] 1 1.100 1.1004 19.0221 9.53e-05 ***
# samples$Horizon[cond2]:samples$Region[cond2] 3 1.363 0.4544 7.8553 0.0003363 ***
# Residuals 38 2.198 0.0578
hor.plot(cyprec[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomleft", xlab="(cy17:0+cy19:0)/(16:1w7+18:1w9) (mol:mol,SD)", ylab="Soil horizon")
anova(lm(F2B[cond2]~samples$Horizon[cond2]))
lm0<-aov(F2B.18.2[cond2]~samples$Horizon[cond2])
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
anova(lm(cyprec[cond2]~samples$Horizon[cond2]*samples$Region[cond2]))
# Analysis of Variance Table
#
# Response: cyprec[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 13.2438 4.4146 222.2535 < 2.2e-16 ***
# samples$Region[cond2] 1 0.5598 0.5598 28.1807 5.046e-06 ***
# samples$Horizon[cond2]:samples$Region[cond2] 3 0.3772 0.1257 6.3293 0.001374 **
# Residuals 38 0.7548 0.0199
#####
#Fungi:Bacteria ratios
#####
# Horizon profile plus one-way and two-way ANOVA / Tukey HSD for F2B.
# Fix: the bare console paste "Groups, Treatments and means" was not a comment
# (parse error) -- commented out.
hor.plot(F2B[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="fungi:bacteria (mol:mol,SD)", ylab="Soil horizon")
anova(lm(F2B[cond2]~samples$Horizon[cond2]))
# Analysis of Variance Table
#
# Response: F2B[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 5.2514 1.75047 167.35 < 2.2e-16 ***
# Residuals 42 0.4393 0.01046
lm0<-aov(F2B[cond2]~samples$Horizon[cond2])
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
# Groups, Treatments and means
# a L 0.9146
# b F 0.2723
# b H 0.1546
# c B 0.02967
anova(lm(F2B[cond2]~samples$Horizon[cond2]*samples$Region[cond2]))
# Analysis of Variance Table
#
# Response: F2B[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 5.2514 1.75047 188.049 < 2e-16 ***
# samples$Region[cond2] 1 0.0206 0.02063 2.216 0.14484
# samples$Horizon[cond2]:samples$Region[cond2] 3 0.0650 0.02165 2.326 0.09011 .
# Same analysis for the 18:2w6,9-only fungal marker ratio (F2B.18.2).
# NOTE(review): the pasted ANOVA/HSD numbers below are identical to the F2B
# section above -- possibly a stale paste; re-run to confirm.
hor.plot(F2B.18.2[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="fungi (18:2w6,9 only):bacteria (mol:mol,SD)", ylab="Soil horizon", oneway=T)
anova(lm(F2B.18.2[cond2]~samples$Horizon[cond2]))
# Analysis of Variance Table
#
# Response: F2B.18.2[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 5.2514 1.75047 167.35 < 2.2e-16 ***
# Residuals 42 0.4393 0.01046
lm0<-aov(F2B.18.2[cond2]~samples$Horizon[cond2])
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
# Groups, Treatments and means
# a L 0.9146
# b F 0.2723
# b H 0.1546
# c B 0.02967
anova(lm(F2B.18.2[cond2]~samples$Horizon[cond2]*samples$Region[cond2]))
# Analysis of Variance Table
#
# Response: F2B.18.2[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 5.2514 1.75047 188.049 < 2e-16 ***
# samples$Region[cond2] 1 0.0206 0.02063 2.216 0.14484
# samples$Horizon[cond2]:samples$Region[cond2] 3 0.0650 0.02165 2.326 0.09011 .
# Residuals 38 0.3537 0.00931
# Same analysis for the eukaryote:bacteria ratio (Euk2bac).
# Fix: xlab said "fungi (18:2w6,9 only):bacteria" (copy-paste from the previous
# plot) although the plotted variable is Euk2bac; label corrected to match the
# earlier Euk2bac panel.
hor.plot(Euk2bac[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright", xlab="Eukaryots:Bacteria (mol:mol,SD)", ylab="Soil horizon", oneway=TRUE)
anova(lm(Euk2bac[cond2]~samples$Horizon[cond2]))
# Analysis of Variance Table
#
# Response: Euk2bac[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 15.4147 5.1382 156.51 < 2.2e-16 ***
# Residuals 42 1.3789 0.0328
lm0<-aov(Euk2bac[cond2]~samples$Horizon[cond2])
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
# Groups, Treatments and means
# a L 1.683
# b F 0.6202
# b H 0.4516
# c B 0.1347
anova(lm(Euk2bac[cond2]~samples$Horizon[cond2]*samples$Region[cond2]))
# Analysis of Variance Table
#
# Response: Euk2bac[cond2]
# Df Sum Sq Mean Sq F value Pr(>F)
# samples$Horizon[cond2] 3 15.4147 5.1382 188.6274 < 2e-16 ***
# samples$Region[cond2] 1 0.0547 0.0547 2.0090 0.16451
# samples$Horizon[cond2]:samples$Region[cond2] 3 0.2890 0.0963 3.5369 0.02358 *
# Residuals 38 1.0351 0.0272
########
#2-way anova for all FAME, rel abundance
########
# Fixes: the ANOVA table was assigned to `aov`, which masks stats::aov() and
# makes the aov(...) calls below fail with "attempt to apply non-function" --
# renamed to aov.tab; the leftover "aov1<-a" referenced an undefined object `a`
# and is commented out.
aov.tab<-anova(lm(rel[cond2,1]~samples$Horizon[cond2]*samples$Region[cond2]))
aov.tab$'Pr(>F)'
lm0<-aov(cy17prec[cond2]~samples$Horizon[cond2])
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
anova(lm(cy17prec[cond2]~samples$Region[cond2]))
# aov1<-a  # broken leftover: `a` is undefined
lm1<-aov(cy17prec[cond2]~samples$Horizon[cond2]*samples$Region[cond2])
tuk1<-TukeyHSD(lm1)
str(tuk1)
tuk1[[3]]  # the Horizon:Region interaction contrasts
plot(tuk1)
HSD.test(lm0, "samples$Horizon[cond2]", group=TRUE)
HSD.test(lm1, "samples$Horizon[cond2]", group=TRUE)
?HSD.test  # interactive help lookup (no effect in batch mode)
library("multcomp")
library("multcompView")
# Compact letter display for the Tukey contrasts.
multcompLetters(extract_p(tuk1))
tuk <- glht(lm0, linfct = mcp('samples$Horizon[cond2]'= "Tukey"))
tuk.cld<-cld(tuk)
plot(tuk.cld)
par(mfrow=c(2,2))
# One unconstrained, correlation-scaled RDA (i.e. a PCA on standardised mol%
# data) per horizon, as a 2x2 panel. The four per-horizon code copies were
# identical except for the horizon letter, so they are folded into one loop.
for (h in c("L","F","H","B")) {
  cond3<-cond.rel&samples$Horizon==h
  ord<-rda(rel[cond3,TRUE], scale=TRUE)
  ord.plot(ord,site.sep1=samples$Site[cond3], site.sep2=samples$Horizon[cond3], pch=c(21), pt.bg=c(rep(1,3),rep(grey(.6),3)), spe.label.type="text", spe.labels=fames$FAME, cex.leg=.5, spe.mult=2)
  title(paste(h, "horizon"))
}
dev.off()
# RDA of mol% composition constrained by Region x Horizon, then unconstrained
# ordinations of the F and L horizons only.
# Fix: `site.sep=` relied on R's partial argument matching (resolving to
# site.sep2 only because site.sep1 was already matched exactly) -- spelled out.
ord<-rda(rel[cond.rel,TRUE]~samples$Region[cond.rel]*samples$Horizon[cond.rel])
plot(ord)
ord.plot(ord,site.sep1=samples$Horizon[cond.rel], site.sep2=samples$Site[cond.rel], pch=c(21,21,21,22,22,22,24), pt.bg=1:6, spe.label.type="text", spe.labels=fames$FAME, cex.leg=.5)
warnings()
samples$Horizon  # quick interactive check of the factor levels
ord2<-rda(rel[samples$Horizon=="F"&cond.rel,TRUE])
ord.plot(ord2, site.sep1=samples$Site[samples$Horizon=="F"&cond.rel], site.sep2=samples$Region[samples$Horizon=="F"&cond.rel], pch=c(21,21,21,22,22,22,24), pt.bg=1:6, spe.label.type="text", spe.labels=fames$FAME, cex.leg=.5)
ord.l<-rda(rel[samples$Horizon=="L"&cond.rel,TRUE])
ord.plot(ord.l, site.sep1=samples$Site[samples$Horizon=="L"&cond.rel], site.sep2=samples$Region[samples$Horizon=="L"&cond.rel], pch=c(21,21,21,22,22,22,24), pt.bg=1:6, spe.label.type="text", spe.labels=fames$FAME, cex.leg=.5)
# Manual reconstruction of what hor.plot computes: per-Site x per-Horizon means
# and errors of the total PLFA (`sum`, a vector computed earlier in the file --
# note it masks base::sum when used unquoted).
fac<-samples$Site
horlev<-c("L","F","H","B")
hor<-samples$Horizon
cond1<-is.element(hor,horlev)
# Ordered factor so the L,F,H,B column order is preserved by tapply.
hor1<-factor(hor[cond1], ordered=T, levels=horlev)
fac1<-factor(fac[cond1], levels=unique(fac[cond1]))
var1<-sum[cond1]
means<-tapply(var1, list(fac1,hor1), mean)
# NOTE(review): `stderr` here is presumably a user-defined standard-error
# helper defined earlier in the file; base R's stderr() is the error
# connection and would fail here -- confirm the helper exists.
error<-tapply(var1, list(fac1,hor1), stderr)
# Exploratory per-TOC and per-area profiles for individual FAMEs and groups.
# NOTE(review): many xlab strings in this run are recycled from earlier calls
# and do not describe the plotted quantity (e.g. "nmol PLFA g-1 TOC" for a
# single FAME, "nmol PLFA m-2" for group sums per g TOC, and the garbled
# "nmol 1:8w9c" below, apparently meant to read 18:1w9t) -- fix before reuse.
hor.plot(sum[cond.abs]/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomleft", xlab="nmol PLFA g-1 TOC", ylab="Soil horizon")
hor.plot(mols$X16.0[cond.abs]/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomleft", xlab="nmol PLFA g-1 TOC", ylab="Soil horizon")
hor.plot(mols$X16.0[cond.abs]/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), pt.bg=c(2,2,2,3,3,3), legpl="bottomright", xlab="nmol PLFA g-1 TOC", ylab="Soil horizon", pch=rep(21:23,2), er.type="se")
hor.plot(mols$X2.2.16.1w9_[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), pt.bg=c(2,2,2,3,3,3), legpl="bottomright", xlab="nmol PLFA g-1 TOC", ylab="Soil horizon", pch=rep(21:23,2), er.type="se")
hor.plot((mols$X2.2.16.1w9_+mols$X2.6.16.1w5_)[cond.abs]/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomleft", xlab="nmol PLFA g-1 TOC", ylab="Soil horizon")
hor.plot(mols$X2.4.16.1w7[cond.abs]/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomleft", xlab="nmol PLFA g-1 TOC", ylab="Soil horizon")
hor.plot(sum[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomleft", xlab="nmol PLFA m-2", ylab="Soil horizon")
hor.plot(samples$percent_C[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomleft", xlab="nmol PLFA m-2", ylab="Soil horizon")
hor.plot(mols$X3.6.18.1w9c[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="nmol PLFA m-2", ylab="Soil horizon")
hor.plot(mols$X3.11.18.2w6.9[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="nmol PLFA m-2", ylab="Soil horizon")
hor.plot(mols$X3.20.18.3w3.6.9[cond.abs]*samples$weight[cond.abs]/samples$area[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="bottomright", xlab="nmol PLFA m-2", ylab="Soil horizon")
hor.plot(mols$X3.3.18.1w9t._[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="nmol 1:8w9c g d.w.", ylab="Soil horizon")
hor.plot(rowSums(mols[cond.abs,fames$group=="fungi"])/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="nmol PLFA m-2", ylab="Soil horizon")
hor.plot(rowSums(mols[cond.abs,fames$group=="Gpos"])/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se")
hor.plot(rowSums(mols[cond.abs,fames$group=="Gneg"])/samples$percent_C[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=rep(1,6), nested=samples$Region[cond.abs], pt.bg=2:3, legpl="topright", xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se")
# Second ratios.pdf: F:B, G+/G- and actinomycete profiles by microbial.group.
# NOTE(review): this overwrites the ratios.pdf written earlier in the script.
pdf("ratios.pdf", height=4)
par(mfrow=c(1,2))
hor.plot( rowSums(mols[cond.rel,fames$microbial.group=="fungi"])/
(rowSums(mols[cond.rel,fames$microbial.group=="Gneg"])+
rowSums(mols[cond.rel,fames$microbial.group=="Gpos"])),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="Fungi:Bacteria (mol:mol, SE)", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot( rowSums(mols[cond.rel,fames$microbial.group=="Gpos"])/
rowSums(mols[cond.rel,fames$microbial.group=="Gneg"]),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="G+/G- (mol:mol, SE)", ylab="Soil horizon", er.type="se", lwd=2)
# NOTE(review): third plot in a 1x2 layout -- it starts a new PDF page.
hor.plot(rowSums(mols[cond.rel,fames$microbial.group=="act"])
,
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="actinomycetes (mol:mol, SE)", ylab="Soil horizon", er.type="se", lwd=2)
dev.off()
# NOTE(review): everything below goes to the default on-screen device.
hor.plot( rowSums(mols[cond.rel,fames$group=="fungi"])/
(rowSums(mols[cond.rel,fames$group=="Gneg"])+
rowSums(mols[cond.rel,fames$group=="Gpos"])),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), pt.bg=c(2,2,2,3,3,3), legpl="bottomright", pch=rep(21:23,2),
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot( rowSums(mols[cond.rel,fames$group=="Gpos"])/rowSums(mols[cond.rel,fames$group=="Gneg"]),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), pt.bg=c(2,2,2,3,3,3), legpl="bottomright", pch=rep(21:23,2),
xlab="G+/G- (mol:mol, SE)", ylab="Soil horizon", er.type="se", lwd=2)
# NOTE(review): `sums` is probably a typo for the `sum` vector used elsewhere.
hor.plot(sums[cond.abs], hor=samples$Horizon[cond.abs],
c("L","F","H","B"), fac=samples$Site[cond.abs],
lty=rep(1,6), pt.bg=c(2,2,2,3,3,3), legpl="bottomright", pch=rep(21:23,2),
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
# Unsaturation indices: sum of 16:1 isomers over 16:0, and sum of 18:1 isomers
# over 18:0 (mol% based), plotted by site and by region.
unsaturation16.0<-(rel$X2.1.16.1a+rel$X2.3.16.1b+rel$X2.4.16.1w7+rel$X16.1c)/rel$X16.0
unsaturation18.0<-
(rel$X3.3.18.1w9t._+rel$X3.6.18.1w9c+rel$X3.7.18.1w7c)/rel$X3.1.18.0
# NOTE(review): the xlab "nmol PLFA m-2" is recycled and wrong for these
# dimensionless ratios.
hor.plot(unsaturation16.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), pt.bg=c(2,2,2,3,3,3), legpl="bottomright", pch=rep(21:23,2),
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(unsaturation18.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), pt.bg=c(2,2,2,3,3,3), legpl="bottomright", pch=rep(21:23,2),
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(unsaturation18.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(unsaturation16.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
# physiological_indices.pdf: six physiological stress/status indices as a 2x3
# panel -- i/ai 15:0, cyclopropyl:precursor, 16:1/16:0 unsaturation,
# 18:1w9c/18:1w7, 18:3/18:2, and the fungal 18:2/(18:2+18:3) fraction.
pdf("physiological_indices.pdf", width=10, height=6)
par(mfrow=c(2,3))
hor.plot((rel$i.15.0/rel$ai.15.0)[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomleft",
xlab="i15:0/ai15:0 (mol:mol, SE)", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(((rel$cy.19.0a+rel$cy.19.0b+rel$cy17.0a)/(rel$X2.4.16.1w7+rel$X18.1w9c))[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="topright",
xlab="cyclopropyl:precursor (mol:mol, SE)", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(unsaturation16.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomleft",
xlab="sum(16:1)/16:0 (mol:mol, SE)", ylab="Soil horizon", er.type="se", lwd=2)
# NOTE(review): per the xlab, rel$X18.1b is presumably the 18:1w7 isomer --
# confirm against the peak table.
hor.plot(
(rel$X18.1w9c/rel$X18.1b)[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="18:1w9c/18:1w7, mol:mol, SE", ylab="Soil horizon", er.type="se",
lwd=2)
hor.plot(
(rel$X18.3w3.6.9/rel$X18.2w6.9)[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="18:3w3c/18:2w6, mol:mol, SE", ylab="Soil horizon", er.type="se",
lwd=2)
hor.plot((
rel$X3.11.18.2w6.9/(rel$X3.11.18.2w6.9+rel$X3.20.18.3w3.6.9)
)[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="18:2w6,9/(18:2w6,9 + 18:3w3,6,9), mol:mol, SE", ylab="Soil horizon", er.type="se", lwd=2)
dev.off()
# Further exploratory ratio plots (on the default device).
# NOTE(review): the xlab "nmol PLFA m-2" is recycled and wrong for most of
# these dimensionless ratios; the cy19/18:1w9c plot is issued twice.
hor.plot((rel$X3.6.18.1w9c+rel$X3.11.18.2w6.9+rel$X3.20.18.3w3.6.9)[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(rowSums(mols[cond.rel,fames$group=="fungi"])/
(rowSums(mols[cond.rel,fames$group=="Gneg"])+
rowSums(mols[cond.rel,fames$group=="Gpos"])),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(mols$X3.14.cy.19.0[cond.rel]/mols$X3.6.18.1w9c[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(mols$X3.14.cy.19.0[cond.rel]/mols$X3.6.18.1w9c[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(mols$X2.15.cy17.0[cond.rel]/mols$X2.4.16.1w7[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
hor.plot(rel$X22.0[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
# Sum of long-chain (C20) PUFA, mol%.
hor.plot(
(
rel$X4.2.20.2_[cond.rel]+rel$X4.3.20.3w6.9.15[cond.rel]+rel$X4.5.20.4w6.9.12.15[cond.rel]+rel$X4.7.20.5w3.6.9.12.15[cond.rel]
),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel],
lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3, legpl="bottomright",
xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se", lwd=2)
# Interactive print of the mol% table.
rel
# Fungal:bacterial ratio profile.
# Fix: `hc(` was an undefined function (typo for `c(`) and made this call error.
# NOTE(review): samples$percent_C[cond.rel] is passed as an extra positional
# argument ahead of the horizon levels -- it does not appear in any other
# hor.plot call and is probably a leftover; verify against the hor.plot
# signature before trusting this plot.
hor.plot(
rowSums(mols[cond.rel,fames$group=="fungi"])/
(rowSums(mols[cond.rel,fames$group=="Gneg"])+
rowSums(mols[cond.rel,fames$group=="Gpos"])),
samples$percent_C[cond.rel], hor=samples$Horizon[cond.rel], c("L","F","H","B"),
fac=samples$Site[cond.rel], lty=rep(1,6), nested=samples$Region[cond.rel], pt.bg=2:3,
legpl="topright", xlab="nmol PLFA m-2", ylab="Soil horizon", er.type="se")
summary(sum)
hor.plot(sum[cond2], hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=rep(1), nested=samples$Region)
hor.plot(sum[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=1, nested=samples$Region[cond.abs], er.type.nested="sd", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="Sum of PLFA (nmol g-1 d.w.)", ylab="Soil horizon")
hor.plot(mols$X18.2w6.9[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=1, nested=samples$Region[cond.abs], er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:2w6,9 (nmol g-1 d.w.)", ylab="Soil horizon")
hor.plot((mols$X18.2w6.9+mols$X18.3w3.6.9)[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=1, nested=samples$Region[cond.abs], er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:2w6,9 (nmol g-1 d.w.)", ylab="Soil horizon")
hor.plot((mols$X18.1w9c+mols$X18.2w6.9+mols$X18.3w3.6.9)[cond.abs], hor=samples$Horizon[cond.abs], c("L","F","H","B"), fac=samples$Site[cond.abs], lty=1, nested=samples$Region[cond.abs], er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:2w6,9 (nmol g-1 d.w.)", ylab="Soil horizon")
hor.plot(
rowSums(mols[cond.rel, fames$microbial.group=="fungi"])/
(rowSums(mols[cond.rel, fames$microbial.group=="Gpos"]+rowSums(mols[cond.rel, fames$microbial.group=="Gneg"]))),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="Fungi:Bacteria (SE)", ylab="Soil horizon")
hor.plot(
mols$i.15.0[cond.rel]/mols$ai.15.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="Fungi:Bacteria (SE)", ylab="Soil horizon")
hor.plot(
mols$cy17.0[cond.rel]/mols$X2.4.16.1w7[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="Fungi:Bacteria (SE)", ylab="Soil horizon")
hor.plot(
mols$cy.19.0.1[cond.rel]/mols$X18.1w9c[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="Fungi:Bacteria (SE)", ylab="Soil horizon")
hor.plot(
mols$X3.14.cy.19.0[cond.rel]/mols$X3.6.18.1w9.t_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="Fungi:Bacteria (SE)", ylab="Soil horizon")
hor.plot(
mols$X3.14.cy.19.0[cond.rel]/(mols$X3.7.18.1w9c_[cond.rel]+mols$X3.6.18.1w9.t_[cond.rel]),
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="Fungi:Bacteria (SE)", ylab="Soil horizon")
hor.plot(
mols$X3.7.18.1w9c_[cond.rel]/mols$X3.6.18.1w9.t_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$X4.3.20.3w6.9.15[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$X3.9.18.2w_.i19.0_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$ai.16.0_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$X2.13.i18.0.or.10.Me18.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$X2.15.cy17.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$X3.14.cy.19.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$X2.6.16.1w5_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
hor.plot(
rel$X3.8.18.2w9.12[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="18:1c/t (SE)", ylab="Soil horizon")
pdf("longchain.pdf")
par(mfrow=c(2,3))
hor.plot(
rel$X20.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X22.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="22:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X23.0_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="24:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X24.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="24:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X25.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="24:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X10.Me.16.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:3 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X10.Me17.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:3 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.1.16.1a[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:3 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.3.16.1b[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:3 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X16.1c[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:3 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X4.5.20.4w6.9.12.15[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:4 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X4.7.20.5w3.6.9.12.15[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomright", lwd=2, xlab="20:5 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
dev.off()
pdf("16:1s.pdf")
par(mfrow=c(2,3))
hor.plot(
rel$X2.1.16.1w_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="16:1 (2-1) (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.2.16.1w9_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="16:1w9 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.3.16.1w_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topright", lwd=2, xlab="16:1 (2-3) (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.4a.16.1w_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topright", lwd=2, xlab="16:1 (2-4a) (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.4.16.1w7[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="16:1w7 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.6.16.1w5_[cond.rel],
dev.off()
pdf("isoanteisos.pdf")
par(mfrow=c(2,3))
hor.plot(
rel$i.15.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="i-15:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$ai.15.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="ai-15:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$i.16.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="i-16:0 (2-3) (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$ai.16.0_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topleft", lwd=2, xlab="ai 16:0 (2-4a) (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.5.i.17.0_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="i17:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X2.7._ai.17.0_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="bottomleft", lwd=2, xlab="ai(?)17:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
dev.off()
pdf("sufa.pdf")
par(mfrow=c(2,3))
hor.plot(
rel$X14.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topleft", lwd=2, xlab="14:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$n.15.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topleft", lwd=2, xlab="15:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X16.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topleft", lwd=2, xlab="16:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X3.1.18.0[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topleft", lwd=2, xlab="18:0 (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X3.6.18.1w9.t_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topleft", lwd=2, xlab="18:1w9t (3-6) (mol%, SE)", ylab="Soil horizon",legsize=0.8)
hor.plot(
rel$X3.7.18.1w9c_[cond.rel],
hor=samples$Horizon[cond.rel], c("L","F","H","B"), fac=samples$Site[cond.rel], lty=1, nested=samples$Region[cond.rel] , er.type.nested="se", pch=c(21,22), pt.bg=c(2,3), legpl="topright", lwd=2, xlab="18:1w9c (3-7) (mol%, SE)", ylab="Soil horizon",legsize=0.8)
dev.off()
sums <- rowSums(mols)
cond <- is.na(mols$X3.11.18.2w6.9)==F
cond
mols$
f2b<-rowSums(mols[T, fames$X0=="fungi"])/
rowSums(mols[,fames$X0=="Gpos"|fames$X0=="Gneg"])
hor.plot(mols$X3.11.18.2w6.9[cond], samples$Horizon[cond], c("L","F","H","B"), fac=samples$Site[cond], col=c(1,2), nested=samples$Region[cond], lwd=c(2,2), er.type="se", lty=c(1,2))
cond <- is.na(sums)==F
hor.plot(sums[cond1], samples$Horizon[cond1], c("L","F","H","B"), fac=samples$Region[cond1], col=c(1,2), lwd=2, er.type="se")
f2b<-
rowSums(mols[T, fames$X0=="fungi"])/
rowSums(mols[,fames$X0=="Gpos"|fames$X0=="Gneg"])
cond <- is.na(f2b)==F
hor.plot(f2b[cond], samples$Horizon[cond], c("L","F","H","B"), fac=samples$Region[cond], col=c(1,2), lwd=2, er.type="se", lty=c(1,1))
fames
mols
hor.plot()
head(mols)
samples$Horizon
<-
t(tmp[1:19, 9:ncol(tmp)])
mols <-
data.frame(t(tmp[22:nrow(tmp), 9:ncol(tmp)]))
summary(mols)
<-
as.numeric(mols[,3])
lda.all<-lda(rel[cond.rel&cond.ref,T], samples$Region[cond.rel&cond.ref])
lda.l<-lda(rel[cond.rel&samples$Horizon=="L",T], samples$Region[cond.rel&samples$Horizon=="L"])
lda.f<-lda(rel[cond.rel&samples$Horizon=="F",T], samples$Region[cond.rel&samples$Horizon=="F"])
lda.h<-lda(rel[cond.rel&samples$Horizon=="H",T], samples$Region[cond.rel&samples$Horizon=="H"])
lda.b<-lda(rel[cond.rel&samples$Horizon=="B",T], samples$Region[cond.rel&samples$Horizon=="B"])
plot(lda.b)
lda.values<-data.frame( lda.all$scaling, lda.l$scaling, lda.f$scaling, lda.h$scaling)
lda.values
dev.off()
attach(resp)
resp_act[is.na(resp_act)]<-0
days<-as.numeric(harvest)
cond<-temperature=="10"
timeseries(resp_act[cond], xfac=days[cond], sepfac=region[cond], legend="topright") |
155ecb7a809ec2a29db85ff87da29916a125339e | 4a94afa31e6df0023d28bc66650f7f32f5cb3c0b | /misc/eda.R | 2cbedf3a77152a6862a2297d89696ef2e7c340a8 | [] | no_license | UBC-MDS/DSCI532_Youtube-Trending | cd96d6f90de5b4c25b361a73fcbe23e7be95bbe6 | cab490b79219a50377e86adf623e54de4ffac1cd | refs/heads/master | 2020-04-15T08:04:53.796125 | 2019-01-30T07:40:50 | 2019-01-30T07:40:50 | 164,516,073 | 0 | 3 | null | 2019-01-30T07:40:51 | 2019-01-07T23:51:43 | R | UTF-8 | R | false | false | 1,352 | r | eda.R | library(tidyverse)
library(lubridate)
library(scales)
df <- readRDS("data/clean_df.rds")
df$category <- df$category %>% as_factor()
df %>%
count(category, wt=likes) %>%
arrange(n)
df %>%
group_by(category) %>%
count()
df %>%
select(category, views, likes, dislikes) %>%
filter(category %in% "Nonprofits & Activism") %>%
arrange(desc(views))
df %>%
group_by(category) %>%
summarise(likes = sum(as.numeric(views)), n = n(), avg = likes/n) %>%
ggplot() +
geom_col(aes(fct_reorder(category, avg), avg)) +
scale_y_continuous(labels = comma) +
labs(x="", y="Likes per Video") +
coord_flip()
df %>%
ggplot() +
geom_boxplot(aes(fct_reorder(category, views), views)) +
scale_y_log10(labels = comma) +
labs(x="", y="Likes per Video") +
coord_flip()
df %>%
select(publish_time, category) %>%
ggplot() + geom_bar(aes(wday(publish_time, label = TRUE)))
df %>%
select(publish_time, category) %>%
ggplot() + geom_bar(aes(mday(publish_time)))
df %>%
select(publish_time, category) %>%
mutate(hours = hour(publish_time),
minutes = minute(publish_time),
seconds = second(publish_time),
time = make_datetime(hour = hours, min = minutes, sec = seconds)) %>%
ggplot() + geom_freqpoly(aes(time)) +
scale_x_datetime(date_breaks = "3 hours", date_labels = "%H:%M")
|
020d3c5f65d2ba55c400955331e586a1208a3a2b | 2605ed5c32e799ddfd7b1f739800e35093fbc24e | /R/lib/bs/man/simul.bs.mme.Rd | fa720c3748f6f297ef3fd5eff1912ae2515276e1 | [] | no_license | BRICOMATA/Bricomata_ | fcf0e643ff43d2d5ee0eacb3c27e868dec1f0e30 | debde25a4fd9b6329ba65f1172ea9e430586929c | refs/heads/master | 2021-10-16T06:47:43.129087 | 2019-02-08T15:39:01 | 2019-02-08T15:39:01 | 154,360,424 | 1 | 5 | null | null | null | null | WINDOWS-1250 | R | false | false | 2,263 | rd | simul.bs.mme.Rd | \name{simul.bs.mme}
\alias{simul.bs.mme}
\title{Simulation study by using MME method}
\description{
The function \code{simul.bs.mme()} simulates three samples of size \eqn{n} from a
population \eqn{T \sim \rm{BS}(alpha,beta)}{T ~ BS(alpha,beta)}, one for each method (\code{rbs1()}, \code{rbs2()}, or \code{rbs3()}), computes the MMEs
for alpha and beta, and establishes goodness-of-fit for each sample.
}
\usage{
simul.bs.mme(n, alpha, beta)
}
\arguments{
\item{n}{Samples of size \code{n}.}
\item{alpha}{Theoretical shape parameter for simulations.}
\item{beta}{Theoretical scale parameter for simulations.}
}
\details{
In order to carry out simulation studies, we develop the functions \code{simul.bs.gme()},
\code{simul.bs.mle()}, and \code{simul.bs.mme()}. These functions generate random samples, estimate parameters,
and establish goodness-of-fit. The samples of size \eqn{n}, one for each method (G1, G2, or G3), are generated
by using \code{rbs1()}, \code{rbs2()}, and \code{rbs3()}, respectively. The estimations, one for each method,
are obtained by using \code{est1bs()}, \code{est2bs()}, and \code{est3bs()}, respectively. The goodness-of-fit
method is based on the statistic of Kolmogorov-Smirnov (KS), which is available through the
function \code{ksbs()}. The generated observations by means of G1, G2, and G3 are saved as slots of the
\code{R class simulBsClass}, which are named \code{sample1}, \code{sample2}, and \code{sample3}, respectively. Furthermore,
the results of the simulation study are saved in a fourth slot of this class, named \code{results}.
}
\value{
An object of class \code{"simulBsClass"} (Slots).
}
\references{Leiva, V., Hernández, H., and Riquelme, M. (2006). A New Package for the Birnbaum-Saunders Distribution. Rnews, 6/4, 35-40. (http://www.r-project.org)}
\author{Víctor Leiva <victor.leiva@uv.cl>, Hugo Hernández <hugo.hernande@msn.com>, and Marco Riquelme <mriquelm@ucm.cl>.}
\examples{
## Example: simul.bs.mle()
simul.bs.mle(100,0.5,1.0)
results<-simul.bs.mle(100,0.5,1.0)
results@results
sample<-results@sample1
## Example: simul.bs.mme()
simul.bs.mme(100,0.5,1.0)
## Example: simul.bs.gme()
simul.bs.gme(100,0.5,1.0)
}
\keyword{univar}
|
ae71a9a8d65cf21b4080c05edc7b02bf9c90b505 | ea570e2bcab830f1dbd75bef5ae8f09d18b1c6c1 | /man/read.mgh.voxel.Rd | 6debf178cc8afcd49bd1d601c8acd77b9975f255 | [] | no_license | TKoscik/fsurfR | 605e55e568648f5ff183a3e8b86b94bffce0eb3d | 376369d4d1f72a1cad7fa99ce3b8ae96169ecba8 | refs/heads/master | 2022-02-18T21:49:30.782324 | 2019-08-05T15:58:19 | 2019-08-05T15:58:19 | 114,380,078 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 297 | rd | read.mgh.voxel.Rd | \name{read.mgh.voxel}
\alias{read.mgh.voxel}
\title{MGH voxelwise read}
\description{Read voxelwise data from MGH file}
\usage{
read.mgh.voxel(mgh.file, coords)
}
\arguments{
\item{mgh.file}{Path to the MGH-format file to read.}
\item{coords}{Voxel coordinates specifying which values to extract.}
}
\value{}
\author{
Timothy R. Koscik <timkoscik+fsurfR@gmail.com>
}
\examples{} |
db4cd5573622716ab3cadddcdb9af43a88eac987 | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/benmarwick/Persistence-of-Public-Interest-in-Gun-Control/install.R | 3b0ecdb5f9a8571bdc58c39430193dde2f185805 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 195 | r | install.R | install.packages(c("glue", "lubridate", "plotly", "purrrlyr", "sessioninfo"))
devtools::install_github(c("tidyverse/ggplot2", "PMassicotte/gtrendsR", "Ironholds/pageviews", "dgrtwo/fuzzyjoin"))
|
a6bafc7eaa8113573ab76ce2c7dd37d74cbed804 | 1d327b833fb19ec87c44d48a39933c627a2437e4 | /figures/milstead.R | 40827559b3e3c569faf960ad75378afc77a83df4 | [] | no_license | jsta/2017_GLEON | 079c6942f1b6570f3e8fd20fb4bcae7c8d534f04 | 4c39fb607dabfe93f1893240261720788aaf55b3 | refs/heads/master | 2021-03-30T18:14:48.430790 | 2017-12-06T15:42:42 | 2017-12-06T15:42:42 | 101,436,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,408 | r | milstead.R | library(ggplot2)
library(forcats)
test3 <- readRDS("/home/jose/Documents/Science/Dissertation/Analysis/misc/milstead_x_lagos.rds")
test3$lakeconnectivity <-
fct_recode(test3$lakeconnectivity,
"Lakestream" = "Secondary",
"Stream" = "Primary",
"Headwater" = "Headwater")
test3$lakeconnectivity <- factor(test3$lakeconnectivity,
levels = c("Isolated", "Headwater", "Stream", "Lakestream"))
test3 <- test3[test3$hrt > 0,]
test3 <- test3[test3$AreaSqKm > 0.04,] # rm less than 4 ha
test3 <- test3[!is.na(test3$lakeconnectivity),]
yr_labels <- c("1e-07", "1e-05",
as.character(10/365),
as.character(2/12), as.character(6/12),
"2", "10")
format_labels <- function(x, mult_factor){
gsub("\\.+$", "\\1", gsub("0+$", "\\1",
trimws(format(round(as.numeric(x), 6) *
mult_factor, scientific = FALSE)), perl = TRUE))
}
day_labels <- ceiling(as.numeric(
format_labels(yr_labels, 365)) / 10) * 10
minute_labels <- ceiling(as.numeric(
format_labels(yr_labels, 365 * 24 * 60)) / 10) * 10
month_labels <- round(as.numeric(
format_labels(yr_labels, 12)), 0)
mixed_labels <- paste0(
c(minute_labels[1:2], day_labels[3], month_labels[4:5], yr_labels[6:7]),
c(" min", " min", " days", " ", " mon.", " years", " years"))
quants <- exp(quantile(log(test3[!is.na(test3$lakeconnectivity), "hrt"])))
# gg_fit_single <- ggplot(data = test3,
# aes(x = hrt, y = Rp)) + geom_point(size = 0.9) +
# scale_x_log10(labels = mixed_labels,
# breaks = as.numeric(yr_labels), limits = c(1 / 365, 11)) +
# stat_smooth(method = "glm", method.args = list(family = "binomial"),
# se = TRUE) +
# scale_color_brewer(palette = "Set1") +
# cowplot::theme_cowplot() +
# theme(legend.position = "none", legend.title = element_blank(), legend.text = element_text()) +
# xlab("Residence Time") +
# ylab("P Retention (%)") +
# geom_segment(data = data.frame(x = quants[c(2, 4)], y = c(0.8, 0.8)),
# aes(x = x, y = c(0, 0), xend = x, yend = y),
# color = "gray42", size = 1.5, linetype = 2)
#
# if(!interactive()){
# ggsave("milstead_single.pdf", gg_fit_single, height = 5)
# }
test4 <- droplevels(test3[test3$lakeconnectivity != "Isolated",])
(gg_fit_multi <- ggplot(data = test4, aes(x = hrt, y = Rp)) +
geom_point(size = 0.9) +
scale_x_log10(labels = mixed_labels,
breaks = as.numeric(yr_labels), limits = c(1 / 365, 11)) +
stat_smooth(method = "glm", method.args = list(family = "binomial"),
se = TRUE, aes(color = lakeconnectivity),
data = test4) +
scale_color_brewer(palette = "Set1") +
cowplot::theme_cowplot() +
theme(legend.title = element_blank(), legend.position = c(0.18, 0.8),
legend.text = element_text(), plot.caption = element_text(size = 10, hjust = 0)) +
xlab("Residence Time") +
ylab("P Retention (%)"))
# geom_segment(data = data.frame(x = quants[c(2, 4)], y = c(0.8, 0.8)),
# aes(x = x, y = c(0, 0), xend = x, yend = y),
# color = "gray42", size = 1.5, linetype = 2))
# labs(caption = "Re-analysis of data from [3] with data from [4]"))
# ggtitle("Lake P Retention as a function of \n residence time and lake connectivity"))
ggsave("figures/milstead_multi.pdf", gg_fit_multi, height = 3) |
eeec7898bd51291f57cef4087766393781c8d89b | 2477434cc1b95634c5b15f558669e39ec2e963a2 | /man/channelResponses.Rd | 43db317c8873587f9e333be8adc14f74900de961 | [] | no_license | pariswu1988/proteomics | 4e4b273d04490a9f3279553dd889d870e504b62f | 5e50c3e344068130a079c9e6c145ffdbf5651ca2 | refs/heads/master | 2021-01-13T03:55:36.847983 | 1977-08-08T00:00:00 | 1977-08-08T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 401 | rd | channelResponses.Rd | \name{channelResponses}
\alias{channelResponses}
\title{Response calculation}
\usage{
channelResponses(dwide, acc)
}
\arguments{
\item{dwide}{iTRAQ data in wide format including columns
corresponding to iTRAQ channels containing their
intensities.}
\item{acc}{result of an accumulation of sample sizes}
}
\description{
From spectrum to protein level -- Response variable
calculation
}
|
6c095baafb1068534ebadd28cc73fdd767d4c3ef | 8df47c83eb85cad38f5e9bf0e37972bd5a50ef6e | /R/vsp.R | a8bac01266fc5343c99b9ab6efd9587ca07f5ad9 | [
"MIT"
] | permissive | karlrohe/vsp-1 | 896d5e6f2ff1af063d1bed1a455cbf12524e7969 | 11ac4aab6a03ab0be679cda21145e858d4ad12a5 | refs/heads/master | 2023-02-09T17:12:56.722815 | 2020-12-30T15:57:14 | 2020-12-30T15:57:14 | 271,078,514 | 0 | 0 | NOASSERTION | 2020-06-09T18:21:19 | 2020-06-09T18:21:18 | null | UTF-8 | R | false | false | 4,784 | r | vsp.R | #' Non-Parametric Factor Analysis via Vintage Sparse PCA
#'
#' This code computes a rank-`rank` factorization of the (optionally
#' centered and scaled) adjacency matrix via a truncated SVD, then applies a
#' varimax rotation to both the left and right singular vectors to obtain
#' sparse, interpretable factors.
#'
#' @param x Either a graph adjacency matrix, [igraph::igraph] or
#' [tidygraph::tbl_graph]. If `x` is a [matrix] or [Matrix::Matrix]
#' then `x[i, j]` should correspond to the edge going from node `i`
#' to node `j`.
#'
#' @param rank The number of factors to calculate.
#'
#' @param center Should the adjacency matrix be row *and* column centered?
#'   Defaults to `FALSE`.
#'
#' @param scale Should the regularized graph Laplacian be used instead of the
#'   raw adjacency matrix? Defaults to `TRUE`. If `center = TRUE`, `x` is
#'   first centered and then scaled.
#'
#' @param tau_row Row regularization term. Default is `NULL`, in which case
#'   we use the row degree. Ignored when `scale = FALSE`.
#'
#' @param tau_col Column regularization term. Default is `NULL`, in which case
#'   we use the column degree. Ignored when `scale = FALSE`.
#'
#' @param rownames Optional character vector of names to attach to the
#'   row-side factors. Defaults to `NULL`.
#'
#' @param colnames Optional character vector of names to attach to the
#'   column-side factors. Defaults to `NULL`.
#'
#' @param ... Ignored.
#'
#' @details Sparse SVDs use `RSpectra` for performance.
#'
#' @return A factor analysis object (as constructed by `vsp_fa()`) containing
#'   the truncated singular vectors `u`/`d`/`v`, the varimax rotations
#'   `R_U`/`R_V`, and the rotated factor matrices `Z`, `B`, and `Y`.
#'
#' @export
#'
#' @examples
#'
#' library(LRMF3)
#'
#' vsp(ml100k, rank = 5, scale = TRUE)
#' vsp(ml100k, rank = 5, rescale = FALSE)
#' vsp(ml100k, rank = 5)
#'
#'
vsp <- function(x, rank, ...) {
# ellipsis::check_dots_used()
UseMethod("vsp")
}
#' @rdname vsp
#' @export
vsp.default <- function(x, rank, ...) {
stop(glue("No `vsp` method for objects of class {class(x)}. "))
}
#' @importFrom invertiforms DoubleCenter RegularizedLaplacian
#' @importFrom invertiforms transform inverse_transform
#' @rdname vsp
#' @export
vsp.matrix <- function(x, rank, ..., center = FALSE, recenter = FALSE,
scale = TRUE, rescale = scale,
tau_row = NULL, tau_col = NULL,
rownames = NULL, colnames = NULL) {
if (rank < 2)
stop("`rank` must be at least two.", call. = FALSE)
if (recenter && !center)
stop("`recenter` must be FALSE when `center` is FALSE.", call. = FALSE)
if (rescale && !scale)
stop("`rescale` must be FALSE when `scale` is FALSE.", call. = FALSE)
n <- nrow(x)
d <- ncol(x)
transformers <- list()
if (center) {
centerer <- DoubleCenter(x)
transformers <- append(transformers, centerer)
L <- transform(centerer, x)
} else{
L <- x
}
if (scale) {
scaler <- RegularizedLaplacian(L, tau_row = tau_row, tau_col = tau_col)
transformers <- append(transformers, scaler)
L <- transform(scaler, L)
}
# this includes a call to isSymmetric that we might be able to skip out on
s <- svds(L, k = rank, nu = rank, nv = rank)
R_U <- stats::varimax(s$u, normalize = FALSE)$rotmat
R_V <- stats::varimax(s$v, normalize = FALSE)$rotmat
Z <- sqrt(n) * s$u %*% R_U
Y <- sqrt(d) * s$v %*% R_V
B <- t(R_U) %*% Diagonal(n = rank, x = s$d) %*% R_V / (sqrt(n) * sqrt(d))
fa <- vsp_fa(
u = s$u, d = s$d, v = s$v,
Z = Z, B = B, Y = Y,
R_U = R_U, R_V = R_V,
transformers = transformers,
rownames = rownames, colnames = colnames
)
if (rescale) {
fa <- inverse_transform(scaler, fa)
}
if (recenter) {
fa <- inverse_transform(centerer, fa)
}
fa <- make_skew_positive(fa)
fa
}
#' Perform varimax rotation on a low rank matrix factorization
#'
#' @param x TODO
#'
#' @param rank TODO
#' @param ... TODO
#' @param centerer TODO
#' @param scaler TODO
#'
#' @export
#'
#' @examples
#'
#' library(fastadi)
#'
#' mf <- adaptive_impute(ml100k, rank = 20, max_iter = 5)
#' fa <- vsp(mf)
#'
vsp.svd_like <- function(x, rank, ...,
centerer = NULL, scaler = NULL,
recenter = FALSE, rescale = TRUE,
rownames = NULL, colnames = NULL) {
n <- nrow(x$u)
d <- nrow(x$v)
R_U <- stats::varimax(x$u, normalize = FALSE)$rotmat
R_V <- stats::varimax(x$v, normalize = FALSE)$rotmat
Z <- sqrt(n) * x$u %*% R_U
Y <- sqrt(d) * x$v %*% R_V
B <- t(R_U) %*% Diagonal(n = rank, x = x$d) %*% R_V / (sqrt(n) * sqrt(d))
fa <- vsp_fa(
u = x$u, d = x$d, v = x$v,
Z = Z, B = B, Y = Y,
R_U = R_U, R_V = R_V,
transformers = list(centerer, scaler),
rownames = rownames, colnames = colnames
)
if (!is.null(scaler) && rescale) {
fa <- inverse_transform(scaler, fa)
}
if (!is.null(centerer) && recenter) {
fa <- inverse_transform(centerer, fa)
}
fa <- make_skew_positive(fa)
fa
}
#' @rdname vsp
#' @export
vsp.Matrix <- vsp.matrix
#' @rdname vsp
#' @export
vsp.dgCMatrix <- vsp.matrix
#' @rdname vsp
#' @export
vsp.igraph <- function(x, rank, ..., attr = NULL) {
x <- igraph::get.adjacency(x, sparse = TRUE, attr = attr)
vsp.matrix(x, rank, ...)
}
|
021a8fa996a9d00da0f6defee0e6b850ca8c87f6 | 9fa27b387d87a5b69267c5d55c60ff55ee4a1fae | /scripts/make_spp_traitslist.R | 409c22b3648298f736ab36b3cbbf523854f20a94 | [] | no_license | mbelitz/insect-pheno-duration | 68c0176a8609c5bb064f7c2725fc64a291c38919 | 45ea84a8b3093cee79fd6c991e1368f4149a4f14 | refs/heads/master | 2020-12-26T23:45:19.824513 | 2020-09-16T19:20:35 | 2020-09-16T19:20:35 | 237,691,784 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,207 | r | make_spp_traitslist.R | library(dplyr)
# Purpose: build data/traits/spp_traits.csv -- the species trait table
# restricted to species with duration estimates in at least 10 grid cells.
## read in spp list from google drive
spp_list <- read.csv('data/traits/spp_list.csv', stringsAsFactors = FALSE) %>%
rename(scientificName = scientific_name)
## read in phenesse outputs
model_df <- read.csv(file = "data/model_dfs/duration_climate_population_data.csv",
stringsAsFactors = FALSE)
# Assign each unique lon/lat combination a cell id (the row number of the
# per-cell summary), then join that id back onto the full data.
id_cells <- model_df %>%
group_by(lon, lat) %>%
summarise(count = n()) %>%
tibble::rownames_to_column() %>%
rename(id_cells = rowname)
model_df <- left_join(model_df, id_cells)
# Drop incomplete rows and standardize climate/population covariates
model_df2 <- model_df %>%
na.omit() %>%
mutate(temp = scale(temp),
prec = scale(prec),
pop = scale(log10(pop)),
prec_seas = scale(bio15),
temp_seas = scale(bio4))
# Data density: number of grid cells with data per species
datadens <- model_df2 %>%
group_by(scientificName, Order) %>%
summarise(count = n())
# Keep species observed in >= 10 cells; Apis mellifera is excluded explicitly
has_10_cells <- filter(datadens, count >= 10) %>%
filter (scientificName != "Apis mellifera") # 145
model_df2 <- filter(model_df2, scientificName %in% has_10_cells$scientificName)
### combine model_df2 w/ traits
model_df3 <- filter(spp_list, scientificName %in% has_10_cells$scientificName)
# save csv
write.csv(model_df3, "data/traits/spp_traits.csv", row.names = FALSE)
|
3666789b66c94afc46f089029a911e32b4f66377 | bbf82f06ad6eb63970964a723bc642d2bcc69f50 | /inst/doc/BiSEp.R | 43dcb4a504a4d89059a327940ba3c90a811e0ff4 | [] | no_license | cran/BiSEp | 60af18b5fefd060e9b77df54ae4d5b169d1f8987 | fc7bae903d881648e84d7b31d20c96b99b529f7d | refs/heads/master | 2021-01-09T22:39:07.127740 | 2017-01-26T11:03:06 | 2017-01-26T11:03:06 | 17,678,095 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,453 | r | BiSEp.R | ### R code from vignette source 'BiSEp.Snw'
###################################################
### code chunk number 1: BiSEp.Snw:18-23
###################################################
# NOTE(review): auto-extracted vignette code (Stangle output); edit the
# vignette source 'BiSEp.Snw' rather than this file.
require(BiSEp)
data(INPUT_data)
INPUT_data[1:2,1:6]
###################################################
### code chunk number 2: packages
###################################################
# Bimodality fit: BI = bimodality index, BISEP = BISEP midpoint/p-value output
BISEP_data <- BISEP(INPUT_data)
biIndex <- BISEP_data$BI
bisepIndex <- BISEP_data$BISEP
###################################################
### code chunk number 3: BiSEp.Snw:40-43
###################################################
biIndex[1:10,]
bisepIndex[1:10,]
###################################################
### code chunk number 4: fig1
###################################################
plot(density(INPUT_data["TUSC3",]), main="TUSC3 Density Distribution")
###################################################
### code chunk number 5: fig2
###################################################
plot(density(INPUT_data["MLH1",]), main="MLH1 Density Distribution")
###################################################
### code chunk number 6: packages
###################################################
# NOTE(review): chunk 6 repeats the MLH1 plot of chunk 5 (as in the vignette).
plot(density(INPUT_data["MLH1",]), main="MLH1 Density Distribution")
###################################################
### code chunk number 7: BiSEp.Snw:72-73
###################################################
BIGEE_out <- BIGEE(BISEP_data, sampleType="cell_line")
###################################################
### code chunk number 8: BiSEp.Snw:78-79
###################################################
BIGEE_out[1:4,]
###################################################
### code chunk number 9: fig3
###################################################
expressionPlot(BISEP_data, gene1="SMARCA4", gene2="SMARCA1")
###################################################
### code chunk number 10: fig4
###################################################
expressionPlot(BISEP_data, gene1="MTAP", gene2="MLH1")
###################################################
### code chunk number 11: BiSEp.Snw:101-103
###################################################
data(MUT_data)
MUT_data[1:4,1:10]
###################################################
### code chunk number 12: BiSEp.Snw:106-107
###################################################
BEEMout <- BEEM(BISEP_data, mutData=MUT_data, sampleType="cell_line", minMut=40)
###################################################
### code chunk number 13: BiSEp.Snw:112-113
###################################################
BEEMout
###################################################
### code chunk number 14: fig5
###################################################
waterfallPlot(BISEP_data, MUT_data, expressionGene="MICB",
mutationGene="PBRM1")
###################################################
### code chunk number 15: fig6
###################################################
waterfallPlot(BISEP_data, MUT_data, expressionGene="BOK",
mutationGene="BRCA2")
###################################################
### code chunk number 16: packages
###################################################
# FURE: functional-redundancy assessment of candidate pairs (see
# funcRedundantPairs / allPairs components of the result)
fOut <- FURE(BIGEE_out[1,], inputType="BIGEE")
frPairs <- fOut$funcRedundantPairs
allPairs <- fOut$allPairs
###################################################
### code chunk number 17: BiSEp.Snw:145-146
###################################################
allPairs[1,]
|
6d13bd6e77987d2ef10d490c9218a1dacf55fe60 | 72ad4953ea2c100a03a9ddd364857988a9d1b2de | /R/ini.R | 5de1f08f76004838ce2418f0bc30de80c80d952b | [
"MIT"
] | permissive | bnicenboim/eeguana | 28b46a8088f9ca0a370d955987b688542690547a | 3b475ac0472e6bedf2659a3f102abb9983e70b93 | refs/heads/master | 2023-05-23T09:35:20.767216 | 2022-10-10T11:53:21 | 2022-10-10T11:53:21 | 153,299,577 | 22 | 9 | NOASSERTION | 2022-11-06T13:40:13 | 2018-10-16T14:26:07 | R | UTF-8 | R | false | false | 3,670 | r | ini.R | ## Taken from https://github.com/dvdscripter/ini
#' Read and parse .ini file to list
#'
#' @param filepath file to parse
#' @param encoding Encoding of filepath parameter, will default to system
#' encoding if not specified
#'
#' @details Lines starting with '#' or ';' are comments and will not be parsed.
#' Values are always returned as strings; a key/value line appearing before
#' the first [section] is an error (there is no section to attach it to).
#'
#' @seealso \code{\link{write.ini}}
#'
#' @return List with length equivalent to number of [sections], each section is
#' a new list of its key/value pairs (duplicate keys are kept in order)
#'
#' @examples
#' ## Create a new temp ini for reading
#' iniFile <- tempfile(fileext = '.ini')
#'
#' sink(iniFile)
#' cat("; This line is a comment\n")
#' cat("# This one too!\n")
#' cat("[ Hello World]\n")
#' cat("Foo = Bar \n")
#' cat("Foo1 = Bar=345 \n")
#' sink()
#'
#' ## Read ini
#' checkini <- read.ini(iniFile)
#'
#' ## Check structure
#' checkini
#' checkini$`Hello World`$Foo
#'
#' @noRd
read.ini <- function(filepath, encoding = getOption("encoding")) {
  sectionREGEXP <- '^\\s*\\[\\s*(.+?)\\s*]'
  # match section and capture section name
  keyValueREGEXP <- '^\\s*[^=]+=.+'
  # match "key = value" pattern
  ignoreREGEXP <- '^\\s*[;#]'
  # match lines with ; or # at start
  trim <- function(x) sub('^\\s*(.*?)\\s*$', '\\1', x)
  ini <- list()
  con <- file(filepath, open = 'r', encoding = encoding)
  on.exit(close(con))
  repeat {
    line <- readLines(con, n = 1, encoding = encoding, warn = FALSE)
    if (length(line) == 0) {
      break  # end of file
    }
    if (grepl(ignoreREGEXP, line)) {
      next   # skip comment lines
    }
    if (grepl(sectionREGEXP, line)) {
      matches <- regexec(sectionREGEXP, line)
      lastSection <- regmatches(line, matches)[[1]][2]
    }
    if (grepl(keyValueREGEXP, line)) {
      # Split on the FIRST '=' only; any later '=' belongs to the value.
      # (Replaces a per-character scan + strsplit/paste0 reconstruction.)
      eq <- regexpr('=', line, fixed = TRUE)
      key <- trim(substr(line, 1, eq - 1))
      value <- trim(substr(line, eq + 1, nchar(line)))
      # Append a freshly named element rather than renaming a placeholder;
      # this keeps duplicate keys and also fixes a corner case where a key
      # literally named "key" was clobbered by the old rename-by-match logic.
      ini[[lastSection]] <- c(ini[[lastSection]], stats::setNames(list(value), key))
    }
  }
  ini
}
#' Write list to .ini file
#'
#' @param x List with the structure to be written to the .ini file: one named
#'   element per [section], each of which is a named list of key/value pairs.
#'
#' @param filepath file to write
#' @param encoding Encoding of filepath parameter, will default to system
#' encoding if not specified
#'
#' @seealso \code{\link{read.ini}}
#'
#' @examples
#' ## Create a new temp ini for writing
#' iniFile <- tempfile(fileext = '.ini')
#'
#' ## Create a new list holding our INI
#' newini <- list()
#' newini[[ "Hello World" ]] <- list(Foo = 'Bar')
#'
#' ## Write structure to file
#' write.ini(newini, iniFile)
#'
#' ## Check file content
#' \dontrun{
#' file.show(iniFile)
#' }
#'
#' @noRd
write.ini <- function(x, filepath, encoding = getOption("encoding")) {
con <- file(filepath, open = 'w', encoding = encoding)
on.exit(close(con))
# An optional "title" attribute on x is written verbatim before any section.
if(!is.null(attributes(x)$title)) writeLines( attributes(x)$title, con)
for(section in names(x) ) {
writeLines( paste0('[', section, ']'), con)
# x[ section ] is a ONE-element list, so `key` iterates exactly once and is
# the whole named list of this section's key/value pairs; the writeLines
# call below is therefore vectorised over all keys at once.
for (key in x[ section ]) {
# An optional "comments" attribute on a section is emitted as "; ..." lines.
if(!is.null(attributes(x[[section]])$comments)) {
lapply(attributes(x[[section]])$comments, function(com)
writeLines(paste0("; ", com), con))
}
if(length(key)!=0)writeLines( paste0(names(key), '=', key), con)
}
writeLines("", con)  # blank line separating sections
}
}
|
24dd2267d6f9e77c25b69cbe8f2d95b91b1ea1de | 86a97a073394a944a9634d54884fa748478ce977 | /man/bvar.summary.Rd | c51674c31e005caaeb5e7cb0349bc221cef14c78 | [] | no_license | lnsongxf/bvarrKK | 617e1228c9dcd711fcf2215a67555ec777249125 | 7faed7f4b5a523ac406c46bffeec9bc3fb7c3ce1 | refs/heads/master | 2021-03-12T18:16:38.347155 | 2016-08-16T06:36:52 | 2016-08-16T06:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 450 | rd | bvar.summary.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bvar.R
\name{bvar.summary}
\alias{bvar.summary}
\title{Print summary for a Bayesian VAR model}
\usage{
bvar.summary(model)
}
\arguments{
\item{model}{the list containing all results of bayesian VAR estimation}
}
\description{
Print summary for a Bayesian VAR model
}
\examples{
data(Yraw)
model <- bvar(Yraw,prior = 'independent',nsave=1000, nburn=100)
bvar.summary(model)
}
|
dea2d9d38ff9a3360085f876280a808590521acd | 92cf9455c7a46a4a35d747bf7b42124a1d2054ee | /archive/simple_geweke.r | 591b1b0f58b982c41282adf4484274238ff21eb0 | [] | no_license | JavierQC/spatcontrol | 551cb820f397dfcb0461d9fbf7306aeb32adf90a | a3e77845d355b2dee396623b976e114b9a89c96e | refs/heads/master | 2020-12-31T06:32:10.711253 | 2015-02-24T13:09:18 | 2015-02-24T13:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27 | r | simple_geweke.r | library(coda)
# NOTE(review): `smallsampled` is not defined in this file; presumably it is
# created elsewhere before this script is sourced. This line just auto-prints it.
smallsampled
|
39709fdc046db2d4ab492f2531be22471b5c777d | cdbe5a8e18b83a051ccf3cdb790410babeb01d42 | /man/read.swat.data.rch.2012.Rd | 2219078eb407d1671cb122577f999daf01227eb5 | [
"MIT"
] | permissive | neumannd/riverdata | df587db1330c1cf1688332b68b67039718a2927f | fb5f1c33e054afcf5d0c7cda7e86209a25471ea7 | refs/heads/master | 2022-04-06T13:46:04.069104 | 2020-02-26T06:43:51 | 2020-02-26T06:44:02 | 109,431,479 | 1 | 0 | null | 2017-11-10T13:42:16 | 2017-11-03T18:48:12 | R | UTF-8 | R | false | true | 1,793 | rd | read.swat.data.rch.2012.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.swat.data.rch.2012.R
\name{read.swat.data.rch.2012}
\alias{read.swat.data.rch.2012}
\title{Read data from a reach (RCH) SWAT output file}
\usage{
read.swat.data.rch.2012(fileHandle, nrow.data, header = TRUE, nreach = 1,
ireach = 1)
}
\arguments{
\item{fileHandle}{open connection to a file; the pointer in the file has to
point to the header row if 'header=TRUE'; if
'header=FALSE' it has to point to the first data row}
\item{nrow.data}{number of data rows to read}
\item{header}{logical: consider the first row as header row (or not) [default = TRUE]}
\item{nreach}{integer: number of reaches in the file}
\item{ireach}{integer: index of the reach's data to extract}
}
\value{
list containing a data.frame (out$data), a character array
(out$units), and two character variables (out$format and
out$tstep). The first contains the actual data formatted as a
data.frame. The second contains the units to the corresponding
columns of the data.frame. The third contains the source/format
of data (here: 'swat'; can also be 'mom'). The fourth contains
information on the time step of the data (resp.: on which time
interval they are averaged).
}
\description{
This function reads data from an already opened SWAT reach (*.rch) file of
the SWAT 2012 format.
}
\examples{
# open file
fileHandle <- file('output.rch', open="rt")
# skip first 8 lines of meta data
tmp <- readLines(fileHandle, n = 8)
# read 100 lines of data
data.out <- read.swat.data.rch.2012(fileHandle, 100)
# close file
close(fileHandle)
}
\seealso{
read.swat, read.river.mom
}
\author{
Daniel Neumann, daniel.neumann@io-warnemuende.de
}
|
fe172a7d24271bf06da57b5c04085631febd770d | e1b973c582a68cb308e46b445381b457607e0791 | /R/coursera/best.R | 81818f65614323fdd3c1bc1f0641688d29013063 | [] | no_license | mishagam/progs | d21cb0b1a9523b6083ff77aca69e6a8beb6a3cff | 3300640d779fa14aae15a672b803381c23285582 | refs/heads/master | 2021-08-30T15:51:05.890530 | 2017-12-18T13:54:43 | 2017-12-18T13:54:43 | 114,647,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 904 | r | best.R | # best.R - Programming assignment 3
# Find the hospital in `state` with the lowest 30-day death rate for
# `outcome` ("heart attack", "heart failure" or "pneumonia").
# Reads "outcome-of-care-measures.csv" from the working directory.
# Ties on the rate are broken alphabetically by hospital name; non-numeric
# rates become NA (with a coercion warning) and sort last.
best <- function(state, outcome) {
  ## Read outcome data (all columns as character; the rate is converted below)
  out <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  states <- out[, 7]  # column 7 holds the state code
  # Bug fix: previously read `o[, 7]`, but no object `o` exists -- the data
  # frame is `out`, so every call failed with "object 'o' not found".
  ## Check state and outcome are valid
  if (!(state %in% states)) {
    stop("invalid state")
  }
  ## Map the outcome to its 30-day mortality-rate column index
  if (outcome == "heart attack") {
    col <- 11
  } else if (outcome == "heart failure") {
    col <- 17
  } else if (outcome == "pneumonia") {
    col <- 23
  } else {
    stop("invalid outcome")
  }
  stateOutcomes <- out[state == states, ]
  hospRate <- cbind(stateOutcomes["Hospital.Name"],
                    as.numeric(stateOutcomes[, col]))
  ## Order by rate, then by hospital name (alphabetical tie-break)
  orderedHosp <- hospRate[order(hospRate[, 2], hospRate[, 1]), ]
  ## Return hospital name in that state with the lowest 30-day death rate
  orderedHosp[1, 1]
}
|
9c1ac8e6b00b5235c0c03a4fafd52bfddab1e573 | 3d2e8759e6f4b1f422f88b260466bf8a7105035b | /R/list-object-sizes.R | 0bcd82ac5dc5906b7702158c4fb596e27cf425ef | [] | no_license | csgillespie/rprofile | d939831388e7ffc44a947ac71483a04ce54df51c | 1c5e3df58b22e80ae533a70b13fe2e63c5c3194e | refs/heads/main | 2023-08-09T13:18:20.387626 | 2023-07-19T10:49:43 | 2023-07-19T10:49:43 | 200,486,076 | 50 | 9 | null | 2022-12-03T18:28:23 | 2019-08-04T11:48:10 | R | UTF-8 | R | false | false | 1,150 | r | list-object-sizes.R | # SO: http://stackoverflow.com/q/1358003/203420
# improved list of objects
#
# List the objects in the environment at position `pos` (default: the global
# environment) with their class, size (bytes and human-readable) and
# dimensions, sorted in decreasing order of the requested column.
lsos = function(order.by = c("PrettySize", "Type", "Size", "Rows", "Columns"),
                pos = 1) {
  # Apply `fn` to each object named in `names`, looked up at position `pos`.
  napply = function(names, fn) sapply(names, function(x) fn(get(x, pos = pos)))
  names = ls(pos = pos)
  obj.class = napply(names, function(x) as.character(class(x))[1])
  obj.mode = napply(names, mode)
  obj.type = ifelse(is.na(obj.class), obj.mode, obj.class)
  # Human-readable size, e.g. "1.2 Mb"
  obj.prettysize = napply(names, function(x) {
    utils::capture.output(print(utils::object.size(x), units = "auto"))
  })
  obj.size = napply(names, utils::object.size)
  obj.dim = t(napply(names, function(x) as.numeric(dim(x))[1:2]))
  if (length(names) > 0) {
    # For dimensionless objects (other than functions), report length as Rows.
    vec = is.na(obj.dim)[, 1] & (obj.type != "function")
    obj.dim[vec, 1] = napply(names, length)[vec]
    out = data.frame(obj.type, obj.size, obj.prettysize, obj.dim)
  } else {
    out = tibble::tibble("a", "b", "c", "d", "e")
    out = out[FALSE, ]
  }
  names(out) = c("Type", "Size", "PrettySize", "Rows", "Columns")
  order.by = match.arg(order.by)
  # Bug fix: "PrettySize" is a character column ("910.8 Kb", "1.2 Mb", ...),
  # so sorting it alphabetically does not sort by size; order by the numeric
  # Size column in that case instead.
  sort.col = if (order.by == "PrettySize") "Size" else order.by
  out = out[order(out[[sort.col]], decreasing = TRUE), ]
  # NOTE(review): tibble::tibble(out) packs the data frame into a single
  # df-column; tibble::as_tibble() may have been intended. Kept as-is to
  # preserve the current return shape.
  tibble::tibble(out)
}
|
5fb49c84ac819dca962e497be10c83d20c920bfc | a0b7f55210bd999d6a83195809fa442ac69c972a | /R/package_needs_update.R | d9095f3dbfa77ed94cdfab565dafc25c687f1e8e | [] | no_license | muschellij2/ghtravis | 1e17ebab863dde4aa745511587578ec5581e9aa2 | 0b26809694028878615d172235006a5c0a9c0ada | refs/heads/master | 2023-01-31T01:54:14.889239 | 2021-03-31T17:55:43 | 2021-03-31T17:55:43 | 94,371,912 | 0 | 5 | null | 2023-01-18T20:40:03 | 2017-06-14T20:48:27 | R | UTF-8 | R | false | false | 776 | r | package_needs_update.R | #' @title Check Package Versions
#' @description Looks in installed packages to see if a new version is needed
#'
#' @param version Version of package
#' @param package Package to check
#' @param ... not used
#' @return \code{TRUE} if \code{package} is not installed or its installed
#'   version is older than \code{version}, otherwise \code{FALSE}
#' @export
#'
#' @importFrom utils compareVersion
package_needs_update = function(
  version,
  package) {
  ip = installed.packages()
  # Bug fix: `package %in% ip` scanned EVERY cell of the matrix (including
  # Depends/Imports strings), not the installed package names. The names are
  # the row names -- which the `ip[package, "Version"]` lookup below already
  # relies on.
  if (!(package %in% rownames(ip))) {
    return(TRUE)
  }
  cur_ver = ip[ package, "Version"]
  v = c("version" = version,
        "installed" = cur_ver)
  v = make_full_version(v)
  # TRUE when the requested version is strictly newer than the installed one
  utils::compareVersion(
    v["version"],
    v["installed"]) > 0
}
#' @rdname package_needs_update
#' @export
set_update_var = function(...) {
  # 0/1 numeric form of package_needs_update(), e.g. for environment variables.
  as.numeric(package_needs_update(...))
}
|
b48d2c7d62d298ab3b0cee979191c127900f6e7e | 529a04d816b084f77362a9f573bf3a56461c9027 | /R/bayespetr-package.R | 8f3e2de05d337e8d8ff8d40ea65ba11045885a59 | [
"MIT"
] | permissive | dt448/bayespetr | fa78bf98088293d5d367d0d6167a4504a5eda4f0 | 0920ed39ad84f9853924bf653cc99900b6561a64 | refs/heads/main | 2023-09-03T13:52:51.496060 | 2021-11-03T10:53:40 | 2021-11-03T10:53:40 | 402,852,720 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 434 | r | bayespetr-package.R | #' @keywords internal
"_PACKAGE"
# The following block is used by usethis to automatically manage
# roxygen namespace tags. Modify with care!
## usethis namespace: start
#' @useDynLib bayespetr, .registration = TRUE
#' @importFrom Rcpp sourceCpp
## usethis namespace: end
NULL
.onUnload <- function (libpath) {
library.dynam.unload("bayespetr", libpath)
}
.onLoad <- function(libname, pkgname){
# cat("Welcome to bayespetr")
}
|
9a024011a21a87ec3dc23a91292adfe67d1182b4 | 17c5bb56fc79529e6679ff3e3ded1ea72bfba5b8 | /man/plotOperations.Rd | 4ad66754c48db90f2a9d4b2e2fa4fb9a1dba88df | [] | no_license | bmasch/RSmoltMon | b9b3e18ac22a2ff6eb643e5213514e22f7461f44 | 5576b89cf1d8f4ba71eb398863cfd8f2688bc12f | refs/heads/master | 2016-08-11T20:38:01.994189 | 2015-11-10T20:04:13 | 2015-11-10T20:04:13 | 45,934,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 374 | rd | plotOperations.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SMPPlottingFunctions.R
\name{plotOperations}
\alias{plotOperations}
\title{Plot Operational Dates}
\usage{
plotOperations(ops, size = 5)
}
\arguments{
\item{ops}{}
\item{size}{}
}
\description{
Plot Operational Dates
}
\examples{
ops <- read.csv("operations.csv")
plotOperations(ops)
}
|
4527329f967c5fc278f5a116e8156221929223a9 | 0a021f843670c168c4a212207c13be1b0a88ddbe | /man/isColor.Rd | e0d015939e7e33a109dcbdc61a7fbbc3995b7367 | [] | no_license | cran/plotfunctions | ddc4dd741ad2a43d81deb0ef13fe2d7b37ca84bd | ebacdd83686e1a32a4432a35f244bf82015a19a5 | refs/heads/master | 2021-01-20T18:53:05.464513 | 2020-04-28T09:00:02 | 2020-04-28T09:00:02 | 59,847,744 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,794 | rd | isColor.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{isColor}
\alias{isColor}
\title{Check whether color specifications exists.}
\usage{
isColor(x, return.colors = FALSE)
}
\arguments{
\item{x}{Vector of any of the three kinds of R color specifications,
i.e., either a color name (as listed by
\code{\link[grDevices]{palette}colors()}), a hexadecimal string of the form
'#rrggbb' or '#rrggbbaa' (see rgb), or a positive integer i meaning
\code{\link[grDevices]{palette}()[i]}.}
\item{return.colors}{Logical: logical values (FALSE, default) or
returning colors (TRUE)}
}
\value{
Logical value (or colors)
}
\description{
Function to check whether all specified colors are
actual colors.
}
\examples{
# correct color definitions:
isColor(c('#FF0000FF', '#00FF00FF', '#0000FFFF'))
isColor(c('red', 'steelblue', 'green3'))
isColor(c(1,7,28))
# mixtures are possible too:
isColor(c('#FF0000FF', 'red', 1, '#FF0000', rgb(.1,0,0)))
# return colors:
# note that 28 is converted to 4...
isColor(c(1,7,28), return.colors=TRUE)
isColor(c('#FF0000CC', 'red', 1, '#FF0000'), return.colors=TRUE)
# 4 incorrect colors, 1 correct:
test <- c('#FH0000', 3, '#FF00991', 'lavendel', '#AABBCCFFF')
isColor(test)
isColor(test, return.colors=TRUE)
}
\seealso{
Other Utility functions:
\code{\link{findAbsMin}()},
\code{\link{find_n_neighbors}()},
\code{\link{firstLetterCap}()},
\code{\link{getArrowPos}()},
\code{\link{getDec}()},
\code{\link{getRange}()},
\code{\link{getRatioCoords}()},
\code{\link{get_palette}()},
\code{\link{group_sort}()},
\code{\link{inch2coords}()},
\code{\link{list2str}()},
\code{\link{move_n_point}()},
\code{\link{orderBoxplot}()},
\code{\link{se}()},
\code{\link{sortGroups}()}
}
\author{
Jacolien van Rij
}
\concept{Utility functions}
|
412a76702bdc8f3d9ac01b506af28d463e20983a | c694cb3ac80edf2a5d9359d820393214bd9edfda | /R/bioinf/fst_density_plots.R | f21822f762c66c8705cf853c71d55b3e252bfbf8 | [
"MIT"
] | permissive | maehrlich1/Funhe_Gen | 6a89dcc9e24bbe484a8ee02ca94f76b9a2340ebb | cb9e58c1d5cb4a2ef2b44b1de0cf92b5fe46463f | refs/heads/master | 2021-06-25T02:26:18.176266 | 2020-11-02T23:50:17 | 2020-11-02T23:50:17 | 129,307,750 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,593 | r | fst_density_plots.R | ###Simple Fst distribution plots###
library(dplyr)
library(qvalue)
library(ggplot2)
library(RColorBrewer)
setwd("/Users/Moritz/Documents/Academic/RSMAS/PhD/SF16_GBS/Plots/")
#cols <- brewer.pal(8,"Paired")
#pops <- c(rep("Basin Fall",26), rep("Basin Spring",25), rep("Pond1 Fall",27), rep("Pond1 Spring",19), rep("Pond2 Fall",30), rep("Pond2 Spring",19), rep("Pond3 Fall",24), rep("Pond3 Spring",23))
#bp_pops <- c(rep("Basin",51), rep("Pond",142))
#sf_pops <- c(rep("Fall",26), rep("Spring",25), rep("Fall",27), rep("Spring",19), rep("Fall",30), rep("Spring",19), rep("Fall",24), rep("Spring",23))
#myPng <- function(..., width=6, height=6, res=300, ps=12) {png(..., width=width*res, height=height*res, res=res, pointsize=ps)}
gg_color_hue <- function(n) {
  # Reproduce ggplot2's default discrete palette: n evenly spaced hues on the
  # HCL colour wheel at fixed luminance (65) and chroma (100).
  hue_angles <- seq(15, 375, length.out = n + 1)
  hcl(h = hue_angles, l = 65, c = 100)[1:n]
}
cols=gg_color_hue(4)  # one colour per habitat (Basin, Pond1-3)
#create vector of fst sets
comp <- c("BaSUvsBaFT","P1SUvsP1FT","P2SUvsP2FT","P3SUvsP3FT")
#create list to store data and for outliers
fst <-list()
#Read in SNP position data because the bash script drops the chrom field for some reason
#chrom <- read.delim(pipe("cut -f 1 /Users/Moritz/Fuse_vols/SCRATCH/SF16_GBS/Results/re_Q30_DP5_bial_hwe_CR0.93.pos"), header = T)
#Read in the pvalues from the permutation analysis
# Read per-comparison vcftools Weir & Cockerham Fst tables (*_maf5p.weir.fst)
for(i in comp) {
fst[[i]] <- read.delim(paste("~/Fuse_vols/SCRATCH/SF16_GBS/Results/", i, "_maf5p.weir.fst", sep=""), header=T)
#stat[[i]] <- mutate(stat[[i]], ID=paste(Chrom,Position,sep="_"))
#stat[[i]] <- mutate(stat[[i]], q_val=qvalue(stat[[i]]$p_val)$qvalues)
#outliers[[i]] <- filter(stat[[i]], p_val<=0.001)
}
#Convert the list to a dataframe with a new variable
# .id="Habitat" labels rows by their source comparison (Basin / Pond1-3)
fst_df <- bind_rows("Basin"=fst[[1]], "Pond1"=fst[[2]], "Pond2"=fst[[3]], "Pond3"=fst[[4]], .id="Habitat")
fst_df$Habitat <- as.factor(fst_df$Habitat)
#make density graphs
# Zoomed histogram of high-Fst SNPs (Fst > 0.25) for the Basin comparison only
png("sf_fst_histogram_zoom_basin.png", width = 2400, height = 1600, res = 300)
ggplot(subset(fst_df, WEIR_AND_COCKERHAM_FST>0.25 & Habitat=="Basin"))+
#geom_density(aes(x=WEIR_AND_COCKERHAM_FST, col=Habitat), alpha=0.8)+
geom_histogram(aes(x=WEIR_AND_COCKERHAM_FST), fill=cols[1], bins=50, alpha=0.9)+
#geom_point(data=subset(sf_stat, p_val<=0.001), aes(x=Fst, y=-log10(p_val)), col=twoggcols[2], alpha=0.8)+
geom_vline(xintercept = 0, lty="twodash", alpha=0.8)+
#facet_wrap(~Habitat)+
labs(y="Count", x=expression(italic(F[ST])))+
coord_cartesian(x=c(0.25,0.5), y=c(0,20))+
theme_bw()+
scale_fill_discrete(guide=F)+
theme(legend.position = c(.8,.8), text = element_text(size=24), axis.title.x = element_blank())
dev.off()
|
388930cdf053c99b5631bdebfd43a2d787707c02 | f293351ff518e8b463deb86ac53550a7f08258b1 | /MEPS/R/get_puf_names.R | b4deddbec3b58ead05d679d6b6ca274f216c4e8a | [] | no_license | TaraFararooy/meps_r_pkg | 4b439ec9dac5da8e727dfd28317c6c50fee3ccb4 | bd877689288e110b1a7946e88e0672b1b6229a73 | refs/heads/master | 2023-04-06T21:21:27.561179 | 2021-01-15T21:00:38 | 2021-01-15T21:00:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,612 | r | get_puf_names.R | #' Get MEPS Public Use File Names
#'
#' This is a lookup function that returns a single requested file name or list of names for specified MEPS data file. Internet access is required, since the function reads from the HHS-AHRQ GitHub page.
#' @param year (optional) Data year, between 1996 and most current PUF release. If omitted, files from all years will be returned
#' @param type (optional) File type of desired MEPS file. Options are 'PIT' (Point-in-time file), 'FYC' (Full-year consolidated), 'Conditions' (Conditions file), 'Jobs' (Jobs file), 'PRPL' (Person-Round-Plan), 'PMED' (Prescription Medicines Events), 'DV' (Dental Visits), 'OM' (Other medical events), 'IP' (Inpatient Stays), 'ER' (Emergency Room Visits), 'OP' (Outpatient Visits), 'OB' (Office-based visits), 'HH' (Home health), 'CLNK' (conditions-event link file), 'RXLK' (PMED - events link file), and 'PMED.Multum' (Multum Lexicon addendum files for 1996-2013)
#' @param web if TRUE, returns names of .zip files from web, otherwise, returns names of .ssp files after download
#' @export
#' @examples
#' ## Get file name for full-year-consolidated (FYC) file from 2005
#' get_puf_names(2005,'FYC')
#'
#' ## Get file names for all PUFs in 2014
#' get_puf_names(2014)
#'
#' ## Get file names for PMED event files, all years
#' get_puf_names(type='PMED')
#'
#' ## Return all files, all years
#' get_puf_names()
#'
#' ## Compare names of .ssp files with those on website links
#' get_puf_names(year = 1996, type = 'DV')
#' get_puf_names(year = 1996, type = 'DV', web=F)
get_puf_names <- function(year, type, web = T) {
  # NOTE(review): `web = T` relies on T not having been reassigned; TRUE
  # would be the safer spelling.
  # Load latest PUF names from GitHub ---------------------------------------
  meps_file = "https://raw.githubusercontent.com/HHS-AHRQ/MEPS/master/Quick_Reference_Guides/meps_file_names.csv"
  puf_names_current <- read.csv(meps_file, stringsAsFactors = F)
  # Drop non-numeric "Year" rows (header/annotation rows in the remote CSV)
  puf_names <- puf_names_current %>%
    mutate(Year = suppressWarnings(as.numeric(Year))) %>%
    filter(!is.na(Year))
  # Expand event file names -------------------------------------------------
  meps_names <- puf_names %>%
    dplyr::rename(PMED = PMED.Events) %>%
    mutate(RX = PMED)
  # Allow 'MV' and 'OB' for office-based medical visits
  # Allow 'IP' and 'HS' for inpatient hospital stays
  event_letters <- list(DV="b",OM="c",IP="d",HS="d",ER="e",OP="f",OB="g",MV="g",HH="h")
  for(evnt in names(event_letters)){
    letter = event_letters[[evnt]]
    # Substitute the event letter for the '*' placeholder in the generic name
    value = meps_names$Events %>% gsub("\\*",letter,.)
    meps_names[,evnt] = value
  }
  meps_names <- meps_names %>% select(-Events)
  cols <- meps_names %>% select(-Year, -ends_with("Panel")) %>% colnames
  # Force colnames to be uppercase (to match toupper(type))
  colnames(meps_names) <- toupper(colnames(meps_names))
  # Check for data input errors ---------------------------------------------
  if (!missing(type)) {
    # Force type to be uppercase to match colnames
    type = toupper(type)
    # If type = PRP, re-name to PRPL
    if (type == "PRP") {
      type <- "PRPL"
      warning("Getting 'PRPL' file")
    }
    if (!type %in% colnames(meps_names)) {
      stop(sprintf("Type must be one of the following: %s", paste(cols, collapse = ", ")))
    }
  }
  if (!missing(year)) {
    if (!year %in% meps_names$YEAR)
      stop(sprintf("Year must be between %s and %s", min(meps_names$YEAR), max(meps_names$YEAR)))
  }
  # Return MEPS names based on specified, year, type ------------------------
  # (`&` on the scalar missing() results works, though `&&` is conventional)
  if (missing(year) & missing(type)) {
    out <- meps_names
  } else if (missing(year) & !missing(type)) {
    out <- meps_names %>% select(YEAR, all_of(type))
  } else if (missing(type) & !missing(year)) {
    out <- meps_names %>% filter(YEAR == year) %>% select(-ends_with("Panel"))
  } else {
    out <- meps_names %>% filter(YEAR == year) %>% select(all_of(type))
  }
  if (web)
    return(out)
  # Convert from download names (in meps_names) to .ssp file names ------------
  meps_mat <- as.matrix(out)
  hc_list <- c(
    "h10a", "h10if1", "h10if2", "h26bf1", "h19",
    sprintf("h16%sf1", letters[2:8]), sprintf("h10%sf1", letters[2:8]))
  # sub() replaces only the first "h", turning the leading "h" into "hc"
  meps_mat[meps_mat %in% hc_list] <- sub("h", "hc", meps_mat[meps_mat %in% hc_list])
  # Special-case renames for early files with non-standard .ssp names
  meps_mat[meps_mat == "h05"] = "hc005xf"
  meps_mat[meps_mat == "h06r"] = "hc006r"
  meps_mat[meps_mat == "h07"] = "hc007"
  meps_mat[meps_mat == "h09"] = "hc009xf"
  meps_mat[meps_mat == "h13"] = "hc013xf"
  out <- as.data.frame(meps_mat, stringsAsFactors = F)
  return(out)
}
|
a4c0454111bf65d4229075869de8245d34b3692d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/afc/examples/afc.cc.Rd.R | 6ebf09dc3a9a5bda804c54efe2cf855ade1f9936 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 402 | r | afc.cc.Rd.R | library(afc)
### Name: afc.cc
### Title: 2AFC For Continuous Observations And Continuous Forecasts
### Aliases: afc.cc
### Keywords: file
### ** Examples
# (auto-extracted example code for afc::afc.cc)
#Forecasts and observations of Nino-3.4 index
#Load set of continuous observations and continuous forecasts
data(cnrm.nino34.cc)
obsv = cnrm.nino34.cc$obsv
fcst = cnrm.nino34.cc$fcst
#Calculate skill score
afc.cc(obsv,fcst)
|
0f808e2c27128765ae66da637748976f4dd5de5c | 9b76f92dfecfc84e2a43da24b9e4aa678a2de356 | /bootcamp/088IntroToLinearRegression.R | 71416e0ccfe60fe8c2554100a0b7ac5dbdaf5f2e | [] | no_license | rathanDev/r | fa9d82582a83271b1f771c3bc9dd4348b0b28f73 | 2c4871f13de7cde82df7e0e63a253fa4a575a23b | refs/heads/master | 2022-12-13T17:56:46.669651 | 2020-09-10T12:32:49 | 2020-09-10T12:32:49 | 264,051,092 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 319 | r | 088IntroToLinearRegression.R |
# Linear Regression
# - Francis Galton
# Investigated the relationship between the heights of the fathers and their sons
# He discovered man's son tended to be roughly as tall as his father
# There are lot of different ways to minimize the distance in a regression
# sum of squared errors / sum of absolute errors
|
fd35dd3fa30056f11379f190a49691690a8788f1 | bba18259bad7b2246ee2a6a4f9c95a2dc990daee | /RHadoop/rmr_word_count.R | 8d94bb2e1c199698bd3f1e43a9c12f4daf816a28 | [] | no_license | yenzichun/Etu_InternReport | 12c7f2a8d05e9349480fba38d907850330a1085b | d4fea730d2b72b83a50c2b0c3b7b23124084db69 | refs/heads/master | 2021-01-16T00:28:04.203515 | 2015-04-27T10:24:34 | 2015-04-27T10:24:34 | 20,614,382 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,421 | r | rmr_word_count.R | #my1mapper
#R-like
small.ints = 1:10
sapply(small.ints, function(x) x^2)
#mr_equivalent
small.ints = to.dfs(1:1000)
mapreduce(
input = small.ints,
map = function(k,v) cbind(v, v*2)
)
from.dfs("/tmp/RtmpxvmpVk/filec31559c11d5")
from.dfs(small.ints)
#my1reducer
#R-like
groups = rbinom(32, n = 50, prob = 0.4)
tapply(groups, groups, length)
#mr_equivalent
groups = to.dfs(groups)
from.dfs(
mapreduce(
input = groups,
map = function(., v) keyval(v, 1),
reduce = function(k, vv) keyval(k, length(vv))
)
)
#word_count
library(rmr2)
wordcount = function(){
wc.map = function(k,v) {
keyval(k,1)
}
wc.reduce = function(word, counts){
keyval(word, sum(counts))
}
mapreduce(
input = "word_count_data",
output = "word_count_result",
input.format = make.input.format("text", sep = " "),
output.format = "text",
map = wc.map,
reduce = wc.reduce
)
}
wordcount()
#old version
# Older, parameterised word-count job: splits each input line on `pattern`
# and counts occurrences of every token.
#
# input   -- HDFS path or big-data object to read
# output  -- optional HDFS output path (NULL lets rmr2 pick a temp path)
# pattern -- token separator used by strsplit()
wordcount =
  function(
    input,
    output = NULL,
    pattern = " "){
    # Map: split each line into words and emit (word, 1) pairs.
    wc.map =
      function(., lines) {
        keyval(
          unlist(
            strsplit(
              x = lines,
              split = pattern)),
          1)}
    # Reduce: total the counts for each word.
    wc.reduce =
      function(word, counts ) {
        keyval(word, sum(counts))}
    mapreduce(
      # BUG FIX: previously read the global `groups` object, silently
      # ignoring the `input` argument.
      input = input,
      output = output,
      input.format = "text",
      map = wc.map,
      reduce = wc.reduce,
      combine = TRUE)}  # TRUE rather than T (T can be reassigned)
|
8485be2948b3dbdb9a091429f1f0b05747bf6f0b | 6821e51a0e56f4f9ec0295ff7ac050f28755880e | /man/deploy_netlify.Rd | 4ddb1b944a575e9300f74ba8f3f2b50d59eee1a5 | [
"Apache-2.0"
] | permissive | WorldHealthOrganization/casecountapp | f4d4b30ebbf5084156c27968496efe26f8561fe2 | ab0f37bbfc2bd389aca30ba7e99ffc294065ebd8 | refs/heads/master | 2023-02-17T07:23:45.477542 | 2021-01-20T07:19:34 | 2021-01-20T07:19:34 | 299,447,048 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 463 | rd | deploy_netlify.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deploy.R
\name{deploy_netlify}
\alias{deploy_netlify}
\title{Deploy an app to Netlify}
\usage{
deploy_netlify(app, netlify_app_id, require_app_token = FALSE)
}
\arguments{
\item{app}{An app object created with \code{\link[=register_app]{register_app()}}.}
\item{netlify_app_id}{Netlify app ID.}
\item{require_app_token}{Logical; whether a Netlify app token is required. Defaults to \code{FALSE}.}
}
\description{
Deploy an app to Netlify
}
|
431c3bfd2dbd48dba8a93e44f7983ed1642c650a | 7b66b5cee177598fce6320f9f5115df6324f7c05 | /Plotting_script.R | 20b7de0b0a2de34ac51011050b52d96fe51e9c11 | [] | no_license | fuadar/Dataregression-scripts | b0f5860dab96b4d07b0912930bac2514a9a2dad0 | 46f6514e1601435314cc0807abcdd3007b597f3b | refs/heads/master | 2020-06-05T20:56:49.241264 | 2019-07-15T22:49:51 | 2019-07-15T22:49:51 | 192,544,179 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,935 | r | Plotting_script.R | # A few tips
# - run getwd() to see the working directory; keep your data file there so it is easy to read
# - setwd("path") if you want to change the working directory - need to provide the path
data_file <- read.csv("filename.csv") # read a csv file in R by writing the file name
# remember to keep this file in the working directory before running the above command.
# Drag the console screen to the left to leave enough area for plotting, else you will get an error.
# use $ to access a variable from a datafile.
# data_file$variablename - This is how you access a variable from a data_file in R. This file should be visible in
# the environment tab in the right panel.
# Histogram for one numerical variable
# Template - this code will not run as-is (placeholder names)
hist(datasetname$variablename, main= "whatever you want to name your figure as. this comes on the top",
     ylab="whatever you want to show written to the left of y axis")
# Example - This code will run
hist(mtcars$mpg, main="Car Milage Data", # mtcars is the name of the dataset, mpg is the name of the variable
     ylab="Miles Per Gallon")
# If you want separate histograms for one numerical variable, broken up by the values of a categorical variable:
par(mfrow=c(1,3)) # here you tell R that I need 3 plots together - this divides the plotting area into 1 row and 3 columns
hist(mtcars$mpg[mtcars$cyl==4],ylim =c(0,8)) # histogram for mpg for cars with 4 cylinders
hist(mtcars$mpg[mtcars$cyl==6],ylim =c(0,8)) # histogram for mpg for cars with 6 cylinders
hist(mtcars$mpg[mtcars$cyl==8],ylim =c(0,8)) # histogram for mpg for cars with 8 cylinders
# here ylim is the range in which you want the y values to be plotted in the histogram. These could be the min and max values.
##########################################################################################################
par(mfrow=c(1,1)) # here you tell R that I need only 1 plot in the figure
# Boxplot for one variable - template, this code will not run as-is
boxplot(datasetname$variablename, main= "whatever you want to name your figure as. this comes on the top",
        ylab="whatever you want to show written to the left of y axis")
# Example - This code will run
boxplot(mtcars$mpg, main="Car Milage Data", # mtcars is the name of the dataset, mpg is the name of the variable
        ylab="Miles Per Gallon")
# If you want separate boxplots for one numerical variable, broken up by the values of a categorical variable:
# This code will not run (placeholder names)
boxplot(numerical_variable_name~categorica_variable_name,data=datasetname, main= "whatever you want to name your figure as. this comes on the top",
        xlab = "whatever you want to show written below x axis",
        ylab="whatever you want to show written to the left of y axis")
# Example - This code will run
boxplot(mpg~cyl,data=mtcars, main="Car Milage Data", # separate distributions of mpg for cars with 4, 6, and 8 cylinders
        xlab="Number of Cylinders", ylab="Miles Per Gallon")
#############################################################################################################
# Scatter plot (regression), with the fitted line
# This code will not run (placeholder names)
plot(x, y, main = "Main title", # x is the predictor variable (x axis) and y is the response variable.
     xlab = "X axis title", ylab = "Y axis title")
# Adding a line - run this after the plot command above
abline(lm(y~x), col = "red")
# Example - This code will run
plot(mtcars$wt,mtcars$mpg , main = "Weight vs MPG", # here mpg is the y variable, "wt" is the x variable, and mtcars is the dataset name
     xlab = "Weight", ylab = "MPG")
abline(lm(mtcars$mpg~mtcars$wt), col ="red") # change the color from red to blue if you like
|
5e3e511957a9154d53395e6fa33449f6d4e9a370 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/3599_0/rinput.R | 2b615ff5d61043759cc1db018a29357ae44a65dd | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the phylogenetic tree for locus 3599_0 and unroot it; the unrooted
# tree is written back out by the following write.tree() call.
testtree <- read.tree("3599_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3599_0_unrooted.txt") |
7f034c3dbdc24381bb58cb31ba55a3c673915739 | d4e51947dc5bcc784378aad466f41ea09cf36558 | /app.r | 01c487cd3e4f5e52aafff21256b377c273ac5cc7 | [] | no_license | shuvashreeroy/r-Counter | ee99f354f11d7140ecf1efcc808c1c8f858eb63a | f3882e19e9b2abe7c695b1308617773f87c268e4 | refs/heads/main | 2023-08-12T00:02:01.087758 | 2021-10-07T17:19:51 | 2021-10-07T17:19:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,653 | r | app.r | library(shiny)
# Shiny app: reads a published Google Sheet (as CSV) and shows per-district
# barplots plus a simple file-based hit counter.
library(shinydashboard)
library(googlesheets4)
library(ggplot2)
library(graphics)
# Read the published sheet as a CSV file (fetched once at app start)
real_data=read.csv("https://docs.google.com/spreadsheets/d/e/2PACX-1vQ2SVfqRbNwsIzSgd0FOwJ3Rc-Mvpbc7GWwuZI0_DKXIhd4E83vc1PetPZSnnlmPrgrHtAF3Y3hSTjr/pub?output=csv", sep = ",", header = TRUE)
ui <- fluidPage(
  # Background color change (inline CSS)
  tags$style('
  .container-fluid {
  background-color: #008BA6;
}
'),
  # Application title
  titlePanel("Reading-Display Google Sheet data"),
  sidebarPanel(
    sidebarMenu(
      # Input: district name, populated from the sheet
      selectizeInput(
        "newvar", "Select District from DropDown menu:", choices = sort(unique(real_data$District.Name)), multiple = FALSE,options=list(placeholder="Birju") )
    ),
    br(),
    tags$a(href="https://www.youtube.com/c/ujjwalfx", img(src='UJ.png', align = "middle")),
    br(),
    h4(strong("Youtube Channel Name:"), style = "font-size:20px;"),
    tags$a(href="https://www.youtube.com/c/ujjwalfx", "https://www.youtube.com/c/ujjwalfx"),
    h5(strong(textOutput("counter")))
  ),
  # Main panel: total-footfall text plus the two barplots
  mainPanel(
    textOutput('footfalltotal'),
    br(),
    plotOutput("plot1"),
    br(),
    plotOutput("plot2")
  )
)
# Server logic: filter the sheet by the selected district and render outputs
server <- function(input, output) {
  # Barplot of footfall totals for the selected district
  output$plot1 <- renderPlot({
    par(bg = "#008BA6")
    plotdata=subset(real_data,real_data$District.Name==input$newvar)
    barplot(sort(plotdata$Footfall.Total),main =paste("Total Footfall = ",sum(plotdata$Footfall.Total,na.rm=TRUE)),border="green", col="purple", names.arg = plotdata$Footfall.Total)
  })
  # Barplot of patients who received medicines for the selected district
  output$plot2 <- renderPlot({
    par(bg = "#008BA6")
    plotdata=subset(real_data,real_data$District.Name==input$newvar)
    barplot(sort(plotdata$Patients.received.medicines),main =paste("Patient received medicines = ",sum(plotdata$Patients.received.medicines,na.rm=TRUE)),border="green", col="purple", names.arg = plotdata$Patients.received.medicines)
  })
  # Text summary of the total footfall for the selected district
  output$footfalltotal <- renderText({
    plotdata=subset(real_data,real_data$District.Name==input$newvar)
    sumtotal=paste("Total Footfall = ",sum(plotdata$Footfall.Total,na.rm=TRUE))
  })
  # Persistent hit counter stored in counter.Rdata next to the app.
  # NOTE(review): concurrent sessions can race on this file; confirm whether
  # that matters for this deployment.
  output$counter <-
    renderText({
      if (!file.exists("counter.Rdata"))
        counter <- 0
      else
        load(file="counter.Rdata")
      counter <- counter + 1
      save(counter, file="counter.Rdata")
      paste("Hits: ", counter)
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
4a5c024bb514ec39cae16c578ec8d8e54e0cc684 | 13a0b230f57980fc1bd48d22e1bffe566e994044 | /main.R | 275b24fc965e2ac2af81f322ca9c34de2589cae0 | [] | no_license | tercen/scale_operator | a82fa090e3baedbf6bf76980369647c2b857df7f | aa2ff4278df49f29d22bd86695ae83e2d9fca4d5 | refs/heads/master | 2023-04-14T05:46:02.218906 | 2023-04-05T12:25:28 | 2023-04-05T12:25:28 | 111,098,455 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 816 | r | main.R | library(tercen)
library(dplyr)
library(data.table)
do.scale = function(y, ci, ...){
  # Column-scale `y` with base::scale(); extra arguments are forwarded.
  # If scaling fails (e.g. non-numeric input), return an empty double
  # vector so the caller can detect that no result was produced.
  scaled <- tryCatch(scale(as.matrix(y), ...), error = function(e) NULL)
  if (is.null(scaled)) {
    return(double())
  }
  # One row per input value, keeping the column index alongside the score.
  data.frame("value" = scaled, ".ci" = ci)
}
# Tercen operator entry point: pull the projected data, scale per row (.ri),
# and save the result back to the Tercen context.
ctx = tercenCtx()
df <- ctx$select(c(".ci", ".ri", ".y"))
dt <- data.table(df)
# NOTE(review): the result of this aggregation is discarded (not assigned);
# it looks like a leftover -- confirm whether the per-(.ci,.ri) mean was
# meant to replace dt here.
dt[ , .(.y = mean(.y)), by = c(".ci",".ri")]
# NOTE(review): the "scale" operator value lands positionally in scale()'s
# `center` slot and "center" in `scale` (via do.scale's ...); verify the
# intended argument order.
outDf <- dt[ , c("scaled_value", ".ci") :=
  do.scale(.y, .ci,
           ctx$op.value("scale", as.logical, TRUE),
           ctx$op.value("center", as.logical, TRUE)),
  by = c(".ri") ] %>%
  select(-.y) %>%
  as.data.frame() %>%
  arrange(.ri, .ci) %>%
  relocate(.ri) %>%
  relocate(scaled_value) %>%
  ctx$addNamespace() %>%
  ctx$save()
|
3113536128305014095ff6d0afeaa14e1d3ea222 | 529a1b02bef95628524d45cf7a68079de6593860 | /man/vech.Rd | f7fba03ca527dbc5e3bd7fae025b56273b146c32 | [
"MIT"
] | permissive | nielsaka/zeitreihe | c56daec8df3f9b7fab0dae261b8a5ade145fa5de | 3d570fad85fb0309b2a60134ae989f769b8b9b12 | refs/heads/master | 2020-03-22T06:03:47.179892 | 2020-03-11T12:38:03 | 2020-03-11T12:38:03 | 139,609,227 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 540 | rd | vech.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aux_funs.R
\name{vech}
\alias{vech}
\title{Vectorise a symmetric matrix}
\usage{
vech(mat)
}
\arguments{
\item{mat}{An \code{(M x M)} matrix with arbitrary dimensions \code{M}.}
}
\value{
A column matrix of dimension \code{(M^2 + M)/2 x 1}.
}
\description{
Vectorise a symmetric matrix by stacking the elements of its lower
triangular part, including the diagonal.
}
\examples{
mat <- matrix(1:8, 4, 4)
mat[upper.tri(mat)] <- t(mat)[upper.tri(mat)]
vech(mat)
}
|
b2d2827f043a61d71fd719224f34343bfb452879 | 85a673faffa19d90ae2492321488d00205f1f9a8 | /man/calculate_dilutions.Rd | 542ccd1978c0e230b326189579cd3479483d8b2d | [
"MIT"
] | permissive | taiawu/echor | 219c339e3b2cbd55b62be2964a171ac14273fa67 | 4276d9ccf5be5e06befae456a79ee9dfda7933e0 | refs/heads/master | 2023-05-09T01:59:42.092001 | 2021-05-12T06:12:17 | 2021-05-12T06:12:17 | 365,672,380 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 952 | rd | calculate_dilutions.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_dilutions.R
\name{calculate_dilutions}
\alias{calculate_dilutions}
\title{Calculate dilutions between mother and daughter plates}
\usage{
calculate_dilutions(daughter, mother, .echo_drop_nL = 25)
}
\arguments{
\item{daughter}{daughter plate layout, standardized for echo functions with standardize_layout()}
\item{mother}{mother plate layout, standardized for echo functions with standardize_layout()}
\item{.echo_drop_nL}{the droplet size of the Echo to be used, in nL. Defaults to 25.}
}
\value{
the input tibble, with additional columns describing the volume to be transferred from the mother, and the extent to which rounding the input concentration was necessary, given the droplet size of the echo used.
}
\description{
A helper function used inside calculate_transfers() to calculate the volume of compound to transfer from mother to daughter wells.
}
|
977d5817107385fa93fec9836580ecff090d6487 | b636bb2db890b32140c16ea62265616eaf37efdf | /RMySQL.R | bd4c2c14915ac177181c51fa1dca5e41c3d569d0 | [] | no_license | chapmjs/DataCamp | bf27b69dd94cb76130fcbeded39a70810f4de1c2 | 8a4bfb7367bba2e65754c304fe73380710d75d9a | refs/heads/master | 2020-05-29T09:48:34.914127 | 2019-11-05T23:39:06 | 2019-11-05T23:39:06 | 189,078,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 297 | r | RMySQL.R | # practice file
# Open a connection to a remote MySQL database ("tweater") using the
# course-provided practice credentials.
library(RMySQL)
con <- dbConnect(RMySQL::MySQL(),
                 dbname = "tweater",
                 host = "courses.csrrinzqubik.us-east-1.rds.amazonaws.com",
                 port = 3306,
                 user = "student",
                 password = "datacamp")
# Show the connection object (host, user, connection details).
print(con)
|
73272d32269c7d1e6d4864641ccd023ebc6ca2b8 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /hmlasso/R/plotter.R | 15df68e1a817ce9802fad88152b0be06c491d4f3 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,349 | r | plotter.R | #' Plot a solution path
#'
#' Draws the solution path of a fitted hmlasso model: one line per
#' coefficient, plotted against log(lambda).
#'
#' @param x hmlasso model
#' @param xlim x range; defaults to the range of log(lambda)
#' @param ylim y range; defaults to the range of the coefficients
#' @param ... further graphical parameters passed to the matlines function
#'
#' @examples
#' X_incompl <- as.matrix(iris[, 1:3])
#' X_incompl[1:5,1] <- NA
#' X_incompl[6:10,2] <- NA
#' y <- iris[, 4]
#' fit <- hmlasso(X_incompl, y, nlambda=50, lambda.min.ratio=1e-2)
#' plot(fit)
#'
#' @export
plot.hmlasso <- function(x, xlim=NULL, ylim=NULL, ...) {
  if (all(is.na(x$beta) | is.infinite(x$beta))) {
    warning("beta is all NA or infinite")
    return(invisible(NULL))
  }
  # Fill in each missing limit independently. Previously the default ranges
  # were only computed when *both* limits were missing, so supplying just one
  # of xlim/ylim made the other auto-range from the first coefficient path
  # only, instead of from all paths.
  if (is.null(xlim)) xlim <- range(log(x$lambda), na.rm=TRUE, finite=TRUE)
  if (is.null(ylim)) ylim <- range(x$beta, na.rm=TRUE, finite=TRUE)
  # Empty frame first, then one line per coefficient path.
  plot(x=log(x$lambda), y=x$beta[1,],
       xlim=xlim, ylim=ylim,
       type="n",
       xlab="log(lambda)", ylab="coefficients")
  matlines(log(x$lambda), t(x$beta), lty=1, ...)
}
#' Plot a cross validation error path
#'
#' Draws the cross validation error of a cv.hmlasso fit against log(lambda),
#' with error bars (cvlo/cvup) and vertical reference lines at lambda.min
#' (dashed) and lambda.1se (dotted).
#'
#' @param x cross validated hmlasso model
#' @param xlim x range; defaults to the range of log(lambda)
#' @param ylim y range; defaults to the range of the error bars
#' @param ... further graphical parameters passed to the arrows function
#'
#' @examples
#' X_incompl <- as.matrix(iris[, 1:3])
#' X_incompl[1:5,1] <- NA
#' X_incompl[6:10,2] <- NA
#' y <- iris[, 4]
#' cv_fit <- cv.hmlasso(X_incompl, y, nlambda=50, lambda.min.ratio=1e-2)
#' plot(cv_fit)
#' plot(cv_fit$fit)
#'
#' @export
plot.cv.hmlasso <- function(x, xlim=NULL, ylim=NULL, ...) {
  if (all(is.na(x$cve) | is.infinite(x$cve))) {
    # BUG FIX: the message used to say "beta" although this check is on the
    # cross validation errors.
    warning("cve is all NA or infinite")
    return(invisible(NULL))
  }
  # Fill in each missing limit independently (same fix as plot.hmlasso).
  if (is.null(xlim)) xlim <- range(log(x$lambda), na.rm=TRUE, finite=TRUE)
  if (is.null(ylim)) ylim <- range(c(x$cvlo, x$cvup), na.rm=TRUE, finite=TRUE)
  plot(log(x$lambda), x$cve, type="p",
       xlim=xlim, ylim=ylim,
       col="red", pch=16,
       xlab="log(lambda)", ylab="Cross Validation Error")
  # Error bars; arrows() warns on zero-length bars, which is harmless here.
  suppressWarnings(arrows(x0=log(x$lambda), x1=log(x$lambda),
                          y0=x$cvlo, y1=x$cvup,
                          code=3, angle=90, col="gray80", length=.05, ...))
  abline(v=log(x$lambda[x$lambda.min.index]),lty=2,lwd=.5)
  abline(v=log(x$lambda[x$lambda.1se.index]),lty=3,lwd=.5)
}
|
b7bebea679a99e7ea02023858992ed046c99536b | a71a5d5ae74e831eac70fa35e3b698006ade1ff5 | /cachematrix.R | 6771cc9a479307bbd406508cd1df47b1997ba4c8 | [] | no_license | mikhailidim/ProgrammingAssignment2 | 68e8ede2beca987bb927378e3ccd16826c2c3d60 | c7adfc79ad020ca8226246d75d43a0e31e22ddd7 | refs/heads/master | 2021-01-21T18:43:14.971098 | 2015-12-25T16:52:34 | 2015-12-25T16:52:34 | 48,585,032 | 0 | 0 | null | 2015-12-25T15:56:36 | 2015-12-25T15:56:35 | null | UTF-8 | R | false | false | 982 | r | cachematrix.R | ## Functions below can be used to speed up matrix inversion calculations.
## They use closures to cache inversion results.

## Create a cache-aware wrapper around a square matrix. The returned list
## exposes getters/setters for the matrix itself and for its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL  # invalidated whenever the matrix is replaced

  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setsolved <- function(inv) {
    cached_inverse <<- inv
  }
  getsolved <- function() {
    cached_inverse
  }

  # Public interface of the closure
  list(set = set, get = get, setsolved = setsolved, getsolved = getsolved)
}
## Compute the inverse of the special "matrix" produced by makeCacheMatrix,
## reusing the cached result when one is available.
cacheSolve <- function(x, ...) {
  cached <- x$getsolved()
  if (!is.null(cached)) {
    # Cache hit: skip the expensive solve() call.
    return(cached)
  }
  # Cache miss: invert now and remember the result for next time.
  inverse <- solve(x$get(), ...)
  x$setsolved(inverse)
  inverse
}
|
e26e38806a1ce04460dfb41ad66a2b6a1e1eb545 | 0b36fe983dd6d936b3ddf9f86f970474200b84c8 | /A2/ModuleScore_sepsis.R | 0b77a08d083f8eec9c4659f8a926784ef18494b2 | [] | no_license | iyalue/MIS-C_scRNAseq | 78f185f0e9969df971378cb367e826e0f1c495e0 | a058de9e1ffe855288030e8b018c94dadf5365e5 | refs/heads/main | 2023-06-04T06:27:20.933639 | 2021-03-28T11:56:24 | 2021-03-28T11:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,630 | r | ModuleScore_sepsis.R | library(Seurat)
library(tidyverse)
library(ggplot2)
## Module score for sepsis across classical monocytes
misc.cluster <- readRDS("shared/Myeloid/misc_integrated_myeloid_updated_cond.rds")
# Put the condition and sample factor levels in plotting order
condition_order <- c("C.HD", "MIS-C", "MIS-C-R", "A.HD", "COVID19-A", "COVID19-B")
misc.cluster@meta.data$condition_new <- factor(misc.cluster@meta.data$condition_new, level = condition_order)
sample_order <- c("C.HD1", "C.HD2", "C.HD3", "C.HD4", "C.HD5", "C.HD6",
                  "P1.1", "P2.1", "P3.1", "P4.1", "P5.1", "P6.1", "P7.1",
                  "P3.2", "P4.2", "A.HD1", "A.HD2", "A.HD3", "A.HD4", "A.HD5",
                  "A.HD6", "A.HD7", "A.HD8", "A.HD9", "A.HD10", "A.HD11", "A.HD12",
                  "A.HD13", "A.COV1.1", "A.COV2.1", "A.COV3.1", "A.COV4.1", "A.COV1.2",
                  "A.COV2.2", "A.COV3.2", "A.COV4.2", "A.COV5.2", "A.COV6.2")
misc.cluster@meta.data$sample_id <- factor(misc.cluster@meta.data$sample_id, level = sample_order)
cols <- c("#6baed6", "#c94040", "#969696", "#9970ab", "#ec7014", "#fec44f")
# Sepsis signature gene set from Lydon et al. (respiratory infections)
sepsis <- list(c("PLAC8", "CLU", "RETN", "CD63", "ALOX5AP", "SEC61G", "TXN", "MT1X"))
# Score every cell for the signature on the raw RNA assay
misc.cluster <- AddModuleScore(misc.cluster, name = "Viral_score_up", nbins=24, ctrl=100,
                               features = sepsis, assay = "RNA") #not scaled
misc.mono <- subset(misc.cluster, idents = c("Classical Monocytes I",
                                             "Classical Monocytes II", "Classical Monocytes III")) #classical monocyte subclusters only
pbmc_meta <- misc.mono[[]]
# Export per-cell scores for box plot statistical analysis
df_myeloid <- data.frame('sample' = pbmc_meta$sample_id, 'score' = pbmc_meta$Viral_score_up1,
                         'cluster' = pbmc_meta$annotation, 'condition' = pbmc_meta$condition)
write.csv(df_myeloid, file = "ModuleScore/sepsis_module_score_cmono_subclusters.csv") #use to calculate pvalue
# --- Per-sample statistics and plotting ---
library(dplyr)
library(ggplot2)
library(ggsignif)
cyto <- read.csv("Sheets/sepsis_module_score_cmono_subclusters.csv")
names <- unique(cyto$sample)
# Per-sample mean module score. The original version hard-coded 38 rows
# (rep(NA, 38)) and used the 1:length(names) pattern; this derives the size
# from `names` directly. Column 3 of `cyto` is the per-cell score (column 1
# is the row-name column that write.csv/read.csv add by default).
means <- data.frame(
  name = names,
  value = vapply(names,
                 function(nm) mean(cyto[which(cyto$sample == nm), 3]),
                 numeric(1),
                 USE.NAMES = FALSE),
  stringsAsFactors = FALSE
)
# Annotate each per-sample mean with its clinical condition
means[means$name %in% c("A.HD1", "A.HD2", "A.HD3", "A.HD4", "A.HD5", "A.HD6",
                        "A.HD7", "A.HD8", "A.HD9", "A.HD10", "A.HD11",
                        "A.HD12", "A.HD13"), 'condition'] <- 'A.HD'
means[means$name %in% c("A.COV1.1", "A.COV2.1", "A.COV3.1","A.COV4.1"), 'condition'] <- 'COVID19-A'
means[means$name %in% c("A.COV1.2", "A.COV2.2", "A.COV3.2", "A.COV4.2",
                        "A.COV5.2", "A.COV6.2"), 'condition'] <- 'COVID19-B'
means[means$name %in% c("P1.1", "P2.1", "P3.1", "P4.1",
                        "P5.1", "P6.1", "P7.1"), 'condition'] <- 'MIS-C'
means[means$name %in% c("C.HD1", "C.HD2", "C.HD3",
                        "C.HD4", "C.HD5", "C.HD6"), 'condition'] <- 'C.HD'
means[means$name %in% c("P3.2", "P4.2"), 'condition'] <- 'MIS-C-R'
# Subset of MIS-C samples highlighted separately (in red) in the plot
means_severe <- means %>% filter(name %in% c("P1.1", "P2.1", "P3.1", "P6.1", "P7.1"))
level_order <- c("C.HD", "MIS-C", "MIS-C-R", "A.HD", "COVID19-A", "COVID19-B")
means$condition <- factor(means$condition, level = level_order)
means_severe$condition <- factor(means_severe$condition, level = level_order)
misc_tmp <- means %>% filter(condition == "MIS-C")
chd_tmp <- means %>% filter(condition == "C.HD")
covid19a_tmp <- means %>% filter(condition == "COVID19-A")
covid19b_tmp <- means %>% filter(condition == "COVID19-B")
ahd_tmp <- means %>% filter(condition == "A.HD")
# Two-sided Wilcoxon rank-sum tests: each patient group vs. its healthy donors
w.test <- wilcox.test(x = chd_tmp$value, y = misc_tmp$value, alternative = c("two.sided"), correct = FALSE)
pval_ped <- w.test$p.value
w.test <- wilcox.test(x = ahd_tmp$value, y = covid19a_tmp$value, alternative = c("two.sided"), correct = FALSE)
pval_covida <- w.test$p.value
w.test <- wilcox.test(x = ahd_tmp$value, y = covid19b_tmp$value, alternative = c("two.sided"), correct = FALSE)
pval_covidb <- w.test$p.value
# Samples not in the highlighted set (plotted with the condition colours)
means2 <- means %>% filter(!(name %in% c("P1.1", "P2.1", "P3.1", "P6.1", "P7.1")))
cols <- c("#6baed6", "#FC9272", "#969696", "#9970ab", "#ec7014", "#fec44f")
# Box plot of per-sample means with significance annotations.
# NOTE(review): the annotated p-values below are hard-coded strings; confirm
# they match the wilcox.test results computed above for the current data.
plot1 <- ggplot(means, aes(x = condition, y = value)) +
  geom_boxplot(lwd=0.15, outlier.shape = NA) +
  geom_jitter(data=means_severe, colour ="#c94040", size = 0.5, width = 0.1)+
  geom_jitter(data = means2, aes(colour = factor(condition, level = level_order)), size = 0.5, width = 0.1) +
  scale_color_manual(values = cols) +
  geom_signif(comparisons = list(c("C.HD", "MIS-C")), annotation = "0.03",
              size = 0.12, textsize = 2,
              y_position =0.2)+
  geom_signif(comparisons = list(c("A.HD", "COVID19-A")), annotation = "0.0008",
              size = 0.12, textsize = 2,
              y_position =0.32)+
  geom_signif(comparisons = list(c("A.HD", "COVID19-B")), annotation = "<0.0001",
              size = 0.12, textsize = 2,
              y_position =0.45)+
  ggtitle("Sepsis signature in classical monocytes") +
  xlab("") +
  theme_classic(base_size = 7) +
  theme(axis.text.x = element_text(angle = 45, vjust = 0.9, hjust=1, size = 7, color = "black"),
        axis.text.y = element_text(color = "black"), axis.line = element_line(size = 0.15),
        legend.position = "none", axis.ticks = element_line(size = 0.15)) +
  ylab("Average module score") +
  ylim(c(-0.6, 0.5))
plot1
ggsave(plot1, file = "sepsis_monocyte_module_score_subclusters.pdf", height = 2, width =3)
|
fd466c64ae26b595cc259aab0122455b3e0e80e1 | 0c32555a611c9471b8d79bbb055d0c62cfb7ac18 | /man/lag.Rd | f1c9f056f7add79327b086c8b110979e8b99d7b7 | [] | no_license | AnderWilson/smurf | 9105f0f62d3834a15c88daefdc3f834e8a3467d7 | 95e249cf0fbf90ce97ca78421e0aac0cc1e281d8 | refs/heads/master | 2020-05-20T03:21:19.830279 | 2015-08-11T13:41:24 | 2015-08-11T13:41:24 | 38,887,639 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 833 | rd | lag.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/lag.R
\name{lag}
\alias{lag}
\title{Create lag variables}
\usage{
lag(date, x, lags = 1, by)
}
\arguments{
\item{date}{A vector of dates.}
\item{x}{The variable to be lagged.}
\item{lags}{The number of days the variable should be lagged}
\item{by}{A vector of ids or a matrix with columns as the id variables. The events will be found separately within each unique combination of id variables. This is optional.}
}
\value{
Returns a data.table with lagged exposures as well as the date and by variables.
}
\description{
This function creates lagged copies of a variable, shifting it by the specified number of days relative to the date variable, optionally within each unique combination of id variables.
}
\author{
Ander Wilson
}
|
09b83fdd6593e8edbfa569cd32bb50f0c2dfb76c | 620901c3e8df2d92b6b40c96d3b9c60d7c316548 | /vignettes/precompute_vignettes.R | 980a71eca73e2960e969cf8c58ed383e644731b6 | [
"MIT"
] | permissive | KTH-Library/kthapi | 2d6fdb1e819fee92e74a9c4c0c11b636dacd6f29 | 4bf0dd2edea543bd57c5c3478321abefca0cee1b | refs/heads/master | 2023-06-23T06:35:30.889557 | 2023-06-16T10:55:35 | 2023-06-16T10:55:35 | 246,341,870 | 1 | 0 | NOASSERTION | 2022-06-07T14:14:32 | 2020-03-10T15:44:06 | HTML | UTF-8 | R | false | false | 580 | r | precompute_vignettes.R | library(knitr)
# NB: Remember to execute this script after changing the vignettes (*.orig)!
# Rationale: https://ropensci.org/blog/2019/12/08/precompute-vignettes/
#Sys.chmod("vignettes/precompute_vignettes.R")
# Vignettes that are precomputed from their *.orig sources
scripts <- file.path("vignettes", c(
  "KTH-Departments-from-Altmetric-Explorer.Rmd",
  "Potential-Heads.Rmd",
  "Publications-API-Usage.Rmd",
  "Schools-Departments-from-KTH-Directory-API.Rmd"
))
# Map a vignette path to its .orig source path
orig <- function(x) paste0(x, ".orig")
#file.copy(scripts, orig(scripts))
# Knit one .orig source into its final .Rmd (chunks are executed here)
reknit <- function(orig, new) knit(orig, new)
# Re-knit every vignette from its .orig source
purrr::walk2(orig(scripts), scripts, reknit)
|
e04c29ccb1016bffa6551f973bccef60612b21af | be213d42d477cbb10051150c6b8d11ddd7a9ac50 | /tests/testthat.R | d51a6a84f66aab3ff44dc21a102773a0ae8ca85f | [] | no_license | mdsumner/ozcran | cca71af45d460bbfaacef13aa2ee06509c4d584e | eff81307243def5a18fe909883698fdf5bf79076 | refs/heads/master | 2022-02-13T07:09:20.070136 | 2019-07-22T13:42:14 | 2019-07-22T13:42:14 | 198,210,243 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
library(ozcran)

# Standard testthat entry point: discover and run all tests under tests/testthat/.
test_check("ozcran")
|
3a22078d29e195dac8e605ece8d103ebff5e8b3d | bc7736a91a9fddda852bbf1f326dcf330b3f0f57 | /tests/testthat/test_bipartite_stats.R | a64df947e7298cf0816627d08da3ecf6e735b776 | [] | no_license | PrimulaLHD/econullnetr | 2d49fb41b584f78e96eecf0467b7fced66f4bce6 | a0776948b7195e4dbe1ad485c285a7aab0dfeba6 | refs/heads/master | 2020-03-29T11:32:17.863525 | 2018-04-18T20:01:50 | 2018-04-18T20:01:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,737 | r | test_bipartite_stats.R | # Use readRDS to view reference data sets
# e.g. readRDS("tests/testthat/sl_test")
library(econullnetr)
context("Bipartite_stats")
# Dummy nullnet object for unit testing, assembled by hand from the
# package's bundled test data (observed interactions + simulated null data).
s.1 <- list()
s.1$obs.interactions <- read.csv(system.file("testdata", "obs_data.csv",
                                             package = "econullnetr"))
s.1$rand.data <- read.csv(system.file("testdata", "sim_data.csv",
                                      package = "econullnetr"))
s.1$n.iterations <- 100
class(s.1) <- "nullnet"
# Check error message for significance level outside 0-1.
test_that("Basic error warnings",{
  expect_error(bipartite_stats(s.1, signif.level = 1.1,
                               index.type = "specieslevel",
                               indices = "degree"))
})
# Check that all indices can be handled at all three levels (species, group
# and network) and that error and warning messages are produced if unsupported
# indices (e.g. degree distribution) are specified
test_that("Check bipartite specieslevel compatibility",{
  skip_on_cran()
  set.seed(1234)
  expect_error(bipartite_stats(s.1, index.type = "specieslevel",
                               indices = "degree distribution"))
  expect_warning(bipartite_stats(s.1, index.type = "specieslevel",
                                 indices = "ALL", prog.count = FALSE))
  expect_warning(bipartite_stats(s.1, index.type = "grouplevel",
                                 indices = "ALL", prog.count = FALSE))
  expect_error(bipartite_stats(s.1, index.type = "networklevel",
                               indices = "topology"))
  expect_warning(bipartite_stats(s.1, index.type = "networklevel",
                                 indices = "ALL", prog.count = FALSE))
})
# Regression tests against stored reference outputs (see readRDS tip above).
test_that("Consistent outputs of the bipartite statistics at all 3 levels",{
  expect_equal_to_reference(bipartite_stats(s.1, index.type = "specieslevel",
                                            indices = c("degree",
                                                        "species strength"),
                                            prog.count = FALSE), "sl_test")
  expect_equal_to_reference(bipartite_stats(s.1, index.type = "grouplevel",
                                            indices = c("mean number of links",
                                                        "partner diversity"),
                                            prog.count = FALSE), "gl_test")
  expect_equal_to_reference(bipartite_stats(s.1, index.type = "networklevel",
                                            indices = c("connectance",
                                                        "weighted connectance"),
                                            prog.count = FALSE), "nl_test")
})
|
ccb2b965dba4e6cb8192be5ebe77f6a387095f0f | b79956f25c9cc130ef7bf41629ed5909467bd4de | /man/sequenceverts.Rd | f71ce7add62fbd8bb5b50bc2283f8d8c3199f06e | [] | no_license | mbtyers/riverdist | 652f6ca7153722741c5fa450834fe5d6e6be938e | 160e9368d420b960f776a3e93f1b84b716a19e23 | refs/heads/master | 2023-08-09T18:23:30.990174 | 2023-08-07T17:11:08 | 2023-08-07T17:11:08 | 47,280,222 | 21 | 1 | null | 2023-08-02T18:37:35 | 2015-12-02T18:32:05 | R | UTF-8 | R | false | true | 1,083 | rd | sequenceverts.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segs_direction.R
\name{sequenceverts}
\alias{sequenceverts}
\title{Store Vertices in Ascending Sequence}
\usage{
sequenceverts(rivers)
}
\arguments{
\item{rivers}{The river network object to use}
}
\value{
A new river network object (see \link{rivernetwork})
}
\description{
Rearranges the vertices of a river network object so that
vertices are stored sequentially moving up river for all segments
(coordinates [1,] are the bottom of each segment).
}
\note{
Even without calling \code{sequenceverts}, the vertices will be stored
sequentially - either moving up river or down for a given segment. What
\code{sequenceverts()} adds is a standardized direction.
Currently, no function in package 'riverdist' requires the vertices to be stored
sequentially.
}
\examples{
data(Gulk)
Gulk <- setmouth(seg=1, vert=1, rivers=Gulk)
str(Gulk)
Gulk.dir <- sequenceverts(rivers=Gulk)
str(Gulk.dir)
}
\seealso{
\link{line2network}
}
\author{
Matt Tyers
}
|
4b1c940bb825613c12b5829beb287295636dc264 | d1d1e98c1ad3be28ab4e04e47bf0e2dff8a7b847 | /R/nesting.R | 0585f84fa1978a522054d22d6dbedb2dfa22e952 | [] | no_license | dis-organization/ggrasp | 3c57b41cb67d0580c23430d865b18c9681765c70 | 52866f36ab23a1e083a1f5ce6d2b8f27f5bf3cb4 | refs/heads/master | 2021-06-01T21:25:44.989679 | 2016-07-12T03:05:21 | 2016-07-12T03:05:30 | 62,983,351 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,073 | r | nesting.R | #' @rdname nest-Spatial
#' @export
nest1_.Spatial <- function(data, ...) {
  # Coordinate table nested once: one geometry tibble per object_ ("Object").
  sptab <- sptable(data) %>%
    group_by_("object_") %>% nest_(key_col = "Object")
  # Attribute table of the Spatial object, one row per object.
  attrd <- as_data_frame(as.data.frame(data))
  y <- bind_cols(attrd, sptab)
  # Carry the coordinate reference system along as an attribute.
  attr(y, "crs") <- proj4string(data)
  class(y) <- c("sp1nest", class(y))
  y
}
#' Nested Spatial
#'
#' Create nested tables from Spatial classes.
#'
#' For \code{nest_} the tables are doubly nested (vertices within branches,
#' branches within objects). For \code{nest1_} the tables are singly nested
#' (one geometry table per object).
#' @param data sp Spatial object
#' @param ... ignored
#' @examples
#' library(tidyr)
#' data(holy_poly)
#' library(dplyr)
#' spdata <- spbabel::sp(holy_poly, attr_tab = data_frame(mydat = c("a", "b")))
#' ##ggplot(holy_poly, aes(x = x_, y = y_)) + geom_holygon(aes(group = branch_, fill = object_))
#' ggg <- nest(spdata)
#' plot(ggg, col = rainbow(nrow(ggg), alpha = 0.5))
#' ## plot with ggvis
#' ##holy_poly %>% group_by(object_) %>% ggvis(~x_, ~y_, fill = ~object_, stroke = ~branch_) %>% layer_paths()
#'
#' @return nested tibble
#' @export
#'
#' @importFrom spbabel sptable
#' @importFrom dplyr as_data_frame bind_cols group_by_
#' @importFrom sp proj4string
#' @importFrom tidyr nest_
#' @rdname nest-Spatial
nest_.Spatial <- function(data, ...) {
  # Coordinate table nested twice: vertices within branch_ ("Branch_"),
  # then branches within object_ ("Object").
  sptab <- sptable(data) %>%
    group_by_("branch_", "object_", "island_") %>%
    nest_(key_col = "Branch_") %>%
    group_by_("object_") %>% nest_(key_col = "Object")
  # Attribute table of the Spatial object, one row per object.
  attrd <- as_data_frame(as.data.frame(data))
  y <- bind_cols(attrd, sptab)
  # Carry the coordinate reference system along as an attribute.
  attr(y, "crs") <- proj4string(data)
  class(y) <- c("spnest", class(y))
  y
}
#' Plot spnest
#'
#' Base-graphics plot of a nested tibble: each object is drawn as a
#' (possibly multi-branch) filled polygon via \code{polypath}.
#' @param x nested tibble
#' @param y unused; present for compatibility with the \code{plot} generic
#' @param col fill colour(s) for the polygons
#' @param ... further arguments passed to \code{polypath}
#' @param add if \code{FALSE} (the default), start a new plot first
#'
#' @return invisibly returns \code{NULL}; called for its plotting side effect
#' @export
#'
#' @examples
#' hp <- bind_rows(lapply(split(coords, coords$branch_), function(x) if (x$island_[1]) x else {x <- x[nrow(x):1, ]; x}))
#' @importFrom ggplot2 ggplot aes geom_polygon
#' @importFrom tidyr unnest
plot.spnest <- function(x, y = "object_", col = "#7F7F7F7F", ..., add = FALSE) {
  # All coordinates across all objects, used only to set up the plot region.
  allcoords <- unnest(unnest(x[, "Object"]))
  #coords %>% group_by_("object_") %>% ggvis(~x_, ~y_, fill = ~object_, stroke = ~branch_) %>% layer_paths()
  if (!add) plot0(allcoords$x_, allcoords$y_)
  # NOTE(review): col is recycled to the number of coordinate rows, but it is
  # indexed per object below -- confirm per-object recycling was intended.
  col <- rep(col, nrow(allcoords))
  for (i in seq(nrow(x))) {
    # Coordinates of object i; branches get separated by NA rows so that
    # polypath treats them as distinct subpaths.
    coords <- unnest(unnest(x[i, "Object"]))
    nacoords <- na_grp(coords, coords$branch_)
    polypath(nacoords$x_, nacoords$y_, col = col[i], ...)
  }
  invisible(NULL)
}
#' @importFrom dplyr bind_rows
na_grp <- function(x, g) {
  # Split `x` by the grouping vector `g`, append an all-NA row to each
  # chunk, stack the chunks, then drop the trailing NA row. The NA rows
  # act as ring separators for polypath().
  chunks <- lapply(split(x, g), function(chunk) rbind(chunk, NA))
  head(bind_rows(chunks), -1)
}
## from https://github.com/rstudio/gggeom
plot0 <- function(x, y,
                  xlim = range(x, na.rm = TRUE),
                  ylim = range(y, na.rm = TRUE), ...) {
  # Minimal canvas in the style of rstudio/gggeom: thin margins, no box,
  # grey tick marks and solid grey gridlines. par() settings are restored
  # on exit.
  opar <- par(mar = c(1.5, 1.5, 0, 0), cex = 0.8)
  on.exit(par(opar))
  # Empty plot spanning the data ranges; axes are drawn manually below.
  plot.default(xlim, ylim, type = "n", xlab = "", ylab = "", axes = FALSE)
  for (side in 1:2) {
    axis(side, lwd = 0, lwd.ticks = 1, col = "grey80", col.axis = "grey60",
         padj = if (side == 1) -1 else 1)
  }
  grid(lty = "solid", col = "grey80")
}
|
e704b34df629213c8a70772920fb99e2b27c6a31 | 269c1c787a4c1796acb0820eefa0de6bdfad3072 | /man/coprime.Rd | 33290ec867b9810572b7ca2593967a4f1ecb5774 | [] | no_license | kelvinyangli/crypto | dc766d7f0dba3611148d5e63f5753eef617f6c57 | 51fc103f301904eca7885beb215f8d58b392a2a4 | refs/heads/master | 2023-01-02T05:20:36.836360 | 2020-10-16T10:12:40 | 2020-10-16T10:12:40 | 296,191,738 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 293 | rd | coprime.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coprime.R
\name{coprime}
\alias{coprime}
\title{Checking coprime}
\usage{
coprime(a, b)
}
\arguments{
\item{a}{An integer.}
\item{b}{An integer.}
}
\description{
This function checks if two integers are coprime.
}
|
50b5acc6eb0a23a63086c8c76cbc63afd399ce46 | 130fac5c7630d17332e253b4da6870f028d6f4d2 | /man/RMma.Rd | a2ba0d91f78c39f5b664a67cf8312b260aa30e4c | [] | no_license | cran/RandomFields | 41efaabb19f883462ec3380f3d4c3102b0ed86b4 | 41d603eb8a5f4bfe82c56acee957c79e7500bfd4 | refs/heads/master | 2022-01-26T09:24:35.125597 | 2022-01-18T18:12:52 | 2022-01-18T18:12:52 | 17,693,063 | 5 | 4 | null | 2019-05-20T21:08:38 | 2014-03-13T03:21:25 | C++ | UTF-8 | R | false | false | 1,595 | rd | RMma.Rd | \name{RMma}
\alias{RMma}
\title{Ma operator}
\description{
\command{\link{RMma}} is a univariate stationary covariance model
depending on a univariate stationary covariance model.
The corresponding covariance function only depends on the difference
\eqn{h}{h} between two points and is given by
\deqn{C(h) = (\theta / (1 - (1-\theta) \phi(h)))^\alpha}{C(h) = (\theta
/ (1 - (1-\theta) * \phi(h)))^\alpha}
}
\usage{
RMma(phi, alpha, theta, var, scale, Aniso, proj)
}
\arguments{
\item{phi}{a stationary covariance \command{\link{RMmodel}}.}
\item{alpha}{a numerical value; positive}
\item{theta}{a numerical value; in the interval \eqn{(0,1)}{(0,1)}}
\item{var,scale,Aniso,proj}{optional arguments; same meaning for any
\command{\link{RMmodel}}. If not passed, the above
covariance function remains unmodified.}
}
%\details{}
\value{
\command{\link{RMma}} returns an object of class \code{\link[=RMmodel-class]{RMmodel}}.
}
\references{
\itemize{
\item Ma, C. (2003)
Spatio-temporal covariance functions generated by mixtures.
\emph{Math. Geol.}, \bold{34}, 965-975.
}
}
\me
\seealso{
\command{\link{RMmodel}},
\command{\link{RFsimulate}},
\command{\link{RFfit}}.
}
\keyword{spatial}
\keyword{models}
\examples{\dontshow{StartExample()}
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
model <- RMma(RMgauss(), alpha=4, theta=0.5)
x <- seq(0, 10, 0.02)
plot(model)
plot(RFsimulate(model, x=x))
\dontshow{FinalizeExample()}} |
d0e7aa76c26008eafd7336e60fb8838796ff8fa5 | 6acd86b9f9e76bb0eb3c08c650e16354d32eee77 | /server.R | f6b03014cacc1fc72a4f71149df17ace1dfd7d2d | [] | no_license | yuen26/hntaxicab-shiny | 4fab3117f965240546fe275f1ff2b57cec2c9794 | 211d99995ee07152a7ab0d2489d501b70c44c222 | refs/heads/master | 2021-08-27T23:31:13.422521 | 2017-12-10T19:27:18 | 2017-12-10T19:27:18 | 99,350,497 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,331 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(leaflet)
library(plotly)
library(data.table)
library(dplyr)
source("ranking/region.R")
source("ranking/timeslot.R")
source("ranking/matrix.R")
source("ranking/pagerank.R")
source("ranking/hits.R")
source("color.R")
shinyServer(function(input, output) {
heatMapColors <- getHeatMapColors()
clusterColors <- getClusterColors()
# ==============================================================
# =================== TRAFFIC FLAW RANKING =====================
# ==============================================================
# ============================= MAP ============================
regions <- drawMap()
viewPoint <- getViewPoint(regions)
output$map <- renderLeaflet({
leaflet(options = leafletOptions(minZoom = 0, maxZoom = 18)) %>%
setView(lng = viewPoint$lng, lat = viewPoint$lat, zoom = 13) %>%
addTiles() %>%
addRectangles(
data = regions,
lng1 = regions$east, lat1 = regions$north,
lng2 = regions$west, lat2 = regions$south,
color = "green", weight = 2) %>%
addLabelOnlyMarkers(
data = regions,
lng = regions$centerLng,
lat = regions$centerLat,
label = paste0(as.character(regions$id)),
labelOptions = labelOptions(
noHide = T,
direction = "top",
textOnly = T,
style = list('color' = 'green', 'font-size' = '12px')
)
)
})
# ==================== INPUT PANEL ====================
output$timeslots <- renderUI(selectInput(
"timeslots",
label = "",
choices = getTimeslots()
))
output$days <- renderUI(selectInput(
"days",
label = "",
choices = c("Work Day", "Rest Day")
))
output$algorithms <- renderUI(selectInput(
"algorithms",
label = "",
choices = c("PageRank", "HITS")
))
# ==================== DATA MINING ====================
output$table <- renderDataTable({
# Handle selected input
selectedTimeslot <- eventReactive(input$submit, input$timeslots)
selectedTimeslot <- selectedTimeslot()
selectedDay <- eventReactive(input$submit, input$days)
selectedDay <- selectedDay()
selectedAlgorithm <- eventReactive(input$submit, input$algorithms)
selectedAlgorithm <- selectedAlgorithm()
# Import data
withProgress(message = "Importing data ...", value = 1/4, {
# Read tracks from corresponding CSV file
tracks <- fread(getCSVPath(selectedTimeslot, selectedDay))
# Build region matrix
incProgress(1/4, message = "Building region matrix ...")
Sys.sleep(1)
nodes <- buildRegionMatrix(tracks)
# Execute algorithm
incProgress(1/4, message = paste("Executing", selectedAlgorithm, "..."))
Sys.sleep(1)
if (selectedAlgorithm == "PageRank") {
result <- PR(nodes)
} else {
result <- HITS(nodes, 5)
}
# Render results
incProgress(1/4, message = "Rendering results ...")
Sys.sleep(1)
if (selectedAlgorithm == "PageRank") {
sortedResult <- data.frame(No = c(1:100), result[order(-result$PageRank),])
regions$rank <- result$PageRank
} else {
sortedResult <- data.frame(No = c(1:100), result[order(-result$Average),])
regions$rank <- result$Average
}
regions <- regions[order(-regions$rank),]
regions$color <- heatMapColors
output$map <- renderLeaflet({
leaflet(options = leafletOptions(minZoom = 0, maxZoom = 18)) %>%
setView(lng = viewPoint$lng, lat = viewPoint$lat, zoom = 13) %>%
addTiles() %>%
addRectangles(
data = regions,
lng1 = regions$east, lat1 = regions$north,
lng2 = regions$west, lat2 = regions$south,
color = "#333333", weight = 2,
fillColor = regions$color, fillOpacity = 0.5) %>%
addLabelOnlyMarkers(
data = regions,
lng = regions$centerLng,
lat = regions$centerLat,
label = paste0(as.character(regions$id)),
labelOptions = labelOptions(
noHide = T,
direction = "top",
textOnly = T,
style = list('color' = 'green', 'font-size' = '12px')
)
)
})
})
return(sortedResult)
}, options = list(pageLength = 10))
# ==============================================================
# ======================= TAXI BEHAVIOR ========================
# ==============================================================
tracks <- reactive({
if (is.null(input$file)) {
return(NULL)
}
read.csv(input$file$datapath)
})
output$map2 <- renderLeaflet({
tracks <- tracks()
if (!is.null(tracks)) {
source("traclus/input.R")
source("traclus/partitioning.R")
source("traclus/clustering.R")
# Get dates (a trajectory is tracks in a date)
dates <- getDates(tracks)
# Partitioning phase
print("=================== PARTITIONING PHASE ===================")
lineSegments <- c()
for (i in 1:length(dates)) {
print(dates[i])
# Trajectory
trajectory <- getTrajectory(tracks, dates[i])
print("Trajectory removed stop points:")
print(trajectory)
# Convert cartesian to geographic coordinate
geoTrajectory <- getGeoTrajectory(trajectory)
print("Trajectory as geographic coordinate:")
print(geoTrajectory)
lineSegments <- c(lineSegments, partitioning(geoTrajectory))
}
print("Line segments:")
print(lineSegments)
# Clustering phase
print("=================== GROUPING PHASE ===================")
clusters <- generateCluster(lineSegments)
print(clusters)
# Output
print("=================== OUTPUT ===================")
viewPoint2 <- tracks[1,]
map2 <- leaflet(options = leafletOptions(minZoom = 0, maxZoom = 20)) %>%
setView(lng = viewPoint2$lng, lat = viewPoint2$lat, zoom = 15) %>%
addTiles() %>%
addPolylines(data = tracks, lat = tracks$lat, lng = tracks$lng, color = "black", weight = 3, opacity = 1)
for (i in 1:length(clusters)) {
print(paste("--- Cluster", i, " ---"))
cluster <- clusters[[i]]
lats <- c()
lngs <- c()
for (j in 1:length(cluster)) {
lineSegment <- lineSegments[[cluster[j]]]
start <- getCartesianVector(lineSegment[1], lineSegment[2], lineSegment[3])
end <- getCartesianVector(lineSegment[4], lineSegment[5], lineSegment[6])
lats <- c(lats, start[1], end[1])
lngs <- c(lngs, start[2], end[2])
print(paste(start[1], start[2], end[1], end[2]))
}
points <- data.frame(lat = lats, lng = lngs)
map2 <- addPolylines(map2, data = points, lat = points$lat, lng = points$lng, color = clusterColors[i], weight = 3, opacity = 1)
}
map2
}
})
}) |
6c3f49eaf71429fbe76a9a4e0d7a46efe2e03e67 | 0c780d32356a4b3a2fefa9392ac545bd078de909 | /man/generation-time.Rd | 610af9fc03c03c87bd89833556be977b442be062 | [
"MIT"
] | permissive | heavywatal/futurervpa | 80fdeb533017eba10478d760fd9e15c1cd5b34d5 | 8fc6cfa6e831c2b6e8245b4eb83d2a769cc508e9 | refs/heads/master | 2020-04-02T03:56:01.315555 | 2018-10-21T09:20:12 | 2018-10-21T09:20:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 288 | rd | generation-time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generation-time.R
\name{Generation.Time}
\alias{Generation.Time}
\title{Generation Time}
\usage{
Generation.Time(vpares, maa.year = 2014:2015, M.year = 2014:2015,
Plus = 19)
}
\description{
Generation Time
}
|
f757376f89a936dfc569e9b1c5c7097b6896dac4 | 3211f7bbd751ce099463d56045945e27581215bc | /plot1.R | eb0cf3ec744e592a33dbe372f71014868bc0666a | [] | no_license | awly2011/ExData_Plotting1 | 5a1d94c97fd7aee530a1b411e49e10c706d73f52 | 26790f1ef51d8358bed9fe74a9d145790da58aee | refs/heads/master | 2021-01-15T13:33:45.292280 | 2016-09-19T00:40:40 | 2016-09-19T00:40:40 | 68,553,678 | 0 | 0 | null | 2016-09-18T23:48:41 | 2016-09-18T23:48:41 | null | UTF-8 | R | false | false | 591 | r | plot1.R | # The data set is already downloaded, set current directory.
setwd("C:/Users/awl_y/desktop/R-chuanfu/class4/project1")
# Read the full dataset and keep only the rows dated 1 Feb 2007 or 2 Feb 2007.
data1 <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors = FALSE)
date1 <- data1$Date
data2 <- data1[(date1=="1/2/2007" | date1=="2/2/2007"), ]
# Coerce from character to numeric; any non-numeric entries become NA.
Global_active_power <- as.numeric(data2$Global_active_power)
# Plot 1: histogram of global active power, written to plot1.png.
png(file="plot1.png")
hist(Global_active_power, col="red", main = "Global Active Power", xlab = "Global Active Power(kilowatts)")
dev.off()
|
ee297867945f524ee14ca3c4ad2c1cfde7a307d3 | e3704a1dc4a10d49a99d443b45ccf30d6aab8d55 | /R/si2-model-results.R | 52a19bc6e45cb254611bcc97b1f0725aa35b1a0c | [] | no_license | andybega/isa-2018 | f39ec5f0e08fb4a101b54ba9e16688b3a486d469 | 1a2c76663355b999a5cce231e06044f189ab3485 | refs/heads/master | 2021-01-25T11:49:03.055210 | 2019-10-21T07:11:56 | 2019-10-21T07:11:56 | 123,430,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,956 | r | si2-model-results.R |
library("tidyverse")
library("states")
library("hrbrthemes")
library("nlme")
library("lme4")
library("broom")
library("ggstance")
library("scoringRules")
library("PRROC")
source("code/functions.R")
cy <- readRDS("output/cy.rds")
data(gwstates)
cnames <- gwstates %>% group_by(gwcode) %>% summarize(country = unique(country_name)[1])
models <- tibble(file = dir("output/models", full.names = TRUE)) %>%
filter(!str_detect(basename(file), "xgboost")) %>%
mutate(model_name = basename(file) %>% str_replace(".rds", "")) %>%
mutate(model_obj = map(file, readRDS)) %>%
# Mark what kind of controls are in the model
mutate(controls = ifelse(str_detect(model_name, "(base2)|(controls)"), "Controls", "Base"),
model_form = case_when(
str_detect(model_name, "glm_pois") ~ "Poisson (GLM)",
str_detect(model_name, "glm_nb") ~ "NegBin (GLM)",
str_detect(model_name, "glmer_pois") ~ "Poisson w RE (GLMER)",
str_detect(model_name, "glmer_nb") ~ "NegBin (GLMER)"
))
# Coefficient plot/table --------------------------------------------------
coefs <- models %>%
mutate(estimates = map(model_obj, tidy.itt)) %>%
dplyr::select(-file, -model_obj) %>%
unnest(estimates) %>%
# # add a dummy row so factor level gets plotted
# bind_rows(., tibble(y = "itt_alleg_vtcriminal",
# term = "Legal system: Civil\n(reference category)",
# model_name = "mdl_base1")) %>%
mutate(y = str_replace(y, "itt_alleg_vt", "") %>% str_to_title()) %>%
mutate(term = rename_terms(term))
p <- coefs %>%
filter(term!="Global intercept") %>%
ggplot(., aes(y = estimate, x = term, color = controls, group = model_name)) +
geom_hline(yintercept = 0, linetype = 1, color = "gray70", size = .4) +
facet_wrap(~ y) +
coord_flip() +
geom_linerange(aes(ymin = estimate - 1.96*std.error, ymax = estimate + 1.96*std.error),
position = position_dodge(width = h_width), alpha = .6) +
geom_point(position = position_dodge(width = h_width), aes(shape = model_form),
alpha = .6) +
theme_ipsum() +
labs(x = "", y = "") +
scale_color_discrete("Specification:") +
scale_shape_discrete("Model type:") +
theme(legend.position = "top")
p
ggsave(p, file = "output/figures/model-coefs-all-model-forms.png", height = 8, width = 10)
# Tables
foo = coefs %>%
filter(model_form=="glmer_pois",
y=="Criminal") %>%
gather(key, value, estimate, std.error) %>%
mutate(col = paste0(y, "_", model_name, "_", key)) %>%
select(term, col, value)
stargazer(mdl[[1]], mdl[[2]], type = "text")
stargazer(mdl[[1]], mdl[[1]], type = "text",
coef = list(tidy_model$estimate, tidy_model$estimate),
se = list(tidy_model$std.error, tidy_model$std.error),
add.lines = lapply(1:nrow(fit_stats), function(i) unlist(fit_stats[i, ])),
omit.table.layout = "s"
)
# Out of sample fit -------------------------------------------------------
oos_preds <- models %>%
mutate(preds = map(model_obj, function(x) cv_predict.itt(x, data = cy, folds = 11))) %>%
dplyr::select(-file, -model_obj) %>%
unnest()
oos_fit <- oos_preds %>%
filter(!is.na(yhat)) %>%
rename(outcome = yname) %>%
mutate(outcome = str_replace(outcome, "itt_alleg_vt", "")) %>%
group_by(model_name, outcome) %>%
summarize(MAE = mae(y, yhat),
RMSE = rmse(y, yhat),
CRPS = mean(crps_pois(y, yhat)),
Recall = sum(yhatgt0[ygt0]) / sum(ygt0),
Precision = sum(yhatgt0[ygt0]) / sum(yhatgt0)) %>%
arrange(outcome, model_name)
write_csv(oos_fit, "output/model-fit-out-of-sample.csv")
oos_preds <- oos_preds %>%
mutate(ygt0 = as.integer(y > 0),
yhatgt0 = as.integer(yhat > 0))
with(oos_preds, foo<<-roc.curve(scores.class0 = yhatgt0[ygt0==1], scores.class1 = yhatgt0[ygt0==0], curve = TRUE))
# Compose fit plot/table --------------------------------------------------
res1 <- read_csv("output/count-model-fit.csv",
col_types = cols(
outcome = col_character(),
model_name = col_character(),
AIC = col_double(),
BIC = col_double(),
MAE = col_double(),
RMSE = col_double()
))
res1$type <- "in sample"
res2 <- read_csv("output/count-model-oos-fit.csv",
col_types = cols(
outcome = col_character(),
model_name = col_character(),
MAE = col_double(),
RMSE = col_double()
))
res2$type <- "out of sample"
res3 <- read_csv("output/xgboost-fit.csv",
col_types = cols(
outcome = col_character(),
model_name = col_character(),
MAE = col_double(),
RMSE = col_double()
))
res3$type <- "out of sample"
res <- bind_rows(res1, res2, res3)
p <- res %>%
gather(metric, value, AIC:RMSE) %>%
filter(type=="out of sample" | metric %in% c("AIC", "BIC")) %>%
mutate(model_name = factor(model_name) %>% fct_rev() %>%
fct_recode("Intercepts-only (M1)" = "mdl_base1",
"Controls only (M2)" = "mdl_base2",
"Democracy + intercepts (M1)" = "mdl_dem1",
"Democracy + controls (M2)" = "mdl_dem2",
"LJI + intercepts (M1)" = "mdl_lji1",
"LJI + democracy" = "mdl_lji2",
"legal_system" = "mdl_legal1")) %>%
ggplot(.) +
geom_point(aes(x = value, y = model_name, colour = outcome)) +
geom_path(aes(x = value, y = model_name, group = outcome, colour = outcome),
linetype = 3) +
facet_wrap(~ metric, scales = "free") +
theme_ipsum() +
labs(x = "", y = "")
p
ggsave(p, file = "output/figures/model-fit-plot.png", height = 5, width = 8)
|
9d52b6536d5a73180e0a90333b6e7a1b0eb5130e | b8b32740539350781817bc3b3088afd94351e43d | /main.R | a3065832db36c77a18cf7f9c4dbeb819bc66dbf7 | [
"MIT"
] | permissive | fubuki/R-example | 62227eb982f3f93f29ad2d2abf00259cbe67fc8e | 228448dedc07fa2a55889ae7c5f4be6725eb6b65 | refs/heads/master | 2021-01-19T15:38:54.670076 | 2014-07-08T15:58:02 | 2014-07-08T15:58:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | main.R | library(ggplot2)
data <-read.csv("C:/us-cities.csv",sep=",")
plot(mtcars$wt,mtcars$mpg);
v1 <- c(1,2,3);
v2 <- c(4,5,6);
c(v1,v2);
Sys.Date()
x <-combn(1:5,3)
y <-rnorm(1, mean=100, sd=15)
print(x)
print(y)
|
62194893d66981e0184b60b46f80a0f3ebe56616 | 7d6fc36fd109ed6e9af382feb0094b801e40b837 | /scripts/spt5_figure6-global-nuc-fuzz-occ.R | 243075583f63321c434f51098b9c68d46b7a2529 | [] | no_license | james-chuang/prospectus | 1fe19fe3ceec67640d8673382b49c80ad4b86009 | 3f5dc8da15649c454a825d29315b224973d84132 | refs/heads/master | 2020-03-22T18:57:05.085186 | 2018-10-11T16:30:25 | 2018-10-11T16:30:25 | 140,493,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,578 | r | spt5_figure6-global-nuc-fuzz-occ.R |
import = function(path, group){
read_tsv(path) %>%
transmute(occupancy = smt_value,
fuzziness = fuzziness_score,
group = group) %>%
return()
}
main = function(theme_spec,
wt_mnase_quant, spt6_mnase_quant,
annotation,
fig_width, fig_height,
pdf_out){
source(theme_spec)
df = import(wt_mnase_quant, group="non-depleted") %>%
bind_rows(import(spt6_mnase_quant, group="depleted")) %>%
mutate(group = fct_inorder(group, ordered=TRUE))
summary_df = df %>%
group_by(group) %>%
summarise(mean_occ = mean(occupancy),
sd_occ = sd(occupancy),
mean_fuzz = mean(fuzziness),
sd_fuzz = sd(fuzziness),
median_occ = median(occupancy),
median_fuzz = median(fuzziness))
fig_four_c = ggplot() +
# geom_segment(data = summary_df,
# aes(x=median_fuzz, xend=median_fuzz,
# y=0, yend=median_occ, color=group),
# alpha=0.3, linetype="dashed", size=0.3) +
# geom_segment(data = summary_df,
# aes(x=35, xend=median_fuzz,
# y=median_occ, yend=median_occ, color=group),
# alpha=0.3, linetype="dashed", size=0.3) +
geom_density2d(data = df,
aes(x=fuzziness, y=occupancy,
color=group, alpha=log10(..level..)),
na.rm=TRUE, h=c(10,7000), size=0.3, bins=6) +
# stat_bin_hex(geom="point",
# data = df,
# aes(x=fuzziness, y=occupancy, color=..count..),
# binwidth=c(1,500),
# size=0.1) +
facet_grid(.~group) +
# scale_color_viridis(option="inferno") +
# geom_point(data = summary_df,
# aes(x=median_fuzz, y=median_occ, color=group),
# size=0.5) +
scale_color_ptol(guide=guide_legend(keyheight = unit(9, "pt"),
keywidth = unit(12, "pt"))) +
scale_alpha(guide=FALSE, range = c(0.35, 1)) +
scale_x_continuous(limits = c(30, 80),
breaks = scales::pretty_breaks(n=3),
expand = c(0,0),
name = expression(fuzziness %==% std. ~ dev ~ of ~ dyad ~ positions ~ (bp))) +
scale_y_continuous(limits = c(NA, 80000),
breaks = scales::pretty_breaks(n=2),
labels = function(x){x/1e4},
expand = c(0,0),
name = "occupancy (au)") +
ggtitle("nucleosome occupancy and fuzziness") +
theme_default +
theme(panel.grid = element_blank(),
panel.border = element_blank(),
axis.line = element_line(size=0.25, color="grey65"),
axis.title.x = element_text(size=7),
legend.position = c(0.99, 0.99),
plot.margin = margin(11/2, 11/2, 0, 0, "pt"))
ggsave(pdf_out, plot=fig_four_c, width=fig_width, height=fig_height, units="cm")
}
main(theme_spec = snakemake@input[["theme"]],
wt_mnase_quant = snakemake@input[["wt_mnase_quant"]],
spt6_mnase_quant = snakemake@input[["spt6_mnase_quant"]],
fig_width = snakemake@params[["width"]],
fig_height = snakemake@params[["height"]],
pdf_out = snakemake@output[["pdf"]])
|
0516997f11f4b290c3d723a7b4a99c45b6cb40b8 | e534f1251b0611b7e44d130fc5ce00e9e8b28916 | /R/ross.R | faa722e9c0fb59ef1dbd631480743fa4bd790eb0 | [
"MIT"
] | permissive | gahoo/ross | 9116c960bd74fcbb68686256fc6787063c429cfb | d613630ecb2819e4be38679d699239fe944e0393 | refs/heads/master | 2021-01-22T19:05:07.001927 | 2017-08-03T09:16:51 | 2017-08-03T09:16:51 | 85,161,274 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,895 | r | ross.R | #' ross: ross is an aliyun OSS API Wrapper for R.
#'
#' The ross package provides the basic OSS API. includes:
#' Bucket, Object, STS
#'
#' @section Foo functions:
#' The foo functions ...
#'
#' @docType package
#' @name ross
NULL
.state <- new.env(parent=emptyenv())
.state$location <- list()
.state$multipart <- list()
.state$upload <- list()
.state$download <- list()
.state$acl <- list()
.state$CDN <- list()
# Core dispatcher for all OSS API calls: builds the endpoint host, request
# headers and the canonicalized resource string, then delegates signing and
# execution to `sign.func` (.sign.header for header-signed requests,
# .sign.url for pre-signed URLs).
.api.request <- function(sign.func, method, ossresource=NULL,
                         bucketname=NULL, Location=NULL, ...,
                         header=NULL, path=NULL, query=NULL) {
  # Endpoint host honours the internal/VPC network options.
  host <- .build.host(bucketname, Location=Location,
                      internal=getOption('ross.internal', FALSE),
                      vpc=getOption('ross.vpc', FALSE))
  .headers <- .build.header(header)
  ossheader <- .build.ossheader(header)
  # Object-level requests append the URL-encoded object path to the host.
  if(is.null(path)){
    url <- host
  }else{
    url <- URLencode(httr::modify_url(host, path=path))
  }
  # Canonicalized resource used in the OSS signature, unless supplied by caller.
  if(is.null(ossresource)){
    ossresource <- .build.ossresource(bucketname, path, query)
  }
  sign.func(method, url, ossresource,
            .headers=.headers,
            ossheader=ossheader,
            query=query,
            ...)
}
# Thin HTTP-verb wrappers around .api.request. The *.header.* family signs
# requests via the Authorization header (.sign.header); the *.url.* family
# produces signed URLs instead (.sign.url).
.api.header.request <- function(...) {
  .api.request(.sign.header, ...)
}
# Header-signed PUT request.
.api.put.header.request <- function(...){
  .api.header.request(method = 'PUT', ...)
}
# Header-signed POST request.
.api.post.header.request <- function(...){
  .api.header.request(method = 'POST', ...)
}
# Header-signed GET request.
.api.get.header.request <- function(...){
  .api.header.request(method = 'GET', ...)
}
# Header-signed HEAD request (metadata only, no body).
.api.head.header.request <- function(...){
  .api.header.request(method = 'HEAD', ...)
}
# Header-signed DELETE request.
.api.delete.header.request <- function(...){
  .api.header.request(method = 'DELETE', ...)
}
# URL-signed request (signature carried in the query string).
.api.url.request <- function(...) {
  .api.request(.sign.url, ...)
}
# URL-signed GET request.
.api.get.url.request <- function(...){
  .api.url.request(method = 'GET', ...)
}
|
1deda55bb765adfb858111a9bfcda2bfa8adcfdd | 9796bc96e36ba632506e3dcaf5efa0dc09acf451 | /run_analysis.R | 3fa963b944ef4ead22568a5919041c2a9c541582 | [] | no_license | engrasa/Getting_Cleaning_Data | df71da4d740e1566cc6a3ee8a3478e9985102450 | 7dd0b87f217eac496b07d0fe02a0dc6d1f2df02e | refs/heads/master | 2021-05-11T00:40:25.197370 | 2018-01-21T05:28:05 | 2018-01-21T05:28:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,867 | r | run_analysis.R | ##LOAD USEFUL R PACKAGES
library(plyr);library(dplyr);library(tidyr);library(knitr);library(data.table)
##CREATE A DIRECTORY TO STORE THE FILES FOR MODULE 3 PROJECT
if(!file.exists("./Module 3 Project"))
{dir.create("./Module 3 Project")}
##DOWNLOAD THE FILES FROM THE UCI SITE
url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if(!file.exists("./Module 3 Project/module3project.zip"))
{download.file(url,destfile="./Module 3 Project/module3project.zip")}
##UNZIP THE DOWNLOADED FILE
if(!file.exists("./Module 3 Project/UCI HAR Dataset"))
{unzip(zipfile="./Module 3 Project/module3project.zip",exdir="./Module 3 Project")}
##CREATE A FILEPATH FOR ALL UCI HAR DATA
path <- file.path("./Module 3 Project" , "UCI HAR Dataset")
files<-list.files(path, recursive=TRUE)
##LOAD TRAINING DATASETS
x_train <- read.table(file.path(path, "train", "X_train.txt"),header = FALSE)
y_train <- read.table(file.path(path, "train", "Y_train.txt"),header = FALSE)
subject_train <- read.table(file.path(path, "train", "subject_train.txt"),header = FALSE)
##LOAD TEST DATASETS
x_test <- read.table(file.path(path, "test" , "X_test.txt" ),header = FALSE)
y_test <- read.table(file.path(path, "test" , "Y_test.txt" ),header = FALSE)
subject_test <- read.table(file.path(path, "test" , "subject_test.txt"),header = FALSE)
##LOAD LABEL DATASETS
feature_labels <- read.table(file.path(path,"features.txt"),header = FALSE)
activity_labels <- read.table(file.path(path,"activity_labels.txt"),header = FALSE)
##NAME VARIABLES
colnames(feature_labels) <- c('Index','featureName')
colnames(activity_labels) <- c('Activity','ActivityType')
colnames(x_train) <- feature_labels[,2]
colnames(y_train) <-"Activity"
colnames(subject_train) <- "SubjectID"
colnames(x_test) <- feature_labels[,2]
colnames(y_test) <- "Activity"
colnames(subject_test) <- "SubjectID"
##MERGE DATA
train <- cbind(subject_train, y_train, x_train)
test <- cbind(subject_test, y_test, x_test)
data_comp <- rbind(train, test)
#You may check the data if all columns have variables names
#You may run: View(head(data_comp,n=5)) to show first 5 rows or View(colnames(data_comp)) to see complete list of variable names
##EXTRACT MEAN and STD VARIABLES
mean_std<-feature_labels$featureName[grep("(mean|std)\\(\\)", feature_labels$featureName)]
measurement_labels<-c("SubjectID", "Activity",as.character(mean_std))
Finaldata<-subset(data_comp,select=measurement_labels)
Finaldata$Activity<-factor(Finaldata$Activity,labels=activity_labels[,2])
#You may check the data if all columns have variables names with mean() or std()
#You may run: str(Finaldata) or View(head(Finaldata,n=5)) to show first 5 rows or View(colnames(Finaldata)) to see list of selected variable names
##LABEL DATA with DESCRIPTIVE NAMES
names(Finaldata)<-gsub("[-()]", "", names(Finaldata))
names(Finaldata)<-gsub("mean", "Mean", names(Finaldata))
names(Finaldata)<-gsub("std", "Std", names(Finaldata))
names(Finaldata)<-gsub("Acc", "Accelerometer", names(Finaldata))
names(Finaldata)<-gsub("Gyro", "Gyroscope", names(Finaldata))
names(Finaldata)<-gsub("Mag", "Magnitude", names(Finaldata))
names(Finaldata)<-gsub("BodyBody", "Body", names(Finaldata))
names(Finaldata)<-gsub("^t", "Time", names(Finaldata))
names(Finaldata)<-gsub("^f", "Frequency", names(Finaldata))
#You may check the data if columns names have changed
#You may run: str(Finaldata) or View(head(Finaldata,n=5)) to show first 5 rows or View(colnames(Finaldata)) to see list of variable names
##CREATE TIDY DATASET
Tidydata<-aggregate(. ~SubjectID + Activity, Finaldata, mean)
Tidydata<-Tidydata[order(Tidydata$SubjectID,Tidydata$Activity),]
write.table(Tidydata, file = "tidydata.txt",row.name=FALSE,quote = FALSE, sep = '\t')
|
8e009a7565e40ec642ed1cd99773340f1a1d2df9 | 33c418dcf8d3703d7a4e44fa99da69d1577a6ff2 | /_functions/fPuSaCom.R | 419947659457883e80411281d7940e722f23dcf5 | [] | no_license | terrasys/CAWa_Classification | cc59a8e31cb06d77a4b6873dca55c1e9cdcd5f11 | c4c62e839eec03d7cf6319bd0e8d53a180cba4fb | refs/heads/master | 2021-10-24T07:48:36.870435 | 2019-03-23T15:53:18 | 2019-03-23T15:53:18 | 167,062,929 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,164 | r | fPuSaCom.R | featureScale <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
#-----------------------------------------------------------------------------------------------------
#Dissimiliariy test of pure samples
#-----------------------------------------------------------------------------------------------------
fPuSaCom <- function(W.DIR,
IN.DIR,
OUT.DIR,
CLASS.NAME,
PS1,
PS2,
PS1PF,
PS2PF,
TH){
#select directory with sample profiles
#-----------------------------------------------------------------------------------------------------
print(paste("Visual comparison of two pure sample sets:",PS1,"and",PS2))
#-----------------------------------------------------------------------------------------------------
ps1 <- read.table(file.path(W.DIR,IN.DIR,PS1),
dec=",",
sep=";",
header = TRUE,
check.names=FALSE)
ps1 <- ps1[order(ps1[paste(CLASS.NAME)]),]
ps2 <- read.table(file.path(W.DIR,OUT.DIR,PS2),
dec=",",
sep=";",
header = TRUE,
check.names=FALSE)
ps2 <- ps2[order(ps2[paste(CLASS.NAME)]),]
ps1
ps2
#-------------------------------------------------------------------------------
print("Plot NDVI profiles")
#-------------------------------------------------------------------------------
# NOTE(review): this section sits inside an enclosing scope whose header is
# outside this chunk.  ps1/ps2 (per-parcel NDVI tables), CLASS.NAME, W.DIR,
# OUT.DIR, PS1/PS2 (source file names), PS1PF/PS2PF (column-name prefixes)
# and na.spline() (presumably zoo::na.spline) all come from that enclosing
# scope -- confirm against the full file.

# Split each data set into one sub-table per class value.
l1.class <- split(ps1,ps1[[paste(CLASS.NAME)]])
l2.class <- split(ps2,ps2[[paste(CLASS.NAME)]])
# Open one PDF in the output directory; its name joins both input file
# names (minus their 4-character ".xxx" extensions) with "__".
setwd(file.path(W.DIR,OUT.DIR))
pdf(paste(substr(PS1,1,nchar(PS1)-4),"__",substr(PS2,1,nchar(PS2)-4),".pdf",sep=""),
    height=5,width=9)
# One plot page per class: both samples' NDVI-vs-DOY profiles overlaid.
for(c in 1:length(l1.class)){
  # Select the rows belonging to this class from each sample.
  ndvi1 <- l1.class[[c]]
  ndvi2 <- l2.class[[c]]
  # Set up an empty plot frame (points at NDVI = -1 fall below ylim and are
  # therefore invisible); axes are drawn manually below.
  DOY <- data.frame(DOY=-25:410,NDVI=-1)
  plot(DOY,
       xaxt="n",
       ylim=c(0,1),
       xlim=c(0,380),
       ylab=as.expression(bquote(italic(NDVI))),
       xlab=as.expression(bquote(italic(DOY))),
       cex.lab=1.4,
       main=paste("CLASS-ID =",l1.class[[c]]$CLASS))
  # Custom x axis: fine grey ticks every 2 days, labelled ticks every 10.
  x1 <- seq(1,365,10)
  x2 <- seq(1,360,2)
  axis(1, at=x2, col.tick="grey", las=1,labels=FALSE,cex=1.2)
  axis(1, at=x1, col.axis="black", las=1,cex=1.2)
  # Keep only the NDVI columns (those whose names contain the sample prefix).
  ndvi1 <- ndvi1[grepl(paste(PS1PF,sep=""), names(ndvi1))]
  ndvi2 <- ndvi2[grepl(paste(PS2PF,sep=""), names(ndvi2))]
  # Clamp negative NDVI values to 0.
  ndvi1[ndvi1<0] <- 0
  ndvi2[ndvi2<0] <- 0
  # Interpolate any missing values via spline (na.spline from enclosing scope).
  if(sum(is.na(ndvi1[1,]))>0){
    ndvi1[1,] <- na.spline(c(ndvi1))
  }
  if(sum(is.na(ndvi2[1,]))>0){
    ndvi2[1,] <- na.spline(c(ndvi2))
  }
  # Sample 1: collect (DOY, NDVI) pairs -- the DOY is parsed from characters
  # 3-6 of each column name (assumes names like "XXdddd..."; confirm format).
  v.doy <- data.frame(DOY=NULL)
  v.ndvi <- data.frame(NDVI=NULL)
  for(j in 1:length(ndvi1)){
    points(noquote(substr(names(ndvi1[j]),3,6)),ndvi1[1,j])
    v.doy <- rbind(v.doy,as.numeric(noquote(substr(names(ndvi1[j]),3,6))))
    v.ndvi <- rbind(v.ndvi,ndvi1[1,j])
  }
  # Smooth the sample-1 profile with a natural spline and draw it in red.
  ks <- spline(v.doy[[1]],v.ndvi[[1]], method='n', n=length(ndvi1)*10)
  lines(ks, col="red",lwd=1.5,lty=5)
  # Sample 2: same collection and plotting, drawn in blue.
  v.doy <- data.frame(DOY=NULL)
  v.ndvi <- data.frame(NDVI=NULL)
  for(j in 1:length(ndvi2)){
    points(noquote(substr(names(ndvi2[j]),3,6)),ndvi2[1,j])
    v.doy <- rbind(v.doy,as.numeric(noquote(substr(names(ndvi2[j]),3,6))))
    v.ndvi <- rbind(v.ndvi,ndvi2[1,j])
  }
  ks <- spline(v.doy[[1]],v.ndvi[[1]], method='n', n=length(ndvi2)*10)
  lines(ks, col="blue",lwd=2,lty=5)
  # Legend: red = sample 1 file, blue = sample 2 file.
  legend("topleft",
         legend = c(paste(PS1), paste(PS2)),
         col = c("red","blue"),
         lty = c(5,5),
         lwd=c(2,2),
         bty = "y",
         pt.cex = 2,
         cex = 1.2,
         text.col = "black")
}
dev.off()
}
|
da659b32eabf5652fa061918a3516df4cb5a4cbd | 5b9b9913221bf41461673a88e1e499b5624094be | /tests/testthat/test-idproxy.R | fb126703ac04a8424520aa79a489e794adc65450 | [
"Apache-2.0"
] | permissive | patperry/r-frame | 12241f0e4845db6455b1c4d19f5ff695c8b33ad6 | 5c56110fe0bf74a0fbaeaaa74258fa338dca700f | refs/heads/master | 2020-03-06T18:27:47.334808 | 2018-04-12T17:07:09 | 2018-04-12T17:07:09 | 127,007,322 | 8 | 1 | null | null | null | null | UTF-8 | R | false | false | 739 | r | test-idproxy.R | context("idproxy")
# Plain atomic vectors are their own identity proxy (returned unchanged).
test_that("atomic", {
    expect_equal(idproxy(letters), letters)
    expect_equal(idproxy(NULL), NULL)
    expect_equal(idproxy(1:10), 1:10)
})

# Classed vector objects are proxied through xtfrm() (sort-key coercion).
test_that("vector object", {
    expect_equal(idproxy(as.hexmode(10:1)),
                 xtfrm(as.hexmode(10:1)))
})

# Non-vector objects (e.g. functions) are rejected with a classed message;
# the expectation matches the exact error text.
test_that("invalid", {
    expect_error(idproxy(sin),
                 'cannot compute idproxy for objects of class "function"')
})

# Matrices are proxied column-wise as a dataset record; the explicit
# idproxy.default() call pins the S3 default method's behavior too.
test_that("matrix", {
    x <- matrix(1:20, 4, 5)
    y <- as.dataset.record(list(1:4, 5:8, 9:12, 13:16, 17:20))
    expect_equal(idproxy(x), y)
    expect_equal(idproxy.default(x), y)
})

# Arrays of rank > 2 are unsupported and must error.
test_that("array", {
    x <- array(1, c(1, 1, 1))
    expect_error(idproxy(x), "cannot compute idproxy for rank-3 objects")
})
|
8c6c38be9f6a25336dfcd2df43b7c5d7260be072 | 5823fb5f3267251e2df08dfa72845e0604fb5ccf | /Labs/Lab 1/Lab1_SVM12.R | a8cecea24ef961a50bfd478f698a732c870554c5 | [] | no_license | Dtrain27/DataAnalytics2021_Dominic_Schroeder | 02d008e83fbcc84427e7e15f61710de0d2a133fa | 5793ae8e8bd79e3c337312dd534da17e38663559 | refs/heads/main | 2023-04-04T20:55:46.603371 | 2021-04-29T12:36:36 | 2021-04-29T12:36:36 | 333,999,939 | 0 | 0 | null | 2021-04-29T12:36:37 | 2021-01-29T01:09:08 | R | UTF-8 | R | false | false | 451 | r | Lab1_SVM12.R | # load the kernlab package
# Linear-kernel SVM demo on kernlab's `spam` data set.
library(kernlab)

# Load the spam data; attach() exposes its columns on the search path
# (kept for parity with the original lab handout, although attach() is
# generally discouraged).
data(spam)
attach(spam)

# Response: the spam/nonspam label; predictors: all 57 feature columns
# (column 58 is the label itself).
y_train <- as.factor(spam$type)
x_train <- as.matrix(spam[,-58])

# Fit a C-classification SVM with a linear (vanilla dot-product) kernel.
svm_fit <- ksvm(x_train, y_train, type="C-svc", kernel='vanilladot', C=100, scaled=c())

# Print the fitted model summary.
svm_fit

# List the slots/attributes available on the fitted object.
attributes(svm_fit)

# Support-vector coefficients, their row indices, and the intercept term.
alpha(svm_fit)
alphaindex(svm_fit)
b(svm_fit)

# Built-in visualisation of the decision boundary.
plot(svm_fit, data=x_train)
|
b4298a7c59ef46637e75b86e7c7eeee2218a56aa | 61a88247e1261f03659be3ada7c70ec7944b3ae0 | /RegressionModels/RegressionModel.R | b9b9397b44ffebf799c448319ec7e5eb57e22348 | [] | no_license | niehusst/R-DataSci | 09e844de8d5290b3bf64de3c24666d69b8cc680b | 1bf14f9624ea7459037d6d61f3dc61198eeb7721 | refs/heads/master | 2020-03-30T06:54:19.596360 | 2018-11-26T01:34:57 | 2018-11-26T01:34:57 | 150,898,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,127 | r | RegressionModel.R | #Liam Niehus-Staab
# Regression Modelling
# 11/14/18
#
# NOTE(review): the original file interleaved bare prose answers with the
# code (lines such as "y = 7.622 + 4.034x"), which are syntax errors and
# made the script impossible to parse or source().  The answers are kept
# verbatim below but turned into comments, and `=` assignments were
# changed to the idiomatic `<-`.

library(dplyr)
library(leaps)

# Data (course-provided shared CSV files).
MPG <- read.csv("~/Shared/F18MAT295/MPG.csv")
brain <- read.csv("~/Shared/F18MAT295/BrainBody.csv")

# For Monday, Nov 19: Hand in questions 1 - 6 from the lab. Submit graphs for question 6 only.
# For questions 1 - 5, do not submit graphs or code, just equations and explanations.

# Simple linear regression: predict mpg from speed.
lmtest <- lm(mpg~speed,data=MPG)
summary(lmtest)

# Residual graphs to check the validity of the model.
par(mfrow=c(2,2))
plot(mpg~speed, data = MPG)
abline(lm(MPG$mpg~MPG$speed), col="blue")
plot(lmtest$residuals~MPG$speed)
abline(h = 0)
plot(mpg~displacement, data=MPG)
abline(lm(MPG$mpg~MPG$displacement))
plot(lmtest$residuals~MPG$displacement)
abline(h = 0)
# The residual plots show clear shapes (not clouds), indicating linear isn't
# a good model for this data.

# A QQ plot can also be made to analyze the residual distribution.
par(mfrow=c(1,1))
qqnorm(lmtest$residuals)
qqline(lmtest$residuals)
# Plot shows that the distribution is not normal, again indicating linear
# isn't a good model for the data.

# #### Another simple linear regression model
# 1a) Create a regression model to predict mpg from displacement,
# mpg = a + b*displacement. What is the regression equation and R^2?
linMPG <- lm(mpg ~ displacement, data = MPG)
summary(linMPG)
# Answer: y = 7.622 + 4.034x
# Answer: R^2 = .4055

# 1b) Look at residual vs. speed and residual vs. displacement plots.
# Describe any patterns in the residual plots.
plot(linMPG$residuals~MPG$speed)
plot(linMPG$residuals~MPG$displacement)
# Answer: Speed has a clear quadratic shape to its residual/speed points, so
# speed is not a good indicator for the model.  Displacement has no clear
# shape, meaning it could be a good predictor for the model.

# 1c) Describe any patterns in the residual normal probability plot.
qqnorm(linMPG$residuals)
qqline(linMPG$residuals)
# Answer: the normal probability plot shows a fairly normal distribution of
# the data.  That means that it is not breaking assumptions to try to use
# this data as a predictor in a model.

# #### Building models with more than one explanatory variable
# 2a) Conduct a regression analysis to predict mpg from speed and displacement,
# mpg = a + b*speed + c*displacement.  What is the regression equation and R^2?
linDouble <- lm(mpg ~ speed + displacement, data = MPG)
linDouble
summary(linDouble)
# Answer: y = 11.69 - .044(speed) + 4.176(displacement)
# Answer: R^2 = .5499

# 2b) Look at residual vs. speed and residual vs. displacement plots.
# Describe any patterns you see.
plot(linDouble$residuals ~ MPG$speed)
plot(linDouble$residuals ~ MPG$displacement)
# Answer: the same patterns seen before are still present; speed has a clear
# quadratic shape to its residual/speed points, so speed is not a good
# indicator for the model.  Displacement has no clear shape, meaning it
# could be a good predictor for the model.

# 2c) What does the residual normal probability plot show?
qqnorm(linDouble$residuals)
qqline(linDouble$residuals)
# The data appears to be not quite normally distributed; the points form an
# 'S' shape, with the points coming off the normal line at the top and
# bottom of the plot.

# 2d) Is mpg = a + b*speed + c*displacement a better model than
# mpg = a + b*speed?  Use the residual plots and R^2 to explain why.
# Answer: the 2-variable model has a higher R^2 value, but that is to be
# expected as there are more variables in it.  The residual plots show
# little difference between the models, and since residuals matter more for
# judging model validity, the 2-variable model isn't really much better
# than the model that included just speed.

# 3) Without creating any new models or graphs, predict which of the
# following regression equations would create a better model for your data?
# Use the residual plots in question 1) to explain why.
# mpg = a + b*speed^2 + c*displacement  or  mpg = a + b*LN(speed) + c*displacement
# Answer: since the residuals vs. speed graph showed a negative quadratic
# shape, it is smarter to use a model that squares the speed variable;
# mpg = a + b*speed^2 + c*displacement would most likely be the best choice.

# 4) Create a model with four new variables: speed^2, LN(speed),
# displacement^2, and speed*displacement. What is the regression equation
# and R^2 if all 6 terms are included in the model?  View (but do not print)
# each of the residual plots.  If a clear pattern exists in any of these
# plots, describe the pattern.
newVars <- lm(mpg ~ speed + displacement + log(speed) + I(speed*displacement) + I(speed^2) + I(displacement^(2)), data = MPG)
summary(newVars)
# R^2 = .933
# Answer: where the speed variable was present, the same parabolic shape was
# visible.  The transformations on the data appeared to have little effect
# on the overall shape.

# 5) Give the regression equation and R^2 value of the best model using some
# combination of all 6 terms.  Including all 6 variables gives the highest
# R^2, but some terms barely change it; the goal is the simplest model
# (fewest variables) without significantly reducing R^2.
newerVars <- lm(mpg ~ displacement + I(speed^2) + speed , data = MPG)
# Answer: y = displacement + speed + speed^2
# Answer: R^2 = .927

# 6a) Create a scatterplot to predict brain weight from body weight with a
# regression line, a plot of residuals versus the explanatory variable, a
# plot of residuals versus predicted values (i.e. fitted.values), and either
# a normal probability plot or a histogram of the residuals.  Describe any
# patterns you see in the residual plots.
brainlm <- lm(BrainWeightg ~ BodyWeightkg, data = brain)
plot(brainlm$residuals ~ brain$BodyWeightkg)
abline(h = 0)
plot(brainlm$residuals ~ brainlm$fitted.values)
abline(h = 0)
qqnorm(brainlm$residuals)
qqline(brainlm$residuals)
# Answer: it's hard to say whether there is a pattern in the residual plots
# because the outliers stretch the frame so much.  There may be a slight
# logarithmic pattern to the data.

# 6b) Try various transformations of the explanatory and response variables
# to create a better linear regression model.  Submit a fitted line plot and
# describe any patterns you see in the residual plots.
plot(BrainWeightg ~ BodyWeightkg + MaxLifeSpanyears, data = brain)
lmBrain <- lm(BrainWeightg ~ BodyWeightkg + MaxLifeSpanyears, data = brain)
# Residual plots.
plot(lmBrain$residuals~brain$BodyWeightkg)
abline(h = 0)
plot(lmBrain$residuals~brain$MaxLifeSpanyears)
abline(h = 0)
# Normal probability plot.
qqnorm(lmBrain$residuals)
qqline(lmBrain$residuals)
lmBrain
summary(lmBrain)
# Answer: the normal plot shows one outlier; the rest of the points closely
# follow the normal distribution line.  The residual plots show strong
# patterns: residuals vs body weight are mostly perpendicular to the abline
# (tightly packed because outliers stretch the plot), and residuals vs max
# life span show a negative linear trend crossing the abline.
284b7da33c8f94b0c272b01a2cdc71c5a8b14018 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/R.methodsS3/examples/throw.Rd.R | 16f6c469db6227149d681e2ba23942913963736c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 394 | r | throw.Rd.R | library(R.methodsS3)
### Name: throw
### Title: Throws an exception
### Aliases: throw.default throw
### Keywords: error
### ** Examples
# Draw n Bernoulli(prob) samples (0/1), throwing via R.methodsS3::throw()
# when `prob` lies outside [0, 1].
rbern <- function(n=1, prob=1/2) {
  # Guard clause: reject probabilities outside the unit interval.
  if (prob < 0 || prob > 1) {
    throw("Argument 'prob' is out of range: ", prob)
  }
  # A Bernoulli(prob) draw is just a binomial draw with size = 1.
  rbinom(n=n, size=1, prob=prob)
}
# Draw 10 Bernoulli(0.4) samples; the commented line below shows one
# example output from the original Rd example.
rbern(10, 0.4)
# [1] 0 1 0 0 0 1 0 0 1 0

# prob = 4 is out of range, so rbern() throws; the empty handler swallows
# the error so the extracted example keeps running.
tryCatch({
  rbern(10, 10*0.4)
}, error=function(ex) {})
|
c85de8180a7b89d0854e1d4a990f50e2bb28d8d5 | 6684882274e26c88e266a571ec84b785889cd2bc | /ebay_ipad_kaggle.R | cf2bbc58e0d2b14c3c666a2ee9b9ebb0d56a3853 | [] | no_license | akselix/ebay_kaggle | 6a598bef48641c2df85e40b25b0480d04f142891 | 4b5e72feb5904939c2d9096d36d2893e1c0d94fe | refs/heads/master | 2016-08-12T06:57:21.101164 | 2015-08-03T14:41:47 | 2015-08-03T14:41:47 | 51,300,156 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,114 | r | ebay_ipad_kaggle.R | # ebay_ipad_kaggle.R
# EdX - Analytics Edge - Unit 6
# 2015-07-28
#
# Kaggle competition: predict whether an eBay iPad listing sells ("sold").
# Combines the structured listing attributes with a bag-of-words model of
# the free-text description, then fits logistic-regression and
# random-forest classifiers and writes submission files.

# SETTINGS AND LIBRARIES ####
# NOTE(review): hard-coded, machine-specific working directory; kept for
# parity with the original script.
setwd('/Users/r2/MOOC/Analytics Edge - MITx/Kaggle')
library(dplyr)
library(caret)
library(randomForest)
library(tm)

# GET AND CLEAN DATA ####
# Load data (keep strings as characters for the text processing below).
rawTrain <- tbl_df(read.csv('eBayiPadTrain.csv', stringsAsFactors = FALSE))
rawTest <- tbl_df(read.csv('eBayiPadTest.csv', stringsAsFactors = FALSE))

# Put description as the last variable to make the data easier to look at.
train <- select(rawTrain, 2:11, 1)
test <- select(rawTest, 2:10, 1)

# Convert the categorical variables to factors.  (Inside mutate() the bare
# column names already refer to the data frame, so the redundant `train$` /
# `test$` prefixes of the original were dropped.)
train <- mutate(train,
                sold = as.factor(sold),
                biddable = as.factor(biddable),
                condition = as.factor(condition),
                cellular = as.factor(cellular),
                carrier = as.factor(carrier),
                color = as.factor(color),
                storage = as.factor(storage),
                productline = as.factor(productline)
)

test <- mutate(test,
               biddable = as.factor(biddable),
               condition = as.factor(condition),
               cellular = as.factor(cellular),
               carrier = as.factor(carrier),
               color = as.factor(color),
               storage = as.factor(storage),
               productline = as.factor(productline)
)

# BUILD NEW FEATURES ####
# Number of characters in the listing's description.
train <- mutate(train, nchar = nchar(description))
test <- mutate(test, nchar = nchar(description))

# Indicator: does the listing have a description at all?
train <- mutate(train, hasDescription = ifelse(nchar == 0, 0, 1))
test <- mutate(test, hasDescription = ifelse(nchar == 0, 0, 1))

# CORPUS FROM TEXTUAL DATA ####
# Build one corpus over train + test so both share the same vocabulary.
trainDescription <- as.matrix(train$description)
testDescription <- as.matrix(test$description)
corpus <- Corpus(VectorSource(c(trainDescription, testDescription)))

# Pre-process text: lower-case, strip punctuation and stopwords, stem.
corpus <- tm_map(corpus, content_transformer(tolower)) ; corpus <- tm_map(corpus, PlainTextDocument)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, removeWords, stopwords('english'))
corpus <- tm_map(corpus, stemDocument)

# Document-term matrix; drop terms absent from 99% of documents.
dtm <- DocumentTermMatrix(corpus)
sparseDtm <- removeSparseTerms(dtm, 0.99)
words <- as.matrix(sparseDtm)
rownames(words) <- NULL
words <- as.data.frame(words)
colnames(words) <- make.names(colnames(words))

# Split the bag of words back into the train and test rows (the corpus was
# built by concatenating train first, then test).
trainWords <- head(words, nrow(train))
testWords <- tail(words, nrow(test))

# CHOOSE VARIABLES TO USE IN THE MODEL ####
trainMod <- select(train, -description)
trainMod <- cbind(trainMod, trainWords)
testMod <- select(test, -description)
testMod <- cbind(testMod, testWords)

# BUILD MODELS ####
trainMod <- as.data.frame(trainMod)
testMod <- as.data.frame(testMod)

# Logistic regression on all features.
glmMod <- glm(sold ~ ., data = trainMod, family = binomial)
glmPredictTrain <- predict(glmMod, newdata = trainMod, type = 'response')
# BUG FIX: the original tabulated `glmPredict`, which is only defined much
# later (for the test set), so this line errored with "object not found".
table(train$sold, glmPredictTrain > 0.5)

# Stepwise (AIC) variable selection on the logistic model.
glmStep <- step(glmMod)
glmPredictStepTrain <- predict(glmStep, newdata = trainMod, type = 'response')
table(train$sold, glmPredictStepTrain > 0.5)

# Random forest model (caret wrapper).
rfMod <- train(sold ~ ., data = trainMod, method = 'rf')
rfPredictTrain <- predict(rfMod, newdata = trainMod)
# BUG FIX: was `rfPredict`, also undefined at this point.
table(train$sold, rfPredictTrain)

# Final predictions on the test set.
glmPredict <- predict(glmMod, newdata = testMod, type = 'response')
glmPredictStep <- predict(glmStep, newdata = testMod, type = 'response')
rfPredict <- predict(rfMod, newdata = testMod, type = 'prob')

# CREATE SUBMISSION FILE
glmSubmission = data.frame(UniqueID = test$UniqueID, Probability1 = glmPredictStep)
write.csv(glmSubmission, "GLMSubmissionDescriptionLog.csv", row.names=FALSE)

# BUG FIX: with type = 'prob', rfPredict is a two-column data frame (one
# column per class level, "0" then "1"); the original bound both columns
# into the submission.  Column 2 is P(sold == 1).
rfSubmission = data.frame(UniqueID = test$UniqueID, Probability1 = rfPredict[, 2])
write.csv(rfSubmission, "SubmissionDescriptionLog.csv", row.names=FALSE)
|
f6935155ff5d1eae9999a6ceaa9fbae67f5abed2 | 4279b1b5ee6da23549b9495d872dd733227c9680 | /tests/testthat/test_rank_variants.R | a4adc258e6822e1d75faf024244e967fdf1911d1 | [] | no_license | knausb/vcfR | c5fe7efe8da89f2b140abff0427175d54f3a07a0 | a160996e35437a657774e2769d43ea2ee960d24c | refs/heads/master | 2023-02-22T23:59:07.163875 | 2023-02-10T12:12:29 | 2023-02-10T12:12:29 | 13,932,575 | 229 | 68 | null | 2022-10-18T07:50:52 | 2013-10-28T17:09:04 | R | UTF-8 | R | false | false | 1,066 | r | test_rank_variants.R | #
#
# rank_variants tests.
# detach(package:vcfR, unload=T)
library(vcfR)
#library(testthat)

context("rank_variants functions")

#data(vcfR_example)


##### ##### ##### ##### #####

# Happy path: ranking a processed chromR object with one random score per
# variant should produce a rank for every variant.
test_that("rank.variants.chromR works",{
  data(vcfR_test)
  chrom <- create.chromR(name="Supercontig", vcf=vcfR_test, verbose=FALSE)
  chrom <- proc.chromR(chrom, verbose = FALSE)
  # Fixed seed so the random scores are reproducible.
  set.seed(9)
  scores <- runif(n=nrow(vcfR_test))
  chrom <- rank.variants.chromR(chrom, scores)
  expect_equal( length(chrom@var.info$rank), nrow(vcfR_test) )
})

# Error path: one more score than there are variants must be rejected with
# the documented mismatch message (matched as a pattern by expect_error).
test_that("rank.variants.chromR throws error when nvars != nscores",{
  data(vcfR_test)
  chrom <- create.chromR(name="Supercontig", vcf=vcfR_test, verbose=FALSE)
  chrom <- proc.chromR(chrom, verbose = FALSE)
  set.seed(9)
  scores <- runif(n=nrow(vcfR_test) + 1)
  msg <- "The number of variants and scores do not match."
  # The commented lines below build the full message the package emits;
  # only the leading sentence is asserted on.
#  msg <- paste(msg, " nrow(x@vcf): ", nrow(chrom@vcf), sep = "")
#  msg <- paste(msg, ", length(scores): ", length(scores), sep = "")
  expect_error(chrom <- rank.variants.chromR(chrom, scores), msg )
})
|
258b8c9d75c2ba79ab7eaee9821cd93eb69303e1 | 11d39ea7a03373eaa370c2b6c749ce3f8ef8fe89 | /ui.r | 48eb13dd0b29e0b0818bf3eb62597a8874a82a3a | [] | no_license | ikuznet1/data-analytics-website | 534c0e17ad7a49419238f5af5209161b48f3616b | 34577c7cfdd8e822881b8f6f772aa0f23a5f5446 | refs/heads/master | 2021-01-10T09:44:02.697302 | 2016-01-04T19:20:17 | 2016-01-04T19:20:17 | 45,401,740 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,549 | r | ui.r | # Define UI for application
shinyUI(navbarPage("vx:vector explorer", id = "tabs",
tabPanel("Data", value = "D",
sidebarPanel(
fileInput('data', 'Choose CSV File', accept=c('text/csv', 'text/comma-separated-values,text/plain','.csv')),
tags$hr(),
checkboxInput('header', 'Header', TRUE),
radioButtons('sep', 'Separator',
c(Comma=',',
Semicolon=';',
Tab='\t'),
','),
radioButtons('quote', 'Quote',
c(None='',
'Double Quote'='"',
'Single Quote'="'"),
'"'),
tags$hr(),
selectInput(inputId = "colormap",
label = "Select Color Scheme",
list("Blue" = "Blues",
"Blue-Purple" = "BuPu",
"Blue-Green" = "BuGn",
"Green-Blue" = "GnBu",
"Green" = "Greens",
"Grey" = "Greys",
"Orange" = "Oranges",
"Orange-Red" = "OrRd",
"Purple-Blue" = "PuBu",
"Purple-Blue-Green" = "PuBuGn",
"Purple-Red" = "PuRd",
"Purple" = "Purples",
"Red-Purple" = "RdPu",
"Red" = "Reds",
"Yellow-Green" = "YlGn",
"Yellow-Green-Blue" = "YlGnBu",
"Yellow-Orange-Brown" = "YlOrBr",
"Yellow-Orange-Red" = "YlOrRd"))
),
mainPanel(
dataTableOutput(outputId="table")
)
),
tabPanel("Data Heatmap", value = "HM",
sidebarPanel(
selectInput(inputId = "heatmap_type",
label = "Select",
list("Raw Data" = "raw_heatmap",
"Z-scores" = "zscores_heatmap",
"Quantiles" = "quantiles_heatmap",
"Ranks" = "rank_heatmap")),
sliderInput(inputId = "num_bin_data_heatmap", label = "Number of Color Bins", min=2, max=16, value=4, step = 1)
),
mainPanel(
plotOutput("data_heatmap", width = "100%",height = "1800px")
)
),
tabPanel("Marginal Distributions", value = "MD",
# Sidebar with a slider input for number of observations
sidebarPanel(
uiOutput("marginal_column"),
selectInput(inputId = "show_type",
label = "Select",
list("Histogram" = "hist",
"Kernel Density" = "kd",
"Combined" = "comb"))
),
# Show a plot of the generated distribution
mainPanel(
#includeHTML("graph.js")
#reactiveBar(outputId = "perfbarplot")
plotOutput("MarginalPlot")
)
),
tabPanel("Outlier Analysis", value = "OA",
sidebarPanel(
sliderInput(inputId = "pval", label = "Rejection P-Value", min=0, max=10, value=5, step = 1),
dataTableOutput(outputId="outlier_info")
),
mainPanel(
plotOutput("Outliers")
)
),
tabPanel("Correlation Analysis", value = "CA",
sidebarPanel(
checkboxInput('rmout_corr', 'Remove Outliers', TRUE),
selectInput(inputId = "corr_type",
label = "Select Scaling",
list("Raw Data" = "raw_corr",
"Z-scores" = "zscores_corr",
"Quantiles" = "quantiles_corr",
"Ranks" = "rank_corr")),
selectInput(inputId = "correlation_dropdown",
label = "Select Metric",
list("Pearson's Correlation" = "p_corr",
"Distance Metric" = "dist_met"))
),
mainPanel(
#includeHTML("graph.js")
plotOutput("Corr", width = "150%",height = "1200px")
)
),
tabPanel("Mean Vector", value = "MV",
sidebarPanel(
checkboxInput('rmout_mean', 'Remove Outliers', TRUE),
selectInput(inputId = "mean_type",
label = "Select Type of Plot",
list("Scatter", "Scatter with error bars", "Box Plot")
),
selectInput(inputId = "mean_pp_type",
label = "Select",
list("Raw Data" = "raw_mean",
"R-scores" = "rscores_mean"))
),
mainPanel(
plotOutput("Mean_o", height = "800px", dblclick = "plot1_dblclick",
brush = brushOpts(
id = "plot1_brush",
resetOnNew = TRUE))
)
),
tabPanel("Clustering", value = "C",
fluidPage(
plotOutput("Clust"),
fluidRow(
column(4,
wellPanel(
sliderInput(inputId = "num_clust", label = "Number of Clusters", min=1, max=20, value=3, step = 1),
checkboxInput('rmout', 'Remove Outliers', TRUE),
selectInput(inputId = "embed_type",
label = "Select Dimensionality Reduction Technique",
list("PCA","t-SNE")
),
selectInput(inputId = "clust_pp_type",
label = "Select",
list("Raw Data" = "raw_pp",
"Z-scores" = "zscores_pp",
"Quantiles" = "quantiles_pp",
"Ranks" = "rank_pp"))
)
),
column(8,
plotOutput("Scree")
)
)
)
# sidebarPanel(
# plotOutput("Scree")
# ),
# mainPanel(
# sliderInput(inputId = "num_clust", label = "Number of Clusters", min=1, max=20, value=3, step = 1),
# plotOutput("Clust")
# )
)
))
|
15a04280a9236e9eacf6a923c7691e359750f59b | 922d6910dbc05bf2cd573f0a798e7d81d772b68b | /01-LogicaDifusa.R | d1f466d6eeedda59f8173f6fbeaa7c5fc7de6de5 | [
"MIT"
] | permissive | sudo-ninguem/R-LogicaDefusa | 18fc825ef9fb5b40af99e8fd24c7fb7e194fbddc | 00dd1783eb0e51e8d01b5bbb11b64d7617f8dbe3 | refs/heads/main | 2022-12-17T12:01:06.525383 | 2020-09-19T14:52:34 | 2020-09-19T14:52:34 | 296,890,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,930 | r | 01-LogicaDifusa.R | ############################### LOGICA DIFUSA #############################################
## Utilizando a lógica difusa vamos aperfeiçoar um sistema especialista para diagnostico de asma
install.packages("sets", dependencies = T )
## O pacote para logica difusa em R é o (sets) além disso vamos baixar as dependencias do pacote
library(sets)
## Para fins didaticos e testar a lógica difusa vamos usar um caso de diagnostico de asma
sets_options("universe", seq(1, 100, 1))
## neste comando estamos criando um (universo) de possíbilidades, póderiamos criar diversos universos
## mas estamos criando 1 universo que vai até 100 e vai de 1 em 1 (1,100,1)
variaveis <- set(
Frequencia = fuzzy_partition(varnames = c( MenDSemanas = 30, MaiDSemanas = 60, Diario = 70, Continuo=90), radius=20, FUN = fuzzy_cone),
SABA = fuzzy_partition(varnames = c(MenDSemanas= 20, MaiDSemanas = 30, Diario = 70, DuasxDia=90), sd = 10),
DebitoExp = fuzzy_partition(varnames = c(CinqOiten = 20, TrintTCinqCin = 30, MaisTrintT = 70), sd = 10),
Classificacao = fuzzy_partition(varnames = c(Moderada = 20, AgudaGrave=40 , RiscoVida = 60), sd=10)
)
## Aqui nos estamos criando nossas váriaveis para o caso ficticio de diagnostico de asma sendo que estes dados também são ficticios
## O importante não é saber o que cada cado significa, mas basicamente são:
## Frequencia = Quantidade de vezes que uma pessoa da crise de asma na semana
## SABA = Quantidade de vezes que a pessoa usa a bombinha (O remédio chama SABA) por semana
## DebitoExp = Quantidade de falta de oxigêngio que a pessoa tem na semana
## Classificação = A classificação da asma da pessoa com base nas váriaveis anteriores
## Perceba que estamos criando as váriaveis na função (set) e com o método (fuzzy_partition)
regras <-
set(
fuzzy_rule( Frequencia %is% MenDSemanas && SABA %is% MenDSemanas && DebitoExp %is% CinqOiten, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% MenDSemanas && SABA %is% MenDSemanas && DebitoExp %is% TrintTCinqCin, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% MenDSemanas && SABA %is% MenDSemanas && DebitoExp %is% MaisTrintT, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% MenDSemanas && SABA %is% MaiDSemanas && DebitoExp %is% MaisTrintT, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% MaiDSemanas && SABA %is% MenDSemanas && DebitoExp %is% CinqOiten, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% MaiDSemanas && SABA %is% MenDSemanas && DebitoExp %is% MaisTrintT, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% MaiDSemanas && SABA %is% MaiDSemanas && DebitoExp %is% CinqOiten, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% MaiDSemanas && SABA %is% MaiDSemanas && DebitoExp %is% TrintTCinqCin, Classificacao %is% Moderada ),
fuzzy_rule( Frequencia %is% Diario && SABA %is% Diario && DebitoExp %is% TrintTCinqCin, Classificacao %is% AgudaGrave ),
fuzzy_rule( Frequencia %is% Diario && SABA %is% Diario && DebitoExp %is% CinqOiten, Classificacao %is% AgudaGrave ),
fuzzy_rule( Frequencia %is% Diario && SABA %is% DuasxDia && DebitoExp %is% TrintTCinqCin, Classificacao %is% AgudaGrave ),
fuzzy_rule( Frequencia %is% Diario && SABA %is% DuasxDia && DebitoExp %is% MaisTrintT, Classificacao %is% AgudaGrave ),
fuzzy_rule( Frequencia %is% Continuo && SABA %is% Diario && DebitoExp %is% TrintTCinqCin, Classificacao %is% RiscoVida ),
fuzzy_rule( Frequencia %is% Continuo && SABA %is% Diario && DebitoExp %is% MaisTrintT, Classificacao %is% RiscoVida ),
fuzzy_rule( Frequencia %is% Continuo && SABA %is% DuasxDia && DebitoExp %is% TrintTCinqCin, Classificacao %is% RiscoVida ),
fuzzy_rule( Frequencia %is% Continuo && SABA %is% DuasxDia && DebitoExp %is% MaisTrintT, Classificacao %is% RiscoVida )
)
## Aqui estamos estabelecendo as regras para dizer quando uma asma é grave, media moderada etc.
## Aqui também estamos usando dados ficticios e eles não são importantes em si, é só para completar o exemplo
## Mas perceba que essas regras também são criadas usando a função (set) é o método (fuzzy_rule)
## Perceba que ao criarmos váriaveis usamos o método (fuzzy_partition) e ao criar as regras usámos o método (fuse_rule)
sistema <-fuzzy_system(variaveis, regras)## Aqui estamos criando nosso sistema passando as váriaveis e as regras
sistema ## Com o nosso sistema criado podemos ver ele agora e ele vai trazer nada mais nada menos que as variaveis e regras
plot(sistema) ## Podemos gerar um grafico com o nosso sistema para ficar mais visível
inferencia <- fuzzy_inference(sistema, list(Frequencia = 80, SABA = 70, DebitoExp = 80))
## Uma vez criado o nosso (sistema) podemos fazer inferencias com base em (casos) imagine então
## Um paciente que apresentou estes dados (frequencia 80, saba 70, debitoExp = 80)
## Sendo assim criamos uma váriavel e usando o método (fuzzy_inference) passamos como parametro
## O nosso sistema e em forma de lista o (caso) a ser analisado
inferencia ## Assim podemos ver a inferencia do caso específico
plot(inferencia) ## Bem como gerar um grafico dele
gset_defuzzify(inferencia, "centroid") ## Após tudo feito podemos gerar também um valor central
## Para isso usamos o método (gset_defuzzify) passando como parametro a nossa inferencia e "centroid"
## Para obtermos um valor central o método mais utilizado é o "centroid"
plot(sistema$variables$Classificacao)
lines(inferencia, col= "blue", lwd=4)
## Por fim estamos gerando um grafico com o nosso sistema variaveis e classificação tudo junto
## E passando a nossa inferencia como uma linha da cor azul e grossura 4 sobre o grafico
## Sendo assim obteremos, no nosso (sistema), a visualização da situação da nossa (inferencia)
sets_options("universe", NULL) ## Por fim estamos limpando nosso universo |
b9ac8ae8528631a7b386013d818d98aa27c20cab | 92346d2a138b36073bf83230f20b9f0a0f6ce9b7 | /weekend project.R | ddf5461ecf9cfbc3cdf7e518d2107cb9de15b853 | [] | no_license | ShaimaM/Weekend-Project- | 4d439faa0d6e128cf430f58c53a3432ccd61f79b | f52589b28095dacb2c2ed00563bbcfd50c50dd5b | refs/heads/main | 2023-02-06T17:41:41.575144 | 2020-12-14T02:34:06 | 2020-12-14T02:34:06 | 321,211,544 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,629 | r | weekend project.R | # Cost for adults and children
# Simulate one week of box-office revenue for a 5-screen movie theater.

# Ticket prices for adults and children.
ticket_cost <- 60
ticket_cost_child <- 30

# List 5 of your favorite movies (one movie per screen).
movies <- c('Toy Story', 'Another Round', 'captain phillips',
            'once upon a time','The Assistant')
#n_movies <- 5

# How many screens does the theater have? (assume 1 per movie)
screens <- 5
# How many seats does each auditorium hold.
seats <- 100

week_days <- rep(0, 7)  # Accumulated revenue for each day of the week.

# Iterate through the week.
for (i in seq_along(week_days)) {
  # Running revenue total for the day, summed over all screens.
  Total_revenue <- 0

  # BUG FIX: the original looped over 1:length(screens) -- a single
  # iteration, since `screens` is a length-1 vector -- instead of once per
  # screen.
  for (j in seq_len(screens)) {
    # Random audience split: between 1 and `seats` adults, then children
    # fill some of the remaining seats.
    visitors_adults <- sample.int(seats, 1)
    remaining_seats <- seats - visitors_adults
    # BUG FIX: sample(0, 1) errors when the screen sells out to adults;
    # also allow a screening with zero children.
    visitors_children <- if (remaining_seats > 0) sample(0:remaining_seats, 1) else 0

    # Revenue for this screening, added to the day's running total.
    Total_revenue_adult <- visitors_adults * ticket_cost
    Total_revenue_children <- visitors_children * ticket_cost_child
    Total_revenue <- Total_revenue_adult + Total_revenue_children + Total_revenue
    print(Total_revenue)
  }
  week_days[i] <- Total_revenue
}

print(week_days)

# which.max() already returns a single index, so the original max() wrapper
# was redundant.
highest_revenue_day <- which.max(week_days)
cat("Revenue was the highest in the day:", highest_revenue_day)

week_day <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
barplot(week_days, names.arg = week_day, col = "lavender", border = "maroon",
        xlab = "Days", ylab = "Total Revenue", main = "The Week Revenue")

# NOTE(review): this orders the day *names* alphabetically, not the revenue,
# and the result is discarded when sourced; kept for parity with the
# original script.
order(week_day, decreasing=TRUE)
813cf5c83e93465fb869219172f2b0ee4bdaab12 | 0b5e64377bee24d92033157f5d091913e9e6a9bb | /R/Reversible.PRNG.CryptoHash.R | ad492c07b09be2dce3937a8ce79784434ea1de76 | [] | no_license | nishanthu/Reversible.PRNG | 01dc0d41cf0db9f22317f515c4d02bdd816cf189 | 160a9e2a0b074e0bd65a853cb504c7bb0e945441 | refs/heads/master | 2020-12-25T07:17:36.760234 | 2016-07-09T05:54:20 | 2016-07-09T05:54:20 | 62,934,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 825 | r | Reversible.PRNG.CryptoHash.R |
runif.hash.next <- function(n) {
  # Advance the reversible hash-based PRNG and return the next `n` deviates.
  #
  # Each draw increments the global counter .rprng.hash.state, hashes it with
  # xxhash32 (digest package), keeps the last .num.digits hex characters and
  # divides by .max.num so the value lands in [0, 1].
  # Initialises the global state lazily if it does not exist yet.
  if(!exists('.rprng.hash.state')) set.seed.hash()
  draw_one <- function(unused) {
    .rprng.hash.state <<- .rprng.hash.state + 1
    hex_chars <- unlist(strsplit(digest(.rprng.hash.state, "xxhash32"), ''))
    low_bits <- paste0(tail(hex_chars, .num.digits), collapse = '')
    strtoi(low_bits, 16L) / .max.num
  }
  vapply(seq_len(n), draw_one, numeric(1))
}
runif.hash.prev <- function(n = 1) {
  # Step the reversible hash-based PRNG BACKWARDS and return the previous
  # `n` deviates in [0, 1], mirroring runif.hash.next().
  #
  # n : number of deviates to rewind and return (default 1).
  #
  # BUG FIX: the original signature took no arguments yet the body read `n`,
  # so the function only worked if a global `n` happened to exist (and used
  # whatever value it held). `n` is now an explicit parameter; the default
  # of 1 keeps existing no-argument calls working.
  if(!exists('.rprng.hash.state')) set.seed.hash()
  res <- numeric(n)
  for(i in seq_len(n)) {
    # Decrement the shared counter, then map its hash to [0, 1] exactly as
    # runif.hash.next() does.
    .rprng.hash.state <<- .rprng.hash.state - 1
    res[i] <- (strtoi(paste0(tail(unlist(strsplit(digest(.rprng.hash.state, "xxhash32"),'')),.num.digits), collapse = ''), 16L) / .max.num)
  }
  res
}
set.seed.hash <- function(seed=0) {
  # Initialise the global state of the reversible hash-based PRNG.
  #
  # seed : starting position of the counter that is fed to the hash.
  #
  # Globals written (read by runif.hash.next / runif.hash.prev):
  #   .rprng.hash.state - current counter position
  #   .num.digits       - number of trailing hex digits kept per draw
  #   .max.num          - largest value of .num.digits hex digits (0xfffff)
  #
  # BUG FIX: the original computed .max.num as
  #   strtoi(paste0(c("0x", rep("f", .num.digits)), collapse = ''))
  # but strtoi() defaults to base 10, which cannot parse a "0x..." hex
  # string, so .max.num was NA and every generated deviate was NA.
  # Parsing the hex digits explicitly in base 16 yields the intended
  # 1048575L.
  .rprng.hash.state <<- seed
  .num.digits <<- 5
  .max.num <<- strtoi(strrep("f", .num.digits), base = 16L)
}
|
7e41905576c17ce50707707f87e181229b8d299f | 9d3e3c3950c4101bc863a90e69606d7c7d03a4e9 | /chilling/07_fill_in_paper_details_Springer_2_Attempt200_ObsHist/ugly_lines/01_median_plot.R | 637edb762ee6c50f42dab08e31429c19f85271ba | [
"MIT"
] | permissive | HNoorazar/Ag | ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee | 24fea71e9740de7eb01782fa102ad79491257b58 | refs/heads/main | 2023-09-03T18:14:12.241300 | 2023-08-23T00:03:40 | 2023-08-23T00:03:40 | 146,382,473 | 3 | 6 | null | 2019-09-23T16:45:37 | 2018-08-28T02:44:37 | R | UTF-8 | R | false | false | 5,664 | r | 01_median_plot.R | #
#
# This is a copy of the same file from /Users/hn/Documents/00_GitHub/Ag/chilling/06_fill_in_paper_details
# which includes more scripts!
#
# NOTE(review): rm(list=ls()) wipes the interactive workspace; discouraged in
# shared scripts, but it is clearly intentional here.
rm(list=ls())
library(data.table)
library(dplyr)
library(ggpubr)
library(ggplot2)
###########################################
# Input locations. NOTE(review): these paths are hard-coded to the author's
# machine and must be edited to run the script elsewhere.
post_fix <- "/0_replaced_with_367/"
data_dir <- "/Users/hn/Documents/01_research_data/Ag_Papers_data/Chill_Paper/tables/table_for_ugly_lines_plots/"
data_dir <- paste0(data_dir, post_fix)
param_dir <- "/Users/hn/Documents/00_GitHub/Ag/chilling/"
###########################################
# Median accumulated-chill table, plus the day-of-year axis map
# (day_count_since_sept -> letter_day labels) used for plotting.
data <- data.table(read.csv(file=paste0(data_dir, "medians.csv"), header=TRUE, as.is=TRUE))
DoY_map <- read.csv(paste0(param_dir, "chill_DoY_map.csv"), as.is=TRUE)
###########################################
#
# clean data
#
# Rename the per-threshold median columns to bare threshold values ("20".."75")
# so they become a clean "variable" column after melting below.
setnames(data, old=c("thresh_20_med", "thresh_25_med",
                     "thresh_30_med", "thresh_35_med",
                     "thresh_40_med", "thresh_45_med",
                     "thresh_50_med", "thresh_55_med",
                     "thresh_60_med", "thresh_65_med",
                     "thresh_70_med", "thresh_75_med"),
         new=c("20", "25", "30", "35", "40",
               "45", "50", "55", "60", "65", "70", "75"))
# Drop the near-term window and relabel the observed period as "Historical",
# then impose the display order of the time periods as an ordered factor.
data <- data %>% filter(time_period != "2006-2025") %>% data.table()
data$time_period[data$time_period == "1979-2015"] <- "Historical"
time_periods = c("Historical", "2026-2050", "2051-2075", "2076-2099")
data$time_period <- factor(data$time_period, levels = time_periods, order=TRUE)
# Long format: one row per (city, emission, time_period, threshold).
data_melt = melt(data, id=c("city", "emission", "time_period"))
# Convert the column variable to integers
data_melt[,] <- lapply(data_melt, factor)
data_melt[,] <- lapply(data_melt, function(x) type.convert(as.character(x), as.is = TRUE))
# The type.convert() pass above turned time_period back into character, so
# re-impose the ordered factor with the same level order.
time_periods = c("Historical", "2026-2050", "2051-2075", "2076-2099")
data_melt$time_period <- factor(data_melt$time_period, levels = time_periods, order=TRUE)
# Keep only the four cities of interest, in a fixed facet display order.
ict <- c("Omak", "Yakima", "Walla Walla", "Eugene")
data_melt <- data_melt %>%
             filter(city %in% ict) %>%
             data.table()
data_melt$city <- factor(data_melt$city, levels = ict, order=TRUE)
# Font sizes shared by the ggplot theme below.
tickSize = 16
axlabelSize = 18
# Common theme applied to both plots: bottom legend, bold facet strips,
# rotated x tick labels, tight margins.
the_thm <- theme(plot.margin = unit(c(t=.2, r=.2, b=.2, l=0.2), "cm"),
                 panel.border = element_rect(fill=NA, size=.3),
                 panel.grid.major = element_line(size = 0.05),
                 panel.grid.minor = element_blank(),
                 panel.spacing = unit(.25, "cm"),
                 legend.position = "bottom",
                 legend.key.size = unit(1.5, "line"),
                 legend.spacing.x = unit(.05, 'cm'),
                 panel.spacing.y = unit(.5, 'cm'),
                 legend.text = element_text(size=axlabelSize),
                 legend.margin = margin(t=0, r=0, b=0, l=0, unit = 'cm'),
                 legend.title = element_blank(),
                 plot.title = element_text(size=axlabelSize, face = "bold"),
                 plot.subtitle = element_text(face = "bold"),
                 strip.text.x = element_text(size=axlabelSize, face="bold"),
                 strip.text.y = element_text(size=axlabelSize, face="bold"),
                 axis.ticks = element_line(size=.1, color="black"),
                 axis.title.x = element_text(size = axlabelSize, face="bold", margin = margin(t=10, r=0, b=0, l=0)),
                 axis.title.y = element_text(size = axlabelSize, face="bold", margin = margin(t=0, r=10, b=0, l=0)),
                 axis.text.x = element_text(size = tickSize, face="plain", color="black", angle=90, hjust = 1),
                 axis.text.y = element_text(size = tickSize, face="plain", color="black")
                 )
# One colour per time period (Historical, then the three future windows).
# NOTE(review): the first palette is immediately overwritten by the second
# line; only the grey/blue/green/red palette is ever used.
color_ord <- c("black", "dodgerblue", "olivedrab4", "tomato1")
color_ord <- c("grey47" , "dodgerblue", "olivedrab4", "red") #
# Output directory for the saved figure (created if missing).
# NOTE(review): path is hard-coded to the author's machine; T/F should be
# spelled TRUE/FALSE.
plot_path <- "/Users/hn/Documents/00_GitHub/ag_papers/chill_paper/02_Springer_2/figure_200/"
if (dir.exists(plot_path) == F) {
  dir.create(path = plot_path, recursive = T)
}
# x_start: lower axis bound for the threshold axis; qual: dpi for ggsave.
x_start = 30
qual = 400
# Ordered factor so RCP 8.5 facets appear above RCP 4.5.
data_melt$emission <- factor(data_melt$emission,
                             levels=c("RCP 8.5", "RCP 4.5"),
                             order=TRUE)
# First orientation: threshold ("variable") on the y axis, melted median
# value on the x axis, faceted by emission scenario and city.
# NOTE(review): this version is only displayed; `plot` is overwritten by the
# swapped-axes version below, and only that second version is saved.
plot = ggplot(data_melt, aes(y=variable, x=value), fill=factor(time_period)) +
       geom_path(aes(colour = factor(time_period))) +
       facet_grid(~ emission ~ city, scales = "free") +
       labs(y = "accumulated chill portions", x = "day of year", fill = "Climate Group") +
       scale_color_manual(labels = time_periods, values = color_ord) +
       scale_x_continuous(breaks = DoY_map$day_count_since_sept, labels= DoY_map$letter_day) +
       scale_y_continuous(limits = c(20, 75), breaks = seq(20, 80, by = 10)) +
       the_thm +
       coord_cartesian(xlim = c(min(data_melt$value), max(data_melt$value)))
plot
# Second orientation: axes swapped (threshold on x, value on y), with the
# threshold axis starting at x_start. This is the figure that gets saved.
plot = ggplot(data_melt, aes(x=variable, y=value), fill=factor(time_period)) +
       geom_path(aes(colour = factor(time_period))) +
       facet_grid(~ emission ~ city, scales = "free") +
       labs(y = "accumulated chill portions", x = "day of year", fill = "Climate Group") +
       scale_color_manual(labels = time_periods, values = color_ord) +
       scale_y_continuous(breaks = DoY_map$day_count_since_sept, labels= DoY_map$letter_day) +
       scale_x_continuous(limits = c(x_start, 75), breaks = seq(x_start, 80, by = 10)) +
       the_thm +
       coord_cartesian(ylim = c(min(data_melt$value), max(data_melt$value)))
plot
# NOTE(review): qual was already set to 400 above; this reassignment is
# redundant but harmless.
qual = 400
output_name = paste0("median_DoY_thresh_start_", x_start, ".pdf")
ggsave(filename=output_name, plot=plot, device="pdf",
       path=plot_path,
       width=12, height=7, unit="in", dpi=qual)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.