blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e80df917a09fa4cbbe7d269cab8a30a7615bd6e1
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/BatchJobs/tests/testthat/test_loadResults.R
|
d83a189aee1753218c7d4832390694da7521a6e2
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,317
|
r
|
test_loadResults.R
|
# Tests for loadResults(): covers the simplify/use.names combinations,
# loading a single id, empty id sets, and naming of list elements.
context("loadResults")

test_that("loadResults", {
  registry <- makeTestRegistry()
  job.ids <- 1:2
  batchMap(registry, identity, job.ids)
  submitJobs(registry)
  waitForJobs(registry)

  # simplified, unnamed -> plain vector
  expected <- 1:2
  observed <- loadResults(registry, simplify = TRUE, use.names = "none")
  expect_equal(expected, observed)

  # unsimplified, unnamed -> unnamed list
  expected <- as.list(expected)
  observed <- loadResults(registry, simplify = FALSE, use.names = "none")
  expect_equal(expected, observed)

  # unsimplified, named by job ids
  names(expected) <- job.ids
  observed <- loadResults(registry, simplify = FALSE, use.names = "ids")
  expect_equal(expected, observed)

  # simplified, named by job ids
  expected <- unlist(expected)
  observed <- loadResults(registry, simplify = TRUE, use.names = "ids")
  expect_equal(expected, observed)

  # loading a single id keeps list shape and id-based name
  observed <- loadResults(registry, 2)
  expect_equal(list("2" = 2), observed)

  # empty id sets: named variants return a zero-length *named* list
  named.empty <- list()
  names(named.empty) <- character(0L)
  expect_equal(loadResults(registry, ids = integer(0), simplify = TRUE, use.names = "ids"), named.empty)
  expect_equal(loadResults(registry, ids = integer(0), simplify = FALSE, use.names = "ids"), named.empty)
  expect_equal(loadResults(registry, ids = integer(0), simplify = TRUE, use.names = "none"), list())
  expect_equal(loadResults(registry, ids = integer(0), simplify = FALSE, use.names = "none"), list())

  # test names of loadResults: id-derived vs. original element names
  registry <- makeTestRegistry()
  batchMap(registry, identity, letters, use.names = TRUE)
  submitJobs(registry)
  waitForJobs(registry)
  expect_equal(names(loadResults(registry, use.names = "ids")), as.character(1:26))
  expect_equal(names(loadResults(registry, use.names = "names")), letters)
})
|
39f057a626d953dfc9008db7e03461cc9561f2cd
|
ab9676fc34ff7ddd49f4cc59ee4d04f79dc9bc77
|
/pdsi/data/raster/script_pdsi.R
|
a8689c3d56ab1bce7fbe0bdb298e52188fcc803b
|
[] |
no_license
|
mossypanga/Drought_CA
|
8d8aad89f70bdc00dabd9b166a1427de0223041a
|
e44b340b8f935ddcc4f595f6328d83f89b89ef4f
|
refs/heads/master
| 2021-05-28T21:29:56.057731
| 2015-09-07T11:42:35
| 2015-09-07T11:42:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,433
|
r
|
script_pdsi.R
|
# Exploratory scratch script: downloads/stacks scPDSI drought rasters, masks
# them to a California boundary, plots them with rasterVis, and prototypes an
# animation. Several sections depend on objects created elsewhere in the
# session and some statements are incomplete -- see NOTE(review) markers.
# Load packages
library(RCurl)
library(raster)
library(rgdal)
library(maptools)
library(rasterVis)
# Set the directory
di <- '/Users/ajpeluLap/myrepos/pdsiSpatial/'
setwd(di)
#### run only a time!!!!! ########
## Raster data about scPDSI
## http://www.wrcc.dri.edu/wwdt/archive.php?folder=scpdsi
## Loop to download the raster data
#
# for (y in 1896:2014) {
# # Get the url
# url.aux <- paste('http://www.wrcc.dri.edu/monitor/WWDT/data/PRISM/scpdsi/scpdsi_',y,'_', sep='')
# for (m in 1:12){
# url <- paste(url.aux,m,'_PRISM.nc', sep='')
# filenamedest <- strsplit(url, split='http://www.wrcc.dri.edu/monitor/WWDT/data/PRISM/scpdsi/')[[1]][2]
# download.file(url, filenamedest)
# }}
#
#
# List the files within the directory
setwd(paste(di,'/test',sep=''))
pdsi<- list.files()
# Create a raster from first month of pdsi and apply a mask of california
# NOTE(review): 'ca' (presumably a California boundary object) is never
# created in this script -- assumed to exist in the workspace. TODO confirm.
r <- raster(pdsi[1])
mapas<- mask(r, ca)
# loop to create a stak of raster (with mask of california) from all the serie
for (a in pdsi[-1]){
r <- raster(a)
mapa<- mask(r, ca)
mapas<- stack(mapas, mapa)
}
writeRaster(mapas, file="mapas.nc")
# NOTE(review): brewer.pal() takes (n, name); the extra 'red' argument looks
# wrong -- verify against the RColorBrewer documentation.
cloudTheme <- rasterTheme(region=brewer.pal(n=12, 'red','Blues'))
tmp <- tempdir()
trellis.device(png, file=paste0(di, '/png/Rplot%02d.png'),
res=300, width=1500, height=1500)
levelplot(mapas, layout=c(1, 1), par.settings=cloudTheme)
# +layer(sp.lines(boundaries, lwd=0.6))
dev.off()
# set parameters
boundaries <- ca
mk <- mask(r, ca)
levelplot(mk)
# NOTE(review): a leading '+' on its own line does NOT chain onto the plot
# call above in R; this line evaluates separately (and sp.lines() is empty).
+ layer(sp.lines())
alt.USA <- getData('alt', country='USA', path='./shapefile')
alt.USA <- alt.USA[[1]]
slope.USA <- terrain(alt.USA, opt='slope')
aspect.USA <- terrain(alt.USA, opt='aspect')
hill.USA <- hillShade(slope.USA, aspect.USA, angle=45, direction=315)
levelplot(r)
hsTheme <- modifyList(GrTheme(), list(regions=list(alpha=0.5)))
levelplot(mk, panel=panel.levelplot.raster,
margin=TRUE, colorkey=FALSE) + layer(sp.lines(boundaries, lwd=0.5))
projLL <- CRS('+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0')
cftLL <- projectExtent(mk, projLL)
ext <- c(-125,30,-112,45)
boundaries <- map('worldHires',
xlim=ext[c(1,3)], ylim=ext[c(2,4)],
plot=FALSE)
boundaries <- map2SpatialLines(boundaries, proj4string=projLL)
# NOTE(review): projLCC2d is used here but only assigned below -- line-order bug.
boundaries <- spTransform(boundaries, CRS(projLCC2d))
#################
## set projection
projLCC2d <- "+proj=longlat +lon_0=-125 +lat_0=30 +lat_1=45 +datum=WGS84 +ellps=WGS84"
projection(mk) <- projLCC2d
ext <- c(-125,30,-112,45)
# NOTE(review): cftExt is not defined until the animation section below.
boundaries <- map('worldHires',
xlim=cftExt[c(1,3)], ylim=cftExt[c(2,4)],
plot=FALSE)
boundaries <- map2SpatialLines(boundaries, proj4string=projLL)
boundaries <- spTransform(boundaries, CRS(projLCC2d))
##################################################################
## Animation
##################################################################
##################################################################
## Data
##################################################################
cft <- brick('data/cft_20130417_0000.nc')
## use memory instead of file
cft[] <- getValues(cft)
## set projection
projLCC2d <- "+proj=lcc +lon_0=-14.1 +lat_0=34.823 +lat_1=43 +lat_2=43 +x_0=536402.3 +y_0=-18558.61 +units=km +ellps=WGS84"
projection(cft) <- projLCC2d
#set time index
timeIndex <- seq(as.POSIXct('2013-04-17 01:00:00', tz='UTC'), length=96, by='hour')
cft <- setZ(cft, timeIndex)
names(cft) <- format(timeIndex, 'D%d_H%H')
##################################################################
## Spatial context: administrative boundaries
##################################################################
library(maptools)
library(rgdal)
library(maps)
library(mapdata)
levelplot(mk, xlim=c(-125,-112), ylim=c(30,45))
ext <- c(-125,30,-112,45)
### THIS WORKS ####
# set my projection
mip <- projection('m0.3')
# NOTE(review): assigns the raster object itself as its own projection --
# probably meant 'projection(mk) <- mip'. Verify.
projection(mk) <- mk
levelplot(mk, xlim=c(-125,-112), ylim=c(30,45))
projLL <- CRS('+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0')
cftLL <- projectExtent(cft, projLL)
cftExt <- as.vector(bbox(cftLL))
boundaries <- map('worldHires',
xlim=cftExt[c(1,3)], ylim=cftExt[c(2,4)],
plot=FALSE)
boundaries <- map2SpatialLines(boundaries, proj4string=projLL)
boundaries <- spTransform(boundaries, CRS(projLCC2d))
##################################################################
## Producing frames and movie
##################################################################
cloudTheme <- rasterTheme(region=brewer.pal(n=9, 'Blues'))
tmp <- tempdir()
trellis.device(png, file=paste0(tmp, '/Rplot%02d.png'),
res=300, width=1500, height=1500)
# NOTE(review): the levelplot() call below is never closed; everything down to
# the stray parentheses at the end of the file parses as its arguments, so
# this section is syntactically broken as written.
levelplot(mk, par.settings=cloudTheme,
## xlim and ylim to display a smaller region
levelplot(mk, xlim=c(179000, 181000), ylim=c(329500, 334000))
layer(sp.lines(boundaries, lwd=0.6))
dev.off()
old <- setwd(tmp)
## Create a movie with ffmpeg using 6 frames per second a bitrate of 300kbs
movieCMD <- 'ffmpeg -r 6 -b 300k -i Rplot%02d.png output.mp4'
system(movieCMD)
file.remove(dir(pattern='Rplot'))
file.copy('output.mp4', paste0(old, 'cft.mp4'), overwrite=TRUE)
setwd(old)
##################################################################
## Static image
##################################################################
pdf(file="cft.pdf")
levelplot(r, layers=25:48, layout=c(6, 4),
par.settings=cloudTheme,
names.attr=paste0(sprintf('%02d', 1:24), 'h'),
panel=panel.levelplot.raster) +
layer(sp.lines(boundaries, lwd=0.6))
dev.off()
mapita <- mask(r, ca)
plot(mapita, xlim =c(-125, -112), ylim =c(30, 45))
ogrInfo(dsn='/shapefile/USA_adm1.shp', layer='USA_admn1')
P4S.latlon <- CRS("+proj=longlat +datum=WGS84")
hrr.shp <- readShapePoly("HRR_Bdry", verbose=TRUE, proj4string=P4S.latlon)
plot(hrr.shp)
# NOTE(review): 'mas', 'a' and 'california' below are undefined at this point;
# this tail section looks like pasted scratch code.
mapita <- mas
ras<- raster (a)
mapa<- mask (ras, california)
mapas<- stack (mapas, mapa)
#list all the bil files to open, open, crop and make a stack
# name<- list.files ("C:/Users/visitor/Sara/Drought/ppt", pattern="bil.bil")
# NOTE(review): 'name' relies on the commented-out list.files() call above.
name<- name [seq(1, length(name), 2)]
ras<- raster (name[1])
mapas<- mask (ras, california)
for (a in name [-1]){
ras<- raster (a)
mapa<- mask (ras, california)
mapas<- stack (mapas, mapa)
}
# NOTE(review): stray closing parentheses -- leftover from the unclosed
# levelplot() call above.
col = heat.colors(length(seq(-6, 6, by = 1))))
)
|
644d40c31f7cf9acca5ee92603acb97616502266
|
2465a334c91859bf40acfaa03af86fb48492df68
|
/machine_learning/caret_package/question_one.R
|
74c65234cfabc7f6bd572ec3a6eb9a053edc9974
|
[] |
no_license
|
dunagan5887/datasciencecoursera
|
987415f477e7d580902a2936414c607c8983e77c
|
be76a8117d5a715abd093482d6f991b8df560652
|
refs/heads/master
| 2021-01-21T05:00:25.808625
| 2016-07-15T21:17:09
| 2016-07-15T21:17:09
| 42,409,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 246
|
r
|
question_one.R
|
# Practical Machine Learning (caret) quiz, question 1: split the Alzheimer's
# disease data 50/50 into stratified training and test sets.
library(AppliedPredictiveModeling)
library(caret)

data(AlzheimerDisease)

# Combine outcome and predictors into a single modelling data frame.
adData <- data.frame(diagnosis, predictors)

# createDataPartition() stratifies the split on the outcome; list = FALSE
# returns a plain index matrix suitable for row subsetting.
trainIndex <- createDataPartition(diagnosis, p = 0.50, list = FALSE)
training <- adData[trainIndex, ]
testing <- adData[-trainIndex, ]
|
6543f8095159610e4fbac0c4118c9f2a343d446b
|
f5d7971c957884f67ada8002df185ab51fbdaa45
|
/scripts/visualize/visualize-doy.R
|
0729bdac060d3d7d3ec49dc3a52e74c8789c3bba
|
[
"CC0-1.0"
] |
permissive
|
mwernimont/gages-through-ages
|
f196decc52c03efafadd0a258be02212ac070d2b
|
51e216a09a1775120f74437f271cabbaccc7e719
|
refs/heads/master
| 2021-01-24T18:27:16.895006
| 2017-03-10T22:36:51
| 2017-03-10T22:36:51
| 84,447,844
| 0
| 0
| null | 2017-03-09T13:56:53
| 2017-03-09T13:56:53
| null |
UTF-8
|
R
| false
| false
| 2,190
|
r
|
visualize-doy.R
|
# Build the day-of-year streamflow SVG figure for one site: an axes-only base
# plot, one flow polyline per year overlaid on identical axes, plus a
# pre-rendered "zoomer" hydrograph appended below. Writes the assembled SVG
# to viz[["location"]].
visualize.doy <- function(viz = getContentInfo(viz.id = "doy-NM")){
  library(svglite)
  library(dplyr)
  library(xml2)
  # Daily flow records and a pre-rendered "zoomer" SVG fragment.
  daily <- readData(viz[["depends"]][["daily"]])
  zoomer <- readData(viz[["depends"]][["zoomer"]])
  zoomer.xml <- read_xml(zoomer)
  # Empty base plot: fixed day-of-year/flow axes that all year polylines share.
  doy_svg = svglite::xmlSVG({
    par(omi=c(0,0,0,0), mai=c(0.5,0.75,0,0),las=1, xaxs = "i")
    plot(1, type="n", xlab="", frame.plot=FALSE,
         ylab="Cubic Feet Per Second", mgp=c(2.5,0.25,0),
         xlim=c(0, 366), ylim=c(0, 16000),xaxt="n")
    # Month abbreviations centered mid-month; tick marks at month boundaries.
    axis(1, at=c(15,46,74,105,135,166,196,227,258,288,319,349),
         label = month.abb, tick = FALSE)
    axis(1, at = c(32,60,91,121,152,182,213,244,274,305,335),
         tick=TRUE, label=FALSE)
  }, height=5, width=5.083333)
  doy_svg <- clean_up_svg(doy_svg, viz)
  # Render one year's flow as a bare polyline on the same axis limits as the
  # base plot, so the extracted polyline overlays correctly.
  grab_spark <- function(vals){
    x = svglite::xmlSVG({
      par(omi=c(0,0,0,0), mai=c(0.5,0.75,0,0),
          las=1, mgp=c(2.5,0.25,0), xaxs = "i")
      plot(vals, type='l', axes=F, ann=F,
           xlim=c(0, 366), ylim=c(0, 16000))
    }, height=5, width=5.083333)
  }
  # Group element that will collect the per-year polylines.
  g.doys <- xml_add_sibling(xml_children(doy_svg)[[length(xml_children(doy_svg))]], 'g', id='dayOfYear','class'='doy-polyline')
  for(i in unique(daily$Year)){
    year_data <- filter(daily, Year == i)
    # Leap years: shift days >= 60 (Mar 1 onward) by one so calendar dates
    # line up across years.
    if((i%%4 == 0) & ((i%%100 != 0) | (i%%400 == 0))){
      year_data$DayOfYear[year_data$DayOfYear>=60] <- year_data$DayOfYear[year_data$DayOfYear>=60]+1
    }
    # Downsample to 183 interpolated points to keep the output SVG small.
    sub_data <- data.frame(approx(year_data$DayOfYear, year_data$Flow, n = 183))
    x <- grab_spark(sub_data)
    # The polyline is taken as the 4th child of the sparkline SVG -- this
    # depends on svglite's output structure; TODO confirm on svglite upgrades.
    polyline <- xml_children(x)[4]
    xml_attr(polyline, "id") <- paste0("y",i)
    xml_attr(polyline, "class") <- "doy-lines-by-year"
    # Drop per-sparkline clipping/styling; CSS classes style the lines instead.
    xml_attr(polyline, "clip-path") <- NULL
    xml_attr(polyline, "style") <- NULL
    xml_add_child(g.doys, polyline[[1]])
  }
  # Append the zoomer hydrograph in its own group, shifted below the main plot.
  g.zoomer <- xml_add_sibling(xml_children(doy_svg)[[length(xml_children(doy_svg))]], 'g', id='total_hydro','class'='total_hydro')
  xml_attr(g.zoomer, "transform") <- "translate(0,370)"
  xml_add_child(g.zoomer, zoomer.xml)
  # Enlarge the viewBox so the appended zoomer is visible.
  xml_attr(doy_svg, "viewBox") <- paste(c(0,0,360,566), collapse=' ')
  write_xml(doy_svg, viz[["location"]])
}
|
56361dfd591d357bae9d3c3e27e751cbc5a73cf3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rnaseqWrapper/examples/determineSynonymous.Rd.R
|
d55297e4bf8dd6d017f75b64545ce660db653416
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 456
|
r
|
determineSynonymous.Rd.R
|
# Runnable example extracted from the rnaseqWrapper documentation for
# determineSynonymous(), which classifies variants against reference sequences.
library(rnaseqWrapper)
### Name: determineSynonymous
### Title: Determine whether or not variants are synonymous
### Aliases: determineSynonymous
### ** Examples
## Load needed data
data(varScanExample)
data(fastaExample)
# refPosCol / refAlleleCol / varAlleleCol identify the position and allele
# columns inside the VarScan variant table.
determineSynonymous(varTable=varScanExample,
refPosCol=2,
refAlleleCol="Ref",
varAlleleCol="Var",
referenceSeqs=fastaExample)
|
1f31cdc3fe34e100e3bb7bb3d17bbd61f1f96650
|
c8328a268e6c2993a1396d6d8c20dbce963c1e0d
|
/R_DS_ML/Kmeans_after_PCA.R
|
5a4f2400fd0a46d43e7a36f388d62592333ce5e7
|
[] |
no_license
|
battyone/MLProjects
|
9781e83af0ec6f943c9f53d4e9baa1d1fa25e1c8
|
f36fa6f26b100107b04f6fa477a24a0d94a3e98c
|
refs/heads/master
| 2020-09-22T12:49:45.672878
| 2019-11-04T00:43:44
| 2019-11-04T00:43:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,628
|
r
|
Kmeans_after_PCA.R
|
---
title: "K-means Clustering after PCA for wine dataset"
author: "Amol Deshmukh"
date: "April 7, 2019"
output: html_document
---
```{r}
library(dplyr)
library(ggplot2)
```
```{r}
# NOTE(review): 'wine_kaka' is never loaded in this document; it is assumed
# to already exist in the session. TODO confirm the data source.
wine_kaka %>%
ggplot(aes(fixed.acidity, alcohol)) +
geom_point()
wine_kaka %>% summary()
```
```{r}
wine_kaka %>%
ggplot(aes(citric.acid)) +
geom_histogram()
# geom_histogram(aes(fill = color, color = good), alpha = 0.5)
# NOTE(review): 'wine_kaka_scale' is used here but only created in the next
# chunk -- this fails on a fresh top-to-bottom knit.
data.frame(wine_kaka_scale) %>%
ggplot(aes(citric.acid)) +
geom_histogram() +
xlim(-3.5, 3.5)
```
```{r}
# Standardize all columns except the two categorical indicators.
wine_kaka_scale <- scale(wine_kaka %>% select(-color, -good))
```
```{r}
wine_kaka_scale %>% summary()
```
```{r}
# Sanity check: scaled columns should have sd == 1.
sd(data.frame(wine_kaka_scale)$density)
```
```{r}
# PCA on the standardized data.
winePCA <- princomp(wine_kaka_scale)
summary(winePCA)
attributes(winePCA)
```
```{r}
winePCA$loadings
# NOTE(review): aes(color = ) below has an empty mapping -- broken as written.
data.frame(winePCA$scores) %>%
ggplot(aes(Comp.1, Comp.2)) +
geom_point(aes(color = ))
# Bind the PCA scores back onto the scaled data for joint plotting.
wineComb <- cbind(wine_kaka_scale, winePCA$scores)
data.frame(wineComb) %>% glimpse()
data.frame(wineComb) %>%
ggplot(aes(Comp.1, Comp.2)) +
geom_point(aes(color = white), alpha = 0.5)
```
```{r}
data.frame(wineComb) %>%
ggplot(aes(volatile.acidity, total.sulfur.dioxide)) +
geom_point(aes(color = white), alpha = 0.5)
```
```{r}
# Scree plot of component variances.
plot(winePCA)
```
```{r}
# Re-run PCA without the 'white' indicator, then k-means (k = 2) on the
# first four principal components; color points by assigned cluster.
data.frame(wine_kaka_scale) %>% glimpse()
revWinePCA <- princomp(data.frame(wine_kaka_scale) %>% select(-white))
kmeanWine <- kmeans(revWinePCA$scores[,1:4], 2)
revPCA <- data.frame(revWinePCA$scores)
revPCA$cluster <- kmeanWine$cluster
revPCA %>% glimpse()
revPCA %>%
ggplot(aes(Comp.1, Comp.2)) +
geom_point(aes(color = cluster), alpha = 0.5)
```
|
8a088e3ec13abede87b372746772e51dd27001d5
|
49767c4f6942985f5f6ae4a5b30b0c1f0e619a83
|
/fresh/app.R
|
940899259b64f3e90486c2aeaab4e948471c2aba
|
[] |
no_license
|
GreatLakesSIIM/shiny-fhir
|
8cf22623d7e1c1ee44c967879ba66e50ade20020
|
1ce5ec615b858ab146b9bb5cae5e31a5a8085a1b
|
refs/heads/master
| 2020-06-10T02:01:34.734196
| 2019-12-04T16:00:04
| 2019-12-04T16:00:04
| 193,551,147
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,138
|
r
|
app.R
|
# Shiny app: upload a JSON file, POST its raw contents to the SIIM hackathon
# FHIR server, and display the parsed JSON as a table.
library(shiny)
library(shinyFiles)
library(jsonlite)
library(tidyverse)
library(DT)
library(rjson)
library(httr)
library(stringi)
# NOTE(review): jsonlite, rjson and RJSONIO all export fromJSON(); RJSONIO is
# attached last, so its fromJSON() masks the others. The fromJSON(file = ...)
# call below relies on whichever wins -- confirm which implementation is meant.
library(RJSONIO)
# POST a FHIR resource payload to the hackathon server.
# resourceType: appended to the base URL ("" posts to the server root).
# data: raw JSON string, sent verbatim as the request body.
# Authenticates via the SiimApiKey environment variable.
# NOTE(review): httr documents encode values "multipart"/"form"/"json"/"raw";
# "text" is not among them -- verify the intended body encoding.
post_data <- function(resourceType, data){
POST(paste0('http://hackathon.siim.org/fhir/',resourceType),
add_headers('apikey' = Sys.getenv(x='SiimApiKey'),
'Content-Type' = 'application/fhir+json'),
body=data,
encode="text")
}
# Define UI: a single-file JSON upload plus a DT table output.
ui <- fluidPage(
titlePanel("JSON Test"),
fileInput("Json", "Choose Json File",
multiple = FALSE,
accept = c(".json")),
DTOutput('tbl')
)
# Server: on upload, read the file as one string, POST it, then render the
# parsed JSON as a data frame.
server <- function(input, output, session) {
output$tbl = renderDT({
req(input$Json)
# NOTE(review): as.character() on the upload metadata presumably yields the
# temp path among its elements -- readChar() below uses file.info() on it;
# verify against input$Json$datapath which is used for parsing.
jsonfile <- as.character(input$Json)
print(jsonfile)
string <- readChar(jsonfile, file.info(jsonfile)$size)
post_data("",string)
postAttempt <- post_data("",string)
print(postAttempt)
as.data.frame(fromJSON(file = input$Json$datapath))
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
96450787e4a6ae562d5c08384d35ddd944074529
|
61decf7eddaafd1356342d8601455760c82541b7
|
/man/read_fb.Rd
|
57f9f4fb49999dbc77d60a2b754c7a75f133095f
|
[] |
no_license
|
grice2/GPATOOLS
|
f26ce237d095b4b410696ff9e11142bab9efcd3f
|
bb3a68a1707b03a03ffc1f247c3101e53abbc8d4
|
refs/heads/master
| 2023-03-24T05:20:55.725319
| 2021-03-19T16:56:33
| 2021-03-19T16:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 856
|
rd
|
read_fb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_fb.R
\name{read_fb}
\alias{read_fb}
\title{Read and Format GPA Facebook Data}
\usage{
read_fb(df_reactions_dir, df_engagement_dir)
}
\arguments{
\item{df_reactions_dir}{Directory to GPA Facebook post reaction types data (must contain 'post_reaction_type', 'post_reactions', and 'ad_name')}
\item{df_engagement_dir}{Directory to GPA Facebook post engagement data (must contain 'ad_name')}
}
\value{
A combined dataset with additional columns 'audience' and 'ad_id' needed for the next step of analysis.
}
\description{
This function will read in two data frames, i.e., the post reaction types data and the post engagement data, combine and ready them for analysis.
}
\examples{
read_fb(df_reactions_dir = "reaction_data.csv", df_engagement_dir = "engagement_data.csv")
}
|
42e0c7ce6c4cd5688a747f54815346db977ac775
|
3d171996a631a1df6bd7b04cc1f5776d4715273f
|
/man/varExp_Gaussian.Rd
|
9d688328098eccb6ab99441cd765d6532d920b7b
|
[] |
no_license
|
YipengUva/RpESCA
|
5c562bd0433afdafdff891f2ba7fe586658c87c8
|
d4f49f23015c2641353906f9152d25fbec209078
|
refs/heads/master
| 2020-05-09T11:13:49.574061
| 2019-07-01T13:54:11
| 2019-07-01T13:54:11
| 181,071,934
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 956
|
rd
|
varExp_Gaussian.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supplementary_functions.R
\name{varExp_Gaussian}
\alias{varExp_Gaussian}
\title{Compute the variation explained ratios when Gaussian data is used}
\usage{
varExp_Gaussian(X, mu, A, B, Q)
}
\arguments{
\item{X}{a \eqn{n*d} quantitative data set}
\item{mu}{a \eqn{n*1} column offset term}
\item{A}{a \eqn{n*R} score matrix}
\item{B}{a \eqn{d*R} loading matrix}
\item{Q}{a \eqn{n*d} weighting matrix of the same size of X}
}
\value{
This function returns a list containing the variation explained
ratios of the whole model (varExp_total) or of each component (varExp_PCs).
}
\description{
This function computes the variation explained ratios for a component
model on quantitative data sets. Details can be found
in the paper \url{https://arxiv.org/abs/1902.06241}.
}
\examples{
\dontrun{ out <- varExp_Gaussian(X,mu,A,B,Q)
out$varExp_total
out$varExp_PCs
}
}
|
107e26d0bae0b627065e1ad9a7d07c3a15ddadf5
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/MTS/man/Vpmiss.Rd
|
987166ad6b8f8589f3a2df4bca0693117ac7f526
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,172
|
rd
|
Vpmiss.Rd
|
\name{Vpmiss}
\alias{Vpmiss}
\title{Partial Missing Value of a VARMA Series
}
\description{Assuming that the data is only partially missing,
this program estimates those missing values. The model is assumed to be
known.
}
\usage{
Vpmiss(zt, piwgt, sigma, tmiss, mdx, cnst = NULL, output = T)
}
\arguments{
\item{zt}{A T-by-k data matrix of a k-dimensional time series
}
\item{piwgt}{pi-weights of the model in the form
piwgt[pi0, pi1, pi2, ....]
}
\item{sigma}{Residual covariance matrix
}
\item{tmiss}{Time index of the partially missing data point
}
\item{mdx}{A k-dimensional indicator with "0" denoting missing
component and "1" denoting observed value.
}
\item{cnst}{Constant term of the model
}
\item{output}{values of the partially missing data
}
}
\value{Estimates of the missing values
}
\references{Tsay (2014, Chapter 6). Multivariate Time Series
Analysis with R and Financial Applications.
John Wiley. Hoboken, NJ.
}
\author{Ruey S. Tsay
}
\seealso{Vmiss
}
\examples{
#data("mts-examples",package="MTS")
#gdp=log(qgdp[,3:5])
#m1=VAR(gdp,1)
#piwgt=m1$Phi; cnst=m1$Ph0; Sig=m1$Sigma
#mdx=c(0,1,1)
#m2=Vpmiss(gdp,piwgt,Sig,50,mdx,cnst)
}
|
fff112686de779eaee8a3daaaa3b3223cab21f1e
|
0ee7f4115cfbbcbd58814b90c7b3f629c4314a75
|
/Exercise1/ui.R
|
499c42640fbed52f7edb62ae80c74192b925d0e9
|
[] |
no_license
|
EdinbR/mango-shiny-workshop
|
d4c28f2410eea41d86fa3afffe0f3360c82f9722
|
7c4ed2508c4f4179eb31fea2f34c49185cb58b38
|
refs/heads/master
| 2021-01-19T03:24:11.501842
| 2015-06-18T10:45:36
| 2015-06-18T10:45:36
| 37,653,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 540
|
r
|
ui.R
|
library(shiny)
# UI for a small demo app: the user picks a date in the sidebar; the server
# renders it back through the "dateOutput" and "yearOutput" text outputs.
shinyUI(fluidPage(
# Define the header for the page
titlePanel("An Exciting Shiny App to Print Dates!"),
# Set up the page to have a sidebar
sidebarLayout(
# Define the contents of the sidebar
sidebarPanel(
dateInput(inputId = "dateInput",
label = "Enter Date to print:")
),
# Define the contents of the main panel
mainPanel(
textOutput(outputId = "dateOutput"),
textOutput(outputId = "yearOutput"))
)
)
)
|
b84719535b984e06a43d5c70bb65f3aa22e7b029
|
584a01c7f6cace0bb42e4c1e37b8957d24a6825d
|
/nanopore_qc/server.R
|
a0a6cf5e0dc0593c7ebf49b1d6003946c7f9dbbb
|
[] |
no_license
|
ambro01/nanopore_qc_mbi
|
15c5f1ad0fa2c10e64b4ec2375d91c93de86c44c
|
ab5358cd5cd67eaf741bfa7975f6a4f91496b970
|
refs/heads/master
| 2021-09-06T22:42:19.139377
| 2018-02-12T20:11:41
| 2018-02-12T20:11:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,248
|
r
|
server.R
|
# Shiny server for the nanopore QC application. Routes plot and statistics
# generation to one of three backends (IONiseR, poRe, poretools) depending
# on the tab currently selected in the navbar.
library(IONiseR)
library(minionSummaryData)
library(ggplot2)
library(gridExtra)
library(poRe)
source('global.R')
source('ui.R')
source('server_ioniser.R')
source('server_pore.R')
source('server_poretools.R')

server <- function(input, output, session) {
  # Anything that reads control state from the view must be wrapped in an
  # observer block (as below) or a reactive() block.
  # The handler below runs whenever the user switches to another tab:
  # it swaps the select/radio choices for the chosen tool and shows its
  # description panel.
  toolName <- NULL
  observeEvent(ignoreNULL = TRUE, eventExpr = input$navbar, handlerExpr = {
    toolName <<- input$navbar
    print(toolName)
    description <- generateProgramDescription(toolName)
    if (toolName == "IONiseR") {
      updateSelectInput(session, "plotSelect", choices = ioniserPlotSelectList)
      updateSelectInput(session, "statSelect", choices = ioniserStatSelectList)
      updateRadioButtons(session, "dataSource", choices = ioniserDataChoiceList)
      hide("ioniserTable")
      hide("ioniserPlot")
      show("ioniserDescription")
      output$ioniserDescription <- renderText(description)
    } else if (toolName == "poRe") {
      updateSelectInput(session, "plotSelect", choices = porePlotSelectList)
      updateSelectInput(session, "statSelect", choices = poreStatSelectList)
      updateRadioButtons(session, "dataSource", choices = poreDataChoiceList)
      hide("poreTable")
      hide("porePlot")
      show("poreDescription")
      output$poreDescription <- renderText(description)
    } else if (toolName == "poreTools") {
      updateSelectInput(session, "plotSelect", choices = poretoolsPlotSelectList)
      updateSelectInput(session, "statSelect", choices = poretoolsStatSelectList)
      updateRadioButtons(session, "dataSource", choices = poretoolsDataChoiceList)
      hide("poretoolsTable")
      hide("poretoolsPlot")
      show("poretoolsDescription")
      output$poretoolsDescription <- renderText(description)
    }
  })

  # Paths of the files selected by the user.
  dataSource <- NULL
  dataPath <- NULL
  dirPath <- NULL
  summaryData <- NULL
  # Load data depending on the currently selected tool.
  observeEvent(ignoreNULL = TRUE, eventExpr = c(input$fileInput, input$dataSource, input$navbar), handlerExpr = {
    dataSource <<- input$dataSource
    dataPath <<- input$fileInput$datapath
    if (!is.null(dataPath)) {
      dirPath <<- dirname(input$fileInput$datapath)
    }
    if (toolName == "IONiseR") {
      # <<- makes the variable visible outside this handler as well
      summaryData <<- (
        if (isValid(dataSource)) {
          getSummaryData(dataSource, dataPath)
        })
    } else if (toolName == "poRe") {
      show("fileLoading")
      summaryData <<- if (isValid(dirPath)) {
        getPoreData(dirPath)
      }
      hide("fileLoading")
    } else if (toolName == "poreTools") {
      # poretools reads directly from dirPath when generating output; nothing
      # is preloaded here.
    }
  })

  # Everything below runs when the 'generate plot' button is pressed.
  observeEvent(ignoreNULL = TRUE, eventExpr = input$plotButton, handlerExpr = {
    selectedMethod <- input$plotSelect
    if (toolName == "IONiseR") {
      hide("ioniserTable")
      hide("ioniserPlot")
      hide("ioniserDescription")
      show("loadingImage")
      # Input data may have been loaded from files or from the library;
      # a plot and a description are generated for the selected method.
      plot <- if (isValid(summaryData) && isValid(selectedMethod)) {
        generateIoniserPlotByFunctionName(selectedMethod, summaryData)
      }
      description <- generateIoniserDescription(selectedMethod, plot)
      hide("loadingImage")
      hide("ioniserTable")
      show("ioniserDescription")
      show("ioniserPlot")
      # Set the output values.
      output$ioniserPlot <- renderPlot(plot)
      output$ioniserDescription <- renderText(description)
    } else if (toolName == "poRe") {
      hide("poreDescription")
      hide("poreTable")
      hide("porePlot")
      show("loadingImage")
      plot <- if (isValid(summaryData) && isValid(selectedMethod)) {
        generatePorePlotByFunctionName(selectedMethod, summaryData)
      } else NULL
      description <- generatePoreDescription(selectedMethod, plot)
      hide("loadingImage")
      hide("poreTable")
      show("poreDescription")
      show("porePlot")
      # Wrap in renderPlot() for consistency with the other backends
      # (was a bare assignment, which shiny cannot render).
      output$porePlot <- renderPlot(plot)
      output$poreDescription <- renderText(description)
    } else if (toolName == "poreTools") {
      hide("poretoolsDescription")
      hide("poretoolsTable")
      hide("poretoolsPlot")
      show("loadingImage")
      plot <- if (isValid(dirPath) && isValid(selectedMethod)) {
        generatePoretoolsPlotByFunctionName(selectedMethod, dirPath)
      }
      description <- generatePoretoolsDescription(selectedMethod, plot)
      hide("loadingImage")
      hide("poretoolsTable")
      show("poretoolsDescription")
      show("poretoolsPlot")
      # poretools writes its plot to disk; serve the generated image file.
      output$poretoolsPlot <- renderImage({
        filename <- base::normalizePath(file.path('./images', 'foo.jpg'))
        # Return a list containing the filename and alt text
        list(src = filename)
      }, deleteFile = TRUE)
      output$poretoolsDescription <- renderText(description)
    }
  })

  # Everything below runs when the 'generate stat' button is pressed.
  observeEvent(ignoreNULL = TRUE, eventExpr = input$statButton, handlerExpr = {
    selectedMethod <- input$statSelect
    if (toolName == "IONiseR") {
      hide("ioniserDescription")
      hide("ioniserTable")
      hide("ioniserPlot")
      show("loadingImage")
      stat <- if (isValid(summaryData) && isValid(selectedMethod)) {
        generateIoniserStatByFunctionName(selectedMethod, summaryData)
      }
      description <- generateIoniserDescription(selectedMethod, stat)
      hide("loadingImage")
      hide("ioniserPlot")
      show("ioniserDescription")
      show("ioniserTable")
      output$ioniserTable <- renderTable(stat)
      output$ioniserDescription <- renderText(description)
    } else if (toolName == "poRe") {
      hide("poreDescription")
      hide("poreTable")
      hide("porePlot")
      show("loadingImage")
      stat <- if (isValid(summaryData) && isValid(selectedMethod)) {
        generatePoreStatByFunctionName(selectedMethod, summaryData)
      }
      description <- generatePoreDescription(selectedMethod, stat)
      hide("loadingImage")
      hide("porePlot")
      show("poreDescription")
      show("poreTable")
      output$poreTable <- renderTable(stat)
      output$poreDescription <- renderText(description)
    } else if (toolName == "poreTools") {
      hide("poretoolsDescription")
      hide("poretoolsTable")
      hide("poretoolsPlot")
      show("loadingImage")
      stat <- if (isValid(dirPath) && isValid(selectedMethod)) {
        generatePoretoolsStatByFunctionName(selectedMethod, dirPath)
      }
      description <- generatePoretoolsDescription(selectedMethod, stat)
      hide("loadingImage")
      hide("poretoolsPlot")
      show("poretoolsDescription")
      show("poretoolsTable")
      output$poretoolsTable <- renderTable(stat)
      output$poretoolsDescription <- renderText(description)
    }
  })
}
|
36f889d60fb333c973fe614258dcfb0d81f0075a
|
2a86e10f76fcbd693038479a9f13c1c4b7717b8d
|
/code/boosting/lr-ud-predictor.R
|
2b21b7bf216ddf7744b6b8259ef163336813466b
|
[] |
no_license
|
naturegirl/EavesDroid
|
a2bec34186841da6608e240666c463f7fd82612f
|
02f32e0f7a39aad86573e5b0c3e5332d350a02f0
|
refs/heads/master
| 2020-12-24T13:29:25.717850
| 2014-05-16T19:27:05
| 2014-05-16T19:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
lr-ud-predictor.R
|
# Predict left/right and up/down classes for EavesDroid sensor data using
# previously trained AdaBoost random-forest models, then write the combined
# label string next to the input file.
library(RWeka)
library(rJava)

args <- commandArgs(trailingOnly = TRUE)

# Training sets (loaded so attribute/factor definitions match the new data).
file_lr <- "./../../data/dataset/training.lr.arff"
data_lr <- read.arff(file_lr)
file_ud <- "./../../data/dataset/training.ud.arff"
data_ud <- read.arff(file_ud)

# load the saved models for LR and UD predictors (provides m_lr and m_ud)
load(file="./adaboost.rf.lr.rda")
load(file="./adaboost.rf.ud.rda")

# Input defaults to a sample file; the first CLI argument overrides it.
input_file <- "./../../data/weka-data/new/garbage_1399740519.csv.arff"
if (length(args) >= 1) {
  input_file <- args[1]
}
data_input <- read.arff(input_file)

# Predict both axes and map numeric class codes to symbolic labels.
lr_labels <- predict(m_lr, newdata = data_input,
                     type = c("class", "probability"))
lr_labels <- ifelse(lr_labels == 1, "l", "r")
ud_labels <- predict(m_ud, newdata = data_input,
                     type = c("class", "probability"))
ud_labels <- ifelse(ud_labels == 1, "u", "d")
# print(lr_labels)
# print(ud_labels)

# Interleave the two label vectors per observation (l/r then u/d) into one
# string. Vectorized paste() with collapse replaces the original O(n^2)
# string-append while loop and yields an identical result (including ""
# for zero predictions).
str_label <- paste(lr_labels, ud_labels, sep="", collapse="")
print(str_label)

# Persist the label string as "<input>.pred" next to the input file.
output_label_filename <- paste(input_file, "pred", sep=".")
write(str_label, file = output_label_filename)
|
1970791912bdce7bacc181ef93e3f1c9038d050e
|
486deb2a88424a9dd6e4761af999263a6fa316b8
|
/inst/doc/v2.R
|
b87433476a17446ebd44f7bda978bdc63d2c1156
|
[] |
no_license
|
cran/eRTG3D
|
4ba5c89aba6d9f2a2500b6514de7a8fd157b7561
|
785c339e125caad743cc40502d58bfe15d53d24c
|
refs/heads/master
| 2022-03-24T18:26:26.296102
| 2022-02-25T11:10:05
| 2022-02-25T11:10:05
| 209,614,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,114
|
r
|
v2.R
|
## NOTE(review): this file is R code extracted (purled) from a package
## vignette. The "## ----" lines are knitr chunk headers, and the chunks
## rendered entirely as "# " comments correspond to eval=FALSE chunks.
## Edit the vignette source, not this file.
## ---- echo = FALSE, eval=TRUE, include=FALSE----------------------------------
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
options(knitr.table.format = "html", rmarkdown.html_vignette.check_title = FALSE)
library(eRTG3D)
set.seed(123)
## ----eval=FALSE---------------------------------------------------------------
#  niclas <- track.properties.3d(niclas)
## ----echo=FALSE, results = "asis", fig.height=5, fig.width=10-----------------
niclas <- track.properties.3d(niclas)
pander::pandoc.table(head(round(niclas, 2),5))
## ----eval=FALSE---------------------------------------------------------------
#  P <- get.track.densities.3d(niclas, heightDistEllipsoid = TRUE, DEM = dem)
## ----eval=FALSE---------------------------------------------------------------
#  sim.locs <- nrow(niclas)
#  f <- 1500
#  uerw <- sim.uncond.3d(sim.locs*f, start = c(niclas$x[1], niclas$y[1], niclas$z[1]),
#                        a0 = niclas$a[1], g0 = niclas$g[1], densities = P)
#  Q <- qProb.3d(uerw, sim.locs)
## ----eval=FALSE---------------------------------------------------------------
#  start <- c(niclas$x[1], niclas$y[1], niclas$z[1])
#  end <- c(niclas$x[nrow(niclas)], niclas$y[nrow(niclas)], niclas$z[nrow(niclas)])
#  a0 <- niclas$a[1]
#  g0 <- niclas$g[1]
## ----eval=FALSE---------------------------------------------------------------
#  cerw <- sim.cond.3d(sim.locs, start = start, end = end, a0 = a0, g0 = g0,
#                      densities = P, qProbs = Q, DEM = dem)
## ----eval=FALSE---------------------------------------------------------------
#  cerwList <- n.sim.cond.3d(n.sim = 100, sim.locs,
#                            start = start, end = end, a0 = a0, g0 = g0,
#                            densities = P, qProbs = Q, DEM = dem)
## ----eval=FALSE---------------------------------------------------------------
#  trackSections <- track.split.3d(track, timeLag)
#  P <- get.section.densities.3d(trackSections, DEM = dem)
## ---- fig.show='hold', eval=FALSE---------------------------------------------
#  cerwList <- reproduce.track.3d(n.sim = 100, niclas, DEM = dem)
|
ae13aeeab3cc855e5d7ac98c63f743eb93cf6f69
|
474b9c0dd798418894fd47cea3251da1239cc027
|
/iteration.R
|
2b5811054a4eb7a7317b260dd963792ba27f3e77
|
[] |
no_license
|
abessudnov/dataanalysis3
|
d80af34487aca7685787fa8cf7a79a789e8d2402
|
b9689b7442526d96ca6f1d9ce1a874cac900c8b2
|
refs/heads/master
| 2021-05-12T00:09:24.271244
| 2019-01-17T21:19:35
| 2019-01-17T21:19:35
| 117,527,612
| 3
| 35
| null | 2018-08-21T16:34:34
| 2018-01-15T09:53:42
|
R
|
UTF-8
|
R
| false
| false
| 7,745
|
r
|
iteration.R
|
# 5 Feb 2017
# Iteration
# *********************
# Iteration in R
# Sometimes we need to repeat the same sequence of commands several times.
# Whenever you need to repeat the same action more than twice try to automate it and
# apply iteration.
# NOTE(review): this is course material — the explicit loops below are
# intentionally non-idiomatic, to be contrasted with vectorized solutions.
# Loops in R
# 1. For loops
for (i in 1:5) {
  print(i)
}
# (i in 1:5): sequence
# {
#  print(i)
# }: body
# Calculating the number of characters in words
for (i in c("red", "blue", "yellow")) {
  print(nchar(i))
}
# R is a vectorised language and this is why we rarely use loops in R
nchar(c("red", "blue", "yellow"))
a <- 1:3
b <- 4:6
a
b
a + b
for (i in 1:3) {
  print(a[i] + b[i])
}
# 2. while loops
i = 1
# This is equivalent to i <- 1
while (i < 6) {
  print(i)
  i = i + 1
}
# (i < 6): condition
# {
#   print(i)
#   i = i + 1
# }: body
# i is a counter
# With while loops it is easy to write an infinite loop.
# Do not run:
# i = 1
# while (i < 6) {
#   print(i)
# }
# 3. repeat loops
i = 1
repeat {
  print(i)
  i = i + 1
  if (i > 5) break
}
# rarely used
# Exercises:
# 1. From here - http://www-math.bgsu.edu/~zirbel/programming/index.html
# Write some lines of code that will calculate the sum 1+2+3+...+300.
# The idea is to create a variable that will store the current value of the sum.
# Set it to zero, then use a for loop that will run through the numbers 1, 2, 3, ...
# and add each of these to the current sum. After the for loop, output the value of the sum.
x <- 0
for (y in 1:300) {
  x = x + y
}
print(x)
sum(1:300)
# 2. Write some lines of code to calculate the sum 1·2+2·3+3·4+ ... + 249·250.
# Incorrect:
# x <- 0
# for (y in 1*2:249*250){
#   x = x + y
# }
# print(x)
x <- 0
for (i in 1:249) {
  x <- x + i*(i+1)
}
print(x)
# 3. Write a program to calculate 10! ("10 factorial"),
# which is defined to be 10*9*8*7*6*5*4*3*2*1.
x<-10
for(i in 9:1){
  x<-i*x
}
print(x)
factorial(10)
ourResult <- 1
x <- 10
while (x > 0) {
  ourResult <- ourResult * x
  x <- x - 1
}
print(ourResult)
factorial(10)
# 4. From here: http://maths-people.anu.edu.au/~johnm/courses/r/exercises/pdf/r-exercises.pdf
# (a) Create a for loop that, given a numeric vector,
# prints out one number per line,
# with its square and cube alongside.
n <- 5:15
for(i in 1:length(n)){
  print(c(n[i], n[i]^2, n[i]^3))
}
# (b) Look up help(while). Show how to use a while loop to achieve the same result.
n <- 3:1
i <- 1
while(i <= length(n)){
  # print(c(n[i], n[i]^2, n[i]^3))
  cat(n[i], n[i]^2, n[i]^3, "\n")
  i = i + 1
}
# (c) Show how to achieve the same result without the use
# of an explicit loop.
i<-3:99
b<-i^2
c<-i^3
cbind(i, b, c)
n <- 3:7
n <- data.frame(n)
n$n2 <- n[,1] ^ 2
n$n3 <- n[,2] ^ 3
n
# *****************
# The apply family of functions
# Many iterations in R can be done without explicit use of loops.
? apply
library(tidyverse)
# NOTE(review): assumes the working directory contains exData/W1.csv —
# confirm before sourcing this script.
W1 <- read_csv("exData/W1.csv")
W1mod <- W1 %>%
  mutate(heightcm = ifelse(a_hlht == 1 & a_hlhtf > 0,
                           a_hlhtf*30.48 + a_hlhti*2.54,
                           ifelse(a_hlht == 2 & a_hlhtc > 0,
                                  a_hlhtc, NA))) %>%
  mutate(weightkg = ifelse(a_hlwt == 1 & a_hlwts > 0,
                           a_hlwts*6.35 + a_hlwtp*0.45,
                           ifelse(a_hlwt == 2 & a_hlwtk > 0,
                                  a_hlwtk, NA))) %>%
  mutate(bmi = weightkg / (heightcm / 100)^2)
# We want to calculate the mean for three variables (height, weight, bmi)
# 1. We can do this with summarise in dplyr
W1mod %>%
  summarise(
    meanHeight = mean(heightcm, na.rm = TRUE),
    meanWeight = mean(weightkg, na.rm = TRUE),
    meanBMI = mean(bmi, na.rm = TRUE)
  )
# 2. We could use a for loop
# NOTE(review): attach() is generally discouraged (name-masking risk) and
# W1mod is never detach()ed here; kept as-is since it is course material.
attach(W1mod)
for (i in c("heightcm", "weightkg", "bmi")) {
  # Note the use of get. It gets an object given an object name.
  y <- get(i)
  print(mean(y, na.rm = TRUE))
}
# 3. We can also use apply.
colnames(W1mod)
apply(W1mod[, 20:22], 2, mean, na.rm = TRUE)
#
# Another example: calculate the sum of all variables by row
# (doesn't make any sense here, but we're interested in technical implementation)
apply(W1mod, 1, sum, na.rm = TRUE)
# tapply applies a function to a vector split by the values of a factor (or several factors)
? tapply
# This is a code from last class:
W1mod %>%
  mutate(bmiover30 = ifelse(bmi > 30, 1, 0)) %>%
  mutate(agegr = ifelse(a_dvage >= 18 & a_dvage <= 35, 1,
                        ifelse((a_dvage >= 36 & a_dvage <= 55), 2,
                               ifelse(a_dvage >= 56, 3, NA)))) %>%
  filter(!is.na(agegr)) %>%
  group_by(a_sex, agegr) %>%
  summarise(
    meanBMI = mean(bmi, na.rm=TRUE),
    medianBMI = median(bmi, na.rm=TRUE),
    proportion = mean(bmiover30, na.rm=TRUE)
  )
# Using tapply
W1mod2 <- W1mod %>%
  mutate(bmiover30 = ifelse(bmi > 30, 1, 0)) %>%
  mutate(agegr = factor(ifelse(a_dvage >= 18 & a_dvage <= 35, 1,
                               ifelse((a_dvage >= 36 & a_dvage <= 55), 2,
                                      ifelse(a_dvage >= 56, 3, NA))))) %>%
  mutate(sex = factor(a_sex)) %>%
  filter(!is.na(agegr))
tapply(W1mod2$bmiover30, list(W1mod2$sex, W1mod2$agegr), mean, na.rm = TRUE)
# Exercise. Write a for loop replicating these results.
# You will need to loop over the values of sex and agegr at the same time,
# so you will need to use a nested loop.
# Here is an example of a nested loop.
for (i in 1:3) {
  for (j in 3:1) {
    cat(i, j, "\n")
  }
}
for (i in 1:2) {
  for (j in 1:3) {
    print(mean(W1mod2$bmiover30[W1mod2$a_sex == i & W1mod2$agegr == j],
               na.rm = TRUE))
  }
}
# Other functions in the apply family:
? lapply
# Returns a list
ourList <- list(
  a = c("yellow", "red", "green"),
  b = 1:10,
  c = factor(c(rep("male", 5), rep("female", 5)))
)
ourList
# Calculate the length of each element of ourList
lapply(ourList, length)
# Note that
length(ourList)
# returns a different result
class(lapply(ourList, length))
# If we want to return the result as a vector:
sapply(ourList, length)
class(sapply(ourList, length))
? vapply
? mapply
? rapply
? eapply
# The purrr package (part of tidyverse) has map() and walk() functions that
# do the same job as the apply() family.
# For example:
map_int(ourList, length)
# I never use them (but maybe I should start).
# Yet another way is to use functions. More on this later in the course.
# Exercises
# Exercise 1. (from here --
# http://bioinformatics.nki.nl/courses/Rstat_12_I/texts/resources/exercises_apply_LP121206.pdf)
# Let's create a 5x5 matrix with values drawn from a normal distribution
mat <- matrix(NA, ncol=5, nrow=5)
for(i in 1:ncol(mat)) {
  mat[,i] <- rnorm(5)
}
mat
# a) Use apply to calculate the standard deviation of the columns of the matrix.
# b) Use apply to calculate the maximum value in each of the rows of the matrix.
# Exercise 2.
# For each age (not age group) in W1mod find maximum BMI and write the BMIs
# into a vector.
#
# First do this using apply() family of functions and then using summarise()
# in dplyr
# Exercise 3.
# Create 1000 vectors of random length in the range from 1 to 100 that have values from the
# standard normal distribution. Put them into a list. For each vector calculate the mean
# and return the results as a) a list, b) a vector.
# Note that when indexing a list you need to use double square brackets: [[]]
# Create a scatter plot showing how the mean is associated with the sample size
# **************************
# Next class (Monday 12 February)
# Read ch. 13 Relational data - http://r4ds.had.co.nz/relational-data.html
# Do the exercises.
# This corresponds to the following DataCamp course -
# https://www.datacamp.com/courses/joining-data-in-r-with-dplyr
|
211fb149d5ea0f8d5c5d0c5762bcc3e8b2ae8b0d
|
7b72be0cf5fa9fcab13d04b4424ec2d8c4f56a65
|
/man/head.rdeque.Rd
|
97a7f803ec548f0aacb91a5e06b5ea5f8aa61f30
|
[] |
no_license
|
cran/rstackdeque
|
88722be9cbd257a9dccef79a223f91338fa49bc5
|
1ee75225901b90b2a674255952068f7588225d6a
|
refs/heads/master
| 2021-01-23T19:46:24.826995
| 2014-12-01T00:00:00
| 2014-12-01T00:00:00
| 27,569,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 730
|
rd
|
head.rdeque.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{head.rdeque}
\alias{head.rdeque}
\title{Return the first n elements of an rdeque as an rdeque}
\usage{
\method{head}{rdeque}(x, n = 6L, ...)
}
\arguments{
\item{x}{rdeque to get the head of.}
\item{n}{number of elements to get.}
\item{...}{arguments to be passed to or from other methods (ignored).}
}
\value{
a new rdeque.
}
\description{
Returns the first n elements of a deque as a deque, or all of the elements if its length is less than n.
}
\details{
Runs in \eqn{O(n)} time (in the size of the number of elements requested).
}
\examples{
d <- rdeque()
d <- insert_back(d, "a")
d <- insert_back(d, "b")
d <- insert_back(d, "c")
dt <- head(d, n = 2)
print(dt)
}
|
8d4b07e3f29e30b27341266ac48caa5f3211b39a
|
311e68e04863ccf23a8d689f8183bd2dce4108d0
|
/man/plotOUProcessPriors.Rd
|
99472698d933b118bf104c776e5a2245fcb8544f
|
[] |
no_license
|
carlopacioni/bdskytools
|
93f812ab3b889862264fbc994864fdac70c321da
|
d6b3faefe3c70357e081742fcf6f35192f054ac7
|
refs/heads/master
| 2021-01-19T21:00:10.721556
| 2017-02-04T20:49:08
| 2017-02-04T20:49:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 406
|
rd
|
plotOUProcessPriors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotPrior.R
\name{plotOUProcessPriors}
\alias{plotOUProcessPriors}
\title{Plot priors for an OU process. Parameters may be a prior or a constant;
use getPrior to quote the prior function to be passed with parameters}
\usage{
plotOUProcessPriors(x0_prior, t, mu_prior, sigma_prior, nu_prior, ...)
}
\description{
Should probably rewrite this using do.call()
}
|
1c60456c61532e27db28b8044fbc9131c11f300b
|
0f5f50d2b69ca8054531355005ec759d015d3911
|
/man/stock.price.Rd
|
f0615e3708453495ac04ca37b80f6a342ae3739b
|
[] |
no_license
|
cran/RcmdrPlugin.RiskDemo
|
933d1f238271c80f1f9919cf38c1653c4d0c1eaf
|
69f613fcee6b3845c9877fd8b61699bdaf3ac1d0
|
refs/heads/master
| 2021-07-11T11:33:10.099976
| 2021-04-06T10:30:02
| 2021-04-06T10:30:02
| 99,853,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,674
|
rd
|
stock.price.Rd
|
\name{stock.price}
\alias{stock.price}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Computing stock prices
}
\description{
This function computes the intrinsic stock price using the constant growth dividend discount model.
}
\usage{
stock.price(dividend, k = NULL, g = NULL, ROE = NULL, b = NULL,
riskFree = NULL, marketPremium = NULL, beta = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dividend}{
expected dividend(s) for the next year(s) (in euros), separated by commas
}
\item{k}{required rate of return
}
\item{g}{
growth rate of dividends
}
\item{ROE}{
return on equity
}
\item{b}{
plowback ratio
}
\item{riskFree}{
riskfree rate
}
\item{marketPremium}{
market risk premium
}
\item{beta}{
beta
}
}
\details{
All the above rates are given in percentages (except the dividends). One should provide either k or the following three: riskFree, marketPremium, beta. Further, one should provide either g or the following two: ROE and b. In the output, k and g are given in decimals.
}
\value{
\item{dividend}{expected dividend(s) for the next year(s) (in euros)}
\item{k}{required rate of return}
\item{g}{growth rate of dividends}
\item{PVGO}{present value of growth opportunities}
\item{stockPrice}{intrinsic stock price}
}
\references{
Bodie, Kane, and Marcus (2014) \emph{Investments, 10th Global Edition}, McGraw-Hill Education, (see Dividend Discount Models in Section 18.3).
}
\author{
Arto Luoma <arto.luoma@wippies.com>
}
\examples{
stock.price(dividend=c(1),k=12,g=10)
stock.price(dividend=c(1),ROE=50,b=20,riskFree=5,marketPremium=8,
beta=90)
}
|
1d9c4f1a009cddacf732cf40e2616e7fbb128785
|
63903e164b919a329c9b551b3384c6dfa28fb563
|
/simulated_genefamilies_analysis.R
|
27e5bf8b1218cdf883a3fb35e880c845312e53d6
|
[] |
no_license
|
dhbrand/drought_metagen
|
86c3dc2a8bd93600e7f02761ca1ad6a7ba69eb30
|
4f8a71eb40630b5e8d9168ecb897bb0fbee2de35
|
refs/heads/master
| 2020-03-11T10:12:34.984491
| 2018-07-03T02:03:46
| 2018-07-03T02:03:46
| 129,936,100
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,101
|
r
|
simulated_genefamilies_analysis.R
|
library(tidyverse)
library(magrittr)
library(rlang)
# Two-stage differential-abundance analysis of simulated gene-family counts.
# TwoStage_Package() is defined in the sourced file below.
source("TwoStage_Package_Code.R")
# Load per-sample gene-family CPM calls; first column is kept as the group
# "Call", EstID/EstEC are renamed, and missing EC labels become "unknown".
dat_calls <- read_tsv("DNApolymAIII/Calls_genefamilies-cpm.tsv")[-1,] %>% dplyr::select(1, Annotation = "EstID", label = "EstEC", 4:34) %>% replace_na(list(label = "unknown")) %>% as.data.frame
# Design: 30 samples split into three groups of 10 (A, B, C).
factors <- cbind(names(dat_calls)[4:33], c(rep("A", 10), rep("B", 10), rep("C", 10))) %>% as.data.frame
# NOTE(review): dat_id is not defined anywhere in this script — this call will
# fail unless dat_id is created by TwoStage_Package_Code.R; verify (possibly
# dat_calls[,-c(1,3)] was intended, as in the pairwise calls below).
TwoStage_Package(dat_id, factors, "gf_sim_id_tmm1.csv", 1)
# Sig EC labels
# 2.7.7.7, 2.3.1.16, 2.4.2.14
# Group A vs B
gf_a_b <- dat_calls %>% filter(Call %in% c("A", "B")) %>% dplyr::select(1:23)
factors_a_b <- factors %>% filter(V2 %in% c("A", "B"))
TwoStage_Package(gf_a_b[,-c(1,3)], factors_a_b, "gf_a_b_tmm1.csv", 1, .001)
# NOTE(review): the commented-out block below is superseded by
# calc_confusion_mat() further down; beware that its fdr line uses
# tn / (tn + fn), whereas the live function uses fp / (tp + fp).
# sig_a_b <- read_csv("gf_a_b_tmm1.csv")
# a_b <- left_join(sig_a_b, dat_calls, by = "Annotation") %>% dplyr::select(1:9)
# sig_ec <- c("2.7.7.7", "2.3.1.16", "2.4.2.14")
# sig_ec_found <- intersect(sig_ec, unique(a_b$label))
# ec_else <- setdiff(unique(a_b$label), sig_ec)
# tp <- sum(table(a_b$label)[sig_ec_found])
# fp <- sum(table(a_b$label)[ec_else])
# fn <- sum(table(gf_a_b$label)[sig_ec_found]) - tp
# tn <- nrow(gf_a_b) - sum(tp, fp, fn)
# confusion_matrix <- cbind("positive" = c(tp, fp), "negative" = c(tn, fn))
# rownames(confusion_matrix) <- (c("true", "false"))
# fdr <- tn / (tn + fn)
# confusion_matrix; paste("FDR = ",fdr)
# Group A vs C
gf_a_c <- dat_calls %>% filter(Call %in% c("A", "C")) %>% dplyr::select(1:12, 23:33)
factors_a_c <- factors %>% filter(V2 %in% c("A", "C"))
TwoStage_Package(gf_a_c[,-c(1,3)], factors_a_c, "gf_a_c_tmm1.csv", 1, .001)
# sig_a_c <- read_csv("gf_a_c_tmm1.csv")
# a_c <- left_join(sig_a_c, dat_calls, by = "Annotation") %>% dplyr::select(1:9)
# sig_ec <- c("2.7.7.7", "2.3.1.16", "2.4.2.14")
# sig_ec_found <- intersect(sig_ec, unique(a_c$label))
# ec_else <- setdiff(unique(a_c$label), sig_ec)
# tp <- sum(table(a_c$label)[sig_ec_found])
# fp <- sum(table(a_c$label)[ec_else])
# fn <- sum(table(gf_a_c$label)[sig_ec_found]) - tp
# tn <- nrow(gf_a_c) - sum(tp, fp, fn)
# confusion_matrix <- cbind("positive" = c(tp, fp), "negative" = c(tn, fn))
# rownames(confusion_matrix) <- (c("true", "false"))
# fdr <- tn / (tn + fn)
# confusion_matrix; paste("FDR = ",fdr)
# Group B vs C
gf_b_c <- dat_calls %>% filter(Call %in% c("B", "C")) %>% dplyr::select(1:3, 13:32)
factors_b_c <- factors %>% filter(V2 %in% c("B", "C"))
TwoStage_Package(gf_b_c[,-c(1,3)], factors_b_c, "gf_b_c_tmm1.csv", 1, .001)
# sig_b_c <- read_csv("gf_b_c_tmm1.csv")
# b_c <- left_join(sig_b_c, dat_calls, by = "Annotation") %>% dplyr::select(1:9)
# sig_ec <- c("2.7.7.7", "2.3.1.16", "2.4.2.14")
# sig_ec_found <- intersect(sig_ec, unique(b_c$label))
# ec_else <- setdiff(unique(b_c$label), sig_ec)
# tp <- sum(table(b_c$label)[sig_ec_found])
# fp <- sum(table(b_c$label)[ec_else])
# fn <- sum(table(gf_b_c$label)[sig_ec_found]) - tp
# tn <- nrow(gf_b_c) - sum(tp, fp, fn)
# confusion_matrix <- cbind("positive" = c(tp, fp), "negative" = c(tn, fn))
# rownames(confusion_matrix) <- (c("true", "false"))
# fdr <- tn / (tn + fn)
# confusion_matrix; paste("FDR = ",fdr)
# Print the confusion matrix and FDR for one pairwise comparison.
#
# The comparison name ("gf_a_b", ...) is derived from the UNEVALUATED argument
# expression (via enquo/get_expr), so the function must be called with a bare
# object name whose matching "<name>_tmm1.csv" results file exists on disk.
#
# df     - the full count table for the comparison (must have a `label` column).
# calls  - annotation lookup joined on "Annotation"; defaults to the global
#          `dat_calls` built earlier in this script (lazy default keeps the
#          original behaviour while allowing injection for testing).
# sig_ec - EC labels that are truly differentially abundant in the simulation.
#
# Returns the 2x2 confusion matrix invisibly (rows true/false, columns
# positive/negative); printing happens as a side effect, as before.
calc_confusion_mat <- function(df, calls = dat_calls,
                               sig_ec = c("2.7.7.7", "2.3.1.16", "2.4.2.14")) {
  new_df <- enquo(df)
  groups <- unlist(str_split(as.character(get_expr(new_df)), "_", n = 3))
  sig <- read_csv(paste0(get_expr(new_df), "_tmm1.csv"))
  sig_label <- left_join(sig, calls, by = "Annotation") %>% dplyr::select(1:9)
  sig_ec_found <- intersect(sig_ec, unique(sig_label$label))
  ec_else <- setdiff(unique(sig_label$label), sig_ec)
  # Significant calls with a true EC label are true positives; everything
  # else called significant is a false positive.
  tp <- sum(table(sig_label$label)[sig_ec_found])
  fp <- sum(table(sig_label$label)[ec_else])
  # True features that were NOT called significant are false negatives.
  fn <- sum(table(df$label)[sig_ec_found]) - tp
  tn <- nrow(df) - sum(tp, fp, fn)
  confusion_matrix <- cbind("positive" = c(tp, fp), "negative" = c(tn, fn))
  rownames(confusion_matrix) <- c("true", "false")
  fdr <- fp / (tp + fp)  # false discovery rate among significant calls
  cat(paste("output for", groups[2], "&", groups[3], sep = " "), "\n")
  print(confusion_matrix)
  cat(paste("FDR = ", fdr), "\n\n")
  invisible(confusion_matrix)
}
# Report confusion matrix and FDR for each pairwise comparison.
calc_confusion_mat(gf_b_c)
calc_confusion_mat(gf_a_c)
calc_confusion_mat(gf_a_b)
|
f77656f7946092a2a7477b004249bbb854818ffb
|
4ba442619cf976909ac303ae6abd96eb3a451fd9
|
/SCperf_v.1.0/R/bullwhip.R
|
bbc531ac736436d43b0374ae548a9ee1ee56db83
|
[] |
no_license
|
lulzzz/SCperf
|
11e96291cb037f14a7ebdc12a66661be81930aa5
|
cf7ffdef4664c0eedd5f4c6a2bc3f6e6c81a46ed
|
refs/heads/master
| 2021-05-10T23:27:19.314669
| 2018-01-09T12:50:30
| 2018-01-09T12:50:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 663
|
r
|
bullwhip.R
|
# Bullwhip-effect measure (variance amplification of orders vs. demand) for
# an AR(1) demand process under three forecasting methods.
#
# method - "MMSE" (minimum mean squared error), "SMA" (simple moving
#          average) or "ES" (exponential smoothing); partial matching via
#          match.arg().
# phi    - AR(1) autocorrelation parameter of the demand process.
# L      - lead time (includes the review period); must be >= 1.
# p      - moving-average window; evaluated only when method == "SMA".
# alpha  - smoothing constant; evaluated only when method == "ES".
#          (Lazy evaluation means callers of "MMSE" may omit p and alpha,
#          as before.)
#
# Returns the bullwhip ratio as a single numeric value.
#
# NOTE(review): the original version printed a message for L == 0 and then
# crashed at return(r) with "object 'r' not found"; it also set the global
# option digits=5 as a side effect (affecting printing everywhere). Both
# are fixed: invalid L now raises a clean error, and no global state is
# touched (the returned value itself was never rounded).
bullwhip <- function(method = c("MMSE", "SMA", "ES"), phi, L, p, alpha) {
  method <- match.arg(method)
  if (L < 1) {
    stop("L is at least one, the review period, ...", call. = FALSE)
  }
  r <- switch(method,
    MMSE = 1 + 2 * phi * (1 - phi^L) * (1 - phi^(L + 1)) / (1 - phi),
    SMA  = 1 + 2 * (1 - phi^p) * ((L / p)^2 + (L / p)),
    ES   = 1 + (L * alpha) * (2 * (1 - phi) / (1 - (1 - alpha) * phi)) +
      (L * alpha)^2 * (1 - phi) / ((1 - alpha) * (1 - (1 - alpha) * phi))
  )
  r
}
|
04e636e9dddfc0236764c3b462986c0d5c642d23
|
78014f186b458c0dfbd4dd4dc660adfe858d4c62
|
/man/droplinks.Rd
|
8bb64f5aa22a189a9345f64e4e70d27664e21b4e
|
[] |
no_license
|
r-spatial/spdep
|
a1a8cebe6c86724bc4431afefc2ba2f06a3eb60c
|
a02d7aa6be8a42e62ba37d2999a35aff06a0ea59
|
refs/heads/main
| 2023-08-21T19:27:09.562669
| 2023-08-07T12:08:51
| 2023-08-07T12:08:51
| 109,153,369
| 109
| 30
| null | 2022-12-10T12:31:36
| 2017-11-01T16:10:35
|
R
|
UTF-8
|
R
| false
| false
| 1,787
|
rd
|
droplinks.Rd
|
% Copyright 2001 by Roger S. Bivand
\name{droplinks}
\alias{droplinks}
\title{Drop links in a neighbours list}
\description{
Drops links to and from or just to a region from a neighbours list. The example corresponds to Fingleton's Table 1, p. 6, for lattices 5 to 19.
}
\usage{
droplinks(nb, drop, sym=TRUE)
}
\arguments{
\item{nb}{a neighbours list object of class \code{nb}}
\item{drop}{either a logical vector the length of \code{nb}, or a character vector of named regions corresponding to \code{nb}'s region.id attribute, or an integer vector of region numbers}
\item{sym}{TRUE for removal of both "row" and "column" links, FALSE for only "row" links}
}
\value{
The function returns an object of class \code{nb} with a list of integer vectors containing neighbour region number ids.
}
\references{B. Fingleton (1999) Spurious spatial regression: some Monte Carlo results with a spatial unit root and spatial cointegration, Journal of Regional Science 39, pp. 1--19.}
\author{Roger Bivand \email{Roger.Bivand@nhh.no}}
\seealso{\code{\link{is.symmetric.nb}}}
\examples{
\donttest{
rho <- c(0.2, 0.5, 0.95, 0.999, 1.0)
ns <- c(5, 7, 9, 11, 13, 15, 17, 19)
mns <- matrix(0, nrow=length(ns), ncol=length(rho))
rownames(mns) <- ns
colnames(mns) <- rho
mxs <- matrix(0, nrow=length(ns), ncol=length(rho))
rownames(mxs) <- ns
colnames(mxs) <- rho
for (i in 1:length(ns)) {
nblist <- cell2nb(ns[i], ns[i])
nbdropped <- droplinks(nblist, ((ns[i]*ns[i])+1)/2, sym=FALSE)
listw <- nb2listw(nbdropped, style="W", zero.policy=TRUE)
wmat <- listw2mat(listw)
for (j in 1:length(rho)) {
mat <- diag(ns[i]*ns[i]) - rho[j] * wmat
res <- diag(solve(t(mat) \%*\% mat))
mns[i,j] <- mean(res)
mxs[i,j] <- max(res)
}
}
print(mns)
print(mxs)
}
}
\keyword{spatial}
|
98ba4751010cef61bbc6956160441a1e27843794
|
0c93e201530984c1dfd41db112457c017fd55a83
|
/tinyplyr/R/util.R
|
5824bec829c917071a5227f6e3c0fef26c2ea230
|
[] |
no_license
|
Rindrics/reformr
|
a763e4a0885f2913d24e687cc2e5ab8ceb7b14db
|
5f1994f9febe8d9adeb3f0003e93af6bb444b9c3
|
refs/heads/master
| 2021-10-20T00:11:22.350559
| 2019-02-25T01:05:50
| 2019-02-25T01:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,157
|
r
|
util.R
|
# ----------------------------------------------------------------------------.
# This file is tangled from tinyplyr.org. |
# (https://github.com/smxshxishxad/tinyplyr/blob/master/tinyplyrjmdatar.org) |
# Do not edit by hand. |
# ---------------------------------------------------------------------------'
# Convert a single Excel serial day number to an ISO "YYYY-MM-DD" string.
#
# Excel serial 60 corresponds to 1900-02-29, a date that never existed
# (Excel inherited Lotus 1-2-3's leap-year bug), so it is rejected; serials
# above 60 are shifted down by one to skip the phantom day.
#
# x - a single number (or string coercible to one); Excel serial 1 is
#     1900-01-01.
# Returns the date as a character scalar.
num2datei <- function(x) {
  jday_march01 <- 60
  x <- as.numeric(x)
  if (x == jday_march01) {
    stop("This date is not correct in Excel.")
  }
  if (x > jday_march01) {
    x <- x - 1 # skip Excel's phantom 1900-02-29
  }
  x <- x - 1 # Excel counts 1900-01-01 as day 1; as.Date() counts it as day 0
  as.character(as.Date(x, origin = "1900-01-01"))
}
#' Convert Excel serial day numbers to "YYYY-MM-DD" strings
#'
#' Vectorised wrapper around \code{num2datei}.
#'
#' @param x Vector of Excel serial day numbers (or values coercible to numeric).
#' @return Unnamed character vector of ISO dates, one per element of \code{x}.
#' @export
num2date <- function(x) {
  # vapply() replaces the previous purrr::map() + unlist() round trip: same
  # result, no third-party dependency, and the element type is checked.
  vapply(x, num2datei, character(1), USE.NAMES = FALSE)
}
#' Test whether strings look like Japanese-era dates (e.g. "H.25.4.1")
#'
#' @param x Character vector.
#' @return Logical vector, TRUE where the element matches the era pattern
#'   (an upper-case letter, optional dot, digits, a dot, and at least one
#'   more character).
#' @export
is.jpdate <- function(x) {
  # Same regular expression as the original stringr::str_detect() call,
  # expressed with base grepl() to drop the stringr dependency.
  grepl("[A-Z]\\.?[0-9]+\\..", x)
}
#' Split a Japanese-era date string into era, year, month and day
#'
#' Only the Heisei era ("H" prefix) is currently recognised. Previously an
#' unknown initial left \code{era} unassigned and failed later with the
#' cryptic "object 'era' not found"; it now raises an explicit error.
#'
#' @param x A single string such as "H.25.4.1" (the dot after the era
#'   letter is optional). A missing day component yields \code{NA}.
#' @return A list with elements \code{era} (character) and \code{year},
#'   \code{month}, \code{day} (numeric).
#' @export
split_jpdate <- function(x) {
  # Extract the first capture group of `pattern`, or NA on no match
  # (mirrors stringr::str_match()'s NA-on-no-match behaviour).
  capture <- function(pattern) {
    m <- regmatches(x, regexec(pattern, x))[[1]]
    if (length(m) < 2) NA_character_ else m[2]
  }
  initial <- substr(x, 1, 1)
  era <- switch(initial,
                "H" = "heisei",
                stop("Unsupported era initial: ", initial))
  list(
    "era"   = era,
    "year"  = as.numeric(capture("^[A-Za-z]\\.?([0-9]+)\\.")),
    "month" = as.numeric(capture("^[A-Za-z]\\.?[0-9]+\\.([0-9]+)")),
    "day"   = as.numeric(capture("^[A-Za-z]\\.?[0-9]+\\.[0-9]+\\.([0-9]+)"))
  )
}
# Convert one value to an Excel-style serial day number.
#
# If x looks like a Japanese-era date (see is.jpdate), it is parsed with
# split_jpdate and converted; otherwise x is passed through numerically
# unchanged. The two "+ 1" corrections reproduce Excel's conventions
# (1900-01-01 is day 1, and the phantom 1900-02-29 shifts later dates),
# matching the inverse transform in num2datei.
#
# x - a single string or number.
# Returns a numeric scalar.
#
# NOTE(review): removed the dead `jpyear <- stringr::str_match(x, "[A-Z]")`
# assignment — its value was never used.
jpdate2juliani <- function(x) {
  if (is.jpdate(x)) {
    split <- split_jpdate(x)
    era <- split$era
    switch(era,
           "heisei" = {
             year <- split$year + 1988  # Heisei 1 corresponds to 1989
             month <- split$month
             day <- split$day
             date <- as.Date(paste(year, month, day, sep = "-"))
           })
    jday <- julian.Date(date, origin = as.Date("1900-01-01"))
    jday <- jday + 1 # Correct origin difference
    jday <- jday + 1 # Correct leap year bug
  } else {
    jday <- x
  }
  as.numeric(jday[1])
}
#' Convert a vector of Japanese-era date strings to Excel serial day numbers
#'
#' Vectorised wrapper around \code{jpdate2juliani}; non-jpdate elements are
#' passed through numerically unchanged.
#'
#' @param x Vector of strings and/or numbers.
#' @return Unnamed numeric vector, one serial day per element of \code{x}.
#' @export
jpdate2julian <- function(x) {
  # vapply() replaces purrr::map() + unlist(): same result, no third-party
  # dependency, and the element type is checked.
  vapply(x, jpdate2juliani, numeric(1), USE.NAMES = FALSE)
}
|
6c4f3312c4ac3567d59773331790436a208539a4
|
b613b9e3ef1ebff3d8a6ad1a552bc3742353914e
|
/module_12/problem_set_12/.Rproj.user/FA5B6B16/sources/per/t/D4AA0A11-contents
|
e8530b1bb8501f013e22054299733099dc2063b0
|
[
"MIT"
] |
permissive
|
SHASHANK992/computational-statistics-jhu-spring-20
|
044bff959b7ada946f156c95d292c369ae2d2293
|
75e4341c1adbf3be9df71fe967fcb7ed6f2cc7b2
|
refs/heads/master
| 2022-07-18T10:51:52.486357
| 2020-05-15T03:15:45
| 2020-05-15T03:15:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,827
|
D4AA0A11-contents
|
### Q1
### Running-mean smoother with span chosen by cross-validated RSS (CVRSS).
### NOTE(review): this file is work in progress — trunc_col() and
### truncate_S() are unfinished stubs, and cvrss() references S_ii, which
### is never defined anywhere in this file.
xs <- scan(file.choose()) # Load data (interactive file picker)
ys <- scan(file.choose())
# plot(xs,ys)
df <- data.frame(x = xs, y = ys)
df_sort <- df[order(df$x),] # sort values by x_i's
rownames(df_sort) <- NULL # reset indices
# part (a)
### Plot the CVRSS_k(s_hat_k) versus k and an explanation
### of the chosen span.
odd_values <- function(k) { 2 * k + 1 } # calculate odd values
k_seq <- 1:11
k_candidates <- odd_values(k_seq) # test spans: 3, 5, ..., 23
### The s_hat function calculates the value of s_hat for a given
### span k. It utilizes the formula in eq. 11.5 of the textbook.
### It requires an argument of both the x-value and the i'th row
### index for that x-value.
s_hat <- function(obs_data = df, x, i = 3, k = 3) {
  n <- nrow(obs_data) # Initialize variables
  # Window of k points centered on i, clamped to [1, n] at the boundaries.
  j_min <- max(c(i - (k - 1) / 2, 1)); j_max <- min(c(i + (k - 1) / 2, n))
  s_hat_k <- mean(obs_data[j_min:j_max,'y'])
  return(s_hat_k)
}
# Vectorize over x and i so s_hat can be applied to whole columns at once.
s_hat_vec <- Vectorize(s_hat, vectorize.args = c('x', 'i'))
test_row <- df[10,]
# NOTE(review): integer(rownames(test_row)) calls the length-constructor
# integer(n) — for row "10" it returns ten zeros, not the index 10.
# Presumably as.integer(rownames(test_row)) was intended; confirm.
s_hat(obs_data = df, x = test_row[,1], i = integer(rownames(test_row)), k = 3)
# Incomplete stub: intended to decide which columns of the smoother matrix
# are non-zero for row j, but all branches are empty and it just returns j.
trunc_col <- function(j, k, n) {
  # for j'th row, which columns should be non-zero?
  k_half <- (k - 1) / 2
  # until row k_half + 1
  if (j <= k_half) {
  } else if ((k_half < j) & (j <= (n - k_half))) {
    # when j between k_half + 1 and n - k_half
    # e.g., n=100, j must be between 3 and 98 for k = 5
  } else {
    # when j > n - k_half
  }
  j
}
# Incomplete stub: loop body has no assignment, so the function builds an
# all-NA matrix, discards each row lookup, and returns NULL (no value).
truncate_S <- function(n, k) {
  NA_mat <- matrix(data = NA, nrow = n, ncol = n)
  for(i in 1:n) {
    NA_mat[i,]
  }
}
# Cross-validated RSS for span k (textbook eq.); NOTE(review): S_ii (the
# smoother-matrix diagonal) is undefined here, so this errors when called.
cvrss <- function(obs_data = df, k) {
  n <- nrow(obs_data) # Initialize variables
  x_vec <- obs_data[,'x']; y_vec <- obs_data[,'y']
  mean(
    ((y_vec - s_hat_vec(obs_data = obs_data, x = x_vec, i = 1:n, k = k)) /
       (1 - S_ii))^2
  )
}
|
|
282fd8f2e4785c9eec80a535954a72c27fcb259b
|
8615590c2f6976ceb62b7a3ec5bb2ea3329c0003
|
/man/elevation.Rd
|
b9b1915a802f064322b403dab9740662c015cdb9
|
[
"MIT"
] |
permissive
|
JanLauGe/rgbif
|
859fc81e7b81adb187c63ba1844f0a0ee1f81a7f
|
5f39a2f1ee4c4e00cdf1bf22ae76d7d600ed542d
|
refs/heads/master
| 2021-01-13T09:45:45.895135
| 2016-11-10T18:28:23
| 2016-11-10T18:28:23
| 69,766,790
| 1
| 0
| null | 2016-10-01T22:16:31
| 2016-10-01T22:16:31
| null |
UTF-8
|
R
| false
| true
| 2,288
|
rd
|
elevation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elevation.r
\name{elevation}
\alias{elevation}
\title{Get elevation for lat/long points from a data.frame or list of points.}
\usage{
elevation(input = NULL, latitude = NULL, longitude = NULL,
latlong = NULL, key, ...)
}
\arguments{
\item{input}{A data.frame of lat/long data. There must be columns decimalLatitude and
decimalLongitude.}
\item{latitude}{A vector of latitudes. Must be the same length as the longitude
vector.}
\item{longitude}{A vector of longitudes. Must be the same length as the latitude
vector.}
\item{latlong}{A vector of lat/long pairs. See examples.}
\item{key}{(character) Required. An API key. See Details.}
\item{...}{Further named parameters, such as \code{query}, \code{path}, etc, passed on to
\code{\link[httr]{modify_url}} within \code{\link[httr]{GET}} call. Unnamed parameters will be
combined with \code{\link[httr]{config}}.}
}
\value{
A new column named elevation in the supplied data.frame or a vector with elevation of
each location in meters.
}
\description{
Get elevation for lat/long points from a data.frame or list of points.
}
\details{
To get an API key, see instructions at
https://developers.google.com/maps/documentation/elevation/#api_key - It should be an
easy process. Once you have the key pass it in to the \code{key} parameter. You can store
the key in your \code{.Rprofile} file and read it in via \code{getOption} as in the
examples below.
}
\examples{
\dontrun{
apikey <- getOption("g_elevation_api")
key <- name_suggest('Puma concolor')$key[1]
dat <- occ_search(taxonKey=key, return='data', limit=300, hasCoordinate=TRUE)
head( elevation(dat, key = apikey) )
# Pass in a vector of lat's and a vector of long's
elevation(latitude=dat$decimalLatitude, longitude=dat$decimalLongitude, key = apikey)
# Pass in lat/long pairs in a single vector
pairs <- list(c(31.8496,-110.576060), c(29.15503,-103.59828))
elevation(latlong=pairs, key = apikey)
# Pass on options to httr
library('httr')
pairs <- list(c(31.8496,-110.576060), c(29.15503,-103.59828))
elevation(latlong=pairs, config=verbose(), key = apikey)
}
}
\references{
Uses the Google Elevation API at the following link
https://developers.google.com/maps/documentation/elevation/start
}
|
314aea46fe9768a8650c6a740094cc3178ebe423
|
d8aaf8a83ce8dff401aa5fd22aa0c5be086d4d59
|
/dev/RScripts/plotFunction.R
|
fdd07dc6b26fae3665b8299373fdc7e03ed63601
|
[] |
no_license
|
ainaollevila/Lichen-s-project
|
2b3022bdfb41a08641996c41a0b281efb002a076
|
cd34e064688aa76c1b57df9d97818b498dede578
|
refs/heads/master
| 2020-04-15T12:45:21.009289
| 2017-04-26T22:40:18
| 2017-04-26T22:40:18
| 61,504,423
| 3
| 0
| null | 2017-05-25T23:15:34
| 2016-06-19T23:05:24
|
Matlab
|
UTF-8
|
R
| false
| false
| 3,483
|
r
|
plotFunction.R
|
# Scatter-plot of sample coordinates from the global data frame `m29all`,
# highlighting the sample(s) whose A value equals the global `maxAlgae`
# in dark red with the label "A4".
# NOTE(review): relies on globals m29all and maxAlgae being defined by the
# caller, and dev.off() here closes whatever device is currently open —
# it errors if no device is active; confirm that is intended.
plotOneStuff <- function(){
  dev.off()
  par(mar=rep(1,4))
  plot(m29all$x[m29all$A != maxAlgae],m29all$y[m29all$A != maxAlgae],axes=F,pch=20,col="grey",xlab="lat",ylab="long",cex=2)
  box()
  points(m29all$x[m29all$A == maxAlgae],m29all$y[m29all$A == maxAlgae],col="dark red",pch=20,cex=2)
  text(m29all$x[m29all$A == maxAlgae],m29all$y[m29all$A == maxAlgae]+5,"A4",col="dark red")
  #points(m29all$x[m29all$A == sec],m29all$y[m29all$A == sec],col="blue",pch=20)
  #points(m29all$x[m29all$F == sec]+1,m29all$y[m29all$F == sec],col="blue",pch=20)
}
# Compare network properties of the real data (global `todoB`) against ECHO
# model output, writing one PNG per (sexual-reproduction prob, tick count)
# combination under img/ModelVsData/.
#
# y - property plotted on the y axis (passed to plotProperties).
# x - property plotted on the x axis.
#
# Relies on helpers defined elsewhere in this project: showDistanceProp,
# splitSpace, computeAllPop, plotProperties, and the global `todoB`.
#
# NOTE(review): removed leftover debug overrides that defeated the loops —
# `x="mean_dist"` clobbered the x argument, `nrep="A"` was dead, and
# `nsex=100; tick=10000` inside the loop bodies pinned the loop variables so
# every iteration recomputed (and re-saved under different file names) the
# same single configuration.
unitTestPopSize <- function(y = "betweenness", x = "mad") {
  dir.create("img/ModelVsData/")
  allrep <- c("A", "B", "C", "D", "E")
  for (nsex in c(1, 5, 10, 100)) {
    for (tick in c(10000, 100000, 50000)) {
      allModel <- data.frame()
      for (nrep in allrep) {
        dat <- read.csv(paste("../../dev/data/ECHOresults/mutualism_michaelis-menten_", tick, "ticks_", nsex, "sexualreproduction_replicate", nrep, ".dat", sep = ""), header = FALSE)
        colnames(dat) <- c("A", "F", "x", "y")
        showDistanceProp(dat) # print NA 'cause there is initially no pop in salva's output
        dat <- splitSpace(dat)
        dat$y <- dat$y * 10 # rescale to something closer to reality (1 model unit ~ 35 m)
        dat$x <- dat$x * 10
        showDistanceProp(dat) # now populated, since points were split into pops
        todoModel <- computeAllPop(dat)
        allModel <- rbind(allModel, todoModel)
      }
      png(paste("img/ModelVsData/", x, "VS", y, "-ticks=", tick, "_sp=", nsex, ".png", sep = ""), width = 600)
      par(mfrow = c(1, 2), mar = c(5, 4, 4, .5))
      plotProperties(todoB, x, y, ma = "Dal Grande 2012", log = "x")
      mar <- par()$mar
      allModelB10000 <- allModel[allModel$betweenness > 1, ]
      par(mar = c(5, 2, 4, 1))
      plotProperties(allModelB10000, x, y, main = paste("Model with:\nsexprob=", nsex, ", ticks=", tick, sep = ""), log = "x")
      par(mar = mar)
      dev.off()
    }
  }
}
# Quick look at one ECHO model replicate: reads the raw output file and
# plots the (x, y) coordinates of every individual, without axes.
plotOneModel <- function() {
  par(mar = rep(1, 4))
  model_run <- read.csv("../../dev/data/ECHOresults/mutualism_michaelis-menten_10000ticks_10sexualreproduction_replicateA.dat",
                        header = FALSE)
  colnames(model_run) <- c("A", "F", "x", "y")
  plot(model_run$x, model_run$y, axes = FALSE, xlab = "", ylab = "")
  box()
}
# Write one network plot per population: for each matrix in `allmatrice`
# (a named list of co-occurrence matrices) a PNG named "pop-<name>.png"
# is created in the working directory.
# NOTE(review): plotNetwork() is a project helper defined elsewhere.
plotAll <- function(allmatrice) {
  for (pop_name in names(allmatrice)) {
    png(paste("pop-", pop_name, ".png", sep = ""))
    par(mar = rep(0.1, 4))
    plotNetwork(allmatrice[[pop_name]])
    dev.off()
  }
}
# Script section: side-by-side illustration of the spatial splitting step.
# Reads one raw ECHO replicate and renders the same point cloud twice:
# "split.pdf" with the splitGraph() grid overlay (green points, dark-green
# lines) and "nosplit.pdf" without it (blue points).
# NOTE(review): splitGraph() is a project helper defined elsewhere.
par(mar=rep(.5,4))
aa=read.csv("../data/ECHOresults/mutualism_michaelis-menten_100000ticks_1sexualreproduction_replicateA.dat",header=F)
# Version WITH the splitting grid drawn on top
pdf("split.pdf")
par(mar=c(0,0,0,0))
plot(aa$V3,aa$V4,axes=F,ylab= "",xlab="",col="green",pch=20)
splitGraph(coln="darkgreen")
box()
dev.off()
# Version WITHOUT the grid, for comparison
pdf("nosplit.pdf")
par(mar=c(0,0,0,0))
plot(aa$V3,aa$V4,axes=F,ylab= "",xlab="",col="blue",pch=20)
box()
dev.off()
# Draw one small (x, y) scatter panel per population in `dat`, arranged on an
# approximately square grid, then restore graphical parameters with the
# project helper resetPar().
# dat: data frame with columns Population, x, y.
plotAllPop <- function(dat) {
  par(mar = rep(.5, 4))
  pops <- unique(dat$Population)
  grid_side <- round(sqrt(length(pops)))
  par(mfrow = c(grid_side, grid_side), mar = rep(.1, 4), oma = c(2, 2, 5, 2))
  for (pop in pops) {
    plot(dat[dat$Population == pop, c("x", "y")], axes = FALSE)
    box()
  }
  par(resetPar())
}
|
f8b9e4297cb7de2ecf4efdbcd1771f8662a75f06
|
af142fbc7a5c36557015e59f32c6fc1369e85b1d
|
/man/n2w.Rd
|
0d3306250557edcfc137a47b2b0f9f78f7c9f434
|
[] |
no_license
|
TheoMichelot/localGibbs
|
fdbd5dbab06c98aa02b2979d4a06ee8fc6db8359
|
e1dd16b41e2e6c03ac7a1aedc92cba47b923d2a0
|
refs/heads/master
| 2022-05-08T05:10:22.329007
| 2022-03-09T15:41:47
| 2022-03-09T15:41:47
| 135,612,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,124
|
rd
|
n2w.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n2w.R
\name{n2w}
\alias{n2w}
\title{Transform parameters from natural to working scale}
\usage{
n2w(beta, rdist = c("fixed", "multistate", "exp", "gamma", "weibull"),
r = NULL, sigma = NULL, shape = NULL, rate = NULL, gamma = NULL,
xy = NULL, norm = FALSE)
}
\arguments{
\item{beta}{Coefficients of the RSF}
\item{rdist}{Distribution of the availability radius ("fixed", "multistate",
"exp", "gamma", or "weibull")}
\item{r}{Availability radius, if rdist="fixed" (or vector of radii if multistate)}
\item{sigma}{Vector of standard deviations (if norm=TRUE)}
\item{shape}{Shape parameter, if rdist="gamma"}
\item{rate}{Rate parameter, if rdist="exp" or rdist="gamma"}
\item{gamma}{Transition probability matrix, if rdist="multistate"}
\item{xy}{Matrix of observed locations, needed to derive maximum step length
if rdist="fixed"}
\item{norm}{Logical. TRUE if normal transition density. (Only for multistate case)}
}
\value{
Vector of parameters on the working scale
}
\description{
Transform parameters from natural to working scale
}
|
c6e2abe1bf0cc4050651249661b14884eed254d9
|
bb3b3bfc7fb7144176ed9a1f322e8515bbfdf8f5
|
/CAS Datenanalyse/Graphische Datenexploration/4 Plot-Techniken-drei-Variablen.R
|
a9f9d48b91e35bf7f93747f8a93156c85f00556f
|
[] |
no_license
|
huli/mas-data-science
|
c0d7e9fd14e67ea8fa81f8787e8a4ed8da642f74
|
0ca11a9e9534ce43b0bf291e1b47b21571b60091
|
refs/heads/master
| 2021-01-12T10:54:31.418764
| 2017-07-28T15:31:11
| 2017-07-28T15:31:11
| 72,750,778
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,950
|
r
|
4 Plot-Techniken-drei-Variablen.R
|
################################################################
## Skript: 4 Plot-Techniken-drei-Variablen
## Studiengang: CAS Datenanalyse 16/17
## Modul: Graphische Datenexploration und Datenvisualisierung
## Lernziele: Techniken der Datenexploration mit R - drei Variablen
##
####################################
##
# Benötigte libraries
library(ggplot2)
library(vcd)
###
# Mosaic plot for nominal variables
# Data: UCBAdmissions - Student Admissions at UC Berkeley
# Get familiar with the data
UCBAdmissions
help("UCBAdmissions")
str(UCBAdmissions)
# UCBAdmissions is an aggregated data set of applicants to UC Berkeley,
# broken down by department and gender.
# Background: of 2691 male applicants, 1198 (44.5%) were admitted.
# By comparison: of the 1835 female applicants, only 557 (30.4%) were admitted.
# The university was sued accordingly.
# This discrimination lawsuit against UC Berkeley is a famous example used
# to illustrate Simpson's paradox:
# https://de.wikipedia.org/wiki/Simpson-Paradoxon
# Question: were women really disadvantaged?
# No
# The data format is somewhat special
# (3-dimensional arrays are 3-way frequency tables;
# they can be created with table())
# mytable <- table(A, B, C)
# Inspect the data with ftable()
ftable(UCBAdmissions)
## A mosaic plot splits the data in the order the variables are given
# Create a mosaic plot (mosaic()) that splits the data first by admission (Admit) and then by gender (Gender)
mosaic(~Admit+Gender, UCBAdmissions)
# Now add the departments as a third variable: +Dept
## What does the mosaic plot tell you about admission practices by gender?
mosaic(~Admit+Gender+Dept, UCBAdmissions)
## What becomes visible if we split the data differently: Dept+Gender+Admit
mosaic(~Dept+Gender+Admit, UCBAdmissions)
## Additional visual support is available via the options highlighting and direction.
## highlighting colours the levels of one variable;
## direction states the direction of each split: v=vertical, h=horizontal
## Highlight the genders with the following code snippet:
## highlighting = "Gender",highlighting_fill=c("lightblue","pink"), direction=c("v","v","h")
## Explore the display options by swapping the "v" and "h" parameters.
mosaic(~Dept+Gender+Admit, UCBAdmissions,
highlighting = "Gender",highlighting_fill=c("lightblue","pink"), direction=c("v","v","h"))
## Do you notice anything regarding admission by gender?
####################
# Multiple lines in one plot
# suited for an ordinal variable on the x-axis, a continuous one on the y-axis + a nominal variable (group)
# Required library (for data preparation)
library(plyr)
# Data: ToothGrowth - The Effect of Vitamin C on Tooth Growth in Guinea Pigs
help(ToothGrowth)
# The study examines the effect of vitamin C on teeth. Different delivery
# methods were tested (VC=ascorbic acid, OJ=orange juice).
# Did the guinea pigs' teeth grow depending on dose and delivery method?
# First compute the mean tooth growth by delivery method and dose
# (6 groups) and store it in the object tg
tg<-ddply(ToothGrowth, c("supp","dose"), summarise, length=mean(len))
tg
# To answer the research question, create a line plot with the dose on the
# x-axis (dose) and the tooth length on the y-axis (length).
# Colour the lines by delivery method (colour=supp)
ggplot() +
aes(x=dose, y=length) +
geom_line(data = subset(tg,tg$supp == "OJ"), color="red") +
geom_line(data = subset(tg,tg$supp == "VC"), color="green")
# Same plot, mapping the delivery method to colour in one aes() call
ggplot(tg) +
aes(x=dose, y=length, color=supp) +
geom_line()
# Create the same line plot, but distinguish the delivery method by line
# type (linetype) instead of colour.
# Additionally draw all measurement points into the graphic
ggplot(tg) +
aes(x=dose, y=length, linetype=supp) +
geom_point() +
geom_line()
######
# Barplots
# suited for 2 nominal variables and one metric variable
# When there are theoretically grounded ideas about cause-and-effect
# relationships, the following arrangement suggests itself: x-axis
# (explanatory variable), y-axis (variable to be explained),
# colour differences for groups (third variables)
# Data: cabbage_exp - Data from a cabbage field trial (Summary)
#
library(gcookbook)
help("cabbage_exp")
# Create a barplot with the planting date of the trial cabbage on the
# x-axis (Date), the mean weight on the y-axis, and the different
# cultivation methods (cultivar) shown in colour (fill=).
# Does the cultivation method influence the mean weight of the examined cabbages?
ggplot(cabbage_exp) +
aes(x=Date, y=Weight, fill=Cultivar) +
geom_bar(position="dodge",stat="identity")
# Error: stat_count() must not be used with a y aesthetic.
# (geom_bar wants to count by default; we prevent that with stat="identity")
#########
# Scatterplots (2 metric variables) with a grouping variable
# Required library (for data)
library(plyr)
# Data: heightweight - Height and weight of schoolchildren
# Do a first inspection of the data
heightweight
help("heightweight")
# Starting point is the scatterplot created in the previous script,
# which plots the schoolchildren's height against their age
ggplot(heightweight, aes(x=ageYear,y=heightIn)) +
geom_point()
# How does the plot look when gender differences (sex) are shown in colour (colour=)?
# Is the relationship between age and height different for girls and boys?
ggplot(heightweight, aes(x=ageYear,y=heightIn,colour=sex)) +
geom_point()
# Extend the plot with loess lines,
# which are fitted locally to the data
# and show the age-height relationship separately for girls and boys.
ggplot(heightweight, aes(x=ageYear,y=heightIn,colour=sex)) +
geom_point()+
geom_smooth(method=loess)
####
# Are plots with three metric variables possible?
# Sure. Enter the bubble chart / balloon chart, an extension of the scatterplot.
## Data: countries - Health and economic data about countries around the world from 1960-2010
library(gcookbook)
# BUGFIX: filter(), select() and the %>% pipe used below are dplyr verbs;
# dplyr was never loaded in this script (plyr does not provide them),
# so load it explicitly.
library(dplyr)
str(countries)
help(countries)
# Restrict the data to the year 2009 only
countsub <- filter(countries, Year == 2009)
# Drop rows with missing values and remove the variable laborrate (column 5)
countsub <- countsub %>%
  na.omit() %>%
  select(-laborrate)
# What is the relationship between infant mortality (infmortality),
# health expenditure (healthexp) and gross domestic product (GDP)?
# To answer this question, create a bubble chart with health expenditure
# on the x-axis, infant mortality on the y-axis, and GDP visualised via
# the point size, using aes(x=, y=, size=)
ggplot(countsub, aes(x = healthexp, y = infmortality, size = GDP)) +
  geom_point()
# To draw the circles a bit larger, use scale_size_area(max_size=)
ggplot(countsub, aes(x = healthexp, y = infmortality, size = GDP)) +
  geom_point() +
  scale_size_area(max_size = 10)
# Alternative representation
ggplot(countsub, aes(x = GDP, y = healthexp, size = infmortality)) +
  geom_point() +
  scale_size_area(max_size = 15)
|
d9b389a098e2cd7b69508b581446c3713a0c1156
|
c395a92aee7a688cc6374c447ee76eda5e082723
|
/danboard/test.distribute.r
|
73fc233dcdb97360da9ef5d48d4d4fafaf09f2cf
|
[] |
no_license
|
sasakihajime/devasi
|
ebdc0378ab37be45bfe07ec3484e27264bcff5f8
|
81d5c84fcdfce9faf7e6907ee0e0e09c88a52966
|
refs/heads/master
| 2021-05-28T03:55:23.484054
| 2014-12-22T00:28:29
| 2014-12-22T00:28:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
test.distribute.r
|
## Normality test
# Using the Shapiro-Wilk test on every column of `att` except the first
# (column 1 is presumably an identifier -- TODO confirm).
# BUGFIX: seq_along(att)[-1] instead of 2:length(att) -- the latter yields
# c(2, 1) when `att` has fewer than two columns and loops in reverse.
for (i in seq_along(att)[-1]) {
  att.norm <- shapiro.test(att[, i])
  print(att.norm)
}
# Next step: mark variables with p-value >= 0.05 as TRUE, others as FALSE.
|
63a74989a8c77f51ba0d6e6893c1ba6fc9b61eda
|
8b7b4a096cabade05415edc75f8a5adce62b1576
|
/man/timeline_specs.Rd
|
9f6282dbbbfa7901c729b2da943a54d416826a50
|
[
"MIT"
] |
permissive
|
2DegreesInvesting/r2dii.plot.static
|
0350b9fe0952be30d14bf8868ba3816adde6f63b
|
fcb89716c1668774cda5f0bf1ba10af9a0443057
|
refs/heads/master
| 2023-05-05T22:37:08.046611
| 2021-05-28T15:03:22
| 2021-05-28T15:03:22
| 327,862,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 686
|
rd
|
timeline_specs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/timeline_specs.R
\name{timeline_specs}
\alias{timeline_specs}
\title{Creates the default specs data frame for \code{\link[=plot_timelineA]{plot_timelineA()}}}
\usage{
timeline_specs(data)
}
\arguments{
\item{data}{Pre-processed data for the chart, with columns: year, value,
line_name.}
}
\value{
A data frame useful as the \code{specs} argument of \code{\link[=plot_timelineA]{plot_timelineA()}}.
}
\description{
Creates the default specs data frame for \code{\link[=plot_timelineA]{plot_timelineA()}}
}
\examples{
data <- prepare_for_timelineB(sda_target)
timeline_specs(data)
}
\seealso{
plot_timelineA
}
|
1459fe70d7692f57c38e50627e7377181a9b75ed
|
f87ec62962540a29ba3c879caeeec77bd601c88c
|
/Examples/Styles.R
|
59d28b5e9c406d2106658acb728c9e58a05b1c82
|
[] |
no_license
|
RobDregmans/R_Project_No_Poverty
|
4f16580043873d47e9f5537c7ac4cb4822aad4f0
|
5522aa11c41788327dfcb6a529e4a578bd470132
|
refs/heads/master
| 2020-04-01T01:11:15.712060
| 2018-11-09T19:25:03
| 2018-11-09T19:25:03
| 152,729,417
| 5
| 1
| null | 2018-10-12T10:40:09
| 2018-10-12T09:46:15
|
R
|
UTF-8
|
R
| false
| false
| 11,703
|
r
|
Styles.R
|
#' Base style shared by all concrete chart styles.
#'
#' Produces a list of sizing constants and theme-factory functions which the
#' concrete styles (e.g. style_atlas(), style_worldbank.org()) extend via
#' modifyList().
#'
#' @param textsize Base text size in points.
#' @return A named list with ggplot2 sizing constants (gg_text_size,
#'   gg_max_point_size) and theme helpers (theme_map, theme_bubble_map,
#'   theme_barchart, theme_x_title, theme_scatter, theme_legend).
#' @export
style_base <- function(textsize=7) {
list(
# ggplot2 geoms take sizes in mm, so convert the point-based text size once
gg_text_size = grid::convertX(grid::unit(textsize, "points"), "mm", valueOnly = TRUE),
gg_max_point_size = grid::convertX(grid::unit(0.1, "npc"), "mm", valueOnly = TRUE),
# Theme for maps; legend placement depends on the aspect ratio:
# wide maps (> 1) get a vertical legend on the right, others a horizontal one on top.
theme_map = function(aspect_ratio = 1) {
t <- theme(
panel.grid = element_blank(),
axis.text = element_blank()
)
if (aspect_ratio > 1) {
t + theme(
legend.position = "right",
legend.direction = "vertical",
legend.justification = c(1, 1),
legend.key.width = unit(1, "lines")
)
} else {
t + theme(
legend.position = "top",
legend.direction = "horizontal",
legend.justification = c(0.5, 1),
legend.key.width = unit(1.5, "lines"),
legend.text = element_text(margin=margin(0,3,0,0,"lines"))
#legend.margin = margin(0,0,0,1,"lines")
)
}
},
# Theme for bubble maps: no grid/axes/margins, legend anchored bottom-left inside the plot.
theme_bubble_map = function() {
theme(
panel.grid = element_blank(),
plot.margin=unit(c(0,0,0,0),"mm"),
axis.text = element_blank(),
legend.position = c(0,0),
legend.direction = "vertical",
legend.justification = c(0, 0)
)
},
# Horizontal bar charts: keep only the vertical grid lines.
theme_barchart = function() {
theme(
axis.text.y = element_text(face="plain"),
panel.grid.major.y = element_blank(),
panel.grid.major.x = theme_minimal()$panel.grid.major.x
)
},
# Re-enables the x-axis title (the concrete themes blank axis titles by default).
theme_x_title = function() {
theme(
axis.title.x = element_text(margin = margin(1,0,0,0,"lines"))
)
},
# Scatterplots: x-axis title back on, default vertical grid restored.
theme_scatter = function() {
theme(
axis.title.x = element_text(margin = margin(1,0,0,0,"lines")),
panel.grid.major.x = NULL
)
},
# Legend placement presets, selected by name.
# NOTE(review): listy() appears to be a project helper building a list whose
# later entries can reference earlier ones (e.g. topleft = top + ...) -- confirm.
theme_legend = function(position = "top") {
listy(
top = theme(
legend.position = "top",
legend.margin = margin(0,0,0.3,0, "lines")
),
topleft = top + theme(legend.justification = c(0, 0.5)),
right = theme(
legend.position = "right",
legend.margin = margin(0,0,0,0.5, "lines")
),
righttop = right + theme(legend.justification = c(0.5, 1)),
rightbottom = right + theme(legend.justification = c(0.5, 0)),
bottom = theme(
legend.position = "bottom",
legend.margin = margin(0.3,0,0,0, "lines")
),
left = theme(
legend.position = "left",
legend.margin = margin(0,0.5,0,0, "lines")
),
lefttop = left + theme(legend.justification = c(0.5, 1))
)[position]
}
)
}
#' "Atlas" publication style.
#'
#' Extends style_base() with the Atlas colour palettes, fonts, point shapes,
#' line types and a complete ggplot2 theme.
#'
#' @param textsize Base text size in points.
#' @param family,family.bold Font families for body text and bold/title text.
#' @param is.cmyk If TRUE, spot colours are specified in CMYK for print output.
#' @return A named list of style settings (see style_base()).
#' @export
style_atlas <- function(textsize=7, family="Avenir Book", family.bold = "Avenir Heavy", is.cmyk = FALSE) {
# NOTE(review): listy() appears to be a project helper allowing later fields
# to reference earlier ones (e.g. spot.primary.light uses spot.primary);
# cmyk(), lighten(), darken() and rgba2rgb() are project colour helpers -- confirm.
modifyList(style_base(textsize), listy(
## FONTY STUFF #############################################################
family = family,
## COLORS ##################################################################
colors = listy(
neutral = "grey80",
text = "grey20",
text.inverse = "white",
# Primary/secondary spot colours; separate CMYK specifications for print
spot.primary = if (!is.cmyk) "#cc0641" else cmyk(2.7, 100, 58.6, 12.2, maxColorValue = 100),
spot.primary.light = if (!is.cmyk) lighten(spot.primary) else cmyk(1.3, 50, 29.3, 6.1, maxColorValue = 100),
spot.primary.dark = if (!is.cmyk) darken(spot.primary) else cmyk(0, 97, 68, 75, maxColorValue = 100),
spot.secondary = if (!is.cmyk) "gray30" else cmyk(0, 0, 0, 80, maxColorValue = 100),
spot.secondary.light = if (!is.cmyk) lighten(spot.secondary) else cmyk(0, 0, 0, 50, maxColorValue = 100),
spot.secondary.dark = darken(spot.secondary),
# One colour per World Bank region code
regions = c(
EAS = if (!is.cmyk) "#DF7F2E" else cmyk(0, 55, 90, 10, maxColorValue = 100),
ECS = if (!is.cmyk) "#CE1249" else cmyk(2.7, 100, 58.6, 12.2, maxColorValue = 100),
LCN = if (!is.cmyk) "#3A943C" else cmyk(72, 5, 100, 20, maxColorValue = 100),
MEA = if (!is.cmyk) "#7F3E83" else cmyk(45, 83, 0, 20, maxColorValue = 100),
NAC = if (!is.cmyk) "#4D4D4C" else cmyk(0, 0, 0, 80, maxColorValue = 100),
SAS = if (!is.cmyk) "#2078B6" else cmyk(80, 40, 0, 10, maxColorValue = 100),
SSF = if (!is.cmyk) "#FFCB06" else cmyk(0, 20, 100, 0, maxColorValue = 100)
),
world = c(WLD = "black"),
regions.light = rgba2rgb(regions, alpha = 0.7, background = "white"),
regions.dark = rgba2rgb(regions, alpha = 0.7, background = "black"),
# Income groups mapped onto the two spot colours and their light variants
incomes = c(
HIC = spot.primary,
UMC = spot.primary.light,
LMC = spot.secondary.light,
LIC = spot.secondary
),
gender = c(
female = spot.primary,
male = spot.secondary
),
urban_rural = c(
urban = spot.primary,
rural = spot.secondary
),
categorical = c(
spot.primary,
spot.secondary,
spot.primary.light,
spot.secondary.light,
spot.primary.dark,
spot.secondary.dark
),
reference = "grey70",
baseline = "black",
# Continuous palettes: n colours along a white->light->spot gradient;
# max_pale keeps the palest colour at >= 20% of the gradient so it stays visible.
continuous.primary = function(n) {
g <- scales::gradient_n_pal(c("white", spot.primary.light, spot.primary), values = c(0, 0.55, 1))
max_pale <- 0.20
if (n == 1) g(c(1))
else if (n == 2) g(c(0.55, 1))
else g(max_pale + (0:(n-1))/(n-1)*(1 - max_pale))
},
continuous.primary.dark = function(n) {
g <- scales::gradient_n_pal(c("white", spot.primary.light, spot.primary, spot.primary.dark))
max_pale <- 0.20
g(max_pale + (0:(n-1))/(n-1)*(1 - max_pale))
},
continuous.secondary = function(n) {
g <- scales::gradient_n_pal(c("white", spot.secondary.light, spot.secondary), values = c(0, 0.55, 1))
max_pale <- 0.20
if (n == 1) g(c(1))
else if (n == 2) g(c(0.55, 1))
else g(max_pale + (0:(n-1))/(n-1)*(1 - max_pale))
},
continuous = continuous.primary
),
## SHAPES & LINES ##########################################################
shapes = list(
point = 16 # has no stroke by default
#incomes = c(
# HIC = 21,
# UMC = 21,
# LMC = 1,
# LIC = 1
#)
#categorical = c(
# 19,
# 19,
# 1,
# 1,
# 1
#)
),
point_size = 2,
point_stroke = 0,
#point_stroked_stroke = 1,
#point_stroked_size = 0.75,
linetypes = list(
regions = c(
EAS = "solid",
ECS = "solid",
LCN = "solid",
MEA = "solid",
NAC = "solid",
SAS = "solid",
SSF = "solid"
),
world = c(WLD = "12"),
reference = "longdash",
baseline = "solid"
),
linesize = 0.8,
linesize_reference = 0.4,
arrow = function(ends = "last") { grid::arrow(length = unit(1.5, "mm"), type = "closed", ends = ends) },
## THEME ###################################################################
theme = function() {
theme_minimal() +
theme(text = element_text(family = family, size=textsize, color=colors$text),
line = element_line(size = 0.35),
panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
plot.caption = element_text(hjust=0, size=rel(6/7), lineheight = 1, margin=margin(1.5,0,0,0, unit="line"), color= if (is.cmyk) "grey60" else "black"),
plot.title = element_text(hjust=0, size=rel(10/7), lineheight = 1, family=family.bold, face="bold", color = "black"),
plot.subtitle = element_text(hjust=0, size=rel(8/7), lineheight = 1, color=colors$text),
strip.text = element_text(hjust = 0, size=rel(1.0), lineheight = 1, color=colors$text), # strip text is NOT kind of like subtitle text
axis.text = element_text(size = rel(1.0), color=colors$text),
axis.text.y = element_text(color=colors$text),
axis.text.x = element_text(color=colors$text),
axis.title = element_text(size = rel(1.0), color=colors$text),
axis.title.x = element_blank(),
#axis.title.x.top = element_blank(),
#axis.title.x.bottom = element_blank(),
axis.title.y = element_blank(),
#axis.title.y.left = element_blank(),
#axis.title.y.right = element_blank(),
legend.box.spacing = unit(0.2, "lines"),
legend.margin = margin(0,0,0.3,0, "lines"),
legend.title = element_blank(),
legend.key.size = unit(1.5*textsize, "points"),
legend.text = element_text(size = rel(1.0), lineheight = 0.8, color=colors$text),
legend.background = element_rect(fill = "white", color = NA),
legend.position = "none",
strip.placement = "outside",
plot.margin = margin(1,1,5,1, unit = "mm") #trbl
)
}
))
}
#' Atlas style with CMYK colour specifications for print output.
#' Partial application of style_atlas() with is.cmyk fixed to TRUE; all
#' other arguments (textsize, family, family.bold) pass through unchanged.
#' @export
style_atlas_cmyk <- purrr::partial(style_atlas, is.cmyk=TRUE)
#' Atlas style using the freely available "Nunito Sans" font family for both
#' body and bold text instead of Avenir. All remaining arguments are
#' forwarded to style_atlas().
#' @export
style_atlas_open <- function(textsize=7, ...) {
  style_atlas(
    textsize = textsize,
    family = "Nunito Sans",
    family.bold = "Nunito Sans",
    ...
  )
}
#' worldbank.org web style.
#'
#' Extends style_base() with the worldbank.org colour palette ("Open Sans"
#' font, blue spot colours) and a matching ggplot2 theme.
#'
#' @param textsize Base text size in points.
#' @return A named list of style settings (see style_base()).
#' @export
style_worldbank.org <- function(textsize=7) {
modifyList(style_base(textsize), list(
theme = function() {
theme_minimal() +
theme(text = element_text(family = "Open Sans", size = textsize, color="#333333"),
panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
#legend.position = "none",
plot.caption=element_text(hjust=0, size=rel(0.9), margin=margin(1.5,0,0,0, unit="line")),
plot.title=element_text(hjust=0, size=rel(1.15), face="bold"),
plot.subtitle = element_text(hjust=0,size=rel(1.0)),
axis.text=element_text(size=rel(1.0)),
axis.text.y=element_text(face="bold"),
axis.title=element_blank(),
plot.margin=unit(c(5,5,5,0),"mm"),
legend.title = element_blank())
},
colors = list(
# Blue spot colours used across worldbank.org charts
spot.primary = "#0071bc",
spot.secondary = "#009fda",
spot.secondary.light = "#a5e8ff",
# The three blues are cycled across the seven region codes
regions =
c(EAS = "#0071bc",
ECS = "#009fda",
LCN = "#a5e8ff",
MEA = "#0071bc",
NAC = "#009fda",
SAS = "#a5e8ff",
SSF = "#0071bc"),
categorical = c(
"#0071bc",
"#009fda",
"#a5e8ff",
"#9e9f9e",
"#686868"
),
world = "black",
# n evenly spaced colours on a white-to-primary-blue gradient
continuous = function(n) { scales::seq_gradient_pal(low = "white", high = "#0071bc")((1:n)/n) }
),
shapes = list(
categorical = c(
19,
19,
1,
1,
1
)
)
))
}
|
93e1642a000124cca6e1f53ad03e72fd53feaa2e
|
1ec77d38d3100edd84b092d76de0f5557ab613f4
|
/man/Dataset.Rd
|
5ff9c3f3c8e8e6d255272795a2d6df14d7a742f2
|
[
"Apache-2.0"
] |
permissive
|
MicroStrategy/mstrio
|
63cfe68995a7072a6a6013e386033d363c815229
|
b656000adf913bf0fc66dc0960021463fcf3214f
|
refs/heads/master
| 2023-07-20T05:14:27.985866
| 2023-07-06T10:08:35
| 2023-07-06T10:08:35
| 138,627,534
| 21
| 11
|
Apache-2.0
| 2023-07-06T10:08:37
| 2018-06-25T17:23:25
|
R
|
UTF-8
|
R
| false
| true
| 10,878
|
rd
|
Dataset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset.R
\docType{class}
\name{Dataset}
\alias{Dataset}
\title{Create, update, delete and certify MicroStrategy datasets}
\description{
When creating a new dataset, provide a dataset name and an optional description.
When updating a pre-existing dataset, provide the dataset identifier. Tables are added to the
dataset in an iterative manner using `add_table()`.
}
\examples{
\dontrun{
# Create data frames
df1 <- data.frame("id" = c(1, 2, 3, 4, 5),
"first_name" = c("Jason", "Molly", "Tina", "Jake", "Amy"),
"last_name" = c("Miller", "Jacobson", "Turner", "Milner", "Cooze"))
df2 <- data.frame("id" = c(1, 2, 3, 4, 5),
"age" = c(42, 52, 36, 24, 73),
"state" = c("VA", "NC", "WY", "CA", "CA"),
"salary" = c(50000, 100000, 75000, 85000, 250000))
# Create a list of tables containing one or more tables and their names
my_dataset <- Dataset$new(connection=conn, name="HR Analysis")
my_dataset$add_table("Employees", df1, "add")
my_dataset$add_table("Salaries", df2, "add")
my_dataset$create()
# By default Dataset$create() will upload the data to the Intelligence Server and publish the
dataset.
# If you just want to create the dataset but not upload the row-level data, use
Dataset$create(auto_upload=FALSE)
# followed by
Dataset$update()
Dataset$publish()
# When the source data changes and users need the latest data for analysis and reporting in
# MicroStrategy, mstrio allows you to update the previously created dataset.
ds <- Dataset$new(connection=conn, dataset_id="...")
ds$add_table(name = "Stores", data_frame = stores_df, update_policy = 'update')
ds$add_table(name = "Sales", data_frame = stores_df, update_policy = 'upsert')
ds$update(auto_publish=TRUE)
# By default Dataset$update() will upload the data to the Intelligence Server and publish the
dataset.
# If you just want to update the dataset but not publish the row-level data, use
Dataset$update(auto_publish=FALSE)
# By default, the raw data is transmitted to the server in increments of 100,000 rows. On very
# large datasets (>1 GB), it is beneficial to increase the number of rows transmitted to the
# Intelligence Server with each request. Do this with the chunksize parameter:
ds$update(chunksize = 500000)
# If you want to certify an existing dataset, use
ds$certify()
}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{connection}}{MicroStrategy connection object}
\item{\code{name}}{Name of the dataset}
\item{\code{description}}{Description of the dataset. Must be less than or equal to 250 characters}
\item{\code{folder_id}}{If specified the dataset will be saved in this folder}
\item{\code{dataset_id}}{Identifier of a pre-existing dataset. Used when updating a pre-existing dataset}
\item{\code{owner_id}}{Owner ID}
\item{\code{path}}{Cube path}
\item{\code{modification_time}}{Last modification time, "yyyy-MM-dd HH:mm:ss" in UTC}
\item{\code{size}}{Cube size}
\item{\code{cube_state}}{Cube status,for example, 0=unpublished, 1=publishing, 64=ready}
\item{\code{verbose}}{If True (default), displays additional messages.}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{Dataset$new()}}
\item \href{#method-add_table}{\code{Dataset$add_table()}}
\item \href{#method-create}{\code{Dataset$create()}}
\item \href{#method-update}{\code{Dataset$update()}}
\item \href{#method-publish}{\code{Dataset$publish()}}
\item \href{#method-publish_status}{\code{Dataset$publish_status()}}
\item \href{#method-delete}{\code{Dataset$delete()}}
\item \href{#method-certify}{\code{Dataset$certify()}}
\item \href{#method-clone}{\code{Dataset$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Interface for creating, updating, and deleting MicroStrategy in-memory datasets.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$new(
connection,
name = NULL,
description = NULL,
dataset_id = NULL,
verbose = TRUE
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{connection}}{MicroStrategy connection object returned by `Connection$New()`.}
\item{\code{name}}{(character): Name of the dataset.}
\item{\code{description}}{(character, optional): Description of the dataset. Must be less than or equal to 250 characters.}
\item{\code{dataset_id}}{(character, optional): Identifier of a pre-existing dataset. Used when updating a pre-existing
dataset.}
\item{\code{verbose}}{Setting to control the amount of feedback from the I-Server.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
When creating a new dataset, provide a dataset name and an optional description. When
updating a pre-existing dataset, provide the dataset identifier. Tables are added to the
dataset in an iterative manner using `add_table()`.
}
\subsection{Returns}{
A new `Datasets` object
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-add_table"></a>}}
\if{latex}{\out{\hypertarget{method-add_table}{}}}
\subsection{Method \code{add_table()}}{
Add a data.frame to a collection of tables which are later used to update the MicroStrategy dataset
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$add_table(
name,
data_frame,
update_policy,
to_metric = NULL,
to_attribute = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{name}}{(character): Logical name of the table that is visible to users of the dataset in MicroStrategy.}
\item{\code{data_frame}}{(`data.frame`): R data.frame to add or update.}
\item{\code{update_policy}}{(character): Update operation to perform. One of 'add' (inserts new, unique rows), 'update'
(updates data in existing rows and columns), 'upsert' (updates existing data and inserts new rows), or 'replace'
(replaces the existing data with new data).}
\item{\code{to_metric}}{(optional, vector): By default, R numeric data types are treated as metrics in
the MicroStrategy dataset while character and date types are treated as attributes. For example, a
column of integer-like strings ("1", "2", "3") would, by default, be an attribute in the newly created
dataset. If the intent is to format this data as a metric, provide the respective column name as
a character vector in `to_metric` parameter.}
\item{\code{to_attribute}}{(optional, vector): Logical opposite of `to_metric`. Helpful for formatting an
integer-based row identifier as a primary key in the dataset.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
Add tables to the dataset in an iterative manner using `add_table()`.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-create"></a>}}
\if{latex}{\out{\hypertarget{method-create}{}}}
\subsection{Method \code{create()}}{
Create a new dataset.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$create(
folder_id = NULL,
auto_upload = TRUE,
auto_publish = TRUE,
chunksize = 1e+05
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{folder_id}}{ID of the shared folder that the dataset should be created within. If `None`, defaults to the
user's My Reports folder.}
\item{\code{auto_upload}}{(default TRUE) If True, automatically uploads the data to the I-Server. If False, simply
creates the dataset definition but does not upload data to it.}
\item{\code{auto_publish}}{(default TRUE) If True, automatically publishes the data used to create the dataset
definition. If False, simply creates the dataset but does not publish it. To publish the dataset, data has to be
uploaded first.}
\item{\code{chunksize}}{(int, optional) Number of rows to transmit to the I-Server with each request when uploading.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-update"></a>}}
\if{latex}{\out{\hypertarget{method-update}{}}}
\subsection{Method \code{update()}}{
Updates an existing dataset with new data.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$update(chunksize = 1e+05, auto_publish = TRUE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{chunksize}}{(int, optional): Number of rows to transmit to the I-Server with each request when uploading.}
\item{\code{auto_publish}}{(default TRUE) If True, automatically publishes the data. If False, data will be uploaded but
the cube will not be published}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-publish"></a>}}
\if{latex}{\out{\hypertarget{method-publish}{}}}
\subsection{Method \code{publish()}}{
Publish the uploaded data to the selected dataset. A dataset can be published just once.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$publish()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-publish_status"></a>}}
\if{latex}{\out{\hypertarget{method-publish_status}{}}}
\subsection{Method \code{publish_status()}}{
Check the status of data that was uploaded to a dataset.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$publish_status()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Response status code
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-delete"></a>}}
\if{latex}{\out{\hypertarget{method-delete}{}}}
\subsection{Method \code{delete()}}{
Delete a dataset that was previously created using the REST API.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$delete()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Response object from the Intelligence Server acknowledging the deletion process.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-certify"></a>}}
\if{latex}{\out{\hypertarget{method-certify}{}}}
\subsection{Method \code{certify()}}{
Certify a dataset that was previously created using the REST API
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$certify()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
Response object from the Intelligence Server acknowledging the certification process.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Dataset$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
e88a5107d3620f1ec9ccad73fc2dc5ca6b3bdc60
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.business.applications/man/alexaforbusiness_disassociate_device_from_room.Rd
|
67e685facf4b0836ba6df30a8f7b16b10a14f42d
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 798
|
rd
|
alexaforbusiness_disassociate_device_from_room.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alexaforbusiness_operations.R
\name{alexaforbusiness_disassociate_device_from_room}
\alias{alexaforbusiness_disassociate_device_from_room}
\title{Disassociates a device from its current room}
\usage{
alexaforbusiness_disassociate_device_from_room(DeviceArn)
}
\arguments{
\item{DeviceArn}{The ARN of the device to disassociate from a room. Required.}
}
\value{
An empty list.
}
\description{
Disassociates a device from its current room. The device continues to be
connected to the Wi-Fi network and is still registered to the account.
The device settings and skills are removed from the room.
}
\section{Request syntax}{
\preformatted{svc$disassociate_device_from_room(
DeviceArn = "string"
)
}
}
\keyword{internal}
|
eb6ff8d7bf1d96792337ebedff2c1e89fb07155f
|
4baecdaa52eca140342b4b84055cf70a71b5c1e1
|
/plot_mate.R
|
1836226bdd06afcfd5c652b66bd3fa66973cddcb
|
[] |
no_license
|
baibaidj/VisualData
|
aa68624e0217e7a723ae25fa3558ec464c10dee6
|
c206aee27bb530079c7d1031547f844b418b37e4
|
refs/heads/master
| 2021-10-23T09:03:22.695954
| 2019-03-16T10:46:22
| 2019-03-16T10:46:22
| 123,928,367
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,817
|
r
|
plot_mate.R
|
# Install any packages from `pkg` that are not yet installed (including their
# dependencies), then attach them all.
#
# @param pkg Character vector of package names.
# @return Named logical vector, one element per package: TRUE if the package
#   was attached successfully (same result shape as the original sapply call).
ipak <- function(pkg) {
  stopifnot(is.character(pkg))
  new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
  # Explicit length comparison instead of relying on numeric truthiness.
  if (length(new.pkg) > 0) {
    install.packages(new.pkg, dependencies = TRUE)
  }
  # vapply() guarantees a logical vector even for empty input, unlike
  # sapply(), whose return type depends on the input.
  vapply(pkg, require, logical(1), character.only = TRUE)
}
## Packages required by this script; new packages can be added here.
## The commented-out names are optional extras used in earlier analyses.
packages <- c("reshape2", "plyr", "readxl", #"sandwich", "nlme", "effects", "binom", "doBy", "grid",
              "boot", "ggplot2", #"scales", "lme4", "bootstrap", "car","arm","mfx","psych",
              "tidyr", #"devtools", "data.table", "stringr", "stats", "QuantPsyc", "lsmeans",
              "multicon")
# Install (if missing) and attach every package listed above.
ipak(packages)
# library("readxl")
# library("ggplot2")
# Compute descriptive statistics of a measurement variable, split by one or
# more grouping variables: for each group it returns N, mean, sd, standard
# error of the mean (se) and the half-width of the confidence interval (ci).
#
# Arguments:
#   data          data.frame containing the measurement and grouping columns.
#   measurevar    name (string) of the column to summarise.
#   groupvars     character vector of grouping column names.
#   na.rm         drop NA values before computing N/mean/sd?
#   conf.interval confidence level for the ci column (default 0.95).
#   .drop         passed through to plyr::ddply (drop unused factor combos).
#
# Returns: a data.frame with one row per group and columns
#   <groupvars...>, N, <measurevar>_mean, sd, se, ci.
#
# NOTE(review): attaching plyr inside the function is a side effect on the
# caller's search path — consider plyr::ddply()/plyr::rename() instead.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)
  # length() variant that can honour na.rm: if na.rm == TRUE, NA values are
  # not counted towards N.
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  # Per-group summary: for each group's sub-data.frame, return a vector with
  # N, mean, and sd of the measurement column.
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N = length2(xx[[col]], na.rm=na.rm),
                     mean = mean (xx[[col]], na.rm=na.rm),
                     sd = sd (xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # Rename the generic "mean" column to "<measurevar>_mean".
  datac <- rename(datac, c("mean" = paste(measurevar, "mean", sep = "_")))
  datac$se <- datac$sd / sqrt(datac$N) # standard error of the mean
  # Confidence-interval multiplier for the standard error: t quantile with
  # df = N - 1. E.g. for conf.interval = .95 this uses the .975 quantile.
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
## =============================================================================
## Assign an anatomical body-part category to each row of the results table,
## based on the organ/ROI name stored in the column named by `organ_columns`.
## Matching is case-insensitive.
##
## @param data_frame    data.frame of per-organ segmentation results.
## @param organ_columns name of the column holding the organ/ROI name
##                      (default "pred_rois").
## @return `data_frame` with an added `bodypart` factor column whose levels are
##         Others, HeadNeck, Chest, Abdomen, Pelvis, Vessels, Muscles.
assignBodyPart <- function(data_frame, organ_columns = c('pred_rois')){
  # Category membership lists (all stored lower-case for comparison).
  eye_brain <- tolower(c("Eye_L", "Eye_R","Lens_L", "Lens_R", "OpticNerve_L", "OpticNerve_R", "OpticChiasm",
                         "TemporalLobe_L", "TemporalLobe_R", "Cochlea_L", "Cochlea_R", 'BrainStem',
                         "Brain", "Cerebrum", "Cerebellum", "Body",'body',
                         'cochlea', 'eye', 'lens', 'optic_chiasm', 'optic_nerve', 'temporal_lobe',
                         'mandibleLR', 'opticnerve', 'temporallobe'))
  headgland <- tolower(c("Parotid", "Parotid_L", "Parotid_R", "Pituitary", "Thyroid", "Mandible",
                         "Mandible_L", "Mandible_R", "TMJ_L", "TMJ_R", "Tongue", "Larynx", "OralCavity",
                         "SMG_L", "SMG_R", 'SMG', 'oral', 'pulmonary_vessel'))
  thoracic <- tolower(c("lung", "Lung_L", "Lung_R", "Trachea", "Trachea_Bronchus", "Heart","pulmonary",
                        "Atrium_L", "Atrium_R", "Ventricle_L", "Ventricle_R", "Breast_L", "Breast_R",
                        "SpinalCord", "Esophagus"))
  abdomen <- tolower(c("kidney", "Kidney_L", "Kidney_R", "Liver", "Spleen", "Stomach", "BowelBag", "Pancreas"))
  pelvis <- tolower(c("Bladder", "Rectum","FemoralHead_L", "FemoralHead_R", "pelvis",
                      "Femur_L", "Femur_R", "PelvicBone"))
  vessels <- tolower(c( "Vertebral.A_L", "Vertebral.A_R", 'CCA',"CCA_L", "CCA_R", "Aorta", "IJV_L", "IJV_R",
                        "BCV_L", "BCV_R", "SVC", "IMA_L", "IMA_R", "IVC", "Subclavian.A_L", "Subclavian.A_R",
                        "Pulmonary.A", "Pulmonary.V", "IMA", 'vein', 'Vertebral.A','pulmonaryvessel',
                        'subclavian', 'Vertebral.A'
  ))
  muscles <- tolower(c( "Sternohyoid.M", "Scleido.M", 'scleido', 'sternohyoid'))
  organ <- tolower(as.character(data_frame[[organ_columns]]))
  # Vectorized replacement of the original row-by-row loop. Masks are applied
  # from the lowest-priority group up to the highest so that an organ name
  # appearing in several lists ends up in the same category as the original
  # sequential if/else chain (earlier groups win).
  code <- rep(0L, length(organ))          # 0 = "Others" (default / unmatched)
  code[organ %in% muscles]   <- 6L
  code[organ %in% vessels]   <- 5L
  code[organ %in% pelvis]    <- 4L
  code[organ %in% abdomen]   <- 3L
  code[organ %in% thoracic]  <- 2L
  code[organ %in% headgland] <- 1L        # head glands share the HeadNeck label
  code[organ %in% eye_brain] <- 1L
  bodypart_name <- c("Others", "HeadNeck", "Chest", "Abdomen", "Pelvis", "Vessels", "Muscles")
  data_frame$bodypart <- factor(code,
                                levels = c(0:6),
                                labels = bodypart_name)
  return(data_frame)
}
# Compute descriptive statistics (N, mean, sd, se, ci) of `target_metric` for
# the rows whose metric value lies inside `valid_range`, grouped by
# `groupsvars`. The summary table is also written to a CSV file named
# "<metric>.by.<group1.group2...>.csv" in the working directory.
metricStat <- function(dataframe, target_metric, groupsvars, valid_range){
  values <- dataframe[[target_metric]]
  # Keep rows with a non-missing metric value inside the valid range.
  keep <- !is.na(values) & values >= valid_range[1] & values <= valid_range[2]
  metricdata_valid <- dataframe[keep, ]
  print(dim(metricdata_valid))
  metric_by_groups <- summarySE(metricdata_valid,
                                measurevar = target_metric,
                                groupvars = groupsvars)
  out_file <- paste(target_metric, 'by',
                    paste(groupsvars, collapse = '.'), 'csv', sep = '.')
  write.csv(metric_by_groups, file = out_file)
  return(metric_by_groups)
}
# Box-and-jitter plot of an accuracy metric (e.g. Dice or Hausdorff distance)
# per organ, faceted by body part, with the rows filtered to `valid_range`.
#
# Arguments:
#   dataframe     results table; must contain `x_variable`, `target_metric`,
#                 `colors_on` and the faceting column(s).
#   x_variable    column name (string) plotted on the x axis (e.g. organ).
#   target_metric column name (string) of the metric plotted on the y axis.
#   colors_on     column name (string) used to colour the jittered points.
#   valid_range   numeric c(min, max); rows outside it (or NA) are dropped.
#   fac1, fac2    row/column faceting variables for facet_grid ('.' = none).
#   x_label, y_label  axis labels.
#
# Returns: a ggplot object (print it to render).
#
# NOTE(review): aes_string() is soft-deprecated in ggplot2; consider
# aes(.data[[x_variable]], .data[[target_metric]]) — confirm ggplot2 version.
AccuracyScatterPlot <- function(dataframe, x_variable, target_metric, colors_on,
                                valid_range = c(0,1), fac1 = '.', fac2 = 'bodypart',
                                x_label = 'Organs', y_label = 'Dice Coefficent'){
  # metricdata_valid = clean_data[!is.na(clean_data$dice) & clean_data$dice>0.1,]
  # Drop NA metric values and anything outside the valid range.
  metricdata_valid = dataframe[!is.na(dataframe[target_metric])&
                                 dataframe[target_metric]>=valid_range[1] &
                                 dataframe[target_metric]<=valid_range[2],]
  ggplot(metricdata_valid, aes_string(x_variable, target_metric))+
    geom_boxplot( outlier.shape = NA, colour = "#3366FF", fill = "white")+
    #outlier.shape = NA, colour = "#3366FF", outlier.colour = "black", outlier.shape = 1
    geom_jitter(aes_string(colour = colors_on), size = 1.0, width = 0.2)+ # plot individual data points
    scale_shape_manual(values=c(1,17))+ # point shape per group
    facet_grid(reformulate(fac2, fac1), scales = "free_x", space = "free_x")+ # split plot by body part
    # stat_summary(fun.data="mean_se", fun.args = list(mult=1), # plot mean and error bar in the middle
    #              geom="crossbar", width=0.3, color = "red")+
    labs(x = x_label, y = y_label)+
    theme_bw()+
    theme(axis.title.x = element_blank(),
          axis.text.x = element_text(face = "bold", angle=90, vjust =0.7, size=8), #vjust =0,
          axis.title.y = element_text(face="bold", size=12),
          axis.text.y = element_text(angle=0, vjust=0.5, size=8),
          legend.title=element_text(face="bold", size=12),
          legend.text = element_text(size = 8),
          legend.position="top" ## legend.position="none" if you don't need legend
    )
  # +coord_flip()
  # guides(shape = guide_legend(override.aes = list(size=4))) # change the text size of the legend
}
|
b49421182f3d244d9256b89f777f4d0978c43efd
|
6ba63d79afa36c706583045654f262afd57ec3ea
|
/run_analysis.r
|
e778da5117f88193993d7ad632dc4718b2b54161
|
[] |
no_license
|
sandeepsingh2406/Coursera-Getting-and-Cleaning-Data
|
2ae960fe08a28a0535dda4791e899913cafe7c3b
|
794e2e6cd1bfee67079cf19d1ecec75d0c70d335
|
refs/heads/master
| 2020-07-22T01:58:03.041321
| 2019-09-11T04:52:50
| 2019-09-11T04:52:50
| 207,039,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,826
|
r
|
run_analysis.r
|
# Getting & Cleaning Data course project: build a tidy summary of the UCI HAR
# (Human Activity Recognition) smartphone dataset.
library(dplyr);
library(data.table)
# Download the zip file if it doesn't exist yet.
file_name<-"getdata_projectfiles.zip"
if(!file.exists(file_name)){
  download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",file_name,method="curl")
}
# Unzip the file if not already done.
if(!file.exists("UCI HAR Dataset")){
  unzip(file_name)
}
# Read feature names plus the train and test measurement/label/subject files.
features<-read.table("UCI HAR Dataset/features.txt")
features<-t(features)[2,]
x_train<-read.table("UCI HAR Dataset/train/X_train.txt")
y_train<-read.table("UCI HAR Dataset/train/y_train.txt")
x_test<-read.table("UCI HAR Dataset/test/X_test.txt")
y_test<-read.table("UCI HAR Dataset/test/y_test.txt")
subject_train<-read.table("UCI HAR Dataset/train/subject_train.txt")
subject_test<-read.table("UCI HAR Dataset/test/subject_test.txt")
# Merge the training and test sets into one data set (subject | features |
# activity) and give the merged data its column names.
x_data <- rbind(x_train, x_test)
y_data<-rbind(y_train,y_test)
subject_data<-rbind(subject_train, subject_test)
col_names<-c("subject",features,"activity")
merged_data<-cbind(subject_data,x_data,y_data)
colnames(merged_data)<-col_names
# Keep only mean() and std() measurements, plus the subject/activity columns.
mean_std_data<-merged_data[grepl("mean\\(\\)|std\\(\\)|^subject$|^activity$",col_names)]
# Get descriptive activity names and add them as a column, replacing the
# numeric "activity" code column.
activity_labels<-read.table("UCI HAR Dataset/activity_labels.txt")
activity_names<-activity_labels[mean_std_data$activity,2]
mean_std_data<-cbind(mean_std_data,activity_names)
mean_std_data<-mean_std_data[ , !names(mean_std_data)== "activity"]
merged_data<-mean_std_data
# Appropriately label the data set with descriptive variable names.
names(merged_data)<-gsub("^t", "time", names(merged_data))
names(merged_data)<-gsub("^f", "frequency", names(merged_data))
names(merged_data)<-gsub("Mag-", "Magnitude-", names(merged_data))
names(merged_data)<-gsub("Acc", "Acceleration", names(merged_data))
names(merged_data)<-gsub("Gyro", "Gyroscope", names(merged_data))
names(merged_data)<-gsub("iqr\\(\\)", "InterquartileRange()", names(merged_data))
names(merged_data)<-gsub("arCoeff\\(\\)", "Autorregresion_coefficients()", names(merged_data))
# Create a second, independent tidy data set with the average of each variable
# for each activity and each subject.
tidyData_aggregated<-
  aggregate(merged_data[, 2:(ncol(merged_data)-1)], list(merged_data$subject,merged_data$activity_names), mean)
# NOTE(review): this uses ncol(merged_data); it works only because both tables
# happen to have the same column count — ncol(tidyData_aggregated) would be
# the safer choice. TODO confirm and fix upstream.
colnames(tidyData_aggregated)<-c("subject","activity_names",colnames(tidyData_aggregated[3:ncol(merged_data)]))
# Write this tidy dataset to a '~'-separated text file.
write.table(x = tidyData_aggregated, file = "tidyData_aggregated.txt", quote = FALSE,sep="~", row.name=FALSE)
|
b4e85b3c70497e500f9ff98f653ceff34c887e7f
|
1bedc58d6ad7d98194c986c6f47ad6c9691ebb73
|
/src/03-logistic_regression.R
|
9e7de4c0c9491dd047ed7b8d748a8f111b8e220e
|
[] |
no_license
|
bgulbis/NMBA_Enteral_Feeding
|
a1533cabbdf21eee450ed8299651f54b052f588b
|
6419350388e47a1468005ee3cf76b48f2ba3aadf
|
refs/heads/master
| 2021-01-11T21:29:12.152815
| 2017-04-10T16:17:27
| 2017-04-10T16:17:27
| 78,790,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
03-logistic_regression.R
|
# Fit a logistic regression for reaching the enteral-feeding goal as a
# function of average pressor dose, formula osmolality and motility-agent use.
library(tidyverse)
library(readxl)
library(broom)
# Column names for the Excel sheet (its own header row is skipped below).
nm <- c("goal", "pressor_avg", "osmolality", "motility_agent")
# Read the data and convert the 1/0-coded columns to logical.
# NOTE(review): dmap_at() comes from an older purrr/purrrlyr API — confirm
# which package provides it in this project.
df <- read_excel("data/external/regression_data.xls", col_names = nm, skip = 1) %>%
    dmap_at(c("goal", "motility_agent"), ~ .x == 1)
# Logistic regression: goal ~ pressor_avg + osmolality + motility_agent.
mod <- glm(goal ~ pressor_avg + osmolality + motility_agent, df, family = "binomial")
summary(mod)   # coefficient table
glance(mod)    # one-row model-level summary
tidy(mod)      # tidy coefficient data frame
|
75363e151daaf438a21bf5caa11eeefe9226a6bd
|
c0316bffea019bcd9d895124c8cf0cd5523b6352
|
/Clases.R
|
a669563b3aa539e48a187f7e4cbb509ee128f166
|
[] |
no_license
|
IsmaelSPL/Mis_Clases_R
|
87ea41172800e94260ea76189acd87b0d0e21bc8
|
06f55b8fabeb4a9b2a97d3906f79eb5d3f1ed812
|
refs/heads/main
| 2023-04-28T13:07:16.695072
| 2021-05-13T05:52:26
| 2021-05-13T05:52:26
| 356,630,283
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 50,601
|
r
|
Clases.R
|
#(Clase 1) Comandos básicos, Asignación de variables,
#tipo de datos y operaciones en Rstudio
#Print
print("Hola mundo")
"hola mundo"
#Help
help(summary)
mean() #Sombreas + FI
#Instalación de paquete y activamos el paquete
install.packages("car")
library(car)
#Para saber donde está el directorio de trabajo
getwd()
#Cambiamos nuestro directorio de tabajo
setwd("C:/Users/Ismael/Documents/GitHub/Mis_clases")
#Para limpiar la consola: control + L
#Asignación de Variables y Tipos de datos
#Numeric
x<- 10.5
class(x)
x<-123
class(x)
#Integer
x<-100L
class(x)
#Complex
x<- 9i + 3
class(x)
#Character/string
x<- "Si te gusta el video dejame un like"
class(x)
#Logical/boolean
x<- TRUE
class(x)
#Cambio de tipo de datos
x<- 61.5
class(x)
x<- as.character(x)
x<- as.numeric(x)
x<- as.complex(x)
x<- as.integer(x)
x<- as.logical(x)
x<- FALSE
#Operadores aritmeticos
5+5
7-2
7*9
12/5
5^2
5%%2 #Para obtener el residuo
35%/%2 #División entera
abs(-10) #Valor absoluto
log(100) #Logaritmo neperiano
log10(100)
#Operadores de comparación
5==5
5!=7
7>9
7<9
7>=9
7<=9
#Operaciones con asignación
x<- 12
y<- 5
x+y
x*y
x/y
x^y
x<y
print(x+y)
#Para eliminar una variable
rm(list = c("x"))
#Para eliminar dos o más variables
x<- 12
y<- 5
z<- 6
rm(list = c("x","y"))
#ls: sirve para conocer objetos existentes
ls()
#Eliminamos todos los objetos
rm(list = ls())
#Redondeo
#ceilling: redondea un número hacia arriba a su entero más cercano
ceiling(2.4)
#floor: redondea un número hacia abajo a su entero más cercano
floor(2.4)
#String
a<- "Aprediendo a
programar"
a
cat(a)
#Para encontrar el número de caracteres de una cadena
nchar(a)
#grepl: verifica si un caracter o una secuencia de
#caracteres están presentes en una cadena
a<- "Hoy voy a pasear a mi gran perro que tiene 1 año"
grepl("H",a)
grepl("gran", a)
grepl(1,a)
grepl(5,a)
#paste(): función para fusionar o concatenar dos cadenas:
a<- "Mi hobby es"
b<- "tocar la guitarra"
paste(a,b)
c<- paste(a,b)
print(c)
c
###############################################################################
#(Clase 2) Vectores en Rstudio
#Vectores numeric
numeros<- c(20,5,8)
numeros
class(numeros)
#Para crear un vector con una secuencia de números
numeros<- 3:10
numeros
numeros<- c(3:10)
#Vectores con decimales
#Cuando pertenece el último elemento a la secuencia
numero1<- c(1.5:6.5)
numero1
#cuando no pertenece el ultimo elemento a la secuencia
numero2<- c(1.5:6.3)
numero2
#Generación de vectores secuenciados
numeros<- seq(from=0, to=100, by=20)
numeros
numeros<- seq(0,100, by=20)
#Para repetir cada elemento del vector
rep_each<- rep(c(1,2,3), each=3)
rep_each
#Repite la secuencia del vector
rep_times<- rep(c(1,2,3), times=3)
rep_times
#Repetimos cada valor de forma independiente
rep_independiente<- rep(c(1,2,3), times=c(5,2,1))
rep_independiente
#Vectores character
frutas<- c("manzana","naranja","fresa","pera")
class(frutas)
#Vector de valores logicos
vec_logico<- c(TRUE, FALSE, T, F)
class(vec_logico)
#Para saber cuantos elementos tiene un vector
numeros<- c(20,5,8)
frutas<- c("manzana","naranja","fresa","pera")
vec_logico<- c(TRUE, FALSE, T, F)
length(numeros)
length(frutas)
length(vec_logico)
#Ordenamos los elementos de un vector
numeros<- c(20,5,8)
frutas<- c("manzana","naranja","fresa","pera")
vec_logico<- c(TRUE, FALSE, T, F)
sort(numeros)
sort(frutas)
sort(vec_logico)
#Accedemos a un elemento del vector
numeros<- c(20,5,8)
frutas<- c("manzana","naranja","fresa","pera")
vec_logico<- c(TRUE, FALSE, T, F)
numeros[3]
frutas[2]
vec_logico[4]
#Accedemos a varios elementos del vector
numeros<- c(20,5,8,10,15,7)
frutas<- c("platano", "manzana", "naranja", "mango", "limon")
vec_logico<- c(TRUE, FALSE, T, F,T,F)
numeros[c(2,5)]
frutas[c(1,3)]
vec_logico[c(3,4)]
#Acceder a todos los elementos excepto a los especificados
numeros<- c(20,5,8,10,15,7)
frutas<- c("platano", "manzana", "naranja", "mango", "limon")
vec_logico<- c(TRUE, FALSE, T, F,T,F)
numeros[c(-1)]
frutas[c(-1,-2)]
vec_logico[c(-1,-3)]
#Cambiamos el elemento de un vector
numeros<- c(20,5,8,10,15,7)
frutas<- c("platano", "manzana", "naranja", "mango", "limon")
vec_logico<- c(TRUE, FALSE, T, F,T,F)
numeros[2]<- 23
frutas[1]<- "mandarina"
vec_logico[3]<- F
#Insertamos los nombres de las frutas en el vector numeros
numeros<- c(20,5,8,10,15,7)
frutas<- c("platano", "manzana", "naranja", "mango", "limon", "fresa")
names(numeros)<- c(frutas)
numeros
#Jerarquía (character > numeric > logical)
error1 <- c("Angela", "Carlos", 22, "Contabilidad",FALSE,TRUE)
class(error1)
error2 <- c(23,T,FALSE)
class(error2)
#Operaciones con vectores
vec1<- c(1,5,6,30)
vec2<- c(5,10,3,2)
vec3<- vec1 + vec2
vec3<- vec2 - vec1
vec3<- vec1*vec2
vec3<- vec1/vec2
#Operamos vectores con diferente logitud
vec1<- c(1,5,6,30)
vec2<- c(5,10,3,2,10,15)
#R agarrará los valores del inicios hasta completar la longitud del otro vector
vec3<- vec1 + vec2
###############################################################################
#(Clase 3) Listas en Rstudio
#Una lista en R puede contener muchos tipos de datos diferentes
milist<- list("Angela", "Carlos", "Allison", 20, 25, 23)
milist2<- list(c("Angela", "Carlos","Allison"), c(20,25,23))
milist3<- list(nombres= c("Angela", "Carlos","Allison"), edades= c(20,25,23))
#Accesos a elementos de una lista
milist<- list("Angela", "Carlos", "Allison", 20, 25, 23)
milist2<- list(c("Angela", "Carlos","Allison"), c(20,25,23))
milist3<- list(nombres= c("Angela", "Carlos","Allison"), edades= c(20,25,23))
milist[1]
milist[c(2,4)]
milist2[[1]]
milist2[c(1,2)]
milist2[[c(1,2)]]
milist3[1]
milist3[[1]]
milist3[c(1,2)]
milist3[[c(1,2)]]
milist3[["edades"]]
#Cambiamos un elemento de la lista
milist<- list("Angela", "Carlos", "Allison", 20, 25, 23)
milist2<- list(c("Angela", "Carlos","Allison"), c(20,25,23))
milist3<- list(nombres= c("Angela", "Carlos","Allison"), edades= c(20,25,23))
milist[1]<- "Pamela"
milist2[[1]]<- c("Pamela","Juan","Shirley")
milist3[["edades"]]<- c(25,30,18)
milist3[[2]]<- c(21,28,24)
#Para saber cuántos elementos tiene una lista
milist<- list("Angela", "Carlos", "Allison", 20, 25, 23)
milist2<- list(c("Angela", "Carlos","Allison"), c(20,25,23))
milist3<- list(nombres= c("Angela", "Carlos","Allison"), edades= c(20,25,23))
length(milist)
length(milist2)
length(milist3)
#Para saber si un elemento especifico está presente en una lista usamos
milist<- list("Angela", "Carlos", "Allison", 20, 25, 23)
milist2<- list(c("Angela", "Carlos","Allison"), c(20,25,23))
milist3<- list(nombres= c("Angela", "Carlos","Allison"), edades= c(20,25,23))
"Esteban" %in% milist
"Angela" %in% milist2[[1]]
20 %in% milist3[[2]]
#Para agregar un elemento al final de la lista, usamos append()
milist<- list("Angela", "Carlos", "Allison", 20, 25, 23)
milist2<- list(c("Angela", "Carlos","Allison"), c(20,25,23))
milist3<- list(nombres= c("Angela", "Carlos","Allison"), edades= c(20,25,23))
append(milist, "Pamela")
append(milist[1], "Pamela")
append(milist2, "Juan")
append(milist2[[1]], "Juan")
append(milist3, 3)
append(milist3[[2]], 3)
milist<- append(milist, "Pamela")
#Eliminar elementos de la lista
milist<- list("Angela", "Carlos", "Allison", 20, 25, 23)
milist2<- list(c("Angela", "Carlos","Allison"), c(20,25,23))
milist3<- list(nombres= c("Angela", "Carlos","Allison"), edades= c(20,25,23))
newlist<- milist[-1]
newlist2<- milist2[[-1]]
newlist3<- milist3[[-2]]
#Unir dos listas
list1<- list("Rosa", "Jesus", "Carmen")
list2<- list(21,23,26)
list3<- c(list1, list2)
###############################################################################
#(Clase 4) Matrices en Rstudio
#Matriz numeric
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
#Matriz character
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
#Accedemos a los elementos de una matriz
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
numatrix[2,2]
numatrix[1,] #Mostramos la fila 1
numatrix[,2] #Mostramos la columna 2
animalmatrix[1,2]
animalmatrix[2,] #Mostramos la fila 2
animalmatrix[,2] #Mostramos la columna 2
#Acceda a más de una fila y columna
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
numatrix[c(1,2),] #Muestro la fila 1 y 2
animalmatrix[,c(1,3)] #Muestro la columna 1 y 3
#Longitud de la matriz
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
length(numatrix)
length(animalmatrix)
#Para agregar filas adicionales usamos rbind()
#Ejemplo 1
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
numatrix2<- rbind(numatrix, c(23,27))
animalmatrix2<- rbind(animalmatrix, c("coala", "tigre","perro"))
#Las celdas de la nueva fila deben tener la misma longitud
#que la matriz existente.
#Usamos cbind() para agregar columnas adicionales en la matriz
#Ejemplo 2
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
numatrix2<- cbind(numatrix, c(23,27,13,14))
animalmatrix2<- cbind(animalmatrix, c("coala", "tigre","perro"))
#Las celdas de la nueva columna deben tener la misma longitud
#que la matriz existente.
#Eliminamos filas y columnas
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
numatrix<- numatrix[-c(1),-c(1)] #Removerála primera fila y la primera columna
animalmatrix<- animalmatrix2[,-c(2)] #Eliminará la columna 2
#Compruebe si existe un elemento en la matriz
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
23 %in% numatrix
"lobo" %in% animalmatrix
#Utilizamos dim() para saber la cantidad de filas y columnas en una matriz
numatrix<- matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
dim(numatrix)
dim(animalmatrix)
#Jerarquía (character > numeric > logical)
error1<- matrix(c("león", "5", 6, 21,8,20, TRUE, FALSE), nrow=2, ncol=4)
error2<- matrix(c(5,TRUE, FALSE, T, F,F), nrow=2, ncol=3)
#Combinar dos matrices con filas
#tienen que tener la misma cantidad de columnas
numatrix<- matrix(c(1,2,3,4,5,6,7,8,9,10,11,12), nrow=4, ncol=3)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
combinar_matrix<- rbind(numatrix, animalmatrix)
#Combinar dos matrices con columnas
#Tienen que tener la misma cantidad de filas
numatrix<- matrix(c(1,2,3,4,5,6,7,8,9,10,11,12), nrow=3, ncol=4)
animalmatrix<- matrix(c("león", "panda", "gato", "canguro","cocodrilo",
"elefante", "lobo", "jirafa",
"conejo"), nrow=3, ncol = 3)
combinar_matrix2<- cbind(numatrix, animalmatrix)
#Operaciones con matrices
numatrix<- matrix(c(22,8,15,12), nrow = 2, ncol = 2)
numatrix2<- matrix(c(2,4,5,6), nrow = 2, ncol= 2 )
numatrix + numatrix2
numatrix - numatrix2
numatrix %*% numatrix2
numatrix / numatrix2
5*numatrix
###############################################################################
#(Clase 5) Factores en Rstudio
#Factores se utilizan para categorizar los datos. Ejemplos:
#Sexo: masculino/femenino
#Música: Rock, Pop, Clásica, Jazz
#País: Perú, Argentina, España, Francia
#Para crear un factor usamos factor()
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
#Para imprimir solo los niveles hacemos
levels(music_genre)
#También se puede establecer los niveles
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"),
levels= c("Classic","Jazz","Pop",
"Rock","Otros"))
levels(music_genre)
#Longitud del factor
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
length(music_genre)
#Tabla de frecuencia
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
table(music_genre)
#Factores de acceso
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
music_genre[3]
music_genre[-1]
#Cambiar el valor de un item
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
music_genre[3]<- "Pop"
#Tenga en cuenta que no puede cambiar el valor de un
#artículo específico si aún no está especificado en el factor.
#El siguiente ejemplo producirá un error:
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
music_genre[3]<- "Opera"
#Tenemos que especificarlo dentro de levels
music_genre <- factor(c("Jazz", "Rock", "Classic",
"Classic", "Pop", "Jazz", "Rock",
"Jazz"),
levels = c("Classic", "Jazz", "Pop",
"Rock", "Opera"))
music_genre[3]<- "Opera"
#Renombrar los levels
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
levels(music_genre)<- c("Piano", "violín", "batería", "guitarra")
#Juntando dos factores
dias_factor<- factor(c("lunes", "martes", "miercoles", "juves",
"viernes", "sabado", "domingo"))
clases_factor<- factor(c("martes", "juves", "sabado"))
dia_clas_fac<- factor(clases_factor, levels= dias_factor)
table(dia_clas_fac)
#Eliminamos contenido
#pero sin arrancarlo del level de los factores
music_genre<- factor(c("Jazz","Rock","Classic","Classic",
"Pop","Jazz","Rock", "Jazz"))
mus_no_arrancar<- music_genre[-c(1,3)]
###############################################################################
#(Clase 6) Data.Frames en Rstudio
#Son datos que se muestran en formato de tabla
#Pueden tener diferentes tipos de datos en su interior.
#Mientras que la primera columna puede ser character,
#la segunda y la tercera pueden ser numeric o logical.
#Sin embargo, cada columna debe tener el mismo tipo de datos
#Forma 1
trabajadores<- c("Angela", "David", "Melanie", "Hector","Emma")
edades<- c(22,23,24,26,25)
hijos<- c(F,F,T,T,F)
ingresos<- c(1500, 12000,8500,5500,14500)
genero<- c("Femenino","Masculino","Femenino","Masculino","Femenino")
grupo_df<- data.frame(trabajadores, edades, hijos, ingresos, genero)
#Forma 2
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
View(grupo_df)
#Acceso de elementos de la data.frame
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
grupo_df[1,]
grupo_df[,1]
grupo_df[1]
grupo_df[["trabajadores"]]
grupo_df$trabajadores
#Agregamos filas con rbind()
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
newfila_grupo_df<- rbind(grupo_df, c("Pamela",23,F,4300,"Femenino"))
#Agregamos columnas con cbind()
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
#Primera forma
newcol_grupo_df<- cbind(grupo_df,
ahorros= c(15500,30000,21200,18300,42300))
#Segunda forma
grupo_df$ahorros<- c(15500,30000,21200,18300,42300)
#Cambiamos los nombres de las columnas
colnames(grupo_df)<- c("TRABAJADORES", "EDADES", "HIJOS", "INGRESOS",
"GENERO", "AHORROS")
#Cambiamos los nombres de las filas
rownames(grupo_df)<- c("Fila 1", "Fila 2", "Fila 3", "Fila 4", "Fila 5")
#Usamos cbind() para combinar dos o más data frame horizontalmente
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
grupo_df2<- data.frame(
ahorros= c(15500,30000,21200,18300,42300),
mascotas= c(T,F,F,T,T))
new_grupo_df<- cbind(grupo_df, grupo_df2)
#Utilice rbind() para combinar dos o más data.frame verticalmente
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
grupo_df2<- data.frame(
trabajadores= c("Ronald", "Rosa", "Victor", "Pamela", "Daniel"),
edades = c(24,27,23,22,28),
hijos = c(F,T,F,F,T),
ingresos = c(2150,7500,1200,3650,10200),
genero = c("Masculino", "Femenino", "Masculino", "Femenino", "Masculino"))
new_grupo_df<- rbind(grupo_df, grupo_df2)
#Eliminar fila y columna
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
#Primera forma
grupo_df<- grupo_df[-c(1),-c(2)]
grupo_df<- grupo_df[-c(1),]
grupo_df<- grupo_df[,-c(2)]
#Segunda forma para borrar columna
grupo_df$ingresos<- NULL
#Cantidad de filas y columnas
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
dim(grupo_df)
#También se puede usar ncol() para encontrar el número
#de columnas y nrow() para encontrar el número de filas
ncol(grupo_df)
nrow(grupo_df)
#Longitud de data.frame
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma"),
edades = c(22,23,24,26,25),
hijos = c(F,F,T,T,F),
ingresos = c(1500, 12000,8500,5500,14500),
genero = c("Femenino","Masculino","Femenino","Masculino","Femenino"))
length(grupo_df)
#Para saber el nombre de las variables
names(grupo_df)
#Para saber la estructura del data.frame
str(grupo_df)
###############################################################################
#(Clase 7) Subconjunto de un data.frame en Rstudio
grupo_df<- data.frame(
trabajadores = c("Angela", "David", "Melanie", "Hector","Emma", "Daniel"),
edades = c(22,23,24,26,25,27),
hijos = c(F,F,T,T,F,T),
ingresos = c(1500, 12000,8500,5500,14500,12300),
genero = c("Femenino","Masculino","Femenino","Masculino",
"Femenino","Masculino"))
#subconjuntos subset(data, condición)
hombres<- subset(grupo_df, genero == "Masculino")
mujeres<- subset(grupo_df, genero == "Femenino")
ingresos_sub<- subset(grupo_df, ingresos>7000)
#Subconjunto con más de una condición
hombres_ingreso<- subset(grupo_df, genero == "Masculino" & ingresos>9000)
mujeres_edades<- subset(grupo_df, genero == "Femenino" & edades>23)
mujeres_ed_ing<- subset(grupo_df, genero == "Femenino" & ingresos>5000 &
edades>24)
###############################################################################
#(Clase 8) Introducción a los gráficos en Rstudio
#Dibujamos un punto en el diagrama, en la posición (1) y
#la posición (3)
x11()
plot(1,3)
#Dibuja dos puntos en el diagrama, uno en la posición (1, 3)
#y otro en la posición (8, 10)
x11()
plot(c(1,8),c(3,10))
#Dibujamos Varios puntos
x11()
plot(c(1,2,3,4,5),c(3,7,8,9,12))
#Para ordenarlo mejor
x<- c(1,2,3,4,5)
y<- c(3,7,8,9,12)
x11()
plot(x,y)
#Secuencia de puntos tanto en el eje X y el eje Y
x11()
plot(1:10)
#Etiquetas(labels)
#main: colocamos título
#xlab: etiqueta en el eje de abscisa
#ylab: etiqueta en el eje de ordenada
x11()
plot(1:10, main= "Mi gráfico", xlab = "Eje de abscisas",
ylab = "Eje de ordenadas")
#Cambiamos el tamaño de los puntos
#1 es el valor predeterminado, mientras que 0.5 significa 50% más pequeño
#y 2 significa 100% más grande
x11()
plot(1:10, main= "Mi gráfico", xlab = "Eje de abscisas",
ylab = "Eje de ordenadas", cex= 2)
#Cambiamos la forma del punto
#Use pch con un valor de 0 a 25, donde 1 es el predeterminado
help("points")
x11()
plot(1:10, main= "Mi gráfico", xlab = "Eje de abscisas",
ylab = "Eje de ordenadas", cex= 2, pch= 18)
#Apariencia del gráfico
x11()
plot(1:10, main= "Mi gráfico", xlab = "Eje de abscisas",
ylab = "Eje de ordenadas", cex= 2, pch=19 ,col= "red")
#Vemos la lista de todos los colores
colors()
###############################################################################
#(Clase 9) Gráfico de dispersión en Rstudio
#Un gráfico de dispersión es un tipo de gráfico que se
#utiliza para mostrar la relación entre dos variables
#numéricas y traza un punto para cada observación
#Creamos un data.frame del precio y consumo de pescado
df_pescado<- data.frame(
precio= c(5,7,8,7,2,2,9,4,11,12,9,6,10),
consumo= c(99,86,87,88,111,103,87,94,78,77,85,86,80))
x11()
plot(df_pescado$consumo,df_pescado$precio,
main = "Gráfico de dispersión",
xlab= "Consumo",
ylab= "Precio",
cex= 2, pch= 19, col= "orange")
#Insertamos un leyenda
#usamos legend()
##La leyenda se puede colocar como:
#bottomright, bottomleft, bottom, topright, topleft,
#top, center, right, left.
x11()
plot(df_pescado$consumo,df_pescado$precio,
main = "Gráfico de dispersión",
xlab= "Consumo",
ylab= "Precio",
cex= 2, pch= 19, col= "orange")
legend("topright", "Pescado", fill = "orange")
#Combinamos dos gráficos de dispersión
df_pescado<- data.frame(
precio= c(5,7,8,7,2,2,9,4,11,12,9,6,10),
consumo= c(99,86,87,88,111,103,87,94,78,77,85,86,80))
df_pollo<- data.frame(
precio= c(2,2,8,1,15,8,12,9,7,3,11,4,7,14,12),
consumo= c(100,105,84,105,90,99,90,95,94,100,79,112,91,80,85))
x11()
plot(df_pescado$consumo,df_pescado$precio,
main = "Gráfico de dispersión",
xlab= "Consumo",
ylab= "Precio",
cex= 2, pch= 19, col= "orange")
points(df_pollo$consumo,df_pollo$precio,
cex= 2, pch= 19, col= "dark green")
legend("topright", c("Pescado", "Pollo"), fill= c("orange", "dark green"))
###############################################################################
#(Clase 10) Gráfico de linea en Rstudio
#Colocamos type = "l" para que nos de un gráfico lineal
x<- c(10,15,18,23,26)
y<- c(9,11,15,20,22)
x11()
plot(x,y, main= "Mi gráfico lineal", xlab= "Eje de abscisas",
ylab = "Eje de ordenadas", type = "l")
#Cambiamos el grosor de la linea
#1 es el valor predeterminado, mientras que 0.5 significa 50% más pequeño
#y 2 significa 100% más grande
x11()
plot(x,y, main= "Mi gráfico lineal", xlab= "Eje de abscisas",
ylab = "Eje de ordenadas", type = "l", lwd=2)
#Estilos de linea
#Utilizamos lty con un valor de 0 a 6 para especificar el formato de línea.
x11()
plot(x,y, main= "Mi gráfico lineal", xlab= "Eje de abscisas",
ylab = "Eje de ordenadas", type = "l", lwd=2, lty=3)
#Valores de parámetro disponibles para lty
#0 quita la línea
#1 muestra una línea continua
#2 muestra una línea discontinua
#3 muestra una línea de puntos
#4 muestra una línea de "puntos discontinuos"
#5 muestra una línea de "trazos largos"
#6 muestra una línea de "dos guiones"
#Graficamos los ingresos anuales
grupo_df<- data.frame(
años = c(2016, 2017, 2018, 2019, 2020),
ingresos = c(1500, 12000,8500,14300,12500))
x11()
plot(grupo_df$años, grupo_df$ingresos, main= "Ingresos anuales",
xlab= "Años", ylab= "Ingresos", type= "l", lwd= 2, lty= 5)
#Color de linea
x11()
plot(grupo_df$años, grupo_df$ingresos, main= "Ingresos anuales",
xlab= "Años", ylab= "Ingresos", type= "l", lwd= 2, lty= 5,
col="dark blue")
#Insertamos un leyenda
#usamos legend()
##La leyenda se puede colocar como:
#bottomright, bottomleft, bottom, topright, topleft,
#top, center, right, left.
x11()
plot(grupo_df$años, grupo_df$ingresos, main= "Ingresos anuales",
xlab= "Años", ylab= "Ingresos", type= "l", lwd= 2, lty= 5,
col="dark blue")
legend("bottomright", "Angela", fill = "dark blue")
#Juntamos dos gráficos lineales
grupo_df<- data.frame(
años = c(2016, 2017, 2018, 2019, 2020),
ingresos = c(1500, 12000,8500,14300,12500))
grupo_df2<- data.frame(
años= c(2016, 2017, 2018, 2019, 2020),
ingresos= c(8300,4200,7500,10800,9700))
x11()
plot(grupo_df$años, grupo_df$ingresos, main= "Ingresos anuales",
xlab= "Años", ylab= "Ingresos", type= "l", lwd= 2, lty= 5,
col="dark blue")
lines(grupo_df2$años, grupo_df2$ingresos, type= "l", lwd= 2, lty= 6,
col= "dark red")
legend("bottomright", c("Angela", "Victor"),
fill = c("dark blue", "dark red"))
#Gráfico de lineas siguiente la secuencia de puntos
x11()
plot(grupo_df$años, grupo_df$ingresos, main= "Ingresos anuales",
xlab= "Años", ylab= "Ingresos", type= "o", lwd= 2, lty= 5,
col="dark blue")
lines(grupo_df2$años, grupo_df2$ingresos, type= "o", lwd= 2, lty= 6,
col= "dark red")
legend("bottomright", c("Angela", "Victor"),
fill = c("dark blue", "dark red"))
#Cambios el tipo de puntos y aumentamos su tamaño
x11()
plot(grupo_df$años, grupo_df$ingresos, main= "Ingresos anuales",
xlab= "Años", ylab= "Ingresos", type= "o", lwd= 2, lty= 5,
col="dark blue", pch= 19, cex= 2)
lines(grupo_df2$años, grupo_df2$ingresos, type= "o", lwd= 2, lty= 6,
col= "dark red", pch= 19, cex= 2)
legend("bottomright", c("Angela", "Victor"),
fill = c("dark blue", "dark red"))
###############################################################################
#(Clase 11) Gráficos circulares en Rstudio
df_gustos<- data.frame(
preferencia= c(0.32, 0.24,0.16,0.29),
cursos= c("Álgebra", "Trigonometría", "Geometría","Aritmética"),
colores= c("dark blue", "dark red", "orange", "purple"))
x11()
pie(df_gustos$preferencia, main = "Preferencia de los cursos en el salón")
#Etiquetas y encabezados
etiqueta1<- paste(df_gustos$preferencia)
x11()
pie(df_gustos$preferencia, main = "Preferencia de los cursos en el salón",
label= etiqueta1)
etiqueta2<- paste(df_gustos$preferencia, "%", sep= " ")
x11()
pie(df_gustos$preferencia, main = "Preferencia de los cursos en el salón",
label= etiqueta2)
#Apariencia
x11()
pie(df_gustos$preferencia, main = "Preferencia de los cursos en el salón",
label= etiqueta2,
col = df_gustos$colores)
#Insertamos un leyenda
#usamos legend()
##La leyenda se puede colocar como:
#bottomright, bottomleft, bottom, topright, topleft,
#top, center, right, left.
x11()
pie(df_gustos$preferencia, main = "Preferencia de los cursos en el salón",
label= etiqueta2,
col = df_gustos$colores)
legend("topleft", df_gustos$cursos, fill = df_gustos$colores)
#Tamaño de leyenda
x11()
pie(df_gustos$preferencia, main = "Preferencia de los cursos en el salón",
label= etiqueta2,
col = df_gustos$colores)
legend("topleft", df_gustos$cursos, fill = df_gustos$colores, cex= 0.85)
###############################################################################
#(Clase 12) Gráfico de barras en Rstudio
df_ingresos<- data.frame(
meses= c("En", "Fe", "Mar", "Abr", "May", "Jun",
"Jul", "Ago", "Sep", "Oct", "Nov",
"Dic"),
ingresos= c(1500,2500,2000,3500,7000,4000,8500,6000,10000,5000,7500,
11000))
#Usamos barplot para graficar
#Usamos names.arg para definir los nombres de cada
#observación en el eje x
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses)
#Cambiamos el tamaño de los nombres
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses, cex.names = 0.9)
#Titulo y ejes
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "Meses", ylab= "Ingresos",
cex.names = 0.9)
#Apariencia
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "Meses", ylab= "Ingresos",
cex.names = 0.9, col = "tomato3")
#Densidad / Textura de barra
#Para cambiar la textura de la barra usamos density
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "Meses", ylab= "Ingresos",
cex.names = 0.9, col= "tomato3", density= 25)
#Utilizamos width para el ancho de la barra
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "Meses", ylab= "Ingresos",
cex.names = 0.9, col= "tomato3", density= 25,
width= c(1,2,3,4,5,6,7,8,9,10,11,12))
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "Meses", ylab= "Ingresos",
cex.names = 0.9, col= "tomato3", density= 25,
width= c(4,4,4,4,4,4,4,4,4,4,4,4))
#Barras horizontales
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "ingresos", ylab= "Meses",
cex.names = 0.8, col= "tomato3", density= 25,
width= c(4,4,4,4,4,4,4,4,4,4,4,4),
horiz= TRUE)
#Modificamos los límites del eje x
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "ingresos", ylab= "Meses",
cex.names = 0.8, col= "tomato3", density= 25,
width= c(4,4,4,4,4,4,4,4,4,4,4,4),
horiz= TRUE, xlim = c(0,14000))
#Insertamos un leyenda
#usamos legend()
##La leyenda se puede colocar como:
#bottomright, bottomleft, bottom, topright, topleft,
#top, center, right, left.
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "ingresos", ylab= "Meses",
cex.names = 0.8, col= "tomato3", density= 25,
width= c(4,4,4,4,4,4,4,4,4,4,4,4),
horiz= TRUE, xlim = c(0,14000))
legend("bottomright", "Ingresos de Pamela", fill = "tomato3")
#Otra forma de insertar leyenda
meses_table<- table(df_ingresos$meses)
colores<- c("blue", "red", "yellow", "orange", "gray", "green",
"tomato3", "dark red", "violetred", "turquoise", "pink2",
"honeydew")
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "ingresos", ylab= "Meses",
cex.names = 0.8, col= colores,
width= c(4,4,4,4,4,4,4,4,4,4,4,4),
horiz= TRUE, xlim = c(0,14000),
legend= rownames(meses_table))
#Cambiamos la orientación de los caracteres de los ejes con las
#0: predeterminado
#1: pone horizontal el eje de ordenadas
#2: pone horizontal el eje de ordenadas y vertical abscisas
#3: pone todos los ejes verticalmente
x11()
barplot(df_ingresos$ingresos, names.arg=df_ingresos$meses,
main= "Ingresos mensuales", xlab= "ingresos", ylab= "Meses",
cex.names = 0.8, col= colores,
width= c(4,4,4,4,4,4,4,4,4,4,4,4),
horiz= TRUE, xlim = c(0,14000),
legend= rownames(meses_table),
las= 1)
###############################################################################
#(Clase 13) Gráfico de barras apiladas y agrupadas en Rstudio
df_fuma<- data.frame(
genero= rep(c("Femenino", "Masculino", "Masculino", "Femenino",
"Masculino", "Femenino", "Masculino", "Femenino"),
times= c(2,3,8,7,9,5,4,6)),
fuma= rep(c("Si", "No", "Si", "Si", "Si", "No", "Si", "No"),
times= c(7,3,5,9,4,6,2,8)))
fuma_table<- table(df_fuma$genero, df_fuma$fuma)
x11()
barplot(fuma_table, main= "Cantidad de personas que fuman y no fuman",
xlab= "Fuman", ylab = "Cantidad",
col= c("dark red", "dark blue"))
#Modificamos el eje de ordenadas
x11()
barplot(fuma_table, main= "Cantidad de personas que fuman y no fuman",
xlab= "Fuman", ylab = "Cantidad",
col= c("dark red", "dark blue"), ylim= c(0,35))
#Insertamos leyenda
x11()
barplot(fuma_table, main= "Cantidad de personas que fuman y no fuman",
xlab= "Fuman", ylab = "Cantidad",
col= c("dark red", "dark blue"), ylim= c(0,35),
legend= rownames(fuma_table))
#Otra forma de insertar leyenda
#usamos legend()
##La leyenda se puede colocar como:
#bottomright, bottomleft, bottom, topright, topleft,
#top, center, right, left.
x11()
barplot(fuma_table, main= "Cantidad de personas que fuman y no fuman",
xlab= "Fuman", ylab = "Cantidad",
col= c("dark red", "dark blue"), ylim= c(0,35))
legend("top", rownames(fuma_table), fill = c("dark red", "dark blue"))
#Diagrama de barras agrupadas
x11()
barplot(fuma_table, main= "Cantidad de personas que fuman y no fuman",
xlab= "Fuman", ylab = "Cantidad",
col= c("dark red", "dark blue"), ylim= c(0,35),
legend= rownames(fuma_table),
beside= TRUE)
#Combinamos gráficos
#Dividimos la gráfica en una fila y dos columnas.
#Normalmente está como mfrow= c(1,1)
x11()
par(mfrow= c(1,2))
barplot(fuma_table, main= "Grafico de barras apiladas",
xlab= "Fuman", ylab = "Cantidad",
col= c("dark red", "dark blue"), ylim= c(0,35),
legend= rownames(fuma_table))
barplot(fuma_table, main= "Grafico de barras agrupadas",
xlab= "Fuman", ylab = "Cantidad",
col= c("dark red", "dark blue"), ylim= c(0,35),
legend= rownames(fuma_table),
beside= TRUE)
###############################################################################
#(Clase 14) Máximo, mínimo, desviación estándar, varianza,
#media, mediana, percentil en Rstudio
#Hay un conjunto de datos incorporado en R llamado
#mtcars (prueba de carrera de automóviles de Motor Tred)
#Información sobre le conjunto de datos
help(mtcars)
#mpg Miles/(US) gallon
#cyl Number of cylinders
#disp Displacement (cu.in.)
#hp Gross horsepower
#drat Rear axle ratio
#wt Weight (1000 lbs)
#qsec 1/4 mile time
#vs Engine (0 = V-shaped, 1 = straight)
#am Transmission (0 = automatic, 1 = manual)
#gear Number of forward gears
#carb Number of carburetors
#Obtenemos la información y lo guardamos en la variable Data_Cars
Data_Cars<- mtcars
#Utilizamos la función la dim() para encontrar las dimensiones
#del data set y la función names() para ver
#los nombres de las variables:
dim(Data_Cars)
names(Data_Cars)
#Usamos la función rownames() para obtener el nombre de cada fila
#que son los nombres de los automóviles
rownames(Data_Cars)
#Acceso a la data
Data_Cars[2]
Data_Cars[,2]
Data_Cars[2,]
Data_Cars[c(1,2)]
#Usamos sort() para ordenar las variables
Data_Cars$cyl
sort(Data_Cars$cyl)
#Usamos summary() para obtener un resumen estadístico de los datos
summary(Data_Cars)
# Máximo y mínimo de la variable hp
Data_Cars["hp"]
Data_Cars$hp
max(Data_Cars$hp)
min(Data_Cars$hp)
#Usamos las funciones which.max() y
#which.min() para encontrar la posición del índice del valor
#máximo y mínimo en la tabla:
which.max(Data_Cars$hp)
which.min(Data_Cars$hp)
#Combinamos las funciones which.max() y which.min() con la función
#rownames() para obtener el nombre del automóvil que tiene la
#máxima y mínima potencia
rownames(Data_Cars)[which.max(Data_Cars$hp)]
rownames(Data_Cars)[which.min(Data_Cars$hp)]
#Media de la variable wt
mean(Data_Cars$mpg)
#Mediana de la variable wt
median(Data_Cars$mpg)
#Rango
sort(Data_Cars$mpg)
range(Data_Cars$mpg)
#Desviación estándar
sd(Data_Cars$wt)
#Varianza
var(Data_Cars$wt)
#Percentil
#El percentil es una medida de posición usada en estadística
#que indica, una vez ordenados los datos de menor a mayor,
#el valor de la variable por debajo del cual se encuentra
#un porcentaje dado de observaciones en un grupo
#¿Cuál es el percentil 75 de la variable wt?
sort(Data_Cars$wt)
quantile(Data_Cars$wt, c(0.75))
#Es decir, el 75% de automóviles que pesan 3.61 o menos
#Si ejecuta quantile() sin especificar el c(), obtendrá
#los percentiles 0% 25% 50% 75% 100%
summary(Data_Cars$wt)
quantile(Data_Cars$wt)
#Los cuartiles son datos divididos en cuatro partes, cuando se ordenan
#en orden ascendente:
#El valor del primer cuartil corta el primer 25% de los datos.
#El valor del segundo cuartil corta el primer 50% de los datos.
#El valor del tercer cuartil corta el primer 75% de los datos.
#El valor del cuarto cuartil corta el 100% de los datos.
###############################################################################
#(Clase 15) Gráfico histograma en Rstudio
#Mostrará la frecuencia con la que se presenta los datos seleccionados.
Data_Cars<- mtcars
sort(Data_Cars$hp)
x11()
hist(Data_Cars$hp, main = "Histograma", xlab= "Caballos de fuerza",
ylab= "Frecuencia")
#Apariencia
x11()
hist(Data_Cars$hp, main = "Histograma", xlab= "Caballos de fuerza",
ylab= "Frecuencia", col= "plum4")
#Cambiamos el limite del eje de ordenadas
x11()
hist(Data_Cars$hp, main = "Histograma", xlab= "Caballos de fuerza",
ylab= "Frecuencia", col= "plum4", ylim= c(0,12))
#Histograma de densidad
x11()
hist(Data_Cars$hp, main = "Histograma de densidad", xlab= "Caballos de fuerza",
ylab= "Densidad", col= "plum4", ylim= c(0,0.01),
prob= T)
#Si queremos cambiar el número de barras usamos breaks
x11()
par(mfrow= c(1,3))
hist(Data_Cars$hp, main = "Histograma de densidad", xlab= "Caballos de fuerza",
ylab= "Densidad", col= "plum4", ylim= c(0,0.01),
prob= T)
hist(Data_Cars$hp, main = "Histograma de densidad", xlab= "Caballos de fuerza",
ylab= "Densidad", col= "skyblue", ylim= c(0,0.01),
prob= T, breaks= 2)
hist(Data_Cars$hp, main = "Histograma de densidad", xlab= "Caballos de fuerza",
ylab= "Densidad", col= "orange", ylim= c(0,0.01),
prob= T, breaks= 40)
#rnorm (r significa random (aleatorio), norm significa normal)
#rnorm(n, mean, sd) donde n es el número de valores a obtener
x <- rnorm(1000, mean= 0, sd= 2)
y <- rnorm(500, mean= 0, sd= 2)
x11()
par(mfrow= c(1,2))
hist(x, main = "Primer histograma", ylab = "Frecuencia", col= "tomato2")
hist(y, main= "Segundo histograma", ylab= "Frecuencia", col= c("skyblue"))
#Histograma con dos variables
#add= TRUE nos permitirá unir un histograma con otro
x11()
hist(x, main = "Histograma con dos variables", ylab = "Frecuencia",
col= "tomato2")
hist(y, add = TRUE, col= c("skyblue"))
#Juntando los tres gráficos para visualizarlos
x11()
par(mfrow= c(1,3))
hist(x, main = "Primer histograma", ylab = "Frecuencia", col= "tomato2")
hist(y, main= "Segundo histograma", ylab= "Frecuencia", col= c("skyblue"))
hist(x, main = "Histograma con dos variables", ylab = "Frecuencia",
col= "tomato2")
hist(y, add = TRUE, col= c("skyblue"))
###############################################################################
#(Clase 16) if y else en Rstudio
#Condición if
a<- 33
b<- 200
if (b>a) {
print("b es mayor que a")
}
#Condición else if
#else if: es la forma en que R dice
#"si las condiciones anteriores no eran verdaderas,
#pruebe esta condición"
a<- 120
b<- 120
if (b>a) {
print("b es mayor que a")
} else if (a==b) {
print("a y b son iguales")
}
#else: captura cualquier cosa que no sea capturada por las
#condiciones anteriores:
a<- 150
b<- 75
if (b>a) {
print("b es mayor que a")
} else if (a==b) {
print("a es igual que b")
} else {
print("a es mayor que b")
}
#También podemos usar else sin else if
a<- 90
b<- 26
if (b>a) {
print("b es mayor que a")
} else {
print("b no es mayor que a")
}
# Nested if statements: an if/else placed inside another if branch.
# The inner condition is only evaluated when the outer one is TRUE.
edad<- 15
if (edad>18) {
  print("Tu edad está por encima de los 18 años")
  if (edad>20){
    print("y también está por encima de los 20 años")
  } else {
    print("pero no por encima de los 20")
  }
} else {
  # Fixed typo in the user-facing message: "tu tines" -> "tú tienes".
  print("No eres mayor de edad, tú tienes:")
  # Top-level if returns its value, so this paste() result auto-prints.
  paste(edad, "años")
}
#and
#&: se usa para combinar declaraciones condicionales
#Ejemplo 1
a<- 200
b<- 150
c<- 500
if (a>b & c>a) {
print("Ambas condiciones son verdaderas")
}
#Ejemplo 2
desc_tarje<- 0.4
desc_efec<- 0.2
monto<- 160
metodo_pago<- "tarjeta"
if (monto>150 & metodo_pago== "tarjeta") {
paste("El total será de",monto - monto*desc_tarje, "soles")
} else if (monto>150 & metodo_pago== "efectivo") {
paste("El total será de", monto - monto*desc_efec, "soles")
} else{
paste("El total será de", monto, "soles")
}
#O
#Ejemplo 1
#|: se usa para combinar declaraciones condicionales:
a<- 180
b<- 150
c<- 250
if (a>b | a>c) {
print("Al menos una de las condiciones es verdadera")
}
#Ejemplo 2
precioA<- 10
precioB<- 5
A<- 49
B<- 80
if(A>=50 | B>40) {
paste("Tendrá un descuento", (A*precioA + B*precioB)*0.2, "soles")
} else if (A<50 | B<=40) {
print("No tendrán descuento")
}
###############################################################################
#(Clase 17) Funciones en Rstudio
mi_funcion<- function() {
print("Mi primera función")
}
#Llamar a una función
#Use el nombre de la función seguido de paréntesis.
mi_funcion()
#Argumentos
#La información se puede pasar a funciones como argumentos,
#los argumentos se especifican después del nombre de la
#función, entre paréntesis. Puede agregar tantos argumentos
#como desee, solo sepárelos con una coma.
#El siguiente ejemplo tiene una función con argumento
#(fnombre). Cuando se llama a la función, pasamos un nombre,
#que se usa dentro de la función para imprimir el
#nombre completo
#Ejemplo 1
mi_funcion<- function(elemento) {
print(elemento)
}
mi_funcion("Ismael")
mi_funcion(15)
mi_funcion(20*5)
#Ejemplo 2
edad_funcion<- function(edad, peso) {
paste(edad, "años")
}
edad_funcion(24)
edad_funcion("Yo tengo 24")
#Si su función espera 2 argumentos, debe llamar
#a la función con 2 argumentos.
mi_funcion<- function(edad, hobby) {
paste(edad, hobby)
}
mi_funcion(20, "años y mi hobby es tocar guitarra")
#Valor de parámetro predeterminado
mi_funcion<- function(pais = "Perú"){
paste("Yo soy de", pais)
}
mi_funcion()
mi_funcion("Colombia")
mi_funcion(23)
#Valores devueltos
#usamos la función return()
mi_funcion<- function(x) {
return(5*x)
}
mi_funcion(10)
print(mi_funcion(3))
# Nested function calls
# Example 1: fcosto() multiplies unit price by quantity to get a total cost.
# The product is the last evaluated expression, so it is returned implicitly.
fcosto <- function(precio, cantidad) {
  precio * cantidad
}
fcosto(4, 9)
# Inner calls are evaluated first, then fed to the outer call: 4 * 9 = 36.
fcosto(fcosto(2, 2), fcosto(3, 3))
# Example 2: fcosto1() returns a closure that remembers the unit price,
# so the returned function only needs the quantity.
fcosto1 <- function(precio) {
  function(cantidad) precio * cantidad
}
# `costo` is now a function with precio fixed at 4.
costo <- fcosto1(4)
costo(9)
###############################################################################
#(Clase 18) Variables Globales, readLines en Rstudio
#Las variables globales que se crean fuera de una función se conocen
#como variables globales
#todo el mundo puede utilizar las variables globales,
#tanto dentro como fuera de las funciones.
#Ejemplo 1
Global1<- "Hola"
mi_funcion<- function() {
paste(Global1, "mundo")
}
mi_funcion()
#Ejemplo 2
#Si Global2 se usa como el nombre de un objeto dentro de la función,
#el valor de Global2 en el entorno global no cambia.
Global2<- 3500
mi_funcion<- function() {
Global2= 9500
paste("Mi ingreso mensual es", Global2)
}
mi_funcion()
Global2
#Si usa el operador de asignación <<- la variable pertenece
#al ámbito global
mi_funcion<- function() {
Global3<<- 2200
paste("Mi gasto mensual es", Global3)
}
mi_funcion()
Global3
#Además, use el operador de asignación global <<- si desea
#cambiar una variable global dentro de una función
edad<- 20
mi_funcion<- function() {
edad<<- 25
paste("Yo tengo", edad, "años")
}
mi_funcion()
edad
#readLines
#Ejemplo 1
pregunta<- function() {
cat("Introduzca su edad: ")
edad<- readLines(n= 3)
paste("Yo tengo", edad, "años")
}
pregunta()
#Ejemplo 2
pregunta<- function() {
cat("Introduzca su edad: ")
edad<- readLines(n= 1)
paste("Yo tengo", edad, "años")
}
# Conditionals combined with interactive input.
# Burger unit prices in soles; paying something other than cash ("no")
# gets a 5% discount on the unit price.
pA<- 20
pB<- 25
pC<- 28
# Asks for quantity, payment method and burger type, then prints the total.
# All input is read interactively with readLines(); it returns character,
# so the quantity must be converted with as.numeric() before arithmetic.
pregunta<- function() {
  cat("Introduzca el número de hamburguesas que comprará:")
  cantidad<- readLines(n=1)
  cantidad<- as.numeric(cantidad)
  cat("Indicar si pagará con efectivo con un si, caso contrario colocar no:")
  pago<- readLines(n= 1)
  if (pago == "si"){
    cat("Indicar que hamburguesa desea comprar: A, B, C")
    hamburguesa<- readLines(n=1)
    if (hamburguesa == "A") {
      print(pA*cantidad)
    } else if (hamburguesa == "B") {
      # Fixed: this branch used pA (the price of burger A) instead of pB.
      print(pB*cantidad)
    } else if (hamburguesa == "C") {
      print(pC*cantidad)
    } else {
      print("Hubo un problema, por favor intente de nuevo")
    }
  } else if (pago == "no") {
    cat("Indicar que hamburguesa desea comprar: A, B, C")
    hamburguesa<- readLines(n=1)
    if (hamburguesa == "A") {
      print((pA - pA*0.05)*cantidad)
    } else if (hamburguesa == "B") {
      print((pB - pB*0.05)*cantidad)
    } else if (hamburguesa == "C") {
      # Fixed: the comparison used lowercase "c" while the prompt offers "C",
      # so choosing C in this branch always fell through to the error message.
      print((pC - pC*0.05)*cantidad)
    } else {
      print("Hubo un problema, por favor intente de nuevo")
    }
  } else {
    print("Hubo un problema, por favor intente de nuevo")
  }
}
pregunta()
###############################################################################
#(Clase 19) Bucles while y for
#Los bucles pueden ejecutar un bloque de código siempre que se
#alcance una condición específica
#while: bucle que permite ejecutar un conjunto de declaraciones
#Siempre que una condición sea Verdadera
#Ejemplo 1
#No se ejecutará porque no es verdadera la condición
i<- 1
while (i>10) {
print(i)
i<- i + 1
}
#Ejemplo 2
i<- 1
while (i<10) {
print(i)
i<- i + 1
}
print(i)
#break: podemos detener el ciclo
i<- 1
while (i<10) {
print(i)
i<- i+1
if (i==4) {
print(i)
break
}
}
#next: podemos omitir una iteración sin terminar el ciclo
i<- 0
while (i<10) {
i<- i + 1
if (i==4) {
next
}
print(i)
}
# While loop inside a function: walk every integer from `inicio` to `final`
# and classify it as a multiple of 3 or not.
# Results are appended to two global lists via `<<-`, so repeated calls
# keep accumulating into them:
#   milist_m3  - numbers that are multiples of 3
#   milist_no3 - numbers that are not multiples of 3
milist_m3<- list()
milist_no3<- list()
pregunta<- function(){
  # Read both range endpoints interactively; readLines() returns character,
  # so each value is converted with as.numeric() before the comparison.
  cat("Introduzca el número inicial")
  inicio<- readLines(n= 1)
  inicio<- as.numeric(inicio)
  cat("Introduzca el número final")
  final<- readLines(n=1)
  final<- as.numeric(final)
  while (inicio <= final) {
    # %% is the modulo operator: a nonzero remainder means not a multiple.
    if ((inicio%%3) != 0) {
      cat(inicio, "no es multiplo de 3\n")
      # `<<-` assigns to the global list instead of creating a local copy.
      milist_no3<<- append(milist_no3, inicio)
    } else {
      cat(inicio, "es multiplo de 3\n")
      milist_m3<<- append(milist_m3, inicio)
    }
    # Advance the counter; without this the while loop would never terminate.
    inicio<- inicio + 1
  }
}
pregunta()
#for: bucle que se usa para iterar sobre una secuencia,
#podemos ejecutar un conjunto de sentencias,
#una vez por cada elemento de un vector, matriz, lista, etc.
for (x in 1:10) {
print(x)
}
#Ejemplo 1: imprima todos los elementos de una lista
frutas<- list("manzana", "mango", "fresa")
for (x in frutas) {
print(x)
}
#Ejemplo 2: imprime el número de dados:
dados<- c(1,2,3,4,5,6)
for (x in dados) {
print(x)
}
#break: podemos detener el ciclo antes de que haya pasado
#por todos los elementos
frutas<- list("manzana", "mango", "fresa")
for (x in frutas) {
if (x == "mango") {
break
}
print(x)
}
#next: declaración, podemos omitir una iteración sin
#terminar el ciclo
frutas<- list("manzana", "mango", "fresa")
for (x in frutas) {
if (x == "mango") {
next
}
print(x)
}
#Bucles anidados
#Es un bucle dentro de un bucle
#El "bucle interno" se ejecutará una vez por cada
#iteración del "iteración" del "bucle externo"
num1<- c(5, 3)
num2<- c(2, 4, 8)
for (x in num1) {
for (y in num2) {
print(x*y)
}
}
# For loop inside a function: classify each number in [inicio, final] as even
# or odd and report the sum of each group.
#
# Both arguments are optional for backward compatibility: when omitted the
# user is prompted interactively, exactly as before. The totals are still
# published as the globals `sumapar` / `sumaimpar` via `<<-`.
pregunta<- function(inicio = NULL, final = NULL){
  if (is.null(inicio)) {
    cat("Introduzca el número inicial")
    inicio<- readLines(n= 1)
    inicio<- as.numeric(inicio)
  }
  if (is.null(final)) {
    cat("Introduzca el número final")
    final<- readLines(n=1)
    final<- as.numeric(final)
  }
  # Fixed: the original assigned closed-form values ((x+1)/2)^2 and
  # (x/2)*((x/2)+1), which are the sums from 1 up to x — wrong whenever
  # inicio > 1 — and left one global undefined (error at the final cat)
  # when the range contained no even or no odd number. Accumulate instead.
  sumapar<<- 0
  sumaimpar<<- 0
  for (x in inicio:final) {
    if ((x%%2) != 0) {
      cat(x, "no es múltiplo de 2\n")
      sumaimpar<<- sumaimpar + x
    } else {
      cat(x, "es múltiplo de 2\n")
      sumapar<<- sumapar + x
    }
  }
  cat("La suma de los números pares es", sumapar, "\n")
  cat("La suma de los números impares es", sumaimpar, "\n")
}
pregunta()
# Function with data.frames and a for loop: same even/odd classification as
# above, but additionally collects the even and odd numbers into two global
# data frames (dfpar / dfimpar), one row per number.
# NOTE(review): rows are appended with rbind() inside the loop (quadratic
# growth) — acceptable for a tutorial, but preallocate for real use.
# NOTE(review): as above, the closed-form sums run from 1 up to x, not from
# 'inicio' — confirm intended behavior.
dfimpar<- c()
dfpar<- c()
pregunta<- function(){
  cat("Introduzca el número inicial")
  inicio<- readLines(n= 1)          # read one line from stdin
  inicio<- as.numeric(inicio)
  cat("Introduzca el número final")
  final<- readLines(n=1)
  final<- as.numeric(final)
  for (x in inicio:final) {
    if ((x%%2) != 0) {
      cat(x, "no es múltiplo de 2\n")
      df_impar<- data.frame(impares = x)          # one-row frame for this odd number
      dfimpar<<- rbind(dfimpar, df_impar)         # accumulate in global frame
      sumaimpar<<- ((x + 1)/2)^2
    } else {
      cat(x, "es múltiplo de 2\n")
      df_par<- data.frame(pares = x)
      dfpar<<- rbind(dfpar, df_par)
      sumapar<<- (x/2)*((x/2) + 1)
    }
  }
  cat("La suma de los números pares es", sumapar, "\n")
  cat("La suma de los números impares es", sumaimpar, "\n")
}
pregunta()
|
af2236b69d322d81475e8a86315969ab1fb6d6ca
|
520f1f550b555247e5eeec2b43459f826f446fd5
|
/R/difMH.r
|
58311a61c3d22602d70b8e4fd8a8b5e5f1ec4115
|
[] |
no_license
|
cran/difR
|
d11a3dc2bd1c6c07e8a5b28cf55aa30abe15e311
|
c6a14ca0d0f72777881387c87314ebe8c884c1bd
|
refs/heads/master
| 2021-01-01T05:37:28.999542
| 2020-05-13T07:10:02
| 2020-05-13T07:10:02
| 17,713,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,645
|
r
|
difMH.r
|
# DIF MANTEL-HAENSZEL
# difMH: detection of uniform DIF (differential item functioning) using the
# Mantel-Haenszel procedure.
#
# Arguments:
#   Data            item response data, possibly containing the group column
#   group           column index/name in Data, or an external group vector
#   focal.name      value of 'group' identifying the focal group
#   anchor          optional anchor items (indices or column names); DIF
#                   statistics of anchor items are set to NA in the output
#   match           "score" (total test score) or an external matching variable
#   MHstat          "MHChisq" (chi-square) or "logOR" (log odds-ratio statistic)
#   correct         apply continuity correction (asymptotic test only)
#   exact           use the exact Mantel-Haenszel test instead of asymptotic
#   alpha           significance level
#   purify, nrIter  iterative item purification and its iteration cap
#   p.adjust.method optional multiplicity correction passed to p.adjust()
#   save.output, output  optionally capture the printed result into a file
#
# Returns an object of class "MH" (a list; components built below). Relies on
# mantelHaenszel(), defined elsewhere in the package.
difMH<-function (Data, group, focal.name, anchor=NULL, match="score", MHstat = "MHChisq", correct = TRUE, exact=FALSE,
                 alpha = 0.05, purify = FALSE, nrIter = 10, p.adjust.method=NULL, save.output = FALSE,
                 output = c("out", "default"))
{
  # Purification recomputes the matching score, so it is only meaningful when
  # matching on the test score.
  if (purify & match[1] != "score")
    stop("purification not allowed when matching variable is not 'score'",
         call. = FALSE)
  internalMH <- function() {
    # --- Split Data into the response matrix DATA and the group vector gr ---
    if (length(group) == 1) {
      if (is.numeric(group)) {
        gr <- Data[, group]
        DATA <- Data[, (1:ncol(Data)) != group]
        colnames(DATA) <- colnames(Data)[(1:ncol(Data)) !=
          group]
      }
      else {
        gr <- Data[, colnames(Data) == group]
        DATA <- Data[, colnames(Data) != group]
        colnames(DATA) <- colnames(Data)[colnames(Data) !=
          group]
      }
    }
    else {
      # 'group' is already a full membership vector
      gr <- group
      DATA <- Data
    }
    Group <- rep(0, nrow(DATA))
    Group[gr == focal.name] <- 1   # 1 = focal group, 0 = reference group
    # Detection threshold for the asymptotic statistics
    Q <- switch(MHstat, MHChisq = qchisq(1 - alpha, 1), logOR = qnorm(1 -
      alpha/2))
    if (is.null(Q)) stop("'MHstat' argument not valid", call. = FALSE)
    # --- Resolve anchor items to column positions in ANCHOR ---
    if (!is.null(anchor)){
      dif.anchor<-anchor
      if (is.numeric(anchor)) ANCHOR<-anchor
      else{
        ANCHOR<-NULL
        for (i in 1:length(anchor)) ANCHOR[i]<-(1:ncol(DATA))[colnames(DATA)==anchor[i]]
      }
    }
    else {
      ANCHOR<-1:ncol(DATA)   # no anchor set: match on all items
      dif.anchor<-NULL
    }
    if (exact){
      # ===== Exact Mantel-Haenszel test =====
      # Single pass (no purification, external matching, or explicit anchor)
      if (!purify | match[1]!="score" | !is.null(anchor)) {
        PROV <- mantelHaenszel(DATA, Group, match=match,correct = correct,exact=exact,anchor=ANCHOR)
        STATS <- PROV$resMH
        if (min(PROV$Pval) >=alpha) DIFitems <- "No DIF item detected"
        else DIFitems <- (1:ncol(DATA))[PROV$Pval < alpha]
        RES <- list(MH = STATS, p.value=PROV$Pval, alpha = alpha, DIFitems = DIFitems,
                    correct = correct, exact=exact, match=PROV$match, p.adjust.method=p.adjust.method, adjusted.p=NULL, purification = purify, names = colnames(DATA),
                    anchor.names=dif.anchor,save.output = save.output, output = output)
        if (!is.null(anchor)) {
          # Blank out statistics for anchor items and drop them from DIFitems
          RES$MH[ANCHOR]<-NA
          # NOTE(review): the p-values were stored above under 'p.value', so
          # this creates a new 'Pval' element instead of masking the stored
          # p-values ($ does not partially match 'p.value' here because of the
          # capital P) — confirm against the packaged version.
          RES$Pval[ANCHOR]<-NA
          for (i in 1:length(RES$DIFitems)){
            if (sum(RES$DIFitems[i]==ANCHOR)==1) RES$DIFitems[i]<-NA
          }
          RES$DIFitems<-RES$DIFitems[!is.na(RES$DIFitems)]
        }
      }
      else {
        # Exact test with iterative item purification: re-estimate using only
        # non-flagged items as anchor until the flagged set stabilizes or the
        # iteration cap is hit.
        nrPur <- 0
        difPur <- NULL       # per-step 0/1 flag matrix (history of detections)
        noLoop <- FALSE      # TRUE once the flagged set has converged
        prov1 <- mantelHaenszel(DATA, Group, match=match,correct = correct,exact=exact)
        stats1 <- prov1$resMH
        if (min(prov1$Pval)>=alpha) {
          DIFitems <- "No DIF item detected"
          noLoop <- TRUE
        }
        else {
          dif <- (1:ncol(DATA))[prov1$Pval<alpha]
          difPur <- rep(0, length(stats1))
          difPur[dif] <- 1
          repeat {
            if (nrPur >= nrIter)
              break
            else {
              nrPur <- nrPur + 1
              # nodif = items currently NOT flagged; used as anchor set
              nodif <- NULL
              if (is.null(dif))
                nodif <- 1:ncol(DATA)
              else {
                for (i in 1:ncol(DATA)) {
                  if (sum(i == dif) == 0)
                    nodif <- c(nodif, i)
                }
              }
              prov2 <- mantelHaenszel(DATA, Group, correct = correct,
                                      match=match, anchor = nodif,exact=exact)
              stats2 <- prov2$resMH
              if (min(prov2$Pval)>=alpha) dif2 <- NULL
              else dif2 <- (1:ncol(DATA))[prov2$Pval<alpha]
              difPur <- rbind(difPur, rep(0, ncol(DATA)))
              difPur[nrPur + 1, dif2] <- 1
              # Stop when the flagged set is unchanged between iterations
              if (length(dif) != length(dif2))
                dif <- dif2
              else {
                dif <- sort(dif)
                dif2 <- sort(dif2)
                if (sum(dif == dif2) == length(dif)) {
                  noLoop <- TRUE
                  break
                }
                else dif <- dif2
              }
            }
          }
          stats1 <- stats2
          prov1 <- prov2
          DIFitems <- (1:ncol(DATA))[prov1$Pval<alpha]
        }
        # Label the purification history matrix
        if (!is.null(difPur)) {
          ro <- co <- NULL
          for (ir in 1:nrow(difPur)) ro[ir] <- paste("Step",
            ir - 1, sep = "")
          for (ic in 1:ncol(difPur)) co[ic] <- paste("Item",
            ic, sep = "")
          rownames(difPur) <- ro
          colnames(difPur) <- co
        }
        RES <- list(MH = stats1, p.value=prov1$Pval, alpha = alpha, DIFitems = DIFitems,
                    correct = correct, exact=exact, match=prov1$match, p.adjust.method=p.adjust.method, adjusted.p=NULL, purification = purify, nrPur = nrPur,
                    difPur = difPur, convergence = noLoop, names = colnames(DATA),
                    anchor.names=NULL, save.output = save.output, output = output)
      }
    }
    else{
      # ===== Asymptotic Mantel-Haenszel test =====
      # Single pass (no purification, external matching, or explicit anchor)
      if (!purify | match[1]!="score" | !is.null(anchor)) {
        PROV <- mantelHaenszel(DATA, Group, match=match, correct = correct,exact=exact,anchor=ANCHOR)
        if (MHstat == "MHChisq"){
          STATS <- PROV$resMH
          PVAL<-1-pchisq(STATS,1)
        }
        else {
          # log odds-ratio statistic standardized by its variance
          STATS <- log(PROV$resAlpha)/sqrt(PROV$varLambda)
          PVAL<-2*(1-pnorm(abs(STATS)))
        }
        if (max(abs(STATS),na.rm=TRUE) <= Q)
          DIFitems <- "No DIF item detected"
        else DIFitems <- (1:ncol(DATA))[is.na(STATS)==FALSE & abs(STATS) > Q]
        RES <- list(MH = STATS, p.value=PVAL, alphaMH = PROV$resAlpha,
                    varLambda = PROV$varLambda, MHstat = MHstat,
                    alpha = alpha, thr = Q, DIFitems = DIFitems,
                    correct = correct, exact=exact, match=PROV$match, p.adjust.method=p.adjust.method, adjusted.p=NULL, purification = purify, names = colnames(DATA),
                    anchor.names=dif.anchor, save.output = save.output, output = output)
        if (!is.null(anchor)) {
          # Blank out statistics for anchor items and drop them from DIFitems
          RES$MH[ANCHOR]<-NA
          RES$alphaMH[ANCHOR]<-NA
          RES$varLambda[ANCHOR]<-NA
          for (i in 1:length(RES$DIFitems)){
            if (sum(RES$DIFitems[i]==ANCHOR)==1) RES$DIFitems[i]<-NA
          }
          RES$DIFitems<-RES$DIFitems[!is.na(RES$DIFitems)]
        }
      }
      else {
        # Asymptotic test with iterative item purification (same loop shape
        # as the exact branch, but thresholding |statistic| against Q)
        nrPur <- 0
        difPur <- NULL
        noLoop <- FALSE
        prov1 <- mantelHaenszel(DATA, Group, match=match, correct = correct,exact=exact)
        if (MHstat == "MHChisq")
          stats1 <- prov1$resMH
        else stats1 <- log(prov1$resAlpha)/sqrt(prov1$varLambda)
        if (max(abs(stats1),na.rm=TRUE) <= Q) {
          DIFitems <- "No DIF item detected"
          noLoop <- TRUE
        }
        else {
          dif <- (1:ncol(DATA))[is.na(stats1)==FALSE & abs(stats1) > Q]
          difPur <- rep(0, length(stats1))
          difPur[dif] <- 1
          repeat {
            if (nrPur >= nrIter)
              break
            else {
              nrPur <- nrPur + 1
              nodif <- NULL
              if (is.null(dif) == TRUE)
                nodif <- 1:ncol(DATA)
              else {
                for (i in 1:ncol(DATA)) {
                  if (sum(i == dif) == 0)
                    nodif <- c(nodif, i)
                }
              }
              prov2 <- mantelHaenszel(DATA, Group, match=match, correct = correct,
                                      anchor = nodif,exact=exact)
              if (MHstat == "MHChisq")
                stats2 <- prov2$resMH
              else stats2 <- log(prov2$resAlpha)/sqrt(prov2$varLambda)
              if (max(abs(stats2),na.rm=TRUE) <= Q)
                dif2 <- NULL
              else dif2 <- (1:ncol(DATA))[is.na(stats2)==FALSE & abs(stats2) >
                Q]
              difPur <- rbind(difPur, rep(0, ncol(DATA)))
              difPur[nrPur + 1, dif2] <- 1
              if (length(dif) != length(dif2))
                dif <- dif2
              else {
                dif <- sort(dif)
                dif2 <- sort(dif2)
                if (sum(dif == dif2) == length(dif)) {
                  noLoop <- TRUE
                  break
                }
                else dif <- dif2
              }
            }
          }
          stats1 <- stats2
          prov1 <- prov2
          DIFitems <- (1:ncol(DATA))[is.na(stats1)==FALSE & abs(stats1) > Q]
        }
        if (is.null(difPur) == FALSE) {
          ro <- co <- NULL
          for (ir in 1:nrow(difPur)) ro[ir] <- paste("Step",
            ir - 1, sep = "")
          for (ic in 1:ncol(difPur)) co[ic] <- paste("Item",
            ic, sep = "")
          rownames(difPur) <- ro
          colnames(difPur) <- co
        }
        if (MHstat=="MHChisq") PVAL<-1-pchisq(stats1,1)
        else PVAL<-2*(1-pnorm(abs(stats1)))
        RES <- list(MH = stats1, p.value=PVAL,alphaMH = prov1$resAlpha,
                    varLambda = prov1$varLambda, MHstat = MHstat,
                    alpha = alpha, thr = Q, DIFitems = DIFitems,
                    correct = correct, exact=exact, match=prov1$match, p.adjust.method=p.adjust.method, adjusted.p=NULL, purification = purify, nrPur = nrPur,
                    difPur = difPur, convergence = noLoop, names = colnames(DATA),
                    anchor.names=NULL, save.output = save.output, output = output)
      }
    }
    # --- Optional multiplicity adjustment of the p-values ---
    if (!is.null(p.adjust.method)){
      # NOTE(review): in the exact case the p-values are stored under
      # 'p.value', not 'Pval'; RES$Pval is NULL here unless the anchor branch
      # above accidentally created it — confirm against the packaged version.
      if (exact) pval<-RES$Pval
      else {
        if (RES$MHstat=="MHChisq") pval<-1-pchisq(RES$MH,1)
        else pval<-2 * (1 - pnorm(abs(RES$MH)))
      }
      RES$adjusted.p<-p.adjust(pval,method=p.adjust.method)
      if (min(RES$adjusted.p,na.rm=TRUE)>alpha) RES$DIFitems<-"No DIF item detected"
      else RES$DIFitems<-which(RES$adjusted.p<alpha)
    }
    class(RES) <- "MH"
    return(RES)
  }
  resToReturn <- internalMH()
  # Optionally capture the printed result into '<output[1]>.txt'
  if (save.output == TRUE) {
    if (output[2] == "default")
      wd <- paste(getwd(), "/", sep = "")
    else wd <- output[2]
    fileName <- paste(wd, output[1], ".txt", sep = "")
    capture.output(resToReturn, file = fileName)
  }
  return(resToReturn)
}
# METHODS
# plot method for "MH" objects: plots the per-item MH statistic with the
# detection threshold(s), highlighting detected DIF items.
#   x            object of class "MH" returned by difMH (asymptotic only)
#   pch, col     symbol and highlight color for DIF items
#   number       plot item numbers as text instead of points
#   save.plot    additionally save the figure to pdf/jpeg
#   save.options c(file base name, directory or "default" (= getwd), format)
plot.MH<-function (x, pch = 8, number = TRUE, col = "red", save.plot=FALSE,save.options=c("plot","default","pdf"),...)
{
  # The exact test has no plottable statistic/threshold pair
  if (x$exact) stop("Error: plot is not available with exact Mantel-Haenszel test",call.=FALSE)
  # Drawing is wrapped in a closure so it can be replayed on a file device
  internalMH<-function(){
    res <- x
    # y-limits: chi-square is one-sided (>= 0); logOR is two-sided
    if (res$MHstat == "MHChisq")
      yl <- c(0, max(c(res$MH, res$thr) + 1,na.rm=TRUE))
    else yl <- c(min(c(res$MH, -res$thr) - 0.5,na.rm=TRUE), max(c(res$MH,
      res$thr) + 0.5,na.rm=TRUE))
    ytitle = switch(res$MHstat, MHChisq = "MH Chi-square statistic",
      logOR = "log OR statistic")
    if (!number) {
      # Points; DIF items re-drawn in 'col'
      plot(res$MH, xlab = "Item", ylab = ytitle, ylim = yl,
           pch = pch, main = "Mantel-Haenszel")
      if (!is.character(res$DIFitems))
        points(res$DIFitems, res$MH[res$DIFitems], pch = pch,
               col = col)
    }
    else {
      # Item numbers as labels; DIF items re-drawn in 'col'
      plot(res$MH, xlab = "Item", ylab = ytitle, ylim = yl,
           col = "white", main = "Mantel-Haenszel")
      text(1:length(res$MH), res$MH, 1:length(res$MH))
      if (!is.character(res$DIFitems))
        text(res$DIFitems, res$MH[res$DIFitems], res$DIFitems,
             col = col)
    }
    abline(h = res$thr)
    if (res$MHstat == "logOR")
      abline(h = -res$thr)   # symmetric lower threshold for logOR
  }
  internalMH()   # draw on the current device
  # Optionally replay the plot onto a pdf/jpeg device
  if (save.plot){
    plotype<-NULL
    if (save.options[3]=="pdf") plotype<-1
    if (save.options[3]=="jpeg") plotype<-2
    if (is.null(plotype)) cat("Invalid plot type (should be either 'pdf' or 'jpeg').","\n","The plot was not captured!","\n")
    else {
      if (save.options[2]=="default") wd<-paste(getwd(),"/",sep="")
      else wd<-save.options[2]
      fileName<-paste(wd,save.options[1],switch(plotype,'1'=".pdf",'2'=".jpg"),sep="")
      if (plotype==1){
        {
          pdf(file=fileName)
          internalMH()
        }
        dev.off()
      }
      if (plotype==2){
        {
          jpeg(filename=fileName)
          internalMH()
        }
        dev.off()
      }
      cat("The plot was captured and saved into","\n"," '",fileName,"'","\n","\n",sep="")
    }
  }
  else cat("The plot was not captured!","\n",sep="")
}
###
# print method for "MH" objects: formats the difMH results — settings,
# purification summary, anchor items, per-item statistics with significance
# codes, detected DIF items, and (asymptotic only) ETS Delta effect sizes.
print.MH<-function (x, ...)
{
  res <- x
  cat("\n")
  cat("Detection of Differential Item Functioning using Mantel-Haenszel method",
      "\n")
  # --- Settings header: continuity correction / purification / inference ---
  if (res$correct & !res$exact)
    corr <- "with "
  else corr <- "without "
  if (res$purification & is.null(res$anchor.names))
    pur <- "with "
  else pur <- "without "
  cat(corr, "continuity correction and ", pur, "item purification",
      "\n", "\n", sep = "")
  if (res$exact)
    cat("Results based on exact inference", "\n", "\n")
  else cat("Results based on asymptotic inference", "\n", "\n")
  # --- Purification convergence diagnostics ---
  if (res$purification & is.null(res$anchor.names)) {
    if (res$nrPur <= 1)
      word <- " iteration"
    else word <- " iterations"
    if (!res$convergence) {
      cat("WARNING: no item purification convergence after ",
          res$nrPur, word, "\n", sep = "")
      # Compare each step's flagged set to step 0 to detect a cycle
      loop <- NULL
      for (i in 1:res$nrPur) loop[i] <- sum(res$difPur[1,
        ] == res$difPur[i + 1, ])
      if (max(loop) != length(res$MH))
        cat("(Note: no loop detected in less than ",
            res$nrPur, word, ")", "\n", sep = "")
      else cat("(Note: loop of length ", min((1:res$nrPur)[loop ==
        length(res$MH)]), " in the item purification process)",
        "\n", sep = "")
      cat("WARNING: following results based on the last iteration of the purification",
          "\n", "\n")
    }
    else cat("Convergence reached after ", res$nrPur, word,
             "\n", "\n", sep = "")
  }
  if (res$match[1] == "score")
    cat("Matching variable: test score", "\n", "\n")
  else cat("Matching variable: specified matching variable",
           "\n", "\n")
  # --- Anchor items; itk = indices of items to display (non-anchor) ---
  if (is.null(res$anchor.names)) {
    itk <- 1:length(res$MH)
    cat("No set of anchor items was provided", "\n", "\n")
  }
  else {
    itk <- (1:length(res$MH))[!is.na(res$MH)]
    cat("Anchor items (provided by the user):", "\n")
    if (is.numeric(res$anchor.names))
      mm <- res$names[res$anchor.names]
    else mm <- res$anchor.names
    mm <- cbind(mm)
    rownames(mm) <- rep("", nrow(mm))
    colnames(mm) <- ""
    print(mm, quote = FALSE)
    cat("\n", "\n")
  }
  # --- Multiplicity adjustment note ---
  if (is.null(res$p.adjust.method))
    cat("No p-value adjustment for multiple comparisons",
        "\n", "\n")
  else {
    pAdjMeth <- switch(res$p.adjust.method, bonferroni = "Bonferroni",
      holm = "Holm", hochberg = "Hochberg", hommel = "Hommel",
      BH = "Benjamini-Hochberg", BY = "Benjamini-Yekutieli")
    # NOTE(review): "adjustement" is a typo in this user-facing string;
    # fixing it would change runtime output, so it is only flagged here.
    cat("Multiple comparisons made with", pAdjMeth, "adjustement of p-values",
        "\n", "\n")
  }
  # --- Per-item statistic table with significance codes ---
  if (res$exact)
    met <- "Exact statistic:"
  else met <- switch(res$MHstat, MHChisq = "Mantel-Haenszel Chi-square statistic:",
    logOR = "Log odds-ratio statistic:")
  cat(met, "\n", "\n")
  if (res$exact)
    pval <- round(res$p.value, 4)
  else {
    if (res$MHstat == "MHChisq")
      pval <- round(1 - pchisq(res$MH, 1), 4)
    else pval <- round(2 * (1 - pnorm(abs(res$MH))),
      4)
  }
  # Significance stars from (adjusted) p-values
  if (is.null(res$p.adjust.method))
    symb <- symnum(pval, c(0, 0.001, 0.01, 0.05, 0.1, 1),
      symbols = c("***", "**", "*", ".", ""))
  else symb <- symnum(round(res$adjusted.p, 4), c(0, 0.001,
    0.01, 0.05, 0.1, 1), symbols = c("***", "**", "*", ".",
    ""))
  if (!res$exact)
    m1 <- cbind(round(res$MH[itk], 4), pval[itk])
  else m1 <- cbind(round(res$MH[itk]), pval[itk])
  if (!is.null(res$p.adjust.method))
    m1 <- cbind(m1, round(res$adjusted.p[itk], 4))
  m1 <- round(m1, 4)
  m1 <- noquote(cbind(format(m1, justify = "right"), symb[itk]))
  if (!is.null(res$names))
    rownames(m1) <- res$names[itk]
  else {
    rn <- NULL
    for (i in 1:nrow(m1)) rn[i] <- paste("Item", i, sep = "")
    rownames(m1) <- rn[itk]
  }
  if (is.null(res$p.adjust.method))
    colnames(m1) <- c("Stat.", "P-value", "")
  else colnames(m1) <- c("Stat.", "P-value", "Adj. P", "")
  print(m1)
  cat("\n")
  cat("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ",
      "\n")
  if (res$exact)
    cat("\n", "Significance level: ", res$alpha, "\n", "\n",
        sep = "")
  else cat("\n", "Detection threshold: ", round(res$thr, 4),
    " (significance level: ", res$alpha, ")", "\n", "\n",
    sep = "")
  # --- List of detected DIF items ---
  if (is.character(res$DIFitems))
    cat("Items detected as DIF items:", res$DIFitems, "\n",
        "\n")
  else {
    cat("Items detected as DIF items:", "\n")
    if (!is.null(res$names))
      m2 <- res$names
    else {
      rn <- NULL
      for (i in 1:length(res$MH)) rn[i] <- paste("Item",
        i, sep = "")
      m2 <- rn
    }
    m2 <- cbind(m2[res$DIFitems])
    rownames(m2) <- rep("", nrow(m2))
    colnames(m2) <- ""
    print(m2, quote = FALSE)
    cat("\n", "\n")
  }
  # --- Effect sizes on the ETS Delta scale (asymptotic test only) ---
  if (!res$exact) {
    cat("Effect size (ETS Delta scale):", "\n", "\n")
    cat("Effect size code:", "\n")
    cat(" 'A': negligible effect", "\n")
    cat(" 'B': moderate effect", "\n")
    cat(" 'C': large effect", "\n", "\n")
    # deltaMH = -2.35 * log(alphaMH), classified by |delta|: A < 1 <= B < 1.5 <= C
    r2 <- round(-2.35 * log(res$alphaMH), 4)
    symb1 <- symnum(abs(r2), c(0, 1, 1.5, Inf), symbols = c("A",
      "B", "C"))
    matR2 <- cbind(round(res$alphaMH[itk], 4), r2[itk])
    matR2 <- noquote(cbind(format(matR2, justify = "right"),
      symb1[itk]))
    if (!is.null(res$names))
      rownames(matR2) <- res$names[itk]
    else {
      rn <- NULL
      for (i in 1:nrow(matR2)) rn[i] <- paste("Item", i,
        sep = "")
      rownames(matR2) <- rn[itk]
    }
    colnames(matR2) <- c("alphaMH", "deltaMH", "")
    print(matR2)
    cat("\n")
    cat("Effect size codes: 0 'A' 1.0 'B' 1.5 'C'", "\n")
    cat(" (for absolute values of 'deltaMH')", "\n", "\n")
  }
  # --- Echo where the output was (or was not) captured ---
  if (!x$save.output)
    cat("Output was not captured!", "\n")
  else {
    if (x$output[2] == "default")
      wd <- paste(getwd(), "/", sep = "")
    else wd <- x$output[2]
    fileName <- paste(wd, x$output[1], ".txt", sep = "")
    cat("Output was captured and saved into file", "\n",
        " '", fileName, "'", "\n", "\n", sep = "")
  }
}
|
4e015047cfb0e6fda2ccf9488bf3ab39f53c8954
|
e1f093f20200ed2bd820d4ee0884c87c73e41d66
|
/man/mantel.Rd
|
aee19050c2e0785ef81cc8491d159c999228709f
|
[] |
no_license
|
cran/ecodist
|
8431a5659f02211c3131e282fbd2c90765285aa0
|
a34b199c4d70d5ee21e2d6abbd54d2a9729d7dd0
|
refs/heads/master
| 2022-05-13T06:14:42.563254
| 2022-05-05T05:50:08
| 2022-05-05T05:50:08
| 17,695,709
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,465
|
rd
|
mantel.Rd
|
\name{mantel}
\alias{mantel}
\title{ Mantel test }
\description{
Simple and partial Mantel tests, with options for ranked data, permutation tests, and bootstrapped confidence limits.
}
\usage{
mantel(formula = formula(data), data, nperm = 1000,
mrank = FALSE, nboot = 500, pboot = 0.9, cboot = 0.95)
}
\arguments{
\item{formula}{ formula describing the test to be conducted. For this test, y ~ x will perform a simple Mantel test, while y ~ x + z1 + z2 + z3 will do a partial Mantel test of the relationship between x and y given z1, z2, z3. All variables can be either a distance matrix of class dist or vectors of dissimilarities. }
\item{data}{ an optional dataframe containing the variables in the model as columns of dissimilarities. By default the variables are taken from the current environment. }
\item{nperm}{ number of permutations to use. If set to 0, the permutation test will be omitted. }
\item{mrank}{ if this is set to FALSE (the default option), Pearson correlations will be used. If
set to TRUE, the Spearman correlation (correlation ranked distances) will be used. }
\item{nboot}{ number of iterations to use for the bootstrapped confidence limits. If set to 0,
the bootstrapping will be omitted. }
\item{pboot}{ the level at which to resample the data for the bootstrapping procedure. }
\item{cboot}{ the level of the confidence limits to estimate. }
}
\details{
If only one independent variable is given, the simple Mantel r (r12) is calculated. If
more than one independent variable is given, the partial Mantel r (ryx|x1 ...) is
calculated by permuting one of the original dissimilarity matrices.
The bootstrapping is actually resampling without replacement, because duplication of
samples is not useful in a dissimilarity context (the dissimilarity of a sample with
itself is zero). Resampling within dissimilarity values is inappropriate, just as for
permutation.
}
\value{
\item{mantelr }{Mantel coefficient.}
\item{pval1 }{one-tailed p-value (null hypothesis: r <= 0).}
\item{pval2 }{one-tailed p-value (null hypothesis: r >= 0).}
\item{pval3 }{two-tailed p-value (null hypothesis: r = 0).}
\item{llim }{lower confidence limit.}
\item{ulim }{upper confidence limit.}
}
\references{ Mantel, N. 1967. The detection of disease clustering and a generalized
regression approach. Cancer Research 27:209-220.
Smouse, P.E., J.C. Long and R.R. Sokal. 1986. Multiple regression and correlation
extensions of the Mantel test of matrix correspondence. Systematic Zoology
35:627-632.
Goslee, S.C. and Urban, D.L. 2007. The ecodist package for dissimilarity-based analysis
of ecological data. Journal of Statistical Software 22(7):1-19.
Goslee, S.C. 2010. Correlation analysis of dissimilarity matrices. Plant
Ecology 206(2):279-286.
}
\author{ Sarah Goslee }
\seealso{ \code{\link{mgram}}, \code{\link{mgroup}} }
\examples{
data(graze)
grasses <- graze[, colnames(graze) \%in\% c("DAGL", "LOAR10", "LOPE", "POPR")]
legumes <- graze[, colnames(graze) \%in\% c("LOCO6", "TRPR2", "TRRE3")]
grasses.bc <- bcdist(grasses)
legumes.bc <- bcdist(legumes)
space.d <- dist(graze$sitelocation)
forest.d <- dist(graze$forestpct)
# Mantel test: is the difference in forest cover between sites
# related to the difference in grass composition between sites?
mantel(grasses.bc ~ forest.d)
# Mantel test: is the geographic distance between sites
# related to the difference in grass composition between sites?
mantel(grasses.bc ~ space.d)
# Partial Mantel test: is the difference in forest cover between sites
# related to the difference in grass composition once the
# linear effects of geographic distance are removed?
mantel(grasses.bc ~ forest.d + space.d)
# Mantel test: is the difference in forest cover between sites
# related to the difference in legume composition between sites?
mantel(legumes.bc ~ forest.d)
# Mantel test: is the geographic distance between sites
# related to the difference in legume composition between sites?
mantel(legumes.bc ~ space.d)
# Partial Mantel test: is the difference in forest cover between sites
# related to the difference in legume composition once the
# linear effects of geographic distance are removed?
mantel(legumes.bc ~ forest.d + space.d)
# Is there nonlinear pattern in the relationship with geographic distance?
par(mfrow=c(2, 1))
plot(mgram(grasses.bc, space.d, nclass=8))
plot(mgram(legumes.bc, space.d, nclass=8))
}
\keyword{ multivariate }
|
c610e66433f87a9eaee7f4cfd03d1b5b79c2723e
|
5aee1f7922b6ffb6a3aec7a0a73d22429957fc4a
|
/plot2.r
|
8c4e29ff63ae9193aa488cd951aa4f6c087bc42a
|
[] |
no_license
|
rodrigo-m/ExData_Plotting1
|
81b355b5b979deefca010f157a74d1ec117c3b3b
|
891e6bcb796e0c949300958418b8444fc7aaf93c
|
refs/heads/master
| 2020-12-25T21:34:57.203333
| 2015-06-07T07:49:40
| 2015-06-07T07:49:40
| 37,008,589
| 0
| 0
| null | 2015-06-07T07:45:07
| 2015-06-07T07:45:07
| null |
UTF-8
|
R
| false
| false
| 661
|
r
|
plot2.r
|
# Exploratory Data Analysis, plot 2: global active power over time for
# 2007-02-01 and 2007-02-02 of the household power consumption dataset.
# Make sure the library for read.csv.sql is loaded
library(sqldf)
# Set working folder (machine-specific; adjust before running)
setwd ("c:/R/ExploratoryA1")
# Load only the two days of interest straight from the text file via SQL,
# avoiding reading the full dataset into memory first
hpc <- read.csv.sql("household_power_consumption.txt", sql = "select * from file where Date in ('1/2/2007', '2/2/2007')", header = TRUE, sep =";",stringsAsFactors = FALSE)
# Add concatenated date + time as a DateTime field (format dd/mm/yyyy HH:MM:SS)
hpc$DateTime <- as.POSIXct(strptime(paste(hpc[,1], hpc[,2]), "%d/%m/%Y %H:%M:%S"))
# Create the line chart of Global_active_power against time
with(hpc, plot(Global_active_power ~ DateTime, type = "l", xlab="", ylab="Global Active Power (kilowatts)"))
# Copy the on-screen plot to a PNG file (device default size)
dev.copy(png,filename="plot2.png");
dev.off()
|
9041124258cfd2a071e4c7de507e2bdb5ffb5ebe
|
629d07722bb2dee9a6aaa9701df9b921e3f232f2
|
/MCPcounter.Score.R
|
fd4da5e98fbf38f7fda398f8a1ea58151b67a799
|
[] |
no_license
|
trial-lab/TCGA
|
58db4b796421d8f0931790cd14234f7945a3dabe
|
f5e68eb92a87e4e0f62fb4210dd326060729920f
|
refs/heads/master
| 2020-04-28T23:39:05.100028
| 2019-04-03T06:09:32
| 2019-04-03T06:09:32
| 175,663,987
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 318
|
r
|
MCPcounter.Score.R
|
# Compute MCP-counter cell-population abundance scores from a gene expression
# matrix and visualize them as a heatmap, then write the scores to CSV.
library(devtools)
library(MCPcounter)
# NOTE(review): input path is an empty placeholder — fill in before running.
# Expected: a CSV whose first column holds gene identifiers (used as rownames).
df<-read.csv("",header=TRUE,row.names=1)
#replace the first gene_id column to HUGO_symbols
mcpcounter_estimates <- MCPcounter.estimate(df,"HUGO_symbols")
# Blue-white-red heatmap of the per-sample population estimates
heatmap(as.matrix(mcpcounter_estimates),col=colorRampPalette(c("blue","white","red"))(100))
# NOTE(review): output path is an empty placeholder — fill in before running.
write.csv(mcpcounter_estimates,"")
|
290920f26fc6d6e6f367f0e4e8a4f85e2eb640d4
|
4f64eb6bf672ddc53e10ce3da70e2f4d4b88adad
|
/run_analysis.R
|
9d07ba2649182d9c79dfc2b4008b9cd98f9bdf09
|
[] |
no_license
|
Swarno1984/Getting-and-Cleaning-Data-Project
|
f3bc137daddb73b8703970521b39290d46df7d36
|
b83e973f85c68e6e79d248d9688d4db58e4273cf
|
refs/heads/master
| 2021-01-23T01:21:05.864120
| 2017-05-30T19:49:10
| 2017-05-30T19:49:10
| 92,867,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,427
|
r
|
run_analysis.R
|
library(reshape2)

# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge the train/test sets, keep only mean/std features, label activities,
# and write a tidy data set of per-subject/per-activity averages.

# Set the working directory and unzip/extract the data
# (machine-specific path; adjust before running on another machine)
setwd('C:/Users/ss45360/Documents/Coursera/UCI/')
filename <- "getdata_dataset.zip"
if (!file.exists(filename)){
  # Fixed: the original URL literal ended with a trailing space, which
  # produces an invalid URL and breaks download.file()
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, mode ='wb')
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}
setwd('C:/Users/ss45360/Documents/Coursera/UCI/UCI HAR Dataset/')

# Read the data directly from the files.
# Fixed file-name casing to match the dataset as shipped (X_train.txt,
# y_test.txt, ...) so the script also works on case-sensitive filesystems.
features <- read.table('./features.txt', header=FALSE)            # feature names
activityType <- read.table('./activity_labels.txt', header=FALSE) # activity id -> label
subjectTrain <- read.table('./train/subject_train.txt', header=FALSE)
subjectTest <- read.table('./test/subject_test.txt', header=FALSE)
Ytrain <- read.table('./train/y_train.txt', header=FALSE)         # activity ids (train)
Ytest <- read.table('./test/y_test.txt', header=FALSE)            # activity ids (test)

# Coerce label/name columns to character for matching below
activityType[,2] <- as.character(activityType[,2])
features[,2] <- as.character(features[,2])

# Extract only the features measuring mean and standard deviation,
# and clean up their names (drop '-', '(' and ')', camel-case mean/std)
neededfeatures <- grep(".*mean.*|.*std.*", features[,2])
neededfeatures.names <- features[neededfeatures,2]
neededfeatures.names <- gsub('-mean', 'Mean', neededfeatures.names)
neededfeatures.names <- gsub('-std', 'Std', neededfeatures.names)
neededfeatures.names <- gsub('[-()]', '', neededfeatures.names)

# Load the train and test measurement sets, keeping only the needed columns,
# and prepend the subject and activity columns
Xtrain <- read.table('./train/X_train.txt')[neededfeatures]
Xtrain <- cbind(subjectTrain, Ytrain, Xtrain)
Xtest <- read.table('./test/X_test.txt')[neededfeatures]
Xtest <- cbind(subjectTest, Ytest, Xtest)

# Merge both data sets and add descriptive column names
mergedData <- rbind(Xtrain, Xtest)
colnames(mergedData) <- c("subject", "activity", neededfeatures.names)

# Convert the activities and subjects into labelled factors
mergedData$activity <- factor(mergedData$activity, levels = activityType[,1], labels = activityType[,2])
mergedData$subject <- as.factor(mergedData$subject)

# Average every variable per subject/activity pair and write the tidy set
mergedData.melted <- melt(mergedData, id = c("subject", "activity"))
mergedData.mean <- dcast(mergedData.melted, subject + activity ~ variable, mean)
write.table(mergedData.mean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
4fa37a581c478f7d7b4a0bb81697cae0f9eafaa2
|
0f811c3e6c6bac8fcab04175516d085c26834ee1
|
/man/mutation_gene_data_solution.Rd
|
b6fa7df79adf77e9fb50599f32d7a7de2cc2021f
|
[
"MIT"
] |
permissive
|
lukatrkla/lanpAnalysis
|
7674b1b5a137b9456b956d2b30eee41c08aa6bb7
|
b4a747df063103d7c11ef4e2bd288cd2d4eb6a26
|
refs/heads/master
| 2023-02-04T17:00:58.641015
| 2020-12-10T01:32:52
| 2020-12-10T01:32:52
| 307,875,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 387
|
rd
|
mutation_gene_data_solution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mutation_gene_data_solution}
\alias{mutation_gene_data_solution}
\title{Produced mutation_gene_data_solution}
\format{
NULL
}
\usage{
mutation_gene_data_solution
}
\description{
The solution to running mutation_gene_data("TP53", tp53mutated3times)
}
\keyword{datasets}
|
dc4534efd5533449c18c0ca719aa543e06177b7c
|
f5d517ac83c8860abdf8a7db2223d58886e24155
|
/R/dReport.r
|
7cb9e93e902c069ee0a3863a621217722a46d24e
|
[] |
no_license
|
dupontct/greport
|
8efe567313486de2c1c51cb5e93c4f50057f0b57
|
7cc99883f6ff27ba682c8d207d7d0e112a9018a4
|
refs/heads/master
| 2021-09-05T08:46:28.937827
| 2018-01-25T19:06:21
| 2018-01-25T19:06:21
| 118,954,658
| 0
| 0
| null | 2018-01-25T18:58:32
| 2018-01-25T18:58:31
| null |
UTF-8
|
R
| false
| false
| 23,192
|
r
|
dReport.r
|
#' Descriptive Statistics Report
#'
#' Generate graphics and LaTeX with descriptive statistics
#'
#' \code{dReport} generates multi-panel charts, separately for categorical analysis variables and continuous ones. The Hmisc \code{summaryP} function and its plot method are used for categorical variables, and \code{bpplotM} is used to make extended box plots for continuous ones unless \code{what='byx'}. Stratification is by treatment or other variables. The user must have defined a LaTeX macro \code{\\eboxpopup} (which may be defined to do nothing) with one argument. This macro is called with argument \code{extended box plot} whenever that phrase appears in the legend, so that a \code{PDF} popup may be generated to show the prototype. See the example in \code{report.Rnw} in the \code{tests} directory. Similarly a popup macro \code{\\qintpopup} must be defined, which generates a tooltip for the phrase \code{quantile intervals}.
#'
#' @param formula a formula accepted by the \code{bpplotM} or \code{summaryP} functions. \code{formula} must have an \code{id(subjectidvariable)} term if there are repeated measures, in order to get correct subject counts as \code{nobs}.
#' @param groups a superpositioning variable, usually treatment, for categorical charts. For continuous analysis variables, \code{groups} becomes the \code{y}-axis stratification variable. This is a single character string.
#' @param what \code{"box"} (the default) or \code{"xy"} for continuous analysis variables, or \code{"proportions"} (or shorter) for categorical ones. Instead, specifying \code{what="byx"} results in an array of quantile intervals for continuous \code{y}, Wilson confidence intervals for proportions when \code{y} is binary, or means and parametric confidence limits when \code{y} is not continuous but is not binary. If \code{what} is omitted or \code{what="byx"}, actions will be inferred from the most continuous variable listed in \code{formula}. When \code{fun} is given, different behavior results (see below).
#' @param byx.type set to \code{"quantiles"} to show vertical quantile intervals of \code{y} at each \code{x} for when \code{what="byx"} and the \code{y} variable is continuous numeric, or set \code{byx.type="violin"} (the default) to plot half-violin plots at each \code{x}.
#' @param violinbox set to \code{TRUE} to add violin plots to box plots
#' @param violinbox.opts a list to pass to \code{panel.violin}
#' @param summaryPsort set to \code{TRUE} to sort categories in descending order of frequencies
#' @param exclude1 logical used for \code{latex} methods when \code{summaryM} or \code{summaryP} are called by \code{dReport}, or for plot methods for \code{summaryP}. The default is \code{TRUE} to cause the most frequent level of any two-level categorical variable to not be used as a separate category in the graphic or table. See \code{\link[Hmisc]{summaryM}}.
#' @param stable set to \code{FALSE} to suppress creation of backup supplemental tables for graphics
#' @param fun a function that takes individual response variables (which may be matrices, as in \code{\link[survival]{Surv}} objects) and creates one or more summary statistics that will be computed while the resulting data frame is being collapsed to one row per condition. Dot charts are drawn when \code{fun} is given.
#' @param data data frame
#' @param subset a subsetting expression for the entire analysis
#' @param na.action a NA handling function for data frames, default is \code{na.retain}
#' @param panel character string. Name of panel, which goes into file base names and figure labels for cross-referencing
#' @param subpanel If calling \code{dReport} more than once for the same type of chart (by different values of \code{what}), specify \code{subpanel} to distinguish the multiple calls. In that case, \code{-subpanel} will be appended to \code{panel} when creating figure labels and cross-references.
#' @param head character string. Specifies initial text in the figure caption, otherwise a default is used
#' @param tail optional character string. Specifies final text in the figure caption, e.g., what might have been put in a footnote in an ordinary text page. This appears just before any needles.
#' @param continuous the minimum number of numeric values a variable must have in order to be considered continuous. Also passed to \code{summaryM}.
#' @param h numeric. Height of plot, in inches
#' @param w numeric. Width of plot
#' @param outerlabels logical that if \code{TRUE}, pass \code{lattice} graphics through the \code{latticeExtra} package's \code{useOuterStrips}function if there are two conditioning (paneling) variables, to put panel labels in outer margins.
#' @param \dots Passed to \code{summaryP} or \code{bpplotM}
#' @param append logical. Set to \code{FALSE} to start a new panel
#' @param sopts list specifying extra arguments to pass to \code{bpplotM}, \code{summaryP}, or \code{summaryS}
#' @param popts list specifying extra arguments to pass to a plot method. One example is \code{text.at} to specify some number beyond \code{xlim[2]} to leave extra space for numerators and denominators when using \code{summaryP} for categorical analysis variables. Another common use is for example \code{popts=list(layout=c(columns,rows))} to be used in rendering \code{lattice} plots. \code{key} and \code{panel} are also frequently used.
#' @param lattice set to \code{TRUE} to use \code{lattice} instead of \code{ggplot2} for proportions. When this option is in effect, numerators and denominators are shown.
#' @export
#' @examples
#' # See test.Rnw in tests directory
dReport <-
  function(formula, groups=NULL,
           what=c('box', 'proportions', 'xy', 'byx'),
           byx.type=c('violin', 'quantiles'),
           violinbox=TRUE,
           violinbox.opts=list(col=adjustcolor('blue', alpha.f=.25),
                               border=FALSE),
           summaryPsort=FALSE, exclude1=TRUE,
           stable=TRUE,
           fun=NULL, data=NULL, subset=NULL, na.action=na.retain,
           panel = 'desc', subpanel=NULL, head=NULL, tail=NULL,
           continuous=10, h=5.5, w=5.5, outerlabels=TRUE, append=FALSE,
           sopts=NULL, popts=NULL, lattice=FALSE)
{
  mwhat    <- missing(what)
  what     <- match.arg(what)
  byx.type <- match.arg(byx.type)
  tvar     <- getgreportOption('tx.var')
  ## panel/subpanel become LaTeX labels and file names, so restrict characters
  if(grepl('[^a-zA-Z-]', panel))
    stop('panel must contain only A-Z a-z -')
  if(length(subpanel) && grepl('[^a-zA-Z-]', subpanel))
    stop('subpanel must contain only A-Z a-z -')
  # rel <- ggplot2::rel
  # theme <- ggplot2::theme
  # element_text <- ggplot2::element_text
  # guides <- ggplot2::guides
  # guide_legend <- ggplot2::guide_legend
  center <- 'centerline'
  legend <- NULL
  ## Observation counts per response/group, used for sample-fraction needles
  Nobs <- nobsY(formula, group=tvar,
                data=data, subset=subset, na.action=na.action)
  formula.no.id <- Nobs$formula   ## removes id()
  form <- Formula(formula)
  ## Make id() a no-op within the formula environment so model.frame keeps it
  environment(form) <- new.env(parent = environment(form))
  en <- environment(form)
  assign(envir = en, 'id', function(x) x)
  Y <- if(length(subset)) model.frame(form, data=data, subset=subset,
                                      na.action=na.action)
       else model.frame(form, data=data, na.action=na.action)
  X <- model.part(form, data=Y, rhs=1)
  Y <- model.part(form, data=Y, lhs=1)
  rhs <- terms(form, rhs=1, specials='id')
  sr  <- attr(rhs, 'specials')
  ## specials counts from lhs variables
  wid <- sr$id
  if(length(wid)) wid <- wid - ncol(Y)
  glevels    <- if(length(groups)) levels(X[[groups]])
  manygroups <- length(glevels) > 3
  nstrata    <- 1
  ## If argument 'what' was missing, infer its value from 'fun' and the
  ## type of the first response variable.
  if(mwhat) {
    if(length(fun)) what <- 'xy'
    else {
      y <- Y[[1]]
      ## NOTE: original code assigned the stray variable `type` in the else
      ## branch; match.arg already made what == 'box', so behavior is the same
      what <- if(is.character(y) || is.factor(y) || inherits(y, 'ynbind'))
                'proportions' else 'box'
    }
  }
  ## Extract labels from the right-hand side of the formula
  labs <- sapply(X, label)
  ## If an id() column exists, remove its label from the vector of labels
  if(length(wid)) labs <- labs[- wid]
  stratlabs <- ifelse(labs == '',
                      if(length(wid)) names(X)[-wid] else names(X), labs)
  ## Extract labels from the left-hand side of the formula
  ylabs <- sapply(Y, label)
  ylabs <- ifelse(ylabs == '', names(Y), ylabs)
  ## Paste together a comma-separated lexical list ("a, b, and c")
  past <- function(x) {
    l <- length(x)
    if(l < 2) x
    else if(l == 2) paste(x, collapse=' and ')
    else paste(paste(x[1 : (l - 1)], collapse=', '), x[l], sep=', and ')
  }
  ## Harrell-Davis quantiles (0.05 .. 0.95), median se and n for a vector;
  ## with fewer than 3 non-missing values return the median, 9 NAs, and n
  quant <- function(y) {
    probs <- c(0.05, 0.125, 0.25, 0.375)
    probs <- sort(c(probs, 1 - probs))
    y <- y[! is.na(y)]
    if(length(y) < 3) {
      w <- c(median(y), rep(NA, 9), length(y))
      names(w) <- c('Median', format(probs), 'se', 'n')
      return(w)
    }
    w  <- hdquantile(y, probs)
    m  <- hdquantile(y, 0.5, se=TRUE)
    se <- as.numeric(attr(m, 'se'))
    c(Median=as.numeric(m), w, se=se, n=length(y))
  }
  ## Mean, 0.95 CL (Wilson for binary, bootstrap otherwise), se and n
  meanse <- function(y) {
    y  <- y[! is.na(y)]
    n  <- length(y)
    se <- if(n < 2) NA else sd(y) / sqrt(n)
    if(is.logical(y) || all(y %in% c(0., 1.))) {
      p  <- mean(y)
      ci <- binconf(sum(y), n)[1, ]
      if(p == 0. || p == 1.) {
        ## Don't trust se=0 at extremes; backsolve from Wilson interval
        w  <- diff(ci[c('Lower', 'Upper')])
        se <- 0.5 * w / qnorm(0.975)
      } else se <- sqrt(p * (1. - p) / n)
    }
    else ci <- smean.cl.boot(y, na.rm=FALSE)
    z <- c(ci, se=se, n=length(y))
    z
  }
  ## Proportion with Wilson confidence limits, se and n for a 0/1 vector
  propw <- function(y) {
    y  <- y[!is.na(y)]
    n  <- length(y)
    p  <- mean(y)
    ci <- binconf(sum(y), n)[1, ]
    if(p == 0. || p == 1.) {
      ## Don't trust se=0 at extremes; backsolve from Wilson interval
      w  <- diff(ci[c('Lower', 'Upper')])
      se <- 0.5 * w / qnorm(0.975)
    }
    else se <- sqrt(p * (1. - p) / n)
    structure(c(ci, se=se, n=n),
              names=c('Proportion', 'Lower', 'Upper', 'se', 'n'))
  }
  ## Write the supplemental LaTeX table for a summaryS object; returns 'full'
  ## when there are two x (paneling) variables, otherwise 'mini'
  latexit <- function(s, what, byx.type, file) {
    at <- attributes(s)
    xv <- at$xnames
    ## panel function did the work:
    if(what == 'byx.cont' && byx.type == 'violin') {
      g <- function(y) {
        y <- y[! is.na(y)]
        if(length(y) < 3)
          return(c(n=length(y), Median=median(y), Q1=NA, Q3=NA))
        w <- hdquantile(y, c(0.50, 0.25, 0.75))
        r <- c(length(y), w)
        names(r) <- c('n', 'Median', '0.250', '0.750')
        r
      }
      ## Attempt to find a good number of digits to the right of .
      r <- min(tapply(s$y, s$yvar, function(x) max(abs(x), na.rm=TRUE)),
               na.rm=TRUE)
      dig <- if(r == 0) 2
             else max(0, min(5, 3 - round(log10(r))))
      s <- with(s, summarize(y, s[c('yvar', xv)],
                             g, type='matrix', keepcolnames=TRUE))
    } else dig <- 2
    sk <- switch(what,
                 byx.cont   = c(n='n', Median='Median', Q1='0.250', Q3='0.750'),
                 byx.binary = c(n='n', Proportion='Proportion'),
                 byx.discrete = c(n='n', Mean='Mean', Lower='Lower',
                                  Upper='Upper'))
    cround <- switch(what,
                     byx.cont     = 2:4,
                     byx.binary   = 2,
                     byx.discrete = 2:4)
    s$y <- s$y[, sk, drop=FALSE]
    s$y[, cround] <- round(s$y[, cround], dig)
    colnames(s$y) <- names(sk)
    yv <- unique(as.character(s$yvar))
    ny <- length(yv)
    ylab <- character(ny)
    names(ylab) <- yv
    for(v in yv) ylab[v] <-
      labelLatex(label=upFirst(at$ylabels[v]), units=at$yunits[v], hfill=TRUE)
    if(length(xv) == 2) {
      r <- reshape(s, timevar=xv[2], direction='wide', idvar=c('yvar', xv[1]))
      class(r) <- 'data.frame'
      lev  <- levels(s[[xv[2]]])
      nl   <- length(lev)
      yvar <- unique(as.character(r$yvar))
      w <- latex(r[colnames(r) != 'yvar'],
                 table.env=FALSE, file=file, append=TRUE, rowlabel='',
                 landscape=FALSE, size=szg,
                 rowname=rep('', nrow(r)),
                 cgroup=c('', lev),
                 n.cgroup=c(1, rep(ncol(s$y), nl)),
                 rgroup=ylab[yvar],
                 colheads=c(upFirst(xv[1]), rep(names(sk), nl)), center=center)
    }
    else {
      yvar <- unique(as.character(s$yvar))
      w <- latex(s[colnames(s) != 'yvar'],
                 table.env=FALSE, file=file, append=TRUE,
                 landscape=FALSE,
                 rowlabel='', rowname=rep('', nrow(s)),
                 rgroup=ylab[yvar], size=szg,
                 colheads=c(upFirst(xv[1]), names(sk)), center=center)
    }
    if(length(xv) == 2) 'full' else 'mini'
  }
  ## If what is 'byx', choose the summary function based on how many
  ## distinct values the responses have (binary / discrete / continuous)
  if(what == 'byx') {
    if(length(fun)) stop('may not specify fun= when what="byx"')
    g <- function(y) {
      if(is.logical(y)) 2
      else if(! is.numeric(y)) 0
      else length(unique(y[! is.na(y)]))
    }
    nu <- max(sapply(Y, g))
    what <- if(nu < 3) {
      fun <- propw
      'byx.binary'
    } else if(nu < continuous) {
      fun <- meanse
      'byx.discrete'
    } else {
      if(byx.type == 'quantiles') fun <- quant
      'byx.cont'
    }
  }
  file <- sprintf('%s/%s.tex', getgreportOption('texdir'), panel)
  if(getgreportOption('texwhere') == '') file <- ''
  else if(!append) cat('', file=file)
  cat('%dReport:', deparse(formula), ' what:', what, ' group levels:',
      paste(glevels, collapse=','), '\n',
      file=file, append=TRUE)
  if(what == 'box' && ! length(groups) && ncol(X) == 1)
    manygroups <- length(levels(X[[1]])) > 3
  szg <- if(manygroups) 'smaller[2]' else 'smaller'
  ## Create table label for supplemental table
  lb <- sprintf('%s-%s', panel, what)
  if(length(subpanel)) lb <- paste(lb, subpanel, sep='-')
  lbn <- gsub('\\.', '', gsub('-', '', lb))
  lttpop <- paste('ltt', lbn, sep='')
  ## Is the first x variable on the x-axis of an x-y plot?
  fx <- (what == 'xy' && ! length(fun)) || substring(what, 1, 3) == 'byx'
  ## Determine the base part of the title of the plot
  a <- if(fx) {
    if(length(ylabs) < 7)
      paste(if(what != 'xy') 'for', past(ylabs), 'vs.\\', stratlabs[1])
    else paste('for', length(ylabs), 'variables vs.\\', stratlabs[1])
  } else paste('for',
               if(length(ylabs) < 7) past(ylabs) else
               paste(length(ylabs), 'variables'))
  al <- upFirst(a, alllower=TRUE)
  al <- latexTranslate(al)
  ## Create the default title if 'head' was not supplied
  if(!length(head))
    head <-
      switch(what,
             box = paste('Extended box',
                         if(violinbox) 'and violin', 'plots', al),
             proportions = paste('Proportions', al),
             xy = if(length(fun)) 'Statistics' else a,
             byx.binary = paste('Proportions and confidence limits', al),
             byx.discrete =
               paste('Means and 0.95 bootstrap percentile confidence limits', al),
             byx.cont = paste('Medians',
                              switch(byx.type, quantiles='with quantile intervals',
                                     violin='with violin (density) plots'),
                              al) )
  ## Create stratification label: an English-language list of
  ## stratification variables
  sl <- tolower(past(if((what == 'xy' && ! length(fun)) ||
                        what %in% c('byx.binary', 'byx.discrete',
                                    'byx.cont'))
                     stratlabs[-1] else stratlabs))
  ## Create short caption for graphic
  cap <- if(!length(sl)) head
         else sprintf('%s stratified by %s', head, sl)
  shortcap <- cap
  ## Create table caption for accompanying table
  tcap <- switch(what,
                 box = paste('Statistics', al),
                 proportions = paste('Proportions', al),
                 xy = if(length(fun)) 'Statistics' else a,
                 byx.binary=paste('Proportions and confidence limits', al),
                 byx.discrete=paste('Means and 0.95 bootstrap CLs', al),
                 byx.cont=paste('Medians', al))
  ## NOTE(review): when sl is empty this sets tcap to NULL (no else branch);
  ## looks unintended given the parallel handling of cap above -- confirm
  tcap <- if(length(sl)) sprintf('%s stratified by %s', tcap, sl)
  ## Insert pop-up box calls in caption
  cap <- gsub('Extended box', '\\\\protect\\\\eboxpopup{Extended box}', cap)
  cap <- gsub('quantile intervals', '\\\\protect\\\\qintpopup{quantile intervals}',
              cap)
  ## Begin the plot
  startPlot(lb, h=h, w=w)
  ## Common argument list for the plotting functions
  dl <- list(formula=formula.no.id,
             data=data, subset=subset, na.action=na.action,
             outerlabels=outerlabels)
  ## Extract the key values for the plot from the 'popts' argument
  key <- popts$key
  ## If no key specified and multiple groups, determine default key values
  if(! length(key) && length(groups)) {
    klines <- list(x=.6, y=-.07, cex=.8,
                   columns=length(glevels), lines=TRUE, points=FALSE)
    key <- switch(what,
                  box = NULL,
                  proportions = list(columns=length(glevels),
                                     x=.75, y=-.04, cex=.9,
                                     col=trellis.par.get('superpose.symbol')$col, corner=c(0,1)),
                  xy = klines,
                  byx.binary =,
                  byx.discrete =,
                  byx.cont = klines)
  }
  if(length(key)) popts$key <- key
  ## Generate the plot based on the value of 'what'
  switch(what,
         box = {
           sopts$violin      <- violinbox
           sopts$violin.opts <- violinbox.opts
           s <- do.call('bpplotM', c(dl, sopts))
           print(s)
         },
         proportions = {
           sopts$sort <- summaryPsort
           s <- do.call('summaryP', c(dl, sopts))
           if(lattice) p <- do.call('plot', c(list(x=s, groups=groups, exclude1=exclude1), popts))
           else {
             popts <- if(length(groups) == 1 && groups == tvar)
                        c(popts, list(col  =getgreportOption('tx.col'),
                                      shape=getgreportOption('tx.pch'),
                                      abblen=12))
                      else list(col=getgreportOption('nontx.col'), abblen=12)
             popts$addlayer <-
               theme(axis.text.x =
                       element_text(size = rel(0.8), angle=-45,
                                    hjust=0, vjust=1),
                     strip.text.x=element_text(size=rel(0.75), color='blue'),
                     strip.text.y=element_text(size=rel(0.75), color='blue',
                                               angle=0),
                     legend.position='bottom')
             p <- do.call('ggplot', c(list(data=s, groups=groups, exclude1=exclude1), popts))
             fnvar <- attr(p, 'fnvar')
             if(length(fnvar)) tail <- paste(tail, ' ', fnvar, '.', sep='')
             if(length(groups)) p <- p + guides(color=guide_legend(title=''),
                                                shape=guide_legend(title=''))
           }
           presult <- tryCatch(
             colorFacet(p,
                        col=adjustcolor('blue', alpha.f=0.18)),
             error=function(e) list(fail=TRUE) )
           if(length(presult$fail) && presult$fail) print(p)
         },
         xy = {
           s <- do.call('summaryS', c(dl, list(fun=fun), sopts))
           p <- do.call('plot', c(list(x=s, groups=groups), popts))
           print(p)
         },
         byx.binary = ,
         byx.discrete =,
         byx.cont = {
           s <- do.call('summaryS', c(dl, list(fun=fun), sopts))
           ylim <- NULL
           if(what %in% c('byx.binary', 'byx.discrete') &&
              all(c('Lower', 'Upper') %in% colnames(s$y))) {
             ## Per-response y-limits spanning the confidence limits
             yvl <- levels(s$yvar)
             ylim <- vector('list', length(yvl))
             names(ylim) <- yvl
             for(yv in levels(s$yvar)) {
               j <- s$yvar == yv
               ylim[[yv]] <- c(min(s$y[j, 'Lower'], na.rm=TRUE),
                               max(s$y[j, 'Upper'], na.rm=TRUE))
             }
           }
           p <- do.call('plot',
                        c(list(x=s, groups=groups, ylim=ylim,
                               panel=if(byx.type == 'violin' && what == 'byx.cont')
                                       medvPanel else mbarclPanel,
                               paneldoesgroups=TRUE), popts))
           print(p)
         } )
  ## LaTeX macro name for the pop-up table
  popname <- paste('poptable', lbn, sep='')
  ## If a supplemental table is requested, open the LaTeX macro definition
  if(stable) cat(sprintf('\\def\\%s{\\protect\n', popname), file=file, append=TRUE)
  poptab <- NULL
  if(stable && substring(what, 1, 3) == 'byx')
    ## Create the pop-up table using the latexit function
    poptab <- latexit(s, what, byx.type, file=file)
  else if(stable && what == 'proportions') {
    ## Create the pop-up table using the latex method for summaryP
    z <- latex(s, groups=groups, exclude1=exclude1, size=szg, file=file, append=TRUE,
               landscape=FALSE) ## may sometimes need landscape=manygroups
    nstrata <- attr(z, 'nstrata')
    poptab <- if(manygroups) 'full' else 'mini'
  }
  else if(what == 'box' || (what == 'xy' && length(fun))) {
    S <- summaryM(formula.no.id, data=data, subset=subset, na.action=na.action,
                  test=FALSE, groups=groups, continuous=continuous)
    if(stable) {
      z <- latex(S, table.env=FALSE, file=file, append=TRUE, prmsd=TRUE,
                 npct='both', exclude1=exclude1, middle.bold=TRUE, center=center,
                 round='auto', insert.bottom=FALSE, size=szg,
                 landscape=manygroups)
      poptab <- if(length(S$group.freq) > 3) 'full' else 'mini'
      legend <- attr(z, 'legend')
      legend <- if(! length(legend)) ''
                else paste('. ', paste(legend, collapse='\n'), sep='')
      nstrata <- attr(z, 'nstrata')
    }
  }
  if(stable) cat('}\n', file=file, append=TRUE)
  ## Append sample sizes and needles to the caption
  nobs <- Nobs$nobs
  r <- range(nobs)
  nn <- if(r[1] == r[2]) r[1] else paste(r[1], 'to', r[2])
  cap <- sprintf('%s. $N$=%s', cap, nn)
  if(length(tail)) cap <- paste(cap, tail, sep='. ')
  n <- c(randomized=r[2])
  nobsg <- Nobs$nobsg
  if(length(nobsg)) n <- c(n, apply(nobsg, 1, max))
  dNeedle(sampleFrac(n, nobsY=Nobs), name=lttpop, file=file)
  cap <- sprintf('%s~\\hfill\\%s', cap, lttpop)
  endPlot()
  ## Output the necessary code to import the plot just created
  putFig(panel = panel, name = lb, caption = shortcap,
         longcaption = cap, tcaption=tcap,
         tlongcaption = paste(tcap, legend, sep=''),
         poptable= if(length(poptab)) paste('\\', popname, sep=''),
         popfull = length(poptab) && poptab == 'full',
         outtable = nstrata > 1 || manygroups)
  # hyperref doesn't work with multiple tabulars (runs off page) or landscape
  invisible()
}
|
9aefeb5cd2743eda2c512db3f8f9219b2edfe491
|
6f83ecfc6399fb31f10f74e8bf819f1e7042e80e
|
/R/02-coercion.R
|
836e4188ac543eda2cb35b693f19ffdb64a5e824
|
[
"MIT"
] |
permissive
|
jessesadler/debvctrs
|
d510edfd21d968d5df7ba237c7503552154cec8a
|
d3a04b446c5f20b34a5044b5b52530d4adcb2d23
|
refs/heads/main
| 2022-05-04T02:22:29.272112
| 2020-06-29T19:38:15
| 2020-06-29T19:38:15
| 207,156,132
| 27
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,070
|
r
|
02-coercion.R
|
## Coercion for deb_decimal and deb_lsd prototypes ##
# Create coercion hierarchy: numeric() -> deb_decimal() -> deb_lsd()
# A) deb_decimal ----------------------------------------------------------
# 1. deb_decimal and deb_decimal ------------------------------------------
#' Coercion hierarchy for deb_decimal units
#'
#' Hierarchy: d -> s -> l
#' @keywords internal
unit_hierarchy <- function(x, y) {
  ## Resolve the common unit for two deb_decimal prototypes:
  ## identical units are kept, "l" dominates, otherwise fall back to "s".
  ux <- deb_unit(x)
  uy <- deb_unit(y)
  if (identical(ux, uy)) {
    ux
  } else if (any(c(ux, uy) == "l")) {
    "l"
  } else {
    "s"
  }
}
#' @export
vec_ptype2.deb_decimal.deb_decimal <- function(x, y, ...) {
  ## Errors if the two vectors carry different bases
  bases_equal(x, y)
  ## Combine under the common unit per the d -> s -> l hierarchy
  new_decimal(bases = deb_bases(x), unit = unit_hierarchy(x, y))
}
# 2. Coercion with compatible types ---------------------------------------

# a) double -> deb_decimal: the richer deb_decimal prototype wins
#' @export
vec_ptype2.deb_decimal.double <- function(x, y, ...) {
  x
}

#' @export
vec_ptype2.double.deb_decimal <- function(x, y, ...) {
  y
}

# b) integer -> deb_decimal: integers likewise promote to deb_decimal
#' @export
vec_ptype2.deb_decimal.integer <- function(x, y, ...) {
  x
}

#' @export
vec_ptype2.integer.deb_decimal <- function(x, y, ...) {
  y
}
# B) deb_lsd --------------------------------------------------------------

# 1. deb_lsd and deb_lsd --------------------------------------------------

#' @export
vec_ptype2.deb_lsd.deb_lsd <- function(x, y, ...) {
  ## Two deb_lsd vectors combine only when their bases agree
  bases_equal(x, y)
  new_lsd(bases = deb_bases(x))
}
# 2. Coercion with compatible types ---------------------------------------

# a) double -> deb_lsd: deb_lsd is the richer prototype
#' @export
vec_ptype2.deb_lsd.double <- function(x, y, ...) {
  x
}

#' @export
vec_ptype2.double.deb_lsd <- function(x, y, ...) {
  y
}

# b) integer -> deb_lsd
#' @export
vec_ptype2.deb_lsd.integer <- function(x, y, ...) {
  x
}

#' @export
vec_ptype2.integer.deb_lsd <- function(x, y, ...) {
  y
}

# C) Coercion with deb_lsd and deb_decimal --------------------------------

# deb_decimal -> deb_lsd: deb_lsd sits at the top of the hierarchy
#' @export
vec_ptype2.deb_lsd.deb_decimal <- function(x, y, ...) {
  x
}

#' @export
vec_ptype2.deb_decimal.deb_lsd <- function(x, y, ...) {
  y
}
|
d7413345dd7e2112ef39954841cdff5a44cb899b
|
88c20b9bac5999fa1cc73d63ffb124958df2dc0d
|
/man/mariadbClientLibraryVersions.Rd
|
1124ca709ac5d7671db89d79d8155d0a926fb81b
|
[
"MIT"
] |
permissive
|
r-dbi/RMariaDB
|
2fb2656ba0e36391856943aa07d6391102802ebd
|
b5a54aecde60621ec600471ae894ed89e6eaeefe
|
refs/heads/main
| 2023-07-06T08:40:50.295370
| 2023-04-02T02:07:20
| 2023-04-02T02:07:20
| 96,334,875
| 103
| 38
|
NOASSERTION
| 2023-06-14T04:16:58
| 2017-07-05T15:35:16
|
R
|
UTF-8
|
R
| false
| true
| 594
|
rd
|
mariadbClientLibraryVersions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MariaDBDriver.R
\name{mariadbClientLibraryVersions}
\alias{mariadbClientLibraryVersions}
\title{MariaDB Check for Compiled Versus Loaded Client Library Versions}
\usage{
mariadbClientLibraryVersions()
}
\value{
A named integer vector of length two, the first element
representing the compiled library version and the second element
representing the loaded client library version.
}
\description{
This function prints out the compiled and loaded client library versions.
}
\examples{
mariadbClientLibraryVersions()
}
|
ce33c89ab14ca5646485b4199aded315333e3dc6
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/dataRetrieval/R/readNWISunit.r
|
ffb0962d8431e5406edb43b5f84b78b3e4b09a52
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,734
|
r
|
readNWISunit.r
|
#' Instantaneous value data retrieval from USGS (NWIS)
#'
#' Imports data from NWIS web service. This function gets the data from here: \url{http://waterservices.usgs.gov/}
#' A list of parameter codes can be found here: \url{http://nwis.waterdata.usgs.gov/nwis/pmcodes/}
#' A list of statistic codes can be found here: \url{http://nwis.waterdata.usgs.gov/nwis/help/?read_file=stat&format=table}.
#' More information on the web service can be found here: \url{http://waterservices.usgs.gov/rest/IV-Service.html}.
#'
#' @param siteNumbers character USGS site number (or multiple sites). This is usually an 8 digit number
#' @param parameterCd character USGS parameter code. This is usually an 5 digit number.
#' @param startDate character starting date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the earliest possible record. Simple date arguments are specified in local time.
#' See more information here: \url{http://waterservices.usgs.gov/rest/IV-Service.html#Specifying}.
#' @param endDate character ending date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the latest possible record. Simple date arguments are specified in local time.
#' See more information here: \url{http://waterservices.usgs.gov/rest/IV-Service.html#Specifying}.
#' @param tz character to set timezone attribute of dateTime. Default is an empty quote, which converts the
#' dateTimes to UTC (properly accounting for daylight savings times based on the data's provided tz_cd column).
#' Possible values to provide are "America/New_York","America/Chicago", "America/Denver","America/Los_Angeles",
#' "America/Anchorage","America/Honolulu","America/Jamaica","America/Managua","America/Phoenix", and "America/Metlakatla"
#' @keywords data import USGS web service
#' @return A data frame with the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency_cd \tab character \tab The NWIS code for the agency reporting the data\cr
#' site_no \tab character \tab The USGS site number \cr
#' dateTime \tab POSIXct \tab The date and time of the value converted to UTC \cr
#' tz_cd \tab character \tab The time zone code for dateTime \cr
#' code \tab character \tab Any codes that qualify the corresponding value\cr
#' value \tab numeric \tab The numeric value for the parameter \cr
#' }
#' Note that code and value are repeated for the parameters requested. The names are of the form:
#' X_D_P_S, where X is literal,
#' D is an option description of the parameter,
#' P is the parameter code,
#' and S is the statistic code (if applicable).
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' variableInfo \tab data.frame \tab A data frame containing information on the requested parameters \cr
#' statisticInfo \tab data.frame \tab A data frame containing information on the requested statistics on the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' }
#'
#' @seealso \code{\link{renameNWISColumns}}, \code{\link{importWaterML1}}
#' @export
#' @examples
#' siteNumber <- '05114000'
#' parameterCd <- '00060'
#' startDate <- "2014-10-10"
#' endDate <- "2014-10-10"
#' \dontrun{
#' rawData <- readNWISuv(siteNumber,parameterCd,startDate,endDate)
#'
#' timeZoneChange <- readNWISuv(c('04024430','04024000'),parameterCd,
#' "2013-11-03","2013-11-03")
#'
#' centralTime <- readNWISuv(siteNumber,parameterCd,
#' "2014-10-10T12:00", "2014-10-10T23:59",
#' tz="America/Chicago")
#'
#' # Adding 'Z' to the time indicates to the web service to call the data with UTC time:
#' GMTdata <- readNWISuv(siteNumber,parameterCd,
#' "2014-10-10T00:00Z", "2014-10-10T23:59Z")
#' }
#'
readNWISuv <- function (siteNumbers,parameterCd,startDate="",endDate="", tz=""){
  ## Build the instantaneous-values (uv) WaterML request URL, then parse
  ## the response into a data frame with attached site/variable metadata.
  obsUrl <- constructNWISURL(siteNumbers, parameterCd, startDate, endDate,
                             "uv", format = "xml")
  importWaterML1(obsUrl, asDateTime = TRUE, tz = tz)
}
#' Peak flow data from USGS (NWIS)
#'
#' Reads peak flow from NWISweb. Data is retrieved from \url{http://waterdata.usgs.gov/nwis}.
#' In some cases, the specific date of the peak data is not known. This function will default to
#' converting the complete dates, dropping rows with incomplete dates. If those incomplete dates are
#' needed, set the `asDateTime` argument to FALSE. No rows will be removed, and no dates will be converted
#' to R Date objects.
#'
#' @param siteNumbers character USGS site number(or multiple sites). This is usually an 8 digit number.
#' @param startDate character starting date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the earliest possible record.
#' @param endDate character ending date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the latest possible record.
#' @param asDateTime logical default to \code{TRUE}. When \code{TRUE}, the peak_dt column is converted
#' to a Date object, and incomplete dates are removed. When \code{FALSE}, no columns are removed, but no dates are converted.
#' @param convertType logical, defaults to \code{TRUE}. If \code{TRUE}, the function will convert the data to dates, datetimes,
#' numerics based on a standard algorithm. If false, everything is returned as a character
#' @return A data frame with the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency_cd \tab character \tab The NWIS code for the agency reporting the data\cr
#' site_no \tab character \tab The USGS site number \cr
#' peak_dt \tab Date \tab Date of peak streamflow \cr
#' peak_tm \tab character \tab Time of peak streamflow as character \cr
#' peak_va \tab numeric \tab Annual peak streamflow value in cfs \cr
#' peak_cd \tab character \tab Peak Discharge-Qualification codes (see \code{comment} for more information) \cr
#' gage_ht \tab numeric \tab Gage height for the associated peak streamflow in feet \cr
#' gage_ht_cd \tab character \tab Gage height qualification codes \cr
#' year_last_pk \tab numeric \tab Peak streamflow reported is the highest since this year \cr
#' ag_dt \tab Date \tab Date of maximum gage-height for water year (if not concurrent with peak) \cr
#' ag_tm \tab character \tab Time of maximum gage-height for water year (if not concurrent with peak) \cr
#' ag_gage_ht \tab numeric \tab maximum Gage height for water year in feet (if not concurrent with peak) \cr
#' ag_gage_ht_cd \tab character \tab maximum Gage height code \cr
#' }
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' comment \tab character \tab Header comments from the RDB file \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' }
#' @seealso \code{\link{constructNWISURL}}, \code{\link{importRDB1}}
#' @export
#' @importFrom dplyr left_join
#' @examples
#' siteNumbers <- c('01594440','040851325')
#' \dontrun{
#' data <- readNWISpeak(siteNumbers)
#' data2 <- readNWISpeak(siteNumbers, asDateTime=FALSE)
#' stations<-c("06011000")
#' peakdata<-readNWISpeak(stations,convertType=FALSE)
#' }
readNWISpeak <- function(siteNumbers, startDate = "", endDate = "", asDateTime = TRUE, convertType = TRUE) {
  # There is no WaterML (xml) service for peak flows, so the RDB format is used.
  url <- constructNWISURL(siteNumbers, NA, startDate, endDate, "peak")
  data <- importRDB1(url, asDateTime = asDateTime, convertType = convertType)

  if (nrow(data) > 0) {
    # Both flags are length-1 logicals, so the scalar && is the correct operator.
    if (asDateTime && convertType) {
      # Peak dates with a "00" day (e.g. "1977-06-00") cannot be parsed to Date;
      # drop those rows and warn the user how many were removed. (The previous
      # code re-tested length(badDates) inside the same branch redundantly.)
      badDates <- which(grepl("[0-9]*-[0-9]*-00", data$peak_dt))
      if (length(badDates) > 0) {
        data <- data[-badDates, ]
        warning(length(badDates), " rows were thrown out due to incomplete dates")
      }
      if ("peak_dt" %in% names(data)) data$peak_dt <- as.Date(data$peak_dt, format = "%Y-%m-%d")
      if ("ag_dt" %in% names(data)) data$ag_dt <- as.Date(data$ag_dt, format = "%Y-%m-%d")
    }
    # Attach site metadata, restricted to the sites actually present in the data.
    siteInfo <- readNWISsite(siteNumbers)
    siteInfo <- left_join(unique(data[, c("agency_cd", "site_no")]), siteInfo, by = c("agency_cd", "site_no"))
    attr(data, "siteInfo") <- siteInfo
    attr(data, "variableInfo") <- NULL
    attr(data, "statisticInfo") <- NULL
  }
  return(data)
}
#' Rating table for an active USGS streamgage retrieval
#'
#' Reads current rating table for an active USGS streamgage from NWISweb.
#' Data is retrieved from \url{http://waterdata.usgs.gov/nwis}.
#'
#' @param siteNumber character USGS site number. This is usually an 8 digit number
#' @param type character can be "base", "corr", or "exsa"
#' @param convertType logical, defaults to \code{TRUE}. If \code{TRUE}, the function will convert the data to dates, datetimes,
#' numerics based on a standard algorithm. If false, everything is returned as a character
#' @return A data frame. If \code{type} is "base," then the columns are
#'INDEP, typically the gage height, in feet; DEP, typically the streamflow,
#'in cubic feet per second; and STOR, where "*" indicates that the pair are
#'a fixed point of the rating curve. If \code{type} is "exsa," then an
#'additional column, SHIFT, is included that indicates the current shift in
#'the rating for that value of INDEP. If \code{type} is "corr," then the
#'columns are INDEP, typically the gage height, in feet; CORR, the correction
#'for that value; and CORRINDEP, the corrected value for CORR.\cr
#'If \code{type} is "base," then the data frame has an attribute called "RATING"
#'that describes the rating curve is included.
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' comment \tab character \tab Header comments from the RDB file \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' RATING \tab character \tab Rating information \cr
#' }
#'
#' @note Not all active USGS streamgages have traditional rating curves that
#'relate flow to stage.
#' @seealso \code{\link{constructNWISURL}}, \code{\link{importRDB1}}
#' @export
#' @examples
#' siteNumber <- '01594440'
#' \dontrun{
#' data <- readNWISrating(siteNumber, "base")
#' attr(data, "RATING")
#' }
readNWISrating <- function(siteNumber, type = "base", convertType = TRUE) {
  # There is no WaterML (xml) rating service, so the RDB format is used.
  url <- constructNWISURL(siteNumber, service = "rating", ratingType = type)
  data <- importRDB1(url, asDateTime = FALSE, convertType = convertType)

  # BUG FIX: the previous code referenced an undefined `intColumns` object in
  # this branch, which errored whenever a current_rating_nu column came back.
  # Only the whitespace cleanup of that column is needed.
  if ("current_rating_nu" %in% names(data)) {
    data$current_rating_nu <- gsub(" ", "", data$current_rating_nu)
  }

  if (nrow(data) > 0) {
    if (type == "base") {
      # The "base" output embeds rating metadata in "//RATING" header comment
      # lines; parse them into a character vector and expose as attr "RATING".
      Rat <- grep("//RATING ", comment(data), value = TRUE, fixed = TRUE)
      Rat <- sub("# //RATING ", "", Rat)
      Rat <- scan(text = Rat, sep = " ", what = "")
      attr(data, "RATING") <- Rat
    }
    siteInfo <- readNWISsite(siteNumber)
    attr(data, "siteInfo") <- siteInfo
    attr(data, "variableInfo") <- NULL
    attr(data, "statisticInfo") <- NULL
  }
  return(data)
}
#'Surface-water measurement data retrieval from USGS (NWIS)
#'
#'Reads surface-water measurement data from NWISweb. Data is retrieved from \url{http://waterdata.usgs.gov/nwis}.
#'See \url{http://waterdata.usgs.gov/usa/nwis/sw} for details about surface water.
#'
#' @param siteNumbers character USGS site number (or multiple sites). This is usually an 8 digit number
#' @param startDate character starting date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the earliest possible record.
#' @param endDate character ending date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the latest possible record.
#' @param tz character to set timezone attribute of dateTime. Default is an empty quote, which converts the
#' dateTimes to UTC (properly accounting for daylight savings times based on the data's provided tz_cd column).
#' Possible values to provide are "America/New_York","America/Chicago", "America/Denver","America/Los_Angeles",
#' "America/Anchorage","America/Honolulu","America/Jamaica","America/Managua","America/Phoenix", and "America/Metlakatla"
#' @param expanded logical. Whether or not (TRUE or FALSE) to call the expanded data.
#' @param convertType logical, defaults to \code{TRUE}. If \code{TRUE}, the function will convert the data to dates, datetimes,
#' numerics based on a standard algorithm. If false, everything is returned as a character
#' @return A data frame with at least the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency_cd \tab character \tab The NWIS code for the agency reporting the data\cr
#' site_no \tab character \tab The USGS site number \cr
#' measurement_dt \tab POSIXct \tab The date and time (in POSIXct) of the measurement. Unless specified
#' with the tz parameter, this is converted to UTC. If the measurement_dt column is an incomplete, a measurement_dt_date and
#' measurement_dt_time column are added to the returned data frame. \cr
#' tz_cd \tab character \tab The time zone code for the measurement_dt column \cr
#' }
#'
#' See \url{http://waterdata.usgs.gov/usa/nwis/sw} for details about surface water, and
#' \url{http://waterdata.usgs.gov/nwis/help?output_formats_help#streamflow_measurement_data}
#' for help on the columns and codes.
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' comment \tab character \tab Header comments from the RDB file \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' tz_cd_reported \tab The originally reported time zone \cr
#' }
#' @seealso \code{\link{constructNWISURL}}, \code{\link{importRDB1}}
#' @export
#' @importFrom dplyr left_join
#' @examples
#' siteNumbers <- c('01594440','040851325')
#' \dontrun{
#' data <- readNWISmeas(siteNumbers)
#' Meas05316840 <- readNWISmeas("05316840")
#' Meas05316840.ex <- readNWISmeas("05316840",expanded=TRUE)
#' Meas07227500.ex <- readNWISmeas("07227500",expanded=TRUE)
#' Meas07227500.exRaw <- readNWISmeas("07227500",expanded=TRUE, convertType = FALSE)
#' }
readNWISmeas <- function (siteNumbers,startDate="",endDate="", tz="", expanded=FALSE, convertType = TRUE){
  # Doesn't seem to be a WaterML1 format option
  url <- constructNWISURL(siteNumbers,NA,startDate,endDate,"meas", expanded = expanded)
  data <- importRDB1(url,asDateTime=TRUE,tz=tz, convertType = convertType)
  if(nrow(data) > 0){
    # diff_from_rating_pc may come back as character; coerce it to numeric.
    if("diff_from_rating_pc" %in% names(data)){
      data$diff_from_rating_pc <- as.numeric(data$diff_from_rating_pc)
    }
    # Stash the attributes importRDB1 attached: the column reordering below
    # builds a new data frame and would otherwise drop them.
    url <- attr(data, "url")
    comment <- attr(data, "comment")
    queryTime <- attr(data, "queryTime")
    header <- attr(data, "header")
    if(convertType){
      # Keep the full datetime as measurement_dateTime, and split it into a
      # Date column (measurement_dt) plus an "HH:MM" time string column.
      data$measurement_dateTime <- data$measurement_dt
      data$measurement_dt <- as.Date(data$measurement_dateTime)
      data$measurement_tm <- strftime(data$measurement_dateTime, "%H:%M")
      # Blank the time where no time zone was reported — presumably those rows
      # had a date-only measurement_dt (NOTE(review): confirm against service).
      data$measurement_tm[is.na(data$tz_cd_reported)] <- ""
      # Reorder columns: place measurement_tm and tz_cd_reported right after
      # measurement_dt, and move tz_cd to the very end.
      indexDT <- which("measurement_dt" == names(data))
      indexTZ <- which("tz_cd" == names(data))
      indexTM <- which("measurement_tm" == names(data))
      indexTZrep <- which("tz_cd_reported" == names(data))
      newOrder <- c(1:indexDT,indexTM,indexTZrep,c((indexDT+1):ncol(data))[!(c((indexDT+1):ncol(data)) %in% c(indexTZrep,indexTM,indexTZ))],indexTZ)
      data <- data[,newOrder]
    }
    # Attach site metadata (restricted to sites present in the returned data)
    # and restore the attributes saved above.
    siteInfo <- readNWISsite(siteNumbers)
    siteInfo <- left_join(unique(data[,c("agency_cd","site_no")]),siteInfo, by=c("agency_cd","site_no"))
    attr(data, "url") <- url
    attr(data, "comment") <- comment
    attr(data, "queryTime") <- queryTime
    attr(data, "header") <- header
    attr(data, "siteInfo") <- siteInfo
    attr(data, "variableInfo") <- NULL
    attr(data, "statisticInfo") <- NULL
  }
  return (data)
}
#' Groundwater level measurements retrieval from USGS (NWIS)
#'
#' Reads groundwater level measurements from NWISweb. Mixed date/times come back from the service
#' depending on the year that the data was collected. See \url{http://waterdata.usgs.gov/usa/nwis/gw}
#' for details about groundwater. Groundwater dates and times are returned in many different formats, therefore the
#' date/time information is returned as a character string. Users will need to convert to a date object.
#' See \url{http://waterservices.usgs.gov/rest/GW-Levels-Service.html} for more information.
#'
#' @param siteNumbers character USGS site number (or multiple sites). This is usually an 8 digit number
#' @param startDate character starting date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the earliest possible record.
#' @param endDate character ending date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the latest possible record.
#' @param convertType logical, defaults to \code{TRUE}. If \code{TRUE}, the function will convert the data to dates, datetimes,
#' numerics based on a standard algorithm. If false, everything is returned as a character
#' @return A data frame with the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency_cd \tab character \tab The NWIS code for the agency reporting the data\cr
#' site_no \tab character \tab The USGS site number \cr
#' site_tp_cd \tab character \tab Site type code \cr
#' lev_dt \tab Date \tab Date level measured\cr
#' lev_tm \tab character \tab Time level measured \cr
#' lev_tz_cd \tab character \tab Time datum \cr
#' lev_va \tab numeric \tab Water level value in feet below land surface\cr
#' sl_lev_va \tab numeric \tab Water level value in feet above specific vertical datum \cr
#' lev_status_cd \tab character \tab The status of the site at the time the water level was measured \cr
#' lev_agency_cd \tab character \tab The agency code of the person measuring the water level \cr
#' }
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' comment \tab character \tab Header comments from the RDB file \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' }
#'
#' @seealso \code{\link{constructNWISURL}}, \code{\link{importRDB1}}
#' @export
#' @importFrom dplyr left_join
#' @examples
#' siteNumber <- "434400121275801"
#' \dontrun{
#' data <- readNWISgwl(siteNumber, '','')
#' sites <- c("434400121275801", "375907091432201")
#' data2 <- readNWISgwl(sites, '','')
#' data3 <- readNWISgwl("420125073193001", '','')
#' }
readNWISgwl <- function(siteNumbers, startDate = "", endDate = "", convertType = TRUE) {
  # Groundwater levels are only available via the RDB (tsv) format.
  url <- constructNWISURL(siteNumbers, NA, startDate, endDate, "gwlevels", format = "tsv")
  data <- importRDB1(url, asDateTime = TRUE, convertType = convertType)
  if (nrow(data) > 0) {
    # Only coerce lev_dt when type conversion was requested: with
    # convertType = FALSE everything must stay character, consistent with
    # readNWISpeak/readNWISmeas and the documented contract above.
    if (convertType) {
      data$lev_dt <- as.Date(data$lev_dt)
    }
    # Attach site metadata for the sites actually present in the data.
    siteInfo <- readNWISsite(siteNumbers)
    siteInfo <- left_join(unique(data[, c("agency_cd", "site_no")]), siteInfo, by = c("agency_cd", "site_no"))
    attr(data, "siteInfo") <- siteInfo
  }
  return(data)
}
|
4da9acce35ee291174a5721af8ea1508bd3f21c2
|
58b943c940514e5c08106b97da156404ba6345cf
|
/man/simulate.R.K.Rd
|
db14e19b35914e8424eb3c00d2e82fde0c38e0d0
|
[] |
no_license
|
bcm-uga/apTreeshape
|
1dd209ae8e4a0a30d22b906a99f98f459ef992f7
|
369236069399b25f462bc109db305cbdb4a6113c
|
refs/heads/master
| 2020-04-09T18:54:00.858207
| 2019-09-25T12:08:34
| 2019-09-25T12:08:34
| 124,239,795
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 654
|
rd
|
simulate.R.K.Rd
|
\name{simulate.R.K}
\alias{simulate.R.K}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Simulate (R,K)
}
\description{
Simulate a split and the number of marks in each of the resulting subintervals
}
\usage{
simulate.R.K(beta, n)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{beta}{
Imbalance index
}
\item{n}{
Tip number in the parent clade
}
}
\references{
Maliet O., Gascuel F., Lambert A. (2018) \emph{Ranked tree shapes, non-random
extinctions and the loss of phylogenetic diversity}, bioRxiv 224295,
doi: https://doi.org/10.1101/224295
}
\author{
Odile Maliet, Fanny Gascuel & Amaury Lambert
}
|
b3abe67e1c3f0d25358280362fa7043a37de1dea
|
a2fabf34e4bb74729e274c2d1c3f2f45a7536588
|
/Plot5.R
|
4e80a6cc4967c8aa36e820163892bd60f337304d
|
[] |
no_license
|
jsennator/ExData_CourseProject2
|
d609b7763b556d39a94e874dc35335bbfec2cc4f
|
6ebd99e07f87dd68c50df6ed0780f8f6434a05b3
|
refs/heads/master
| 2016-08-10T14:22:37.972053
| 2015-09-24T21:26:45
| 2015-09-24T21:26:45
| 43,093,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 666
|
r
|
Plot5.R
|
# Plot 5: total PM2.5 motor-vehicle (ON-ROAD) emissions in Baltimore City
# (fips 24510) by year, written to plot5.png.
library(ggplot2)  # BUG FIX: ggplot()/geom_bar() were called without loading ggplot2

NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Subset for Baltimore City and on-road (motor vehicle) sources
baltimore <- NEI[NEI$fips == "24510" & NEI$type == "ON-ROAD", ]

# Sum Baltimore emissions by year
aggtotalyearBaltimore <- aggregate(Emissions ~ year, baltimore, sum)

png('plot5.png')
g <- ggplot(aggtotalyearBaltimore, aes(factor(year), Emissions))
g <- g + geom_bar(stat = "identity", fill = "purple", width = .5) +
  theme_bw() + guides(fill = FALSE) +
  labs(x = "year", y = expression("Total PM"[2.5]*" Emission (10^5 Tons)")) +
  labs(title = expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore from 1999-2008"))
print(g)
dev.off()
|
3550186b8e97f0a27fe9a5c455565f79b84d7f03
|
71af4c97f1dd3ad115d7fc230b50b535886ef641
|
/missing together.R
|
ef8de9dc79734a1effafa120895a92e235261636
|
[
"MIT"
] |
permissive
|
610311101/Home-Value-Prediction
|
dc211126d1f2cfbec9ecf52e0f63d20e3f259d0b
|
c90f0ff9aba7b4a076362bd7d09836985f60ce5a
|
refs/heads/master
| 2021-06-27T23:37:54.127108
| 2017-09-19T20:57:01
| 2017-09-19T20:57:01
| 103,949,521
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
missing together.R
|
library("dplyr");
library("magrittr");
# NOTE(review): setwd() and rm(list = ls()) are session-wide side effects;
# consider an RStudio project / here::here() instead of a hard-coded path.
setwd("C:/Users/Hou/Desktop/Home-Value-Prediction");
rm(list = ls());
# Property table read once from CSV (large file).
Data <- read.csv("properties_2016.csv");
##
## fips, latitude, longitude, 
## propertylandusetypeid, rawcensustractandblock,
## regionidcounty,
## are the same missing 11437# data.
# Keep the original row number so missing rows can be traced back later.
index <- 1:nrow(Data);
Data <- data.frame(index, Data)
# Restrict to the columns that share the same missingness pattern.
subData <- Data %>%
  select(
    index, fips, latitude, longitude, 
    propertylandusetypeid, rawcensustractandblock,
    regionidcounty
  )
## => If missing one of them, then others are missing two.
|
1c500c0d6c0236ec5b780d1bf251cf31bdae8ce8
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Sauer-Reimer/ISCAS89/s09234_PR_4_10/s09234_PR_4_10.R
|
5caaba6008b7f3d6e5d10a8887aa7fcb2180805b
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
s09234_PR_4_10.R
|
5bb48a03c11c155192a156dbe320a4c2 s09234_PR_4_10.qdimacs 5464 15583
|
899bdc3f9539e140cf2663f2f878f9660ad4af5e
|
55190b9854fd714e7bac146a836a7bba8435adc5
|
/src/helper.R
|
262b423024019b294a19c0e5857d7358c5f35741
|
[] |
no_license
|
danielns/2019-01-max-milhas
|
5853b319157668f7a70701720d60ac01a472f2a8
|
369855915da196756a1ee014ec7f5ad274895600
|
refs/heads/master
| 2020-04-15T12:52:37.926457
| 2019-01-08T16:56:14
| 2019-01-08T16:56:14
| 164,687,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,959
|
r
|
helper.R
|
# Funções para auxiliar a análise
removeAcentos = function(vetor){
  #===================================================================================
  # Removes accents from a character vector and lowercases it, handling both
  # UTF-8 and unknown encodings (other encodings are not considered).
  # Returns a character vector the same length as `vetor`.
  #===================================================================================
  removeAcentoEncoding = function(texto, encoding){
    # Simplified: previous version mapped the encoding to the strings "TRUE"/
    # "FALSE" and dispatched via switch(); a plain if/else does the same thing.
    if (encoding == "UTF-8") {
      tolower(iconv(texto, from = "UTF-8", to = "ASCII//TRANSLIT"))
    } else {
      tolower(iconv(texto, to = "ASCII//TRANSLIT"))
    }
  }
  # Encoding() is queried per element because a vector can mix encodings.
  unlist(lapply(vetor, function(x) removeAcentoEncoding(x, Encoding(x))))
}
corrige_nome_mun = function(var){
  #===================================================================================
  # Normalizes special-case municipality names from the flight-search data.
  # Uses a lookup table with %in% instead of repeated `var == x` subassignments,
  # which would error on NA entries ("NAs are not allowed in subscripted
  # assignments"); %in% never returns NA.
  #===================================================================================
  correcoes = c(
    "lencois chapada diamantina"       = "lencois",
    "sao joao del-rei"                 = "sao joao del rei",
    "aeroporto de teixeira de freitas" = "teixeira de freitas",
    "campinas viracopos"               = "campinas"
  )
  precisa = var %in% names(correcoes)
  var[precisa] = unname(correcoes[var[precisa]])
  return(var)
}
gera_percurso = function(x){
  #===================================================================================
  # Builds a direction-independent route label: the endpoints are sorted
  # alphabetically before joining, so A -> B and B -> A collapse to the same
  # string (e.g. "Belo Horizonte - Navegantes").
  #===================================================================================
  extremos_ordenados = sort(x)
  paste(extremos_ordenados, collapse = " - ")
}
gera_moda <- function(x) {
  #===================================================================================
  # Returns the statistical mode of vector x: the first distinct value (in
  # order of first appearance) with the highest occurrence count.
  #===================================================================================
  valores <- unique(x)
  contagens <- tabulate(match(x, valores))
  valores[which.max(contagens)]
}
gera_grafico1 = function(){
  #===================================================================================
  # Builds the stacked-bar chart of the percentage of searches per "nivel",
  # by month of the departure date.
  # NOTE(review): depends on the global data frame `nivel_mes` (columns
  # porcentagem, mes_ida, nivel, posicao) being defined by the caller —
  # consider passing it as an argument. Returns a ggplot object.
  #===================================================================================
  graph_mes_ida = ggplot() + theme_bw()
  # Stacked bars: one bar per month, filled by nivel.
  graph_mes_ida = graph_mes_ida + geom_bar(aes(y = porcentagem, x = mes_ida, fill = nivel),
                                           data = nivel_mes, stat="identity")
  # Percentage labels positioned via the precomputed `posicao` column.
  graph_mes_ida = graph_mes_ida + geom_text(data=nivel_mes,
                                            aes(x = mes_ida, y = posicao,
                                                label = paste0(round(porcentagem, 0), "%")), size=3, col = "white")
  graph_mes_ida = graph_mes_ida + scale_fill_manual(values=c("#0e0872", "#660000"))
  graph_mes_ida = graph_mes_ida + scale_y_continuous(labels = function(x) paste0(x, "%"))
  graph_mes_ida = graph_mes_ida + ggtitle("Percentual de Buscas por Nível e por Mês da Data de Ida")
  graph_mes_ida = graph_mes_ida + theme(legend.position="bottom", legend.direction="horizontal",
                                        legend.title = element_blank(),
                                        legend.spacing.x = unit(1.0, 'cm'),
                                        axis.line = element_line(colour = "black"),
                                        panel.grid.major = element_blank(),
                                        panel.grid.minor = element_blank(),
                                        panel.border = element_blank(),
                                        panel.background = element_blank(),
                                        axis.text.x=element_text(angle = 90, hjust = 1, colour="black",
                                                                 size = 8, face = "bold"),
                                        axis.text.y=element_text(colour="black", size = 8, face = "bold"),
                                        plot.title = element_text(hjust = 0.5))
  graph_mes_ida = graph_mes_ida + labs(x="Mês da data de ida", y="Porcentagem")
  return(graph_mes_ida)
}
gera_lista_ca_faltantes = function(analise_2018_03){
  #===================================================================================
  # Adds the column `lst_ca_faltantes`: for each row, the space-separated set
  # of companies that appear in `cia_esperadas` but not in `lst_companhias`.
  # Vectorized with mapply instead of the previous element-wise `$<-` loop,
  # which was slow on data.tables, split each string twice per iteration, and
  # crashed on an empty table (`for (i in 1:nrow(...))` iterates over c(1, 0)).
  #===================================================================================
  if (nrow(analise_2018_03) == 0) {
    analise_2018_03[, lst_ca_faltantes := character(0)]
    return(analise_2018_03)
  }
  faltantes = mapply(function(esperadas, listadas) {
    paste(setdiff(strsplit(esperadas, " ")[[1]],
                  strsplit(listadas, " ")[[1]]),
          collapse = " ")
  }, analise_2018_03$cia_esperadas, analise_2018_03$lst_companhias,
  USE.NAMES = FALSE)
  analise_2018_03[, lst_ca_faltantes := faltantes]
  return(analise_2018_03)
}
gen_freq_cia = function(recorte, nome_cia){
  #===================================================================================
  # Adds a 0/1 indicator column, named after the company itself, flagging the
  # rows whose `lst_ca_faltantes` mentions `nome_cia`.
  # (grepl gives the logical mask directly; as.numeric turns it into 0/1,
  # matching the numeric column the two-step assignment produced before.)
  #===================================================================================
  recorte[, V1 := as.numeric(grepl(nome_cia, lst_ca_faltantes))]
  setnames(recorte, "V1", nome_cia)
  return(recorte)
}
gera_grafico2 = function(){
  #===================================================================================
  # Builds the line chart of how often each airline is absent from the search
  # results, over time.
  # NOTE(review): depends on the global data frame `analise_2018_03_dia`
  # (columns data_busca, freq, companhia) being defined by the caller —
  # consider passing it as an argument. Returns a ggplot object (the final
  # bare `p` prints it when called at top level).
  #===================================================================================
  p = ggplot(analise_2018_03_dia, aes(x=data_busca, y=freq, group=companhia, col = companhia))
  p = p + theme_bw()
  p = p + geom_line(size=1.2)
  # One point per observation, with a distinct shape per airline.
  p = p + geom_point(data = analise_2018_03_dia, aes(shape=companhia), size = 2)
  p = p + theme(legend.position="bottom", legend.direction="horizontal",
                legend.title = element_blank(),
                legend.spacing.x = unit(1.0, 'cm'),
                axis.line = element_line(colour = "black"),
                panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                panel.border = element_blank(),
                panel.background = element_blank(),
                axis.text.x=element_text(angle = 90, hjust = 1, colour="black",
                                         size = 8, face = "bold"),
                axis.text.y=element_text(colour="black", size = 8, face = "bold"),
                plot.title = element_text(hjust = 0.5))
  p = p + ggtitle("Evolução do Número de Ausência nas Buscas por Companhia Aérea")
  p = p + labs(x="Data da Busca", y="Frequência")
  p
}
|
2983e260591cc452e556a602c1a223b745b1e07d
|
a0ceb8a810553581850def0d17638c3fd7003895
|
/scripts/scripts_analysis/debugging_rerun/check_LDA.R
|
99d2c5fda938420b1bbcee4895fe0d3b8154396e
|
[] |
no_license
|
jakeyeung/sortchicAllScripts
|
9e624762ca07c40d23e16dbd793ef9569c962473
|
ecf27415e4e92680488b6f228c813467617e7ee5
|
refs/heads/master
| 2023-04-15T22:48:52.272410
| 2022-10-24T10:45:24
| 2022-10-24T10:45:24
| 556,698,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,194
|
r
|
check_LDA.R
|
# Jake Yeung
# Date of Creation: 2019-12-20
# File: ~/projects/scchic/scripts/scripts_analysis/debugging_rerun/check_LDA.R
# Check LDA output
# Interactive QC script: loads a fitted topicmodels LDA, embeds cells with
# t-SNE and UMAP, and colors embeddings by batch, intra-chromosomal variance,
# and cell size (total counts / 5).
# NOTE(review): rm(list=ls()) wipes the session; path below is machine-specific.
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(hash)
library(igraph)
library(umap)
library(Rtsne)
# Load LDA ---------------------------------------------------------------
inf <- "/Users/yeung/data/scchic/from_cluster/ldaAnalysisBins_B6BM_All_allmarks.2019-12-16/lda_outputs.B6BM_AllMerged_H3K9me3.TAcutoff_0.5.countscutoff_1000.binfilt_cellfilt.2019-12-16.K-50.binarize.FALSE/ldaOut.B6BM_AllMerged_H3K9me3.TAcutoff_0.5.countscutoff_1000.binfilt_cellfilt.2019-12-16.K-50.Robj"
# Loads (at least) out.lda and count.mat into the workspace — TODO confirm
# the .Robj contents.
load(inf, v=T)
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
# Do umap ----------------------------------------------------------------
# tm.result$topics: cells x topics; tm.result$terms: topics x bins.
tm.result <- posterior(out.lda)
tsne.out <- Rtsne(tm.result$topics)
# ClipLast is a project helper (not defined here) — presumably strips the
# trailing token from the cell barcode to recover experiment/plate; confirm.
dat.tsne.long <- data.frame(cell = rownames(tm.result$topics), umap1 = tsne.out$Y[, 1], umap2 = tsne.out$Y[, 2], stringsAsFactors = FALSE) %>%
  rowwise() %>%
  mutate(experi = ClipLast(cell),
         experi = gsub("Bl6BMSC", "B6BMSC", experi),
         plate = ClipLast(cell, jsep = "_"))
ggplot(dat.tsne.long, aes(x = umap1, y = umap2, color = experi)) + geom_point(alpha = 0.5) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + 
  scale_color_manual(values = cbPalette)
ggplot(dat.tsne.long, aes(x = umap1, y = umap2, color = experi)) + geom_point(alpha = 0.5) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + 
  scale_color_manual(values = cbPalette) + 
  facet_wrap(~plate)
# UMAP with fixed seed for reproducibility.
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
umap.out <- umap(tm.result$topics, config = jsettings)
dat.umap.long <- data.frame(cell = rownames(umap.out$layout), umap1 = umap.out$layout[, 1], umap2 = umap.out$layout[, 2], stringsAsFactors = FALSE) %>%
  rowwise() %>%
  mutate(experi = ClipLast(cell),
         experi = gsub("Bl6BMSC", "B6BMSC", experi),
         plate = ClipLast(cell, jsep = "_"))
ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = experi)) + geom_point(alpha = 0.5) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + 
  scale_color_manual(values = cbPalette)
ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = experi)) + geom_point(alpha = 0.5) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + 
  scale_color_manual(values = cbPalette) + 
  facet_wrap(~plate)
# get var
# Imputed log2 signal per bin per cell from the topic model factorization.
dat.impute.log <- log2(t(tm.result$topics %*% tm.result$terms))
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
# CalculateVarAll is a project helper (not defined here) — presumably computes
# per-cell intra-chromosomal variance; confirm against scchicFuncs.
dat.var <- CalculateVarAll(dat.impute.log, jchromos)
dat.tsne.long <- left_join(dat.tsne.long, dat.var)
dat.umap.long <- left_join(dat.umap.long, dat.var)
ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) + geom_point(alpha = 0.5) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + 
  scale_color_viridis_c(direction = -1) + 
  facet_wrap(~plate)
ggplot(dat.tsne.long, aes(x = umap1, y = umap2, color = cell.var.within.sum)) + geom_point(alpha = 0.5) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + 
  scale_color_viridis_c(direction = -1) + facet_wrap(~experi)
# Cell size proxy: column sums of the count matrix divided by 5.
dat.cellsizes <- data.frame(cell = colnames(count.mat), cellsize = colSums(count.mat) / 5)
dat.tsne.long <- left_join(dat.tsne.long, dat.cellsizes)
dat.umap.long <- left_join(dat.umap.long, dat.cellsizes)
ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = log10(cellsize))) + geom_point(alpha = 0.5) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + 
  scale_color_viridis_c(direction = 1) + 
  facet_wrap(~plate)
076e097c2189caaece1007c1dd0aed45991fe918
|
dfb2a56f64c2c206cd0b5c67e708b3ba030b4984
|
/week_10/case_study_10.R
|
032fb9a73ff4c953942ef5abf133e6979aa675c4
|
[] |
no_license
|
geo511-2020/geo511-2020-tasks-shruti8297
|
982ba288d63b045a1ab29ebebec3192af30f7891
|
b0c92b30200315199525dd07abdb50994861d5aa
|
refs/heads/master
| 2023-02-02T07:17:31.909382
| 2020-12-18T21:13:01
| 2020-12-18T21:13:01
| 296,927,787
| 0
| 0
| null | 2020-10-11T01:13:36
| 2020-09-19T18:24:31
|
R
|
UTF-8
|
R
| false
| false
| 4,195
|
r
|
case_study_10.R
|
# performed the task with groups help
# Case study: MODIS land cover (MCD12Q1) and land surface temperature (MOD11A2)
# — download, plot landcover, convert LST to degrees C, extract a point time
# series, build monthly climatologies, and compare landcover classes.
library(raster)
library(rasterVis)
library(rgdal)
library(ggmap)
library(tidyverse)
library(knitr)
# used groups help for performing the task
# New Packages
library(ncdf4) # to import data from netcdf format
# Land use Land cover
# Create afolder to hold the downloaded data
dir.create("data",showWarnings = F) #create a folder to hold the data
lulc_url="https://github.com/adammwilson/DataScienceData/blob/master/inst/extdata/appeears/MCD12Q1.051_aid0001.nc?raw=true"
lst_url="https://github.com/adammwilson/DataScienceData/blob/master/inst/extdata/appeears/MOD11A2.006_aid0001.nc?raw=true"
# download them
download.file(lulc_url,destfile="data/MCD12Q1.051_aid0001.nc", mode="wb")
download.file(lst_url,destfile="data/MOD11A2.006_aid0001.nc", mode="wb")
# loading data into R
lulc=stack("data/MCD12Q1.051_aid0001.nc",varname="Land_Cover_Type_1")
lst=stack("data/MOD11A2.006_aid0001.nc",varname="LST_Day_1km")
# exploring lulc data
# Keep only layer 13 of the landcover stack.
lulc=lulc[[13]]
plot(lulc)
# process landcover data
# IGBP landcover class codes for MCD12Q1 Land_Cover_Type_1.
Land_Cover_Type_1 = c(
  Water = 0,
  `Evergreen Needleleaf forest` = 1,
  `Evergreen Broadleaf forest` = 2,
  `Deciduous Needleleaf forest` = 3,
  `Deciduous Broadleaf forest` = 4,
  `Mixed forest` = 5,
  `Closed shrublands` = 6,
  `Open shrublands` = 7,
  `Woody savannas` = 8,
  Savannas = 9,
  Grasslands = 10,
  `Permanent wetlands` = 11,
  Croplands = 12,
  `Urban & built-up` = 13,
  `Cropland/Natural vegetation mosaic` = 14,
  `Snow & ice` = 15,
  `Barren/Sparsely vegetated` = 16,
  Unclassified = 254,
  NoDataFill = 255)
# Lookup table: class id -> label -> plot color.
lcd=data.frame(
  ID=Land_Cover_Type_1,
  landcover=names(Land_Cover_Type_1),
  col=c("#000080","#008000","#00FF00", "#99CC00","#99FF99", "#339966", "#993366", "#FFCC99", "#CCFFCC", "#FFCC00", "#FF9900", "#006699", "#FFFF00", "#FF0000", "#999966", "#FFFFFF", "#808080", "#000000", "#000000"),
  stringsAsFactors = F)
# colors from https://lpdaac.usgs.gov/about/news_archive/modisterra_land_cover_types_yearly_l3_global_005deg_cmg_mod12c1
kable(head(lcd))
# convert to raster (easy)
lulc=as.factor(lulc)
# update the RAT with a left join
levels(lulc)=left_join(levels(lulc)[[1]],lcd)
# plot it
gplot(lulc)+
  geom_raster(aes(fill=as.factor(value)))+
  scale_fill_manual(values=levels(lulc)[[1]]$col,
                    labels=levels(lulc)[[1]]$landcover,
                    name="Landcover Type")+
  coord_equal()+
  theme(legend.position = "bottom")+
  guides(fill=guide_legend(ncol=1,byrow=TRUE))
# plot land surface temperature
plot(lst[[1:12]])
# convert lst to degrees c
# Kelvin -> Celsius via a raster offset applied on the fly.
offs(lst)=-273.15
plot(lst[[1:10]])
# adding dates to z dimension
# Layer names look like "X2000.02.18"; strip the X and parse as Date.
names(lst)[1:5]
tdates=names(lst)%>%
  sub(pattern="X",replacement="")%>%
  as.Date("%Y.%m.%d")
names(lst)=1:nlayers(lst)
lst=setZ(lst,tdates)
# took erik's help with this section
# part 1 - Extract time series for a point
lw <- SpatialPoints(data.frame(x= -78.791547,y=43.007211))
projection(lw) = "+proj=longlat"
lw_transform <- spTransform(lw, "+proj=longlat")
# Mean LST within a 1 km buffer around the point, per layer.
Extract <- raster::extract(lst, lw, buffer=1000,fun=mean,na.rm=T)
Transpose <- t(Extract)
Dates <- getZ(lst)
time_series <- bind_cols(Transpose, Dates)
t_series <- time_series %>% 
  rename(y_col = ...1, x_col = ...2)
ggplot(t_series, aes(x = x_col, y = y_col)) +
  geom_point() +
  geom_smooth(method = 'loess', span =0.02) +
  labs(x = "date", y = "Monthly Mean Land Surafce Temperature")
# Part 2: Summarize weekly data to monthly climatologies
# Average all layers that fall in the same calendar month.
tmonth <- as.numeric(format(getZ(lst),"%m"))
lst_month <- stackApply(x = lst, fun = mean, indices = tmonth)
names(lst_month)=month.name
gplot(lst_month) + geom_raster(aes(fill=value)) +
  scale_fill_gradientn(colours = c(low = "blue", mid = "grey", high = "red")) +
  facet_wrap(~variable)
# part 3 :
# Resample landcover onto the LST grid (nearest neighbour keeps class ids).
lulc2 <- resample(lulc, lst, method = "ngb")
lcds1=cbind.data.frame(
  values(lst_month),
  ID=values(lulc2[[1]]))%>%
  na.omit()
my_data <- gather(lcds1, key='month',value='value',-ID) %>%
  mutate(ID = as.numeric(ID)) %>%
  mutate(month = factor(month, levels = month.name, ordered=T))
left <- left_join(my_data, lcd)
plot2 <- left %>%
  filter(landcover%in%c("Urban & built-up","Deciduous Broadleaf forest"))
plot2
|
099dd4e99abf0f52c07531182be1d8d558b7649f
|
50197791ff29706c824e3b3d0a893a870ad2d70e
|
/Week 2/complete.R
|
26984209392b905f908c9445e4d1a84fab0d36f3
|
[] |
no_license
|
alittlec/datasciencecoursera
|
771e16c7e3f5a0b349178ad9549fb722f7d5490d
|
46ad2c7bfbf7ac55030cc711d5599b5331879b8e
|
refs/heads/master
| 2020-12-25T15:17:29.028952
| 2016-09-23T08:43:25
| 2016-09-23T08:43:25
| 67,360,857
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
r
|
complete.R
|
#' Read a set of monitor CSV files.
#'
#' @param directory Path to the folder containing the files.
#' @param id Integer vector of monitor ids; each id N maps to "NNN.csv"
#'   (zero-padded to 3 digits).
#' @return An unnamed list of data frames, one per id, in `id` order.
readfiles <- function(directory, id) {
  filenames <- file.path(directory, paste0(formatC(id, width = 3, flag = "0"), ".csv"))
  # Preallocate one slot per file.  (The original `list(length = length(filenames))`
  # built a single-element list whose first slot was *named* "length", so the
  # returned list carried a stray name on element 1.)
  specdata <- vector("list", length(filenames))
  for (i in seq_along(filenames)) {
    specdata[[i]] <- read.csv(filenames[i])
  }
  specdata
}
#' Count complete cases per monitor file.
#'
#' @param directory Path to the folder containing the monitor CSV files.
#' @param id Integer vector of monitor ids (default 1:332).
#' @return A data frame with columns `id` (the requested monitor ids) and
#'   `nobs` (number of fully-observed rows in that monitor's file).
complete <- function(directory, id = 1:332) {
  specdata <- readfiles(directory, id)
  # One count per file: rows with no NA in any column.
  nobs <- vapply(specdata, function(filedata) sum(complete.cases(filedata)),
                 numeric(1))
  # Report the requested ids directly.  The original read `filedata$ID[i]`
  # (row i of file i), which returns NA whenever file i has fewer than i rows;
  # the ID column is constant per file, so the requested id is equivalent and
  # robust.
  data.frame(id = id, nobs = nobs)
}
|
b9e9ee0a67f0f7660251bb3c844496da5deb9e7f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gear/tests/test-mle-geolmStd.R
|
926d470063eede217e94a10054808221eeb68ae3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,723
|
r
|
test-mle-geolmStd.R
|
# Cross-validate gear's mle.geolmStd against geoR's likfit on one simulated
# Gaussian random field (ML first, then REML, then the REML objective itself).
set.seed(1)
library(geoR)
# Simulate 100 points: exponential covariance, partial sill 1, range 1,
# nugget 0.1.
fields <- grf(100, cov.pars = c(1, 1), nugget = 0.1, cov.model = "exponential",
kappa = 0.5, messages = FALSE)
# Reference ML fit from geoR with a first-order (planar) trend.
geoR_fit = likfit(fields, trend = "1st", ini.cov.pars = c(1, 1),
nugget = 0.1, lik.method = "ML")
# Same model specified through gear: standard covariance model + linear trend
# in the coordinates.
cmod1 = cmod.std("exponential", psill = 1, r = 1, evar = 0.1)
data = data.frame(y = fields$data, x1 = fields$coords[,1], x2 = fields$coords[,2])
object = geolm(y ~ x1 + x2, data = data, coordnames = c("x1", "x2"),
cmod = cmod1)
gear_fit = mle(object, reml = FALSE)
# make sure the results are similar
test_that("mle.geolmStd calculations are correct", {
expect_true(abs(gear_fit$cmod_evar0$psill - geoR_fit$cov.pars[1]) < 1e-3)
expect_true(abs(gear_fit$cmod_evar0$r - geoR_fit$cov.pars[2]) < 1e-3)
expect_true(abs(gear_fit$evar - geoR_fit$nugget) < 1e-3)
expect_true(abs(gear_fit$loglik - geoR_fit$loglik) < 1e-3)
})
# Repeat the comparison with REML; tolerances are looser here because the two
# optimisers start from different initial values.
geoR_fit_reml = likfit(fields, trend = "1st", ini.cov.pars = c(1.6, 3.7),
nugget = 0.1, lik.method = "REML")
gear_fit_reml = mle(object, reml = TRUE)
test_that("mle.geolmStd calculations are correct", {
expect_true(abs(gear_fit_reml$cmod_evar0$psill - geoR_fit_reml$cov.pars[1]) < 2e-2)
expect_true(abs(gear_fit_reml$cmod_evar0$r - geoR_fit_reml$cov.pars[2]) < 4e-2)
expect_true(abs(gear_fit_reml$evar - geoR_fit_reml$nugget) < 1e-3)
# expect_true(abs(gear_fit_reml$loglik - geoR_fit_reml$loglik) < 1e-3)
})
# compare to reml likelihood manually (p. 117 of Model Based Geostatistics)
n = length(object$y)
# manually create covariance matrix
# Exponential covariance at the fitted parameters plus nugget on the diagonal.
v = gear_fit_reml$cmod_evar0$psill * exp(-as.matrix(dist(object$coords))/
gear_fit_reml$cmod_evar0$r) +
gear_fit_reml$evar * diag(n)
x = object$x
yhat = x %*% gear_fit_reml$coeff
resid = object$y - yhat
# -2 * restricted log-likelihood assembled term by term.
minus2_reml_ll = (n * log(2 * pi) +
determinant(gear_fit_reml$v, logarithm = TRUE)$mod +
determinant(crossprod(x, solve(v, x)), logarithm = TRUE)$mod +
crossprod(resid, solve(v, resid))[1, 1])
# make sure the results are similar
test_that("mle.geolmStd reml objective function correct", {
expect_true(abs(gear_fit_reml$optimx$value - minus2_reml_ll) < 1e-3)
})
# par = c(1, .1)
# weights = rep(1, 100)
# x = cbind(1, fields$coords)
# y = fields$data + x %*% c(1, 2, 1)
# scmod = cmod.std("exponential", psill = 1, r = 1, evar = 0)
# d = as.matrix(dist(fields$coords))
# cmod = cmod.std("exponential", psill = 1, r = 1, evar = 0.1)
# data = data.frame(y = y, x1 = fields$coords[,1], x2 = fields$coords[,2])
# object = geolm(y ~ x1 + x2, data = data, coordnames = c("x1", "x2"),
# cmod = cmod)
#
# nugget = "e"
# reml = FALSE
|
71a0bdd9a7ff227c4e3b3aecc4ee9187ecc91453
|
9735953ba24ca7f7d25d8bf7da409c1115544446
|
/man/RearrangeEdges.Rd
|
a4a09129fdc266cf82c191504d3d285dca4b7cf6
|
[] |
no_license
|
cran/TreeSearch
|
38dd910f725f6782363bc8bc6f8cfba3fc6f2611
|
4c5db5e620f3bc0a253d5ba24117ad7062e2f4dd
|
refs/heads/master
| 2023-09-01T04:22:59.096778
| 2023-08-26T04:50:02
| 2023-08-26T05:30:29
| 109,836,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,758
|
rd
|
RearrangeEdges.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_rearrangement.R
\name{RearrangeEdges}
\alias{RearrangeEdges}
\title{Rearrange edges of a phylogenetic tree}
\usage{
RearrangeEdges(
parent,
child,
dataset,
TreeScorer = MorphyLength,
EdgeSwapper,
scoreToBeat = TreeScorer(parent, child, dataset, ...),
iter = "?",
hits = 0L,
verbosity = 0L,
...
)
}
\arguments{
\item{parent}{Integer vector corresponding to the first column of the edge
matrix of a tree of class \code{\link{phylo}}, i.e. \code{tree$edge[, 1]}.}
\item{child}{Integer vector corresponding to the second column of the edge
matrix of a tree of class \code{\link{phylo}}, i.e. \code{tree$edge[, 2]}.}
\item{dataset}{Third argument to pass to \code{TreeScorer}.}
\item{TreeScorer}{function to score a given tree.
The function will be passed three parameters, corresponding to the
\code{parent} and \code{child} entries of a tree's edge list, and a dataset.}
\item{EdgeSwapper}{a function that rearranges a parent and child vector,
and returns a list with modified vectors; for example \code{\link[=SPRSwap]{SPRSwap()}}.}
\item{scoreToBeat}{Double giving score of input tree.}
\item{iter}{iteration number of calling function, for reporting to user only.}
\item{hits}{Integer giving number of times the input tree has already been hit.}
\item{verbosity}{Numeric specifying level of detail to display in console:
larger numbers provide more verbose feedback to the user.}
\item{\dots}{further arguments to pass to \code{TreeScorer()}, e.g. \verb{dataset = }.}
}
\value{
This function returns a list with two to four elements, corresponding to a binary tree:
- 1. Integer vector listing the parent node of each edge;
- 2. Integer vector listing the child node of each edge;
- 3. Score of the tree;
- 4. Number of times that score has been hit.
}
\description{
\code{RearrangeEdges()} performs the specified edge rearrangement on a matrix
that corresponds to the edges of a phylogenetic tree, returning the score of
the new tree.
Will generally be called from within a tree search function.
}
\details{
\code{RearrangeEdges()} performs one tree rearrangement of a
specified type, and returns the score of the tree (with the given dataset).
It also reports the number of times that this score was hit in the
current function call.
}
\examples{
data("Lobo", package="TreeTools")
tree <- TreeTools::NJTree(Lobo.phy)
edge <- tree$edge
parent <- edge[, 1]
child <- edge[, 2]
dataset <- PhyDat2Morphy(Lobo.phy)
RearrangeEdges(parent, child, dataset, EdgeSwapper = RootedNNISwap)
# Remember to free memory:
dataset <- UnloadMorphy(dataset)
}
\author{
Martin R. Smith
}
|
6360e7696f85cf8f41b817e8dbfe1db3644dace0
|
49ff4bbc5ee1a548a1b3c9543693a496acf4a7bc
|
/cachematrix.R
|
0eb8c4b62d5b69d915040468934114128d41dcb0
|
[] |
no_license
|
christinegaribian/ProgrammingAssignment2
|
8096cb430e8ea4ccc8eb03e320e8daaa6a2fd559
|
1f30df15f572ebf6753164ef7cf9a84c0b5c8ef1
|
refs/heads/master
| 2020-12-28T19:11:47.951226
| 2015-04-26T02:26:14
| 2015-04-26T02:26:14
| 34,594,008
| 0
| 0
| null | 2015-04-26T01:59:02
| 2015-04-26T01:59:00
| null |
UTF-8
|
R
| false
| false
| 1,165
|
r
|
cachematrix.R
|
## makeCacheMatrix and cacheSolve together cache a matrix inverse so that
## repeated requests can be answered from the cache instead of recomputed.
## makeCacheMatrix wraps a matrix in a closure-based object exposing:
##   set        - replace the stored matrix (clears the cached inverse)
##   get        - return the stored matrix
##   setinverse - store a computed inverse
##   getinverse - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(solve) cached_inverse <<- solve
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve returns the inverse of the special "matrix" object produced by
## makeCacheMatrix: it serves the cached inverse when one exists, otherwise it
## computes the inverse with solve() and stores it in the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
  } else {
    # Cache hit: announce and return the stored inverse.
    message("getting cached data")
    cached
  }
}
|
1ea5b9afdaf41a4d482721d403dfae9a6a2452fe
|
2b23caf2784021a3b21dddf73985a043b850206e
|
/Run_Miscellaneous.R
|
ac3fdf8838bc23d0cd7f9be329c34d6cf6e491b2
|
[] |
no_license
|
HaBi2018/code_msis_project
|
8ab59a7781faf6504ca65a36f590ca5f7011a7fa
|
90ef669d080d92e0074de9e1711c25a5a5f2211c
|
refs/heads/master
| 2021-07-16T05:08:03.052042
| 2018-12-04T12:37:32
| 2018-12-04T12:37:32
| 139,113,174
| 0
| 0
| null | null | null | null |
ISO-8859-15
|
R
| false
| false
| 5,503
|
r
|
Run_Miscellaneous.R
|
# Summaries and figures of outbreak signals from three surveillance systems:
# Sykdomspulsen (S), MSIS (M) and Vesuv (V).
# Relies on `fullDataSVM` (a data.table prepared upstream) and the output
# directory `SHARED_FOLDER_TODAY` being defined in the calling environment.

# Aggregate/count outbreak signals per system, per year
results <- fullDataSVM[, .(
  numS = sum(s_status != "Normal"),
  numM = sum(msis_outbreak == TRUE),
  numV = sum(vesuv_outbreak == 1)
), by = .(year)]
results
openxlsx::write.xlsx(results, file = file.path(SHARED_FOLDER_TODAY, "signaler.xlsx"))

## Graph signals total ######
# Weekly signal counts per system, in chronological order; `xValue` is a
# running week index used as the plotting x axis.
toPlot <- fullDataSVM[, .(
  Sykdomspulsen = sum(s_status != "Normal"),
  MSIS = sum(msis_outbreak == TRUE),
  Vesuv = sum(vesuv_outbreak == 1)
), by = .(year, week)]
setorder(toPlot, year, week)
toPlot[, xValue := seq_len(nrow(toPlot))]

library(ggplot2)

long <- melt.data.table(toPlot, id.vars = c("year", "week", "xValue"))

# Faceted bar chart over the full period; one x tick at week 1 of each year.
breaks <- unique(long[, c("year", "week", "xValue")])
breaks <- breaks[week %in% c(1)]
breaks[, label := sprintf("%s-%s", year, week)]
print(breaks)

p <- ggplot(data = long, mapping = aes(x = xValue, y = value))
p <- p + geom_bar(stat = "identity")
p <- p + facet_wrap(~variable, ncol = 1)
p <- p + scale_x_continuous("Ukenummer", labels = breaks$label, breaks = breaks$xValue)
p <- p + theme_grey(base_size = 16)
#p <- p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
ggsave(filename = file.path(SHARED_FOLDER_TODAY, "figure_1.png"),
       height = 210, width = 297, units = "mm", plot = p)

# Line chart of weekly signal counts for one year, saved as a PNG in
# SHARED_FOLDER_TODAY.  Replaces four copy-pasted, near-identical blocks
# (2017, 2012, 2007, 2014) from the original script.
plot_signals_year <- function(long, target_year, out_file) {
  ticks <- unique(long[, c("year", "week", "xValue")])
  ticks <- ticks[week %in% seq(1, 52, 4)]  # one tick every 4 weeks
  ticks[, label := sprintf("%s-%s", year, week)]
  print(ticks)
  p <- ggplot(data = long[year == target_year],
              mapping = aes(x = xValue, y = value, group = variable, colour = variable))
  p <- p + geom_line()
  p <- p + scale_x_continuous("År og ukenummer", labels = ticks$label, breaks = ticks$xValue)
  p <- p + theme_grey(base_size = 16)
  p <- p + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  p <- p + scale_y_continuous("Antall utbruddsignaler")
  ggsave(filename = file.path(SHARED_FOLDER_TODAY, out_file),
         height = 210, width = 297, units = "mm", plot = p)
}

plot_signals_year(long, 2017, "figure_graph2017.png")
plot_signals_year(long, 2012, "figure_graph2012.png")
plot_signals_year(long, 2007, "figure_graph2007.png")
plot_signals_year(long, 2014, "figure_graph2014.png")

## AGGREGATIONS ####################################################################
# Signals per year counting only "High" level Sykdomspulsen alarms.
resultsSignals2 <- fullDataSVM[, .(
  numShigh = sum(s_status == "High"),
  numM = sum(msis_outbreak == TRUE),
  numV = sum(vesuv_outbreak == 1)
), by = .(year)]
openxlsx::write.xlsx(resultsSignals2, file = file.path(SHARED_FOLDER_TODAY, "signaler_high.xlsx"))
resultsSignals2
mean(resultsSignals2$numShigh)

# Total registrations (not just outbreak signals) per system, per year.
resultsReg <- fullDataSVM[, .(
  numSreg = sum(n),
  numMreg = sum(num),
  numVreg = sum(v_n, na.rm = TRUE)
), by = .(year)]
openxlsx::write.xlsx(resultsReg, file = file.path(SHARED_FOLDER_TODAY, "registrations_year.xlsx"))
resultsReg
mean(resultsReg$numSreg)
mean(resultsReg$numMreg)

# Registrations occurring *during* outbreak signals, per system, per year.
res1 <- fullDataSVM[s_status != "Normal", .(numSregO = sum(n)), by = .(year)]
res2 <- fullDataSVM[msis_outbreak == TRUE, .(numMregO = sum(num)), by = .(year)]
res3 <- fullDataSVM[vesuv_outbreak == 1, .(numVregO = sum(v_n, na.rm = TRUE)), by = .(year)]
results <- merge(res1, res2, by = "year", all.x = TRUE)
results <- merge(results, res3, by = "year", all.x = TRUE)
results
openxlsx::write.xlsx(results, file = file.path(SHARED_FOLDER_TODAY, "reg.IN.outbreaks_year.xlsx"))

## Median & variation: the same counts at (year, week, location) resolution.
res1 <- fullDataSVM[s_status != "Normal", .(numSregO = sum(n)), by = .(year, week, location)]
summary(res1)
res2 <- fullDataSVM[msis_outbreak == TRUE, .(numMregO = sum(num)), by = .(year, week, location)]
summary(res2)
res3 <- fullDataSVM[vesuv_outbreak == 1, .(numVregO = sum(v_n, na.rm = TRUE)), by = .(year, week, location)]
summary(res3)
|
53596f2544cd897d213ce3503f1b0a4c58e8f3a4
|
ecb1d037e50203f2e5e79da0d845598c5b6cdd99
|
/man/logistic_regression_multiclasses.Rd
|
c52238ef62abec3083c0daa4ddaba15660e87099
|
[] |
no_license
|
Z1chenZhao/bis557
|
59669f343f9764ba6aa4c132dfefe1d4eba0b8ab
|
b9c1f1d2fc60ec23d2ec18f274fe743769477ab3
|
refs/heads/master
| 2023-02-01T00:36:18.885873
| 2020-12-18T19:45:21
| 2020-12-18T19:45:21
| 296,169,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 847
|
rd
|
logistic_regression_multiclasses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logistic_regression_multiclasses.R
\name{logistic_regression_multiclasses}
\alias{logistic_regression_multiclasses}
\title{logistic_regression_multiclasses}
\usage{
logistic_regression_multiclasses(X, Y, mu_fun, maxit = 1e+05, tol = 1e-05)
}
\arguments{
\item{X}{The design matrix}
\item{Y}{Response variable}
\item{mu_fun}{A function to use eta get mu}
\item{maxit}{The maximum number of iterations allowed before the loop exits; defaults to 1e5}
\item{tol}{Convergence tolerance: the loop exits once the difference between
the previous and updated beta falls below this value; defaults to 1e-5.}
}
\value{
A list of beta coefficients.
}
\description{
Implement a classification model generalizing
logistic regression to accommodate more than two classes.
}
\details{
The code is adapted from lecture notes.
}
|
69d282f83d4175b5fc76649bad58aad28c8fc8a3
|
16cb7477604cea01c8dc1d5c850415c007acc1fd
|
/man/df_list2csv.Rd
|
856edaa4c2341fc2e1af3bdee27e6766b26bcd33
|
[] |
no_license
|
rdinnager/r-personal-utility
|
9c14ee79e81d264a72d4da0c8533fe4c0e6a3091
|
e113af97a2dc887ce49469ae28cd579123720d81
|
refs/heads/master
| 2020-06-01T10:07:49.173099
| 2014-06-27T06:14:12
| 2014-06-27T06:14:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 886
|
rd
|
df_list2csv.Rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{df_list2csv}
\alias{df_list2csv}
\title{Save a list of data.frames as separate csv's}
\usage{
df_list2csv(df_list, filename, digits = nchar(as.character(length(df_list))))
}
\arguments{
\item{df_list}{A list of data.frame objects}
\item{filename}{The base filename which forms the first part of the csv file's name, followed by the number of the list item}
\item{digits}{The number of digits to coerce the list item number to before appending to filename. The default is
the same number of digits in the length of the list. e.g. 999 items = 3 digits, giving 001, 002, ... 999. This keeps files
sorted in the correct order.}
}
\value{
The base filename for the csv files.
}
\description{
This function takes a list of data.frames and saves them as numbered csv files. Often used in conjunction with
\code{\link{df_split}}.
}
|
2ec8b326532cea6e9246f92a3db97c4da51a99cb
|
b6ec7e998306e610899c6866daaa4a95b63b300c
|
/MS_server.R
|
67f02ce463057ff18bc5fea677124bbe32403d00
|
[] |
no_license
|
taiyunkim/QCMAP
|
4acf498383242f56aa54ef79fd232da32ff9d2eb
|
30468836fbe00b42140f8132b47a9ac2ac6f806d
|
refs/heads/master
| 2020-03-22T08:47:20.543554
| 2019-04-30T07:42:25
| 2019-04-30T07:42:25
| 139,790,920
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65,002
|
r
|
MS_server.R
|
# author - Taiyun Kim
# Allow uploads of up to 5 GB (5 * 1024^3 bytes) for large MaxQuant batches.
options(shiny.maxRequestSize=5*1024^3)
# Use the C locale so string sorting/comparison behaves identically everywhere.
Sys.setlocale("LC_ALL","C")
library(progress)
library(htmlwidgets)
library(colourpicker)
library(shinyTime)
library(rhandsontable)
library(D3TableFilter)
library(DT)
library(glmnet)
# source the necessary functions
source("./functions/summariseFile.R")
source("./functions/MS_db_update.R")
source("./functions/helperFunctions.R")
# MS server function
msServer <- function(input,output,session) {
# Known instrument names: read from the 15th column of allFeatures.csv
# (the run database).  NOTE(review): assumes a fixed column layout with the
# Instrument column at position 15 -- TODO confirm against the CSV schema.
instrumentNames <- read.csv("allFeatures.csv", colClasses = c(rep("NULL", 14), "character"), check.names = F)[,1]
instrumentNames <- unique(instrumentNames)
# QC feature names: every column except the run id (col 1) and the metadata
# columns listed below.
featureNames <- colnames(read.csv("allFeatures.csv", nrows = 1, check.names = F))[-1]
featureNames = featureNames[-which(featureNames %in% c("Peptide Sequences Identified", "Date", "Instrument", "Type"))]
# declare fileIn to store input files as reactive values.
fileIn <- reactiveValues()
# Flags: `run` = an input batch has been summarised; `example` = demo data mode.
ms_runReader <- reactiveValues(run = F, example = F)
selected_inst <- reactiveValues()
# Per-instrument QC thresholds, loaded lazily from thresholds.txt when present.
inst_threshold <- reactiveValues(present = F)
comment_text <- reactiveValues(present = F)
# Colour palette offered in the comment form: hex codes and their readable names.
comment_reactive_table <- reactiveValues(colorsHex = c("#27D3E3", "#99FF33", "#3B5E2B", "#FFFF00", "#8A2BE2"),
colorsName = c("blue", "lime", "green", "yellow", "violet"))
# Main input pipeline: load either the bundled example CSV or the uploaded
# MaxQuant txt files, summarise them into one table, and stash the results in
# ms_runReader for the per-instrument plots defined further below.
observe({
withProgress(message = "Validating txt files ...", value = 0, {
if(input$ms_example & (input$ms_try_example != "")) {
# Example mode: fake a file-upload object pointing at www/data/<name>.csv.
fileObj <- list()
fileObj$name <- input$ms_try_example
fileObj$datapath <- paste(getwd(), "/www/data/", fileObj$name, ".csv", sep = "")
# outputDir <- gsub(paste("^(.*/)(", fileObj$name, ")$", sep = ""), "\\1", fileObj$datapath)
#
# # extraction to temporary directory does not work on windows machine.
# # This may be due to os directory structuture
# # (outputDir cannot be found when tried to access with exdir in windows)
# outputDir <- gsub("^(.*)(/)$", "\\1", outputDir)
# unzippedFolderDir <- unzip(fileObj$datapath, exdir = outputDir)
# unzippedFolderName <- gsub("(.*/)(.*\\.txt)$", "\\2", unzippedFolderDir)
fileIn$path <- fileObj$datapath
fileIn$name <- fileObj$name
ms_runReader$example = T
# Example CSVs are already summarised; just parse the Date column.
combinedInputFile <- read.csv(fileIn$path, check.names = F, row.names = 1)
combinedInputFile$Date <- as.POSIXct(as.character(combinedInputFile$Date))
} else {
# Upload mode: register the files, validate their names, then summarise.
fileInList()
ms_runReader$example = F
incProgress(1/4, message = "Validating txt files ...")
ms_validateFileNames()
incProgress(1/4, message = "Reading and summarising txt files ...")
combinedInputFile <- summariseInput()
}
ms_runReader$run <- T
# Sort the batch chronologically and build the long-format frame consumed by
# the per-instrument bar plots.
batch.2 <- combinedInputFile
batch.2 <- batch.2[order(batch.2$Date),]
dat.batch2 <- data.frame(
date = c(batch.2$Date),
pep = c(batch.2$`Peptide Sequences Identified`),
batch = c(rep("input batch", nrow(batch.2))),
name = c(rownames(batch.2)),
instrument = c(as.character(batch.2$Instrument)),
type = c(as.character(batch.2$Type))
)
incProgress(1/4, message = "Plotting ...")
})
ms_runReader$batch.2 <- batch.2
ms_runReader$dat.batch2 <- dat.batch2
})
# Once an input batch has been summarised, focus the tabset "t" on the tab of
# the first instrument appearing in that batch.
observe({
  if (ms_runReader$run) {
    first_inst <- ms_runReader$batch.2$Instrument[1]
    updateTabsetPanel(session, "t", selected = first_inst)
  }
})
# modal to add comment
# One observer per instrument: clicking that instrument's "comment" button
# opens a modal form with the Instrument dropdown pre-selected accordingly.
lapply(1:length(instrumentNames), function(i) {
observeEvent(input[[paste("ms_comment_", instrumentNames[i], sep = "")]], {
showModal(modalDialog(
title = "Comment form",
renderUI({
list(
selectInput("comment_instrument", "Instrument", selected = instrumentNames[i], choices = instrumentNames)
)
}),
renderUI({
list(
dateInput("comment_date", "Date")
# textInput("comment_date", "Date", placeholder = "Date", value = "")
# renderText({update_request_firstName_validation()})
)
}),
br(),
renderUI({
tagList(
timeInput("comment_time", "Time")
# textInput("comment_time", "Time", placeholder = "Date")
# renderText({update_request_lastName_validation()})
)
}),
br(),
renderUI({
list(
# Colour choices are restricted to the palette in comment_reactive_table.
colourpicker::colourInput("comment_colour", "Color", value ="blue", palette = "limited",
showColour = "background",
allowedCols = comment_reactive_table[["colorsHex"]])
# textInput("comment_color", "Color", placeholder = "Color")
# renderText({update_request_email_validation()})
)
}),
br(),
renderUI({
list(
textAreaInput("comment_comments", "Comments", resize = "none", width = "100%",
height = "150px", placeholder = "Your comment goes here.")
# renderText({update_request_comment_validation()})
)
}),
br(),
# Submission is handled by the ms_submit_request observer below.
actionButton("ms_submit_request", "Submit"),
easyClose = TRUE,
footer = NULL,
style = "width:100%"
))
})
})
# Pop-up message that saves comment
# Appends one row (instrument, colour hex, comment, "date time") to
# allComments.csv.  NOTE(review): write.table with col.names = FALSE still
# writes a row name as the first field; readers of this file drop the first
# column accordingly -- keep the two in sync.
observeEvent(input$ms_submit_request, {
# saveComment <- eventReactive(input$ma_submit_request, {
newRow <- c(input$comment_instrument, input$comment_colour, input$comment_comments, as.character(paste(input$comment_date, strftime(input$comment_time, "%T"))))
write.table(t(newRow), file = paste(getwd(), "/allComments.csv", sep = ""), append = T, sep = ",", col.names = F)
showModal(modalDialog(
title = "Comment saved!",
p("Your comment is saved!"),
br(),
easyClose = TRUE,
footer = NULL,
style = "width:100%"
))
})
# Reactive view of allComments.csv: the file is polled every 1000 ms and
# re-parsed whenever it changes on disk.
comment_fileReader <- reactiveFileReader(1000, session, paste0(getwd(), "/allComments.csv"), function(commentPath) {
  read.csv(commentPath, check.names = FALSE, header = TRUE)
})
# modal to edit comment
# One observer per instrument: clicking the "edit comment" button opens the
# table editor when comments exist, or an informational dialog otherwise.
lapply(1:length(instrumentNames), function(i) {
observeEvent(input[[paste("ms_edit_comment_", instrumentNames[i], sep = "")]], {
if (nrow(comment_fileReader()) != 0) {
showModal(modalDialog(
size = "l",
title = "Comment editor",
# The editable grid itself is rendered by output$comment_editor below.
rHandsontableOutput("comment_editor"),
# rHandsontableOutput(paste("comment_editor_", instrumentNames[i], sep = "")),
# d3tfOutput("comment_editor", height = "auto"),
br(),
actionButton("ms_save_request", "Save"),
easyClose = TRUE,
footer = NULL,
style = "width:100%"
))
} else {
showModal(modalDialog(
size = "l",
title = "Comment editor",
h4("No Comments so far"),
br(),
easyClose = TRUE,
footer = NULL,
style = "width:100%"
))
}
})
})
# Editable grid of all saved comments.  Colour hex codes are translated to
# their readable names for display; ms_save_request reverses the mapping
# before writing back to disk.
output$comment_editor <- renderRHandsontable({
# output$comment_editor <- renderD3tf({
comment_table <- comment_fileReader()
# Drop the first column (row-name/index field written by write.table).
comment_table <- comment_table[,-1]
for (i in 1:length(comment_reactive_table[["colorsHex"]])) {
levels(comment_table$Colour)[levels(comment_table$Colour) == comment_reactive_table[["colorsHex"]][i]] <- comment_reactive_table[["colorsName"]][i]
}
# Keep a copy in the reactive store and coerce the dropdown columns to factors.
comment_reactive_table[["table"]] <- as.data.frame(comment_table, stringsAsFactors = F)
comment_reactive_table[["table"]]$Instrument <- factor(comment_reactive_table[["table"]]$Instrument)
comment_reactive_table[["table"]]$Colour <- factor(comment_reactive_table[["table"]]$Colour)
rhandsontable(comment_reactive_table[["table"]], stretchH = "all",
colHeaders = colnames(comment_reactive_table[["table"]]), search = T, height = 500) %>%
hot_col("Comment", allowInvalid = T) %>%
hot_col("Colour", type = "dropdown", source = comment_reactive_table[["colorsName"]]) %>%
hot_col("Instrument", type = "dropdown", source = instrumentNames) %>%
hot_col("datetime", type = "date", dateFormat = "YYYY-MM-DD HH:mm:ss") %>%
hot_cols(columnSorting = TRUE)
# tableProps <- list(
# btn_reset = TRUE,
# # alphabetic sorting for the row names column, numeric for all other columns
# col_types = c("string", "string", "string", "date")
# )
# d3tf(comment_reactive_table[["table"]],
# tableProps = tableProps,
# extensions = list(
# list(name = "sort")
# ),
# showRowNames = F,
# tableStyle = "table table-bordered",
# colsResizable = T,
# edit = T)
})
# Pop-up message that updates the comments
# Persists the edited grid: colour names are mapped back to hex codes, then
# the whole table is rewritten to allComments.csv.
observeEvent(input$ms_save_request, {
comment_table <- isolate(hot_to_r(input$comment_editor))
# comment_table <- isolate(comment_reactive_table[["table"]])
for (i in 1:length(comment_reactive_table[["colorsHex"]])) {
levels(comment_table$Colour)[levels(comment_table$Colour) == comment_reactive_table[["colorsName"]][i]] <- comment_reactive_table[["colorsHex"]][i]
# comment_table$Colour[comment_table$Colour == comment_reactive_table[["colorsName"]][i]] <- comment_reactive_table[["colorsHex"]][i]
}
# write.csv writes row names; readers drop the first column on load.
write.csv(comment_table, file = paste(getwd(), "/allComments.csv", sep = ""))
showModal(modalDialog(
title = "Update",
p("Your comments are updated!"),
br(),
easyClose = TRUE,
footer = NULL,
style = "width:100%"
))
})
################################################################################
# QC_recaptcha
#
# Server-side verification of a Google reCAPTCHA response token.  Returns a
# reactive yielding the parsed verification result (a list containing at least
# `success`); yields list(success = FALSE) while no token has been submitted.
################################################################################
QC_recaptcha <- function(input, output, session, secret = Sys.getenv("recaptcha_secret")) {
  status <- reactive({
    token <- input$recaptcha_response
    if (!isTruthy(token)) {
      # No captcha response yet -- nothing to verify.
      list(success = FALSE)
    } else {
      verify_url <- "https://www.google.com/recaptcha/api/siteverify"
      reply <- httr::POST(verify_url, body = list(
        secret = secret,
        response = token
      ))
      # Parse Google's JSON reply (fields include `success`).
      fromJSON(httr::content(reply, "text"))
    }
  })
  status
}
################################################################################
# ms_validateFileNames
#
# Reactive guard: checks that the uploaded batch contains all four required
# MaxQuant output files (summary, allPeptides, msScans, evidence).  When the
# check fails, shiny::validate() raises the UI message
# "Please choose the correct files".
################################################################################
ms_validateFileNames <- reactive({
  required_files <- c("summary.txt", "allPeptides.txt", "msScans.txt", "evidence.txt")
  # `&&` (scalar, short-circuiting) instead of `&`: both operands are scalars,
  # and the length check is skipped once a required file is missing.
  has_all_required <- all(required_files %in% fileIn$name) &&
    (length(fileIn$name) >= length(required_files))
  shiny::validate(
    # shiny::need used explicitly, matching fileInList() below.
    shiny::need(has_all_required, "Please choose the correct files")
  )
})
################################################################################
# fileInList
#
# read the files when run is clicked
# Registers the uploaded MaxQuant files in `fileIn`: loose .txt files are
# stored directly; a single .zip upload is extracted first and the contained
# .txt files registered.  NOTE(review): a .zip upload alongside other files
# (length > 1) is silently ignored by the inner `if` -- TODO confirm intended.
################################################################################
fileInList <- reactive({
fileObj <- input$maxQuantFiles
shiny::validate(
shiny::need(
(!is.null(fileObj$name)),
"Please upload your txt files(/zipped folder)."
)
)
if (grepl("*\\.txt$", fileObj$name[1])) {
# Plain txt upload: keep names and temp paths as-is.
fileIn$name <- fileObj$name
fileIn$path <- fileObj$datapath
} else if (grepl(".*\\.zip$",fileObj$name[1])) {
if (length(fileObj$name) == 1) { # zipped folder
# Shiny stores the upload as <tmpdir>/0.zip; extract next to it.
outputDir <- gsub("^(.*/)(0\\.zip$)", "\\1", fileObj$datapath) # Assume the filename is alwasy 0.zip
# extraction to temporary directory does not work on windows machine.
# This may be due to os directory structuture
# (outputDir cannot be found when tried to access with exdir in windows)
unzippedFolderDir <- unzip(fileObj$datapath, exdir = outputDir)
unzippedFolderName <- gsub("(.*/)(.*\\.txt)$", "\\2", unzippedFolderDir)
fileIn$path <- unzippedFolderDir
fileIn$name <- unzippedFolderName
}
}
})
################################################################################
# MS_save_DB
#
# save input batch files to the database. This is run when "Save" button is clicked
# Flow: verify reCAPTCHA, summarise the uploaded batch, mark it as a database
# entry (Type = "DB"), append it to allFeatures and persist via MS_db_update().
################################################################################
MS_save_DB <- observeEvent(input$MS_saveDB, {
result <- callModule(QC_recaptcha, "MS_recaptcha_test", secret = "6Lfg_jYUAAAAAI9UEuZ2FI_t0pjllcleGnWD5YfX")
# Abort silently unless the captcha verification succeeded.
req(result()$success)
# check all Fields
# save the data
allFeatures <- readData()
combinedInputFile <- summariseInput()
combinedInputFile$Type <- "DB"
# order the columns
# Reorder the new rows to match the database's column order before rbind.
combinedInputFile <- combinedInputFile[,match(colnames(allFeatures), colnames(combinedInputFile))]
# combine two files
allFeatures <- rbind(allFeatures, combinedInputFile)
MS_db_update(allFeatures)
# alert
# NOTE(review): the user-facing strings below contain typos ("Your has been
# saved.", "Changes will affect in your next run.") -- left unchanged here.
showModal(
modalDialog(
title = HTML('<span style="color:green; font-size: 20px; font-weight:bold; font-family:sans-serif ">Data saved!<span>
<button type = "button" class="close" data-dismiss="modal" ">
<span style="color:white; "><span>
</button> '),
p("Your has been saved."),
p("Changes will affect in your next run."),
easyClose = TRUE,
footer = NULL
)
)
})
# ################################################################################
# # readData
# #
# # It reads the CSV file (database). It also sets the "Date" as a R Date format
# ################################################################################
# readData <- function() {
# allFeatures <- read.csv(paste0(getwd(), "/allFeatures.csv", sep = ""), row.names = 1, check.names = FALSE)
# allFeatures$Date <- as.POSIXct(as.character(allFeatures$Date))
#
# return(allFeatures)
# }
#
################################################################################
# summariseInput
#
# This will summarise input text files (summary.txt, allPeptides.txt, msScans.txt, evidence.txt)
# For each file only the QC-relevant columns are read (header row first to
# locate them, then a second pass with colClasses = "NULL" elsewhere), and the
# four tables are passed to summariseFile() for aggregation per raw file.
# Files from instruments not present in the database trigger a warning modal
# and are excluded from the result by summariseFile -- TODO confirm.
################################################################################
summariseInput <- reactive({
# Pass 1: read just the header row of each file to find the wanted columns.
sumFileHeaders <- read.delim2(fileIn$path[which(fileIn$name == "summary.txt")], sep = "\t", header = TRUE, check.names = FALSE, nrow = 1)
sumFileHeadersIndex <- which(colnames(sumFileHeaders) %in% c("Raw file", "Enzyme", "MS/MS", "MS/MS Identified", "MS/MS Identified [%]",
"Mass Standard Deviation [ppm]", "Peaks Repeatedly Sequenced [%]", "Peptide Sequences Identified"))
allPepHeaders <- read.delim2(fileIn$path[which(fileIn$name == "allPeptides.txt")], sep = "\t", header = TRUE, check.names = FALSE, nrow = 1)
allPepHeadersIndex <- which(colnames(allPepHeaders) %in% c("Raw file", "Intensity", "Mass precision [ppm]", "Retention length (FWHM)", "Score", "Sequence"))
msScansHeaders <- read.delim2(fileIn$path[which(fileIn$name == "msScans.txt")], sep = "\t", header = TRUE, check.names = FALSE, nrow = 1)
msScansHeadersIndex <- which(colnames(msScansHeaders) %in% c("Raw file", "Cycle time"))
evidenceHeaders <- read.delim2(fileIn$path[which(fileIn$name == "evidence.txt")], sep = "\t", header = TRUE, check.names = FALSE, nrow = 1)
evidenceHeadersIndex <- which(colnames(evidenceHeaders) %in% c("Raw file", "Mass Error [ppm]"))
# Pass 2: read full tables, keeping only the indexed columns ("NULL" skips).
# [ncol == 52] Raw file (1), Experiment(2), MS/MS(20), MS/MS Identified(26), MS/MS Identified [%](30), Peptide Sequences Identified(34), Peaks Repeatedly Sequenced [%](39), Mass Standard Deviation [ppm](49))
sumFileTable <- read.delim2(fileIn$path[which(fileIn$name == "summary.txt")], sep = "\t", header = TRUE, check.names = FALSE,
colClasses = unlist( lapply(1:ncol(sumFileHeaders), function(i) {if(i %in% sumFileHeadersIndex) {"character"} else {"NULL"} } ) ))
# [ncol == 33] Raw file(1), Mass Precision [ppm](14), FWHM(18), Sequences(23), Score(28), Intensity(29)
allPepFileTable <- read.delim2(fileIn$path[which(fileIn$name == "allPeptides.txt")], sep = "\t", header = TRUE, check.names = FALSE,
colClasses = unlist( lapply(1:ncol(allPepHeaders), function(i) {if(i %in% allPepHeadersIndex) {"character"} else {"NULL"} } ) ))
# [ncol == 29] Raw file(1), Cycle time(5)
msScansFileTable <- read.delim2(fileIn$path[which(fileIn$name == "msScans.txt")], sep = "\t", header = TRUE, check.names = FALSE,
colClasses = unlist( lapply(1:ncol(msScansHeaders), function(i) {if(i %in% msScansHeadersIndex) {"character"} else {"NULL"} } ) ))
# Raw file, Mass error [ppm]
# evidenceFileTable <- read.delim2(fileIn$path[which(fileIn$name == "evidence.txt")], sep = "\t", header = TRUE, check.names = FALSE,
# colClasses = c(rep("NULL", 15), "character", rep("NULL", 9), "character", rep("NULL", 37)))
evidenceFileTable <- read.delim2(fileIn$path[which(fileIn$name == "evidence.txt")], sep = "\t", header = TRUE, check.names = FALSE,
colClasses = unlist( lapply(1:ncol(evidenceHeaders), function(i) {if(i %in% evidenceHeadersIndex) {"character"} else {"NULL"} } ) ))
# Aggregate the four tables into one summary row per raw file.
summary.result <- summariseFile(
list(
sumFileTable = sumFileTable,
allPepFileTable = allPepFileTable,
msScansFileTable = msScansFileTable,
evidenceFileTable = evidenceFileTable
),
instrumentNames
)
combinedFile <- summary.result[["sumTable"]]
# Warn the user about files from instruments unknown to the database.
if (length(summary.result$unknown.inst) > 0) {
showModal(
modalDialog(
title = HTML('<span style="color:red; font-size: 20px; font-weight:bold; font-family:sans-serif ">Sorry, some of your file instruments are not in our database<span>
<button type = "button" class="close" data-dismiss="modal" ">
<span style="color:white; "><span>
</button> '),
# "Update success!",
h3("The following files are removed from the plots."),
do.call(p, c(lapply(1:length(summary.result$unknown.inst), function(i) {
p(summary.result$unknown.inst[i])
}))),
easyClose = TRUE,
footer = NULL
)
)
}
return (combinedFile)
})
# Surface any validation message raised by fileInList() / ms_validateFileNames()
# in the UI; renders nothing when the uploaded files pass validation.
output$ms_validationMessage <- renderText({
fileInList()
ms_validateFileNames()
})
  # loop bar plot ----
  # For each instrument, register output "bar_<instrument>_history": a bar
  # chart of "Peptide Sequences Identified" over time for that instrument,
  # with range-selector presets, an optional overlay of newly uploaded runs,
  # a per-instrument threshold line and saved reviewer-comment markers.
  lapply(1:length(instrumentNames), function(i) {
    # NOTE: paste0() has no `sep` argument, so `sep = ""` is absorbed into
    # `...` and appends an empty string — the id is still "bar_<inst>_history".
    output[[paste0("bar_", instrumentNames[i], "_history", sep = "")]] <- renderPlotly({
      this.instrument <- instrumentNames[i]
      # Stored runs, ordered by date and restricted to this instrument.
      allFeatures <- readData()
      batch.1 <- allFeatures
      batch.1 <- batch.1[order(batch.1$Date),]
      batch.1 <- batch.1[batch.1$Instrument == this.instrument,]
      # Plotting frame: one row per stored run for this instrument.
      dat <- data.frame(
        date = c(as.POSIXct(batch.1$Date)),
        pep = c(batch.1$`Peptide Sequences Identified`),
        batch = c(rep("Database", nrow(batch.1))),
        name = c(rownames(batch.1)),
        instrument = c(as.character(batch.1$Instrument)),
        type = c(as.character(batch.1$Type))
      )
      goodColors <- c("#8B4513", "#98FFCC", "#0000FF", "#808080", "#FF00FF", "#f88379", "#9400D3")
      # Base bar trace. Click events are published on source "peptideNumPlot"
      # and consumed by the per-instrument box plots and score outputs.
      p <- plot_ly(dat, x = ~date, source = "peptideNumPlot", y = ~pep, text = ~name, name = "database", key = ~instrument, showlegend = T, color = ~instrument, colors = "#808080", type = "bar") %>%
        layout(
          barmode = 'group',
          title = paste0("[", this.instrument, "] Peptide Sequences Identified", sep = ""),
          showlegend = T,
          xaxis = list(
            title = "Date",
            # Quick zoom presets: 1W / 2W / 1M / 3M / 6M / 1Y / YTD / all.
            rangeselector = list(
              buttons = list(
                list(
                  count = 7,
                  label = "1W",
                  step = "day",
                  stepmode = "backward"),
                list(
                  count = 14,
                  label = "2W",
                  step = "day",
                  stepmode = "backward"),
                list(
                  count = 1,
                  label = "1M",
                  step = "month",
                  stepmode = "backward"),
                list(
                  count = 3,
                  label = "3M",
                  step = "month",
                  stepmode = "backward"),
                list(
                  count = 6,
                  label = "6M",
                  step = "month",
                  stepmode = "backward"),
                list(
                  count = 1,
                  label = "1Y",
                  step = "year",
                  stepmode = "backward"),
                list(
                  count = 1,
                  label = "YTD",
                  step = "year",
                  stepmode = "todate"),
                list(step = "all")
              )
            ),
            rangeslider = list(type = "date")
          ),
          yaxis = list(title = "Peptide Sequences Identified")
        ) %>%
        # Default viewport: the last 15 days (1296000 seconds) of stored data.
        rangeslider(start = (as.POSIXct(tail(dat$date,1)) - 1296000), end = as.POSIXct(tail(dat$date,1)))
      # Overlay newly uploaded runs as red bars when a batch has been read in.
      if (ms_runReader$run == T) {
        dat.batch2 <- ms_runReader$dat.batch2
        if (any(dat.batch2$instrument == this.instrument)) {
          dat.batch2 <- dat.batch2[dat.batch2$instrument == this.instrument,]
          p <- p %>%
            add_bars(data = dat.batch2, x = ~date, y = ~pep, key = ~type,
                     marker = list(color = "#e60000"), text = ~name,
                     name = ~instrument, color = ~name, showlegend = T, inherit = T)
          # Re-centre the viewport on the newly uploaded runs.
          p <- p %>%
            rangeslider(start = (as.POSIXct(tail(dat.batch2$date,1)) - 1296000), end = as.POSIXct(tail(dat.batch2$date,1)))
        }
      }
      # Optional per-instrument thresholds from ./thresholds.txt
      # (tab-separated: instrument name in column 1, numeric threshold in 2).
      thresholdData <- paste(getwd(), "/thresholds.txt", sep = "")
      if (!file.exists(thresholdData)) {
        thresholdData = NA
      }
      if (!is.na(thresholdData)) {
        inst_threshold$present = T
        tmp <- read.delim(thresholdData, check.names = F, header = T)
        # NOTE(review): this `i` shadows the lapply instrument index; harmless
        # here only because the outer `i` is not used again below this point.
        for (i in 1:nrow(tmp)) {
          inst_threshold[[as.character(tmp[i,1])]] <- as.numeric(tmp[i,2])
        }
      }
      if (inst_threshold$present == T) {
        if (ms_runReader$run == T) {
          # Extend the threshold line one day (86400 s) past the last upload.
          p <- p %>%
            add_lines(x = c(dat$date, dat.batch2$date, tail(dat.batch2$date,1)+86400), y = inst_threshold[[this.instrument]], name = paste(this.instrument, "<br />threshold", sep = ""),
                      key = "threshold", line = list(color = "#ff7f50"), hoverinfo = "none", showlegend = T, inherit = F)
        } else {
          p <- p %>%
            add_lines(y = inst_threshold[[this.instrument]], name = "threshold",
                      key = "threshold", line = list(color = "#ff7f50"), hoverinfo = "none", showlegend = T, inherit = T)
        }
      }
      # Saved reviewer comments (./allComments.csv) are drawn as coloured "x"
      # markers at y = 0 with the comment text shown on hover.
      commentData <- paste(getwd(), "/allComments.csv", sep = "")
      if (!file.exists(commentData)) {
        commentData <- NA
      }
      if (!is.na(commentData)) {
        comment_text$present = T
        tmp <- comment_fileReader()
        # tmp <- read.csv(commentData, check.names = F, header = T)
        # Drop the leading row-numbering column written by the comment saver.
        tmp <- tmp[,-1]
        tmp$datetime <- as.POSIXct(tmp$datetime)
        this.comment <- tmp[tmp$Instrument == this.instrument,]
        if (nrow(this.comment) > 0) {
          p <- p %>%
            add_markers(x = this.comment$datetime, y = 0, hoverinfo = "text", showlegend = T, inherit = F, name = paste(this.instrument, "<br />comments", sep = ""),
                        key = "comment", hovertext = ~this.comment$Comment, marker = list(color = ~this.comment$Colour, symbol = "x", size = 10))
        }
      }
      # NOTE(review): the result of this first onRender() pipe is discarded
      # (never assigned), so "comment_javascript" is never attached; only the
      # second pipe below is the value returned by renderPlotly — confirm
      # whether both handlers were meant to be chained.
      p %>%
        onRender(jsCode = JS("comment_javascript"))
      p %>%
        onRender(jsCode = JS("javascript"))
      # layout(barmode = "overlay")
    })
  })
  # loop box plot ----
  # For each instrument, register output "box_<instrument>": one box plot per
  # QC feature showing the database distribution, overlaid with the five most
  # recent runs and, when a bar is clicked in the history chart above, the
  # selected run as a green marker.
  lapply(1:length(instrumentNames), function(i) {
    output[[paste0("box_", instrumentNames[i], sep = "")]] <- renderPlotly({
      this.Instrument = instrumentNames[i]
      # Click event from the per-instrument history bar chart.
      s <- event_data("plotly_click", source = "peptideNumPlot")
      allFeatures <- readData()
      batch.1 <- allFeatures
      batch.1 <- batch.1[order(batch.1$Date),]
      # Reshape - for raw: long format, one row per (run, feature) pair.
      ds1 <- reshape2::melt(batch.1, id = c("Date","Instrument"))
      ds1 <- filter(ds1, variable != "Type")
      ds1$value <- as.numeric(ds1$value)
      ds1$value <- round(ds1$value, digits = 3)
      goodColors <- c("#8B4513", "#98FFCC", "#0000FF", "#808080", "#FF00FF", "#f88379", "#9400D3")
      # i goes through each instrument
      # find rows in ds1 that is current instrument
      ds1.i.row <- which(as.character(ds1$Instrument) %in% this.Instrument)
      ds1.i.inst.table <- ds1[ds1.i.row,]
      # j goes through each features
      if (ms_runReader$run) { # this means that input was made
        # Newly uploaded batch, reshaped the same way as the database rows.
        batch.2 <- ms_runReader$batch.2
        ds2 <- reshape2::melt(batch.2, id = c("Date", "Instrument"))
        ds2$value <- as.numeric(ds2$value)
      }
      # One sub-plot per feature, accumulated into p.
      p <- list()
      for (j in 1:length(unique(ds1$variable))) {
        # find rows in ds1.i.inst.table that is current variable
        ds1.j.row <- which(as.character(ds1.i.inst.table$variable) %in% as.character(unique(ds1.i.inst.table$variable)[j]))
        # Only the first sub-plot contributes legend entries.
        if (j == 1) {
          plotLegend = T
        } else {
          plotLegend = F
        }
        q <- plot_ly(ds1.i.inst.table[ds1.j.row,], x = ~variable, y = ~value, type = "box", color = ~Instrument, colors = "#0033cc", name = "database", legendgroup = this.Instrument, showlegend = plotLegend) %>%
          layout(boxmode = "group", paper_bgcolor = 'rgb(255,255,255)', plot_bgcolor='rgb(229,229,229)', xaxis = list(showticklabels = T, tickangle = -45))
        # Highlight the clicked run. curveNumber 0 with a matching key is a
        # database bar for this instrument; key "input" is an uploaded run.
        if (length(s)) {
          if ((s[["key"]] %in% this.Instrument) & (s[["curveNumber"]] == 0)) {
            selectedInstrument <- batch.1[batch.1$Instrument == s[["key"]], ]
            # pointNumber is 0-based, hence the +1 into R's 1-based rows.
            selectedFileName <- rownames(selectedInstrument)[s[["pointNumber"]]+1]
            selected.ds1 <- ds1[ds1$Instrument == s[["key"]],]
            ds1Row.selected <- which(as.character(selected.ds1$variable) %in% as.character(unique(ds1$variable)[j]))
            q <- q %>%
              add_markers(data = selected.ds1[ds1Row.selected[s[["pointNumber"]]+1],], x = ~variable, y = ~value,
                          legendgroup = selectedFileName, showlegend = plotLegend, marker = list(size = 13, color = "#00cc00"), # #ff9933
                          name = selectedFileName)
          } else if (s[["key"]] == "input") {
            selectedFileName <- rownames(batch.2[batch.2$Instrument == this.Instrument,])[s[["curveNumber"]]]
            ds2Row.selected <- which(as.character(ds2$variable) %in% as.character(unique(ds1$variable)[j]))
            ds2.inst <- ds2[ds2Row.selected,]
            ds2.inst <- ds2.inst[ds2.inst$Instrument == this.Instrument,]
            q <- q %>%
              add_markers(data = ds2.inst[s[["curveNumber"]],], x = ~variable, y = ~value,
                          legendgroup = selectedFileName, showlegend = plotLegend, marker = list(size = 13, color = "#00cc00"),
                          name = selectedFileName)
          }
        }
        # Overlay the five most recent runs (database plus uploads, merged by
        # date); columns are alphabetised so rbind() aligns them.
        tmp.batch.1 <- batch.1[batch.1$Instrument == this.Instrument,]
        tmp.batch.1 <- tmp.batch.1[order(tmp.batch.1$Date, decreasing = FALSE),]
        tmp.batch.1 <- tmp.batch.1[,order(colnames(tmp.batch.1))]
        if (ms_runReader$run) {
          batch.2 <- ms_runReader$batch.2
          tmp.batch.2 <- batch.2[batch.2$Instrument == this.Instrument,]
          tmp.batch.2 <- tmp.batch.2[order(tmp.batch.2$Date, decreasing = FALSE),]
          tmp.batch.2 <- tmp.batch.2[,order(colnames(tmp.batch.2))]
          lastRuns <- rbind(tail(tmp.batch.1,5), tail(tmp.batch.2,5))
          lastRuns <- lastRuns[order(lastRuns$Date, decreasing = FALSE),]
          lastRuns <- tail(lastRuns, 5)
        } else {
          lastRuns <- tail(tmp.batch.1,5)
        }
        lastRuns <- lastRuns[,-(which(colnames(lastRuns) %in% c("Date", "Instrument")))]
        # Manual wide-to-long reshape of the last-run rows.
        lastRuns.dat <- data.frame(
          filename = rep(rownames(lastRuns), ncol(lastRuns)),
          variable = rep(colnames(lastRuns), each = nrow(lastRuns)), # nrow should be 5 (last 5 runs)
          value = as.vector(as.matrix(lastRuns)),
          Instrument = rep(this.Instrument, nrow(lastRuns)*ncol(lastRuns))
        )
        # Colour-grade the five markers from light to dark by recency.
        q <- q %>%
          add_markers(data = lastRuns.dat[lastRuns.dat$variable == unique(ds1$variable)[j],], x = ~variable, y = ~value,
                      showlegend = plotLegend, marker = list(size = 10, color = c("#fee391", "#fec44f", "#fe9929", "#d95f0e", "#993404")),#"#00cc00"), # #00FF00", "#00cc00", "#009933", "#336600", "#003300")),
                      text = ~filename, name = ~filename)
        p <- append(p, list(q))
      }
      # Assemble the per-feature box plots into a single row.
      subplot(p, nrows = 1, titleX = F) %>%
        layout(title = paste0(this.Instrument, " Features", sep = ""), showlegend = T, margin = list(l = 100, r = 50, b = 200, t = 50, pad = 4))
    })
  })
lapply(1:length(instrumentNames), function(i) {
this.instrument <- instrumentNames[i]
allFeatures <- readData()
curData = allFeatures[which(allFeatures$Instrument == this.instrument),]
curData = curData %>%
select(-c("Date", "Instrument", "Type" ))
output[[paste("cor_", this.instrument, sep = "")]] = renderPlot({
corrplot::corrplot(cor(curData))
})
})
  # loop feature bar plot ----
  ## For each instrument, and within it each QC feature, register output
  ## "bar_<instrument>_<feature>_history": that feature's value over time.
  lapply(1:length(instrumentNames), function(i) {
    this.instrument <- instrumentNames[i]
    # NOTE(review): readData() is called here, outside renderPlotly(); if it
    # is a reactive this runs without a reactive context, and in any case the
    # data captured here is frozen at server start-up, so these plots will
    # not refresh when the database changes — confirm whether intended.
    allFeatures <- readData()
    lapply(1:length(featureNames), function(j) {
      output[[paste0("bar_", instrumentNames[i], "_", featureNames[j],"_history", sep = "")]] <- renderPlotly({
        # Stored runs, ordered by date and restricted to this instrument.
        batch.1 <- allFeatures
        batch.1 <- batch.1[order(batch.1$Date),]
        batch.1 <- batch.1[batch.1$Instrument == this.instrument,]
        # Plotting frame: one row per stored run, value taken from the
        # current feature column.
        dat <- data.frame(
          date = c(as.POSIXct(batch.1$Date)),
          feat = c(batch.1[[featureNames[j]]]),
          batch = c(rep("Database", nrow(batch.1))),
          name = c(rownames(batch.1)),
          instrument = c(as.character(batch.1$Instrument)),
          type = c(as.character(batch.1$Type))
        )
        goodColors <- c("#8B4513", "#98FFCC", "#0000FF", "#808080", "#FF00FF", "#f88379", "#9400D3")
        p <- plot_ly(dat, x = ~date, y = ~feat, text = ~name, name = "database", key = ~instrument, showlegend = T, color = ~instrument, colors = "#808080", type = "bar") %>%
          layout(
            barmode = 'group',
            title = paste0("[", this.instrument, "] ", featureNames[j], sep = ""),
            showlegend = T,
            xaxis = list(
              title = "Date",
              # Quick zoom presets: 1W / 2W / 1M / 3M / 6M / 1Y / YTD / all.
              rangeselector = list(
                buttons = list(
                  list(
                    count = 7,
                    label = "1W",
                    step = "day",
                    stepmode = "backward"),
                  list(
                    count = 14,
                    label = "2W",
                    step = "day",
                    stepmode = "backward"),
                  list(
                    count = 1,
                    label = "1M",
                    step = "month",
                    stepmode = "backward"),
                  list(
                    count = 3,
                    label = "3M",
                    step = "month",
                    stepmode = "backward"),
                  list(
                    count = 6,
                    label = "6M",
                    step = "month",
                    stepmode = "backward"),
                  list(
                    count = 1,
                    label = "1Y",
                    step = "year",
                    stepmode = "backward"),
                  list(
                    count = 1,
                    label = "YTD",
                    step = "year",
                    stepmode = "todate"),
                  list(step = "all")
                )
              ),
              rangeslider = list(type = "date")
            ),
            yaxis = list(title = featureNames[j])
          ) %>%
          # Default viewport: the last 15 days (1296000 seconds) of data.
          rangeslider(start = (as.POSIXct(tail(dat$date,1)) - 1296000), end = as.POSIXct(tail(dat$date,1)))
        # Upload overlay intentionally disabled here (see the peptide-count
        # history plot above for the equivalent enabled version).
        # if (ms_runReader$run == T) {
        #   dat.batch2 <- ms_runReader$dat.batch2
        #   if (any(dat.batch2$instrument == this.instrument)) {
        #     dat.batch2 <- dat.batch2[dat.batch2$instrument == this.instrument,]
        #     p <- p %>%
        #       add_bars(data = dat.batch2, x = ~date, y = ~pep, key = ~type,
        #                marker = list(color = "#e60000"), text = ~name,
        #                name = ~instrument, color = ~name, showlegend = T, inherit = T)
        #
        #     p <- p %>%
        #       rangeslider(start = (as.POSIXct(tail(dat.batch2$date,1)) - 1296000), end = as.POSIXct(tail(dat.batch2$date,1)))
        #   }
        # }
        p
      })
    })
  })
################################################################################
# output$bar_plotHistory
#
# This will output bar plots of "peptide sequences identified" by time
################################################################################
output$bar_plotHistory <- renderPlotly({
allFeatures <- readData()
batch.1 <- allFeatures
batch.1 <- batch.1[order(batch.1$Date),]
dat <- data.frame(
date = c(batch.1$Date),
pep = c(batch.1$`Peptide Sequences Identified`),
batch = c(rep("Database", nrow(batch.1))),
name = c(rownames(batch.1)),
instrument = c(as.character(batch.1$Instrument))
)
goodColors <- c("#8B4513", "#98FFCC", "#0000FF", "#808080", "#FF00FF", "#f88379", "#9400D3")
p <- plot_ly(dat, x = ~date, source = "allPeptidePlot", y = ~pep, text = ~name, key = ~instrument, showlegend = T, color = ~instrument, colors = c(goodColors[1:(length(levels(dat$instrument))-1)], "#9400D3"), type = "bar") %>%
layout(
title = "Peptide Sequences Identified",
xaxis = list(
rangeselector = list(
buttons = list(
list(
count = 1,
label = "1 month",
step = "month",
stepmode = "backward"),
list(
count = 3,
label = "3 months",
step = "month",
stepmode = "backward"),
list(
count = 6,
label = "6 months",
step = "month",
stepmode = "backward"),
list(
count = 1,
label = "1 year",
step = "year",
stepmode = "backward"),
list(
count = 1,
label = "Year To Date",
step = "year",
stepmode = "todate"),
list(step = "all")
)
),
rangeslider = list(type = "date")
),
yaxis = list(title = "Peptide Sequences Identified")
)
if (ms_runReader$run == T) {
dat.batch2 <- ms_runReader$dat.batch2
p <- p %>%
add_bars(data = dat.batch2, x = ~date, y = ~pep, key = ~instrument, marker = list(color = "#e60000"), text = ~name, name = "inputs", showlegend = T, inherit = F)
}
p
})
  ################################################################################
  # output$box_individualFeatures
  #
  # Box plots of every feature's raw values, grouped by instrument, laid out
  # as 4 features per row across 3 rows. When a bar is clicked in
  # output$bar_plotHistory, the clicked run is overlaid as an orange marker
  # on each box.
  ################################################################################
  output$box_individualFeatures <- renderPlotly({
    # Click event from the all-instrument history bar chart.
    s <- event_data("plotly_click", source = "allPeptidePlot")
    allFeatures <- readData()
    batch.1 <- allFeatures
    batch.1 <- batch.1[order(batch.1$Date),]
    # Reshape - for raw: long format, one row per (run, feature) pair.
    ds1 <- reshape2::melt(batch.1, id = c("Date","Instrument"))
    ds1$value <- as.numeric(ds1$value)
    # when a peak (bar) is clicked from history plot, find a file that is selected
    if (length(s)) {
      # NOTE(review): this instrument list is hard-coded and may drift from
      # instrumentNames — confirm they are kept in sync.
      if (s[["key"]] %in% c("QEplus", "QECl", "QEHF", "QEFFX1", "Fusion")) {
        selectedInstrument <- batch.1[batch.1$Instrument == s[["key"]], ]
        # pointNumber is 0-based, hence the +1 into R's 1-based rows.
        selectedFileName <- rownames(selectedInstrument)[s[["pointNumber"]]+1]
      } else if (s[["key"]] == "input") {
        # Clicked bar belongs to the newly uploaded batch; reshape it too.
        batch.2 <- ms_runReader$batch.2
        ds2 <- reshape2::melt(batch.2, id = c("Date", "Instrument"))
        # ds2 <- filter(ds2, variable != "File size (kb)")
        ds2$value <- as.numeric(ds2$value)
        ds2$Instrument <- input$inst
        ds2$Instrument <- as.factor(ds2$Instrument)
        selectedFileName <- rownames(batch.2)[s[["pointNumber"]]+1]
      }
    }
    # NOTE(review): the three loops below (features 1:4, 5:8, 9:12) are
    # copy-pasted; they only differ in the index range and are candidates for
    # a shared helper once the conditional scoping of s/ds2/selectedFileName
    # is untangled.
    # plot the first 4 features
    p <- list()
    goodColors <- c("#8B4513", "#98FFCC", "#0000FF", "#808080", "#FF00FF", "#f88379", "#9400D3")
    for (i in 1:4) {
      ds1Row <- which(as.character(ds1$variable) %in% as.character(unique(ds1$variable)[i]))
      q <- plot_ly(ds1[ds1Row,], y = ~value, color = ~Instrument, colors = goodColors[1:(length(levels(ds1[ds1Row,]$Instrument)))], type = "box" ) %>%
        layout(boxmode = "group", paper_bgcolor='rgb(255,255,255)', plot_bgcolor='rgb(229,229,229)', xaxis = list(title = as.character(unique(ds1$variable)[i]), showticklabels = T))
      # when a peak (bar) is clicked from history plot, overlay the selection
      if (length(s)) {
        if (s[["key"]] %in% c("QEplus", "QECl", "QEHF", "QEFFX1", "Fusion")) {
          selected.ds1 <- ds1[ds1$Instrument == s[["key"]],]
          ds1Row.selected <- which(as.character(selected.ds1$variable) %in% as.character(unique(ds1$variable)[i]))
          q <- q %>%
            add_markers(data = selected.ds1[ds1Row.selected[s[["pointNumber"]]+1],], x = ~Instrument, y = ~value, marker = list(size = 10, color = "#ff9933"), name = selectedFileName)
        } else if (s[["key"]] == "input") {
          ds2Row <- which(as.character(ds2$variable) %in% as.character(unique(ds1$variable)[i]))
          q <- q %>%
            add_markers(data = ds2[ds2Row[s[["pointNumber"]]+1],], x = ~Instrument, y = ~value, marker = list(size = 10, color = "#ff9933"), name = selectedFileName)
        }
      }
      p <- append(p, list(q))
    }
    s1 <- subplot(p, nrows = 1, titleX = TRUE)
    # plot next 4 features (same logic as above, features 5-8)
    p <- list()
    for (i in 5:8) {
      ds1Row <- which(as.character(ds1$variable) %in% as.character(unique(ds1$variable)[i]))
      q <- plot_ly(ds1[ds1Row,], y = ~value, color = ~Instrument, colors = goodColors[1:(length(levels(ds1[ds1Row,]$Instrument)))], type = "box" ) %>%
        layout(boxmode = "group", paper_bgcolor='rgb(255,255,255)', plot_bgcolor='rgb(229,229,229)', xaxis = list(title = as.character(unique(ds1$variable)[i]), showticklabels = T))
      # when a peak (bar) is clicked from history plot
      if (length(s)) {
        if (s[["key"]] %in% c("QEplus", "QECl", "QEHF", "QEFFX1", "Fusion")) {
          selected.ds1 <- ds1[ds1$Instrument == s[["key"]],]
          ds1Row.selected <- which(as.character(selected.ds1$variable) %in% as.character(unique(ds1$variable)[i]))
          q <- q %>%
            add_markers(data = selected.ds1[ds1Row.selected[s[["pointNumber"]]+1],], x = ~Instrument, y = ~value, marker = list(size = 10, color = "#ff9933"), name = selectedFileName)
        } else if (s[["key"]] == "input") {
          ds2Row <- which(as.character(ds2$variable) %in% as.character(unique(ds1$variable)[i]))
          q <- q %>%
            add_markers(data = ds2[ds2Row[s[["pointNumber"]]+1],], x = ~Instrument, y = ~value, marker = list(size = 10, color = "#ff9933"), name = selectedFileName)
        }
      }
      p <- append(p, list(q))
    }
    s2 <- subplot(p, nrows = 1, titleX = TRUE)
    # plot next 4 features (same logic as above, features 9-12)
    p <- list()
    for (i in 9:12) {
      ds1Row <- which(as.character(ds1$variable) %in% as.character(unique(ds1$variable)[i]))
      q <- plot_ly(ds1[ds1Row,], y = ~value, color = ~Instrument, colors = goodColors[1:(length(levels(ds1[ds1Row,]$Instrument)))], type = "box" ) %>%
        layout(boxmode = "group", paper_bgcolor='rgb(255,255,255)', plot_bgcolor='rgb(229,229,229)', xaxis = list(title = as.character(unique(ds1$variable)[i]), showticklabels = T))
      # when a peak (bar) is clicked from history plot
      if (length(s)) {
        if (s[["key"]] %in% c("QEplus", "QECl", "QEHF", "QEFFX1", "Fusion")) {
          selected.ds1 <- ds1[ds1$Instrument == s[["key"]],]
          ds1Row.selected <- which(as.character(selected.ds1$variable) %in% as.character(unique(ds1$variable)[i]))
          q <- q %>%
            add_markers(data = selected.ds1[ds1Row.selected[s[["pointNumber"]]+1],], x = ~Instrument, y = ~value, marker = list(size = 10, color = "#ff9933"), name = selectedFileName)
        } else if (s[["key"]] == "input") {
          ds2Row <- which(as.character(ds2$variable) %in% as.character(unique(ds1$variable)[i]))
          q <- q %>%
            add_markers(data = ds2[ds2Row[s[["pointNumber"]]+1],], x = ~Instrument, y = ~value, marker = list(size = 10, color = "#ff9933"), name = selectedFileName)
        }
      }
      p <- append(p, list(q))
    }
    s3 <- subplot(p, nrows = 1, titleX = TRUE)
    # plot all feature boxplots: stack the three rows into one figure
    subplot(s1,s2,s3, nrows = 3, margin = 0.05, heights = rep(1/3, 3), titleX = TRUE) %>%
      layout(showlegend = FALSE)
  })
output$ms_saveUI <- renderUI({
if (ms_runReader$run & !ms_runReader$example) {
sidebarPanel(
div(
h4("Click to save this batch to the Database"),
QC_recaptchaUI("MS_recaptcha_test", sitekey = "6Lfg_jYUAAAAAAVwxpEHlaRupSp87bl2Wj1pACkn"),
actionButton("MS_saveDB", "Save")
),
width = "100%"
)
}
})
####Irene's addition#########################################################################################################
#calculate score function#############################################################################################
calculateQC <- function(calculateFile, Instrument_50_re) {
peptideScore<- 0.5*calculateFile[1,1]
Feature11Score <- Instrument_50_re*calculateFile[1,2:12]
sum11sCORE<- sum(Feature11Score)
QCscore<-peptideScore+sum11sCORE
QCScore <- round(QCscore, digits=3)
return(QCScore)
}
#####################################################################################################################
#calculate the QC score
#####################################################################################################################
lapply(1:length(instrumentNames), function(i) {
output[[paste0("score_", instrumentNames[i], "_check", sep = "")]] <- renderText({
this.instrument <- instrumentNames[i]
allFeatures <- readData()
batch.1 <- allFeatures
if (ms_runReader$run == T) {
batch.2 <- ms_runReader$batch.2
if (any(batch.2$instrument == this.instrument)) {
batch.2 <- batch.2[batch.2$instrument == this.instrument,]
}
}
#if a peak bar being selected, find the file that is selected
s <- event_data("plotly_click",source = "peptideNumPlot")
if (length(s)) {
if ((s[["key"]] %in% this.instrument) & (s[["curveNumber"]] == 0)) {
selectedInstrument <- batch.1[batch.1$Instrument == s[["key"]], ]
#read raw data X (11 features)for that selectedInstrument data
X.raw_Instrument <- selectedInstrument[,2:12]
#read raw data Y (peptide value) for that selectedInstrument data
y_Instrument <- selectedInstrument[,1]
#find the high leverage points and remove from the existing database#############
selectInstrument_leverage <- selectedInstrument[,1:12]
colnames(X.raw_Instrument) <- c("MS.MS.Identifiedp", "MassStandardDevisationppm","MS.MS.Identified","PeaksRepeatedlySequencedp" ,"MS.MS" ,"Retention.length.FWHM","Score","Intensity","MassPrecision.ppm","CycleTime","MassError")
results_Instrument <- lm(y_Instrument ~ MS.MS.Identifiedp + MassStandardDevisationppm+MS.MS.Identified+PeaksRepeatedlySequencedp +MS.MS +Retention.length.FWHM+Score+Intensity+MassPrecision.ppm+CycleTime+MassError, data=X.raw_Instrument)
D<- cooks.distance(results_Instrument)
#no of Feature(parametrs) / no of observers (p/n)
#find the cut off value, identify the leverage datasets, then remove them
HL<- as.matrix(which (D> qf(0.5,11,nrow(selectInstrument_leverage)-11)), rownames.force = NA)
removeName <- as.list(row.names(HL))
rownames(HL)<- row.names(HL)
remove <- rownames(HL)
#REMOVE the high leverage points from the datasets
selectInstrument_remove <- selectInstrument_leverage[!rownames(selectInstrument_leverage)%in% remove,]
#normalize the data from the existing database
#Function to normalise the data (only existing database) #
#firstly normalize the y value
y_Instrument_remove <- selectInstrument_remove[,1]
scaled_y_Instrument_re<-(y_Instrument_remove - min(y_Instrument_remove))/(max(y_Instrument_remove)-min(y_Instrument_remove))
#secondly normalize the x values
X_Instrument_remove <- selectInstrument_remove[,2:12]
median_features <- apply(X_Instrument_remove, 2, FUN = median)
median_features <- t(median_features)
median_features <- as.data.frame(median_features)
# assumes that the column order of "median_features" and "X_QECl_remove" are identical
range01_Instrument_t <- X_Instrument_remove
for (i in 1:ncol(X_Instrument_remove)) {
range01_Instrument_t[,i] <- range01_Instrument_t[,i]/median_features[1,i]
}
#combine the y and x after normalization
range01_Instrument_t <- as.matrix(range01_Instrument_t)
normalize_Instrument <- cbind(scaled_y_Instrument_re,range01_Instrument_t)
####Build the function for Instrument Specific Model analysis with normalized datasets#############
set.seed(1)
cv.out_Instrument_t <- cv.glmnet(x=range01_Instrument_t, y= scaled_y_Instrument_re,alpha=0, nfolds=5)
#regression with the best lambda
ridge.mod_Instrument_t <- glmnet (x=range01_Instrument_t, y=scaled_y_Instrument_re,alpha = 0)
#contribution values from 11 features
ridge.coef_Instrument_t <- predict (ridge.mod_Instrument_t, type="coefficients", s=cv.out_Instrument_t$lambda.min)
#before converting the 50% weights
contributionInstrument_t <- t(ridge.coef_Instrument_t[-1,1])
abs_contributionInstrument_t <- abs(contributionInstrument_t)
sum_Instrument_t <- sum(abs_contributionInstrument_t)
#get each weights of 11 feature among 50%
weight_Instrument_t <- function(t){t/sum_Instrument_t}
#keep the sign to get the percentage of the weights
norm_Instrument_t <- weight_Instrument_t(contributionInstrument_t)
Instrument_50_t <- norm_Instrument_t *0.5
#calculate the weights for 11 features and to 3 digits
Instrument_50_re<- round(Instrument_50_t,digits=3)
#write.csv(Instrument_50_re, file="./weights.csv")
#selected the data when user click on specific bar
selectedFileName <- rownames(selectedInstrument)[s[["pointNumber"]]+1]
oldData<- selectedInstrument[grep(selectedFileName,row.names(selectedInstrument),ignore.case=TRUE),]
#Function to normalise the olddata #
#firstly normalize the y value
y_oldData <- oldData[,1]
scaled_y_oldData<-(y_oldData - min(selectedInstrument[,1]))/(max(selectedInstrument[,1])-min(selectedInstrument[,1]))
#secondly normalize the x values
range01_Instrument_t <- oldData[,2:12]
# assumes order of columns are identical
for (i in 1:ncol(range01_Instrument_t)) {
range01_Instrument_t[,i] <- range01_Instrument_t[,i]/median_features[1,i]
}
##########################################################################
# range01 <- function(x){(x-min(selectedInstrument[,2:12]))/(max(selectedInstrument[,2:12])-min(selectedInstrument[,2:12]))}
# range01_Instrument_msms<- range01(X_oldData$`MS/MS Identified [%]`)
# range01_Instrument_msd <- range01(X_oldData$`Mass Standard Deviation [ppm]`)
# range01_Instrument_msms2<-range01(X_oldData$`MS/MS Identified`)
# range01_Instrument_PRS <- range01(X_oldData$`Peaks Repeatedly Sequenced [%]`)
# range01_Instrument_ms_ms<- range01(X_oldData$`MS/MS`)
# range01_Instrument_FWHM <- range01(X_oldData$`Retention length (FWHM)`)
# range01_Instrument_score <- range01(X_oldData$Score)
# range01_Instrument_Intensity <- range01(X_oldData$Intensity)
# range01_Instrument_ppm <- range01(X_oldData$`Mass precision [ppm]`)
# range01_Instrument_cycletime<- range01(X_oldData$`Cycle time`)
# range01_Instrument_masserror<- range01(X_oldData$`Mass Error [ppm]`)
#
# #combine all the variable to the table
# range01_Instrument_t <- cbind(range01_Instrument_msms, range01_Instrument_msd,range01_Instrument_msms2,range01_Instrument_PRS,range01_Instrument_ms_ms,range01_Instrument_FWHM,range01_Instrument_score,range01_Instrument_Intensity,range01_Instrument_ppm,range01_Instrument_cycletime,range01_Instrument_masserror)
# #format the column names
# colnames(range01_Instrument_t) <- c("MS/MS Identified [%]","Mass Standard Deviation [ppm]","MS/MS Identified","Peaks Repeatedly Sequenced [%]","MS/MS","Retention length (FWHM)","Score","Intensity","Mass precision [ppm]","Cycle time","Mass Error [ppm]")
##########################################################################
# combine the selected (y and x) after normalization
NoroldData <- cbind(scaled_y_oldData,range01_Instrument_t)
calculateFile <- as.matrix(NoroldData[1,,drop=F])
# use function to calculate the QC score for each old dataset,
Score <- round(calculateQC(calculateFile, Instrument_50_re),digits=3)
Score <- paste('Quality score for "',selectedFileName, '" is: ',Score, sep="")
###If new dataset was clicked
} else if (s[["key"]] == "input") {
# order the columns
newInput <- batch.2[,match(colnames(allFeatures), colnames(batch.2))]
#this.instrument <- as.character(batch.2$Instrument)
selectedInstrument <- batch.1[grep(this.instrument,batch.1$Instrument,ignore.case = FALSE), ]
oldselectedInstrument <- selectedInstrument[,match(colnames(allFeatures),colnames(selectedInstrument))]
selectedCombined<- rbind(oldselectedInstrument, newInput)
# normalise the combineddata #
#firstly normalize the combined y value
#when the file being selected from the new input
selectedFileName <- rownames(batch.2[batch.2$Instrument == this.instrument,])[s[["curveNumber"]]]
#get that row with the selected filename for the 12 features
newData <- newInput[grep(selectedFileName,row.names(newInput),ignore.case=TRUE),]
#to normalise the new data #
#firstly normalize the new y value
y_newData <- newData[,1]
scaled_y_newData<-(y_newData - min(selectedCombined[,1]))/(max(selectedCombined[,1])-min(selectedCombined[,1]))
median_features <- apply(oldselectedInstrument[,2:12], 2, FUN = median)
median_features <- t(median_features)
median_features <- as.data.frame(median_features)
#secondly normalize the new x values
X_newData <- newData[,2:12]
# assumes that the column order of "median_features" and "X_QECl_remove" are identical
range01_x_newData <- X_newData
for (i in 1:ncol(range01_x_newData)) {
range01_x_newData[,i] <- range01_x_newData[,i]/median_features[1,i]
}
# range01 <- function(x){(x-min(selectedCombined[,2:12]))/(max(selectedCombined[,2:12])-min(selectedCombined[,2:12]))}
# range01_Instrument_msms<- range01(X_newData$`MS/MS Identified [%]`)
# range01_Instrument_msd <- range01(X_newData$`Mass Standard Deviation [ppm]`)
# range01_Instrument_msms2<-range01(X_newData$`MS/MS Identified`)
# range01_Instrument_PRS <- range01(X_newData$`Peaks Repeatedly Sequenced [%]`)
# range01_Instrument_ms_ms<- range01(X_newData$`MS/MS`)
# range01_Instrument_FWHM <- range01(X_newData$`Retention length (FWHM)`)
# range01_Instrument_score <- range01(X_newData$Score)
# range01_Instrument_Intensity <- range01(X_newData$Intensity)
# range01_Instrument_ppm <- range01(X_newData$`Mass precision [ppm]`)
# range01_Instrument_cycletime<- range01(X_newData$`Cycle time`)
# range01_Instrument_masserror<- range01(X_newData$`Mass Error [ppm]`)
#
# #combine all the variable to the table
# range01_x_newData <- cbind(range01_Instrument_msms, range01_Instrument_msd,range01_Instrument_msms2,range01_Instrument_PRS,range01_Instrument_ms_ms,range01_Instrument_FWHM,range01_Instrument_score,range01_Instrument_Intensity,range01_Instrument_ppm,range01_Instrument_cycletime,range01_Instrument_masserror)
# #format the column names
# colnames(range01_x_newData) <- c("MS/MS Identified [%]","Mass Standard Deviation [ppm]","MS/MS Identified","Peaks Repeatedly Sequenced [%]","MS/MS","Retention length (FWHM)","Score","Intensity","Mass precision [ppm]","Cycle time","Mass Error [ppm]")
# combine the y and x after normalization
normalize_newxy <- cbind(scaled_y_newData,as.matrix(range01_x_newData))
################################################################################################################
#build the model depend on which instrument from the existing database for the new input selected
################################################################################################################
X.raw_Instrument <- selectedInstrument[,2:12]
#read raw data Y (peptide value) for that selectedInstrument data
y_Instrument <- selectedInstrument[,1]
#find the high leverage points and remove from the existing database#############
selectInstrument_leverage <- selectedInstrument[,1:12]
colnames(X.raw_Instrument) <- c("MS.MS.Identifiedp", "MassStandardDevisationppm","MS.MS.Identified","PeaksRepeatedlySequencedp" ,"MS.MS" ,"Retention.length.FWHM","Score","Intensity","MassPrecision.ppm","CycleTime","MassError")
results_Instrument <- lm(y_Instrument ~ MS.MS.Identifiedp + MassStandardDevisationppm+MS.MS.Identified+PeaksRepeatedlySequencedp +MS.MS +Retention.length.FWHM+Score+Intensity+MassPrecision.ppm+CycleTime+MassError, data=X.raw_Instrument)
D<- cooks.distance(results_Instrument)
#no of Feature(parametrs) / no of observers (p/n)
#find the cut off value, identify the leverage datasets, then remove them
HL<- as.matrix(which (D> qf(0.5,11,nrow(selectInstrument_leverage)-11)), rownames.force = NA)
removeName <- as.list(row.names(HL))
rownames(HL)<- row.names(HL)
remove <- rownames(HL)
#REMOVE the high leverage points from the datasets
selectInstrument_remove <- selectInstrument_leverage[!rownames(selectInstrument_leverage)%in% remove,]
#normalize the data from the existing database
#Function to normalise the data (only existing database) #
#firstly normalize the y value
y_Instrument_remove <- selectInstrument_remove[,1]
scaled_y_Instrument_re<-(y_Instrument_remove - min(y_Instrument_remove))/(max(y_Instrument_remove)-min(y_Instrument_remove))
#secondly normalize the x values
range01_Instrument_t <- selectInstrument_remove[,2:12]
for (i in 1:ncol(range01_Instrument_t)) {
range01_Instrument_t[,i] <- range01_Instrument_t[,i]/median_features[1,i]
}
# range01 <- function(k){(k-min(k))/(max(k)-min(k))}
#
# range01_Instrument_msms<- range01(X_Instrument_remove$`MS/MS Identified [%]`)
# range01_Instrument_msd <- range01(X_Instrument_remove$`Mass Standard Deviation [ppm]`)
# range01_Instrument_msms2<-range01(X_Instrument_remove$`MS/MS Identified`)
# range01_Instrument_PRS <- range01(X_Instrument_remove$`Peaks Repeatedly Sequenced [%]`)
# range01_Instrument_ms_ms<- range01(X_Instrument_remove$`MS/MS`)
# range01_Instrument_FWHM <- range01(X_Instrument_remove$`Retention length (FWHM)`)
# range01_Instrument_score <- range01(X_Instrument_remove$Score)
# range01_Instrument_Intensity <- range01(X_Instrument_remove$Intensity)
# range01_Instrument_ppm <- range01(X_Instrument_remove$`Mass precision [ppm]`)
# range01_Instrument_cycletime<- range01(X_Instrument_remove$`Cycle time`)
# range01_Instrument_masserror<- range01(X_Instrument_remove$`Mass Error [ppm]`)
#
#combine all the variable to the table
# range01_Instrument_t <- cbind(range01_Instrument_msms, range01_Instrument_msd,range01_Instrument_msms2,range01_Instrument_PRS,range01_Instrument_ms_ms,range01_Instrument_FWHM,range01_Instrument_score,range01_Instrument_Intensity,range01_Instrument_ppm,range01_Instrument_cycletime,range01_Instrument_masserror)
#format the column names
# colnames(range01_Instrument_t) <- c("MS/MS Identified [%]","Mass Standard Deviation [ppm]","MS/MS Identified","Peaks Repeatedly Sequenced [%]","MS/MS","Retention length (FWHM)","Score","Intensity","Mass precision [ppm]","Cycle time","Mass Error [ppm]")
#combine the y and x after normalization
normalize_Instrument <- cbind(scaled_y_Instrument_re,range01_Instrument_t)
####Build the function for Instrument Specific Model analysis with normalized datasets#############
set.seed(1)
cv.out_Instrument_t <- cv.glmnet(x=as.matrix(range01_Instrument_t), y= scaled_y_Instrument_re,alpha=0, nfolds=5)
#regression with the best lambda
ridge.mod_Instrument_t <- glmnet (x=as.matrix(range01_Instrument_t), y=scaled_y_Instrument_re,alpha = 0)
#contribution values from 11 features
ridge.coef_Instrument_t <- predict (ridge.mod_Instrument_t, type="coefficients", s=cv.out_Instrument_t$lambda.min)
#before converting the 50% weights
contributionInstrument_t <- t(ridge.coef_Instrument_t[-1,1])
abs_contributionInstrument_t <- abs(contributionInstrument_t)
sum_Instrument_t <- sum(abs_contributionInstrument_t)
#get each weights of 11 feature among 50%
weight_Instrument_t <- function(t){t/sum_Instrument_t}
#keep the sign to get the percentage of the weights
norm_Instrument_t <- weight_Instrument_t(contributionInstrument_t)
Instrument_50_t <- norm_Instrument_t *0.5
#calculate the weights for 11 features and to 3 digits
Instrument_50_re<- round(Instrument_50_t,digits=3)
#write.csv(Instrument_50_re, file="./weights.csv")
################################################################################################################
calculateFile <- as.matrix(normalize_newxy[1,,drop=F])
# to calculate the QC score for each new dataset, x is the new dataset of the row
Score <- round(calculateQC(calculateFile, Instrument_50_re),digits=3)
Score <- paste('Quality score for new input "',selectedFileName, '" is: ',Score, sep="")
#tmp <- NormalizenewXY(var1, cvar2)
# qcscore <- calculateQC(tmp, var4)
}
}
})
})
}
|
7f6ef2c75d217b014d8093d4704b1995bb3c56cb
|
036a2a87b291db062719a8db83b88e1fba01306d
|
/R/hello.R
|
badedb301540e0bfedd1dde532a240a8982b4064
|
[] |
no_license
|
KellenX/RPackageHelloWorld
|
2522ee598686e4475fe4caabb210a1a570250cb5
|
a748bd01fac694368503d5b961107f468df25d5f
|
refs/heads/main
| 2023-02-06T10:59:58.451205
| 2021-01-01T02:11:35
| 2021-01-01T02:11:35
| 325,891,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 327
|
r
|
hello.R
|
#' Print a greeting addressed to a person
#' @title Some text
#' @description Builds a greeting string for `x`, prints it, and returns the
#'   printed string (invisibly, via `print()`).
#'
#'
#' @param x The name of the person to say hi to
#' @return The output from \code{\link{print}}
#' @examples
#' hello('Foo')
#' \dontrun{hello('Bar')
#' }
#' @export
hello <- function(x) {
  # Compose the message first, then hand it to print(), which also
  # returns it so callers can capture the greeting.
  greeting <- paste0("Hello,", x, ", this is the world!")
  print(greeting)
}
|
cee2253be8d5481af89da1ceca1a4db9d602291f
|
c1832bdea9445795c9a216f97dcdb6e3bbc02c2a
|
/R/tam_linking_extract_list.R
|
f66ea36d6e768df89ff7f351f73f2335d55bc684
|
[] |
no_license
|
alexanderrobitzsch/TAM
|
c5b4a04094336adb198916d87590917f9f7f4c51
|
7fd74230a55c8296fd8865db1700009445e4e54f
|
refs/heads/master
| 2022-09-04T10:52:37.928736
| 2022-08-29T09:31:35
| 2022-08-29T09:31:35
| 95,301,463
| 16
| 10
| null | 2020-06-28T07:59:48
| 2017-06-24T14:08:58
|
R
|
UTF-8
|
R
| false
| false
| 343
|
r
|
tam_linking_extract_list.R
|
## File Name: tam_linking_extract_list.R
## File Version: 0.05

#' Extract selected entries from a list, optionally renaming them
#'
#' @param input List to extract elements from.
#' @param entries Names (or indices) of the elements of \code{input} to copy.
#' @param names Optional output names; defaults to \code{entries}.
#' @return A named list with one element per entry in \code{entries}
#'   (an empty list when \code{entries} is empty).
tam_linking_extract_list <- function( input, entries, names=NULL )
{
    if ( is.null(names) ){
        names <- entries
    }
    output <- list()
    # FIX: use seq_along() instead of 1:NE -- with zero entries, 1:0
    # iterates over c(1, 0) and fails on the NA name lookup.
    for (ee in seq_along(entries)){
        output[[ names[ee] ]] <- input[[ entries[ee] ]]
    }
    return(output)
}
|
e670f5a0695e8980e3d754f4d03e3807f6305ea1
|
77dc8a24ea79fa40e4e22d8a8e7d9f10fca3fae3
|
/code/generate_drug_data.R
|
ce7565020b362d18b50dbd79c9c940eff62789e1
|
[] |
no_license
|
Duke-GCB/ddh
|
e6801c0b83c4a722e227e20b1b365fc0e3bb65c4
|
372f7de176d3a4d001dabea9bf868e3920274352
|
refs/heads/master
| 2021-06-28T14:29:35.002158
| 2021-01-15T01:25:30
| 2021-01-15T01:25:30
| 210,918,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,853
|
r
|
generate_drug_data.R
|
## Load the PRISM drug-screen matrix and its metadata, censor ambiguous
## drugs, resolve PubChem CIDs, and build a long (id, drug, log2fc) table.
#load libraries
library(tidyverse)
library(here)
library(janitor)
library(corrr)
library(moderndive)
library(purrr)
library(webchem)
#rm(list=ls())
#read current release information to set parameters for download
# NOTE(review): current_release.R is assumed to define `release` and the
# *_url variables (prism_url, prismmeta_url, achilles_*_url) used below --
# confirm against code/current_release.R.
source(here::here("code", "current_release.R"))
# wall-clock start; paired with time_end_data at the bottom of the script
time_begin_data <- Sys.time()
#These are log-fold change collapsed replicates with outliers and controls removed of PRISM
prism <- read_csv(prism_url, col_names = TRUE) %>%
clean_names()
#censor drugs that have gene names #"lta", "nppb", "prima1"
# (drug names identical to gene symbols would be ambiguous downstream)
censor_names <- c("LTA", "NPPB", "PRIMA1")
censor_ids <- c("brd_k52914072_001_01_5_2_5_mts004", "brd_k89272762_001_12_7_2_5_hts", "brd_k15318909_001_10_5_2_5_hts")
prism <-
prism %>%
select(-any_of(censor_ids))
#get meta file
prism_meta <- read_csv(prismmeta_url, col_names = TRUE) %>%
clean_names() %>%
mutate(clean_drug = make_clean_names(column_name)) %>%
distinct(name, .keep_all = TRUE) %>% #drop rows that have duplicate names
filter(!name %in% censor_names) #censor 3 drugs from meta
# prism_sample <- prism_meta %>%
# sample_n(3)
# cids consists of "query" column and "cid" column
# LONG STEP (3h); can load from /data/ when testing
# PubChem CID lookup for every drug name (network-bound, via webchem)
cids <- get_cid(prism_meta$name)
prism_meta <-
prism_meta %>%
left_join(cids, by = c("name" = "query")) %>%
filter(!is.na(name)) %>%
distinct(clean_drug, .keep_all = TRUE)
#make name/join/search df
prism_names <- prism_meta %>%
select("name", "moa", "cid", "clean_drug")
# long form: one row per (x1 id column, drug name, log2 fold-change)
prism_long <- prism %>% #need this for joining below
pivot_longer(cols = where(is.numeric), names_to = "drug", values_to = "log2fc") %>%
left_join(prism_names, by = c("drug" = "clean_drug")) %>%
filter(!is.na(name)) %>% #remove drugs which don't have a searchable name
select(x1, name, log2fc)
## Load Achilles CRISPR log2FC data, collapse replicates per gene/cell line,
## stack with PRISM into one matrix, correlate, and build a permutation null.
#get log2FC values of ACHILLES for integration
achilles_log2fc_raw <- read_csv(achilles_log_url, col_names = TRUE)
achilles_guide_map <- read_csv(achilles_guide_map_url, col_names = TRUE)
achilles_rep_map <- read_csv(achilles_rep_map_url, col_names = TRUE)
#clean
# strip trailing " (digits)" suffixes from gene labels, e.g. "TP53 (7157)"
achilles_log2fc <- achilles_guide_map
achilles_guide_map$gene <- str_remove_all(achilles_guide_map$gene, "\\s\\(\\d+\\)")
achilles_log2fc <- achilles_guide_map %>%
left_join(achilles_log2fc_raw, by = c("sgrna" = "X1")) #%>% select(1:100)
# hard-coded column range spans the first..last replicate columns of this
# release -- TODO confirm the names when the release changes
achilles_log2fc_long <- achilles_log2fc %>%
pivot_longer(cols = "143B-311Cas9_RepA_p6_batch3":"BT549-311cas9 Rep A p5_batch2", names_to = "cell_line", values_to = "log2fc")
achilles_log2fc_long <- achilles_log2fc_long %>%
left_join(achilles_rep_map, by = c( "cell_line" = "replicate_ID")) %>%
select(DepMap_ID, gene, log2fc)
# collapse guides/replicates: mean (and sd) log2FC per cell line and gene
achilles_log2fc_long_mean <- achilles_log2fc_long %>%
group_by(DepMap_ID, gene) %>%
summarize(meanlog2fc = mean(log2fc),
sdlog2fc = sd(log2fc)) %>% #add QC for SD that is too high by filter()?
ungroup()
#combine, and pivot_wider for matrix generation
# stack Achilles (gene columns) and PRISM (drug columns) keyed by DepMap_ID
combined <- achilles_log2fc_long_mean %>%
dplyr::select(x1 = DepMap_ID, name = gene, log2fc = meanlog2fc) %>%
bind_rows(prism_long) %>%
rename(DepMap_ID = x1) %>%
pivot_wider(names_from = name, values_from = log2fc)
#Combined CORRELATION MATRIX
# pairwise correlations across every gene and drug column
combined_cor <- combined %>%
dplyr::select(-1) %>%
corrr::correlate() %>%
dplyr::rename(rowname = 1)
#test gene corrs ##comment out
# fav_gene <- "TSC1"
# combined_cor %>%
# focus(!!fav_gene) %>%
# arrange(desc(.[[2]]))
#resample for stats
#make some long files
combined_cor_long <- combined_cor %>%
tidyr::pivot_longer(cols = where(is.numeric), names_to = "gene_symbol", values_to = "r")
#Permutation tests
# empirical null: 1000 resamples of 20k correlations each; the grand
# mean/sd define the significance band used for the cutoff tables below
virtual_prism <- combined_cor_long %>%
dplyr::filter(!is.na(r)) %>%
moderndive::rep_sample_n(size = 20000, reps = 1000) %>%
dplyr::group_by(replicate) %>%
dplyr::summarize(mean = mean(r), max = max(r), min = min(r), sd = sd(r))
mean_virtual_prism <- mean(virtual_prism$mean)
# NOTE(review): this is the mean of per-replicate sds, not a pooled sd
sd_virtual_prism <- mean(virtual_prism$sd)
drug_sd_threshold <- 2
# +/- 2 sd band around the resampled mean correlation
prism_upper <- mean_virtual_prism + drug_sd_threshold*sd_virtual_prism
prism_lower <- mean_virtual_prism - drug_sd_threshold*sd_virtual_prism
#save
saveRDS(drug_sd_threshold, file = here::here("data", paste0(release, "_drug_sd_threshold.Rds")))
saveRDS(prism_lower, file = here::here("data", paste0(release, "_prism_lower.Rds")))
saveRDS(prism_upper, file = here::here("data", paste0(release, "_prism_upper.Rds")))
saveRDS(mean_virtual_prism, file = here::here("data", paste0(release, "_mean_virtual_prism.Rds")))
saveRDS(sd_virtual_prism, file = here::here("data", paste0(release, "_sd_virtual_prism.Rds")))
#cutoff and make tables
# accumulator tibbles: one nested row per gene (its correlated drugs) and
# one per drug (its correlated genes)
gene_drugs_table <- tibble(
fav_gene = character(),
data = list()
)
drug_genes_table <- tibble(
fav_drug = character(),
data = list()
)
#define list
#genes <- sample(names(select(combined_cor, A1BG:ZZZ3)), size = 10) #comment this out
#drugs <- sample(names(select(combined_cor, !rowname:ZZZ3)), size = 10) #comment this out
# gene columns run A1BG..ZZZ3; every column after ZZZ3 is a drug
genes <- names(select(combined_cor, A1BG:ZZZ3))
drugs <- names(select(combined_cor, !1:ZZZ3))
#18524+4514 == 23038 (same as combined_cor)
#drug table for a gene
# for each gene: keep drug rows whose correlation exceeds the permutation
# threshold (prism_upper), express r as a z-score against the null, nest
for (fav_gene in genes) {
message("Drug tables for ", fav_gene)
gene_top <-
combined_cor %>%
dplyr::select(1, fav_gene) %>%
dplyr::arrange(desc(.[[2]])) %>% #use column index
dplyr::filter(rowname %in% drugs, #remove genes
.[[2]] > prism_upper) %>% #mean +/- 2sd
dplyr::rename(drug = 1,
r2 = 2) %>%
dplyr::mutate(r2 = round(r2, 2),
z_score = round((r2 - mean_virtual_prism)/sd_virtual_prism, 1)) %>%
dplyr::select(drug, z_score, r2)
gene_table <-
gene_top %>%
dplyr::mutate(fav_gene = fav_gene) %>%
dplyr::group_by(fav_gene) %>%
tidyr::nest()
# NOTE(review): growing the tibble with bind_rows() inside the loop is
# O(n^2); collecting into a list and binding once would be faster.
gene_drugs_table <- gene_drugs_table %>%
dplyr::bind_rows(gene_table)
}
#gene table for a drug query
# For each drug: keep gene rows whose correlation with the drug exceeds the
# permutation threshold, express r as a z-score against the null, and nest
# the result as one row of drug_genes_table.
for (fav_drug in drugs) {
  message("Gene tables for ", fav_drug)
  drug_top <-
    combined_cor %>%
    # FIX: was `dplyr::select(1, fav_gene)` -- a leftover from the gene
    # loop above, so every drug's table was built from the LAST gene's
    # column instead of the drug's own column.
    dplyr::select(1, all_of(fav_drug)) %>%
    dplyr::arrange(desc(.[[2]])) %>% #use column index
    dplyr::filter(rowname %in% genes, #remove drugs
                  .[[2]] > prism_upper) %>% #mean +/- 2sd
    dplyr::rename(gene = 1,
                  r2 = 2) %>%
    dplyr::mutate(r2 = round(r2, 2),
                  z_score = round((r2 - mean_virtual_prism)/sd_virtual_prism, 1)) %>%
    dplyr::select(gene, z_score, r2)
  drug_table <- drug_top %>%
    dplyr::mutate(fav_drug = fav_drug) %>%
    dplyr::group_by(fav_drug) %>%
    tidyr::nest()
  drug_genes_table <- drug_genes_table %>%
    bind_rows(drug_table)
}
#TEST get data out
# (scratch helpers showing how the nested tables are meant to be consumed;
# intentionally left commented out)
# make_drug_table <- function(gene_data = gene_drugs_table, gene_symbol) {
# gene_data %>%
# dplyr::filter(fav_gene %in% gene_symbol) %>%
# tidyr::unnest(data) %>%
# dplyr::arrange(desc(r2)) %>%
# dplyr::rename("Query" = "fav_gene", "Drug" = "drug", "R^2" = "r2", "Z Score" = "z_score")
# }
# make_gene_table <- function(drug_data = drug_genes_table, drug_name) {
# drug_data %>%
# dplyr::filter(fav_drug %in% drug_name) %>%
# tidyr::unnest(data) %>%
# dplyr::arrange(desc(r2)) %>%
# dplyr::rename("Query" = "fav_drug", "Gene" = "gene", "R^2" = "r2", "Z Score" = "z_score")
# }
#combined_cor_long %>% arrange(desc(r)) %>% filter(x %in% drugs) %>% filter(y %in% genes)
#save files
# persist release-stamped artifacts; `release` is assumed to come from the
# sourced current_release.R -- TODO confirm
saveRDS(prism, file = here::here("data", paste0(release, "_prism.Rds")))
saveRDS(prism_meta, file = here::here("data", paste0(release, "_prism_meta.Rds")))
saveRDS(prism_names, file = here::here("data", paste0(release, "_prism_names.Rds")))
#saveRDS(combined_cor, file = here::here("data", paste0(release, "_combined_cor.Rds")))
saveRDS(gene_drugs_table, file=here::here("data", paste0(release, "_gene_drugs_table.Rds")))
saveRDS(drug_genes_table, file=here::here("data", paste0(release, "_drug_genes_table.Rds")))
saveRDS(cids, file = here::here("data", paste0(release, "_cids.Rds")))
#how long
# wall-clock end; paired with time_begin_data at the top
time_end_data <- Sys.time()
|
bb052a5005fca5c7de90b10155038a92b0f8ce8c
|
8f5056ddfcf185da545eab99728b545af931c015
|
/causalA16/man/make_data_for_figure_4_1.Rd
|
063d705856c11127de6d712c9eca08f86054f5da
|
[] |
no_license
|
asrosenberg/Quant3_A16
|
2bb9590d0c8370309555b8f6150efd2c48c86c79
|
11f8ef8c069d3d864635818fce285fd5bd797e12
|
refs/heads/master
| 2020-04-12T03:51:07.734178
| 2017-01-14T17:20:57
| 2017-01-14T17:20:57
| 68,039,960
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 443
|
rd
|
make_data_for_figure_4_1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/causal_graphs.R
\name{make_data_for_figure_4_1}
\alias{make_data_for_figure_4_1}
\title{Simulate data that corresponds to Figure 4.1 from MgW}
\usage{
make_data_for_figure_4_1(n, seed = sample.int(.Machine$integer.max, 1))
}
\arguments{
\item{n}{number of observations}
\item{seed}{RNG seed}
}
\value{
data.table with potential outcomes
}
\description{
Simulates a potential-outcomes data set matching the causal structure of
Figure 4.1 from MgW.
}
|
806830532d5533a271eea7a6e9527c9bdd405785
|
9f568e53d15e4fd26286ef2eec7ea8a5de222c35
|
/NBA/pullDataWeeklySB.R
|
57a538e9b985f3930303a5aba5c5f7e1c0183b0e
|
[] |
no_license
|
tcash21/sports_2016
|
d9c239d76387ba558e983acbac28c9df3b9ddb84
|
881fc8d9b2cb7a6ff79353d00e4ceaad0612eca9
|
refs/heads/master
| 2021-06-14T07:27:59.429216
| 2017-03-08T20:59:59
| 2017-03-08T20:59:59
| 45,196,476
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,432
|
r
|
pullDataWeeklySB.R
|
library(plyr)
library(RSQLite)
library(sendmailR)
drv <- dbDriver("SQLite")
con <- dbConnect(drv, "/home/ec2-user/sports2015/NBA/sports.db")
tables <- dbListTables(con)
lDataFrames <- vector("list", length=length(tables))
## create a data.frame for each table
for (i in seq(along=tables)) {
if(tables[[i]] == 'NBAHalflines' | tables[[i]] == 'NBAlines' | tables[[i]] == 'NBASBLines' | tables[[i]] == 'NBASBHalfLines'){
lDataFrames[[i]] <- dbGetQuery(conn=con, statement=paste0("SELECT n.away_team, n.home_team, n.game_date, n.line, n.spread, n.game_time from '", tables[[i]], "' n inner join
(select game_date, away_team,home_team, max(game_time) as mgt from '", tables[[i]], "' group by game_date, away_team, home_team) s2 on s2.game_date = n.game_date and
s2.away_team = n.away_team and s2.home_team = n.home_team and n.game_time = s2.mgt;"))
} else {
lDataFrames[[i]] <- dbGetQuery(conn=con, statement=paste("SELECT * FROM '", tables[[i]], "'", sep=""))
}
cat(tables[[i]], ":", i, "\n")
}
halflines <- lDataFrames[[which(tables == "NBASBHalfLines")]]
games <- lDataFrames[[which(tables == "NBAGames")]]
lines <- lDataFrames[[which(tables == "NBASBLines")]]
teamstats <- lDataFrames[[which(tables == "NBAseasonstats")]]
boxscores <- lDataFrames[[which(tables == "NBAstats")]]
lookup <- lDataFrames[[which(tables == "NBASBTeamLookup")]]
nbafinal <- lDataFrames[[which(tables == "NBAfinalstats")]]
seasontotals <- lDataFrames[[which(tables == "NBAseasontotals")]]
## Split "made-attempted" columns (fgma/tpma/ftma, cols 3:5) into separate
## made and attempted columns (still character at this point).
b<-apply(boxscores[,3:5], 2, function(x) strsplit(x, "-"))
boxscores$fgm <- do.call("rbind",b$fgma)[,1]
boxscores$fga <- do.call("rbind",b$fgma)[,2]
boxscores$tpm <- do.call("rbind",b$tpma)[,1]
boxscores$tpa <- do.call("rbind",b$tpma)[,2]
boxscores$ftm <- do.call("rbind",b$ftma)[,1]
boxscores$fta <- do.call("rbind",b$ftma)[,2]
# positional reorder; indices assume the fixed NBAstats schema -- TODO confirm
boxscores <- boxscores[,c(1,2,16:21,6:15)]
# join box scores to games, then to season-to-date team stats via a
# "<team> <date>" string key
m1<-merge(boxscores, games, by="game_id")
m1$key <- paste(m1$team, m1$game_date)
teamstats$key <- paste(teamstats$team, teamstats$the_date)
m2<-merge(m1, teamstats, by="key")
# duplicate the sportsbook team name so lookup can join on either side
lookup$away_team <- lookup$sb_team
lookup$home_team <- lookup$sb_team
## Total Lines
## attach full-game lines keyed from the away side (m3a) and home side (m3h)
la<-merge(lookup, lines, by="away_team")
lh<-merge(lookup, lines, by="home_team")
la$key <- paste(la$espn_abbr, la$game_date)
lh$key <- paste(lh$espn_abbr, lh$game_date)
m3a<-merge(m2, la, by="key")
m3h<-merge(m2, lh, by="key")
# positional rename: column 49 holds the line's update timestamp -- TODO
# confirm if the upstream schema changes
colnames(m3a)[49] <- "CoversTotalLineUpdateTime"
colnames(m3h)[49] <- "CoversTotalLineUpdateTime"
## Halftime Lines
##halflines <- halflines[-which(halflines$line == "OFF"),]
la2<-merge(lookup, halflines, by="away_team")
lh2<-merge(lookup, halflines, by="home_team")
la2$key <- paste(la2$espn_abbr, la2$game_date)
lh2$key <- paste(lh2$espn_abbr, lh2$game_date)
m3a2<-merge(m2, la2, by="key")
m3h2<-merge(m2, lh2, by="key")
colnames(m3a2)[49] <- "CoversHalfLineUpdateTime"
colnames(m3h2)[49] <- "CoversHalfLineUpdateTime"
# align the halftime-line rows to the full-line rows by key and append the
# three halftime columns (positions 94:96) to each side
l<-merge(m3a, m3a2, by=c("game_date.y", "away_team"))
l<-l[match(m3a$key, l$key.y),]
m3a<-cbind(m3a, l[,94:96])
l2<-merge(m3h, m3h2, by=c("game_date.y", "home_team"))
l2<-l2[match(m3h$key, l2$key.y),]
m3h<-cbind(m3h, l2[,94:96])
## Final (full-game) box scores: split made-attempted columns the same way
## as the halftime box scores above, then merge everything into `final`.
nbafinal$key <- paste(nbafinal$game_id, nbafinal$team)
n<-apply(nbafinal[,3:5], 2, function(x) strsplit(x, "-"))
nbafinal$fgm <- do.call("rbind",n$fgma)[,1]
nbafinal$fga <- do.call("rbind",n$fgma)[,2]
nbafinal$tpm <- do.call("rbind",n$tpma)[,1]
nbafinal$tpa <- do.call("rbind",n$tpma)[,2]
nbafinal$ftm <- do.call("rbind",n$ftma)[,1]
nbafinal$fta <- do.call("rbind",n$ftma)[,2]
nbafinal <- nbafinal[,c(1,2,17:22,6:16)]
# align column names so the away-side and home-side frames can be stacked
colnames(m3h)[44:45] <- c("home_team.x", "home_team.y")
colnames(m3a)[40] <- "home_team"
all <- rbind(m3a, m3h)
all <- all[,-1]
all$key <- paste(all$game_id, all$team.y)
# de-duplicate: keep the first row per (game_id, team) key
all<-all[match(unique(all$key), all$key),]
final<-merge(nbafinal, all, by="key")
final <- final[,-1]
# positional rename of the merged frame; REMOVE* placeholders are dropped
# just below
colnames(final) <- c("GAME_ID","TEAM","FINAL_FGM","FINAL_FGA", "FINAL_3PM","FINAL_3PA","FINAL_FTM","FINAL_FTA","FINAL_OREB","FINAL_DREB","FINAL_REB",
"FINAL_AST","FINAL_STL","FINAL_BLK","FINAL_TO","FINAL_PF","FINAL_PTS","FINAL_BOXSCORE_TIMESTAMP", "REMOVE0","REMOVE1","HALF_FGM", "HALF_FGA", "HALF_3PM",
"HALF_3PA", "HALF_FTM","HALF_FTA","HALF_OREB", "HALF_DREB", "HALF_REB", "HALF_AST", "HALF_STL", "HALF_BLK", "HALF_TO", "HALF_PF", "HALF_PTS",
"HALF_TIMESTAMP", "TEAM1", "TEAM2", "GAME_DATE","GAME_TIME","REMOVE2","REMOVE3", "SEASON_FGM","SEASON_FGA","SEASON_FG%", "SEASON_3PM", "SEASON_3PA","SEASON_3P%",
"SEASON_FTM","SEASON_FTA","SEASON_FT%","SEASON_2PM", "SEASON_2PA", "SEASON_2P%", "SEASON_PPS", "SEASON_AFG", "REMOVE4","REMOVE5","REMOVE6","REMOVE7","REMOVE8","REMOVE9",
"REMOVE10", "LINE", "SPREAD", "COVERS_UPDATE","LINE_HALF", "SPREAD_HALF", "COVERS_HALF_UPDATE")
final <- final[,-grep("REMOVE", colnames(final))]
## Add the season total stats
colnames(seasontotals)[1] <- "TEAM"
colnames(seasontotals)[2] <- "GAME_DATE"
#today <- format(Sys.Date(), "%m/%d/%Y")
#seasontotals <- subset(seasontotals, GAME_DATE == today)
final$key <- paste(final$GAME_DATE, final$TEAM)
#seasontotals$TEAM<-match(seasontotals$TEAM, lookup$espn_name)
seasontotals$key <- paste(seasontotals$GAME_DATE, seasontotals$TEAM)
#x<-merge(seasontotals, final, by=c("key"))
#x<- x[,c(-1, -16, -51)]
#final<-x[,c(14:46, 1:13, 47:69)]
#colnames(final)[36:46] <- c("SEASON_GP", "SEASON_PPG", "SEASON_ORPG", "SEASON_DEFR", "SEASON_RPG", "SEASON_APG", "SEASON_SPG", "SEASON_BPG", "SEASON_TPG", "SEASON_FPG", "SEASON_ATO")
#final$GAME_DATE <- seasontotals$GAME_DATE[1]
#final$GAME_DATE<-games[match(final$GAME_ID, games$game_id),]$game_date
# append season totals row-matched by "<date> <team>" key; keep columns 1:72
x <- cbind(final, seasontotals[match(final$key, seasontotals$key),])
final<-x[,c(1:72)]
colnames(final)[62:72] <- c("SEASON_GP", "SEASON_PPG", "SEASON_ORPG", "SEASON_DEFRPG", "SEASON_RPG", "SEASON_APG", "SEASON_SPG", "SEASON_BGP","SEASON_TPG", "SEASON_FPG", "SEASON_ATO")
#final<-final[order(final$GAME_DATE, decreasing=TRUE),]
final<-final[order(final$GAME_DATE, decreasing=TRUE),]
final$LINE_HALF <- as.numeric(final$LINE_HALF)
final$LINE <- as.numeric(final$LINE)
final$COVERS_UPDATE<-as.character(final$COVERS_UPDATE)
# per game (two rows per GAME_ID):
#   mwt      = both teams' halftime points + half line - full line
#   half_diff = team1 halftime points - team2 halftime points
final<-ddply(final, .(GAME_ID), transform, mwt=HALF_PTS[1] + HALF_PTS[2] + LINE_HALF - LINE)
final <- ddply(final, .(GAME_ID), transform, half_diff=HALF_PTS[1] - HALF_PTS[2])
## transform to numerics
final[,3:17]<-apply(final[,3:17], 2, as.numeric)
final[,19:33]<-apply(final[,19:33], 2, as.numeric)
final[,c(39:54)]<-apply(final[,c(39:54)], 2, as.numeric)
final[,c(62:74)]<-apply(final[,c(62:74)], 2, as.numeric)
## Team1 and Team2 Halftime Differentials
## each feature: halftime stat minus half of the season per-game stat
## (fg_percent carries a fixed -.01 adjustment; FTM a +1 adjustment)
final$fg_percent <- ((final$HALF_FGM / final$HALF_FGA) - (final$SEASON_FGM / final$SEASON_FGA) - .01)
final$fg_percent_noadjustment <- (final$HALF_FGM / final$HALF_FGA) - (final$SEASON_FGM / final$SEASON_FGA)
final$FGM <- (final$HALF_FGM - (final$SEASON_FGM / 2))
final$TPM <- (final$HALF_3PM - (final$SEASON_3PM / 2))
final$FTM <- (final$HALF_FTM - (final$SEASON_FTM / 2 - 1))
final$TO <- (final$HALF_TO - (final$SEASON_ATO / 2))
final$OREB <- (final$HALF_OREB - (final$SEASON_ORPG / 2))
## Cumulative Halftime Differentials: per-game average of the two teams'
## halftime-vs-season deltas computed above (rows [1] and [2] of each
## GAME_ID group).
final$chd_fg <- ddply(final, .(GAME_ID), transform, chd_fg = (fg_percent[1] + fg_percent[2]) / 2)$chd_fg
final$chd_fgm <- ddply(final, .(GAME_ID), transform, chd_fgm = (FGM[1] + FGM[2]) / 2)$chd_fgm
final$chd_tpm <- ddply(final, .(GAME_ID), transform, chd_tpm = (TPM[1] + TPM[2]) / 2)$chd_tpm
final$chd_ftm <- ddply(final, .(GAME_ID), transform, chd_ftm = (FTM[1] + FTM[2]) / 2)$chd_ftm
## FIX: was (TO[1] + TO[1]) / 2, which averaged team 1's turnover delta
## with itself; every sibling chd_* column averages rows [1] and [2].
final$chd_to <- ddply(final, .(GAME_ID), transform, chd_to = (TO[1] + TO[2]) / 2)$chd_to
final$chd_oreb <- ddply(final, .(GAME_ID), transform, chd_oreb = (OREB[1] + OREB[2]) / 2)$chd_oreb
result <- final
## Add Criteria for Over/Under
## Each *O flag (1/0) feeds overSum, each *U flag feeds underSum; NA
## comparisons (missing stats/lines) are zeroed before summing.
result$mwtO <- as.numeric(result$mwt < 7.1 & result$mwt > -3.9)
result$chd_fgO <- as.numeric(result$chd_fg < .15 & result$chd_fg > -.07)
result$chd_fgmO <- as.numeric(result$chd_fgm < -3.9)
result$chd_tpmO <- as.numeric(result$chd_tpm < -1.9)
result$chd_ftmO <- as.numeric(result$chd_ftm < -.9)
result$chd_toO <- as.numeric(result$chd_to < -1.9)
result$mwtO[is.na(result$mwtO)] <- 0
result$chd_fgO[is.na(result$chd_fgO)] <- 0
result$chd_fgmO[is.na(result$chd_fgmO)] <- 0
result$chd_tpmO[is.na(result$chd_tpmO)] <- 0
result$chd_ftmO[is.na(result$chd_ftmO)] <- 0
result$chd_toO[is.na(result$chd_toO)] <- 0
result$overSum <- result$mwtO + result$chd_fgO + result$chd_fgmO + result$chd_tpmO + result$chd_ftmO + result$chd_toO
result$fullSpreadU <- as.numeric(abs(as.numeric(result$SPREAD)) > 10.9)
result$mwtU <- as.numeric(result$mwt > 7.1)
result$chd_fgU <- as.numeric(result$chd_fg > .15 | result$chd_fg < -.07)
result$chd_fgmU <- 0 # constant: not used as an UNDER signal, kept for symmetry
result$chd_tpmU <- 0 # constant: not used as an UNDER signal, kept for symmetry
result$chd_ftmU <- as.numeric(result$chd_ftm > -0.9)
result$chd_toU <- as.numeric(result$chd_to > -1.9)
result$mwtU[is.na(result$mwtU)] <- 0
## FIX: was `result$chd_fgO[is.na(result$chd_fgU)] <- 0`, which re-zeroed
## the OVER flag and left NA values in chd_fgU to poison underSum.
result$chd_fgU[is.na(result$chd_fgU)] <- 0
result$chd_fgmU[is.na(result$chd_fgmU)] <- 0
result$chd_tpmU[is.na(result$chd_tpmU)] <- 0
result$chd_ftmU[is.na(result$chd_ftmU)] <- 0
result$chd_toU[is.na(result$chd_toU)] <- 0
result$underSum <- result$fullSpreadU + result$mwtU + result$chd_fgU + result$chd_fgmU + result$chd_tpmU + result$chd_ftmU + result$chd_toU
## Reorder by game and tag alternating rows TEAM1/TEAM2 so the long table
## (two rows per game) can be reshaped to one row per game.
result<-result[order(result$GAME_ID),]
result$team <- ""
result[seq(from=1, to=dim(result)[1], by=2),]$team <- "TEAM1"
result[seq(from=2, to=dim(result)[1], by=2),]$team <- "TEAM2"
wide<-reshape(result, direction = "wide", idvar="GAME_ID", timevar="team")
## FIX: everything below addresses TEAM1/TEAM2-suffixed columns (e.g.
## FINAL_PTS.TEAM1), which exist only in the reshaped table; the original
## kept using the long `result`, so those columns were NULL and the `Over`
## assignment failed.
result <- wide
result$secondHalfPts.TEAM1 <- result$FINAL_PTS.TEAM1 - result$HALF_PTS.TEAM1
result$secondHalfPts.TEAM2 <- result$FINAL_PTS.TEAM2 - result$HALF_PTS.TEAM2
result$secondHalfPtsTotal <- result$secondHalfPts.TEAM1 + result$secondHalfPts.TEAM2
result$Over<-result$secondHalfPtsTotal > result$LINE_HALF.TEAM1
## FIX: was `result[-which(is.na(result$Over)),]`, which drops EVERY row
## when there are no NAs (x[-integer(0), ] selects zero rows).
result <- result[!is.na(result$Over),]
## Wide-frame features; columns carry .TEAM1/.TEAM2 suffixes from reshape().
result$SPREAD_HALF.TEAM1<-as.numeric(result$SPREAD_HALF.TEAM1)
# FGS_GROUP buckets the full-game spread: '1' < 3.1, '2' in [3.1, 8.1),
# '3' >= 8.1; length() guards avoid the empty-which assignment error
result$FGS_GROUP <- NA
if(length(which(abs(result$SPREAD.TEAM1) < 3.1)) > 0){
result[which(abs(result$SPREAD.TEAM1) < 3.1),]$FGS_GROUP <- '1'
}
if(length(which(abs(result$SPREAD.TEAM1) >= 3.1 & abs(result$SPREAD.TEAM1) < 8.1)) > 0){
result[which(abs(result$SPREAD.TEAM1) >= 3.1 & abs(result$SPREAD.TEAM1) < 8.1),]$FGS_GROUP <- '2'
}
if(length(which(abs(result$SPREAD.TEAM1) >= 8.1)) > 0){
result[which(abs(result$SPREAD.TEAM1) >= 8.1),]$FGS_GROUP <- '3'
}
result$LINE_HALF.TEAM1<-as.numeric(result$LINE_HALF.TEAM1)
result$HALF_DIFF <- NA
# NOTE(review): TEAM1 is hard-coded as not the home team -- confirm that
# row order out of the DB really puts the away side first
result$HOME_TEAM.TEAM1 <- FALSE
# TEAM1 is the underdog when its spread is positive
result$underDog.TEAM1 <- result$HOME_TEAM.TEAM1 == FALSE & result$SPREAD.TEAM1 > 0
under.teams <- which(result$underDog.TEAM1)
favorite.teams <- which(!result$underDog.TEAM1)
# HALF_DIFF: halftime margin from the favorite's point of view
result[under.teams,]$HALF_DIFF <- result[under.teams,]$HALF_PTS.TEAM2 - result[under.teams,]$HALF_PTS.TEAM1
result[favorite.teams,]$HALF_DIFF <- result[favorite.teams,]$HALF_PTS.TEAM1 - result[favorite.teams,]$HALF_PTS.TEAM2
# MWTv2: halftime line minus half the full-game line
result$MWTv2 <- result$LINE_HALF.TEAM1 - (result$LINE.TEAM1 /2)
# possession estimate: FGA + FTA/2 + TO - OREB (halftime and season forms)
result$possessions.TEAM1 <- result$HALF_FGA.TEAM1 + (result$HALF_FTA.TEAM1 / 2) + result$HALF_TO.TEAM1 - result$HALF_OREB.TEAM1
result$possessions.TEAM2 <- result$HALF_FGA.TEAM2 + (result$HALF_FTA.TEAM2 / 2) + result$HALF_TO.TEAM2 - result$HALF_OREB.TEAM2
result$possessions.TEAM1.SEASON <- result$SEASON_FGA.TEAM1 + (result$SEASON_FTA.TEAM1 / 2) + result$SEASON_TPG.TEAM1 - result$SEASON_ORPG.TEAM1
result$possessions.TEAM2.SEASON <- result$SEASON_FGA.TEAM2 + (result$SEASON_FTA.TEAM2 / 2) + result$SEASON_TPG.TEAM2 - result$SEASON_ORPG.TEAM2
# POSSvE: observed halftime pace vs half the season expectation
result$POSSvE <- NA
## Adjust this for Fav and Dog
result[under.teams,]$POSSvE <- ((result[under.teams,]$possessions.TEAM2 + result[under.teams,]$possessions.TEAM1) / 2) - ((result[under.teams,]$possessions.TEAM2.SEASON /
2 + result[under.teams,]$possessions.TEAM1.SEASON / 2) / 2)
result[favorite.teams,]$POSSvE <- ((result[favorite.teams,]$possessions.TEAM1 + result[favorite.teams,]$possessions.TEAM2) / 2) - ((result[favorite.teams,]$possessions.TEAM1.SEASON /
2 + result[favorite.teams,]$possessions.TEAM2.SEASON / 2) / 2)
# P100: points per 100 possessions (halftime vs season baseline)
result$P100vE <- NA
result$P100.TEAM1 <- result$HALF_PTS.TEAM1 / result$possessions.TEAM1 * 100
result$P100.TEAM1.SEASON <- result$SEASON_PPG.TEAM1 / result$possessions.TEAM1.SEASON * 100
result$P100.TEAM2 <- result$HALF_PTS.TEAM2 / result$possessions.TEAM2 * 100
result$P100.TEAM2.SEASON <- result$SEASON_PPG.TEAM2 / result$possessions.TEAM2.SEASON * 100
# P100_DIFF: favorite-vs-dog difference in efficiency deltas;
# P100vE: sum of both teams' efficiency deltas
result$P100_DIFF <- NA
result[under.teams,]$P100_DIFF <- (result[under.teams,]$P100.TEAM2 - result[under.teams,]$P100.TEAM2.SEASON) - (result[under.teams,]$P100.TEAM1 - result[under.teams,]$P100.TEAM1.SEASON)
result[favorite.teams,]$P100_DIFF <- (result[favorite.teams,]$P100.TEAM1 - result[favorite.teams,]$P100.TEAM1.SEASON) - (result[favorite.teams,]$P100.TEAM2 - result[favorite.teams,]$P100.TEAM2.SEASON)
result[favorite.teams,]$P100vE <- (result[favorite.teams,]$P100.TEAM1 - result[favorite.teams,]$P100.TEAM1.SEASON) + (result[favorite.teams,]$P100.TEAM2 -
result[favorite.teams,]$P100.TEAM2.SEASON)
result[under.teams,]$P100vE <- (result[under.teams,]$P100.TEAM2 - result[under.teams,]$P100.TEAM2.SEASON) + (result[under.teams,]$P100.TEAM1 -
result[under.teams,]$P100.TEAM1.SEASON)
# NOTE(review): rpart.model is not defined anywhere in this script --
# presumably loaded into the workspace (e.g. from an .RData) before this
# file is sourced; confirm, otherwise this line errors.
result$prediction<-predict(rpart.model,newdata=result, type="class")
# FAV: name of the favorite (the non-underdog side)
result$FAV <- ""
result[which(result$underDog.TEAM1),]$FAV <- result[which(result$underDog.TEAM1),]$TEAM2.TEAM2
result[which(!result$underDog.TEAM1),]$FAV <- result[which(!result$underDog.TEAM1),]$TEAM1.TEAM1
# MWTv3: signed (favorite-oriented) half spread minus half of the signed
# full-game spread
result$MWTv3 <- 0
if(length(which(result$TEAM2.TEAM2 == result$FAV)) > 0){
i <- which(result$TEAM2.TEAM2 == result$FAV)
result$MWTv3[i] <- abs(result[i,]$SPREAD_HALF.TEAM1) - (abs(result[i,]$SPREAD.TEAM1) / 2)
}
if (length(which(result$TEAM2.TEAM2 != result$FAV)) > 0){
i <- which(result$TEAM2.TEAM2 != result$FAV)
result$MWTv3[i] <- -abs(result[i,]$SPREAD_HALF.TEAM1) - (-abs(result[i,]$SPREAD.TEAM1) / 2)
}
# final artifact consumed by the email step below
write.csv(result, file="/home/ec2-user/sports2015/NBA/sportsbook.csv", row.names=FALSE)
## Email the generated CSV to each recipient (sendmail vectorized over "to")
sendmailV <- Vectorize( sendmail , vectorize.args = "to" )
#emails <- c( "<tanyacash@gmail.com>" , "<malloyc@yahoo.com>", "<sschopen@gmail.com>")
emails <- c("<tanyacash@gmail.com>")
from <- "<tanyacash@gmail.com>"
subject <- "Weekly NBA Data Report - SportsBook"
# body = plain-text note plus the CSV written above as a MIME attachment
body <- c(
"Chris -- see the attached file.",
mime_part("/home/ec2-user/sports2015/NBA/sportsbook.csv", "WeeklyDataNBA_SB.csv")
)
sendmailV(from, to=emails, subject, body)
|
087d7cf57d065a7275dfcdb8cbf1625fa1c27cdf
|
705fc2f3c1f1d01775655a78024f6f4a0f824515
|
/multiple_linear_regressionTest.R
|
b6fcb480e41083b4cda8a0409176e6c200f1dbc3
|
[] |
no_license
|
sajiasd/hello-world
|
ccc73d2d87fa519fb47dbdecc598235ca4bdaa34
|
bebf29c322b3e31befc10973f3a8a71879742d93
|
refs/heads/master
| 2020-04-16T19:55:57.265428
| 2019-01-15T15:57:32
| 2019-01-15T15:57:32
| 165,879,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,318
|
r
|
multiple_linear_regressionTest.R
|
# Multiple linear regression on the 50 Startups dataset: fit on a training
# split, then reduce predictors by manual backward elimination.
dataset = read.csv('50_Startups.csv')
# Encoding categorical data
dataset$State = factor(dataset$State,
levels = c('New York', 'California', 'Florida'),
labels = c(1, 2, 3))
#Splitting data into training and test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Profit, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
#fitting multiple regression to training set
regressor = lm(formula = Profit ~.,
data = training_set)
#Predicting the Test set results
y_pred = predict(regressor, newdata = test_set)
# building optimal model using backward elimination
# (each refit below removes one predictor; inspect summary() after each)
regressor = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend + State,
data = dataset)
summary(regressor)
# step 1: drop State
regressor = lm(formula = Profit ~ R.D.Spend + Administration + Marketing.Spend,
data = dataset)
summary(regressor)
# step 2: drop Administration
regressor = lm(formula = Profit ~ R.D.Spend + Marketing.Spend,
data = dataset)
summary(regressor)
# step 3: drop Marketing.Spend -- R.D.Spend alone remains
regressor = lm(formula = Profit ~ R.D.Spend,
data = dataset)
summary(regressor)
# re-predict the test set with the reduced model
y_pred = predict(regressor, newdata = test_set)
|
9d165c25f90dc844c4f00fc03b4c8ecf68c4307e
|
8340317041a7f6aded928bc61237c78d32e059ee
|
/exclude/data_creator.R
|
1cc5cfecc72947f8b41759dcd07fd41cf9b6fc74
|
[] |
no_license
|
msadatsafavi/txBenefit
|
c2b2051168db0e0b0ef4a6015136c60f7a4f6b30
|
7342099f8cadadb7090eb7557c330d04f27e0520
|
refs/heads/master
| 2020-12-02T23:35:00.883225
| 2020-02-01T00:57:58
| 2020-02-01T00:57:58
| 231,154,313
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 841
|
r
|
data_creator.R
|
# One-off script: builds the packaged `rct_data` example dataset from a raw
# RDS file on the author's machine (local path; not reproducible elsewhere).
rct_data_raw<-readRDS(file="M:\\Projects\\2018\\Project.RCTSubGroup\\Output\\1.StatMed\\R2\\Code&DataForReview\\macro_data.RDS")
n<-dim(rct_data_raw)[1]
# simulate per-subject follow-up times, clamped to [0.5, 1]
time<-1.1-rexp(n,10)
time[which(time>1)]<-1
time[which(time<0.5)]<-0.5
rct_data_raw[,'time']<-time
rct_data_raw[,'tte']<-NA
# For subjects with >=1 exacerbation (`outcome`), draw exponential gap
# times by rejection sampling until all n_exac events fit inside the
# follow-up window, then record the time to the FIRST event as `tte`.
# NOTE(review): while(TRUE) is unbounded and can iterate many times when
# n_exac is large relative to fu -- acceptable for a run-once script.
for(i in 1:n)
{
message(i)
n_exac<-rct_data_raw[i,'outcome']
if(n_exac>0)
{
#browser()
fu<-rct_data_raw[i,'time']
while(TRUE)
{
exac_times<-rexp(n_exac,1)
tf<-sum(exac_times)
if(tf<fu) break
}
rct_data_raw[i,'tte']<-exac_times[1]
}
}
# rename/derive the packaged columns and save into the package data dir
rct_data_raw[,'n_exac']<-rct_data_raw[,'outcome']
rct_data_raw[,'sgrq']<-rct_data_raw[,'sgrq100']
rct_data<-rct_data_raw[,c('tx','female','age','prev_hosp','prev_ster','fev1','sgrq','time','tte','n_exac')]
save(rct_data,file=paste0(getwd(),"/data/rct_data.RData"))
|
25909908de82abec45968600c9d2762c192b93a2
|
880a3d1e88a31a36cc76669ed4b7e70db5c02e54
|
/Admission Prediction Model.R
|
6ff3174456ed4bf588c060432ce4804fc615b5a2
|
[] |
no_license
|
Suggestions-Only/Logistic-Regression--Grad-School-Admit
|
a22cae23769277d7ff8c475ff2bc759d3fde9279
|
89f5e50874d92a449aca6af801f1bb317377d596
|
refs/heads/master
| 2022-07-25T12:11:57.628440
| 2020-05-14T05:08:54
| 2020-05-14T05:08:54
| 263,820,279
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 976
|
r
|
Admission Prediction Model.R
|
# Logistic-regression admission model: fit admit ~ gre + gpa + rank on the
# UCLA binary dataset, inspect fit, and evaluate a cutoff via AUC.
library(mlbench)
library(caret)
library(ModelMetrics)
# FIX: the pipeline below uses `%>%`, group_by() and summarise(), none of
# which are attached by the packages above; without dplyr the grouped
# median step fails.
library(dplyr)

#load in data, change admit to factor
mydata <- read.csv("https://stats.idre.ucla.edu/stat/data/binary.csv")
mydata$admit <- as.factor(mydata$admit)

#fit logistic regression model
admit.model <- glm(admit ~ gre + gpa + rank, family = binomial(link = "logit"),
                   data = mydata)
summary(admit.model)

#test logistic regression model fit (sequential likelihood-ratio tests)
anova(admit.model, test = "Chisq")

#generate prediction on fake data
hannah <- data.frame(gre = 652, gpa = 3.72, rank = 1)
predict(admit.model, newdata = hannah, type = "response")

#generate predictions on existing data, specify cutoff, and assess prediction accuracy
admit.pred <- predict(admit.model, newdata = mydata, type = "response")
mydata$admit.pred <- admit.pred
# median predicted probability per observed class (informs the .379 cutoff)
mydata %>%
  group_by(admit) %>%
  summarise(med = median(admit.pred))
pred.admit <- ifelse(admit.pred > .379, 1, 0)
auc(actual = mydata$admit, predicted = pred.admit)
|
5b769718891dd9c39f2d0408ac23ec4cae7f162c
|
17120f21cf39f3e90cb5d8fea73dd18b8e1256db
|
/PatternCausality.R
|
9f11dbd05b177d2cf7fa0711a072d4e34d171dac
|
[] |
no_license
|
DingYinghui/pattern_causality
|
beafc5beac439118ae385b0e2a3601516572a764
|
d931c164d57735a752fb018ba1661ff2f8b85495
|
refs/heads/master
| 2022-03-27T22:28:08.633676
| 2020-01-21T18:52:46
| 2020-01-21T18:52:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,279
|
r
|
PatternCausality.R
|
patternCausality <- function(X,Y,E,tau,metric,h,weighted) {
###################################
### STEP 0: PREPARATORY ACTIONS ###
###################################
NNSPAN = E+1 # Former NN | Reserves a minimum number of nearest neighbors
CCSPAN = (E-1)*tau # This will remove the common coordinate NNs
hashedpatterns <- patternHashing(E)
#####################################
### STEP 1: THE SHADOW ATTRACTORS ###
#####################################
#= [A] =# State Space
Mx <- stateSpace(X,E,tau)
My <- stateSpace(Y,E,tau)
#= [B] =# Signature Space
SMx <- signatureSpace(Mx,E)
SMy <- signatureSpace(My,E)
#= [C] =# Pattern Space
PSMx <- patternSpace(SMx,E)
PSMy <- patternSpace(SMy,E)
#= [D] =# Distance Matrix | First row corresponds to t=1
Dx <- distanceMatrix(Mx,metric)
Dy <- distanceMatrix(My,metric)
#= Check whether time series length is sufficient
FCP <- firstCausalityPoint(E,tau,h,X)
#= Calculate the main loop duration of the algorithm
al_loop_dur <- FCP:(length(X)-(E-1)*tau-h)
#= Calculate the loop duration for out of sample forecasts
out_of_sample_loop_dur <- ((length(X)-(E-1)*tau-h)+1):nrow(Mx)
#= KEEPING THE PC MATRICES | Causality is considered only from FCP onwards
predictedPCMatrix <- dataBank(type = "array",dimensions=c(3^(E-1),3^(E-1),length(Y)))
#pb <- tkProgressBar(title = "Deploying PC Mk. II", min = 0,
# max = length(al_loop_dur), width = 500)
for(i in al_loop_dur) {
if (!anyNA(c(Mx[i,],My[i+h,]))) {
###################################################################
### STEP 2: The Nearest Neighbours and their Future projections ###
###################################################################
NNx <- pastNNsInfo(CCSPAN,NNSPAN,Mx,Dx,SMx,PSMx,i,h)
if (!anyNA(Dy[i,NNx$times+h])) {
projNNy <- projectedNNsInfo(My,Dy,SMy,PSMy,NNx$times,i,h)
#######################################################################
### STEP 3: The affected variable's predicted pattern h steps ahead ###
#######################################################################
predictedSignatureY <- predictionY(E,projNNy,zeroTolerance=E-1)$predictedSignatureY
predictedPatternY <- predictionY(E,projNNy,zeroTolerance=E-1)$predictedPatternY[1]
#############################################
### STEP 4: The causal variable's pattern ###
#############################################
####################signatureX <- signaE(E,SignX,i)
signatureX <- SMx[i,]
patternX <- PSMx[i,]
####################################################
### STEP 5: The affected variable's real pattern ###
####################################################
#######realSignatureY <- signaE(E,SignY,(i+h))
realSignatureY <- SMy[(i+h),]
realPatternY <- PSMy[i+h]
##########################################################################
### STEP 6: The nature and intensity of causality at every time step t ###
##########################################################################
pc <- fillPCMatrix(weighted,predictedPatternY,realPatternY,predictedSignatureY,realSignatureY,patternX,signatureX)
predictedPCMatrix[which(hashedpatterns==patternX),which(hashedpatterns==predictedPatternY),i] <- pc$predicted
}
}
#setTkProgressBar(pb, i, label=paste( i/al_loop_dur[length(al_loop_dur)], 0),"% PC Mk. II In-Sample Assignment Completion")
}
causality <- natureOfCausality(predictedPCMatrix,al_loop_dur,hashedpatterns,X)
totalCausPercent <- 1-mean(causality$noCausality[al_loop_dur],na.rm = T)
posiCausPercent <- mean(ifelse(causality$noCausality[al_loop_dur]!=1,causality$Positive[al_loop_dur],NA),na.rm = T)
negaCausPercent <- mean(ifelse(causality$noCausality[al_loop_dur]!=1,causality$Negative[al_loop_dur],NA),na.rm = T)
darkCausPercent <- mean(ifelse(causality$noCausality[al_loop_dur]!=1,causality$Dark[al_loop_dur],NA),na.rm = T)
#return(list(causality,totalCausPercent,posiCausPercent,negaCausPercent,darkCausPercent))
return(data.frame(total=totalCausPercent,positive=posiCausPercent,negative=negaCausPercent,dark=darkCausPercent))
}
|
3ac25ded95bb75f2235ca0178c5b03eb9a5439ea
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GWEX/examples/simGwexModel.Rd.R
|
245dda607118bca21cee40df65a5003c0ecb0078
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,353
|
r
|
simGwexModel.Rd.R
|
library(GWEX)
### Name: simGwexModel
### Title: simGwexModel
### Aliases: simGwexModel
### ** Examples
# vector of dates
vecDates = seq(from=as.Date("01/01/2005",format="%d/%m/%Y"),
to=as.Date("31/12/2014",format="%d/%m/%Y"),by='day')
###############################################################
# FIT AND SIMULATE FROM THE PRECIPITATION MODEL
###############################################################
# Format observations: create a G-Wex object
myObsPrec = GwexObs(variable='Prec',date=vecDates,obs=dailyPrecipGWEX[,1:2])
# default options except for 'nChainFit'
list.options = list(nChainFit=1000)
# generate 2 scenarios for one year, using a existing 'GwexFit' object
mySimPrec = simGwexModel(objGwexFit=myParPrecGWEX, nb.rep=2, d.start=vecDates[1],
d.end=vecDates[365])
mySimPrec # print object
###############################################################
# FIT AND SIMULATE FROM THE TEMPERATURE MODEL, COND. TO PRECIPITATION
###############################################################
# Format observations: create a G-Wex object
myObsTemp = GwexObs(variable='Temp',date=vecDates,obs=dailyTemperGWEX)
# generate 2 scenarios for one year, using a existing 'GwexFit' object
mySimTemp = simGwexModel(objGwexFit=myParTempGWEX, nb.rep=2, d.start=vecDates[1],
d.end=vecDates[365])
mySimTemp # print object
|
96f3cb4355bc29465cf9f65e11b3cde61b6102bd
|
f80a6370ca97c230dc2dd84ec4d9c90f91c9d38f
|
/man/order_docs.Rd
|
36b994ff726a69efdd0a8399ea8d91140615d585
|
[] |
no_license
|
zhanglipku/rainette
|
936fea76a91b307618a9229e4be6fde89810a341
|
7229e7321dae120b9f13f709817fcd86bac83e02
|
refs/heads/master
| 2023-04-20T00:33:59.403453
| 2021-05-10T12:46:44
| 2021-05-10T12:46:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 504
|
rd
|
order_docs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rainette.R
\name{order_docs}
\alias{order_docs}
\title{return documents indices ordered by CA first axis coordinates}
\usage{
order_docs(m)
}
\arguments{
\item{m}{dtm on which to compute the CA and order documents, converted to an integer matrix.}
}
\value{
ordered list of document indices
}
\description{
return documents indices ordered by CA first axis coordinates
}
\details{
Internal function, not to be used directly
}
|
d8c98f5546026514eb0bd72ca21a9802ac8db387
|
a01a8e73ede70072aa7a71f41ac72821243cac60
|
/cachematrix.R
|
d4bd725d4226ba4376eb656c76e769cc60949b8a
|
[] |
no_license
|
giconi/ProgrammingAssignment2
|
5804d173bf11ad0914544244bb16820be4a493e6
|
80ce1c94045fa8a0686ab780fd90b8f0991a5dd7
|
refs/heads/master
| 2020-12-25T08:38:21.254879
| 2015-02-20T20:31:15
| 2015-02-20T20:31:15
| 30,846,719
| 0
| 0
| null | 2015-02-15T23:44:44
| 2015-02-15T23:44:44
| null |
UTF-8
|
R
| false
| false
| 2,559
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix - caches the inverse of a matrix
## get - returns the matrix
## set - sets env var x (i.e. the matrix)
## setInverse - sets env variable inv (i.e. the inverse)
## getInverse - returns contents of var inv
##
## cacheSolve - returns a matrix of the inverse of x
## - if the inverse has previously been cached, use the cached value
## caches the inverse of a matrix in the inv var is the primary goal of this function
makeCacheMatrix <- function(x = matrix()) {
# initial value for inv
inv <- NULL
set <- function(y) {
# set x env var as the matrix
x <<- y
# if the matrix changes we need to set the inverse to null
inv <<- NULL
}
# retuns the matrix
get <- function() x
# set the inv env var with the value of the passed var
setInverse <- function(inverse) inv <<- inverse
# return the inv env var
getInverse <- function() inv
list(set=set,
get=get,
setinverse=setInverse,
getinverse=getInverse)
}
## calculates the inverse of a matrix, if it has not previously been cached
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
# call the getinverse function to get the value of the inverse
inv <- x$getinverse()
# it will be null if we have not calculated it
if(!is.null(inv)) {
# just a message to say we are using the cache
message("Inverse retrieved from cache.")
#return the inv var
return(inv)
}
# if we get this far, we need to calculate the inverse
message("Creating inverse for the first time.")
# get the matrix value
trix <- x$get()
# solve the matix value (inverse it)
inv <- solve(trix)
# set the inverse in the cache
x$setinverse(inv)
# return the inv
return(inv)
}
#
# Just some testing
#
#
# > t$set(matrix(c(4,2,3,1),2,2))
# > t$get()
# [,1] [,2]
# [1,] 4 3
# [2,] 2 1
# > cacheSolve(t)
# Creating inverse for the first time.
# [,1] [,2]
# [1,] -0.5 1.5
# [2,] 1.0 -2.0
# > cacheSolve(t)
# Inverse retrieved from cache.
# [,1] [,2]
# [1,] -0.5 1.5
# [2,] 1.0 -2.0
#
# Change the matrix
# > t$set(matrix(c(5,3,2,8),2,2))
# > t$get()
# [,1] [,2]
# [1,] 5 2
# [2,] 3 8
# > cacheSolve(t)
# Creating inverse for the first time.
# [,1] [,2]
# [1,] 0.23529412 -0.05882353
# [2,] -0.08823529 0.14705882
# > cacheSolve(t)
# Inverse retrieved from cache.
# [,1] [,2]
# [1,] 0.23529412 -0.05882353
# [2,] -0.08823529 0.14705882
#
#
|
4593dba19db3453ff29da7972ca913ceb1474ebd
|
e7c8d9ff9fa91af61dba9aafe952c0b111a6eaaf
|
/references/2. SVM.R
|
b4a4bb690d215f7d2465d7ed80d8c61222229ee9
|
[] |
no_license
|
devonwalshe/havamal
|
ed1672b934526f13f3e7120219f3267f3427297d
|
1a1ea234c041139f6030b9fe3571250f69eace32
|
refs/heads/master
| 2021-06-17T16:04:22.520283
| 2017-05-23T15:05:32
| 2017-05-23T15:05:32
| 59,772,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,073
|
r
|
2. SVM.R
|
### SVM model testing
### Data
cc = read.csv("./data/creditcard.csv")
mtsvr = mtcars[order(mtcars$mpg),]
mtsvr_factor = mtsvr
mtsvr_factor[,c("vs", "am", "cyl", "gear", "carb")] = lapply(mtsvr_factor[,c("vs", "am", "cyl", "gear", "carb")], as.factor)
mtsvr_norm = data.frame(scale(mtsvr))
library(e1071)
### Fit model
mtsvr_fit = svm(mpg ~ ., mtsvr)
mtsvr_factor_fit = svm(mpg ~ ., mtsvr_factor)
mtsvr_norm_fit = svm(mpg ~ ., mtsvr_norm)
### predict
mtsvr_pred = predict(mtsvr_fit, mtsvr)
mtsvr_factor_pred = predict(mtsvr_factor_fit, mtsvr_factor)
mtsvr_norm_pred = predict(mtsvr_norm_fit, mtsvr_norm)
### Check fitted model error
plot(mtsvr$mpg)
lines(mtsvr_pred, col="red", pch=4)
lines(mtsvr_factor_pred, col="blue", pch=19)
plot(mtsvr_norm$mpg)
lines(mtsvr_norm_pred, col="green", pch=3)
### inspect SVM parameters
sqrt(mean((mtsvr$mpg-mtsvr_pred)^2)) # 2.12 RMSE of the vanilla vit
sqrt(mean((scale(mtsvr$mpg)-scale(mtsvr_pred))^2)) # .32 - scaled vanilla fit - still better
sqrt(mean((mtsvr$mpg-mtsvr_factor_pred)^2)) # 2.30 RMSE of the factor fit
sqrt(mean((mtsvr_norm$mpg-mtsvr_norm_pred)^2)) # .35 RMSE of the scaled fit
### RMSE is worse than our multivariate linear model!
### Tune the model with grid-search
mtsvr_tuned = tune(svm, mpg ~ ., data=mtsvr, ranges = list(epsilon=seq(0,1,0.1), cost = 2^(2:9)))
plot(mtsvr_tuned, type='perspective', swapxy=TRUE)
mtsvr_tuned = tune(svm, mpg ~ ., data=mtsvr, ranges = list(epsilon=seq(0.3,0.6,0.01), cost = 2^(2:9)))
plot(mtsvr_tuned, type='perspective', swapxy=TRUE)
mtsvr_tuned = tune(svm, mpg ~ ., data=mtsvr, ranges = list(epsilon=seq(0.35,0.55,0.01), cost = 2^(2:9)))
plot(mtsvr_tuned, type='perspective', swapxy=TRUE)
### pick the final best model
mtsvr_fit =mtsvr_tuned$best.model
### new predictions
mtsvr_best_pred = predict(mtsvr_fit, mtsvr)
### Test the prediction
plot(mtsvr$mpg)
lines(mtsvr_best_pred, pch=4)
lines(mtsvr_pred, col="green", pch=4)
lines(mtsvr_factor_pred, col="blue", pch=4)
### Still not great
### What happens if we take out features?
corr = round(cor(mtsvr), 1) # Correlation matrix
corr_p = cor_pmat(mtsvr) # significance values for each one
cplot = ggcorrplot(corr, method='square', hc.order=TRUE , type="lower",outline.col='white', lab='true', p.mat=corr_p, insig='blank')
### clip features
mtsvr_clipped = mtsvr %>% select(wt, mpg)
### Tune model
mtsvr_clipped_tuned = tune(svm, mpg ~ ., data=mtsvr_clipped, ranges = list(epsilon=seq(0,1,0.01), cost = 2^(2:9)))
plot(mtsvr_clipped_tuned, type='perspective', swapxy=TRUE)
mtsvr_clipped_tuned = tune(svm, mpg ~ ., data=mtsvr_clipped, ranges = list(epsilon=seq(0.4,0.6,0.01), cost = 2^(2:9)))
plot(mtsvr_clipped_tuned, type='perspective', swapxy=TRUE)
### pick the final best model
mtsvr_best_fit2 =mtsvr_clipped_tuned$best.model
### new predictions
mtsvr_best_pred2 = predict(mtsvr_best_fit2, mtsvr)
### Test it out
plot(mtsvr$mpg)
lines(mtsvr_best_pred2, pch=4)
lines(mtsvr_pred, col="green", pch=4)
lines(mtsvr_factor_pred, col="blue", pch=4)
### Still not good - guess 32 training examples isn't good enough!
|
6dbcfd3093728402617a0a9bbb54d812ddae21ad
|
75f1f683951e8da74dc4e225df6e6b0bf47cd095
|
/code/2_sampling.R
|
d8330d0a1c6c7fde86a820ff057f1e1a41c6540b
|
[] |
no_license
|
MonikaKonarska/Binary-Classification-Models
|
d36f9c5480dd8b8ac9789040a219b801b0e6fa3e
|
1d256085063e332a8daa78f3662aa1cb538958eb
|
refs/heads/master
| 2020-05-18T03:01:11.208477
| 2020-01-06T11:29:55
| 2020-01-06T11:29:55
| 184,133,825
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 796
|
r
|
2_sampling.R
|
samplingTrainTestValid <- function() {
set.seed(1234)
load(file.path(dataPath, "dataToTranTestValid_cleaned.RData"))
dataToTranTestValid_cleaned$group <- sample(c("train", "test", "valid"),
size = nrow(dataToTranTestValid_cleaned),
replace = TRUE,
prob = c(0.6, 0.2, 0.2))
dataTrain <- dataToTranTestValid_cleaned %>% filter(group == "train")
dataTest <- dataToTranTestValid_cleaned %>% filter(group == "test") %>% filter(!is.na(tot_cur_bal) & !is.na(total_rev_hi_lim))
dataValid <- dataToTranTestValid_cleaned %>% filter(group == "valid")
save(dataTrain, dataTest, dataValid, file = file.path(dataPath, "dataToModeling.RData"))
}
|
27c5eabb690286460e0664e70f7e91c004156acb
|
5c2dcf913088ef4671fa2bd07f9fbcd4ad564e71
|
/tests/testthat/test-ops.R
|
a6b1e81a48f6bd225dfa6da7df33fd3a152b41c7
|
[] |
no_license
|
GRSEB9S/errors
|
9c93724ddbcb2056eede1ac65f1242076afcc290
|
68d28a5dab9c69065d0d7a7f6adaa1eac7586304
|
refs/heads/master
| 2021-08-22T17:02:45.003708
| 2017-11-30T18:27:04
| 2017-11-30T18:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,552
|
r
|
test-ops.R
|
context("ops")
detach("package:errors", unload = TRUE)
library(errors)
test_that("bolean ops throw a warning once", {
xval <- 1
x <- set_errors(xval, 1)
expect_warning(expect_equal(!x, !xval))
expect_silent(expect_equal(!x, !xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x & x, xval & xval))
expect_silent(expect_equal(x & x, xval & xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x | x, xval | xval))
expect_silent(expect_equal(x | x, xval | xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x == x, xval == xval))
expect_silent(expect_equal(x == x, xval == xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x != x, xval != xval))
expect_silent(expect_equal(x != x, xval != xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x < x, xval < xval))
expect_silent(expect_equal(x < x, xval < xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x > x, xval > xval))
expect_silent(expect_equal(x > x, xval > xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x <= x, xval <= xval))
expect_silent(expect_equal(x <= x, xval <= xval))
options(errors.warn.bool = TRUE)
expect_warning(expect_equal(x >= x, xval >= xval))
expect_silent(expect_equal(x >= x, xval >= xval))
})
test_that("ops with numerics throw a warning", {
x <- set_errors(1, 1)
expect_warning(1 + x)
expect_silent(1 + x)
options(errors.warn.coercion = TRUE)
expect_warning(x + 1)
expect_silent(x + 1)
})
test_that("ops work properly", {
xval <- -4.1:5.1
xerr <- seq(0.005, 0.05, 0.005)
x <- set_errors(xval, xerr)
expect_equal(+x, x)
expect_equal(x + set_errors(1), set_errors(xval + 1, xerr))
expect_equal(as.numeric(x + x), as.numeric(set_errors(2) * x))
expect_true(all(errors(x + x) < errors(set_errors(2) * x)))
expect_equal(-x, set_errors(-xval, xerr))
expect_equal(x - set_errors(1), set_errors(xval - 1, xerr))
expect_equal(set_errors(2) * x, set_errors(2 * xval, 2 * xerr))
expect_equal(as.numeric(x * x), as.numeric(x^set_errors(2)))
expect_true(all(errors(x * x) < errors(x^set_errors(2))))
expect_equal(x / set_errors(2), set_errors(xval / 2, xerr / 2))
expect_equal(as.numeric(x / x), rep(1, length(x)))
expect_true(all(errors(x / x) < 1))
expect_true(all(errors(x / x) > 0))
expect_equal(as.numeric(x^x), xval^xval)
expect_equal(x %/% set_errors(3), set_errors(floor(xval/3), xerr/3))
expect_warning(expect_equal(x %*% x, xval %*% xval))
})
|
4f618abc4f74c3b774b89fc82695fe2393bab7b2
|
a5093ed082403fd13679a5be7635113d280f7824
|
/ggparallel.R
|
70f483919db20bb3578f7d105f70164a186a2170
|
[] |
no_license
|
jironghuang/ggparallel_test
|
b65996c62d6ce1431725d7ad32e1f0d06a16697d
|
1f8cbad8904b41687fb405a6bcc5ac0712ae0f8d
|
refs/heads/master
| 2020-04-19T11:57:34.353499
| 2019-01-29T15:57:24
| 2019-01-29T15:57:24
| 168,180,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 950
|
r
|
ggparallel.R
|
library("triangle")
set.seed(0)
q1_d1 <- round(rtriangle(1000, 1, 7, 5))
q1_d2 <- round(rtriangle(1000, 1, 7, 6))
q1_d3 <- round(rtriangle(1000, 1, 7, 2))
df <- data.frame(q1_d1 = factor(q1_d1),
q1_d2 = factor(q1_d2),
q1_d3 = factor(q1_d3))
library(dplyr)
# group by combinations and count
df_grouped <- df %>% group_by(q1_d1, q1_d2, q1_d3) %>% count()
# set an "id" string that denotes the value combination
df_grouped <- df_grouped %>%
mutate(id = factor(paste(q1_d1, q1_d2, q1_d3, sep = '-')))
# sort by count and select top rows
df_grouped <- (df_grouped %>% arrange(desc(n)))[1:10,]
library(GGally)
ggparcoord(df_grouped, columns = 1:3, groupColumn = 'id', scale = 'globalminmax')
library("ggparallel")
ggparallel(list('q1_d1', 'q1_d2', 'q1_d3'), df, order = 0)
df_pcp <- as.data.frame(df_grouped) # this is important!
ggparallel(list('q1_d1', 'q1_d2', 'q1_d3'), df_pcp, weight = 'n', order = 0)
|
a2b906b0e591eedd06113012c2fa8fa014f70f97
|
9136f3f10181bff473701a43697c6961415c4e6d
|
/man/get_instruments.Rd
|
52664e274acd8309cbbb4276eafdef0f0e324911
|
[] |
no_license
|
MRCIEU/mrever
|
c51ccb3de431822a9b16edf0a1817ec1be056f18
|
497aa311b03441d0fe3df832217ab9abd15e0063
|
refs/heads/master
| 2022-12-21T07:39:39.327660
| 2022-12-12T13:29:18
| 2022-12-12T13:29:18
| 172,141,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 371
|
rd
|
get_instruments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.r
\name{get_instruments}
\alias{get_instruments}
\title{<brief desc>}
\usage{
get_instruments(id, graph = options()$eveneo4jg)
}
\arguments{
\item{graph}{= options()$eveneo4jg <what param does>}
\item{id=NULL}{<what param does>}
}
\value{
dataframe
}
\description{
<full description>
}
|
270878228c6344025ded8f1de537cf57317a863e
|
3a4c8fc4e09edb9be762949b266192fb4abbf22e
|
/app.R
|
bc92ffb01987ddba75ef904b8f6d6c05ed4dc276
|
[] |
no_license
|
kreitmew/mpp
|
4a195f0fa92f9ba9172336d80859a7edbcc8cd01
|
a5e8313c559dbe91dc741d78a954a46b48121b8d
|
refs/heads/master
| 2021-05-08T15:04:41.111065
| 2018-03-02T18:44:15
| 2018-03-02T18:44:15
| 120,103,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,952
|
r
|
app.R
|
library(shiny)
library(shinydashboard)
library(plotrix)
library(rlist)
library(xlsx)
library(markdown)
library(rmarkdown)
library(grDevices)
library(rgl)
library(htmlwidgets)
library(GenSA)
library(shinyjs)
library(ggplot2)
library(plotly)
library(Bessel)
options(shiny.sanitize.errors = FALSE)
source('air_parameters.R')
source('init_values.R')
source('acoustic_models.R')
source('ui_routines.R')
source("panel_thickness.R", local = TRUE)
source("panel_radius.R", local = TRUE)
source("panel_porosity.R", local = TRUE)
source("panel_cavern.R", local = TRUE)
source("borders_optim.R", local = TRUE)
source("ui_tab_basic_values.R", local = TRUE)
source("ui_tab_panel_mpp.R", local = TRUE)
ui <- dashboardPage(
dashboardHeader(title = "Absorptionskennlinien von Mehrschichtabsorptionselementen",
titleWidth = 600),
dashboardSidebar(
sidebarMenu(id = "sbmenu",
menuItem("Umgebungsparameter", tabName = "env_param", icon = icon("thermometer-half")),
menuItem("Lochplatte - Luft - 3 Schichten", tabName = "mpp_cavern",
icon = icon("volume-up"))
)
),
body <- dashboardBody(
tabItems(
tabItem(tabName = "env_param",
h2("Definition von Umgebungsparametern"),
fluidPage( uiTabBasicValues() ) ),
tabItem(tabName = "mpp_cavern",
fluidPage( uiTabPanelMPP() ) )
)
)
)
server <- function(input, output, session) {
source("ui_mpp_validation.R", local = TRUE)
observeEvent(input$sbmenu, source("update_basic_values.R", local = TRUE))
observeEvent(input$goButton, source("update_basic_values.R", local = TRUE))
observeEvent(input$resetSA, isolate(source("reset_search_space.R", local = TRUE)))
observeEvent(input$docuMPP,
showModal(modalDialog(
title = "",
includeMarkdown("docu_mpp.md"),
easyClose = TRUE,
footer = NULL,
size = "l"
))
)
observeEvent(input$docuModels,
showModal(modalDialog(
title = "",
includeMarkdown("docu_models.md"),
easyClose = TRUE,
footer = NULL,
size = "l"
))
)
observeEvent(input$resetButton, {
updateNumericInput(session, "temp", value = Tc)
updateNumericInput(session, "press", value = pda)
updateNumericInput(session, "humid", value = rh * 100)
})
observeEvent(input$resetMPPButton, source("reset_mpp_values.R", local = TRUE))
output$plotMPPGeometry <- renderPlot(source("plot_mpp_values.R", local = TRUE))
output$plotMPPHoles <- renderPlot( source("plot_mpp_holes.R", local = TRUE))
output$plotMPPAbsorber <- renderPlot(source("plot_absorption.R", local = TRUE))
source("plot_models.R", local = TRUE)
output$downloadMPPButton <- downloadHandler(
filename = function() {
paste("mpp_data", ".xlsx", sep = "")
},
content = function(file) {
source("download_excel.R", local = TRUE)
}
)
output$downloadMPPcharts <- downloadHandler(
filename = function() {
paste("mpp_charts", ".pdf", sep = "")
},
content = function(file) {
tempReport <- file.path(tempdir(), "mpp_charts.Rmd")
file.copy("mpp_charts.Rmd", tempReport, overwrite = TRUE)
params <- list(date = paste(" ", Sys.time(), sep=""),
absorption = list(g_chart_values, g_chart_holes, g_chart_absorption))
rmarkdown::render(tempReport, output_file = file,
params = params,
envir = new.env(parent = globalenv()))
}
)
observeEvent(input$tabs, { isolate( source("tab_switch.R", local = TRUE) ) })
observeEvent(input$startSA, { source("sa_routine.R", local = TRUE) })
}
shinyApp(ui, server)
|
418f9fa10deafcf11f6e91e8d9eb7791dd83bdf4
|
de76bcdc1b54d52d8a9308cd4ae34290adb3754c
|
/inst/examples/Shakespeare_data.R
|
21f1b8fd824a0996cf889a0fbfcffbc8c968ab15
|
[] |
no_license
|
COMHIS/estc
|
d0e3f249e23d76140f50cc5040e067cc2207a615
|
44a1befe88e4cfe2909d40364a246d946485e7cc
|
refs/heads/master
| 2022-05-01T05:46:41.347913
| 2022-04-02T11:27:48
| 2022-04-02T11:27:48
| 107,229,188
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 665
|
r
|
Shakespeare_data.R
|
# Pick info sheet for Shakespeare & Cervantes
my.authors <- c("Shakespeare, William (1564-1616)", "Cervantes Saavedra, Miguel De (1547-1616)")
for (my.author in my.authors) {
df <- df.preprocessed %>%
dplyr::filter(author %in% my.author) %>%
dplyr::select(estc_control_number = control_number, title, publication_year, language,
publication_place, country, publisher) %>%
dplyr::arrange(estc_control_number, title, publication_year, language,
publication_place, country, publisher)
write.table(df, file = paste("~/tmp/", gsub(" ", "_", my.author), ".csv", sep = ""), quote = F, row.names = F, sep = "|")
}
|
7e4d029b2331ffe8017be47c6e43d72026393f4b
|
8d74e1620f99fea978e9b2e1473eebec8af1785f
|
/1 - Basic R/scripts/7_plots.R
|
16776a4b91e7d6eaf949db56478f82e5208cd0a7
|
[] |
no_license
|
anhnguyendepocen/workshop-R
|
bf18292a348b3bc51c51ab127b8b7d849a4e8bfc
|
0c44940475817d57e56087e55e90a6a3dfae8258
|
refs/heads/master
| 2022-01-08T19:02:22.953694
| 2019-06-06T11:27:39
| 2019-06-06T11:27:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,999
|
r
|
7_plots.R
|
## ----eval=FALSE----------------------------------------------------------
plot(log(subs), log(citeprice), data = Journals)
rug(log(Journals$subs))
rug(log(Journals$citeprice), side = 2)
## ----eval=FALSE----------------------------------------------------------
plot(log(citeprice) ~ log(subs), data = Journals, pch = 19,
col = "blue", xlim = c(0, 8), ylim = c(-7, 4),
main = "Library subscriptions")
rug(log(Journals$subs))
rug(log(Journals$citeprice), side=2)
## ----out.width='45%', fig.align="center"---------------------------------
curve(dnorm, from = -5, to = 5, col = "red", lwd = 3,
main = "Density of the standard normal distribution")
## ----eval=FALSE----------------------------------------------------------
library(ggplot2)
ggplot(data = mpg)
ggplot(mpg)
## ----comment = '', tidy=TRUE, message=FALSE, out.width='50%', fig.align="center"----
library(ggplot2)
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy))
## ----comment = '', tidy=TRUE, message=FALSE, out.width='60%', fig.align="center"----
ggplot(data = mpg) +
geom_point(aes(x = displ, y = hwy,
color = class))
## ----eval=FALSE----------------------------------------------------------
ggplot(mpg) + geom_point(aes(x = displ, y = hwy, color = class))
## ----eval=FALSE----------------------------------------------------------
ggplot(mpg) + geom_point(aes(x = displ, y = hwy, color = "blue"))
## ----eval=FALSE----------------------------------------------------------
ggplot(mpg) + geom_point(aes(x = displ, y = hwy), color = "blue") + theme_bw()
## ----eval=FALSE----------------------------------------------------------
ggplot(mpg) + geom_point(mapping = aes(x = class, y = hwy))
## ----eval=FALSE----------------------------------------------------------
ggplot(data = mpg) +
geom_boxplot(mapping = aes(x = class, y = hwy))
## ----eval=FALSE----------------------------------------------------------
ggplot(data = mpg) +
geom_histogram(mapping = aes(x = hwy))
## ----eval=FALSE----------------------------------------------------------
ggplot(data = mpg) +
geom_density(mapping = aes(x = hwy))
## ----eval=FALSE----------------------------------------------------------
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = hwy)) +
geom_smooth(mapping = aes(x = displ, y = hwy))
## ----message=FALSE, out.width='45%', fig.align="center"------------------
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point() +
geom_smooth() + theme_bw() # adjust theme
## ----message=FALSE, out.width='45%', fig.align="center"------------------
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point(mapping = aes(color = drv)) +
geom_smooth() + theme_bw()
## ----message=FALSE, out.width='45%', fig.align="center"------------------
library(dplyr)
ggplot(data = mpg, mapping = aes(x = displ, y = hwy)) +
geom_point(mapping = aes(color = drv)) +
geom_smooth(data = filter(mpg, drv == "f")) + theme_bw()
|
0277db73c1cd43b042b8d3a77ad34a1550329aa1
|
882f43bcf93a8b35864dbcd20fdc1c4d2690106d
|
/run_analysis.R
|
ea7b588d815d2c11cdba59f78b8ef6ce754bc7a4
|
[] |
no_license
|
andreacrescini/GettingandCleaningData
|
8dd273ad5082caed41432e3bef9950c1a0a3e5a8
|
845a69e4f98ebfdf780898ec87585c7fdae3fb6f
|
refs/heads/master
| 2016-09-03T06:41:11.350731
| 2015-05-20T11:31:33
| 2015-05-20T11:31:33
| 35,942,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,786
|
r
|
run_analysis.R
|
if (!getwd() == "./temp_temp") {
dir.create("./temp_temp")
}
library(plyr)
library(data.table)
library(dplyr)
temp <- tempfile()
download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",temp)
unzip(temp, list = TRUE) #This provides the list of variables and I choose the ones that are applicable for this data set
YTest <- read.table(unzip(temp, "UCI HAR Dataset/test/y_test.txt"))
XTest <- read.table(unzip(temp, "UCI HAR Dataset/test/X_test.txt"))
SubjectTest <- read.table(unzip(temp, "UCI HAR Dataset/test/subject_test.txt"))
YTrain <- read.table(unzip(temp, "UCI HAR Dataset/train/y_train.txt"))
XTrain <- read.table(unzip(temp, "UCI HAR Dataset/train/X_train.txt"))
SubjectTrain <- read.table(unzip(temp, "UCI HAR Dataset/train/subject_train.txt"))
Features <- read.table(unzip(temp, "UCI HAR Dataset/features.txt"))
unlink(temp)
#Concatenate the data tables by rows
#Give the name to the column of XTrain and test
colnames(XTrain) <- t(Features[2])
colnames(XTest) <- t(Features[2])
## Add the colum activities (whici is YTrain or YTest)
## and participants (which is SubjectTrain or SubjectTest) to X Train & Test from
XTrain$activities <- YTrain[, 1]
XTrain$participants <- SubjectTrain[, 1]
XTest$activities <- YTest[, 1]
XTest$participants <- SubjectTest[, 1]
# Tidy-data script for the UCI HAR "Getting and Cleaning Data" course project.
# Assumes XTrain, XTest (train/test measurement tables that already carry
# 'activities' and 'participants' columns) and Features (feature-name lookup,
# names in column V2) exist in the workspace -- TODO confirm against the
# loading code that precedes this chunk.
#Q1 Merges the training and the test sets to create one data set.
Merged <- rbind(XTrain, XTest)
#Q2 Extracts only the measurements on the mean and standard deviation for each measurement.
# Regex keeps only features whose names contain the literal "mean()" or "std()".
pick_mean_std_Features<-Features$V2[grep("mean\\(\\)|std\\(\\)", Features$V2)]
as.char_pick_mean_std_Features <- as.character(pick_mean_std_Features)
# Keep the two id columns alongside the selected measurements.
as.char_pick_mean_std_Features <- c(as.char_pick_mean_std_Features,"activities","participants")
Sub_Merged_MeanStd <- subset(Merged, select =as.char_pick_mean_std_Features)
head(Sub_Merged_MeanStd,1)
##Another way: column-index selection via fixed (non-regex) string matching.
Pick_Mean <- grep("mean()", names(Merged), value = FALSE, fixed = TRUE)
Pick_Std <- grep("std()", names(Merged), value = FALSE, fixed = TRUE)
Sub_Merged_Mean2 <- Merged[Pick_Mean]
Sub_Merged_Std2z <-Merged[Pick_Std]
#Q3 Uses descriptive activity names to name the activities in the data set
# Numeric activity codes 1..6 are replaced by (joke) labels, first in Merged
# and then, with the same mapping duplicated, in the mean/std subset.
Merged$activities <- as.character(Merged$activities) #change the class to replace strings
Merged$activities[Merged$activities == 1] <- "Walking dead"
Merged$activities[Merged$activities == 2] <- "Walking dead Upstairs"
Merged$activities[Merged$activities == 3] <- "Walking dead Downstairs"
Merged$activities[Merged$activities == 4] <- "Sitting doing nothing"
Merged$activities[Merged$activities == 5] <- "Standing like a stupid"
Merged$activities[Merged$activities == 6] <- "Laying like a lazyboy"
Sub_Merged_MeanStd$activities<- as.character(Sub_Merged_MeanStd$activities) #change the class to replace strings
Sub_Merged_MeanStd$activities[Sub_Merged_MeanStd$activities == 1] <- "Walking dead"
Sub_Merged_MeanStd$activities[Sub_Merged_MeanStd$activities == 2] <- "Walking dead Upstairs"
Sub_Merged_MeanStd$activities[Sub_Merged_MeanStd$activities == 3] <- "Walking dead Downstairs"
Sub_Merged_MeanStd$activities[Sub_Merged_MeanStd$activities == 4] <- "Sitting doing nothing"
Sub_Merged_MeanStd$activities[Sub_Merged_MeanStd$activities == 5] <- "Standing like a stupid"
Sub_Merged_MeanStd$activities[Sub_Merged_MeanStd$activities == 6] <- "Laying like a lazyboy"
#Q4 Appropriately labels the data set with descriptive variable names.
# Expand the terse sensor abbreviations: Accelerator, Magnitude, Gyroscope,
# and the leading t/f prefixes (time/frequency domain).
names(Merged) <- gsub("Acc", "Accelerator", names(Merged))
names(Merged) <- gsub("Mag", "Magnitude", names(Merged))
names(Merged) <- gsub("Gyro", "Gyroscope", names(Merged))
names(Merged) <- gsub("^t", "time", names(Merged))
names(Merged) <- gsub("^f", "frequency", names(Merged))
names(Sub_Merged_MeanStd) <- gsub("Acc", "Accelerator", names(Sub_Merged_MeanStd))
names(Sub_Merged_MeanStd) <- gsub("Mag", "Magnitude", names(Sub_Merged_MeanStd))
names(Sub_Merged_MeanStd) <- gsub("Gyro", "Gyroscope", names(Sub_Merged_MeanStd))
names(Sub_Merged_MeanStd) <- gsub("^t", "time", names(Sub_Merged_MeanStd))
names(Sub_Merged_MeanStd) <- gsub("^f", "frequency", names(Sub_Merged_MeanStd))
#Q5 From the data set in step 4, creates a second, independent tidy data set with
#the average of each variable for each activity and each subject
# Drop duplicated column names so aggregate() does not choke on them.
duplicated(colnames(Merged))
Merged <- Merged[, !duplicated(colnames(Merged))]
duplicated(colnames(Sub_Merged_MeanStd))
Sub_Merged_MeanStd <- Sub_Merged_MeanStd[, !duplicated(colnames(Sub_Merged_MeanStd))]
# Average every measurement column per participant/activity combination.
final<-aggregate(. ~participants + activities, Sub_Merged_MeanStd, mean)
final<-final[order(final$participants,final$activities),]
# NOTE(review): hard-coded absolute Windows path -- portable only on this machine.
write.table(final, "c:\\Users\\Andrea\\Desktop\\coursera\\Get_cleaning_data\\Project_2\\final_with_Arg.txt", row.name=FALSE)
|
faf42e720736c6d7958d67f74099d287a4761193
|
fcb7e4a6ed15054450abf708e044b1cb45b428f0
|
/R/lds.R
|
e34d87e343377ce5a012e8aef89435e6ea74f9ad
|
[] |
no_license
|
eshilts/handysmurf
|
c4dd7071490c05d3c2386924d70871811ad450d1
|
952e9fefae256678eefc3be3722a20ef53ce9fae
|
refs/heads/master
| 2020-04-26T12:29:21.333818
| 2012-06-13T15:58:18
| 2012-06-13T15:58:18
| 3,317,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 360
|
r
|
lds.R
|
#' Easy debug logging
#'
#' Streamlined debug logging via \code{sprintf} formatting. Note that this
#' function depends on the logging package and on the log level being set
#' to debug.
#'
#' @param \dots arguments passed to \code{sprintf} to build the log message
#' @author Erik Shilts
#' @note Part of a suite of Opower packages
#' @export
lds <- function(...) {
  msg <- sprintf(...)
  logdebug(msg)
}
|
509e78f1d5c6701fc028265c7799fbd53aadd225
|
58e8b007f00ff8868b53772844f3b0888b6f2876
|
/tests/testthat/test-kin.R
|
e4d4f120b8dccee871884fdfa813caa3f9b1387b
|
[] |
no_license
|
ckenaley/trackter
|
650989f740faefa715c4d322082b07eac9b70f99
|
a1e8d02bd921814ea71d1f6257fe0bfc3da619cc
|
refs/heads/master
| 2022-02-08T16:51:52.706637
| 2022-02-07T21:52:21
| 2022-02-07T21:52:21
| 120,318,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,049
|
r
|
test-kin.R
|
context("kin functions")

# Integration test for kin.simple(): writes a bundled sunfish frame into a
# temp image directory, runs the tracker, checks the returned structure, then
# exercises the argument-validation error paths. Requires EBImage + trackter.
test_that("kin.simple works fine", {
  y <- EBImage::readImage(system.file("extdata/img", "sunfish_BCF.jpg", package = "trackter"))
  t <-tempdir()
  ti <-paste0(tempdir(),"/images")
  tp <- paste0(tempdir(),"/processed_images")
  dir.create(ti)
  dir.create(tp)
  EBImage::writeImage(y,paste0(ti,"/sunfish001.jpg"),type = "jpeg")
  invisible(capture.output( kin.y <- kin.simple(image.dir = ti,save = TRUE,out.dir =tp)))
  # Exactly one processed image should have been written out.
  expect_length(list.files(tp),1)
  expect_is(kin.y,"list")
  expect_named(kin.y,c("kin.dat", "midline","cont","cont.sm","mid.pred","all.classes","dim"))
  expect_true(kin.y$kin.dat$head.pval!=0)
  expect_type(kin.y$midline$roi,type = "character")
  expect_type(kin.y$cont$x,type = "integer")
  expect_true(kin.y$all.classes$size>0)
  # Error paths: missing out.dir, empty directory, nonexistent dirs, bad args.
  expect_error(invisible(capture.output( kin.y <- kin.simple(image.dir = ti,save = TRUE))),"'out.dir' not specified")
  dir.create(paste0(t,"/test_images2"))
  expect_error(invisible(capture.output( kin.simple(image.dir = paste0(t,"/test_images2"),save=FALSE))),"no images in image.dir")
  unlink(paste0(t,"/test_images2"),recursive = TRUE)
  expect_error(invisible(capture.output( kin.simple(image.dir = "foo",out.dir=tp,save=TRUE))),"does not exist")
  expect_error(invisible(capture.output( kin.simple(image.dir = ti,out.dir="foo",save=TRUE))),"does not exist")
  expect_error(invisible(capture.output(kin.simple(image.dir =ti ,save=TRUE))),"not specified")
  expect_error(invisible(capture.output( kin.simple(image.dir =ti,frames=2,save=FALSE))),"out of range")
  expect_error(invisible(capture.output( kin.simple(image.dir =ti ,ml.smooth=list(0.5,"foo"),save=FALSE))),"'ml.smooth' must")
  # Clean up the temp directories so later tests start fresh.
  unlink(ti,recursive = TRUE)
  unlink(tp,recursive = TRUE)
})
# Integration test for kin.search(): same fixture and structure checks as the
# kin.simple test, plus validation of the thr/search.for/out.qual arguments.
test_that("kin.search works fine", {
  y <- EBImage::readImage(system.file("extdata/img", "sunfish_BCF.jpg", package = "trackter"))
  t <-tempdir()
  ti <-paste0(tempdir(),"/images")
  tp <- paste0(tempdir(),"/processed_images")
  dir.create(ti)
  dir.create(tp)
  EBImage::writeImage(y,paste0(ti,"/sunfish001.jpg"),type = "jpeg")
  invisible(capture.output( kin.y <- kin.search(image.dir =ti,save = TRUE,out.dir =tp)))
  expect_length(list.files(tp),1)
  expect_is(kin.y,"list")
  expect_named(kin.y,c("kin.dat", "midline","cont","cont.sm","mid.pred","all.classes","dim"))
  expect_true(kin.y$all.classes$size>0)
  expect_type(kin.y$midline$roi,type = "character")
  expect_type(kin.y$cont$x,type = "integer")
  # Error paths for bad directories and malformed arguments.
  expect_error(invisible(capture.output( kin.y <- kin.search(image.dir = ti,save = TRUE))),"'out.dir' not specified")
  expect_error(invisible(capture.output( kin.search(image.dir = "foo",out.dir=tp,save=TRUE))),"does not exist")
  expect_error(invisible(capture.output( kin.search(image.dir = ti,out.dir="foo",save=TRUE))),"does not exist")
  expect_error(invisible(capture.output(kin.search(image.dir =ti ,save=TRUE))),"not specified")
  expect_error(invisible(capture.output( kin.search(image.dir = ti,frames=2,save=FALSE))),"out of range")
  expect_error(invisible(capture.output( kin.search(image.dir =ti , thr="foo",save=FALSE))),"must be set to")
  expect_error(invisible(capture.output( kin.search(image.dir =ti ,ml.smooth=list("foo",0.6),save=FALSE))),"'ml.smooth' must")
  expect_error(invisible(capture.output( kin.search(image.dir =ti ,search.for="foo",save=FALSE))),"must be set to")
  expect_error(invisible(capture.output( kin.search(image.dir =ti,save=TRUE,out.qual=1.1,paste0(t,"/test_images")))),"'out.qual' must be >=0 and <=1")
  dir.create(paste0(t,"/test_images2"))
  expect_error(invisible(capture.output( kin.search(image.dir = paste0(t,"/test_images2"),save=FALSE))),"no images in image.dir")
  unlink(paste0(t,"/test_images2"),recursive = TRUE)
  unlink(ti,recursive = TRUE)
  unlink(tp,recursive = TRUE)
})
# Integration test for kin.free(): runs on two bundled "lamp" frames, checks
# structure/error paths, and verifies the parallel path gives identical output
# to the serial path (skipped on Windows, where mc.cores > 1 is unsupported).
test_that("kin.free works fine", {
  y <- list.files(system.file("extdata/img", package = "trackter"),full.names = TRUE)
  y <- y[grepl("lamp",y)]
  ti <-paste0(tempdir(),"/images")
  tp <- paste0(tempdir(),"/processed_images")
  dir.create(ti)
  dir.create(tp)
  file.copy(y,paste0(ti,"/",basename(y)))
  invisible(capture.output( kin.y <- kin.free(image.dir = ti,save = TRUE,out.dir =tp,red=0.5)))
  # Two input frames -> two processed images and two size classes.
  expect_length(list.files(tp),2)
  expect_length(kin.y$all.classes$size,2)
  expect_is(kin.y,"list")
  expect_named(kin.y,c('kin.dat', 'midline', 'cont', 'cont.sm', 'all.classes', 'mid.pred', 'dim'))
  expect_true(kin.y$all.classes$size[1]>0)
  expect_type(kin.y$midline$roi,type = "character")
  expect_type(kin.y$cont$x,type = "integer")
  # Error paths for directories and argument validation.
  expect_error(invisible(capture.output( kin.free(image.dir = ti,out.dir=tp,save = FALSE))),"To save processed images")
  expect_error(invisible(capture.output( kin.free(image.dir ="foo",out.dir=tp,save=TRUE))),"does not exist")
  expect_error(invisible(capture.output( kin.free(image.dir = ti,out.dir="foo",save=TRUE))),"does not exist")
  expect_error(invisible(capture.output(kin.free(image.dir =ti,save=TRUE))),"not specified")
  expect_error(invisible(capture.output( kin.free(image.dir = ti,frames=1:3,save=FALSE))),"out of range")
  expect_error(invisible(capture.output( kin.free(image.dir = ti,frames=1,save=FALSE))),"number of frames must be")
  expect_error(invisible(capture.output( kin.free(image.dir =ti , thr="foo",save=FALSE))),"must be set to")
  expect_error(invisible(capture.output( kin.free(image.dir =ti,ml.smooth = list(1,"foo"),save=FALSE))),"must contain 'loess'")
  expect_error(invisible(capture.output( kin.free(image.dir =ti,ml.smooth = list(1,2),save=FALSE))),"'ml.smooth' must be a list of length 2")
  expect_error(invisible(capture.output( kin.free(image.dir =ti ,search.for="foo",save=FALSE))),"must be set to")
  expect_error(invisible(capture.output( kin.free(image.dir=ti,save=TRUE,out.qual=1.1,out.dir=tp))),"'out.qual' must be >=0 and <=1")
  expect_error(invisible(capture.output( kin.free(image.dir=ti,save=FALSE,ml.smooth=list("spline",1.1)))),"'smooth' must <1")
  # Respect CRAN's core limit when choosing how many cores to test with.
  chk <- Sys.getenv("_R_CHECK_LIMIT_CORES_", "")
  .Platform$OS.type
  if (nzchar(chk) && chk == "TRUE") {
    # use 2 cores in CRAN/Travis/AppVeyor
    cor.n <- 2L
  } else {
    # use all cores in devtools::test()
    cor.n <- parallel::detectCores()
  }
  # NOTE(review): n.cor recomputes detectCores() and is used only for the
  # Windows skip below, while the parallel call uses cor.n -- these look like
  # they were meant to be the same variable; confirm intent.
  n.cor <- parallel::detectCores()
  if (.Platform$OS.type == "windows" && n.cor > 1) {
    skip("mc.cores > 1 is not supported on Windows.")
  }
  # Parallel run must reproduce the serial result exactly.
  invisible(capture.output( kin.yp <- kin.free(image.dir = ti,save = TRUE,out.dir =tp,red=0.5,par=TRUE,cores.n = cor.n)))
  expect_identical(kin.y,kin.yp)
  dir.create(paste0(tempdir(),"/test_images2"))
  expect_error(invisible(capture.output( kin.free(image.dir = paste0(tempdir(),"/test_images2"),save=FALSE))),"no images in image.dir")
  unlink(paste0(tempdir(),"/test_images2"),recursive = TRUE)
  unlink(ti,recursive = TRUE)
  unlink(tp,recursive = TRUE)
})
# Test for fin.kin(): feeds it the output of kin.simple() and checks the
# returned components and types, plus its input-validation errors.
test_that("fin.kin works fine", {
  y <- EBImage::readImage(system.file("extdata/img", "sunfish_BCF.jpg", package = "trackter"))
  t <-tempdir()
  ti <-paste0(tempdir(),"/images")
  tp <- paste0(tempdir(),"/processed_images")
  dir.create(ti)
  dir.create(tp)
  EBImage::writeImage(y,paste0(ti,"/sunfish001.jpg"),type = "jpeg")
  invisible(capture.output( kin.y <- kin.simple(image.dir = ti,save = TRUE,out.dir =tp)))
  # Fin positions as proportions of body length.
  fin.pos <- c(0.2,0.55)
  fin.y <- fin.kin(kin=kin.y,fin.pos = fin.pos,smooth.n=1,ml.smooth=0.3)
  expect_is(fin.y,"list")
  expect_named(fin.y,c("cont","fin","fin.pts","comp","midline","amp","bl"))
  expect_type(fin.y$cont$y,type = "double")
  expect_type(fin.y$comp$y,type = "double")
  expect_type(fin.y$fin.pts$y,type = "double")
  expect_type(fin.y$fin$y,type = "double")
  expect_type(fin.y$bl$bl,type = "double")
  expect_type(fin.y$amp$amp2[1],type = "double")
  # Invalid inputs: wrong object, scalar fin.pos, missing fin.pos.
  expect_error(fin.kin(kin.y$cont))
  expect_error(fin.kin(kin.y,fin.pos=0.1))
  expect_error(fin.kin(kin.y,fin.pos=NULL))
})
|
dc52dafa96cd9fd664125a9eae7d4f233e619371
|
09269ff66e0ce0874858425d7ca5f3b8a72a889f
|
/inst/shiny_interfaces/agro/server.R
|
5ce9d1a95383ff306b9f8d93ff60becf5405dae6
|
[] |
no_license
|
gaelleVF/PPBstats-PPBmelange
|
13bae3ee4646036ae46a7862b3611eca809802e4
|
0a68aa3fd37e12210e8a75e577cdec8b93a62797
|
refs/heads/master
| 2023-05-10T23:33:10.101992
| 2021-05-21T15:16:01
| 2021-05-21T15:16:01
| 371,670,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,649
|
r
|
server.R
|
# PPBstats_interface
# Agro server: Shiny server function for the agronomy analysis interface.
server <- function(input, output) {
  # Load the bundled GxE example data set and coerce it to PPBstats'
  # "data_agro" format once per session.
  data("data_GxE")
  data_GxE = format_data_PPBstats(data_GxE, type = "data_agro")
  # Design experiment ---------- (feature currently disabled)
  # out_design_experiment = reactive(
  #   design_experiment(
  #     expe.type = input$design_experiment_expe.type,
  #     location = input$design_experiment_location,
  #     year = input$design_experiment_year,
  #     germplasm = input$design_experiment_germplasm,
  #     controls = input$design_experiment_controls,
  #     nb.controls.per.block = input$design_experiment_nb.controls.per.block,
  #     nb.blocks = input$design_experiment_nb.blocks,
  #     nb.cols = input$design_experiment_nb.cols,
  #     return.format = input$design_experiment_return.format
  #   )
  # )
  #
  # output$design_experiment_plot = renderPlot({out_design_experiment$design})
  # output$design_experiment_data = renderPlot({out_design_experiment$data.frame})
  #
  # Describe data ----------
  # Re-render the descriptive plot whenever any of the UI inputs change;
  # all plotting options are forwarded straight from the Shiny inputs.
  output$describe_data_plot <- renderPlot({
    describe_data(
      data = data_GxE,
      plot_type = input$describe_data_plot_type,
      x_axis = input$describe_data_x_axis,
      in_col = input$describe_data_in_col,
      vec_variables = input$describe_data_vec_variables,
      nb_parameters_per_plot_x_axis = input$describe_data_nb_parameters_per_plot_x_axis,
      nb_parameters_per_plot_in_col = input$describe_data_nb_parameters_per_plot_in_col,
      labels_on = input$describe_data_labels_on,
      labels_size = input$describe_data_labels_size
    )
  })
  # AMMI GGE ---------- (not yet implemented)
  # data_GxE = format_data_PPBstats(data_GxE, type = "data_agro")
}
|
34a1f47888e86a500cab6a42a67859b4ca306448
|
9fe1127bad83f9181c9148190953cde2c31985cc
|
/R/transitorio/transitorio.R
|
3b76a32790e6fd5ae60c72bf3bdd81b07fee8495
|
[] |
no_license
|
bendeivide/statscience
|
6ab66d101848d08d3bd411ffa4f9918005a9f4dc
|
b934b5b38f27b113d698ec950b4cafa366ac984d
|
refs/heads/main
| 2023-01-21T21:45:08.504973
| 2020-11-27T07:15:32
| 2020-11-27T07:15:32
| 316,344,148
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,774
|
r
|
transitorio.R
|
# tt <- tktoplevel()
# tkpack(quadroinicial <- tkframe(tt), fill = "both", expand = TRUE)
# telainicial <- ttknotebook(quadroinicial)
# ##
# paned1 <- ttkpanedwindow(telainicial, orient = "horizontal", style = "Toolbar.TPanedwindow")
# paned2 <- ttkpanedwindow(telainicial, orient = "horizontal", style = "Toolbar.TPanedwindow")
# paned3 <- ttkpanedwindow(telainicial, orient = "horizontal", style = "Toolbar.TPanedwindow")
# ##
# ##############
# # Child groups
# ##############
# #group1 <- NULL
# wid2 <- 400
# group1input <- ttkpanedwindow(paned1, orient = "vertical", width = wid2, style = "Toolbar.TPanedwindow")
# tkadd(paned1, group1input)
#
# group2input <- ttkpanedwindow(paned1, orient = "vertical", width = wid - wid2, style = "Toolbar.TPanedwindow")
# tkadd(paned1, group2input)
# ##
# tkadd(telainicial, paned1, text = "Input")
# tkadd(telainicial, paned2, text = "Graphics")
# tkadd(telainicial, paned3, text = "Output")
# tkpack(telainicial, fill = "both", expand = TRUE)
#
# # Design of Exepriments
# #----------------------
# # Aba Input
# tkpack(infodados <- ttklabelframe(group1input,
# text = gettext("Configuration of the data",
# domain = "R-statscience")),
# fill = "both", expand = TRUE)
#
# tkpack(infoexp <- ttklabelframe(group1input,
# text = gettext("Configuration of the Experiment",
# domain = "R-statscience")),
# fill = "both", expand = TRUE)
#
# tkpack(treatstr <- ttklabelframe(group1input,
# text = gettext("Treatment Structures",
# domain = "R-statscience")),
# fill = "both", expand = TRUE)
|
9859088364656cab9c4b41e873affd5f66c92734
|
34cc9dcaba027079ae08e441f6b1ee0c46432a93
|
/man/municipios_2014.Rd
|
61b2dc705e8e2466e8c1f13fe5607832a98132b2
|
[
"MIT"
] |
permissive
|
rOpenSpain/LAU2boundaries4spain
|
d471d1356e5a43125b0ac93fdf14a105ef00a815
|
e07fdc690186b63d1792485054d295c17a1679c7
|
refs/heads/master
| 2023-04-02T06:37:04.917731
| 2021-04-14T21:01:20
| 2021-04-14T21:01:20
| 125,485,567
| 3
| 1
|
MIT
| 2021-04-14T20:36:48
| 2018-03-16T08:19:50
| null |
UTF-8
|
R
| false
| true
| 910
|
rd
|
municipios_2014.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/document_data.R
\docType{data}
\name{municipios_2014}
\alias{municipios_2014}
\title{LINDES-2014 del IGN para los municipios (LAU2-SPAIN)}
\format{
A sf-data frame con 8.201 filas y 7 variables
\itemize{
\item INECodMuni: Codigo INE para el municipio (5 digitos)
\item NombreMuni: Nombre del municipio (o condominio)
\item INECodProv: Codigo provincial
\item NombreProv: Nombre de la provincia
\item INECodCCAA: Codigo de la C.A.
\item NombreCCAA: Nombre de la C.A.
\item geometry: Lindes municipales
}
}
\source{
\url{http://centrodedescargas.cnig.es/CentroDescargas/index.jsp}
}
\usage{
municipios_2014
}
\description{
Poligonos/geometrias/shapes municipales (LAU2-SPAIN). Sacados del IGN
contiene 8.201 filas (8.117 municipios + 84 condominios)
}
\examples{
\dontrun{
municipios_2014 <- municipios_2014
}
}
\keyword{datasets}
|
d4809ade61e55306ab8900e9d866e685be83077d
|
be741ee891f309778069ba92f4232e9ef18f038a
|
/R/crime.R
|
d08c2fa738e713feb93c7f5cd719337966c96ec7
|
[
"OGL-UK-3.0"
] |
permissive
|
xmr17/ukpolice
|
14822407bf181e8b927b8dbecf33537d47a288bb
|
e2d2705bead920c8fa1d16f5a795269456da89bc
|
refs/heads/master
| 2020-06-10T21:59:45.097493
| 2019-04-17T02:00:34
| 2019-04-17T02:00:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,531
|
r
|
crime.R
|
#' Find street level crime within a specified distance or area
#'
#' Crimes at street-level; either within a 1 mile radius of a single point, or
#' within a custom area. The street-level crimes returned in the API are
#' only an approximation of where the actual crimes occurred, they are not
#' the exact locations. See the about page
#' (<https://data.police.uk/about/#location-anonymisation>) for more
#' information about location anonymisation. Note that crime levels may
#' appear lower in Scotland, as only the British Transport Police provide
#' this data.
#'
#' @param lat latitude of the requested crime area
#' @param lng, longitude of the requested crime area
#' @param date, Optional. (YYY-MM), limit results to a specific month. The
#' latest month will be shown by default. e.g. date = "2013-01"
#' @param ... further arguments passed to or from other methods. For example,
#' verbose option can be added with
#' `ukp_api("call", config = httr::verbose())`. See more in `?httr::GET`
#' documentation
#' (<https://cran.r-project.org/web/packages/httr/>) and
#' (<https://cran.r-project.org/web/packages/httr/vignettes/quickstart.html>).
#'
#' @note The API will return a 400 status code in response to a GET request
#' longer than 4094 characters. For submitting particularly complex poly
#' parameters, consider using POST instead.
#'
#' @return a tibble with the columns:
#' \itemize{
#' \item category: Category of the crime
#' (<https://data.police.uk/docs/method/crime-street/>)
#' \item persistent_id: 64-character unique identifier for that crime.
#' (This is different to the existing 'id' attribute, which is not
#' guaranteed to always stay the same for each crime.)
#' \item date: Date of the crime YYYY-MM
#' \item latitude: Latitude
#' \item longitude: Longitude
#' \item street_id: Unique identifier for the street
#' \item street_name: Name of the location. This is only an approximation of
#' where the crime happened
#' \item context: Extra information about the crime (if applicable)
#' \item id: ID of the crime. This ID only relates to the API, it is NOT a
#' police identifier
#' \item location_type: The type of the location. Either Force or BTP:
#' Force indicates a normal police force location; BTP indicates a British
#' Transport Police location. BTP locations fall within normal police
#' force boundaries.
#' \item location_subtype: For BTP locations, the type of location at which
#' this crime was recorded.
#' \item outcome_status: The category and date of the latest recorded
#' outcome for the crime
#' }
#'
#' @note more documentation here:
#' <https://data.police.uk/docs/method/crime-street/>
#'
#' @examples
#'
#' ukp_crime_data <- ukp_crime(lat = 52.629729, lng = -1.131592)
#'
#' head(ukp_crime_data)
#'
#' @export
#'
ukp_crime <- function(lat,
                      lng,
                      date = NULL,
                      ...){

  # Build the request path once; the optional date filter is appended only
  # when supplied (the API defaults to the latest available month).
  # This replaces the duplicated is.null(date) == FALSE/TRUE branches.
  path <- glue::glue("api/crimes-street/all-crime?lat={lat}&lng={lng}")
  if (!is.null(date)) {
    path <- glue::glue("{path}&date={date}")
  }
  result <- ukp_api(path)

  # Flatten each crime record into one row of a tibble.
  extract_result <- purrr::map_dfr(.x = result$content,
                                   .f = ukp_crime_unlist)

  # rename the data to flat, friendly column names
  extract_result <- dplyr::rename(
    extract_result,
    lat = location.latitude,
    long = location.longitude,
    street_id = location.street.id,
    street_name = location.street.name,
    date = month,
    outcome_status = outcome_status.category,
    outcome_date = outcome_status.date
    )

  # lat/long arrive as character from the API; coerce to numeric
  final_result <- dplyr::mutate(extract_result,
                                lat = as.numeric(lat),
                                long = as.numeric(long))

  # order the columns for the caller (the original listed `category` twice;
  # once is enough)
  final_result <- dplyr::select(final_result,
                                category,
                                persistent_id,
                                date,
                                lat,
                                long,
                                street_id,
                                street_name,
                                context,
                                id,
                                location_type,
                                location_subtype,
                                outcome_status)

  return(final_result)

} # end function
#' Extract crime areas within a polygon
#'
#' @param poly_df dataframe containing the lat/lng pairs which define the
#' boundary of the custom area. If a custom area contains more than 10,000
#' crimes, the API will return a 503 status code. ukp_crime_poly converts the
#' dataframe into lat/lng pairs, separated by colons:
#' `lat`,`lng`:`lat`,`lng`:`lat`,`lng`. The first and last coordinates need
#' not be the same — they will be joined by a straight line once the request
#' is made.
#' @param date, Optional. (YYY-MM), limit results to a specific month. The
#' latest month will be shown by default. e.g. date = "2013-01"
#' @param ... further arguments passed to or from other methods. For example,
#' verbose option can be added with
#' `ukp_api("call", config = httr::verbose())`.
#' See more in `?httr::GET` documentation
#' <https://cran.r-project.org/web/packages/httr/> and
#' <https://cran.r-project.org/web/packages/httr/vignettes/quickstart.html>.
#' @note further documentation here:
#' <https://data.police.uk/docs/method/crime-street/>
#'
#' @examples
#'
#' library(ukpolice)
#'
#' # with 3 points
#' poly_df_3 = data.frame(lat = c(52.268, 52.794, 52.130),
#' long = c(0.543, 0.238, 0.478))
#'
#' ukp_data_poly_3 <- ukp_crime_poly(poly_df_3)
#' head(ukp_data_poly_3)
#'
#' # with 4 points
#' poly_df_4 = data.frame(lat = c(52.268, 52.794, 52.130, 52.000),
#' long = c(0.543, 0.238, 0.478, 0.400))
#' ukp_data_poly_4 <- ukp_crime_poly(poly_df = poly_df_4)
#'
#' head(ukp_data_poly_4)
#'
#' @export
ukp_crime_poly <- function(poly_df,
                           date = NULL,
                           ...){

  # poly must be a dataframe
  stopifnot(inherits(poly_df, "data.frame"))

  # "poly_df must contain columns named 'lat' and 'long'"
  stopifnot(c("lat", "long") %in% names(poly_df))

  # Collapse the data frame into the API's "lat,lng:lat,lng:..." string.
  poly_string <- ukp_poly_paste(poly_df,
                                "long",
                                "lat")

  # Build the request path once; append the optional date filter when given
  # (replaces the duplicated is.null(date) == FALSE/TRUE branches).
  path <- glue::glue("api/crimes-street/all-crime?poly={poly_string}")
  if (!is.null(date)) {
    path <- glue::glue("{path}&date={date}")
  }
  result <- ukp_api(path)

  # Flatten each crime record into one row of a tibble.
  extract_result <- purrr::map_dfr(.x = result$content,
                                   .f = ukp_crime_unlist)

  # rename the data to flat, friendly column names
  extract_result <- dplyr::rename(
    extract_result,
    lat = location.latitude,
    long = location.longitude,
    street_id = location.street.id,
    street_name = location.street.name,
    date = month,
    outcome_status = outcome_status.category,
    outcome_date = outcome_status.date
    )

  # ensure that lat and long are numeric
  final_result <- dplyr::mutate(extract_result,
                                lat = as.numeric(lat),
                                long = as.numeric(long))

  # order the columns for the caller (the original listed `category` twice;
  # once is enough)
  final_result <- dplyr::select(final_result,
                                category,
                                persistent_id,
                                date,
                                lat,
                                long,
                                street_id,
                                street_name,
                                context,
                                id,
                                location_type,
                                location_subtype,
                                outcome_status)

  return(final_result)

} # end function
#' ukp_crime_street_outcome
#' ukp_crime_location
#' ukp_crime_no_location
#' ukp_crime_categories
# Outcomes for a specific crime:
# https://data.police.uk/docs/method/outcomes-for-crime/
#' ukp_crime_outcome
|
f41f31a2d07f329377a2d2e453385f8723c388cd
|
ed155a50ba7cc4cbad11b17cc8a2b85d7ecc95df
|
/Heatmaps.R
|
9057b2357521ab51cfcc33c2d0b73a22789adfda
|
[] |
no_license
|
Abdulrahims/Climate-Change-Survey-Heatmaps
|
29d0d1efcfec5039cd6efff223cdf325254b81c6
|
385a7ffbac9d1640bf889dbce421027b7c029ffc
|
refs/heads/master
| 2021-01-01T05:45:29.168056
| 2016-05-06T02:27:58
| 2016-05-06T02:27:58
| 58,097,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,214
|
r
|
Heatmaps.R
|
# Heatmaps of US climate-change survey responses (problem level / harm level).
library(ggplot2)
library(ggmap)

climate_change <- read.csv("Climate_change.csv", stringsAsFactors = FALSE)

# Keep only US responses.
# BUG FIX: the original removed rows inside `for(i in 1:nrow(climate_change))`.
# Deleting rows while iterating shifts the remaining row indices, and the loop
# bound is fixed at the original nrow, so rows were skipped and the index ran
# past the end of the shrunken data frame. A vectorized filter is correct.
climate_change <- climate_change[climate_change$countryCode == "US", ]

# Just US data
climate_change_us <- data.frame(climate_change)

# Map survey answers onto an ordered 1..n numeric scale via their position in
# `levels_map`. Answers outside the map become NA with a warning (the original
# printed "Irregular value" for each such row).
recode_response <- function(x, levels_map) {
  codes <- match(x, levels_map)
  if (any(is.na(codes) & !is.na(x))) {
    warning("Irregular value")
  }
  as.numeric(codes)
}

# Just one question for now, seriousness of climate change. Remove other variables.
climate_change_us_problevel <- climate_change_us
climate_change_us_problevel$harm.level <- NULL
climate_change_us_problevel$personal <- NULL
climate_change_us_problevel$Date <- NULL

# 1 - Not a problem ... 4 - A very serious problem
climate_change_us_problevel$Prob.level <- recode_response(
  climate_change_us_problevel$Prob.level,
  c("Not a problem",
    "A not too serious problem",
    "A somewhat serious problem",
    "A very serious problem")
)

# Only need latitude and longitude for mapping, and the data.
climate_change_us_problevel <- subset(
  climate_change_us_problevel, ,
  -c(X, ID, cityName, countryCode, countryName, ipAddress, timeZone, zipCode)
)

# Begin creating heatmaps
# National prob level
nat_problevel <- get_map(location = c(lon = -96, lat = 38), zoom = 4,
                         maptype = "roadmap", scale = 2)
ggmap(nat_problevel) +
  geom_point(data = climate_change_us_problevel,
             aes(x = longitude, y = latitude, colour = Prob.level, alpha = 0.9),
             size = 2, shape = 21) +
  guides(fill = FALSE, alpha = FALSE, size = FALSE) +
  scale_colour_continuous(low = "blue", high = "red", space = "Lab",
                          guide = "colorbar")

# Florida prob level map
florida_problevel <- get_map(location = c(lon = -83, lat = 28), zoom = 7,
                             maptype = "roadmap", scale = 2)
ggmap(florida_problevel) +
  geom_point(data = climate_change_us_problevel,
             aes(x = longitude, y = latitude, colour = Prob.level, alpha = 0.3),
             size = 5, shape = 20) +
  guides(fill = TRUE, alpha = FALSE, size = FALSE) +
  scale_colour_continuous(low = "blue", high = "red", space = "Lab",
                          guide = "colorbar")

# Harm level: when will climate change start to harm people?
climate_change_us_harmlevel <- subset(
  climate_change_us, ,
  -c(X, Prob.level, personal, ID, Date, cityName, countryCode, countryName,
     ipAddress, timeZone, zipCode)
)

# 1 - Never ... 4 - Now
climate_change_us_harmlevel$harm.level <- recode_response(
  climate_change_us_harmlevel$harm.level,
  c("Never", "Not for many years", "In the next few years", "Now")
)

# Florida harm level
harmmap <- get_map(location = c(lon = -83, lat = 28), zoom = 7,
                   maptype = "roadmap", scale = 2)
ggmap(harmmap) +
  geom_point(data = climate_change_us_harmlevel,
             aes(x = longitude, y = latitude, colour = harm.level, alpha = 0.3),
             size = 5, shape = 20) +
  guides(fill = TRUE, alpha = FALSE, size = FALSE) +
  scale_colour_continuous(low = "blue", high = "red", space = "Lab",
                          guide = "colorbar")
|
be77f8420b55221122fc202a2d82762494039461
|
8b9c4825565521d67b192990016b5a174f9650d6
|
/man/burr_plt.Rd
|
3de02ce317c234a975800e5297e692cdb7d84388
|
[] |
no_license
|
cran/distributionsrd
|
d6c0efa50e5ad60999b6ffa3cee94dff50047973
|
e380c274007c99e2ccaa6b34775f6cfb4fefe1c9
|
refs/heads/master
| 2022-09-07T03:41:19.652239
| 2020-05-25T17:50:03
| 2020-05-25T17:50:03
| 267,007,404
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,097
|
rd
|
burr_plt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/burr.R
\name{burr_plt}
\alias{burr_plt}
\title{Burr coefficients after power-law transformation}
\usage{
burr_plt(shape1 = 2, shape2 = 1, scale = 0.5, a = 1, b = 1, inv = FALSE)
}
\arguments{
\item{shape1, shape2, scale}{Shape1, shape2 and scale of the Burr distribution, defaults to 2, 1 and 1 respectively.}
\item{a, b}{constant and power of power-law transformation, defaults to 1 and 1 respectively.}
\item{inv}{logical indicating whether coefficients of the outcome variable of the power-law transformation should be returned (FALSE) or whether coefficients of the input variable being power-law transformed should be returned (TRUE). Defaults to FALSE.}
}
\value{
Returns a named list containing
\describe{
\item{coefficients}{Named vector of coefficients}
}
## Comparing probabilites of power-law transformed transformed variables
pburr(3,shape1=2,shape2=3,scale=1)
coeff = burr_plt(shape1=2,shape2=3,scale=1,a=5,b=7)$coefficients
pburr(5*3^7,shape1=coeff[["shape1"]],shape2=coeff[["shape2"]],scale=coeff[["scale"]])
pburr(5*0.9^7,shape1=2,shape2=3,scale=1)
coeff = burr_plt(shape1=2,shape2=3,scale=1,a=5,b=7, inv=TRUE)$coefficients
pburr(0.9,shape1=coeff[["shape1"]],shape2=coeff[["shape2"]],scale=coeff[["scale"]])
## Comparing the first moments and sample means of power-law transformed variables for large enough samples
x = rburr(1e5,shape1=2,shape2=3,scale=1)
coeff = burr_plt(shape1=2,shape2=3,scale=1,a=2,b=0.5)$coefficients
y = rburr(1e5,shape1=coeff[["shape1"]],shape2=coeff[["shape2"]],scale=coeff[["scale"]])
mean(2*x^0.5)
mean(y)
mburr(r=1,shape1=coeff[["shape1"]],shape2=coeff[["shape2"]],scale=coeff[["scale"]],lower.tail=FALSE)
}
\description{
Coefficients of a power-law transformed Burr distribution
}
\details{
If the random variable x is Burr distributed with scale shape and shape scale, then the power-law transformed variable
\deqn{ y = ax^b }
is Burr distributed with shape1 \eqn{shape1}, shape2 \eqn{b*shape2} and scale \eqn{ ( \frac{scale}{a})^{\frac{1}{b}} }.
}
|
86ac5dc5038f82488e238fb5116b4d8ece4aad31
|
b6cb8530baaf46f4907713acae0c36bb301903bd
|
/2_content/Unit_4/practical/exercise_milk.R
|
75d8d20ff8869b0b2e69cf54564f230bec7483cc
|
[] |
no_license
|
hokirv/BIO144
|
10581ba7fc2123d43027ae96dd2588e574c20130
|
e103e3dd9218819a7e63ae5d03ab98145e683287
|
refs/heads/master
| 2021-01-26T06:58:56.273474
| 2020-02-21T19:53:07
| 2020-02-21T19:53:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,243
|
r
|
exercise_milk.R
|
##
## Clear R (clears the workspace for the exercise; does not unload packages)
rm(list=ls())

## load some libraries
library(rethinking)
library(tidyverse)
library(ggfortify)
library(cowplot)

## load the dataset
milk <- read_csv("https://github.com/opetchey/BIO144/raw/master/3_datasets/milk_rethinking.csv")

## look at the data and we see there are NAs in the neocortex.perc variable.
## Lets remove the rows with these NAs
milk <- na.omit(milk)

## Have a look at the distribution of the variables
ggplot(gather(milk, key=variable, value=value, 3:8), aes(value)) +
  geom_histogram(bins=10) + facet_wrap(~variable, scales = "free")

## the response variable, kcal.per.g, and the mass variable seem a bit right skewed
## so we can try log transformation
milk <- mutate(milk,
               log10_kcal.per.g = log10(kcal.per.g),
               log10_mass=log10(mass))

## not that much better
ggplot(gather(milk, key=variable, value=value, 3:10), aes(value)) +
  geom_histogram(bins=10) + facet_wrap(~variable, scales = "free")

## look at the bivariate scatter plots
ggplot(milk, aes(neocortex.perc, kcal.per.g)) + geom_point()
ggplot(milk, aes(log10_mass, kcal.per.g)) + geom_point()
## Not much going on here
ggplot(milk, aes(neocortex.perc, kcal.per.g)) + geom_point() + facet_wrap(~cut(log10_mass,2))
with(milk, pairs(cbind(kcal.per.g,neocortex.perc, log10_mass)))

## and are the explanatory variables correlated?
ggplot(milk, aes(neocortex.perc, log10_mass)) + geom_point()
## Yes, they are positively correlated.

## degrees of freedom for regression with one explanatory variable should be 15
## (one intercept and one slope estimated)
## degrees of freedom for regression with two explanatory variable should be 14
## (one intercept and two slope estimated)

## one of the regressions
m1 <- lm(kcal.per.g ~ neocortex.perc, data=milk)
autoplot(m1) ## pretty bad qqplot!
summary(m1)
## nothing significant, supporting our eyeball
## r2 0.024

## the other of the regressions
m2 <- lm(kcal.per.g ~ log10_mass, data=milk)
autoplot(m2) ## better qqplot
summary(m2)
## nothing significant, supporting our eyeball
## r2 0.12

## both explanatory variables...
m3 <- lm(kcal.per.g ~ mass + neocortex.perc, data=milk)
autoplot(m3) ## pretty bad qqplot, though few data points
summary(m3)
anova(m3)
## both variables significant
## r2 0.53
## WOW!

## adding a random noise variable shows how spurious predictors behave
r1 <- rnorm(17)
m4 <- lm(kcal.per.g ~ mass + neocortex.perc + r1, data=milk)
summary(m4)

library(car)
vif(m3)
## See https://onlinecourses.science.psu.edu/stat501/node/347
## (BUG FIX: this URL was a bare, uncommented line in the original script,
## which is a parse error in R -- it is now a comment.)

## this happens due to correlation in the explantory variables.
## Get marriage rate residuals and plot against divorce, and same for
## marriage age residuals
Mass_residuals <- residuals(lm(log10_mass ~ neocortex.perc, milk))
p1 <- ggplot(milk, aes(Mass_residuals, kcal.per.g)) + geom_point()
Neocortex.perc_residuals <- residuals(lm(neocortex.perc ~ log10_mass, milk))
p2 <- ggplot(milk, aes(Neocortex.perc_residuals, kcal.per.g)) + geom_point()
plot_grid(p1, p2, labels=c("A", "B"), ncol = 2, nrow = 1)

## Counterfactual plot should work!

## scaled explanatory variables
milk <- mutate(milk,
               s_log10_mass=scale(log10_mass),
               s_neocortex.perc=scale(neocortex.perc))
m3 <- lm(kcal.per.g ~ s_log10_mass + s_neocortex.perc, data=milk)
summary(m3)
|
0a4daebf388291401743a3dd9bd226ff4060ade2
|
fe7bd0af52c925054508955ca6679bd23aefb91c
|
/myfirstscript.R
|
646262832ec0b03bb2192c0523066e1c17eeff73
|
[] |
no_license
|
ChrisDStats/MyFirstProject
|
5b960c281ea5ec0748bf5443f1530333e5eaf2f8
|
d499d2dcba45663d56247f0a472fe12de39ff796
|
refs/heads/master
| 2021-06-25T18:52:47.214669
| 2017-09-07T15:19:15
| 2017-09-07T15:19:15
| 102,742,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,405
|
r
|
myfirstscript.R
|
# First R practice script: small expressions, operators, vectors,
# data.frames, lists, file I/O and package loading.
#Thisis a comment
5+ #this is the famous five
209+ #I could note that this is the CCGs in 2016
211+ #while this is the number of CCGs in 2015
6
# NOTE(review): "/" on its own line was a parse error -- the expression above
# is already complete, so a new line cannot start with a binary operator.
# To continue an expression across lines, end the line with the operator.
#/
3
mean
3<4
"2017-09-04"
as.Date("2017-09-04")
"what?"
2^2
sqrt(-1)
1:5
pi
pi^2
1/0
#floating point errors
0.2<0.2
0.2==0.2
(0.8-0.6)==(0.7-0.5)
all.equal(0.8-0.6, 0.7-0.5)
5!=4
(0.8-0.6)!=(0.7-0.5)
!all.equal(0.8-0.6, 0.7-0.5)
#other operators
9%%4 #the amount left over after dividing 9 by a whole number of fours
9%/%4 #the number of times four will fit wholly into 9.
pi>3
1:5>3
1:5<=3
# NOTE(review): "5!" was a parse error -- R has no postfix factorial
# operator; use factorial(5) instead.
#5!
sum(1:5<=3) #how many of the numbers in the series 1:5 are <= 3.
#search if something isr in
colours<- c("red", "blue", "orange")
"ORANGE"%in% colours
tolower("ORANGE")%in% tolower(colours)
"LETTERS"
"a"%in% letters
tolower(LETTERS)==letters
sum(tolower(LETTERS)==letters)
1:10 >=3
1:10<7
1:10 >=3 & 1:10<7
# Commented out as the inline note instructs: the syntax below is invalid.
#1:10 &&(>=3, <7) #why is this not working use ctrl-shift-c to comment this whole line out.
`1a` <- "b"
`1a` <- NULL
rm("1a")
a<-10
10->b
my_first_variable<-1+2+3+4 #I can create variables
MyFirstVariable<-1+2+3+4+5 #keep naming convention stable within script.
`2017Data`<-333000 #can't start with a number so must always use backticks
#2017Data <-333000 won't work
c<-1:5
ls()
#rm(list=ls()) #This will delete every value saved in the environment, ie. everything returned by the ls() function
#make vectors and combine with c
c(1:5, 9:20)
#Vectors are useful to enter tiny datasets to test script.
c(1:50)
c("red", "pink")
c(1:50, "red", "pink") #Now the numbers are also text, as a vector can only be one data type
c(1:50, TRUE, FALSE, NA, "")
#Now we want 2D tabular data, these are Data frames Data.frames
incidence<-c(101, 500, 700)
ageweight<-c(0.1,2.5,1.5)
incDF<-data.frame(incidence, ageweight)
largeABC<-data.frame (id=1:26, abc=letters)
View(largeABC)
nums<-c(1:50)
words<- c("red", "pink")
New<-data.frame(nums, words)
1:20*5
1:20*1:3
data.frame(1:21 *1:3) #so this is 1:50 and second number is 1*1, then 2*2, 3*3 then 4*1, 5*2, 6*3 etc.
data.frame(1:21, 1:3)
data.frame(a=1:21, b=1:3) #same as above but neater
#lists
#E.g. keeping metadata with data
#make sure to names things as locations might change (sort of like vlookup vs a cell ref)
list(1:50, c("red", "hotels"))
list(
  list(c("red", "hotels")),
  list(c(1:50))
)#This is a mess to read the output, therefore name things so the output is easier to read
list(
  firstlist=list(words=c("red", "hotels")),
  secondlist= list(numbers=c(1:50))
)
#doesn't work
#list(
#  ListOfLists=list(c (`2017Data`),
#  list(c (a),
#  list (c(ageweight))
#  )
data.frame (1:37, "a") #Look at the awful column headings
list(1:5, b=list(a=1, b=FALSE)) #look at this mix of lists some with names some without.
c("a", "b", "d") %in% c("a", "c", "d") #compares each element with each
1:5^2
(1:5)^2 #: is below ^ in hierarchy
all.equal(10,20)
ls()
c(1, TRUE) #How is this storted and how to MAKE it be an integer.
is.integer(c(1, TRUE))
as.integer(c(1, TRUE))
is.integer(as.integer(c(1, TRUE)))
OrigVec<-c(1, TRUE) #How is this storted and how to MAKE it be an integer.
class (OrigVec)
is.integer(OrigVec)
OrigVec <-as.integer(OrigVec)
is.integer(OrigVec)
length (OrigVec)
mean(OrigVec)
paste (1:30, 1:3, sep="-", collapse=";")#look how I can play with numbers
paste0 (1:30, 1:3)
#concatData<- c(1:30, 1:3, sep="-", collapse=";",
#              1:20, 1:2, sep="-", collapse=";") #broken
dir.create("data")
write.csv(incDF, file="data/simple.csv")
read.csv("data/simple.csv")
help("mean")
apropos("mean")
Sys.Date()
Sys.Date #Typing these without the extra bits brings up info about the function
mean
file.path #This one is recursive.
set.seed(123) #if you run this first you'll get the SAME "random" numbers.
rnorm(100, mean=10, sd=1) #generates test data
runif(50,1,50)
#To install packages I can go throuhg menus in packages or use function
#install.packages("tidyverse")
library(tidyverse)
stats::filter #filter is diferent in the original package vs the new one.
#When uploading tidyverse it highlights this for e.g. dplyr
dplyr::filter
tidyverse::tidyverse_conflicts() #see what the conflicts are.
#Shows you how much you are using that package.
#so if you are using unapproved code, after getting the end result you can find the unapproved bits to rewrite them.
browseVignettes(package = "dplyr")
library("dplyr")
|
dabc51a5b93ec928b80f8fc4216688d8d7ad8fe9
|
31cff46884118639496db381186e6156ca45fa7e
|
/R/Stability.R
|
080e87f80f7e21850cfe56d91d8ce04abf07a76d
|
[
"MIT"
] |
permissive
|
meyer-lab-cshl/drStable
|
aeb908d251cb1b4363c688b6e3a848b5e9d3b72d
|
692d56aa4ddb7b3d924dad4cd0931c6a2d1bd96c
|
refs/heads/master
| 2023-02-22T11:37:22.179948
| 2021-01-27T19:24:55
| 2021-01-27T19:24:55
| 136,906,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,592
|
r
|
Stability.R
|
#' Estimate stability
#'
#' Takes the output of \code{\link{subsetDimReduction}} and finds
#' the stable low-dimension components across dimensionality reduced
#' subsets of the original data.
#' @param dr [list] either output of \code{\link{subsetDimReduction}} or
#' manually generated list with dimensionality reduction results on different
#' subsets of the same dataset. Each list entry is a matrix with row and column
#' names; row.names are crucial to allow for the comparison of the lower
#' dimensional embedding across common sample sets.
#' @param threshold [double] Threshold for stability between 0 and 1 or vector
#' of thresholds between 0 and 1.
#' @param procrustes [logical] indication whether a procrustes transformation
#' to match the lower dimensional representations of common samples between
#' subsets should be performed.
#' @param verbose [logical] If set, progress messages are printed to standard
#' out
#' @return named list with [1] stability: list with one dataframe per threshold,
#' where each dataframe contains the stability estimate of the low-dimensional
#' components for that threshold value, [2] corr: data.frame with correlation of
#' the low-dimensional components for all pairwise comparisons and components.
#' @export
estimateStability <- function(dr, threshold, procrustes=FALSE, verbose=FALSE) {
    # Compare every pair of subset embeddings, then keep the matrix of
    # maximal component-to-component correlations (one row per pair).
    comparison <- resultsComparison(dr, procrustes=procrustes, verbose=verbose)
    formated <- formatComparison(comparison)
    cor_matrix <- formated$maxcor
    # Label each row with the names of the two subsets that were compared,
    # e.g. "cv1_cv2".
    rownames(cor_matrix) <- unlist(sapply(1:(length(comparison)-1),
                                          function(i) {
        sapply(i:(length(comparison)-1), function(j, i) {
            paste(names(comparison)[i], names(comparison)[j+1],sep="_")
        }, i=i)
    }))
    # Long format: one row per (comparison, component) pair.
    cor_df <- reshape2::melt(cor_matrix)
    colnames(cor_df) <- c("comparison", "component", "correlation")
    cor_df$abs_correlation <- abs(cor_df$correlation)
    # NOTE(review): although the roxygen allows a vector of thresholds,
    # `threshold` is used below in (recycled) element-wise comparisons and
    # stored one-per-component -- this appears to assume a scalar; confirm.
    unique_components <- data.frame(component=unique(cor_df$component),
                                    threshold=threshold)
    if (any(cor_df$abs_correlation > threshold)) {
        # For each component, count the pairwise comparisons in which its
        # absolute correlation exceeds the threshold ...
        stability_count <- reshape2::acast(cor_df, component~.,
                                           value.var="abs_correlation",
                                           subset = plyr::.(cor_df$abs_correlation >
                                                            threshold),
                                           length)
        # ... and normalise by the total number of pairwise comparisons.
        stability_norm <- as.numeric(stability_count)/
            length(levels(cor_df$comparison))
        stability_comp <-
            data.frame(stability=stability_norm,
                       component=as.numeric(rownames(stability_count)))
    } else {
        # Nothing passed the threshold: every component gets stability 0.
        stability_comp <- data.frame(stability=rep(0, nrow(unique_components)),
                                     component=unique_components$component)
    }
    # Components absent from stability_count (never passed) default to 0.
    stability_all <- merge(stability_comp, unique_components, by="component",
                           all.y=TRUE)
    stability_all$stability[is.na(stability_all$stability)] <- 0
    return(list(stability=stability_all, corr=cor_df))
}
#' Compute median correlation of low-dimensional components across subsamples
#'
#' Takes the output of \code{\link{estimateStability}} and returns, for each
#' component, the median absolute correlation across all pairwise comparisons,
#' annotated with the highest threshold that the median still reaches.
#' @param es output [list] of \code{\link{estimateStability}} with named entries
#' [1] stability: list with one dataframe per threshold,
#' where each dataframe contains the stability estimate of the low-dimensional
#' components for that threshold value, [2] corr: data.frame with correlation of
#' the low-dimensional components for all pairwise comparisons and components.
#' @param threshold threshold [double] (ascending) to mark median correlation
#' as above/below.
#' @return Dataframe with median correlation for each component.
medianCorr <- function(es, threshold) {
    # Median absolute correlation per component over all comparisons.
    per_component <- stats::aggregate(abs_correlation ~ component,
                                      data = es$corr, FUN = median)
    # Annotate with the largest threshold the median still reaches; if even
    # the smallest threshold is missed, flag it as "<min(threshold)".
    per_component$threshold <- sapply(per_component$abs_correlation,
                                      function(med) {
        n_passed <- sum(med >= threshold, na.rm = TRUE)
        if (n_passed == 0) {
            paste0("<", min(threshold))
        } else {
            threshold[n_passed]
        }
    })
    per_component
}
#' Apply compareSetup and format results
#'
#' @param data_list [list] output of \code{\link{subsetDimReduction}}. Each list
#' entry is a matrix with row and column names; row.names are crucial to allow
#' for the comparison of the lower dimensional embedding across common sample
#' sets.
#' @param procrustes [logical] indication whether a procrustes transformation
#' to match the lower dimensional representations of common samples between
#' subsets should be performed.
#' @param verbose [logical] If set, progress messages are printed to standard
#' out.
#' @return named list with the size of the overlapping samples between samples
#' sets (size), the reordered correlation coefficients (reorder), the maximum
#' correlation (maxcor), the original column-wise correlation coefficients
#' (componentxcomponent_r) and the reordered column-wise correlation
#' coefficients (componentxcomponent_r_reordered)
resultsComparison <- function(data_list, procrustes, verbose=FALSE) {
    # Default names cv1..cvN when the subsets are unnamed; user-supplied
    # names are kept and used to label the pairwise comparisons.
    if (is.null(names(data_list))) {
        names(data_list) <- paste("cv", seq_along(data_list), sep="")
    }
    tmp <- lapply(seq_along(data_list), function(perm1) {
        # Compare each unordered pair exactly once (perm1 < perm2); the
        # remaining entries stay NULL and are dropped in formatComparison().
        tmp <- lapply(seq_along(data_list), function(perm2, perm1) {
            if (perm1 < perm2) {
                vmessage(c("Comparing list element", perm1, "with element",
                           perm2), verbose=verbose)
                compareSets(data_list[[perm2]]$Yred, data_list[[perm1]]$Yred,
                            procrustes=procrustes)
            }
        }, perm1=perm1)
        names(tmp) <- names(data_list)
        # Pull each result component out of the pairwise comparison objects.
        size <- lapply(tmp, function(perm) return(perm$size))
        reorder_correlation <- lapply(tmp, function(perm)
            return(perm$reorder_correlation))
        reorder <- lapply(reorder_correlation, function(perm)
            return(perm$reorder))
        maxcor <- lapply(reorder_correlation, function(perm)
            return(perm$maxcor))
        componentxcomponent_r <- lapply(tmp, function(perm)
            return(perm$componentxcomponent_r))
        componentxcomponent_r_reordered <- lapply(tmp, function(perm)
            return(perm$componentxcomponent_r_reordered))
        return(list(size=size, reorder=reorder,
                    maxcor=maxcor, componentxcomponent_r=componentxcomponent_r,
                    componentxcomponent_r_reordered=
                        componentxcomponent_r_reordered))
    })
    # Bug fix: this used to re-assign "cv1".."cvN" unconditionally, which
    # clobbered any user-supplied names the guard above deliberately kept.
    names(tmp) <- names(data_list)
    return(tmp)
}
#' Compare dimensionality reduction across subsets
#'
#' @param set1 M1 x D [matrix] with M1 samples and D dimensionality reduced
#' data P.
#' @param set2 M2 x D [matrix] with M2 samples and D dimensionality reduced
#' data P.
#' @param procrustes [logical] indication whether a procrustes transformation
#' to match the lower dimensional representations of common samples between
#' set1 and set2 should be performed.
#' @param verbose [logical] If set, progress messages are printed to standard
#' out.
#' @return named list with the number of shared samples (size), the component
#' matching result (reorder_correlation), the raw column-wise correlation
#' coefficients and p-values (componentxcomponent_r/_p) and the row-reordered
#' correlation coefficients (componentxcomponent_r_reordered)
compareSets <- function(set1, set2, verbose=FALSE, procrustes=FALSE) {
    # Restrict both embeddings to their shared samples, in matching order.
    shared <- findIntersect(set1, set2)
    n_shared <- nrow(shared[[1]])
    # Optionally align the second embedding onto the first before correlating.
    if (procrustes) {
        shared[[2]] <- MCMCpack::procrustes(shared[[1]], shared[[2]],
                                            dilation=TRUE,
                                            translation=TRUE)$X.new
    }
    # Column-by-column correlation between the two embeddings.
    cc <- correlateSets(shared[[1]], shared[[2]])
    cc_r <- sapply(cc, function(fac) fac$r)
    cc_p <- sapply(cc, function(fac) fac$p)
    # Match components across sets by maximal absolute correlation and
    # reorder the rows of the correlation matrix accordingly.
    reorder_correlation <- analyseCorrelation(cc_r, verbose=verbose)
    cc_r_reordered <- cc_r[reorder_correlation$reorder[1,],]
    list(size=n_shared,
         reorder_correlation=reorder_correlation,
         componentxcomponent_r=cc_r,
         componentxcomponent_p=cc_p,
         componentxcomponent_r_reordered=cc_r_reordered)
}
#' Format the comparison results
#'
#' Extract relevant data and remove null-entries
#' @param comparison_list output [list] from resultsComparison with
#' the size of the overlapping samples between samples sets (size), the
#' reordered correlation coefficients (reorder), the maximum correlation
#' (maxcor), the original column-wise correlation coefficients
#' (componentxcomponent_r) and the reordered column-wise correlation
#' coefficients (componentxcomponent_r_reordered)
#'
#' @return list with same entries as input list, formated to only contain unique
#' comparisons: size of the overlapping samples between samples
#' sets (size), the reordered correlation coefficients (reorder), the maximum
#' correlation (maxcor), the original column-wise correlation coefficients
#' (componentxcomponent_r) and the reordered column-wise correlation
#' coefficients (componentxcomponent_r_reordered)
#'
formatComparison <- function(comparison_list) {
    # Sample overlap sizes, kept for every entry.
    sample_sizes <- sapply(comparison_list, function(perm) return(perm$size))
    # The last entry never holds forward comparisons (perm1 < perm2 is
    # impossible for it in resultsComparison), so it is dropped from every
    # component below.
    reorder <- lapply(comparison_list, function(perm)
        return(perm$reorder))[-length(comparison_list)]
    maxcor <- lapply(comparison_list, function(perm)
        return(perm$maxcor))[-length(comparison_list)]
    componentxcomponent_r <- lapply(comparison_list, function(perm)
        return(perm$componentxcomponent_r))[-length(comparison_list)]
    componentxcomponent_r_reordered <- lapply(comparison_list, function(perm)
        return(perm$componentxcomponent_r_reordered))[-length(comparison_list)]
    # rm all nulls
    # NOTE(review): rmnulls is a project helper not visible in this file;
    # presumably it drops NULL list entries -- confirm.
    # Stack the per-comparison maxcor vectors into one matrix
    # (one row per unique pairwise comparison).
    maxcor <- do.call(rbind, sapply(sapply(maxcor, rmnulls), function(l)
        do.call(rbind,l)))
    reorder <- lapply(reorder, rmnulls)
    componentxcomponent_r <- lapply(componentxcomponent_r, rmnulls)
    componentxcomponent_r_reordered <- lapply(componentxcomponent_r_reordered,
                                              rmnulls)
    return(list(sample_sizes= sample_sizes, maxcor=maxcor, reorder=reorder,
                componentxcomponent_r=componentxcomponent_r,
                componentxcomponent_r_reordered=
                    componentxcomponent_r_reordered))
}
#' Find intersecting samples between two sets and order according to first
#'
#' @param set1 M1 x D [matrix] with M1 samples and D dimensionality reduced
#' data P.
#' @param set2 M2 x D [matrix] with M2 samples and D dimensionality reduced
#' data P.
#' @return [list] with set1 and set2 filtered for overlapping samples and
#' ordered to the order of set1.
findIntersect <- function(set1, set2) {
    # drop=FALSE throughout: without it, a single overlapping sample would
    # collapse the matrix to a plain vector (losing rownames and dim), which
    # breaks downstream nrow()/apply() calls.
    set2 <- set2[which(rownames(set2) %in% rownames(set1)), , drop=FALSE]
    set1 <- set1[which(rownames(set1) %in% rownames(set2)), , drop=FALSE]
    # Reorder set2 rows to match set1's sample order.
    set2 <- set2[match(rownames(set1), rownames(set2)), , drop=FALSE]
    return(list(set1, set2))
}
#' Align datasets with procrustes analyses
#'
#' @param set1 M x D [matrix] with M samples and D dimensionality reduced
#' data P; the matrix to be transformed.
#' @param set2 M x D [matrix] with M samples and D dimensionality reduced
#' data P (with the same sample order as set1); the target matrix.
#' @param dilation [logical] indicating whether set1 should be dilated.
#' @param translation [logical] indicating whether set1 should be translated.
#' @return [matrix] that is the Procrustes transformed version of set1.
alignSets <- function(set1, set2, dilation=TRUE, translation=TRUE) {
    # Procrustes-transform set1 so that it best matches the target set2.
    MCMCpack::procrustes(set1, set2,
                         dilation=dilation, translation=translation)$X.new
}
#' Column-wise correlation of two datasets
#'
#' @param set1 [M x D] matrix with M samples and D dimensionality reduced
#' data P
#' @param set2 [M x D] matrix with M samples and D dimensionality reduced
#' data P, with the same sample order as set1
#' @param type type [string] of correlation to use, one of spearman or pearson
#' @return named list with correlation coefficient (r) and p-value of
#' correlation (p) for each column-column correlation
correlateSets <- function(set1, set2, type="spearman") {
    # For every column of set1, correlate it against every column of set2.
    apply(set1, 2, function(col1) {
        pairwise <- apply(set2, 2, function(col2, col1) {
            est <- Hmisc::rcorr(cbind(col2, col1), type=type)
            list(r=est$r[1, 2], p=est$P[1, 2])
        }, col1=col1)
        # Collect coefficients and p-values into parallel vectors.
        list(r=sapply(pairwise, function(fac) fac$r),
             p=sapply(pairwise, function(fac) fac$p))
    })
}
#' Order results based on highest correlation
#'
#' Greedily matches the components of two embeddings: the diagonal entry with
#' the highest absolute correlation is matched first, then its row and column
#' are zeroed out and the process repeats until the diagonal is exhausted.
#' @param mat [D x D] matrix with correlation coefficients from column-wise
#' comparison of D lower dimensions
#' @param verbose [logical] If set, progress messages are printed to standard
#' out
#' @return named list with ordering of the correlations (reorder) and the
#' maximum correlations (maxcor)
analyseCorrelation <- function(mat, verbose=FALSE) {
    # reorder row 1: matched index in the other set; row 2: own index.
    reorder <- rbind(rep(0, nrow(mat)), rep(0, nrow(mat)))
    maxcor <- rep(0, nrow(mat))
    # Marks off-diagonal components already claimed by a match.
    diag_used <- rep(0, nrow(mat))
    # NOTE(review): mat_in is assigned but never used below.
    mat_in = matrix(0, nrow=nrow(mat), ncol=ncol(mat))
    iteration=1
    # If the strongest correlations sit on the anti-diagonal (component order
    # flipped between sets), reverse the matrix in both directions and undo
    # the reversal before returning.
    medianTopCorr <- median(order(abs(diag(mat)), decreasing=TRUE)[1:3])
    if (medianTopCorr > ceiling(ncol(mat)/2)) {
        mat <- apply(mat, 1, rev)
        mat <- apply(mat, 1, rev)
        reversed <- TRUE
    } else {
        reversed <- FALSE
    }
    # Keep matching until every diagonal entry has been zeroed out.
    while(!sum(diag(mat)) == 0) {
        highest_diag_index_memory=c()
        for (i in 1: nrow(mat)){
            vmessage(c("Iteration:", iteration, "i:", i, "\n"), verbose=FALSE)
            # Find the diagonal element with the highest correlation
            if (i == 1) {
                index_max_diag <- which.max(abs(diag(mat)))
            } else if ( i == nrow(mat)) {
                index_max_diag <- c(1:nrow(mat))[!c(1:nrow(mat)) %in%
                                                     highest_diag_index_memory]
            } else {
                max_diag <- max(abs(diag(mat[-highest_diag_index_memory,
                                             -highest_diag_index_memory])))
                index_max_diag <- which(abs(diag(mat)) == max_diag)
            }
            # Ties: take the first candidate.
            if (length(index_max_diag) > 1) {
                index_max_diag = index_max_diag[1]
            }
            # find maximum column and row element for max diagonal element
            if (iteration == 1) {
                max_col_index_max_diag <- which.max(abs(mat[index_max_diag,]))
                max_row_index_max_diag <- which.max(abs(mat[,index_max_diag]))
            } else {
                # In later sweeps, ignore itself and already-claimed entries.
                exclude_from_max <-
                    unique(c(index_max_diag, which(diag_used == 1)))
                max_col_index_max_diag <-
                    which(abs(mat[index_max_diag,]) ==
                              max(abs(mat[index_max_diag, -exclude_from_max])))
                max_row_index_max_diag <-
                    which(abs(mat[,index_max_diag]) ==
                              max(abs(mat[-exclude_from_max, index_max_diag])))
            }
            if (length(max_col_index_max_diag) > 1) {
                max_col_index_max_diag = max_col_index_max_diag[1]
            }
            if (length(max_row_index_max_diag) > 1) {
                max_row_index_max_diag = max_row_index_max_diag[1]
            }
            # check if the highest diagonal element is the highest element for
            # that row/column
            if (abs(mat[ index_max_diag, index_max_diag]) >=
                abs(mat[ index_max_diag,max_col_index_max_diag]) &&
                abs(mat[ index_max_diag, index_max_diag]) >=
                abs(mat[max_row_index_max_diag, index_max_diag]) &&
                diag_used[index_max_diag] != 1) {
                # Accept the diagonal match and retire its row and column.
                reorder[,index_max_diag] = t(rep(index_max_diag,2))
                maxcor[index_max_diag] = mat[ index_max_diag, index_max_diag]
                mat[index_max_diag,] <- 0
                mat[,index_max_diag] <- 0
            } else {
                # Off-diagonal match: pair with the stronger of the row-wise
                # and column-wise maxima and mark that component as claimed.
                if (abs(mat[ index_max_diag,max_col_index_max_diag]) >=
                    abs(mat[max_row_index_max_diag, index_max_diag])) {
                    reorder[1,index_max_diag] <- max_col_index_max_diag
                    reorder[2,index_max_diag] <- index_max_diag
                    maxcor[index_max_diag] <- mat[index_max_diag,
                                                  max_col_index_max_diag]
                    mat[-max_col_index_max_diag,max_col_index_max_diag] <- 0
                    mat[index_max_diag,] <- 0
                    diag_used[max_col_index_max_diag] <- 1
                } else {
                    reorder[1,index_max_diag] <- max_row_index_max_diag
                    reorder[2,index_max_diag] <- index_max_diag
                    maxcor[index_max_diag] <- mat[max_row_index_max_diag,
                                                  index_max_diag]
                    mat[max_row_index_max_diag, -max_row_index_max_diag] <- 0
                    mat[,index_max_diag] <- 0
                    diag_used[max_row_index_max_diag] <- 1
                }
            }
            # All components matched: undo the earlier reversal if needed.
            if (all(diag(mat) == 0)){
                if (reversed) {
                    reorder <- t(apply(reorder, 1, rev))
                    maxcor <- rev(maxcor)
                }
                return(list(reorder=reorder, maxcor=maxcor))
            }
            highest_diag_index_memory <- c(highest_diag_index_memory,
                                           index_max_diag)
        }
        iteration <- iteration + 1
    }
    return(list(reorder=reorder, maxcor=maxcor))
}
#' Filter correlations based on sequence of thresholds
#'
#' @param corrmat [M x D] correlation matrix
#' @param threshold stability threshold [double] or vector of thresholds
#' @return matrix (rows: components, columns: thresholds) with the number of
#' correlations whose absolute value reaches each threshold
corrPass <- function(corrmat, threshold) {
    # One column of counts per threshold value.
    sapply(threshold, function(thr, corrmat) {
        # Per row: how many entries reach the threshold in absolute value.
        apply(corrmat, 1, function(row_corrs, thr) {
            sum(abs(row_corrs) >= thr, na.rm = TRUE)
        }, thr=thr)
    }, corrmat=corrmat)
}
|
fb8ba646ab1f8d8754ef8524f2a548c7e09c4fc5
|
72f23d77494b391dfae650a46f3d563d25bcf049
|
/analyses/Vieux/anacom3.R
|
d163023c26f68c29befa3fd5fbb345c701f75b8b
|
[
"MIT"
] |
permissive
|
JehanneRiv/SeineMSP
|
dfa4034c41b04cab59ccd8f0f65c7840119c436b
|
1e85bd87d9f8aea5937b2b8d5773e137df9417b6
|
refs/heads/master
| 2022-12-30T13:06:08.401014
| 2020-10-11T14:43:44
| 2020-10-11T14:43:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,449
|
r
|
anacom3.R
|
library(ggplot2)
library(dplyr)
library(FactoMineR) # pour MCA
library(missMDA)
library(cluster) # pour agnes
library(RColorBrewer)
library(fastcluster)
library(NbClust)
library(sp)
library(raster)
library(cowplot)
# NOTE(review): changing the working directory inside a script is fragile,
# but the relative paths below depend on it.
setwd("../")

# Load the nine per-community kriging tables. Each Tabfin<i>.Rdata file
# provides an object `titi`; its 5th column is renamed to Pred<i> and the
# Community/Clust columns are dropped. (Replaces nine copy-pasted stanzas.)
for (i in 1:9) {
  load(file = paste0("data/ICES/Tabfin", i, ".Rdata"))
  names(titi)[5] <- paste0("Pred", i)
  assign(paste0("Tabfin", i), titi %>% dplyr::select(-Community, -Clust))
}

# Load the nine community polygon sets; each file provides an object `pol`.
for (i in 1:9) {
  load(paste0("results/Communautes bio/Zones/Community", i, "_polygons.Rdata"))
  assign(paste0("pol", i), pol)
}

load("data/Polycut.Rdata")

# Rasterize every polygon set onto the grid of the last kriging table and
# stack the layers, one per community.
r0 <- rasterFromXYZ(Tabfin9)
rcom <- stack(lapply(1:9, function(i) rasterize(get(paste0("pol", i)), r0)))
names(rcom) <- c("P1", "P2", "P3", "P4", "B1", "B2", "B3", "B4", "C")
dcom <- data.frame(rasterToPoints(rcom))
# Prefix every subzone id with its community name, e.g. "P1" + 2 -> "P12".
for (i in 3:11) {
  dcom[, i] <- paste0(names(dcom)[i], dcom[, i])
}

#read raw kriged data
load("data/krigeage log.RData")
names(Kriege.logdens)[6] <- "com"
# NOTE(review): column 6 was just renamed to "com"; if that column WAS
# `Community`, the next line refers to a column that no longer exists --
# confirm the layout of Kriege.logdens.
Kriege.logdens$Community <- as.numeric(Kriege.logdens$Community)
tcom <- Kriege.logdens %>%
  transmute(x=Longitude, y=Latitude, com=Community, year=Year, pred=Prediction)
# map numeric community ids to their labels
comid <- data.frame(com=1:9, nom=c("P1","P2","P3","P4","B1","B2","B3","B4","C"))
tcom <- left_join(tcom, comid) %>% mutate(com=nom) %>% dplyr::select(-nom)
#tcom<-tcom%>%tidyr::pivot_wider(names_from=year,values_from=pred)
# Attach each kriged point to the subzone it falls into.
pipo <- cbind(tcom[, 1:2], data.frame(extract(rcom, tcom[, 1:2]))) %>%
  tidyr::pivot_longer(P1:C, names_to="com", values_to="subcom") %>%
  mutate(subcom=paste0(com, subcom)) %>% distinct()
tcom <- left_join(tcom, pipo)
# Build and save a three-panel summary figure for one community `com0`:
# a map of its subzones (top), per-subzone temporal smooths (bottom left)
# and per-subzone density boxplots (bottom right). Writes the figure to
# results/Communautes bio/Zones/ and returns the combined ggplot.
fctgraph<-function(tcom,com0="P1"){
  # Map of the community's subzones.
  pltcom<-ggplot(tcom%>%filter(com==com0)%>%dplyr::select(x,y,subcom)%>%distinct(),
                 aes(x=x,y=y,fill=subcom))+
    geom_raster()+
    scale_fill_brewer(palette="Set2",name="Zones")+
    borders("world",fill="grey",colour=NA)+
    coord_sf(xlim=range(tcom$x),ylim=range(tcom$y))+
    xlab("Longitude")+ylab("Latitude")+
    theme_bw()+
    theme(legend.position="bottom")
  # Temporal evolution of the kriged density, one facet per subzone.
  plttcom<-ggplot(tcom%>%filter(com==com0),#%>%group_by(subcom,year)%>%summarise(m=mean(pred)),
                  aes(x=year,y=pred,color=subcom,group=subcom))+
    scale_color_brewer(palette="Set2",name="Zones")+
    theme_bw()+xlab("Année")+ylab("Densité")+
    ggtitle("Evolutions temporelles par zone")+ geom_point(alpha=.5,color="grey")+
    facet_grid(subcom~.)+
    geom_smooth(span=0.5)+theme(legend.position="none")
  # Density distribution per subzone.
  bxpltcom<-ggplot(tcom%>%filter(com==com0),#%>%group_by(subcom,year)%>%summarise(m=mean(pred)),
                   aes(x=subcom,y=pred,fill=subcom,group=subcom))+
    geom_boxplot()+
    scale_fill_brewer(palette="Set2",name="Zones")+
    ggtitle("Boxplot par zone")+
    theme_bw()+ theme(legend.position="none")+xlab("Zones")+ylab("Densité")
  # Assemble the three panels on a 100x100 canvas: map on top,
  # time series bottom-left, boxplots bottom-right.
  pltfinal<-ggplot()+
    theme_void()+
    coord_equal(xlim=c(0,100),ylim=c(0,100),expand=F)+
    annotation_custom(ggplotGrob(pltcom),xmin=0,xmax=100,ymin=50,ymax=100)+
    annotation_custom(ggplotGrob(plttcom),xmin=0,xmax=50,ymin=0,ymax=50)+
    annotation_custom(ggplotGrob(bxpltcom),xmin=50,xmax=100,ymin=0,ymax=50)+
    ggtitle(paste0("Zonation ",com0))
  ggsave(file=paste0("./results/Communautes bio/Zones/Zonation",com0,".png"),pltfinal)
  return(pltfinal)
}
# Per-community zonation figures.
# NOTE(review): year > 1014 keeps every survey year (presumably intentional:
# only the B communities are restricted to recent years, consistent with the
# year >= 2014 filter further down) -- confirm it is not a typo for 2014.
fctgraph(tcom%>%filter(year>1014),"P1")
fctgraph(tcom%>%filter(year>1014),"P2")
fctgraph(tcom%>%filter(year>1014),"P3")
fctgraph(tcom%>%filter(year>1014),"P4")
fctgraph(tcom%>%filter(year>2014),"B1")
fctgraph(tcom%>%filter(year>2014),"B2")
fctgraph(tcom%>%filter(year>2014),"B3")
fctgraph(tcom%>%filter(year>2014),"B4")
fctgraph(tcom%>%filter(year>1014),"C")
#MCA
# Multiple correspondence analysis on the subzone membership columns
# (x/y coordinates in columns 1-2 are excluded).
rez<- MCA(dcom[,-c(1,2)], ncp=999, method="Burt", graph=F)
#rez<- MCA(dcom[,c("P1","P2","P3","P4")], ncp=999, method="Burt", graph=F)
plt1<- plotellipses(rez, axes=c(1,2))
plt2<- plotellipses(rez, axes=c(1,3))
plot(rez)
plt1
summary(rez)
library(factoextra)
fviz_eig(rez)
plot(rez$eig[,3])
(rez$eig[,3])
# Classification
# Hierarchical clustering (Ward) on the first 12 MCA dimensions.
arbre<- hclust(dist(rez$ind$coord[,1:12]), method="ward.D2")
#arbre<- agnes(rez$ind$coord, method="ward", par.method=1)
#save(arbre, file="data/ICES/arbre_régiona_fin_communi_dens.Rdata")
#load("data/ICES/arbre_régiona_fin_communi_dens.Rdata")
plot(arbre, which=2, hang=-1)
# Optional (disabled): data-driven choice of the number of clusters.
if(F){
  source("./analyses/FastNbClust.R")
  reztmp<-FastNbClust(data=rez$ind$coord[,1:12], min.nc = 2, max.nc = 10, index="all", method = "ward.D2")
}
#nb cluster
nbk<-8
rect.hclust(arbre, k=nbk)
groups<- cutree(arbre, k=nbk)
plot(rasterFromXYZ( data.frame(rasterToPoints(rcom))%>%mutate(z=as.numeric(groups)) ))
#a graph to ocmpare stuff
dcom<- dcom%>%mutate(Clust=factor(groups))
# Final bioregionalization map with the study-area outline overlaid.
Allcom<- ggplot(dcom)+
  geom_raster(aes(x=x, y=y, fill=Clust))+
  geom_polygon(data=PolyCut, aes(x=long, y=lat, group=group), fill=NA, col="black")+
  #ggtitle("Final bioregionalization")+
  ggtitle("Regionalisation biologique")+
  #scale_fill_gradientn(colours =brewer.pal(n = nbk, name = "YlGnBu")) +
  xlab("Longitude")+
  ylab("Latitude")+
  theme_minimal()
Allcom
#add clust to tcom then map with time series
cmap<-dcom%>%dplyr::select(x,y,Clust)%>%distinct()%>%rasterFromXYZ()
pipo<-cbind(tcom[,1:2],data.frame(extract(cmap,tcom[,1:2])))%>%distinct()
names(pipo)<-c("x","y","Clust")
tcom<-left_join(tcom,pipo)%>%mutate(Clust=as.character(Clust))
#remove before 2014 for B
tcom1<-tcom%>%filter(grepl("B",com),year>=2014)
tcom2<-tcom%>%filter(!grepl("B",com))
tcom<-rbind(tcom1,tcom2)
# Cluster map panel.
pltcom<-ggplot(tcom%>%dplyr::select(x,y,Clust)%>%distinct(),
               aes(x=x,y=y,fill=Clust))+
  geom_raster()+
  scale_fill_brewer(palette="Set2",name="Zones")+
  borders("world",fill="grey",colour=NA)+
  coord_sf(xlim=range(tcom$x),ylim=range(tcom$y))+
  xlab("Longitude")+ylab("Latitude")+
  theme_bw()+
  theme(legend.position="right")
# Density boxplots per subzone, faceted cluster x community.
bxpltcom<-ggplot(tcom,#%>%filter(com==com0),#%>%group_by(subcom,year)%>%summarise(m=mean(pred)),
                 aes(x=subcom,y=pred,fill=Clust,color=Clust,group=subcom))+
  geom_boxplot(outlier.color="grey")+#outlier.shape=NA,coef=1e30,color="black")+
  scale_y_log10()+
  scale_fill_brewer(palette="Set2",name="Zones")+
  scale_color_brewer(palette="Set2",name="Zones")+
  #ggtitle("Boxplot par zone")+
  facet_grid(Clust~com,scale="free_x",drop=T)+
  theme_bw()+ theme(legend.position="none", axis.text.x = element_text(angle=90, hjust=1))+
  xlab("Zones")+ylab("Densité (log10)")
bxpltcom
# Combine map and boxplots into the final figure and save it.
pltfinal<-ggplot()+
  theme_void()+
  coord_equal(xlim=c(0,100),ylim=c(0,100),expand=F)+
  annotation_custom(ggplotGrob(pltcom),xmin=0,xmax=100,ymin=70,ymax=100)+
  #annotation_custom(ggplotGrob(plttcom),xmin=0,xmax=50,ymin=0,ymax=50)+
  annotation_custom(ggplotGrob(bxpltcom),xmin=00,xmax=100,ymin=0,ymax=70)+
  ggtitle(paste0("Zonation des communautés biologiques"))
pltfinal
ggsave(file=paste0("./results/Communautes bio/Zones/Zonation","Clust",".png"),pltfinal)
|
34ecc88bfc58c8733d248c05bce7248f123593eb
|
bb767f6a07340c0c313c79587ea6c96ce5e17f33
|
/man/tCorpus-cash-fold_rsyntax.Rd
|
c83925a4b1ba12a99d8af0489307cc6618c7160c
|
[] |
no_license
|
psychobas/corpustools
|
82694086aa0b3d861e38624a5cf17a53ce61e23e
|
e9c1ac2011234a62b2fc2c7b46ab01dd9159e4ac
|
refs/heads/master
| 2023-05-04T01:35:30.735603
| 2021-05-25T10:41:30
| 2021-05-25T10:41:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,660
|
rd
|
tCorpus-cash-fold_rsyntax.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsyntax.r
\name{tCorpus$fold_rsyntax}
\alias{tCorpus$fold_rsyntax}
\title{Fold rsyntax annotations}
\arguments{
\item{annotation}{The name of an rsyntax annotation column}
\item{by_label}{The labels in this column for which you want to aggregate the tokens}
\item{...}{Specify the new aggregated columns in name-value pairs. The name is the name of the new column, and the value should be a function over a column in $tokens.
For example: subject = paste(token, collapse = ' ') would create the column 'subject', of which the values are the concatenated tokens. See examples for more.}
\item{txt}{If TRUE, add _txt column with concatenated tokens for by_label}
\item{rm_by}{If TRUE (default), remove the column(s) specified in by_label}
\item{copy}{If TRUE, return a copy of the transformed tCorpus, instead of transforming the tCorpus by reference}
}
\description{
If a tCorpus has rsyntax annotations (see \code{\link{annotate_rsyntax}}), it can be convenient to aggregate tokens that have a certain semantic label.
For example, if you have a query for labeling "source" and "quote", you can add an aggregated value for the sources (such as a unique ID) as a column, and then remove the quote tokens.
}
\details{
\strong{Usage:}
## R6 method for class tCorpus. Use as tc$method (where tc is a tCorpus object).
\preformatted{
fold_rsyntax(annotation, by_label, ...,
             txt=F, rm_by=T, copy=F)}
}
\examples{
tc = tc_sotu_udpipe$copy()
tc$udpipe_clauses()
tc$fold_rsyntax('clause', by_label = 'subject', subject = paste(token, collapse=' '))
tc$tokens
}
|
eaf865a13e93c0a53682137d537fc35d0f3951db
|
80f6dd459811091ed7cafd02968241f23ea206da
|
/jennysFlower.R
|
bd241e7b591f6ded5e9fcf6a016a0ea41ffce678
|
[] |
no_license
|
betanaught/R
|
68afa5f450823161bbd5d294581ec4a12df40f9c
|
29c60aef9c6d45ae1be22c7e5526b9b54e66dedf
|
refs/heads/master
| 2021-09-15T15:28:28.767401
| 2018-06-05T19:52:57
| 2018-06-05T19:52:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,997
|
r
|
jennysFlower.R
|
# Spiral "flower" drawings built from golden-angle (and related) point
# sequences, rendered with ggplot2 on a blank canvas.
options(repr.plot.width = 4, repr.plot.height = 4)
library("ggplot2")

# Shared blank canvas: background color only, no title/axes/grid/legend.
flower_canvas <- function(bg) {
  theme(
    plot.title = NULL,
    panel.background = element_rect(fill = bg),
    panel.grid = element_blank(),
    axis.title = element_blank(),
    axis.ticks = element_blank(),
    axis.text = element_blank(),
    legend.position = "none"
  )
}

# n points along a spiral with the given angular step; columns are named
# t/x/y because the aes() mappings below refer to them by name.
spiral_points <- function(step, n) {
  angles <- seq_len(n) * step
  data.frame(t = angles, x = sin(angles), y = cos(angles))
}

# Dandelion: golden-angle step, star-shaped points.
golden_df <- spiral_points(pi * (3 - (sqrt(5))), 500)
print(
  ggplot(golden_df, aes(x * t, y * t)) +
    geom_point(aes(size = t), alpha = 0.5, color = "black", shape = 8) +
    flower_canvas("white")
)

# Sunflower: same point layout, yellow triangles on a dark background.
print(
  ggplot(golden_df, aes(x * t, y * t)) +
    geom_point(aes(size = t), alpha = 0.5, color = "yellow", shape = 17) +
    flower_canvas("darkmagenta")
)

# Sunflower with an angular step of 2.0 radians and more points.
two_rad_df <- spiral_points(2, 1000)
print(
  ggplot(two_rad_df, aes(x * t, y * t)) +
    geom_point(aes(size = t), alpha = 0.5, color = "yellow", shape = 17) +
    flower_canvas("darkmagenta")
)

# Final flower: 13-degree step, large translucent open circles.
goosey.data.frame <- spiral_points(13 * (pi / 180), 2000)
jennys.flower <- ggplot(goosey.data.frame, aes(x * t, y * t)) +
  geom_point(size = 90, alpha = 0.1, color = "magenta4", shape = 1,
             stroke = .5) +
  flower_canvas("white")
print(jennys.flower)
|
5f6d31a1e4b50d477ed78573f113092d91bba37b
|
5d9f8ec0e042104a57b4af446f71ec9f2a5d60bc
|
/R/rbm_train.R
|
59bd2253c7ead1254ca83fa88e91a666bcd7e309
|
[] |
no_license
|
cran/deepnet
|
93155a5f01a778e4340fa01c2f669cf111d0431f
|
6e1efd18aed8971c1c398b296e4c2b81533bee9f
|
refs/heads/master
| 2022-07-20T20:09:48.283817
| 2022-06-24T11:29:27
| 2022-06-24T11:29:27
| 17,942,921
| 24
| 23
| null | 2015-12-05T19:09:32
| 2014-03-20T13:11:10
|
R
|
UTF-8
|
R
| false
| false
| 4,984
|
r
|
rbm_train.R
|
##' Training a RBM(restricted Boltzmann Machine)
##'
##' Trains a single RBM by mini-batch gradient descent with momentum, using
##' the CD-k (contrastive divergence) algorithm.
##' @param x matrix of x values for examples
##' @param hidden number of hidden units
##' @param visible_type activation of visible units. Only "bin" (binary units with sigmoid activation) is supported.
##' @param hidden_type activation of hidden units. Only "bin" (binary units with sigmoid activation) is supported.
##' @param learningrate learning rate for gradient descent. Default is 0.8.
##' @param momentum momentum for gradient descent. Default is 0.5 .
##' @param learningrate_scale learning rate will be multiplied by this scale after every epoch. Default is 1 .
##' @param numepochs number of iteration for samples Default is 3.
##' @param batchsize size of mini-batch. Default is 100.
##' @param cd number of iteration for Gibbs sample of CD algorithm.
##' @return a list describing the trained RBM: weights W, biases B (visible)
##'   and C (hidden), their momentum buffers vW/vB/vC, the hyperparameters,
##'   and e, the per-mini-batch mean squared reconstruction error.
##' @examples
##' Var1 <- c(rep(1,50),rep(0,50))
##' Var2 <- c(rep(0,50),rep(1,50))
##' x3 <- matrix(c(Var1,Var2),nrow=100,ncol=2)
##' r1 <- rbm.train(x3,10,numepochs=20,cd=10)
##' @author Xiao Rong
##' @export
rbm.train <- function(x,hidden,
                      numepochs=3,batchsize=100,
                      learningrate=0.8,learningrate_scale=1,momentum=0.5,
                      visible_type="bin",hidden_type="bin",cd=1){
  if (!is.matrix(x))
    stop("x must be a matrix!")
  input_dim <- ncol(x)
  rbm <- list(
    size = c(input_dim, hidden),
    # weights and biases start as small uniform noise; momentum buffers at 0
    W = matrix(runif(hidden*input_dim,min=-0.1,max=0.1), nrow=hidden, ncol=input_dim),
    vW = matrix(rep(0,hidden*input_dim), nrow=hidden, ncol=input_dim),
    B = runif(input_dim,min=-0.1,max=0.1),
    vB = rep(0,input_dim),
    C = runif(hidden,min=-0.1,max=0.1),
    vC = rep(0,hidden),
    e = numeric(0),  # per-mini-batch reconstruction error, filled in training
    learningrate = learningrate,
    learningrate_scale = learningrate_scale,
    momentum = momentum,
    hidden_type = hidden_type, visible_type = visible_type,
    cd=cd
  )
  m <- nrow(x);
  numbatches <- m / batchsize;
  s <- 0  # global mini-batch counter (indexes rbm$e)
  for(i in 1:numepochs){
    randperm <- sample(1:m,m)  # fresh shuffle of the rows each epoch
    if(numbatches >= 1){
      for(l in 1:as.integer(numbatches)){
        s <- s + 1
        # drop=FALSE keeps batch_x a matrix even for batchsize == 1
        batch_x <- x[randperm[((l-1)*batchsize+1):(l*batchsize)], , drop = FALSE]
        rbm <- do.rbm.train(rbm,batch_x,s)
      }
    }
    # last fraction of sample
    # FIX: the remainder previously started at as.integer(numbatches)*batchsize,
    # re-using the final row of the last full batch (off by one)
    if(numbatches > as.integer(numbatches)){
      batch_x <- x[randperm[(as.integer(numbatches)*batchsize+1):m], , drop = FALSE]
      s <- s + 1
      rbm <- do.rbm.train(rbm,batch_x,s)
    }
    rbm$learningrate <- rbm$learningrate * rbm$learningrate_scale;
  }
  rbm
}
# One CD-k parameter update on a single mini-batch.
# `s` is the global mini-batch counter; the batch's mean squared
# reconstruction error is recorded in rbm$e[s]. Assumes rbm$cd >= 1.
do.rbm.train <- function(rbm,batch_x,s){
  m <- nrow(batch_x)
  # positive phase: sample binary hidden states from the data
  v1 <- batch_x
  h1 <- binary.state(rbm.up(rbm, v1))
  # negative phase: cd steps of Gibbs sampling starting from the data
  vn <- v1
  hn <- h1
  for(i in 1:rbm$cd){
    vn <- rbm.down(rbm, hn)
    hn <- rbm.up(rbm, vn)
    #only last hidden state use probability real value
    if(i < rbm$cd){
      hn <- binary.state(hn);
    }
  }
  # weight update: data associations minus model associations, with momentum
  dW <- (t(h1) %*% v1 - t(hn) %*% vn) / m
  dW <- rbm$learningrate * dW
  rbm$vW <- rbm$vW * rbm$momentum + dW
  dW <- rbm$vW  # FIX: was `dw <- rbm$vW`, so the momentum buffer was never applied to W
  rbm$W <- rbm$W + dW
  # visible bias update (same momentum scheme)
  dB <- colMeans(v1 - vn)
  dB <- rbm$learningrate * dB
  rbm$vB <- rbm$vB * rbm$momentum + dB
  dB <- rbm$vB
  rbm$B <- rbm$B + dB
  # hidden bias update (same momentum scheme)
  dC <- colMeans(h1 - hn)
  dC <- rbm$learningrate * dC
  rbm$vC <- rbm$vC * rbm$momentum + dC
  dC <- rbm$vC
  rbm$C <- rbm$C + dC
  # mean squared reconstruction error for this batch
  rbm$e[s] <- sum((v1 - vn)^2)/m
  rbm
}
##' Infer hidden units state by visible units
##'
##' Computes the hidden-unit activation probabilities sigm(v W' + C) for a
##' batch of visible vectors (one row per example).
##' @param rbm an rbm object trained by function rbm.train
##' @param v visible units states
##' @return hidden units states (activation probabilities)
##' @examples
##' Var1 <- c(rep(1,50),rep(0,50))
##' Var2 <- c(rep(0,50),rep(1,50))
##' x3 <- matrix(c(Var1,Var2),nrow=100,ncol=2)
##' r1 <- rbm.train(x3,3,numepochs=20,cd=10)
##' v <- c(0.2,0.8)
##' h <- rbm.up(r1,v)
##' @author Xiao Rong
##' @export
rbm.up <- function(rbm,v){
  if(rbm$hidden_type == "bin"){
    # pre-activation v %*% t(W), shifted row-wise by the hidden bias C
    # (renamed from `sum`, which shadowed base::sum; unused `m` removed)
    pre <- t( t(v %*% t(rbm$W)) + rbm$C )
    h <- sigm( pre )
  }else{
    stop("only support binary state for rbm hidden layer!")
  }
  h
}
##' Generate visible vector by hidden units states
##'
##' Computes the visible-unit activation probabilities sigm(h W + B) for a
##' batch of hidden vectors (one row per example).
##' @param rbm an rbm object trained by function rbm.train
##' @param h hidden units states
##' @return generated visible vector (activation probabilities)
##' @examples
##' Var1 <- c(rep(1,50),rep(0,50))
##' Var2 <- c(rep(0,50),rep(1,50))
##' x3 <- matrix(c(Var1,Var2),nrow=100,ncol=2)
##' r1 <- rbm.train(x3,3,numepochs=20,cd=10)
##' h <- c(0.2,0.8,0.1)
##' v <- rbm.down(r1,h)
##' @author Xiao Rong
##' @export
rbm.down <- function(rbm,h){
  if(rbm$visible_type == "bin"){
    # pre-activation h %*% W, shifted row-wise by the visible bias B
    # (renamed from `sum`, which shadowed base::sum; unused `m` removed)
    pre <- t( t(h %*% rbm$W) + rbm$B )
    v <- sigm( pre )
  }else{
    # FIX: message previously said "hidden layer" in the visible-layer branch
    stop("only support binary state for rbm visible layer!")
  }
  v
}
# Stochastically binarize a matrix of activation probabilities: each entry
# becomes 1 with probability equal to its value, otherwise 0. Draws one
# uniform threshold per entry (same RNG consumption as before).
binary.state <- function(h){
  thresholds <- matrix(runif(length(h), min = 0, max = 1),
                       nrow = nrow(h), ncol = ncol(h))
  (h > thresholds) * 1
}
|
179b1c94b1e134f81f835f018e117173821477d7
|
9f7b4256915d26c5221bedc20af3278879f46fa5
|
/Suduroy_fannar/main.R
|
e7036b6760165498b9cc615713350c0971cf49cf
|
[] |
no_license
|
KasperEinarson/Faroe-Islands
|
c53468f910c04cf39f36134ace38b130476608fa
|
833e09848dd53aaaebeccc08dc55673d779f4fd5
|
refs/heads/master
| 2021-01-20T09:26:07.890153
| 2017-05-04T10:58:24
| 2017-05-04T10:58:24
| 90,253,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 668
|
r
|
main.R
|
# Driver script for the Suduroy wind analysis: initialise the session, load
# the data, run the analyses and render the presentation figures. Run from
# the project root so the relative source() paths resolve.
# NOTE(review): rm(list = ls()) wipes the caller's workspace when this file
# is sourced interactively -- a known anti-pattern, kept for compatibility.
rm(list = ls())
graphics.off()
source("libs/initR.R") # initiating the R session
source("libs/getData.R") # getting basic data files
# --------------------------------------------------------------------------- #
# Data analysis
# source("libs/Analyse_vindhastighed.R")
source("libs/Analyse_vindretning.R")
source("libs/PowerCurve.R")
# --------------------------------------------------------------------------- #
## Figures
source("figs/Figures.R") # plot figures for presentation
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
|
57eb525ec09b72283d03b998a5e668fc9ab3d7ed
|
4250e06e89ad4b7042d798691d2a17b258a51e21
|
/Calculate Position In Race from Telemetry Segments.R
|
7e6cc463ad9856fec718d7ccbda7777b5ec159e5
|
[] |
no_license
|
jalnichols/p-c
|
b143bd210e222d6f755a7034d688db31b10f1720
|
1ea6587e28a1c20ce8a2f6870eb4ece7aeaa05e3
|
refs/heads/master
| 2022-06-15T22:03:40.823212
| 2022-06-11T20:25:31
| 2022-06-11T20:25:31
| 199,725,985
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,036
|
r
|
Calculate Position In Race from Telemetry Segments.R
|
library(tidyverse)
library(rvest)
library(RMySQL)

# Close any connection left over from a previous run of this script.
# FIX: calling dbDisconnect(con) unconditionally errored in a fresh session
# where `con` did not exist yet; guard with exists().
if (exists("con")) {
  try(dbDisconnect(con), silent = TRUE)
}

# SECURITY NOTE: database credentials are hard-coded in source; prefer
# Sys.getenv() or a config file kept out of version control.
con <- dbConnect(MySQL(),
                 host='localhost',
                 dbname='cycling',
                 user='jalnichols',
                 password='braves')
#
# Bring in Strava Activities
#
# Pulls per-activity Distance stats from MySQL, normalises the messy date
# strings, converts miles to km, joins to the stage results table, and keeps
# only road-stage activities whose recorded distance is within +/-10% of the
# official stage length (time trials are dropped at the end).
all_race_activities <- dbGetQuery(con, "SELECT activity_id, PCS, VALUE, Stat, DATE
FROM strava_activity_data
WHERE Stat IN ('Distance')") %>%
# clean up the dates ("Weekday, Month Day, Year" -> Date)
mutate(Y = str_sub(DATE, nchar(DATE)-3, nchar(DATE))) %>%
separate(DATE, into = c("weekday", "date", "drop"), sep = ",") %>%
mutate(date = paste0(str_trim(date),", ", Y)) %>%
select(-weekday, -drop, -Y) %>%
# clean up the stat values (strip "mi"/"W" unit suffixes, coerce to numeric)
mutate(VALUE = str_replace(VALUE, "mi", ""),
VALUE = str_replace(VALUE, "W", ""),
VALUE = as.numeric(VALUE)) %>%
mutate(date = lubridate::mdy(date)) %>%
unique() %>%
spread(Stat, VALUE) %>%
janitor::clean_names() %>%
mutate(pcs = str_to_title(pcs)) %>%
# join activities to stage results by date + rider name
inner_join(dbGetQuery(con, "SELECT * FROM stage_data_perf
WHERE year > 2018") %>%
mutate(date = as.Date(date)) %>%
mutate(date = as.Date(date, origin = '1970-01-01')) %>%
unique(), by = c("date", "pcs" = "rider")) %>%
# if two results exist for same day matching distance, it's probably a recon and TT which
# means drop the lower watts
# also, many riders include distance outside the TT as part of their strava activity
# so maybe accept any riders +/- 10 km? or maybe we just can't get accurate TT data
# miles -> km, then keep activities within +/-10% of the official stage length
mutate(distance = distance * 1.609) %>%
filter((distance / length) > 0.90) %>%
filter((distance / length) < 1.10) %>%
filter(time_trial == 0)
#
# Link with telemetry data downloaded and filter to races with 15+ files
# (the local .rds filenames encode the Strava activity id)
#
telemetry_available <- all_race_activities %>%
inner_join(
fs::dir_info('D:/Jake/Documents/STRAVA_JSON/') %>%
select(path, birth_time) %>%
mutate(activity_id = str_replace(path, 'D:/Jake/Documents/STRAVA_JSON/strava-activity-id-', ''),
activity_id = str_replace(activity_id, ".rds", "")), by = c("activity_id")) %>%
# keep only races with at least 15 riders' telemetry files available
group_by(stage, race, year, class, date) %>%
filter(n() >= 15) %>%
ungroup()
#
# Join with Segments calculated from those power files
#
# For each segment (rowid) within a race, compute per-segment medians across
# riders, flag riders whose segment distance disagrees with the median
# (looser tolerance for short segments), and rebuild a cleaned
# distance/speed/time triple for every rider-segment.
all_new_segments_across_riders <- dbReadTable(con, "strava_new_segment_creation_interim") %>%
filter(creation_type == "TIME") %>%
select(-creation_type) %>%
inner_join(telemetry_available %>% select(activity_id, rider=pcs, total_seconds) %>% unique()) %>%
filter(segment_distance > 0) %>%
# per-segment reference stats across riders
# NOTE(review): median_speed is actually a mean despite its name
group_by(rowid, stage, race, year, date, class) %>%
mutate(median_distance = median(segment_distance, na.rm = T),
median_time = median(segment_time, na.rm = T),
median_gradient = median(segment_gradient, na.rm = T),
median_speed = mean(segment_speed_kmh, na.rm = T)) %>%
ungroup() %>%
mutate(ratio = (median_distance/segment_distance),
spd_ratio = segment_time / median_time) %>%
# flag bad data: +/-10% tolerance for long segments, +/-20% for short ones
mutate(bad_data = ifelse((median_distance >= 1500 & (ratio > 1.1 | ratio < 0.9)) |
(median_distance < 1500 & (ratio > 1.2 | ratio < 0.8)), 1, 0)) %>%
# mean speed/distance across riders with good data only
group_by(stage, race, year, class, rowid, date) %>%
mutate(not_bad_data_speed = ifelse(bad_data == 1, NA, segment_speed_kmh),
not_bad_data_speed = mean(not_bad_data_speed, na.rm = T),
not_bad_data_dist = ifelse(bad_data == 1, NA, segment_distance),
not_bad_data_dist = mean(not_bad_data_dist, na.rm = T)) %>%
ungroup() %>%
# what this does is it assigns median speed if your data is bad, but your speed otherwise
# then it assigns everyone the median distance
# and then time is calculated off of the median distance * whatever speed
# NOTE(review): both ifelse branches below are should_be_distance, so the
# ifelse is a no-op -- presumably intentional per the comment above, but
# worth confirming
mutate(new_segment_speed_kmh = ifelse(bad_data == 1, not_bad_data_speed, segment_speed_kmh),
new_segment_distance = ifelse(bad_data == 1, should_be_distance, should_be_distance),
new_segment_time = new_segment_distance / ((new_segment_speed_kmh*1000/3600))) %>%
#filter(segment_speed_kmh <= 120) %>%
group_by(rowid, stage, race, year, date, class, should_be_distance) %>%
mutate(rel_speed = segment_speed_kmh / mean(segment_speed_kmh)) %>%
ungroup()
#
# Filter out races where position has been calculated
#
# anti_join against the already-written strava_position_in_race table, then
# keep only races that still have at least 15 riders' segment data.
races_to_generate_position_data <- all_new_segments_across_riders %>%
anti_join(dbGetQuery(con, "SELECT DISTINCT stage, race, year, class, date
FROM strava_position_in_race") %>%
mutate(stage = as.character(stage))) %>%
select(rider, race, stage, year, class, date) %>%
unique() %>%
group_by(race, stage, year, class, date) %>%
filter(n() >= 15) %>%
ungroup() %>%
select(race, stage, year, class, date) %>%
unique()
#
# Loop through missing races and output to table
#
# For each race: impute segment times for rider-segments that were flagged
# bad (using the good-data mean speed over the reference distance), stack
# them with the observed segments, accumulate each rider's cumulative time
# gap to the per-segment median, and append the result to the DB.
# NOTE(review): position_at_time is computed but not included in the
# dbWriteTable select below -- dead computation in this version of the loop.
for(i in 1:length(races_to_generate_position_data$race)) {
R = races_to_generate_position_data$race[[i]]
S = races_to_generate_position_data$stage[[i]]
Y = races_to_generate_position_data$year[[i]]
#
segments_to_consider <- all_new_segments_across_riders %>%
filter(race == R & year == Y & stage == S)
#
summed_time <- rbind(
# imputed rows: one per rider missing a segment, using good-data averages
segments_to_consider %>%
filter(bad_data == 0) %>%
select(rowid, stage, race, year, class, date, new_segment_distance = should_be_distance,
new_segment_speed_kmh = not_bad_data_speed, segment_gradient = median_gradient, segment_vertgain,
start_lat, end_lat, start_long, end_long, start_prior, end_next) %>%
group_by(rowid) %>%
mutate(segment_vertgain = median(segment_vertgain)) %>%
unique() %>%
inner_join(segments_to_consider %>% select(rider, stage, race, year, class, date, activity_id) %>% unique()) %>%
anti_join(segments_to_consider, by = c("rowid", "rider")) %>%
mutate(new_segment_time = new_segment_distance / ((new_segment_speed_kmh*1000/3600))) %>%
as_tibble(),
# observed rows, with the intermediate helper columns dropped
segments_to_consider %>%
select(-rel_speed, -not_bad_data_speed, -not_bad_data_dist, -bad_data, -spd_ratio, -ratio,
-median_gradient, -median_speed, -median_time, -median_distance, -should_be_distance,
-position_distance, -segment_speed_kmh, -segment_time, -segment_distance,
-segment_time)) %>%
arrange(start_prior) %>%
# per-segment reference times across all riders
group_by(Segment = rowid, stage, race, year, class, date) %>%
mutate(best_time = min(new_segment_time, na.rm = T),
median_time = median(new_segment_time, na.rm = T)) %>%
ungroup() %>%
# cumulative gap to the median rider, per rider over the race
group_by(stage, race, year, class, rider, date) %>%
mutate(behind_median = cumsum(new_segment_time - median_time)) %>%
ungroup() %>%
group_by(rider, stage, race, year, class, date) %>%
mutate(Segment_Cnt = n()) %>%
ungroup() %>%
# gap to the (virtual) race leader at each segment
group_by(Segment, stage, race, year, class, date) %>%
mutate(vs_median = behind_median - min(behind_median, na.rm = T)) %>%
ungroup() %>%
group_by(Segment, stage, race, year, class, date) %>%
mutate(position_at_time = rank(behind_median, ties.method = "min")) %>%
ungroup()
dbWriteTable(con, "strava_position_in_race", summed_time %>%
select(Segment, Gradient = segment_gradient, Distance = new_segment_distance,
race, stage, year, class, date, low_end = start_prior, high_end = end_next,
rider, activity_id, time = new_segment_time, best_time, median_time, behind_median, vs_median),
row.names = F, append = TRUE)
print(i)
}
#
#
#
#
# Loop through missing races and output to table
#
# Second variant of the loop above: additionally anchors each rider's gap to
# their known finishing time (total_seconds), so position is measured as the
# time they must have been ahead/behind BEFORE the remaining segments
# (vs_median_before). Also keeps only activities that reach the race's last
# segment.
# NOTE(review): this appends to the same strava_position_in_race table as
# the first loop but with a different column set -- confirm the table schema
# accepts both.
for(i in 1:length(races_to_generate_position_data$race)) {
R = races_to_generate_position_data$race[[i]]
S = races_to_generate_position_data$stage[[i]]
Y = races_to_generate_position_data$year[[i]]
#
segments_to_consider <- all_new_segments_across_riders %>%
filter(race == R & year == Y & stage == S)
#
summed_time <- rbind(
# imputed rows (bad_data = 1), with placeholder power columns
segments_to_consider %>%
filter(bad_data == 0) %>%
select(rowid, stage, race, year, class, date, new_segment_distance = should_be_distance,
new_segment_speed_kmh = not_bad_data_speed, segment_gradient = median_gradient, segment_vertgain,
start_lat, end_lat, start_long, end_long, start_prior, end_next) %>%
group_by(rowid) %>%
mutate(segment_vertgain = median(segment_vertgain)) %>%
unique() %>%
inner_join(segments_to_consider %>% select(rider, stage, race, year, class, date, activity_id, total_seconds) %>% unique()) %>%
anti_join(segments_to_consider, by = c("rowid", "rider")) %>%
mutate(new_segment_time = new_segment_distance / ((new_segment_speed_kmh*1000/3600))) %>%
as_tibble() %>%
mutate(bad_data = 1,
Power = as.numeric(NA),
ValidPoints = 0,
ValidPower = 0),
# observed rows (bad_data = 0)
segments_to_consider %>%
select(-rel_speed, -not_bad_data_speed, -not_bad_data_dist, -bad_data, -spd_ratio, -ratio,
-median_gradient, -median_speed, -median_time, -median_distance, -should_be_distance,
-position_distance, -segment_speed_kmh, -segment_time, -segment_distance,
-segment_time) %>%
mutate(bad_data = 0)) %>%
arrange(desc(start_prior)) %>%
# keep only activities whose data reaches the race's final segment
mutate(lastSegment = max(rowid)) %>%
group_by(activity_id) %>%
filter(lastSegment == max(rowid)) %>%
ungroup() %>%
# per-segment reference times, plus finishing-time references
group_by(Segment = rowid, stage, race, year, class, date) %>%
mutate(best_time = min(new_segment_time, na.rm = T),
median_time = median(new_segment_time, na.rm = T),
median_finish = median(total_seconds, na.rm = T),
best_finish = min(total_seconds, na.rm = T)) %>%
ungroup() %>%
group_by(stage, race, year, class, rider, date) %>%
mutate(behind_median = cumsum(new_segment_time - median_time)) %>%
ungroup() %>%
group_by(rider, stage, race, year, class, date) %>%
mutate(Segment_Cnt = n()) %>%
ungroup() %>%
# gap to median before this point = final gap minus gap accumulated so far
mutate(vs_median_before = (total_seconds - median_finish) - behind_median) %>%
group_by(Segment, stage, race, year, class, date) %>%
mutate(position_at_time = rank(vs_median_before, ties.method = "min")) %>%
ungroup()
dbWriteTable(con, "strava_position_in_race", summed_time %>%
select(Segment, Gradient = segment_gradient, Distance = new_segment_distance,
race, stage, year, class, date, low_end = start_prior, high_end = end_next,
rider, activity_id, time = new_segment_time, best_time, median_time, behind_median, vs_median_before,
total_seconds, median_finish, position_at_time, bad_data),
row.names = F, append = TRUE)
print(i)
}
|
84e992c6f10d0a21d389c1c9917b7f0b31cbf2fb
|
875d72407d0de9e8c597a09c2792fb76a0521416
|
/2_amplitude_CIs.R
|
8252f3ad2ee7f22cbbed6b0e1fca5c99cb4582e0
|
[] |
no_license
|
agbarnett/weekends
|
1071af8d5562d153f68ead4ad675cc6683a66dbf
|
6a4897261841081c5d42b62c80c2043ffcdcc751
|
refs/heads/master
| 2020-07-23T03:12:01.538292
| 2020-01-03T11:53:47
| 2020-01-03T11:53:47
| 207,429,869
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,360
|
r
|
2_amplitude_CIs.R
|
# 2_amplitude_CIs.R
# make credible intervals for amplitude for hour of day analysis
# October 2019
# Expects `bugs.results` (a BUGS/JAGS fit with $summary and $sims.array)
# to already be in the workspace.
# get the chain estimates: columns whose names start with cosine.c / sine.c
index = grep('^cosine.c|^sine.c', rownames(bugs.results$summary))
cnames = rownames(bugs.results$summary)[index]
to.use1 = as.matrix(bugs.results$sims.array[,1,index]) # first chain
to.use2 = as.matrix(bugs.results$sims.array[,2,index]) # second chain
colnames(to.use1) = cnames
colnames(to.use2) = cnames
# switch from wide to long (one row per MCMC iteration i, parameter, chain)
long1 = data.frame(to.use1) %>%
mutate(i = 1:n(), chain=1) %>%
tidyr::gather(key='var', value='value', -i, -chain)
long2 = data.frame(to.use2) %>%
mutate(i = 1:n(), chain=2) %>%
tidyr::gather(key='var', value='value', -i, -chain)
# split the parameter name into its country index and its cosine/sine part
binded = bind_rows(long1, long2) %>%
mutate(country.num = as.numeric(str_remove_all(string=var, pattern='[^0-9]')), # remove all but numbers
var = str_remove_all(string=var, pattern='[^a-zA-Z]')) %>%
group_by(chain, i, country.num) %>%
spread(var, value) %>% # cosine and sine on same row
ungroup() %>%
mutate(amp = sqrt(cosinec^2 + sinec^2)) # amplitude of sinusoid
# now get stats as probability ratios per country
# NOTE(review): these are exp() of the posterior mean/quantiles of the
# log-scale amplitude (exp(mean(amp)), not mean(exp(amp))) -- presumably
# intentional since the model is on the log scale; confirm against 1_*.R.
amp.stats = group_by(binded, country.num) %>%
summarise(meana = exp(mean(amp)),
lowera = exp(quantile(amp, 0.025)),
uppera = exp(quantile(amp, 0.975))) %>%
ungroup()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.