blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67fe7fb23c3d662e5826ce53ed891426af3cbc26
|
2f0cbb9303747f445da1c9faf4bf75b055d725a1
|
/R/detectFaces.R
|
0238e7047e61fb13ca4c8e29881ee61ca776c183
|
[] |
no_license
|
peoplecure/r_facepp
|
77a6e8528a6a06e93a8d71525dee91f9c004df8a
|
01bbb385c741d53cc7ff30837b0f8fbc3ddb263c
|
refs/heads/master
| 2020-05-30T13:45:29.323971
| 2017-09-17T14:33:18
| 2017-09-17T14:33:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 337
|
r
|
detectFaces.R
|
# Detect faces in an image using the Face++ v3 detect endpoint.
#
# proxy: credential holder exposing `api_key` and `api_secret`.
# image_file: path to the local image file to upload.
#
# Returns the `faces` element of the parsed API response.
detectFaces <- function(proxy, image_file) {
  detect_url <- 'https://api-cn.faceplusplus.com/facepp/v3/detect'
  payload <- list(
    api_key = proxy$api_key,
    api_secret = proxy$api_secret,
    image_file = upload_file(path = image_file)
  )
  response <- POST(url = detect_url, body = payload, encode = 'multipart')
  content(response)$faces
}
|
83ec27ce4279daf25136337983bd1708e400e508
|
592bf5bfffd630f6372a710f12cbdc6ad71e0b07
|
/R/predictionTheta.R
|
aa26b13a21a25833089114ff3d274ae134d31008
|
[] |
no_license
|
cran/warpMix
|
306043d19671cb9fc02cffd49c8f5c65946c5192
|
156fe1cb94375f903d905765ad6fab10f1b24130
|
refs/heads/master
| 2021-01-19T08:03:39.362417
| 2017-02-15T14:11:39
| 2017-02-15T14:11:39
| 82,067,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,015
|
r
|
predictionTheta.R
|
#' Predict the warping parameters.
#'
#' Predicts the warping parameters from their estimates by fitting a
#' linear mixed-effect model on them.
#'
#' @param thetaObs A matrix (size: n * T) of estimated warping parameters.
#' @param sigmaEpsilon A number giving the variance of the noise in the
#'   linear mixed-effect model fitted on the warping parameters.
#'
#' @return A list with theta, a matrix of predicted warping parameters,
#'   sigmaE the covariance of the random effects, and theta0 the mean.
#'
#'
predictionTheta <- function(thetaObs, sigmaEpsilon) {
  ## Arrange the estimates so that each row is one observation.
  obs <- t(thetaObs)
  n_par <- ncol(obs)
  ## Empirical mean and covariance, corrected for the estimation noise.
  theta_mean <- colMeans(obs)
  obs_cov <- cov(obs)
  random_cov <- obs_cov - sigmaEpsilon * diag(1, n_par)
  ## Shrinkage-style prediction of the random effects.
  predicted <- random_cov %*% solve(obs_cov) %*% t(obs)
  list(theta = predicted, sigmaE = random_cov, theta0 = theta_mean)
}
|
1db870fc65f0d9ced210c8c40e993a55d9af6866
|
fac69dc12b6607d5a1b08f694453164ad5c61326
|
/ps_user_stuttgart_part2.R
|
b6ed58720377f30dac7a2f755a4be3831112766a
|
[] |
no_license
|
Japhilko/ps_2017_11_user_stuttgart
|
154cc8def1d45280ddac4384c5eeece1210291bd
|
afd53942b362242c12f2599a96ab778463a16be9
|
refs/heads/master
| 2021-09-03T08:42:20.864002
| 2018-01-07T17:07:55
| 2018-01-07T17:07:55
| 111,187,978
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,975
|
r
|
ps_user_stuttgart_part2.R
|
## ---- include=FALSE------------------------------------------------------
# Global knitr options plus the script-wide flags that gate the
# environment-specific chunks below (GESIS vs. home machine, online vs.
# offline). Spelled out TRUE/FALSE instead of the re-assignable T/F
# shorthands, and `<-` instead of `=` for assignment.
knitr::opts_chunk$set(echo = TRUE, cache = TRUE, warning = FALSE, message = FALSE)
par(mai = c(0, 0, 0, 0))
log_gesis <- FALSE
log_home <- !log_gesis
internet <- FALSE
noint <- !internet
## ----echo=F,eval=F-------------------------------------------------------
## install.packages("knitr")
## install.packages("sp")
## install.packages("tmap")
## install.packages("choroplethr")
## install.packages("choroplethrMaps")
## install.packages("acs")
## install.packages("rJava")
## install.packages("xlsxjars")
## install.packages("xlsx")
## ----echo=F--------------------------------------------------------------
library(knitr)
## ----echo=F,eval=F-------------------------------------------------------
## setwd("~/GitHub/GeoData/presentations/ps_user_stuttgart")
## purl("ps_user_stuttgart_part3.Rmd")
## ----eval=F,echo=F-------------------------------------------------------
## setwd("D:/Daten/GitHub/GeoData/presentations/ps_user_stuttgart")
## purl("ps_user_stuttgart_part3.Rmd")
## ------------------------------------------------------------------------
library(maps)
map()
## ------------------------------------------------------------------------
map("world", "Germany")
## ------------------------------------------------------------------------
data(world.cities)
map("france")
map.cities(world.cities,col="blue")
## ------------------------------------------------------------------------
library(maptools)
data(wrld_simpl)
plot(wrld_simpl,col="royalblue")
## ----eval=F--------------------------------------------------------------
## head(wrld_simpl@data)
## ----echo=F,eval=noint---------------------------------------------------
kable(head(wrld_simpl@data))
## ----echo=F,eval=internet------------------------------------------------
## library(DT)
## datatable(wrld_simpl@data)
## ------------------------------------------------------------------------
length(wrld_simpl)
nrow(wrld_simpl@data)
## ------------------------------------------------------------------------
ind <- which(wrld_simpl$ISO3=="DEU")
## ------------------------------------------------------------------------
plot(wrld_simpl[ind,])
## ------------------------------------------------------------------------
wrld_simpl@data[ind,]
## ------------------------------------------------------------------------
library(ggplot2);library(choroplethrMaps)
data(country.map)
# Bug fix: the `+` must end the first line. A top-level line that starts
# with `+` is parsed as a separate unary-plus expression, so the
# geom_polygon() layer was never added to the ggplot object.
ggplot(country.map, aes(long, lat, group=group)) +
  geom_polygon()
## ------------------------------------------------------------------------
data(state.map)
ggplot(state.map,aes(long,lat,group=group))+geom_polygon()
## ----warning=F-----------------------------------------------------------
library(raster)
LUX1 <- getData('GADM', country='LUX', level=1)
plot(LUX1)
## ----eval=F--------------------------------------------------------------
## head(LUX1@data)
## ----eval=T,echo=F-------------------------------------------------------
kable(head(LUX1@data))
## ----eval=F,echo=F-------------------------------------------------------
## datatable(LUX1@data)
## ----eval=F--------------------------------------------------------------
## library(maptools)
## krs <- readShapePoly("vg250_ebenen/vg250_krs.shp")
## plot(krs)
## ----echo=F,eval=log_gesis-----------------------------------------------
## library(maptools)
## krs <- readShapePoly("D:/Daten/Daten/GeoDaten/vg250_ebenen/vg250_krs.shp")
## ----echo=F--------------------------------------------------------------
library(DT)
## ----echo=F,eval=F-------------------------------------------------------
## datatable(krs@data)
## ------------------------------------------------------------------------
# NOTE(review): `krs` is only created in chunks above that are commented
# out (eval=F / eval=log_gesis), so sourcing this purled script as-is
# fails here unless `krs` was loaded beforehand — confirm intended setup.
head(krs@data$RS)
## ------------------------------------------------------------------------
# First two digits of the "RS" regional key select a federal state;
# "08" is presumably Baden-Wuerttemberg (Stuttgart's state) — verify.
BLA <- substr(krs@data$RS,1,2)
plot(krs[BLA=="08",])
## ----echo=F,eval=log_gesis-----------------------------------------------
## setwd("D:/Daten/Daten/GeoDaten/")
## ----echo=F,eval=log_home------------------------------------------------
setwd("D:/GESIS/Vorträge/20171122_userStuttgart/data/")
## ----eval=F,echo=F-------------------------------------------------------
## install.packages("maptools")
## ----eval=log_gesis,echo=F-----------------------------------------------
## library(maptools)
## setwd("D:/Daten/Daten/GeoDaten/")
## onb <- readShapePoly("onb_grenzen.shp")
## ----eval=log_home,echo=F------------------------------------------------
library(maptools)
setwd("D:/GESIS/Vorträge/20171122_userStuttgart/data/")
onb <- readShapePoly("ONB_BnetzA_DHDN_Gauss3d-3.shp")
## ----eval=F--------------------------------------------------------------
## onb <- readShapePoly("onb_grenzen.shp")
## ----eval=F--------------------------------------------------------------
## head(onb@data)
## ----eval=noint,echo=F---------------------------------------------------
kable(head(onb@data))
## ----eval=internet,echo=F------------------------------------------------
## datatable(onb@data)
## ----eval=log_gesis------------------------------------------------------
## vwb <- as.character(onb@data$VORWAHL)
## vwb1 <- substr(vwb, 1,2)
## vwb7 <- onb[vwb1=="07",]
## plot(vwb7)
## ----eval=log_home,echo=F------------------------------------------------
vwb <- as.character(onb@data$ONB_NUMMER)
vwb1 <- substr(vwb, 1,1)
vwb7 <- onb[vwb1=="7",]
plot(vwb7)
## ------------------------------------------------------------------------
library(rgdal)
## ----eval=log_gesis,echo=F-----------------------------------------------
## setwd("D:/Daten/Daten/GeoDaten")
## PLZ <- readOGR ("post_pl.shp","post_pl")
## ----eval=log_home,echo=F------------------------------------------------
setwd("D:/GESIS/Workshops/GeoDaten/data/")
PLZ <- readOGR ("post_pl.shp","post_pl")
## ----eval=F--------------------------------------------------------------
## library(rgdal)
## PLZ <- readOGR ("post_pl.shp","post_pl")
## ------------------------------------------------------------------------
SG <- PLZ[PLZ@data$PLZORT99=="Stuttgart",]
plot(SG,col="chocolate1")
## ------------------------------------------------------------------------
BE <- PLZ[PLZ@data$PLZORT99%in%c("Berlin-West",
"Berlin (östl. Stadtbezirke)"),]
plot(BE,col="chocolate2",border="lightgray")
## ------------------------------------------------------------------------
library(sp)
spplot(wrld_simpl,"POP2005")
## ----eval=F,echo=F-------------------------------------------------------
## install.packages("colorRamps")
## ------------------------------------------------------------------------
library(colorRamps)
spplot(wrld_simpl,"POP2005",col.regions=blue2red(100))
## ------------------------------------------------------------------------
spplot(wrld_simpl,"POP2005",col.regions=matlab.like(100))
## ------------------------------------------------------------------------
library(choroplethr)
data(df_pop_state)
## ----eval=F--------------------------------------------------------------
## head(df_pop_state)
## ----echo=F,eval=internet------------------------------------------------
## datatable(df_pop_state)
## ----echo=F,eval=noint---------------------------------------------------
kable(head(df_pop_state))
## ------------------------------------------------------------------------
state_choropleth(df_pop_state)
## ------------------------------------------------------------------------
state_choropleth(df_pop_state,
title = "2012 Population Estimates",
legend = "Population",num_colors = 1,
zoom = c("california", "washington",
"oregon"))
## ------------------------------------------------------------------------
data(df_pop_county)
county_choropleth(df_pop_county)
## ------------------------------------------------------------------------
data(df_pop_country)
country_choropleth(df_pop_country,
title = "2012 Population Estimates",
legend = "Population",num_colors = 1,
zoom = c("austria","germany",
"poland", "switzerland"))
## ------------------------------------------------------------------------
library(WDI)
WDI_dat <- WDI(country="all",
indicator=c("AG.AGR.TRAC.NO",
"TM.TAX.TCOM.BC.ZS"),
start=1990, end=2000)
## ----eval=F--------------------------------------------------------------
## head(WDI_dat)
## ----eval=noint,echo=F---------------------------------------------------
kable(head(WDI_dat))
## ----eval=internet,echo=F------------------------------------------------
## datatable(WDI_dat)
## ------------------------------------------------------------------------
choroplethr_wdi(code="SP.DYN.LE00.IN", year=2012,
title="2012 Life Expectancy")
## ----echo=F,eval=log_gesis-----------------------------------------------
## setwd("J:/Work/Statistik/Kolb/Workshops/2015/Spatial_MA/Folien/dataImport/data/")
## ----eval=T--------------------------------------------------------------
library(xlsx)
HHsr <- read.xlsx2("data/HHsavingRate.xls",1)
## ----echo=F,eval=T-------------------------------------------------------
kable(HHsr[1:8,1:6])
## ----eval=F,echo=F-------------------------------------------------------
## library(xlsx)
## setwd("D:/GESIS/Vorträge/20171122_userStuttgart/data/")
## bev_dat <- read.xlsx("xlsx_Bevoelkerung.xlsx",3)
## ------------------------------------------------------------------------
zen <- read.csv2("data/Zensus_extract.csv")
# Personen mit eigener Migrationserfahrung
# mit beidseitigem Migrationshintergrund
zen2 <- data.frame(Personen_Mig=zen[,which(zen[9,]==128)],
Personen_Mig_bs=zen[,which(zen[9,]==133)])
## ---- eval=F,echo=F------------------------------------------------------
## library(knitr)
## kable(head(bev_dat))
## ----eval=F--------------------------------------------------------------
## url <- "https://raw.githubusercontent.com/Japhilko/
## GeoData/master/2015/data/whcSites.csv"
##
## whcSites <- read.csv(url)
## ----echo=F--------------------------------------------------------------
url <- "https://raw.githubusercontent.com/Japhilko/GeoData/master/2015/data/whcSites.csv"
whcSites <- read.csv(url)
## ----echo=F--------------------------------------------------------------
kable(head(whcSites[,c("name_en","date_inscribed","longitude","latitude","area_hectares","category","states_name_fr")]))
## ------------------------------------------------------------------------
# Match country names from the savings-rate table to the world polygons.
ind <- match(HHsr$geo,wrld_simpl@data$NAME)
# Bug fix: `ind[-which(is.na(ind))]` returns an EMPTY vector whenever
# there are no NAs, because `-integer(0)` is `integer(0)` and
# `x[integer(0)]` selects nothing. Logical subsetting is safe in both
# cases and keeps the same elements when NAs are present.
ind <- ind[!is.na(ind)]
## ------------------------------------------------------------------------
EUR <- wrld_simpl[ind,]
## ------------------------------------------------------------------------
EUR@data$HHSR_2012Q3 <- as.numeric(as.character(HHsr[-(1:2),2]))
EUR@data$HHSR_2015Q2 <- as.numeric(as.character(HHsr[-(1:2),13]))
## ------------------------------------------------------------------------
spplot(EUR,c("HHSR_2012Q3","HHSR_2015Q2"))
## ----eval=T,echo=T-------------------------------------------------------
(load("data/info_bar_Berlin.RData"))
## ----echo=F--------------------------------------------------------------
info_be <- info[,c("addr.postcode","addr.street","name","lat","lon")]
## ----echo=F--------------------------------------------------------------
kable(head(info_be))
## ----eval=F--------------------------------------------------------------
## devtools::install_github("Japhilko/gosmd")
## ----eval=F--------------------------------------------------------------
## library("gosmd")
## pg_MA <- get_osm_nodes(object="leisure=playground","Mannheim")
## pg_MA <- extract_osm_nodes(pg_MA,value='playground')
## ------------------------------------------------------------------------
tab_plz <- table(info_be$addr.postcode)
## ------------------------------------------------------------------------
ind <- match(BE@data$PLZ99_N,names(tab_plz))
ind
## ------------------------------------------------------------------------
BE@data$num_plz <- tab_plz[ind]
## ----eval=F,echo=F-------------------------------------------------------
## install.packages("colorRamps")
## install.packages("XML")
## install.packages("geosphere")
## install.packages("tmap")
## install.packages("curl")
## install.packages("R.oo")
## ------------------------------------------------------------------------
library(tmap)
## ------------------------------------------------------------------------
BE@data$num_plz[is.na(BE@data$num_plz)] <- 0
qtm(BE,fill = "num_plz")
## ------------------------------------------------------------------------
load("data/osmsa_PLZ_14.RData")
## ----echo=F--------------------------------------------------------------
dat_plz <- PLZ@data
kable(head(dat_plz))
## ----echo=F--------------------------------------------------------------
PLZ_SG <- PLZ[PLZ@data$PLZORT99=="Stuttgart",]
## ------------------------------------------------------------------------
qtm(PLZ_SG,fill="bakery")
## ------------------------------------------------------------------------
kable(PLZ_SG@data[which.max(PLZ_SG$bakery),c("PLZ99","lat","lon","bakery")])
## ----eval=F,echo=F-------------------------------------------------------
## install.packages("RDSTK")
## ------------------------------------------------------------------------
library("RDSTK")
## ------------------------------------------------------------------------
PLZ_SG <- PLZ[PLZ@data$PLZORT99=="Stuttgart",]
## ----echo=F--------------------------------------------------------------
tab_landcover <- table(PLZ_SG$land_cover.value)
df_landcover <- data.frame(tab_landcover)
colnames(df_landcover)[1] <- c("Type_landcover")
kable(df_landcover)
## ------------------------------------------------------------------------
qtm(PLZ_SG,fill="land_cover.value")
## ------------------------------------------------------------------------
qtm(PLZ_SG,fill="elevation.value")
## ----eval=F--------------------------------------------------------------
## devtools::install_github("dkahle/ggmap")
## install.packages("ggmap")
## ------------------------------------------------------------------------
library(ggmap)
## ----message=F,eval=F----------------------------------------------------
## qmap("Stuttgart")
## ----message=F,eval=F----------------------------------------------------
## qmap("Germany")
## ----message=F,eval=F----------------------------------------------------
## qmap("Germany", zoom = 6)
## ----echo=F--------------------------------------------------------------
# https://www.nceas.ucsb.edu/~frazier/RSpatialGuides/ggmap/ggmapCheatsheet.pdf
## ----message=F,eval=F----------------------------------------------------
## WIL <- qmap("Wilhelma",zoom=20, maptype="satellite")
## WIL
## ----message=F,eval=F----------------------------------------------------
## qmap('Stuttgart Hauptbahnhof', zoom = 15, maptype="hybrid")
## ----message=F,cache=T,eval=F--------------------------------------------
## qmap('Stuttgart Fernsehturm', zoom = 14,
## maptype="terrain")
## ----message=F,eval=F----------------------------------------------------
## qmap('Stuttgart', zoom = 14,
## maptype="watercolor",source="stamen")
## ----message=F,eval=F----------------------------------------------------
## qmap('Stuttgart', zoom = 14,
## maptype="toner",source="stamen")
## ----message=F,eval=F----------------------------------------------------
## qmap('Stuttgart', zoom = 14,
## maptype="toner-lite",source="stamen")
## ----message=F,eval=F----------------------------------------------------
## qmap('Stuttgart', zoom = 14,
## maptype="toner-hybrid",source="stamen")
## ----message=F,eval=F----------------------------------------------------
## qmap('Stuttgart', zoom = 14,
## maptype="terrain-lines",source="stamen")
## ----message=F,eval=T,warning=F------------------------------------------
library(ggmap)
geocode("Stuttgart")
## ----echo=F,message=F,warning=F------------------------------------------
MAgc <- geocode("Stuttgart Wormser Str. 15")
kable(MAgc)
## ----cache=T,message=F---------------------------------------------------
revgeocode(c(48,8))
## ----message=F-----------------------------------------------------------
mapdist("Marienplatz Stuttgart","Hauptbahnhof Stuttgart")
## ----message=F-----------------------------------------------------------
mapdist("Marienplatz Stuttgart","Hauptbahnhof Stuttgart",mode="walking")
## ----message=F-----------------------------------------------------------
mapdist("Marienplatz Stuttgart","Hauptbahnhof Stuttgart",mode="bicycling")
## ----message=F,warning=F-------------------------------------------------
POI1 <- geocode("B2, 1 Mannheim",source="google")
POI2 <- geocode("Hbf Mannheim",source="google")
POI3 <- geocode("Mannheim, Friedrichsplatz",source="google")
ListPOI <-rbind(POI1,POI2,POI3)
POI1;POI2;POI3
## ----message=F,warning=F,eval=F------------------------------------------
## MA_map +
## geom_point(aes(x = lon, y = lat),
## data = ListPOI)
## ----message=F,warning=F,eval=F------------------------------------------
## MA_map +
## geom_point(aes(x = lon, y = lat),col="red",
## data = ListPOI)
## ----eval=F--------------------------------------------------------------
## ListPOI$color <- c("A","B","C")
## MA_map +
## geom_point(aes(x = lon, y = lat,col=color),
## data = ListPOI)
## ----eval=F--------------------------------------------------------------
## ListPOI$size <- c(10,20,30)
## MA_map +
## geom_point(aes(x = lon, y = lat,col=color,size=size),
## data = ListPOI)
## ----message=F,warning=F,cache=T,eval=F----------------------------------
## from <- "Mannheim Hbf"
## to <- "Mannheim B2 , 1"
## route_df <- route(from, to, structure = "route")
## ----message=F,warning=F,cache=T,eval=F----------------------------------
## qmap("Mannheim Hbf", zoom = 14) +
## geom_path(
## aes(x = lon, y = lat), colour = "red", size = 1.5,
## data = route_df, lineend = "round"
## )
## ----ggmap_citycenter----------------------------------------------------
library(ggmap)
lon_plz <- PLZ_SG@data[which.max(PLZ_SG$bakery),"lon"]
lat_plz <- PLZ_SG@data[which.max(PLZ_SG$bakery),"lat"]
mp_plz <- as.numeric(c(lon_plz,lat_plz))
qmap(location = mp_plz,zoom=15)
## ------------------------------------------------------------------------
library(osmar)
## ----eval=F--------------------------------------------------------------
## src <- osmsource_api()
## gc <- geocode("Stuttgart-Degerloch")
## bb <- center_bbox(gc$lon, gc$lat, 800, 800)
## ua <- get_osm(bb, source = src)
## plot(ua)
## ----echo=F--------------------------------------------------------------
load("data/ua_SG_cc.RData")
plot(ua)
## ------------------------------------------------------------------------
bg_ids <- find(ua, way(tags(k=="building")))
bg_ids <- find_down(ua, way(bg_ids))
bg <- subset(ua, ids = bg_ids)
bg_poly <- as_sp(bg, "polygons")
plot(bg_poly)
|
2c782765b6e83db86b7e497e9587cb80df403c41
|
69bd4458ed69408391c7f1876e2d156885433b43
|
/R/robust-lmrob-tidiers.R
|
47a90fc7c01bcf046ee928112106da8ba34a72e9
|
[] |
no_license
|
sjewo/broom
|
51f52249069ad0e29063609129ffe14996e7fd16
|
e10e7598e33b675cc804a5d3871089c8fc7d5a93
|
refs/heads/master
| 2020-03-09T18:59:29.549589
| 2018-09-07T11:33:22
| 2018-09-07T11:33:22
| 128,946,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,595
|
r
|
robust-lmrob-tidiers.R
|
#' @templateVar class lmRob
#' @template title_desc_tidy_lm_wrapper
#'
#' @param x A `lmRob` object returned from [robust::lmRob()].
#'
#' @details For tidiers for robust models from the \pkg{MASS} package see
#' [tidy.rlm()].
#'
#' @examples
#'
#' library(robust)
#' m <- lmRob(mpg ~ wt, data = mtcars)
#'
#' tidy(m)
#' augment(m)
#' glance(m)
#'
#' gm <- glmRob(am ~ wt, data = mtcars, family = "binomial")
#' glance(gm)
#'
#' @aliases robust_tidiers
#' @export
#' @family robust tidiers
#' @seealso [robust::lmRob()]
tidy.lmRob <- function(x, ...) {
  # lmRob fits expose the same coefficient components that tidy.lm
  # reads, so delegate directly to the lm tidier.
  tidy.lm(x, ...)
}
#' @templateVar class lmRob
#' @template title_desc_augment_lm_wrapper
#'
#' @param x A `lmRob` object returned from [robust::lmRob()].
#'
#' @details For tidiers for robust models from the \pkg{MASS} package see
#' [tidy.rlm()].
#'
#' @export
#' @family robust tidiers
#' @seealso [robust::lmRob()]
augment.lmRob <- function(x, ...) {
  # lmRob fits carry lm-compatible fitted values and residuals, so the
  # lm augmenter can be reused as-is.
  augment.lm(x, ...)
}
#' @templateVar class lmRob
#' @template title_desc_glance
#'
#' @param x A `lmRob` object returned from [robust::lmRob()].
#' @template param_unused_dots
#'
#' @return A one-row [tibble::tibble] with columns:
#'
#' \item{r.squared}{R-squared}
#' \item{deviance}{Robust deviance}
#' \item{sigma}{Residual scale estimate}
#' \item{df.residual}{Number of residual degrees of freedom}
#'
#' @export
#' @family robust tidiers
#' @seealso [robust::lmRob()]
#'
glance.lmRob <- function(x, ...) {
  # Use the robust summary method to obtain the residual scale estimate;
  # the remaining columns are read directly off the fitted object.
  s <- robust::summary.lmRob(x)
  tibble(
    r.squared = x$r.squared,
    deviance = x$dev,
    sigma = s$sigma,
    df.residual = x$df.residual
  )
}
|
42ae0ebe76d1fb8bd0ffc7c0611a7679598049d6
|
e653cd6ae50f5b178a25253423a9e09f8efb8790
|
/man/checkpnr.Rd
|
834c3b1b970a96ce208b152eb769a102dfca8c72
|
[
"MIT"
] |
permissive
|
chrk623/cooccurExtra
|
c44621496dafa56aa1dfdec6383668cc525b637e
|
bea970034f7ef24940282d4e9dc43fc773abba10
|
refs/heads/master
| 2020-08-28T19:19:09.714150
| 2019-10-28T10:43:32
| 2019-10-28T10:43:32
| 217,797,075
| 0
| 0
| null | 2019-10-27T02:52:25
| 2019-10-27T02:52:25
| null |
UTF-8
|
R
| false
| true
| 1,595
|
rd
|
checkpnr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkpnr.R
\name{checkpnr}
\alias{checkpnr}
\title{A function to classify the co-occurrence of species pairs from a "cooccur" output model object.}
\usage{
checkpnr(cooccur.mod)
}
\arguments{
\item{cooccur.mod}{A model object generated by the "cooccur" function from the package "cooccur".}
\item{file}{The name of the file where the data will be saved. Default to \code{NULL}, no saving required.}
}
\value{
A list with letters and numbers.
\itemize{
\item sp1_name - Name of one species in a pair.
\item sp2_name - Name of the other species in a pair.
\item p_gt - Probabilities used for classifying positive and negative associations between the species in each pair.
\item PNR - The co-occurrence associations ("positive", "negative", or "random") between the species in each pair.
}
}
\description{
This is a function of the package "cooccurExtra". The main idea of this function is determining the co-occurrence ("positive", "negative", or "random") of species pairs from a "cooccur" output model object.
It provides an output table, in the form of a data frame, showing the probabilities as well as the classification of positive, negative, or no co-occurrence association between the species in each pair.
}
\examples{
# require packages
# devtools::install_github("rstudio/chromote")
# install.packages("showimage")
library(chromote)
library(showimage)
# ask for a "coocana" output model
data(ModelCA)
mytest = displaytable(mymod = modelca)
plot_htmlwidget(mytest[[3]])
plot_htmlwidget(mytest[[4]])
}
\author{
Yingjia Jot He
}
|
dd7882cc20397f6bbb310956c2385ab46007c5fb
|
58f7e798793e68a9b22d767782d1e5e0bdde7755
|
/src/01_pipeline/00_industry_code_change_scraper.R
|
4db216c3df53daf173452fbabba187b5aed71e77
|
[] |
no_license
|
tjvananne/dataoftheunion
|
b661e1fb654738ddc5c6cdc8af3ad5928525abb7
|
6dd67de84532dcefdc8a5dd43c821164d2f6e3bb
|
refs/heads/master
| 2022-01-20T05:53:11.141499
| 2021-12-30T19:39:31
| 2021-12-30T19:39:31
| 173,947,402
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 783
|
r
|
00_industry_code_change_scraper.R
|
# NAICS industry code change mapping tables
# Scrapes the 2012 -> 2017 NAICS code change table from naics.com and
# writes it to a CSV for downstream pipeline steps.
# Script config -------
FILE_NAME_2012_TO_2017 <- "proc_data/NAICS_2012_to_2017_map.csv"
# Load libs -----
library(dplyr)
library(rvest)
library(xml2)
# Scrape changes -----
url <- "https://www.naics.com/naics-resources/2017-naics-changes-preview/"
urldata <- xml2::read_html(url)
urltables <- rvest::html_table(urldata)
# The first table on the page holds the 2012 -> 2017 concordance.
industry_code_mapper <- urltables[[1]]
names(industry_code_mapper)
# Rename the scraped columns to stable snake_case names.
industry_code_mapper <- industry_code_mapper %>%
  rename(
    industry_code_2017=`2017 NAICS Codes`,
    industry_title_2017=`2017 NAICS Descriptions`,
    industry_code_2012=`2012 NAICS Codes`,
    industry_title_2012=`2012 NAICS Descriptions`
  )
# Use row.names = FALSE rather than the re-assignable shorthand `F`.
write.csv(industry_code_mapper, FILE_NAME_2012_TO_2017, row.names = FALSE)
|
4bd22cbec0ca272f78444fdb007cd3fd374e93d8
|
d08e69198fbd60086aa35d765c7675006d06cf3f
|
/R/RidgeOrdinalLogistic.R
|
be7e0d3350ff6f4d93721286dbc7a6a834cbdd44
|
[] |
no_license
|
villardon/MultBiplotR
|
7d2e1b3b25fb5a1971b52fa2674df714f14176ca
|
9ac841d0402e0fb4ac93dbff078170188b25b291
|
refs/heads/master
| 2023-01-22T12:37:03.318282
| 2021-05-31T09:18:20
| 2021-05-31T09:18:20
| 97,450,677
| 3
| 2
| null | 2023-01-13T13:34:51
| 2017-07-17T08:02:54
|
R
|
UTF-8
|
R
| false
| false
| 1,031
|
r
|
RidgeOrdinalLogistic.R
|
# Fit a ridge-penalized ordinal logistic regression and augment the fitted
# model with deviance-based tests and pseudo R-squared statistics.
#
# Args:
#   y: the dependent variable; must be an ordered factor.
#   x: predictor matrix (n rows) or a single predictor vector.
#   penalization: ridge penalty passed to OrdinalLogisticFit.
#   tol, maxiter: convergence tolerance / iteration limit for the fitter.
#   show: if TRUE, the fitter prints its progress.
#
# Returns: the OrdinalLogisticFit result with added fields (DevianceNull,
# Dif, df, pval, CoxSnell, Nagelkerke, MacFaden, pred as an ordered
# factor) and class "OrdinalLogisticRegression".
RidgeOrdinalLogistic <- function(y, x, penalization = 0.1, tol = 1e-04, maxiter = 200, show = FALSE) {
  if (!is.ordered(y)) stop("The dependent variable must be ordinal")
  # Sample size n: rows when x is a matrix, length otherwise.
  if (is.matrix(x)) {
    n <- nrow(x)
  }
  else {
    n <- length(x)
  }
  Y=y
  # Remember the original factor levels before coding y numerically.
  Niveles=levels(y)
  y=as.numeric(y)
  # Full model, and an intercept-only (null) model with the same settings.
  model=OrdinalLogisticFit(y,x, penalization = penalization, tol = tol, maxiter = maxiter, show = show)
  null=OrdinalLogisticFit(y,x=NULL, penalization = penalization, tol = tol, maxiter = maxiter, show = show)
  model$DevianceNull = null$Deviance
  # Likelihood-ratio style comparison of the full model against the null.
  model$Dif = (model$DevianceNull - model$Deviance)
  model$df = model$nvar
  model$pval = 1 - pchisq(model$Dif, df = model$df)
  # Pseudo R-squared measures (Cox & Snell, Nagelkerke, McFadden).
  model$CoxSnell = 1 - exp(-1 * model$Dif/n)
  model$Nagelkerke = model$CoxSnell/(1 - exp((model$DevianceNull/(-2)))^(2/n))
  model$MacFaden = 1 - (model$Deviance/model$DevianceNull)
  class(model) = "OrdinalLogisticRegression"
  # Map numeric predictions back onto the factor levels actually predicted.
  ord=Niveles[sort(unique(model$pred))]
  model$pred=ordered(model$pred)
  levels(model$pred)=ord
  return(model)
}
|
8e54b39b39363f19460198f486d74dd1b365e307
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ashr/examples/lik_normal.Rd.R
|
8d99a33a2b62e7032accc0d4c381a43dcaec164e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 240
|
r
|
lik_normal.Rd.R
|
library(ashr)
### Name: lik_normal
### Title: Likelihood object for normal error distribution
### Aliases: lik_normal
### ** Examples
# Simulate data whose error is the sum of two standard-normal draws.
z <- rnorm(100) + rnorm(100)
# Adaptive shrinkage with an explicit normal likelihood.
ash(z, 1, lik = lik_normal())
|
88a933506be578da6cccc27b75330108a388eb71
|
82ce9573daab73ac52534e9baddbdf6244abd5d3
|
/pgm-r/stacking_20171108r1.R
|
252cf61d5a2c48c8e17f8a5b3fe8d5248bddabf0
|
[] |
no_license
|
zoe3/bank
|
be2fa37e00123941a56a633fa63e0bf53e49473d
|
7db841a3fec4256083291355e9f51597ec5837e9
|
refs/heads/master
| 2021-08-18T22:06:41.613722
| 2017-11-24T03:10:32
| 2017-11-24T03:10:32
| 110,041,227
| 0
| 0
| null | null | null | null |
SHIFT_JIS
|
R
| false
| false
| 8,634
|
r
|
stacking_20171108r1.R
|
## Example implementation of model stacking (for competition submission)
# Libraries used
library(dplyr)
library(rpart)
library(pROC)
library(ggplot2)
library(partykit)
# Load the data
train<-read.csv("../motodata/train.csv", header=TRUE)
test<-read.csv("../motodata/test.csv", header=TRUE)
#### データ加工
## Job
test$y <- 9
combi <- rbind(train,test)
combi <- combi %>%
dplyr::mutate(job2 = if_else(job %in% c('retired','students'), 'notworker', 'worker')) %>%
dplyr::mutate(job2 = if_else(job == 'unknown' , 'unknown', job2)) %>%
dplyr::mutate(job2 = as.factor(job2)) %>%
glimpse
combi <- combi %>%
dplyr::mutate(job3 = if_else(job %in% c('admin.','bule-collar','management','services','technician'), 'major', 'minor')) %>%
dplyr::mutate(job3 = as.factor(job3)) %>%
glimpse
train <- combi %>%
dplyr::filter(y < 9)
str(train)
test <- combi %>%
dplyr::filter(y == 9) %>%
dplyr::select(-y)
str(test)
## 最終接触時間(duration)は外れ値を0.995%tileを寄せる。線形にするため、ルートを取る。
## 年齢(age)は、50で折り返し。
## 年間平均残高(balance)は、95%tileを取る。
train <- train %>%
dplyr::mutate(duration2=ifelse(duration >= quantile(duration,probs=.995),
quantile(duration,probs=.995),
duration)) %>%
dplyr::mutate(duration3 = sqrt(duration2)) %>%
dplyr::mutate(age2=abs(50-age)) %>%
dplyr::mutate(balance2=ifelse(balance >= quantile(balance,probs=.95),
quantile(balance,probs=.95),
balance))
test <- test %>%
dplyr::mutate(duration2=ifelse(duration >= quantile(duration,probs=.995),
quantile(duration,probs=.995),
duration)) %>%
dplyr::mutate(duration3 = sqrt(duration2)) %>%
dplyr::mutate(age2=abs(50-age)) %>%
dplyr::mutate(balance2=ifelse(balance >= quantile(balance,probs=.95),
quantile(balance,probs=.95),
balance))
## Customerを分割. 前回キャンペーン有
train_old <- train %>%
dplyr::filter(pdays > -1)
test_old <- test %>%
dplyr::filter(pdays > -1)
train_new <- train %>%
dplyr::filter(pdays==-1) %>%
dplyr::select(-c(pdays,previous,poutcome))
test_new <- test %>%
dplyr::filter(pdays==-1) %>%
dplyr::select(-c(pdays,previous,poutcome))
## 前キャンペーンの日付求める
lct <- Sys.getlocale("LC_TIME"); Sys.setlocale("LC_TIME", "C")
train_old <- train_old %>%
mutate(lastdate = as.POSIXct(as.Date(paste(day,month,"2017",sep=""),"%d%b%Y")))
test_old <- test_old %>%
mutate(lastdate = as.POSIXct(as.Date(paste(day,month,"2017",sep=""),"%d%b%Y")))
train_old <- train_old %>%
mutate(pdate = as.POSIXct(as.Date(paste(day,month,"2017",sep=""),"%d%b%Y") - pdays))
test_old <- test_old %>%
mutate(pdate = as.POSIXct(as.Date(paste(day,month,"2017",sep=""),"%d%b%Y") - pdays))
Sys.setlocale("LC_TIME", lct)
## Implementation of stacking
# Ensemble a decision tree (rpart) and a logistic regression (glm),
# combined by a logistic-regression (glm) meta model.
### "Old" customers (contacted in a previous campaign)
# Fix the random seed for reproducibility
set.seed(17)
# Split the training data into K groups
K<-5
# sample(vector, number to draw, with/without replacement, selection
# probability of each element)
train_old$cv_group<-sample(1:K, nrow(train_old), replace=TRUE, prob=rep(1/K, K))
# Initialise the out-of-fold (train) and test prediction scores
score_train_tree<-NULL
score_train_logi<-NULL
score_test_tree<-NULL
score_test_logi<-NULL
y<-NULL
# Cross-validation
for(j in 1:K){
  # Split into construction (training) and validation folds
  train_tmp<-train_old %>%
    dplyr::filter(cv_group!=j) %>%
    dplyr::select(-cv_group)
  test_tmp<-train_old %>%
    dplyr::filter(cv_group==j) %>%
    dplyr::select(-cv_group)
  ## Fit the decision tree on the construction fold
  tree_tmp<-rpart(y~., data=train_tmp,
    maxdepth=10, minbucket=12, cp=0.000008,
    method="class", parms=list(split="gini"))
  ## Fit the logistic regression on the construction fold
  logi_tmp<-glm(y~., data=train_tmp, family=binomial(link="logit"))
  # Out-of-fold predictions plus the matching response values
  pred_train_tree<-predict(tree_tmp, test_tmp)[,2]
  pred_train_logi<-predict(logi_tmp, test_tmp, type="response")
  y<-c(y, test_tmp$y)
  score_train_tree<-c(score_train_tree, pred_train_tree)
  score_train_logi<-c(score_train_logi, pred_train_logi)
  # Predictions for the held-out test set (one column per fold)
  pred_test_tree<-predict(tree_tmp, test_old)[,2]
  pred_test_logi<-predict(logi_tmp, test_old, type="response")
  score_test_tree<-cbind(score_test_tree, pred_test_tree)
  score_test_logi<-cbind(score_test_logi, pred_test_logi)
}
# Drop the helper grouping variable
train_old <- train_old %>%
  dplyr::select(-cv_group)
# Average the test-set predictions across folds
# apply(data, 1, fun) applies fun to each row
score_test_tree<-apply(score_test_tree, 1, mean)
score_test_logi<-apply(score_test_logi, 1, mean)
m_dat_test1<-data.frame(tree=score_test_tree, logi=score_test_logi)
# Build the meta-model features
m_dat_train<-data.frame(tree=score_train_tree, logi=score_train_logi, y=y)
# Fit the meta model (logistic regression in this case)
m_logi<-glm(y~., data=m_dat_train, family=binomial(link="logit"))
## Apply to the validation data (1)
# Apply the meta model
pred_test_m_logi1<-predict(m_logi, m_dat_test1, type="response")
# CSV output
submit1_old <- data.frame(id=test_old$id, score=pred_test_m_logi1)
### New customers (never contacted before)
## NOTE(review): this segment duplicates the "Old" stacking pipeline above
## on train_new/test_new; consider factoring the CV loop into a function.
# Fix the RNG seed for reproducibility
set.seed(17)
# Split the training data into K folds
K<-5
# Assign each row a fold id uniformly at random
train_new$cv_group<-sample(1:K, nrow(train_new), replace=TRUE, prob=rep(1/K, K))
# Initialize out-of-fold (train) and held-out test prediction accumulators;
# `y` collects the out-of-fold targets in score order.
score_train_tree<-NULL
score_train_logi<-NULL
score_test_tree<-NULL
score_test_logi<-NULL
y<-NULL
# Cross-validation: fit the level-0 models once per fold
for(j in 1:K){
  # Split into build data (all folds but j) and validation data (fold j)
  train_tmp<-train_new %>%
    dplyr::filter(cv_group!=j) %>%
    dplyr::select(-cv_group)
  test_tmp<-train_new %>%
    dplyr::filter(cv_group==j) %>%
    dplyr::select(-cv_group)
  ## Fit the decision tree on the build data
  tree_tmp<-rpart(y~., data=train_tmp,
                  maxdepth=10, minbucket=12, cp=0.000008,
                  method="class", parms=list(split="gini"))
  ## Fit the logistic regression on the build data
  logi_tmp<-glm(y~., data=train_tmp, family=binomial(link="logit"))
  # Out-of-fold predictions plus the matching targets
  # (column 2 of predict.rpart assumed to be the positive class probability)
  pred_train_tree<-predict(tree_tmp, test_tmp)[,2]
  pred_train_logi<-predict(logi_tmp, test_tmp, type="response")
  y<-c(y, test_tmp$y)
  score_train_tree<-c(score_train_tree, pred_train_tree)
  score_train_logi<-c(score_train_logi, pred_train_logi)
  # Predictions on the held-out test set (one cbind column per fold)
  pred_test_tree<-predict(tree_tmp, test_new)[,2]
  pred_test_logi<-predict(logi_tmp, test_new, type="response")
  score_test_tree<-cbind(score_test_tree, pred_test_tree)
  score_test_logi<-cbind(score_test_logi, pred_test_logi)
}
# Drop the helper fold column
train_new <- train_new %>%
  dplyr::select(-cv_group)
# Average the K per-fold test predictions (row-wise)
score_test_tree<-apply(score_test_tree, 1, mean)
score_test_logi<-apply(score_test_logi, 1, mean)
m_dat_test1<-data.frame(tree=score_test_tree, logi=score_test_logi)
# Meta-model training set: out-of-fold level-0 scores plus the target
m_dat_train<-data.frame(tree=score_train_tree, logi=score_train_logi, y=y)
# Fit the meta-model (logistic regression)
m_logi<-glm(y~., data=m_dat_train, family=binomial(link="logit"))
## Apply to the held-out test data
# Meta-model predicted probabilities
pred_test_m_logi1<-predict(m_logi, m_dat_test1, type="response")
# Per-id scores for the "new" customer segment
submit1_new <- data.frame(id=test_new$id, score=pred_test_m_logi1)
## Merge the per-segment predictions and write the submission file
## (comma-separated, no header, no quoting, no row names).
## FIX: spell out TRUE/FALSE — `T`/`F` are ordinary, reassignable bindings.
submit1 <- rbind(submit1_old, submit1_new)
write.table(submit1,
            file="../submit/submit_20171108_ens_tree_logi_1.csv",
            quote=FALSE, sep=",", row.names=FALSE, col.names=FALSE)
|
1357ebc6636d1198e1d5aae8e909fd208bb65ba5
|
b81b84fe38fd6e7580f07818a09e900566a55c5c
|
/R/training.R
|
2e8a630a19d50589bf339ad11aaabeef34940306
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Moonerss/violentSurv
|
ee4569b0abddb6b12ee9d80f330b8068b726fc78
|
39cce5e200ac35c339bce10b7c7db269c88c3675
|
refs/heads/main
| 2023-04-06T20:08:12.149462
| 2021-04-22T11:33:09
| 2021-04-22T11:33:09
| 316,680,883
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,451
|
r
|
training.R
|
#' @name train_signature
#' @title Train the signature model on a data set
#' @description this function will filter out the signatures that are not significant in the logrank test
#' @param surv_data a `data.frame` containing variables, ids, time and event.
#' @param id column name specifying samples ID , default is 'ids'.
#' @param time column name specifying time, default is 'time'.
#' @param event column name specifying event status, default is 'status'.
#' @param exp_data a `matrix` of expression profile. row names are genes, column names are sample ID.
#' @param genes a character vector of genes to construct risk model. They must be in `exp_data` row names.
#' @param num a numeric vector of the number of genes in the risk model.
#' @param beta if `NULL`, the function computes the beta value for each gene in univariate cox analysis;
#' or a data like the return object of \link{get_beta}()
#' @param labels_value the same like `value` in \link{label_sample}()
#' @param cut_p a p value to filter the result of `train_signature`; we only keep the risk models with p value
#' smaller than this value
#' @param keep_data whether to keep the data used to compute the logrank test in this function.
#' It may be very important for follow-up analysis (the `train_unicox`/`train_multicox` verbs require it).
#' @importFrom dplyr bind_cols left_join group_by ungroup mutate filter select all_of bind_rows
#' @importFrom purrr map reduce map_dbl map2
#' @importFrom tidyr nest
#' @import cli
#' @export
#' @examples
#' \dontrun{
#' data(blca_clinical)
#' data(blca_exp)
#' obj <- train_signature(surv_data = blca_clinical, id = "ids", time = "time", event = "status",
#'                        exp_data = blca_exp, genes = c("IL6", "TGFB3", "VHL", "CXCR4"), num = 2:4,
#'                        beta = NULL, labels_value = NULL, cut_p = 1, keep_data = T)
#' }
#'
train_signature <- function(surv_data, id = "ids", time = "time", event = "status",
                            exp_data, genes, num, beta = NULL, labels_value = NULL,
                            cut_p = NULL, keep_data = TRUE) {
  # Validate inputs: survival frame must carry the id/time/event columns and
  # the expression matrix must contain every requested gene.
  cli::cli_process_start("Checking the data")
  stopifnot(is.data.frame(surv_data))
  stopifnot(all(is.element(c(id, time, event), colnames(surv_data))))
  stopifnot(is.matrix(exp_data), is.numeric(exp_data), all(is.element(genes, rownames(exp_data))))
  cli::cli_process_done()
  # Keep only samples present in both the survival data and the expression
  # matrix, aligned in the same order.
  cli::cli_process_start("Getting overlap samples")
  co_samples <- intersect(surv_data[[id]], colnames(exp_data))
  surv_data <- surv_data[match(co_samples, surv_data[[id]]) ,]
  exp_data <- exp_data[, co_samples]
  cli::cli_process_done()
  # Per-gene beta coefficients: computed via univariate Cox (Wald) unless the
  # caller supplied a precomputed table.
  cli::cli_process_start("Getting beta value")
  if (is.null(beta)) {
    dat <- surv_data[, c(id, time, event)] %>%
      dplyr::bind_cols(as.data.frame(t(exp_data[genes,])))
    ## whether use parallel
    cox_obj <- optimal_cox(data = dat, time = time, event = event,
                           variate = genes, multicox = F, global_method = "wald")
    beta <- get_beta(cox_obj)
  } else {
    beta <- beta
  }
  cli::cli_process_done()
  # Enumerate every gene combination of each requested size.
  cli::cli_process_start("Combining signatures")
  signature_list <- combn_signature(genes = genes, n = num)
  cli::cli_process_done()
  # One risk score per sample per candidate signature, joined back onto the
  # survival data and nested per signature (one row per signature afterwards).
  cli::cli_process_start("Calculating risk score")
  all_score <- purrr::map(signature_list, function(x) {
    x %<>% t() %>% as.data.frame()
    purrr::map(x, function(y) {
      risk_score(exp_data = exp_data, genes = y, beta = beta) %>%
        dplyr::left_join(surv_data, by = id)
    })
  }) %>%
    purrr::map(~purrr::reduce(.x, bind_rows)) %>%
    purrr::reduce(bind_rows) %>%
    dplyr::group_by(signature) %>%
    nest() %>%
    ungroup()
  # Attach the per-signature beta rows (space-separated gene names in the
  # `signature` string index into the beta table), stripping the "run_cox"
  # class so the list column holds plain tibbles.
  all_score %<>% dplyr::mutate(beta_value = purrr::map(signature, function(x) {
    sig <- unlist(strsplit(x, split = " "))
    res <- dplyr::slice(beta, match(sig, Variable))
  })) %>% dplyr::mutate(beta_value = purrr::map(beta_value, function(x) {
    class(x) <- setdiff(class(x), "run_cox"); x}))
  cli::cli_process_done()
  # Dichotomize samples per signature by their risk score; record the cutoff.
  cli::cli_process_start("Setting labels")
  case <- all_score %>%
    dplyr::pull(data) %>%
    purrr::map(~label_sample(score = .x$risk_score, value = labels_value))
  labeled_sample <- all_score %>%
    dplyr::mutate(data = purrr::map2(data, case, function(x, y) {x %>% dplyr::mutate(labels = y$labeled_sample)}),
                  cutoff_value = purrr::map_dbl(case, ~.x$value))
  cli::cli_process_done()
  # Logrank test of the label split per signature.
  cli::cli_process_start("Evaluating logrank test")
  logrank_res <- labeled_sample %>%
    dplyr::mutate(logrank_pval = purrr::map_dbl(data, function(x) {
      logrank_p(data = x, time = time, event = event, variate = "labels", verbose = F) %>% pull(p_value)
    }) %>% unlist()) %>%
    dplyr::select(signature, cutoff_value, logrank_pval, beta_value, data)
  cli::cli_process_done()
  # Optional p-value filtering and optional dropping of the nested data.
  cli::cli_process_start("Filtering result")
  if (is.null(cut_p)) {
    if (isTRUE(keep_data)) {
      res <- logrank_res
    } else {
      res <- dplyr::select(logrank_res, -data)
    }
  } else {
    if (isTRUE(keep_data)) {
      res <- dplyr::filter(logrank_res, logrank_pval < cut_p)
    } else {
      res <- dplyr::filter(logrank_res, logrank_pval < cut_p) %>%
        dplyr::select(-data)
    }
  }
  # Tag the result so the train_* verbs can recognize it (see test_obj()).
  class(res) <- c("training_signature", class(res))
  cli::cli_process_done()
  return(res)
}
#' @name train_unicox
#' @title Do the univariate cox analysis in training data sets.
#' @param obj the `training_signature` object obtained from \link{train_signature}() .
#' @param type Use which variate to do the univariate cox analysis: if `continuous`, use the `risk_score`;
#' if `discrete`, use the `labels`.
#' @param cut_p the cutoff p value of univariate cox analysis. Default 0.05.
#' @importFrom dplyr mutate filter select pull
#' @importFrom purrr map
#' @importFrom cli cli_process_start cli_process_done
#' @return return a `training_signature` object with `unicox_pval` column.
#' @export
#' @examples
#' \dontrun{
#' uni_cox_obj <- train_unicox(obj, type = "discrete", cut_p = 1)
#' }
#'
train_unicox <- function(obj, type = c("continuous", "discrete"), cut_p = 0.05) {
  # Validate that obj is a training_signature object carrying nested data.
  test_obj(obj)
  cli::cli_process_start("Doing univariate cox analysis")
  # Plain `if` instead of scalar ifelse(); match.arg() also validates `type`.
  var <- if (match.arg(type) == "continuous") "risk_score" else "labels"
  # One univariate Cox fit (Wald test) per signature on its nested survival
  # data; keep only the p-value, then filter on the cutoff.
  obj %<>% dplyr::mutate(unicox_pval = purrr::map(data, optimal_cox, variate = var, multicox = FALSE,
                                                  global_method = "wald")) %>%
    dplyr::mutate(unicox_pval = purrr::map(unicox_pval, dplyr::pull, p_value) %>% unlist()) %>%
    dplyr::filter(unicox_pval < cut_p)
  # Reorder columns by name instead of fragile positional indices; the extra
  # branch handles objects that already carry a multicox_pval column.
  if (rlang::has_name(obj, "multicox_pval")) {
    res <- dplyr::select(obj, signature, cutoff_value, logrank_pval, unicox_pval,
                         multicox_pval, beta_value, data)
  } else {
    res <- dplyr::select(obj, signature, cutoff_value, logrank_pval, unicox_pval,
                         beta_value, data)
  }
  cli::cli_process_done()
  return(res)
}
#' @name train_multicox
#' @title Do the multivariate cox analysis in training data sets.
#' @param obj the `training_signature` object obtained from \link{train_signature}() .
#' @param type Use which variate to do the multivariate cox analysis: if `continuous`, use the `risk_score`;
#' if `discrete`, use the `labels`.
#' @param covariate a character vector of additional covariate column names present in the
#' nested `data` (e.g. `c("Age", "Gender")`); if `NULL`, falls back to the univariate analysis.
#' @param cut_p the cutoff p value of multivariate cox analysis. Default 0.05.
#' @importFrom dplyr mutate filter select pull
#' @importFrom purrr map
#' @importFrom cli cli_process_start cli_process_done
#' @return return a `training_signature` object with `multicox_pval` column.
#' @export
#' @examples
#' \dontrun{
#' multi_cox_obj <- train_multicox(obj = uni_cox_obj, type = "discrete", covariate = c("Age", "Gender"), cut_p = 1)
#' multi_cox_obj <- train_multicox(obj = obj, type = "discrete", covariate = c("Age", "Gender"), cut_p = 1)
#' ## covariate = NULL
#' multi_cox_obj <- train_multicox(obj = uni_cox_obj, type = "discrete", cut_p = 1)
#' }
train_multicox <- function(obj, type = c("continuous", "discrete"), covariate = NULL, cut_p = 0.05) {
  # Validate that obj is a training_signature object carrying nested data.
  test_obj(obj)
  ## Normalize `type` once; reused below (the original re-ran match.arg()).
  type <- match.arg(type)
  cli::cli_process_start("Doing Multivariate cox analysis")
  if (is.null(covariate)) {
    # Without covariates a multivariate model degenerates to the univariate
    # one, so reuse (or compute) the univariate result.
    cli::cli_alert_info("The `covariate` is NULL, keep univariate result!")
    ## check whether done univariate cox analysis
    if (rlang::has_name(obj, "unicox_pval")) {
      res <- obj
    } else {
      res <- train_unicox(obj, type = type, cut_p = cut_p)
    }
  } else {
    # Every covariate must exist in the nested per-signature data frames.
    stopifnot(all(is.element(covariate, colnames(obj$data[[1]]))))
    # Plain `if` on the already-normalized `type` (no scalar ifelse()).
    uni_type <- if (type == "continuous") "risk_score" else "labels"
    covars <- c(uni_type, covariate)
    # Multivariate Cox per signature; keep only the p-value of the risk
    # variable itself (str_detect matches e.g. "labelsHigh"), then filter.
    obj %<>% dplyr::mutate(multicox_pval = purrr::map(data, optimal_cox, variate = covars, multicox = TRUE,
                                                      global_method = "wald")) %>%
      dplyr::mutate(multicox_pval = purrr::map(multicox_pval, function(x) {
        x %>% filter(stringr::str_detect(Variable, uni_type)) %>% select(p_value)
      }) %>% unlist()) %>%
      dplyr::filter(multicox_pval < cut_p)
    # Reorder columns by name instead of fragile positional indices.
    if (rlang::has_name(obj, "unicox_pval")) {
      res <- dplyr::select(obj, signature, cutoff_value, logrank_pval, unicox_pval,
                           multicox_pval, beta_value, data)
    } else {
      res <- dplyr::select(obj, signature, cutoff_value, logrank_pval,
                           multicox_pval, beta_value, data)
    }
  }
  cli::cli_process_done()
  return(res)
}
### useful function ######
# Guard used by the train_* verbs: validates that `obj` was produced by
# train_signature() and still carries its nested `data` list-column.
test_obj <- function(obj) {
  # Fail fast when the object does not come from train_signature().
  if (!is(obj, "training_signature")) {
    stop("The `obj` is not a `training_signature` object!")
  }
  # The nested survival data is required by every downstream analysis.
  if (!rlang::has_name(obj, "data")) {
    stop("There is no information to do the analysis, please set the `keep_data` to TRUE in the
`train_signature` function to get the needed data!")
  }
}
|
51f1d4a7fb22de7292038b6ab6c72e63db57344f
|
175e45e8344a1d2a8fac50e12fca4a9bfb6b5e18
|
/man/position-methods.Rd
|
c1797bd85cd526a4ca19544d3614079eace0c37b
|
[] |
no_license
|
reidt03/MassArray
|
99b3c0c303b1df2d47dbc212e1e440a848824b57
|
186fad2e1bc09670566fc6f1c0c6398f44f4a66e
|
refs/heads/master
| 2020-04-29T18:41:16.530471
| 2019-03-18T21:56:09
| 2019-03-18T21:56:09
| 176,330,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 769
|
rd
|
position-methods.Rd
|
\name{position-methods}
\docType{methods}
\alias{position-methods}
\alias{position,MassArrayData-method}
\alias{position<-,MassArrayData,missing-method}
\alias{position<-,MassArrayData,character-method}
\title{ Operate on positional information (methods)}
\description{
Methods to access (and/or assign) positional information for a MassArrayData object
}
\section{Methods}{
\describe{
\item{object = "MassArrayData"}{ Access positional information for MassArrayData object }
\item{object = "MassArrayData", value = "missing"}{ Handle empty function call, simply return the MassArrayData object }
\item{object = "MassArrayData", value = "character"}{ Assign position of MassArrayData object to \code{value} }
}}
\seealso{ \code{\link{position}} }
\keyword{methods}
|
b40c69239b078dbeb3a0dd067d9c53e799b063f1
|
1ea35aa8adc3131f178d873800c1c818343b9dec
|
/src/R/shiny/ROMOPOmics/src/applyFilters.R
|
e65298ab75e93061451b871e66975cc50f61c57e
|
[
"MIT"
] |
permissive
|
NCBI-Codeathons/OMOPOmics
|
9afa7abd4f59baa48248b73a823d5e50d0197663
|
c6f0293f99189cc682d04aef9f40e43a8878ca8b
|
refs/heads/master
| 2020-12-06T04:54:42.723704
| 2020-06-04T16:45:14
| 2020-06-04T16:45:14
| 232,348,286
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 851
|
r
|
applyFilters.R
|
#!/bin/Rscript
#applyFilters
# Given the query table and a compiled filter table, this function iteratively
# applies each filter (one per row of the table) based on the filter's type
# indicated in the "type" column. For instance, a filter of type "txt" is
# applied using the characterFilter() function.
# Apply each filter row of `filter_table` to `query_in` in order, dispatching
# on the row's "type" column ("txt" rows go through filterCharacter()).
# Returns the filtered query; a NULL or empty filter table is a no-op.
applyFilters <- function(query_in = qu, filter_table = ft){
  # No filters at all -> return the query untouched.
  if(is.null(filter_table) || nrow(filter_table) == 0){return(query_in)}
  query_out <- query_in
  # FIX: seq_len() instead of 1:nrow() — with a zero-row table, 1:0 yields
  # c(1, 0) and indexes non-existent rows, crashing the loop.
  for(i in seq_len(nrow(filter_table))){
    if(filter_table[i,"type"]=="txt"){
      query_out <- filterCharacter(query_in=query_out,
                                   col_in = unlist(filter_table[i,"flt_col_name"]),
                                   txt_in = unlist(filter_table[i,"txt"]),
                                   logic_in=unlist(filter_table[i,"logic"]))
    }
  }
  return(query_out)
}
#applyFilters()
|
0af73d27b3d19481d275e28124e881399e1f3a8c
|
7c3b1b37f1986d00ef740e0185db4e24b5ca4cb4
|
/man/gimage.Rd
|
95b00bc39792e6ddfea12e8c7b3358383e911bb8
|
[] |
no_license
|
jverzani/gWidgetsWWW2.rapache
|
2b9ea2402b334d9b57cc434ef81d8169d5a88f54
|
f0678d800d0e824f15f0098212271caac71bb67c
|
refs/heads/master
| 2020-04-06T07:02:06.600687
| 2014-02-01T03:47:41
| 2014-02-01T03:47:41
| 5,430,063
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,512
|
rd
|
gimage.Rd
|
\name{gimage}
\alias{gimage}
\title{Container for an image}
\usage{
gimage(filename = "", dirname = "", size = NULL,
handler = NULL, action = NULL, container = NULL, ...,
width = NULL, height = NULL, ext.args = NULL)
}
\arguments{
\item{filename}{an image file.}
\item{dirname}{ignored.}
\item{size}{A vector passed to \code{width} and
\code{height} arguments.}
\item{handler}{optional handler bound via
\code{addHandlerChanged}}
\item{action}{optional value to parameterize handler}
\item{container}{parent container}
\item{...}{passed along to \code{add} call of the
container. Can be used to adjust layout parameters. May
also have other uses.}
\item{width}{a pre-specified width (in pixels) for the
widget}
\item{height}{a pre-specified height (in pixels) for the
widget}
\item{ext.args}{A list of extra arguments to pass to the
ExtJS constructor}
}
\description{
The image shows an image file. Use \code{ghtml} with the
"img" tag to show a url
}
\note{
requires tempdir to be mapped to a specific url, as this
is assumed by \code{get_tempfile} and
\code{get_tempfile_url}
}
\examples{
w <- gwindow("hello", renderTo="replaceme")
sb <- gstatusbar("Powered by gWidgetsWWW and Rook", cont=w)
g <- ggroup(cont=w, horizontal=FALSE)
f <- tempfile()
png(f)
hist(rnorm(100))
dev.off()
i <- gimage(f, container=g)
b <- gbutton("click", cont=g, handler=function(h,...) {
f <- tempfile()
png(f)
hist(rnorm(100))
dev.off()
svalue(i) <- f
})
}
|
44ac495125f014ab6c6473677fb1cef61d9ff074
|
72fd0ce524135aad3de7a54fb8a6d6be72e76c6a
|
/ANNUncomplicatedMalAug2020.r
|
2f38b9d9aebe7fcd17bacd8b726eb973584ba083
|
[] |
no_license
|
winfrednyoroka/Machine-Learning-in-Clinical-Malaria
|
2d955ad1890feb64b8209c6bf1e0c7d8c1faf7c0
|
d2108043a8b94ed0748801ae9e0cf07ec9f1f9b0
|
refs/heads/master
| 2022-12-17T10:18:48.325575
| 2020-09-28T09:04:10
| 2020-09-28T09:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,117
|
r
|
ANNUncomplicatedMalAug2020.r
|
# Script: ANN classifier for Uncomplicated Malaria (UM) vs non-malaria infection (nMI)
####################################@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
library(pacman)
pacman::p_load(ggplot2, reshape2, gplots, grid, spatstat, raster, sp, dplyr,
               klaR, ggfortify, stringr, cluster, Rtsne, readr, RColorBrewer, Hmisc, mice, tidyr,
               purrr, VIM, magrittr, corrplot, caret, gridExtra, ape, tidytree, pheatmap, stats,
               vegan, FactoMineR, factoextra, outliers, ggpubr, keras, lime, tidyquant, rsample,
               recipes, corrr, yardstick, tensorflow, caret, limma, compareGroups, forcats)
#S####################################@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Set working directory (machine-specific path; consider here::here() for portability)
setwd("/home/root01/Documents/machine_learn_sep_2020/Analysis/")
# Load the pre-processed (imputed) data set.
# FIX: the original used `header = T, na.strings = T`; na.strings expects a
# character vector, so TRUE was coerced to the literal string "TRUE" — almost
# certainly unintended. Use the conventional "NA" marker and spell out TRUE.
clinhem <- read.csv("../Data/Imputed_um_nmi.csv", header = TRUE, na.strings = "NA"); glimpse(clinhem)
# Remove the X (row-index) column plus hb_level, hematocrit, location and patient_age
clinhem <- clinhem %>% select(-c(X, hb_level, hematocrit,location, patient_age)); glimpse(clinhem)
# Shuffle the rows so the train/test split is not order-dependent
clinhem <- clinhem[sample(nrow(clinhem)), ]
# Split test/training sets (80/20 via rsample).
# NOTE(review): the row shuffle above runs *before* set.seed(), so the overall
# pipeline is not fully reproducible — confirm whether that is intended.
set.seed(1234)
train_test_split <- initial_split(clinhem, prop = 0.8); train_test_split
## Retrieve train and test sets, keeping an id-bearing copy for later joins
train_tbl_with_ids <- training(train_test_split); test_tbl_with_ids <- testing(train_test_split)
train_tbl <- select(train_tbl_with_ids, -SampleID); test_tbl <- select(test_tbl_with_ids, -SampleID)
# Preprocessing recipe fitted on the training set only: Yeo-Johnson transform,
# then center and scale every predictor.
recipe_UM <- recipe(Clinical_Diagnosis ~ ., data = train_tbl) %>%
  #step_dummy(all_nominal(), -all_outcomes()) %>%
  step_YeoJohnson(all_predictors(), -all_outcomes()) %>%
  step_center(all_predictors(), -all_outcomes()) %>%
  step_scale(all_predictors(), -all_outcomes()) %>%
  prep(data = train_tbl)
recipe_UM
# Bake the fitted recipe into processed predictor tables for both sets
x_train_tbl2 <- bake(recipe_UM, new_data = train_tbl) ; x_test_tbl2 <- bake(recipe_UM, new_data = test_tbl)
x_train_tbl <- x_train_tbl2 %>% select(-Clinical_Diagnosis) ; x_test_tbl <- x_test_tbl2 %>% select(-Clinical_Diagnosis)
# Response variables for training and testing sets: 1 = Uncomplicated Malaria, 0 = nMI
y_train_vec <- ifelse(pull(train_tbl, Clinical_Diagnosis) == "Uncomplicated Malaria", 1, 0)
y_test_vec <- ifelse(pull(test_tbl, Clinical_Diagnosis) == "Uncomplicated Malaria", 1, 0)
######################################################################################################################
# Building our Artificial Neural Network: 256-64-16 dense layers with L1/L2
# regularization, dropout and batch-norm, sigmoid output for binary outcome.
# (keras models are modified in place, so the pipe result is not reassigned.)
model_keras <- keras_model_sequential()
model_keras %>%
  # First hidden layer and Dropout to prevent overfitting
  layer_dense(units = 256, kernel_initializer = "uniform", activation = "relu", input_shape = ncol(x_train_tbl),
              kernel_regularizer = regularizer_l1_l2(l1 = 0.01, l2 = 0.01)) %>% layer_dropout(rate = 0.1) %>% layer_batch_normalization() %>%
  # Second hidden layer and Dropout to prevent overfitting
  layer_dense(units = 64, kernel_initializer = "uniform", activation= "relu",
              kernel_regularizer = regularizer_l1_l2(l1 = 0.001, l2 = 0.001)) %>% layer_dropout(rate = 0.3) %>% layer_batch_normalization() %>%
  # Third hidden layer and Dropout to prevent overfitting
  layer_dense(units = 16, kernel_initializer = "uniform", activation= "relu",
              kernel_regularizer = regularizer_l1_l2(l1 = 0.01, l2 = 0.01)) %>% layer_dropout(rate = 0.1) %>% layer_batch_normalization() %>%
  # Output layer: single sigmoid unit (probability of Uncomplicated Malaria)
  layer_dense(units= 1, kernel_initializer = "uniform",
              activation = "sigmoid") %>%
  # Compile ANN: adam optimizer, binary cross-entropy loss
  compile(
    optimizer = 'adam',
    loss = 'binary_crossentropy',
    metrics = c('accuracy')) ; model_keras
# Fit the keras model to the training data (30% held out for validation)
fit_keras <- fit(
  object = model_keras,
  x = as.matrix(x_train_tbl),
  y = y_train_vec,
  batch_size = 64,
  epochs = 500,
  validation_split = 0.30, #for cross validation
  shuffle = TRUE,
  verbose = TRUE,
  callbacks = list(
    #callback_early_stopping(patience = 50),
    callback_tensorboard("run_uncompli"),
    callback_reduce_lr_on_plateau(factor = 0.001)
  )
) ; #tensorboard("run_uncompli");
fit_keras # Print the final model
# Persist the trained network for later reuse
save_model_hdf5(model_keras, 'Uncompli_malaria_Final.hdf5')
# Plot the training/validation history of our Keras model
plot_keras <- plot(fit_keras) +
  theme_tq() + scale_color_tq() + scale_fill_tq()
#labs(title = "Accuracy and loss of during Training for Severe malaria") ;
plot_keras
#######################################@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Predicted class (0/1) on the held-out test set.
# NOTE(review): predict_classes()/predict_proba() are removed in newer keras
# releases; a future port would use predict() + a 0.5 threshold.
yhat_keras_class_vec <- predict_classes(object = model_keras, x = as.matrix(x_test_tbl)) %>%
  as.vector()
# Predicted class probability
yhat_keras_prob_vec <- predict_proba(object = model_keras, x = as.matrix(x_test_tbl)) %>%
  as.vector()
# Format test data and predictions for yardstick metrics
estimates_keras_tbl <- tibble(
  truth = as.factor(y_test_vec) %>% fct_recode(Uncomplicated = "1", nonMalaria = "0"),
  estimate = as.factor(yhat_keras_class_vec) %>% fct_recode(Uncomplicated = "1", nonMalaria = "0"),
  class_prob = yhat_keras_prob_vec); estimates_keras_tbl
# Treat the second factor level as the event for all yardstick metrics below
options(yardstick.event_first = FALSE)
# Confusion Table
estimates_keras_tbl %>% conf_mat(truth, estimate)
# Accuracy
estimates_keras_tbl %>% metrics(truth, estimate)
# AUC
estimates_keras_tbl %>% roc_auc(truth, class_prob)
#######!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
library(pROC)
# ROC curve with probability thresholds printed at 0.1, 0.5 and 0.8
pROC_obj <- roc(estimates_keras_tbl$truth, estimates_keras_tbl$class_prob,
                smoothed = TRUE,
                # arguments for ci
                ci=FALSE, ci.alpha=0.9, stratified=FALSE,
                # arguments for plot
                plot=TRUE, auc.polygon=TRUE, max.auc.polygon=TRUE, grid=TRUE,
                print.auc=TRUE, show.thres=TRUE, print.thres = c(0.1, 0.5, 0.8))
##########@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Precision and recall
estimates_keras_tbl %>% precision(truth, estimate)
estimates_keras_tbl %>% recall(truth, estimate)
# F1-Statistic
estimates_keras_tbl %>% f_meas(truth, estimate, beta = 1)
class(model_keras)
####################################@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# lime needs model_type()/predict_model() S3 methods for keras model classes.
# Setup lime::model_type() function for keras
model_type.keras.engine.sequential.Sequential <- function(x, ...) {
  return("classification")
}
# Setup lime::predict_model() function for keras: two-column probability frame
predict_model.keras.engine.sequential.Sequential <- function(x, newdata, type, ...) {
  pred <- predict_proba(object = x, x = as.matrix(newdata))
  return(data.frame(Uncomplicated = pred, nonMalaria = 1 - pred))
}
# Setup lime::model_type() function for keras (older class name)
model_type.keras.models.Sequential <- function(x, ...) {
  return("classification")
}
# Attach per-sample predicted probabilities to the id-bearing test table.
# NOTE(review): the column name "churn_prob" is presumably a leftover from
# the lime churn tutorial — confirm and consider renaming.
predictions <- predict_model(x = model_keras, newdata = x_test_tbl, type = 'raw') %>%
  tibble::as_tibble(); test_tbl_with_ids$churn_prob <- predictions$Uncomplicated
# Run lime() on training set
explainer <- lime::lime(
  x = x_train_tbl,
  model = model_keras,
  bin_continuous = FALSE)
# Run explain() on four example test cases
explanation <- lime::explain(
  x_test_tbl[96:99,],
  explainer = explainer,
  n_labels = 1,
  n_features = 16,
  kernel_width = 0.5)
Featurebars <- plot_features(explanation) +
  labs(title = "Compact visual representation of feature importance in cases",
       subtitle = "Uncomplicated malaria compared to Non-malaria infections")
Featurebars
# Explain all 336 test cases for the heatmap below
explanation2 <- lime::explain(
  x_test_tbl[1:336,],
  explainer = explainer,
  n_labels = 1,
  n_features = 15,
  kernel_width = 0.5)
## Plot heatmap: reshape case x feature weights into a wide matrix
x <- explanation2$feature
y <- explanation2$feature_weight
z <- explanation2$label
w <- explanation2$case
x_name <- "feature"
y_name <- "feature_weight"
z_name <- "Disease Outcome"
w_name <- "case"
df <- data.frame(w,z,x,y)
names(df) <- c(w_name, z_name, x_name,y_name)
glimpse(df)
library(plyr)
table(df$`Disease Outcome`)
# Human-readable outcome labels
df$`Disease Outcome` <- revalue(df$`Disease Outcome`, c("Uncomplicated"="Uncomplicated Malaria", "nonMalaria"="Non-malaria Infections"))
# Long -> wide: one row per case, one column per feature weight
df_wide <- spread(df, key = feature, value = feature_weight)
df_wide <- df_wide[order(df_wide$`Disease Outcome`),]
table(df_wide$`Disease Outcome`)
df_wideA <- slice(df_wide, 1:336); dim(df_wideA)
# Weight matrix (drop outcome column, use case ids as row names)
df_wide_2 <- df_wideA[, -2]
row.names(df_wide_2) <- df_wide_2$case
df_wide_2[1] <- NULL
# Row annotation frame: outcome only
df_Wide_dem <- df_wideA[,-(3:17)]
row.names(df_Wide_dem) <- df_Wide_dem$case
df_Wide_dem[1] <- NULL
df_matx <- as.matrix(df_wide_2)
inde <- read.csv("../Data/indices.csv", row.names = 1)
# NOTE(review): this wraps a palette *function* in a list; pheatmap's
# annotation_colors expects a named list of colour vectors — confirm this
# argument actually takes effect.
my.colors <- c(colorRampPalette(colors = c("blue", "Red")))
pheatmap(df_matx, annotation_col = inde, cutree_rows = 2, clustering_distance_cols = "correlation",
         cluster_rows = TRUE, cluster_cols = TRUE, annotation_row = df_Wide_dem, annotation_colors = my.colors, fontsize = 12, show_rownames = F)
#'correlation', 'euclidean', 'maximum', 'manhattan', 'canberra', 'binary', 'minkowski'
# Persist the whole workspace for later inspection
save(list = ls(), file = 'Uncompli_malariaFinal.RData')
|
b2b169f1dc9a4280baf86f14929ff4f0c0a5394c
|
bb3c6821ebd76a7f6d6f87478007a82baa59352c
|
/Actividad_0/Zyrus/Practicas.r
|
18fe521d43368e27535339cf74f42f56e64a8332
|
[] |
no_license
|
franciscosucre/Estadistica-2016
|
3c8e8910c79f788f74f26d3001eed637155ebdd6
|
8c4eebd316c388d4661a17956b1b6242b175f03b
|
refs/heads/master
| 2021-01-20T19:49:51.245454
| 2016-08-20T01:40:34
| 2016-08-20T01:40:34
| 63,622,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,955
|
r
|
Practicas.r
|
# Practice exercises 1-2: vector creation, indexing and arithmetic.
# Each numbered comment is the exercise prompt; the code is the answer.
#Practice 1
#1
esc <- 11:15
#2
vec <- seq(1,19,2)
#3
x <- c(esc,vec)
#4
x[c(2,3,5)] <- x[c(2,3,5)] * -1
#5
x <- x[-c(4,8)]
#6
length(x)
#7
Nombres <- c('A','D','X','Z','Y','M','L','B','V','E','R','A','B','T','Z','Z','U')
#8
which(Nombres == 'A') #This function wasn't described anywhere in the class, was in the documentation though.
which(Nombres == 'B')
which(Nombres == 'L')
#9
which(Nombres == 'A' | Nombres == 'Z')
#Practice 2
#1
a <- 1:5
#2
b <- 1:10
#3
# Recycling: since length(b) is a multiple of length(a), R repeats 'a'
# silently (no warning) for these elementwise operations.
a+b #It actually isn't well defined, but R doesn't cares, it just solves it with the repetition trick.
a-b #Basically, it completes 'a' so it matches 'b' in size, repeating the elements.
a*b #So 'a' acts as (1,2,3,4,5,1,2,3,4,5) for the purposes of these 3 operations.
#4
x <- c(a,b)
#5
y <- rev(x)
#6
unique(x)
#7
hist(cumsum(x))
#8
mean(y)
var(y)
sd(y)
# Practice exercises 3-4: built-in data sets, attach() and data frames.
#Practice 3
#1
data(cars)
#2
# NOTE: attach() is generally discouraged (it masks names); the later
# assignments to `speed`/`dist` create globals that shadow the attached
# columns rather than modifying `cars`.
attach(cars) #Assuming an attach is used here, the exercise makes no sense without this line.
plot(speed,dist)
#3
speed <- seq(1,25,0.5)
#4
dist <- speed^2
#5
plot(speed,dist)
#6
#One would use $ to access the variables from cars, for example, cars$speed and cars$dist.
#Practice 4
#1
?USJudgeRatings
#2
data(USJudgeRatings) #First, gotta load the data.
subset(USJudgeRatings, rowSums(USJudgeRatings) == max(rowSums(USJudgeRatings))) #Extracting the judge with subset.
subset(USJudgeRatings, rowSums(USJudgeRatings) == min(rowSums(USJudgeRatings))) #Same thing, but with min now.
#3
summary(rowSums(USJudgeRatings)) #This is just the summary of the scoring from all judges, mean is obtained here.
summary(USJudgeRatings) #This is for each individual test, mean is also obtained here.
#4
USJudgeRatings[order(rowSums(USJudgeRatings)),] #Notice order is used to sort dataframes, we're just sorting them by scores.
#Practice 5: default arguments. Each step redefines `substraction`,
#so only the last definition remains bound at the end.
#1 — no defaults
substraction <- function(x1, x2) x1 - x2
#2 — constant default for the second argument
substraction <- function(x1, x2 = 0) x1 - x2
#3 — default that depends on the first argument (lazy evaluation)
substraction <- function(x1, x2 = 2 * x1) x1 - x2
|
97009c761f128a36e21ba5ee77388748497233ed
|
fced4b5a08001c0a186c49a1bcc60031349521a1
|
/R/scoringTools.R
|
aff5e36972cdd2223ced05bf84058154267ec5ab
|
[] |
no_license
|
adimajo/scoringTools
|
470577a9adafced24fc364264bb298c31d49a49e
|
2bc2c29b0ecebecaf1b5a69f4a515d0e833111a7
|
refs/heads/master
| 2023-02-13T03:37:41.735293
| 2021-01-10T14:42:41
| 2021-01-10T14:42:41
| 84,586,749
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 75
|
r
|
scoringTools.R
|
#' Credit Scoring Tools.
#'
#' Refer to the package's vignette.
"_PACKAGE"
|
3ca0927d6812bf980e339ac72ed90e6d962af46c
|
dcf54728279ae9b361a1830c5573b50773542292
|
/man/decomposer.Rd
|
e36082aef3fa94318b4f17409b367108ce70f155
|
[] |
no_license
|
CGnal/EnergyPricingModel
|
02d4da8636372cff8cc66aea1c2852cffaae7226
|
c692845fefc7872710ca6a4ea36b4fbdaa260615
|
refs/heads/master
| 2021-03-16T10:23:46.632419
| 2016-12-22T14:59:52
| 2016-12-22T14:59:52
| 77,153,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,830
|
rd
|
decomposer.Rd
|
\name{decomposer}
\alias{decomposer}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to decompose complex price series
}
\description{
This function implements different possible decomposition methods
}
\usage{
decomposer(TS, method = c("emd","eemd","ceemdan"), plot = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{TS}{Vector of length N. The input signal to decompose.}
\item{method}{Decomposition method to be used.}
\item{plot}{Logical. Indicating whether or not to plot the results of the decomposition}
\item{...}{arguments to be supplied to the chosen method (see the respective descriptions on kernlab package documentation).}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
\item{IMFs}{Time series object of class "mts" where series corresponds to IMFs of the input signal, with the last series being the final residual.}
\item{MaxErr}{The maximum absolute error associated with the chosen decomposition}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Nicola Donelli
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{ceemdan}, \link{eemd}, \link{emd}}
}
\examples{
attach(HenryHubDailyPrices)
#### Decomposition in IMFs
Dec <- decomposer(TS = TrainPrices, method = "ceemdan", plot = T, num_imfs = 0,
ensemble_size = 250L, noise_strength = 0.2, S_number = 4L,
num_siftings = 50L, rng_seed = 0L, threads = 0L)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
27416da58964fc46c6a0efdcee4f5acde3f4c2b6
|
902037115141ead7b315e7b63e437ec61c01c2c1
|
/man/rowTables.Rd
|
83109558f4f489ac6f915e6153480588c9504ce7
|
[] |
no_license
|
cran/scrime
|
4bdc7e989ba9e648d004ca47cd2d10bb5e78a717
|
cf0033dbfe2a6fa807593a460ef4bcb0931db96a
|
refs/heads/master
| 2021-06-02T21:50:17.706604
| 2018-12-01T10:00:03
| 2018-12-01T10:00:03
| 17,699,500
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,211
|
rd
|
rowTables.Rd
|
\name{rowTables}
\alias{rowTables}
\title{Rowwise Tables}
\description{
Computes a one-dimensional table for each row of a matrix that summarizes
the values of the categorical variables represented by the rows of the matrix.
}
\usage{
rowTables(x, levels = 1:3, affy = FALSE, includeNA = FALSE,
useNN = c("not", "only", "also"), check = TRUE)
}
\arguments{
\item{x}{a matrix in which each row represents a categorical variable (e.g., a SNP)
and each column an observation, where the variables are assumed to show the
levels specified by \code{levels}. Missing values are allowed in \code{x}.}
\item{levels}{vector specifying the levels that the categorical variables in \code{x}
show. Ignored if \code{affy = TRUE}.}
\item{affy}{logical specifying whether the SNPs in \code{x} are coded in the Affymetrix
standard way. If \code{TRUE}, \code{levels = c("AA", "AB", "BB")} and \code{useNN = "also"}
will be used (the latter only when \code{includeNA = TRUE}).}
\item{includeNA}{should a column be added to the output matrix containing the number of
missing values for each variable?}
\item{useNN}{character specifying whether missing values can also be coded by \code{"NN"}.
If \code{useNN = "not"} (default), missing values are assumed to be coded only by \code{NA}.
If \code{"only"}, then missing values are assumed to be coded only by \code{"NN"} (and not
by \code{NA}. If \code{"both"}, both \code{"NN"} and \code{NA} are considered. Ignored
if \code{affy = TRUE}.}
\item{check}{should it be checked whether some of the variables show other levels than the one
specified by \code{levels}?}
}
\value{
A matrix with the same number of rows as \code{x} containing for each variable the numbers
of observations showing the levels specified by \code{levels}.
}
\author{Holger Schwender, \email{holger.schwender@udo.edu}}
\seealso{\code{\link{rowFreqs}}, \code{\link{rowScales}}}
\examples{\dontrun{
# Generate a matrix containing data for 10 categorical
# variables with levels 1, 2, 3.
mat <- matrix(sample(3, 500, TRUE), 10)
rowTables(mat)
}}
\keyword{array}
\keyword{manip}
|
5a8fe6234d527e76ffc964485f1eeab470e80ffb
|
11f79671651f5b2ebfed0adb91728e66c4d7eaea
|
/man/mp_update_rgmp_offc_id.Rd
|
4920f1bb229108647e4672a25bd65b4bc6802e26
|
[] |
no_license
|
gyang274/route
|
aabb4302a9f8f841d3e6818ff94ecb3c39871bb6
|
94ea662006f7aafa1435269ce121f60e2a288290
|
refs/heads/master
| 2020-05-29T15:11:53.086200
| 2016-08-30T20:03:54
| 2016-08-30T20:03:54
| 65,648,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 466
|
rd
|
mp_update_rgmp_offc_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route_mp.r
\name{mp_update_rgmp_offc_id}
\alias{mp_update_rgmp_offc_id}
\title{mp_update_rgmp_offc_id (animation)}
\usage{
mp_update_rgmp_offc_id(offc_id, zoom = 12L)
}
\value{
1. rgmp (side effect): updated global rgmp with new offc_id
2. rgmp_ms (return object): a transient rgmp with popup on new offc_id
}
\description{
map operation: update server side map rgmp with new offc_id
}
|
39d913f606ee403b5164d41bbe106900b7b394d8
|
a0bedd98b914e7d410d26978fdde987bc8cec426
|
/POS+WEB Model.R
|
ddc357e78352ebc191ff303c41f074a912143afc
|
[] |
no_license
|
ruthvik07071995/New-Product-Performance-Prediction-in-Fashion-Retailing
|
5c0022031d4c1abc2c0ad8ba0b14bfeeef26f4c0
|
d20102420a2c4b7bbf8f003ce5440c151377b1ac
|
refs/heads/master
| 2022-09-15T17:57:58.137892
| 2020-06-01T01:46:53
| 2020-06-01T01:46:53
| 256,284,821
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47,836
|
r
|
POS+WEB Model.R
|
options(java.parameters = "-Xmx64048m") # 64048 is 64 GB
#install.packages("odbc")
#install.packages("RMariaDB")
library(RMariaDB)
# Connect to a MariaDB version of a MySQL database
con <- dbConnect(RMariaDB::MariaDB(), host="datamine.rcac.purdue.edu", port=3306
, dbname="***********"
, user="", password="")
# list of db tables
dbListTables(con)
#####################DATA CLEANING AND UNDERSTANDING######################################
# understanding the data and changing the datatypes of the variables
# importing the transactional_web_data table and removing the negative order quantity
web <- dbGetQuery(con, "select * from transactional_web_data")
# changing the datatypes
web$DIM_ORDER_ENTRY_METHOD_KEY <- as.factor(web$DIM_ORDER_ENTRY_METHOD_KEY)
web$ORDER_NUMBER <- as.factor(web$ORDER_NUMBER)
web$DIM_SKU_KEY <- as.factor(web$DIM_SKU_KEY)
web$ORDER_DATE <- as.Date(web$ORDER_DATE, '%Y-%m-%d')
web$ORDER_LINE_NUMBER <- as.numeric(web$ORDER_LINE_NUMBER)
library(sqldf)
# selecting only the required columns for analysis and only
# for DIM_ORDER_ENTRY_METHOD_KEY in '21'
web_21 <- sqldf('select
DIM_SKU_KEY,
ORDER_DATE,
ORDER_NUMBER,
ORDER_LINE_NUMBER,
AMOUNT,
STANDARD_COST_AT_TRANSACTION,
RETAIL_PRICE_AT_TRANSACTION,
CONFIRMED_QUANTITY_BASE_UNIT,
WEB_DISCOUNT_AMOUNT
from web
where DIM_ORDER_ENTRY_METHOD_KEY = 21')
# removing those orders with negative quantity
negative_amount_qty_order <- sqldf('select
order_number
from
(select
order_number,
sum(amount)
from web_21
group by order_number
having sum(amount) < 0)
union
select
order_number
from
(select
order_number,
sum(CONFIRMED_QUANTITY_BASE_UNIT)
from web_21
group by order_number
having sum(CONFIRMED_QUANTITY_BASE_UNIT) < 0) ')
negative_amount_qty_order$order_number <- as.factor(negative_amount_qty_order$order_number)
# creating positive amount table and writing it into the database
web_21_pos_amt <- sqldf('select A.*, B.order_number as order_number_neg
from web_21 as A
left join negative_amount_qty_order as B on A.order_number = B.order_number
where B.order_number is NULL')
dbWriteTable(con,name='web_line_positive_amt',value=web_21_pos_amt,row.names=FALSE)
##### We will be further using 'web_line_positive_amt' table for our analysis
# Importing transaction POS Data
# selecting only the required columns for analysis
pos <- dbGetQuery(con, "select
DIM_STORE_KEY,
DIM_SKU_KEY,
SALE_DATE,
SALE_QUANTITY,
STANDARD_COST_AT_TRANSACTION,
RETAIL_PRICE_AT_TRANSACTION,
NET_SALE_AMOUNT,
SALE_DISCOUNT_AMOUNT,
MISC_DISCOUNT_AMOUNT,
TAX_AMOUNT,
POS_SLIPKEY_SLIP_LVL,
POS_SLIPKEY_LINE_LVL
from Transaction_POS_Data")
#Converting to correct data types
pos$DIM_STORE_KEY <- as.factor(pos$DIM_STORE_KEY)
pos$DIM_SKU_KEY <- as.factor(pos$DIM_SKU_KEY)
pos$SALE_DATE <- as.Date(pos$SALE_DATE, '%Y-%m-%d')
#Identifying distinct order numbers where either the amount or quantity is negative @order-level
library('sqldf')
negative_amount_qty_order_pos <- sqldf('select
POS_SLIPKEY_SLIP_LVL
from
(select
POS_SLIPKEY_SLIP_LVL,
sum(NET_SALE_AMOUNT)
from pos
group by POS_SLIPKEY_SLIP_LVL
having sum(NET_SALE_AMOUNT) < 0)
union
select
POS_SLIPKEY_SLIP_LVL
from
(select
POS_SLIPKEY_SLIP_LVL,
sum(SALE_QUANTITY)
from pos
group by POS_SLIPKEY_SLIP_LVL
having sum(SALE_QUANTITY) < 0) ')
negative_amount_qty_order_pos$POS_SLIPKEY_SLIP_LVL <- as.factor(negative_amount_qty_order_pos$POS_SLIPKEY_SLIP_LVL)
str(negative_amount_qty_order_pos)
#Joining back to the pos data and removing orders having negative amount/quantity
pos_positive_amt <- sqldf('select
A.*,
B.POS_SLIPKEY_SLIP_LVL as order_number_neg
from pos as A
left join negative_amount_qty_order_pos as B
on A.POS_SLIPKEY_SLIP_LVL = B.POS_SLIPKEY_SLIP_LVL
where B.POS_SLIPKEY_SLIP_LVL is NULL')
# writing this table back to the data base
dbWriteTable(con,name='pos_line_positive_amt',value=pos_positive_amt,row.names=FALSE)
######## this cleaned table will be further used for our analysis ##################
############################## DATA CLEANING SECTION ENDS#####################################
############################### AGGREGATING THE DATA #########################################
# importing web transactions where orders amount > 0
web <- dbGetQuery(con, "select * from web_line_positive_amt")
# importing SKU Table
skus <- dbGetQuery(con, "select * from skus_filtered")
# Web-data table aggregated at SKU and date-level
library(sqldf)
# taking minimum date of the launch if SKU table doesn't have launch date or
# in some cases launch date is greater than minimum launch date
agg_transaction_web_date <- sqldf("select A.DIM_SKU_KEY
, ORDER_DATE
, CASE WHEN C.launch_date is null or C.launch_date > B.min_order_date then B.min_order_date else C.launch_date end as 'min_launch_date'
, sum(AMOUNT) as totalsales
, count(distinct ORDER_NUMBER) as trans
, SUM(CONFIRMED_QUANTITY_BASE_UNIT) as units
, sum(AMOUNT-standard_cost_at_transaction) as margins
, sum(RETAIL_PRICE_AT_TRANSACTION*CONFIRMED_QUANTITY_BASE_UNIT) as price
from web A
join
(
select DIM_SKU_KEY
, MIN(ORDER_DATE) as min_order_date
from web
group by DIM_SKU_KEY
) B on B.DIM_SKU_KEY = A.DIM_SKU_KEY
inner join
(
select DIM_SKU_KEY,
launch_date
from skus
) C on C.DIM_SKU_KEY = B.DIM_SKU_KEY
group by A.DIM_SKU_KEY
, ORDER_DATE
, CASE WHEN C.launch_date is null or C.launch_date > B.min_order_date then B.min_order_date else C.launch_date end
")
agg_transaction_web_date$min_launch_date <- as.Date(agg_transaction_web_date$min_launch_date, origin = "1970-01-01")
# importing POS table where orders>0
pos <- dbGetQuery(con, "select * from pos_line_positive_amt")
library(sqldf)
# imputing launch date as min(sale_date) where launch date is null
launch_date <- sqldf("
select A.DIM_SKU_KEY,
CASE when C.LAUNCH_DATE is NULL or C.LAUNCH_DATE > A.min_order_date then min_order_date else C.LAUNCH_DATE end as 'LaunchDate'
from
(
select DIM_SKU_KEY
, MIN(SALE_DATE) as min_order_date
from pos
group by DIM_SKU_KEY
) A
inner join
(
select DIM_SKU_KEY,
LAUNCH_DATE
from skus
) C ON C.DIM_SKU_KEY = A.DIM_SKU_KEY")
# aggregating POS transactions at SKU, Store, Date level
# removing disney store from the analysis
agg_pos_date <- sqldf("select A.DIM_SKU_KEY
, A.DIM_STORE_KEY
, SALE_DATE
, sum(NET_SALE_AMOUNT) as totalsales
, count(distinct POS_SLIPKEY_SLIP_LVL) as trans
, SUM(SALE_QUANTITY) as units
, sum(NET_SALE_AMOUNT-STANDARD_COST_AT_TRANSACTION) as margins
, sum(RETAIL_PRICE_AT_TRANSACTION*SALE_QUANTITY) as price
from pos A
where A.DIM_STORE_KEY != '179'
group by
A.DIM_SKU_KEY
, A.DIM_STORE_KEY
, SALE_DATE
")
# importing store table
store <- dbGetQuery(con, "select * from store")
# aggregating POS transactions at SKU, Region (West, East), saledate level
agg_pos_date_1 <- sqldf("select A.DIM_SKU_KEY
, S.district_description
, SALE_DATE
, sum(totalsales) as totalsales
, sum(trans) as trans
, SUM(units) as units
, sum(margins) as margins
, sum(price) as price
from agg_pos_date A
left join store s on s.DIM_STORE_KEY = A.DIM_STORE_KEY
where A.DIM_STORE_KEY != '179'
and s.STORE_TYPE_CHARACTERISTIC_DESCRIPTION in ('Full Line')
group by
A.DIM_SKU_KEY
, S.district_description
, SALE_DATE
")
# joining the launch_date table to get the minimum launchdate
agg_1 <- sqldf("select A.*, B.LaunchDate
from agg_pos_date_1 A
left join launch_date B on B.DIM_SKU_KEY = A.DIM_SKU_KEY")
agg_1$LaunchDate <- as.Date(agg_1$LaunchDate, origin = "1970-01-01")
# combining pos and web transactions using union statement
agg_2 <- sqldf("select channel
, dim_sku_key
, order_date as sale_date
, min(min_launch_date) as LaunchDate,
sum(totalsales) as totalsales,
sum(trans) as trans,
sum(units) as units,
sum(margins) as margins,
sum(price) as price
from
(
select 'Web' as Channel, A.*
from agg_transaction_web_date A
where DIM_SKU_KEY > 0
union all
select 'Pos' as Channel
, dim_sku_key
, SALE_DATE as order_date
, LaunchDate as min_launch_date
, totalsales
, trans
, units
, margins
, price
from agg_1
)
where price > 0
group by channel
, dim_sku_key
, sale_date")
agg_2$LaunchDate <- as.Date(agg_2$LaunchDate, origin = "1970-01-01")
# importing the SKU Table
skus <- dbGetQuery(con, "select * from SKUs")
# joining the SKU table to get the features related to SKU(ex- color/pattern, merchant_class)
agg_3 <- sqldf("select A.*
, B.STYLE_DESCRIPTION
, B.COLOR_DESCRIPTION
, B.RELEASE_SEASON_ID
, B.RETIREMENT_DATE
, B.MERCHANT_DEPARTMENT
, B.MERCHANT_CLASS
, B.PLM_PRIMARY_COLOR
, B.PLM_SECONDARY_COLOR
, B.PLM_COLOR_FAMILY
from agg_2 A
inner join skus B on B.DIM_SKU_KEY = A.DIM_SKU_KEY
where B.MERCHANT_CLASS != 'Marketing'
")
# writing this table back to the database
# this is the table which will be used for clustering and modeling
dbWriteTable(conn = con, name = "web_pos_saledate", value = agg_3, row.names = FALSE)
############################### AGGREGATING THE DATA ENDS ################################
#### The table written back to the database is used for data modelling and clustering
############################### Clustering and Modelling section #########################
# importing web-pos-salesdate table
agg_3 <- dbGetQuery(con, "select * from web_pos_saledate")
agg_3$sale_date <- as.Date(agg_3$sale_date, origin = '1970-01-01')
# filtering only for the SKU's released in past 3 years
agg_4 <- subset(agg_3, agg_3$RELEASE_SEASON_ID == 'F17' |
agg_3$RELEASE_SEASON_ID == 'F18' |
agg_3$RELEASE_SEASON_ID == 'F19' |
agg_3$RELEASE_SEASON_ID == 'M17' |
agg_3$RELEASE_SEASON_ID == 'M18' |
agg_3$RELEASE_SEASON_ID == 'M19' |
agg_3$RELEASE_SEASON_ID == 'S17' |
agg_3$RELEASE_SEASON_ID == 'S18' |
agg_3$RELEASE_SEASON_ID == 'S19' |
agg_3$RELEASE_SEASON_ID == 'S20' |
agg_3$RELEASE_SEASON_ID == 'W17' |
agg_3$RELEASE_SEASON_ID == 'W18' |
agg_3$RELEASE_SEASON_ID == 'W19' )
# filtering for the SKU's released after Feb-2017 as transaction table has only records after Feb-2017
agg_4 <- subset(agg_4, agg_4$LaunchDate >= '2017-02-01')
agg_4 <- subset(agg_4, agg_4$STYLE_DESCRIPTION != 'E-Gift Card')
# adding solid_flag to identify if a pattern is solid/pattern
agg_4$Solid_Flag <- ifelse(agg_4$PLM_COLOR_FAMILY == 'Solid' , 'Solid' , 'Non-Solid')
agg_4$Solid_Flag <- replace(agg_4$Solid_Flag , is.na(agg_4$Solid_Flag) , 'Non-Solid')
# identifying top-150 patterns based on total sales
library(sqldf)
top_patterns <- sqldf("select color_description, sum(totalsales) as totalsales
from agg_4
group by color_description
order by totalsales desc")
top_patterns <- head(top_patterns, 150)
# binning the variables into 1W, 2W, 1M, 2M sale buckets based on difference
# between sale_date and launch_date
agg_4$sale_date <- as.Date(agg_4$sale_date, origin = '1970-01-01')
agg_4$datediff <- as.Date(agg_4$sale_date) - as.Date(agg_4$LaunchDate)
agg_4$datediff <- as.numeric(agg_4$datediff)
# binning the datediff column
breaks <- c(0,7,14,21,60,90,120,150,180,Inf)
# specify interval/bin labels
tags <- c("1W","2W", "3W", "2M", "3M","4M", "5M","6M", ">6M")
# bucketing values into bins
group_tags <- cut(agg_4$datediff,
breaks=breaks,
include.lowest=TRUE,
right=FALSE,
labels=tags)
agg_4 <- cbind(agg_4 , group_tags)
############### CLUSTERING THE PATTERNS ##########################
# Clustering the patterns based on different parameters like
# no.of merchant classes, no of SKU's, no.of styles, total sales, units sold,
# margins, avg_price
# Clustering is performed only based on first 3 weeks sales
clustering <- sqldf("select color_description, count(distinct dim_sku_key) as count_sku
, count(distinct style_description) as count_styles
, count(distinct merchant_class) as count_merchantclass
, sum(totalsales) as totalsales
, sum(units) as units
, sum(margins) as total_margins
, sum(margins)/sum(units) as avg_margins
, sum(price)/sum(units) as avg_price
from agg_4
where group_tags in ('1W', '2W', '3W')
group by color_description")
# Taking only top 150 patterns
clustering_1 <- sqldf("select A.*
from clustering A
inner join top_patterns B on B.color_description = A.color_description")
# Taking the required numeric columns for clustering
df <- clustering_1[,c(2,3,4,5,6,8,9)]
# z-score standardize for these variable
dfz <- scale(df)
dfz <- data.frame(scale(df))
cost_df <- data.frame() #accumulator for cost results
cost_df
for(k in 1:15){
# allow up to 50 iterations to obtain convergence, and do 20 random starts
kmeans_tr <- kmeans(x=dfz, centers=k, nstart=20, iter.max=100)
#Combine cluster number and cost together, write to cost_df
cost_df <- rbind(cost_df, cbind(k, kmeans_tr$tot.withinss))
}
# the cost_df data.frame contains the # of clusters k and the Mean Squared Error
# (MSE) for each cluster
names(cost_df) <- c("cluster", "tr_cost")
cost_df
# create an elbow plot to validate the optimal number of clusters
par(mfrow=c(1,1))
cost_df[,2] <- cost_df[,2]
plot(x=cost_df$cluster, y=cost_df$tr_cost, main="k-Means Elbow Plot"
, col="blue", pch=19, type="b", cex.lab=1.2
, xlab="Number of Clusters", ylab="MSE (in 1000s)")
points(x=cost_df$cluster, y=cost_df$te_cost, col="green")
library(cluster)
# creating Silhouette plot
km3 <- kmeans(x=dfz, centers=3, nstart=20, iter.max=100)
dist3 <- dist(dfz, method="euclidean")
sil3 <- silhouette(km3$cluster, dist3)
plot(sil3, col=c("black","red","green"), main="Silhouette plot (k=3) K-means (withoutseasons)", border=NA)
# concatenating the cluster back to table
clustering_1 <- cbind(clustering_1, km3$cluster)
colnames(clustering_1)[10] <- 'cluster'
######### CLUSTERING ENDS #####################################
############ Considering cannibalisation effects ######################
## including cannibalisation factors by taking into account no.of patterns launched in the
# past 3,2,1 months respectively
# considering only top 150 patterns
effects <- sqldf("select A.*
from agg_4 A
inner join top_patterns B on B.color_description = A.color_description")
# aggregating for top 150 patterns at pattern, channel, merchantclass and daily level
effects_1 <- sqldf("select channel
, color_description
, merchant_class
, Solid_Flag
, MIN(LaunchDate) over (partition by color_description, Merchant_class,channel) as min_launch_date
, sale_date
, sum(totalsales) as totalsales
, sum(units) as units
, sum(margins) as margins
, sum(price) as price
from effects
group by
channel
, color_description
, merchant_class
, Solid_Flag
, sale_date
")
# changing the datatypes
effects_1$min_launch_date <- as.Date(effects_1$min_launch_date, origin = '1970-01-01')
colnames(effects_1)[5] <- 'launchdate'
# adding cannibalization features # no.of patterns launched
# in the past 3,2,1 months and their sales in the first three weeks of
# a new launch (# of units, avg.price, total sales)
test <- sqldf("select channel, color_description, merchant_class, launchdate, solid_flag
from effects_1
group by channel, color_description, merchant_class, launchdate, solid_flag")
# cannibalization features for 3Month
can_1 <- sqldf("select A.channel, A.color_description, A.merchant_class, A.launchdate
, A.solid_flag
, count(distinct B.color_description) as launched_3M
, sum(B.totalsales) as existing_3M_totalsales
, sum(B.units) as existing_3M_units
, sum(B.margins)/sum(B.units) as existing_3M_avgmargins
, sum(B.totalsales)/sum(B.units) as existing_3M_avg_price
from test A
left join effects_1 B on B.color_description != A.color_description
and B.Merchant_class = A.Merchant_class and
B.channel = A.channel and
B.solid_flag = A.solid_flag and
B.launchdate between A.LaunchDate-90 and A.LaunchDate -1
and B.sale_date between A.launchDate and A.launchDate + 21
group by A.channel, A.color_description, A.merchant_class, A.launchdate, A.solid_flag
")
# cannibalization features for 2Month
can_2 <- sqldf("select A.channel, A.color_description, A.merchant_class, A.launchdate
, A.solid_flag
, count(distinct C.color_description) as launched_2M
, sum(C.totalsales) as existing_2M_totalsales
, sum(C.units) as existing_2M_units
, sum(C.margins)/sum(C.units) as existing_2M_avgmargins
, sum(C.totalsales)/sum(C.units) as existing_2M_avg_price
from test A
left join effects_1 C on C.color_description != A.color_description
and C.Merchant_class = A.Merchant_class and
C.channel = A.channel and
C.solid_flag = A.solid_flag and
C.launchdate between A.LaunchDate-60 and A.LaunchDate -1
and C.sale_date between A.launchDate and A.launchDate + 21
group by A.channel, A.color_description, A.merchant_class
, A.launchdate, A.solid_flag
")
# cannibalization features for 1Month
can_3 <- sqldf("select A.channel, A.color_description, A.merchant_class, A.launchdate
, A.solid_flag
, count(distinct D.color_description) as launched_1M
, sum(D.totalsales) as existing_1M_totalsales
, sum(D.units) as existing_1M_units
, sum(D.margins)/sum(D.units) as existing_1M_avgmargins
, sum(D.totalsales)/sum(D.units) as existing_1M_avg_price
from test A
left join effects_1 D on D.color_description != A.color_description
and D.Merchant_class = A.Merchant_class and
D.channel = A.channel and
D.solid_flag = A.solid_flag and
D.launchdate between A.LaunchDate -30 and A.LaunchDate -1
and D.sale_date between A.launchDate and A.launchDate + 21
group by A.channel, A.color_description, A.merchant_class, A.launchdate
, A.solid_flag
")
# joining all the cannibalization features to one table for each pattern
# joining cannibalisation 1month and 2month features to a single table
can <- sqldf("select A.* , B.launched_2M
, B.existing_2M_totalsales
, B.existing_2M_units
, B.existing_2M_avgmargins
, B.existing_2M_avg_price
from can_1 A
left join can_2 B on B.channel = A.channel and B.merchant_class = A.merchant_class
and B.color_description = A.color_description and B.launchdate = A.launchdate
and B.solid_flag = A.solid_flag")
# joining cannibalisation 3month features
can <- sqldf("select A.*
, B.launched_1M
, B.existing_1M_totalsales
, B.existing_1M_units
, B.existing_1M_avgmargins
, B.existing_1M_avg_price
from can A
left join can_3 B on B.channel = A.channel and B.merchant_class = A.merchant_class
and B.color_description = A.color_description and B.launchdate = A.launchdate
and B.solid_flag = A.solid_flag")
############### considering cannibalisation effects ends ################################
############### MODELLING DATA SET #######################################
# adding seasonality features like Spring, Summer
model <- sqldf("select Channel, COLOR_DESCRIPTION, MERCHANT_CLASS
, CASE when Release_Season_ID like 'M%' then 'SUMMER'
when Release_Season_ID like 'S%' then 'Spring'
when Release_Season_ID like 'W%' then 'Winter'
when Release_Season_ID like 'F%' then 'Fall'
END as Season
, Solid_Flag
, min(LaunchDate)
, group_tags
, sum(totalsales) as totalsales
, sum(units) as units
, sum(margins)/sum(units) as avg_margins
, sum(price)/sum(units) as avg_price
from agg_4
group by Channel
, Solid_Flag
, CASE when Release_Season_ID like 'M%' then 'SUMMER'
when Release_Season_ID like 'S%' then 'Spring'
when Release_Season_ID like 'W%' then 'Winter'
when Release_Season_ID like 'F%' then 'Fall'
END
, COLOR_DESCRIPTION
, MERCHANT_CLASS
, group_tags")
# restricting to only 150-patterns
model <- sqldf("select A.*
from model A
inner join top_patterns B on B.COLOR_DESCRIPTION = A.COLOR_DESCRIPTION")
colnames(model)[6] <- 'LaunchDate'
model$LaunchDate <- as.Date(model$LaunchDate, origin = '1970-01-01')
#transposing - since week tags need to be used as features
library(tidyverse)
library(dplyr)
model_transpose <- pivot_wider(data = model, names_from=group_tags,
values_from = c("totalsales","units","avg_margins","avg_price"))
# imputing missing values by zero
model_transpose_1 <- model_transpose %>%
mutate(totalsales_1W = coalesce(totalsales_1W, 0),
totalsales_2W = coalesce(totalsales_2W, 0),
totalsales_3W = coalesce(totalsales_3W, 0),
totalsales_2M = coalesce(totalsales_2M, 0),
totalsales_3M = coalesce(totalsales_3M, 0),
units_1W = coalesce(units_1W, 0),
units_2W = coalesce(units_2W, 0),
units_3W = coalesce(units_3W, 0),
units_2M = coalesce(units_2M, 0),
units_3M = coalesce(units_3M, 0),
avg_margins_1W = coalesce(avg_margins_1W, 0),
avg_margins_2W = coalesce(avg_margins_2W, 0),
avg_margins_3W = coalesce(avg_margins_3W, 0),
avg_margins_2M = coalesce(avg_margins_2M, 0),
avg_margins_3M = coalesce(avg_margins_3M, 0),
avg_price_1W = coalesce(avg_price_1W, 0),
avg_price_2W = coalesce(avg_price_2W, 0),
avg_price_3W = coalesce(avg_price_3W, 0),
avg_price_2M = coalesce(avg_price_2M, 0),
avg_price_3M = coalesce(avg_price_3M, 0))
# taking only columns required for analysis
# taking the cumulative_units
model_transpose_2 <- sqldf("select Channel, COLOR_DESCRIPTION, MERCHANT_CLASS, Season, Solid_Flag,
LaunchDate,
totalsales_1W, totalsales_2W, totalsales_3W,
units_1W, units_2W, units_3W, (units_2M+units_3M) as cumulative_units,
avg_margins_1W, avg_margins_2W, avg_margins_3W,
avg_price_1W, avg_price_2W, avg_price_3W
from model_transpose_1")
# Cleaning data, removing records where cumulative sales are 0 and 1W/2W/3W sales are zero
model_transpose_3 <- subset(model_transpose_2, model_transpose_2$totalsales_1W+model_transpose_2$totalsales_2W+model_transpose_2$totalsales_3W > 0)
model_transpose_3 <- subset(model_transpose_3 , model_transpose_3$cumulative_units > 0)
# joining the cluster and canibalisation information
model_transpose_4 <- sqldf("select A.*
, B.launched_1M
, B.existing_1M_totalsales
, B.existing_1M_units
, B.existing_1M_avgmargins
, B.existing_1M_avg_price
, B.launched_2M
, B.existing_2M_totalsales
, B.existing_2M_units
, B.existing_2M_avgmargins
, B.existing_2M_avg_price
, B.launched_3M
, B.existing_3M_totalsales
, B.existing_3M_units
, B.existing_3M_avgmargins
, B.existing_3M_avg_price
from model_transpose_3 A
left join can B on B.channel = A.channel
and B.merchant_class = A.merchant_class
and B.color_description = A.color_description
and B.launchdate = A.launchdate
and B.solid_flag = A.solid_flag")
# adding clusters
model_transpose_4 <- sqldf("select A.*, B.cluster
from model_transpose_4 A
left join clustering_1 B on B.color_description = A.color_description")
# calculating avg price and margin for first 3 weeks
model_transpose_4$avgprice <- (model_transpose_4$units_1W*model_transpose_4$avg_price_1W+model_transpose_4$units_2W*model_transpose_4$avg_price_2W+model_transpose_4$units_3W*model_transpose_4$avg_price_3W)/(model_transpose_4$units_1W+model_transpose_4$units_2W+model_transpose_4$units_3W)
model_transpose_4$avgmargin <-(model_transpose_4$units_1W*model_transpose_4$avg_margins_1W+model_transpose_4$units_2W*model_transpose_4$avg_margins_2W+model_transpose_4$units_3W*model_transpose_4$avg_margins_3W)/(model_transpose_4$units_1W+model_transpose_4$units_2W+model_transpose_4$units_3W)
# removing variable not required
model_transpose_4$avg_margins_1W <- NULL
model_transpose_4$avg_margins_2W <- NULL
model_transpose_4$avg_margins_3W <- NULL
model_transpose_4$avg_price_1W <- NULL
model_transpose_4$avg_price_2W <- NULL
model_transpose_4$avg_price_3W <- NULL
# imputing the nulls by 0
model_transpose_4 <- model_transpose_4 %>%
mutate(existing_1M_totalsales = coalesce(existing_1M_totalsales, 0),
existing_2M_totalsales = coalesce(existing_2M_totalsales, 0),
existing_3M_totalsales = coalesce(existing_3M_totalsales, 0),
existing_1M_units = coalesce(existing_1M_units, 0),
existing_2M_units = coalesce(existing_2M_units, 0),
existing_3M_units = coalesce(existing_3M_units, 0),
existing_1M_avgmargins = coalesce(existing_1M_avgmargins, 0),
existing_2M_avgmargins = coalesce(existing_2M_avgmargins, 0),
existing_3M_avgmargins = coalesce(existing_3M_avgmargins, 0),
existing_1M_avg_price = coalesce(existing_1M_avg_price, 0),
existing_2M_avg_price = coalesce(existing_2M_avg_price, 0),
existing_3M_avg_price = coalesce(existing_3M_avg_price, 0))
str(model_transpose_4)
model_transpose_4$launched_1M <- as.numeric(model_transpose_4$launched_1M)
model_transpose_4$launched_2M <- as.numeric(model_transpose_4$launched_2M)
model_transpose_4$launched_3M <- as.numeric(model_transpose_4$launched_3M)
# imputing nulls and missing by zero
model_transpose_4 <- model_transpose_4 %>%
mutate(launched_1M = coalesce(launched_1M, 0),
launched_2M = coalesce(launched_2M, 0),
launched_3M = coalesce(launched_3M, 0))
# removing the columns that are correlated
# margin and price were highly correlated so removed margins from the analysis
model_transpose_4$existing_1M_avgmargins <- NULL
model_transpose_4$existing_2M_avgmargins <- NULL
model_transpose_4$existing_3M_avgmargins <- NULL
model_transpose_4$avgmargin <- NULL
##################imputing the values #############################
## imputing avg prices of cannibalization and number of patterns launched
## in past 3M for those launched before '2017-05-02'
# Patterns launched on/before the cutoff have incomplete lookback windows;
# their 3M features are replaced by group averages taken over later launches
# with the same channel / merchant class / solid flag.
impute_3M <- subset(model_transpose_4, model_transpose_4$LaunchDate > '2017-05-02')
values_3M <- sqldf("select channel, merchant_class
, solid_flag, CEIL(avg(launched_3M)) as launched_3M
, avg(existing_3M_avg_price) as existing_3M_avg_price
from impute_3M
group by channel, merchant_class
, solid_flag")
# LaunchDate is converted to its numeric day count so it can be compared
# against the hard-coded numeric cutoffs inside the SQL below.
model_transpose_4$LaunchDate <- as.numeric(model_transpose_4$LaunchDate)
as.numeric(as.Date('2017-05-02')) #17288
model_transpose_5 <- sqldf("select A.Channel, A.COLOR_DESCRIPTION, A.MERCHANT_CLASS,
A.Season, A.Solid_Flag, A.LaunchDate, A.totalsales_1W,
A.totalsales_2W, A.totalsales_3W, A.units_1W, A.units_2W, A.units_3W,
A.cumulative_units, A.launched_1M, A.existing_1M_totalsales,
A.existing_1M_units, A.existing_1M_avg_price, A.launched_2M,
A.existing_2M_totalsales, A.existing_2M_units, A.existing_2M_avg_price, A.cluster,
A.avgprice, A.existing_3M_totalsales, A.existing_3M_units
, CASE WHEN A.LaunchDate <= 17288 then B.launched_3M
ELSE A.launched_3M END as launched_3M
, CASE WHEN A.LaunchDate <= 17288 then B.existing_3M_avg_price
ELSE A.existing_3M_avg_price end as existing_3M_avg_price
from model_transpose_4 A
left join values_3M B on B.channel = A.Channel and B.merchant_class = A.MERCHANT_CLASS
and B.solid_flag = A.Solid_Flag
")
## imputing avg prices of cannibalization and number of patterns launched
## in past 2M for those launched before '2017-04-02'
# LaunchDate is restored to Date class so the subset cutoff below works.
model_transpose_4$LaunchDate <- as.Date(model_transpose_4$LaunchDate, origin = "1970-01-01")
impute_2M <- subset(model_transpose_4, model_transpose_4$LaunchDate > '2017-04-02')
values_2M <- sqldf("select channel, merchant_class
, solid_flag, CEIL(avg(launched_2M)) as launched_2M
, avg(existing_2M_avg_price) as existing_2M_avg_price
from impute_2M
group by channel, merchant_class
, solid_flag")
model_transpose_4$LaunchDate <- as.numeric(model_transpose_4$LaunchDate)
as.numeric(as.Date('2017-04-02')) #17258
# This pass reads from model_transpose_5 (the 3M-imputed result), so the
# three imputation passes deliberately build on each other in order.
model_transpose_5 <- sqldf("select A.Channel, A.COLOR_DESCRIPTION, A.MERCHANT_CLASS,
A.Season, A.Solid_Flag, A.LaunchDate, A.totalsales_1W,
A.totalsales_2W, A.totalsales_3W, A.units_1W, A.units_2W, A.units_3W,
A.cumulative_units, A.launched_1M, A.existing_1M_totalsales,
A.existing_1M_units, A.existing_1M_avg_price, A.launched_3M,
A.existing_3M_totalsales, A.existing_3M_units, A.existing_3M_avg_price, A.cluster,
A.avgprice
, A.existing_2M_totalsales, A.existing_2M_units
, CASE WHEN A.LaunchDate <= 17258 then B.launched_2M
ELSE A.launched_2M END as launched_2M
, CASE WHEN A.LaunchDate <= 17258 then B.existing_2M_avg_price
ELSE A.existing_2M_avg_price end as existing_2M_avg_price
from model_transpose_5 A
left join values_2M B on B.channel = A.Channel and B.merchant_class = A.MERCHANT_CLASS
and B.solid_flag = A.Solid_Flag
")
## imputing avg prices of cannibalization and number of patterns launched
## in past 1M for those launched before '2017-03-02'
model_transpose_4$LaunchDate <- as.Date(model_transpose_4$LaunchDate, origin = "1970-01-01")
impute_1M <- subset(model_transpose_4, model_transpose_4$LaunchDate > '2017-03-02')
values_1M <- sqldf("select channel, merchant_class
, solid_flag, CEIL(avg(launched_1M)) as launched_1M
, avg(existing_1M_avg_price) as existing_1M_avg_price
from impute_1M
group by channel, merchant_class
, solid_flag")
as.numeric(as.Date('2017-03-02')) #17227
model_transpose_5 <- sqldf("select A.Channel, A.COLOR_DESCRIPTION, A.MERCHANT_CLASS,
A.Season, A.Solid_Flag, A.LaunchDate, A.totalsales_1W,
A.totalsales_2W, A.totalsales_3W, A.units_1W, A.units_2W, A.units_3W,
A.cumulative_units, A.launched_2M, A.existing_2M_totalsales,
A.existing_2M_units, A.existing_2M_avg_price, A.launched_3M,
A.existing_3M_totalsales, A.existing_3M_units, A.existing_3M_avg_price, A.cluster,
A.avgprice
, A.existing_1M_totalsales, A.existing_1M_units
, CASE WHEN A.LaunchDate <= 17227 then B.launched_1M
ELSE A.launched_1M END as launched_1M
, CASE WHEN A.LaunchDate <= 17227 then B.existing_1M_avg_price
ELSE A.existing_1M_avg_price end as existing_1M_avg_price
from model_transpose_5 A
left join values_1M B on B.channel = A.Channel and B.merchant_class = A.MERCHANT_CLASS
and B.solid_flag = A.Solid_Flag
")
###################### imputing ends #######################################
# Quick sanity plots: first-3-weeks units vs. cumulative units.
# NOTE(review): `data` here still refers to an object built earlier in the
# script -- it is only (re)assigned from model_transpose_5 below.
plot(data$units_3W+data$units_2W+data$units_1W , data$cumulative_units)
plot(log(data$units_3W), log(data$cumulative_units))
model_transpose_5$LaunchDate <- as.Date(model_transpose_5$LaunchDate, origin = "1970-01-01")
# subsetting the data only for top 15 classes
# (rewritten with %in% instead of a 15-branch `==` / `|` chain)
top_classes <- c("Crossbodies", "Backpacks", "Travel Bags", "Totes",
                 "IDs/Keychains", "Wristlets", "Cosmetics",
                 "Travel/Packing Accessories", "Textiles", "Wallets",
                 "Lunch Bags", "Satchels", "Rolling Luggage",
                 "Laptop/Tablet Accessories", "Other Handbag Accessories")
data <- subset(model_transpose_5, MERCHANT_CLASS %in% top_classes)
# removing outliers ex - holiday patterns which had higher sales in the
# first three weeks alone and no sales in the subsequent months
data <- subset(data, !(data$cumulative_units <= 1000 & data$units_3W+data$units_2W+data$units_1W > 1000))
# creating new feature i.e the relative price of the substitutable item
# (ratio defaults to 1 when no substitutable item exists, i.e. avg price is 0)
data$relativeprice <- ifelse(data$existing_3M_avg_price == 0, 1, data$avgprice/data$existing_3M_avg_price)
# changing the data types
data$MERCHANT_CLASS <- as.factor(as.character(data$MERCHANT_CLASS))
str(data)
data$Channel <- as.factor(data$Channel)
data$MERCHANT_CLASS <- as.factor(data$MERCHANT_CLASS)
data$Season <- as.factor(data$Season)
data$Solid_Flag <- as.factor(data$Solid_Flag)
data$LaunchDate <- as.Date(data$LaunchDate, origin = "1970-01-01")
data$cluster <- as.factor(data$cluster)
data$month <- strftime(data$LaunchDate, '%B')  # launch month name as a seasonality feature
data$month <- as.factor(data$month)
# subsetting data as 3M units sold will not be available for launches after '2019-07-07'
data <- subset(data, data$LaunchDate <= '2019-07-07')
# removing the columns not required for analysis
data$existing_1M_totalsales <- NULL
data$existing_2M_totalsales <- NULL
data$existing_3M_totalsales <- NULL
data$existing_1M_units <- NULL
data$existing_2M_units <- NULL
data$existing_3M_units <- NULL
data$existing_1M_avg_price <- NULL
data$existing_2M_avg_price <- NULL
data$existing_3M_avg_price <- NULL
# creating these ID variables to study the predictions later
# (left-assignment replaces the original right-assign `->`, which is
# discouraged style; the stored values are identical)
color_ID <- data$COLOR_DESCRIPTION
merchant_ID <- data$MERCHANT_CLASS
Season_ID <- data$Season
LaunchDate_ID <- data$LaunchDate
month_ID <- data$month
data$COLOR_DESCRIPTION <- NULL
data$LaunchDate <- NULL
data$Season <- NULL
str(data)
# using CARET library for model building
library(caret)
# model building
# creating dummies for factor columns using dummyVars()
dummies <- dummyVars(cumulative_units ~ ., data = data)
ex <- data.frame(predict(dummies, newdata = data))
data <- cbind(data$cumulative_units, ex)
colnames(data)[1] <- 'cumulative_units'
# Linear combos - remove dummy columns that are linear combinations of others
# to avoid multicollinearity. An intercept column of ones is prepended so
# findLinearCombos() also detects dummy sets that sum to a constant.
CumulativeUnits <- data$cumulative_units
data <- cbind(rep(1, nrow(data)), data[2:ncol(data)])
# fixed: `names(data[1]) <- "ones"` renamed a copy and had no effect;
# `names(data)[1] <- ...` actually renames the first column.
names(data)[1] <- "ones"
comboInfo <- findLinearCombos(data)
# Guard: $remove is NULL when no linear combinations are found, and
# `data[, -NULL]` would error.
if (length(comboInfo$remove) > 0) {
  data <- data[, -comboInfo$remove]
}
data <- data[, c(2:ncol(data))]  # drop the helper "ones" column again
data <- cbind(CumulativeUnits, data)
# Removing variables with zero variance
nzv <- nearZeroVar(data, saveMetrics = TRUE)
data <- data[, c(TRUE, !nzv$zeroVar[2:ncol(data)])]  # always keep the target column
#checking distributions of quantitative variables
# most of the variables are right skewed and log transformations were done
hist(data$CumulativeUnits)
hist(log(data$CumulativeUnits))
hist(log(data$totalsales_1W))
hist(log(data$totalsales_2W))
hist(log(data$totalsales_3W))
hist(log(data$units_1W))
hist(log(data$units_2W))
hist(log(data$units_3W))
hist(data$launched_1M)
hist(data$launched_2M)
hist(data$launched_3M)
# NOTE(review): the existing_*_units / existing_*_avg_price columns were
# dropped earlier in the script, so the hist()/skewness() calls below look
# like leftover EDA against an older `data` -- confirm before relying on them.
hist(log(data$existing_1M_units))
hist(log(data$existing_2M_units))
hist(log(data$existing_3M_units))
hist(data$existing_1M_avg_price)
hist(data$existing_2M_avg_price)
hist(data$existing_3M_avg_price)
# checking for skewness
# skewness() is not base R -- presumably from e1071 or moments, loaded
# elsewhere in the session; TODO confirm.
skewness(data$existing_1M_units)
skewness(data$existing_2M_units)
skewness(data$existing_3M_units)
skewness(data$existing_1M_avg_price)
skewness(data$existing_2M_avg_price)
skewness(data$existing_3M_avg_price)
# # transforming the variable
# Log-transform the skewed predictors; +0.001 avoids log(0) for zero sales.
data$totalsales_1W <- log(data$totalsales_1W+0.001)
data$totalsales_2W <- log(data$totalsales_2W+0.001)
data$totalsales_3W <- log(data$totalsales_3W+0.001)
data$units_1W <- log(data$units_1W+0.001)
data$units_2W <- log(data$units_2W+0.001)
data$units_3W <- log(data$units_3W+0.001)
data$CumulativeUnits <- log(data$CumulativeUnits)
# Re-attach the identifier columns so predictions can be traced back to items.
data <- cbind(color_ID, LaunchDate_ID, merchant_ID, month_ID, Season_ID, data)
# splitting the data set into train-test (70/30 split)
set.seed(1234)
# T/F replaced by TRUE/FALSE below (T and F are reassignable in R)
inTrain <- createDataPartition(y = data$CumulativeUnits , p = 0.7, list = FALSE)
train <- data[inTrain,]
test <- data[-inTrain,]
# Identifier columns must not enter the model matrix; drop them from both splits.
train$color_ID <- NULL
train$LaunchDate_ID <- NULL
train$merchant_ID <- NULL
train$month_ID <- NULL
train$Season_ID <- NULL
test$color_ID <- NULL
test$LaunchDate_ID <- NULL
test$merchant_ID <- NULL
test$month_ID <- NULL
test$Season_ID <- NULL
#Cross-validation design - 5-fold cross validation
ctrl <- trainControl(method = "cv" ,
                     number = 5,
                     classProbs = FALSE,
                     summaryFunction = defaultSummary,
                     allowParallel = TRUE)
#######################Linear regression###########################
# Baseline model: ordinary least squares on the log-transformed target.
model <- train(CumulativeUnits ~ . ,
data = train,
method = "lm",
trControl = ctrl)
summary(model)
# Evaluating the predictions
# Predictions are on the log scale; exp() back-transforms both obs and pred
# so the RMSE/MAE from defaultSummary() are in original units sold.
predictedVal_train <- predict(model, train)
modelvalues_train <- data.frame(obs = exp(train$CumulativeUnits) , pred = exp(predictedVal_train))
defaultSummary(modelvalues_train)
predictedVal_test <- predict(model,test)
modelvalues_test <- data.frame(obs = exp(test$CumulativeUnits) , pred = exp(predictedVal_test))
defaultSummary(modelvalues_test)
# plotting actual vs predictions graphs
# blue line is the identity y = x (perfect prediction)
plot(modelvalues_test$obs ,modelvalues_test$pred)
abline(coef = c(0,1), col = "blue")
########### Random Forests
library(ranger)
# Random forest via the ranger backend; tuneLength = 15 lets caret try 15
# candidate hyperparameter settings, selected by cross-validated MAE.
# fixed: a trailing comma after `tuneLength = 15,` passed an empty argument
# into train()'s `...`, which errors at evaluation time.
model2_rf <- train(CumulativeUnits ~ . ,
                   data = train,
                   method = "ranger",
                   trControl = ctrl,
                   metric = 'MAE',
                   tuneLength = 15)
# evaluating the predictions
# exp() back-transforms from the log scale before computing RMSE/MAE.
predictedVal_train <- predict(model2_rf, train)
modelvalues_train <- data.frame(obs = exp(train$CumulativeUnits) , pred = exp(predictedVal_train))
defaultSummary(modelvalues_train)
predictedVal_test <- predict(model2_rf,test)
modelvalues_test <- data.frame(obs = exp(test$CumulativeUnits) , pred = exp(predictedVal_test))
defaultSummary(modelvalues_test)
# actual vs predicted; blue line is the identity (perfect prediction)
plot(modelvalues_test$obs ,modelvalues_test$pred)
abline(coef = c(0,1), col = "blue")
# XGBoost
library(xgboost)
# tuning parameters for XGBoost
# A single fixed hyperparameter combination (presumably found by an earlier
# tuning run -- TODO confirm); expand.grid gives the data.frame caret expects.
xgb.grid <- expand.grid(nrounds = 450,
max_depth = 7,
eta = 0.03,
gamma = 0.5,
colsample_bytree = 0.5,
min_child_weight= 7,
subsample = 0.7)
xgb_tune <-train(CumulativeUnits ~.,
data= train,
method="xgbTree",
metric = "MAE",
trControl=ctrl,
tuneGrid=xgb.grid)
# Evaluating the model
# exp() back-transforms from the log scale before computing RMSE/MAE.
predictedVal_train <- predict(xgb_tune, train)
modelvalues_train <- data.frame(obs = exp(train$CumulativeUnits) , pred = exp(predictedVal_train))
defaultSummary(modelvalues_train)
predictedVal_test <- predict(xgb_tune,test)
modelvalues_test <- data.frame(obs = exp(test$CumulativeUnits) , pred = exp(predictedVal_test))
defaultSummary(modelvalues_test)
# actual vs predicted; blue line is the identity (perfect prediction)
plot(modelvalues_test$obs ,modelvalues_test$pred)
abline(coef = c(0,1), col = "blue")
# plotting the variable importance plots
caret_imp <- varImp(xgb_tune)
plot(caret_imp, top = 20)
# variable importance for the linear regression baseline (`model` above)
caret_imp <- varImp(model)
plot(caret_imp, top = 15)
##################### decision tree bagging approach #####################
#install.packages("rpart")
library(rpart)
model_DT <- train(
CumulativeUnits ~.
, data = train
, method = "treebag"
,trControl = ctrl
,importance = TRUE)
# Same evaluation protocol as the other models: back-transform, then summarise.
predictedVal_train <- predict(model_DT, train)
modelvalues_train <- data.frame(obs = exp(train$CumulativeUnits) , pred = exp(predictedVal_train))
defaultSummary(modelvalues_train)
predictedVal_test <- predict(model_DT,test)
modelvalues_test <- data.frame(obs = exp(test$CumulativeUnits) , pred = exp(predictedVal_test))
defaultSummary(modelvalues_test)
plot(modelvalues_test$obs ,modelvalues_test$pred)
abline(coef = c(0,1), col = "blue")
|
cf54d4bae9971809808cd5a8af4b50683331ab43
|
e5604981a0ae5102f33e58218946e625e1e25fd3
|
/tests/testthat/test-matrix.R
|
c713eb0554b2e19aaa3cd716de6a7e572ad86ac2
|
[] |
no_license
|
talgalili/broom
|
d77633d58ba81ddae2e65328fc487b1943e91020
|
8bb9902b62a566ec2b7a4c37a36c32ef4a6ecfb6
|
refs/heads/master
| 2021-01-12T09:19:56.804074
| 2018-06-14T18:40:33
| 2018-06-14T18:40:33
| 81,334,167
| 0
| 1
| null | 2017-02-08T13:44:59
| 2017-02-08T13:44:59
| null |
UTF-8
|
R
| false
| false
| 234
|
r
|
test-matrix.R
|
# Tidiers for plain matrices; currently skipped pending deprecation.
context("matrix tidiers")

test_that("matrix tidiers work", {
  skip("Deprecating soon")

  m <- as.matrix(mtcars)

  # tidy(): one row per cell of the 32 x 11 matrix plus the rowname column
  tidied <- tidy(m)
  check_tidy(tidied, exp.row = 32, exp.col = 12)

  # glance(): a single-row summary with four columns
  glanced <- glance(m)
  check_tidy(glanced, exp.col = 4)
})
|
88156732a0e6fe0306c5ab6d58e666da13b0088f
|
7f241bd79a339ff7922a5b1b32a75ea3fb490ce4
|
/Inclass13June.R
|
449278679290bcd665321ded3830d11a1ffd99c0
|
[] |
no_license
|
n1tk/nonparametrics
|
1edb684e09f2e5dbf01395dca574e2a56557d250
|
fa3e8409b182f202784def8bc3580ab041f934ef
|
refs/heads/master
| 2021-06-04T12:15:26.650976
| 2016-07-27T23:07:44
| 2016-07-27T23:07:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,486
|
r
|
Inclass13June.R
|
### In class Assignment
library(MASS)
library(perm)
attach(birthwt)
# Permutation test for equality of spread between two samples, based on the
# Ratio of Mean absolute Deviations from each sample's own median (RMD).
#
# samp1, samp2: numeric vectors (the two samples)
# direction:    alternative -- 'two.sided' (default), 'less' or 'greater'
# nsamp:        number of random permutations approximating the null
# Returns the approximate permutation p-value, invisibly (also printed).
RMD.test <- function(samp1,samp2,direction=c('two.sided','less','greater')[1],nsamp=10000){
  dir <- direction[1]  # resolve once instead of re-indexing throughout
  # Deviations from each sample's own median, pooled for permutation.
  devs1 <- samp1 - median(samp1)
  devs2 <- samp2 - median(samp2)
  devs <- c(devs1, devs2)
  n1 <- length(devs1)
  # Observed statistic: ratio of mean absolute deviations.
  RMD <- mean(abs(devs1)) / mean(abs(devs2))
  if (dir == 'two.sided') {
    # Fold the statistic so it is always >= 1, making the upper tail two-sided.
    RMD <- max(1 / RMD, RMD)
  }
  RMDperms <- rep(NA_real_, nsamp)  # preallocated permutation distribution
  for (i in seq_len(nsamp)) {
    # sample(n) is a full random permutation without replacement -- identical
    # RNG stream to sample(n, n, replace = FALSE) in the original.
    tempdevs <- devs[sample(length(devs))]
    stat <- mean(abs(tempdevs[1:n1])) / mean(abs(tempdevs[-(1:n1)]))
    if (dir == 'two.sided') stat <- max(1 / stat, stat)
    RMDperms[i] <- stat
  }
  # Upper tail for 'greater' and for the folded 'two.sided' statistic;
  # lower tail for 'less' -- same tails as the original implementation.
  if (dir == 'less') {
    pVal <- mean(RMDperms <= RMD)
  } else {
    pVal <- mean(RMDperms >= RMD)
  }
  print(paste("Test statistic:",round(RMD,4)))
  print(paste("Approximate p-value for ",dir,": ",pVal,sep=""))
  invisible(pVal)  # allow programmatic reuse of the p-value
}
###RMD test
# Bare names bwt / ht / ui resolve via attach(birthwt) earlier in the script:
# compare spread of birth weight by hypertension and by uterine irritability.
RMD.test(bwt[ht == 1], bwt[ht == 0])
RMD.test(bwt[ui == 1], bwt[ui == 0])
### KS test
# Kolmogorov-Smirnov: compare the full birth-weight distributions by group.
ks.test(bwt[ht == 1], bwt[ht == 0])
### will start with KS test
ks.test(birthwt$bwt[birthwt$ht == 1], birthwt$bwt[birthwt$ht == 0])
table(birthwt$ht)
###
ks.test(birthwt$bwt[birthwt$ui == 1], birthwt$bwt[birthwt$ui == 0])
table(birthwt$ui)
# Visual check of the two group distributions.
hist(birthwt$bwt[birthwt$ui == 1])
hist(birthwt$bwt[birthwt$ui == 0])
###compute t.test
t.test(bwt~ui, data=birthwt)
#compute wilcox test
wilcox.test(bwt~ui, data=birthwt)
|
5827306b30e9ef2b6229d52f8f948236d9b3b654
|
488c2cdfd06b9f7be1f5f20dd7c3e8c42492d189
|
/man/create_ET_trial_data.Rd
|
9d1fb26105039f6fddbc1b3e12bf34ad6a67f61c
|
[
"MIT"
] |
permissive
|
samhforbes/DDLab
|
c7061383d5190718d3328ac89a322aafe0c2faea
|
167b1ac6902b98f9206a12c72309f8c01efdc988
|
refs/heads/master
| 2023-07-19T20:17:31.761831
| 2023-07-17T15:20:33
| 2023-07-17T15:20:33
| 170,550,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,041
|
rd
|
create_ET_trial_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_ET_trial_data.R
\name{create_ET_trial_data}
\alias{create_ET_trial_data}
\title{create a trial report from a fixation eyetracking data}
\usage{
create_ET_trial_data(data, task, write = F, show_all_missing = F)
}
\arguments{
\item{data}{a data frame read in from a fixation report CSV or txt file}
\item{task}{a string giving the task of interest, matched against the task column}
\item{write}{if TRUE, saves a CSV file in the current working directory}
\item{show_all_missing}{if TRUE, assumes 18 trials per participant and leaves blank rows for missing trials}
}
\value{
A formatted dataframe with CP, SwitchRate, MLD and TLT, as well as coding info.
}
\description{
This was designed to work with eyelink fixation reports and the VWM trial.
I can't guarantee it will bring out what you want beyond that so please check
the output carefully.
}
\examples{
library(readr)
data <- read_csv("etdata.csv")
data_out <- create_ET_trial_data(data, task = 'VWM', write = F)
}
|
de655d3fe481bf0f9875d43d37076163f2b803dd
|
a8be61e1b71cfb146baa08412b06ec0bf91a551e
|
/plot1.R
|
2e1a55b33bf05fda381f0eb55370c7a7d26e807b
|
[] |
no_license
|
lenin-grib/ExData_Plotting1
|
af2ee3bc6c6155dfa41b579cada1c1299af1aa42
|
f2746449920e032abbdc0f206633bf0482bbd79b
|
refs/heads/master
| 2020-04-08T16:59:39.472027
| 2018-11-28T19:02:57
| 2018-11-28T19:02:57
| 159,545,400
| 0
| 0
| null | 2018-11-28T18:16:45
| 2018-11-28T18:16:44
| null |
UTF-8
|
R
| false
| false
| 715
|
r
|
plot1.R
|
## read the data assuming file is saved to the working directory
## (first two columns are Date/Time strings, the rest numeric; "?" marks NA)
full <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   colClasses = c("character", "character", "numeric", "numeric",
                                  "numeric", "numeric", "numeric", "numeric", "numeric"),
                   na.strings = "?")

## extract data only from the dates 2007-02-01 and 2007-02-02
## (dates are stored day-first as d/m/yyyy strings in this dataset)
data <- subset(full, Date %in% c("1/2/2007", "2/2/2007"))

## open png device (defaults to 480 x 480 px)
png("plot1.png")

## send a distribution of global active power values to png file
hist(data$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
1d29cef97338d1bb692b285230d95e5301125a3d
|
48197dba4bc931c8f5bfa27014b282d704f2336c
|
/inst/tinytest/test_wand.R
|
7b8cda8700bdee330fc4bce34c5676c0229d2997
|
[
"MIT"
] |
permissive
|
hrbrmstr/wand
|
c5dd3049ef9a96a4864cb79894cfae6c58962ebf
|
1f89bed4a5aba659376ab7f626dc077ee148df39
|
refs/heads/master
| 2021-01-09T20:33:25.668124
| 2019-09-26T10:10:56
| 2019-09-26T10:10:56
| 65,586,565
| 21
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,359
|
r
|
test_wand.R
|
library(wand)
list(
actions.csv = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
actions.txt = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
actions.xlsx = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
test_128_44_jstereo.mp3 = "audio/mp3", test_excel_2000.xls = "application/msword",
test_excel_spreadsheet.xml = "application/xml", test_excel_web_archive.mht = "message/rfc822",
test_excel.xlsm = "application/zip", test_excel.xlsx = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
test_nocompress.tif = "image/tiff", test_powerpoint.pptm = "application/zip",
test_powerpoint.pptx = "application/vnd.openxmlformats-officedocument.presentationml.presentation",
test_word_2000.doc = "application/msword", test_word_6.0_95.doc = "application/msword",
test_word.docm = "application/zip", test_word.docx = "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
test.au = "audio/basic", test.bin = c(
"application/mac-binary",
"application/macbinary", "application/octet-stream", "application/x-binary",
"application/x-macbinary"
), test.bmp = "image/bmp", test.dtd = "application/xml-dtd",
test.emf = c("application/x-msmetafile", "image/emf"), test.eps = "application/postscript",
test.fli = c("video/flc", "video/fli", "video/x-fli"), test.gif = "image/gif",
test.ico = c("image/vnd.microsoft.icon", "image/x-icon"),
test.jpg = "image/jpeg", test.mp3 = "audio/mp3", test.odt = "application/vnd.oasis.opendocument.text",
test.ogg = c("application/ogg", "audio/ogg"), test.pcx = c(
"image/pcx",
"image/vnd.zbrush.pcx", "image/x-pcx"
), test.pdf = "application/pdf",
test.pl = c(
"application/x-perl", "text/plain", "text/x-perl",
"text/x-script.perl"
), test.png = "image/png", test.pnm = c(
"application/x-portable-anymap",
"image/x-portable-anymap"
), test.ppm = "image/x-portable-pixmap",
test.ppt = "application/msword", test.ps = "application/postscript",
test.psd = "image/photoshop", test.py = c(
"text/x-python",
"text/x-script.phyton"
), test.rtf = c(
"application/rtf",
"application/x-rtf", "text/richtext", "text/rtf"
), test.sh = c(
"application/x-bsh",
"application/x-sh", "application/x-shar", "text/x-script.sh",
"text/x-sh"
), test.tar = "application/tar", test.tar.gz = c(
"application/gzip",
"application/octet-stream", "application/x-compressed", "application/x-gzip"
), test.tga = "image/x-tga", test.txt = "text/plain", test.txt.gz = c(
"application/gzip",
"application/octet-stream", "application/x-compressed", "application/x-gzip"
), test.wav = "audio/x-wav", test.wmf = c(
"application/x-msmetafile",
"image/wmf", "windows/metafile"
), test.xcf = "application/x-xcf",
test.xml = "application/xml", test.xpm = c(
"image/x-xbitmap",
"image/x-xpixmap", "image/xpm"
), test.zip = "application/zip"
) -> results
# Resolve the bundled pass-through fixtures and detect each file's content type.
fils <- list.files(system.file("extdat", "pass-through", package="wand"), full.names=TRUE)
tst <- lapply(fils, get_content_type)
names(tst) <- basename(fils)
# Each detected type must match the expected value recorded in `results` above.
for(n in names(tst)) expect_identical(results[[n]], tst[[n]])
# With guessing disabled, an ambiguous file yields the "???" sentinel.
no_guess <- system.file("extdat", "no-guess", "csv.docx", package = "wand")
expect_equal(get_content_type(no_guess, guess = FALSE), "???")
|
cdd561062b52ce5f4415c624a4b84b03d46a4b30
|
22540d050618fa7c69c40c89d1397609e2f39936
|
/man/opts.Rd
|
6a85c3263723d70e652e782560d6aa280cda7893
|
[] |
no_license
|
cran/psyverse
|
8d3e6723d66c292f02a4d0b8978d85f868ca52b9
|
d1e2dc7f6be23f674f7b6cc1d21089995a331ba0
|
refs/heads/master
| 2023-03-17T00:04:47.391838
| 2023-03-05T21:00:07
| 2023-03-05T21:00:07
| 250,514,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,708
|
rd
|
opts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opts.R
\docType{data}
\name{opts}
\alias{opts}
\alias{set}
\alias{get}
\alias{reset}
\title{Options for the psyverse package}
\format{
An object of class \code{list} of length 4.
}
\usage{
opts
}
\description{
The \code{psyverse::opts} object contains three functions to set, get, and reset
options used by the psyverse package. Use \code{psyverse::opts$set} to set options,
\code{psyverse::opts$get} to get options, or \code{psyverse::opts$reset} to reset specific or
all options to their default values.
}
\details{
It is normally not necessary to get or set \code{psyverse} options.
The following arguments can be passed:
\describe{
\item{...}{For \code{psyverse::opts$set}, the dots can be used to specify the options
to set, in the format \code{option = value}, for example,
\code{encoding = "UTF-8"}. For
\code{psyverse::opts$reset}, a list of options to be reset can be passed.}
\item{option}{For \code{psyverse::opts$set}, the name of the option to set.}
\item{default}{For \code{psyverse::opts$get}, the default value to return if the
option has not been manually specified.}
}
The following options can be set:
\describe{
\item{encoding}{The default encoding used to read or write files.}
}
}
\examples{
### Get the default encoding
psyverse::opts$get(encoding);
### Set it to UTF-8-BOM
psyverse::opts$set(encoding = "UTF-8-BOM");
### Check that it worked
psyverse::opts$get(encoding);
### Reset this option to its default value
psyverse::opts$reset(encoding);
### Check that the reset worked, too
psyverse::opts$get(encoding);
}
\keyword{datasets}
|
5e97166f40eb201eb013e70df0d0f7b3f42afc2c
|
74fe29da37e54fb5e49a1ae7d4cf5051428202eb
|
/R/output_visualise_cells.R
|
9bf087340c53b7ae410d415ab8485f57779593a0
|
[] |
no_license
|
CRAFTY-ABM/craftyr
|
7fd8e63f85f4ddc13fbb0a79b67710a7b5a818f2
|
5630d1f0e4a1b1c34e3d10740640d414346f1af4
|
refs/heads/master
| 2022-08-11T13:20:13.579266
| 2018-06-16T06:55:19
| 2018-06-16T06:55:19
| 266,212,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,972
|
r
|
output_visualise_cells.R
|
library(ggplot2) # correct (see stack exchange question) for %+replace%
#' Prints a list of data.frames as ggplot2 facet plot.
#'
#' @param simp SIMulation Properties
#' @param celldata (list of) data.frames contain info and X and X coordinates. If a list of data.frames,
#' elements must be named differently
#' @param valuecolumn
#' @param idcolumn column used to separate and name rasters, refering to column names (set with colnames()) of the data.frame(s).
#' @param title name of plot
#' @param filenamepostfix appended to the default output filename @seealso output_tools_getDefaultFilename
#' @param legendtitle title for legend of raster values
#' @param factorial true if raster values are factorial (affects colour palette)
#' @param omitaxisticks omit axis ticks if true
#' @param ncol number of columns of facet wrap. Defauls to the number of rasters in the first dataframe
#' @param coloursetname id for colour set (if factorial) to pass to simp$colours$GenericFun (e.g. "AFT", "Capital", "Service")
#' @param legenditemnames names for legend items
#' @param returnplot if true the ggplot object is returned
#' @return raster visualisation
#' @example demo/example_visualise_cells_csv_aft.R
#'
#' @author Sascha Holzhauer
#' @export
visualise_cells_printPlots <- function(simp, celldata, idcolumn = "Tick", valuecolumn = "LandUseIndex",
		title = "", filenamepostfix = title, legendtitle = "",
		factorial= FALSE, omitaxisticks = FALSE, ncol = if (!is.data.frame(celldata)) length(celldata) else 1,
		coloursetname=simp$colours$defaultset, legenditemnames = NULL, ggplotaddon = NULL,
		theme = visualisation_raster_legendonlytheme, returnplot = FALSE) {

	futile.logger::flog.debug("Print cell data...",
			name="craftyr.visualise.cells")

	# Normalise input: accept a single data.frame or a (named) list of them.
	if(!is.list(celldata)) {
		# fixed: was Roo::throw.default -- the package is R.oo
		# (used correctly in visualise_cells_printRawPlots)
		R.oo::throw.default("Parameter celldata must be a data.frame or other list!")
	}
	if(is.data.frame(celldata)) {
		celldata <- list(celldata)
	}
	if(is.null(names(celldata))) {
		warning("Assign names to elements of list! Using letters...")
		names(celldata) <- paste(letters[seq_along(celldata)], ")", sep="")
	}

	listlen <- length(celldata)
	# Reshape every element into a uniform X / Y / Values / ID data.frame.
	# The list name is prefixed to the ID only when there is more than one
	# element, so single-frame plots keep clean facet labels.
	celldata <- mapply(function(infoCellDataVector, listname) {
				s <- data.frame(
						X = infoCellDataVector[simp$csv$cname_x],
						Y = infoCellDataVector[simp$csv$cname_y],
						Values = as.numeric(infoCellDataVector[[valuecolumn]]),
						ID = paste(if (listlen > 1) listname else "", infoCellDataVector[[idcolumn]]))
				colnames(s) <- c("X", "Y", "Values", "ID")
				s
			}, celldata, names(celldata), SIMPLIFY = FALSE)
	gc()
	celldata <- do.call(rbind, celldata)

	## PLOTTING
	simp$fig$numcols <- ncol
	simp$fig$numfigs <- length(unique(celldata$ID))
	simp$fig$init(simp, outdir = paste(simp$dirs$output$figures, "raster", sep="/"),
			filename = output_tools_getDefaultFilename(simp, postfix = filenamepostfix))

	# Continuous fill by default; switch to a discrete manual scale for factors.
	scaleFillElem <- ggplot2::scale_fill_gradientn(name=legendtitle, colours = simp$colours$binarycolours)
	if (factorial) {
		celldata$Values <- factor(celldata$Values)
		scaleFillElem <- ggplot2::scale_fill_manual(name=legendtitle,
				values = simp$colours$GenericFun(simp, number = length(unique(celldata$Values)), set = coloursetname),
				labels = legenditemnames)
	}

	omitaxistickselem <- NULL
	if (omitaxisticks) {
		omitaxistickselem <- ggplot2::theme(axis.text = ggplot2::element_blank(), axis.ticks = ggplot2::element_blank(),
				axis.title = ggplot2::element_blank())
	}

	# ggplot throws an error if any facet consists only of NAs, so seed each
	# all-NA facet with one arbitrary level.
	celldata <- plyr::ddply(celldata, "ID", function(df) {
				if (all(is.na(df$Values))) {
					df[1, "Values"] <- levels(df$Values)[1]
				}
				df
			})

	# Only facet when there is more than one raster to show.
	facetelem <- NULL
	if (length(unique(celldata$ID)) > 1) {
		facetelem <- ggplot2::facet_wrap(~ID, ncol = ncol)
	}

	p1 <- ggplot2::ggplot()+
			ggplot2::geom_raster(mapping=ggplot2::aes(X, Y, fill=Values), data=celldata) +
			facetelem +
			ggplot2::theme(strip.text.x = ggplot2::element_text(size=simp$fig$facetlabelsize)) +
			(if (!is.null(title) && title != "") ggplot2::labs(title = title)) +
			theme() +
			scaleFillElem +
			omitaxistickselem +
			ggplot2::coord_equal(ratio=1) +
			ggplotaddon
	print(p1)
	simp$fig$close()
	if (returnplot) return(p1)
}
#' Prints a list of data.frames as raw ggplot2 facet plot.
#'
#' There does not seem to be a straigh-forward way to convert a gTree object back to a ggplot2 object...
#' (http://stackoverflow.com/questions/29583849/r-saving-a-plot-in-an-object)
#'
#' @param simp SIMulation Properties
#' @param celldata (list of) data.frames containing info plus X and Y coordinates. If a list of data.frames,
#' elements must be named differently
#' @param idcolumn column used to separate and name rasters, referring to column names (set with colnames()) of the data.frame(s).
#' @param valuecolumn column of celldata containing the values to plot
#' @param title name of plot
#' @param filenamepostfix appended to the default output filename @seealso output_tools_getDefaultFilename
#' @param factorial true if raster values are factorial (affects colour palette)
#' @param ncol number of columns of facet wrap. Defauls to the number of rasters in the first dataframe
#' @param coloursetname id for colour set (if factorial) to pass to simp$colours$GenericFun (e.g. "AFT", "Capital", "Service")
#' @param returnplot if true the ggplot object is returned
#'
#' @seealso input_shapes_countries
#' @author Sascha Holzhauer
#' @export
visualise_cells_printRawPlots <- function(simp, celldata, idcolumn = "Tick", valuecolumn = "LandUseIndex",
		title = "", filenamepostfix = title,
		factorial= FALSE, ncol = if (!is.data.frame(celldata)) length(celldata) else 1,
		coloursetname=simp$colours$defaultset, ggplotaddon = NULL, returnplot = FALSE) {

	if (returnplot) {
		# A gTree drawn via grid cannot be converted back into a ggplot object.
		R.oo::throw.default("A ggplot2 object cannot be returned from this function!")
	}
	futile.logger::flog.debug("Print cell data...",
			name="craftyr.visualise.cells")

	# Normalise input: accept a single data.frame or a (named) list of them.
	if (is.null(celldata)) {
		# fixed: was Roo::throw.default -- the package is R.oo (see above)
		R.oo::throw.default("celldata is null!")
	}
	if(!is.list(celldata)) {
		R.oo::throw.default("Parameter celldata must be a data.frame or other list!")
	}
	if(is.null(names(celldata))) {
		warning("Assign names to elements of list! Using letters...")
		names(celldata) <- letters[seq_along(celldata)]
	}

	listlen <- length(celldata)
	# Reshape every element into a uniform X / Y / Values / ID data.frame; the
	# list name is prefixed to the ID only when there is more than one element.
	celldata <- mapply(function(infoCellDataVector, listname) {
				s <- data.frame(
						X = infoCellDataVector[simp$csv$cname_x],
						Y = infoCellDataVector[simp$csv$cname_y],
						Values = as.numeric(infoCellDataVector[[valuecolumn]]),
						ID = paste(if (listlen > 1) listname else "", infoCellDataVector[[idcolumn]]))
				colnames(s) <- c("X", "Y", "Values", "ID")
				s
			}, celldata, names(celldata), SIMPLIFY = FALSE)
	gc()
	celldata <- do.call(rbind, celldata)

	## PLOTTING
	simp$fig$numcols <- ncol
	simp$fig$numfigs <- length(unique(celldata$ID))

	# Continuous fill by default; switch to a discrete manual scale for factors.
	scaleFillElem <- ggplot2::scale_fill_gradientn(colours = simp$colours$binarycolours)
	if (factorial) {
		celldata$Values <- factor(celldata$Values)
		scaleFillElem <- ggplot2::scale_fill_manual(
				values = simp$colours$GenericFun(simp, number = length(unique(celldata$Values)), set = coloursetname))
	}

	p1 <- ggplot2::ggplot()+
			ggplot2::geom_raster(data=celldata, mapping=ggplot2::aes(X,Y,fill=Values)) +
			ggplot2::facet_wrap(~ID, ncol = ncol) +
			scaleFillElem +
			ggplot2::scale_x_continuous(expand=c(0,0)) + ggplot2::scale_y_continuous(expand=c(0,0)) +
			ggplot2::coord_equal() +
			ggplot2::theme_bw() +
			visualisation_raster_legendonlytheme()

	# Extract the raw panel grobs and print each one to its own output file
	# (no axes, titles or legends -- just the raster panels).
	gt <- ggplot2::ggplot_gtable(ggplot2::ggplot_build(p1))
	ge <- subset(gt$layout, substring(name,1,5) == "panel")

	for (i in seq_len(nrow(ge))) {
		g <- ge[i,]
		simp$fig$init(simp, outdir = paste(simp$dirs$output$figures, "raster", sep="/"),
				filename = output_tools_getDefaultFilename(simp, postfix = paste(filenamepostfix, "_", i, sep="")))
		grid::grid.draw(gt[g$t:g$b, g$l:g$r])
		simp$fig$close()
	}
}
|
74396aa95ae5380d95c3c0ab50f62271871c15d7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/acebayes/examples/aceglm.Rd.R
|
787705234389a0331bbb41e7a4f107ea0ea1559f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,202
|
r
|
aceglm.Rd.R
|
library(acebayes)
### Name: aceglm
### Title: Approximate Coordinate Exchange (ACE) Algorithm for Generalised
### Linear Models
### Aliases: aceglm paceglm
### ** Examples
## This example uses aceglm to find a Bayesian D-optimal design for a
## first order logistic regression model with 6 runs 4 factors. The priors are
## those used by Overstall & Woods (2017), with each of the five
## parameters having a uniform prior. The design space for each coordinate is [-1, 1].
set.seed(1)
## Set seed for reproducibility.
n<-6
## Specify the sample size (number of runs).
start.d<-matrix(2 * randomLHS(n = n,k = 4) - 1,nrow = n,ncol = 4,
dimnames = list(as.character(1:n), c("x1", "x2", "x3", "x4")))
## Generate an initial design of appropriate dimension. The initial design is a
## Latin hypercube sample.
low<-c(-3, 4, 5, -6, -2.5)
upp<-c(3, 10, 11, 0, 3.5)
## Lower and upper limits of the uniform prior distributions.
prior<-function(B){
t(t(6*matrix(runif(n = 5 * B),ncol = 5)) + low)}
## Create a function which specifies the prior. This function will return a
## B by 5 matrix where each row gives a value generated from the prior
## distribution for the model parameters.
## (Note: it reads `low` from the enclosing environment defined above.)
example1<-aceglm(formula=~x1+x2+x3+x4, start.d = start.d, family = binomial,
prior = prior, method = "MC", N1 = 1, N2 = 0, B = c(1000, 1000))
## Call the aceglm function which implements the ACE algorithm requesting
## only one iteration of Phase I and zero iterations of Phase II. The Monte
## Carlo sample size for the comparison procedure (B[1]) is set to 1000.
example1
## Print out a short summary.
#Generalised Linear Model
#Criterion = Bayesian D-optimality
#Formula: ~x1 + x2 + x3 + x4
#
#Family: binomial
#Link function: logit
#
#Method: MC
#
#B: 1000 1000
#
#Number of runs = 6
#
#Number of factors = 4
#
#Number of Phase I iterations = 1
#
#Number of Phase II iterations = 0
#
#Computer time = 00:00:01
example1$phase2.d
## Look at the final design.
# x1 x2 x3 x4
#1 -0.3571245 0.16069337 -0.61325375 0.9276443
#2 -0.9167309 0.91411512 0.69842151 0.2605092
#3 -0.8843699 0.42863930 -1.00000000 -0.9679402
#4 0.3696224 -0.27126080 0.65284076 0.1850767
#5 0.7172267 -0.34743402 -0.05968457 -0.6588896
#6 0.7469636 0.05854029 1.00000000 -0.1742566
prior2 <- list(support = rbind(low, upp))
## A list specifying the parameters of the uniform prior distribution
example2<-aceglm(formula = ~ x1 +x2 + x3 + x4, start.d = start.d, family = binomial,
prior = prior2, N1 = 1, N2 = 0)
## Call the aceglm function with the default method of "quadrature"
example2$phase2.d
## Final design
# x1 x2 x3 x4
#1 -0.3269814 0.08697755 -0.7583228 1.00000000
#2 -0.8322237 0.86652194 0.5747066 0.51442169
#3 -0.8987852 0.48881387 -0.8554894 -1.00000000
#4 0.3441093 -0.29050147 0.4704248 0.07628932
#5 0.8371670 -0.42361888 0.1429862 -0.95080251
#6 0.6802119 0.10853163 1.0000000 0.75421678
mean(example1$utility(d = example1$phase2.d, B = 20000))
#[1] -11.55139
mean(example2$utility(d = example2$phase2.d, B = 20000))
#[1] -11.19838
## Compare the two designs using the Monte Carlo approximation
## (higher, i.e. less negative, approximate expected utility is better).
|
4e9ff07749022ca7b3df9e27cdc06a3966086a3e
|
c9fb5b8c15fc82fe19f1f8d339bb1472de18e51c
|
/Data/make_onet_score.R
|
6e6c6d52985ddd5c0a519a66dc288c9b88190931
|
[] |
no_license
|
kota-tagami/J-Onet_EDA
|
92ff9d2300f23f3c9d0fbb17ae11e56de9cb1d9a
|
bd5576ab0ecef3027de7b1a74e1b50ea99ca6147
|
refs/heads/master
| 2023-01-13T07:05:12.263174
| 2020-11-18T06:22:56
| 2020-11-18T06:22:56
| 289,420,822
| 0
| 0
| null | 2020-11-18T06:22:57
| 2020-08-22T04:45:52
|
R
|
UTF-8
|
R
| false
| false
| 2,379
|
r
|
make_onet_score.R
|
library(tidyverse)
library(readxl)
## Read the data downloaded from the O*NET website
onet_score_00 <-
"IPD_DL_numeric_1_8.xlsx" %>%
str_c("Data", ., sep = "/") %>%
read_excel(
sheet = 1,
col_names = T,
.name_repair = "unique",
skip = 19
) %>%
select(
- `20`,
id_row = `...2`,
everything()
) %>%
mutate(
## In version 8 the parentheses in the occupation name 西洋料理調理人(コック)
## are half-width; normalize them to full-width so names match other versions.
IPD_02_01_001 = IPD_02_01_001 %>%
str_replace_all("\\(", "(") %>%
str_replace_all("\\)", ")")
)
## List and labels of the variables used in the app
varlist <-
"onet_varlist.xlsx" %>%
str_c("Data", ., sep = "/") %>%
read_excel() %>%
rename(dist_value = `...4`)
## Select the variables used in the app and pivot to long format
onet_score_01 <-
onet_score_00 %>%
select(contains(varlist$`IPD-ID`)) %>%
pivot_longer(-c(1, 2)) %>%
left_join(., varlist, by = c("name" = "IPD-ID")) %>%
relocate(value, .after = last_col()) %>%
mutate(across(c(`IPD_02_01_001`, name, type, label), fct_inorder))
## Items other than "education and training" (教育と訓練) pass through unchanged
onet_score_01_1 <-
onet_score_01 %>%
filter(type != "教育と訓練") %>%
select(-dist_value)
## Convert the "education and training" items to continuous values
## (weighted mean of the distribution values, weighted by the responses)
onet_score_01_2 <-
onet_score_01 %>%
filter(type == "教育と訓練", dist_value != "NA") %>%
separate(
col = label,
into = c("label", "item"),
sep = "_"
) %>%
mutate(label = label %>% fct_inorder()) %>%
mutate(dist_value = dist_value %>% as.numeric()) %>%
group_by(IPD_01_01_001, IPD_02_01_001, type, label) %>%
summarise(
value = weighted.mean(dist_value, w = value, na.rm = T),
.groups = "drop"
) %>%
mutate(
label = case_when(
label == "学歴" ~ str_c(label, "(平均教育年数)"),
label == "入職前の訓練期間" ~ str_c(label, "(平均年数)"),
label == "入職前の実務経験" ~ str_c(label, "(平均年数)"),
label == "入職後の訓練期間" ~ str_c(label, "(平均年数)")
) %>%
fct_inorder(),
## Education stays in years; training/experience converted from months to years
value = case_when(
label %>% str_detect("学歴") ~ value,
TRUE ~ value/12
)
)
## Combine both pieces back into one long table
onet_score_02 <-
bind_rows(onet_score_01_1, onet_score_01_2) %>%
arrange(IPD_01_01_001, name)
## Save
write_csv(onet_score_02, "Data/onet_score.csv")
|
76d79a0d607379baa0de4110ac98f35719bc61f8
|
52b84546a64b4f31245eb0bfaa68bfa489c90534
|
/sta141a/2016/discussion06.R
|
bca384f326d0b0b1cd95a3550d6d96e062c2a42c
|
[
"CC-BY-NC-SA-4.0"
] |
permissive
|
nick-ulle/teaching-notes
|
6cb48d874ef4c8c99402b9987e58b2958adff056
|
12e388f626f415bd39543bfed99c44e4130a065b
|
refs/heads/master
| 2023-02-20T12:55:06.521649
| 2023-02-05T02:53:22
| 2023-02-05T02:53:22
| 86,759,329
| 31
| 33
|
BSD-3-Clause-Clear
| 2019-01-15T15:44:11
| 2017-03-30T23:49:53
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 5,193
|
r
|
discussion06.R
|
# discussion06.R
# Week 5
# ------
# Linear Models
# -------------
# "All models are wrong, but some are useful." -- G. Box
library(tidyverse)
# ### Example: Elmhurst College 2011 Financial Aid
# The Elmhurst data set has three variables.
#
# * family_income: total family income
# * gift_aid: total gift aid for freshman year
# * price_paid: total price paid for first year
#
# All values are in thousands of US dollars.
elm = read_tsv("data/elmhurst.txt")
# Is there a relationship between family income and gift aid?
plt = ggplot(elm, aes(family_income, gift_aid)) + geom_point()
plt
# The data seems to have a trend.
#
# y = ( slope * x) + intercept
# y = ( b1 * x) + b0
# y = ((-5/50) * x) + 30
plt = plt + geom_abline(intercept = 30, slope = -0.1, color = "tomato",
linetype = "dashed")
# How well does our line fit the data?
#
# y = ((-5/50) * x) + 30
# Store the hand-picked line's fitted values for the residual analysis below.
elm$fitted = (-5/50) * elm$family_income + 30
# The residuals are the differences between the true values and the line.
elm$resid = elm$gift_aid - elm$fitted
ggplot(elm, aes(family_income, resid)) + geom_point() +
geom_hline(yintercept = 0)
mean(elm$resid) # off by about $113 on average
sd(elm$resid)
sum(elm$resid) # negatives and positives cancel out
sum(abs(elm$resid))
sum(elm$resid^2)
# What if we choose the line that has the smallest squared residuals? This is
# what "least squares" does! (the default for linear models)
#
# The sum of the squared residuals emphasizes large errors. This is more
# conservative than the sum of their absolute values.
#
# The best way to measure error really depends on the problem!
model = lm(gift_aid ~ family_income, elm)
plt + geom_abline(intercept = coef(model)[[1]], slope = coef(model)[[2]],
color = "tomato", linetype = "dashed")
# Box-Cox transformations to fix residuals with "strange patterns"
# (Pearson) Correlation tells us how "linear" the data is.
# Bug fix: the hand-picked line's fitted values were stored above in
# `elm$fitted`; the original referenced a nonexistent `elm$line` column,
# which makes this assignment error ("replacement has 0 rows").
elm$error = (elm$gift_aid - elm$fitted)
# The _residuals_ are the differences between the true values and the line.
#
# A _residual plot_ is a scatterplot of the residuals versus the x-variable.
ggplot(elm, aes(family_income, error)) + geom_point() +
geom_hline(yintercept = 0, color = "tomato", linetype = "dashed")
# A linear model chooses the line that fits the data "best".
#
# Residuals measure error, so minimize residuals! How?
#
# The most popular strategy is "least squares," which minimizes the sum of the
# squared residuals. Why?
plt + geom_smooth(method = "lm", se = F)
# More detail is available with the `lm()` function:
model = lm(gift_aid ~ family_income, elm)
summary(model)
# The residuals are available using the `resid()` function or the broom
# package's `augment()` function.
resid(model)
library(broom)
tidy(model)
df = as_data_frame(augment(model))
ggplot(df, aes(family_income, .resid)) + geom_point() +
geom_hline(yintercept = 0, color = "tomato")
# Conditions for linear models:
#
# 1. Linearity! The data should follow a straight line. If there is any other
# pattern (such as a parabola) a linear model is not appropriate.
#
# 2. Independent observations. The observations should not depend on each
# other. As an example, a time series would violate this condition.
#
# 3. Constant variance. Observations should be roughly the same distance from
# the line across all values of the predictor variable x.
#
# 4. Gaussian residuals. In order to construct confidence intervals for or test
# the model, the residuals must have a Gaussian (normal) distribution.
#
# Conditions 1, 3, and 4 can be checked with residual plot(s). Condition 4 can
# also be checked with a quantile-quantile (Q-Q) plot.
#
# For condition 2, think carefully about whether it makes sense for your data
# that the observations would be indpendent.
# ### Example: Anscombe's Quartet
# Do the statistics tell you everything?
df = readRDS("data/anscombe.rds")
# Fit the same simple linear model within each of the four groups;
# the coefficients are (nearly) identical even though the data differ wildly.
lapply(split(df, df$group), function(subs) lm(y ~ x, subs))
ggplot(df, aes(x, y)) + geom_point() + facet_wrap(~ group)
# Multiple Regression
# -------------------
# Often more than one variable is related to the response variable. Multiple
# regression fits a model with more than one term:
#
# y = b0 + (b1 * x1) + (b2 * x2) + ...
# ### Example: Mario Kart Sales
# Auction data from Ebay for the game Mario Kart for the Nintendo Wii.
library(openintro)
?marioKart
mario = as_data_frame(marioKart)
model = lm(totalPr ~ cond + nBids + wheels + duration, mario)
summary(model)
df = as_data_frame(augment(model))
ggplot(df, aes(nBids, .resid)) + geom_point()
# For more details, see chapters 5-6 from:
#
# <https://www.openintro.org/stat/textbook.php?stat_book=isrs>
# The Anscombe data was converted to a tidy data frame from R's built-in
# `anscombe` data with the following function.
# Reshape R's built-in `anscombe` data (columns x1..x4, y1..y4) into a tidy
# frame with columns observation, group, x, y.
tidy_anscombe = function() {
df = as_data_frame(anscombe)
df$observation = seq_len(nrow(anscombe))
# Move values to rows labeled with the column they came from.
df = gather(df, label, value, x1:y4)
# Split the "label" column into two columns: (x or y, group #)
df = extract(df, label, c("xy", "group"), "([xy])([1-4])")
# Move values to two columns for x and y.
# Bug fix: the value column created by gather() above is named "value",
# not "val"; the original call errored on the missing column.
spread(df, xy, value)
}
|
e5aef92d14ab22c5ca7c2a9098c84b331f85dcc9
|
34d6b8a8648cec16a214278169e993eca182b344
|
/simulations/Exp3_AsyNormality/Exp3_AsyNormality_run.R
|
88d31db7131d7139f209589c182d6803143e69dd
|
[] |
no_license
|
predt/regsynth
|
9817ecf3f7dfd377af876bf975e09e12e1bd1dae
|
ace6c9d5b6c7b341e53595c94b98922852d97816
|
refs/heads/master
| 2021-03-13T18:04:55.200460
| 2019-10-16T13:40:24
| 2019-10-16T13:40:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,841
|
r
|
Exp3_AsyNormality_run.R
|
### Exp3: Asymptotic Normality, Run file
### Jeremy L Hour
### 21/02/2018
# NOTE(review): hard-coded network path and rm(list=ls()) make this script
# machine-specific and destructive to the session — consider removing.
setwd("//ulysse/users/JL.HOUR/1A_These/A. Research/RegSynthProject/regsynth")
rm(list=ls())
### 0. Settings
### Load packages
library("MASS")
library("ggplot2")
library("gtable")
library("grid")
library("reshape2")
library("LowRankQP")
library("xtable")
library("gridExtra")
### Load user functions
source("functions/wsol.R")
source("functions/wsoll1.R")
source("functions/matchDGP_fixedT.R")
source("functions/wATT.R")
source("functions/matching.R")
source("functions/matchest.R")
source("functions/OBest.R")
source("functions/regsynth.R")
source("functions/regsynthpath.R")
source("functions/TZero.R")
source("functions/synthObj.R")
source("simulations/Exp3_AsyNormality/Exp3_AsyNormality_setup.R")
### MC XP
set.seed(2121988)
lambda = seq(0,2,.01) # set of lambda to be considered for optim
# NOTE(review): `lambda` is not used in the visible code below — presumably
# consumed inside Exp3_setup / sourced functions; verify.
xp = Exp3_setup(R=5000,n1=20,n0=70,p=20,K=5)
Results = xp
R = nrow(Results)
# Draw the charts: stack the R replications of each of the 5 models long-wise.
id = c(mapply(function(x) rep(x,R),1:5))
val = c(Results)
data_res = data.frame(val = val, model = id)
# Symmetric x-axis limits from the 1%/99% quantiles, padded by 10%.
M = max(abs(quantile(Results,.01)),abs(quantile(Results,.99)))
lb = -1.1*M; ub = 1.1*M
sdBCH = sd(Results[,1])
### Build one histogram panel of simulated treatment effects for a given
### model id, overlaid with a N(0, sdBCH) density curve.
### Relies on the globals `lb` and `ub` (x-axis limits) defined above.
get.plot <- function(data,modelS,title="A Title",sdBCH){
  panel_data <- subset(data, (model==modelS))
  fig <- ggplot(panel_data, aes(x=val)) +
    geom_histogram(binwidth = .1, alpha=.5, position='identity',fill="steelblue", aes(y = ..density..)) +
    scale_x_continuous(limits=c(lb,ub), name="Treatment effect") +
    ggtitle(title) +
    stat_function(fun = dnorm, args=list(mean=0, sd=sdBCH), colour="darkorchid3", size=1) +
    theme(plot.title = element_text(lineheight=.8, face="bold"),legend.position="none")
  fig
}
grid.arrange(get.plot(data_res,1,"Fixed lambda", sdBCH), get.plot(data_res,2,"RMSE opt", sdBCH), ncol=2)
|
9ce6fec94475b0c7c6b5943c0e2b9d913c49daff
|
f62736da11b1818af73866a6c5da7c5b8b75b980
|
/2018/05-facebook.R
|
fe5018b4338af5edd9ab2bdaedd7d39c8e5fb1ea
|
[] |
no_license
|
erikgahner/posts
|
95b108dccea199a81656fd207857ba7afc7cf92a
|
38293e4f7d5a02ef87f9ae4cf36af0fefa209b86
|
refs/heads/master
| 2023-08-30T17:36:37.503975
| 2023-08-27T08:33:32
| 2023-08-27T08:33:32
| 25,849,217
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,046
|
r
|
05-facebook.R
|
# R script to "Why you should not trust the Facebook experiment"
# Link: http://erikgahner.dk/2018/why-you-should-not-trust-the-facebook-experiment/
library("ggplot2")
respondents <- 1095
df_fb <- data.frame(
time = c(0, 0, 1, 1),
tr = c("Treatment", "Control", "Treatment", "Control"),
res = c(rep(respondents/2,2), 516, 372)
)
ggplot(df_fb, aes(x=time, y=res, group=tr, fill = tr)) +
geom_bar(position="dodge", stat="identity", alpha=.8) +
scale_y_continuous("Group size") +
scale_x_continuous("", breaks=0:1, labels=
c("Pre", "Post")) +
scale_fill_manual(values=c("#2679B2", "#E02527")) +
theme_minimal() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
theme(legend.title=element_blank(), legend.position = "top") +
geom_segment(aes(x = 0.56, y = 516, xend = 0.56, yend = 372), linetype="dashed", colour="#2679B2") +
geom_segment(aes(x = 0.56, y = 516, xend = 1, yend = 516), linetype="dashed", colour="#2679B2")
ggsave("attrition.png", height=4, width=4)
|
da22a100c4d021270df3cff0dd72461623949f39
|
092e6cb5e99b3dfbb089696b748c819f98fc861c
|
/scripts/doASTSAEMlearnCircleWithEstimatedInitialCondFA.R
|
44811cb812a533b6aa8fcf4a7e03d50f834e9978
|
[] |
no_license
|
joacorapela/kalmanFilter
|
522c1fbd85301871cc88101a9591dea5a2e9bc49
|
c0fb1a454ab9d9f9a238fa65b28c5f6150e1c1cd
|
refs/heads/master
| 2023-04-16T09:03:35.683914
| 2023-04-10T16:36:32
| 2023-04-10T16:36:32
| 242,138,106
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,486
|
r
|
doASTSAEMlearnCircleWithEstimatedInitialCondFA.R
|
require(astsa)
require(MASS)
require(ramcmc)
require(plotly)
require(mvtnorm)
require(gridExtra)
require(reshape2)
source("../src/squareRootKF.R")
source("../src/smoothLDS_R.R")
source("../src/estimateKFInitialCondFA.R")
source("../src/plotTrueInitialAndEstimatedMatrices.R")
source("../src/plotTrueInitialAndEstimatedVectors.R")
# Run the full EM-learning experiment on the simulated circle data:
# estimate initial Kalman-filter conditions via factor analysis, run
# astsa::EM0, plot the log-likelihood and parameter comparisons, then
# filter + smooth under both the estimated and the initial parameters
# and plot true vs estimated vs initial latent trajectories.
# NOTE(review): contains browser() breakpoints — remove for batch runs.
processAll <- function() {
    nFactors <- 2
    maxIter <- 100
    tol <- 1e-8
    simulationFilename <- "results/simulationCircle.RData"
    simRes <- get(load(simulationFilename))
    zs <- simRes$x
    # Factor analysis expects samples in rows, so transpose observations.
    zsForFA <- t(as.matrix(zs))
    initialConds <- estimateKFInitialCondFA(z=zsForFA, nFactors=nFactors)
    # True simulation parameters (A, C, Gamma, Sigma, mu0, V0).
    A <- simRes$A
    C <- simRes$C
    B <- matrix(0, nrow=nrow(A), ncol=1)
    D <- matrix(0, nrow=nrow(C), ncol=1)
    us <- matrix(0, nrow=1, ncol=ncol(zs))
    Gamma <- simRes$Gamma
    SRSigmaW <- chol(x=Gamma)
    Sigma <- simRes$Sigma
    SRSigmaV <- chol(x=Sigma)
    xHat0 <- simRes$mu0
    V0 <- simRes$V0
    SRSigmaX0 <- chol(x=V0)
    # Initial-condition parameters (suffix 0) taken from the FA estimate.
    A0 <- initialConds$A
    # A0 <- A
    Gamma0 <- 1e-3*diag(rep(1, ncol(A0)))
    SRSigmaW0 <- chol(x=Gamma0)
    C0 <- initialConds$C
    # C0 <- C
    B0 <- B
    D0 <- D
    Sigma0 <- diag(initialConds$sigmaDiag)
    SRSigmaV0 <- chol(x=Sigma0)
    xHat00 <- xHat0
    V00 <- V0
    SRSigmaX00 <- chol(x=V00)
    # NOTE(review): the commented call below uses the FA initial conditions;
    # the active call instead starts EM0 from the TRUE parameters — confirm
    # which configuration is intended for this experiment.
    # emRes <- EM0(num=ncol(zs), y=t(zs), A=C, mu0=xHat00, Sigma0=V00, Phi=A0, cQ=SRSigmaW0, cR=SRSigmaV0, max.iter=maxIter, tol=tol)
    emRes <- EM0(num=ncol(zs), y=t(zs), A=C, mu0=xHat00, Sigma0=V0, Phi=A, cQ=SRSigmaW, cR=SRSigmaV, max.iter=maxIter, tol=tol)
    # Log-likelihood trace across EM iterations.
    df <- data.frame(x=1:length(emRes$like),
                     y=emRes$like)
    p <- ggplot(df, aes(x=x, y=y))
    p <- p + geom_line()
    p <- p + geom_point()
    p <- p + xlab("Time")
    p <- p + ylab("Log Likelihood")
    p <- ggplotly(p)
    llFigFilename <- "figures//circleASTSA_LogLik.html"
    htmlwidgets::saveWidget(as_widget(p), file.path(normalizePath(dirname(llFigFilename)), basename(llFigFilename)))
    print(p)
    browser()
    # Compare true / initial / EM-estimated parameter matrices and vectors.
    AFigFilename <- "figures//circleASTSA_A.html"
    plotTrueInitialAndEstimatedMatrices(trueM=A, initialM=A0, estimatedM=emRes$Phi, title="A", figFilename=AFigFilename)
    CFigFilename <- "figures//circleASTSA_C.html"
    plotTrueInitialAndEstimatedMatrices(trueM=C, initialM=C0, title="C", figFilename=CFigFilename)
    GammaFigFilename <- "figures//circleASTSA_Gamma.html"
    plotTrueInitialAndEstimatedMatrices(trueM=Gamma, initialM=Gamma0, estimatedM=emRes$Q, title="Gamma", figFilename=GammaFigFilename)
    SigmaFigFilename <- "figures//circleASTSA_Sigma.html"
    plotTrueInitialAndEstimatedMatrices(trueM=Sigma, initialM=Sigma0, estimatedM=emRes$R, title="Sigma", figFilename=SigmaFigFilename)
    V0FigFilename <- "figures//circleASTSA_V0.html"
    plotTrueInitialAndEstimatedMatrices(trueM=V0, initialM=V00, estimatedM=emRes$Sigma0, title="V0", figFilename=V0FigFilename)
    xHat0FigFilename <- "figures//circleASTSA_XHat0.html"
    plotTrueInitialAndEstimatedVectors(trueV=xHat0, initialV=xHat00, estimatedV=emRes$mu0, title="xHat0", figFilename=xHat0FigFilename)
    # Filter + smooth with the EM-estimated parameters...
    fRes <- squareRootKF(A=emRes$Phi, B=B, C=C0, D=D, xHat0=emRes$mu0, SRSigmaX0=chol(x=emRes$Sigma0), SRSigmaW=chol(emRes$Q), SRSigmaV=chol(emRes$R), us=us, zs=zs)
    sRes <- smoothLDS(A=emRes$Phi, mu=fRes$xHat, V=fRes$SigmaXHat, P=fRes$SigmaX[2:length(fRes$SigmaX)])
    # ...and with the initial-condition parameters, for comparison.
    fRes0 <- squareRootKF(A=A0, B=B0, C=C0, D=D0, xHat0=xHat00, SRSigmaX0=chol(x=V00), SRSigmaW=chol(x=Gamma0), SRSigmaV=chol(x=Sigma0), us=us, zs=zs)
    sRes0 <- smoothLDS(A=A0, mu=fRes0$xHat, V=fRes0$SigmaXHat, P=fRes0$SigmaX[2:length(fRes0$SigmaX)])
    # Build one long data frame of true / estimated / initial latent traces.
    data <- data.frame()
    for(i in 1:nrow(simRes$z)) {
        dataBlock <- data.frame(sample=1:length(simRes$z[i,]),
                                latent=simRes$z[i,],
                                latentID=rep(i, length(simRes$z[i,])),
                                latentType=rep("true",
                                               length(simRes$z[i,])))
        data <- rbind(data, dataBlock)
    }
    for(i in 1:nrow(sRes$muHat)) {
        dataBlock <- data.frame(sample=1:length(sRes$muHat[i,]),
                                latent=sRes$muHat[i,],
                                latentID=rep(i, length(sRes$muHat[i,])),
                                latentType=rep("estimated",
                                               length(sRes$muHat[i,])))
        data <- rbind(data, dataBlock)
    }
    for(i in 1:nrow(sRes0$muHat)) {
        dataBlock <- data.frame(sample=1:length(sRes0$muHat[i,]),
                                latent=sRes0$muHat[i,],
                                latentID=rep(i, length(sRes0$muHat[i,])),
                                latentType=rep("initial",
                                               length(sRes0$muHat[i,])))
        data <- rbind(data, dataBlock)
    }
    p <- ggplot(data, aes(x=sample, y=latent,
                          color=factor(latentID),
                          linetype=factor(latentType)))
    p <- p + geom_line()
    p <- p + geom_hline(yintercept=0)
    p <- p + geom_vline(xintercept=0)
    p <- p + ylab("Latent Value")
    p <- p + xlab("Time")
    p <- p + theme(legend.title = element_blank())
    p <- ggplotly(p)
    latentsFigFilename <- "figures//circleASTSA_Latents.html"
    htmlwidgets::saveWidget(as_widget(p), file.path(normalizePath(dirname(latentsFigFilename)), basename(latentsFigFilename)))
    print(p)
    browser()
}
processAll()
|
1be4d4298ae6f6aed3dbf4a95347588008201565
|
66ae31e851638ad20305409b99df93d8ce2f8133
|
/R/snlRigidNodeAbsorption.R
|
9246dfb1cd28d1486803259575a682abf316d715
|
[] |
no_license
|
rwoldford/edmcr
|
150e1702ceb451d154223ff5e9ded10defeda9e6
|
ee322d7dcc0bf3f497576c31a87a4886bc17d8a8
|
refs/heads/main
| 2021-12-06T06:09:38.997297
| 2021-09-08T17:59:47
| 2021-09-08T17:59:47
| 142,780,936
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,202
|
r
|
snlRigidNodeAbsorption.R
|
# Attempt to absorb node(s) jc into clique ic (node absorption step of a
# clique-based Euclidean distance matrix / sensor network localization
# algorithm).
#
# Arguments as used by the visible code:
#   ic                - index of the target clique (column of Dcq)
#   jc                - index/indices of candidate node(s) to absorb
#   Dpartial          - n x n partial distance matrix (0 = unknown entry)
#   Dcq               - n x (#cliques) node/clique membership indicator
#   eigvs             - per-clique list of stored n x (r+1) basis matrices
#   grow              - flag, set to 1 when any clique grows this pass
#   Dcqinit, csizesinit, condtolerscaling - forwarded to helper routines
#   r                 - embedding dimension (basis has r+1 columns)
#   n                 - total number of nodes
#
# Returns list(Dcq, eigvs, grow, flagred); flagred == 1 signals that the
# absorption failed (clique completion or subspace intersection rejected).
# NOTE(review): semantics of the helpers snlK/snlKdag/snlCompleteClique/
# snlSubspaceIntersection are inferred from usage — confirm in their files.
snlRigidNodeAbsorption <- function(ic,jc,Dpartial,Dcq,eigvs,grow,Dcqinit,condtolerscaling,r,n,csizesinit){
flagred <- 0
# e22: nodes already in clique ic that have a known distance to jc.
e22 <- Dpartial[,jc] & Dcq[,ic]
ne22 <- sum(e22)
# If jc sees the whole clique and no basis is stored yet, absorb directly.
if(ne22 == sum(Dcq[,ic]) & (length(eigvs) < ic || is.na(eigvs[[ic]]))){
Dcq[jc,ic] <- 1
grow <- 1
}else{
# The intersection is "complete" when all pairwise distances among the
# e22 nodes are known (no zero off-diagonal entries).
temp <- Dpartial[e22,]
temp <- temp[,e22]
IntersectionComplete <- (sum(temp == 0)) == ne22*(ne22-1)
#Complete clique ic if necessary
if(!IntersectionComplete){
########## COMPLETE CLIQUE ##########
temp <- snlCompleteClique(ic,Dcq,eigvs,Dpartial,Dcqinit,r,n,csizesinit)
eigvs <- temp$eigvs
P <- temp$P
flagred <- temp$flagred
#####################################
}
#If complete clique was successful, perform node absorption
if(!flagred){
# Partition nodes: e11 = clique-only, e22 = shared, e33 = the jc node(s).
e11 <- (Dcq[,ic] - e22) > 0
e33 <- matrix(rep(0,n),ncol=1)
for(i in jc){
e33[i] <- 1
}
inds <- matrix(c(which(e11 > 0),which(e22 > 0),which(e33 > 0)),ncol=1)
nvec <- c(sum(e11),sum(e22),sum(e33))
# a1 spans the existing clique (e11+e22); a2 spans the overlap plus jc.
a1 <- seq(1,sum(nvec[c(1,2)]),by=1)
a2 <- seq(nvec[1]+1,sum(nvec),by=1)
a1inds <- inds[a1]
a2inds <- inds[a2]
#Find Ub1: basis for the existing clique (computed or previously stored)
if(length(eigvs) < ic || is.na(eigvs[[ic]])){
Dbar <- Dpartial[a1inds,]
Dbar <- Dbar[,a1inds]
B <- snlKdag(Dbar)
temp <- eigen(B)
# Keep the r eigenvectors with the largest eigenvalues, plus the
# normalized all-ones vector.
Ub <- temp$vectors[,order(temp$values)]
Ub <- Ub[,seq(ncol(Ub)-r+1,ncol(Ub),by=1)]
k <- length(a1)
e <- matrix(rep(1,k),ncol=1)
Ub1 <- as.matrix(cbind(Ub,e/sqrt(k)))
}else{
Ub1 <- eigvs[[ic]][a1inds,]
}
#Find Ub2: basis for the overlap + jc block
if(IntersectionComplete){
temp <- Dpartial[a2inds,]
temp <- temp[,a2inds]
B <- snlKdag(temp)
}else{
# Use the completed-clique coordinates P for the overlap block and
# the known distances v to jc to assemble the augmented matrix.
v <- matrix(Dpartial[,jc], ncol=length(jc))
v <- matrix(v[c(e22),], ncol=length(jc))
temp <- cbind(snlK(as.matrix(P[e22,] %*% t(P[e22,]))),v)
B <- snlKdag(as.matrix(rbind(temp, cbind(t(v),0))))
}
temp <- eigen(B)
Ub <- temp$vectors[,order(temp$values)]
Ub <- Ub[,seq(ncol(Ub)-r+1,ncol(Ub),by=1)]
k <- length(a2)
e <- matrix(rep(1,k),ncol=1)
Ub2 <- as.matrix(cbind(Ub,e/sqrt(k)))
#Find U: intersect the two subspaces; may fail on conditioning (flagred)
############# SUBSPACE INTERSECTION ############
temp <- snlSubspaceIntersection(nvec,Ub1,Ub2,condtolerscaling)
U <- temp$U
flagred <- temp$flagred
#################################################
if(!flagred){
#Store U, scattered back into an n x (r+1) matrix at rows `inds`
ii <- matrix(rep(inds,r+1),ncol=r+1)
jj <- matrix(rep(seq(1,r+1,by=1),length(inds)),byrow=TRUE,nrow=length(inds))
temp <- matrix(rep(0,n*(r+1)),nrow=n)
for(k in 1:length(ii)){
temp[ii[k],jj[k]] <- U[k]
}
eigvs[[ic]] <- temp
#Update Dcq: jc now belongs to clique ic
Dcq[jc,ic] <- 1
grow <- 1
}
}
}
return(list(Dcq=Dcq,eigvs=eigvs,grow=grow,flagred=flagred))
}
|
9132c31a474e0146f8767abcbccaa69162bb6c24
|
86151a6ecec532ac065621a1ffdfd827504176a3
|
/R/aggregate_brick.R
|
8f1e5d459e31b6b8015594c612d405e88dc91b9b
|
[] |
no_license
|
imarkonis/pRecipe
|
3454f5ce32e6915a6caef1dbc041d12c411c9ae5
|
07c6b1da653221a0baeeb2aa81b8744393ff587e
|
refs/heads/master
| 2022-11-02T20:27:40.979144
| 2022-10-28T10:52:04
| 2022-10-28T10:52:04
| 237,580,540
| 0
| 0
| null | 2020-02-01T07:44:23
| 2020-02-01T07:44:23
| null |
UTF-8
|
R
| false
| false
| 1,698
|
r
|
aggregate_brick.R
|
#' Parallel aggregate
#'
#' Aggregate every layer of a raster brick to a coarser resolution in
#' parallel, then restore the time (Z) dimension from the layer names.
#'
#' @import parallel
#' @importFrom methods as
#' @importFrom raster aggregate as.list brick setZ
#' @param dummie_nc a character string; path to a file readable by
#'   \code{raster::brick}. The file name is also used to pick the date
#'   origin for data sets whose layer names are day offsets.
#' @param new_res numeric; target resolution in the same units as the input.
#' @return raster brick at the new resolution with its Z slot set to dates.
#' @keywords internal
aggregate_brick <- function(dummie_nc, new_res){
dummie_brick <- brick(dummie_nc)
dummie_brick <- as.list(dummie_brick)
# Leave one core free; guard against detectCores() returning NA.
no_cores <- detectCores() - 1
if (is.na(no_cores) || no_cores < 1) no_cores <- 1
cluster <- makeCluster(no_cores, type = "PSOCK")
# Ensure workers are torn down even if the parallel step errors.
on.exit(stopCluster(cluster), add = TRUE)
clusterExport(cluster, "new_res", envir = environment())
dummie_list <- parLapply(cluster, dummie_brick, function(dummie_layer){
dummie_res <- raster::res(dummie_layer)[1]
# Aggregation factor = how many input cells per output cell.
dummie_factor <- new_res/dummie_res
raster::aggregate(dummie_layer, fact = dummie_factor,
fun = mean, na.rm = TRUE)
})
dummie_list <- brick(dummie_list)
dummie_names <- names(dummie_list)
# Layer names are either dates ("Xyyyy.mm.dd") or integer day offsets,
# whose origin depends on the data set encoded in the file name.
if (!Reduce("|", grepl("^X\\d\\d\\d\\d\\.\\d\\d\\.\\d\\d",
dummie_names))) {
if (grepl("persiann", dummie_nc)) {
dummie_names <- sub("^.", "", dummie_names)
dummie_names <- as.numeric(dummie_names)
dummie_Z <- as.Date(dummie_names, origin = "1983-01-01 00:00:00")
} else if (grepl("gldas-clsm", dummie_nc)) {
dummie_names <- sub("^.", "", dummie_names)
dummie_names <- as.numeric(dummie_names)
dummie_Z <- as.Date(dummie_names, origin = "1948-01-01 00:00:00")
}
# NOTE(review): if the names are offsets but the file is neither
# persiann nor gldas-clsm, dummie_Z is never defined and setZ() below
# will error — confirm the set of supported data sets.
} else {
dummie_Z <- as.Date(dummie_names, format = "X%Y.%m.%d")
}
dummie_list <- setZ(dummie_list, dummie_Z)
return(dummie_list)
}
|
5da0e6ab2ba34f000f1e119a987623203944babb
|
774b77ad325d4268d86162f030130132bff9adac
|
/Politwitter_URL_Scrape.R
|
973ac44b9d2b7bec0ca650fdff70703c16d34219
|
[] |
no_license
|
adamingwersen/CA
|
f0923c6c43f210d72bf0627127f8e5604b07edcb
|
7deb3a8a66edfb3efe87ca5a7be2fe836f4ba3be
|
refs/heads/master
| 2021-01-17T14:25:32.384749
| 2016-07-14T22:26:56
| 2016-07-14T22:26:56
| 45,525,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,177
|
r
|
Politwitter_URL_Scrape.R
|
### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### ####
#Politwitter scrape: pull facebook links, names and parties from the
#politwitter.ca directory into one data frame.
library("rvest")
library("dplyr")
# Defining http & CSS-selectors based on "Selector-gadget"
politwitter.main = "http://politwitter.ca/directory/facebook"
css.select = "td:nth-child(8) a"
css.select2 = "td:nth-child(2) a"
css.select3 = "td:nth-child(3)"
# Fetching facebook links
politwitter.link = read_html(politwitter.main, encoding = "UTF-8") %>%
html_nodes(css = css.select) %>%
html_attr(name = 'href')
#Fetching politician names
politwitter.name = read_html(politwitter.main, encoding = "UTF-8") %>%
html_nodes(css = css.select2) %>%
html_text()
#Fetching politician parties
politwitter.party = read_html(politwitter.main, encoding= "UTF-8") %>%
html_nodes(css = css.select3) %>%
html_text()
# Apparently the css-selector [td:nth-child3] also gets 11 numerics at the end - discard these to align into dataframe
politwitter.par = politwitter.party[1:825]
politwitter.df = data.frame(politwitter.link, politwitter.name, politwitter.par)
# Normalize party label: "tory" -> "cons"
politwitter.df$politwitter.par = gsub("tory", "cons", politwitter.df$politwitter.par)
### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### ####
## Extracting page-name from each URL
politwitter.df$fbpage = gsub("\\http://www.facebook.com/", "", politwitter.df$politwitter.link)
### This doesn't work due to the fact that some FB-pages may have two or more ID's
### We need to fetch the ID's of type : pages/Olivia-Chow/15535160141 = OliviaChowTO/
# Attempting to visit each site via loop and extract "real" URL
# InTRO-step - try out on single link:
test1.link = "http://www.facebook.com/pages/Olivia-Chow/15535160141"
css.selector.test = "nth-child(29) a"
test.link.list = read_html(test1.link, encoding = "UTF-8") %>%
html_nodes(css = css.selector.test) %>%
html_attr(name = 'href')
### ... Not yet finished
### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### ####
#HELPERS
# NOTE(review): everything below in this section is scratch CSS/XPath
# selector notes copied from the browser inspector — it is NOT valid R and
# will fail to parse if this file is sourced. Comment these lines out or
# move them to a plain-text notes file.
## CSS-link:
body > table > tbody > tr:nth-child(3) > td.line-content > span:nth-child(12)
body > table > tbody > tr:nth-child(3) > td.line-content > span:nth-child(12) > span:nth-child(5)
body > table > tbody > tr:nth-child(3) > td.line-content > span:nth-child(12) > a
body > table > tbody > tr:nth-child(3) > td.line-content > span:nth-child(29) > a
#candidates > li:nth-child(43) > div.social > a:nth-child(2)
#candidates > li:nth-child(43)
#candidates > li:nth-child(43) > h2.name
body > table > tbody > tr:nth-child(421) > td.line-content > span:nth-child(5) > a
body > table > tbody > tr:nth-child(421) > td.line-content > span:nth-child(5) > a
body > table > tbody > tr:nth-child(421) > td.line-content > span:nth-child(5) > span:nth-child(3)
body > table > tbody > tr:nth-child(442) > td.line-content > span:nth-child(5) > a
body > table > tbody > tr:nth-child(442) > td.line-content > span:nth-child(5) > a
body > table > tbody > tr:nth-child(442) > td.line-content > span:nth-child(5) > a
body > div.main-section.member > div:nth-child(1) > div > aside > div > a:nth-child(1)
#modalLearn
#modal > div > div > div > div.w-col.w-col-8 > div
#modal > div > div > div > div.w-col.w-col-8
#modal > div > div > div > div.w-col.w-col-8
#modal > #modal > #modal > #modal > #modal > #modal > #modalLearn
#modalLearn
//*[@id="modalLearn"]
#modal > div > div > div > div.w-col.w-col-8 > div
#modalName
### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### #### ### ####
#WORK IN PROGRESS
# Step 1 ) Create function for getting URl's
# Fetch one politwitter facebook URL and return the href of the 14th <link>
# element in the page head (presumably the canonical page URL — verify the
# nth-child(14) selector against the live page).
scrape_page_fb = function(politwitter.link){
fb.page = read_html(politwitter.link)
# Bug fix: read the href ATTRIBUTE of the matched <link> nodes with
# html_attr(); the original piped into html_nodes('href'), which selects
# (nonexistent) child elements named <href> and always returned nothing.
fb.link.id = fb.page %>%
html_nodes("link:nth-child(14)") %>%
html_attr("href")
return(fb.link.id)
}
# Step 2) Loop through each page and performing above defined function i = 826
real.links.fb = list()
# NOTE(review): `real.links.fb` is created but never filled; the loop writes
# into politwitter.df$r.link instead, and `i` iterates over the URL strings
# themselves, so politwitter.df$r.link[[i]] indexes by URL.
for(i in politwitter.link){
print(paste("processing", i, sep = " "))
politwitter.df$r.link[[i]] = scrape_page_fb(i)
#wait 1s between requests to be polite to the server
Sys.sleep(1)
cat("done!\n")
}
### REMOVE ENTIRE COLUMNS
politwitter.df$r.link = NULL
politwitter.df$q.link = NULL
# NOTE(review): the next line is a stray CSS selector, not valid R.
head > link:nth-child(14)
########### Facebook static data on each candidate
## ...
library("Rfacebook")
library("readr")
library("stringr")
library("lubridate")
library("hexbin")
library("ggplot2")
# NOTE(review): hard-coded Facebook access token committed to source —
# revoke it and load credentials from an environment variable instead.
token = "CAACEdEose0cBAKOZA7MtJMDLQ5CcWGBoA3lFWaydJp4PZBPZAYk5HPgRZCRPyhZBYpdwGSF0vVnbOZAlgtC43QSsBtHE4zgf82VhnIEdyRkjcLjbnPRjk5GIQqZCrhZAV64r8UGbZAZC58fHsC5CGSZBGfsWLujVttp2jEuLlKP5EUKU01o8I1YEBZBy9RuIOxqEYSCQduWjja8ukQZDZD"
page = getPage("oalghabra", token, n =10)
# NOTE(review): `URLSub` is not defined anywhere in this script.
head(URLSub, 5)
|
a202791deb1e2ce301fba7c1d75b661c307a2e68
|
4ec101ac9e7fdc57510182243ace54747b5c404e
|
/scripts/mean_chip_raw_data_plot.R
|
26491e3335c9c1ff507117a8f43f055aae368123
|
[] |
no_license
|
satyanarayan-rao/tf_nucleosome_dynamics
|
e2b7ee560091b7a03fa16559096c1199d03362de
|
00bdaa23906460a3e5d95ac354830120c9dd108e
|
refs/heads/main
| 2023-04-07T17:29:40.644432
| 2021-04-12T14:16:20
| 2021-04-12T14:16:20
| 356,676,912
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 785
|
r
|
mean_chip_raw_data_plot.R
|
library(data.table)
library(dplyr)
library(R.utils)
library(ggplot2)
library(ggthemes)
library(stringr)
library(reshape2)
library(Cairo)
# Overlaid histograms of the signal columns of a combined ChIP table,
# written to both PDF and PNG.
# args[1]: combined data (TSV; any "chrom_loc" columns are dropped)
# args[2]: hist pdf file
# args[3]: hist png file
options(error=traceback)
args = commandArgs(trailingOnly = T)
dt = read.table(args[1], sep = "\t", header = T, stringsAsFactors = F)
# Keep only the signal columns (drop genomic-location columns).
dt_sub = dt[, !grepl("chrom_loc", names(dt))]
# Row index used solely as the melt id variable.
dt_sub["to_melt"] = seq(dim(dt_sub)[1])
to_plot_df = melt(dt_sub, id.vars = "to_melt")
print (head(to_plot_df))
pdf(args[2])
plt = ggplot(to_plot_df, aes(x = value, fill = variable)) +
geom_histogram(alpha = 0.5, position = "identity", bins = 50) +
geom_rangeframe() + theme_few()
print (plt)
dev.off()
Cairo::CairoPNG(args[3], height = 4, width = 6, units = "in", res = 150)
print(plt)
dev.off()
|
840e0346a133b2df0e6c38d2d41cfdd3e515fc34
|
0f380dcb3509961dbbcf59f8b2dfb1d70f92e993
|
/R/exonsAsSummarizedExperiment.R
|
f370cffea0d3a55c2bb9c000b7774dd6cf2f9bac
|
[] |
no_license
|
ttriche/regulatoR
|
dced0aa8c0f60b191c38d106b333f3dda84317fa
|
d7e6b00ef1514423fdf8ca32a73eebc715642161
|
refs/heads/master
| 2016-09-10T00:04:25.637227
| 2013-02-26T23:14:05
| 2013-02-26T23:14:05
| 4,615,162
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,644
|
r
|
exonsAsSummarizedExperiment.R
|
# processing (here I am assuming TCGA patient IDs as names)
#
# for i in *exon*; do
# j=`echo $i | cut -f1,3 -d'-' | tr - _`
# cat $i | cut -f1,4 | gzip > $j.rpkm.gz
# done
#
## FIXME: tabulate raw counts as well: FIXED! 2/22
#
# setwd("/where/you/keep/your/processed/RPKM/files")
# Assemble per-sample TCGA exon files ('*.rpkm.gz', columns: locus, count,
# RPKM — see the usage tips printed when called with no arguments) into a
# SummarizedExperiment with 'RPKM' and 'counts' assays, rows ordered by
# genomic position. genome.aligned, if given, is assigned as the genome
# build of the row ranges.
exonsAsSummarizedExperiment <- function(exons.gz=NULL, genome.aligned=NULL) {
  if(is.null(exons.gz)) { # {{{ print usage tips
    message("\n")
    message('How to use exonsAsSummarizedExperiment:')
    message("")
    message('1) process TCGA exon RPKMs with, say, bash:')
    message('$ for i in *exon*; do')
    message("> j=`echo $i | cut -f1,3 -d'-' | tr - _`")
    message("> cat $i | cut -f1,3,4 | gzip > $j.rpkm.gz")
    message('> done')
    message("")
    message("2) read them into a list of files in R:")
    message("R> exons.gz = list.files(pattern='.rpkm.gz')")
    message("")
    message("3) run the function using this list:")
    message("R> exons.RPKM = exonsAsSummarizedExperiment(exons.gz)")
    return(FALSE)
  } # }}}
  # Windows = crap (mclapply forks; it runs serially on Windows)
  require(parallel)
  # Parse exon coordinates ("chr:start-end:strand") from the first file;
  # all files are assumed to share the same row order.
  exons = read.delim(exons.gz[[1]], stringsAsFactors=F)[,1]
  exons = t(sapply(exons, function(x) strsplit(x, ':', fixed=TRUE)[[1]]))
  exons = cbind(exons[,1],
                t(sapply(exons[,2], function(x) strsplit(x,'-',fixed=T)[[1]])),
                exons[,3])
  rownames(exons) = 1:nrow(exons)
  exons = as.data.frame(exons)
  exons[,2:3] = apply(exons[,2:3], 2, as.numeric)
  names(exons) = c('chr','start','end','strand')
  # Matrix of counts (column 2 of each file).
  # Bug fix: previously this re-globbed list.files(patt='rpkm.gz$') from the
  # current working directory instead of honoring the exons.gz argument,
  # silently mismatching rows/samples when the two differed.
  counts <- do.call(cbind, mclapply(exons.gz, function(x) {
    read.delim(x, stringsAsFactors=FALSE)[,2]
  }))
  # Matrix of RPKM (column 3 of each file).
  RPKM <- do.call(cbind, mclapply(exons.gz, function(x) {
    read.delim(x, stringsAsFactors=FALSE)[,3]
  }))
  # Vector of sampleNames: file name up to the first '.'.
  IDs = unlist(lapply(exons.gz, function(x) {
    strsplit(x, '.', fixed=T)[[1]][1]
  }))
  colnames(RPKM) = colnames(counts) = IDs
  EXONS.se = SummarizedExperiment(assays=SimpleList(RPKM=RPKM, counts=counts),
                                  colData=DataFrame(sampleNames=IDs),
                                  rowData=df2GR(exons))
  colnames(EXONS.se) = EXONS.se$sampleNames # I don't know why I have to do this
  rm(exons) # the data.frame
  rm(counts) # the matrix
  rm(RPKM) # the matrix
  if(is.null(genome.aligned)) {
    message('Be sure to set genome(rowData(your.exons)) and assign $gene_id!')
  } else {
    genome(rowData(EXONS.se)) <- genome.aligned
  }
  return(EXONS.se[ order(rowData(EXONS.se)), ])
} # }}}
|
60e7a88717bc8507c49fd0c1fe9fedfbc58d4f7c
|
339364322e830270c930521da6edefa78b8b3bd3
|
/R/plot_all_inv_v0.R
|
797cc85c44774095994145d525209d5aed9db839
|
[] |
no_license
|
adsteen/subspec
|
a2cbbf304467e0e7faace06f7f713a393c62435f
|
d4803015a9075ad1b1d810fc1bbb982593f9624c
|
refs/heads/master
| 2016-09-11T00:42:10.532585
| 2015-07-12T23:20:06
| 2015-07-12T23:20:06
| 38,911,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,217
|
r
|
plot_all_inv_v0.R
|
##' Makes fig 4
##'
##' Dixon-style plot of 1/v0 against inhibitor concentration for all
##' substrate/location combinations, with a linear fit per facet.
##'
##' @param d data frame with columns conc.pNA, nM.per.hr, se.nM.per.hr,
##'   location, AMC.substrate and pNA.subs
##' @param print_plot print the plot to the active device?
##' @param save_plot write the plot to disk via ggsave?
##' @param fn output file name; defaults to "all_inv_v0.png" when NA
##' @param height,width plot size in inches, passed to ggsave
##' @param dpi resolution passed to ggsave
##' @param ... unused; kept for backward compatibility
##' @return the ggplot object
##' @export
plot_all_inv_v0 <- function(d, print_plot=TRUE, save_plot=FALSE, fn=NA, height=4, width=7, dpi=300, ...) {
  # Make a plot of all inverse v0 vs inhibitor concentration
  p_all_inv_v0 <- ggplot(d, aes(x=conc.pNA, y=1/nM.per.hr)) +
    geom_point(size=1) +
    geom_errorbar(aes(ymin=1/(nM.per.hr + se.nM.per.hr), ymax=1/(nM.per.hr - se.nM.per.hr))) +
    geom_smooth(method="lm", colour="black") +
    #scale_colour_manual(values=c("#56A0D3", "#F77F00")) +
    #scale_fill_manual(values=c("#56A0D3", "#F77F00")) +
    scale_x_continuous(breaks=c(0, 100, 200), labels=c(0, 100, 200)) +
    expand_limits(y=0) +
    xlab(expression(paste("[I], ", mu, "M"))) +
    ylab(expression(paste(1 / v[0], ", nM ", hr^{-1}))) +
    facet_grid(location + AMC.substrate ~ pNA.subs, scales="free_y") +
    theme(legend.position="top",
          axis.text.x = element_text(angle=-45, hjust=0))
  if (print_plot) {
    print(p_all_inv_v0)
  }
  if (save_plot) {
    if (is.na(fn)) {
      # Bug fix: default file name previously depended on an undefined
      # global `path`, which errored whenever fn was left as NA.
      fn <- "all_inv_v0.png"
    }
    # Bug fix: honor the dpi argument (previously referenced an undefined
    # global `myDPI`).
    ggsave(fn, height = height, width=width, units="in", dpi=dpi, type="cairo")
  }
  p_all_inv_v0
}
|
fe007152c3d85317740f7b2c3aa224b750a42eee
|
8f5dd342a8630748449eb50e3f9462d448663350
|
/R/convertToTime.r
|
b7764160544fcddb3ec3db53288c3e5b62ada84d
|
[] |
no_license
|
gleday/ShrinkNet
|
87e484f997185d331a1e486b3e6921adfdc314c1
|
cf1513cd86cfb4db4ee0973304841a2cf92169cc
|
refs/heads/master
| 2020-05-21T03:26:08.766503
| 2018-04-06T19:36:57
| 2018-04-06T19:36:57
| 43,956,548
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 353
|
r
|
convertToTime.r
|
# Internal function
# Format a number of seconds as "H:MM:SS": hours unpadded, minutes and
# seconds zero-padded to two digits. Seconds are rounded to the nearest
# whole second.
# Author: Gwenael G.R. Leday
.convertToTime <- function(x){
	hrs  <- as.character(x %/% 3600)
	rem  <- x %% 3600
	mins <- as.character(rem %/% 60)
	secs <- as.character(round(rem %% 60))
	if (nchar(mins) == 1) mins <- paste0("0", mins)
	if (nchar(secs) == 1) secs <- paste0("0", secs)
	paste(hrs, mins, secs, sep = ":")
}
|
9b6ec18a0407660513844b49527edd3319bfbbc3
|
5c033f7e6c842882d11ccadd2e110e19d7cb42f9
|
/predictive-model/text2vec_impl/create-dtm.R
|
b56578c798f8da13d63585384afb744b356b1e43
|
[] |
no_license
|
natereed/coursera-data-science-capstone-old
|
b6f70d0738f3a6be0af04518a28490dfcf8ddc1a
|
7b0466008a7b1fd2d5c536605e8959e081f0d4ee
|
refs/heads/master
| 2021-01-17T18:09:30.029996
| 2016-08-23T11:54:52
| 2016-08-23T11:54:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 988
|
r
|
create-dtm.R
|
# Build a document-term matrix of 1- to 3-grams from the Coursera capstone
# US-English blog corpus, then inspect term frequencies of a sample n-gram.
# Install the first time: devtools::install_github('dselivanov/text2vec')
library(text2vec)
# Corpus location on disk (one blog post per line).
dir <- file.path("~", "Coursera", "Capstone", "final", "en_US")
blogs_data <- readLines(file.path(dir, "en_US.blogs.txt"))
length(blogs_data);
# [1] 899288
# First pass over the corpus: lowercase, tokenize, and collect the
# vocabulary of unigrams through trigrams.
it <- itoken(blogs_data,
preprocess_function = tolower,
tokenizer = word_tokenizer);
vocab <- create_vocabulary(it, ngram=c(1L, 3L));
vectorizer <- vocab_vectorizer(vocab)
# Reinitialize iterator
# (itoken iterators are single-pass, so a fresh one is needed for the
# second sweep that actually builds the DTM).
it <- itoken(blogs_data,
preprocess_function = tolower,
tokenizer = word_tokenizer);
dtm <- create_dtm(it, vectorizer, type='dgTMatrix')
# Term frequencies of n-grams that match "real_things"
# Corresponds to all the documents in the corpus
dtm[,c("real_things")]
# Which term frequencies are greater than zero?
which(dtm[,c("real_things")] > 0)
# Get the term frequencies of all n-grams for documents that contain the n-gram "real_things"
dtm[which(dtm[,c("real_things")] > 0),]
|
d25db5e6796d0d02f9d847d18da9ee977652daa8
|
8b61baaf434ac01887c7de451078d4d618db77e2
|
/R/readLine.R
|
3fef13a65c22fd7a549fbdea8fc3ea9b6f7a9296
|
[] |
no_license
|
drmjc/mjcbase
|
d5c6100b6f2586f179ad3fc0acb07e2f26f5f517
|
96f707d07c0a473f97fd70ff1ff8053f34fa6488
|
refs/heads/master
| 2020-05-29T19:36:53.961692
| 2017-01-17T10:54:00
| 2017-01-17T10:54:00
| 12,447,080
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 800
|
r
|
readLine.R
|
#' readLine, and split on character
#'
#' read a single line from a file, or connection, and then
#' split the result on a separator pattern (regular expression),
#' eg tab, comma, or spaces
#'
#' @param file a file path, or open connection
#' @param split the regular expression to split on. eg tab, comma, or default=" +" (runs of spaces)
#' @param ok unused; kept for backward compatibility with existing callers
#'
#' @return a character vector of length >= 0; a zero-length result means
#'   the connection was exhausted (no line left to read)
#'
#' @author Mark Cowley
#' @export
readLine <- function(file, split=" +", ok=FALSE) {
	tmp <- readLines(file, n=1, ok=TRUE)
	if( length(tmp) == 0 )
		return(tmp)
	# trimws() is the base-R equivalent of the package-local trim() helper the
	# original called: strip leading/trailing whitespace before splitting.
	strsplit(trimws(tmp), split)[[1]]
}
#' skipLine
#'
#' Read and discard a single line from an open file connection, advancing
#' the read position past it.
#'
#' @param file an open connection
#'
#' @return the discarded line, invisibly (the original's trailing assignment
#'   likewise returned its value invisibly)
#'
#' @author Mark Cowley
#' @export
skipLine <- function(file) {
	invisible(readLines(file, n=1, ok=TRUE))
}
|
19665fe9ef85fff97b1ef5c33f5b249222c40cc9
|
af34ab9351b7e004b501dff4c5bb78f523e0d345
|
/Script/9_EGSL_compile.r
|
2859db78e142b0a691c56d5cfe5f4b6fecc97fe5
|
[
"MIT"
] |
permissive
|
david-beauchesne/Interaction_catalog
|
6aca5d257fcaf426bc0363a97bfe134b471bc3b4
|
4e6ff0ba5571ae6ed5c5673acfd9e69b3fd53612
|
refs/heads/master
| 2021-01-20T19:05:11.705068
| 2018-06-12T19:53:32
| 2018-06-12T19:53:32
| 65,501,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,960
|
r
|
9_EGSL_compile.r
|
# Compiling available data for EGSL species
# NOTE(review): Biotic_inter appears to be a list where [[2]] is a taxonomy
# lookup (rows named by taxon, with a 'rank' column), [[3]] is a table of
# pairwise consumer/resource interactions, and [[4]] is the EGSL taxa table —
# inferred from the column names used below; confirm against the RData file.
load("RData/Biotic_inter.RData")
# One row per EGSL taxon; each column counts catalogued interactions that
# involve that taxon at the given taxonomic rank.
EGSL_inter <- matrix(nrow = nrow(Biotic_inter[[4]]), ncol = 6, dimnames = list(Biotic_inter[[4]][, 'taxon'], c('species','genus','family','order','class','phylum')))
pb <- txtProgressBar(min = 0,max = nrow(Biotic_inter[[4]]), style = 3)
for(i in 1:nrow(Biotic_inter[[4]])) {
# A taxon "has" an interaction if it appears as either consumer or resource.
EGSL_inter[i, 'species'] <- length(which(Biotic_inter[[3]][, 'cons_species'] == Biotic_inter[[4]][i, 'species'] | Biotic_inter[[3]][, 'res_species'] == Biotic_inter[[4]][i, 'species']))
EGSL_inter[i, 'genus'] <- length(which(Biotic_inter[[3]][, 'cons_genus'] == Biotic_inter[[4]][i, 'genus'] | Biotic_inter[[3]][, 'res_genus'] == Biotic_inter[[4]][i, 'genus']))
EGSL_inter[i, 'family'] <- length(which(Biotic_inter[[3]][, 'cons_family'] == Biotic_inter[[4]][i, 'family'] | Biotic_inter[[3]][, 'res_family'] == Biotic_inter[[4]][i, 'family']))
EGSL_inter[i, 'order'] <- length(which(Biotic_inter[[3]][, 'cons_order'] == Biotic_inter[[4]][i, 'order'] | Biotic_inter[[3]][, 'res_order'] == Biotic_inter[[4]][i, 'order']))
EGSL_inter[i, 'class'] <- length(which(Biotic_inter[[3]][, 'cons_class'] == Biotic_inter[[4]][i, 'class'] | Biotic_inter[[3]][, 'res_class'] == Biotic_inter[[4]][i, 'class']))
EGSL_inter[i, 'phylum'] <- length(which(Biotic_inter[[3]][, 'cons_phylum'] == Biotic_inter[[4]][i, 'phylum'] | Biotic_inter[[3]][, 'res_phylum'] == Biotic_inter[[4]][i, 'phylum']))
setTxtProgressBar(pb, i)
} #i
close(pb)
# Per-rank interaction counts, recomputed as flat vectors for the coverage
# percentages below.
EGSL_species_inter <- numeric(nrow(Biotic_inter[[4]]))
EGSL_genus <- unique(Biotic_inter[[4]][, 'genus'])
EGSL_family <- unique(Biotic_inter[[4]][, 'family'])
EGSL_genus_inter <- numeric(length(EGSL_genus))
EGSL_family_inter <- numeric(length(EGSL_family))
# Number of interactions for EGSL species
for(i in 1:nrow(Biotic_inter[[4]])) {
EGSL_species_inter[i] <- length(which(Biotic_inter[[3]][, 'cons_species'] == Biotic_inter[[4]][i, 'species'] | Biotic_inter[[3]][, 'res_species'] == Biotic_inter[[4]][i, 'species']))
}
# Number of interactions for EGSL genus
for(i in 1:length(EGSL_genus)) {
EGSL_genus_inter[i] <- length(which(Biotic_inter[[3]][, 'cons_genus'] == EGSL_genus[i] | Biotic_inter[[3]][, 'res_genus'] == EGSL_genus[i]))
}
# Number of interactions for EGSL families
for(i in 1:length(EGSL_family)) {
EGSL_family_inter[i] <- length(which(Biotic_inter[[3]][, 'cons_family'] == EGSL_family[i] | Biotic_inter[[3]][, 'res_family'] == EGSL_family[i]))
}
# Percentage of taxa at each rank with at least one catalogued interaction.
Species_inter <- (sum(EGSL_species_inter > 0) / length(EGSL_species_inter)) * 100
Genus_inter <- (sum(EGSL_genus_inter > 0) / length(EGSL_genus_inter)) * 100
Family_inter <-(sum(EGSL_family_inter > 0) / length(EGSL_family_inter)) * 100
# Interactions by rank
Taxa_inter <- unique(c(Biotic_inter[[3]][, 'consumer'],Biotic_inter[[3]][, 'resource']))
Taxa_inter_rank <- character(length(Taxa_inter))
for(i in 1:length(Taxa_inter)){
Taxa_inter_rank[i] <- Biotic_inter[[2]][Taxa_inter[i], 'rank']
}
|
b725bea968fd3d40270eff51c2d93d67386be04d
|
5054535a86ac34f6ee92fab3a0c7178c6657303b
|
/src/scripts/r/transduce.r
|
43f77352cb045930d2ded4c83977a8654473355a
|
[
"Apache-2.0"
] |
permissive
|
palisades-lakes/collection-experiments
|
b46907a02b436e6cbf9ffb9f796ff99d97ce6bae
|
bd96ca2c58afc5f18d301a8259a26540748c75df
|
refs/heads/master
| 2023-08-25T11:58:24.927470
| 2023-08-01T19:11:13
| 2023-08-01T19:11:13
| 113,613,449
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,077
|
r
|
transduce.r
|
# filter-map-reduce experiments
# palisades dot lakes at gmail dot com
# version 2021-02-01
#
# Loads benchmark results (filter/map/reduce "transduce" timings) and writes
# quantile plots per algorithm and per container type.
# NOTE(review): read.data, quantile.log.log.plot and quantile.log.lin.plot
# come from functions.R sourced below — their contracts are assumed here.
#-----------------------------------------------------------------
# Pick the project root depending on which machine this runs on.
if (file.exists('e:/porta/projects/collection-experiments')) {
setwd('e:/porta/projects/collection-experiments')
} else {
setwd('c:/porta/projects/collection-experiments')
}
source('src/scripts/r/functions.R')
#-----------------------------------------------------------------
# Data selection: which JDK/Clojure run, machine, date, and benchmark.
#parentFolder <- 'data-jdk9.0.1-clj1.9.0/scripts/'
parentFolder <- 'data-jdk15.0.1-clj1.10.1/scripts/'
#hardware <- 'LENOVO.20HRCTO1WW' # X1
hardware <- 'LENOVO.20ERCTO1WW' # P70
#theday = '2017121[8]-[0-9]{4}'
theday = '20210131-[0-9]{4}'   # regex matched against timestamped file names
benchmark <- 'transduce'
#-----------------------------------------------------------------
data <- read.data(
parentFolder=parentFolder,
benchmark,hardware,theday)
#-----------------------------------------------------------------
# Output folder: docs/<hardware>/<benchmark>/
plot.folder <- file.path('docs',hardware,benchmark)
dir.create(
plot.folder,
showWarnings=FALSE,
recursive=TRUE,
mode='0777')
#-----------------------------------------------------------------
# Subset: hand-inlined loop vs the reduce/map/filter transducer variant.
inline <- data[(data$algorithm=='inline') | (data$algorithm=='transducer_rmf'),]
quantile.log.log.plot(
data=inline,
fname='inline',
ymin='lower.q',
y='median',
ymax='upper.q',
plot.folder=plot.folder,
group='algorithm',
colors=algorithm.colors,
facet='containers',
ylabel='msec')
quantile.log.lin.plot(
data=inline,
fname='inline',
ymin='lower.q.per.element',
y='median.per.element',
ymax='upper.q.per.element',
plot.folder=plot.folder,
group='algorithm',
colors=algorithm.colors,
facet='containers',
ylabel='nanosec-per-element')
#-----------------------------------------------------------------
# All algorithms: total time, grouped by algorithm then by container.
quantile.log.log.plot(
data=data,
fname='all',
ymin='lower.q',
y='median',
ymax='upper.q',
plot.folder=plot.folder,
group='algorithm',
colors=algorithm.colors,
facet='containers',
ylabel='msec')
quantile.log.log.plot(
data=data,
fname='all',
ymin='lower.q',
y='median',
ymax='upper.q',
plot.folder=plot.folder,
group='containers',
colors=container.colors,
facet='algorithm',
ylabel='msec')
# Per-element time, same two groupings.
quantile.log.lin.plot(
data=data,
fname='all',
ymin='lower.q.per.element',
y='median.per.element',
ymax='upper.q.per.element',
plot.folder=plot.folder,
group='algorithm',
colors=algorithm.colors,
facet='containers',
ylabel='nanosec-per-element')
quantile.log.lin.plot(
data=data,
fname='all',
ymin='lower.q.per.element',
y='median.per.element',
ymax='upper.q.per.element',
plot.folder=plot.folder,
group='containers',
colors=container.colors,
facet='algorithm',
ylabel='nanosec-per-element')
#-----------------------------------------------------------------
#cols <- c('benchmark','algorithm','nmethods',
# 'lower.q','median', 'upper.q','millisec',
# 'overhead.lower.q','overhead.median', 'overhead.upper.q',
# 'overhead.millisec',
# 'nanosec','overhead.nanosec')
#-----------------------------------------------------------------
|
e0c53a09d380dde8d80821c23bb8ba5d649b77db
|
251df421cec78612cbf56db7a0cbf2078b205dcd
|
/debug_na.R
|
b33c0d2df708ff8585577273c0247ea389e7df2b
|
[
"MIT"
] |
permissive
|
deponent-verb/popgen.analysis.pipeline
|
7987d6e12f3b57ea70ce62dc0d3987e6d02eeb05
|
ae482e915c7b2baca87242717cb6a0f19ca08792
|
refs/heads/master
| 2021-08-19T04:57:27.080926
| 2021-07-03T04:04:17
| 2021-07-03T04:04:17
| 213,124,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
debug_na.R
|
# Debugging NA values script: locate which statistic columns of the toy
# data frame produce NA rows.
pacman::p_load(tidyverse)
df<-read_csv("./data/toy_df.csv")
# Extract rows with any NA among the H-statistic columns.
# NOTE(review): column ranges 1:14 (H) and 15:24 (D) are assumed from the
# comments; verify against toy_df.csv's actual layout.
temp<-df[,1:14]
temp2<-temp %>% filter_all(any_vars(is.na(.)))
# Extract rows with any NA among the D-statistic columns.
temp<-df[,15:24]
temp1<-temp %>% filter_all(any_vars(is.na(.)))
# Conclusion from inspecting temp1/temp2: D is not outputting NAs.
#new_data <- data %>% filter_all(any_vars(is.na(.)))
|
7a385463f3c81e6a878ea7b1b4970af63d08bc9b
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Managerial_Statistics_by_Gerald_Keller/CH8/EX8.2/Ex8_2.R
|
edd5584605338b485c95028f2463c8cd8fa153b7
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 72
|
r
|
Ex8_2.R
|
### page_no_261 ###
# Exercise 8.2: P(X <= 1100) for X ~ Normal(mean = 1000, sd = 100).
rm(list = ls())  # start from an empty workspace, as in the textbook scripts
m <- 1000        # population mean
s <- 100         # population standard deviation
n <- 1100        # value of interest
pnorm(n, m, s)   # cumulative probability P(X <= 1100)
|
ed0314db9bc839a808c4396488e0e77a1d7f10ac
|
7a5927014872451f3a79438a11da07d8ba22c982
|
/k-Means Clustering.R
|
d94eb3dde98e3c716df311400a21cc3dccda9a34
|
[] |
no_license
|
ank234/k-Means-Clustering-on-Dungaree-Data-Set
|
f4df9944c10097d8e2858e5ce6bf868c2e4be21a
|
046d6cf6057f1bd1f1711eafe3af1fb2f896e6e9
|
refs/heads/master
| 2020-04-22T14:22:53.295159
| 2018-09-03T19:13:54
| 2018-09-03T19:13:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,623
|
r
|
k-Means Clustering.R
|
# Load the dungaree sales data, z-score the numeric sales columns, and
# collect per-column outliers ahead of k-means clustering.
# read csv file
dungree <- read.csv("E:/GitHub Projects/K-Means Clustering/dungaree.csv")
#View(dungree)
# normalize data (z-scores of columns 2:6; column 1 is assumed to be an ID)
dungree.norm<- sapply(dungree[,2:6],scale)
#View(dungree.norm)
colnames(dungree.norm) <- c('z_fashion','z_leisure','z_stretch','z_original','z_salestot')
# df holds the raw columns (1:6) followed by the z-scored copies (7:11).
df <- cbind(dungree,dungree.norm)
#View(df)
head(df)
#check for missing values
sum(is.na(df$FASHION))
sum(is.na(df$STRETCH))
sum(is.na(df$LEISURE))
sum(is.na(df$ORIGINAL))
sum(is.na(df$SALESTOT))
sum(is.na(df))
#boxplot of the dungree data set (z-scored type columns only)
boxplot(df[,7:10], xlab = "Type of Dungaree", ylab = "z-score of the Number of jeans sold ", main = "Boxplot of Types of Dungarees")
# check for the outliers (values beyond the boxplot whiskers, per column)
fashion.outlier <- boxplot.stats(df$z_fashion)$out
leisure.outlier <- boxplot.stats(df$z_leisure)$out
stretch.outlier <- boxplot.stats(df$z_stretch)$out
original.outlier <- boxplot.stats(df$z_original)$out
# create unique vectors of the outlier values
fashion.outlier.un <- unique(fashion.outlier)
leisure.outlier.un<- unique(leisure.outlier)
stretch.outlier.un <- unique(stretch.outlier)
original.outlier.un <- unique(original.outlier)
outlier_value <- function(x, factor){
v <- vector("numeric", length = 0)
for (i in 1:length(x)){
for (j in 1:length(factor)){
if (x[i] == factor[j]){
v<- c(v,i)}
}
}
return(v)
}
# find the rows containing outliers, one index vector per z-scored column
ve1 <- outlier_value(df$z_fashion,fashion.outlier.un)
ve2 <- outlier_value(df$z_leisure,leisure.outlier.un)
ve3 <- outlier_value(df$z_stretch,stretch.outlier.un)
ve4 <- outlier_value(df$z_original,original.outlier.un)
# remove the rows with outlier values
# NOTE(review): ve2/ve3/ve4 were computed against the ORIGINAL df, but each
# removal below shifts the remaining row positions, so the later removals
# drop the wrong rows. All four index sets should be combined and removed in
# a single step (df <- df[-unique(c(ve1,ve2,ve3,ve4)),]) — confirm intent.
df <- df[-ve1,]
df <- df[-ve2,]
df <- df[-ve3,]
df <- df[-ve4,]
boxplot(df[,7:10], xlab = "Type of Dungaree", ylab = "z-score of the Number of jeans sold ", main = "Boxplot of Types of Dungarees")
View(df)
set.seed(42)
# Use the ID column as row names for cluster labelling.
row.names(df) <- df[,1]
View(df)
# # removing the dependent column
# df <- df[, c(-1,-6)]
# # normalize data
# dungree.norm<- sapply(dungree[,2:6],scale)
# View(dungree.norm)
# colnames(dungree.norm) <- c('z_fashion','z_leisure','z_stretch','z_original','z_salestot')
# df <- cbind(dungree,dungree.norm)
# View(df)
# Let NbClust vote on the best cluster count across its criteria.
library(NbClust)
devAskNewPage(ask=TRUE)
nc <- NbClust(df[,7:10], min.nc=2, max.nc=10, method="kmeans")
table(nc$Best.n[1,])
barplot(table(nc$Best.n[1,]), xlab="Number of Clusters", ylab="Number of criteria", main="Number of clusters chosen by criteria")
# # Perform k-means cluster analysis
# fit.km <- kmeans(df[,7:10], centers = 10, nstart=25)
# fit.km
# fit.km$cluster
# fit.km$centers
# fit.km$size
wssplot <- function(data, nc=10, seed=1234) {
wss <- (nrow(df)-1)*sum(apply(df[,7:10], 2, var))
for (i in 2:10) {
set.seed(1234)
wss[i] <- sum(kmeans(data, centers=i)$withinss)
}
plot(1:10, wss, type="b",main = "Optimal Number of Clusters" , xlab="Number of clusters", ylab="within groups sum of squares")
}
# Elbow plot over the four z-scored type columns; the red line marks the
# visually chosen elbow at k = 6.
wssplot(df[,7:10])
abline(v=6, col="red", lty=2, lwd=3)
#k-means for 6 clusters
fit.km <- kmeans(df[,7:10], 6, nstart=25)
fit.km
#k-means for 5 clusters
# NOTE(review): this overwrites the 6-cluster fit; everything below uses
# the 5-cluster solution.
fit.km <- kmeans(df[,7:10], 5, nstart=25)
fit.km
# tabularize the results of k-means clustering (cluster sizes)
table(fit.km$cluster)
# fit.km <- kmeans(df.norm, 10, nstart=25)
# fit.km
library(factoextra)
#with(df[,7:10], pairs(df[,7:10], col=c(1:3)[fit.km$cluster]))
fviz_cluster(fit.km, df[,7:10])
|
6023942daa25de81983ed6ae6a3379807adeba2e
|
597fb95d3edf6c8904874d065db7f2623db23848
|
/src/deliveries.R
|
47a105e90708a9f88e8d730dfd86d26b1d630745
|
[] |
no_license
|
danyx23/covid_vaccinations
|
ac66375932e2a9d080b0509c240592b8701d0a48
|
231d42eae99e132f00fc1456236b0f33be654e87
|
refs/heads/main
| 2023-02-18T00:02:24.305060
| 2021-01-08T12:19:43
| 2021-01-08T12:19:43
| 327,906,705
| 0
| 0
| null | 2021-01-08T13:11:49
| 2021-01-08T13:11:48
| null |
UTF-8
|
R
| false
| false
| 1,312
|
r
|
deliveries.R
|
library(dplyr)
library(readr)
# break deliveries down by state in proportion to population
# from https://twitter.com/BMG_Bund/status/1345012835252887552
#
# Builds a per-Bundesland schedule of vaccine dose deliveries (each national
# delivery split proportionally to state population) and writes it to parquet.
# Known national deliveries: 1.3M doses over 3 dates, then 2.8M over 4 dates.
deliveries <- tibble(
  doses = c(
    rep(1.3e6/3, 3),
    rep(2.8e6/4, 4)
  ),
  delivery_date = lubridate::dmy(c(
    paste0(c('26.12.', '28.12.', '30.12.'), '2020'),
    paste0(c('8.1.', '18.1.', '25.1.', '1.2.'), '2021')
  )),
  vaccine_name = 'Pfizer/BioNTech'
)
# State population table from the BMBF data portal (semicolon-separated,
# German number formatting, latin1 encoding; the header block is skipped).
bundeslaender <- readr::read_delim(
  'https://www.datenportal.bmbf.de/portal/Tabelle-1.10.2.csv',
  delim = ';',
  skip = 7,
  col_names = FALSE,
  locale=readr::locale(encoding = "latin1", decimal_mark=',', grouping_mark = '.')
)
# Keep the 16 state rows; column 1 = state name, column 15 = population
# (in thousands). NOTE(review): column positions are tied to the remote
# file's layout — verify if the portal changes the table.
bundeslaender <- bundeslaender[1:16,c(1,15)]
bundeslaender %>%
  purrr::set_names('bundesland', 'population') %>%
  mutate(population = population * 1000) %>%
  mutate(population_share = population / sum(population)) -> bundeslaender
# Cross every delivery with every state, scale doses by population share,
# and accumulate a running total per state.
deliveries %>%
  tidyr::crossing(bundeslaender) %>%
  mutate(doses = doses * population_share) %>%
  group_by(bundesland) %>%
  mutate(cumulative_doses = cumsum(doses)) %>%
  ungroup %>%
  arrange(bundesland, delivery_date) -> deliveries_by_state
dir.create(here::here('data/processed'), showWarnings = FALSE)
deliveries_by_state %>%
  arrow::write_parquet(here::here('data/processed/deliveries.parquet'))
|
1019215e5c914525ff90dba387b5fc2f0dd9b2f9
|
4c78bb06198a510622640f4052d1abf770a28fbb
|
/server.R
|
5ee7262c82f2daf3764bee3cddde88e11da1a80f
|
[] |
no_license
|
qg0/options
|
c07ba8aa057b437ecaf1a2f836fba996000cc141
|
dcc901c9703ffa462e346bc8711792b98135aeac
|
refs/heads/master
| 2021-05-29T06:31:00.410873
| 2015-09-27T16:00:59
| 2015-09-27T16:00:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,558
|
r
|
server.R
|
library(shiny)
# Black-Scholes Function
# Black-Scholes price of a European option.
#
# S: spot price, K: strike, T: time to expiry (years), r: risk-free rate,
# sig: annualized volatility, type: "C" for a call, "P" for a put.
# Any other `type` errors with "object 'value' not found", as before.
BS <-
function(S, K, T, r, sig, type="C"){
	vol_sqrt_t <- sig * sqrt(T)
	d1 <- (log(S / K) + (r + sig^2 / 2) * T) / vol_sqrt_t
	d2 <- d1 - vol_sqrt_t
	if (type == "C") {
		value <- S * pnorm(d1) - K * exp(-r * T) * pnorm(d2)
	}
	if (type == "P") {
		value <- K * exp(-r * T) * pnorm(-d2) - S * pnorm(-d1)
	}
	value
}
## Black-Scholes implied volatility via bisection.
##
## S: spot price, K: strike, T: time to expiry (years), r: risk-free rate,
## market: observed option price to match, type: "C" (call) or "P" (put).
## Searches sigma in [0.001, 1] for the volatility whose BS() price equals
## `market` to within 1e-5; returns NA if 1000 iterations do not converge.
## Example call from this file:
##   implied.vol(S, dat$K[i], T, r, dat$C.Ask[i], "C")
implied.vol <-
function(S, K, T, r, market, type){
sig <- 0.20       # initial guess (not the bracket midpoint, by design)
sig.up <- 1       # upper bound of the volatility bracket
sig.down <- 0.001 # lower bound of the volatility bracket
count <- 0
err <- BS(S, K, T, r, sig, type) - market
## repeat until error is sufficiently small or counter hits 1000
while(abs(err) > 0.00001 && count<1000){
if(err < 0){
# Model price too low: true sigma is above `sig` — raise the lower bound.
sig.down <- sig
sig <- (sig.up + sig)/2
}else{
# Model price too high: true sigma is below `sig` — lower the upper bound.
sig.up <- sig
sig <- (sig.down + sig)/2
}
err <- BS(S, K, T, r, sig, type) - market
count <- count + 1
}
## return NA if counter hit 1000
if(count==1000){
return(NA)
}else{
return(sig)
}
}
# P(price ends below TargetPrice) under a zero-drift lognormal model:
# Phi( ln(Target / Current) / (sigma * sqrt(t)) ).
# VolatilityPerPeriod is the volatility per unit of TimePeriod.
ProbabilityStockPriceBelow <- function (CurrentPrice, TargetPrice, VolatilityPerPeriod, TimePeriod)
{
	z_score <- log(TargetPrice / CurrentPrice) / (VolatilityPerPeriod * sqrt(TimePeriod))
	pnorm(z_score)
}
# P(price ends above TargetPrice): the complement of the "below" probability,
# 1 - Phi( ln(Target / Current) / (sigma * sqrt(t)) ).
ProbabilityStockPriceAbove <- function (CurrentPrice, TargetPrice, VolatilityPerPeriod, TimePeriod)
{
	z_score <- log(TargetPrice / CurrentPrice) / (VolatilityPerPeriod * sqrt(TimePeriod))
	1 - pnorm(z_score)
}
#####################################################################################
# Black-Scholes building blocks.
# Arguments throughout: Current = spot price, Strike = strike, TBillRate =
# risk-free rate, Volatility = annualized volatility, FractionalYear = time
# to expiry in years. All functions are vectorized in their arguments.

# d1 term of the Black-Scholes formula.
BlackScholesDen1 <- function(Current, Strike, TBillRate, Volatility, FractionalYear)
{
	drift_up <- TBillRate + (Volatility * Volatility) / 2
	(log(Current / Strike) + drift_up * FractionalYear) / (Volatility * sqrt(FractionalYear))
}

# d2 term of the Black-Scholes formula (equals d1 - sigma * sqrt(t)).
BlackScholesDen2 <- function(Current, Strike, TBillRate, Volatility, FractionalYear)
{
	drift_down <- TBillRate - (Volatility * Volatility) / 2
	(log(Current / Strike) + drift_down * FractionalYear) / (Volatility * sqrt(FractionalYear))
}

# Call delta, N(d1).
BlackScholesCallHedgeRatio <-function(Current, Strike, TBillRate, Volatility, FractionalYear)
{
	pnorm(BlackScholesDen1(Current, Strike, TBillRate, Volatility, FractionalYear))
}

# Put delta: call delta minus one.
BlackScholesPutHedgeRatio <- function(Current, Strike, TBillRate, Volatility, FractionalYear)
{
	BlackScholesCallHedgeRatio(Current, Strike, TBillRate, Volatility, FractionalYear) - 1
}

#####################
# Call value: S * N(d1) - K * exp(-r t) * N(d2).
BlackScholesCallValue <- function (Current, Strike, TBillRate, Volatility, FractionalYear)
{
	delta <- BlackScholesCallHedgeRatio(Current, Strike, TBillRate, Volatility, FractionalYear)
	exercise_prob <- pnorm(BlackScholesDen2(Current, Strike, TBillRate, Volatility, FractionalYear))
	discounted_strike <- Strike * exp(-TBillRate * FractionalYear)
	(Current * delta) - (exercise_prob * discounted_strike)
}

# Put value: K * exp(-r t) * N(-d2) - S * N(-d1).
BlackScholesPutValue <- function(Current, Strike, TBillRate, Volatility, FractionalYear)
{
	neg_delta <- pnorm(-BlackScholesDen1(Current, Strike, TBillRate, Volatility, FractionalYear))
	neg_exercise_prob <- pnorm(-BlackScholesDen2(Current, Strike, TBillRate, Volatility, FractionalYear))
	discounted_strike <- Strike * exp(-TBillRate * FractionalYear)
	(neg_exercise_prob * discounted_strike) - (Current * neg_delta)
}
##############################################################################
# Average model-minus-market price for one option over a set of hypothetical
# underlying prices.
#
# K: strike; type: "Call" selects the call pricer, anything else the put
# pricer (matching the original if/else); priceList: vector of hypothetical
# underlying prices; currentPrice: the option's observed market price.
# vola, tBill, FractionalYear: scenario parameters — they default to the
# values the original hard-coded in the body (27.9% vol, 2.16% rate, 71
# days) but can now be overridden; backward compatible for all callers.
#
# Returns mean over priceList of (Black-Scholes value - currentPrice);
# NaN for an empty priceList, as before (0/0 in the original).
optionPrice <- function(K, type, priceList, currentPrice,
                        vola = 27.9 / 100, tBill = 2.16 / 100,
                        FractionalYear = 71 / 365.25){
  # Both pricers are vectorized in their first argument, so the original
  # element-by-element loop is unnecessary.
  values <- if (type == "Call") {
    BlackScholesCallValue(priceList, K, tBill, vola, FractionalYear)
  } else {
    BlackScholesPutValue(priceList, K, tBill, vola, FractionalYear)
  }
  mean(values) - currentPrice
}
#####################################################################################
# Load and enrich the option-chain snapshot used by the shiny server below:
# mid prices, single-letter option type, Black-Scholes implied volatility and
# the implied probability of finishing below the strike.
#path <- "C:/Users/fteschner/Desktop/"
#prices <- read.csv(paste(path, "OptionPrices.csv", sep=""), sep="|")
prices<- read.csv("OptionPrices.csv", sep="|")
# Drop quotes with no real ask price.
prices <- prices[which(prices$ask > 0.001),]
prices$mid <- (prices$ask +prices$bid )/2
prices$type2 <- ifelse(prices$type =="Call", "C", "P")
prices$avgPrice <- NA
### clean dataset!
prices$impliedVola <- NA
# NOTE(review): this masks base R's TRUE alias `T` with time-to-expiry —
# works, but any later bare `T` in this session now means 35/365.
T <- 35/365
r <- 0.01
## Back out implied volatility per option row from its mid price.
for (i in 1:nrow(prices)){
prices[i,]$impliedVola <-implied.vol(prices[i,]$stockprice, prices[i,]$strike , T, r, prices[i, ]$mid , prices[i, ]$type2)
}
prices$impliedProb <- NA
## Implied probability of the stock finishing below each strike.
for (i in 1:nrow(prices)){
prices[i,]$impliedProb <-ProbabilityStockPriceBelow(prices[i,]$stockprice, prices[i,]$strike ,prices[i, ]$impliedVola , T)
}
# Beta-PERT "beta" shape parameter from (min, most-likely, max) estimates.
giveMeBeta <- function(min, ml, max){
	1 + 4 * (max - ml) / (max - min)
}
# Beta-PERT "alpha" shape parameter from (min, most-likely, max) estimates.
giveMeAlpha <- function(min, ml, max){
	1 + 4 * (ml - min) / (max - min)
}
# Recompute model prices for every option in the global `prices` data frame
# at a hypothetical spot price `current`, writing results into the global
# columns prices$fprice (Black-Scholes value) and prices$differ
# (model value minus observed mid) via `<<-`.
#
# NOTE(review): the `prices$fprice <- NA` / `prices$differ <- NA` lines use
# local `<-`, creating a function-local copy of `prices`, while the loop body
# uses `<<-` to write single elements into the *global* `prices`. Mixing the
# two looks unintentional — confirm which copy callers expect to be updated.
# The hard-coded vola/tBill/FractionalYear mirror the scenario constants used
# elsewhere in this file.
calculateFuturePrices <- function(current ) {
#current <- input$decimal
# Strike <- 400
vola <- 27.9 /100
tBill <- 2.16 /100
FractionalYear <- 71 / 365.25
prices$fprice <- NA
prices$differ <- NA
for (i in 1:nrow(prices)){
if(prices[i,]$type2=="C"){
prices[i,]$fprice <<- BlackScholesCallValue(current, prices[i,]$strike, tBill, vola, FractionalYear)
}
else{
prices[i,]$fprice <<- BlackScholesPutValue(current, prices[i,]$strike, tBill, vola, FractionalYear)
}
#prices[i,]$impliedVola <-implied.vol(prices[i,]$stockprice, prices[i,]$strike , T, r, prices[i, ]$mid , prices[i, ]$type2)
prices[i,]$differ <<- prices[i,]$fprice - prices[i,]$mid
}
}
# Define server logic for slider examples
shinyServer(function(input, output) {
# Reactive expression to compose a data frame containing all of the values
sliderValues <- reactive({
# Compose data frame
# data.frame(
# Name = c("Integer",
# "Decimal",
# "Range",
# "Custom Format",
# "Animation"),
# Value = as.character(c(input$integer,
# input$decimal,
# paste(input$range, collapse=' '),
# input$format,
# input$animation)),
# stringsAsFactors=FALSE)
})
# Show the values using an HTML table
# output$values <- renderTable({
# sliderValues()
# })
# Show the first "n" observations
output$simple <- renderText({
#HTML("Current Stock Price:", prices[2,]$stockprice,"<br> Date Scraped:", prices[2,]$date_scraped, " <br> Expiration Date 2013-08-13", "<br> interest r=0.01" )
HTML("<br> <h3> Basic Info:</h3> Current Stock Price: 452 <br> Date Scraped: 2013-06-04 17:07:37.312 <br> Expiration Date 2013-08-13 <br> Interest r=0.01" )
#cat(as.character(el))
})
output$regression2 <- renderTable({
#summary(out)
if(input$n_breaks == "Alle"){
summary(lm(Punkte~Tore+MW+spiele+factor(Position), data=out))
}
else{
summary(lm(Punkte~Tore+MW+spiele, data=out[which(out$Position == input$n_breaks),]))
}
})
output$prices <- renderPlot({
if(input$type == "All"){
plot(prices$mid~prices$strike, ylab="Option Price", xlab="Strike Price")
}
if(input$type == "Calls"){
plot(prices[which(prices$type2=="C"),]$mid~prices[which(prices$type2=="C"),]$strike, ylab="Option Price", xlab="Strike Price")
}
if(input$type == "Puts"){
plot(prices[which(prices$type2=="P"),]$mid~prices[which(prices$type2=="P"),]$strike, ylab="Option Price", xlab="Strike Price")
}
})
output$vola <- renderPlot({
if(input$type == "All"){
plot(prices$impliedVola~prices$strike, ylab="Implied Volatility", xlab="Strike Price")
}
if(input$type == "Calls"){
plot(prices[which(prices$type2=="C"),]$impliedVola~prices[which(prices$type2=="C"),]$strike, ylab="Implied Volatility", xlab="Strike Price")
}
if(input$type == "Puts"){
plot(prices[which(prices$type2=="P"),]$impliedVola~prices[which(prices$type2=="P"),]$strike, ylab="Implied Volatility", xlab="Strike Price")
}
})
output$prob <- renderPlot({
if(input$type == "All"){
plot(prices$impliedProb~prices$strike, ylab="Implied Probability", xlab="Strike Price")
}
if(input$type == "Calls"){
plot(prices[which(prices$type2=="C"),]$impliedProb~prices[which(prices$type2=="C"),]$strike, ylab="Implied Probability", xlab="Strike Price")
}
if(input$type == "Puts"){
plot(prices[which(prices$type2=="P"),]$impliedProb~prices[which(prices$type2=="P"),]$strike, ylab="Implied Probability", xlab="Strike Price")
}
})
output$changedPrices <- renderPlot({
current <- input$decimal
# Strike <- 400
vola <- 27.9 /100
tBill <- 2.16 /100
FractionalYear <- 71 / 365.25
prices$fprice <- NA
for (i in 1:nrow(prices)){
if(prices[i,]$type2=="C"){
prices[i,]$fprice <- BlackScholesCallValue(current, prices[i,]$strike, tBill, vola, FractionalYear)
}
else{
prices[i,]$fprice <- BlackScholesPutValue(current, prices[i,]$strike, tBill, vola, FractionalYear)
}
#prices[i,]$impliedVola <-implied.vol(prices[i,]$stockprice, prices[i,]$strike , T, r, prices[i, ]$mid , prices[i, ]$type2)
}
if(input$type == "All"){
plot(prices$fprice~prices$strike)
}
if(input$type == "Calls"){
plot( (prices[which(prices$type2=="C"),]$fprice - prices[which(prices$type2=="C"),]$mid) / (prices[which(prices$type2=="C"),]$mid) ~prices[which(prices$type2=="C" ),]$strike)
}
if(input$type == "Puts"){
plot(prices[which(prices$type2=="P"),]$fprice~prices[which(prices$type2=="P"),]$strike)
}
})
output$differences <- renderPlot({
current <- input$decimal2
# Strike <- 400
vola <- 27.9 /100
tBill <- 2.16 /100
FractionalYear <- 71 / 365.25
prices$fprice <- NA
for (i in 1:nrow(prices)){
if(prices[i,]$type2=="C"){
prices[i,]$fprice <- BlackScholesCallValue(current, prices[i,]$strike, tBill, vola, FractionalYear)
}
else{
prices[i,]$fprice <- BlackScholesPutValue(current, prices[i,]$strike, tBill, vola, FractionalYear)
}
#prices[i,]$impliedVola <-implied.vol(prices[i,]$stockprice, prices[i,]$strike , T, r, prices[i, ]$mid , prices[i, ]$type2)
}
if(input$dtype == "New Option Prices") {
if(input$type == "All"){
plot(prices$fprice~prices$strike, ylab="Option Price", xlab="Strike")
}
if(input$type == "Calls"){
plot( (prices[which(prices$type2=="C"),]$fprice - prices[which(prices$type2=="C"),]$mid) / (prices[which(prices$type2=="C"),]$mid) ~prices[which(prices$type2=="C" ),]$strike, ylab="Option Price", xlab="Strike")
}
if(input$type == "Puts"){
plot(prices[which(prices$type2=="P"),]$fprice~prices[which(prices$type2=="P"),]$strike, ylab="Option Price", xlab="Strike")
}
}
if(input$dtype == "Absolute Profit"){
if(input$type == "All"){
plot( prices$fprice - prices$mid ~ prices$strike, ylab="Absolute Profit", xlab="Strike")
}
if(input$type == "Calls"){
plot( (prices[which(prices$type2=="C"),]$fprice - prices[which(prices$type2=="C"),]$mid) ~prices[which(prices$type2=="C" ),]$strike, ylab="Absolute Profit", xlab="Strike")
}
if(input$type == "Puts"){
plot((prices[which(prices$type2=="P"),]$fprice - prices[which(prices$type2=="P"),]$mid) ~prices[which(prices$type2=="P"),]$strike, ylab="Absolute Profit", xlab="Strike")
}
}
if(input$dtype == "Relative Profit"){
if(input$type == "All"){
plot( (prices$fprice - prices$mid)/prices$mid ~ prices$strike, ylab="Relative Profit", xlab="Strike")
}
if(input$type == "Calls"){
plot( (prices[which(prices$type2=="C"),]$fprice - prices[which(prices$type2=="C"),]$mid) / (prices[which(prices$type2=="C"),]$mid) ~prices[which(prices$type2=="C" ),]$strike, ylab="Relative Profit", xlab="Strike")
}
if(input$type == "Puts"){
plot( (prices[which(prices$type2=="P"),]$fprice - prices[which(prices$type2=="P"),]$mid) / (prices[which(prices$type2=="P"),]$mid) ~prices[which(prices$type2=="P" ),]$strike, ylab="Relative Profit", xlab="Strike")
}
}
})
## given a certain probability what is the rel / abs. profit?
## give me a option(k) maximizing the abs/rel profit giving an estimate
################the lovely pert!
output$PERT <- renderPlot({
alpha <- giveMeAlpha(input$min, input$ml, input$max)
beta <- giveMeBeta(input$min, input$ml, input$max)
x <- rbeta(n=2000, alpha, beta)
for(i in 1: length(x)){
x[i] <- x[i] * (input$max - input$min) + input$min
}
hist(x, breaks=50)
})
## One-row table showing the fitted beta-PERT shape parameters so the user
## can see which beta distribution their min/ml/max inputs imply.
output$PERTpara <- renderTable({
a <- giveMeAlpha(input$min, input$ml, input$max)
b <- giveMeBeta(input$min, input$ml, input$max)
data.frame(alpha = a, beta = b)
})
## Relative-profit scatter plot where the "fair" option value is a Monte-Carlo
## average over draws from the user's scaled beta-PERT distribution.
## NOTE(review): relies on globals `prices` (option chain with strike / type /
## type2 / mid / avgPrice columns) and `optionPrice()` defined elsewhere in the
## app -- confirm their contracts before changing this block.
output$joint <- renderPlot({
alpha <- giveMeAlpha(input$min, input$ml, input$max)
beta <- giveMeBeta(input$min, input$ml, input$max)
## scale beta PERT!
x <- rbeta(n=100, alpha, beta)
## Rescale each [0, 1] draw onto [input$min, input$max].
for(i in 1: length(x)){
x[i] <- x[i] * (input$max - input$min) + input$min
}
## Expected option value per chain row, averaged over the sampled scenarios.
## (This mutates only the reactive-local copy of `prices`.)
for(i in 1:nrow(prices)) {
prices[i,]$avgPrice <- optionPrice(prices[i,]$strike, prices[i,]$type, x, prices[i,]$mid)
}
## JUST relative profits! Plot (modelled value - mid) / mid against strike,
## restricted to the option type the user selected.
if(input$type == "All"){
plot( (prices$avgPrice - prices$mid)/prices$mid ~ prices$strike, ylab="Relative Profit", xlab="Strike")
}
if(input$type == "Calls"){
plot( (prices[which(prices$type2=="C"),]$avgPrice - prices[which(prices$type2=="C"),]$mid) / (prices[which(prices$type2=="C"),]$mid) ~prices[which(prices$type2=="C" ),]$strike, ylab="Relative Profit", xlab="Strike")
}
if(input$type == "Puts"){
plot( (prices[which(prices$type2=="P"),]$avgPrice - prices[which(prices$type2=="P"),]$mid) / (prices[which(prices$type2=="P"),]$mid) ~prices[which(prices$type2=="P" ),]$strike, ylab="Relative Profit", xlab="Strike")
}
})
})
|
a13590ee48c9c6014b741f7a2b7971875d153932
|
a6ba30aa49badda9be0507045bd66edc354db15f
|
/R/models__taildependence__funinv2d.R
|
b3e274473217eaee930ec19043775ab8b0798b25
|
[] |
no_license
|
ayotoasset/cdcopula
|
fdecbd663a31985bac90369db93e10db24e52ae8
|
b0a93b0008b19b8e2f2f3157e2e4e2cdc0297c60
|
refs/heads/master
| 2022-12-26T18:01:40.918493
| 2020-09-28T10:48:30
| 2020-09-28T10:48:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,018
|
r
|
models__taildependence__funinv2d.R
|
#' Numerically invert y = FUN(x1, x2) with respect to x2.
#'
#' @param FUN Function of (x1, x2, ...) returning y.
#' @param x1 Numeric vector of known first arguments.
#' @param y Numeric vector of known function values, same length as x1.
#' @param x1lim,x2lim Length-2 ranges used to build the lookup table
#'   ("tabular") or to bracket the root ("iterative").
#' @param ... Extra arguments forwarded to FUN.
#' @param method "tabular" (cached grid lookup) or "iterative" (uniroot).
#' @param tol Grid spacing for the tabular method.
#' @return x2 values in the same container/format as x1.
#' @export
funinv2d <- function(FUN, x1, y, x1lim, x2lim,...,
                     method = c("tabular", "iterative")[1], tol = 1e-02)
{
  ## y = f(x1, x2) -> x2
  if(tolower(method) == "tabular")
  {
    ## Derive a cache name that is stable for a given FUN: seeding from the
    ## function's size makes runif(1) reproducible.  BUGFIX: save and restore
    ## the global RNG state so callers' random streams are not clobbered
    ## (the original leaked the seed into the session).
    had.seed <- exists(".Random.seed", envir = .GlobalEnv)
    if(had.seed) old.seed <- get(".Random.seed", envir = .GlobalEnv)
    set.seed(object.size(FUN))
    FUNNAME.prefix <- runif(1)
    if(had.seed)
      assign(".Random.seed", old.seed, envir = .GlobalEnv)
    else
      rm(list = ".Random.seed", envir = .GlobalEnv)
    tabular.FUNNAME <- paste(".tabular.", FUNNAME.prefix, sep = "")
    ## Build and cache the lookup table in the global environment on the
    ## first call for this FUN.  (Disk caching was tried and removed by the
    ## original author: saving/loading was too slow.)
    if(!exists(tabular.FUNNAME, envir = .GlobalEnv))
    {
      cat("Creating tabular for function inverse with tol = ", tol, "...")
      tabular <- twowaytabular(FUN = FUN, x1lim = x1lim,
                               x2lim = x2lim, tol = tol, ...)
      assign(tabular.FUNNAME, tabular, envir = .GlobalEnv)
      cat("done.\n")
    }
    out <- funinv2d.tab(x1 = x1, y = y,
                        tabular = get(tabular.FUNNAME, envir = .GlobalEnv))
  }
  else if(tolower(method) == "iterative")
  {
    out <- funinv2d.iter(FUN = FUN, x1 = x1, y = y, x2lim = x2lim)
  }
  else
  {
    ## BUGFIX: fail with a clear message instead of "object 'out' not found".
    stop("Unknown method: ", method)
  }
  return(out)
}
#' Tabular (dictionary) lookup of x2 from a precomputed two-way table.
#'
#' Given x1 and target values y = f(x1, x2), pick for every observation the
#' x2 grid point whose tabulated function value is closest to y.
#'
#' @param x1 Numeric vector of first coordinates; assumed to lie on a linear
#'   grid with spacing tabular$tol.
#' @param y Numeric vector of target function values, same length as x1.
#' @param tabular Lookup table produced by twowaytabular().
#' @return The matched x2 grid values, in the same container as x1.
#' @export
funinv2d.tab <- function(x1, y, tabular)
{
  nObs <- length(y)
  if(length(x1) !=nObs)
  {
    stop("The input parameters should be of the same length.")
  }
  ## Unpack the precomputed dictionary.
  step <- tabular$tol
  nRows <- tabular$nGrid1
  nCols <- tabular$nGrid2
  x2Vals <- tabular$x2Grid
  lookup <- tabular$Mat
  ## Map each x1 onto its nearest table row, clamping indices that fall
  ## below the first or above the last row (under-/overflow guard).
  rowIdx <- round(x1 / step)
  rowIdx[rowIdx < 1] <- 1
  rowIdx[rowIdx > nRows] <- nRows
  picked <- lookup[rowIdx, , drop = FALSE]
  ## For every observation, choose the column minimizing |y - table value|.
  ## max.col() on the negated deviations is a fast vectorized argmin
  ## (this is the speed bottleneck; a parallel version was tried and was
  ## slower than the serial one).
  dev <- -abs(matrix(y, nObs, nCols) - picked)
  colIdx <- max.col(dev)
  matched <- x2Vals[colIdx]
  ## Return in the same format (attributes) as the x1 input.
  out <- x1
  out[1:nObs] <- matched
  return(out)
}
#' Build a two-way lookup table of FUN over a grid of (x1, x2).
#'
#' Used by the tabular inverse in funinv2d(): table rows index x1, columns
#' index x2, and Mat[i, j] = FUN(x1Grid[i], x2Grid[j], ...).
#'
#' @param FUN Function of (x1, x2, ...); must accept vector x1/x2.
#' @param x1lim,x2lim Length-2 numeric ranges for the two grids.
#' @param tol Grid spacing (applied before any nonlinear transform).
#' @param gridmethod List with elements `x1` and `x2`, each one of
#'   "linear", "exp" or "cubic", selecting how grid points are spaced.
#' @param ... Extra arguments forwarded to FUN.
#' @return List with Mat, nGrid1, nGrid2, x2Grid and tol.
#' @export
twowaytabular <- function(FUN, x1lim, x2lim,tol = 1e-3,
                          gridmethod = list(x1 = "linear", x2 = "cubic"), ...)
{
  ## Generate one grid on (xlim[1], xlim[2]) with the requested spacing rule.
  gridgens <- function(xlim, gridmethod, tol)
  {
    if(tolower(gridmethod) == "linear")
    {
      out <- seq(xlim[1]+tol, xlim[2]-tol, tol)
    }
    else if (tolower(gridmethod) == "exp")
    {
      out <- exp(seq(log(xlim[1])+tol, log(xlim[2])-tol, tol))
    }
    else if (tolower(gridmethod) == "cubic")
    {
      out <- (seq((xlim[1])^(1/3)+tol, (xlim[2])^(1/3)-tol, tol))^3
    }
    else
    {
      stop("No such grid grid generating method.")
    }
    return(out)
  }
  x1Grid <- gridgens(xlim = x1lim, gridmethod = gridmethod$x1, tol = tol)
  x2Grid <- gridgens(xlim = x2lim, gridmethod = gridmethod$x2, tol = tol)
  nGrid1 <- length(x1Grid)
  nGrid2 <- length(x2Grid)
  Mat <- matrix(NA, nGrid1, nGrid2)
  ## A very fine table may not fit in memory in one shot, so fill the matrix
  ## in column chunks of at most ~1e6 cells.
  MaxLenCurr <- round(min(nGrid1*nGrid2, 1e6)/nGrid1)
  LoopIdx <- c(seq(1, nGrid2, MaxLenCurr), nGrid2)
  LoopIdx[1] <- 0
  ## BUGFIX: when seq() already ended exactly at nGrid2 the boundary vector
  ## contained a duplicate, so the final chunk indexed past the matrix and
  ## the fill crashed.  Deduplicating after zeroing the first entry keeps
  ## the normal case byte-for-byte identical.
  LoopIdx <- unique(LoopIdx)
  nLoops <- length(LoopIdx)-1
  for(j in seq_len(nLoops))
  {
    IdxCurr0 <- LoopIdx[j]+1
    IdxCurr1 <- LoopIdx[j+1]
    ## Expand the (x1, x2) pairs for this chunk in column-major order.
    x1 <- rep(x1Grid, times = IdxCurr1-IdxCurr0+1)
    x2 <- rep(x2Grid[IdxCurr0:IdxCurr1], each = nGrid1)
    Mat[, IdxCurr0:IdxCurr1] <- FUN(x1 = x1, x2 = x2, ...)
  }
  out <- list(Mat = Mat, nGrid1 = nGrid1, nGrid2 = nGrid2, x2Grid = x2Grid, tol = tol)
  return(out)
}
#' Iterative inverse of y = FUN(x1, x2) with respect to x2.
#'
#' For each pair (x1[i], y[i]) solve FUN(x1[i], x2) = y[i] for x2 with
#' uniroot() over the bracketing interval x2lim.
#'
#' @param FUN Function of (x1, x2, ...) returning y.
#' @param x1 Numeric vector of known first arguments.
#' @param y Numeric vector of target values, same length as x1.
#' @param x2lim Length-2 interval that must bracket the root; uniroot()
#'   cannot handle Inf endpoints.
#' @param ... Extra arguments forwarded to FUN.
#' @return x2 values (same container as x1); NA where no root was found
#'   (e.g. no sign change inside x2lim, which can happen for parameter
#'   combinations that are infeasible in theory -- see paper appendix).
#' @export
funinv2d.iter <- function(FUN, x1, y, x2lim, ...)
{
  ## TODO: parallelize this loop for long inputs.
  out.x2 <- x1
  parLen <- length(y)
  out.x2[seq_len(parLen)] <- NA
  for(i in seq_len(parLen))
  {
    yCurr <- y[i]
    x1Curr <- x1[i]
    ## BUGFIX: the original passed the *full* x1/y vectors into uniroot's
    ## objective, so yCurr/x1Curr were computed but never used and every
    ## iteration solved the wrong (vector-valued) equation.  Solve only for
    ## the current element.
    x2Curr <- try(uniroot(function(x) FUN(x1 = x1Curr, x2 = x, ...) - yCurr,
                          interval = x2lim),
                  silent = TRUE)
    if(inherits(x2Curr, "try-error"))
    {
      ## Root not bracketed (or FUN failed): mark as not invertible here.
      out.x2[i] <- NA
    }
    else
    {
      out.x2[i] <- x2Curr$root
    }
  }
  return(out.x2)
}
|
6992f72dd72ee1f87799c8a3f97da64328c62fe4
|
b8ebc5db1b08ed2bfd3e001cf01c360d840d5b1e
|
/april_19_23/Exercise 4.R
|
9e1c7cb5bf90003cd292db055adb8601b3031977
|
[] |
no_license
|
MichalSalach/RR_classes
|
8bc55224e6504ec0fd0df7d5a27a75af3f8175b8
|
6a9a928b97cef134d9ff0db0e9abe55eb42f2711
|
refs/heads/main
| 2023-04-12T08:47:20.362292
| 2021-05-13T14:59:56
| 2021-05-13T14:59:56
| 355,957,857
| 0
| 0
| null | 2021-04-08T15:19:42
| 2021-04-08T15:19:41
| null |
UTF-8
|
R
| false
| false
| 6,661
|
r
|
Exercise 4.R
|
#### Path ####
# NOTE(review): relative setwd() assumes the script is launched from the repo
# root -- confirm, or prefer project-relative paths (e.g. here::here()).
setwd("april_19_23")
#### Libraries ####
library(readxl)
library(Hmisc)
library(stringr)
library(dplyr)
#### Data ####
# Import data from the O*NET database, at ISCO-08 occupation level.
# The original data uses a version of SOC classification, but the data we load here
# are already cross-walked to ISCO-08 using: https://ibs.org.pl/en/resources/occupation-classifications-crosswalks-from-onet-soc-to-isco/
# The O*NET database contains information for occupations in the USA, including
# the tasks and activities typically associated with a specific occupation.
task_data <- read.csv("Data/onet_tasks.csv")
# isco08 variable is for occupation codes
# the t_* variables are specific tasks conducted on the job
# read employment data from Eurostat
# These datasets include quarterly information on the number of workers in specific
# 1-digit ISCO occupation categories. (Check here for details: https://www.ilo.org/public/english/bureau/stat/isco/isco08/)
# One sheet per 1-digit ISCO group: creates data frames isco1 .. isco9.
for (i in 1:9) {
var <- paste0("isco", i)
sheet <- paste0("ISCO", i)
assign(var, read_excel("Data/Eurostat_employment_isco.xlsx", sheet = sheet))
}
#### Parameters ####
countries <- c("Belgium", "Spain", "Poland", "Italy", "Sweden")
#### Data preparation ####
# This will calculate worker totals in each of the chosen countries.
# (Sums the country column across the nine occupation sheets, period by period.)
for (country in countries) {
total_country <- 0
for (i in 1:9) {
df <- get(paste0("isco", i))
total_country <- total_country + df[, country]
}
# `[, ]` drops the tibble wrapper so total_<country> is a plain column.
assign(paste0("total_", country), total_country[, ])
}
# Let's merge all these datasets. We'll need a column that stores the occupation categories:
for (i in 1:9) {
df <- get(paste0("isco", i))
df[, "ISCO"] <- i
assign(paste0("isco", i), df)
}
# and this gives us one large file with employment in all occupations.
all_data <- rbind(isco1, isco2, isco3, isco4, isco5, isco6, isco7, isco8, isco9)
# We have 9 occupations and the same time range for each, so we can add the totals by
# adding a vector that is 9 times the previously calculated totals
for (country in countries) {
all_data[, paste0("total_", country)] <- rep(get(paste0("total_", country)), 9)
# And this will give us shares of each occupation among all workers in a period-country:
all_data[, paste0("share_", country)] <- all_data[, country] / all_data[, paste0("total_", country)]
}
# Now let's look at the task data. We want the first digit of the ISCO variable only
task_data$isco08_1dig <- str_sub(task_data$isco08, 1, 1) %>% as.numeric()
# And we'll calculate the mean task values at a 1-digit level
# (more on what these tasks are below)
aggdata <- aggregate(task_data,
by = list(task_data$isco08_1dig),
FUN = mean, na.rm = TRUE
)
aggdata$isco08 <- NULL
# Let's combine the data.
combined <- left_join(all_data, aggdata, by = c("ISCO" = "isco08_1dig"))
# Let's move a group-specific procedure to a function:
#' Standardise a set of O*NET task items, aggregate them into one task-content
#' group, and plot each country's weighted mean of that group over time.
#'
#' @param task_items Character vector of task item codes (e.g. "4A2a4");
#'   each must match a `t_<item>` column in the global data frame `combined`.
#' @param group_name Short name for the aggregated group (e.g. "NRCA"),
#'   used to build the derived column names.
#' @return Called for its side effect: one base-graphics plot per country.
#'   NOTE(review): reads globals `combined` and `countries` and uses
#'   Hmisc::wtd.mean / wtd.var -- confirm they are in scope before calling.
agg_data_by_group <- function(task_items, group_name) {
  tryCatch(
    {
      # Step 1: standardise each task item per country (weighted mean 0,
      # weighted sd 1), with weights given by the occupation shares.
      for (item in task_items) {
        for (country in countries) {
          var_name <- paste0("std_", country, "_t_", item)
          temp_mean <- wtd.mean(combined[, paste0("t_", item)], combined[, paste0("share_", country)])
          temp_sd <- wtd.var(combined[, paste0("t_", item)], combined[, paste0("share_", country)]) %>% sqrt()
          combined[, var_name] <- (combined[, paste0("t_", item)] - temp_mean) / temp_sd
        }
      }
      # Step 2: classic task-content intensity -- the sum of the
      # standardised items (Autor / Acemoglu style aggregation).
      for (country in countries) {
        combined[paste0(country, "_", group_name)] <- 0
        for (item in task_items) {
          combined[paste0(country, "_", group_name)] <- combined[paste0(country, "_", group_name)] +
            combined[paste0("std_", country, "_t_", item)]
        }
      }
      # Step 3: standardise the group the same way, take the share-weighted
      # country mean per period, and plot it over time.
      for (country in countries) {
        temp_mean <- wtd.mean(
          combined[, paste0(country, "_", group_name)],
          combined[, paste0("share_", country)]
        )
        temp_sd <- wtd.var(
          combined[, paste0(country, "_", group_name)],
          combined[, paste0("share_", country)]
        ) %>% sqrt()
        combined[, paste0("std_", country, "_", group_name)] <- (combined[, paste0(country, "_", group_name)] - temp_mean) / temp_sd
        # Weighted mean = sum over occupations of (value * share).
        combined[, paste0("multip_", country, "_", group_name)] <- combined[, paste0("std_", country, "_", group_name)] * combined[, paste0("share_", country)]
        assign(
          paste0("agg_", country),
          aggregate(combined[, paste0("multip_", country, "_", group_name)],
                    by = list(combined$TIME),
                    FUN = sum,
                    na.rm = TRUE
          )
        )
        agg_country <- get(paste0("agg_", country))
        plot(agg_country[, 2], xaxt = "n", ylab = paste("agg. multip.", country, group_name))
        axis(1, at = seq(1, 40, 3), labels = agg_country$Group.1[seq(1, 40, 3)])
      }
    },
    # BUGFIX: keep the underlying condition message so the user can see
    # *why* the selection failed, not just that it did.
    error = function(cond) {
      stop("Wrong selection of categories: ", conditionMessage(cond),
           call. = FALSE)
    }
  )
}
#### Results ####
# We'll be interested in tracking the intensity of Non-routine cognitive analytical tasks
# Using a framework reminiscent of the work by David Autor ('4A2a4', '4A2b2', '4A4a1').
# Therefore, these are the categories we're primarily interested in:
# Non-routine cognitive analytical
# 4.A.2.a.4 Analyzing Data or Information
# 4.A.2.b.2 Thinking Creatively
# 4.A.4.a.1 Interpreting the Meaning of Information for Others
# These are some other categories:
# Routine manual
# 4.A.3.a.3 Controlling Machines and Processes
# 4.C.2.d.1.i Spend Time Making Repetitive Motions
# 4.C.3.d.3 Pace Determined by Speed of Equipment
# Each call standardises the items, aggregates them into one group, and plots
# the country-level weighted mean over time (one plot per country).
agg_data_by_group(c("4A2a4", "4A2b2", "4A4a1"), "NRCA")
agg_data_by_group(c("4A3a3", "4C2d1i", "4C3d3"), "RM")
# Get rid of unnecessary files:
# (these are loop helpers created in the data-preparation section above)
rm(i, var, country, sheet, df)
|
6863457286604ff104551c9f8ddf60984cd215a7
|
b38df3e8ae84be340fe8fda161b64fa8909adec2
|
/Polarity.R
|
16f2b9151327699047e61262d0613aa13b97f958
|
[] |
no_license
|
Tanay0510/Geo-Political-Multipolarity
|
61aba383a367bd31e5471108ded763350de53f81
|
a96339737fd0aa0d797f515648ade8974779a6e3
|
refs/heads/master
| 2023-02-18T18:36:17.245202
| 2021-01-22T03:25:40
| 2021-01-22T03:25:40
| 274,976,771
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,540
|
r
|
Polarity.R
|
# --- [~] Stepwise Regression --- [~] #
# http://www.sthda.com/english/articles/37-model-selection-essentials-in-r/154-stepwise-regression-essentials-in-r/
# --- Regression in R --- #
# Set Working Directory [Mac = ~/Desktop | PC = C:/Users/Default/Desktop]
# NOTE(review): setwd() makes the script machine-specific; globalization.csv
# must live on the Desktop for read.csv() below to succeed -- verify.
setwd("~/Desktop")
# Get working directory
getwd()
# Get list of files in working directory
list.files()
# Install any missing packages (no-op when already installed).
if("dplyr" %in% rownames(installed.packages()) == FALSE) {
install.packages("dplyr", dependencies = TRUE)}
if("rms" %in% rownames(installed.packages()) == FALSE) {
install.packages("rms", dependencies = TRUE)}
if("psych" %in% rownames(installed.packages()) == FALSE) {
install.packages("psych", dependencies = TRUE)}
if("ggplot2" %in% rownames(installed.packages()) == FALSE) {
install.packages("ggplot2", dependencies = TRUE)}
if("ggpubr" %in% rownames(installed.packages()) == FALSE) {
install.packages("ggpubr", dependencies = TRUE)}
if("ggcorrplot" %in% rownames(installed.packages()) == FALSE) {
install.packages("ggcorrplot", dependencies = TRUE)}
# Add Package Libraries
library(dplyr)
library(rms)
library(psych)
library(ggplot2)
library(ggpubr)
library(ggcorrplot)
# Load the dataset by reading CSV file
firepower.data <- read.csv("globalization.csv")
# Convert all blank values to NA
firepower.data[firepower.data==""] <- NA
# Get summary of all data
summary(firepower.data)
# Get summary of the CINC (Composite Index of National Capability) outcome
summary(firepower.data$CINC)
# --- Regression --- #
# Create a basic linear regression formula
# Compare the CINC score against all of the remaining variables
# CINC ~ .
# Remove Country,Full.Name, Region,Sub.Region, United.Nations.Status, Political.Region, Military.Alliance, UN.HDI.Rank, CINC.x.10.000, Regional.GDP, Regional.UN.HDI, Nuclear.WeaponsDetails b/c they are categorical variables
df = subset(firepower.data, select = -c(Country,Full.Name,Region,Sub.Region,United.Nations.Status,Political.Region,Military.Alliance,UN.HDI.Rank,CINC.x.10.000,Regional.GDP....,Regional.UN.HDI....,Nuclear.Weapons..Details.))
firepower.model <- lm(CINC ~ ., data = df)
summary(firepower.model)
# Create a pruned model by removing all variables that do not have asterisks next to them (*, **, ***)
# Stars (*) means significant predictors
# (*) 95% of the time
# (**) = 99% of the time
# (***) = true almost all the cases (less than 1/1000)
# One intercept is always significant (intercept should be called offset)
firepower.model.pruned <- lm(CINC ~ Country.GDP..US.million. + Population + Oil.Reserves..millions.barrels. + HDI...Change..1.Yr. + IEF + Final.Military.Str..Score + Active.Military + Reserve.Military + X1000.Capita..Tot. + Aircraft.Carriers + Amphibious.War.Ship + Cruisers + Destroyers + Frigates + Corvettes + Attack.Helicopters + Military.Satellites + Nuclear.Weapons..Total. + Nuclear.Weapons..Exist., data = df)
summary(firepower.model.pruned)
# Get VIF values from model for multicollinearity
vif(firepower.model.pruned)
# Remove any variables with a VIF over 10 (common rule of thumb for severe
# multicollinearity); the VIFs observed in this run are listed below.
# Country.GDP..US.million. - 67.491718
# Final.Military.Str..Score - 16.581231
# Active.Military - 13.056200
# Aircraft.Carriers - 83.034172
# Amphibious.War.Ship - 66.255562
# Cruisers - 646.868943
# Destroyers - 116.555426
# Attack.Helicopters - 88.210877
# Military.Satellites - 202.588644
# Nuclear.Weapons..Exist. - 870.301259
# price.ratio.fruit.per.pkg.savory.snacks - 6.563584
# Create a final model
firepower.model.final <- lm(CINC ~ + HDI...Change..1.Yr. + Population + Oil.Reserves..millions.barrels. + IEF + Reserve.Military + X1000.Capita..Tot. + Frigates + Nuclear.Weapons..Total. + Corvettes, data = df)
summary(firepower.model.final)
# Coefficients:
#                                   Estimate Std. Error t value Pr(>|t|)
# (Intercept)                     -1.027e-03  1.230e-03  -0.835  0.40490
# HDI...Change..1.Yr.              9.280e-02  3.048e-01   0.305  0.76108
# Population                       5.105e-11  6.177e-12   8.264 2.43e-14 ***
# Oil.Reserves..millions.barrels.  6.732e-09  1.590e-08   0.423  0.67252
# IEF                             -2.642e-06  1.020e-05  -0.259  0.79587
# Reserve.Military                -8.527e-09  2.831e-09  -3.012  0.00295 **
# X1000.Capita..Tot.               1.103e-05  2.490e-05   0.443  0.65837
# Frigates                         1.443e-03  1.710e-04   8.438 8.28e-15 ***
# Nuclear.Weapons..Total.          2.587e-03  1.064e-03   2.431  0.01598 *
# Corvettes                        6.186e-04  1.272e-04   4.862 2.44e-06 ***
# Multiple R-squared:  0.8243, Adjusted R-squared:  0.8159
# F-statistic: 98.51 on 9 and 189 DF,  p-value: < 2.2e-16
# Get VIF values from model for final multicollinearity check
vif(firepower.model.final)
# HDI...Change..1.Yr. - 1.157262
# Population - 2.484355
# Oil.Reserves..millions.barrels - 1.154360
# IEF - 1.080729
# Reserve.Military - 2.691129
# X1000.Capita..Tot. - 1.476128
# Frigates - 2.585946
# Nuclear.Weapons..Total. - 1.784793
# Corvettes - 2.767763
# Model Coefficients
coefficients(firepower.model.final)
# Confidence Intervals for Model Parameters
confint(firepower.model.final, level=0.95) # CIs for model parameters
# diagnostic plots to check model validity
layout(matrix(c(1,2,3,4),2,2))
plot(firepower.model.final)
# ANOVA Table
anova(firepower.model.final)
# Residuals from Analysis
# (commented-out calls below reference "obesity.model.final" -- apparently a
# leftover name from a template script; would need renaming before use)
# residuals(obesity.model.final)
# Statistics for Residuals Analysis
# influence(obesity.model.final)
|
aef10430bb47d0b4ccbc1d14e5e5d0b9aa29592f
|
1754113fcf2b24c711ceb1d4b43513cb908cfd32
|
/feature_extraction.R
|
f6791aea3e8225385e2c34ad3d9742c677f1eeeb
|
[] |
no_license
|
wuandtan/userInteractivity
|
86bc8e26ce9bbe36f2bf95c12e69540b325c3bac
|
a22b8ba7fcd497942941279050e20614f776c4be
|
refs/heads/master
| 2016-09-06T18:56:13.961695
| 2015-02-26T10:46:30
| 2015-02-26T10:46:30
| 31,362,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,651
|
r
|
feature_extraction.R
|
#' Extract candidate stall features from one video-playback episode.
#'
#' Uses only the server-side request log: splits the episode at
#' re-positioning points (discontinuities in the requested segment index),
#' computes a moving average of inter-request times within each period, and
#' returns one feature row per suspicious "peak" (slow-request spot),
#' labelled with the ground truth recovered from the client log.
#'
#' @param single_episode Data frame with both server and client log rows for
#'   one episode (column `type` distinguishes "server" / "client").
#' @param segmentLen Video segment length in seconds.
#' @param Num_place_for_pause Kept for interface compatibility (unused here).
#' @param Num_place_for_freeze Number of suspicious places to report.
#' @param chosen_window_size Moving-average window, in segments.
#' @return Matrix with one row per suspicious place: (peak average interval,
#'   relative position, quality level at / after the peak, mean request
#'   interval in the following window, ground-truth class 0-4), or NULL for
#'   episodes that are too short or were shut down mid-stream.
feature_extraction <- function (single_episode,segmentLen = 2,Num_place_for_pause = 2,Num_place_for_freeze = 5, chosen_window_size = 15)
{
  ## Known bitrate ladder (bps); used to map bitrates to quality levels 1..8.
  quality <- c(230000, 331000, 477000, 688000, 991000, 1427000, 2056000, 2962000)
  ## Split the episode into the server request log and the client player log.
  serverInfo <- subset(single_episode[single_episode$type=="server",], select=c(time,segidx,sc.bytes,quality,episode))
  clientInfo <- subset(single_episode[single_episode$type=="client",],select=c(time,x.duration,c.playerState,buffercount,
                                                                              c.starttime,numPause,pauseInfo,numJump,jumpInfo,rebufferingtime,adjusted.x.duration ))
  serverInfo <- arrange(serverInfo,time, segidx)
  serverInforow <- nrow(serverInfo)
  clientInforow <- nrow(clientInfo)
  ## Too few requests to say anything useful about this episode.
  if(serverInforow <= 20)
  {
    return(NULL)
  }
  ## Skip episodes where the player was shut down mid-stream.
  isShutdown <- grep("Shutdown",clientInfo$c.playerState)
  if(length(isShutdown) > 0)
  {
    return(NULL)
  }
  ## Detect re-positioning: a jump shows up as a discontinuity in segidx.
  ## In a 5-request neighbourhood, if no neighbour is exactly one segment
  ## ahead and at most one shares the same index, flag a jump at i.
  ## (Assumes no cache between server and client.)
  Feat_discontinuity <- 0
  jump_pos <- NULL
  for (i in (3:(serverInforow-2)))
  {
    t <- serverInfo$segidx[(i-2):(i+2)] - serverInfo$segidx[i]
    if(!any(t == segmentLen ) )
    {
      if(length(which(t==0)) <= 1)
      {
        Feat_discontinuity <- Feat_discontinuity + 1
        jump_pos <- c(jump_pos, i)
      }
    }
  }
  jump_pos <- c(0,jump_pos,serverInforow)
  ## Trailing moving average.  BUGFIX: qualify stats::filter explicitly --
  ## with dplyr attached (this file uses arrange()), a bare filter() resolves
  ## to dplyr::filter and fails on a numeric vector.
  ma <- function(x,n=5){stats::filter(x,rep(1/n,n), sides=1)}
  ave_sequence <- NULL
  idx_sequence <- NULL
  ## BUGFIX: the original re-assigned chosen_window_size <- 15 here, which
  ## silently discarded the caller's argument; the parameter is now honoured
  ## (default unchanged, so existing callers behave identically).
  for (pos in (1:(length(jump_pos)-1)))
  {
    ## Each period between two re-positionings is averaged separately so a
    ## jump does not bleed into the moving average.
    serverInfo_in_each_jump <- serverInfo[(jump_pos[pos]+1):jump_pos[pos+1],]
    r <- nrow(serverInfo_in_each_jump)
    interval <- as.numeric(as.duration(serverInfo_in_each_jump$time[2:r]-serverInfo_in_each_jump$time[1:(r-1)]))
    if(length(interval) > chosen_window_size)
    {
      ## Growing average for the warm-up, full moving average afterwards.
      ## (Position chosen_window_size appears in both pieces; the two agree
      ## in value there, so only the sequence length is affected.)
      t1 <- cumsum(interval[1:chosen_window_size])/(1:chosen_window_size)
      t2 <- ma(interval,chosen_window_size)
      ave <- c(t1,t2[chosen_window_size:length(t2)])
    }else
    {
      ave <- cumsum(interval)/(1:length(interval))
    }
    ave_sequence <- c(ave_sequence,ave)
    idx_sequence <- c(idx_sequence,1:length(ave))
  }
  ## Local maxima of the averaged inter-request time are candidate stalls.
  peaks <- NULL
  l <- length(ave_sequence)
  peaks_pos <- which((ave_sequence[2:(l-2)]> ave_sequence[1:(l-3)]) & (ave_sequence[2:(l-2)]> ave_sequence[3:(l-1)]))
  if(length(peaks_pos)<Num_place_for_freeze)
  {
    ## With a large bandwidth the client quickly reaches a steady state and
    ## the average may have no local peaks; fall back to the largest values.
    suspicious_place <- sort.int(ave_sequence, decreasing = TRUE,index.return = TRUE)
    peaks <- suspicious_place$x[1:Num_place_for_freeze]
    relative_peaks_pos <- idx_sequence[suspicious_place$ix[1:Num_place_for_freeze]]
    ori_peaks_pos <- suspicious_place$ix[1:Num_place_for_freeze]
  }else
  {
    peaks_pos <- peaks_pos +1 # shift back: the which() above starts at element 2
    peaks <- ave_sequence[peaks_pos]
    relative_peaks_pos <- idx_sequence[peaks_pos]
    suspicious_place <- sort.int(peaks, decreasing=TRUE,index.return=TRUE)
    ## Keep only the top Num_place_for_freeze peaks.
    peaks <- suspicious_place$x[1:Num_place_for_freeze]
    relative_peaks_pos <- relative_peaks_pos[suspicious_place$ix[1:Num_place_for_freeze]]
    ori_peaks_pos <- peaks_pos[suspicious_place$ix[1:Num_place_for_freeze]]
  }
  ## How fast are segments requested right after each suspicious place?
  frequency_Window_size_in_seconds <- 10
  nSegment_within_Window <- frequency_Window_size_in_seconds/segmentLen
  feature <- NULL
  interval <- as.numeric(as.duration(serverInfo$time[2:serverInforow]-serverInfo$time[1:(serverInforow-1)]))
  for (i in (1:Num_place_for_freeze))
  {
    if(ori_peaks_pos[i]+nSegment_within_Window < serverInforow)
    {
      freq <- mean(interval[(ori_peaks_pos[i]+1):(ori_peaks_pos[i]+nSegment_within_Window)])
    }
    else
    {
      freq <- mean(interval[(ori_peaks_pos[i]+1):(serverInforow-1)])
    }
    feat <- c(peaks[i],relative_peaks_pos[i],which(quality==serverInfo$quality[ori_peaks_pos[i]]),which(quality==serverInfo$quality[ori_peaks_pos[i]+1]),freq)
    ## Label: 0 = nothing, 1 = pause, 2 = short freeze, 3 = long freeze,
    ## 4 = freeze caused by re-positioning (see ground_truth_retrieval).
    class <- ground_truth_retrieval(serverInfo,clientInfo,ori_peaks_pos[i])
    feature <- rbind(feature,c(feat,class))
  }
  feature
}
## Recover the ground-truth label for one suspicious server-log position by
## inspecting the client log around the same wall-clock time.
## Returns: 0 = nothing, 1 = pause, 2 = short freeze (lag <= 10 s),
## 3 = long freeze (lag > 10 s), 4 = freeze caused by a re-positioning.
## NOTE(review): assumes clientInfo carries per-episode fields (numJump,
## jumpInfo as "_"-separated records of 4 tokens each, with the "from"
## playback position at offsets 2, 6, 10, ...) -- confirm against the logger.
ground_truth_retrieval <- function(serverInfo,clientInfo,pos)
{
time <- serverInfo$time[pos]
freeze_due_to_repositioning <- 0
#look around this time at the client side
## client_range_begin: last client-log row strictly before the server event,
## minus one for slack, clamped to 1.
t <- which((clientInfo$time-serverInfo$time[pos] < 0)== TRUE)
client_range_begin <- t[length(t)]
if((length(client_range_begin)==0) || (client_range_begin - 1 < 1) )
{
client_range_begin <- 1;
} else
{
client_range_begin <- client_range_begin - 1;
}
## client_range_end: first client-log row after the server event, plus one
## row of slack, clamped to the end of the log.
t <- which((clientInfo$time-serverInfo$time[pos] > 0)== TRUE)
if(length(t)>0)
{
if(t[1]+2< nrow(clientInfo))
client_range_end <- t[1]+1
else
client_range_end <- nrow(clientInfo)
}else{
#unlikely to happen: the server event is after the last client log entry
print("client_range_end reaches the end of the clientInfo! Unlikely happen!")
client_range_end <- nrow(clientInfo)
}
#now we have client_range_begin and client_range_end
class <- NULL
## Any "Pause" player state inside the window labels this place as a pause.
IsPause <- grep("Pause",clientInfo$c.playerState[client_range_begin:client_range_end])
if(length(IsPause)>0)
{
class <- 1
return(class)
}
#not a pause, then is it a freeze? will look back and look fordward and see it is a short freeze or a long freeze
#or maybe the freeze is caused by the re-positioning?
st <- client_range_begin
en <- client_range_end
## First check: does a recorded jump's "from" position fall inside the
## window (within 2 s of any adjusted playback time)?
if(clientInfo$numJump[1] > 0)
{
split_jumpinfo <- unlist(strsplit(clientInfo$jumpInfo[1],"_"))
from <- as.numeric(split_jumpinfo[seq(2,length(split_jumpinfo),4)])
for (i in (st:en))
{
if(any(abs(from-clientInfo$adjusted.x.duration[i])<2))
{
freeze_due_to_repositioning <- 1
break
}
}
}
## Walk st backwards (at most 5 rows) to the last point where wall-clock
## time and playback time still advanced in lockstep (normal playback).
while (st>=1 && st>=client_range_begin - 5)
{
log_time_diff <- clientInfo$time[st+1]-clientInfo$time[st]
if(clientInfo$adjusted.x.duration[st+1] >= clientInfo$adjusted.x.duration[st] && clientInfo$adjusted.x.duration[st+1] <= clientInfo$adjusted.x.duration[st]+10)
{
#to guarantee no jumping: playback still advances in order
playback_time_diff <- clientInfo$adjusted.x.duration[st+1]-clientInfo$adjusted.x.duration[st]
if(log_time_diff == playback_time_diff)
break
}
else
{#there might be a re-positioning
#check if around there is a re-positioning
if(clientInfo$numJump[1] > 0)
{
split_jumpinfo <- unlist(strsplit(clientInfo$jumpInfo[1],"_"))
from <- as.numeric(split_jumpinfo[seq(2,length(split_jumpinfo),4)])
if(any(abs(from-clientInfo$adjusted.x.duration[st])<2))
{
freeze_due_to_repositioning <- 1
break
}
}
}
st <- st - 1
}
## Clamp: the backward walk may step past the first row.
if(st == 0)
st <- 1
## Walk en forwards (at most 5 rows) to the next point of normal playback,
## mirroring the backward scan above.
while (en < nrow(clientInfo) && en<=client_range_end + 5)
{
log_time_diff <- clientInfo$time[en+1]-clientInfo$time[en]
if(clientInfo$adjusted.x.duration[en+1] >= clientInfo$adjusted.x.duration[en] && clientInfo$adjusted.x.duration[en+1] <= clientInfo$adjusted.x.duration[en]+10)
{
#to guarantee no jumping: playback still advances in order
playback_time_diff <- clientInfo$adjusted.x.duration[en+1]-clientInfo$adjusted.x.duration[en]
if(log_time_diff == playback_time_diff)
break
}
else
{#there is a re-positioning
if(clientInfo$numJump[1] > 0)
{
split_jumpinfo <- unlist(strsplit(clientInfo$jumpInfo[1],"_"))
from <- as.numeric(split_jumpinfo[seq(2,length(split_jumpinfo),4)])
if(any(abs(from-clientInfo$adjusted.x.duration[en])<2))
{
freeze_due_to_repositioning <- 1
break
}
}
}
en <- en + 1
}
if(freeze_due_to_repositioning == 1)
{
class <- 4
return(class)
}
## Lag = wall-clock time elapsed over [st, en] minus playback progress; a
## positive lag means the player stalled somewhere inside the window.
log_time_diff <- difftime(clientInfo$time[en],clientInfo$time[st],units = "secs")
playback_time_diff <- clientInfo$adjusted.x.duration[en]-clientInfo$adjusted.x.duration[st]
## Re-check for a pause now that the window may have widened.
IsPause <- grep("Pause",clientInfo$c.playerState[st:en])
if(length(IsPause)>0)
{
class <- 1
return(class)
}
lag <- log_time_diff - playback_time_diff
## Thresholds: <= 2 s is noise, <= 10 s short freeze, > 10 s long freeze.
if(lag <=2)
{
class <- 0
return(class)
}
if(lag <=10)
{
class <- 2
return(class)
}
if(lag >10)
{
class <- 3
return(class)
}
}
#feature_extraction_old_individual_task: we extract features for each episode for each single task, like jumping, pause ,or repositioning. The input is the individual episodes with "time" "sc.bytes" "quality" "segidx" "episode" columns
feature_extraction_old_individual_task <- function (single_episode,segmentLen = 2,Num_place_for_pause = 2,Num_place_for_freeze = 2, max_window_size = 20)
{
quality <- c(230000, 331000, 477000, 688000, 991000, 1427000, 2056000, 2962000)
#we assume segment length is 2
#here we can only use the serverInfo as the input to extract the featres.
#the idea is to get the symptoms for re-positioning in the beginning. usually the re-position is relatively easy to be identified.
serverInfo <- subset(single_episode[single_episode$type=="server",], select=c(time,segidx,sc.bytes,quality,episode))
clientInfo <- subset(single_episode[single_episode$type=="client",],select=c(time,x.duration,c.playerState,buffercount,
clientStartTime,c.starttime,playbackStartTime,numPause,pauseInfo,numJump,jumpInfo,rebufferingtime,adjusted.x.duration ))
serverInfo <- arrange(serverInfo,time, segidx)
serverInforow <- nrow(serverInfo)
clientInforow <- nrow(clientInfo)
if(serverInforow <= 20)
{
feature <- NULL
return(feature)
}
##########first to extract the features for re-positioning
#######the feature used in the training and in the test is the same.
###########note that we assume there is no caching between the server and the client
#an important factor for the repositioning is the discontinuity of segidx
Feat_discontinuity <- 0
for (i in (3:(serverInforow-2)))
{
if(!any(serverInfo$segidx[(i-2):(i+2)] - serverInfo$segidx[i] == segmentLen ))
{ #print(i)
#print(serverInfo$segidx[i])
Feat_discontinuity <- Feat_discontinuity + 1
}
}
#a small forward reposition is problemic: when the client have a long, e.g 20 second buffer, if it re-positions to 10 seconds ahead, the server side will not receive
#the notice.
#another noticeable feature for re-positioning could be the time difference between the seconds of the delivered segments (2seconds * number of segments) and
#the time duration from the first delivery to the last delivery.
#this feature is expected to be efficient because both freeze and pause enlarge the real playback time, while forward repositioning reduces the real playback time.
#so when neither freeze nor pause happens, by this way small forward repositioning can be identified. However, when freeze or pause exists, the feature looses its effect.
Feat_Episode_time_difference <- 0
Feat_Episode_time_difference <- as.numeric(as.duration(new_interval(serverInfo$time[1],serverInfo$time[serverInforow]))) - as.numeric(serverInfo$segidx[serverInforow]-serverInfo$segidx[1])
if(clientInfo$numJump[1] == 0)
Class_repositioning <- 0
else
Class_repositioning <- 1
#########features for pause
#########the features used in training and in the test is DIFFERENT.
##for pause training ##
#Num_place_for_pause <- 2 #obain the two most possible places for occuring pause
frequency_Window_size_in_seconds <- 10 #the window size to check how fast the client requests the segments
nSegment_within_Window <- frequency_Window_size_in_seconds/segmentLen
pauseInfo <- clientInfo$pauseInfo[1]
if(is.na(pauseInfo))
{
#obtain the two most possible place
Class_Pause <- 0
Feat_train_pause <- no_pause(serverInfo,Num_place_for_pause,quality,nSegment_within_Window)
}else
{
idx <- which(clientInfo$c.playerState=="Paused")
if(length(idx)==0)
{
Class_Pause <- 0
Feat_train_pause <- no_pause(serverInfo,Num_place_for_pause,quality,nSegment_within_Window)
}else
{
#Class_Pause <- as.numeric(clientInfo$numPause[1])
Class_Pause <- 1
Feat_train_pause <- has_pause(serverInfo,clientInfo,Num_place_for_pause,quality,nSegment_within_Window)
}
}
##for pause test ##
interval <- as.numeric(as.duration(serverInfo$time[2:serverInforow]-serverInfo$time[1:(serverInforow-1)]))
k <-sort.int(interval, decreasing = TRUE,index.return = TRUE)
k <- k$ix[1:Num_place_for_pause]
Feat_test_pause <- NULL
for (i in (1:Num_place_for_pause))
{
if(k[i]+nSegment_within_Window < serverInforow)
{
freq <- mean(interval[(k[i]+1):(k[i]+nSegment_within_Window)])
}
else
{
freq <- mean(interval[(k[i]+1):serverInforow])
}
Feat_test_pause <- c(Feat_test_pause,interval[k[i]],which(quality==serverInfo$quality[k[i]]),which(quality==serverInfo$quality[k[i]+1]), freq)
}
########Features for re-buffering
#here I try not to consider anything about repositioning or pause (pretending that they do not exist), and use the old way (the same as in the IWQos paper) to extract
#the features, and see how much classification we can obtained. In the future, we'll add more the above two events and obtain better results.
Class_Freeze_nonFreeze <- NULL
Class_max_Freeze <- NULL
Class_multiFreeze <- NULL
Class_totalFreezeTime <- NULL
ma <- function(x,n=5){filter(x,rep(1/n,n), sides=2)}
interval <- as.numeric(as.duration(serverInfo$time[2:serverInforow]-serverInfo$time[1:(serverInforow-1)]))
Feat_freeze <- NULL
for (i in (1:max_window_size))
{
#calcuate the moving average within the window size
ave <- sort(ma(interval,i), na.rm=TRUE, decreasing=TRUE)
Feat_freeze <- c(Feat_freeze,ave[1:Num_place_for_freeze])
}
if(clientInfo$rebufferingtime[1] > 2)
{
Class_Freeze_nonFreeze <- 1
#looking for the longest single freeze
t <- clientInfo$adjusted.x.duration[2:clientInforow] - clientInfo$adjusted.x.duration[1:clientInforow-1]
b <- 1
e <- 1
count <- 0
maxCount <- 0
longest_freeze <- 0
for (j in (1:length(t)))
{
if(t[j] == 0)
{
if(count ==0)
tmp_b <- j
count <- count+1
}else
{
if(count !=0 && count >= maxCount)
{
tmp_freeze <- as.numeric(as.duration(clientInfo$time[j] - clientInfo$time[tmp_b]))
if(tmp_freeze > longest_freeze)
{e <- j
b <- tmp_b
maxCount <- count
longest_freeze <- tmp_freeze
}
}
count <- 0
}
}
if(longest_freeze>10)
Class_max_Freeze <- 1
else
Class_max_Freeze <- 0
}else{
Class_Freeze_nonFreeze <- 0
Class_max_Freeze <- 0
}
Class_totalFreezeTime <- clientInfo$rebufferingtime[1]
if(clientInfo$buffercount[1]>=2)
Class_multiFreeze <- 1
else
Class_multiFreeze <- 0
#integrate all features
feature <-c(Feat_discontinuity,Feat_Episode_time_difference,Feat_train_pause,Feat_test_pause,Feat_freeze,
Class_repositioning,Class_Pause,
Class_Freeze_nonFreeze,Class_max_Freeze,Class_multiFreeze,Class_totalFreezeTime)
feature
}
# Extract pause-candidate features from the server-side delivery log when the
# client reported no pause event.
#
# For the `Num_place_for_pause` largest gaps between consecutive segment
# deliveries, record: the gap length, the quality index before and after the
# gap, and the mean inter-delivery interval over the following window of
# `nSegment_within_Window` segments (truncated at the end of the episode).
#
# serverInfo: per-segment server delivery log; must contain `time` (parsed
#   date-times compatible with lubridate::as.duration) and `quality`.
# Num_place_for_pause: number of largest delivery gaps to characterise.
# quality: vector of all quality levels; features encode a level as its
#   index into this vector via which().
# nSegment_within_Window: number of segments in the request-frequency window.
#
# Returns a numeric vector of 4 * Num_place_for_pause features:
#   (gap, quality-before, quality-after, mean following interval) per gap.
no_pause <- function(serverInfo, Num_place_for_pause, quality, nSegment_within_Window)
{
  serverInforow <- nrow(serverInfo)
  feat <- NULL
  # Seconds between consecutive segment deliveries.
  interval <- as.numeric(as.duration(serverInfo$time[2:serverInforow] - serverInfo$time[1:(serverInforow - 1)]))
  # Indices (into `interval`) of the Num_place_for_pause largest gaps.
  k <- sort.int(interval, decreasing = TRUE, index.return = TRUE)
  k <- k$ix[seq_len(Num_place_for_pause)]
  for (i in seq_len(Num_place_for_pause))
  {
    # Mean request interval in the window just after the gap; truncate the
    # window when the gap lies near the end of the episode.
    if (k[i] + nSegment_within_Window < serverInforow)
    {
      freq <- mean(interval[(k[i] + 1):(k[i] + nSegment_within_Window)])
    }
    else
    {
      freq <- mean(interval[(k[i] + 1):serverInforow])
    }
    feat <- c(feat, interval[k[i]],
              which(quality == serverInfo$quality[k[i]]),
              which(quality == serverInfo$quality[k[i] + 1]),
              freq)
  }
  feat
}
# Extract pause features from the server log when the client log contains at
# least one "Paused" player state.
#
# serverInfo: per-segment server delivery log with `time` and `quality`.
# clientInfo: client-side log with `time` and `c.playerState`.
# Num_place_for_pause, quality, nSegment_within_Window: as in no_pause().
#
# Returns a numeric feature vector for the longest pause: (delivery gap,
# quality index before, quality index after, mean request interval in the
# following window), or c(NA, NA, NA, NA) when the pause lies at the very
# end of the episode (no deliveries after it).
has_pause <- function(serverInfo,clientInfo,Num_place_for_pause,quality,nSegment_within_Window)
{
serverInforow <- nrow(serverInfo)
clientInforow <- nrow(clientInfo)
feat <- NULL
# Client log rows where the player reported being paused.
idx <- which(clientInfo$c.playerState=="Paused")
# Split the run(s) of "Paused" rows into contiguous episodes: a jump larger
# than 1 in `idx` ends one pause episode and starts the next.
pause_begin <- idx[1]
pause_end <- idx[length(idx)]
i <- 1
while (i<length(idx))
{
if(idx[i+1] - idx[i] > 1)
{
pause_end <- c(idx[i],pause_end)
pause_begin <- c(pause_begin,idx[i+1])
}
i <- i+1
}
# Order the pause episodes by their length (in log rows), longest first.
t <- sort(pause_end-pause_begin, decreasing = TRUE, index.return = TRUE)
pause_begin <- pause_begin[t$ix]
pause_end <- pause_end[t$ix]
Feat_train_pause <- NULL
# for (i in (1:length(pause_begin)))
for (i in (1:1)) #choose the largest one
{
#map the time log at the client to the time log at the server
# Last server delivery strictly before the pause began, widened by 2
# segments (clamped at the start of the log) to tolerate clock skew.
t <- which((serverInfo$time-clientInfo$time[pause_begin[i]] < 0)== TRUE)
server_pause_begin <- t[length(t)]
if(server_pause_begin - 2 < 1) {server_pause_begin <- 1;} else {server_pause_begin <- server_pause_begin - 2;}
# First server delivery strictly after the pause ended, widened likewise.
t <- which((serverInfo$time-clientInfo$time[pause_end[i]] > 0)== TRUE)
server_pause_end <- t[1]
if(is.na(server_pause_end))
{#the pause happens in the end of the episode, when all segments have been requested.
feat <- c(feat, NA,NA,NA, NA)
}
else
{
#the pause does not lies at the end.
if(server_pause_end + 2 > serverInforow){server_pause_end <- serverInforow;} else{server_pause_end <- server_pause_end + 2; }
# Within the widened window, the pause is taken to be the single largest
# inter-delivery gap; narrow [begin, end] to the two deliveries around it.
interval <- as.numeric(as.duration(serverInfo$time[(server_pause_begin+1):server_pause_end] - serverInfo$time[server_pause_begin:(server_pause_end-1)]))
t <- server_pause_begin
server_pause_begin <- t+which.max(interval)-1
server_pause_end <- t+which.max(interval)
# Recompute intervals over the whole log to measure request frequency
# just after the pause (window truncated at the end of the episode).
interval <- as.numeric(as.duration(serverInfo$time[2:serverInforow]-serverInfo$time[1:(serverInforow-1)]))
freq <- NULL
if ((server_pause_end+nSegment_within_Window-1) < serverInforow)
{
freq <- mean(interval[(server_pause_end):(server_pause_end+nSegment_within_Window-1)])
} else{
freq <- mean(interval[(server_pause_end):serverInforow])
}
feat <- c(feat, interval[server_pause_begin],which(quality==serverInfo$quality[server_pause_begin]),which(quality==serverInfo$quality[server_pause_end]), freq)
}
}
feat
}
|
5ad0e05171fc2b6e43846a269e11af2f3aacaad0
|
3d80000fb79a94180d14cd085130ccacce3dd6a4
|
/1_helpers/1_helpers_generic.R
|
32606a510e93ae9323fb6f6e70d1d452010216df
|
[
"MIT"
] |
permissive
|
boyercb/ueda-replication
|
0dec1c2c74e5460238c595a9a84fd454fe64ac3b
|
4abb20ab6b0d4131556911bac5c97647460c24bf
|
refs/heads/master
| 2022-11-20T06:44:25.774427
| 2020-07-24T19:23:30
| 2020-07-24T19:23:30
| 271,377,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
1_helpers_generic.R
|
# Load helper functions ---------------------------------------------------
# Build the full path to a CSV file under the configured data directory.
#
# path: file path relative to the global CSV_ROOT_DIR.
# Returns CSV_ROOT_DIR prepended to `path`.
# Errors with an explicit message when CSV_ROOT_DIR is NULL or not defined
# at all (previously an undefined CSV_ROOT_DIR raised an opaque
# "object 'CSV_ROOT_DIR' not found" error instead of this message).
get_data <- function(path) {
  if (exists("CSV_ROOT_DIR") && !is.null(CSV_ROOT_DIR)) {
    paste0(CSV_ROOT_DIR, path)
  } else {
    stop("Must specify location of CSV directory!")
  }
}
specd <- function(x, k) trimws(format(round(x, k), nsmall=k))
|
eb61768ec8fcf538b59fcc492f22881cdaa3e916
|
d40484c9232a01a0b4daf29622e18d841c0ad841
|
/Test_app/server.R
|
f86dbf3bd14c6253b2328aa7b60df9ef47480ee0
|
[] |
no_license
|
GM-AI/R-Markdown-and-Leaflet
|
3319c19600628e1989cec144430f2480599f3d87
|
8b6ae12e34f7b8e0efeb60624def08e393eb7070
|
refs/heads/master
| 2022-12-07T02:28:13.236200
| 2020-08-13T20:15:15
| 2020-08-13T20:15:15
| 286,549,214
| 0
| 0
| null | 2020-08-13T20:20:59
| 2020-08-10T18:21:51
|
HTML
|
UTF-8
|
R
| false
| false
| 1,038
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram.
# Load the Vilnius population table once at startup; assumes
# "vilnius_pop.csv" sits in the app directory (TODO confirm when deploying).
vln<-as.data.frame(read.csv("vilnius_pop.csv", header = TRUE, sep = ","))
# Growth.Rate arrives as strings like "1.2%"; strip the percent sign and
# convert to numeric so it can be binned and plotted.
vln$Growth.Rate <- as.numeric(sub("%", "", vln$Growth.Rate))
shinyServer(function(input, output) {
# Histogram of yearly growth rates, re-rendered whenever input$bins changes.
output$distPlot <- renderPlot({
# generate bins based on input$bins from ui.R
bins <- seq(min(vln$Growth.Rate ), max(vln$Growth.Rate ), length.out = input$bins + 1)
# draw the histogram with the specified number of bins
hist(vln$Growth.Rate , breaks = bins, col = 'blue', border = 'grey',xlab = "Growth per year, %",main="Growth rate histogram")
})
# Pairwise scatter plot of all columns in the data set.
output$distPlot2<-renderPlot({
plot(vln)
})
# First input$years rows of the table, shown as an HTML table.
output$view <- renderTable({
head(vln, n = input$years)
})
# Text summary (min/median/mean/max per column) of the whole data set.
output$summary <-renderPrint({
summary(vln)
})
})
|
178d380d9aaae2263a07f2091758239df743feaf
|
6528e839f7b6adecc76f052c0eb5e6e627776529
|
/run_analysis.R
|
b82f5768798cb0bfc1e9c5185cf639f76fc67fe7
|
[] |
no_license
|
srholt/Getting-and-Cleaning-Data-Course-Project
|
69e6b31ae70c2ea6b6886912e4be85d5f4e5b238
|
e42da16879928e08387502b51a7c0a69fd4b722f
|
refs/heads/master
| 2020-12-31T04:56:43.794346
| 2016-05-08T18:16:03
| 2016-05-08T18:16:03
| 58,324,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,572
|
r
|
run_analysis.R
|
#run_analysis.R
# Downloads the UCI HAR (smartphone activity) data set, merges the training
# and test partitions, keeps only mean/std features, labels everything with
# descriptive names, and writes a tidy per-subject/per-activity summary.
#download data and move to working directory
# NOTE(review): the working directory is hard-coded to one user's machine --
# adjust (or remove the setwd calls) before running elsewhere.
setwd("/Users/shaunholt1/datasciencecoursera/week5")
library(downloader)
download("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", dest="dataset.zip", mode="wb")
unzip ("dataset.zip")
dir()
setwd("/Users/shaunholt1/datasciencecoursera/week5/UCI HAR Dataset")
#check files and directories to establish which files to use
dir()
list.files("./train")
list.files("./test")
#load packages into R
library(plyr)
library(dplyr)
library(reshape2)
#read activity list into data (activity id -> activity name lookup)
activityf <- read.table("activity_labels.txt")
dim(activityf)
#load x files to use (feature measurements for train and test partitions)
xtraining <- read.table("train/X_train.txt")
xtest <- read.table("test/X_test.txt")
dim(xtraining)
dim(xtest)
#Combine x files
xcomb <- rbind(xtraining, xtest)
dim(xcomb)
head(xcomb)
#load y files to use (activity id per observation)
ytraining <- read.table("train/y_train.txt")
ytest <- read.table("test/y_test.txt")
dim(ytraining)
dim(ytest)
#Combine files
ycomb <- rbind(ytraining, ytest)
dim(ycomb)
head(ycomb)
#load subject files (volunteer id per observation)
subtrain <- read.table("train/subject_train.txt")
subtest <- read.table("test/subject_test.txt")
dim(subtrain)
dim(subtest)
#Combine files
subcomb <- rbind(subtrain, subtest)
dim(subcomb)
head(subcomb)
#load feature file (names of the 561 measurement columns)
feature <- read.table("features.txt")
dim(feature)
head(feature)
#bringing names into data
names(subcomb)<-c("subject")
names(ycomb)<- c("activity")
names(xcomb)<- feature[ ,2]
head(subcomb)
head(ycomb)
head(xcomb)
#finding means and standard deviations
# Only columns whose name contains "-mean()" or "-std()" are kept.
meanstddev <- grep("-mean\\(\\)|-std\\(\\)", feature[, 2])
datameanstddev <- xcomb[, meanstddev]
head(datameanstddev)
#remove () and change names to lower case
names(datameanstddev) <- feature[meanstddev, 2]
names(datameanstddev) <- gsub("\\(|\\)", "", names(datameanstddev))
names(datameanstddev) <- tolower(names(datameanstddev))
head(datameanstddev)
dim(datameanstddev)
# create descriptive names
activityf
# e.g. "WALKING_UPSTAIRS" -> "walkingupstairs"
activityf[, 2] = gsub("_", "", tolower(as.character(activityf[, 2])))
# Replace numeric activity ids with their descriptive names.
ycomb[,1] = activityf[ycomb[,1], 2]
names(ycomb) <- "typeofactivity"
head(ycomb)
names(subcomb) <- "volunteer"
head(subcomb)
#create new merged data frame
tidytable <- cbind(subcomb,ycomb,datameanstddev)
write.table(tidytable, "tidydata.txt")
head(tidytable)
dim(tidytable)
#create tidy data summary required
# Average every measurement column per (volunteer, typeofactivity) pair.
tidydata<-aggregate(. ~volunteer + typeofactivity, tidytable, mean)
tidydata<-tidydata[order(tidydata$volunteer,tidydata$typeofactivity),]
write.table(tidydata, file = "tidydatafinal.txt",row.name=FALSE)
head(tidydata)
dim(tidydata)
|
a6836d7e81e92f391cde02d39b54b800f9a3d05d
|
a61f3d918215f5e7f7dbc0c1b51295226f1f67d0
|
/man/dmatnorm.Rd
|
e72a71ce1409ba6da088731c48d042e6373335f5
|
[] |
no_license
|
bdemeshev/vectordf
|
150f185c45e0de9b908e7f0cd359193ca2cab061
|
0d11673956b7f22aaaca41f8dada9e451965be98
|
refs/heads/master
| 2021-01-19T07:56:44.367710
| 2015-01-03T12:47:01
| 2015-01-03T12:47:01
| 28,709,229
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
rd
|
dmatnorm.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{dmatnorm}
\alias{dmatnorm}
\title{Matrix Normal density function}
\usage{
dmatnorm(X, M = matrix(0), U = diag(nrow(M)), V = diag(ncol(M)))
}
\arguments{
\item{X}{matrix-point, argument for density function}
\item{M}{matrix of expected values (r x s)}
\item{U}{among-row scale covariance matrix (r x r)}
\item{V}{among-column scale covariance matrix (s x s)}
}
\value{
scalar, density at the point X
}
\description{
Matrix Normal density function
}
\details{
Matrix Normal density function
}
\examples{
d <- dmatnorm(X = matrix(1, nrow=3, ncol=2), M = matrix(0, nrow=3, ncol=2))
d
}
|
a396f238a3c148ef0dd3c745ab685d61b0548b34
|
02bd0187bfa29b8ba18721dd010c7916f9a8dff4
|
/Part A/complete.R
|
2a525a637851fcf29dba7f66ef1dce72f2e69d6b
|
[] |
no_license
|
anbarisker/datascienceunitec
|
c4aa2ca985a7eeab5de0f1d02bee09cc99237dfe
|
79ff377b55ad2539d5c22c636e742fc3c7bd6ecd
|
refs/heads/master
| 2020-04-27T19:31:02.137875
| 2019-04-12T22:46:27
| 2019-04-12T22:46:27
| 174,622,123
| 0
| 0
| null | 2019-04-12T22:46:28
| 2019-03-08T22:57:05
| null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
complete.R
|
#Name: Anbarasan
#StudentID: 1508153
# Count complete (NA-free) observations per monitor file.
#
# directory: path to the folder holding the monitor CSV files; files are
#   indexed by their position in list.files() order, so monitor id i maps to
#   the i-th file.
# id: integer vector of monitor IDs to process (valid range 1..332).
#
# Returns a data.frame of the form:
#   id nobs
#   1  117
#   2  1047
# where `nobs` is the number of rows of that monitor's file with no missing
# values. Returns NA (after printing a message) when any id is out of range.
complete <- function(directory, id = 1:332)
{
  # Validate the id range before touching the file system.
  if (1 > min(id) || 332 < max(id)) {
    print(paste("Error: id is out of range."))
    return(NA)
  }
  Directory_File <- list.files(directory, full.names = TRUE)
  # Preallocate the result vector instead of growing it inside the loop.
  v <- numeric(length(id))
  for (i in seq_along(id))
  {
    # read.csv reads one monitor file into a data frame.
    records <- read.csv(Directory_File[id[i]])
    # complete.cases flags rows with no missing values; their count is nobs.
    v[i] <- sum(complete.cases(records))
  }
  # One row per requested monitor id, paired with its complete-case count.
  data.frame(id, nobs = v)
}
|
579a746528d4f663a3f27c0c9a1154d97f54e581
|
d6e943fe1e8884d2048ee9b08a28c89204e6f924
|
/man/colNormalization.Rd
|
3a90d595f5941bf58d57b2e4c2627555b7797ea4
|
[
"MIT"
] |
permissive
|
YosefLab/VISION
|
c9b08b358d56d9cb8121c3da02da62a0da8079fa
|
8dc5c4e886ddfeb8412ef3a82cead1c794f0e43b
|
refs/heads/master
| 2023-02-21T05:10:06.549965
| 2023-02-08T19:05:14
| 2023-02-08T19:05:14
| 79,424,615
| 123
| 27
|
MIT
| 2022-04-26T21:30:04
| 2017-01-19T06:51:45
|
R
|
UTF-8
|
R
| false
| true
| 401
|
rd
|
colNormalization.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NormalizationMethods.R
\name{colNormalization}
\alias{colNormalization}
\title{Performs z-normalization on all columns}
\usage{
colNormalization(data)
}
\arguments{
\item{data}{data matrix}
}
\value{
Data matrix with same dimensions, with each column z-normalized.
}
\description{
Performs z-normalization on all columns
}
|
aaedd29e9bb39adb4ddef55ef6deff1f7ad5d253
|
5d3121e7e42bfb2cc8ae76062a83df2791a45b95
|
/man/sbs1.Rd
|
9ee918ec7e5130c33a5dbef6fcabc7c855d0089d
|
[] |
no_license
|
neslon/dprep
|
3b872a3cbfe3492a27314d4d68c427a949cd538a
|
bedc64837b72919f0a249d716b6cecbb23923ad0
|
refs/heads/master
| 2021-01-11T14:49:55.339753
| 2017-01-27T23:09:36
| 2017-01-27T23:09:36
| 80,226,293
| 0
| 0
| null | 2017-01-27T16:51:27
| 2017-01-27T16:51:26
| null |
UTF-8
|
R
| false
| false
| 756
|
rd
|
sbs1.Rd
|
\name{sbs1}
\alias{sbs1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{One-step sequential backward selection}
\description{
This function performs one step of the sequential backward selection
procedure.}
\usage{
sbs1(data, indic, correct0, kvec, method = c("lda", "knn", "rpart"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{The name of a dataset}
\item{indic}{ A vector of 0-1 values: 1 indicates a selected feature.}
\item{correct0}{ The recognition rate based on the current subset of features}
\item{kvec}{ The number of neighbors}
\item{method}{ The classifier to be used}
}
\author{ Edgar Acuna}
\seealso{\code{\link{sffs}}}
\keyword{Feature Selection}
|
9a80139a50d859cc346a8f2aeaf6e2ede8a9d474
|
9219831ac8e54247e850803474333e4fff531e60
|
/R/localUtils.R
|
5e9844ab5dee65c2ab3ee7a7bb1c483265a63e91
|
[] |
no_license
|
WTaoUMC/RegEnrich
|
3a81aa3dd18a7dd5cd7a47f49009ecfd2f11bd37
|
2e41cd739d3d49674604bd3d0a5ba338422593f2
|
refs/heads/master
| 2021-08-01T12:00:16.199392
| 2021-07-31T06:42:10
| 2021-07-31T06:42:10
| 245,637,870
| 4
| 1
| null | 2021-07-31T06:19:40
| 2020-03-07T13:29:03
|
R
|
UTF-8
|
R
| false
| false
| 8,141
|
r
|
localUtils.R
|
#' @importFrom magrittr %>%
#' @export
#' @examples
#' \donttest{
#' # library(RegEnrich)
#' data("Lyme_GSE63085")
#' data("TFs")
#'
#' data = log2(Lyme_GSE63085$FPKM + 1)
#' colData = Lyme_GSE63085$sampleInfo
#' data1 = data[seq(2000), ]
#'
#' design = model.matrix(~0 + patientID + week, data = colData)
#'
#' # Initializing a 'RegenrichSet' object
#' object = RegenrichSet(expr = data1,
#' colData = colData,
#' method = 'limma', minMeanExpr = 0,
#' design = design,
#' contrast = c(rep(0, ncol(design) - 1), 1),
#' networkConstruction = 'COEN',
#' enrichTest = 'FET')
#'
#' # Using %>%
#' object %>% regenrich_diffExpr()
#' }
#'
magrittr::`%>%`
# Obtain the paramsIn slot (or one named parameter from it) of a
# RegenrichSet object.
#
# object: a RegenrichSet object.
# arg: NULL to return the whole paramsIn list, or a single character string
#   naming one parameter to extract.
# Returns object@paramsIn when arg is NULL, otherwise object@paramsIn[[arg]].
getParamsIn = function(object, arg = NULL) {
    stopifnot(is(object, "RegenrichSet"))
    # Reject anything that is not exactly one character string. The original
    # check chained the conditions with `&&`, which accepted length-1
    # non-character values and multi-element character vectors.
    if (!is.null(arg) && (length(arg) != 1 || !is.character(arg))) {
        stop("arg can only be either NULL or character.")
    }
    if (is.null(arg)) {
        return(object@paramsIn)
    } else {
        return(object@paramsIn[[arg]])
    }
}
# Check if the names(argsInList) are all listed in paramsIn
# slot from RegenrichSet object.
#
# object: a RegenrichSet object (carries the paramsIn slot that is updated).
# argsInList: named list of parameter values to merge into object@paramsIn.
# mustInArgs: optional character vector of argument names the current
#   function is allowed to (re)specify; supplied names outside this set
#   trigger a warning and are dropped before merging.
# Returns the object with object@paramsIn updated by the accepted arguments.
checkParams = function(object, argsInList, mustInArgs = NULL) {
argsName = names(argsInList)
if (length(argsInList) > 0) {
# A non-empty argument list must be fully named.
stopifnot(!is.null(argsName))
if (!is.null(mustInArgs)) {
indx = argsName %in% mustInArgs
# Every supplied name must be a known paramsIn parameter.
if (!all(argsName %in% names(object@paramsIn))) {
stop("Unknown argument(s):\n", argsName[!argsName %in%
names(object@paramsIn)])
}
# arguments not in mustInArgs: warn, they are discarded below
if (sum(!indx) > 0) {
warning("Following argument(s) should not be respecified ",
"in the current function:\n", argsName[!indx])
}
# arguments in mustInArgs: keep only those
if (sum(indx) > 0) {
argsInList = argsInList[indx]
} else {
argsInList = list()
}
}
}
# Merge the surviving arguments into the stored parameter list.
if (length(argsInList) > 0) {
object@paramsIn[names(argsInList)] = argsInList
}
return(object)
}
# Sort the rows of a data.frame by one or more of its columns.
#
# x: data.frame to sort.
# by: columns to sort by -- a character vector of column names, a data.frame
#   (its column names are used), or an integer vector of column positions.
#   Defaults to all columns of x.
# decreasing: sort in decreasing order?
# returnID: if TRUE, return list(res = sorted data.frame, id = row order)
#   instead of just the sorted data.frame.
sortDataframe = function(x, by = x, decreasing = FALSE, returnID = FALSE) {
    stopifnot(is.data.frame(x))
    if (is.character(by)) {
        nm = by
    } else if (is.data.frame(by)) {
        nm = colnames(by)
    } else if (is.integer(by)) {
        nm = colnames(x)[by]
    } else {
        stop("Unknown class of 'by'")
    }
    stopifnot(all(nm %in% colnames(x)))
    # Order by the selected columns directly. This replaces the original
    # eval(parse(text = ...)) construction, which was fragile (it broke on
    # non-syntactic column names) and hard to audit. unname() prevents a
    # column called e.g. "decreasing" from being matched to order()'s
    # own arguments.
    id = do.call(order, c(unname(as.list(x[nm])), list(decreasing = decreasing)))
    y = x[id, ]
    if (returnID) {
        y = list(res = y, id = id)
    }
    return(y)
}
# Generate the input matrix and output matrix for network
# inference by random forest @description Standardize the
# inputMatrix and outputMatrix for \code{\link{grNet}}.
# @param expr Gene expression data, either a matrix or a data
# frame. By default (\code{rowSample = FALSE}), each row
# represents a gene, each column represents a sample. @param
# reg vector of charactors, representing gene regulators. By
# default, these are transcription factors and co-factors,
# defined by three literatures/databases, namely RegNet,
# TRRUST, and Marbach2016. @param rowSample logic. If
# \code{TRUE}, each row represents a sample. The default is
# \code{FALSE}. @return A list of \code{inputMatrix}
# (expression of \code{reg}), \code{outputMatrix}
# (expression of all genes) and \code{validRegs} (the
# regulators exsist in \code{expr}). @examples \donttest{
# expr = matrix(rnorm(100*1000), nrow = 1000, ncol = 100,
# dimnames = list(paste0('G', seq(1000)), paste0('Samp',
# seq(100)))) set.seed(1234) TFs = paste0('G',
# sample(seq(1000),
# size = 50, replace = FALSE)) # rowSample = FALSE
# inOutput(expr, reg = TFs, rowSample = FALSE) # rowSample =
# TRUE inOutput(t(expr), reg = TFs, rowSample = TRUE) }
# @export
#' @include globals.R
# Build the input (regulator expression) and output (all-gene expression)
# matrices used for network inference; see the parameter notes above.
inOutput = function(expr, reg = TFs$TF_name, rowSample = FALSE,
    trace = FALSE) {
    # Orient the data so that samples are rows and genes are columns.
    outputMatrix = if (rowSample) expr else t(expr)
    exprGenes = colnames(outputMatrix)
    exprSamp = rownames(outputMatrix)
    # Keep only the regulators that actually occur in the expression data,
    # preserving their original order (and any duplicates) in `reg`.
    keep = reg %in% exprGenes
    validRegs = reg[keep]
    if (length(validRegs) == 0) {
        stop("No valide regulators can be found. Please ",
            "change 'reg' or check gene ID.")
    }
    if (trace) {
        cat(length(validRegs), " regulators will be used. \n")
    }
    # Regulator columns only; drop = FALSE keeps matrix shape even when a
    # single regulator survives the filter.
    inputMatrix = outputMatrix[, validRegs, drop = FALSE]
    # inputMatrix: expression of regulators only; outputMatrix: all genes.
    list(inputMatrix = inputMatrix, outputMatrix = outputMatrix,
        validRegs = validRegs)
}
# derived from DESeq2:::renameModelMatrixColumns function
# Map default model-matrix column names of factor variables to readable
# "<var>_<level>_vs_<reference>" contrast names.
#
# data: sample annotation (anything coercible to data.frame).
# design: model formula; only its factor variables are renamed.
# Returns a data.frame with character columns `from` (model.matrix-style
# name) and `to` (readable name), one row per non-reference factor level.
renameModelMatrixColumns = function (data, design){
    data = as.data.frame(data)
    # Restrict to the design variables that are factors in `data`.
    is_fac = vapply(all.vars(design), function(v) is.factor(data[[v]]),
        FUN.VALUE = TRUE)
    factorVars = names(is_fac)[is_fac]
    from_names = character(0)
    to_names = character(0)
    for (v in factorVars) {
        # model.matrix drops the first (reference) level of each factor.
        lv = levels(data[[v]])
        from_names = c(from_names, paste0(v, lv[-1]))
        to_names = c(to_names, paste0(v, "_", lv[-1], "_vs_", lv[1]))
    }
    data.frame(from = make.names(from_names), to = make.names(to_names),
        stringsAsFactors = FALSE)
}
# Adjacency matrix to a data.frame of edges.
# @param mat adjacency matrix.
# @param mode Character, to specify the class of graph and which part of
# the matrix will be used. Possible values are: "directed" (default),
# "undirected", "upper", "lower".
# @param diag logic, whether to include the diagonal of the matrix.
# @return a data.frame of edge information. The first column is from node,
# the second column is to node, and the third is weight.
# @examples {
# \donttest{
# mat = matrix(rnorm(4*4), nrow = 4,
# dimnames = list(letters[seq(4)], LETTERS[seq(4)]))
# mat2Edge(mat, mode = "undirected", diag = TRUE)
# mat2Edge(mat, mode = "undirected", diag = FALSE)
# mat2Edge(mat, mode = "directed", diag = TRUE)
# mat2Edge(mat, mode = "upper", diag = TRUE)
# mat2Edge(mat, mode = "upper", diag = FALSE)
# }
# }
mat2Edge = function(mat, mode = c("directed", "undirected", "upper", "lower"),
diag = FALSE, removeEdgesBelowThisWeight = NULL){
mode = match.arg(mode)
rowN = nrow(mat)
colN = ncol(mat)
# Fall back to positional indices when the matrix lacks dimnames.
nameRow = rownames(mat)
if(is.null(nameRow)) nameRow = seq(rowN)
nameCol = colnames(mat)
if(is.null(nameCol)) nameCol = seq(colN)
# Build a logical mask of the matrix cells that become edges.
if (mode == "directed"){
# diag(!diag, r, c): with diag = FALSE this is an identity-pattern matrix,
# so its negation keeps every off-diagonal cell; with diag = TRUE the
# inner matrix is all-false, so the negation keeps every cell.
id = !diag(!diag, rowN, colN)
} else if (mode %in% c("undirected", "upper")){
id = upper.tri(mat, diag = diag)
} else if (mode == "lower"){
id = lower.tri(mat, diag = diag)
}
# Optionally drop edges whose weight falls below the threshold.
if (!is.null(removeEdgesBelowThisWeight) &&
is.numeric(removeEdgesBelowThisWeight)){
id = id & (mat >= removeEdgesBelowThisWeight)
}
# Convert the mask into (row, col) index pairs; mat[id] then pulls the
# matching weights via two-column matrix indexing.
id = which(id, arr.ind = TRUE, useNames = TRUE)
return(data.frame(from = nameRow[id[,1]],
to = nameCol[id[,2]],
weight = mat[id],
stringsAsFactors = FALSE))
}
######## --------------- review --------------- #########
# Regulator -> targets adjacency list built from the elementset slot:
# element IDs grouped by their set (regulator); list names are regulators.
.net = function(TopNetworkObj){
    es = TopNetworkObj@elementset
    split(es$element, es$set)
}
# Target -> regulators adjacency list built from the elementset slot:
# set (regulator) IDs grouped by their element; list names are targets.
.tarReg = function(TopNetworkObj){
    es = TopNetworkObj@elementset
    split(es$set, es$element)
}
# judge if pFC is empty
# Decide whether a p-value / log-fold-change table is effectively empty,
# i.e. every p and every logFC is numerically zero (|value| < 1e-18).
#
# pFC: data.frame (or list) with numeric elements `p` and `logFC`.
# Returns TRUE when all values are effectively zero; FALSE otherwise,
# including when the comparison yields NA (e.g. NA entries in pFC).
isEmptyPFC = function(pFC){
    # isTRUE() collapses the NA case to FALSE, replacing the original
    # two-step `&` + is.na() fallback; `&&` is the correct scalar operator
    # for combining the two all() results.
    isTRUE(all(abs(pFC$p) < 1e-18) && all(abs(pFC$logFC) < 1e-18))
}
|
2d6cea79c47b96f143f35e1b54d4e490bbedb62a
|
e56da52eb0eaccad038b8027c0a753d9eb2ff19e
|
/man/LabelSplits.Rd
|
3f04f388259d43e118b8651445258d9260d1f892
|
[] |
no_license
|
ms609/TreeTools
|
fb1b656968aba57ab975ba1b88a3ddf465155235
|
3a2dfdef2e01d98bf1b58c8ee057350238a02b06
|
refs/heads/master
| 2023-08-31T10:02:01.031912
| 2023-08-18T12:21:10
| 2023-08-18T12:21:10
| 215,972,277
| 16
| 5
| null | 2023-08-16T16:04:19
| 2019-10-18T08:02:40
|
R
|
UTF-8
|
R
| false
| true
| 2,880
|
rd
|
LabelSplits.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Support.R
\name{LabelSplits}
\alias{LabelSplits}
\title{Label splits}
\usage{
LabelSplits(tree, labels = NULL, unit = "", ...)
}
\arguments{
\item{tree}{A tree of class \code{\link[ape:read.tree]{phylo}}.}
\item{labels}{Named vector listing annotations for each split. Names
should correspond to the node associated with each split; see
\code{\link[=as.Splits]{as.Splits()}} for details.
If \code{NULL}, each splits will be labelled with its associated node.}
\item{unit}{Character specifying units of \code{labels}, if desired. Include a
leading space if necessary.}
\item{\dots}{Additional parameters to \code{\link[ape:nodelabels]{ape::edgelabels()}}.}
}
\value{
\code{LabelSplits()} returns \code{invisible()}, after plotting \code{labels} on
each relevant edge of a plot (which should already have been produced using
\code{plot(tree)}).
}
\description{
Labels the edges associated with each split on a plotted tree.
}
\details{
As the two root edges of a rooted tree denote the same split, only the
rightmost (plotted at the bottom, by default) edge will be labelled.
If the position of the root is significant, add a tip at the root using
\code{\link[=AddTip]{AddTip()}}.
}
\examples{
tree <- BalancedTree(LETTERS[1:5])
splits <- as.Splits(tree)
plot(tree)
LabelSplits(tree, as.character(splits), frame = "none", pos = 3L)
LabelSplits(tree, TipsInSplits(splits), unit = " tips", frame = "none",
pos = 1L)
# An example forest of 100 trees, some identical
forest <- as.phylo(c(1, rep(10, 79), rep(100, 15), rep(1000, 5)), nTip = 9)
# Generate an 80\% consensus tree
cons <- ape::consensus(forest, p = 0.8)
plot(cons)
# Calculate split frequencies
splitFreqs <- SplitFrequency(cons, forest)
# Optionally, colour edges by corresponding frequency.
# Note that not all edges are associated with a unique split
# (and two root edges may be associated with one split - not handled here)
edgeSupport <- rep(1, nrow(cons$edge)) # Initialize trivial splits to 1
childNode <- cons$edge[, 2]
edgeSupport[match(names(splitFreqs), childNode)] <- splitFreqs / 100
plot(cons, edge.col = SupportColour(edgeSupport), edge.width = 3)
# Annotate nodes by frequency
LabelSplits(cons, splitFreqs, unit = "\%",
col = SupportColor(splitFreqs / 100),
frame = "none", pos = 3L)
}
\seealso{
Calculate split support: \code{\link[=SplitFrequency]{SplitFrequency()}}
Colour labels according to value: \code{\link[=SupportColour]{SupportColour()}}
Other Splits operations:
\code{\link{NSplits}()},
\code{\link{NTip}()},
\code{\link{PolarizeSplits}()},
\code{\link{SplitFrequency}()},
\code{\link{SplitsInBinaryTree}()},
\code{\link{Splits}},
\code{\link{TipLabels}()},
\code{\link{TipsInSplits}()},
\code{\link{match.Splits}},
\code{\link{xor}()}
}
\concept{Splits operations}
|
783d47e448ba1a9bc22bc4839f2c5bc95b66db4d
|
97e50001a42a6fdebaf083700bb167c08eef6676
|
/plot1.R
|
efe12b76c2cd9a4d54b3ceb5a6e4eed44740819f
|
[] |
no_license
|
PJGreen/ExData_Plotting1
|
e0460c60bfaa31c2a38b0c4f2be785d58d768cc6
|
9dba4b3c8d44db7f41d559360eeaa4916416b4b0
|
refs/heads/master
| 2021-05-14T09:04:14.587832
| 2018-01-06T22:51:53
| 2018-01-06T22:51:53
| 116,318,412
| 0
| 0
| null | 2018-01-04T23:39:26
| 2018-01-04T23:39:25
| null |
UTF-8
|
R
| false
| false
| 171
|
r
|
plot1.R
|
# Plot 1: histogram of household Global Active Power.
# NOTE(review): relies on `dt_sub` already being loaded in the workspace by
# an earlier preprocessing step -- confirm before running standalone.
hist(dt_sub$Global_active_power, col="red", main="Global Active Power", ylab="Frequency", xlab="Global Active Power (kilowatts)")
# Copy the on-screen plot to a PNG device, then close that device.
dev.copy(png, file="Plot1.png")
dev.off()
|
1cfec71d6c5184c2e626739c2a728956d240d60d
|
e082728a5557b4584812addfac6c91c266f29994
|
/spls/discriminant_analysis.R
|
115e5ff62004d6d243cd23cbf090041b48b4e85f
|
[] |
no_license
|
eprdz/pipelines_git
|
c5eb34df6add8a3f7f97e1868dbed4d712799630
|
bd000a3f4d07e3025cbf58dfa832c18c1bf1c97d
|
refs/heads/main
| 2023-08-10T18:48:07.545777
| 2021-10-07T13:29:09
| 2021-10-07T13:29:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,670
|
r
|
discriminant_analysis.R
|
#!/usr/bin/env Rscript
#######################################
#### Discriminant analysis ####
#######################################
pacman::p_load(mixOmics, ggplot2, gplots, DiscriMiner, clusterProfiler)
parameters <- commandArgs(trailingOnly=TRUE)
counts.file <- as.character(parameters[1]) # Omic dataset
meta.file <- as.character(parameters[2]) # Metadata file
myfactors <- as.character(parameters[3]) # Factor of metadata to study (Groups)
mylevels <- as.character(parameters[4]) # Levels of the factor to study (Decrease, increase)
descr.column <- as.character(parameters[5]) # What is going to be analysed - Genera, metabolites or IDs from KEGG
ncomp <- as.numeric(as.character(parameters[6])) # Number of components to use (2)
omic <- as.character(parameters[7]) # 16S rRNA, metabolomics, metagenomics or proteomics
model <- as.character(parameters[8]) # dynamic model or predictive model
##########
## Data selection and arrange
dat <- read.table(file=counts.file, sep="\t", header=TRUE, quote="", stringsAsFactors=FALSE, check.names=FALSE)
DESCRIPTION <- dat[, descr.column]; names(DESCRIPTION) <- rownames(dat) <- dat[, 1]
meta <- read.table(file=meta.file, sep="\t", header=TRUE, quote="", stringsAsFactors=FALSE, check.names=FALSE)
rownames(meta) <- meta[, 1]
mysamples <- intersect(rownames(meta), colnames(dat))
dat <- as.matrix(dat[, mysamples])
meta <- meta[mysamples, ]
myfactors.l <- strsplit(myfactors, split=",")
myfactor1 <- myfactors.l[[1]][1]
if(length(myfactors.l[[1]])==2) myfactor2 <- myfactors.l[[1]][2] else myfactor2 <- ""
if(myfactor2!="") GROUPS <- paste(meta[, myfactor1], meta[, myfactor2], sep=".") else GROUPS <- as.character(meta[, myfactor1])
names(GROUPS) <- mysamples
mylevels <- unlist(strsplit(mylevels, split=","))
mylevels <- mylevels[which(is.element(mylevels, unique(GROUPS)))]
ind <- which(is.element(GROUPS, mylevels))
dat <- dat[, ind]
GROUPS <- GROUPS[ind]
meta <- meta[ind, ]
Y <- as.factor(GROUPS)
X <- t(dat)
##########
## sPLS-DA
## Tune the number of variables per component via M-fold cross-validation,
## fit the final sparse PLS-DA model, and export the selected variables.
# Grid of possible numbers of variables that will be tested for each component
# (coarser grid for the high-dimensional omics to keep tuning tractable)
if (omic == "metagenomics" | omic == "proteomics") {
list.keepX <- c(seq(10, 1010, 50))
} else {
list.keepX <- c(seq(10, ncol(X), 5))
}
# Testing the error of different number of variables
tune.splsda <- tune.splsda(X, Y, ncomp=ncomp, validation='Mfold', folds=5,
progressBar=TRUE,
test.keepX=list.keepX, nrepeat=50)
# The optimal number of features to select (per component):
#tune.splsda$choice.keepX
# The optimal number of components
#tune.splsda$choice.ncomp$ncomp
# We include these parameters in our final sPLS-DA model:
# (the component count is taken from the script parameter, not from tuning)
choice.ncomp <- ncomp
choice.keepX <- tune.splsda$choice.keepX[1:choice.ncomp]
# Applying sPLS-DA
splsda.res <- mixOmics::splsda(X, Y, ncomp=choice.ncomp, keepX=choice.keepX)
save(splsda.res, file="splsda.res.RDa")
# Assessing performance of sPLS-DA
perf.splsda <- perf(splsda.res, validation="Mfold", folds=5, progressBar=TRUE, auc=TRUE, nrepeat=50)
pdf(file="splsda.ncomp.pdf")
plot(perf.splsda, col=color.mixo(1:3), sd=TRUE, legend.position="horizontal")
dev.off()
# Final selection of features can be output, along with their weight coefficient
#(most important based on their absolute value) and their frequency in models:
# NOTE(review): the loop variable below shadows the scalar `ncomp` read above;
# after the loop `ncomp` equals `choice.ncomp`. Also Freq >= 0 keeps every
# selected variable (no frequency filtering) -- confirm that is intended.
variables1 = c()
variables2 = c()
for(ncomp in 1:choice.ncomp){
ind.match = match(selectVar(splsda.res, comp = ncomp)$name,
names(perf.splsda$features$stable[[ncomp]]))
Freq = as.numeric(perf.splsda$features$stable[[ncomp]][ind.match])
vars.comp = data.frame(selectVar(splsda.res, comp = ncomp)$value, Freq)
vars.comp = cbind(ID = rownames(vars.comp), vars.comp)
if (ncomp == 1) variables1 = as.character(vars.comp[vars.comp$Freq >= 0, 1])
if (ncomp == 2) variables2 = as.character(vars.comp[vars.comp$Freq >= 0, 1])
write.table(vars.comp, file=paste("vars.comp", ncomp, "tsv", sep="."), sep="\t", row.names=FALSE, quote=FALSE)
}
# Sample Plots (PCA and sPLS-DA)
# NOTE(review): hard-codes exactly two group labels in a fixed order --
# confirm levels(splsda.res$Y) really are c(decrease, increase) at this point.
levels(splsda.res$Y) = c("MRE decrease", "MRE increase")
pca = pca(X, center = F, scale = F)
png(paste0("pca_",omic,model,".png"))
plotIndiv(pca, group = splsda.res$Y, ind.names = F, pch = c(16,16),
title = paste('PCA -', omic, "-", model), star = T, legend = T, legend.position = "bottom")
dev.off()
back = background.predict(splsda.res, comp.predicted = 2) # To color the background according to the group belonging
png(paste0("splsda_",omic,model,".png"))
plotIndiv(splsda.res, comp=c(1,2), rep.space='X-variate', pch = c(16,16),group=splsda.res$Y, ind.names=F, legend=TRUE,
col.per.group = c("red2", "forestgreen"), title=paste0('sPLS-DA - ', omic, " - ", model), star = T,
X.label = paste0("Comp 1: ", round(splsda.res$explained_variance$Y[1], digits = 3)*100,"% Expl. variance"),
Y.label = paste0("Comp 2: ", round(splsda.res$explained_variance$Y[2], digits = 3)*100,"% Expl. variance"),
background = back, legend.position = "bottom")
dev.off()
###########
## Assessment of a PLS-DA model with selected variables by sPLS-DA using DiscriMiner
# For 16S/metagenomics/proteomics only component-1 variables are kept; otherwise both
if (omic != "proteomics" & omic != "metagenomics" & omic != "16S rRNA analysis") {
variables = c(variables1, variables2)} else variables = variables1
# Subset X to the sPLS-DA-selected variables before refitting with DiscriMiner
X = X[,colnames(X) %in% unique(variables)]
storage.mode(X) = "numeric"
discriminer_pls = DiscriMiner::plsDA(variables = X, group = Y, autosel = F, comps = 2, cv = "LOO")
# VIP values
vips = as.data.frame(discriminer_pls$VIP)
vips = cbind(feature = rownames(vips), vips)
vips = vips[order(vips$`Component 1`, decreasing = T),]
write.table(vips, file=paste(omic,"VIPs", model, "tsv", sep="."), sep="\t", quote = F, row.names = F)
# Quality metrics of the model (R2, Q2, error rate)
output = as.data.frame(cbind(discriminer_pls$R2, discriminer_pls$Q2, error_rate = rep(discriminer_pls$error_rate,2)))
output = cbind(comps = rownames(output), output)
write.table(output, file=paste(omic,"r2q2error", model, "tsv", sep="."), sep="\t", quote = F, row.names = F)
# Keep only variables with VIP > 1 on component 1
vips = discriminer_pls$VIP
X = X[,colnames(X) %in% names(which(vips[,1]>1))]
write.table(X, file=paste("subset.by.vips", omic, model, "tsv", sep="."), sep="\t", quote = F, row.names = F)
# PCA of the data set with only the selected variables (PERMANOVA done separately)
# NOTE(review): X is transposed here (features become rows) before pca() --
# confirm this orientation is intended; the later t-test section indexes X again.
X = t(X)
pca = pca(X, center = F, scale = F)
png(paste0("pca_",omic,".png"))
plotIndiv(pca, group = splsda.res$Y, ind.names = F, pch = c(16,16), title = paste('PCA -', omic, "-", model),
star = T, legend = T, legend.position = "bottom")
dev.off()
##########
## Significant variables
## Per-variable Welch t-tests (increase vs decrease samples), BH adjustment,
## and boxplots for the significant metabolites/taxa.
# t-test and p-value adjustment
# NOTE(review): rows of X are matched against meta$pares -- confirm rownames(X)
# are sample/pair identifiers here, given the t(X) transpose performed above.
pvalues = c()
for (i in 1:ncol(X)) {
p = t.test(X[rownames(X) %in% meta$pares[which(meta$grupos=="increase")], i],
X[rownames(X) %in% meta$pares[which(meta$grupos=="decrease")], i])
pvalues = c(pvalues, p$p.value)
names(pvalues)[i] = colnames(X)[i]}
padj = p.adjust(pvalues, method = "BH")
pvalues = data.frame(metabolite = names(pvalues), p.value=pvalues, p.value.adj=padj)
pvalues = pvalues[order(pvalues$p.value.adj),]
write.table(pvalues, file=paste(omic,"pvalues", model, "tsv", sep="."), sep="\t", quote = F, row.names = F)
# Boxplots for significant metabolites
# Significance threshold: BH q <= 0.1 AND raw p <= 0.05
if (omic == "metabolomics" | omic == "16S rRNA analysis") {
select_for_boxplot = as.character(pvalues$metabolite[which(pvalues$p.value.adj<=0.1 & pvalues$p.value<=0.05)])
for (i in select_for_boxplot){
metab = i
q = round(pvalues[pvalues$metabolite==metab, "p.value.adj"], digits = 4)
p = round(pvalues[pvalues$metabolite==metab, "p.value"], digits = 4)
metabolito = X[,colnames(X) == metab]
if (model == "predictive") metabolito = metabolito + log2(1000000) ## Transform to counts per million
metabolito=as.data.frame(cbind(valores = metabolito, grupos = meta[names(metabolito), "grupos"]))
metabolito$valores = as.numeric(as.vector(metabolito$valores))
# Clean up the display name: drop trailing digit suffixes, dots -> spaces, sentence case
metab2 = gsub(pattern = "\\.[0-9]", replacement = "", x=metab)
metab2 = paste0(gsub(pattern = "\\.", replacement = " ", x=metab2))
metab2 = stringr::str_to_sentence(metab2)
# Manual label corrections for known misspellings/abbreviations in the data
if (metab2 == "Phanylalanine") metab2="Phenylalanine"
if (metab2 == "Ile") metab2 = "Isoleucine"
if (metab2 == "Val") metab2 = "Valine"
if (metab2 == "5-aminovaleric acid") metab2 = "5-aminovalerate"
pl <- ggplot(metabolito, aes(x=factor(grupos), y=valores))
p2 = pl +
geom_boxplot(aes(fill=factor(grupos))) +
theme_bw() +
labs(fill="Grupos") +
stat_summary(fun=mean, geom="point", shape=20, size=1, color="red", fill="red") +
xlab("Groups") +
ylab("Values per sample") +
geom_point( aes(x=factor(grupos),y=valores), alpha=0.5 ) +
ggtitle(paste0(metab2, " (p = ", p, ", q = ",q,")")) +
theme(plot.title = element_text(size = 25, face = "bold"),
axis.text.y = element_text(size=20),
axis.title.y = element_blank(),
axis.title.x = element_blank(),
axis.text.x = element_blank(),
legend.position = "none")+
scale_fill_manual(values = c("MRE decrease" = "red2",
"MRE increase" = "forestgreen"))
png(file=paste0("outliers.",metab,".png"))
print(p2)
dev.off()
}
}
# Heatmaps and enrichment for metagenomics and proteomics
# Side effects: writes a clustered heatmap PDF, then draws two enrichment
# barplots (KOs higher in "increase", then KOs higher in "decrease") on the
# active graphics device.
# FIX: the condition used `omic = "proteomics"` (assignment), a syntax error.
if (omic == "metagenomics" | omic == "proteomics"){
# HEATMAPS
# Keep only features passing both thresholds (raw p <= 0.05 AND BH q <= 0.1); result is features x samples
otus_matrix = t(X[,colnames(X) %in% as.character(pvalues[pvalues$p.value<=0.05 & pvalues$p.value.adj<=0.1,1])])
# Rename samples to their group labels ("decrease...", "increase...") and order by group
colnames(otus_matrix) = make.names(meta[meta$pares %in% colnames(otus_matrix), "grupos"], unique = T)
otus_matrix = otus_matrix[,order(colnames(otus_matrix))]
M = as.matrix(otus_matrix)
storage.mode(M) = "numeric"
data.temp <- M
# Shift every row to be non-negative, then convert to row-wise relative abundances
for (i in seq_len(nrow(data.temp))){
minimo = min(data.temp[i,which(data.temp[i,] != 0)])
data.temp[i,] = data.temp[i,] + abs(minimo)
data.temp[i,] = data.temp[i,] / sum(data.temp[i,])
}
data.temp <- t(scale(t(data.temp))) # z-score per feature (row)
# Cluster the two sample groups separately (column labels start with "d"/"i") and merge the dendrograms
hc1 <- hclust(dist(t(data.temp[,grepl(pattern = "^d.+", x=colnames(data.temp))])), method = "complete")
hc2 <- hclust(dist(t(data.temp[,grepl(pattern = "^i.+", x=colnames(data.temp))])), method = "complete")
hc = merge(as.dendrogram(hc1),rev(as.dendrogram(hc2)))
hr <- hclust(dist(data.temp), method = "complete")
lmat = rbind(c(0,3),c(2,1),c(0,4))
lwid = c(1.5,4)
lhei = c(2,4,1)
colors = ifelse(grepl("decrease.*", colnames(data.temp)), "red2", "forestgreen")
myBreaks <- seq(-2, 2, length.out=11)
# FIX: the original file name referenced undefined objects (`omica`, `filtrado`,
# `comp`) and the base function `log`, which would error at run time.
pdf(paste0(omic, "-", model, "-heatmap.version_final.pdf"))
heatmap.2(data.temp,breaks = myBreaks,col=colorRampPalette(c("red","yellow","darkgreen"))(10), Colv=as.dendrogram(hc),
Rowv=as.dendrogram(hr),dendrogram="both",
trace="none", key=F, keysize = 1.5,lhei = c(2,4), lwid = c(0.5,1),
ylab = "KO", xlab="Samples", ColSideColors = colors, symkey=FALSE, density.info="density",
labRow = F, labCol = F, main = paste0("Heatmap - ", omic, " - ", model))
# FIX: legend(locator(), ...) blocks waiting for an interactive mouse click,
# which hangs a non-interactive run; `lty = 1.5` is also not a valid line type.
legend("topright", legend = c("MRE decrease", "MRE increase"), col= c("red2", "forestgreen"), lty= 1,
lwd = 2, cex=1, xpd=TRUE)
dev.off()
#ENRICHMENT
# NOTE(review): hard-coded home path -- parameterise if this must run elsewhere.
kegg_path = read.delim("~/tfm/transcriptomics/processed/ko_andpathways_desglosed.tsv", sep="\t", stringsAsFactors = F,
header = T)
# TERM2GENE mapping for enricher(): KEGG pathway description -> KO identifier
kegg_path = as.data.frame(cbind(desc = kegg_path$path_desc, ko = kegg_path$ko))
colnames(otus_matrix) = make.names(meta[meta$pares %in% colnames(otus_matrix), "grupos"], unique = T)
df_pos = otus_matrix[,grepl("increase.*", colnames(otus_matrix))]
df_neg = otus_matrix[,grepl("decrease.*", colnames(otus_matrix))]
result = data.frame(KEGG = rep(NA, nrow(otus_matrix)), BOOL = rep(NA,nrow(otus_matrix)))
# Select variables whether they are increased in MRE increase group or not
for (i in seq_len(nrow(otus_matrix))){
result[i,1] = as.character(rownames(otus_matrix)[i])
result[i,2] = mean(as.numeric(df_pos[i,])) > mean(as.numeric(df_neg[i,]))
}
# Enrichment of KOs that are higher in the "increase" group
info_kegg = result[result$BOOL == "TRUE",1]
ewp <- enricher(info_kegg, TERM2GENE = kegg_path, pvalueCutoff = 0.1)
barplot(ewp) + ylab("Number of KOs per KEGG pathway") + ggtitle(paste0("Enrichment analysis - ", omic, " - ", model)) +
theme(plot.title.position = "plot",
plot.title = element_text(size=20))
# Enrichment of KOs that are higher in the "decrease" group
info_kegg = result[result$BOOL == "FALSE",1]
ewp <- enricher(info_kegg, TERM2GENE = kegg_path, pvalueCutoff = 0.1)
# FIX: the title was wrapped in a nested ggtitle(ggtitle(...)) call
barplot(ewp) + ylab("Number of KOs per KEGG pathway") +
ggtitle(paste0("Enrichment analysis - ", omic, " - ", model)) + theme(plot.title.position = "plot",
plot.title = element_text(size=20))
}
|
db0e8bcf251306da04efbb9771483d923569ec85
|
567f2d42ca081c76732ecde1becfb2df212ceec4
|
/man/elexonURL.Rd
|
56a34e15701291732c0258a1b126bb78524b2fda
|
[] |
no_license
|
p-hunter/Relexon
|
1d2e90cb36802f58f8385b21a3221e9c70f789e9
|
1dd8b3008095a4c4d3a7b22aba629a1da829422d
|
refs/heads/master
| 2023-08-24T21:23:43.764762
| 2021-10-23T23:37:29
| 2021-10-23T23:37:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,142
|
rd
|
elexonURL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elexonURL.R
\name{elexonURL}
\alias{elexonURL}
\title{elexonURL}
\usage{
elexonURL(dataset = "ROLSYSDEM", key, from = Sys.Date() - 2,
to = Sys.Date() - 1, test = FALSE)
}
\arguments{
\item{dataset}{The dataset you are pulling from BMRS/Elexon.}
\item{key}{Your personal scripting key from elexon. Find out more at https://www.elexonportal.co.uk}
\item{from}{This is the start date/datetime of the dataset}
\item{to}{This is the end date/datetime of the dataset}
\item{test}{This is set to FALSE by default. Set this argument to TRUE if you want to use the test version of the API.}
}
\description{
This function gives either a single URL or many URLs that can be used to download csv files manually.
Please note: it does not matter if BMRS requires the dates to be in a different format to "yyyy-mm-dd".
The Relexon package will take care of this. Just enter the dates in the usual format!
}
\examples{
\dontrun{
elexonURL(
"HHFUEL",
key = "948ghmgpe",
from = "2018-01-01",
to = "2018-01-05",
test = TRUE
)
}
}
|
8d05cde28e41a5c880003c7708426c8c3326090b
|
d28508911e5a2f5c3d8d849d7d2a97c687dbffd9
|
/Chapter03/neural_network_with_neuralnet.R
|
7c6c9940546d7042546c4f5bebd70d798e9b467c
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-on-Deep-Learning-with-R
|
10032fb0aceed0b315cf7bb399f53e07885df8f7
|
6e3766377395d4e2a853f787d1f595e4d8d28fa5
|
refs/heads/master
| 2023-02-11T11:05:47.140350
| 2023-01-30T09:37:44
| 2023-01-30T09:37:44
| 124,351,189
| 21
| 15
|
MIT
| 2020-04-09T06:29:03
| 2018-03-08T07:03:57
|
R
|
UTF-8
|
R
| false
| false
| 2,761
|
r
|
neural_network_with_neuralnet.R
|
# load libraries
library(tidyverse)
library(caret)
library(Metrics)
# load data: Wisconsin Diagnostic Breast Cancer data set (no header row in the file)
wbdc <- readr::read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data", col_names = FALSE)
# convert the target variable to 1 and 0 and relabel
# (X2 is the diagnosis column: "M" = malignant -> 1, "B" = benign -> 0)
wbdc <- wbdc %>%
dplyr::mutate(target = dplyr::if_else(X2 == "M", 1, 0)) %>%
dplyr::select(-X2)
# scale and standardize all independent variables (min-max scaling to [0, 1])
# FIX: funs() is deprecated in dplyr; use a purrr-style lambda instead
wbdc <- wbdc %>% dplyr::mutate_at(vars(-X1, -target), ~ (. - min(.)) / (max(.) - min(.)))
# create a training and test data set by performing an 80/20 split
# (X1 is the record ID, used to anti-join the held-out rows)
train <- wbdc %>% dplyr::sample_frac(.8)
test <- dplyr::anti_join(wbdc, train, by = 'X1')
# remove the ID column
test <- test %>% dplyr::select(-X1)
train <- train %>% dplyr::select(-X1)
# extract the target variable into a separate vector and remove it from the test data
actual <- test$target
test <- test %>% dplyr::select(-target)
# prepare the formula for the neuralnet function ("target ~ X3 + X4 + ...")
n <- names(train)
formula <- as.formula(paste("target ~", paste(n[!n == "target"], collapse = " + ", sep = "")))
# train a neural net on the data
# (two hidden layers of 15 units, logistic activation, classification output)
# NOTE(review): no set.seed() before sample_frac()/neuralnet(), so results
# differ between runs -- confirm whether reproducibility matters here.
net <- neuralnet::neuralnet(formula,
data = train,
hidden = c(15,15),
linear.output = FALSE,
act.fct = "logistic"
)
# make prediction using the model
prediction_list <- neuralnet::compute(net, test)
# convert the predictions to binary values for evaluation (threshold at 0.5)
predictions <- as.vector(prediction_list$net.result)
binary_predictions <- dplyr::if_else(predictions > 0.5, 1, 0)
# calculate the percentage of correct predictions
sum(binary_predictions == actual)/length(actual)
# evaluate the results using a confusion matrix
results_table <- table(binary_predictions, actual)
caret::confusionMatrix(results_table)
# evaluate the results using the AUC score (uses raw probabilities, not the binarized values)
Metrics::auc(actual, predictions)
# add a backpropagation step: refit with plain backprop and an explicit learning rate
# (higher threshold and stepmax help the slower algorithm converge)
bp_net <- neuralnet::neuralnet(formula,
data = train,
hidden = c(15,15),
linear.output = FALSE,
act.fct = "logistic",
algorithm = "backprop",
learningrate = 0.00001,
threshold = 0.3,
stepmax = 1e6
)
# check accuracy again on the same held-out test set
prediction_list <- neuralnet::compute(bp_net, test)
predictions <- as.vector(prediction_list$net.result)
binary_predictions <- dplyr::if_else(predictions > 0.5, 1, 0)
results_table <- table(binary_predictions, actual)
Metrics::auc(actual, predictions)
caret::confusionMatrix(results_table)
|
599f11c550f4e8f32e581be41db74541c5325215
|
510bc25ad2b6e67e4a3c13043cacd4424b75552e
|
/R/print_demand.r
|
8a632d30205b9e69085c4bb55546f8b38b6a36a6
|
[] |
no_license
|
orangeluc/energyRt
|
ff7423a2010d8edc3915034c396f079662ea4315
|
c72d1a528a95ef8fada215e0abef45d523383758
|
refs/heads/master
| 2020-04-24T06:04:06.819280
| 2019-02-20T13:26:26
| 2019-02-20T13:26:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
r
|
print_demand.r
|
#---------------------------------------------------------------------------------------------------------
#! print.demand <- function(x, ...) : print demand
#---------------------------------------------------------------------------------------------------------
# S3 print method for "demand" objects (an S4 class): prints the name,
# description and commodity, then every non-empty data.frame slot.
# FIX: S3 print methods must accept `...` to stay consistent with the
# print() generic; they should also return their argument invisibly.
print.demand <- function(x, ...) {
  # print demand
  # Print one data.frame slot, preceded by its name, but only when it has rows
  if_print_data_frame <- function(x, sl) {
    if(nrow(slot(x,sl)) != 0) {
      cat('\n', sl, '\n')
      print(slot(x, sl))
      cat('\n')
    }
  }
  cat('Name: ', x@name, '\n')
  if (x@description != '') cat('description: ', x@description, '\n')
  cat('Commodity: ', x@commodity, '\n')
  # Names of all slots declared as data.frame in the S4 class definition
  g <- getClass("demand")
  zz <- names(g@slots)[sapply(names(g@slots), function(z) g@slots[[z]] ==
                                "data.frame")]
  for(i in zz) if_print_data_frame(x, i)
  invisible(x)
}
|
5b3d988c4eefa02929a634acdba5a36bd42ced7b
|
a91f8efbfc949026cc36e85760336cfaeb37477f
|
/8 XtremeGradientBoost.r
|
68bb1b3d49d20233eab9bc91828848d0b783f28a
|
[] |
no_license
|
azankhanyari/SurveyLevel_Disease_detection_ML
|
2757b840be20e2a76595468a00e1807ef61e898c
|
e383d58df36d3003d13d65c4ab60f685e88f8295
|
refs/heads/master
| 2022-03-31T07:13:05.448347
| 2019-12-22T19:07:30
| 2019-12-22T19:07:30
| 229,615,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,682
|
r
|
8 XtremeGradientBoost.r
|
library(xgboost)
# Prepare training/test matrices for xgboost.
# NOTE(review): `model_train_tomek` and `test` are created elsewhere (not in
# this file chunk) -- confirm they exist before this script runs.
data_xbg <- model_train_tomek
data_xbg$Status <- as.numeric(data_xbg$Status)
table(data_xbg$Status)
test_x <- test
test_x$Status <- as.numeric(test_x$Status)
str(data_xbg$Status)
table(data_xbg$Status)
# Recode factor level 2 to 0 so labels are {0, 1} as xgboost expects
data_xbg$Status <- ifelse(data_xbg$Status == 2,0,data_xbg$Status)
# Column 23 is the Status label; drop it from the feature matrices
train_xgb <- data_xbg[,-23]
test_xgb <- test_x[,-23]
train_xgb_2 <- as.matrix(train_xgb)
test_xgb_2 <- as.matrix(test_xgb)
dtrain <- xgb.DMatrix(data = train_xgb_2, label = data_xbg$Status)
dtest <- xgb.DMatrix(data = test_xgb_2, label= test_x$Status)
# train_v2$is_open <- as.numeric(train_v2$is_open)
# test_v2$is_open <- as.numeric(test_v2$is_open)
####################### train a model using our training data ###############
set.seed(786)
model_xgboost <- xgboost(booster = 'gbtree', data = dtrain, # the data
nrounds=1000, # max number of boosting iterations
objective = 'multi:softmax', num_class = 2,max_depth=5,eta = 0.08,silent =1,nthread =12,
eval_metric ="merror", min_child_weight =1, subsample = 0.5,colsample_bytree = 0.7) # the objective function
# NOTE(review): `params` is assigned twice; only the second list (eval_metric
# "error", max_depth 20) is actually used by xgb.cv below.
params <- list(booster = "gbtree", objective = "binary:logistic",num_class = 2,eval_metric ="auc", eta=0.12,silent =1, gamma=0, max_depth=6, min_child_weight=1, subsample=0.5, colsample_bytree=0.7)
params <- list(booster = "gbtree", objective = "binary:logistic",eval_metric ="error", eta=0.12,silent =1, gamma=0, max_depth=20, min_child_weight=1, subsample=0.5, colsample_bytree=0.7)
# NOTE(review): print.every.n / early.stop.round are deprecated argument names
# (modern: print_every_n / early_stopping_rounds) -- confirm xgboost version.
xgb_cv <- xgb.cv( params = params, data = dtrain, nrounds = 500, nfold = 5, showsd = T, stratified = T, print.every.n = 10, early.stop.round = 20, maximize = F)
##best iteration = 490
# xgb_final <- xgboost(data = dtrain,objective = "multi:softmax",num_class = 2,eval_metric ="merror", eta=0.12,silent =1, gamma=0, max_depth=6, min_child_weight=1, subsample=0.5, colsample_bytree=0.7, nrounds = 490, print_every_n = 10, eval_metric = "merror")
# NOTE(review): with eval_metric "error" this picks the WORST iteration;
# which.min() is almost certainly intended (which.max matches the earlier,
# overwritten "auc" params). The variable name also says "auc". Confirm.
max_auc_idx <- which.max(xgb_cv$evaluation_log[,test_error_mean])
#model prediction 1
# multi:softmax returns predicted class labels (0/1), not probabilities
pred_xgb <- predict(model_xgboost, dtest)
#change to factor for confusion matrix
# pred_real_fact_cv <- factor(test_x$Status, levels = c(0,1), labels = c(0,1) )
# pred_factor_cv <- factor(pred_xgb, levels = c(0,1), labels = c(0,1))
#recode to factor pred object
library(car)
# NOTE(review): `prediction` is computed here but never used afterwards.
prediction <- as.factor(as.numeric(pred_xgb > 0.5))
prediction <- recode(prediction,"0 = 'Diabetic';1 = 'Healthy'")
str(pred_xgb)
pred_xgb <- as.factor(pred_xgb)
levels(pred_xgb)
# NOTE(review): pred_xgb levels become c('Healthy','Diabetic') while
# test_x$Status levels below become c('Diabetic','Healthy'); if the underlying
# 0/1 coding is the same in both, one of these mappings is inverted and the
# confusion matrix is swapped -- please verify.
levels(pred_xgb ) <- c('Healthy','Diabetic')
# pred_xgb<- relevel(pred_xgb, 'Diabetic')
caret::confusionMatrix(pred_xgb,test_x$Status)
str(test_x$Status)
test_x$Status <- as.factor(test_x$Status)
table(test_x$Status)
levels(test_x$Status) <- c('Diabetic','Healthy')
caret::confusionMatrix(pred_xgb,test_x$Status)
# Saved output from a previous run, kept for reference:
# Confusion Matrix and Statistics
#
# Reference
# Prediction Diabetic Healthy
# Diabetic      100     114
# Healthy        57     278
#
# Accuracy : 0.6885
# 95% CI : (0.6479, 0.7271)
# No Information Rate : 0.714
# P-Value [Acc > NIR] : 0.9137
#
# Kappa : 0.3122
#
# Mcnemar's Test P-Value : 1.849e-05
#
# Sensitivity : 0.6369
# Specificity : 0.7092
# Pos Pred Value : 0.4673
# Neg Pred Value : 0.8299
# Prevalence : 0.2860
# Detection Rate : 0.1821
# Detection Prevalence : 0.3898
# Balanced Accuracy : 0.6731
#
# 'Positive' Class : Diabetic
|
9a75de7631b4d160f0655ebf4f957ec1df782103
|
eb9b5a5b759b10bfbf8421f3a67a025a9ff7c069
|
/results_in_paper/10_PWAS_cor_coloc.R
|
1875e69244a5ffb060308688579be8d66e350871
|
[] |
no_license
|
Jingning-Zhang/PlasmaProtein
|
fc42790f4eaea03e5b0285dbcc5ca7bc929ecc3e
|
1a3fd772782bf2b599f8c81054e4bf899ca41bd1
|
refs/heads/main
| 2023-04-15T22:55:54.912402
| 2022-11-29T06:42:31
| 2022-11-29T06:42:31
| 465,238,572
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,608
|
r
|
10_PWAS_cor_coloc.R
|
library(readxl)
library(dplyr)
library(readr)
## Merge urate PWAS conditional-analysis results (two GTEx versions) with
## coloc PP.H4 values per gene/tissue and write one combined table.
## NOTE(review): all paths are hard-coded absolute paths on one machine.
# SOMAmer IDs and the corresponding gene symbols for the urate PWAS hits
urateid <- c("SeqId_13676_46","SeqId_7955_195","SeqId_17692_2","SeqId_19622_7","SeqId_6897_38","SeqId_8307_47","SeqId_15686_49","SeqId_17765_3","SeqId_8900_28","SeqId_8403_18")
urategene <- c("INHBB","ITIH1","BTN3A3","INHBA","B3GAT3","C11orf68","INHBC","SNUPN","NEO1","FASN")
dat1 <- read_tsv("/Users/jnz/Document/JHU/Research/PWAS/Analysis/500Kb/*RData/PWAS/conditional_analysis/Urate_all-cleaned_samegene.out")
dat2 <- read_tsv("/Users/jnz/Document/JHU/Research/PWAS/Analysis/500Kb/*RData/PWAS/conditional_analysis/Urate_all-cleaned_samegene_v8.out")
# Join the two result sets on (PWAS hit, tissue); columns are reordered by position
corr <- full_join(dat1[,c(2:6,1,13, 7:11, 14:15)], dat2[,c(2,1, 13, 7:11, 14:15)], by=c("PWAS_hit","tissue"))
coloc <- read_excel("/Users/jnz/Dropbox/PWAS_manuscript/NatureGenetics/2021_06_revision2/Suppl_tables_9Aug2021_DD_JZ.xlsx",
sheet = "ST8.2-- coloc(PP.H4) with eQTLs", skip=2)
# One row per urate SOMAmer; columns 3+ are tissues holding PP.H4 values
tmp <- coloc[match(urateid,coloc$`SOMAmer ID`),]
# Flatten the gene x tissue PP.H4 matrix to long format
# NOTE(review): `resid` shadows stats::resid within this script.
rescoloc <- numeric(); resid <- character(); restissue <- character()
ii=0
for (i in 1:length(urateid)) {
for (j in 3:ncol(tmp)) {
ii <- ii+1
resid[ii] <- as.character(tmp[i,2])
restissue[ii] <- colnames(tmp)[j]
rescoloc[ii] <- as.numeric(tmp[i,j])
}
}
coloc <- data.frame(gene=resid, tissue=restissue,pph4=rescoloc, stringsAsFactors = F)
# Build a normalized "gene-tissue" key on both sides (strip _, -, parens, spaces)
corr$tissue1 <- paste0(corr$PWAS_hit,"-",unlist(lapply(strsplit(corr$tissue, "_|-"), FUN = function(x){paste(x, collapse = "")})))
coloc$tissue1 <- paste0(coloc$gene,"-",unlist(lapply(strsplit(colnames(tmp)[-1:-2], "-|\\(|\\)| "), FUN = function(x){paste(x, collapse = "")})))
res <- left_join(corr, coloc[,2:4], by="tissue1")
# Keep the human-readable tissue name from the coloc table, drop helper columns
res$tissue.x <- res$tissue.y
res <- res[,-which(colnames(res) %in% c("tissue.y","tissue1","TWAS_hit.x","TWAS_hit.y"))]
# Keep rows with a correlation in at least one GTEx version and a known tissue
res <- res[( (!is.na(res$Corr_of_hits.x)) | (!is.na(res$Corr_of_hits.y)) ) & !is.na(res$tissue.x),]
res <- res[order(res$tissue.x),]
res <- full_join(data.frame(name=urategene), res,by=c("name"="PWAS_hit"))
df <- res[!(is.na(res$TWAS_p.x)), ]
# df <- tibble()
# for (i in 1:length(urategene)) {
#   tmp <- res[res$name == urategene[i],]
#   tmp <- tmp[!is.na(tmp$TWAS_p.x),]
#   tmp <- tmp[which.min(as.numeric(gsub("[*]","",tmp$TWAS_p.x))),]
#   df <- rbind(df, tmp)
# }
# df <- df[,c(1:6,8:11,7,12:13,15:18,14,19:21)]
write_tsv(df, "/Users/jnz/Document/JHU/Research/PWAS/Analysis/500Kb/*Tables/8_PWAS_cor_coloc_urate.txt")
library(readxl)
library(dplyr)
library(readr)
## Gout version of the urate block above -- near-verbatim copy with different
## IDs/genes/paths. NOTE(review): consider factoring the shared logic into a
## function(ids, genes, trait) to remove the duplication.
goutid <- c("SeqId_5353_89","SeqId_17692_2","SeqId_15686_49")
goutgene <- c("IL1RN","BTN3A3","INHBC")
dat1 <- read_tsv("/Users/jnz/Document/JHU/Research/PWAS/Analysis/500Kb/*RData/PWAS/conditional_analysis/Gout_all-cleaned_samegene.out")
dat2 <- read_tsv("/Users/jnz/Document/JHU/Research/PWAS/Analysis/500Kb/*RData/PWAS/conditional_analysis/Gout_all-cleaned_samegene_v8.out")
corr <- full_join(dat1[,c(2:6,1,13, 7:11, 14:15)], dat2[,c(2,1, 13, 7:11, 14:15)], by=c("PWAS_hit","tissue"))
coloc <- read_excel("/Users/jnz/Dropbox/PWAS_manuscript/NatureGenetics/2021_06_revision2/Suppl_tables_9Aug2021_DD_JZ.xlsx",
sheet = "ST8.2-- coloc(PP.H4) with eQTLs", skip=2)
tmp <- coloc[match(goutid,coloc$`SOMAmer ID`),]
# Flatten gene x tissue PP.H4 matrix to long format (resid shadows stats::resid)
rescoloc <- numeric(); resid <- character(); restissue <- character()
ii=0
for (i in 1:length(goutid)) {
for (j in 3:ncol(tmp)) {
ii <- ii+1
resid[ii] <- as.character(tmp[i,2])
restissue[ii] <- colnames(tmp)[j]
rescoloc[ii] <- as.numeric(tmp[i,j])
}
}
coloc <- data.frame(gene=resid, tissue=restissue,pph4=rescoloc, stringsAsFactors = F)
# Build a normalized "gene-tissue" key on both sides
corr$tissue1 <- paste0(corr$PWAS_hit,"-",unlist(lapply(strsplit(corr$tissue, "_|-"), FUN = function(x){paste(x, collapse = "")})))
coloc$tissue1 <- paste0(coloc$gene,"-",unlist(lapply(strsplit(colnames(tmp)[-1:-2], "-|\\(|\\)| "), FUN = function(x){paste(x, collapse = "")})))
res <- left_join(corr, coloc[,2:4], by="tissue1")
res$tissue.x <- res$tissue.y
res <- res[,-which(colnames(res) %in% c("tissue.y","tissue1","TWAS_hit.x","TWAS_hit.y"))]
res <- res[( (!is.na(res$Corr_of_hits.x)) | (!is.na(res$Corr_of_hits.y)) ) & !is.na(res$tissue.x),]
res <- res[order(res$tissue.x),]
res <- full_join(data.frame(name=goutgene), res,by=c("name"="PWAS_hit"))
df <- res[!(is.na(res$TWAS_p.x)), ]
# NOTE(review): the commented-out block below still references `urategene`
# (copy-paste leftover from the urate section).
# df <- tibble()
# for (i in 1:length(urategene)) {
#   tmp <- res[res$name == urategene[i],]
#   tmp <- tmp[!is.na(tmp$TWAS_p.x),]
#   tmp <- tmp[which.min(as.numeric(gsub("[*]","",tmp$TWAS_p.x))),]
#   df <- rbind(df, tmp)
# }
# df <- df[,c(1:6,8:11,7,12:13,15:18,14,19:21)]
write_tsv(df, "/Users/jnz/Document/JHU/Research/PWAS/Analysis/500Kb/*Tables/8_PWAS_cor_coloc_gout.txt")
|
6408aef46d1c63883ac03bd365489b353440f764
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/echarts4r/examples/formatters.Rd.R
|
210f85851499f0ed0d6159d0351752955551b683
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
formatters.Rd.R
|
library(echarts4r)
# Example extracted from the echarts4r Rd page for the axis formatter helpers.
### Name: e_format_axis
### Title: Formatters
### Aliases: e_format_axis e_format_x_axis e_format_y_axis
### ** Examples
# Y = %
# Toy data: x = 1..10, y = 10 random values in [1, 100] rounded to 2 decimals
df <- data.frame(
x = 1:10,
y = round(
runif(10, 1, 100), 2
)
)
# Line chart with a "%" suffix on the y axis and an "A" prefix on the x axis
df %>%
e_charts(x) %>%
e_line(y) %>%
e_format_y_axis(suffix = "%") %>%
e_format_x_axis(prefix = "A")
|
817291ba5baf8aa0dfe59eb560cef2f943884c22
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/7602/dprint/tbl.struct.R
|
4abfbecaf645ea5f90838af31b513ba24450b360
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,742
|
r
|
tbl.struct.R
|
#' Table Structure
#'
#' Generalization of table structure
#'
#' @param fmla Formula interface to define table structure
#' @param data data.frame
#' @param label name of column containing row labels
#' @param group name of column containing hieriarchy labels for the row names
#' @param regx regular expression to be removed from original column names
#' @param main Table title
#' @param footnote footnote
#' @param row.hl row highlight object see row.hl function
#' @export
tbl.struct <-
function(fmla=NULL, # Formula interface to define table structure
data, # Input Data.frame
label = NULL, # label & group are characters identifying columns that define the simple table structure
group = NULL,
regx=NA, # Regular Expression to take off of colnames, designed to break unwanted tiebreakers for legal data.frame columnnames
main=NA, # Table Title, Vector of strings where each element is a new line
footnote=NA, # Footnote, Vector of strings where each element is a new line
row.hl=list(dx=NULL, col=NULL) # Conditional Formatting to highlight rows
)
{
# Returns a list of simple table objects: one element when there are no
# conditional (by) variables, otherwise one element per unique combination
# of the conditional variables' values.
tbl.obj <- vector("list", 1)
### Parameter Declaration ###
# No formula: build a single simple table directly from label/group columns
if(is.null(fmla))
{
tbl.obj[[1]] <- tbl.struct.simp(data=data, label = label, group = group, main=main, footnote=footnote)
# Conditional Formatting
tbl.obj[[1]]$row.hl <- row.hl # Row Highlight
}
### Formula Interface ###
else
{
fmla.obj <- fmla_inter(fmla, data=data, regx=regx)
# If no conditional variables then simple table structure
if (is.null(fmla.obj$byvars1))
{
tbl.obj[[1]] <- tbl.struct.simp(data=fmla.obj$tbl, label = fmla.obj$label, group = fmla.obj$group, main=main, footnote=footnote, colnames.obj=fmla.obj$colnames.obj)
# Conditional Formatting
tbl.obj[[1]]$row.hl <- row.hl # Row Highlight
}
### Conditional Variables Used ###
else # create a list of simple table structures by all combinations of values of conditional variables
{
conditional.obj <- conditional.struct(fmla.obj$tbl, byvars=fmla.obj$byvars1)
l.uniq.concat.cond <- length(conditional.obj$uniq.concat.cond)
tbl.obj <- vector("list", l.uniq.concat.cond)
data <- conditional.obj$data # Removes conditional variables
# One simple table per unique combination of conditional values
for (uniq.concat.cond.i in 1:l.uniq.concat.cond)
{
cur.fltr.dx <- which(conditional.obj$concat.cond == conditional.obj$uniq.concat.cond[uniq.concat.cond.i])
data.i <- data[cur.fltr.dx, ,drop=FALSE]
if (!is.data.frame(data.i)) {data.i <- as.data.frame(data.i)} # Class change on subsetting nx1 data frame
tbl.obj[[uniq.concat.cond.i]] <- tbl.struct.simp(data=data.i, label = fmla.obj$label, group = fmla.obj$group, main=main, footnote=footnote, colnames.obj=fmla.obj$colnames.obj)
tbl.obj[[uniq.concat.cond.i]]$cond.txt <- conditional.obj$uniq.concat.cond[uniq.concat.cond.i]
### Conditional Formatting ###
# Row highlight: map the global highlight row indices into this subset,
# then shift the remaining indices for the next subset
if (!is.null(row.hl$dx))
{
tbl.obj[[uniq.concat.cond.i]]$row.hl <-list(dx=NULL, col=NULL)
row.hl.dx <- which(row.hl$dx <= max(cur.fltr.dx))
tbl.obj[[uniq.concat.cond.i]]$row.hl$dx <- row.hl$dx[row.hl.dx]
tbl.obj[[uniq.concat.cond.i]]$row.hl$col <- row.hl$col
row.hl$dx <- row.hl$dx[-row.hl.dx]-nrow(data.i)
}
}
}
}
tbl.obj
}
|
9d1425defc0df06dd62668bddbf8f01c6ff65173
|
b3afc44d91b7e1a84c7b04e4f715fc4ed8dd3320
|
/src/Script5_Sept 10 reshaping data.R
|
390fe2a2706835fee48b21e02045dd940cbd68f0
|
[] |
no_license
|
AChase44/FISH-504
|
99d3f103b4e45d6799d74ab5e6890b202d60f1f7
|
872d2b3032f20b36a70cfa317cb59f337b06e664
|
refs/heads/main
| 2023-02-04T15:56:54.771052
| 2020-12-12T20:29:44
| 2020-12-12T20:29:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,311
|
r
|
Script5_Sept 10 reshaping data.R
|
# Day 1 Data Wrangling ---------------------------------------------------
# data wrangling day 1: download the surveys data set and explore base-R
# inspection and subsetting, then a first tidyverse pipeline.
download.file(url = "https://ndownloader.figshare.com/files/2292169",
destfile = "C:/Users/Student Account/Documents/FISH504RProjects/src/504_Sept3_Live_Code/sept3.csv")
surveys <- read.csv("C:/Users/Student Account/Documents/FISH504RProjects/src/504_Sept3_Live_Code/sept3.csv")
#ways to see data
head(surveys)
View(surveys)
str(surveys)
dim(surveys)
names(surveys)
summary(surveys)
#point to a particular section. [row number, column number] [,column][row,][from row x:to row y,column]
# [,-show all but this column] can also call by names of columns/rows
surveys[1,1]
surveys[,1]
surveys[1:3,7]
surveys[,-1]
surveys["species_id"]
# FIX: the original read c("male,""female",...) -- a missing quote/comma made
# two adjacent string literals, which is a syntax error in R.
sex <- factor(c("male","female","male","female"))
levels(sex)
levels(surveys$sex)
nlevels(surveys$sex)
#tidyverse for data wrangling
#install tidyverse
#install.packages("tidyverse")
#require function is good for sharing code.
#require(tidyverse)
library(tidyverse)
# %>% ctrl shift M for pipe %>%
#this will do some sorting and tables
# NOTE(review): the name says "big_animals" but weight < 5 keeps the SMALLEST
# animals -- intentional in the lesson, just a misleading name.
big_animals<-surveys %>%
filter(weight<5) %>%
select(species_id,sex,weight)
View(big_animals)
#quickly and easily read what you are doing rather than
#having inline as a chunk
#assignment one is posted. should be code based. explain why wrong answers are wrong.
# 9/10/2020 Load in data and packages -------------------------------------
#9/10/2020
#data wrangle, EDIC/FSH 503
#load packages
library(tidyverse)
#Load in data
surveys <- read.csv("C:/Users/Student Account/Documents/FISH504RProjects/src/504_Sept3_Live_Code/sept3.csv")
# 9/10/2020 tidyverse practice --------------------------------------------
# filter rows then select columns -> small subset of light animals
surveys_sml<-surveys %>%
filter(weight<5) %>%
select(species_id, sex,weight)
#head(surveys_sml)in the live code will show us what we just did. Shows first or last part of data.
#we can calculate new columns with the mutate function. Very cool.
#I need to look up how pipe %>% works.
# derive weight in kg and lb from the raw weight (grams)
surveys %>%
filter(!is.na(weight)) %>%
mutate(weight_kg=weight/1000,
weight_lb=weight_kg*2.2) %>%
head()
#na.rm=TRUE removes na values from calculation.
#let's sort and find the mean weights by sex.
surveys %>%
group_by(sex,species_id) %>%
summarize(mean_weight = mean(weight, na.rm=TRUE)) %>%
tail()
# same summary, but pre-filtering NAs and adding the group minimum
surveys %>%
filter(!is.na(weight)) %>%
group_by(sex,species_id) %>%
summarize(mean_weight=mean(weight,na.rm=TRUE),
min_weight=min(weight)) %>%
arrange(desc(mean_weight))
#arrange allows sorting data like the filter in excel. desc puts it in descending order.
#count will count the number of entries in a row or column.
surveys %>%
count(sex)
#Alternatively, could use the group and summarise(count) function.
surveys %>%
group_by(sex) %>%
summarise(count=n()) %>%
arrange(desc(count))
#each row must be a single observation to work with tidyverse.
#real world data often needs to be reshaped to fit these requirements.
#CTRL+SHIFT+R to create new section.
# reshaping data ----------------------------------------------------------
surveys_gw<-surveys %>%
filter(!is.na(weight)) %>%
group_by(plot_id,genus) %>%
summarize(mean_weight=mean(weight))
str(surveys_gw)
surveys_gw_spread<-surveys_gw %>%
spread(key=genus, value=mean_weight)
view(surveys_gw_spread)
#opposite of spread is gather.
surveys_gw_gather<-surveys_gw_spread %>%
gather(key="genus", value="mean_weight", -plot_id)
view(surveys_gw_gather)
#-plot_id means don't gather plot_id.
#now we gonna talk about expoting data.
# Export ------------------------------------------------------------------
# Build a fully observed data set: drop every row with a missing weight,
# hindfoot length, or sex.
surveys_complete<-surveys %>%
  filter(!is.na(weight),            #remove missing weight
         !is.na(hindfoot_length),   #remove missing hindfoot length
         !is.na(sex))               #remove missing sex (bugfix: was is.na(sex), which KEPT only the missing-sex rows)
# Keep only species observed at least 50 times.
species_counts<-surveys_complete %>%
  count(species_id) %>%
  filter(n>=50)
# Restrict surveys_complete to rows whose species_id appears in the
# common-species table built above.
surveys_complete<-surveys_complete %>%
  filter(species_id %in% species_counts$species_id)
write_csv(surveys_complete, path="C:/Users/Student Account/Documents/FISH504RProjects/outputs/surveys_complete.csv")
|
5ecb8d3a1f6e74bf983fd63b2c746d052a20036d
|
4958fcfba9cf8bd5ef2840a3d1ba89119932a4b8
|
/man/importGtf.Rd
|
75cba714c1fa56eaf5df643732168a46fccca0b8
|
[] |
no_license
|
BIMSBbioinfo/RCAS
|
25375c1b62a2624a6b21190e79ac2a6b5b890756
|
d6dc8f86cc650df287deceefa8aeead5670db4d9
|
refs/heads/master
| 2021-07-23T10:11:46.557463
| 2021-05-19T16:21:54
| 2021-05-19T16:21:54
| 43,009,681
| 4
| 4
| null | 2017-10-19T23:28:43
| 2015-09-23T15:29:13
|
R
|
UTF-8
|
R
| false
| true
| 1,805
|
rd
|
importGtf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/report_functions.R
\name{importGtf}
\alias{importGtf}
\title{importGtf}
\usage{
importGtf(
filePath,
saveObjectAsRds = TRUE,
readFromRds = TRUE,
overwriteObjectAsRds = FALSE,
keepStandardChr = TRUE,
...
)
}
\arguments{
\item{filePath}{Path to a GTF file}
\item{saveObjectAsRds}{TRUE/FALSE (default:TRUE). If it is set to TRUE, a
GRanges object will be created and saved in RDS format
(<filePath>.granges.rds) so that importing can re-use this .rds file in
next run.}
\item{readFromRds}{TRUE/FALSE (default:TRUE). If it is set to TRUE,
annotation data will be imported from previously generated .rds file
(<filePath>.granges.rds).}
\item{overwriteObjectAsRds}{TRUE/FALSE (default:FALSE). If it is set to TRUE, the
existing .rds file (<filePath>.granges.rds) will be overwritten.}
\item{keepStandardChr}{TRUE/FALSE (default:TRUE). If it is set to TRUE,
\code{seqlevelsStyle} will be converted to 'UCSC' and
\code{keepStandardChromosomes} function will be applied to only keep data
from the standard chromosomes.}
\item{...}{Other arguments passed to rtracklayer::import.gff function}
}
\value{
A \code{GRanges} object containing the coordinates of the annotated
genomic features in an input GTF file
}
\description{
This function uses the \code{rtracklayer::import.gff()} function to import genome
annotation data from an Ensembl gtf file
}
\examples{
#import the data and write it into a .rds file
\dontrun{
importGtf(filePath='./Ensembl75.hg19.gtf')
}
#import the data but don't save it as RDS
\dontrun{
importGtf(filePath='./Ensembl75.hg19.gtf', saveObjectAsRds = FALSE)
}
#import the data and overwrite the previously generated .rds file
\dontrun{
importGtf(filePath='./Ensembl75.hg19.gtf', overwriteObjectAsRds = TRUE)
}
}
|
2f7c390c1dfb41f9c9c8e181f41d55d8e653730f
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googledataflowv1b3.auto/man/GetDebugConfigRequest.Rd
|
4e513a309ac5feaab6e5667f7f92960ed36f25ea
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 612
|
rd
|
GetDebugConfigRequest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataflow_objects.R
\name{GetDebugConfigRequest}
\alias{GetDebugConfigRequest}
\title{GetDebugConfigRequest Object}
\usage{
GetDebugConfigRequest(componentId = NULL, workerId = NULL)
}
\arguments{
\item{componentId}{The internal component id for which debug configuration is requested.}
\item{workerId}{The worker id, i.e. the VM id.}
}
\value{
GetDebugConfigRequest object
}
\description{
GetDebugConfigRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Request to get updated debug configuration for component.
}
|
8345c8576ead077faaef9c214ed780313e040a60
|
cc33f833ba275ea5421f7a83ec623b85f77400f6
|
/acogarchSimulationAppHelpers.R
|
2ede97d589052eec30dd0ce0fc15cfb6d6a715ba
|
[] |
no_license
|
JonasKir97/aparch_app
|
12641451b010354da03019d7c2c496999ba2f57b
|
13ada0f8a7a769dba1a56109583ae976e9884df0
|
refs/heads/master
| 2023-04-15T06:41:38.427690
| 2021-04-29T18:19:18
| 2021-04-29T18:19:18
| 359,941,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,183
|
r
|
acogarchSimulationAppHelpers.R
|
#' Validate and parse the Shiny inputs for the discrete APARCH(1,1) simulation.
#'
#' @param shinyInputObject Shiny input (list-like) holding the raw text fields.
#' @param maxSteps Optional upper bound for the number of simulation steps.
#' @return On success, a named list with errorText = NULL plus the parsed
#'   parameters (deltaVec, gammaVec, theta, alpha, beta, steps); on failure a
#'   list whose only meaningful entry is the (German) message in errorText.
parseDiscreteSimulationInput <- function(shinyInputObject, maxSteps = NULL) {
  # Local helper: wrap a validation failure message.
  fail <- function(msg) list(errorText = msg)

  # Comma-separated deltas (decimal point), each must be > 0; one simulation
  # is run per delta value later on.
  deltaValues <- as.numeric(strsplit(shinyInputObject$deltaDiscrete, ",")[[1]])
  if (any(is.na(deltaValues)) || any(deltaValues <= 0)) {
    return(fail("Ungültige Eingabe in den Deltas (Erwarte: Kommagetrennt,Punkt als Dezimalzeichen, größer 0)."))
  }

  # Comma-separated gammas, each must satisfy |gamma| < 1.
  gammaValues <- as.numeric(strsplit(shinyInputObject$gammaDiscrete, ",")[[1]])
  if (any(is.na(gammaValues)) || any(abs(gammaValues) >= 1)) {
    return(fail("Ungültige Eingabe in den Gammas (Erwarte: Kommagetrennt,Punkt als Dezimalzeichen, betragsmäßig kleiner 1)."))
  }

  # Scalar model parameters.
  thetaValue <- as.numeric(shinyInputObject$thetaDiscrete)
  if (is.na(thetaValue)) {
    return(fail("Ungültige Eingabe im Theta."))
  }
  alphaValue <- as.numeric(shinyInputObject$alphaDiscrete)
  if (is.na(alphaValue)) {
    return(fail("Ungültige Eingabe im Alpha."))
  }
  betaValue <- as.numeric(shinyInputObject$betaDiscrete)
  if (is.na(betaValue)) {
    return(fail("Ungültige Eingabe im Beta."))
  }

  # Number of simulation steps, optionally capped by maxSteps.
  stepCount <- as.integer(shinyInputObject$simulationStepsDiscrete)
  if (is.na(stepCount)) {
    return(fail("Ungültige Eingabe in der Länge der Simulation."))
  }
  if (length(maxSteps) && stepCount > maxSteps) {
    return(fail(paste0("Bitte eine kürzere Länge der Simulation (<=", maxSteps, ") angeben.")))
  }

  list(
    errorText = NULL,
    deltaVec = deltaValues,
    gammaVec = gammaValues,
    theta = thetaValue,
    alpha = alphaValue,
    beta = betaValue,
    steps = stepCount
  )
}
#' Asymmetry (news-impact) transformation of the APARCH(1,1) recursion:
#' h(x) = (|x| - gamma * x)^delta.  Vectorized over x.
h <- function(x, gamma, delta) {
  (abs(x) - gamma * x)^delta
}
#' Simulate a discrete-time APARCH(1,1) process.
#'
#' Recursion: sigma_t^delta = theta + alpha*h(Y_{t-1}) + beta*sigma_{t-1}^delta
#' and Y_t = epsilon_t * sigma_t, with h(x) = (|x| - gamma*x)^delta and
#' initial values sigma_0^delta = 0, Y_0 = 0.
#'
#' @param steps number of time steps to simulate.
#' @param alpha,beta,theta,gamma,delta APARCH(1,1) model parameters.
#' @param noiseGenerator function(n) drawing the n innovations epsilon
#'   (default: standard normal); ignored when fixedNoises is supplied.
#' @param fixedNoises optional vector of innovations to reuse across runs
#'   (lets several parameter settings share the same noise path).
#' @param useCPP if TRUE, delegate to simulateDiscreteAPARCH11inCPP
#'   (compiled elsewhere in the app); otherwise use the pure-R recursion.
#' @return list with the innovations (noises), the sigma^delta path
#'   (sigmaDelta) and the simulated process (Y); the returned paths include
#'   the initial value, so they are one element longer than the noise vector.
simulateDiscreteAPARCH11 <- function(steps = 1000,
alpha = 0.5,
beta = 0.3,
theta = 0.5,
gamma = 0.5,
delta = 2,
noiseGenerator = function(n) {return(rnorm(n, mean = 0, sd = 1))},
fixedNoises = NULL,
useCPP = TRUE) {
# Reuse supplied innovations when given; otherwise draw fresh ones.
if(!is.null(fixedNoises)) {
epsilons <- fixedNoises
} else {
epsilons <- noiseGenerator(steps)
}
if(useCPP) {
resCpp <- simulateDiscreteAPARCH11inCPP(noises = epsilons, alpha = alpha, beta = beta, theta = theta, gamma = gamma,
delta = delta, initialSigmaDelta = 0, initialY = 0)
sigmaDelta <- resCpp$sigmaDelta
Y <- resCpp$Y
} else {
# Pure-R fallback: fold the recursion over the noise vector, carrying
# the pair (sigma^delta, Y) and keeping all intermediate states.
hFixed <- function(x) {return(h(x = x, gamma = gamma, delta = delta))}
oneOverDelta <- 1/delta
res <- do.call("rbind", Reduce(
f = function(sigAndY, newNoise) {
newSigDelta <- theta + alpha*hFixed(sigAndY[2]) + beta * sigAndY[1]
c(newSigDelta, newNoise * newSigDelta^oneOverDelta)
},
x = epsilons,
init = c(0,0), #sigma^delta and Y
accumulate = TRUE
))
sigmaDelta <- res[,1]
Y <- res[,2]
}
return(list(noises = epsilons, sigmaDelta = sigmaDelta, Y = Y))
}
#' Simulate a compound Poisson process as driving Levy process on a given
#' time grid with jump intensity lambda.
#'
#' @param timeGrid the time grid given as a vector; only its first and last
#'   entries are used (start and end time of the simulation).
#' @param lambda the rate of the exponential distribution used to simulate
#'   the interarrival times (jump intensity).
#' @param levyJumpGenerator a named list consisting of
#'     \code{FUN} : a function that is called to generate random variables
#'     \code{namedArgs} : a named list with the named arguments (and values)
#'       passed on to \code{FUN}
#'     \code{countArgName} : the name of the argument of \code{FUN} that
#'       determines how many random variables are drawn
#'   defaults to a normal distribution with mean 0 and standard deviation 1
#' @param randomSeed a numeric seed for reproducibility, or \code{NULL} to
#'   leave the RNG state untouched
#' @return a named list consisting of
#'     \code{jumpTimes} : a vector of the processes jump times
#'     \code{levyJumps} : a vector with the jumps of the Levy process
#'     \code{levyProcess} : a vector with the values of the Levy process
#'       (compound Poisson process) at the jump times
simulateCompoundPoisson <- function(timeGrid = 1:10,
                                    lambda = 1,
                                    levyJumpGenerator = list(FUN = stats::rnorm,
                                                             namedArgs = list(mean = 0, sd = 1),
                                                             countArgName = "n"),
                                    randomSeed = 2021) {
  # BUGFIX: the original tested is.integer(randomSeed), which is FALSE for
  # the default 2021 (a double literal), so the seed was silently never set
  # and the documented reproducibility never happened.
  if (!is.null(randomSeed)) set.seed(randomSeed)
  lastTimeToReach <- timeGrid[length(timeGrid)] # simulate exp-rvs until this time is reached
  interarrivalTimes <- numeric(0)
  reachedLastTime <- timeGrid[1]
  while (reachedLastTime < lastTimeToReach) {
    remainingTime <- lastTimeToReach - reachedLastTime
    # Rough over-estimate (mean + 3 sd of a Poisson count at rate ~1) of how
    # many draws are still needed to cover the remaining time; the while
    # loop tops up whenever the estimate falls short.
    estimatedNeededValues <- ceiling(remainingTime + 3 * sqrt(remainingTime))
    newInterarrivalTimes <- stats::rexp(n = estimatedNeededValues, rate = lambda)
    interarrivalTimes <- c(interarrivalTimes, newInterarrivalTimes)
    reachedLastTime <- reachedLastTime + sum(newInterarrivalTimes)
  }
  # Jump times are cumulated interarrival times, truncated at the end time.
  jumpTimes <- cumsum(interarrivalTimes)
  jumpTimes <- jumpTimes[jumpTimes <= lastTimeToReach]
  # Validate that the supplied generator accepts all of its declared arguments.
  jumpGenerator <- levyJumpGenerator[["FUN"]]
  neededArguments <- names(formals(jumpGenerator))
  suppliedArgumentNames <- c(names(levyJumpGenerator$namedArgs), levyJumpGenerator$countArgName)
  if (!all(neededArguments %in% suppliedArgumentNames)) stop("Missing arguments for FUN in levyJumpGenerator")
  # Draw one jump size per jump time and cumulate them into the process path.
  n <- length(jumpTimes)
  argumentList <- c(levyJumpGenerator$namedArgs, setNames(list(n), levyJumpGenerator$countArgName))
  levyJumps <- do.call(jumpGenerator, argumentList)
  levyProcess <- cumsum(levyJumps)
  return(list(jumpTimes = jumpTimes, levyJumps = levyJumps, levyProcess = levyProcess))
}
#' Simulate a Variance Gamma process as driving Levy process: a Brownian
#' motion with drift evaluated at a gamma subordinator (time change).
#'
#' @param timeGrid the time grid given as a vector; only its last entry is
#'   used (the end time of the simulation)
#' @param sigma volatility of the Brownian component
#' @param nu variance rate of the gamma subordinator
#' @param theta drift of the time-changed Brownian motion
#' @param gs step width of the equidistant simulation grid
#' @param randomSeed a numeric seed for reproducibility, or \code{NULL} to
#'   leave the RNG state untouched
#' @return a named list consisting of
#'     \code{jumpTimes} : the simulation grid points
#'     \code{levyProcess} : the values of the Levy process (Variance gamma
#'       process) on the grid, starting at 0
simulateVarianceGamma <- function(timeGrid = 1:10,
                                  sigma = 1,
                                  nu = 0.05,
                                  theta = 0.5,
                                  gs = 0.01,
                                  randomSeed = 2021) {
  ts <- seq(0, timeGrid[length(timeGrid)], gs)
  dts <- ts[-1] - ts[-length(ts)] # grid increments (all equal to gs)
  # BUGFIX: the original tested is.integer(randomSeed), which is FALSE for
  # the default 2021 (a double literal), so the seed was silently never set.
  if (!is.null(randomSeed)) set.seed(randomSeed)
  # Gamma subordinator increments with mean dt and variance nu*dt.
  gammaVariables <- stats::rgamma(n = length(dts), shape = (1/nu) * dts, scale = nu)
  # Brownian increments with variance equal to the subordinated time increment.
  normalsForBrownian <- stats::rnorm(n = length(dts), mean = 0, sd = sqrt(gammaVariables))
  brownian <- c(0, cumsum(normalsForBrownian))
  varianceGammaProcess <- theta * c(0, cumsum(gammaVariables)) + sigma * brownian
  return(list(jumpTimes = ts, levyProcess = varianceGammaProcess))
}
#' Simulate a discretized Brownian motion with drift on an equidistant grid.
#'
#' @param timeGrid the time grid given as a vector; only its last entry is
#'   used (the end time of the simulation)
#' @param mu mean of each increment (drift per grid step)
#' @param sigma standard deviation of each increment
#' @param gs step width of the equidistant simulation grid
#' @param randomSeed a numeric seed for reproducibility, or \code{NULL} to
#'   leave the RNG state untouched
#' @return a named list with \code{jumpTimes} (the grid points) and
#'   \code{levyProcess} (the simulated path)
simulateBrownianMotion <- function(timeGrid = 1:10,
                                   mu = 0,
                                   sigma = 1,
                                   gs = 0.01,
                                   randomSeed = 2021) {
  # BUGFIX: the original tested is.integer(randomSeed), which is FALSE for
  # the default 2021 (a double literal), so the seed was silently never set.
  if (!is.null(randomSeed)) set.seed(randomSeed)
  ts <- seq(0, timeGrid[length(timeGrid)], gs)
  # NOTE(review): increments are N(mu, sigma) PER STEP and are not scaled by
  # sqrt(gs) -- confirm whether per-step or per-unit-time variance is intended.
  brownian <- cumsum(stats::rnorm(n = length(ts), mean = mu, sd = sigma))
  return(list(jumpTimes = ts, levyProcess = brownian))
}
#' Build the long-format plot data for the discrete APARCH(1,1) simulations.
#'
#' Runs one simulation per (delta, gamma) combination from the parsed input,
#' reusing the same noise vector for every run so the paths are comparable,
#' and stacks the results into one data frame with columns:
#'   \code{x}          : time index (1..steps+1, includes the initial value)
#'   \code{sigmaDelta} : the simulated sigma^delta path
#'   \code{sigma}      : sigmaDelta^(1/delta), the volatility path
#'   \code{Y}          : the simulated APARCH process
#'   \code{Simulation} : label "Delta=<d>,Gamma=<g>" identifying the run
#'
#' @param discreteSimulationParameterList output of parseDiscreteSimulationInput
#' @param noises innovation vector shared by all simulations
#' @param useCpp passed through to simulateDiscreteAPARCH11 (C++ vs pure R)
calculateDiscreteSimulationPlotData <- function(discreteSimulationParameterList, noises, useCpp) {
# Nested lapply: outer over deltas, inner over gammas -> list of lists of
# data frames, flattened below.
simulationDataList <- lapply(discreteSimulationParameterList$deltaVec, function(delta) {
lapply(discreteSimulationParameterList$gammaVec, function(gamma) {
simulationData <- simulateDiscreteAPARCH11(steps = discreteSimulationParameterList$steps,
alpha = discreteSimulationParameterList$alpha,
beta = discreteSimulationParameterList$beta,
theta = discreteSimulationParameterList$theta,
gamma = gamma,
delta = delta,
noiseGenerator = NULL,
fixedNoises = noises,
useCPP = useCpp)
data.frame(x = 1:length(simulationData$sigmaDelta),
sigmaDelta = simulationData$sigmaDelta,
sigma = simulationData$sigmaDelta^(1/delta),
Y = simulationData$Y,
Simulation = paste0("Delta=",delta,",Gamma=",gamma),
stringsAsFactors = FALSE)
})
})
# Flatten list-of-lists to a single list, then row-bind all data frames.
simulationPlotData <- do.call("rbind",do.call("c",simulationDataList))
return(simulationPlotData)
}
#' Parse and validate the Shiny inputs describing the Levy process simulation.
#'
#' @param shinyInput Shiny input (list-like) with the raw text fields.
#' @return On success a named list with error = NULL plus simuType, timeGrid
#'   and the parameters of the selected process type; on failure a list whose
#'   only entry is the (German) error message in \code{error}.
parseLevySimulationSpecification <- function(shinyInput) {
  simuType <- shinyInput$levySimulationType
  # Helper: wrap a validation failure message.
  errorList <- function(e) return(list(error = e))
  # The time grid is entered as "from:to".
  timeGrid <- shinyInput$levySimuTimeGrid
  timeGrid <- as.numeric(strsplit(timeGrid, ":")[[1]])
  timeGrid <- timeGrid[1]:timeGrid[2]
  if (simuType == "Compound Poisson") {
    lambda <- as.numeric(shinyInput$levySimuCPlambda)
    if (is.na(lambda) || lambda <= 0) return(errorList("Ungültige Sprungrate Lambda"))
    return(list(
      error = NULL,
      simuType = simuType,
      timeGrid = timeGrid,
      lambda = lambda
    ))
  } else if (simuType == "Varianz-Gamma") {
    sigma <- as.numeric(shinyInput$levySimuVGsigma)
    if (is.na(sigma)) return(errorList("Ungültiges sigma"))
    nu <- as.numeric(shinyInput$levySimuVGnu)
    if (is.na(nu)) return(errorList("Ungültiges nu"))
    theta <- as.numeric(shinyInput$levySimuVGtheta)
    if (is.na(theta)) return(errorList("Ungültiges theta"))
    gs <- as.numeric(shinyInput$levySimuVGgs)
    if (is.na(gs)) return(errorList("Ungültige Eingabe in der Schrittweite"))
    if (gs <= 0) return(errorList("Die Schrittweite muss positiv sein."))
    return(list(
      error = NULL,
      simuType = simuType,
      timeGrid = timeGrid,
      sigma = sigma,
      nu = nu,
      theta = theta,
      gs = gs
    ))
  } else if (simuType == "Brownsche Bewegung") {
    mu <- as.numeric(shinyInput$levySimuBBmu)
    if (is.na(mu)) return(errorList("Ungültige Eingabe im Mittelwert."))
    sigma <- as.numeric(shinyInput$levySimuBBsd)
    # BUGFIX: this branch called `eturn(...)` (typo), which crashed with
    # "could not find function eturn" instead of returning the error list.
    if (is.na(sigma)) return(errorList("Ungültige Eingabe in der Standaradabweichung."))
    if (sigma < 0) return(errorList("Die Standardabweichung darf nicht negativ sein."))
    gs <- as.numeric(shinyInput$levySimuBBgs)
    if (is.na(gs)) return(errorList("Ungültige Eingabe in der Schrittweite"))
    if (gs <= 0) return(errorList("Die Schrittweite muss positiv sein."))
    return(list(
      error = NULL,
      simuType = simuType,
      timeGrid = timeGrid,
      mu = mu,
      sigma = sigma,
      gs = gs
    ))
  } else {
    return(errorList("Ungültiger Lévyprozess ausgewählt."))
  }
}
|
8ad264c4fddbbdfa00643b9f58d22de157cd7b98
|
6b4fe2baa84e74af637f319ea5d887cb2fd6f9a2
|
/kevin/rimod-analysis/kegg_pathway_view.R
|
0a97f5a43ef0f9024caa6f856c100f6f6bff4a9b
|
[] |
no_license
|
dznetubingen/analysis_scripts
|
1e27ca43a89e7ad6f8c222507549f72b1c4efc20
|
4fcac8a3851414c390e88b4ef4ac461887e47096
|
refs/heads/master
| 2021-06-25T10:47:40.562438
| 2021-01-04T16:02:34
| 2021-01-04T16:02:34
| 187,789,014
| 1
| 0
| null | 2020-09-03T11:37:25
| 2019-05-21T07:55:17
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,580
|
r
|
kegg_pathway_view.R
|
library(pathview)
library(biomaRt)
setwd("~/rimod/integrative_analysis/immune_system_pathway_analysis/")
# biomaRt connection used throughout to map Ensembl gene ids to HGNC symbols.
ensembl <- useMart("ensembl", dataset="hsapiens_gene_ensembl")
# Named vector of all human KEGG pathways shipped with pathview.
data(paths.hsa)
# Build the named log2-fold-change vector (names = HGNC symbols) for one
# DESeq2 result file, using the global biomaRt connection `ensembl`.
buildGeneData <- function(degFile) {
  deg <- read.table(degFile, sep="\t", header=TRUE)
  #deg <- deg[deg$padj <= 0.05,]   # optional significance filter (kept off, as before)
  bm <- getBM(attributes = c("hgnc_symbol", "ensembl_gene_id"),
              filters="ensembl_gene_id", values=deg$X, mart=ensembl)
  deg <- merge(deg, bm, by.x="X", by.y="ensembl_gene_id")
  deg <- deg[!deg$hgnc_symbol == "",]   # drop genes without an HGNC symbol
  gene.data <- deg$log2FoldChange
  names(gene.data) <- deg$hgnc_symbol
  gene.data
}

# Per-group DESeq2 result tables (names become the output-file prefixes).
degFiles <- c(
  C9orf72 = "~/rimod/RNAseq/analysis/RNAseq_analysis_fro_2020-05-04_15.45.57/deseq_result_c9.ndc_fro_2020-05-04_15.45.57.txt",
  GRN = "~/rimod/RNAseq/analysis/RNAseq_analysis_fro_2020-05-04_15.45.57/deseq_result_grn.ndc_fro_2020-05-04_15.45.57.txt",
  MAPT = "~/rimod/RNAseq/analysis/RNAseq_analysis_fro_2020-05-04_15.45.57/deseq_result_mapt.ndc_fro_2020-05-04_15.45.57.txt"
)

# The DEG tables do not change between pathways, so read and map them ONCE
# (the original re-ran read.table + getBM for every pathway iteration).
geneDataByGroup <- lapply(degFiles, buildGeneData)

# Render every remaining KEGG pathway (resuming at index 264) for each group.
for (i in 264:length(paths.hsa)) {
  print(i)
  pw <- gsub("hsa", "", names(paths.hsa)[i])      # numeric pathway id
  pname <- paths.hsa[i]
  pname <- gsub(" ", "", gsub("/", "", pname))    # file-name-safe pathway name
  for (group in names(degFiles)) {
    v <- pathview(gene.data = geneDataByGroup[[group]],
                  pathway.id = pw,
                  gene.idtype = "SYMBOL",
                  out.suffix = paste(group, pname, sep=""),
                  node.sum = 'mean')
  }
}
####
# Individual pathways for inspection
####
# NOTE(review): stray print(i) left over from the loop above.
print(i)
# Glutamatergic synapse pathway (hsa04724), rendered for all three groups.
pw <- '04724'
pname <- "GlutamatergicSynapse"
# C9orf72: map Ensembl ids to HGNC symbols and colour by log2 fold change.
deg <- read.table("~/rimod/RNAseq/analysis/RNAseq_analysis_fro_2020-05-04_15.45.57/deseq_result_c9.ndc_fro_2020-05-04_15.45.57.txt", sep="\t", header=T)
#deg <- deg[deg$padj <= 0.05,]
bm <- getBM(attributes = c("hgnc_symbol", "ensembl_gene_id"), filters="ensembl_gene_id", values=deg$X, mart=ensembl)
deg <- merge(deg, bm, by.x="X", by.y="ensembl_gene_id")
deg <- deg[!deg$hgnc_symbol == "",]
gene.data <- deg$log2FoldChange
names(gene.data) <- deg$hgnc_symbol
v <- pathview(gene.data = gene.data,
pathway.id = pw,
out.suffix = paste("C9orf72", pname, sep=""),
gene.idtype = "SYMBOL",
node.sum = 'mean')
# GRN: same processing as above for the GRN result table.
deg <- read.table("~/rimod/RNAseq/analysis/RNAseq_analysis_fro_2020-05-04_15.45.57/deseq_result_grn.ndc_fro_2020-05-04_15.45.57.txt", sep="\t", header=T)
#deg <- deg[deg$padj <= 0.05,]
bm <- getBM(attributes = c("hgnc_symbol", "ensembl_gene_id"), filters="ensembl_gene_id", values=deg$X, mart=ensembl)
deg <- merge(deg, bm, by.x="X", by.y="ensembl_gene_id")
deg <- deg[!deg$hgnc_symbol == "",]
gene.data <- deg$log2FoldChange
names(gene.data) <- deg$hgnc_symbol
v <- pathview(gene.data = gene.data,
pathway.id = pw,
gene.idtype = "SYMBOL",
out.suffix = paste("GRN", pname, sep=""),
node.sum = 'mean')
# MAPT: same processing as above for the MAPT result table.
deg <- read.table("~/rimod/RNAseq/analysis/RNAseq_analysis_fro_2020-05-04_15.45.57/deseq_result_mapt.ndc_fro_2020-05-04_15.45.57.txt", sep="\t", header=T)
#deg <- deg[deg$padj <= 0.05,]
bm <- getBM(attributes = c("hgnc_symbol", "ensembl_gene_id"), filters="ensembl_gene_id", values=deg$X, mart=ensembl)
deg <- merge(deg, bm, by.x="X", by.y="ensembl_gene_id")
deg <- deg[!deg$hgnc_symbol == "",]
gene.data <- deg$log2FoldChange
names(gene.data) <- deg$hgnc_symbol
v <- pathview(gene.data = gene.data,
pathway.id = pw,
gene.idtype = "SYMBOL",
out.suffix = paste("MAPT", pname, sep=""),
node.sum = 'mean')
|
2ea21942905bea8576dba5faa3a47daa73215a11
|
eaaf41d49afd7cb9bf24e0c1f77f60c23acdbdd4
|
/R/customerHistory.R
|
5d617c087cdc3451c2547db5c952a69b21dd394b
|
[] |
no_license
|
fhirschmann/ml_dmc2014
|
7fd23165dc7fcbcee38267699ea245b7e123ca4f
|
253c8223891d167565137994676b4a556ae21e64
|
refs/heads/master
| 2016-09-10T08:29:14.312845
| 2014-11-07T23:37:47
| 2014-11-07T23:37:47
| null | 0
| 0
| null | null | null | null |
ISO-8859-3
|
R
| false
| false
| 2,325
|
r
|
customerHistory.R
|
source("r/data.r")
library(data.table)
library(plyr)
# Sample 2 random rows from the training set for a quick manual test run.
x <- dt.dmc$M30$train[sample(nrow(dt.dmc$M30$train), 2)]
# Earlier data.table-based attempt, kept for reference (commented out):
#dt.from <- data.table(x[x$deliveryDateMissing == "no", ])
#orderDates <- unique(dt.from[, c("customerID", "orderDate", "itemID"), with=F])
#setkeyv(orderDates, c("customerID", "orderDate", "itemID"))
#orderDates$C <- paste(orderDates$orderDate, orderDates$customerID, sep="")
##orderDates[, index := 1:.N, by=c("customerID")]
#y <- orderDates[orderDates$customerID == 6]
#mergedItems <- aggregate(itemID ~ C, y, as.vector)
#y$itemID <- NULL
#almostthere <- join(y, mergedItems, by=c("C"))
# Global state updated by yesorno() below (one slot per customer id):
# known customers, items bought per customer, known order dates per
# customer, and items seen within the customer's current order session.
customerList <- c()
customerItemList <- c()
customerOrderList <- c()
customerSession <- c()
str(customerItemList)
# Decide whether the current order line is a repeat purchase: returns TRUE
# when this customer has bought this item in an EARLIER order session,
# FALSE otherwise.  Tracks state in the global lists customerList /
# customerItemList / customerOrderList / customerSession via <<-, so calls
# must happen in row order.  str(...) calls are debug output.
# NOTE(review): `customer` (a customerID) is used as a positional list
# index, which assumes ids are small positive integers -- verify for the
# real data.
yesorno <- function(customer, item, order) {
str("incoming")
str(customer)
# message("laosdaosdoas")
## if the customer is already known ..
if(customer %in% customerList) {
## if this order session of the customer is not yet known (identified via the order date)
if(!(order %in% customerOrderList[[customer]])) {
customerOrderList[[customer]] <<- c(customerOrderList[[customer]], order)
customerSession[[customer]] <<- NULL
}
## if the item was bought before, and not within this session, then TRUE
if(!(item %in% customerSession[[customer]]) & (item %in% customerItemList[[customer]])) {
customerSession[[customer]] <<- c(customerSession[[customer]], item)
TRUE
} else if (item %in% customerSession[[customer]]){
str("item in customersession")
FALSE
} else {
str("item not yet in itemlist")
customerItemList[[customer]] <<- c(customerItemList[[customer]], item)
FALSE
}
} else {
## the customer is new:
# create entries: customer list, item list, current session, order history
str(item)
str("and the list")
str(customerItemList)
str(customerList)
customerList <<- c(customerList, customer)
customerItemList[[customer]] <<- c(customerItemList[[customer]], item)
customerSession[[customer]] <<- c(item)
customerOrderList[[customer]] <<- c(list(order), customerOrderList[[customer]] )
FALSE
}
}
# Apply yesorno() row by row; column "test2" flags repeat purchases.
# NOTE(review): row-wise loop with global state -- iteration order matters.
for(i in 1:nrow(x)) {
x[i, "test2"] <- yesorno(x[i , ]$customerID, x[i , ]$itemID, x[i , ]$orderDate)
}
|
ad7c414801f701e99b5d0458fa75461e5aecefe7
|
d6e4cae0c1f3968ddd1a57797a00de47c96a01fa
|
/Simulation Study 1/Large Missingness/R Parallel LC - LatentGOLD.r
|
5bd642e6cf503bb80f7816091f6e27a2a9a763bd
|
[] |
no_license
|
davidevdt/BLCMforMI
|
f87db89d61f02a712d0de2bbe510edbe503c4b01
|
f9a30d21b593a826f0b588782aba4f5ba3e0878c
|
refs/heads/master
| 2021-08-29T04:58:15.973700
| 2017-12-13T13:01:39
| 2017-12-13T13:01:39
| 114,115,025
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,919
|
r
|
R Parallel LC - LatentGOLD.r
|
#Before Running the following code, perform model selection with BLC-model scripts (file "R Parallel - BLC [with model selection].r")
#LG set-up: parallel backend and helper packages
library(foreach)
library(doParallel)
library(plyr)
# Use every available core for the simulation cluster below.
no_cores <- detectCores()
# Combiner for foreach(.combine = ...): given the accumulated list-of-lists
# `x` and one or more new per-iteration result lists, append the j-th element
# of every new result to the j-th slot of `x`.
comb <- function(x, ...) {
  newResults <- list(...)
  lapply(seq_along(x), function(slot) {
    c(x[[slot]], lapply(newResults, function(res) res[[slot]]))
  })
}
#LATENT GOLD FILES AND SYNTAX
LG<-'...//lg51.exe' #Define LatentGOLD folder (full path to the LatentGOLD 5.1 executable)
# Build a LatentGOLD 5.1 syntax file (as one string) for a K-class latent
# class imputation model:
#   in_file  : data file LatentGOLD should read
#   out_file : file where LatentGOLD writes the M imputed data sets
#   M        : number of imputations to request
#   K        : number of latent classes
# NOTE(review): the embedded newlines and quoting are part of the LG syntax
# and must not be reformatted.
makeNewSyntax = function(in_file,out_file,M,K){
paste("//LG5.1//
version = 5.1
infile '",in_file,"'
model
options
algorithm
tolerance=1e-008 emtolerance=0.01 emiterations=5000 nriterations=0;
startvalues
seed=0 sets=100 tolerance=1e-005 iterations=250;
bayes
categorical=1 variances=1 latent=1 poisson=1;
missing includeall;
output profile;
outfile '",out_file,"' imputation= ",M," ;
variables
dependent Y,X1,X2,X3,X4,X5 nominal;
latent
Z nominal ",K," ;
equations
Z <- 1;
Y <- 1+Z;
X1 <- 1+Z;
X2 <- 1+Z;
X3 <- 1+Z;
X4 <- 1+Z;
X5 <- 1+Z;
end model
",sep="")
}
#HERE: SELECT FROM R-CONSOLE THE FOLDER WHERE YOU WANT TO STORE AND READ THE LG-FILES
#Simulation Preparation
# NOTE(review): N, n, b, d, dset and classes must already exist in the
# workspace (created by the BLC model-selection script mentioned above).
B<-N
cl<-makeCluster(no_cores)
registerDoParallel(cl)
# npar: number of regression parameters; bp/dp: true values of the two
# coefficient vectors, used later for bias and coverage.
npar<-length(b)
vobs<-n-(npar*2)
# mm: number of multiple imputations per simulated data set.
mm<-20
bp<-b
dp<-d
# Coverage counters and bias accumulators (one entry per parameter).
CRa1<-CRa2<-rep(0,npar)
BIASa1<-BIASa2<-rep(0,npar)
clusterExport(cl,c("dset","classes","npar","vobs","mm","makeNewSyntax"))
#Parallel Simulations
# One foreach task per simulated data set: write the data and an LG syntax
# file (names keyed by worker PID so parallel workers don't collide), run
# LatentGOLD in batch mode, read back the mm imputed data sets, fit the
# multinomial analysis model to each, and pool with Rubin's rules.
results<-foreach(i=1:B,.combine=comb,.multicombine=TRUE,.init=list(list(),list(),list(),list(),list(),list(),list(),list(),list()),.packages=c('nnet')) %dopar% {
# Per-imputation coefficient estimates (rows) for the two outcome contrasts.
para1<-para2<-matrix(0,mm,npar)
# Per-imputation squared standard errors.
tsea1<-tsea2<-matrix(0,mm,npar)
ra1<-ra2<-rep(0,npar)
ua1<-ua2<-rep(0,npar)
Ba1<-Ba2<-rep(0,npar)
Ta1<-Ta2<-matrix(0,npar)
lamdaa1<-lamdaa2<-rep(0,npar)
nia1<-nia2<-rep(0,npar)
DOFa1<-DOFa2<-rep(0,npar)
LOWa1<-LOWa2<-rep(0,npar)
UPPa1<-UPPa2<-rep(0,npar)
# Worker-specific file names prevent clashes between parallel processes.
thrID<-Sys.getpid()
in_file<-paste("parallel",thrID,".txt",sep="")
imp_dat_file<-paste("imputed_data",thrID,".dat",sep="")
outfile3<-paste("lc_imp",thrID,".lgs",sep="")
write.table(dset[[i]],in_file,na=".",sep=" ",row.names=FALSE,quote=FALSE)
write.table(makeNewSyntax(in_file,imp_dat_file,mm,classes[i]),outfile3,row.names=FALSE,quote=FALSE,col.names=FALSE)
T1<-proc.time()
# Run LatentGOLD in batch mode (shell() -- Windows only).
shell(paste(LG,outfile3,"/b"))
imp_dat<-read.table(imp_dat_file,sep="",header=TRUE)
# Fit the analysis model on each of the mm imputed data sets
# (column 7 of the LG output holds the imputation number).
for(j in 1:mm){
tmp = imp_dat[which(imp_dat[,7]==j),-7]
moda<-multinom(as.factor(Y)~X1+X2+X3+X4+X5+X2:X5+X3:X4,dat=tmp)
para1[j,]<-coefficients(moda)[1,]
para2[j,]<-coefficients(moda)[2,]
# NOTE(review): summary(moda)[[30]] relies on the positional slot of the
# standard errors in summary.multinom -- confirm against the installed
# nnet version (summary(moda)$standard.errors is the named form).
tsea1[j,]<-(summary(moda)[[30]][1,])^2
tsea2[j,]<-(summary(moda)[[30]][2,])^2
}
# Rubin's rules: pooled point estimates ...
esta1<-apply(para1,2,mean)
esta2<-apply(para2,2,mean)
# ... within-imputation variance ...
ua1<-apply(tsea1,2,mean)
ua2<-apply(tsea2,2,mean)
# ... between-imputation variance ...
Ba1<-(apply((t(t(para1)-esta1))^2,2,sum))/(mm-1)
Ba2<-(apply((t(t(para2)-esta2))^2,2,sum))/(mm-1)
# ... total standard error.
Ta1<-sqrt(ua1+((1+(1/mm))*Ba1))
Ta2<-sqrt(ua2+((1+(1/mm))*Ba2))
# Fraction of missing information and adjusted degrees of freedom.
lambdaa1<-((1+(1/mm))*Ba1)/(Ta1^2)
lambdaa2<-((1+(1/mm))*Ba2)/(Ta2^2)
ra1<-(mm-1)/(lambdaa1)^2
ra2<-(mm-1)/(lambdaa2)^2
nia1<-((vobs+1)/(vobs+3))*vobs*(1-lambdaa1)
nia2<-((vobs+1)/(vobs+3))*vobs*(1-lambdaa2)
DOFa1<-((1/ra1)+(1/nia1))^(-1)
DOFa2<-((1/ra2)+(1/nia2))^(-1)
# 95% confidence interval bounds per parameter.
LOWa1<-esta1-(qt(0.975,DOFa1)*Ta1)
LOWa2<-esta2-(qt(0.975,DOFa2)*Ta2)
UPPa1<-esta1+(qt(0.975,DOFa1)*Ta1)
UPPa2<-esta2+(qt(0.975,DOFa2)*Ta2)
T2<-proc.time()-T1
# Returned in the fixed order expected by the unlisting code below.
list(esta1,esta2,Ta1,Ta2,LOWa1,LOWa2,UPPa1,UPPa2,T2[[3]])
}
stopCluster(cl)
#Unlist Simulation Results: one B x npar matrix per pooled quantity
# (rows = replications, columns = parameters).
esta1<-matrix(unlist(results[[1]]),B,npar,byrow=TRUE)
esta2<-matrix(unlist(results[[2]]),B,npar,byrow=TRUE)
Ta1<-matrix(unlist(results[[3]]),B,npar,byrow=TRUE)
Ta2<-matrix(unlist(results[[4]]),B,npar,byrow=TRUE)
LOWa1<-matrix(unlist(results[[5]]),B,npar,byrow=TRUE)
LOWa2<-matrix(unlist(results[[6]]),B,npar,byrow=TRUE)
UPPa1<-matrix(unlist(results[[7]]),B,npar,byrow=TRUE)
UPPa2<-matrix(unlist(results[[8]]),B,npar,byrow=TRUE)
Times<-unlist(results[[9]])
#####################################################################
# Bias: mean estimate across replications minus the true values.
BIASa1<-(apply(esta1,2,mean))-bp
BIASa2<-(apply(esta2,2,mean))-dp
# Average (pooled) standard errors across replications.
ASEa1<-apply(Ta1,2,mean)
ASEa2<-apply(Ta2,2,mean)
# Coverage: count how often the 95% CI contains the true parameter value.
for(i in 1:B){
for(j in 1:npar){
if(bp[j]>=LOWa1[i,j] & bp[j]<=UPPa1[i,j]){
CRa1[j] = CRa1[j]+1
}
}
}
for(i in 1:B){
for(j in 1:npar){
if(dp[j]>=LOWa2[i,j] & dp[j]<=UPPa2[i,j]){
CRa2[j] = CRa2[j]+1
}
}
}
# Turn the counts into coverage rates.
CRa1<-CRa1/B
CRa2<-CRa2/B
#Final Results: bias, relative bias, average SE, coverage, total run time.
round(rbind(BIASa1),3)
round(rbind(BIASa2),3)
round(rbind(BIASa1/b),3)
round(rbind(BIASa2/d),3)
round(rbind(ASEa1),3)
round(rbind(ASEa2),3)
rbind(CRa1)
rbind(CRa2)
sum(Times)
|
e5c2ed31042599e49903c8c45a0d9cd40e17c2b5
|
9c7c2ca8700a1751fa6f66094295cd34e13fc484
|
/quantfin_ropen_p1quandl.R
|
d9cb27cd718a55a6094205a74a15b9ecf94a8613
|
[] |
no_license
|
jrottersman/Rcode
|
0e9a756f006eba9c330ff39909bea0b0639bcf9b
|
4c29d79ac9cde3c63999e425d37c86defe54a2d0
|
refs/heads/master
| 2021-01-10T22:05:31.861269
| 2015-04-29T23:42:11
| 2015-04-29T23:42:11
| 25,662,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
r
|
quantfin_ropen_p1quandl.R
|
library(Quandl)
# Full history of monthly gold futures prices (OFDP/FUTURE_GC2).
gold <- Quandl("OFDP/FUTURE_GC2", collapse = "monthly")
# 20 years of monthly gold prices
gold20 <- Quandl("OFDP/FUTURE_GC2", collapse= "monthly", start_date = "1992-06-01", end_date = "2012-05-01")
# Compute monthly log returns from the settle price.
# NOTE(review): rev() assumes Quandl returned rows newest-first; the reverse
# puts them in chronological order -- verify for this data set.
gold.settle <- gold20[, "Settle"]
gold.settle <- rev(gold.settle)
# Log return r_t = log(P_t / P_{t-1}).
log.gold <- log(gold.settle[-1]/gold.settle[-length(gold.settle)])
# Minimal sanity check of the return series.
head(log.gold)
tail(log.gold)
|
4a2f018e40603ec77ed67772f971efd6bb5f020d
|
907054819ef2b22288814b42a855c42406a06585
|
/man/arkdb-package.Rd
|
f619130b146bec981a02b009ea61739250d6cfb9
|
[
"MIT"
] |
permissive
|
ropensci/arkdb
|
f723b334523a3f3474c4eb8776d55b05178dffcf
|
18ec931cba15925afd3905a921c7a73b05db5031
|
refs/heads/master
| 2023-05-23T20:00:18.113506
| 2022-11-18T06:32:21
| 2022-11-18T06:32:21
| 136,522,042
| 62
| 5
|
NOASSERTION
| 2022-11-18T06:32:22
| 2018-06-07T19:29:36
|
R
|
UTF-8
|
R
| false
| true
| 1,386
|
rd
|
arkdb-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arkdb.R
\docType{package}
\name{arkdb-package}
\alias{arkdb}
\alias{arkdb-package}
\title{arkdb: Archive and Unarchive Databases Using Flat Files}
\description{
Flat text files provide a more robust, compressible,
and portable way to store tables. This package provides convenient
functions for exporting tables from relational database connections
into compressed text files and streaming those text files back into
a database without requiring the whole table to fit in working memory.
}
\details{
It has two functions:
\itemize{
\item \code{\link[=ark]{ark()}}: archive a database into flat files, chunk by chunk.
\item \code{\link[=unark]{unark()}}: Unarchive flat files back into a database connection.
}
arkdb will work with any \code{DBI} supported connection. This makes it
a convenient and robust way to migrate between different databases
as well.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/ropensci/arkdb}
\item Report bugs at \url{https://github.com/ropensci/arkdb/issues}
}
}
\author{
\strong{Maintainer}: Carl Boettiger \email{cboettig@gmail.com} (\href{https://orcid.org/0000-0002-1642-628X}{ORCID}) [copyright holder]
Other contributors:
\itemize{
\item Richard FitzJohn [contributor]
\item Brandon Bertelsen \email{brandon@bertelsen.ca} [contributor]
}
}
|
5e18492bce7a9450b0fa4bf4f280fd90340e9759
|
45d7455b79bdf23be24e81bcf91396b941ce3f53
|
/R/plotSelectedEUCases.R
|
953cca42d53c38abd735d1159d76953d4943f0fb
|
[
"CC-BY-4.0"
] |
permissive
|
lgreski/COVID-19
|
525e188120de11d228dbfd821d686d7a64829b4d
|
b66e0ce3d38bc6ed4a5ea7b79c6088dfeb6d2c2a
|
refs/heads/master
| 2023-06-12T18:02:24.736309
| 2023-06-10T18:35:30
| 2023-06-10T18:35:30
| 249,577,621
| 1
| 1
| null | 2023-02-12T20:43:45
| 2020-03-24T00:48:05
|
R
|
UTF-8
|
R
| false
| false
| 1,238
|
r
|
plotSelectedEUCases.R
|
#
# plot covid-19 cases for selected countries in Europe
#
# (c) 2020 - 2023 Leonard Greski
# copying permitted with attribution

# Use library() so a missing dependency fails loudly (require() merely
# returns FALSE), and attach lubridate explicitly: mdy() comes from it
# and it was never loaded in the original script.
library(dplyr)
library(ggplot2)
library(ggeasy)
library(lubridate)

# `data` is assumed to be the daily-report table loaded earlier in the
# pipeline -- TODO confirm against the calling script.
data$Country_Region[data$Country_Region == "UK"] <- "United Kingdom"

countryList <- c("United Kingdom", "Ireland", "France", "Germany",
                 "Italy", "Spain", "Belgium", "Netherlands")

# Parse dates before filtering: comparing "mm-dd-yyyy" strings
# lexicographically does not order them chronologically across years.
europe <- data %>%
  mutate(date = mdy(date)) %>%
  filter(Country_Region %in% countryList & date > mdy("02-21-2020")) %>%
  group_by(Country_Region, date) %>%
  summarise(Confirmed = sum(Confirmed), .groups = "drop") %>%
  rename(Country = Country_Region)

asOfDate <- max(europe$date)
message("data as of ", asOfDate)

ggplot(europe, aes(date, Confirmed, group = Country)) +
  geom_line(aes(group = Country), color = "grey80") +
  geom_point(aes(color = Country)) +
  scale_x_date(date_breaks = "2 days") +
  easy_rotate_x_labels(angle = 45, side = "right") +
  labs(x = "Date",
       y = "Confirmed Cases",
       title = paste("COVID-19 Cases for Selected Countries as of", asOfDate))

#
# get list of country names
library(sqldf)
sqlStmt <- paste("select Country_Region, count(*) from data group by Country_Region",
                 "order by Country_Region")
sqldf(sqlStmt)
|
91f66d57ebbcb70c69f4adfb397cf745811e2956
|
722d32d39d2906b3f24eb8ac2172059700021ecb
|
/R/sdev.R
|
f30ce0bfa3f2c4bd0f959fe607b2616a14ba4a1e
|
[] |
no_license
|
einarhjorleifsson/husky
|
a69f9820b4d0634a77ec031f2d972c55879e45ec
|
7219dec18579308bb941015e45282f11e21f83cf
|
refs/heads/master
| 2020-07-26T05:25:16.353936
| 2016-12-02T16:51:26
| 2016-12-02T16:51:26
| 73,732,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 182
|
r
|
sdev.R
|
#' Sample standard deviation
#'
#' Computes the square root of the sample variance of \code{x}; for a
#' numeric vector this is equivalent to \code{stats::sd(x)}.
#' Originally recovered from
#' /net/hafkaldi/export/u2/reikn/Splus5/SMB/GEOMETRY.NEW/.RData
#'
#' @param x A numeric vector.
#'
#' @return The sample standard deviation of \code{x} (length-one numeric).
#' @export
sdev <- function(x) {
  # Explicit return() is unnecessary here; the last expression is returned.
  sqrt(var(x))
}
|
b450acbf8d4a1179fffd55019b6e0c25d28db1f5
|
b9ee02abf87564a92883d1a03e7ff6a0da5f621d
|
/man/important_gene.Rd
|
f881e236508fcc94002ef7d2c35746439102c40e
|
[] |
no_license
|
fparyani/DeepDeconv
|
e69888e8357992035285f9b46a821f5bf331ecd0
|
e3151f95c1364b2954daafc9a73201829f7561de
|
refs/heads/master
| 2023-06-07T04:50:37.113013
| 2021-07-05T18:05:51
| 2021-07-05T18:05:51
| 382,455,047
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,321
|
rd
|
important_gene.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/important_gene.R
\name{important_gene}
\alias{important_gene}
\title{Find Important Gene}
\usage{
important_gene(
quant_mat,
factor_group,
cell_type,
st_gene = NA,
num_gene = 500
)
}
\arguments{
\item{quant_mat}{A gene expression matrix that has already been quantile normalized}
\item{cell_type}{This is the name of the cell whose gene signature you are looking for, should be one of the name from "groups"}
\item{st_gene}{If you are working with spatial transcriptomic data, add its gene list to ensure feasibility when running the model}
\item{num_gene}{Hyper-parameter that approximately determines number of genes to sample from all permutation of pairwise groups}
\item{factor_group}{The groups entered refer to the various cell types of your dataset and are assumed to be in factored form when entered.}
}
\value{
Returns a vector of the location of the gene on the matrix inputted
}
\description{
The purpose of this function is to reduce the dimensions of a gene expression matrix by finding the most
relevant genes of a particular cell type using pairwise Wilcox test. This function assumes the distribution of your cell types in quant_mat
reflects the set of genes you are searching for.
}
\keyword{gene}
\keyword{selection}
|
887173763f06dd220f02d3a85c847320ab50d385
|
cd3772c8fa26937675aba85c21146dd6b99de2a2
|
/partials/income_map_pop_contour.R
|
e12cefec5ca1e9611cc4e42b3d7d319b3d8fa52a
|
[] |
no_license
|
jimjh/315-project
|
8c7fbec209bd369cfa8cabb07a07d2623496ed9f
|
dd85d5d71fd94789d5c270191369172f57fa679a
|
refs/heads/master
| 2021-01-10T21:04:40.975842
| 2013-05-07T01:29:36
| 2013-05-07T01:29:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,243
|
r
|
income_map_pop_contour.R
|
# Per-sex median-income vectors, keyed for lookup by the render functions
# below. Relies on `louisiana.blkgrp10`, `dens`, `col.vector`, `output`
# and `input` defined elsewhere in the shiny app.
incomepop.data <- list('male' = louisiana.blkgrp10$income.male,
                       'female' = louisiana.blkgrp10$income.female)

# Side-by-side male/female income maps, optionally overlaid with a
# population-density contour.
output$income_vs_pop <- renderPlot({
  par(mfrow = c(1, 2))
  if (input$income_contour == TRUE) {
    # plot the contour overlay on the map showing pop density
    plot(louisiana.blkgrp10, xlim = c(-90.29, -89.84), ylim = c(29.81, 30.10),
         col = col.vector(incomepop.data[['male']]), border = NA)
    contour(dens, col = rgb(0, 0, 0, .5), lwd = 2, add = TRUE)
  }
  else {
    # No contour: draw block-group borders and highlight the city-center
    # rectangle shown by the zoomed plot below.
    plot(louisiana.blkgrp10, xlim = c(-90.29, -89.84), ylim = c(29.81, 30.10),
         col = col.vector(incomepop.data[['male']]), border = 1, lwd = .5)
    rect(-90.13884, 29.98311, -90.06836, 29.92592, border = 2, lwd = 4)
  }
  title("Map of Male Income Distribution vs. Population Density in New Orleans (2010)")
  legend("top",
         legend = c("First Quartile", "Second Quartile", "Third Quartile", "Fourth Quartile"),
         col = c("yellow", "gold", "darkgoldenrod2", "darkorange"), lwd = 3)
  if (input$income_contour == TRUE) {
    # plot the contour overlay on the map showing pop density
    plot(louisiana.blkgrp10, xlim = c(-90.29, -89.84), ylim = c(29.81, 30.10),
         col = col.vector(incomepop.data[['female']]), border = NA)
    contour(dens, col = rgb(0, 0, 0, .5), lwd = 2, add = TRUE)
  }
  else {
    # Fixed: the original call contained a stray empty argument (", ,").
    plot(louisiana.blkgrp10, xlim = c(-90.29, -89.84), ylim = c(29.81, 30.10),
         col = col.vector(incomepop.data[['female']]), border = 1, lwd = .5)
    rect(-90.13884, 29.98311, -90.06836, 29.92592, border = 2, lwd = 4)
  }
  title("Map of Female Income Distribution vs. Population Density in New Orleans (2010)")
  legend("top",
         legend = c("First Quartile", "Second Quartile", "Third Quartile", "Fourth Quartile"),
         col = c("yellow", "gold", "darkgoldenrod2", "darkorange"), lwd = 3)
})

# Zoomed views of the highlighted city-center rectangle.
output$zoomed_in_income <- renderPlot({
  par(mfrow = c(1, 2))
  plot(louisiana.blkgrp10, xlim = c(-90.13884, -90.06836), ylim = c(29.98311, 29.92592),
       col = col.vector(incomepop.data[['male']]), border = NA)
  title("Zoomed: Male Income Distribution in City Center")
  plot(louisiana.blkgrp10, xlim = c(-90.13884, -90.06836), ylim = c(29.98311, 29.92592),
       col = col.vector(incomepop.data[['female']]), border = NA)
  title("Zoomed: Female Income Distribution in City Center")
})
|
77a9e35522eb4438f3e019158a3eb05f3fe7d29a
|
6943ec72c033da6fdc0e6f001adf74b5b1098287
|
/R/transform.R
|
380f9f62b3b1ba61d20fb943674555a979037911
|
[
"MIT"
] |
permissive
|
mjmm13/BCB420.2019.COSMIC
|
4f33d2c052234b662b3db486e5cb2b52fdf72ae6
|
a448dfb6437f2f459bb77db08886ebb8516beab1
|
refs/heads/master
| 2020-04-20T16:49:39.317169
| 2019-02-11T06:40:46
| 2019-02-11T06:40:46
| 168,969,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 800
|
r
|
transform.R
|
# transform.R

#' Compute mean mutation rates by gene and/or tissue
#'
#' Transforms a table of targeted screens (including negatives) into mean
#' mutation rates, cross-tabulated by tissue site and/or gene symbol.
#'
#' @param mut data.frame of targeted screens; must contain a numeric
#'   \code{Mutation} column plus \code{Site} (when \code{tissue = TRUE})
#'   and \code{newSymbol} (when \code{gene = TRUE}).
#' @param tissue logical; if TRUE, stratify rates by tissue site.
#' @param gene logical; if TRUE, stratify rates by gene symbol.
#'   At least one of \code{tissue}/\code{gene} must be TRUE.
#'
#' @return An array of mean mutation rates dimensioned by the requested
#'   strata: a Site x Gene matrix when both are TRUE, otherwise a
#'   one-dimensional named array. Cells with no observations are NA.
#'
#' @author Matthew McNeil
MutationTransform <- function(mut, tissue = TRUE, gene = TRUE) {
  # Fail fast with a clear message instead of letting tapply() error
  # on an empty index list.
  if (!tissue && !gene) {
    stop("At least one of 'tissue' or 'gene' must be TRUE.", call. = FALSE)
  }
  indexList <- list()
  if (tissue) {
    indexList$Site <- mut$Site
  }
  if (gene) {
    indexList$Gene <- mut$newSymbol
  }
  tapply(mut$Mutation, indexList, mean)
}
# [END]
|
5f25075b0c9c16af0d14d2bb2b709f3eadf5a783
|
b0630daa7219ac30bd41d49f8535c4d3d8afff0b
|
/Standard_Models/Random_Forest_Regressor.R
|
4e6711b026ba94fbd2e5e1557c5278e6c86e1cb7
|
[] |
no_license
|
oscarm524/Machine-Learning
|
c861d2ef501405d2d0507c2e931b16073567b1f5
|
ccca40bcd09e19e29a51237f9b169f7d62d09f27
|
refs/heads/master
| 2023-05-08T06:55:49.437633
| 2021-05-28T22:39:49
| 2021-05-28T22:39:49
| 261,507,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
Random_Forest_Regressor.R
|
#############################
## Random Forest Regressor ##
#############################

#' Fit a random forest regression model.
#'
#' @param X data.frame/matrix of predictors.
#' @param Y numeric vector of target values (same length as nrow(X)).
#' @return A fitted \code{randomForest} object.
Random_Forest_Regressor <- function(X, Y) {
  # Check availability without attaching, installing on demand.
  # (The original `require(randomForest, character.only = TRUE)` is broken:
  # with character.only = TRUE the first argument must be a quoted string,
  # and `install.packages(randomForest)` was missing quotes as well.)
  if (!requireNamespace("randomForest", quietly = TRUE)) {
    install.packages("randomForest")
  }
  # Use the x/y interface: `randomForest(Y ~ X)` treats the whole
  # data.frame X as a single formula term rather than a predictor set.
  rf_md <- randomForest::randomForest(x = X, y = Y)
  rf_md
}
|
9437b9defe665b4a05ce7ecc79fd3417640e061e
|
da63137ed3cbeccff8fd7c7aea6fc4403829ce4d
|
/run_analysis.R
|
24f62b907b30c5968838b17ac7f42d70af2c7bcb
|
[] |
no_license
|
gvillemsr/getcleandata_Courseproject
|
ed961471f6da8b215c53a890411c3281089c9a4f
|
7126ad66ffec764952b4069cfff4bc9713b612d6
|
refs/heads/master
| 2021-01-23T06:49:25.701064
| 2014-06-21T03:56:44
| 2014-06-21T03:56:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,911
|
r
|
run_analysis.R
|
# Build a tidy summary of the UCI HAR (smartphone activity) dataset:
# merge test/train sets, keep mean/std features, label activities, and
# average each feature per subject/activity pair.
# NOTE(review): expects the "UCI HAR Dataset" directory in the working dir.

# Raw 561-feature measurement matrices for both partitions.
test<-read.table("UCI HAR Dataset/test/X_test.txt", sep="",header=FALSE)
train<-read.table("UCI HAR Dataset/train/X_train.txt", sep="",header=FALSE)
# Hard-coded positions of the mean()/std() features in features.txt --
# TODO confirm these indices against the dataset's feature list.
extcols<-c(1:6,41:46,81:86,121:126,161:166,201:202,214,215,227,228,240,241,253,254)
extcols2<-c(266:271,345:350,424:429,503,504,516,517,529,530,542,543)
extract<-c(extcols,extcols2)
# Keep only the mean/std columns, then stack test on top of train.
testsampsubs<-subset(test,select=extract)
trainsampsubs<-subset(train,select=extract)
datasub<-rbind(testsampsubs,trainsampsubs)
# Subject identifiers, stacked in the same test-then-train order.
testsubj<-read.table("UCI HAR Dataset/test/subject_test.txt", sep="",header=FALSE)
trainsubj<-read.table("UCI HAR Dataset/train/subject_train.txt", sep="",header=FALSE)
subjects<-rbind(testsubj,trainsubj)
colnames(subjects)<-c("subject")
# Activity codes (1-6), stacked in the same order.
testactiv<-read.table("UCI HAR Dataset/test/y_test.txt", sep="",header=FALSE)
trainactiv<-read.table("UCI HAR Dataset/train/y_train.txt", sep="",header=FALSE)
activity<-rbind(testactiv,trainactiv)
colnames(activity)<-c("Activity")
dataset<-cbind(subjects,activity,datasub)
# Use feature names from features.txt as descriptive column names for
# the extracted measurement columns.
names<-read.table("UCI HAR Dataset/features.txt", sep="",header=FALSE)
names<-subset(names,select=c(2))
names<-t(names)
namessub<-subset(names,select=extract)
colnames(dataset)<-cbind(colnames(subjects),colnames(activity),namessub)
# Replace numeric activity codes with descriptive factor labels.
dataset$Activity<-as.numeric(dataset$Activity)
dataset$Activity<-factor(dataset$Activity)
levels(dataset$Activity)[1]<-"Walking"
levels(dataset$Activity)[2]<-"Walking_upstairs"
levels(dataset$Activity)[3]<-"Walking_downstairs"
levels(dataset$Activity)[4]<-"Sitting"
levels(dataset$Activity)[5]<-"Standing"
levels(dataset$Activity)[6]<-"Laying"
# Average every measurement column per subject/activity combination.
bysubj<-dataset$subject
byactivity<-dataset$Activity
tidydata<-aggregate(dataset[,3:68], by=list(bysubj,byactivity),FUN="mean")
colnames(tidydata)[1]<-"Subject"
colnames(tidydata)[2]<-"Activity"
# Write the tidy table plus a companion file listing its column names.
write.table(tidydata, "tidydata.txt", sep=" ",col.names=F, row.names=F)
cnames<-colnames(tidydata)
write.table(cnames, "column_names.txt", sep=" ",col.names=F, quote=F)
|
84c754bf02e6b2271f7f6f518aeba17d8f432e23
|
2058b23e90178e75d154081642a1c2fb38abc446
|
/app.R
|
39b2b37205d2978d6ad71693862fe31e81601ff8
|
[] |
no_license
|
cmartini86/Developing_Data_Products
|
8ec58ab82617fb58e0ffecd146cf884be3c42283
|
648dbd9dd23e4635de486ac1eae3097b4504fb01
|
refs/heads/main
| 2023-01-07T20:36:59.056799
| 2020-11-13T22:12:15
| 2020-11-13T22:12:15
| 306,761,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,173
|
r
|
app.R
|
# Shiny app: bar chart of a user-selected quarterback statistic.
library(shiny)

# Read the stats once and derive a row-named copy for column lookups,
# instead of parsing the file twice. The path is relative to the app
# directory (shiny's convention); the original hard-coded
# setwd("C:/DevDataProd") broke the app on any other machine.
data <- read.csv("QB_STATS.csv", header = TRUE)
dat <- data[, setdiff(names(data), "NAME"), drop = FALSE]
rownames(dat) <- data$NAME

server <- function(input, output) {
  # Fill in the spot we created for a plot
  output$statPlot <- renderPlot({
    # Wide bottom margin so rotated player names fit under the bars.
    par(mar = c(11, 4, 4, 4))
    # Render a barplot of the chosen statistic, one bar per quarterback.
    barplot(dat[, input$stat],
            main = input$stat,
            ylab = "Amount",
            xlab = "",
            names.arg = data$NAME,
            cex.names = 1,
            axis.lty = 1,
            angle = 90,
            las = 2)
  })
}

ui <- fluidPage(
  # Give the page a title
  titlePanel("Quarterback Statistics"),
  # Generate a row with a sidebar
  sidebarLayout(
    # Define the sidebar with one input
    sidebarPanel(
      selectInput("stat", "Stat:",
                  choices = colnames(dat)),
      hr(),
      helpText("Data from 2020-2021 Season (through 5 weeks)")
    ),
    # Create a spot for the barplot
    mainPanel(
      plotOutput("statPlot")
    )
  )
)

shinyApp(ui = ui, server = server)
|
e562d6cc64f1841f4bc41e65bcc5cba5ae201f2f
|
bca52aeca6a6db6bb675ebdb1906a2b78f9b89df
|
/misc scripts/three dimensional array.R
|
ec5a297f583aed575a199992a0feb7a66cd39489
|
[] |
no_license
|
ammeir2/selective-fmri
|
1b6402e3f007c82a73bb92f18024d88c936c3739
|
4c2274257ba46c1c744a1da764e8f92fd60294b0
|
refs/heads/master
| 2021-01-11T13:29:16.942383
| 2017-06-20T22:58:27
| 2017-06-20T22:58:27
| 81,491,192
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,872
|
r
|
three dimensional array.R
|
# Draw one heatmap per axial slice (k) of a voxel table.
# `coordinates` must have columns i, j, k; `column` is the (numeric)
# position of the value column to display. The color scale is symmetric
# about zero within each slice. Relies on globals I, J, K for the grid
# dimensions -- confirm they are defined before calling.
plotBRAIN <- function(coordinates, column, col = NULL) {
  for (slice in 1:K) {
    slice.dat <- subset(coordinates, k == slice)
    vals <- slice.dat[, column]
    # Fill an I x J matrix from the (i, j) voxel coordinates; voxels
    # absent from this slice stay NA and are left blank by image().
    img <- matrix(nrow = I, ncol = J)
    for (r in 1:nrow(slice.dat)) {
      img[slice.dat$i[r], slice.dat$j[r]] <- vals[r]
    }
    lim <- max(abs(vals))
    if (is.null(col)) {
      image(1:I, 1:J, img, zlim = c(-lim, lim), main = slice)
    } else {
      image(1:I, 1:J, img, zlim = c(-lim, lim), main = slice, col = col)
    }
  }
}
# Partition the selected voxels of `coordinates` into connected clusters.
# Two voxels are neighbors when their Manhattan distance over (i, j, k)
# is exactly 1. Each returned cluster is a data.frame of voxel
# coordinates with their original row index and selection flag; clusters
# also absorb non-selected voxels adjacent to their selected members.
# NOTE(review): builds a dense n x n distance matrix, so memory grows
# quadratically in the number of voxels.
findClusters <- function(coordinates) {
  columns <- which(names(coordinates) %in% c("i", "j", "k"))
  # Work queue: all currently-selected voxels, consumed as clusters form.
  selected <- coordinates[coordinates$selected, ]
  # Adjacency matrix: nonzero exactly for Manhattan-distance-1 pairs.
  graph <- as.matrix(dist(coordinates[, columns], method = "manhattan"))
  graph[graph > 1] <- 0
  clusterNumber <- 1
  clusters <- list()
  while(nrow(selected) > 0) {
    # Seed a new cluster with the first unprocessed selected voxel.
    cluster <- selected[1, columns]
    toadd <- which(graph[selected$row[1], ] != 0)
    # Selected neighbors must themselves be expanded (breadth-first);
    # non-selected neighbors are added to the cluster but not expanded.
    toVerify <- toadd[which(coordinates$selected[toadd])]
    cluster <- rbind(cluster, coordinates[setdiff(toadd, toVerify), columns])
    selected <- selected[-1, ]
    while(length(toVerify) > 0) {
      # Pop the next selected neighbor from the verification queue.
      srow <- which(selected$row == toVerify[1])
      toVerify <- toVerify[-1]
      cluster <- rbind(cluster, selected[srow, columns])
      toadd <- which(graph[selected$row[srow], ] != 0)
      # Queue neighbors that are still unprocessed selected voxels.
      newVerify <- toadd[which(toadd %in% setdiff(selected$row, toVerify))]
      toVerify <- c(toVerify, newVerify)
      selected <- selected[-srow, ]
      cluster <- rbind(cluster, coordinates[setdiff(toadd, toVerify), columns])
    }
    # Deduplicate (voxels can be reached from several neighbors) and
    # re-attach bookkeeping columns from the master table.
    cluster <- unique(cluster)
    cluster$row <- as.numeric(rownames(cluster))
    cluster$selected <- coordinates$selected[cluster$row]
    clusters[[clusterNumber]] <- cluster
    clusterNumber <- clusterNumber + 1
  }
  return(clusters)
}
# parameters + setup
# NOTE(review): the live code below references I, J, K, `covariance` and
# `sqrtCov`, which are only defined in these commented-out lines -- the
# script cannot run standalone unless they already exist in the workspace.
# I <- 11
# J <- 10
# K <- 9
# rho <- 0.7
# coordinates <- expand.grid(i = 1:I, j = 1:J, k = 1:K)
# covariance <- rho^as.matrix(dist(coordinates[, 1:3], method = "euclidean",
#                                 diag = TRUE, upper = TRUE))
# covEigen <- eigen(ovariance)
# sqrtCov <- covEigen$vectors %*% diag(sqrt(covEigen$values)) %*% t(covEigen$vectors)
# precision <- covEigen$vectors %*% diag((covEigen$values)^-1) %*% t(covEigen$vectors)
targetSnr <- 3
set.seed(5120)

# Generating Signal ------------
# Voxel grid plus a row index used for bookkeeping downstream.
coordinates <- expand.grid(i = 1:I, j = 1:J, k = 1:K)
signalProp <- 1
nnodes <- I * J * K
coordinates$row <- 1:nrow(coordinates)
# First signal construction: cumulative sums of axis-wise normal draws.
# NOTE(review): this `mu` is discarded -- it is overwritten by the
# Gaussian-bump signal a few lines below.
mu <- sapply(c(I, J, K), function(x) rnorm(x))
mu <- apply(coordinates, 1, function(x) {
  sum(mu[[1]][1:x[1]]) + sum(mu[[2]][x[2]:J]) + sum(mu[[3]][1:x[3]])
})
mu <- mu - mean(mu)
coordinates$signal <- mu
par(mfrow = c(3, 3), mar = rep(2, 4))
# Second (effective) signal: a Gaussian bump at a random voxel, scaled
# so its peak equals targetSnr.
location <- sapply(c(I, J, K), function(x) sample.int(x, 1))
s <- matrix(0.3, nrow = 3, ncol = 3)
diag(s) <- 1
s <- s*2
mu <- mvtnorm::dmvnorm(coordinates[, 1:3], mean = location, sigma = s)
mu <- mu * targetSnr / max(mu)
coordinates$signal <- mu

# Generating noise + data -----------------
# Correlated Gaussian noise: white noise colored by the covariance root.
noise <- rnorm(nnodes)
noise <- sqrtCov %*% noise
coordinates$noise <- noise
# snr <- var(coordinates$signal) / var(coordinates$noise)
# coordinates$signal <- coordinates$signal / sqrt(snr) * sqrt(targetSnr)
coordinates$observed <- coordinates$signal + coordinates$noise
# Visual check of signal, noise and observed data, slice by slice.
par(mfrow = c(3, 3), mar = rep(2, 4))
plotBRAIN(coordinates, which(names(coordinates) == "signal"), col = rainbow(100))
plotBRAIN(coordinates, which(names(coordinates) == "noise"), col = rainbow(100))
plotBRAIN(coordinates, which(names(coordinates) == "observed"), col = rainbow(100))
# Univariate screening ----------------------
# Per-voxel z-tests with Benjamini-Hochberg selection at level BHlevel.
threshold <- 1.96
BHlevel <- 0.1
# Standardize each observation by its marginal standard deviation.
coordinates$zval <- coordinates$observed / sqrt(diag(covariance))
# Two-sided p-values and BH-adjusted q-values.
coordinates$pval <- 2 * pnorm(-abs(coordinates$zval))
coordinates$qval <- p.adjust(coordinates$pval, method = "BH")
par(mfrow = c(1, 1))
hist(coordinates$pval)
# Select voxels passing the FDR threshold (alternative fixed-threshold
# rule kept below for reference).
coordinates$selected <- coordinates$qval < BHlevel
# coordinates$selected <- abs(coordinates$observed) > threshold
par(mfrow = c(3, 3), mar = rep(2, 4))
plotBRAIN(coordinates, which(names(coordinates) == "signal"))
plotBRAIN(coordinates, which(names(coordinates) == "selected"))
# Inference ----------------------------
# Post-selection inference per cluster of selected voxels.
# NOTE(review): `optimizeSelected` is a project function defined outside
# this file; its argument semantics are assumed from usage here.
clusters <- findClusters(coordinates)
sizes <- sapply(clusters, nrow)
cluster <- clusters[[1]]
cbind(coordinates$observed[cluster$row], cluster$selected)
# Selection threshold implied by the BH cutoff actually attained.
threshold <- qnorm(BHlevel * sum(coordinates$selected) / nrow(coordinates) / 2,
                   lower.tail = FALSE)
results <- list()
pvals <- numeric(length(clusters))
par(mfrow = c(1, 1))
coordinates$estimate <- 0
coordinates$cluster <- 0
for(m in 1:length(clusters)) {
  results[[m]] <- list()
  cluster <- clusters[[m]]
  # Progress: fraction done and current cluster size.
  print(c(round(m / length(clusters), 2), nrow(cluster)))
  subCov <- covariance[cluster$row, cluster$row]
  observed <- coordinates$observed[cluster$row]
  selected <- coordinates$selected[cluster$row]
  #if(sum(selected) == 1) next
  signal <- coordinates$signal[cluster$row]
  # Selection-adjusted estimation for this cluster; wrapped in try() so
  # one failing cluster does not abort the loop (note: on failure,
  # `result` keeps the previous iteration's value).
  try(result <- optimizeSelected(observed, subCov, threshold,
                                 projected = NULL,
                                 selected = selected,
                                 stepRate = 0.65,
                                 coordinates = cluster[, 1:3],
                                 tykohonovParam = NULL,
                                 tykohonovSlack = 2,
                                 stepSizeCoef = 4,
                                 delay = 20,
                                 assumeConvergence = 1800,
                                 trimSample = 100,
                                 maxiter = 2000,
                                 probMethod = "all",
                                 init = observed,
                                 imputeBoundary = "neighbors"))
  #print(result$meanCI)
  # Sampled distribution of the cluster mean over selected voxels,
  # compared to the observed mean (two-sided p-value).
  samp <- rowMeans(result$sample[, selected, drop = FALSE])
  plot(density(samp), xlim = c(-5, 5))
  abline(v = mean(observed[selected]), col = "red")
  obsmean <- mean(observed[selected])
  pval <- 2 * min(mean(samp < obsmean), mean(samp > obsmean))
  pvals[m] <- pval
  print(c(pval = pval))
  cbind(colMeans(result$sample[, selected, drop = FALSE]), observed[selected])
  k <- 1
  # plot(result$estimates[, selected, drop = FALSE][ ,k])
  # abline(h = observed[selected][k])
  # abline(h = signal[selected][k], col = "red")
  cbind(observed[selected], result$conditional[selected], signal[selected])
  # try(truesamp <- optimizeSelected(observed, subCov, threshold,
  #                                 selected = selected,
  #                                 projected = mean(signal[selected]),
  #                                 stepRate = 0.6,
  #                                 coordinates = cluster[, 1:3],
  #                                 tykohonovParam = NULL,
  #                                 tykohonovSlack = 1,
  #                                 stepSizeCoef = 0,
  #                                 delay = 10,
  #                                 assumeConvergence = 2,
  #                                 trimSample = 50,
  #                                 maxiter = 1000,
  #                                 probMethod = "selected",
  #                                 init = observed,
  #                                 imputeBoundary = "neighbors"))
  # samp <- rowMeans(truesamp$sample[, selected, drop = FALSE])
  # Diagnostic overlays: true mean (green), sampled mean (pink),
  # CI endpoints (dark green), conditional estimate (orange).
  lines(density(samp), col = 'blue')
  abline(v = mean(signal[selected]), col = "green")
  abline(v = mean(rowMeans(result$sample[, selected, drop = FALSE])), col = "pink")
  abline(v = result$meanCI, col = "dark green")
  abline(v = mean(result$conditional[selected]), col = "orange")
  conditional <- result$conditional
  #print(mean(conditional[selected]))
  #print(mean(signal[selected]))
  selected <- coordinates$selected[cluster$row]
  signal <- coordinates$signal[cluster$row]
  # Per-voxel summary table with coordinate-wise CIs for selected voxels.
  coordinatedat <- data.frame(conditional = conditional,
                              observed = observed,
                              signal = signal,
                              selected = selected)
  coordinatedat$lCI[selected] <- result$coordinateCI[, 2]
  coordinatedat$uCI[selected] <- result$coordinateCI[, 1]
  # Store: [[1]] per-voxel table, [[2]] cluster-level summary,
  # [[3]] the raw optimizer output.
  results[[m]][[3]] <- result
  results[[m]][[1]] <- coordinatedat
  results[[m]][[2]] <- c(size = sum(selected),
                         conditional = mean(conditional[selected]),
                         observed = mean(observed[selected]),
                         signal = mean(signal[selected]),
                         lCI = sort(result$meanCI)[1], uCI = sort(result$meanCI)[2])
  print(results[[m]][[2]])
  # Write conditional estimates and cluster membership back to the grid.
  coordinates$estimate[cluster$row[selected]] <- conditional[selected]
  coordinates$cluster[cluster$row[selected]] <- m
}
# Final maps and a listing of voxels assigned to a cluster.
# NOTE(review): the positional column indices (5, 12, 13, ...) assume the
# exact column-creation order above -- fragile if columns are reordered.
par(mfrow = c(3, 3))
plotBRAIN(coordinates, 5, col = rainbow(100))
plotBRAIN(coordinates, 12, col = rainbow(100))
coordinates[coordinates[, 12] != 0, c(13, 1:3, 7, 5, 12)]
|
186ed8098d8a4e68b06d8fd65116bd45d651b50f
|
9f8a04acadbd7ab8e0aa5f223a572216f2de11d3
|
/BCB_Practical2.R
|
dfe70093548545a5a65ea9f7c5ea970b42257bbb
|
[] |
no_license
|
jonchan2003/Uni-Work
|
0c5857f80223da5a352a8eb295ab517ec42716eb
|
4821ae63a9b559d58098ba392dd67d70e6a1d4c4
|
refs/heads/master
| 2020-04-26T18:37:31.149129
| 2019-03-04T17:57:34
| 2019-03-04T17:57:34
| 173,750,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,309
|
r
|
BCB_Practical2.R
|
# BCB Practical 2, part 1: testing the equal-rates Markov (ERM) model of
# diversification against mammal order richness.
# NOTE(review): depends on local files (MammalOrderS.txt, hcd.functions.R,
# mammals.tre) and on setwd() -- paths break outside this machine.
library(ape)
library(caper)
library(geiger)
setwd("downloads/Practical 2")
mammal.orders <- read.delim("MammalOrderS.txt")
source("hcd.functions.R")
# Fit the hollow-curve distribution once for a visual comparison.
mammal.hcd <- hcd.fit(mammal.orders$richness, reps = 1)
plot.hcd(mammal.hcd)
# Red line shows mammal richness, black line shows equal rates Markov model
mammal.hcd <- hcd.fit(mammal.orders$richness, reps = 1000, minmax = TRUE)
plot.hcd(mammal.hcd)
# 1000 repetitions, finding the range of possible results for ERM model
# Store P value: fraction of simulations at least as extreme as observed
p.value <- mammal.hcd$num.hi/mammal.hcd$reps
print(p.value)
# Read and plot a phylogenetic tree of mammal orders
mammal.tree <- read.tree("mammals.tre")
plot(mammal.tree)
# get the imbalance score for the phylogeny, test of phylogenetic imbalance
mammal.imbalance <- fusco.test(phy = mammal.tree, dat = mammal.orders,
                               rich = richness, names.col = order)
summary(mammal.imbalance)
plot(mammal.imbalance) # Plot of imbalance scores
# Black line is observed mean I' (I prime is imbalance score)
# Red line is the 95% confidence intervals of null distribution
# Q1: That the ERM model does not accurately predict imbalance score of mammal tree
# ans: That ERM is not the model under which mammals diversified, but that different clades have had
# different chances of diversifying for some reason.
# Q2: That the imbalance score is statistically significantly different from prediction of I'=0.5
# ans: That ERM is not the model under which mammals diversified, but that different clades have had
# different chances of diversifying for some reason.
# Slowinski-Guyer test: two-tailed probability, under the equal-rates
# Markov model, of observing a split at least as uneven as the one seen
# between two sister clades containing n1 and n2 species.
# Returns 2 * min(n1, n2) / (n1 + n2 - 1), capped at 1.
sg.test <- function(n1, n2){
  total <- n1 + n2
  smaller <- min(n1, n2)
  raw.p <- 2 * smaller / (total - 1)
  # The raw value can exceed 1 for near-even splits; cap it.
  min(raw.p, 1)
}
# Worked examples of the Slowinski-Guyer test on sister-clade richness.
sg.test(1, 25) # p=0.08 therefore does NOT reject ERM model
sg.test(1, 50) # p=0.04 therefore DOES reject ERM model
sg.test(2, 70) # p=0.0563 therefore does NOT reject ERM model
sg.test(50, 50) # raw value 2s/(N-1) exceeds 1, so the function caps the return at 1
# Create a plot of lineages through time,
# shows the species accumulation, part of diversification
erm <- growTree(b = 1, d = 0, halt = 58) # simulate clade growth, speciation only, ERM model
par(mfrow = c(1, 2)) # Create two side-by-side plots
plot(erm$phy, cex = 0.7)
ltt.plot(erm$phy, log = "y")
# simulation so there is randomness in the simulated plots
# using real data from the Phylloscopus genus, instead of simulated
phylloscopus <- read.nexus("phylloscopus.nex")
par(mfrow = c(1, 2)) # Create two side-by-side plots
plot(phylloscopus, cex = 0.7)
ltt.plot(phylloscopus, log = "y")
gammaStat(phylloscopus) # -5.338684 < -1.68 hence diversification has significantly slowed down
# Simulation that includes extinction as well
erm <- growTree(b = 1, d = 0.5, halt = 500) # d=0.5, species=500
alive <- drop.extinct(erm$phy)
par(mfrow = c(1, 2)) # Create two side-by-side plots
plot(alive, cex = 0.7)
ltt.plot(alive, log = "y")
gammaStat(alive) # extinction means that there is space for speciation to occur & more species = higher probability of speciation
# Q3. Which one or more of the following are features of the equal-rates Markov (ERM) model?
# 1. A constant number of species in the clade
# 2. A constant per-lineage extinction rate
# 3. A constant total overall speciation rate
# 4. A constant per-lineage speciation rate
# 5. Density-dependence in speciation
# 6. Rates of diversification depend on traits of the species
# ans: 2, 4
# Q4. Which one or more of the following assumptions did you make when using hcd.fit to test ERM?
# 1. All taxa had equal numbers of species
# 2. All taxa were paraphyletic
# 3. All taxa were monophyletic
# 4. All taxa were the same age
# 5. All data were very old
# 6. No taxa were very old
# ans: 3, 4
# Q5: According to ERM, why are there not always equal numbers of species in two sister clades?
# 1. One sister clade is usually luckier than the other, just by chance, so has more species
# 2. One sister clade is usually older than the other, so has more species
# 3. One sister clade is usually more competitive than the other, so has more species
# ans: 1
# Q6: What does a mean I0 significantly greater than 0.5 indicate?
# 1. That each lineage has a lot of species
# 2. That all clades have had the same chances of diversifying
# 3. That all clades have had different chances of diversifying
# 4. That at least some clades have had different chances of diversifying
# 5. That small-bodied clades are the most diverse
# 6. That the observed mean I0 is larger than in nearly all of the randomisations
# ans: 4
# Q7: How might you be able to tell that a clade's diversification had slowed down significantly
# 1. Successive nodes in phylogeny would get closer and closer as you get nearer to the tips through time?
# 2. Successive nodes in phylogeny would get further apart as you get nearer to the tips
# 3. The gamma statistic would be less than -1.68
# 4. The gamma statistic would be less than 0
# 5. The gamma statistic would be greater than -1.68
# ans: 2, 3
|
eb2d88fa2c7aad3f1a42cfdd562e34219f4accb9
|
e639760af64558ff1cefa03362d8c5fa5139119e
|
/nvd3/examples_json.R
|
98099e747b9ecfbd70c9a88842596ddeddbe6a0e
|
[] |
no_license
|
timelyportfolio/docs
|
b22dad53e1da9e21802c1c4713965f1c437c3ead
|
19c7e4f841eb06eb7e57d32042386c605e177b42
|
refs/heads/master
| 2021-01-18T16:42:53.631088
| 2014-04-01T15:05:07
| 2014-04-01T15:05:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 904
|
r
|
examples_json.R
|
# Parse knitr-style chunks ("## ---- name, ... ----") out of charts.R:
# split the file on chunk headers, then separate each chunk's name from
# its body. (This duplicates the logic of create_examples_json below.)
dat <- paste(readLines('nvd3/charts.R'), collapse = '\n')
examples <- strsplit(dat, '\n## ----')[[1]]
examples2 <- lapply(Filter(function(x) x!= "", examples), function(example){
  # ex[1] is the chunk header remainder, ex[2] the chunk body.
  ex = strsplit(example, '-+\n')[[1]]
  # Chunk name = text before the first comma of the header.
  ex_nm = strsplit(ex, ",")[[1]][1]
  c(ex[2], ex_nm)
})
# Name each (body, name) pair by its chunk name, then keep only bodies.
names(examples2) = sapply(examples2, '[[', 2)
examples3 = lapply(examples2, '[[', 1)
# Extract knitr-style chunks ("## ---- name, ... ----") from one or more
# R files and return them as a JSON object mapping chunk name -> body.
# NOTE(review): uses rjson (third-party); assumes every non-empty split
# segment has a well-formed "name, options ----" header.
create_examples_json = function(rfiles){
  # Concatenate all files into one newline-joined string.
  dat <- lapply(rfiles, function(rfile){
    paste(readLines(rfile), collapse = '\n')
  })
  dat <- do.call(function(...) paste(..., collapse = '\n'), dat)
  # Split on chunk-header markers; the leading empty piece is dropped.
  examples <- strsplit(dat, '\n## ----')[[1]]
  examples2 <- lapply(Filter(function(x) x!= "", examples), function(example){
    # ex[1] holds the header remainder, ex[2] the chunk body.
    ex = strsplit(example, '-+\n')[[1]]
    # Chunk name = text before the first comma of the header.
    ex_nm = strsplit(ex, ",")[[1]][1]
    c(ex[2], ex_nm)
  })
  names(examples2) = sapply(examples2, '[[', 2)
  examples3 = lapply(examples2, '[[', 1)
  rjson::toJSON(examples3)
}
|
20eb90631304968fc018af8197963f0f4e0b955f
|
6ff24bc1f35410c47d2662d1b8e5a2f34e65b1b7
|
/man/cv.knn.Rd
|
3846d1b0cc4fe5a70e246797e3b085c822065375
|
[] |
no_license
|
ablanda/Esame
|
5d3d7c1408e5ed0e9771ea015855db0788036d8e
|
b43749d3fc4214e878d93b4e2b7c073c64cb7610
|
refs/heads/master
| 2020-12-30T11:39:37.681842
| 2018-08-11T12:42:47
| 2018-08-11T12:42:47
| 91,511,654
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 360
|
rd
|
cv.knn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.knn.R
\name{cv.knn}
\alias{cv.knn}
\title{cross validation leave one out knn}
\usage{
cv.knn(K, x, y, folds = NULL)
}
\arguments{
\item{K}{}
\item{x}{}
\item{y}{}
\item{folds}{}
}
\value{
Total error for a given k
}
\description{
cross validation leave one out knn
}
|
1a57bb317e18168ba35437335ea960aa2f3e12f8
|
68e96e54f6dabbfa92d30adffaab0ef6a7bc7a63
|
/RJSDMX/man/RJSDMX-package.Rd
|
897e1f4f9671735052e4d24a1f36777e37d9d864
|
[] |
no_license
|
darthbeeblebrox/WorldBankData
|
f047bd2a7c361af19d59916ba663db1c2525d0f3
|
5465617cb982d619120008c71ea67328142e1fe1
|
refs/heads/master
| 2021-01-19T13:49:39.233912
| 2017-02-02T13:00:36
| 2017-02-02T13:00:36
| 82,421,540
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,986
|
rd
|
RJSDMX-package.Rd
|
% Copyright 2010,2014 Bank Of Italy
%
% Licensed under the EUPL, Version 1.1 or as soon they
% will be approved by the European Commission - subsequent
% versions of the EUPL (the "Licence");
% You may not use this work except in compliance with the
% Licence.
% You may obtain a copy of the Licence at:
%
%
% http://ec.europa.eu/idabc/eupl
%
% Unless required by applicable law or agreed to in
% writing, software distributed under the Licence is
% distributed on an "AS IS" basis,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
% express or implied.
% See the Licence for the specific language governing
% permissions and limitations under the Licence.
%
\name{RJSDMX-package}
\title{Gets timeseries from SDMX data Provider}
\description{ This package provides functions to extract timeseries data and
structural metadata from an SDMX Provider (e.g. ECB,OECD, EUROSTAT) via SDMX Web Service}
\details{\tabular{ll}{
Package: \tab RJSDMX\cr
Type: \tab Package\cr
}
The SDMX Connectors framework (of which RJSDMX is part) aims to offer data users the means for efficiently interacting with SDMX Web Service providers
from within the most popular statistical tools.
The source code of the SDMX Connectors project can be found at:
\url{https://github.com/amattioc/SDMX}
Information about the R Connector can be found in the dedicated wiki page:
\url{https://github.com/amattioc/SDMX/wiki/RJSDMX:-Connector-for-R}
In particular, all information related to configuration (network, tracing, security) can be found at:
\url{https://github.com/amattioc/SDMX/wiki/Configuration}
}
\alias{RJSDMX}
\docType{package}
\keyword{package}
\seealso{\bold{getProviders, getTimeSeries, sdmxHelp}}
\examples{
\dontrun{
my_ts = getTimeSeries('ECB','EXR.M.USD.EUR.SP00.A')
}
}
\author{Attilio Mattiocco, Diana Nicoletti, Bank of Italy, IT Support for the Economic Research
\email{attilio.mattiocco@bancaditalia.it, diana.nicoletti@bancaditalia.it}}
\references{\url{http://sdmx.org/}}
|
891751c082c0322359cb7f1d5295a0b073d53873
|
98e3f5ba9fdf45b20ae26172827da72002a0e248
|
/R/status.R
|
2ab72baa778ca281e23198432e5186468e35587d
|
[
"MIT"
] |
permissive
|
eddelbuettel/rhub
|
c167b0140ce6b6c5bb362afd38619c7d423d65ca
|
d5a495450aba062861b8c774f0cee389b672156a
|
refs/heads/master
| 2021-01-13T09:22:17.666224
| 2016-10-15T14:44:36
| 2016-10-15T14:44:36
| 70,002,510
| 0
| 0
| null | 2016-10-04T20:16:03
| 2016-10-04T20:16:02
| null |
UTF-8
|
R
| false
| false
| 3,035
|
r
|
status.R
|
#' Query the status of an r-hub check
#'
#' @param id The check id, an r-hub status URL, or the object returned
#'   by [check()]. If `NULL`, the most recently submitted check is used.
#' @return A list with the status of the check. It has entries:
#'   `status`, `submitted` and `duration`. Currently the duration is
#'   only filled when the build has finished.
#'
#' @export
status <- function(id = NULL) {
  if (is.null(id)) {
    id <- package_data$last_handle
  }
  if (is.null(id)) {
    stop("Could not find an rhub handle")
  }
  if (is.list(id) && !is.null(id$id) && is_string(id$id)) {
    real_id <- id$id
  } else if (is_string(id)) {
    # A status URL: keep only its last path component as the id.
    real_id <- sub("^.*/([^/]+)$", "\\1", id, perl = TRUE)
  } else {
    stop("Invalid r-hub build id")
  }
  structure(
    query("GET STATUS", params = list(id = real_id)),
    class = "rhub_status"
  )
}
# Stream a check's build log to the console when running interactively;
# otherwise simply return the handle unchanged.
check_status <- function(id, interactive = interactive()) {
  if (!interactive) {
    return(id)
  }
  # Feed the live log, line by line, into the status parser.
  my_curl_stream(id$`log-url`, byline(make_status_parser(id)))
  invisible(id)
}
#' Stream a URL in small raw chunks, invoking a callback on each chunk.
#'
#' @param url URL to read from.
#' @param callback Function called with each raw chunk.
#' @param bufsize Maximum chunk size in bytes.
#' @importFrom curl curl
my_curl_stream <- function(url, callback, bufsize = 80) {
  con <- curl(url)
  if (!isOpen(con)) {
    open(con, "rb")
    on.exit(close(con))
  }
  repeat {
    chunk <- readBin(con, raw(), bufsize)
    if (length(chunk) == 0) {
      break
    }
    callback(chunk)
    Sys.sleep(0.2)
  }
  # Wipe any leftover spinner character from the console line.
  cat("\r \r")
}
#' @importFrom utils tail
# Wrap a line-oriented callback `fun` so it can be fed arbitrary raw
# chunks: complete lines are delivered immediately, a trailing partial
# line is buffered until the next chunk arrives.
byline <- function(fun) {
  # Bytes carried over from a previous chunk that did not end in "\n".
  pending <- raw(0)
  function(chunk) {
    data <- c(pending, chunk)
    pending <- raw(0)
    # Find the last newline; anything after it stays buffered.
    newlines <- which(data == charToRaw("\n"))
    if (length(newlines) == 0) {
      pending <<- data
      return()
    }
    last_nl <- tail(newlines, 1)
    if (last_nl != length(data)) {
      pending <<- data[(last_nl + 1):length(data)]
      data <- data[1:last_nl]
    }
    # Convert the complete lines to text and hand them to `fun`.
    txt <- rawToChar(data)
    lines <- strsplit(txt, "\n")[[1]]
    Encoding(lines) <- "UTF-8"
    for (line in lines) fun(line)
  }
}
#' @importFrom rcmdcheck rcmdcheck
# Build a closure that consumes build-log lines one at a time and renders
# them: R CMD check output goes through rcmdcheck's formatter, ">>>>>====="
# section markers become header lines, everything else just spins a
# progress indicator. Relies on a `header_line()` helper defined
# elsewhere in the package.
make_status_parser <- function(id) {
  first <- TRUE
  checking <- FALSE
  # Borrow rcmdcheck's unexported line formatter via the `%:::%` helper.
  formatter <- ("rcmdcheck" %:::% "check_callback")()
  spinner <- c("-", "\\", "|", "/")
  spin <- function() {
    cat("\r", spinner[1], sep = "")
    # Rotate the spinner characters for the next call.
    spinner <<- c(spinner[-1], spinner[1])
  }
  function(x) {
    if (first) {
      header_line("Build started")
      first <<- FALSE
    }
    ## Get rid of potential \r characters
    x <- gsub("[\r]+", "", x)
    ## Checking (already, and still)
    if (checking) {
      if (grepl("^Status: ", x)) {
        # The final "Status:" line ends the R CMD check section.
        checking <<- FALSE
        return(formatter(x))
      } else {
        return(formatter(x))
      }
    }
    ## Not checking (yet, or any more)
    if (grepl("^>>>>>=====+ Running R CMD check", x)) {
      # Entering the R CMD check section: format subsequent lines.
      checking <<- TRUE
      x <- sub("^>>>>>=+ ", "", x)
      header_line(x)
    } else if (grepl("^>>>>>=====", x)) {
      # Other section markers are shown as headers only.
      x <- sub("^>>>>>=+ ", "", x)
      header_line(x)
    } else if (grepl("^\\+R-HUB-R-HUB-R-HUB", x)) {
      # r-hub keep-alive marker lines: strip the prefix, show activity.
      x <- sub("^\\+R-HUB-R-HUB-R-HUB", "", x)
      spin()
    } else {
      spin()
    }
  }
}
|
4daa38c2e2d6c59ba0e885c4139a0b430d8f377b
|
21d49a6e91b2546255c66d514a7f7842c6721475
|
/Shiney_App_Next_Word/ui.R
|
52340924de016fc1ca93a723fc448c5b50395ecd
|
[] |
no_license
|
DScontrol/shiny_app_next_word_prediction
|
94f0a397f0e1e362fb7346aaee32c40b58be0752
|
694f42b2d00a0f9572cd9755929a465231ae7d71
|
refs/heads/master
| 2022-01-26T15:25:59.338697
| 2018-09-06T03:44:53
| 2018-09-06T03:44:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,365
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(shinythemes)
library(DT)
library(ggplot2)
library(plotly)
library(markdown)
# Define UI for application that draws a histogram
# UI definition: a navbar layout with a prediction tab, n-gram plots,
# a data viewer, and documentation.
shinyUI(fluidPage(
  # Theme
  theme = shinytheme("sandstone"),
  # Application title
  titlePanel("Next Word Prediction"),
  # Tabs
  navbarPage("An app to predict the next word while you enter text",
    tabPanel("Next Word Prediction",
      fluidRow(
        column(3),
        column(6,
          tags$div(
            # Fix: the original passed an empty `value =` argument;
            # use an explicit empty string as the initial text.
            textInput("text",
                      label = h3("Enter Text:"),
                      value = ""),
            br(),
            tags$hr(),
            h3("Predicted Next Word:"),
            tags$span(style = "color:darkred",
                      tags$strong(tags$h3(textOutput("guess_1")))),
            br(),
            tags$hr(),
            h4("Second Guess:"),
            tags$span(style = "color:grey",
                      tags$strong(tags$h3(textOutput("guess_2")))),
            br(),
            tags$hr(),
            h4("Third Guess:"),
            tags$span(style = "color:grey",
                      tags$strong(tags$h4(textOutput("guess_3")))),
            br(),
            tags$hr(),
            align = "center")
        ),
        column(3)
      )
    ),
    tabPanel("N-Gram Plots",
      fluidRow(
        column(width = 5,
               uiOutput("ngramSelectP")
        ),
        column(width = 5, offset = 1,
               sliderInput("n_terms",
                           "Select number of n-grams to view:",
                           min = 5,
                           max = 50,
                           value = 25)
        )
      ),
      hr(),
      plotlyOutput("ngramPlot", height = 800, width = 900)
    ),
    tabPanel("View Data",
      h2("N-Gram Data Set"),
      hr(),
      fluidRow(
        column(width = 5,
               uiOutput("ngramSelectT")
        )
      ),
      DT::dataTableOutput("ngramtable")
    ),
    tabPanel("Documentation", includeMarkdown("documentation.md")
    )
  )
))
|
78b4063fc451eb125bb950826bf91db64efd8941
|
41cff625d6d1352aac02d1f206279d16e86685a1
|
/R/MTuplesList-class.R
|
24ce3ee000f36a4fab2f94a8a455a8957744a4f0
|
[] |
no_license
|
PeteHaitch/MethylationTuples
|
dae3cf80085d58f57ac633d99f3be44e6fb84daa
|
4e127d2ad1ff90dbe8371e8eeba4babcb96e86f2
|
refs/heads/master
| 2020-12-11T22:52:16.651509
| 2015-04-24T13:26:56
| 2015-04-24T13:27:12
| 24,593,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,491
|
r
|
MTuplesList-class.R
|
### =========================================================================
### GTuplesList objects
### -------------------------------------------------------------------------
###
# TODO: unit tests
# TODO: Base documentation on GTuplesList
#' MTuplesList objects
#'
#' @description
#' The \code{MTuplesList} class is a container for storing a collection of
#' \code{\link{MTuples}} objects. The \code{MTuplesList} class is almost
#' identical to the \code{\link[GenomicTuples]{GTuplesList}} on which it is
#' based.
#'
#' @usage
#' MTuplesList(...)
#'
#' @details
#' Please see
#' \code{\link[GenomicTuples]{GTuplesList}} for a description of available
#' methods. The only additional methods are \code{methinfo} and
#' \code{\link{methtype}}, which are identical to their \code{\link{MTuples}}
#' counterparts.
#'
#' @param ... \code{\link{MTuples}} objects. All must contain the same
#' \code{size} tuples.
#'
#' @seealso \code{\link{MTuples}}, \code{\link[GenomicTuples]{GTuplesList}}.
#'
#' @aliases MTuplesList
#'
#' @export
#' @include MethInfo-class.R
#' @author Peter Hickey
#' @examples
#' ## TODO
# S4 class definition: a list of MTuples objects sharing one tuple 'size'.
# Inherits the list behaviour from GTuplesList; the unlisted data slot is
# specialised to MTuples (which carries the methylation information).
setClass("MTuplesList",
         contains = c("GTuplesList"),
         representation(
           unlistData = "MTuples",
           elementMetadata = "DataFrame"
         ),
         prototype(
           elementType = "MTuples"
         )
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###
#' @export
# Constructor: bundle one or more MTuples objects (or a single list of
# them) into an MTuplesList, enforcing that all tuples share one 'size'.
MTuplesList <- function(...) {
  args <- list(...)
  if (length(args) == 0L) {
    unlisted <- MTuples()
  } else {
    # A single list argument is treated as the list of MTuples itself.
    if (length(args) == 1L && is.list(args[[1L]])) {
      args <- args[[1L]]
    }
    if (!all(sapply(args, is, "MTuples"))) {
      stop("all elements in '...' must be MTuples objects")
    }
    sizes <- sapply(args, size)
    # Every element must have the same tuple size (or all sizes NA).
    if (!GenomicTuples:::.zero_range(sizes) &&
        !isTRUE(all(is.na(sizes)))) {
      stop("all MTuples in '...' must have the same 'size'")
    }
    unlisted <- suppressWarnings(do.call("c", unname(args)))
  }
  relist(unlisted, PartitioningByEnd(args))
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters
###
#' @export
# Accessor: return the MethInfo object stored on the unlisted MTuples.
setMethod("methinfo",
          "MTuplesList",
          function(object) {
            object@unlistData@methinfo
          }
)
#' @export
# Accessor: return just the methylation type from the stored MethInfo.
setMethod("methtype",
          "MTuplesList",
          function(object) {
            methtype(object@unlistData@methinfo)
          }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Setters
###
#' @export
# Replacement method: install a new MethInfo object on the underlying
# MTuples and return the modified MTuplesList.
setReplaceMethod("methinfo",
                 c("MTuplesList", "MethInfo"),
                 function(object, value) {
                   object@unlistData@methinfo <- value
                   object
                 }
)
#' @export
# Replacement method: update only the methylation type (a character
# value) inside the stored MethInfo.
setReplaceMethod("methtype",
                 c("MTuplesList", "character"),
                 function(object, value) {
                   methtype(object@unlistData@methinfo) <- value
                   object
                 }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Going from MTuples to MTuplesList with extractList() and family.
###
#' @export
# Tells extractList()/relist() that splitting an MTuples object should
# produce an MTuplesList.
setMethod("relistToClass",
          "MTuples",
          function(x) {
            "MTuplesList"
          }
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### show method.
###
# Based on GenomicRanges::showList
# Compact console display for a tuples list: short lists are printed in
# full, longer lists show only a leading subset followed by a
# "<n more elements>" note, and both end with seqinfo/methinfo footers.
# Based on GenomicRanges::showList.
my_showList <- function(object, showFunction, print.classinfo)
{
  k <- length(object)
  # NOTE(review): elementLengths() is the older S4Vectors name (later
  # superseded by elementNROWS) -- kept as written here.
  cumsumN <- cumsum(elementLengths(object))
  N <- tail(cumsumN, 1)
  cat(class(object), " object of length ", k, ":\n", sep = "")
  if (k == 0L) {
    cat("<0 elements>\n\n")
  } else if ((k == 1L) || ((k <= 3L) && (N <= 20L))) {
    # Short list: print every element in full.
    nms <- names(object)
    defnms <- paste0("[[", seq_len(k), "]]")
    if (is.null(nms)) {
      nms <- defnms
    } else {
      # Unnamed elements fall back to "[[i]]"; named ones show as "$name".
      empty <- nchar(nms) == 0L
      nms[empty] <- defnms[empty]
      nms[!empty] <- paste0("$", nms[!empty])
    }
    for (i in seq_len(k)) {
      cat(nms[i], "\n")
      showFunction(object[[i]], margin=" ",
                   print.classinfo=print.classinfo)
      # Only print the class-info header once.
      if (print.classinfo)
        print.classinfo <- FALSE
      cat("\n")
    }
  } else {
    # Long list: show as many leading elements as fit in ~20 rows.
    sketch <- function(x) c(head(x, 3), "...", tail(x, 3))
    if (k >= 3 && cumsumN[3L] <= 20)
      showK <- 3
    else if (k >= 2 && cumsumN[2L] <= 20)
      showK <- 2
    else
      showK <- 1
    diffK <- k - showK
    nms <- names(object)[seq_len(showK)]
    defnms <- paste0("[[", seq_len(showK), "]]")
    if (is.null(nms)) {
      nms <- defnms
    } else {
      empty <- nchar(nms) == 0L
      nms[empty] <- defnms[empty]
      nms[!empty] <- paste0("$", nms[!empty])
    }
    for (i in seq_len(showK)) {
      cat(nms[i], "\n")
      showFunction(object[[i]], margin=" ",
                   print.classinfo=print.classinfo)
      if (print.classinfo)
        print.classinfo <- FALSE
      cat("\n")
    }
    if (diffK > 0) {
      # Summarise the elements that were not printed.
      cat("...\n<", k - showK,
          ifelse(diffK == 1, " more element>\n", " more elements>\n"),
          sep="")
    }
  }
  cat("-------\n")
  cat("seqinfo: ", summary(seqinfo(object)), "\n", sep="")
  cat("methinfo: ", summary(methinfo(object)), "\n", sep = "")
}
#' @export
# show() delegates to my_showList() with the MTuples-specific renderer;
# per-element class information is suppressed.
setMethod("show",
          "MTuplesList",
          function(object) {
            my_showList(object, showMTuples, FALSE)
          }
)
|
d53f156072e805e8dd9852bc905bbd95359f80b4
|
9b40d9d2a1a525ef69f989518b64259feb51e684
|
/02_ini_simulation_simple_reg.R
|
775c9c07cbf90506a7b481670649579636d7a425
|
[] |
no_license
|
CaroHaensch/IPD_MA_Survey_Data
|
5c144f9c127fc5a5b3aadb6a623194f3e2ccdb95
|
32c15a3d28bc17074542cce3c5a183023560dab9
|
refs/heads/master
| 2020-04-28T09:35:11.085620
| 2019-03-12T08:59:45
| 2019-03-12T08:59:45
| 175,172,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,150
|
r
|
02_ini_simulation_simple_reg.R
|
## Filename: 02_ini_simulation_simple_reg.R
## Description: Ini file for the whole simulation
## Author: Anna-Carolina Haensch
## Maintainer: Anna-Carolina Haensch (anna-carolina.haensch@gesis.org)
## Software version: R 3.3.3
## Creation: 2017-11-20
## Last updated on: 2018-05-09
###
# ATTENTION: Takes about 10 hours to run on the
# machine of the author - all simulations for the article
# are run in the loop. The superpopulation simulations are especially
# time-intensive. Delete them from the list of simulation combinations
# if you wish; this should speed things up.
###
# ------------------------------------------------------------------------
# 0. Preliminaries --------------------------------------------------------
# ------------------------------------------------------------------------
# Clear
rm(list = ls())
#Set Directory
#setwd("/home/hpc/pr63mi/di49koy/Dissertatiom/crosssecma")
#setwd("//tsclient/N/mannheim/src/ch/cross_sec_sim")
# Install Packages if needed
if (!require("survey")) install.packages("survey")
if (!require("lme4")) install.packages("lme4")
if (!require("optimx")) install.packages("optimx")
if (!require("metafor")) install.packages("metafor")
if (!require("sjstats")) install.packages("sjstats")
if (!require("Hmisc")) install.packages("Hmisc")
if (!require("parallel")) install.packages("parallel")
if (!require("reshape2")) install.packages("reshape2")
if (!require("MASS")) install.packages("MASS")
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("ggthemes")) install.packages("ggthemes")
if (!require("ggpubr")) install.packages("ggpubr")
if (!require("cowplot")) install.packages("cowplot")
if (!require("xtable")) install.packages("xtable")
# Load Packages
library("survey") # Analytical packages
library("lme4")
library("metafor")
library("sjstats")
library("Hmisc")
library("optimx")
library("parallel") # Needed for simulation
library("reshape2")
library("MASS")
library("ggplot2") # Plot packages
library("ggthemes")
library("ggpubr")
library("cowplot")
library("xtable") #Tables
# ------------------------------------------------------------------------
# 1. Simulation -----------------------------------------------------------
# ------------------------------------------------------------------------
# 1.1 Population Data Creation -------------------------------------------
# ------------------------------------------------------------------------
# There are different data generating models implemented.
list.data.creation <- list.files(path = "100_data_creation/")
#[1] "01_the_simple_case.R"
#[2] "02_heterogeneity_of_intercepts.R"
#[3] "03_heterogenity_of_slopes.R"
#[4] "04_diff_x_means_per_strata.R"
#[5] "05_diff_x_means_per_strata_y_heterogeneity.R"
#[6] "06_diff_x_means_u_high_cor.R"
#[7] "07_diff_x_means_y_heterogeneity_u_high_cor.R"
#[8] "08_superpopulation_diff_slope.R"
#[9] "09_heterogeneity_of_both_coefficients.R"
#[10] "10_superpopulation_diff_intercept.R"
#[11] "10_superpopulation_diff_intercept25.R"
# 1.2 Data Sampling and Design Weights ------------------------------------
# ------------------------------------------------------------------------
# There are also different sampling mechanisms implemented.
source("200_data_samp_weight/setups_sampling.R")
#[1] "setup1.5" Sampling depending on strata, all 5 s. sizes equal
#[2] "setup1.20" Sampling depending on strata, all 20 s. sizes equal
#[3] "setup1.500" Sampling depending on strata, 5 s. sizes different
#[4] "setup2.5" Not used in article
#[5] "setup2.20" Not used in article
#[6] "setup2.500" Not used in final article
#[7] "setup3.5" Endogenous sampling, all 5 s. sizes equal
#[8] "setup3.20" Endogenous sampling, all 20 s. sizes equal
#[9] "setup4.5" Not used in article
#[10] "setup4.20" Not used in article
#[11] "setup4.500" Not used in article
#[12] "setup1.5.super" Sampling depending on strata, all 5 s. sizes equal, supp.
#[13] "setup5.5" Sampling depending on strata+Endogenous sampling
#[14] "setup1.5.extreme" Sampling depending on strata, but high cov for weights
#[15] "setup4.10000" Very differnt survey sizes
#[16] "setup6.5" Sampling depending on strata+Endogenous sampling, high cov w.
#[17] "setup1.25.super" Sampling depending on str., all 25 s. sizes equal, supp.
# The article looks at the following combinations.
# WA = Web Appendix
# Data generating | Sampling | Simulation Number in Article
# [3] | [16] | Nr. 1
# [3] | [13] | Nr. 2
# [5] | [15] | Nr. 3
# [11] | [17] | Nr. 4
# [8] | [17] | Nr. 5
# [4] | [1] | Nr. 6 (WA)
# [4] | [7] | Nr. 7 (WA)
# [5] | [7] | Nr. 8 (WA)
# [3] | [1] | Nr. 9 (WA)
# [3] | [13] | Nr. 10 (WA)
# [7] | [7] | Nr. 11 (WA)
combinations <- as.data.frame(
matrix(data =
c(3,16,
3,13,
5,15,
11,17,
8,17,
4,1,
4,7,
5,7,
3,1,
3,13,
7,7
),
ncol = 2,
byrow = T))
# Main simulation loop: one iteration per (data-generating model,
# sampling design) combination listed above. Each iteration sources the
# population script, runs M replicates in parallel, and saves the
# estimates to results/.
for (i in 1:nrow(combinations)){
  # Source the population-generating script for this combination.
  number.data.creation <- combinations[i,1]
  source(file = paste0("100_data_creation/",
                       list.data.creation[number.data.creation]))
  # Define patterns of sampling probabilities
  # Sampling prob. depending on strata
  source("200_data_samp_weight/01_diff_strata_probs.R")
  # Endogeneous sampling
  source("200_data_samp_weight/02_endogeneous_sampling.R")
  # Endogeneous sampling
  source("200_data_samp_weight/03_diff_strata_and_endo_sampling.R")
  # Pick the sampling setup for this combination.
  number.sampling <- combinations[i,2]
  setup.sim <- list.setup[[number.sampling]]
  name.setup <- names(list.setup)[[number.sampling]]
  # 1.3 Simulation ----------------------------------------------------------
  # ------------------------------------------------------------------------
  # Number of repetitions
  M<-1000
  # Set up the clusters, export the functions, libraries etc. to the clusters
  source("300_simulation_set_up/cluster_basics.R")
  # Run the Simulation
  list.erg.sim.test<-list()
  # Fixed RNG stream so replicates are reproducible across workers.
  clusterSetRNGStream(cl, 12031992)
  list.erg.sim.test<-parLapply(cl,
                               1:M,
                               function(i) SimulationSetup(data.pop = data.pop,
                                                           setup = setup.sim,
                                                           kN = 1000000))
  # Stop the clusters
  stopCluster(cl)
  # Unlist
  # Collect point estimates (x[[1]]), standard errors (x[[2]]) and tau
  # estimates (x[[3]]) for intercept (row 1) and slope (row 2).
  erg.int.point <- as.data.frame(sapply(X = list.erg.sim.test,
                                        FUN = function(x) x[[1]][1,]))
  erg.slope.point <- as.data.frame(sapply(X = list.erg.sim.test,
                                          FUN = function(x) x[[1]][2,]))
  erg.int.std.err <- as.data.frame(sapply(X = list.erg.sim.test,
                                          FUN = function(x) x[[2]][1,]))
  erg.slope.std.err <- as.data.frame(sapply(X = list.erg.sim.test,
                                            FUN = function(x) x[[2]][2,]))
  erg.int.tau <- as.data.frame(sapply(X = list.erg.sim.test,
                                      FUN = function(x) x[[3]][1,]))
  erg.slope.tau <- as.data.frame(sapply(X = list.erg.sim.test,
                                        FUN = function(x) x[[3]][2,]))
  # Save results
  save(list = c("erg.int.point", "erg.int.std.err",
                "erg.slope.point", "erg.slope.std.err",
                "erg.int.tau", "erg.slope.tau",
                "name.dataset", "name.setup","mod.pop"),
       file = paste0("results/", name.dataset, "_", name.setup,
                     "_results_vector.Rdata"))
}
# ------------------------------------------------------------------------
# 2. Create basic density plots -------------------------------------------
# ------------------------------------------------------------------------
# Take all the Rdata files with results that were created and create some
# basic plots
# # 2.1 Load dataset --------------------------------------------------------
# # -------------------------------------------------------------------------
#
# list.results <- list.files(path = "results/")
#
# for (i in 1:length(list.results)){
#
# load(file = paste0("results/", list.results[i]))
#
#
#
# # 2.2 Visualization of results --------------------------------------------
# # ------------------------------------------------------------------------
#
# # Build the plots for the Simulation results
# source("500_density_plots/simulation_all_methods.R")
#
#
# # Build the plots for the Simulation results, Simple Version
# source("500_density_plots/simulation_without_FE_and_single_surveys.R")
#
# }
# Plots specifically created for the article
# Focus on differences between 1Stage and 2-Stage
load(file =
"results/heterogeneity_of_slopes_setup5.5_results_vector.Rdata")
source(file =
"500_density_plots/01_comparison_weights_no_weights.R")
rm(list = ls())
# Closer look at tau
load(file =
"results/superpopulation_intercept_25_setup1.25.super_results_vector.Rdata")
source(file =
"500_density_plots/05_comparison_tau.R")
rm(list = ls())
# Linear model with exogenous sampling
load(file =
"results/diff_x_means_per_strata_setup1.5_results_vector.Rdata")
source(file =
"500_density_plots/01_comparison_weights_no_weights.R")
rm(list = ls())
# Linear model with endogenous sampling
load(file =
"results/diff_x_means_per_strata_setup3.5_results_vector.Rdata")
source(file =
"500_density_plots/01_comparison_weights_no_weights.R")
rm(list = ls())
# Linear model with endogenous sampling, heterogenous Y
load(file =
"results/diff_x_means_per_strata_heterogeneity_setup3.5_results_vector.Rdata")
source(file =
"500_density_plots/01_comparison_weights_no_weights.R")
rm(list = ls())
# Heterogeneity of slopes model with exogenous sampling (strata sampling)
load(file =
"results/heterogeneity_of_slopes_setup1.5_results_vector.Rdata")
source(file =
"500_density_plots/01_comparison_weights_no_weights.R")
rm(list = ls())
# Heterogeneity of effects model with endogenous sampling+strata sampling
load(file =
"results/heterogeneity_of_slopes_setup6.5_results_vector.Rdata")
source(file =
"500_density_plots/01_comparison_weights_no_weights.R")
rm(list = ls())
# Closer look at Poststratification
load(file =
"results/diff_x_means_per_strata_y_heterogeneity_u_high_cor_setup3.5_results_vector.Rdata")
source(file =
"500_density_plots/02_focus_poststrat.R")
rm(list = ls())
# Closer look at Poststratification
load(file =
"results/diff_x_means_per_strata_heterogeneity_setup4.10000_results_vector.Rdata")
source(file =
"500_density_plots/03_focus_transform.R")
rm(list = ls())
# ------------------------------------------------------------------------
# 3. Create performance tables --------------------------------------------
# ------------------------------------------------------------------------
# Focus on differences between 1Stage and 2-Stage
load(file =
"results/heterogeneity_of_slopes_setup5.5_results_vector.Rdata")
source(file =
"700_performance_tables/011_latex_weighting_when_and_how.R")
rm(list = ls())
# Focus on study heterogeneity
load(file =
"results/superpopulation_slope_setup1.25.super_results_vector.Rdata")
source(file =
"700_performance_tables/012_latex_superpopulation.R")
rm(list = ls())
load(file =
"results/superpopulation_intercept_25_setup1.25.super_results_vector.Rdata")
source(file =
"700_performance_tables/012_latex_superpopulation.R")
rm(list = ls())
# Linear model with exogenous sampling
load(file = "results/diff_x_means_per_strata_setup1.5_results_vector.Rdata")
source(file =
"700_performance_tables/011_latex_weighting_when_and_how.R")
rm(list = ls())
# Linear model with endogenous sampling
load(file = "results/diff_x_means_per_strata_setup3.5_results_vector.Rdata")
source(file =
"700_performance_tables/011_latex_weighting_when_and_how.R")
rm(list = ls())
# Linear model with endogenous sampling, y heterogenous
load(file =
"results/diff_x_means_per_strata_heterogeneity_setup3.5_results_vector.Rdata")
source(file =
"700_performance_tables/011_latex_weighting_when_and_how.R")
rm(list = ls())
# Linear heterogeneity of effects model with exoegnous sampling
load(file =
"results/heterogeneity_of_slopes_setup1.5_results_vector.Rdata")
source(file =
"700_performance_tables/011_latex_weighting_when_and_how.R")
rm(list = ls())
# Linear heterogeneity of effects model with endogenous sampling
load(file =
"results/heterogeneity_of_slopes_setup6.5_results_vector.Rdata")
source(file =
"700_performance_tables/011_latex_weighting_when_and_how.R")
rm(list = ls())
# Linear model with endogenous sampling, focus on poststratification
load(file =
"results/diff_x_means_per_strata_y_heterogeneity_u_high_cor_setup3.5_results_vector.Rdata")
source(file =
"700_performance_tables/011_latex_weighting_when_and_how.R")
rm(list = ls())
# Linear model with endogenous sampling, focus on transformation because of
# different survey sizes
load(file =
"results/diff_x_means_per_strata_heterogeneity_setup4.10000_results_vector.Rdata")
source(file = "700_performance_tables/013_latex_transform.R",local = T)
rm(list = ls())
# ------------------------------------------------------------------------
# 4. Create example plots ------------------------------------------------
# ------------------------------------------------------------------------
# These plots are needed for the more techinical subsections.
list.examples <- list.files(path = "800_example_plots/")
list.examples <- list.examples[-7]
for (i in 1:length(list.examples)){
source(file = paste0("800_example_plots/", list.examples[i]), local=T)
}
|
69b8092189f6b2f206b28ab39e6fb6716bceed5f
|
d62ed0b5061ba4e025635162076245871baabff6
|
/ui.R
|
381c9368692bd03020a1d637b758b9b3d422246f
|
[] |
no_license
|
NJBongithub/course_DSJH_DataProducts
|
64305fcfb26aec1d8988534f39a87c29866560bc
|
0e3df7e6bf630fe80743b4fba2641e5e0dde5b41
|
refs/heads/master
| 2021-01-20T23:32:23.438286
| 2015-03-21T09:27:59
| 2015-03-21T09:27:59
| 32,625,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
ui.R
|
# Shiny UI: interactive demo of choosing a decision boundary for
# rejecting a null hypothesis and visualising the resulting Type I errors.
shinyUI(pageWithSidebar(
  headerPanel("Deciding to Reject a Null Hypothesis"),
  sidebarPanel(
    p('You measure the mean amount of Substance X per gram of soil for several soil samples.'),
    h4('Your Observation'),
    numericInput('t_observed', 'Enter current sample mean.', 0.06, min = 0, max = 0.20, step = 0.01),
    h4('Your Decision boundary'),
    # Typo fix in displayed text: "poluted" -> "polluted".
    p('You label as polluted any sample with a value above a certain number (your decision boundary) and as pristine any sample with a value below that number. Adjust this decision boundary using the lever below.'),
    # Fix: the original call had a trailing comma after `step = 0.01`,
    # which passed an empty extra argument to sliderInput().
    sliderInput('criterion', 'Set the decision boundary', value = 0.06, min = 0, max = 0.12, step = 0.01),
    # Typo fix in displayed text: "verticle" -> "vertical".
    p('In the graph, the distribution of means for all possible pristine and polluted soil samples is shown respectively in
      red and in blue. Every sample with a mean to the right of the vertical line is labeled polluted. Notice
      that the vertical line in the graph changes to reflect your selection of a decision boundary.')
  ),
  mainPanel(
    plotOutput('decison_and_error_plot'),
    h4('Your Inference'),
    verbatimTextOutput('prediction'),
    h4('Type I errors'),
    p('The black area of the figure shows you a group of pristine soil samples that you will eventually
      incorrectly label as polluted.')
  )
))
|
e84bcab1210f49687f2c2ee385b9da3a0e227ad1
|
fae0770ad0cd10b81a641d8bfcd61ffbcb32f142
|
/MODELOS/ComparingMethods.R
|
946c72bf5bc78c6f5f771b01bb21d399d0fc386b
|
[] |
no_license
|
jorgeramirezcarrasco/l3p3_Titanic
|
8f7edc78e63c6dea3ecc9814ff1eab2ad9c13eb6
|
0376f4a66c46834fdec476ffce9ae9bf1568f197
|
refs/heads/master
| 2022-12-05T15:47:57.843079
| 2014-07-15T07:56:26
| 2014-07-15T07:56:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
ComparingMethods.R
|
# Logistic regression model no. 1
# Fix: the original computed precision/recall from an undefined
# `ctab.test` object; use the confusion table that was just built.
ctab.test1 <- table(pred = titanic$pred1 > 0.44, Survived = titanic$Survived)
precision <- ctab.test1[2, 2] / sum(ctab.test1[2, ])
recall <- ctab.test1[2, 2] / sum(ctab.test1[, 2])
# Logistic regression model no. 2
# Fix: model no. 2 presumably scores `pred2` (the original reused
# `pred1`) -- NOTE(review): confirm against the model-fitting script.
ctab.test2 <- table(pred = titanic$pred2 > 0.44, Survived = titanic$Survived)
precision <- ctab.test2[2, 2] / sum(ctab.test2[2, ])
recall <- ctab.test2[2, 2] / sum(ctab.test2[, 2])
# Compute classification performance measures for a vector of predicted
# probabilities.
#
# Args:
#   col:       numeric vector of predicted probabilities.
#   truth:     observed outcomes; defaults to the global
#              `titanic$Survived` used throughout this script, so
#              existing calls `parametros(titanic$pred)` are unchanged.
#   threshold: probability cut-off for predicting the positive class
#              (previously hard-coded as 0.44).
#
# Returns a numeric vector:
#   c(precision, recall, enrichment, specificity, accuracy, fpr, fnr)
parametros <- function(col, truth = titanic$Survived, threshold = 0.44) {
  ctab.test <- table(pred = col > threshold, Survived = truth)
  precision <- ctab.test[2, 2] / sum(ctab.test[2, ])
  recall <- ctab.test[2, 2] / sum(ctab.test[, 2])
  # Enrichment: precision relative to the base positive rate.
  enrich <- precision / mean(as.numeric(truth))
  specificity <- ctab.test[1, 1] / sum(ctab.test[, 1])
  accuracy <- (ctab.test[1, 1] + ctab.test[2, 2]) / sum(ctab.test[])
  fpr <- ctab.test[2, 1] / (ctab.test[2, 1] + ctab.test[1, 1])
  fnr <- ctab.test[1, 2] / (ctab.test[1, 2] + ctab.test[2, 2])
  # Fix: the original ended on an assignment (returned only invisibly);
  # return the result vector explicitly.
  c(precision, recall, enrich, specificity, accuracy, fpr, fnr)
}
# Assemble one row of performance measures per model (methods #0-#3)
# and display them as a labelled table for side-by-side comparison.
miMatrix<-matrix(c(parametros(titanic$pred),parametros(titanic$pred1),parametros(titanic$pred2),parametros(titanic$pred3)),ncol=7,byrow=TRUE)
colnames(miMatrix)<-c('prec','rec','enrich','spec','accuracy','fpr','fnr')
rownames(miMatrix)<-c('method#0','method#1','method#2','method#3')
miMatrix<-as.table(miMatrix)
print(miMatrix)
|
a3174d9fbcb393ab9dbf277876167aceb5f68602
|
fc8cf5aa32e4c08cf6f2542b4c87c158659c8c0a
|
/man/writeNanoStringRccSet.Rd
|
82a98ff1369414c94d030d1c1474f378fb7eea3e
|
[] |
no_license
|
amarinderthind/NanoStringNCTools
|
c4848828cca752991e068bf613afc286b9539bdd
|
4ea743e7dff21ffe8d96ea34c72092dbc74f1948
|
refs/heads/master
| 2023-03-01T22:33:40.170820
| 2021-02-02T19:51:55
| 2021-02-02T19:51:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,308
|
rd
|
writeNanoStringRccSet.Rd
|
\name{writeNanoStringRccSet}
\alias{writeNanoStringRccSet}
\concept{NanoStringRccSet}
\title{Write NanoString Reporter Code Count (RCC) files}
\description{
Write NanoString Reporter Code Count (RCC) files from an instance of class
\code{\linkS4class{NanoStringRccSet}}.
}
\usage{
writeNanoStringRccSet(x, dir = getwd())
}
\arguments{
\item{x}{an instance of class \code{\linkS4class{NanoStringRccSet}.}}
\item{dir}{An optional character string representing the path to the
directory for the RCC files.}
}
\details{
Writes a set of NanoString Reporter Code Count (RCC) files based upon \code{x}
in \code{dir}.
}
\value{
A character vector containing the paths for all the newly created RCC files.
}
\author{Patrick Aboyoun}
\seealso{\code{\link{NanoStringRccSet}}, \code{\link{readNanoStringRccSet}}}
\examples{
datadir <- system.file("extdata", "3D_Bio_Example_Data",
package = "NanoStringNCTools")
rccs <- dir(datadir, pattern = "SKMEL.*\\\\.RCC$", full.names = TRUE)
solidTumorNoRlfPheno <- readNanoStringRccSet(rccs)
writeNanoStringRccSet(solidTumorNoRlfPheno, tempdir())
for (i in seq_along(rccs)) {
stopifnot(identical(readLines(rccs[i]),
readLines(file.path(tempdir(), basename(rccs[i])))))
}
}
\keyword{file}
\keyword{manip}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.