blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e84b29e9a7422ee2deea63ffc83d6728222963ff | d8b4a48626eb51f88a6e75f4abc288ae97a44ca2 | /man/min_rank.Rd | 2d81548e20115b81436a267c420fce2cfc8e049e | [] | no_license | eclarke/eclectic | da8c4d9e91a114389f3aafb70dffe7dddd3420b1 | 621a81b8ac6b659300b4ab72621f080bdf273eb1 | refs/heads/master | 2021-01-17T09:09:49.255796 | 2017-11-13T17:38:34 | 2017-11-13T17:38:34 | 42,249,192 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 723 | rd | min_rank.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxonomy.R
\name{min_rank}
\alias{min_rank}
\title{Creates a MinRank vector from an agglomerated data frame.}
\usage{
min_rank(agg, end.rank, ranks = qiimer::taxonomic_ranks, ...)
}
\arguments{
\item{agg}{\link{agglomerate}d data frame (with taxonomic ranks)}
\item{end.rank}{the most specific taxonomic rank if available}
\item{...}{additional arguments passed to \link{tax_climber}}
\item{ranks}{a character vector of taxonomic ranks present in agg}
}
\description{
Version of \link{tax_climber} for use inside \link[dplyr]{mutate} or assigned directly
assign to col. MinRank columns are the most specific taxonomic assignments
(up to a threshold)
}
|
a478a75b78c4526de77a0592275ec9519a60453e | 41994da520410968257d912d704ab7434c595daf | /IMDB.R | 496f54a715eb4eb074851b3717008d94ae62032e | [] | no_license | germckay/IMDB | c58a4e47e63ede8664be4d5be89d739d6870fb50 | 6183371206460a4b941f826fdc7388ffe5a00f07 | refs/heads/master | 2022-12-15T14:57:17.375681 | 2020-09-02T22:16:48 | 2020-09-02T22:16:48 | 292,404,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 194 | r | IMDB.R | #################################
### In-Class IMDB Competition ###
#################################
# Reading in packages
library(tidyverse)
# Reading in Data
# Exploratory Data Analysis
|
d8bcd9043f62f4e3d6c3734d09c1502452990916 | 82ea213c1ec0f269ca67bc84fd4629ac17f0fa29 | /man/rasterizeGimms.Rd | a03133721425b6046c43f348518dafb50218d698 | [] | no_license | cran/gimms | 3a51971c97073acdf20fa333c783066035067366 | b937e90ac5be9e0a6abeb9d06263a8276e164f36 | refs/heads/master | 2023-08-17T16:16:41.858408 | 2023-08-09T19:00:02 | 2023-08-09T19:31:01 | 48,080,919 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,599 | rd | rasterizeGimms.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rasterizeGimms.R
\name{rasterizeGimms}
\alias{rasterizeGimms}
\title{Rasterize GIMMS NDVI3g Data}
\usage{
rasterizeGimms(
x,
ext = NULL,
snap = "out",
keep = NULL,
split = FALSE,
cores = 1L,
filename = "",
...
)
}
\arguments{
\item{x}{\code{character}. Vector of local filepaths. Note that product
versions must not be mixed, i.e. 'x' should represent files originating from
either NDVI3g.v1 or NDVI3g.v0 only.}
\item{ext}{\code{Extent}, or any object from which an \code{Extent} can be
extracted, see \code{\link[raster]{crop}}.}
\item{snap}{\code{character}, defaults to "out". Other available options are
"in" and "near", see \code{\link[raster]{crop}}.}
\item{keep}{\code{integer}. Flag values of NDVI3g pixels to spare during
quality control. Pixels with non-included flag values are set to \code{NA}.
If not specified (i.e., \code{NULL}; default), quality control is skipped.}
\item{split}{\code{logical}, defaults to \code{FALSE}. If \code{TRUE}, a
\code{list} of \code{RasterStack} objects (of \code{length(x)}) is returned
rather than a single \code{RasterStack}.}
\item{cores}{\code{integer}. Number of cores for parallel computing.}
\item{filename}{\code{character}. Optional output filename. If specified,
this must be of the same length as 'x'.}
\item{...}{Further arguments passed to \code{\link{writeRaster}}.}
}
\value{
If \code{split = TRUE}, a list of NDVI3g \code{RasterStack} objects
corresponding to the files specified in 'x'; else a single NDVI3g
\code{RasterStack} object.
}
\description{
Import GIMMS NDVI3g (binary or NetCDF) data into R as \code{Raster*} objects.
}
\examples{
\dontrun{
tmp <- tempdir()
## Download NDVI3g.v1 sample data
gimms_files <- downloadGimms(x = as.Date("2000-01-01"),
y = as.Date("2000-12-31"),
dsn = tmp)
## Extent for clipping
shp <- getData("GADM", country = "DEU", level = 0, path = tmp)
## Rasterize without quality control
gimms_raster <- rasterizeGimms(x = gimms_files,
ext = shp) # clipping
plot(gimms_raster[[1]])
lines(shp)
## Rasterize with quality control
gimms_rasterq <- rasterizeGimms(x = gimms_files,
ext = shp, # clipping
keep = 0) # quality control
plot(gimms_rasterq[[1]])
lines(shp)
}
}
\seealso{
\code{\link[raster]{crop}}, \code{\link{qualityControl}},
\code{\link{writeRaster}}.
}
|
ca3e58b1e437796ed99c836754f03479cfb1a984 | 00d5fbe9ac0bac3dd635c922d1a6fd81f778a5f8 | /2.2_solarradiation.R | 00364156c9b62528a772994df3038218df6c7a90 | [] | no_license | enicurus/warbler.molt.migration | f21b691af548d5cef99636a13772ba232e2dc001 | aa12e83ef0929570a97ee3c3618d84fbd3a1dcf4 | refs/heads/master | 2021-01-22T23:05:56.429844 | 2020-02-20T21:55:04 | 2020-02-20T21:55:04 | 85,604,728 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,594 | r | 2.2_solarradiation.R | library(raster)
library(maps)
library(maptools)
library(sp)
library(GISTools)
library(geosphere)
##################################################################
#Import solar radiation data
nasa = read.table(file='~/Dropbox/Warbler.Molt.Migration/global_radiation.txt', skip=13, header=TRUE)
coordinates(nasa) = ~ Lon + Lat
proj4string(nasa) = CRS('+proj=longlat +ellps=WGS84')
summary(nasa)
extent = extent(nasa)
length(-180:179)
length(-90:89)
rast = raster(extent,ncol=360,nrow=180,crs='+proj=longlat +ellps=WGS84')
grid = rasterize(nasa,rast,fun='last')
grid = grid[[2:13]]
##################################################################
#Plot the solar radiation data
wrld = readShapeSpatial("~/Documents/Glenn/Furnariidae/Furn_Maps/TM_WORLD_BORDERS-0.2/TM_WORLD_BORDERS-0.2.shp")
ex = c(-170,-30,-60,89) #new world
nwsolar = crop(grid,extent(ex))
nwmap = crop(wrld,extent(ex))
pdf('~/Dropbox/Warbler.Molt.Migration/solar_radiation.pdf')
par(mfrow=c(2,2),mar=c(2,2,2,2))
plot(nwsolar[['Jan']],main='Jan')
plot(nwmap,add=T,lwd=.1,border='black')
plot(nwsolar[['Apr']],main='Apr')
plot(nwmap,add=T,lwd=.1,border='black')
plot(nwsolar[['Jul']],main='Jul')
plot(nwmap,add=T,lwd=.1,border='black')
plot(nwsolar[['Oct']],main='Oct')
plot(nwmap,add=T,lwd=.1,border='black')
dev.off()
par(mfrow=c(1,1),mar=c(2,2,2,2))
##################################################################
#Import average monthly daylight hours
day = read.table(file='~/Dropbox/Warbler.Molt.Migration/daylight.txt', skip=8, header=TRUE)
##################################################################
##################################################################
map.dir = '~/Dropbox/Warbler.Molt.Migration/Parulidae_shapefiles/'
files = list.files(map.dir,pattern='*.shp')
taxa = as.character(sapply(files,function(x) paste(strsplit(x,'_')[[1]][1],'_',strsplit(x,'_')[[1]][2],sep='')))
colnames = c('Shapefile','strategy','solar','daylight')
data = data.frame(matrix(nrow=length(taxa),ncol=length(colnames)))
colnames(data) = colnames
rownames(data) = taxa
data$Shapefile = files
# m = migratory - only wintering and breeding ranges (2,3)
# nm = non-migratory - only resident ranges (1)
# mix = mixed - resident, wintering and breeding ranges (1,2,3)
# par = partial - resident and breeding ranges (1,2)
breeding = c('May','Jun','Jul','Aug')
nonbreed = c('Jan','Feb','Mar','Apr','Sep','Oct','Nov','Dec')
head(data)
i = 29
i = 48
for(i in 1:nrow(data)){
print(rownames(data)[i])
path = paste(map.dir,data$Shapefile[i],sep='')
shp = readShapeSpatial(path)
projection(shp) = '+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0'
sol.tmp = matrix(nrow=3,ncol=13)
colnames(sol.tmp) = c('Area','Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')
day.tmp = matrix(nrow=3,ncol=13)
colnames(day.tmp) = c('Area','Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')
#non-migratory - only resident ranges (1)
if(all(1 %in% shp@data$SEASONAL & !(c(2,3) %in% shp@data$SEASONAL))){
data[i,'strategy'] = 'nm'
res = shp[shp@data$SEASONAL == 1, ]
months = c(breeding,nonbreed)
samp = spsample(res,1000,type='random')
area = mean(sapply(slot(res,'polygons'),slot,'area'))
sol.tmp[1,'Area'] = area
day.tmp[1,'Area'] = area
sol.resid = extract(grid[[months]],samp)
sol.tmp[1,months] = apply(sol.resid,2,FUN=mean)
#for each latitude coordinate, round to nearest integer then extract row in data.frame 'day'
#that it corresponds to, then average across all rows and columns
day.resid=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[1,months] = apply(day.resid,2,FUN=mean)
data[i,'solar'] = mean(apply(sol.tmp[,-1],2,FUN=weighted.mean,w=sol.tmp[,1],na.rm=T))
data[i,'daylight'] = mean(apply(day.tmp[,-1],2,FUN=weighted.mean,w=day.tmp[,1],na.rm=T))
#migratory - only breeding and wintering ranges (2,3)
}else if(all(c(2,3) %in% shp@data$SEASONAL & !(1 %in% shp@data$SEASONAL))){
data[i,'strategy'] = 'm'
breed = shp[shp@data$SEASONAL == 2,]
months = c(breeding)
samp = spsample(breed,1000,type='random')
area = mean(sapply(slot(breed,'polygons'),slot,'area'))
sol.tmp[2,'Area'] = area
day.tmp[2,'Area'] = area
sol.breed = extract(grid[[months]],samp)
sol.tmp[2,months] = apply(sol.breed,2,FUN=mean)
day.breed=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[2,months] = apply(day.breed,2,FUN=mean)
winter = shp[shp@data$SEASONAL == 3,]
months = c(nonbreed)
samp = spsample(winter,1000,type='random')
area = mean(sapply(slot(winter,'polygons'),slot,'area'))
sol.tmp[3,'Area'] = area
day.tmp[3,'Area'] = area
sol.winter = extract(grid[[months]],samp)
sol.tmp[3,months] = apply(sol.winter,2,FUN=mean)
day.winter=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[3,months] = apply(day.winter,2,FUN=mean)
data[i,'solar'] = mean(apply(sol.tmp[,-1],2,FUN=weighted.mean,w=sol.tmp[,1],na.rm=T))
data[i,'daylight'] = mean(apply(day.tmp[,-1],2,FUN=weighted.mean,w=day.tmp[,1],na.rm=T))
#mixed - resident, breeding and wintering ranges (1,2,3)
}else if(all(c(1,2,3) %in% shp@data$SEASONAL)){
data[i,'strategy'] = 'mix'
res = shp[shp@data$SEASONAL == 1, ]
months = c(breeding,nonbreed)
samp = spsample(res,1000,type='random')
area = mean(sapply(slot(res,'polygons'),slot,'area'))
sol.tmp[1,'Area'] = area
day.tmp[1,'Area'] = area
sol.resid = extract(grid[[months]],samp)
sol.tmp[1,months] = apply(sol.resid,2,FUN=mean)
day.resid=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[1,months] = apply(day.resid,2,FUN=mean)
breed = shp[shp@data$SEASONAL == 2,]
months = c(breeding)
samp = spsample(breed,1000,type='random')
area = mean(sapply(slot(breed,'polygons'),slot,'area'))
sol.tmp[2,'Area'] = area
day.tmp[2,'Area'] = area
sol.breed = extract(grid[[months]],samp)
sol.tmp[2,months] = apply(sol.breed,2,FUN=mean)
day.breed=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[2,months] = apply(day.breed,2,FUN=mean)
winter = shp[shp@data$SEASONAL == 3,]
months = c(nonbreed)
samp = spsample(winter,1000,type='random')
area = mean(sapply(slot(winter,'polygons'),slot,'area'))
sol.tmp[3,'Area'] = area
day.tmp[3,'Area'] = area
sol.winter = extract(grid[[months]],samp)
sol.tmp[3,months] = apply(sol.winter,2,FUN=mean)
day.winter=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[3,months] = apply(day.winter,2,FUN=mean)
data[i,'solar'] = mean(apply(sol.tmp[,-1],2,FUN=weighted.mean,w=sol.tmp[,1],na.rm=T))
data[i,'daylight'] = mean(apply(day.tmp[,-1],2,FUN=weighted.mean,w=day.tmp[,1],na.rm=T))
#partial - resident and breeding ranges (1,2)
}else if(all(c(1,2) %in% shp@data$SEASONAL)){
data[i,'strategy'] = 'par'
res = shp[shp@data$SEASONAL == 1, ]
months = c(breeding,nonbreed)
samp = spsample(res,1000,type='random')
area = mean(sapply(slot(res,'polygons'),slot,'area'))
sol.tmp[1,'Area'] = area
day.tmp[1,'Area'] = area
sol.resid = extract(grid[[months]],samp)
sol.tmp[1,months] = apply(sol.resid,2,FUN=mean)
day.resid=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[1,months] = apply(day.resid,2,FUN=mean)
breed = shp[shp@data$SEASONAL == 2,]
months = c(breeding)
samp = spsample(breed,1000,type='random')
area = mean(sapply(slot(breed,'polygons'),slot,'area'))
sol.tmp[2,'Area'] = area
day.tmp[2,'Area'] = area
sol.breed = extract(grid[[months]],samp)
sol.tmp[2,months] = apply(sol.breed,2,FUN=mean)
day.breed=t(sapply(round(samp@coords[,2],0),function(x){
as.numeric(day[day$Lat == x, months])
}))
day.tmp[2,months] = apply(day.breed,2,FUN=mean)
data[i,'solar'] = mean(apply(sol.tmp[,-1],2,FUN=weighted.mean,w=sol.tmp[,1],na.rm=T))
data[i,'daylight'] = mean(apply(day.tmp[,-1],2,FUN=weighted.mean,w=day.tmp[,1],na.rm=T))
} #end of if statement
} #i loop
data
# Write the per-taxon solar/daylight summary to a tab-delimited file.
write.table(data,'~/Dropbox/Warbler.Molt.Migration/solar_daylight.txt',quote=F,row.names=F,col.names=T,sep='\t')
plot(data$solar,data$daylight)
# NOTE(review): the three assignments below all copy the SAME 'day.tmp' (the
# final loop iteration); presumably each was run interactively right after
# processing the corresponding species -- confirm before rerunning as a script.
cer = day.tmp
tri = day.tmp
blk = day.tmp
# Spot checks: mean breeding-season daylight (row 2 = breeding range,
# row 1 = resident range) and mean non-breeding daylight (row 3 = wintering).
mean(cer[2,breeding])
mean(tri[1,breeding])
mean(blk[2,breeding])
mean(cer[3,nonbreed])
mean(tri[1,nonbreed])
mean(blk[3,nonbreed])
04e4c00e2808c089f4b97c0094c7dde641ff0f59 | bd3d28fe434b50abaf37ee82aa79584df4f173a7 | /man/namespace_match.Rd | dbf2615a4c4de2ef95f0d109f819a3c478873df2 | [
"MIT"
] | permissive | wikimedia-research/WMUtils | ff4a9adb5c33722443016f7aba4fb6e873bf8cc5 | fed8178aad9d0f24936fa675184a0b00af93ee64 | refs/heads/master | 2020-04-01T06:56:40.070383 | 2014-12-21T17:06:19 | 2014-12-21T17:06:19 | 20,454,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,758 | rd | namespace_match.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{namespace_match}
\alias{namespace_match}
\title{namespace name/number matching}
\usage{
namespace_match(x, code = "enwiki", language = NULL, project_type = NULL,
use_API = FALSE)
}
\arguments{
\item{x}{a vector of namespace names or numbers}
\item{code}{which project's names to use as the basis for the conversion. Set to "enwiki" by default.}
\item{language}{see 'details' - set to NULL by default}
\item{project_type}{see 'details' - set to NULL by default}
\item{use_API}{whether to rebuild the data fresh from the API, or use the version that comes with WMUtils.
note that API rebuilding will update the version stored with WMUtils, but won't work at all on stat1002.
Because there's no internet on stat1002.}
}
\value{
a vector containing the IDs or names, whichever you wanted.
}
\description{
\code{namespace_match} allows you to match namespace names to the appropriate namespace ID numbers, or vice
versa, in any language.
}
\details{
namespace_match takes a vector of namespace ID numbers or namespace names, and matches them to...well, the one
you didn't provide. To do this it relies on a .RData file of the globally used namespace IDs and local names.
To match your names/IDs with the project you want them localised to, you can provide either \code{code}, which
matches the format used in the \code{wiki_info} table and the NOC lists, or both language and project_type,
where language is the English-language name for the project language, and project_type is "wiki", "wikisource",
or so on, following the format used in the \code{wiki_info} table.
}
\seealso{
\code{\link{namespace_match_generator}}, the function that (re)generates the dataset. It can be directly
called.
}
|
2c4359a30ea12bb07fa57bdeab453b0ab1c36b90 | d8ed284412c99f0ca03491b6b8a65e2ae1ae3964 | /other/images/summarize_DOW_data.R | 0004a84fd1e280360357a74beac539b5ac19aa8e | [
"MIT"
] | permissive | jwillwerth/FrEDI | e3cd3bef0ee6d05cdc7c5f8518728bb35413fab7 | 1d698e41fe4e70f4e6d21eb2958702d0c77d6d01 | refs/heads/main | 2023-08-15T23:47:54.396764 | 2021-10-15T17:17:06 | 2021-10-15T17:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,015 | r | summarize_DOW_data.R | ###### summarize_DOW_data ######
### This function summarizes data to use with plot_byDegree
summarize_DOW_data <- function(
data,
year = 2020,
primary = T,
sumByCol = "annual_impacts",
bySector = T,
otherGroupVars = c("adaptation", "impactType", "impactYear"),
impactYear = NULL,
silent = F
){
###### Set defaults ######
if(is.null(year )) year <- 2020
if(is.null(primary )) primary <- T
if(is.null(bySector)) bySector <- T
if(is.null(sumByCol)) sumByCol <- "annual_impacts"
if(is.null(otherGroupVars)) otherGroupVars <- c("adaptation", "impactType", "impactYear")
### Messaging
if(is.null(silent)) silent <- F
print_msg <- !silent
###### Prep data #######
### Keep only observations for specified reference year
### Drop model averages
data <- data %>%
filter(model!="Average") %>%
as.data.frame
# data %>% nrow %>% print
#### Filter to specific year
ref_year <- year
data <- data %>% filter(year == ref_year) %>% as.data.frame
# if(bySector){
# data <- data %>% filter(year == ref_year) %>% as.data.frame
# # data$year %>% unique %>% length %>% print
# }
### Standardize the column name
data$yCol <- data[,sumByCol]
data <- data %>%
select(-c(all_of(sumByCol))) %>%
mutate(is_NA = yCol %>% is.na)
#### Names
data_names0 <- data %>% names
###### Summarize by Region ######
### Drop national totals if present
if("region" %in% data_names0){
data <- data %>% filter(region!="National Total")
}
c_regions <- data$region %>% unique
n_regions <- c_regions %>% length
# n_regions %>% print
### Main group vars
main_groupVars <- c("sector", "model_type", "model", "driverValue")
which_main <- (main_groupVars %in% data_names0) %>% which
main_groupVars <- main_groupVars[which_main]
### Figure out which factor columns are in the data
other_groupVars <- otherGroupVars
which_other <- (other_groupVars %in% data_names0) %>% which
other_groupVars <- other_groupVars[which_other]
# data %>% nrow %>% print
###### Summarize by Impact Year ######
ref_impactYear <- impactYear
if(("impactYear" %in% other_groupVars) & bySector){
c_impYears <- data$impactYear %>% unique
n_impYears <- c_impYears %>% length
if(n_impYears>1){
if(print_msg) message("\t", "More than one impact year present...")
if(is.null(impactYear)){
which_not_interp <- (c_impYears!="Interpolation") %>% which
impactYear <- c_impYears[1]
}
if(print_msg) message("\t", "Summarizing values for impact year", impactYear, " ...")
}
data <- data %>%
filter(impactYear=="Interpolation" | impactYear == ref_impactYear) %>%
mutate(impactYear==c_impYears) %>%
mutate(impactYear = impactYear %>% as.character %>% as.numeric) %>%
filter(year == impactYear)
n_impYears <- c_impYears %>% length
}
# "got here" %>% print
# data %>% filter(is.na(yCol)) %>% nrow %>% print
###### Get primary values ######
primeValue <- primary * 1
if(("primary" %in% data_names0) & bySector){
data <- data %>% filter(primary==primeValue) %>% as.data.frame
}
###### Summarize by Impact Type ######
### Impact Type
if(("impactType" %in% other_groupVars) & bySector){
impactType_groupVars <- c(other_groupVars[which(other_groupVars!="impactType")], main_groupVars)
#### Count number of impact types
count_impactTypes <- data %>%
group_by_at(c(all_of(impactType_groupVars), "region")) %>%
summarize(n=n(), .groups="keep")
n_impTypes <- count_impactTypes$n %>% max
# n_impTypes %>% print
# data$year %>% unique %>% print
if(n_impTypes>1){
if(print_msg) message("\t", "More than one impact type present...")
if(print_msg) message("\t\t", "Summing values across impact types...")
data <- data %>%
(function(x){
# data %>% names %>% print
#### Join with other data
x <- x %>% left_join(count_impactTypes, by = c(impactType_groupVars, "region"))
### Summarize by impact type
x_impactTypes <- x %>%
group_by_at(.vars=c(all_of(impactType_groupVars), "region", "n")) %>%
summarize_at(.vars = c("yCol", "is_NA"), sum, na.rm = T) %>%
mutate(
is_NA = is_NA < n,
is_NA = is_NA * 1,
is_NA = is_NA %>% na_if(0)
) %>%
mutate(yCol = yCol * is_NA)
# mutate(yCol = yCol * is_NA) %>%
# mutate(yCol = yCol %>% replace_na(0))
# "got here"
# x_impactTypes %>% filter(!is.na(yCol)) %>% nrow %>% print
### Summarize national values
x_national <- x_impactTypes %>%
group_by_at(.vars=c(all_of(impactType_groupVars))) %>%
summarize_at(.vars = c("yCol", "is_NA"), sum, na.rm = T) %>%
mutate(
is_NA = is_NA < n_regions,
is_NA = is_NA * 1,
is_NA = is_NA %>% na_if(0)
) %>%
mutate(yCol = yCol * is_NA) %>%
# mutate(yCol = yCol %>% replace_na(0)) %>%
mutate(region = "National Total") %>%
mutate(impactType = "all")
return(x_national)
})
} ### End if n_impTypes > 1
} ### End if impactType in data
# "got here" %>% print
# data %>% filter(!is.na(yCol)) %>% nrow %>% print
###### Summarize by Adaptation ######
if(("adaptation" %in% other_groupVars) & bySector){
adapt_groupVars <- c(other_groupVars[which(other_groupVars!="adaptation")], main_groupVars)
#### Count number of adaptations
#### Count number of impact types
count_adapt <- data %>%
group_by_at(.vars=c(all_of(adapt_groupVars), "region")) %>%
summarize(n=n(), .groups="keep")
n_adapt <- count_adapt$n %>% max
if(n_adapt>1){
if(print_msg) message("\t", "More than one adaptation present...")
if(print_msg) message("\t\t", "Averaging values across adaptations...")
data <- data %>%
(function(x){
#### Join with other data
x <- x %>% left_join(count_adapt, by = c(adapt_groupVars, "region"))
### Summarize by impact type
x_adapt <- x %>%
group_by_at(.vars=c(all_of(adapt_groupVars), "region", "n")) %>%
summarize_at(.vars = c("yCol", "is_NA"), sum, na.rm = T) %>%
mutate(
is_NA = is_NA < n,
is_NA = is_NA * 1,
is_NA = is_NA %>% na_if(0)
) %>%
mutate(yCol = yCol * is_NA)
# mutate(yCol = yCol * is_NA) %>%
# mutate(yCol = yCol %>% replace_na(0))
### Summarize national values
x_national <- x_adapt %>%
group_by_at(.vars=c(all_of(adapt_groupVars))) %>%
summarize_at(.vars = c("yCol", "is_NA"), sum, na.rm = T) %>%
mutate(
is_NA = is_NA < n_regions,
is_NA = is_NA * 1,
is_NA = is_NA %>% na_if(0)
) %>%
mutate(yCol = yCol * is_NA) %>%
# mutate(yCol = yCol %>% replace_na(0)) %>%
mutate(region = "National Total") %>%
mutate(adaptation = "Average")
return(x_national)
})
} ### End if n_impTypes > 1
} ### End if impactType in data
# "got here" %>% print
# data %>% filter(!is.na(yCol)) %>% nrow %>% print
###### Summarize By Sector ######
all_group_vars <- c(main_groupVars, other_groupVars)
# c_regions <- (data %>% filter(region!="National Total"))$region %>% unique
# n_regions <- c_regions %>% length
# c_regions%>% print
# n_regions%>% print
if(bySector){
data <- data %>%
group_by_at(.vars = c(all_of(main_groupVars))) %>%
summarize_at(.vars = c("yCol", "is_NA"), sum, na.rm = T) %>%
mutate(
is_NA = is_NA < n_regions,
is_NA = is_NA * 1,
is_NA = is_NA %>% na_if(0)
) %>%
mutate(yCol = yCol * is_NA) %>%
# mutate(yCol = yCol %>% replace_na(0)) %>%
mutate(region = "National Total")
# data %>% filter(is.na(yCol)) %>% nrow %>% print
} else{
# all_group_vars %>% print
# data %>% nrow %>% print
data <- data %>%
as.data.frame %>%
ungroup %>%
# group_by_at(.vars = c(all_of(all_group_vars))) %>%
group_by_at(.vars = c("sector", "model_type", "model", "driverValue", "impactYear", "impactType", "adaptation")) %>%
summarize_at(.vars = c("yCol", "is_NA"), sum, na.rm = T)
data %>% filter(sector=="Extreme Temperature") %>% filter(!is.na(yCol)) %>% nrow %>% print
(data %>% filter(sector=="Extreme Temperature"))$is_NA %>% max(na.rm=T) %>% print
data <- data %>%
mutate(
is_NA = is_NA < n_regions,
is_NA = is_NA * 1,
is_NA = is_NA %>% na_if(0)
) %>%
mutate(yCol = yCol * is_NA) %>%
# mutate(yCol = yCol %>% replace_na(0)) %>%
mutate(region = "National Total")
# data <- data %>%
# group_by_at(.vars = c(all_of(all_group_vars))) %>%
# summarize_at(.vars = c("yCol", "is_NA"), sum, na.rm = T) %>%
# mutate(
# is_NA = is_NA < n_regions,
# is_NA = is_NA * 1,
# is_NA = is_NA %>% na_if(0)
# ) %>%
# mutate(yCol = yCol * is_NA) %>%
# # mutate(yCol = yCol %>% replace_na(0)) %>%
# mutate(region = "National Total")
# data %>% filter(!is.na(yCol)) %>% nrow %>% print
}
###### Return ######
return_df <- data %>% ungroup %>% as.data.frame
return_df[, sumByCol] <- return_df$yCol
return_df <- return_df %>% select(-c("yCol", "is_NA")) %>% as.data.frame
return(return_df)
}
|
f7e194ee7ad42c8eb0abe17efabb6847818de370 | a5e49e9b3e7892ce476bab528cde3f686d5a5e3d | /R/reg.6mod.R | a305b10fff34ed07689cb9f916d5a8a59be1b1da | [] | no_license | cran/lessR | a7af34480e88c5b9bf102ab45fa6464a22ffbe3b | 562f60e6688622d8b8cede7f8d73d790d0b55e27 | refs/heads/master | 2023-05-29T07:57:09.544619 | 2023-05-14T20:20:02 | 2023-05-14T20:20:02 | 17,697,039 | 6 | 3 | null | null | null | null | UTF-8 | R | false | false | 3,012 | r | reg.6mod.R | .reg6mod <-
function(lm.out, w.nm, x.nm,
digits_d, pdf=FALSE, width=5, height=5, manage.gr=FALSE, ...) {
nm <- all.vars(lm.out$terms) # names of vars in the model
n.vars <- length(nm)
n.keep <- nrow(lm.out$model)
# pdf graphics option
if (pdf) {
pdf_file <- "mod.pdf"
pdf(file=pdf_file, width=width, height=height)
}
# keep track of the plot in this routine
plt.i <- 0L
plt.title <- character(length=0)
plt.i <- plt.i + 1L
plt.title[plt.i] <- "Moderator Variable Interaction Plot"
max.width=.4
margs <- .plt.marg(max.width, y.lab=nm[1], x.lab=nm[2], main=NULL, sub=NULL)
lm <- margs$lm
tm <- margs$tm
rm <- margs$rm + .85 # allow for legend
bm <- margs$bm + .3
par(bg=getOption("window_fill"))
orig.params <- par(no.readonly=TRUE)
on.exit(par(orig.params))
par(mai=c(bm, lm, tm, rm))
tx <- character(length = 0)
mn.x <- min(lm.out$model[, x.nm])
mx.x <- max(lm.out$model[, x.nm])
mn.y <- min(lm.out$model[, 1])
mx.y <- max(lm.out$model[, 1])
plot(c(mn.x,mx.x), c(mn.y,mx.y), type="n", xlab=x.nm, ylab=nm[1])
x.ind <- which(names(lm.out$model) == x.nm)
w.ind <- which(names(lm.out$model) == w.nm)
b0 <- lm.out$coefficients[1]
bx <- lm.out$coefficients[x.ind]
bw <- lm.out$coefficients[w.ind]
bxw <- lm.out$coefficients[4]
clr.u1 <- getColors("hues", n=2)[1] # up 1 sd
clr.0 <- "gray20"
clr.d1 <- getColors("hues", n=2)[2] # down 1 sd
# wc is the constant value of mod W variable, 3 possibilities
m.w <- mean(lm.out$model[, w.nm])
s.w <- sd(lm.out$model[, w.nm])
tx[length(tx)+1] <- paste("Mean of ", w.nm,": ", .fmt(m.w,digits_d), sep="")
tx[length(tx)+1] <- paste("SD of ", w.nm,": ", .fmt(s.w,digits_d), sep="")
tx[length(tx)+1] <- ""
wc <- m.w+s.w; b.0 <- b0 + bw*wc; b.1 <- bx + bxw*wc
tx[length(tx)+1] <- paste("mean+1SD for ", w.nm,
": b0=", round(b.0,digits_d), " b1=", round(b.1,digits_d), sep="")
abline(b.0, b.1, col=clr.u1, lwd=1.5)
wc <- m.w; b.0 <- b0 + bw*wc; b.1 <- bx + bxw*wc
tx[length(tx)+1] <- paste("mean for ", w.nm,
": b0=", round(b.0,digits_d), " b1=", round(b.1,digits_d), sep="")
abline(b.0, b.1, col=clr.0, lwd=1)
wc <- m.w-s.w; b.0 <- b0 + bw*wc; b.1 <- bx + bxw*wc
tx[length(tx)+1] <- paste("mean-1SD for ", w.nm,
": b0=", round(b.0,digits_d), " b1=", round(b.1,digits_d), sep="")
abline(b.0, b.1, col=clr.d1, lwd=1.5)
lbls <- c("+1SD", "Mean", "-1SD")
text.cex <- ifelse(is.null(getOption("axis_x_cex")),
getOption("axis_cex"), getOption("axis_x_cex"))
if (text.cex > 0.99) text.cex <- .7 * text.cex
clr <- c(clr.u1, clr.0, clr.d1)
l.typ <- c("solid", "solid", "solid")
.plt.legend(lbls, FALSE, clr, "blue", "", rgb(.98,.98,.98), par("usr"),
legend_title=w.nm, lab_cex=text.cex, line_type=l.typ)
if (pdf) {
dev.off()
.showfile(pdf_file, "moderator interaction plot")
}
return(invisible(list(i=plt.i, ttl=plt.title, out_mod=tx)))
}
|
9f640967a349e53fa86b285671246735ed66407f | e0bcd3a0bfa23c1d445c2c738b8e37323a0c3b71 | /test/20181229_data_integrity_testing.R | 50b2c0bc317ed44a03c2729fa952c7c84d5773a4 | [] | no_license | active-analytics/pqam_2018 | 87a017a55c130412e4d090518f2f47a6adb67c9a | cbc0437bb9da5460939641ec39af24070b733e24 | refs/heads/master | 2021-10-20T11:22:43.513713 | 2019-02-27T14:07:28 | 2019-02-27T14:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,326 | r | 20181229_data_integrity_testing.R | # clearing shit out
rm(list=ls())
cat("\014")
# loading packages
library(tidyverse)
library(lubridate)
library(backtestr)
library(tidyquant)
#######################
## chain description ##
#######################
df_chain_desc <-
read_csv("data_output/spy_weekly_chain_desc_5yr.csv")
# reasonableness of numerical values
df_chain_desc$d2x %>% summary()
df_chain_desc$num_opts %>% summary()
df_chain_desc$exec_day_volume %>% summary()
# missing data
df_chain_desc[rowSums(is.na(df_chain_desc)) > 0, ]
###################
## chain history ##
###################
df_chain_hist <-
read_csv("data_output/spy_weekly_chain_hist_5yr.csv")
# reasonableness of numerical values
df_chain_hist %>%
#filter(trade_date != last_trade_date) %>%
.$implied_forward %>%
summary()
df_chain_hist %>%
filter(trade_date != last_trade_date) %>%
.$bid_swap_rate %>%
summary()
df_chain_hist %>%
filter(trade_date != last_trade_date) %>%
.$ask_swap_rate %>%
summary()
df_chain_hist %>%
filter(trade_date != last_trade_date) %>%
.$mid_swap_rate %>%
summary()
# comparing implied forward to spot prices
# this was a good check because it found some issues going on in
# september of 2014
df_yahoo <-
tq_get(
"SPY"
, get = "stock.prices"
, from = "2013-12-20"
, to = "2018-11-30"
)
df_yahoo
df_price_comparison <-
df_yahoo %>%
select(date, close) %>%
left_join(
df_chain_hist %>% select(trade_date, implied_forward)
, by = c("date" = "trade_date")
)
ggplot(data = df_price_comparison) +
geom_line(mapping = aes(x = date, y = close), color = "blue") +
geom_line(mapping = aes(x = date, y = implied_forward), color = "red")
df_price_comparison %>%
mutate(
diff = abs(close - implied_forward)
, pct_diff = abs(close - implied_forward) / close
) %>%
arrange(desc(diff)) %>%
View()
# lots of implied forwards greater than close prices
df_price_comparison %>% filter(implied_forward > close) %>% View()
# missing data
df_chain_hist[rowSums(is.na(df_chain_hist)) > 0, ]
####################
## option history ##
####################
df_opt_hist <- read_csv("data_output/spy_weekly_opt_hist_5yr.csv")
# checks how many execution date options there are, lowest is 14
df_chain_desc %>%
left_join(
df_opt_hist
, by = c("expiration", "execution" = "data_date")
) %>%
group_by(
expiration, execution
) %>%
summarize(
num_opts = sum(!is.na(strike))
) %>%
arrange(num_opts)
# missing data, there are about 25, from 12/16/2016, 12/23/2016, 12/30/2016
df_opt_hist[rowSums(is.na(df_opt_hist)) > 0, ] %>% View()
option_chain(
trade_date = mdy("12/30/2016")
, underlying = "SPY"
, expiration = mdy("12/30/2016")
)
# bad option prices on expiration date - look at volatility skews
df_exec_day_options <-
df_opt_hist %>%
left_join(
df_chain_desc %>% select(expiration, execution, last_trade_date)
, by = "expiration"
) %>%
filter(data_date == execution)
# randomly sampling an expiraton and then plotting its execution
# day option prices (I notices a few with some gaps but generally
# speaking it looked fine)
dt_random_exp <-
sample_n(df_chain_desc, 1) %>% .$expiration %>% `[`(1)
df_exec_day_options %>%
filter(expiration == dt_random_exp) %>%
ggplot() +
geom_point(aes(x = strike, y = mid))
dt_random_exp # printing to the screen
## check that you agree with option payoffs ##
# grabs all options on last trade date
df_exp_opt <-
df_opt_hist %>%
left_join(
df_chain_desc %>% select(expiration, execution, last_trade_date)
, by = "expiration"
) %>%
filter(data_date == last_trade_date)
# one off payoff function
# Intrinsic value of an option at expiration.
#   type   - "call" or "put" (any value other than "call" is treated as a
#            put, matching the original if/else)
#   upx    - underlying price at expiration
#   strike - strike price
# Returns max(upx - strike, 0) for calls and max(strike - upx, 0) for puts.
# Rewritten with ifelse()/pmax() so it is vectorized: it still works per-row
# with pmap_dbl() below, but now also accepts whole columns directly.
payoff <- function(type, upx, strike){
  ifelse(type == "call", pmax(upx - strike, 0), pmax(strike - upx, 0))
}
# Recompute each option's payoff from the underlying price at expiration and
# keep only the rows where the quoted mid price agrees with it exactly.
df_exp_opt %>%
mutate(
payoff =
pmap_dbl(
df_exp_opt %>% select(type, upx = underlying_price, strike)
, payoff
)
) %>%
filter(mid == payoff)
# NOTE(review): `mid == payoff` is an exact floating-point comparison;
# consider dplyr::near() if near-misses should also count as agreement.
|
777401b869c38259a8a2cf53c58de9731debf0df | 6878863aa57046c8b183cc3ea976e4b80a051bc3 | /week4/quiz4.R | 07f3864daa7ab84d5661d991a6b502f459576e3f | [] | no_license | Bill1987/Getting-and-Cleaning-Data-Coursera | 6b0042da02861283daae1c611e4cc01b2661f3b6 | 2ec76dfc412818b4509eb56d44146ce5fc406a2d | refs/heads/master | 2020-03-27T04:10:59.267392 | 2018-09-20T03:17:47 | 2018-09-20T03:17:47 | 145,918,667 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,258 | r | quiz4.R | ###----------------------------------------------------------------------------------------------------------------------
###Question 1------Question 1-----------Question 1-------------Question 1-------Question 1
# The American Community Survey distributes downloadable data about United States
# communities. Download the 2006 microdata survey about housing for the state of
# Idaho using download.file() from here:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv
#
# and load the data into R. The code book, describing the variable names is here:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FPUMSDataDict06.pdf
#
# Apply strsplit() to split all the names of the data frame on the characters "wgtp".
# What is the value of the 123 element of the resulting list?
url_Q1 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
# download.file(url_Q1,"Q1.csv" ,"curl")
SurveyData <- read.csv("Q1.csv", stringsAsFactors = FALSE)
dataNames <- names(SurveyData)
# splitting only the 123rd name gives the same answer as splitting all names
# and then taking element 123 of the resulting list
result_Q1 <- strsplit(dataNames[[123]],"wgtp")
print(result_Q1)
# [1] ""   "15"
###----------------------------------------------------------------------------------------------------------------------
###Question 2------Question 2-----------Question 2-------------Question 2-------Question 2
# Load the Gross Domestic Product data for the 190 ranked countries in this data set:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
#
# Remove the commas from the GDP numbers in millions of dollars and average them.
# What is the average?
#
# Original data sources:
#
# http://data.worldbank.org/data-catalog/GDP-ranking-table
url_Q2 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv"
# download.file(url_Q2,"Q2.csv" ,"curl")
# skip the 5 header/banner lines and read exactly the 190 ranked countries
GDP <- read.csv("Q2.csv",skip = 5,nrows = 190, header = FALSE, stringsAsFactors = FALSE)
# clean the data: keep country code, rank, name and GDP value columns
GDP <- GDP[,c(1,2,4,5)]
names(GDP) <- c("CountryCode", "Rank", "Country.Name", "GDP.Value")
# strip thousands separators before the numeric conversion
GDP$GDP.Value <- as.numeric(gsub(",", "",GDP$GDP.Value))
result_Q2 <- mean(GDP$GDP.Value, na.rm = TRUE)
print(result_Q2)
# [1] 377652.4
###----------------------------------------------------------------------------------------------------------------------
###Question 3------Question 3-----------Question 3-------------Question 3-------Question 3
# In the data set from Question 2 what is a regular expression that would allow you to count
# the number of countries whose name begins with "United"? Assume that the variable with the
# country names in it is named countryNames. How many countries begin with United?
countryNames <- GDP$Country.Name
# include "United"
grep("*United",countryNames)
# end with "United"
grep("United$",countryNames)
# begins with "United"
begins <- grep("^United",countryNames)
result_Q3 <- c("grep('^United',countryNames)", length(begins))
print(result_Q3)
# [1] "grep('^United',countryNames)" "3"
###----------------------------------------------------------------------------------------------------------------------
###Question 4------Question 4-----------Question 4-------------Question 4-------Question 4
# Load the Gross Domestic Product data for the 190 ranked countries in this data set:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
#
# Load the educational data from this data set:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv
#
# Match the data based on the country shortcode. Of the countries for which the end
# of the fiscal year is available, how many end in June?
#
# Original data sources:
#
# http://data.worldbank.org/data-catalog/GDP-ranking-table
#
# http://data.worldbank.org/data-catalog/ed-stats
url_Q4 <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv"
# download.file(url_Q4,"Q4.csv" ,"curl")
educational <- read.csv("Q4.csv", stringsAsFactors = FALSE)
# only need part of data
eduNotes <- educational[,c("CountryCode","Special.Notes")]
GDP_EDU <- merge(eduNotes,GDP, by = "CountryCode")
# convert to lower case so the pattern matches regardless of capitalization
result_Q4 <- length( grep("fiscal year end.*june", tolower(GDP_EDU$Special.Notes)) )
print(result_Q4)
# [1] 13
###----------------------------------------------------------------------------------------------------------------------
###Question 5------Question 5-----------Question 5-------------Question 5-------Question 5
# You can use the quantmod (http://www.quantmod.com/) package to get historical stock prices
# for publicly traded companies on the NASDAQ and NYSE. Use the following code to download
# # data on Amazon's stock price and get the times the data was sampled.
#
# library(quantmod)
# amzn = getSymbols("AMZN",auto.assign=FALSE)
# sampleTimes = index(amzn)
#
# How many values were collected in 2012? How many values were collected on Mondays in 2012?
# install.packages("quantmod")
library("quantmod")
amzn = getSymbols("AMZN",auto.assign=FALSE)
sampleTimes = index(amzn)
# NOTE(review): year() comes from lubridate, which is not loaded in this
# script — confirm it is attached elsewhere or add library(lubridate).
result_Q51 <- sum(year(sampleTimes) == 2012)
# weekdays() returns locale-dependent day names; switch to an English locale
# so the "Monday" comparison works (author's system locale is Chinese)
# Sys.setlocale("LC_TIME", "English")
result_Q52 <- sum(year(sampleTimes) == 2012 & weekdays(sampleTimes) == "Monday")
print(c(result_Q51,result_Q52))
# [1] 250  47
|
5776f1f305060b704f7ca0d9a12878c2e735fbc6 | b08b7e3160ae9947b6046123acad8f59152375c3 | /Programming Language Detection/Experiment-2/Dataset/Train/R/time-a-function-2.r | b3436af0b832cb2435a34ca262a2f936f7a738db | [] | no_license | dlaststark/machine-learning-projects | efb0a28c664419275e87eb612c89054164fe1eb0 | eaa0c96d4d1c15934d63035b837636a6d11736e3 | refs/heads/master | 2022-12-06T08:36:09.867677 | 2022-11-20T13:17:25 | 2022-11-20T13:17:25 | 246,379,103 | 9 | 5 | null | null | null | null | UTF-8 | R | false | false | 41 | r | time-a-function-2.r | Rprof()
# foo() executes while Rprof() (started above) samples the call stack;
# foo is the function being profiled and is defined elsewhere.
foo()
# Turn profiling off before reading the results.
Rprof(NULL)
# Summarise the samples collected in the default Rprof.out file.
summaryRprof()
|
0222238f10814faaf94670da190edefc685da2fa | b75de980dc88ca4a9c08352fd883b534638d7563 | /bindings/R/rlibkriging/tests/testthat/test-KrigingPredict.R | 51a1440e3d7476f2a87d0ba8988c84c22039933a | [
"Apache-2.0"
] | permissive | haveneer/libKriging | ecb14838c2bce15818f1a017f6931cfb4b2cd8d6 | d95342483de6bf7095911167b8145ff0bd98ba62 | refs/heads/master | 2022-05-24T22:57:52.082051 | 2022-02-04T16:54:29 | 2022-02-04T16:54:29 | 193,868,887 | 0 | 0 | Apache-2.0 | 2020-07-26T10:05:19 | 2019-06-26T09:02:29 | C++ | UTF-8 | R | false | false | 1,186 | r | test-KrigingPredict.R | library(testthat)
f = function(x) 1-1/2*(sin(12*x)/(1+x)+2*cos(7*x)*x^5+0.7)
#plot(f)
n <- 5
set.seed(123)
X <- as.matrix(runif(n))
y = f(X)
#points(X,y)
k = DiceKriging::km(design=X,response=y,covtype = "gauss",control = list(trace=F))
library(rlibkriging)
r <- Kriging(y,X,"gauss","constant",FALSE,"none","LL",
parameters=list(sigma2=k@covariance@sd2,has_sigma2=TRUE,
theta=matrix(k@covariance@range.val),has_theta=TRUE))
# m = as.list(r)
ntest <- 100
Xtest <- as.matrix(runif(ntest))
ptest <- DiceKriging::predict(k,Xtest,type="UK",cov.compute = TRUE,checkNames=F)
Yktest <- ptest$mean
sktest <- ptest$sd
cktest <- c(ptest$cov)
Ytest <- predict(r,Xtest,TRUE,TRUE)
precision <- 1e-5
test_that(desc=paste0("pred mean is the same that DiceKriging one:\n ",paste0(collapse=",",Yktest),"\n ",paste0(collapse=",",Ytest$mean)),
expect_equal(array(Yktest),array(Ytest$mean),tol = precision))
test_that(desc="pred sd is the same that DiceKriging one",
expect_equal(array(sktest),array(Ytest$stdev) ,tol = precision))
test_that(desc="pred cov is the same that DiceKriging one",
expect_equal(cktest,c(Ytest$cov) ,tol = precision))
|
d438e647d666d6b13d1b07a2ecbcd68f1bc7867a | 186dd33c855dc643aeef3953a373a6d704588cf0 | /man/pp_opts_out.Rd | 869ed7597ab5f7de6385ffce05eb1a22907a7d53 | [
"BSD-2-Clause"
] | permissive | cran/pubprint | 8432d36fc9d0aa8397bbb41d6fae1f189d967f80 | 864a5a20de759dcd50bee84ad44639601a5aadc6 | refs/heads/master | 2021-01-21T14:04:33.481164 | 2016-05-24T10:28:06 | 2016-05-24T10:28:06 | 50,608,880 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 933 | rd | pp_opts_out.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaults.R
\docType{data}
\name{pp_opts_out}
\alias{pp_opts_out}
\title{Output format options for the pubprint package}
\format{A list with a \code{get} and \code{set} function.}
\usage{
pp_opts_out
}
\description{
A list of functions that are used to print in the correct output format
(LaTeX, HTML, Markdown or plain text). If pubprint is running inside
\code{\link[knitr]{knit}} it will automatically determine the output format
from \code{\link[knitr]{knitr}}.
}
\details{
Using \code{pp_opts_out$get()} shows all currently used output format
functions, \code{pp_opts_out$set()} allows to change them.
}
\examples{
pp_opts_out$set(pp_init_out())
pp_opts_out$set(pp_init_out("html"))
}
\seealso{
See \code{\link{pp_init_out}} for initialising this variable in the
correct way and \code{\link{pp_init_style}} for publication style.
}
\keyword{datasets}
|
fda1aee3039996fb960b376630322a95257af67f | e9bd727dd117bbd4df0888a1b83862c7dd5d8fee | /Week 5/code/ANN-concrete.R | c0baefe79825f7b4d5a91d7c20cec3fe285cc4a4 | [] | no_license | cdtan/Pred_Models_git | 3aebf9819bc2771b45521c275a7cc029ba257b80 | afa9b535955547b2b526fce77f3f1b1cd7a1ce22 | refs/heads/master | 2020-04-29T17:19:09.869391 | 2019-03-20T14:06:00 | 2019-03-20T14:06:00 | 176,293,378 | 1 | 0 | null | 2019-03-18T13:35:01 | 2019-03-18T13:35:00 | null | UTF-8 | R | false | false | 3,643 | r | ANN-concrete.R | #A continuous Target
# NOTE(review): a hard-coded absolute setwd() makes this script non-portable;
# prefer relative paths or an RStudio project.
setwd('/Users/mylesgartland/OneDrive - Rockhurst University/Courses/Predictive Models/Pred_Models_git/Week 5/data')
## Step 2: Exploring and preparing the data ----
# read in data and examine structure
concrete <- read.csv("concrete.csv")
str(concrete)
boxplot(concrete)
#custom normalization function
#This is called min/max normalization (vs z-score)
#Normalization by Scaling Between 0 and 1
#Common way for ANN
# Min/max rescaling: maps the smallest value of x to 0 and the largest to 1.
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# apply min/max normalization to entire data frame
#note all values are now between 0 and 1
concrete_norm <- as.data.frame(lapply(concrete, normalize))
boxplot(concrete_norm)
# confirm that the range is now between zero and one
summary(concrete_norm$strength)
# compared to the original minimum and maximum
summary(concrete$strength)
# create training and test data
#Split the dataset into a training and testing sets 70/30
# NOTE(review): this is a positional split, not a random one — it assumes the
# rows of concrete.csv are already in random order.
concrete_train <- concrete_norm[1:773, ]
concrete_test <- concrete_norm[774:1030, ]
## Step 3: Training a model on the data ----
# train the neuralnet model
library(neuralnet)
# simple ANN with only a two hidden neurons
concrete_model_1 <- neuralnet(formula = strength ~ cement + slag +
ash + water + superplastic +
coarseagg + fineagg + age,
data = concrete_train, hidden = 2, algorithm = "rprop+", learningrate=NULL)
#rprop+ is a backpropagation method called resilient backpropagation. It modifies
#its learning rate on the error.
# visualize the network topology
#note the two neurons in the hidden layer
plot(concrete_model_1)
#table of neurons and weights
concrete_model_1$result.matrix
## Step 4: Evaluating model performance ----
# obtain model results
model_results_1 <- compute(concrete_model_1, concrete_test[1:8]) #You are running the test set (the 8 predictor columns) through the ANN model
# obtain predicted strength values
predicted_strength_1 <- model_results_1$net.result #The prediction of each observation
# examine the correlation between predicted and actual values
cor(predicted_strength_1, concrete_test$strength)
#RMSE
sqrt(mean((concrete_test$strength-predicted_strength_1)^2))
## Step 5: Improving model performance ----
# a more complex neural network topology with 5 hidden neurons
concrete_model2 <- neuralnet(strength ~ cement + slag +
ash + water + superplastic +
coarseagg + fineagg + age,
data = concrete_train, hidden = 5,algorithm = "rprop+", learningrate=NULL)
# plot the network
#note 5 neurons in the hidden layer
plot(concrete_model2)
# evaluate the results as we did before
model_results2 <- compute(concrete_model2, concrete_test[1:8])
predicted_strength2 <- model_results2$net.result
cor(predicted_strength2, concrete_test$strength)
predicted_strength2[1:10]
#what do you notice about the values? (they are still on the normalized 0-1 scale)
# Invert the min/max normalization applied above, mapping a value on the
# 0-1 scale back to the original strength units:
#   x_norm * (max - min) + min
# Bug fix: the original misplaced a parenthesis and computed
# x * max(concrete$strength) - min(concrete$strength) inside return(), with
# the trailing "+ min(...)" sitting outside return() and never executed.
# Reads the global `concrete` data frame, like the original.
denormalize <- function(x) {
  rng <- range(concrete$strength)
  x * (rng[2] - rng[1]) + rng[1]
}
#look at predicted vs actual (denormalize maps predictions back to strength units)
accuracy<-data.frame(denormalize(predicted_strength2),concrete$strength[774:1030])
#plot pred vs actual
plot(denormalize(predicted_strength2),concrete$strength[774:1030])
#Model with two hidden layers (5 neurons in the first, 3 in the second)
concrete_model3 <- neuralnet(strength ~ cement + slag +
ash + water + superplastic +
coarseagg + fineagg + age,
data = concrete_train, hidden = c(5,3), algorithm = "rprop+", learningrate=NULL)
plot(concrete_model3)
|
f83fc140cb63c4d3bd3a28e4c7b6f139ebb62064 | 1d6d5a11bd45a0b8ba9f78d0fd4598a735b70c33 | /man/SNPcontam.Rd | 12dd0851f3a854b7c7b15e672c09bc6d6cf92205 | [
"LicenseRef-scancode-public-domain"
] | permissive | eriqande/SNPcontam | 63460083483c66ff5a87fe2aee0a2a5249ac6b45 | 04de02b1415fd1c6612cd7f34c9ce5c552bb97fd | refs/heads/master | 2021-01-10T19:48:04.519930 | 2014-12-18T00:00:44 | 2014-12-18T00:00:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 300 | rd | SNPcontam.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{SNPcontam}
\alias{SNPcontam}
\alias{SNPcontam-package}
\title{Detecting contaminated samples from SNP genotypes}
\description{
\code{SNPcontam} is a package in development.
It is still under active development, so its functionality and interface may change.
}
|
2a6db50aab950346d235d9b415cd2245b2de3703 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/stringi/examples/stri_trans_char.Rd.R | 58a604db2953f8f1fd15260da84753a017fd302f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 201 | r | stri_trans_char.Rd.R | library(stringi)
### Name: stri_trans_char
### Title: Translate Characters
### Aliases: stri_trans_char
### ** Examples
stri_trans_char("id.123", ".", "_")
stri_trans_char("babaab", "ab", "01")
|
d52f5397233e8d4625c821d09e2d8275afe956cb | 16dbd4a2054a2bdc5fd1e49552d283fe21fc0cc2 | /R/utils.R | a5fe0158379ea85b2cc621e4f2f6a3d2c1de0f2a | [
"MIT"
] | permissive | RLesur/pagedreport | 360d7efcde777db5bcc6311fd41bfa064d1ac3cc | bbfac117c4f5933508c8f12f18a8601de9c5e7b4 | refs/heads/main | 2023-03-04T06:27:30.558079 | 2021-01-19T14:56:58 | 2021-01-19T14:56:58 | 331,128,926 | 2 | 0 | NOASSERTION | 2021-01-19T22:40:03 | 2021-01-19T22:40:02 | null | UTF-8 | R | false | false | 108 | r | utils.R | pkg_resource <- function(...) {
system.file("resources", ..., package = "pagedreport", mustWork = TRUE)
}
|
d784adb735c80306ddb6d9cf0c58f48385f84363 | afd52451e8845963de4ad1243005834fa0958beb | /sta_r/sta_factanal.R | 74f8154a2eb670b5936991b1fac9dff8b68afd08 | [] | no_license | plus4u/R | 7c0d867767ae948b24a15322df11b500abcfd920 | c8c25313567bd8bcf5142a04187a24e0d5ad12d1 | refs/heads/master | 2021-09-19T13:52:40.115595 | 2021-08-11T06:47:22 | 2021-08-11T06:47:22 | 155,179,952 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,712 | r | sta_factanal.R | ## factor analysis : 1. factanal, 2. factor.pa( ) function in the psych package
# Maximum Likelihood Factor Analysis
# entering raw data and extracting 3 factors,
# with varimax rotation
## Determining the Number of Factors to Extract : library(psych)- plot or plotnScree
# Determine Number of Factors to Extract
# library(nFactors)
# ev <- eigen(cor(mydata)) # get eigenvalues
# ap <- parallel(subject=nrow(mydata),var=ncol(mydata), rep=100,cent=.05)
# nS <- nScree(x=ev$values, aparallel=ap$eigen$qevpea)
# plotnScree(nS)
str(pca_xls)
dat <- pca_xls[,2:16]
e <- eigen(cor(dat)) #solving for the eigenvalues and eigenvectors from the correlation matrix
L <- e$values # eigenvalues, used for the scree plot below
plot(L,main="Scree Plot",ylab="Eigenvalues",xlab="Component number",type='b')
abline(h=1, lty=2) # Kaiser criterion: retain factors with eigenvalue > 1
#
# rotate can "none", "varimax", "quartimax", "promax", "oblimin", "simplimax", or "cluster"
fit1 <- factanal(dat, 5, rotation="varimax")
# to use oblimin
# NOTE(review): install.packages() inside a script reinstalls on every run;
# better to install once interactively.
install.packages("GPArotation")
library(GPArotation)
library(psych)
## https://www.rdocumentation.org/packages/psych/versions/1.8.10/topics/fa
fit2 <- fa(r = cor(dat), nfactors = 5, rotate = "oblimin", fm = "pa") # correlation or covariance matrix or
fit2 # spss very similar
fit1
print(fit2, digits=2, cutoff=.3, sort=TRUE)
## eo 1
# plot factor 1 by factor 2
# NOTE(review): `fit` is not defined until the factor.pa() call further below,
# so this line errors as written — presumably fit1 or fit2 was intended.
load <- fit$loadings[, 1:5] # in case of 2 factor is ok, but 5 ?
plot(load,type="n") # set up plot
text(load,labels=names(dat),cex=.7) # add variable names
# Structural Equation Modeling : sem package
# 2.
library(psych)
fit <- factor.pa(mydata, nfactors=3, rotation="varimax")
fit # print results
c5342cea52d1ec0a43897df41676c8507a41127e | 969d4316ad794a0eef0213b01a7b06ddfdf8d90d | /14_performance/01_microbenchmark/exercise3.r | dce91768105276fec1e711946c051e5b80e8fe10 | [] | no_license | Bohdan-Khomtchouk/adv-r-book-solutions | adaa5b5f178999d130aff1359a23e978e39e86ae | e1b3a63c0539de871728b522604110c0aa18c7d1 | refs/heads/master | 2021-01-22T00:36:17.450660 | 2015-12-06T02:54:02 | 2015-12-06T02:54:02 | 47,481,353 | 1 | 1 | null | 2015-12-06T02:49:46 | 2015-12-06T02:49:46 | null | UTF-8 | R | false | false | 1,655 | r | exercise3.r | ### Use microbenchmarking to rank the basic arithmetic operators (+, -, *, /,
### and ^) in terms of their speed. Visualise the results. Compare the speed of
### arithmetic on integers vs. doubles.
microbenchmark::microbenchmark(2 + 2, 2 - 2, 2 * 2, 2 / 2, 2 ^ 2)
# Unit: nanoseconds
# expr min lq mean median uq max neval
# 2 + 2 81 95.0 155.02 98 145.5 3706 100
# 2 - 2 82 92.5 128.82 97 137.0 1061 100
# 2 * 2 82 94.0 118.19 96 141.0 419 100
# 2/2 80 93.0 120.65 97 143.0 384 100
# 2^2 136 142.0 233.08 147 232.0 4764 100
microbenchmark::microbenchmark(2 + 2, 2.0 + 2.1)
# Unit: nanoseconds
# expr min lq mean median uq max neval
# 2 + 2 74 80 133.33 130.5 145 457 100
# 2 + 2.1 73 79 169.88 133.0 157 3630 100
microbenchmark::microbenchmark(2 - 2, 2.0 - 2.1)
# Unit: nanoseconds
# expr min lq mean median uq max neval
# 2 - 2 73 80 162.64 135 143 3988 100
# 2 - 2.1 74 78 136.17 132 146 434 100
microbenchmark::microbenchmark(2 * 2, 2.0 * 2.1)
# Unit: nanoseconds
# expr min lq mean median uq max neval
# 2 * 2 75 81 134.22 137 149.5 481 100
# 2 * 2.1 76 82 163.48 139 149.5 2697 100
microbenchmark::microbenchmark(2 / 2, 2.0 / 2.1)
# Unit: nanoseconds
# expr min lq mean median uq max neval
# 2/2 75 79 125.60 122.5 145.0 419 100
# 2/2.1 75 80 171.74 136.0 163.5 3494 100
microbenchmark::microbenchmark(2 ^ 2, 2.0 ^ 2.1)
# Unit: nanoseconds
# expr min lq mean median uq max neval
# 2^2 117 132.0 237.72 183 221.5 4587 100
# 2^2.1 144 159.5 227.71 202 249.5 1290 100
|
cca464d1480f1e034dd796f614d0944510689235 | 5f8a241f0e6d5c0ebcdef4bc54dd45ace52a32f0 | /Plot3.r | 715ed804263fd26dac8af7d4496cd62e9694f9b5 | [] | no_license | Shayak94/ExData_Plotting1 | e4505f094db7608d9907a11fbce338cbf054b705 | 193706995ffe3cd1279c57df3d92fea9822a96ec | refs/heads/master | 2021-01-18T14:44:38.385605 | 2015-05-09T18:51:46 | 2015-05-09T18:51:46 | 31,761,525 | 0 | 0 | null | 2015-03-06T09:14:28 | 2015-03-06T09:14:28 | null | UTF-8 | R | false | false | 1,231 | r | Plot3.r | ?parcheck<-function(a){ ##checking for missing value,function to remove "?"
if(a=="?"){
a<-NA
}
}
ds<-read.table("household_power_consumption.txt",sep=";") ##Reading and subsetting
ds[,1]<-as.Date(ds[,1],"%d/%m/%Y")
subs<-subset(ds,ds[,1]=="2007-2-1" | ds[,1]=="2007-2-2")
# Replace the "?" placeholders in the measurement columns (2:9) with NA.
# Bug fix: the original loop called an undefined function check() (the helper
# above is named parcheck) and discarded its return value, so it crashed and
# would not have cleaned anything anyway. This vectorized replacement also
# drops the hard-coded assumption of exactly 2880 rows.
for(i in 2:9){
subs[!is.na(subs[,i]) & subs[,i]=="?", i] <- NA
}
GAP<-as.numeric(as.character(subs[,3])) ##Converting factors to numerics
dnt<-paste(subs[,1],subs[,2],sep=" ") ##combining date and time values
DnT<-as.POSIXlt(dnt) ##converting to standard format
subs[,7]<-as.numeric(as.character(subs[,7]))
subs[,8]<-as.numeric(as.character(subs[,8]))
subs[,9]<-as.numeric(as.character(subs[,9]))
png(file="Plot3.png",width=480,height=480)
plot(DnT,subs[,7],col="black",type="l",xlab="",ylab="Energy sub metering")
points(DnT,subs[,8],col="Red",type="l")
points(DnT,subs[,9],col="Blue",type="l")
legend("topright",legend=c("sub_metering_1","sub_metering_2","sub_metering_3"),col=c("Black","Red","Blue"),lwd=2)
dev.off() |
3f42fa532f0a02e6634b3d91d1b6962f79cfaa41 | f2d0e19b55cb262c1e76dad1de21557a8f6640d1 | /govWebsitesIndiana2015.R | 06f56aa9e506ca863cd3b3a775ab0eaf3804f1d6 | [] | no_license | desmarais-lab/govWebsites | e405989a82374832e9715e08b785b13b34a85493 | e22e3ef38d7f986c7332a6d0d04d80e8a1b0edef | refs/heads/master | 2021-12-23T11:43:47.803689 | 2019-05-06T20:35:53 | 2019-05-06T20:35:53 | 80,395,954 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,680 | r | govWebsitesIndiana2015.R | library("dplyr")
library("tidyr")
#2015
mIN15 <- read.csv("data/mayorIN2015.csv", header = F)
mIN15$V1 <- as.character(mIN15$V1)
mIN15$V2 <- as.character(mIN15$V2)
mIN15$V3 <- as.character(mIN15$V3)
#clean up data - put city name, candidate name and votes in the correct column
for(i in 1:nrow(mIN15)){
if(is.na(mIN15$V3[i])==T){
mIN15$V3[i] <- mIN15$V2[i]
mIN15$V2[i] <- mIN15$V1[i]
mIN15$V1[i] <- mIN15$V1[i-1]
}
}
mIN15$V3 <- as.numeric(mIN15$V3)
#extract party
library(stringr)
mIN15$V4 <- str_extract(mIN15$V2,"\\((.+?)\\)$")
mIN15$V5 <- str_extract(mIN15$V4," \\((.+?)\\)$")
mIN15$V5 <- str_trim(mIN15$V5)
mIN15$V4[is.na(mIN15$V5)==F] <- mIN15$V5[is.na(mIN15$V5)==F]
#mIN15$V4 <- gsub("log\\(", "", mIN15$V4)
mIN15$V4 <- gsub("(", "", mIN15$V4, fixed="TRUE")
mIN15$V4 <- gsub(")", "", mIN15$V4, fixed="TRUE")
mIN15$V5 <- 2015
#2011
mIN11 <- read.csv("data/mayorIN2011compatibility.csv", header = T)
mIN11$District <- as.character(mIN11$District)
for(i in 1:nrow(mIN11)){
if(mIN11$District[i]==""){
mIN11$District[i] <- mIN11$District[i-1]
}
}
#extract party
library(stringr)
mIN11$Party <- str_extract(mIN11$Candidate,"\\((.+?)\\)$")
mIN11$Party2 <- str_extract(mIN11$Party,"\\s\\((.+?)\\)$")
mIN11$Party2 <- str_trim(mIN11$Party2)
mIN11$Party[is.na(mIN11$Party2)==F] <- mIN11$Party2[is.na(mIN11$Party2)==F]
#mIN11$Party <- gsub("log\\(", "", mIN11$Party)
mIN11$Party <- gsub("(", "", mIN11$Party, fixed="TRUE")
mIN11$Party <- gsub(")", "", mIN11$Party, fixed="TRUE")
names(mIN11)[5] <- "Year"
mIN11$Year <- 2011
#rename 2015
names(mIN15) <- names(mIN11)
mIN15 <- subset(mIN15, select = -Candidate)
mIN15 <- subset(mIN15, Party %in% c("Democratic","Republican"))
mIN15$District <- factor(mIN15$District)
mIN15 <- mIN15 %>% spread(Party, Votes)
mIN15$Democratic[is.na(mIN15$Democratic)==T] <- -1
mIN15$Republican[is.na(mIN15$Republican)==T] <- -1
mIN15$winner <- names(mIN15)[3:4][max.col(mIN15[,3:4])]
#do the same for 2011
mIN11 <- subset(mIN11, select = -Candidate)
mIN11 <- subset(mIN11, Party %in% c("Democratic","Republican"))
mIN11$District <- factor(mIN11$District)
mIN11 <- mIN11 %>% spread(Party, Votes)
mIN11$Democratic[is.na(mIN11$Democratic)==T] <- -1
mIN11$Republican[is.na(mIN11$Republican)==T] <- -1
mIN11$winner <- names(mIN11)[3:4][max.col(mIN11[,3:4])]
mIN <- rbind(mIN11,mIN15)
mIN <- mIN[order(mIN$District,mIN$Year),]
mIN <- mIN[-c(237:239),] #remove last 3 because they dont appear in 2011
mIN$control_change <- 0
for(i in 2:nrow(mIN)){
if(mIN$winner[i]!=mIN$winner[i-1])
mIN$control_change[i] <- 1
}
mIN$control_change[mIN$Year==2011] <- NA
#party control changed
table(mIN$control_change) #35 out of 118 times
table(mIN$control_change)[2]/sum(table(mIN$control_change)) #29.66%
#set -1s back to NA
mIN$Democratic[mIN$Democratic==-1] <- NA
mIN$Republican[mIN$Republican==-1] <- NA
#mIN <- mIN[mIN$Year==2015,]
#save election data
save(mIN, file="data/indianaElections2015.rdata")
#load websites data
load("data/govWebsitesVerifiedCensus.Rdata")
#only Indiana
data9 <- subset(data9,State=="IN")
#check overlap
match(data9$City,mIN$District)
#how many matches?
length(match(data9$City,mIN$District)[is.na(match(data9$City,mIN$District))==F]) #17
#names of matches
mIN$District[match(data9$City,mIN$District)[is.na(match(data9$City,mIN$District))==F]]
#merge
combined <- merge(mIN,data9,by.x = "District", by.y = "City", all.x = F, all.y = F)
#only 2015
indiana <- subset(combined, Year==2015)
#save
save(indiana, file="data/indiana2015.rdata")
load(file="data/indiana2015.rdata")
#correction to indianapolis
indiana$redirect[indiana$District=="Indianapolis"] <- "http://www.indy.gov"
#
indiana.table <- subset(indiana, select=c("District","Democratic","Republican",
"winner","control_change","POPESTIMATE2015",
"redirect"))
names(indiana.table) <- c("City","DemVotes","RepVotes",
"Winner","Change","Pop15",
"url")
require(xtable)
print(xtable(indiana.table, caption="", digits = 0), include.rownames = F)
### pull websites from wayback machine
library(jsonlite)
API_base <- 'http://archive.org/wayback/available?url='
test <- indiana$redirect
#set up folders
setwd("./websites/")
system("mkdir oct15")
system("mkdir jan16")
setwd("./jan16/")
#loop through websites, results automatically get saved into 'websites' folder inside wd
for (i in 1:length(test)){
website <- test[i] #loop through websites
#the following three lines aren't actually needed when using the Ruby package
API_URL <- paste(API_base,website,sep = "")
wayback <- fromJSON(API_URL)
waybackURL <- wayback$archived_snapshots$closest$url
#pasting input for Ruby package, then executing it
#--concurrency 20 causes 20 items to be downloaded at the same time
#the default is 1, this takes WAY too long (i.e. one hour for a website...)
#--from 201510 downloads a snapshot from October 2015, or, if not available, later
#The mayoral elections in IN happened on November 3
WBMD_base <- "wayback_machine_downloader --concurrency 40 --from 201601"
#WBMD_base <- "wayback_machine_downloader --concurrency 40 --from 201510"
WBMD_site <- paste(WBMD_base,website)
system(WBMD_site, intern = T) #just ignore the printout if running outside of loop
}
#setwd("D:/Dropbox/4_RA/govWebsites/websites/pdfsfolder/")
list.files(path='websites/frankfort-in.gov/', pattern='*.pdf', recursive=T)
|
45e09b000caf9e97a3bda4f4028604bc747f2968 | 384e46729e7f151b5239e38b9ee8cf0b8fb8d477 | /1-birthday-problem.R | 2e53a6feb3a5193c6d76fec4452163faa097127f | [] | no_license | Vassar-COGS-319/lab-2-jodeleeuw | af03002c4de58385b7660134d6b15eaec14f08de | a348bc6ca82f32bdc407a18be11531323167b5c8 | refs/heads/master | 2020-07-24T22:48:24.635160 | 2019-09-12T17:43:50 | 2019-09-12T17:43:50 | 208,074,525 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,738 | r | 1-birthday-problem.R | # part 1 ####
# suppose there are 30 students in a class. estimate the probability that at least one pair
# students has the same birthday.
# you can ignore the possibility of people being born on Feb 29 during a leap-year.
# first, i'll write a function to generate a new class and return whether there are people
# with the same birthday
# Simulate one class of `class.size` students and report whether at least
# one pair of them shares a birthday (leap years ignored).
#
# Args:
#   class.size: number of students in the simulated class.
#
# Returns:
#   TRUE if any birthday occurs more than once, FALSE otherwise
#   (FALSE for class.size <= 1, matching the original loop's behaviour).
new.class <- function(class.size){
  # since there are 365 days in a year, birthdays are uniform draws
  # from the integers 1:365
  birthdays <- sample(1:365, class.size, replace=TRUE)
  # duplicated() flags every repeated value, so any() is TRUE exactly when
  # some pair of students shares a birthday.  This replaces the original
  # O(n^2) loop of pairwise match counts with a single O(n) pass.
  any(duplicated(birthdays))
}
# to estimate the probability for a class of size 30, we need to run the function many times
results <- replicate(10000, new.class(30))
sum(results) / length(results)
# the estimated probability is around 70%
# part 2 ####
# estimate the probability for class sizes from 5-60, and plot the resulting curve
# (x axis is class size, y axis is probability of at least one shared birthday)
# let's start by making an array to hold the class size:
class.size <- 5:60
# now we need to run a function for each element of this array. there are lots of
# ways to do this.
probabilities <- sapply(class.size, function(s){
res <- replicate(10000, new.class(s))
return(sum(res) / length(res))
})
# make the plot!
plot(class.size, probabilities, type="o")
|
05a8c7e0c99e98b214cf5eb026cdd11c99872b9f | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-43/tlc02-nonuniform-depth-43.R | d48969370f7327c4608400df43e3403ec9939112 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 78 | r | tlc02-nonuniform-depth-43.R | a8f52439a6d8f0c806af0954ca85a093 tlc02-nonuniform-depth-43.qdimacs 10077 26568 |
3b1b056eb760b5166124d10a8ba0e6a09c5a88b0 | b86d626d101e2a75696a08a04569e6be44e03b1f | /code/23_check_assumptions_v0.1.R | 100148ed7db9e73c4ddcaf89d75d6eec4a86fe9e | [
"CC-BY-4.0"
] | permissive | GitTFJ/carnivore_trends | d95a862ffd96122b2357fbf6d571abf9cc7030f3 | f7e423d7c2b51c3cb66567e8026db219ed49c579 | refs/heads/main | 2023-04-12T22:50:25.990602 | 2022-11-21T21:06:28 | 2022-11-21T21:06:28 | 555,432,324 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,590 | r | 23_check_assumptions_v0.1.R | mod = readRDS("Data/Analysis/full_mod_test4.rds")
mod.mcmc = as.mcmc(mod)
converge = mod.mcmc
for(a in 1:nchain(converge)){
trim = converge[[a]]
trim = trim[,c(grep(
paste("alpha",
"beta",
"sd.alpha",
"sd.gamma",
"sd.reg",
"sd.cou",
"sd.gen",
"sd.spec",
"sd.mod",
"p.select",
"p.int"
, sep = "|"),
colnames(trim)))]
converge[[a]] = trim
}
pdf("Results/convergence.pdf")
plot(converge)
dev.off()
gelman.diag(converge)
merged.chains = do.call(rbind, mod.mcmc)
pred = as.data.frame(merged.chains[,grepl("mod", colnames(merged.chains))])
pred = pred[,-c(grep("sd.mod", colnames(pred)))]
true = as.data.frame(merged.chains[,grepl("pt", colnames(merged.chains))])
true = true[,-c(grep("pt.pred", colnames(true)))]
sim = as.data.frame(merged.chains[,grepl("pt.pred", colnames(merged.chains))])
res = true - pred
df = data.frame(
p.l = apply(pred, 2, function(x) quantile(x,probs = 0.025)),
p.m = apply(pred, 2, function(x) quantile(x,probs = 0.5)),
p.u = apply(pred, 2, function(x) quantile(x,probs = 0.975)),
t.l = apply(true, 2, function(x) quantile(x,probs = 0.025)),
t.m = apply(true, 2, function(x) quantile(x,probs = 0.5)),
t.u = apply(true, 2, function(x) quantile(x,probs = 0.975)),
s.l = apply(sim, 2, function(x) quantile(x,probs = 0.025)),
s.m = apply(sim, 2, function(x) quantile(x,probs = 0.5)),
s.u = apply(sim, 2, function(x) quantile(x,probs = 0.975)),
r.l = apply(res, 2, function(x) quantile(x,probs = 0.025)),
r.m = apply(res, 2, function(x) quantile(x,probs = 0.5)),
r.u = apply(res, 2, function(x) quantile(x,probs = 0.975))
)
df$id = rownames(df)
Values = data.frame(id = paste("mod[", 1:length(jagsdata_lag10$pt), "]", sep = ""))
Values$Code = c(rep("Quantitative", (min(which(TrendsTrim_lag10$QualitativeStable == 1)) - 1)),
rep("Qualitative: Stable", (max(which(TrendsTrim_lag10$QualitativeStable == 1)) - (min(which(TrendsTrim_lag10$QualitativeStable == 1)) - 1))),
rep("Qualitative: Decrease", (max(which(TrendsTrim_lag10$QualitativeDecrease == 1)) - (min(which(TrendsTrim_lag10$QualitativeDecrease == 1)) - 1))),
rep("Qualitative: Increase", (max(which(TrendsTrim_lag10$QualitativeIncrease == 1)) - (min(which(TrendsTrim_lag10$QualitativeIncrease == 1)) - 1))))
Values$wt = TrendsTrim_lag10$abs_weight
df = left_join(df, Values)
a = ggplot(df[which(df$Code == "Quantitative"),]) +
geom_jitter(aes(x = t.m-p.m, y = p.m), alpha = 0.2, width = 0.1, height = 0.1) +
theme_classic() +
labs(x = "Residual annual rate\nof change (%) ihs transformed", y = "Predicted annual rate\nof change (%) ihs transformedd")
a
ac = cbind(data.frame(id = paste("mod[", 1:length(jagsdata_lag5$pt), "]", sep = "")),
TrendsTrim_lag5[,c("Longitude", "Latitude", "Species")])
ac = left_join(ac, df[,c("id", "t.m", "p.m")])
geo = as.matrix(dist(cbind(ac$Longitude, ac$Latitude)))
geo = 1/geo
diag(geo) = 0
geo[is.infinite(geo)] <- 0
vg = variog(coords = ac[,2:3], data = ac$t.m - ac$p.m)
vg = data.frame(distance = vg$u, vari = vg$v)
b = ggplot() +
geom_smooth(data = vg, aes(x = distance, y = vari)) +
geom_point(data = vg, aes(x = distance, y = vari)) +
scale_y_continuous(limits = c(0,7)) +
labs(x = "Distance\n (decimal degrees)", y = "Semivariance", title = paste(" Moran's autocorrelation p-value:", round(ape::Moran.I(ac$t.m - ac$p.m, geo)$p.value,2))) +
theme_classic()
b
PrunedTree = readRDS("PrunedTree.rds")
p.ac = as.data.frame(ac %>%
group_by(Species) %>%
dplyr::summarise(r = mean(t.m-p.m)))
p.x = as.matrix(p.ac$r)
rownames(p.x) = p.ac$Species
psig = phylosig(tree = PrunedTree,
x = p.x,
method = "lambda",
test = T)
p.x = data.frame(id = p.ac$Species, res = p.ac$r, stringsAsFactors = F)
PrunedTree = drop.tip(PrunedTree,PrunedTree$tip.label[-match(p.x$id, PrunedTree$tip.label)])
p = ggtree(PrunedTree)+
theme_tree2()
c = facet_plot(p,
panel = "Trend",
data = p.x,
geom=geom_barh,
mapping = aes(x = res),
stat = "identity") +
labs(x = " Millions of years Residual annual rate of change (%) ihs transformed")
c = facet_labeller(c, c(Tree = "Phylogeny"))
c
jpeg("Results/assumption_plot.jpeg", width = 8, height = 6, units = "in", res = 300)
ggarrange(ggarrange(a,b, ncol = 2, labels = c("a", "b")), c, nrow = 2, labels = c(" ", "c\n\n\n"))
dev.off()
a = ggplot(df[which(df$Code == "Quantitative"),]) +
geom_point(aes(x = t.m, y = p.m), alpha = 0.2) +
coord_cartesian(ylim = c(-5,5), xlim = c(-5,5)) +
theme_classic() +
labs(x = "Observed annual rate of change (%)\nInverse hyperbolic sine transformed", y = "Predicted annual rate of change (%)\nInverse hyperbolic sine transformed")
a
df2 = df
df2$Code = factor(df2$Code, levels = c("Qualitative: Decrease", "Qualitative: Stable", "Qualitative: Increase"))
b = ggplot(df2[which(df2$Code != "Quantitative"),]) +
geom_pointrange(aes(x = t.m, y = p.m, ymin = p.l, ymax = p.u), alpha = 0.3, colour = "grey") +
geom_hline(aes(yintercept = 0)) +
coord_cartesian(ylim = c(-5, 5)) +
theme_classic() +
facet_grid(~Code, scales = "free_x") +
labs(x = "Quasi-observed annual rate of change (%)\nInverse hyperbolic sine transformed",
y = " ")
b
bpval <- mean(df[which(df$Code == "Quantitative"),]$s.m > df[which(df$Code == "Quantitative"),]$t.m)
c = ggplot(df[which(df$Code == "Quantitative"),]) +
geom_density(aes(x = t.m), fill = "grey", alpha = 0.4) +
geom_density(aes(x = s.m), fill = "blue", alpha = 0.2, linetype = "dashed") +
theme_classic() +
labs(x = "Annual rate of change (%)\nInverse hyperbolic sine transformed",
y = "Density") +
xlim(-10,10)
c
bpval <- mean(df[which(grepl("Qualitative", df$Code)),]$s.m > df[which(grepl("Qualitative", df$Code)),]$t.m)
d = ggplot(df2[which(df2$Code != "Quantitative"),]) +
geom_density(aes(x = t.m), fill = "grey", alpha = 0.4) +
geom_density(aes(x = s.m), fill = "blue", alpha = 0.2, linetype = "dashed") +
theme_classic() +
facet_grid(~Code, scales = "free_x") +
labs(x = "Annual rate of change (%)\nInverse hyperbolic sine transformed",
y = " ")
d
jpeg("Results/posterior_check_plot.jpeg", width = 11, height = 8, units = "in", res = 300)
ggarrange(a,b,c,d, nrow = 2, ncol = 2, labels = c("a", "b", "c", "d"), widths = c(1,1.5))
dev.off()
|
7feb0cef6dac4df986fa1e6825546913b5281612 | 82ac620742e3fed4fcb08746269ca703509e8761 | /graphs/fromRCyjs/test_utils.R | 76c6b4a7606175dbe714c6b002e4109e6ecfad0f | [
"Apache-2.0"
] | permissive | PriceLab/STP | 6d2966dcbbb92a0403c22ca76944aa8e79a0ea47 | a63994b5e636ad6817465b43c0f680b7401906eb | refs/heads/master | 2020-03-22T02:00:11.456395 | 2018-11-16T21:18:46 | 2018-11-16T21:18:46 | 139,343,766 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,332 | r | test_utils.R | library(RUnit)
source("~/github/STP/graphs/fromRCyjs/utils.R")
library(graph)
library(jsonlite)
#------------------------------------------------------------------------------------------------------------------------
printf <- function(...) print(noquote(sprintf(...)))
# Replace every double quote in `s` with a single quote (vectorised over s).
cleanup <- function(s) chartr('"', "'", s)
#------------------------------------------------------------------------------------------------------------------------
# use ~/github/projects/examples/cyjsMinimal/cyjs.html to test out json strings produced here
#------------------------------------------------------------------------------------------------------------------------
if(!exists("g.big")){
load(system.file(package="RCyjs", "extdata", "graph.1669nodes_3260edges_challenge_for_converting_to_json.RData"))
g.big <- g.lo
}
if(!exists("g.small")){
print(load(system.file(package="RCyjs", "extdata", "graph.11nodes.14edges.RData")))
g.small <- g
}
#------------------------------------------------------------------------------------------------------------------------
runTests <- function()
{
test_1_node()
test_1_node_with_position()
test_2_nodes()
test_2_nodes_1_edge()
test_1_node_2_attributes()
test_2_nodes_1_edge_2_edgeAttribute()
test_smallGraphWithAttributes()
test_2_nodes_2_edges_no_attributes()
test_20_nodes_20_edges_no_attributes()
test_200_nodes_200_edges_no_attributes()
test_2000_nodes_2000_edges_no_attributes()
test_1669_3260()
} # runTests
#------------------------------------------------------------------------------------------------------------------------
# Build a reproducible random directed graph with `nodeCount` nodes and
# exactly `edgeCount` edges, returned as a graphNEL object (graph package).
# NOTE(review): set.seed(13) is called unconditionally, so this helper
# resets the global RNG state as a side effect -- every call with the same
# arguments yields the same topology.
createTestGraph <- function(nodeCount, edgeCount)
{
   # start from an all-zero adjacency matrix (no edges)
   adjacency <- matrix(0, nrow=nodeCount, ncol=nodeCount)
   set.seed(13);
   # drop edgeCount 1s at distinct random positions; linear indexing fills
   # the matrix column-major, exactly like the original vector/matrix() route
   adjacency[sample(seq_len(nodeCount^2), edgeCount)] <- 1
   gam <- graphAM(adjMat=adjacency, edgemode="directed")
   # convert the adjacency-matrix representation to a node/edge-list graph
   as(gam, "graphNEL")
} # createTestGraph
#----------------------------------------------------------------------------------------------------
test_1669_3260 <- function(display=FALSE)
{
printf("--- test_1669_3260")
g.json <- .graphToJSON(g.small)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
checkEquals(lapply(g2$elements, dim), list(nodes=c(11, 27), edges=c(14,4)))
system.time( # < 14 seconds elapsed: 1669 nodes, 3260 edges
g.json <- .graphToJSON(g.big)
)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
checkEquals(lapply(g2$elements, dim), list(nodes=c(1669, 83), edges=c(3260, 4)))
} # test_1669_3260
#------------------------------------------------------------------------------------------------------------------------
test_2_nodes_2_edges_no_attributes <- function(display=FALSE)
{
printf("--- test_2_nodes_2_edges_no_attributes")
g <- createTestGraph(2, 2)
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
tbl.edges <- g2$elements$edges
checkEquals(dim(tbl.edges), c(2, 3))
} # test_2_nodes_2_edges_no_attributes
#------------------------------------------------------------------------------------------------------------------------
test_20_nodes_20_edges_no_attributes <- function(display=FALSE)
{
printf("--- test_20_nodes_20_edges_no_attributes")
g <- createTestGraph(20, 20)
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
tbl.edges <- g2$elements$edges
checkEquals(dim(tbl.edges), c(20, 3))
} # test_2_nodes_2_edges_no_attributes
#------------------------------------------------------------------------------------------------------------------------
test_200_nodes_200_edges_no_attributes <- function(display=FALSE)
{
printf("--- test_200_nodes_200_edges_no_attributes")
g <- createTestGraph(200, 200)
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
tbl.edges <- g2$elements$edges
checkEquals(dim(tbl.edges), c(200, 3))
} # test_200_nodes_200_edges_no_attributes
#------------------------------------------------------------------------------------------------------------------------
test_2000_nodes_2000_edges_no_attributes <- function(display=FALSE)
{
printf("--- test_2000_nodes_2000_edges_no_attributes")
print(system.time({ # 4 seconds
g <- createTestGraph(2000, 2000)
g.json <- .graphToJSON(g)
}))
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
tbl.edges <- g2$elements$edges
checkEquals(dim(tbl.edges), c(2000, 3))
} # test_2000_nodes_2000_edges_no_attributes
#------------------------------------------------------------------------------------------------------------------------
test_1_node <- function(display=FALSE)
{
printf("--- test_1_node")
g <- graphNEL(nodes="A", edgemode="directed")
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
} # test_1_node
#------------------------------------------------------------------------------------------------------------------------
test_1_node_with_position <- function(display=FALSE)
{
printf("--- test_1_node_with_position")
g <- graphNEL(nodes="A", edgemode="directed")
nodeDataDefaults(g, "xPos") <- 0
nodeDataDefaults(g, "yPos") <- 0
nodeData(g, n="A", "xPos") <- pi
nodeData(g, n="A", "yPos") <- cos(pi)
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
checkEqualsNumeric(tbl.nodes$data.xPos, 3.1416, tol=1e-4)
checkEquals(tbl.nodes$position.x, 3.1416, tol=1e-4)
checkEqualsNumeric(tbl.nodes$data.yPos, -1, tol=1e-4)
checkEquals(tbl.nodes$position.y, -1, tol=1e-4)
} # test_1_node_with_position
#------------------------------------------------------------------------------------------------------------------------
test_2_nodes <- function(display=FALSE)
{
printf("--- test_2_nodes")
g <- graphNEL(nodes=c("A", "B"), edgemode="directed")
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
} # test_2_nodes
#------------------------------------------------------------------------------------------------------------------------
test_2_nodes_1_edge <- function(display=FALSE)
{
printf("--- test_2_nodes_1_edge")
g <- graphNEL(nodes=c("X", "Y"), edgemode="directed")
g <- addEdge("X", "Y", g);
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
# flatten: automatically ‘flatten’ nested data frames into a single non-nested data frame
g2 <- fromJSON(g.json, flatten=TRUE)
checkEquals(names(g2$elements), c("nodes", "edges"))
tbl.nodes <- g2$elements$nodes
checkEquals(dim(tbl.nodes), c(2,1))
checkEquals(tbl.nodes$data.id, c("X", "Y"))
tbl.edges <- g2$elements$edges
checkEquals(dim(tbl.edges), c(1,3))
checkEquals(tbl.edges$data.id, "X->Y")
} # test_2_nodes_1_edge
#------------------------------------------------------------------------------------------------------------------------
test_1_node_2_attributes <- function(display=FALSE)
{
printf("--- test_1_node_2_attributse")
g <- graphNEL(nodes="A", edgemode="directed")
nodeDataDefaults(g, "size") <- 0
nodeData(g, "A", "size") <- 99
nodeDataDefaults(g, "label") <- ""
nodeData(g, "A", "label") <- "bigA"
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
tbl.nodes <- g2$elements$nodes
checkEquals(tbl.nodes$data.id, nodes(g))
checkEquals(tbl.nodes$data.size, 99)
checkEquals(tbl.nodes$data.label, "bigA")
} # test_1_node_2_attributes
#------------------------------------------------------------------------------------------------------------------------
test_2_nodes_1_edge_2_edgeAttribute <- function(display=FALSE)
{
printf("--- test_2_nodes_2_edgeAttributes")
g <- graphNEL(nodes=c("X", "Y"), edgemode="directed")
g <- addEdge("X", "Y", g);
edgeDataDefaults(g, "weight") <- 0
edgeDataDefaults(g, "edgeType") <- "generic"
edgeData(g, "X", "Y", "weight") <- 1.234
edgeData(g, "X", "Y", "edgeType") <- "regulates"
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
# flatten: automatically ‘flatten’ nested data frames into a single non-nested data frame
g2 <- fromJSON(g.json, flatten=TRUE)
checkEquals(names(g2$elements), c("nodes", "edges"))
tbl.nodes <- g2$elements$nodes
checkEquals(dim(tbl.nodes), c(2,1))
checkEquals(tbl.nodes$data.id, c("X", "Y"))
tbl.edges <- g2$elements$edges
checkEquals(dim(tbl.edges), c(1,5))
checkEquals(tbl.edges$data.id, "X->Y")
checkEquals(tbl.edges$data.source, "X")
checkEquals(tbl.edges$data.target, "Y")
checkEquals(tbl.edges$data.weight, 1.234)
checkEquals(tbl.edges$data.edgeType, "regulates")
} # test_2_nodes_1_edge
#------------------------------------------------------------------------------------------------------------------------
test_smallGraphWithAttributes <- function(display=FALSE)
{
printf("--- test_smallGraphWithAttributes")
g <- simpleDemoGraph()
g.json <- .graphToJSON(g)
if(display){
writeLines(sprintf("network = %s", g.json), "network.js")
browseURL("cyjs-readNetworkFromFile.html")
} # display
g2 <- fromJSON(g.json, flatten=TRUE)
checkEquals(names(g2$elements), c("nodes", "edges"))
tbl.nodes <- g2$elements$nodes
tbl.edges <- g2$elements$edges
checkEquals(dim(tbl.nodes), c(3, 5))
checkEquals(colnames(tbl.nodes),
c("data.id", "data.type", "data.lfc", "data.label", "data.count"))
checkEquals(dim(tbl.edges), c(3, 6))
checkEquals(colnames(tbl.edges), c("data.id", "data.source", "data.target", "data.edgeType", "data.score", "data.misc"))
} # test_smallGraphWithAttributes
#------------------------------------------------------------------------------------------------------------------------
simpleDemoGraph = function ()
{
g = new ('graphNEL', edgemode='directed')
nodeDataDefaults(g, attr='type') <- 'undefined'
nodeDataDefaults(g, attr='lfc') <- 1.0
nodeDataDefaults(g, attr='label') <- 'default node label'
nodeDataDefaults(g, attr='count') <- 0
edgeDataDefaults(g, attr='edgeType') <- 'undefined'
edgeDataDefaults(g, attr='score') <- 0.0
edgeDataDefaults(g, attr= 'misc') <- "default misc"
g = graph::addNode ('A', g)
g = graph::addNode ('B', g)
g = graph::addNode ('C', g)
nodeData (g, 'A', 'type') = 'kinase'
nodeData (g, 'B', 'type') = 'transcription factor'
nodeData (g, 'C', 'type') = 'glycoprotein'
nodeData (g, 'A', 'lfc') = -3.0
nodeData (g, 'B', 'lfc') = 0.0
nodeData (g, 'C', 'lfc') = 3.0
nodeData (g, 'A', 'count') = 2
nodeData (g, 'B', 'count') = 30
nodeData (g, 'C', 'count') = 100
nodeData (g, 'A', 'label') = 'Gene A'
nodeData (g, 'B', 'label') = 'Gene B'
nodeData (g, 'C', 'label') = 'Gene C'
g = graph::addEdge ('A', 'B', g)
g = graph::addEdge ('B', 'C', g)
g = graph::addEdge ('C', 'A', g)
edgeData (g, 'A', 'B', 'edgeType') = 'phosphorylates'
edgeData (g, 'B', 'C', 'edgeType') = 'synthetic lethal'
edgeData (g, 'A', 'B', 'score') = 35.0
edgeData (g, 'B', 'C', 'score') = -12
g
} # simpleDemoGraph
#----------------------------------------------------------------------------------------------------
|
001655c80d1e895005a0e76666c318a9e23189da | 54af5cc8afd2ee26683afef5ef21769419aae041 | /man/kstructure_is_wellgraded.Rd | 8e99d57e938ea965dc0eccad27c9285995ac4b0b | [] | no_license | cran/kst | 1fb402f6cee26906841b210d1a6214664ae60c6d | f841da1da8d5b0db20b2101a025537b802931a16 | refs/heads/master | 2022-11-10T16:25:29.191228 | 2022-10-24T12:52:37 | 2022-10-24T12:52:37 | 17,696,947 | 8 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,429 | rd | kstructure_is_wellgraded.Rd | \name{kstructure_is_wellgraded}
\alias{kstructure_is_wellgraded}
\title{Well-Gradedness of Knowledge Structures}
\description{
Tests for the well-gradedness of knowledge structures.
}
\usage{
kstructure_is_wellgraded(x)
}
\arguments{
\item{x}{An \R object of class \code{\link{kstructure}}.}
}
\details{
A knowledge structure is considered \emph{well-graded} if any two of
its states are connected by a bounded path, i.e., each knowledge state
(except the state for the full set of domain problems \emph{Q}) has at
least one immediate successor state that comprises the same domain items
plus exactly one additional item, and each knowledge state (except the
empty set \emph{\{\}}) has at least one predecessor state that contains
the same domain items with the exception of exactly one item.
\code{kstructure_is_wellgraded} takes an arbitrary knowledge structure
and tests for its well-gradedness.
}
\value{
A logical value.
}
\references{
Doignon, J.-P., Falmagne, J.-C. (1999) \emph{Knowledge Spaces}. Heidelberg:
Springer Verlag.
}
\seealso{
\code{\link{kstructure}}
}
\examples{
kst <- kstructure(set(set(), set("a"), set("b"), set("c"), set("a","b"),
set("b","c"), set("a","b","c")))
kstructure_is_wellgraded(kst)
kst <- kstructure(set(set(), set("a"), set("b"), set("c"), set("a","b"),
set("a","b","c")))
kstructure_is_wellgraded(kst)
}
\keyword{math} |
a00fa9d41deb5d1dffeeceb8212a0d151841d8d6 | c853a2a1d74194b0ea5dcea67a28bcb194479c80 | /IntroR/CourseFiles/R01_2_Plots.R | a16f8e01ae625bd1f0c2f9e2bd1e9641a541c76d | [
"MIT"
] | permissive | chsolis/IntoT_test | 399e5c34774880f62addd22a288fef93fd3cd09a | 4f88a489dfe14b676645674bfd51f025dd5d7ff1 | refs/heads/master | 2022-04-24T05:28:38.354259 | 2020-04-28T01:54:15 | 2020-04-28T01:54:15 | 259,503,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 786 | r | R01_2_Plots.R | # File: R_3_Plots.R
# Course: Introduction to R
# Section: 3: Plots
# Author: Christopher Solis, uic.edu, @csolisoc
# Date: 2019-04-23
# 1. Load data ################################################
library(datasets) # Load built-in datasets
# 2. Sumarize data ############################################
iris # Shows the whole data set. Hard to read!
head(iris) # Shows the first six lines of iris data
summary(iris) # Summary statistics for iris data
plot(iris) # Scatterplot matrix for iris data
# 3. Clean up #################################################
# clear packages
detach("package:datasets", unload = TRUE) # For base
# Clear plots
dev.off() # But only if there IS a plot
# Clear console
cat("\014") # ctrl+L
# FIN! |
f2d14f5820b8ac80c875b33aba688c5cc1ba7f8f | 88ce0f88952733d55ea09255a322e9c3f850d7c2 | /function_Part.Dom.Sel.Coef.for.q.R | 35b45ad4820219538040465eec2e1aae83eb58a2 | [] | no_license | mrvella/IQTmosq | a9b98deaaea6e79c8dab055a11daa8b87c0c2e36 | 1b86526dd48616cbbe6b9edd98fdec527707e23e | refs/heads/master | 2021-01-26T07:16:42.181239 | 2020-02-26T20:48:35 | 2020-02-26T20:48:35 | 243,362,059 | 0 | 0 | null | 2020-02-26T20:45:36 | 2020-02-26T20:45:36 | null | UTF-8 | R | false | false | 1,286 | r | function_Part.Dom.Sel.Coef.for.q.R | # This is the function to calculate selection coefficient assuming partial dominance
# of q and selection for q, where A1 = p = R, A2 = q = S
# Solves for s in Table 2.2, equation 2 - column heading "Change of gene frequency"
# Falconer and Mackay, page 28
# To Load Required Libraries
library(scatterplot3d)
# To Calculate s, default value of h = 0.5
# Selection coefficient s under partial dominance (degree h) when selection
# favours allele q (Falconer & Mackay, Table 2.2, eq. 2; see file header).
#   q.1, q.2 : frequencies of allele q at the start and end of the period
#   g        : number of generations elapsed
#   h        : degree of dominance (default 0.5, i.e. additive)
# Works on p = 1 - q because the tabulated formula is stated in terms of p.
# Returns s invisibly (the body ends in an assignment, as in the original).
Part.Dom.Sel.Coef.for.q = function(q.1, q.2, g, h=0.5) {
  p.start <- 1 - q.1 # to select for q, substitute p = 1 - q
  p.end   <- 1 - q.2
  delta.p <- (p.end - p.start)/g # per-generation change in p
  # denominator of the rearranged delta-p formula, split out for readability
  denom <- -p.start^2 + p.start^3 - h*p.start + 3*h*p.start^2 - 2*h*p.start^3 +
    delta.p * (2*h*p.start - 2*h*p.start^2 + p.start^2)
  s <- delta.p/denom
}
# To plot s
# Save a 3-D scatterplot of |s| against delta.p and p as a PNG named
# "<dataframe name>_PartDom-forq.png" (file name is also printed).
#
#   q.1, q.2 : start/end frequencies of allele q
#   g        : number of generations
#   s        : selection coefficients (e.g. from Part.Dom.Sel.Coef.for.q)
#   dataframe: object whose *name* is used to build the output file name
#
# Fix: the original body read p.1/p.2 from the global environment (they are
# not arguments), so the plot silently depended on leftover globals.  They
# are now derived from q.1/q.2, mirroring Part.Dom.Sel.Coef.for.q.
plot.Part.Dom.Sel.Coef.for.q = function(q.1, q.2, g, s, dataframe) {
  p.1 <- 1 - q.1
  p.2 <- 1 - q.2
  delta.p <- (p.2 - p.1)/g
  df.name <- deparse(substitute(dataframe))
  # print() both echoes the file name and returns it for png()
  png(file = print(paste0(df.name,"_PartDom-forq.png")), units = "px", height = 600, width = 900)
  scatterplot3d(delta.p, p.1, abs(s), highlight.3d = TRUE, col.axis = "blue",
                cex = 2.5, cex.axis = 1.5, cex.lab = 2, cex.main = 2, col.grid = "lightblue",
                main = "Partial Dominance of p, Selection for q", xlab = "Delta q", ylab = "",
                zlab = "Selection Coefficient", pch = 20, zlim = c(0,.75))
  dev.off()
}
e835464d36e967904b70295f335e1d4cb3e4e85c | 7b26a7cef677c11d74013da42838a965485d8263 | /man/dosechange_vals.Rd | 5fad19cc2eb6a88b4915e506c2654cac502a21c5 | [] | no_license | choileena/medExtractR | 39ca350f6259ba475f64d2177640339da04918b4 | a92a6eab57fd581cd4a471e85a6b6fe482c4e76e | refs/heads/master | 2022-06-29T04:04:13.789905 | 2022-06-06T21:15:20 | 2022-06-06T21:15:20 | 197,653,826 | 4 | 1 | null | 2021-06-04T21:57:06 | 2019-07-18T20:39:55 | R | UTF-8 | R | false | true | 781 | rd | dosechange_vals.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dosechange_vals.R
\docType{data}
\name{dosechange_vals}
\alias{dosechange_vals}
\title{Keywords Specifying Dose Change}
\format{
A data frame with dose change expressions (exact and/or regular expressions).
\describe{
\item{expr}{A character vector, expressions to consider as dose change.}
}
}
\usage{
dosechange_vals
}
\description{
A dictionary of words indicating a dose change, meaning that the associated
drug regimen may not be current. This includes phrases such as increase,
reduce, or switch. In the following example of clinical text, the word
\sQuote{increase} represents a dose change keyword: \dQuote{Increase prograf to 5mg bid.}
}
\examples{
data(dosechange_vals)
}
\keyword{datasets}
|
e021b7b2901a4c3662d68adefdaaaf29808d16e6 | 0b18ee6f54030735ae117c67d1f363407ae7b66d | /R/utils.R | 22b4ecbdc1c480357f3668c0b27bdb85c9cd1532 | [] | no_license | crsh/prereg | e468c0fb031b081f965f57ff1d3a10e4e2083cd3 | 56d497921c325311d41489f0cf14878aa828520c | refs/heads/master | 2022-01-28T01:44:22.100425 | 2022-01-20T11:49:39 | 2022-01-20T11:49:39 | 48,246,208 | 53 | 11 | null | 2022-01-20T10:36:47 | 2015-12-18T16:53:22 | R | UTF-8 | R | false | false | 1,173 | r | utils.R | # Preprocessor functions are adaptations from the RMarkdown package
# (https://github.com/rstudio/rmarkdown/blob/master/R/pdf_document.R)
# to ensure right geometry defaults in the absence of user specified values
# Assemble extra pandoc arguments for rendering a prereg document.
# Two defaults are injected only when the user has not already specified
# them in the input document:
#   * APA-style page margins (a `geometry:` line anywhere in the file), and
#   * the package's bundled APA6 CSL file for citations (a `csl:` line).
# The input file is read exactly once; the original implementation re-read
# it (and redefined an identical helper closure) for every check.
#
# Returns a character vector of pandoc arguments, or NULL when the document
# supplies both fields itself.  Stops if the bundled CSL template is missing.
pdf_pre_processor <- function(metadata, input_file, runtime, knit_meta, files_dir, output_dir) {
  input_lines <- readLines(input_file, warn = FALSE)
  # TRUE when any line starts with "<field>:" (e.g. "geometry:" / "csl:")
  has_field <- function(field) {
    any(grepl(paste0("^", field, ":.*$"), input_lines))
  }

  args <- c()

  # Set margins if no other geometry options specified
  if (!has_field("geometry")) {
    args <- c(args,
              "--variable", "geometry:left=2.5in",
              "--variable", "geometry:bottom=1.25in",
              "--variable", "geometry:top=1.25in",
              "--variable", "geometry:right=1in")
  }

  # Use APA6 CSL citations template if no other file is supplied
  if (!has_field("csl")) {
    csl_template <- system.file("rmd", "apa6.csl", package = "prereg")
    if (csl_template == "") stop("No CSL template file found.")
    args <- c(args, "--csl", rmarkdown::pandoc_path_arg(csl_template))
  }

  args
}
db72bff5f51afc5c837594096d54aeeded336f83 | ce4dca785d1172ce87f0ed9d778dddd3964a5396 | /R/account_albums.R | 24bf96a943c9bdbf8eaffd78313b83a88649e12e | [] | no_license | Zedseayou/imguR | 2e2517d1b7ad39eb9102ace473ae4e3c3a1a9892 | 6185d151efb26564307a18d0a86825c717b9bd1f | refs/heads/master | 2022-01-12T03:10:15.268427 | 2018-04-22T08:18:30 | 2018-04-22T08:19:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 509 | r | account_albums.R | account_albums <-
function(account = 'me',
         ids = TRUE,
         ...){
    # Listing albums for the authenticated account ('me') requires an OAuth
    # token, which callers pass through ... as the `token` argument.
    extra <- list(...)
    if (account == 'me' && !("token" %in% names(extra))) {
        stop("This operation can only be performed for account 'me' using an OAuth token.")
    }
    # Two endpoints: a lightweight id listing vs. the full album objects.
    if (ids) {
        path <- paste0('account/', account, '/albums/ids')
    } else {
        path <- paste0('account/', account, '/albums/')
    }
    response <- imgurGET(path, ...)
    if (ids) {
        # id listing: tag the whole result as a basic imgur response
        structure(response, class = 'imgur_basic')
    } else {
        # full listing: tag each album object individually
        lapply(response, `class<-`, 'imgur_album')
    }
}
|
a455b02abf081397bcaafced8ca79d8f41b5caea | c774252b7929cdd9da0acbfaea6375078d1bbbb3 | /R/as.mpInterval.R | 9f9453704d5b099c1cbdc046ecf0363113e804d5 | [] | no_license | lmw40/mpMap2 | 35032fef23cf204f62eb7c8c7107a1034ba23d5e | 0ac4569b251ccc041cfe965dfe41091aff18e3bc | refs/heads/master | 2021-01-22T19:22:11.104705 | 2016-08-23T00:52:51 | 2016-08-23T00:52:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 173 | r | as.mpInterval.R | #' @export
as.mpInterval <- function(object)
{
	# Guard clause: only S4 objects inheriting from "mpcrossMapped" are
	# accepted; anything else is a usage error.
	is.valid.input <- isS4(object) && is(object, "mpcrossMapped")
	if(!is.valid.input)
	{
		stop("Input object must be an S4 object of class mpcrossMapped")
	}
	# Conversion itself is not implemented yet: valid input falls through
	# and the function returns NULL invisibly, exactly as before.
}
|
4c6ed1aed09624f928f15811600b86455209236f | b1a6f8512d841dae7e8691a5771997f80ec8cd68 | /man/SimCiRat.Rd | 46151289b30805ea04c6744859c9fbe3b0fbd52e | [] | no_license | cran/SimComp | bffc073d4ae2eb817dbb9633b9c0251dca2b4f3b | adf7d7be63bcb964d7ea086710f65c14a3f34d35 | refs/heads/master | 2021-06-04T02:11:29.239529 | 2019-08-26T13:50:10 | 2019-08-26T13:50:10 | 17,693,681 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,037 | rd | SimCiRat.Rd | \name{SimCiRat}
\alias{SimCiRat}
\alias{SimCiRat.default}
\alias{SimCiRat.formula}
\title{ Simultaneous Confidence Intervals for General Contrasts (Ratios) of Means
of Multiple Endpoints }
\description{
Simultaneous confidence intervals for general contrasts (linear functions) of
normal means (e.g., "Dunnett", "Tukey", "Williams" ect.), and for single or
multiple endpoints (primary response variables) simultaneously. The procedure of
Hasler and Hothorn (2012) <doi:10.1080/19466315.2011.633868> is applied for
ratios of means of normally distributed data. The variances/ covariance matrices
of the treatment groups (containing the covariances between the endpoints) may be
assumed to be equal or possibly unequal for the different groups (Hasler, 2014
<doi:10.1515/ijb-2012-0015>). For the case of only a single endpoint and unequal
covariance matrices (variances), the procedure coincides with the PI procedure of
Hasler and Hothorn (2008) <doi:10.1002/bimj.200710466>.
}
\usage{
\method{SimCiRat}{default}(data, grp, resp = NULL, na.action = "na.error", type = "Dunnett",
base = 1, Num.Contrast = NULL, Den.Contrast = NULL, alternative = "two.sided",
covar.equal = FALSE, conf.level = 0.95, CorrMatDat = NULL, ...)
\method{SimCiRat}{formula}(formula, ...)
}
\arguments{
\item{data}{ a data frame containing a grouping variable and the endpoints as
columns }
\item{grp}{ a character string with the name of the grouping variable }
\item{resp}{ a vector of character strings with the names of the endpoints; if
\code{resp=NULL} (default), all column names of the data frame
without the grouping variable are chosen automatically }
\item{formula}{ a formula specifying a numerical response and a grouping factor
(e.g. \kbd{response ~ treatment}) }
\item{na.action}{ a character string indicating what should happen when the data
contain \code{NAs};
if \code{na.action="na.error"} (default) the procedure stops
with an error message; if \code{na.action="multi.df"} a new
experimental version is used (details will follow soon) }
\item{type}{ a character string, defining the type of contrast, with the following
options:
\itemize{
\item "Dunnett": many-to-one comparisons
\item "Tukey": all-pair comparisons
\item "Sequen": comparisons of consecutive groups
\item "AVE": comparison of each group with average of all others
\item "GrandMean": comparison of each group with grand mean of all
groups
\item "Changepoint": differences of averages of groups of higher
order to averages of groups of lower order
\item "Marcus": Marcus contrasts
\item "McDermott": McDermott contrasts
\item "Williams": Williams trend tests
\item "UmbrellaWilliams": Umbrella-protected Williams trend tests
}
note that \code{type} is ignored if \code{Num.Contrast} or
\code{Den.Contrast} is specified by the user (see below) }
\item{base}{ a single integer specifying the control group for Dunnett contrasts,
ignored otherwise }
\item{Num.Contrast}{ a numerator contrast matrix, where columns correspond to
groups and rows correspond to contrasts }
\item{Den.Contrast}{ a denominator contrast matrix, where columns correspond to
groups and rows correspond to contrasts }
\item{alternative}{ a character string specifying the alternative hypothesis,
must be one of \code{"two.sided"} (default), \code{"greater"}
or \code{"less"} }
\item{covar.equal}{ a logical variable indicating whether to treat the variances/
covariance matrices of the treatment groups (containing the
covariances between the endpoints) as being equal;
if \code{TRUE} then the pooled variance/ covariance matrix is
used, otherwise the Satterthwaite approximation to the degrees
of freedom is used }
\item{conf.level}{ a numeric value defining the simultaneous confidence level }
\item{CorrMatDat}{ a correlation matrix of the endpoints, if \code{NULL} (default)
it is estimated from the data }
\item{\dots}{ arguments to be passed to SimCiRat.default }
}
\details{
The interest is in simultaneous confidence intervals for several linear combinations
(contrasts) of treatment means in a one-way ANOVA model, and for single or
multiple endpoints simultaneously. For example, corresponding intervals for the all-
pair comparison of Tukey (1953) and the many-to-one comparison of Dunnett (1955) are
implemented, but allowing for heteroscedasticity and multiple endpoints, and in
terms of ratios of means. The user is also free to create other interesting
problem-specific contrasts. Approximate multivariate \emph{t}-distributions are used
to calculate lower and upper limits (Hasler and Hothorn, 2012
<doi:10.1080/19466315.2011.633868>). Simultaneous tests based on these intervals
control the familywise error rate in admissible ranges and in the strong sense. The
variances/ covariance matrices of the treatment groups (containing the covariances
between the endpoints) can be assumed to be equal (\code{covar.equal=TRUE}) or
unequal (\code{covar.equal=FALSE}). If being equal, the pooled variance/ covariance
matrix is used, otherwise approximations to the degrees of freedom
(Satterthwaite, 1946) are used (Hasler, 2014 <doi:10.1515/ijb-2012-0015>;
Hasler and Hothorn, 2008 <doi:10.1002/bimj.200710466>). Unequal covariance matrices
occur if variances or correlations of some endpoints differ depending on the
treatment groups.
}
\value{
An object of class SimCi containing:
\item{estimate}{ a matrix of estimated ratios }
\item{lower.raw}{ a matrix of raw (unadjusted) lower limits }
\item{upper.raw}{ a matrix of raw (unadjusted) upper limits }
\item{lower}{ a matrix of lower limits adjusted for multiplicity }
\item{upper}{ a matrix of upper limits adjusted for multiplicity }
\item{CorrMatDat}{ if not prespecified by \code{CorrMatDat}, either the estimated
common correlation matrix of the endpoints
(\code{covar.equal=TRUE}) or a list of different (one for each
treatment) estimated correlation matrices of the endpoints
(\code{covar.equal=FALSE}) }
\item{CorrMatComp}{ the estimated correlation matrix of the comparisons }
\item{degr.fr}{ a matrix of degrees of freedom }
}
\note{
By default (\code{na.action="na.error"}), the procedure stops if there are
missing values. A new experimental version for missing values is used if
\code{na.action="multi.df"}. If \code{covar.equal=TRUE}, the number of endpoints
must not be greater than the total sample size minus the number of treatment
groups. If \code{covar.equal=FALSE}, the number of endpoints must not be greater
than the minimal sample size minus 1. Otherwise the procedure stops.
All intervals have the same direction for all comparisons and endpoints
(\code{alternative="..."}). In case of doubt, use \code{"two.sided"}.
The correlation matrix for the multivariate \emph{t}-distribution also depends
on the unknown ratios. The same problem also arises for the degrees of freedom
if the covariance matrices for the different groups are assumed to be unequal
(\code{covar.equal=FALSE}). Both problems are handled by a plug-in approach, see
the references therefore.
}
\references{
Hasler, M. (2014): Multiple contrast tests for multiple endpoints in the presence of
heteroscedasticity. \emph{The International Journal of Biostatistics} 10, 17--28,
<doi:10.1515/ijb-2012-0015>.
Hasler, M. and Hothorn, L.A. (2012): A multivariate Williams-type trend procedure.
\emph{Statistics in Biopharmaceutical Research} 4, 57--65, <doi:10.1080/19466315.2011.633868>.
Hasler, M. and Hothorn, L.A. (2008): Multiple contrast tests in the presence of
heteroscedasticity. \emph{Biometrical Journal} 50, 793--800, <doi:10.1002/bimj.200710466>.
Dilba, G. et al. (2006): Simultaneous confidence sets and confidence intervals for
multiple ratios. \emph{Journal of Statistical Planning and Inference} 136, 2640--2658,
<DOI:10.1016/j.jspi.2004.11.009>.
}
\author{ Mario Hasler }
\seealso{ \code{\link{SimTestRat}}, \code{\link{SimTestDiff}},
\code{\link{SimCiDiff}} }
\examples{
# Example 1:
# Simultaneous confidence intervals related to a comparison of the groups
# B and H against the standard S, for endpoint Thromb.count, assuming unequal
# variances for the groups. This is an extension of the well-known Dunnett-
# intervals to the case of heteroscedasticity and in terms of ratios of means
# instead of differences.
data(coagulation)
interv1 <- SimCiRat(data=coagulation, grp="Group", resp="Thromb.count",
type="Dunnett", base=3, alternative="greater", covar.equal=FALSE)
interv1
plot(interv1)
# Example 2:
# Simultaneous confidence intervals related to a comparisons of the groups
# B and H against the standard S, simultaneously for all endpoints, assuming
# unequal covariance matrices for the groups. This is an extension of the well-
# known Dunnett-intervals to the case of heteroscedasticity and multiple
# endpoints and in terms of ratios of means instead of differences.
data(coagulation)
interv2 <- SimCiRat(data=coagulation, grp="Group", resp=c("Thromb.count","ADP","TRAP"),
type="Dunnett", base=3, alternative="greater", covar.equal=FALSE)
summary(interv2)
plot(interv2)
}
\keyword{ htest }
|
52051fd3d8088cb12f5c942edccdae07dda8ffaf | f5279bbb062078f42a52f2d5c3613907ccf132ee | /man/dbGetInfo.Rd | 3a22e65ea0aafa556f4fa5aee1752f63bcaa2278 | [
"BSD-3-Clause"
] | permissive | prestodb/RPresto | cff084d4679bcb0e1237147d76f37b9e1cf42a83 | 3a9fb5baf81bca7c53ea8187ffc7ba784223ca23 | refs/heads/master | 2023-09-03T03:54:41.119049 | 2023-05-05T03:37:54 | 2023-05-08T02:49:45 | 32,487,073 | 140 | 50 | NOASSERTION | 2023-05-08T02:49:46 | 2015-03-18T22:11:28 | R | UTF-8 | R | false | true | 1,680 | rd | dbGetInfo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbGetInfo.R
\name{dbGetInfo,PrestoDriver-method}
\alias{dbGetInfo,PrestoDriver-method}
\alias{dbGetInfo,PrestoConnection-method}
\alias{dbGetInfo,PrestoResult-method}
\title{Metadata about database objects}
\usage{
\S4method{dbGetInfo}{PrestoDriver}(dbObj)
\S4method{dbGetInfo}{PrestoConnection}(dbObj)
\S4method{dbGetInfo}{PrestoResult}(dbObj)
}
\arguments{
\item{dbObj}{A \linkS4class{PrestoDriver},
\linkS4class{PrestoConnection}
or \linkS4class{PrestoResult} object}
}
\value{
\linkS4class{PrestoResult} A \code{\link[=list]{list()}} with elements
\describe{
\item{statement}{The SQL sent to the database}
\item{row.count}{Number of rows fetched so far}
\item{has.completed}{Whether all data has been fetched}
\item{stats}{Current stats on the query}
}
}
\description{
Metadata about database objects
For the \linkS4class{PrestoResult} object, the implementation
returns the additional \code{stats} field which can be used to
implement things like progress bars. See the examples section.
}
\examples{
\dontrun{
conn <- dbConnect(Presto(), "localhost", 7777, "onur", "datascience")
result <- dbSendQuery(conn, "SELECT * FROM jonchang_iris")
iris <- data.frame()
progress.bar <- NULL
while (!dbHasCompleted(result)) {
chunk <- dbFetch(result)
if (!NROW(iris)) {
iris <- chunk
} else if (NROW(chunk)) {
iris <- rbind(iris, chunk)
}
stats <- dbGetInfo(result)[["stats"]]
if (is.null(progress.bar)) {
progress.bar <- txtProgressBar(0, stats[["totalSplits"]], style = 3)
} else {
setTxtProgressBar(progress.bar, stats[["completedSplits"]])
}
}
close(progress.bar)
}
}
|
629577c29ab6148bdae897d639b692bafa09f8dc | 9133a15cf08ba26a60cd47e26b32f8fad4d06000 | /genomic_range/workflows/protocolFunctions/enrichrResultsCompilation.R | c24f99380a6b73900204f0e4c73926ac6d22e0ab | [
"MIT"
] | permissive | mora-lab/benchmarks | d5922a0d7887b5ee245170f56fe6d1a8367cd496 | cde839aa35d39f74d03283c8bf50be3859349174 | refs/heads/master | 2023-01-08T23:45:44.849311 | 2020-11-12T11:08:18 | 2020-11-12T11:08:18 | 189,912,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,569 | r | enrichrResultsCompilation.R | enrichrResultsCompilation <- function(){
  ## Compile locally exported Enrichr results (GO BP / CC / MF and KEGG)
  ## for each ChIP-Seq sample, condense each result table to its columns
  ## 1 and 3, and cache the condensed lists as RDS files under
  ## ./results/Enrichr/.
  ## NOTE(review): depends on a global `ChIPSeqSamples` character vector
  ## defined elsewhere -- confirm it is in scope before calling.
  ## Pulling Enrichr Results from a local directory.
  ## NOTE(review): in these tryCatch() calls, `break`/`next` are invalid
  ## outside a loop (the error handler would itself fail if triggered),
  ## and `finally` is handed an anonymous function that is constructed
  ## but never called -- effectively a no-op. Worth simplifying.
  tryCatch( {enrichrGOBPSamples <- list.files("./results/Enrichr/GO_Biological_Process_2018/")}
            ,error = function(e){ print("File not found"); break;}
            ,finally = function (f){next;})
  ## Strip the trailing ".txt" extension (last 4 characters).
  enrichrGOBPSamples <- substr(enrichrGOBPSamples,1,nchar(enrichrGOBPSamples)-4)
  ## Similarly for other databases
  tryCatch( {enrichrGOCCSamples <- list.files("./results/Enrichr/GO_Cellular_Component_2018/")}
            ,error = function(e){ print("File not found"); break;}
            ,finally = function (f){next;})
  enrichrGOCCSamples <- substr(enrichrGOCCSamples,1,nchar(enrichrGOCCSamples)-4)
  tryCatch( {enrichrGOMFSamples <- list.files("./results/Enrichr/GO_Molecular_Function_2018/")}
            ,error = function(e){ print("File not found"); break;}
            ,finally = function (f){next;})
  enrichrGOMFSamples <- substr(enrichrGOMFSamples,1,nchar(enrichrGOMFSamples)-4)
  ## There could be another way here to remove the ".txt" extension from the list of samples.
  #enrichrGOBPSamples[1] <- gsub('.{4}$', '', enrichrGOBPSamples[1])
  ## Let us load the results for the samples from the assorted databases of Enrichr, into respective lists.
  ## NOTE(review): eval(parse(text='ChIPSeqSamples[i]')) below is just a
  ## roundabout way of writing ChIPSeqSamples[i].
  enrichrGOBPResults <- list()
  for (i in 1:length(ChIPSeqSamples))
  {
    for(j in 1:length(enrichrGOBPSamples))
    {
      if(enrichrGOBPSamples[j] == ChIPSeqSamples[i])
      {
        enrichrGOBPResults[[j]] <-read.table(paste0("./results/Enrichr/GO_Biological_Process_2018/",paste0(eval(parse(text='ChIPSeqSamples[i]')),".txt")), sep = '\t', header = TRUE, quote = "", fill = TRUE)
      }
    }
  }
  enrichrGOMFResults <- list()
  for (i in 1:length(ChIPSeqSamples))
  {
    for(j in 1:length(enrichrGOMFSamples))
    {
      if(enrichrGOMFSamples[j] == ChIPSeqSamples[i])
      {
        enrichrGOMFResults[[j]] <-read.table(paste0("./results/Enrichr/GO_Molecular_Function_2018/",paste0(eval(parse(text='ChIPSeqSamples[i]')),".txt")), sep = '\t', header = TRUE, quote = "", fill = TRUE)
      }
    }
  }
  enrichrGOCCResults <- list()
  for (i in 1:length(ChIPSeqSamples))
  {
    for(j in 1:length(enrichrGOCCSamples))
    {
      if(enrichrGOCCSamples[j] == ChIPSeqSamples[i])
      {
        enrichrGOCCResults[[j]] <-read.table(paste0("./results/Enrichr/GO_Cellular_Component_2018/",paste0(eval(parse(text='ChIPSeqSamples[i]')),".txt")), sep = '\t', header = TRUE, quote = "", fill = TRUE)
      }
    }
  }
  ## Same protocol for ENRICHR KEGG results too.
  tryCatch( {enrichrKEGGSamples <- list.files("./results/Enrichr/KEGG_2019_Human/")}
            ,error = function(e){ print("File not found"); break;}
            ,finally = function (f){next;})
  enrichrKEGGSamples <- substr(enrichrKEGGSamples,1,nchar(enrichrKEGGSamples)-4)
  enrichrKEGGResults <- list()
  for (i in 1: length(ChIPSeqSamples))
  {
    for(j in 1:length(enrichrKEGGSamples))
    {
      if(enrichrKEGGSamples[j] == ChIPSeqSamples[i])
      {
        enrichrKEGGResults[[j]] <-read.table(paste0("./results/Enrichr/KEGG_2019_Human/",paste0(eval(parse(text='ChIPSeqSamples[i]')),".txt")), sep = '\t', header = TRUE, quote = "", fill = TRUE)
      }
    }
  }
  ## Condensed Results
  ## Keep only columns 1 and 3 of each result table (presumably the term
  ## and a significance value -- verify against the Enrichr export format).
  ## BP
  enrichrGOBPResultsShredded <- list()
  for (i in 1:length(enrichrGOBPResults))
  {
    enrichrGOBPResultsShredded[[i]] <- enrichrGOBPResults[[i]][,c(1,3)]
  }
  names(enrichrGOBPResultsShredded) <- as.character(enrichrGOBPSamples)
  saveRDS(enrichrGOBPResultsShredded, file = "./results/Enrichr/enrichrGOBPResultsShredded")
  ##CC
  enrichrGOCCResultsShredded <- list()
  for (i in 1:length(enrichrGOCCResults))
  {
    enrichrGOCCResultsShredded[[i]] <- enrichrGOCCResults[[i]][,c(1,3)]
  }
  names(enrichrGOCCResultsShredded) <- as.character(enrichrGOCCSamples)
  saveRDS(enrichrGOCCResultsShredded, file = "./results/Enrichr/enrichrGOCCResultsShredded")
  ##MF
  enrichrGOMFResultsShredded <- list()
  for (i in 1:length(enrichrGOMFResults))
  {
    enrichrGOMFResultsShredded[[i]] <- enrichrGOMFResults[[i]][,c(1,3)]
  }
  names(enrichrGOMFResultsShredded) <- as.character(enrichrGOMFSamples)
  saveRDS(enrichrGOMFResultsShredded, file = "./results/Enrichr/enrichrGOMFResultsShredded")
  ##KEGG
  enrichrKEGGResultsShredded <- list()
  for (i in 1:length(enrichrKEGGResults))
  {
    enrichrKEGGResultsShredded[[i]] <- enrichrKEGGResults[[i]][,c(1,3)]
  }
  names(enrichrKEGGResultsShredded) <- as.character(enrichrKEGGSamples)
  saveRDS(enrichrKEGGResultsShredded, file = "./results/Enrichr/enrichrKEGGResultsShredded")
  ## Removing data from cache.
  rm(enrichrGOBPResultsShredded)
  rm(enrichrGOCCResultsShredded)
  rm(enrichrGOMFResultsShredded)
  rm(enrichrKEGGResultsShredded)
}
|
b9130ae104154e7f9d20b86051c9e0fd91b78a05 | 61990b472e2f6da90f3cf56e2ddf575c60dd8c18 | /scripts/categorical_vars_check.R | 55e34eee07fd0ad3b7f6f02eb39c4262460e5e6f | [] | no_license | caramirezal/vihCohort | 0a61dfebe622ebe318c7f983abbe650d706596ee | 599777373c5c83379893328c2515b74c1520b3ea | refs/heads/master | 2021-04-09T16:04:14.745535 | 2020-02-28T13:43:35 | 2020-02-28T13:43:35 | 125,657,852 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,124 | r | categorical_vars_check.R | ## lasso implementation on VIH patients cohort data
## LASSO analysis of the VIH (HIV) patient cohort: leave-one-out
## cross-validated glmnet models predicting Delta_CD4_year1, first from
## numeric predictors only, then from numeric + dummy-coded categorical
## predictors; predictions are plotted and coefficients written to disk.
library(glmnet)
library(dplyr)
library(ggplot2)
library(gridExtra)

vih_data <- read.csv("../data/cleandata.csv", stringsAsFactors = TRUE)
str(vih_data)
## No NA values are presented in data
dim(vih_data[!complete.cases(vih_data),])
########################################################################################
## Only numerical data
## processing data for lasso
## Keep the non-factor columns, drop the response and the CD4_S0/CD4_S52
## columns (presumably the components the response is derived from --
## verify), and coerce to the matrix form glmnet expects.
input <- vih_data[, ! sapply(vih_data, function(x) class(x)=="factor") ]
input <- select(input, -Delta_CD4_year1)
input <- select(input, -CD4_S0)
input <- select(input, -CD4_S52)
input <- as.matrix(input)
str(input)
output <- vih_data$Delta_CD4_year1
## leave-one-out validation
## vector to store predictions
res <- numeric(nrow(input))
## matrix to store lasso coefficients
## (+1 column for the intercept returned by coef())
lasso_coefs <- matrix(0, nrow(input), ncol(input)+1)
## perform leave-one-out validation
## Each fold: tune lambda on all-but-one row, refit, predict the held-out row.
for (i in 1:nrow(input)) {
  lambda.cv <- cv.glmnet(x=input[-i,], y = output[-i])$lambda.1se
  lasso <- glmnet(x=input[-i,], y = output[-i], lambda = lambda.cv)
  prediction <- predict(lasso, newx = input, type = "response", s = lambda.cv)
  res[i] <- prediction[i]
  lasso_coefs[i,] <- as.vector(coef(lasso))
}
## NOTE(review): named "mse" but this is the root-mean-squared error.
mse_num <- sqrt(mean((res-output)^2))
mse_num
## plot predicted vs target values
validation <- data.frame("lasso_prediction"=res,
                         "values"=output)
theme_set(theme_light())
p_num <- ggplot(validation, aes(x=values, y=lasso_prediction)) +
  geom_point(colour="steelblue", size= 2.5) +
  geom_abline(slope = 1,colour="red", size=1) +
  labs(x="Delta TCD4 values", y="LASSO_prediction") +
  theme(text = element_text(face="bold", size = 18))
plot(p_num)
## calculate mean coefficient values
## NOTE(review): column names are taken from the `lasso` fit of the FINAL
## loop iteration; fine as long as every fold has the same predictors.
colnames(lasso_coefs) <- rownames(coef(lasso))
mean_coef <- apply(lasso_coefs, 2, mean)
sd_coef <- apply(lasso_coefs, 2, sd)
summary_coefs <- data.frame("coefficient"=colnames(lasso_coefs),
                            "mean"=mean_coef,
                            "sd"=sd_coef) %>%
  arrange(desc(abs(mean)))
write.csv(summary_coefs, "../data/lasso_only_numeric.csv", row.names=FALSE)
#####################################################################################
## Numerical + categorical values
## processing data for lasso
categorical <- vih_data[, sapply(vih_data, function(x) class(x)=="factor") ]
## NOTE(review): dummyVars() comes from the caret package, which is not
## attached above -- confirm caret is loaded elsewhere in the session.
categorical.bin <- predict(dummyVars(~., categorical), newdata = categorical)
head(categorical.bin)
## categorical + numerical values
input <- cbind(input, categorical.bin)
write.table(input, "../data/model_matrix_plus_categorical.tsv", sep = "\t")
## leave-one-out validation
## vector to store predictions
res <- numeric(nrow(input))
## matrix to store lasso coefficients
lasso_coefs <- matrix(0, nrow(input), ncol(input)+1)
## perform leave-one-out validation
for (i in 1:nrow(input)) {
  lambda.cv <- cv.glmnet(x=input[-i,], y = output[-i])$lambda.1se
  lasso <- glmnet(x=input[-i,], y = output[-i], lambda = lambda.cv)
  prediction <- predict(lasso, newx = input, type = "response", s = lambda.cv)
  res[i] <- prediction[i]
  lasso_coefs[i,] <- as.vector(coef(lasso))
}
mse_num_cat <- sqrt(mean((res-output)^2))
mse_num_cat
## plot predicted vs target values
validation <- data.frame("lasso_prediction"=res,
                         "values"=output)
theme_set(theme_light())
p_num_cat <- ggplot(validation, aes(x=values, y=lasso_prediction)) +
  geom_point(colour="steelblue", size= 2.5) +
  geom_abline(slope = 1,colour="red", size=1) +
  labs(x="Delta TCD4 values", y="LASSO_prediction") +
  theme(text = element_text(face="bold", size = 18))
plot(p_num_cat)
##################################################################################
#lineal_reg <- lm(output~., data = as.data.frame(categorical.bin))
#prediction <- predict(lineal_reg, newdata = as.data.frame(categorical.bin))
#plot(prediction, output)
#abline(0,1, col="red")
#summary(lineal_reg)
## Save both diagnostic plots side by side.
jpeg("../figures/cat_vars_check.jpeg")
grid.arrange(p_num, p_num_cat, nrow=1)
dev.off()
|
dbe79a609e3d17ee68269453f57546f636edd1b2 | a1b1547497f02a20eb6d82380d9e54b1eff8f2cc | /etc/2d-die-models/d10.R | da7af894a02ba8f13cfaa97f65c10f0b8a4fdd5e | [
"MIT"
] | permissive | ssoloff/dice-server-js | e4473533ea14ab2cc7d161374745bb58a40aa7a5 | 517787774d35f4c52fa041aeb04e3576aedf0dc9 | refs/heads/master | 2021-01-10T03:45:01.228281 | 2017-09-24T02:48:58 | 2017-09-24T02:48:58 | 37,000,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,194 | r | d10.R | # Matrix of pentagonal trapezohedron vertex coordinates (see
# http://www.georgehart.com/virtual-polyhedra/vrml/pentagonal_trapezohedron.wrl).
# matrix() fills column-wise by default, so each (x, y, z) triple below
# becomes one column: column k holds the 3D coordinates of vertex k.
vertices_3d = matrix(
  c(
    0.5257311, 0.381966, 0.8506508, # (x1, y1, z1)
    -0.2008114, 0.618034, 0.8506508,
    -0.6498394, 0, 0.8506508,
    0.5257311, -1.618034, 0.8506508,
    1.051462, 0, -0.2008114,
    0.8506508, 0.618034, 0.2008114,
    -0.5257311, 1.618034, -0.8506508,
    -1.051462, 0, 0.2008114,
    -0.8506508, -0.618034, -0.2008114,
    0.2008114, -0.618034, -0.8506508,
    0.6498394, 0, -0.8506508,
    -0.5257311, -0.381966, -0.8506508 # (x12, y12, z12)
  ),
  nrow=3,
  ncol=12
)

# Rotate die to generate 2D view from above while it is at rest on a surface
# (the above vertex coordinates were [arbitrarily?] generated by the author
# at an angle of 18 degrees off the x-axis).
angle = 18 * pi / 180
# byrow=TRUE: the rows as written form the rotation matrix about the z
# axis by -angle (cos/sin entries evaluated at -angle).
rotate_z_neg_angle = matrix(
  c(
    cos(-angle), -sin(-angle), 0,
    sin(-angle), cos(-angle), 0,
    0, 0, 1
  ),
  nrow=3,
  ncol=3,
  byrow=TRUE
)
vertices_3d_rot = rotate_z_neg_angle %*% vertices_3d

# Extract 2D vertex coordinates and order them in such a way to optimize
# drawing. The 2D vertices are as labeled below. (**) represents the origin.
#
#  ( 4)            +--> x
#                  |
#  ( 9)    ( 5)    V
#                  y
#      (**)
#  ( 3)    ( 2)
#  ( 8)    ( 6)
#      ( 1)
#
#      ( 7)
#
# Rows 1 and 2 of vertices_3d_rot are the projected (x, y); the z
# coordinate is dropped. Column k of vertices_2d is the vertex labeled
# (k) in the diagram above.
vertices_2d = matrix(
  c(
    vertices_3d_rot[1, 2], vertices_3d_rot[2, 2], # (x'1, y'1)
    vertices_3d_rot[1, 1], vertices_3d_rot[2, 1],
    vertices_3d_rot[1, 3], vertices_3d_rot[2, 3],
    vertices_3d_rot[1, 4], vertices_3d_rot[2, 4],
    vertices_3d_rot[1, 5], vertices_3d_rot[2, 5],
    vertices_3d_rot[1, 6], vertices_3d_rot[2, 6],
    vertices_3d_rot[1, 7], vertices_3d_rot[2, 7],
    vertices_3d_rot[1, 8], vertices_3d_rot[2, 8],
    vertices_3d_rot[1, 9], vertices_3d_rot[2, 9] # (x'9, y'9)
  ),
  nrow=2,
  ncol=9
)
|
892156996983ba025a11c4e3b853f19d12b27bc4 | c9afbb226c190ffc958ff1e1554aec53dda62248 | /run_analysis.R | 7de524046c839fe64d46d96b547717b9a477643b | [] | no_license | WunderBara/Getting_Cleaning_Data_Project | 48292f868dedf91965fefde8b5cc8d6db7d12917 | a16fc7b4c9e58ae983503b1a76f0d86afbae5490 | refs/heads/master | 2021-01-10T14:38:45.514336 | 2015-05-22T07:47:35 | 2015-05-22T07:47:35 | 36,000,447 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,571 | r | run_analysis.R |
## Getting & Cleaning Data course project: merges the UCI HAR test/train
## data, builds per-signal rows of mean and SD with their activity label,
## and writes tidy averages per (signal, activity, subject) to
## Signals_Avg.txt.
library(data.table)
## NOTE(review): setwd() inside a script makes it non-portable; prefer
## running from the project root and using relative paths.
setwd("Project_3_1")

##### Tasks 1-4 #####
# Merge "y" files and rename to "activities"
y_test <- read.table("test/y_Test.txt")
y_train <- read.table("train/y_train.txt")
y_Merged <- y_test
activities <- rbind(y_Merged, y_train)
colnames(activities) <- "act_nbr"
# Merge "subject" files
subject_test <- read.table("test/subject_test.txt")
subject_train <- read.table("train/subject_train.txt")
subject_merged <- subject_test
subject_merged <- rbind(subject_merged, subject_train)
# Merge Inertial Signals
filenames_test <- list.files("test/Inertial Signals")
filenames_train <- list.files("train/Inertial Signals")
path_test <- "test/Inertial Signals"
path_train <- "train/Inertial Signals"
# Prepare data frame for merged signals from "Inertial Signals"
# NOTE(review): this all-zero seed row carries act_nbr = 0 and is
# presumably dropped by the later merge() against the activity labels --
# verify that is intended.
Signals_Measures <- data.frame(0,0,0,0)
colnames(Signals_Measures) <- c("signal", "mean", "SD", "act_nbr")
# Load activity labels
activities_codetable <- data.frame(read.table("activity_labels.txt"))
colnames(activities_codetable) <- c("act_nbr", "activity")
# For each file from "Inertial Signals" do:
i <- 1
while (i <= length(filenames_test)) {
  # Prepare names for signals (measures)
  signal_name <- gsub("_test.txt", "", filenames_test[i])
  # Prepare path for files to merge
  path_test_file <- paste(path_test,"/",filenames_test[i], sep = "")
  path_train_file <- paste(path_train,"/",filenames_train[i], sep = "")
  # Prepare for file load: 128 fixed-width fields of 16 characters each
  num2 <- rep(16, times = 128)
  # Load files
  Signals_Test <- read.fwf(path_test_file, num2, sep = "")
  Signals_Train <- read.fwf(path_train_file, num2, sep = "")
  # Merge files
  Signals_Merged <- Signals_Test
  Signals_Merged <- rbind(Signals_Merged, Signals_Train)
  # Count rows for each kind of signal
  rows <- nrow(Signals_Merged)
  # Prepare a data frame with signal name, mean, standard deviation and activity number
  Signals_Measures_tmp <- data.frame("signal" = rep(signal_name, times = rows))
  Signals_Measures_tmp <- cbind(Signals_Measures_tmp, data.frame("mean" = rowMeans(Signals_Merged)))
  tmp_frame <- data.frame("SD" = apply(Signals_Merged,1, sd))
  Signals_Measures_tmp <- cbind(Signals_Measures_tmp, tmp_frame)
  Signals_Measures_tmp <- cbind(Signals_Measures_tmp, activities)
  Signals_Measures <- rbind(Signals_Measures,Signals_Measures_tmp)
  i <- i+1
}
# Merge with activity names
Signals_Measures_labeled <- merge(x = Signals_Measures, y = activities_codetable, by = "act_nbr")
# Order and ommit "act_nbr"
Signals_Measures_labeled <- Signals_Measures_labeled[,c(2,3,4,5)]

##### Task 5 #####
# Rename
subject <- subject_merged
colnames(subject) <- "subject"
# Prepare data frames
# NOTE(review): the zero seed row of Signals_Averages is never removed
# and ends up in Signals_Avg below -- verify that is intended.
Signals_Averages_Prep <- data.frame("signal" = 0, "mean" = 0, "SD" = 0, "activity" = 0)
Signals_Averages <- data.frame("signal" = 0, "mean" = 0, "SD" = 0, "activity" = 0, "subject" = 0)
signal_names <- unique(Signals_Measures_labeled$signal)
# For every kind of signal do:
i <- 1
while (i <= length(signal_names)) {
  # NOTE(review): filter() here needs dplyr, which is not attached above
  # (only data.table is); base stats::filter() would fail on a data frame.
  # Confirm dplyr is loaded in the session.
  Signals_Averages_Prep <- rbind(filter(Signals_Measures_labeled, signal == signal_names[i]))
  Signals_Averages_Prep <- cbind(Signals_Averages_Prep, "subject" = subject)
  Signals_Averages <- rbind(Signals_Averages, Signals_Averages_Prep)
  i = i+1
}
# Final computing of average measures for subject-activity-signal
Signals_Avg <- data.table(Signals_Averages)
Signals_Avg <- Signals_Avg[,.("average" = mean(mean)), by = .(signal, activity,subject)]
# Show all rows
print(Signals_Avg, nrow=316)
# Export to a txt file
# NOTE(review): the write.table() argument is spelled `row.names`;
# `row.name` works only via R's partial argument matching.
write.table(Signals_Avg, "Signals_Avg.txt", row.name=FALSE)
|
3bd7e7a370412e9976e14e2fce335bc089495a08 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/titrationCurves/examples/diwb_sa.Rd.R | 58309d10ee2b3bf13ea3b865b01bd1927b3b413c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 423 | r | diwb_sa.Rd.R | library(titrationCurves)
### Name: diwb_sa
### Title: Titration Curve for Diprotic Weak Base
### Aliases: diwb_sa

### ** Examples

### Extracted example code for titrationCurves::diwb_sa (titration curve
### of a diprotic weak base with a strong acid). `ex6` holds the computed
### curve; head() shows its first rows.
### Simple titration curve with equivalence points
ex6 = diwb_sa(eqpt = TRUE)
head(ex6)

### Overlay titration curves using different pKa1 and pKa2 values
diwb_sa(pka1 = 5, pka2 = 9, eqpt = TRUE)
diwb_sa(pka1 = 6, pka2 = 10, overlay = TRUE)
diwb_sa(pka1 = 4, pka2 = 8, overlay = TRUE)
|
547bf787f5a34324e60b795c5a3c179050aa8819 | dd4210bd0f7d79eff8364da3eaaa530c359f9603 | /tests/testthat/test-query.R | 0e729728f1008e390985e2c89f4da2655c387d11 | [] | no_license | cran/ctrialsgov | 99e63d9ed4f4881b568c36b849d6608d1548de4e | fcf779806dc207462cc4efaa827d2d84ad23f37a | refs/heads/master | 2023-08-14T07:08:24.693717 | 2021-10-18T15:00:02 | 2021-10-18T15:00:02 | 418,757,869 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,967 | r | test-query.R | library(ctrialsgov)
library(stringi)
library(lubridate)

# Unit tests for ctgov_query() (ctrialsgov package). Every test first
# loads the bundled sample data via ctgov_load_sample(), then checks
# that each filter argument constrains all returned rows accordingly.

# Keyword (*_kw) filters: the matching text column of every returned row
# must contain the keyword, case-insensitively.
test_that("check standard keyword queries", {
  ctgov_load_sample()
  res <- ctgov_query(description_kw = "cancer")
  expect_true(all(stri_detect(res$description, regex = "(?i)cancer")))
  res <- ctgov_query(sponsor_kw = "cancer")
  expect_true(all(stri_detect(res$sponsor, regex = "(?i)cancer")))
  res <- ctgov_query(brief_title_kw = "cancer")
  expect_true(all(stri_detect(res$brief_title, regex = "(?i)cancer")))
  res <- ctgov_query(official_title_kw = "cancer")
  expect_true(all(stri_detect(res$official_title, regex = "(?i)cancer")))
  res <- ctgov_query(intervention_desc_kw = "cancer")
  expect_true(all(
    stri_detect(res$intervention_model_description, regex = "(?i)cancer")
  ))
  res <- ctgov_query(conditions_kw = "cancer")
  expect_true(all(stri_detect(res$conditions, regex = "(?i)cancer")))
  res <- ctgov_query(population_kw = "cancer")
  expect_true(all(stri_detect(res$population, regex = "(?i)cancer")))
})

# Range filters: returned values must lie inside the requested interval.
# NOTE(review): the enrollment_range block is duplicated verbatim --
# possibly one copy was meant to exercise a different argument.
test_that("check range queries", {
  ctgov_load_sample()
  res <- ctgov_query(date_range = c("2010-01-01", "2010-12-31"))
  expect_true(all(year(res$start_date) == 2010L))
  res <- ctgov_query(enrollment_range = c(100, 120))
  expect_true(all(res$enrollment >= 100))
  expect_true(all(res$enrollment <= 120))
  res <- ctgov_query(enrollment_range = c(100, 120))
  expect_true(all(res$enrollment >= 100))
  expect_true(all(res$enrollment <= 120))
  res <- ctgov_query(minimum_age_range = c(5, 10))
  expect_true(all(res$minimum_age >= 5))
  expect_true(all(res$minimum_age <= 10))
  res <- ctgov_query(maximum_age_range = c(5, 10))
  expect_true(all(res$maximum_age >= 5))
  expect_true(all(res$maximum_age <= 10))
})

# Categorical filters: every returned row must equal the requested level.
test_that("check categorical queries", {
  ctgov_load_sample()
  res <- ctgov_query(study_type = "Interventional")
  expect_true(all(res$study_type == "Interventional"))
  res <- ctgov_query(allocation = "Randomized")
  expect_true(all(res$allocation == "Randomized"))
  res <- ctgov_query(intervention_model = "Parallel Assignment")
  expect_true(all(res$intervention_model == "Parallel Assignment"))
  res <- ctgov_query(observational_model = "Cohort")
  expect_true(all(res$observational_model == "Cohort"))
  res <- ctgov_query(primary_purpose = "Treatment")
  expect_true(all(res$primary_purpose == "Treatment"))
  res <- ctgov_query(time_perspective = "Prospective")
  expect_true(all(res$time_perspective == "Prospective"))
  res <- ctgov_query(masking_description = "Triple")
  expect_true(all(res$masking_description == "Triple"))
  res <- ctgov_query(sampling_method = "Non-Probability Sample")
  expect_true(all(res$sampling_method == "Non-Probability Sample"))
  res <- ctgov_query(phase = "Phase 2")
  expect_true(all(res$phase == "Phase 2"))
  res <- ctgov_query(gender = "All")
  expect_true(all(res$gender == "All"))
  res <- ctgov_query(sponsor_type = "Industry")
  expect_true(all(res$sponsor_type == "Industry"))
})
876201435171803f222dca217835c05b0b0cdffa | ba23868dea31f3303f02de0addd95c700fcc45b2 | /moodle-process.R | 5547022d812230ed0f05ebe8792074601ef31b92 | [] | no_license | ravellom/moodle-R | 8b6e8490877bebe70b85c5ffa55e49c59cf5996a | df2237a1f551b174ed5fc15515525de223225488 | refs/heads/main | 2023-05-26T00:55:55.390271 | 2021-06-07T23:47:45 | 2021-06-07T23:47:45 | 374,161,239 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,980 | r | moodle-process.R | ## Librerías necesarias
## Required libraries
library(tidyverse)
library(lubridate)
library(viridis)

## NOTE(review): this script assumes Moodle event logs are already loaded
## as data frames `mdl` and `mdl28` (with a `date` column and the Spanish
## column name "Nombre completo del usuario") -- confirm upstream. Plot
## labels are intentionally user-facing Spanish strings.
### Users ------------------------
# Group virtual-classroom users by full name and count their log events
mdl_usr <- mdl %>% group_by(User = mdl$`Nombre completo del usuario`) %>%
  summarise(n = n()) %>%
  #subset(n > 400, na.rm = TRUE) %>% # more than x accesses
  #subset(n < 5000, na.rm = TRUE) #%>% # fewer than x accesses
  arrange(desc(n)) %>% # sort
  filter(User != "-") # drop the placeholder user "-"
mdl_usr <- mdl_usr[-1,] # drop the Admin row (removes the first row)
dim(mdl_usr)

mdl_usr_28 <- mdl28 %>% group_by(User = Nombre.completo.del.usuario) %>%
  summarise(n = n()) %>%
  #subset(n > 400, na.rm = TRUE) %>% # more than x accesses
  #subset(n < 5000, na.rm = TRUE) #%>% # fewer than x accesses
  arrange(desc(n)) %>% # sort
  filter(User != "-") # drop the placeholder user "-"

# Bar chart of the users with the most accesses
ggplot(head(mdl_usr, 15), aes(x = reorder(User,n), n, fill=User)) +
  geom_bar(stat="identity", show.legend = FALSE) +
  coord_flip() +
  # theme_my_style() +
  labs(title="Usuarios con mayor actividad",
       #subtitle = "Fecha",
       #caption="fuente: clasesvirtuales.ucf.edu.cu",
       y="Cantidad de accesos",
       x="Usuarios",
       color=NULL,
       family = "Helvetica")

# Group users per day (one row per day/user pair)
mdl_usr2 <- mdl %>% group_by(fecha = mdl$date, User = mdl$`Nombre completo del usuario`) %>%
  summarise()
# Count distinct users connected on each day
mdl_usr3 <- mdl_usr2 %>% group_by(fecha) %>%
  summarise(no = n())

# Line chart of users connected per day
ggplot(mdl_usr3, aes(x = as_date(fecha), y = no)) +
  geom_line(color = "#1380A1", size = 1) +
  geom_hline(yintercept = 0, size = 1, colour="#333333") +
  # theme_my_style() +
  labs(title="Usuarios conectados por día",
       # subtitle = "Feb 10 - Mar 10 2021",
       # caption="fuente: clasesvirtuales.ucf.edu.cu",
       # y="Usuarios",
       x="Fecha")
  # geom_point(size = 2, colour="#333333", alpha = 1/3) +
  # geom_hline(yintercept = 50, size = 1, colour = "red", linetype = "dashed")

# NOTE(review): `no` is a column of mdl_usr3, not mdl_usr2 -- this line
# presumably meant mean(mdl_usr3[1:8,]$no); as written it averages NULL.
mean(mdl_usr2[1:8,]$no)
|
fd4d19cdef65cac5ebab3b61f1dacdace9522456 | 9eab5f652b893e0150c52ed2144f18bf1fa3aa80 | /assignment/rankall.R | 8be4480a53c7029978eec8ffe3e9a9e06bf12672 | [] | no_license | markelarauzo7/datasciencecoursera | b0e6bcfbb32c7783712e59b62f53d9dd954d8248 | ae89870f4ccfdce2bb3873a379906073e1acef3a | refs/heads/master | 2021-01-20T12:34:27.490847 | 2017-05-05T14:17:14 | 2017-05-05T14:17:14 | 69,502,705 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,545 | r | rankall.R | rankall <- function(outcome, num = 'best'){
## Read outcome data once -- the full table must stay intact so that
## every state in the loop below is filtered from the complete data set.
## BUG FIX: the original overwrote `outcome_data` inside the loop, so
## every state after the first was filtered from an already-filtered
## (effectively empty) frame.
outcome_data <- read.csv("outcome-of-care-measures.csv")
state_codes <- levels(outcome_data[, "State"])
## Map each valid outcome to its 30-day mortality-rate column index:
## 11 = heart attack, 17 = heart failure, 23 = pneumonia.
outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
## Validate the outcome once, before processing any state.
if (!(outcome %in% names(outcome_cols))) {
  ## Outcome does not exist
  stop("Invalid outcome.")
}
col_idx <- outcome_cols[[outcome]]
## For each state, rank hospitals by mortality rate (ties broken by
## hospital name) and print the hospital at the requested rank.
for (state in state_codes) {
  ## Filter into a fresh variable so outcome_data is preserved.
  state_data <- outcome_data[outcome_data[, "State"] == state, ]
  ## Keep hospital name (column 2) and the chosen mortality rate.
  chosen_death_rate <- state_data[, c(2, col_idx)]
  ## Convert the factor rate column to numeric; "Not Available" becomes NA
  ## (the coercion warning is expected).
  chosen_death_rate[, 2] <- as.numeric(as.character(chosen_death_rate[, 2]))
  ## Drop hospitals with no recorded rate.
  chosen_death_rate <- chosen_death_rate[!is.na(chosen_death_rate[, 2]), ]
  hospitals_ordered <- chosen_death_rate[order(chosen_death_rate[, 2],
                                               chosen_death_rate[, 1]), ]
  ## Side effect kept from the original: dump the ranked table per state.
  ## (The original also passed sep=";", which write.csv2 ignores with a
  ## warning, so it is dropped here.)
  write.csv2(hospitals_ordered, file = paste(state, num, ".csv", sep = ""))
  ## Print the hospital at the requested rank.
  if (num == "best") {
    print(hospitals_ordered[1, 1])
  } else if (num == "worst") {
    print(hospitals_ordered[nrow(hospitals_ordered), 1])
  } else if (is.numeric(num)) {
    print(hospitals_ordered[num, 1])
  } else {
    stop("Invalid num")
  }
}
}
08637e164a76f9d9e698445e923e061170c2af3a | 7e5e5139f817c4f4729c019b9270eb95978feb39 | /Introduction to R/Chapter 5-Data frame/7.R | 7f2b4851335f422fd8741f6cff01bc44326d010d | [] | no_license | Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track- | a45594a8a9078076fe90076f675ec509ae694761 | a50740cb3545c3d03f19fc79930cb895b33af7c4 | refs/heads/main | 2023-05-08T19:45:46.830676 | 2021-05-31T03:30:08 | 2021-05-31T03:30:08 | 366,929,815 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 790 | r | 7.R | # Selection of data frame elements (2)
# Instead of using numerics to select elements of a data frame, you can also use the variable names to select columns of a data frame.
#
# Suppose you want to select the first three elements of the type column. One way to do this is
#
# planets_df[1:3,2]
# A possible disadvantage of this approach is that you have to know (or look up) the column number of type, which gets hard if you have a lot of variables. It is often easier to just make use of the variable name:
#
# planets_df[1:3,"type"]
# Instructions
# 100 XP
# Select and print out the first 5 values in the "diameter" column of planets_df.
# The planets_df data frame from the previous exercise is pre-loaded
# Select first 5 values of diameter column
# (rows 1:5 of a single named column return a numeric vector, not a
# data frame, and the vector autoprints at the top level)
planets_df[1:5, "diameter"]
77cdbab3fbfc346ea9b9d00a222de9de1f8161c4 | 6cbc6e80ae07b8fb1fff0a5cad4ddcd29c358c0a | /R/ezr_add_bins.R | 630d17ed823c10f8cd48d320fc0dcc5c0f1e8d36 | [] | no_license | lenamax2355/easyr | d99638b84fd9768774fa7ede84d257b10e0bacf6 | 37ab2fe5c28e83b9b5b3c0e3002f2df45708016b | refs/heads/master | 2022-01-09T20:43:17.801623 | 2019-05-13T02:49:48 | 2019-05-13T02:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,776 | r | ezr_add_bins.R | #' Add Bins
#'
#'Bin a continous field into fewer values using variety of methods.
#'
#'
#' @param dataset dataset
#' @param column column. It should be numerical
#' @param style 'fixed','equal','quantile','pretty', or 'percentile'.
#' @param n_breaks number of breaks. Only matters if not fixed
#' @param fixed_breaks the fixed breaks. Only applicable if style is 'fixed'
#' @param new_col_prefix Default prefix is 'bucket_'
#' @param round_breaks number of digits to round too
#'
#' @return
#' @export
#'
#' @examples
ezr.add_bins=function (dataset, column, style = "equal", n_breaks = 10, fixed_breaks = NULL,
new_col_prefix = "bucket_", round_breaks=0)
{
if (style %in% c("fixed", "equal", "quantile", "pretty",
"percentile") == FALSE) {
stop("Style must be in fixed','equal','quantile','pretty', 'percentile', or 'pretty' ")
}
if (style %in% c("fixed", "equal", "quantile", "pretty")) {
if (style == "fixed") {
n_breaks = length(fixed_breaks) - 1
}
breaks = classInt::classIntervals(dataset[[column]],
n = n_breaks, style = style, fixedBreaks = fixed_breaks
)$brks
breaks = base::unique(round(breaks,round_breaks))
breaks = cut(dataset[[column]], breaks = breaks, include.lowest = TRUE,
ordered_result = TRUE, dig.lab = 10)
new_col_prefix = paste0(new_col_prefix, column)
dataset[[new_col_prefix]] = breaks
}
if (style %in% "percentile") {
new_col_prefix = paste0(new_col_prefix, column)
dataset = dataset %>% mutate(percentile = ntile(!!rlang::sym(column),
n = n_breaks))
names(dataset)[ncol(dataset)] = new_col_prefix
}
return(dataset)
}
|
4f66c80825d6f74e31e5dec5cef25064dc84a602 | 36cb1fddeb59b334e16f5a635c6dc8a1a7ac5a12 | /ui.R | e9f17fbe1ab4c70edc46862a03e58b9a6946b466 | [] | no_license | amerus/US-Opioid-Prescribing | 17a55c575ee7831f2c05c87b1f2d375a31590843 | a58b40943be1f26b5b48f3889b678168751f8cb3 | refs/heads/master | 2020-04-11T09:55:56.564333 | 2019-01-19T19:45:03 | 2019-01-19T19:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,007 | r | ui.R |
# Define UI
shinyUI(
dashboardPage(
dashboardHeader(title = 'Opioids by Specialty'),
dashboardSidebar(
selectInput("specialty",
label = "Specialty:",
choices = specialties,
selected = 'Family Practice'
),
selectInput("checkbox",
label = "Additional States",
choices = states,
multiple = TRUE,
selected = 'TN')
),
dashboardBody(
fluidRow(
box(width = 12,
title = "Opioids Prescribed in 2014 by State and Specialty", status = "primary", solidHeader = TRUE,
plotOutput("drugbars", height = 600)
)
)
)
)
)
|
ed4b5064714bd1324e78ca11689f6f88e11f177f | f3ba5c556dfc50ca1bce1c0dfe5b4cee5e3d3363 | /R/separate_sentences.R | 8a73e26908f071ffa7e8d27e1e0faf160bfd70ae | [] | no_license | HughParsonage/TeXCheckR | 48b9ae8f94b2801f984a66e9f3ecb6c7f1f831f4 | 09826878215cf56bc24a7e273084bfda3954a73b | refs/heads/master | 2023-02-22T10:02:17.707587 | 2023-02-10T09:30:48 | 2023-02-10T09:30:48 | 87,679,432 | 8 | 2 | null | 2020-09-18T04:35:17 | 2017-04-09T03:12:11 | TeX | UTF-8 | R | false | false | 1,983 | r | separate_sentences.R | #' Put sentences on their own line
#'
#' @param filename A tex or knitr file in which to separate sentences.
#' @param hanging_footnotes (logical, default: \code{FALSE}) Should footnotes be indented?
#' @return NULL. The function is called for its side-effect: rewriting \code{filename} with separated sentences.
#' @export
separate_sentences <- function(filename, hanging_footnotes = FALSE) {
  lines <- readLines(filename)
  # Locate knitr code chunks (<<...>>= ... @) so their contents are
  # left untouched below.
  knitr_start <- grepl(">>=", lines, fixed = TRUE)
  knitr_stop <- grepl("^@$", lines, perl = TRUE)
  stopifnot(length(knitr_start) == length(knitr_stop))
  # TRUE for every line inside a knitr chunk (running start/stop balance).
  in_knitr <- as.logical(cumsum(knitr_start) - cumsum(knitr_stop))
  # Lines containing an unescaped % (a TeX comment) are also left alone.
  lines_with_percent <- grepl("(?<!(\\\\))%", lines, perl = TRUE)
  # Insert "%\n" before footnote/footcite macros and "\n" after sentence
  # ends ("." optionally followed by "}", whitespace, and a capital).
  # The embedded "\n" only become separate physical lines once the vector
  # is written to disk with writeLines() below.
  new_lines <-
    if_else(in_knitr | lines_with_percent,
            lines,
            gsub(",\\footnote", ",%\n\\footnote", fixed = TRUE,
            gsub(",\\footcite", ",%\n\\footcite", fixed = TRUE,
            gsub(".\\footnote", ".%\n\\footnote", fixed = TRUE,
            gsub(".\\footcite", ".%\n\\footcite", fixed = TRUE,
            gsub("\\.\\s+([A-Z])", "\\.\n\\1", perl = TRUE,
            gsub("\\.[}]\\s+([A-Z])", "\\.}\n\\1", perl = TRUE,
            lines)))))))
  # Write once so the file can be re-read with each sentence on its own line.
  writeLines(new_lines, filename)
  if (hanging_footnotes && !any(in_knitr)) {
    # Re-read: the "\n" inserted above are now real line breaks, so the
    # parser sees one sentence/footnote per element.
    new_lines <- read_lines(filename)
    parsed_doc <- parse_tex(new_lines)
    footnote_extraction <- extract_mandatory_LaTeX_argument(new_lines,
                                                           "footnote",
                                                           by.line = TRUE,
                                                           parsed_doc = parsed_doc)
    footnote_lines <- footnote_extraction[["line_no_min"]]
    # Indent the first line of every footnote with a tab.
    new_lines[footnote_lines] <- paste0("\t", new_lines[footnote_lines])
  }
  writeLines(new_lines, filename)
}
|
c04169fb2325aa4270830fab351ee58e961a6f0d | ee735ad4c975cd824c63e4c87b283e30485ee841 | /plot2.R | 251329ac3fa1a0bb7dfe79915a7f67f2f40dc48a | [] | no_license | andrewhr/ExData_Plotting1 | e3d1b2f0f495827b8b798557483de325990f65a3 | 8ba5da724db81022cc5e674030f31378556ec334 | refs/heads/master | 2021-01-17T20:02:35.963763 | 2015-04-12T21:33:05 | 2015-04-12T21:33:34 | 33,800,314 | 0 | 0 | null | 2015-04-12T02:32:28 | 2015-04-12T02:32:28 | null | UTF-8 | R | false | false | 199 | r | plot2.R | source("power.R")
# Render plot 2: a 480x480 PNG line chart of global active power over
# time.  The `power` data frame is created by power.R, sourced above.
png("plot2.png", width = 480, height = 480)
with(power, plot(Time, Global_active_power,
                 type = "l",
                 xlab = "",
                 ylab = "Global Active Power (kilowatts)"))
dev.off()
|
a6e8dc3428ca44ae91303c9d37805752e1ad40b0 | 0085d00ce341967d940b4f08e94e1482bc1d340f | /scatterplot.R | 57928b2095a6c2397bc731def505be48bf7b3e65 | [] | no_license | ksedivyhaley/kates-make-demo | 1694940f317ff8d74fe14fc193dfe3b970e1c880 | 124c82da2a9433600d352572c8fda407643b9c46 | refs/heads/master | 2021-06-26T06:27:25.804825 | 2017-09-13T21:11:12 | 2017-09-13T21:11:12 | 103,449,327 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 817 | r | scatterplot.R | library(tidyverse)
library(ggplot2)

## @knitr scatterplot

data <- read_csv("tidy_data.csv")

# Check that we read in a data frame (read_csv returns a tibble, which
# inherits from data.frame) before trying to graph it.
# BUG FIX: the original error path interrogated `class(df)`, but no object
# named `df` exists here, so the error handler itself errored; the message
# also referred to a non-existent `analysed_data`.
if(!is.data.frame(data)){
  stop(paste(c("data must be a data frame for graphing. Class",
               class(data), "supplied."), collapse=" "))
}

# Columns the plot below depends on.
cols_needed <- c("Race", "Type", "Hairiness")
cols_missing <- !(cols_needed %in% colnames(data))
if(sum(cols_missing) > 0){
  stop(paste(c("data is missing column(s)",
               cols_needed[cols_missing]), collapse=" "))
}

# Jittered scatterplot of hairiness by race, faceted by hair type.
ggplot(data, aes(x=Race, y=Hairiness, color=Race)) +
  facet_wrap(~Type) +
  geom_jitter() +
  labs(title="Figure 2: Scatterplot of Hair Weight by Race & Hair Type",
       y = "Hairiness (% body weight)") +
  theme(legend.position="none") #redundant with x-axis label
|
c943fd326d95a328f9c287d76bace7a7be48d0f8 | 17f2c1d9a5e279fc027fe8ae6a1c8b41f605097d | /man/lava.tobit.Rd | 9b6dfee4241f0300a5612ab5442a9d3c744cdae9 | [] | no_license | kkholst/lava.tobit | 97a411e7ebd2efdd6f2ec0389f0562b6eca57864 | 73cb4adf7a0f045b4c78fec2950099b663564a09 | refs/heads/master | 2020-12-24T16:06:40.982381 | 2020-09-24T08:14:38 | 2020-09-24T08:14:38 | 28,052,268 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,139 | rd | lava.tobit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lava.tobit-package.R
\docType{package}
\name{lava.tobit}
\alias{lava.tobit}
\alias{lava.tobit-package}
\title{Estimation and simulation of probit and tobit latent variable models}
\description{
Framework for estimating parameters and simulating data from latent variable
models with binary and censored observations. Plugin for the \code{lava}
package.
}
\details{
\tabular{ll}{ Package: \tab lava.tobit \cr Type: \tab Package \cr Version:
\tab 0.4-5 \cr Date: \tab 2012-03-15 \cr License: \tab GPL-3 \cr LazyLoad:
\tab yes \cr }
}
\examples{
library('lava.tobit')
m <- lvm(list(c(y,z) ~ x, y~z))
## Simulate 200 observation from path analysis model
## with all slopes and residual variances set to 1 and intercepts 0:
d <- sim(m,200,seed=1)
## Dichotomize y and introduce censoring on z
d <- transform(d, y=as.factor(y>0), z=Surv(z,z<2))
## if (requireNamespace("mets",quietly=TRUE)) {
## e <- estimate(m,d,control=list(trace=1),estimator="gaussian")
## effects(e,y~x)
## }
}
\author{
Klaus K. Holst Maintainer: <kkho@biostat.ku.dk>
}
\keyword{package}
|
226a83ddad7f43a3bf86eb61c7ade5fd51e3ba2a | 11637afdc8d299a222eb78441c0e2c1c8d3e092f | /vignettes/intro.R | 143d71006911308eff64af1085d3aef3572fbe52 | [
"MIT"
] | permissive | land23/recharts | 62f21756819e28811962c6fd72962b304da623a5 | ab0bdfc7cbf1617f70ea034e657f24a7ba2f5c64 | refs/heads/master | 2020-06-15T17:30:05.927222 | 2016-11-01T13:05:47 | 2016-11-01T13:05:47 | 75,275,205 | 1 | 0 | null | 2016-12-01T09:12:28 | 2016-12-01T09:12:28 | null | UTF-8 | R | false | false | 4,873 | r | intro.R | ## ------------------------------------------------------------------------
library(recharts)
echartr(iris, ~Sepal.Length, ~Sepal.Width, series = ~Species)
## ------------------------------------------------------------------------
head(mtcars)
## ------------------------------------------------------------------------
echartr(mtcars, wt, mpg)
## ---- echo=FALSE---------------------------------------------------------
str(args(echartr))
## ------------------------------------------------------------------------
knitr::kable(recharts:::validChartTypes[,c(1:3,5)])
## ------------------------------------------------------------------------
echartr(mtcars, wt, mpg, factor(am, labels=c('Automatic', 'Manual')))
## ------------------------------------------------------------------------
echartr(mtcars, wt, mpg, am, weight=gear, type='bubble')
## ------------------------------------------------------------------------
d <- data.table::dcast(mtcars, carb+gear~., mean, value.var='mpg')
names(d)[3] <- 'mean.mpg'
d$carb <- as.character(d$carb)
echartr(d, carb, "mean.mpg", gear, type=c('vbar', 'vbar', 'line')) %>%
setSymbols('emptycircle')
## ------------------------------------------------------------------------
echartr(d, carb, mean.mpg, gear, type='line',
subtype=c('stack + smooth', 'stack + dotted', 'smooth + dashed')) %>%
setSymbols('emptycircle')
## ------------------------------------------------------------------------
g = echartr(mtcars, wt, mpg, factor(am, labels=c('Automatic', 'Manual')))
## ------------------------------------------------------------------------
g %>% setSeries(series=2, symbolSize=8, symbolRotate=30)
## ------------------------------------------------------------------------
g %>% addMarkLine(data=data.frame(type='average', name1='Avg'))
## ------------------------------------------------------------------------
g %>% addMarkPoint(series=1, data=data.frame(type='max', name='Max'))
## ------------------------------------------------------------------------
link <- 'https://stat.ethz.ch/R-manual/R-devel/library/datasets/html/mtcars.html'
g %>% setTitle('wt vs mpg', paste0('[Motor Trend](', link, ')'),
textStyle=list(color='red'))
## ------------------------------------------------------------------------
g %>% setLegend(selected='Automatic', textStyle=list(color='lime'))
## ------------------------------------------------------------------------
g %>% setToolbox(lang='en', pos=2)
## ------------------------------------------------------------------------
g %>% setDataZoom()
## ------------------------------------------------------------------------
g %>% setXAxis(min=0) %>% setYAxis(min=0)
## ------------------------------------------------------------------------
g %>% setTheme('dark', calculable=TRUE)
## ------------------------------------------------------------------------
g %>% setSymbols(c('heart', 'star6'))
## ------------------------------------------------------------------------
g %>% setSeries(series=2, symbolSize=8, symbolRotate=30) %>%
addMarkLine(data=data.frame(type='average', name1='Avg')) %>%
addMarkPoint(series=1, data=data.frame(type='max', name='Max')) %>%
setTitle('wt vs mpg', paste0('[Motor Trend](', link, ')'),
textStyle=list(color='red')) %>%
setLegend(selected='Automatic', textStyle=list(color='lime')) %>%
setToolbox(lang='en', pos=2) %>% setDataZoom() %>%
setTheme('dark', calculable=TRUE) %>% setSymbols(c('heart', 'star6'))
## ------------------------------------------------------------------------
chordEx1 = list(
title = list(
text = '测试数据',
subtext = 'From d3.js',
x = 'right',
y = 'bottom'
),
tooltip = list(
trigger = 'item',
formatter = JS('function(params) {
if (params.indicator2) { // is edge
return params.value.weight;
} else {// is node
return params.name
}
}')
),
toolbox = list(
show = TRUE,
feature = list(
restore = list(show = TRUE),
magicType = list(show = TRUE, type = c('force', 'chord')),
saveAsImage = list(show = TRUE)
)
),
legend = list(
x = 'left',
data = c('group1', 'group2', 'group3', 'group4')
),
series = list(
list(
type = 'chord',
sort = 'ascending',
sortSub = 'descending',
showScale = TRUE,
showScaleText = TRUE,
data = list(
list(name = 'group1'),
list(name = 'group2'),
list(name = 'group3'),
list(name = 'group4')
),
itemStyle = list(
normal = list(
label = list(show = FALSE)
)
),
matrix = rbind(
c(11975, 5871, 8916, 2868),
c( 1951, 10048, 2060, 6171),
c( 8010, 16145, 8090, 8045),
c( 1013, 990, 940, 6907)
)
)
)
)
echart(chordEx1)
|
39dafd1b77fbcf73b5092fbab697f689353bd03f | 29585dff702209dd446c0ab52ceea046c58e384e | /spnet/R/world.map.simplified.r | 716b3e6497ab2ba96ff849866049608815e96dce | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,881 | r | world.map.simplified.r | #' The TM_WORLD_BORDERS_SIMPL-0.3 world map.
#'
#' The simplified version of the world map provided by Bjorn Sandvik, thematicmapping.org.
#'
#' The map was imported in R as follows:
#'
#' \preformatted{
#' require(maptools)
#' world.map.simplified <- readShapeSpatial("~/TM_WORLD_BORDERS_SIMPL-0.3/TM_WORLD_BORDERS_SIMPL-0.3.shp")
#' slot(world.map.simplified, 'data')[,'NAME'] <- iconv(slot(world.map.simplified, 'data')[,'NAME'], "latin1", "UTF-8")
#' save(world.map.simplified, file="data/world.map.simplified.rda")
#' }
#'
#' The result is a \code{SpatialPolygonsDataFrame} object. Its data slot contains a data frame with 246 observations and 11 variables:
#'
#' \itemize{
#' \item \strong{FIPS.} FIPS 10-4 Country Code
#' \item \strong{ISO2.} ISO 3166-1 Alpha-2 Country Code
#' \item \strong{ISO3.} ISO 3166-1 Alpha-3 Country Code
#' \item \strong{UN.} ISO 3166-1 Numeric-3 Country Code
#' \item \strong{NAME.} Name of country/area
#' \item \strong{AREA.} Land area, FAO Statistics (2002)
#' \item \strong{POP2005.} Population, World Population Prospects (2005)
#' \item \strong{REGION.} Macro geographical (continental region), UN Statistics
#' \item \strong{SUBREGION.} Geographical sub-region, UN Statistics
#' \item \strong{LON.} Longitude
#' \item \strong{LAT.} Latitude
#' }
#'
#' @note Note from the TM_WORLD_BORDERS_SIMPL-0.3's README file:
#' \itemize{
#' \item Use this dataset with care, as several of the borders are disputed.
#' \item The original shapefile (world_borders.zip, 3.2 MB) was downloaded from the Mapping Hacks website: http://www.mappinghacks.com/data/. The dataset was derived by Schuyler Erle from public domain sources. Sean Gilles did some clean up and made some enhancements.
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A \code{SpatialPolygonsDataFrame}.
#' @name world.map.simplified
NULL |
992be680b78c4e86c6748fb3d2ece846e067777c | 8b78d8e12a23338f3dd5c90710c81192b240f00b | /man/uniReg.Rd | ebdad538afbdfce05db04d00d2da5f3b89625148 | [] | no_license | vando026/ahri | 96fef7fbab694d8e0bc56719255aa32b4c2a274d | a8940fb1a1af8a34a2065311966cc4bdd311fe84 | refs/heads/master | 2022-06-29T19:38:39.647429 | 2022-06-13T17:09:16 | 2022-06-13T17:09:16 | 241,947,281 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 922 | rd | uniReg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intCens.R
\name{uniReg}
\alias{uniReg}
\title{uniReg}
\usage{
uniReg(
xpath,
InFile,
OutFile,
Model,
ID = NULL,
inf = "Inf",
iter = 5000,
cthresh = 1e-04,
r = 1,
printout = FALSE,
ign_stout = TRUE
)
}
\arguments{
\item{xpath}{The path to the unireg executable.}
\item{InFile}{txt file to be input}
\item{OutFile}{txt file to be output}
\item{Model}{equation to be given}
\item{ID}{name of subject ID}
\item{inf}{Value for infinite, default is "Inf"}
\item{iter}{Number of iterations}
\item{cthresh}{Threshold for convergence}
\item{r}{Threshold for convergence}
\item{printout}{Print results to screen}
\item{ign_stout}{For Linux systems}
}
\description{
Wrapper for Intcens executable by Zeng et al 2016. See
http://dlin.web.unc.edu/software/intcens/ to download the intcens program
for R.
}
\keyword{internal}
|
63459f38becf966145ed37263bf67228e3eb65cd | 93e3f7e05e6020e8900714c714af83435f6a6d9c | /Taiga_Weight.R | 8e3bab08c79399ce150d3e2cbbe7cad351886e24 | [] | no_license | suhasxavier/CADashboard_R | 371656132e80bbdddcc7a8e4c875a6f80b21099c | ce0fd3e46f1bcef4bbd3e7028de291e3a7810757 | refs/heads/master | 2021-01-17T17:56:20.822793 | 2016-02-13T23:10:31 | 2016-02-13T23:10:31 | 46,106,247 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,612 | r | Taiga_Weight.R | m_table=read.csv("c:/Users/Suhas Xavier/Desktop/taiga_membership_table.csv")
# Taiga activity grading script: scores each student's last 7 daily
# snapshots of Kanban-board activity and appends the scores and
# notification messages to output CSVs.  m_table (team membership,
# loaded above) links each email to a project and a course mode.
taiga_table=read.csv("c:/Users/Suhas Xavier/Desktop/taiga_data1.csv")
usernames=m_table$email
teamnames=m_table$project_name
#Calculate individual weight
#different grading schemes for different modes (Online and f2f)
for(i in 1:length(unique(usernames)))
{
  # Team name, activity rows, latest snapshot date, and course mode
  # for the current student.
  tname=as.character(m_table[m_table$email==usernames[i],"project_name"])
  this_user_data=taiga_table[taiga_table$email==as.character(usernames[i]),]
  dates=as.character(tail(this_user_data$date,1))
  course_val=as.character(m_table[m_table$email==usernames[i],"course"])
  # Face-to-face section: expected activity level (exp_val) is 3.
  if(course_val=="CST316-F2F")
  {
    # Only grade students with at least a full week of snapshots.
    if(nrow(this_user_data)>=7)
    {
      exp_val=3
      #diff gives difference of all values, the unique values indicate the actual changes, sum them up and average over 3
      df2=tail(this_user_data,7)
      # Count of distinct day-to-day changes in the "in progress" and
      # "to test" card counts over the last 7 snapshots.
      inp1=length(unique(diff(df2$in_progress)))-1
      tot1=length(unique(diff(df2$to_test)))-1
      tot_len=(sum(inp1,tot1))
      msg=""
      fin_score=0
      # Score bands: 0 = none, 3 = too little or too much, 5 = consistent.
      if(tot_len<=1)
      {
        fin_score=0
        msg=paste(dates,"NO Taiga Activity!!",sep=" ")
      }
      else if(tot_len>1 & tot_len<=2)
      {
        fin_score=3
        msg=paste(dates,"Need more Taiga Activity!!",sep=" ")
      }
      else if(tot_len>2 & tot_len<=3)
      {
        fin_score=5
        msg=paste(dates,"Consistent Taiga Activity",sep=" ")
      }
      else if(tot_len>3)
      {
        fin_score=3
        msg=paste(dates,"Too many tasks assigned to you!",sep=" ")
      }
      # Append the score row and the notification row to the output CSVs.
      df_temp=data.frame(usernames[i],msg)
      df_holder=data.frame(dates,usernames[i],fin_score,tname,exp_val)
      print(df_holder)
      write.table(df_holder,file="C:/Users/Suhas Xavier/Desktop/Taiga_Weight.csv",row.names = F,col.names = F,sep=",",append = T,na="0")
      write.table(df_temp,file="C:/Users/Suhas Xavier/Desktop/notification_table.csv",row.names = F,col.names = F,sep=",",append = T)
      # print(df_holder)
      print(df_temp)
    }
  }
  #for online
  # Online section: expected activity level is 2, and the score bands
  # differ slightly from F2F (an integer tot_len of 2 earns 5 here but
  # only 3 above).  NOTE(review): confirm that asymmetry is intentional.
  else if(course_val=="CST316 - Online")
  {
    if(nrow(this_user_data)>=7)
    {
      exp_val=2
      #diff gives difference of all values, the unique values indicate the actual changes, sum them up and average over 3
      df2=tail(this_user_data,7)
      inp1=length(unique(diff(df2$in_progress)))-1
      tot1=length(unique(diff(df2$to_test)))-1
      tot_len=(sum(inp1,tot1))
      msg=""
      fin_score=0
      if(tot_len<=1)
      {
        fin_score=0
        msg=paste(dates,"NO Taiga Activity!!",sep=" ")
      }
      else if(tot_len>1 & tot_len<2)
      {
        fin_score=3
        msg=paste(dates,"Need more Taiga Activity!!",sep=" ")
      }
      else if(tot_len>=2 & tot_len<=3)
      {
        fin_score=5
        msg=paste(dates,"Consistent Taiga Activity!!",sep=" ")
      }
      else if(tot_len>3) {
        fin_score=3
        msg=paste(dates,"Too many tasks assigned to you!",sep=" ")
      }
      df_temp=data.frame(usernames[i],msg)
      df_holder=data.frame(dates,usernames[i],fin_score,tname,exp_val)
      print(df_holder)
      write.table(df_holder,file="C:/Users/Suhas Xavier/Desktop/Taiga_Weight.csv",row.names = F,col.names = F,sep=",",append = T,na="0")
      write.table(df_temp,file="C:/Users/Suhas Xavier/Desktop/notification_table.csv",row.names = F,col.names = F,sep=",",append = T)
      # print(df_holder)
      print(df_temp)
    }
  }
}
# Close any file connections left open by the appends above.
closeAllConnections()
7ab31c896b2b57cae3824dfa7be8102ea5fa41cf | 60c18f7761ce302f533cea0faca5325c14de61ab | /src/visualization_3_analysis_v5.R | 37109e6e82821bbb47afe49ea82bccbc44a96ba4 | [] | no_license | gentok/ForeignerJapan | 6d52d9df67c2faa58c3edcaba3057af7139f582a | 3512684dfd9f308351ba3e6efab89d03390d2964 | refs/heads/master | 2023-03-01T05:54:51.777569 | 2021-02-09T02:53:23 | 2021-02-09T02:53:23 | 292,305,490 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 35,669 | r | visualization_3_analysis_v5.R | #' ---
#' title: "Visualization 3: Analysis Results"
#' author: "Fan Lu & Gento Kato"
#' date: "January 26, 2020"
#' ---
#'
#' # Preparation
## Clean Up Space
## NOTE(review): rm(list=ls()) wipes the whole workspace; acceptable for a
## stand-alone spin script, but do not source() this file from other code.
rm(list=ls())
## Set Working Directory (Automatically) ##
require(rstudioapi); require(rprojroot)
# When run inside RStudio, start from the script's own directory so that
# find_root() below can walk up to the project root.
if (rstudioapi::isAvailable()==TRUE) {
  setwd(dirname(rstudioapi::getActiveDocumentContext()$path));
}
# Project root = the directory containing the marker file "thisishome.txt".
projdir <- find_root(has_file("thisishome.txt"))
cat(paste("Working Directory Set to:\n",projdir))
setwd(projdir)
## Directories for Main Effect Data
visdtdir <- paste0(projdir, "/out/visdt.rds")
visdtmdir <- paste0(projdir, "/out/visdtm.rds")
visdtalldir <- paste0(projdir, "/out/visdtall.rds")
# "x" variants: presumably an alternative specification; they are not read
# in this chunk -- TODO confirm where they are used.
visdtxdir <- paste0(projdir, "/out/visdtx.rds")
visdtxmdir <- paste0(projdir, "/out/visdtxm.rds")
visdtxalldir <- paste0(projdir, "/out/visdtxall.rds")
## Directories for Mediation Effect Data
## (numbered by matching scheme: unmatched, no distance adj., then
## distance-adjusted with lambda = 50/100/200/350 km)
coefdtdir0 <- paste0(projdir,"/out/medoutcoefdt_unmatched_v5.rds")
coefdtdir1 <- paste0(projdir,"/out/medoutcoefdt_matchednoL_v5.rds")
coefdtdir2 <- paste0(projdir,"/out/medoutcoefdt_matchedL50_v5.rds")
coefdtdir3 <- paste0(projdir,"/out/medoutcoefdt_matchedL100_v5.rds")
coefdtdir4 <- paste0(projdir,"/out/medoutcoefdt_matchedL200_v5.rds")
coefdtdir5 <- paste0(projdir,"/out/medoutcoefdt_matchedL350_v5.rds")
## Packages
require(ggplot2)
#'
#' # Main Effects
#'
## Import Required Data
visdt <- readRDS(visdtdir)
visdtm <- readRDS(visdtmdir)
visdtall <- readRDS(visdtalldir)
#'
#' ## OLS
#'
require(ggplot2)
## Coefficient plot: OLS effect of university education on suffrage
## support, by age group (rows after coord_flip), gender (facet rows),
## and matching data set (facet columns).
p <- ggplot(visdt, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  # Thin bars = 95% CI, thick bars = 90% CI; transparency encodes the
  # significance level stored in `pstar`.
  # NOTE(review): negative dodge width reverses the dodge order --
  # presumably so categories stack top-to-bottom after coord_flip().
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
                position=position_dodge(width=-0.7), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
                position=position_dodge(width=-0.7), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar),
             position=position_dodge(width=-0.7), size=3) +
  facet_grid(gender ~ data) +
  scale_y_continuous(breaks = c(-0.1,-0.05,0.00,0.05)) +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop=FALSE) +
  ylab("OLS Coefficient\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). \nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1).") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
## Save raster (png) and vector (pdf) versions of the figure.
ggsave(paste0(projdir,"/out/maineffectplot1.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectplot1.pdf"),p,width=8,height=5)
require(ggplot2)
p <- ggplot(visdt[which(visdt$data%in%c("Unmatched",
"Matched without \nDistance Adj.",
"Matched with \nLambda = 100km")),],
aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
geom_hline(aes(yintercept=0), linetype=2) +
geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
position=position_dodge(width=-0.7), size=0.5, width=0.3) +
geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
position=position_dodge(width=-0.7), size=1.5, width=0.0) +
geom_point(aes(alpha=pstar),
position=position_dodge(width=-0.7), size=3) +
facet_grid(gender ~ data) +
scale_y_continuous(breaks = c(-0.1,-0.05,0.00,0.05)) +
scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop=FALSE) +
ylab("OLS Coefficient\n(Thin Line = 95% CI; Thick Line 90% CI)") +
xlab("Age") +
labs(caption="Treatment: University education (1:attained, 0:not attained). \nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1).") +
coord_flip() + theme_bw() +
theme(legend.position = "bottom",
strip.text.x = element_text(size=11),
strip.text.y = element_text(angle=0,size=11),
strip.background = element_rect(fill=NA,color=NA),
plot.caption = element_text(hjust=0),
plot.subtitle = element_text(hjust=0.5))
# Display the assembled plot, then export it as PNG and PDF.
p
ggsave(paste0(projdir,"/out/maineffectplot2.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectplot2.pdf"),p,width=8,height=5)
#'
#' ## Multinomial Logit (Disagree vs. Agree)
#'
require(ggplot2)
# Dot-and-whisker plot of multinomial-logit (agree over disagree) estimates in
# `visdtm`, by age (flipped to the y-axis), faceted gender x dataset.
# Transparency (`pstar`) encodes significance; negative dodge width reverses
# the within-group stacking order.
p <- ggplot(visdtm, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
                position=position_dodge(width=-0.7), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
                position=position_dodge(width=-0.7), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar),
             position=position_dodge(width=-0.7), size=3) +
  facet_grid(gender ~ data) +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop=FALSE) +
  ylab("Multinomial Logit Coefficient: Agree over Disagree\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). \nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1).") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectplot1m.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectplot1m.pdf"),p,width=8,height=5)
require(ggplot2)
# Same plot restricted to three datasets (unmatched, standard matching, and
# lambda = 100km distance-adjusted matching).
p <- ggplot(visdtm[which(visdtm$data%in%c("Unmatched",
                                          "Matched without \nDistance Adj.",
                                          "Matched with \nLambda = 100km")),],
            aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
                position=position_dodge(width=-0.7), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
                position=position_dodge(width=-0.7), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar),
             position=position_dodge(width=-0.7), size=3) +
  facet_grid(gender ~ data) +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop=FALSE) +
  ylab("Multinomial Logit Coefficient: Agree over Disagree\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). \nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1).") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=11),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectplot2m.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectplot2m.pdf"),p,width=8,height=5)
#'
#' ## Compare OLS and Multinomial Logit
#'
# Side-by-side comparison of OLS and multinomial-logit estimates on the
# unmatched data. `dummy` feeds per-facet extreme values through geom_blank()
# so each method facet gets its own y-axis range.
visdtsub <- subset(visdtall, data=="Unmatched")
visdtsub$method <- factor(gsub("Multinomial Logit\nAgree vs. Disagree",
                               "Multinomial Logit\nDisagree vs. Agree",
                               visdtsub$method),
                          levels = c("OLS","Multinomial Logit\nDisagree vs. Agree"))
dummy <- data.frame(est = c(range(c(subset(visdtall, method=="OLS")$lci95,
                                    subset(visdtall, method=="OLS")$uci95),
                                  na.rm = TRUE),
                            range(c(subset(visdtall, method!="OLS")$lci95,
                                    subset(visdtall, method!="OLS")$uci95),
                                  na.rm = TRUE)),
                    gender = "Female", age = 45,
                    method = factor(rep(levels(visdtsub$method), each=2),
                                    levels = levels(visdtsub$method)))
require(ggplot2)
p <- ggplot(visdtsub, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
                position=position_dodge(width=-0.7), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
                position=position_dodge(width=-0.7), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar),
             position=position_dodge(width=-0.7), size=3) +
  geom_blank(data=dummy) +
  facet_grid(gender ~ method, scales = "free_x") +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop = FALSE) +
  # NOTE(review): unlike the other sections there is no ylab() here, so the
  # value axis keeps the default "est" label -- confirm that is intended.
  xlab("Age") +
  # BUGFIX: a second labs(caption=...) overrode an earlier
  # labs(caption="Check Table 2 ...") call, which was therefore dead code and
  # has been removed; also fixed the "neigher" -> "neither" typo in the
  # caption that is actually displayed.
  labs(caption="Outcome: Agreement with granting suffrage to permanent residents \n(OLS: Five categories, rescaled to 0-1; Multinomial logit: Three categories, disagree, neither, and agree).") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=11),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectcompareolsmultinom.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectcompareolsmultinom.pdf"),p,width=8,height=5)
#'
#' ## For Robustness Check
#'
# Robustness comparison across standard matching, lambda = 200km
# distance-adjusted matching, and the mail-in survey.
visdtsub <- subset(visdtall, data%in%c("Matched without \nDistance Adj.",
                                       "Matched with \nLambda = 200km",
                                       "Mail-in"))
# NOTE(review): factor(x, labels=...) maps labels onto the (re)sorted levels of
# the subset -- the pairing below assumes the levels come out in the order
# standard / distance-adjusted / mail-in. Verify against levels(visdtall$data).
visdtsub$data2 <- factor(visdtsub$data,
                         labels = c("Standard \nMatching",
                                    "Distance Adjusted \nMatching",
                                    "Mail-in \n(CI omitted)"))
visdtsub$method <- factor(gsub("Multinomial Logit\nAgree vs. Disagree",
                               "Multinomial Logit\nDisagree vs. Agree",
                               visdtsub$method),
                          levels = c("OLS","Multinomial Logit\nDisagree vs. Agree"))
# Per-facet axis limits injected via geom_blank() below.
dummy <- data.frame(est = c(range(c(subset(visdtall, method=="OLS")$lci95,
                                    subset(visdtall, method=="OLS")$uci95),
                                  na.rm = TRUE),
                            range(c(subset(visdtall, method!="OLS")$lci95,
                                    subset(visdtall, method!="OLS")$uci95),
                                  na.rm = TRUE)),
                    gender = "Female", age = 45,
                    method = factor(rep(levels(visdtsub$method), each=2),
                                    levels = levels(visdtsub$method)))
require(ggplot2)
p <- ggplot(visdtsub, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar, color=data2),
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar, color=data2),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar, shape=data2, color=data2),
             position=position_dodge(width=-0.9), size=3) +
  geom_blank(data=dummy) +
  facet_grid(gender ~ method, scales = "free_x") +
  # color is mapped only to keep dodging consistent; all series are black.
  scale_color_manual(name="Data", values = rep("black", 3)) +
  scale_shape_discrete(name="Data") +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop = FALSE) +
  ylab("University Education (1:Attained, 0:Not Attained) Coefficient\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Check Online Appendix for the full results with coefficient values. CI omitted for mail-in survey results since they are too wide.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=11),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectrobustnesscheck.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectrobustnesscheck.pdf"),p,width=8,height=5)
#'
#' # Main Effects (Movers)
#'
## Import Required Data
# Movers-subsample result tables (OLS, multinomial, and combined); the *dir
# paths are set by the surrounding project scripts.
visdtx <- readRDS(visdtxdir)
visdtxm <- readRDS(visdtxmdir)
visdtxall <- readRDS(visdtxalldir)
#'
#' ## OLS
#'
require(ggplot2)
# OLS main-effect estimates for movers, by age, faceted gender x dataset.
p <- ggplot(visdtx, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
                position=position_dodge(width=-0.7), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
                position=position_dodge(width=-0.7), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar),
             position=position_dodge(width=-0.7), size=3) +
  facet_grid(gender ~ data) +
  scale_y_continuous(breaks = c(-0.1,-0.05,0.00,0.05)) +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop=FALSE) +
  ylab("OLS Coefficient\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). \nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1).") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=11),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectplotx.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectplotx.pdf"),p,width=8,height=5)
#'
#' ## Multinomial Logit (Disagree vs. Agree)
#'
require(ggplot2)
# Same layout for the movers multinomial-logit (agree over disagree) estimates.
p <- ggplot(visdtxm, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
                position=position_dodge(width=-0.7), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
                position=position_dodge(width=-0.7), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar),
             position=position_dodge(width=-0.7), size=3) +
  facet_grid(gender ~ data) +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop=FALSE) +
  ylab("Multinomial Logit Coefficient: Agree over Disagree\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). \nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1).") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=11),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectplotxm.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectplotxm.pdf"),p,width=8,height=5)
#'
#' ## Compare OLS and Multinomial Logit
#'
# Movers version of the OLS vs. multinomial-logit comparison; `dummy` sets
# per-facet y-axis ranges via geom_blank().
visdtxsub <- subset(visdtxall, data=="Unmatched")
visdtxsub$method <- factor(gsub("Multinomial Logit\nAgree vs. Disagree",
                                "Multinomial Logit\nDisagree vs. Agree",
                                visdtxsub$method),
                           levels = c("OLS","Multinomial Logit\nDisagree vs. Agree"))
dummy <- data.frame(est = c(range(c(subset(visdtxall, method=="OLS")$lci95,
                                    subset(visdtxall, method=="OLS")$uci95),
                                  na.rm = TRUE),
                            range(c(subset(visdtxall, method!="OLS")$lci95,
                                    subset(visdtxall, method!="OLS")$uci95),
                                  na.rm = TRUE)),
                    gender = "Female", age = 45,
                    method = factor(rep(levels(visdtxsub$method), each=2),
                                    levels = levels(visdtxsub$method)))
require(ggplot2)
p <- ggplot(visdtxsub, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar),
                position=position_dodge(width=-0.7), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar),
                position=position_dodge(width=-0.7), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar),
             position=position_dodge(width=-0.7), size=3) +
  geom_blank(data = dummy) +
  facet_grid(gender ~ method, scales = "free_x") +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop = FALSE) +
  ylab("University Education (1:Attained, 0:Not Attained) Coefficient\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Check Online Appendix for the full results with coefficient values.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=11),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectcompareolsmultinomx.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectcompareolsmultinomx.pdf"),p,width=8,height=5)
#'
#' ## For Robustness Check
#'
# Movers robustness check: standard matching vs. the mail-in survey.
visdtxsub <- subset(visdtxall, data%in%c("Matched without \nDistance Adj.",
                                         "Mail-in"))
# NOTE(review): the label pairing below assumes the subset's levels sort as
# matched-first, mail-in second -- verify against levels(visdtxall$data).
visdtxsub$data2 <- factor(visdtxsub$data,
                          labels = c("Standard\nMatching",
                                     "Mail-in \n(CI omitted)"))
visdtxsub$method <- factor(gsub("Multinomial Logit\nAgree vs. Disagree",
                                "Multinomial Logit\nDisagree vs. Agree",
                                visdtxsub$method),
                           levels = c("OLS","Multinomial Logit\nDisagree vs. Agree"))
# Per-facet axis limits injected via geom_blank() below.
dummy <- data.frame(est = c(range(c(subset(visdtxall, method=="OLS")$lci95,
                                    subset(visdtxall, method=="OLS")$uci95),
                                  na.rm = TRUE),
                            range(c(subset(visdtxall, method!="OLS")$lci95,
                                    subset(visdtxall, method!="OLS")$uci95),
                                  na.rm = TRUE)),
                    gender = "Female", age = 45,
                    method = factor(rep(levels(visdtxsub$method), each=2),
                                    levels = levels(visdtxsub$method)))
require(ggplot2)
p <- ggplot(visdtxsub, aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,alpha=pstar, color=data2),
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,alpha=pstar, color=data2),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(alpha=pstar, shape=data2, color=data2),
             position=position_dodge(width=-0.9), size=3) +
  geom_blank(data=dummy) +
  facet_grid(gender ~ method, scales = "free_x") +
  # color is mapped only to keep dodging consistent; all series are black.
  scale_color_manual(name="Data", values = rep("black", 3)) +
  scale_shape_discrete(name="Data") +
  scale_alpha_manual(name="Significance",values=c(1,0.5,0.2), drop = FALSE) +
  ylab("University Education (1:Attained, 0:Not Attained) Coefficient\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Check Online Appendix for the full results with coefficient values. CI omitted for mail-in survey results since they are too wide.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=11),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/maineffectrobustnesscheckx.png"),p,width=8,height=5)
ggsave(paste0(projdir,"/out/maineffectrobustnesscheckx.pdf"),p,width=8,height=5)
#'
#' # Mediation Effects
#'
#'
#' ## Function to Subset Data (Except for knowledge)
#'
# Prepare a mediation coefficient table for plotting: relabel the mediator
# column with display names, then keep only rows that (a) are not the
# political-knowledge mediator, (b) are not the ADE ("Treatment => Outcome")
# rows, and (c) belong to ages 25, 45, or 65. Returns the filtered data frame.
gencoefdts <- function(coefdt) {
  med.codes <- c("income", "knowledge", "ideology", "ldpdpjft",
                 "familiarityFT_KOR", "familiarityFT_CHN", "familiarityFT_USA")
  med.names <- c("Income\n(Percentile)",
                 "Political\nKnowledge",
                 "Political\nIdeology",
                 "LDP - DPJ\nFeeling\nThermometer",
                 "South Korea\nFeeling\nThermometer",
                 "China\nFeeling\nThermometer",
                 "United States\nFeeling\nThermometer")
  coefdt$med <- factor(coefdt$med, levels = med.codes, labels = med.names)
  keep <- coefdt$med != "Political\nKnowledge" &
    coefdt$mod != "Treatment => Outcome\n(ADE)" &
    coefdt$age %in% c(25, 45, 65)
  # which() drops NA conditions, matching subset()'s behavior.
  coefdt[which(keep), , drop = FALSE]
}
#'
#' ## Unmatched
#'
# Mediation-effect plots (unmatched data), faceted mediator x effect type.
# BUGFIX in this and the following sections: fixed the "Mediatiors" typo in
# the displayed plot captions ("Mediators").
coefdts <- gencoefdts(readRDS(coefdtdir0))
require(ggplot2)
p <- ggplot(coefdts,
            aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,color=gender,alpha=pstar), #linetype=pstar
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,color=gender,alpha=pstar),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(shape=gender,alpha=pstar),
             position=position_dodge(width=-0.9), size=3) +
  facet_grid(med ~ mod, scales = "free") +
  scale_alpha_manual(name="Significance (Transparency)",values=c(1,0.5,0.2), drop=FALSE) +
  scale_shape_discrete(name="Gender (Point Shape)") +
  scale_color_manual(name="Gender (Point Shape)", values = rep("black",2)) +
  ylab("Effect Size\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). Mediators: All rescaled to 0=minimum and 1=maximum.\nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1). All models are estimated by OLS.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/mediationplot_all_unmatched_v5.png"),p,width=10,height=7)
ggsave(paste0(projdir,"/out/mediationplot_all_unmatched_v5.pdf"),p,width=10,height=7)
#'
#' ## Matched without Distance Adjustment
#'
coefdts <- gencoefdts(readRDS(coefdtdir1))
require(ggplot2)
p <- ggplot(coefdts,
            aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,color=gender,alpha=pstar), #linetype=pstar
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,color=gender,alpha=pstar),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(shape=gender,alpha=pstar),
             position=position_dodge(width=-0.9), size=3) +
  facet_grid(med ~ mod, scales = "free") +
  scale_alpha_manual(name="Significance (Transparency)",values=c(1,0.5,0.2), drop=FALSE) +
  scale_shape_discrete(name="Gender (Point Shape)") +
  scale_color_manual(name="Gender (Point Shape)", values = rep("black",2)) +
  ylab("Effect Size\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). Mediators: All rescaled to 0=minimum and 1=maximum.\nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1). All models are estimated by OLS.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/mediationplot_all_matchednoL_v5.png"),p,width=10,height=7)
ggsave(paste0(projdir,"/out/mediationplot_all_matchednoL_v5.pdf"),p,width=10,height=7)
#'
#' ## Matched with Lambda = 50km
#'
coefdts <- gencoefdts(readRDS(coefdtdir2))
require(ggplot2)
p <- ggplot(coefdts,
            aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,color=gender,alpha=pstar), #linetype=pstar
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,color=gender,alpha=pstar),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(shape=gender,alpha=pstar),
             position=position_dodge(width=-0.9), size=3) +
  facet_grid(med ~ mod, scales = "free") +
  scale_alpha_manual(name="Significance (Transparency)",values=c(1,0.5,0.2), drop=FALSE) +
  scale_shape_discrete(name="Gender (Point Shape)") +
  scale_color_manual(name="Gender (Point Shape)", values = rep("black",2)) +
  ylab("Effect Size\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). Mediators: All rescaled to 0=minimum and 1=maximum.\nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1). All models are estimated by OLS.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL50_v5.png"),p,width=10,height=7)
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL50_v5.pdf"),p,width=10,height=7)
#'
#' ## Matched with Lambda = 100km
#'
coefdts <- gencoefdts(readRDS(coefdtdir3))
require(ggplot2)
# BUGFIX in these sections: "Mediatiors" -> "Mediators" in the displayed
# captions; the last section header below was mislabeled "Lambda = 100km".
p <- ggplot(coefdts,
            aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,color=gender,alpha=pstar), #linetype=pstar
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,color=gender,alpha=pstar),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(shape=gender,alpha=pstar),
             position=position_dodge(width=-0.9), size=3) +
  facet_grid(med ~ mod, scales = "free") +
  scale_alpha_manual(name="Significance (Transparency)",values=c(1,0.5,0.2), drop=FALSE) +
  scale_shape_discrete(name="Gender (Point Shape)") +
  scale_color_manual(name="Gender (Point Shape)", values = rep("black",2)) +
  ylab("Effect Size\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). Mediators: All rescaled to 0=minimum and 1=maximum.\nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1). All models are estimated by OLS.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL100_v5.png"),p,width=10,height=7)
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL100_v5.pdf"),p,width=10,height=7)
#'
#' ## Matched with Lambda = 200km
#'
coefdts <- gencoefdts(readRDS(coefdtdir4))
require(ggplot2)
p <- ggplot(coefdts,
            aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,color=gender,alpha=pstar), #linetype=pstar
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,color=gender,alpha=pstar),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(shape=gender,alpha=pstar),
             position=position_dodge(width=-0.9), size=3) +
  facet_grid(med ~ mod, scales = "free") +
  scale_alpha_manual(name="Significance (Transparency)",values=c(1,0.5,0.2), drop=FALSE) +
  scale_shape_discrete(name="Gender (Point Shape)") +
  scale_color_manual(name="Gender (Point Shape)", values = rep("black",2)) +
  ylab("Effect Size\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). Mediators: All rescaled to 0=minimum and 1=maximum.\nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1). All models are estimated by OLS.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL200_v5.png"),p,width=10,height=7)
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL200_v5.pdf"),p,width=10,height=7)
#'
#' ## Matched with Lambda = 350km
#'
# FIX: header used to say "Lambda = 100km", but this section reads coefdtdir5
# and writes the *_matchedL350_* files.
coefdts <- gencoefdts(readRDS(coefdtdir5))
require(ggplot2)
p <- ggplot(coefdts,
            aes(x=factor(age, levels=rev(names(table(age)))), y=est)) +
  geom_hline(aes(yintercept=0), linetype=2) +
  geom_errorbar(aes(ymin=lci95,ymax=uci95,color=gender,alpha=pstar), #linetype=pstar
                position=position_dodge(width=-0.9), size=0.5, width=0.3) +
  geom_errorbar(aes(ymin=lci90,ymax=uci90,color=gender,alpha=pstar),
                position=position_dodge(width=-0.9), size=1.5, width=0.0) +
  geom_point(aes(shape=gender,alpha=pstar),
             position=position_dodge(width=-0.9), size=3) +
  facet_grid(med ~ mod, scales = "free") +
  scale_alpha_manual(name="Significance (Transparency)",values=c(1,0.5,0.2), drop=FALSE) +
  scale_shape_discrete(name="Gender (Point Shape)") +
  scale_color_manual(name="Gender (Point Shape)", values = rep("black",2)) +
  ylab("Effect Size\n(Thin Line = 95% CI; Thick Line 90% CI)") +
  xlab("Age") +
  labs(caption="Treatment: University education (1:attained, 0:not attained). Mediators: All rescaled to 0=minimum and 1=maximum.\nOutcome: Agreement with granting suffrage to permanent residents (rescaled to 0-1). All models are estimated by OLS.") +
  coord_flip() + theme_bw() +
  theme(legend.position = "bottom",
        strip.text.x = element_text(size=9),
        strip.text.y = element_text(angle=0,size=11),
        strip.background = element_rect(fill=NA,color=NA),
        plot.caption = element_text(hjust=0),
        plot.subtitle = element_text(hjust=0.5))
p
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL350_v5.png"),p,width=10,height=7)
ggsave(paste0(projdir,"/out/mediationplot_all_matchedL350_v5.pdf"),p,width=10,height=7)
#'
#' # Extra Multinomial Logit Table
#'
## Load Analysis Data
load(paste0(projdir,"/out/heavy/analysis_2_matched_v5.RData"))
## Set Working Directory (Automatically) ##
# NOTE(review): setwd() inside a script is fragile; kept for compatibility
# with the project's existing workflow.
require(rstudioapi); require(rprojroot)
if (rstudioapi::isAvailable()) {
  setwd(dirname(rstudioapi::getActiveDocumentContext()$path));
}
projdir <- find_root(has_file("thisishome.txt"))
cat(paste("Working Directory Set to:\n",projdir))
setwd(projdir)
require(texreg)
require(lmtest)
require(sandwich)
require(mlogit)
#+ eval = FALSE
# Export the OLS + multinomial-logit table with robust (sandwich) SEs and
# p-values. BUGFIX: "ommited" -> "omitted" in the rendered table note;
# T -> TRUE for the reassignable shorthand.
# NOTE(review): override.se/override.pvalues supply the ":Neither" column
# before ":Agree", while custom.model.names labels the columns
# "vs. Agree", "vs. Neither" -- confirm the column ordering matches.
texreg(list(s0mo_1C,s0mo2_1C), digits = 4, single.row = TRUE,
       override.se = list(coeftest(s0mo_1C,vcov.=vcovHC(s0mo_1C))[,2],
                          coeftest(s0mo2_1C,vcov=sandwich)[grep(":Neither",names(coef(s0mo2_1C))),2],
                          coeftest(s0mo2_1C,vcov=sandwich)[grep(":Agree",names(coef(s0mo2_1C))),2]),
       override.pvalues = list(coeftest(s0mo_1C,vcov.=vcovHC(s0mo_1C))[,4],
                               coeftest(s0mo2_1C,vcov=sandwich)[grep(":Neither",names(coef(s0mo2_1C))),4],
                               coeftest(s0mo2_1C,vcov=sandwich)[grep(":Agree",names(coef(s0mo2_1C))),4]),
       beside = TRUE,
       omit.coef = "(wave)",stars = c(0.1,0.05,0.01,0.001), symbol = "\\dagger",
       custom.coef.map = vnmap,
       custom.model.names = c(" ", "vs. Agree", "vs. Neither"),
       custom.header = list("OLS"=1, "Multinomial logit"=2:3),
       custom.note = '%stars. Robust standard errors in parentheses. Survey month fixed effects omitted from the output. For multinomial logit, the baseline category is "disagree". The table is exported using \\texttt{texreg} R package \\citep{Leifeld2013teco}.',
       booktabs = TRUE, dcolumn = TRUE, use.packages = FALSE, threeparttable = TRUE, fontsize = "scriptsize",
       caption = "The effect of university education on the support for granting suffrage to permanent residents in Japan",
       caption.above = TRUE, label = "table:s0mo_1_article", float.pos = "t!",
       file = paste0(projdir,"/out/s0mo_1_tabular_article.tex"))
# Post-process the TeX output: restore the escaped \dagger and collapse
# duplicated GOF statistics (AIC/logLik/N/df repeated per column) into single
# multicolumn cells. The literal numbers are tied to this fitted model.
tmptab <- gsub("{dagger","{\\dagger",
               readLines(paste0(projdir,"/out/s0mo_1_tabular_article.tex")),fixed=TRUE)
tmptab
tmptab <- gsub("16618.2864 & 16618.2864", "\\multicolumn{2}{D{.}{.}{5.4}}{16618.2864}", tmptab, fixed=TRUE)
tmptab <- gsub("-8239.1432 & -8239.1432", "\\multicolumn{2}{D{.}{.}{5.4}}{-8239.1432}", tmptab, fixed=TRUE)
tmptab <- gsub("7827 & 7827 & 7827", "7827 & \\multicolumn{2}{D{.}{.}{5.4}}{7827}", tmptab, fixed=TRUE)
tmptab
writeLines(tmptab,paste0(projdir,"/out/s0mo_1_tabular_article.tex"), useBytes = TRUE)
#+ eval=FALSE, echo=FALSE
# Exporting HTML File
# In R Studio
# rmarkdown::render('./src/visualization_3_analysis_v5.R', rmarkdown::pdf_document(latex_engine="xelatex", extra_dependencies = list(bookmark=NULL, xltxtra=NULL, zxjatype=NULL, zxjafont=c("ipa"))), encoding = 'UTF-8')
# rmarkdown::render('./src/visualization_3_analysis_v5.R', 'github_document', clean=FALSE)
# tmp <- list.files(paste0(projdir,"/src"))
# tmp <- tmp[grep("\\.spin\\.R$|\\.spin\\.Rmd$|\\.utf8\\.md$|\\.knit\\.md$",tmp)]
# for (i in 1:length(tmp)) file.remove(paste0(projdir,"/src/",tmp[i]))
|
4eba2c151fb5eaa40512e417a2ba1ef6b240a8ab | 5c8fe2441da72fa9a1c99dcb9df4f03a10b922f3 | /plot4.R | 64a7e705a8df5e6b73a5ebfdf5fed8ce0e0e06bc | [] | no_license | JohnLiau/ExData_Plotting1 | f5674b07e2807091c7e2963f2afcd4a9d3209a83 | 2f7714afd964f8df987d16c1e1b7dad1a9169a5a | refs/heads/master | 2021-01-15T19:03:24.851024 | 2014-08-10T22:31:45 | 2014-08-10T22:31:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,078 | r | plot4.R | plot4 <- function() {
## Read input file
# Read the full household power-consumption file straight from the zip.
fulldata<-read.table(unz("~/Learn/Data Science/JHU04 Exploratory Data Analysis/exdata-data-household_power_consumption.zip","household_power_consumption.txt"),header=TRUE,sep=';')
# Replace underscores with dots in column names (e.g. Global_active_power ->
# Global.active.power).
colnames(fulldata)<-gsub("_",".",colnames(fulldata))
Date<-as.Date(fulldata$Date,"%d/%m/%Y")
# Keep only Feb 1-2, 2007.
data<-fulldata[Date>=as.Date('1/2/2007',"%d/%m/%Y") & Date<=as.Date('2/2/2007',"%d/%m/%Y"),]
data$Time<-strptime(paste(data$Date,data$Time),"%d/%m/%Y %H:%M:%S")
# Convert factor columns (read.table made them factors because of "?" missing
# markers) back to numeric via their level labels.
data$Global.active.power<-as.numeric(levels(data$Global.active.power))[data$Global.active.power]
data$Voltage<-as.numeric(levels(data$Voltage))[data$Voltage]
data$Global.reactive.power<-as.numeric(levels(data$Global.reactive.power))[data$Global.reactive.power]
# NOTE(review): Sub.metering.1/2/3 are plotted below without the same numeric
# conversion -- confirm they are read as numeric in this date range.
# 2x2 panel layout, filled column-first.
par(mfcol=c(2,2))
with(data,{
plot(Time,Global.active.power,bg=NA,type='l',xlab="",ylab="Global Active Power(kilowatts)",cex.axis=0.7,cex.lab=0.65,font.axis=1,font.lab=1)
# Empty canvas first so the legend sits above the three sub-metering lines.
plot(Time,Sub.metering.1,bg=NA,type='n',col='black',xlab="",ylab="Energy sub metering",cex.axis=0.7,cex.lab=0.65,font.axis=1,font.lab=1)
legend("topright",lty=1,box.col="transparent",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=0.55)
points(Time,Sub.metering.1,bg=NA,type='l',col='black',xlab="",ylab="Energy sub metering",cex.axis=0.7,cex.lab=0.65,font.axis=1,font.lab=1)
points(Time,Sub.metering.2,bg=NA,type='l',col='red',xlab="",ylab="Energy sub metering",cex.axis=0.7,cex.lab=0.65,font.axis=1,font.lab=1)
points(Time,Sub.metering.3,bg=NA,type='l',col='blue',xlab="",ylab="Energy sub metering",cex.axis=0.7,cex.lab=0.65,font.axis=1,font.lab=1)
plot(Time,Voltage,bg=NA,type='l',xlab="datetime",ylab="Voltage",cex.axis=0.7,cex.lab=0.65,font.axis=1,font.lab=1)
plot(Time,Global.reactive.power,bg=NA,type='l',xlab="datetime",ylab="Global_reactive_power",cex.axis=0.7,cex.lab=0.65,font.axis=1,font.lab=1)
})
# Copy the on-screen device to a PNG file.
dev.copy(png,file='~/Learn/Data Science/JHU04 Exploratory Data Analysis/plot4.png')
dev.off()
}
|
a86b01ae13affcdbaeef1f7ffb2e112e6c9cb923 | f28e7f74a0a3d61b0242eade931c4aa3b196821c | /R_code/local_code/filtered_cells_table.R | 4aeda53ce7de013fae55369b1cfb93de68eaffc9 | [
"BSD-3-Clause"
] | permissive | ayshwaryas/ddqc_source | c2cbdd12defa67c97f1ff3c0fec98fa2653a8d8e | 2b26415cb38619d8bb2c7884aee78c0d8fd05c0a | refs/heads/master | 2023-04-13T16:45:45.378206 | 2022-10-16T15:37:02 | 2022-10-16T15:37:02 | 226,552,782 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,977 | r | filtered_cells_table.R | source("scripts/readers.R")
# Load project helpers and settings; paths are relative to the project root.
source("scripts/mc_functions.R")
source("scripts/fc_plots.R")
source("scripts/settings.R")
source("scripts/local_settings.R")
# Global-assignment (<<-) mirrors the cluster-array scripts this was adapted
# from; only one task per tissue here.
tasks.per.tiss <<- 1
project <- "mc_tm"
tissue <- "Lung"
# Read the per-metric "!filtered_<metric>.csv" files for one QC `method`,
# append a CSV row of removed-cell counts and percentages to fc_table.csv,
# and return the retained barcodes of the last metric read ("ribo").
# Depends on the globals `source.dir` and `res` set by the calling script.
readFilterCsvMethod <- function(method, all.cells) {
  filtered.cells.all <- NULL
  line <- method
  for (metric in c("counts", "genes", "mito", "ribo")) {
    filtered.cells <- tryCatch({
      as.character(read.csv(paste0(source.dir, res, "-", method, "/!filtered_", metric, ".csv"))[["barcodekey"]])},
      # BUGFIX: the handler used to consist of warning() alone, whose return
      # value (the warning message) became `filtered.cells` and was unioned
      # into `filtered.cells.all` as if it were a barcode. A missing file now
      # yields an empty barcode set after the warning is raised.
      error = function(e) {
        warning(paste(method, metric, "filtered cells not found"))
        character(0)
      })
    filtered.cells.all <- union(filtered.cells.all, filtered.cells)
    # Cells removed by this metric, absolute and as a percentage of all cells.
    line <- paste(line, length(all.cells) - length(filtered.cells), paste0(
      round((length(all.cells) - length(filtered.cells)) / length(all.cells) * 100, 1), "%"), sep=",")
  }
  # Cells removed by all four metrics combined (complement of the union of
  # the retained sets).
  line <- paste(line, length(all.cells) - length(filtered.cells.all), paste0(
    round((length(all.cells) - length(filtered.cells.all)) / length(all.cells) * 100, 1), "%"), sep=",")
  write(line, file=paste0(source.dir, "fc_table.csv"), append=TRUE)
  # NOTE(review): this returns only the LAST metric's retained cells, not the
  # accumulated union; callers setdiff() the result against all.cells, so
  # confirm `filtered.cells.all` was not intended here.
  return(filtered.cells)
}
# Clustering resolution; the commented formula derived it from a cluster task
# id in the original array-job version.
res <<- 1.4 #0.5 * (1 + (task.id %% tasks.per.tiss) %/% tasks.per.res) #clustering resolution
source.dir <<- paste0(source.dir.prefix, project, "/", tissue, "/") #directory where csv with filtered cells are located
# NOTE(review): `=` used for assignment here while the rest of the script
# uses `<-`/`<<-`.
all.cells = as.character(read.csv(paste0(source.dir, res, "-none-0/!cells.csv"))$barcodekey)
# Write the header row, then one row per QC method via readFilterCsvMethod().
write("method,counts cells,counts %,genes cells,genes %,mito cells,mito %,ribo cells,ribo %,all cells,all %", file=paste0(source.dir, "fc_table.csv"))
#cutoff5 <- setdiff(all.cells, readFilterCsvMethod("cutoff-5", all.cells))
cutoff10 <- setdiff(all.cells, readFilterCsvMethod("cutoff-10", all.cells))
#zscore2 <- setdiff(all.cells, readFilterCsvMethod("z_score-2", all.cells))
mad <- setdiff(all.cells, readFilterCsvMethod("mad-2", all.cells))
outlier <- setdiff(all.cells, readFilterCsvMethod("outlier-0", all.cells))
|
c43918c59fdc7b45f2e0a9e07a76f458bf81f41a | 182257a7b18220970988c68cdd0d815e12ab4c85 | /scratch/Sim7_Poisson_ERGM_Model.R | 56a498104ec83f56ca72b421b54b1864d7036eeb | [] | no_license | tylerandrewscott/elwha | 3b81c495f96a8e14819621a1b70556f7eca29d06 | b2c3f0b3b9cafc3382eaa57acf87ebc2c47b1cfc | refs/heads/master | 2022-05-12T16:03:49.773027 | 2016-05-11T14:32:48 | 2016-05-11T14:32:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,251 | r | Sim7_Poisson_ERGM_Model.R | #rm(list=ls())
library(statnet)
library(latentnet)
library(ergm.count)
# Restore coda's S3 defaults that latentnet masks, so mcmc diagnostics work.
as.mcmc.default <- coda:::as.mcmc.default
as.mcmc.list.default <- coda:::as.mcmc.list.default
# `net_temp` is expected to exist in the session (built by an earlier script).
net<-net_temp
net_temp
#Poisson:
# Mean edge value gives the MLE starting value for the sum term under a
# Poisson reference.
m <- sum(net %e% "TCO")/network.dyadcount(net)
init.sum.pois <- log(m)
# BUGFIX: `pr1` was used in the init vector below but never defined; define
# it from the model statistics, mirroring how pr2/pr3 are computed for
# mod1/mod2 later in this script.
pr1 <- length(summary(net~sum, response = "TCO"))
mod0 <-
  ergm(net~sum,
       response="TCO", reference=~Poisson,
       control=control.ergm(init=c(init.sum.pois, rep(0, pr1-1)),
       MCMC.prop.weights="0inflated",MCMLE.maxit=40,MCMC.runtime.traceplot=F,seed=24,
       MCMLE.trustregion=1000,
       MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
#mcmc.diagnostics(net.cmpois.nm)
summary(mod0)
# Simulate from model fit:
# Draw 1000 networks from mod0 while monitoring the nonzero statistic, to
# check zero-inflation against the observed network.
nonzero.sim <-
  simulate(mod0, monitor=~nonzero, nsim = 1000, statsonly=TRUE,
           control=control.simulate.ergm(
             MCMC.prop.weights="0inflated",MCMLE.trustregion=1000,
             MCMC.prop.args=list(p0=0.5) # Should not be necessary in the next version.
           ))
#compute statistic for nonzero observed in the network
nonzero.obs<-summary(net~nonzero,response="TCO")
nonzero.obs
par(mar=c(5, 4, 4, 2) + 0.1)
# 2nd col. = nonzero
plot(density(nonzero.sim[,2]))
abline(v=nonzero.obs)
# Two-sided simulation p-value for the nonzero statistic.
p.nonzero<-min(mean(nonzero.sim[,2]>nonzero.obs),mean(nonzero.sim[,2]<nonzero.obs))*2
p.nonzero
# Number of model statistics, used to size the init vector below.
pr2 <- length(summary(net~sum+nonzero,
                      response = "TCO"))
# mod1 adds a zero-inflation (nonzero) term to the Poisson-reference model.
mod1 <-
  ergm(net~sum+nonzero,
       response="TCO", reference=~Poisson,
       control=control.ergm(init=c(init.sum.pois, rep(0, pr2-1)),
       MCMC.prop.weights="0inflated",MCMLE.maxit=40,MCMC.runtime.traceplot=F,seed=24,
       MCMLE.trustregion=1000,
       MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
#mcmc.diagnostics(net.cmpois.nm)
summary(mod1)
# Simulate from model fit:
cmp.sim <-
simulate(mod1, monitor=~CMP, nsim = 1000, statsonly=TRUE,
control=control.simulate.ergm(
MCMC.prop.weights="0inflated",MCMLE.trustregion=1000,
MCMC.prop.args=list(p0=0.5) # Should not be necessary in the next version.
))
#compute statistic for nonzero observed in the network
cmp.obs<-summary(net~CMP,response="TCO")
par(mar=c(5, 4, 4, 2) + 0.1)
# 3nd col. = CMP
plot(density(cmp.sim[,3]))
abline(v=cmp.obs)
p.cmp<-min(mean(cmp.sim[,3]>cmp.obs),mean(cmp.sim[,3]<cmp.obs))*2
# Simulate from model fit:
mutual.sim <-
simulate(mod1, monitor=~mutual(form="min"), nsim = 1000, statsonly=TRUE,
control=control.simulate.ergm(
MCMC.prop.weights="0inflated",MCMLE.trustregion=1000,
MCMC.prop.args=list(p0=0.5) # Should not be necessary in the next version.
))
#compute statistic for nonzero observed in the network
mutual.obs<-summary(net~mutual,response="TCO")
par(mar=c(5, 4, 4, 2) + 0.1)
# 3nd col. = mutual
plot(density(mutual.sim[,3]))
abline(v=mutual.obs)
p.mutual<-min(mean(mutual.sim[,3]>mutual.obs),mean(mutual.sim[,3]<mutual.obs))*2
p.mutual
#select mutual, add into model
# Number of statistics in the sum+nonzero+mutual model.
pr3 <- length(summary(net~sum+nonzero+mutual(form="min"),
response = "TCO"))
mod2 <-
ergm(net~sum+nonzero+mutual(form="min"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3-1)),
MCMC.prop.weights="0inflated",MCMLE.maxit=100,MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=1000,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod2)
# GOF check for transitivity via the transitiveweights statistic.
# Simulate from model fit:
transitiveweights.sim <-
simulate(mod2, monitor=~transitiveweights("min","max","min"), nsim = 1000, statsonly=TRUE,
control=control.simulate.ergm(
MCMC.prop.weights="0inflated",MCMLE.trustregion=1000,
MCMC.prop.args=list(p0=0.5) # Should not be necessary in the next version.
))
#compute statistic for nonzero observed in the network
transitiveweights.obs<-summary(net~transitiveweights("min","max","min"),response="TCO")
par(mar=c(5, 4, 4, 2) + 0.1)
# 4th col. = transitiveweights("min","max","min")
# (cols 1-3 are the fitted sum, nonzero and mutual statistics)
plot(density(transitiveweights.sim[,4]))
abline(v=transitiveweights.obs)
# Two-sided simulated p-value for transitivity.
# NOTE(fix): previously computed from column 3 (the mutual statistic),
# contradicting the plot above; the monitored statistic is column 4.
p.transitiveweights<-min(mean(transitiveweights.sim[,4]>transitiveweights.obs),
mean(transitiveweights.sim[,4]<transitiveweights.obs))*2
#select transitiveweights, add to model
# Statistic count for the 4-term structural model (sum, mutual,
# transitiveweights, cyclicalweights). NOTE(review): this summary uses the
# "geomean","sum","geomean" variant of transitiveweights while mod3 below
# fits "min","max","min" -- the statistic COUNT is the same so the init
# vector length is correct, but confirm the variant mismatch is intentional.
pr3 <- length(summary(net~sum+mutual(form="min")+
transitiveweights("geomean","sum","geomean")+
cyclicalweights(twopath="min",combine="max",affect="min"),
response = "TCO"))
# Structural model: intensity + reciprocity + transitivity + cyclicality.
mod3 <-
ergm(net~sum+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3-1)),MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=100,MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=100,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod3)
# mod4: structural terms plus nodecov("NUMRESP") -- 5 statistics, so the
# init vector is padded to length pr3+1 (pr3 = 4 structural statistics).
mod4 <-
ergm(net~sum+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min")+nodecov("NUMRESP"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3)),MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=100,MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=100,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod4)
# mod5: adds nodecov("MEANYEARS") and nodecov("NUMGROUPS") -- 7 statistics.
mod5 <-
ergm(net~sum+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min")+nodecov("NUMRESP")+
nodecov("MEANYEARS")+nodecov("NUMGROUPS"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3+2)),MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=100,MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=100,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod5)
# mod6: mod5 plus the CMP dispersion term -- 8 statistics.
mod6 <-
ergm(net~sum+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min")+CMP+nodecov("NUMRESP")+
nodecov("MEANYEARS")+nodecov("NUMGROUPS"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3+3)),MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=100,MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=100,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod6)
# mod7: like mod5 but with a square-root link on the sum term
# (sum(pow=1/2)); still 7 statistics.
mod7 <-
ergm(net~sum(pow=1/2)+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min")+nodecov("NUMRESP")+
nodecov("MEANYEARS")+nodecov("NUMGROUPS"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3+2)),MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=100,MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=100,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod7)
# mod8: sqrt-sum structural model with nodecov("NUMRESP") and
# nodecov("MEANYEARS") -- 6 statistics (init padded to pr3+1).
mod8 <-
ergm(net~sum(pow=1/2)+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min")+
nodecov("NUMRESP")+
nodecov("MEANYEARS"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3+1)),MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=100,MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=1000,MCMLE.steplength=500,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod8)
# mod9: as mod8 but swaps MEANYEARS for NUMGROUPS -- 6 statistics.
mod9 <-
ergm(net~sum(pow=1/2)+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min")+
nodecov("NUMRESP")+
nodecov("NUMGROUPS"),
response="TCO", reference=~Poisson,
control=control.ergm(init=c(init.sum.pois, rep(0, pr3+1)),
MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=200,
MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=1000,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod9)
# mod10: mod9 plus nodecov("MEANYEARS") -- 7 statistics in total
# (sum, mutual, transitiveweights, cyclicalweights, three nodecovs).
mod10 <-
ergm(net~sum(pow=1/2)+mutual(form="min")+ transitiveweights("min","max","min")+
cyclicalweights(twopath="min",combine="max",affect="min")+
nodecov("NUMRESP")+
nodecov("NUMGROUPS")+nodecov("MEANYEARS"),
response="TCO", reference=~Poisson,
# NOTE(fix): init must match the 7 model statistics, so pad with pr3+2
# zeros after the sum starting value (the previous pr3+1 left the init
# vector one element short of the statistic count).
control=control.ergm(init=c(init.sum.pois, rep(0, pr3+2)),
MCMLE.density.guard=10,MCMLE.density.guard.min=400,
MCMC.prop.weights="0inflated",MCMLE.maxit=200,
MCMC.runtime.traceplot=F,seed=24,
MCMLE.trustregion=1000,
MCMC.prop.args=list(p0=0.5)),eval.loglik=F)
summary(mod10)
# List all fitted model objects currently in the workspace.
ls()[grep("mod", ls())]
summary(mod0)
# Attach log-likelihood estimates to every fitted model (eval.loglik was
# disabled at fit time); logLik(..., add=TRUE) returns the model object
# with the likelihood stored, enabling AIC/BIC comparison.
mod0<-logLik(mod0, add=TRUE)
mod1<-logLik(mod1, add=TRUE)
mod2<-logLik(mod2, add=TRUE)
mod3<-logLik(mod3, add=TRUE)
mod4<-logLik(mod4, add=TRUE)
mod5<-logLik(mod5, add=TRUE)
mod6<-logLik(mod6, add=TRUE)
mod7<-logLik(mod7, add=TRUE)
mod8<-logLik(mod8, add=TRUE)
mod9<-logLik(mod9, add=TRUE)
mod10<-logLik(mod10, add=TRUE) # was omitted; added for consistency with mod0-mod9
mcmc.diagnostics(mod2,vars.per.page=4)
summary(mod2)
|
431229e808f316a6f2443b6098e3e560e9f89921 | 614c076cee38793f249a62b1c7e8fb569d90bf11 | /code/UsefulFunctions/MICS_Categorization.R | 4f523a2f70187c9b64d80106b5196faa3a020e61 | [] | no_license | InstituteforDiseaseModeling/Nigeria-Family-Planning-Paper | b6e1c8822d03c7832bdd1cb9ff9f26ee32d9745d | 1ef4f73cac49d0e103513dfd24bceb83b5c74ef3 | refs/heads/master | 2021-10-09T05:40:25.075030 | 2018-12-21T22:17:25 | 2018-12-21T22:17:25 | 160,894,432 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,702 | r | MICS_Categorization.R | ## R scripts to harmonise family planning variable and estimate family planning indicators by marital status and age from MICS micro-data files
# 1. 'MICS_Translate.R' Translates relevant variables across surveys and stores harmonised variable names and codes as R data sets
# 2. 'MICS_Categorization.R' Computes marital status and contraceptive use variables
# 3. 'MICS_GenerateUnmet.R' Computes unmet need variable based on DHS code [http://dhsprogram.com/topics/unmet-need.cfm]
# 4. 'MICS_output_FP-Indicators.R' Outputs table of family planning indicators by marital status and age
## Author: United Nations Population Division (Ching Yee Lin, Philipp Ueffing, Stephen Kisambira and Aisha Dasgupta)
## Project: Making family planning count
# [http://www.un.org/en/development/desa/population/projects/making-family-planning-count/index.shtml]
# [http://www.un.org/en/development/desa/population/theme/family-planning/index.shtml]
## MICS micro data sets need to be downloaded from the MICS program website [http://mics.unicef.org/]
VariableNames_Path <- paste0(home, "/UsefulFunctions/TranslationTables")
nameInventory <- read.csv(paste(VariableNames_Path,"tt_variableNames.csv",sep="/"), stringsAsFactors = F, header = TRUE)
library(plyr)
# Compute marital status categories
MaritalStatusCategories<-function(df,choice){
if(choice == "Both"){
df<-MaritalStatusCategories(df,"General") #Categorization for Married and Unmarried categories
df<-MaritalStatusCategories(df,"Unmarried")
}else if(choice == "General"){
married <- c(11, 20:22)
unmarried <- c(10,30:35,40)
missing_notstated <- c(98,99)
#Another variable of marital status seems to be used in report (Current marital status)
##State of Palestine - PHL26 (Translated to CurrentMStatus) match with reported marital status table
if(surveyID == "pswm4"){
df["MSTAT"]<-NA
df$MSTAT[which(df$CURRENTMSTATUS %in% married)]<-1
df$MSTAT[which(df$CURRENTMSTATUS %in% unmarried)]<-2
df$MSTAT[which(is.na(df$CURRENTMSTATUS) & df$EVERMARRIED %in% unmarried)]<-2
df$MSTAT[which(df$CURRENTMSTATUS %in% missing_notstated)]<-NA
}else{
##Categorize for Married/Unmarried women
df["MSTAT"]<-NA
df$MSTAT[which(df$MARSTAT %in% married)]<-1
df$MSTAT[which(df$MARSTAT %in% unmarried)]<-2
df$MSTAT[which(is.na(df$MSTAT) & df$EVERMARRIED %in% unmarried)]<-2
df$MSTAT[which(df$MARSTAT %in% missing_notstated)]<-NA
}
df$MSTAT_LAB <- mapvalues(df$MSTAT, from = c(1,2), to=c("Married/In-union","Unmarried/Not-in-union"), warn_missing = F)
}else if(choice == "Unmarried"){ #Categorization for Formerly Married and Never Married categories
formerly <- c(30:35)
never <- c(10)
df["MSTATNF"]<-NA
df$MSTATNF[which(df$MARSTAT %in% formerly)]<-1
df$MSTATNF[which(df$MARSTAT %in% never)]<-2
if(!any(1%in%df$MSTATNF) && !is.null(df$EVERMARRIED)){
df$MSTATNF[which(df$EVERMARRIED %in% formerly)] <-1
df$MSTATNF[which(df$EVERMARRIED %in% never)] <- 2
}
df$MSTATNF_LAB <- mapvalues(df$MSTATNF, from = c(1,2), to=c("Formerly married","Never married"), warn_missing = F)
if("Formerly-Married" %in% df$MSTATNF_LAB && !"Never-Married" %in% df$MSTATNF_LAB){
df$MSTAT_LAB[which(df$MSTAT_LAB=="Unmarried")]<- NA
}
}
return (df)
}
# Compute contraceptive method categories
MethodCategories <- function(df){
# Women who are coded as NA also need to be classified as 2 (Non-users) so that they are later included in the denominator for CP
df$FPANY <- NA
df$FPANY <- mapvalues(df$FPNOW, from = c(1, 2, 9, NA), to = c(1, 2, 2, 2), warn_missing = F)
#Determine effectiveness of methods presented
## Variable of single most effective method used, based on Trussel and WHO
df$FPMETHOD <- NULL
df$FPMETHOD <- NA
###Specific methods are under FPMETHNOW for bswm2
if("FPMETHNOW"%in%names(df)){
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 9)] <- 102
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 8)] <- 106
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 2)] <- 101
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 7)] <- 105
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 2)] <- 101
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 3)] <- 120
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 1)] <- 110
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 6)] <- 103
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 11)] <- 210
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 4)] <- 130
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 5)] <- 104
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 13)] <- 220
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 14)] <- 300
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPMETHNOW == 10)] <- 303
}
###Specific methods are separated into different variables
else{
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSIMP == 1)] <- 102 # Implant
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSMST == 1)] <- 106 # Male Sterilisation
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSIUS == 1)] <- 101 # IUS
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSFST == 1)] <- 105 # Female Sterilsaiton
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSIUD == 1)] <- 101 # IUD ## Some IUS might have been classified as IUD in MICS as differentiation not made. Trussel: IUS > FST > IUD
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSINJ == 1)] <- 120 # Injection ## Not in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSINJ1 == 1)] <- 121 ## Not in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSINJ2 == 1)] <- 122 ## Not in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSINJ3 == 1)] <- 123 ## Not in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSPILL == 1)] <- 110 # Pill
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSPAT == 1)] <- 107 # Patch
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSRING == 1)] <- 108 # Ring
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSCONM == 1)] <- 103 # Male condom
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSRHY == 1)] <- 210 # Rhythm
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSSDM == 1)] <- 212 # Standard Days Method
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSDIA == 1)] <- 130 # Diaphragm
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSCONF == 1)] <- 104 # Female condom
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSLAM == 1)] <- 140 # LAM ## Not in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSWD == 1)] <- 220 # Withdrawal
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSFOA == 1)] <- 135 # Foam ## Spermicides in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSEC == 1)] <- 150 # Emergency contraception ## Not in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSBF == 1)] <- 141 # Breasfeeding ## Not in Trussel
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSOTH == 1)] <- 300 # Other
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSNSP == 1)] <- 301 # Not specified
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSOTHMOD == 1)] <- 302 # Other modern
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPNOWUSOTHTRAD == 1)] <- 303 # Other traditional
}
# Set Variable of method type (modern/traditional/no use)
df["FPTYPE"] <- NA
modern <- c(100:108, 110:112, 120:123, 130:136, 140, 150, 160, 302)
traditional <- c(141, 200, 210:217, 220, 230:236, 303, 212)
other <- c(300, 301) # Not specified put into 'other'
df$FPTYPE[which(df$FPMETHOD %in% modern)] <- 1
df$FPTYPE[which(df$FPMETHOD %in% traditional)] <- 2
df$FPTYPE[which(df$FPMETHOD %in% other)] <- 2 # other methods classified as traditional
# Recode non-user in for denominator of calculation
df$FPTYPE[which(is.na(df$FPTYPE) & df$FPANY == 2)] <- 3
df$FPTYPE[which(is.na(df$FPTYPE) & df$FPANY == 1)] <- 4 #Using, but no method
# If FPNOW not available (equal to "Not in translation table" then set all NAs to "Not using") define non-users through FPNOWUSXXXXs
if (is.null(df$FPNOW) | all(is.na(df$FPNOW))){
df$FPTYPE[which(is.na(df$FPMETHOD))] <- 3
df$FPANY <- mapvalues(df$FPTYPE, from = c(1, 2, 3, NA), to = c(1, 1, 2, NA), warn_missing = F)
}
#Recategorize Non-user -> for cases when FPNOW is missing
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPANY == 2)] <- 999 # Categorise NAs as Non-users
df$FPMETHOD[which(is.na(df$FPMETHOD) & df$FPANY == 1)] <- 998 # Identify women who have said that using contraception, but not specified method (Being excluded from calculation)
df$FPMETHOD_LAB <- mapvalues(df$FPMETHOD,
from = c(102, 106, 101, 105, 101, 120, 121, 122, 123, 110, 107, 108, 103, 130, 104, 140, 150,
210, 220, 135, 141, 300, 301, 302, 303, 999, 998, 212),
to = c("IMP", "MST", "IUD_IUS", "FST", "IUD_IUS", "INJ", "INJ1", "INJ2", "INJ3", "PILL", "PAT", "RING", "CONM", "DIA", "CONF", "LAM", "EC",
"RHY", "WD", "FOA", "BF", "OTH", "NSP", "OTHMOD", "OTHTRAD", "NotUsing", "FPAny_butNotInFPMETHOD","SDM"),
warn_missing = F)
# Compute variable with descriptive names
df$FPANY_LAB <- mapvalues(df$FPANY, from = c(1, 2, NA), to = c("Using_any", "Not_using", NA), warn_missing = F) # There shouldn't be any 'NA' values #### PHIL #### Problem?
df$FPTYPE_LAB <- mapvalues(df$FPTYPE, from = c(1, 2, 3,4), to = c("Using_modern", "Using_traditional", "Not_using","Using_any_nomethod"), warn_missing = F)
return (df)
}
# NOT USED
MethodAllocation <- function(df){
##Distribute FPAny_butNotInFPMETHOD among all the methods
if(any(df$FPMETHOD == 998) & !is.na(any(df$FPMETHOD==998))){
#Get Frequencies of all method
FPMethod_ByAgeMSTAT<-as.data.frame(xtabs(~FPMETHOD+AGE5YEAR_LAB+MSTAT_LAB,df))
FPMethod_AllMethod<-subset(FPMethod_ByAgeMSTAT,subset=!FPMethod_ByAgeMSTAT$FPMETHOD%in%c("998","999"))
FPMethod_NoMethod <- subset(FPMethod_ByAgeMSTAT,subset=FPMethod_ByAgeMSTAT$FPMETHOD%in%c("998"))
#Sum up all the available specific methods
FPMethod_Total<-aggregate(x=FPMethod_AllMethod["Freq"],by=list(AGE5YEAR_LAB=FPMethod_AllMethod$AGE5YEAR_LAB,MSTAT_LAB=FPMethod_AllMethod$MSTAT_LAB),FUN=sum)
FPMethod_Total$Total <- FPMethod_Total$Freq
FPMethod_Total <- FPMethod_Total[,c("AGE5YEAR_LAB","MSTAT_LAB","Total")]
#Get Frequency of FPAny_butNoMethod
FPMethod_NoMethod$NoMethod.freq <- FPMethod_NoMethod$Freq
FPMethod_NoMethod <- FPMethod_NoMethod[,c(1,2,3,5)]
FPMethod_AllMethod<-merge(FPMethod_AllMethod,FPMethod_Total,by=c("AGE5YEAR_LAB","MSTAT_LAB"))
FPMethod_All <- merge(FPMethod_AllMethod,FPMethod_NoMethod,by=c("AGE5YEAR_LAB","MSTAT_LAB"))
#Get the number of observations needed to be allocate (Round by 0 in order to select sample (What to do with the remaining FPAny_NoMethod???))
FPMethod_All$Rate<-NA
FPMethod_All$Rate <- round((FPMethod_All$Freq / FPMethod_All$Total)*FPMethod_All$NoMethod.freq,0)
FPMethod_All <- FPMethod_All[!is.na(FPMethod_All$Rate),]
#Only works if there is only 1 FPANY_NoMethod -> can directly set to the method with highest rate
if(all(FPMethod_All$Rate ==0)){
FPMethod_All$Rate <- round((FPMethod_All$Freq / FPMethod_All$Total)*FPMethod_All$NoMethod.freq,1)
}
#Subset possible candidates for allocation
FPMethod_Allocate <- subset(FPMethod_All, FPMethod_All$Rate > 0)
FPMethod_Allocate <- data.frame(lapply(FPMethod_Allocate, as.character), stringsAsFactors=FALSE)
FPMethod_Allocate$Rate <- as.numeric(FPMethod_Allocate$Rate)
#Match Marital status and Age, the method with the highest rate is replaced when there is only 1 FPAny_NoMethod
if(length(which(df$FPMETHOD == 998))==1){
FPMethod_NoMethod <- subset(FPMethod_NoMethod, subset=FPMethod_NoMethod$NoMethod.freq!=0)
#Retain only the matching marital status and age, who has the highest rate - candidate of FPANY_NoMethod
FPMethod_Allocate <- subset(FPMethod_Allocate, subset=FPMethod_Allocate$MSTAT_LAB == FPMethod_NoMethod$MSTAT_LAB &
FPMethod_Allocate$AGE5YEAR_LAB == FPMethod_NoMethod$AGE5YEAR_LAB & FPMethod_Allocate$Rate == max(FPMethod_Allocate$Rate))
df$FPMETHOD[which(df$MSTAT_LAB == FPMethod_NoMethod$MSTAT_LAB &
df$AGE5YEAR_LAB == FPMethod_NoMethod$AGE5YEAR_LAB &
df$FPMETHOD == 998)] <- as.numeric(FPMethod_Allocate[,"FPMETHOD.x"])
}else{
# Allocate to df based on matching marital status and age group
## Order of allocation? Highest rate first? (Affect if df does not have same # of observations correspond to FPAny_NoMethod's marital status and age)
###Order by Highest Rate
# FPMethod_Allocate <- FPMethod_Allocate[with(FPMethod_Allocate,order(-Rate)),]
# rownames(FPMethod_Allocate) <- 1:nrow(FPMethod_Allocate)
for(s in 1:nrow(FPMethod_Allocate)){
if(length(which(df$MSTAT_LAB == FPMethod_Allocate[s,"MSTAT_LAB"] &
df$AGE5YEAR_LAB == FPMethod_Allocate[s,"AGE5YEAR_LAB"] &
df$FPMETHOD == 998)) < FPMethod_Allocate[s,"Rate"]){
#If there are less candidates from df compared to amount needed to be allocated
##Set all matching marital status and age observations to current loop's method
df[row.names(subset(df,df$MSTAT_LAB == FPMethod_Allocate[s,"MSTAT_LAB"] &
df$AGE5YEAR_LAB == FPMethod_Allocate[s,"AGE5YEAR_LAB"] &
df$FPMETHOD == 998)),"FPMETHOD"] <- FPMethod_Allocate[s,"FPMETHOD.x"]
}else{
#Random sample the number of allocation needed from df
##Set the random sample FPMETHOD to current loop's method
df[row.names(sample_n(subset(df,df$MSTAT_LAB == FPMethod_Allocate[s,"MSTAT_LAB"] &
df$AGE5YEAR_LAB == FPMethod_Allocate[s,"AGE5YEAR_LAB"] &
df$FPMETHOD == 998),as.numeric(FPMethod_Allocate[s,"Rate"]))),"FPMETHOD"] <- FPMethod_Allocate[s,"FPMETHOD.x"]
}
}
}
} #End of Allocation
#Remaining FPAny_NoMethod puts into Other -> In order to retain all the woman in the denominator
if(any(df$FPMETHOD == 998) & !is.na(any(df$FPMETHOD==998))){
df$FPMETHOD[which(df$FPMETHOD == 998)] <- 300
}
return (df)
} |
6f1ec35f25c33cb48a7b96a8888217a86684c7cc | 67a6a1ea9802abc4f9cb487a88df9714bbcc81d2 | /capstone milestone.R | 92a9179e940d2ca1c27c948615217ce060e9fe7a | [] | no_license | alexyom/Capstone-Project | bf763188ded30a6c732a30e4a183f5625941d104 | ab02fb1815c06ffce4bb6dcfcd1f7889b53193df | refs/heads/master | 2021-07-06T16:28:25.972040 | 2019-02-22T16:30:29 | 2019-02-22T16:30:29 | 141,309,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,266 | r | capstone milestone.R | library(dplyr)
library(tidyr)
library(ggplot2)
nba<-cbind(NBA_Team_Annual_Attendance,nba_wins_II)
nba<-nba[,c(1,2,3,12,5,6,4,7,8,9,10,11,12)]
View(nba_capstone)
write.csv(nba,"nba_capstone.csv")
str(nba_capstone)
str(nba)
View(NBA)
write.csv(nba,"NBA.csv")
nba<-nba_capstone %>%
rename(Year = `Starting Year`,
HomeAvgAttendance= `Home: Avg Attendance`)
ggplot(NBA,aes(x=Wins,y=HomeAvgAttendance,color=Team))+
geom_point()
cor(NBA$Wins,NBA$HomeAvgAttendance)
knicks<-NBA[NBA$Team %in% c("NY Knicks"),]
east<-NBA[NBA$Team %in% c("Bulls","Cavaliers","Raptors","NY Knicks","Heat",
"Celtics","Wizards","Magic","Hornets","Pacers",
"Hawks","Pistons","Bucks","Nets","76ers"),]
west<-NBA[NBA$Team %in% c("Mavericks","Warriors","Trail Blazers","Jazz",
"Clippers","Lakers","Spurs","Thunder","Rockets",
"Kings","Suns","Pelicans","Grizzlies","Timberwolves",
"Nuggets"),]
Year2015<-NBA[NBA$Year %in% c("2015"),]
ggplot(knicks,aes(x=Wins,y=HomeAvgAttendance,color=Year))+
geom_point()
ggplot(Year2015,aes(x=Wins,y=HomeAvgAttendance,color=Team))+
geom_point()
cor(Year2015$Wins,Year2015$HomeAvgAttendance) |
65737478d2cbcda679708f447cf03bdd4ba4840f | e449130c7f508441d7baa459bff749408a791baf | /scripts/2b_sentiment.R | 2afa59a1b6ae737ac5e736350d4bf5d6d1d6d030 | [] | no_license | ropxu/Thesis | 9c0eb6eacfc384d20d82fca898ba194b7e4624da | 9a60a5c0fef932d94c335da641f81b5116bef80b | refs/heads/master | 2023-08-14T03:59:36.308486 | 2021-09-21T17:07:42 | 2021-09-21T17:07:42 | 408,902,607 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,978 | r | 2b_sentiment.R | library(tidyverse)
library(lubridate)
library(tidytext)
library(SentimentAnalysis)
library(ggwordcloud)
library(readxl)
# ========================== News ==========================
# Read news data
news_df <- read_csv('../data/news_master_file.csv')
#news_df <- news_df %>% select(-co_cusip, -co_sic)
# Early closing days
early_closing <- read_excel('../data/original/early_closing.xlsx')
early_closing <- early_closing %>% mutate(date = as.Date(date))
news <- news_df %>%
mutate(hour = hour(versionCreated),
minute = minute(versionCreated)) %>%
select(versionCreated, hour, minute, everything())
holidays <- news %>% mutate(date = as.Date(versionCreated)) %>% inner_join(early_closing)
regular_days <- news %>% mutate(date = as.Date(versionCreated)) %>% anti_join(early_closing)
before_closing <- holidays %>%
filter(hour < 13) %>%
select(versionCreated, date, everything(), -hour, -minute)
after_closing <- holidays %>%
filter(hour >= 13) %>%
mutate(date = date + days(1)) %>%
select(versionCreated, date, everything(), -hour, -minute)
holidays <- before_closing %>% bind_rows(after_closing)
# ==== Include after hours ====
# Attach each firm's listing exchange so NYSE and NASDAQ stories can use
# different after-hours cutoffs (NYSE close 16:00, NASDAQ extended 20:00).
sp500 <- read_csv('../data/exchange_info.csv')
sp500 <- sp500 %>% mutate(date = ymd(date))
# Keep the most recent exchange record per CUSIP.
sp500 <- sp500 %>% select(date, CUSIP, EXCHCD, TICKER) %>%
rename(co_exc = EXCHCD,
co_cusip = CUSIP) %>%
group_by(co_cusip) %>% filter(date == max(date)) %>%
select(-date, -TICKER)
# CRSP cusip does not have check sum
news <- news %>%
mutate(co_cusip = str_sub(co_cusip, 0, -2))
news <- news %>% left_join(sp500)
" Exchange codes
1 = NYSE
3 = NASDAQ
5 = NASDAQ
"
# Split to nyse and nasdaq (unmatched firms, co_exc NA, default to NYSE rules)
nyse <- news %>% filter(co_exc == 1 | is.na(co_exc))
nasdaq <- news %>% filter(co_exc != 1)
# NYSE
# News that happend during trading hours
nyse_during_trading <- nyse %>%
filter(hour < 16) %>%
mutate(date = as.Date(versionCreated)) %>%
select(versionCreated, date, everything(), -hour, -minute)
# News that happend after 16:30
nyse_after_trading <- nyse %>%
filter(hour >= 16) %>%
mutate(date = as.Date(versionCreated + days(1))) %>%
select(versionCreated, date, everything(), -hour, -minute)
nyse_df <- nyse_during_trading %>% bind_rows(nyse_after_trading)
# NASDAQ
# News that happend during trading hours
nasdaq_during_trading <- nasdaq %>%
filter(hour < 20) %>%
mutate(date = as.Date(versionCreated)) %>%
select(versionCreated, date, everything(), -hour, -minute)
# News that happend after 20:00
nasdaq_after_trading <- nasdaq %>%
filter(hour >= 20) %>%
mutate(date = as.Date(versionCreated + days(1))) %>%
select(versionCreated, date, everything(), -hour, -minute)
nasdaq_df <- nasdaq_during_trading %>% bind_rows(nasdaq_after_trading)
# Recombined story-level data with the attributed trading date.
df <- nyse_df %>% bind_rows(nasdaq_df)
# ==== Include only core trading hours ====
#
# # News that happend during trading hours
# during_trading <- regular_days %>%
# filter(hour < 16) %>%
# mutate(date = as.Date(versionCreated)) %>%
# select(versionCreated, date, everything(), -hour, -minute)
#
# # News that happend after 16:30
# after_trading <- regular_days %>%
# filter(hour >= 16) %>%
# mutate(date = as.Date(versionCreated + days(1))) %>%
# select(versionCreated, date, everything(), -hour, -minute)
#
# df <- during_trading %>% bind_rows(after_trading)
# df <- df %>% bind_rows(holidays)
# ==== Weekends ====
# weekends <- df %>% filter(weekdays(date) %in% c('Saturday', 'Sunday'))
# weekdays <- df %>% filter(!weekdays(date) %in% c('Saturday', 'Sunday'))
#
# weekends <- weekends %>%
# mutate(date = if_else(weekdays(date) == 'Saturday', date + days(2), date + days(1)))
#
# df <- weekends %>% bind_rows(weekdays)
# ==== Sentiment extraction ====
# Read sentiment wordlists Loughrain and Mcdonald
# Build signed word lists: positive = +1, negative and uncertainty = -1.
lm_positive <- tibble(DictionaryLM$positive) %>%
mutate(sentiment = 1, dict = 'pos') %>%
rename(word = `DictionaryLM$positive`)
lm_negative <- tibble(DictionaryLM$negative) %>%
mutate(sentiment = -1, dict = 'neg') %>%
rename(word = `DictionaryLM$negative`)
lm_uncertain <- tibble(DictionaryLM$uncertainty) %>%
mutate(sentiment = -1, dict = 'un') %>%
rename(word = `DictionaryLM$uncertainty`)
# Combine positive and uncertain words
lm_dictionary <- lm_positive %>% bind_rows(lm_negative) %>% bind_rows(lm_uncertain)
# Filter headlines containing the company name or ticker
# (company names are stripped of legal suffixes before matching).
df <- df %>%
mutate(co_conm = co_conm %>%
str_remove('CORP') %>% str_squish() %>%
str_remove('INC') %>% str_squish() %>%
str_remove('CO') %>% str_squish() %>%
str_remove('LTD') %>% str_squish()) %>%
filter(
text %>% str_detect(regex(co_conm, ignore_case = T)) |
text %>% str_detect(regex(co_tic, ignore_case = T)))
# Remove NYSE ORDER IMBALANCE notices, removes approx 45k rows
df <- df %>%filter(!text %>% str_detect(regex('NYSE ORDER IMBALANCE', ignore_case = T)))
# Convert to tidy text format (one row per word per headline)
df <- df %>% unnest_tokens(word, text)
# Remove stop words
df <- df %>% anti_join(stop_words)
# Count the total words per headline
number_of_words <- df %>% group_by(date, ric, storyId) %>% tally() %>% rename(total_n = n)
# Join the total words to main_df
df <- df %>% inner_join(number_of_words)
# Join sentiment words (keeps only rows whose word is in the LM dictionary)
main_df <- df %>% inner_join(lm_dictionary)
# Story-level sentiment under three dictionary variants, each as the sum
# of signed sentiment-word scores within the headline.
# Calculate news specific sentiment for all words
news_level_sentiment_all <- main_df %>%
group_by(date, storyId, ric, total_n) %>%
summarise(sentiment = sum(sentiment)) %>% ungroup() %>%
mutate(dict = 'all')
# Calculate news specific sentiment for neg and pos words
news_level_sentiment_neg_pos <- main_df %>%
filter(dict %in% c('neg', 'pos')) %>%
group_by(date, storyId, ric, total_n) %>%
summarise(sentiment = sum(sentiment)) %>% ungroup() %>%
mutate(dict = 'neg_pos')
# Calculate news specific sentiment for only neg
news_level_sentiment_neg <- main_df %>%
filter(dict == 'neg') %>%
group_by(date, storyId, ric, total_n) %>%
summarise(sentiment = sum(sentiment)) %>% ungroup() %>%
mutate(dict = 'neg')
# Combine (long format: one row per story per dictionary variant)
news_level_sentiment <- news_level_sentiment_all %>%
bind_rows(news_level_sentiment_neg) %>%
bind_rows(news_level_sentiment_neg_pos)
# Calculate daily sentiment from news specific sentiments:
# signed sentiment-word sum over total headline words, per day/firm/variant.
daily_sentiment <- news_level_sentiment %>%
group_by(date, ric, dict) %>%
summarise(
sentiment = sum(sentiment) / sum(total_n))
# Calculate total words per day per ric. Deduplicate on storyId first so
# each headline's word count is counted once (news_level_sentiment holds
# one row per story per dictionary variant). NOTE(fix): removed a dead
# non-deduplicated version of this computation that was immediately
# overwritten by the line below.
daily_words <- news_level_sentiment %>%
select(date, storyId, ric, total_n) %>% distinct() %>%
group_by(date, ric) %>%
summarise(total_words = sum(total_n))
# Combine with daily words
daily_sentiment <- daily_sentiment %>% inner_join(daily_words)
# Transform sentiment column to multiple columns
daily_sentiment <- daily_sentiment %>%
spread(dict, sentiment) %>%
rename(
se_all = all,
se_neg = neg,
se_np = neg_pos
) %>%
arrange(ric, date)
" Fill missing days so that the data contains all days from the period
Also, days without no news are stored to column 'no_news', (some of these might public holidays)
TODO it has be checked that there is no dates from over or under the period "
daily_sentiment <- daily_sentiment %>%
group_by(ric) %>%
complete(date = seq.Date(min(date), max(date), by='day')) %>% ungroup()
" Remove weekends from the dataset, this will still leave public holidays, but they will
be filtered out since the price data does not contain them "
daily_sentiment <- daily_sentiment %>%
mutate(weekday = weekdays(date)) %>%
filter(!weekday %in% c('Saturday', 'Sunday')) %>%
select(date, ric, total_words, everything(), -weekday)
# Find all the 'sentiment' words
sentiment_words <- main_df %>%
group_by(date, ric) %>%
summarise(words = paste(word, collapse = ', ')) %>% arrange(ric, date)
# Join the words
daily_sentiment <- daily_sentiment %>% left_join(sentiment_words)
# Replace all NA's
daily_sentiment <- daily_sentiment %>% mutate_if(is.numeric, replace_na, 0)
# Write the final output
write_csv(daily_sentiment, '../data/daily_sentiment_file.csv')
# ==== Plotting ====
# Word clouds of the 50 most frequent words, overall and per dictionary.
# `angle` randomly rotates ~20% of words 90 degrees for visual variety.
# All words (excluding pure numbers and tokens from company names)
all_words <- df %>% group_by(word) %>% tally() %>% arrange(n %>% desc()) %>%
filter(str_detect(word, '[^0-9]'))
all_words <- all_words %>%
anti_join(news %>% select(co_conm) %>% distinct() %>% mutate(co_conm = str_to_lower(co_conm)) %>%
unnest_tokens(word, co_conm)) %>% head(50) %>% mutate(angle = if_else(rbinom(n(), 1, 0.2) == 0,0,90))
# All with sentiment
all_sentiment_words <- main_df %>% group_by(word) %>% tally() %>%
arrange(n %>% desc()) %>% head(50) %>% mutate(angle = if_else(rbinom(n(), 1, 0.2) == 0,0,90))
# Negative
negative_words <- main_df %>% filter(dict == 'neg') %>% group_by(word) %>% tally() %>%
arrange(n %>% desc()) %>% head(50) %>% mutate(angle = if_else(rbinom(n(), 1, 0.2) == 0,0,90))
# Positive
positive_words <- main_df %>% filter(dict == 'pos') %>% group_by(word) %>% tally() %>%
arrange(n %>% desc()) %>% head(50) %>% mutate(angle = if_else(rbinom(n(), 1, 0.2) == 0,0,90))
# # Uncertain
# uncertain_words <- main_df %>% filter(dict == 'un') %>% group_by(word) %>% tally() %>%
#   arrange(n %>% desc()) %>% head(50)
# Plot metrics (PDF sizes in inches)
plot_width <- 4
plot_height <- 3.5
# All words
ggplot(all_words, aes(label = word, size = n, angle = angle)) +
geom_text_wordcloud_area(shape = 'circle') +
theme_minimal() +
ggsave('../thesis/figures/all_words_wordcloud.pdf',
width = plot_width, height = plot_height, dpi = 'retina')
# All sentiment words
ggplot(all_sentiment_words, aes(label = word, size = n, angle = angle)) +
geom_text_wordcloud_area() +
scale_size_area() +
theme_minimal() +
ggsave('../thesis/figures/all_sentiment_wordcloud.pdf',
width = plot_width, height = plot_height, dpi = 'retina')
# Negative sentiment words
ggplot(negative_words, aes(label = word, size = n, angle = angle)) +
geom_text_wordcloud_area() +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5)) +
ggsave('../thesis/figures/negative_wordcloud.pdf',
width = plot_width, height = plot_height, dpi = 'retina')
# Positive sentiment words
ggplot(positive_words, aes(label = word, size = n, angle = angle)) +
geom_text_wordcloud_area() +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5)) +
ggsave('../thesis/figures/positive_wordcloud.pdf',
width = plot_width, height = plot_height, dpi = 'retina')
# # Uncertain sentiment words
# ggplot(uncertain_words, aes(label = word, size = n)) +
#   geom_text_wordcloud() +
#   theme_minimal() +
#   theme(plot.title = element_text(hjust = 0.5)) +
#   ggsave('../thesis/figures/uncertain_wordcloud.pdf',
#          width = plot_width, height = plot_height, dpi = 'retina')
# Cropt all figures in figures folder (requires pdfcrop on PATH; shell loop)
system("for f in ../thesis/figures/*
do
pdfcrop --margins 5 $f $f
done")
|
9bd4f4a9afd73b17cc4e47482c8edfedf661f4c4 | 6cfaf1d70ddc6da14a7a88535867f199f1075c1d | /code/outbreak_step3a_generate_onset.R | f3fd92214bac39d676204f1b77812147c1bea397 | [] | no_license | rachaelpung/cruise_networks | fc041971d21a2216893a6c1b939eabcc6dfc53d6 | 44d5b297cb9ca44cb2a14c337e8a62bd6a748279 | refs/heads/main | 2023-04-10T09:51:11.780869 | 2022-10-23T01:49:49 | 2022-10-23T01:49:49 | 418,541,935 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,412 | r | outbreak_step3a_generate_onset.R | source('github/code/cruise_load_library.R')
# determine probability of onset by day of event (day 1-7)
# http://weekly.chinacdc.cn/en/article/doi/10.46234/ccdcw2021.148
# Discretised log-normal incubation period (median 4 days), truncated to 14
# days and renormalised so the daily probabilities sum to 1
distIncub = data.table(DAY=1:14, PDF=dlnorm(1:14, meanlog = log(4), sdlog =sqrt(2*log(4.4/4))))
distIncub[,PDF:=PDF/sum(PDF)]
# assume that index cases could be infected up to 14 days before event
# determine probability symptoms onset by respective days before/during event
# uniform prior over the 14 possible infection days (day -13 .. day 0)
distOnset = data.table(DAY_INFECTION=-13:0,PROB_DAY_INFECTION=1/14)
# expand to one row per (infection day, incubation length) combination
distOnset = distOnset[rep(seq_len(distOnset[,.N]), each=14)]
distOnset[,DAY_ONSET:=rep(1:14, len=.N)]
# look up the incubation-period probability for each incubation length
distOnset[distIncub,INC_PDF:=i.PDF, on=c(DAY_ONSET='DAY')]
# shift onset day relative to the event start and weight by the infection-day prior
distOnset[,DAY_ONSET:=DAY_ONSET+DAY_INFECTION]
distOnset[,INC_PDF:=INC_PDF*PROB_DAY_INFECTION]
# marginalise over infection day to get the onset-day distribution
distOnset = distOnset[,sum(INC_PDF), by=.(DAY_ONSET)]
# determine probability of symptoms onset by respective days during event
# conditional on symptoms onset during event (persons who developed symptoms prior to event will be barred)
distOnset.7 = distOnset[DAY_ONSET %in% c(1:7)]
setnames(distOnset.7, old='V1', new='PDF')
distOnset.7[,PDF:=PDF/sum(PDF)]
# same conditioning, for a 3-day event
distOnset.3 = distOnset[DAY_ONSET %in% c(1:3)]
setnames(distOnset.3, old='V1', new='PDF')
distOnset.3[,PDF:=PDF/sum(PDF)]
save(distOnset.3, file = 'github/data/onset/distOnset.3.RData')
save(distOnset.7, file = 'github/data/onset/distOnset.7.RData')
|
fce6b4baee03c377fa105956c6038d873d599a62 | 06888de22ecff4d48a621c778aa35b18a28c32f1 | /man/calculate_alignment_stats.Rd | 1bd7c50c1fc48908c8619333e498022f341277e7 | [
"MIT"
] | permissive | joelnitta/baitfindR | fcb9db8bc978a6004e988003a5b1fb4675f84162 | de73d9451115ad143da249e5ef2146bb929cd17b | refs/heads/master | 2021-07-20T14:16:15.081289 | 2020-04-29T20:04:47 | 2020-04-29T20:04:47 | 140,532,881 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,886 | rd | calculate_alignment_stats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_alignment_stats.R
\name{calculate_alignment_stats}
\alias{calculate_alignment_stats}
\title{Calculate summary statistics for an alignment.}
\usage{
calculate_alignment_stats(
alignment,
cutoff = 120,
cutoff_any = FALSE,
include_aln = FALSE
)
}
\arguments{
\item{alignment}{Input alignment; must be a matrix of class "DNAbin".}
\item{cutoff}{Numeric value indicating minimum exon length (optional);
flag this alignment if any/all exons are less than the cutoff length.}
\item{cutoff_any}{Logical; Should the alignment
be flagged if any exons are shorter than the cutoff? The default, FALSE,
means that the alignment will only be flagged if all exons are shorter
than the cutoff value.}
\item{include_aln}{Logical; Should the original alignment
be included in the output list?}
}
\value{
A list including the following summary statistics: \describe{
\item{intron_lengths}{List including vector of intron lengths}
\item{exon_lengths}{List including vector of exon lengths}
\item{num_introns}{Number of introns}
\item{num_exons}{Number of introns}
\item{mean_dist}{Mean genetic distance between sequences in alignment}
\item{max_dist}{Maximum genetic distance between sequences in alignment}
\item{GC_content}{Total \%GC content}
\item{pars_inf}{Fraction of sites that are parsimony informative}
\item{total_exon_length}{Total exon length}
\item{less_than_cutoff}{Logical flag indicating whether alignment passed
the minimum exon length cutoff or not}
\item{alignment}{The original input alignment}
}
}
\description{
Including the original alignment in the output with \code{include_aln}
can be useful for mapping \code{calculate_alignment_stats} over a list
of alignments with \code{\link[purrr]{map_df}} to sort and filter
alignments by their summary statistics.
}
|
030d4de41e056c14935bd297f276009251a0d653 | 597845777259112d3a912256466242500a6f7533 | /man/PETseasonality.Rd | 1ac3e3c60ce109d632a605bd4f7f2206f51c99b8 | [
"MIT"
] | permissive | ptitle/envirem | e27187556cd8fb3afac2f8fb0572c660c42a44b9 | 40cccd0485324605724284c6936206327a4f1002 | refs/heads/master | 2023-04-27T14:16:23.463364 | 2023-04-14T16:04:35 | 2023-04-14T16:04:35 | 61,913,099 | 12 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,447 | rd | PETseasonality.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PETseasonality.R
\name{PETseasonality}
\alias{PETseasonality}
\title{PET seasonality}
\usage{
PETseasonality(PETstack)
}
\arguments{
\item{PETstack}{rasterStack of monthly PET rasters}
}
\value{
rasterLayer in mm / month
}
\description{
Seasonality of potential evapotranspiration
}
\details{
PET seasonality = 100 * standard deviation of monthly PET.
}
\examples{
\donttest{
# Find example rasters
rasterFiles <- list.files(system.file('extdata', package='envirem'), full.names=TRUE)
env <- stack(rasterFiles)
# identify the appropriate layers
meantemp <- grep('mean', names(env), value=TRUE)
solar <- grep('solrad', names(env), value=TRUE)
maxtemp <- grep('tmax', names(env), value=TRUE)
mintemp <- grep('tmin', names(env), value=TRUE)
# read them in as rasterStacks
meantemp <- stack(env[[meantemp]])
solar <- stack(env[[solar]])
maxtemp <- stack(env[[maxtemp]])
mintemp <- stack(env[[mintemp]])
tempRange <- abs(maxtemp - mintemp)
# get monthly PET
pet <- monthlyPET(meantemp, solar, tempRange)
PETseasonality(pet)
}
}
\references{
Metzger, M.J., Bunce, R.G.H., Jongman, R.H.G., Sayre, R., Trabucco, A. & Zomer, R. (2013).
A high-resolution bioclimate map of the world: a unifying framework for global
biodiversity research and monitoring. \emph{Global Ecology and Biogeography},
\strong{22}, 630-638.
}
\seealso{
\link{monthlyPET}
}
\author{
Pascal Title
}
|
94ea54a22f25d976568dfb40a38bb919533d1295 | 9d0dcacf6878a3451a01083ae0b87521e3bd436d | /fMRI/fx/motion/auto-motion-fmriprep/auto_motion_fmriprep_gen.R | 957d71872d0d148d785533165a195b6c4831bbaa | [] | no_license | UOSAN/DEV_scripts | 244a87fb3dfc230c3fad19b2e4db5a0a2fab1ef4 | 513121b4fb84aaed45099415d7f9cf5e876b5d57 | refs/heads/master | 2023-08-31T11:15:54.357529 | 2023-08-28T16:44:56 | 2023-08-28T16:44:56 | 144,056,482 | 0 | 2 | null | 2019-11-26T23:48:57 | 2018-08-08T19:04:35 | MATLAB | UTF-8 | R | false | false | 1,270 | r | auto_motion_fmriprep_gen.R | # This script loads the fmriprep confound files, applies a machine learning classifier to
# predict motion artifacts, and returns summaries by task, task and run, and trash volumes only.
# It will also export new rp_txt files if writeRP = TRUE and plots if writePlots = TRUE.
# Inputs:
# * config.R = configuration file with user defined variables and paths
# Outputs:
# * study_summaryRun.csv = CSV file with summary by task and run
# * study_summaryTask.csv = CSV file with summary by task only
# * study_trashVols.csv = CSV file with trash volumes only
# * if writeRP = TRUE, rp_txt files will be written to rpDir
# * if writePlots = TRUE, plots for each subjects will be written to plotDir
#
# Load required packages (helper defined in the sourced scripts below)
motion_get_packages()
#------------------------------------------------------
# source the config file
#------------------------------------------------------
cat("loading config...")
source('config.R')
cat("config loaded.\n")
#------------------------------------------------------
# load confound files
#------------------------------------------------------
source("auto_motion_fmriprep_files.R")
dataset <- motion_prep_load()
# Classify motion artifacts and write summaries / rp files / plots (per config.R)
motion_classify_summarize_write(dataset)
# Clean up the state file so the next run starts fresh
# NOTE(review): state_filename is presumably defined in config.R -- confirm
if (file.exists(state_filename)) {
  #Delete file if it exists
  file.remove(state_filename)
}
|
eac34a66aceab72667cdb03fac81f69d2859c606 | 98d2dea2ac67ba55374046080bc42bafcd2181cd | /packages_and_functions/ez_deepcell_lib.R | 78481cb37b5d390d51145f5d40866be6552fb10f | [] | no_license | bryjcannon/ez_segmenter_analysis | 8cc645d5f13439947b5954941870b44b878b0af3 | 4aea8fd5021e77b8cc19f506b29c5d72831e2c8c | refs/heads/master | 2020-11-25T02:13:50.803349 | 2019-12-17T21:27:49 | 2019-12-17T21:27:49 | 228,447,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,135 | r | ez_deepcell_lib.R | install_ez_packages <- function(answer) {
if (answer == T | TRUE) {
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install(pkgs = c("R.matlab", "digest", "rlang", "flowCore", "ks", "flowVS", "flowViz", "RColorBrewer", "gtools", "gplots",
"ggplot2", "openxlsx", "samr", "lattice", "flowStats", "gdata", "Rtsne", "umap",
"FlowSOM", "dplyr", "plyr", "pryr", "doBy", "scales", "mixOmics", "reshape2",
"plotly", "Rmisc", "Hmisc", "EBImage", "magick", "phonTools"))
}
print("Done")
}
load_ez_packages <- function(answer) {
  # Attach all packages used by the ez_segmenter analysis scripts and set up
  # shared helpers (the `exprs` shortcut and the `c25` color palette).
  #
  # answer: logical; the libraries are attached only when TRUE.
  # Returns (invisibly, via print) the string "Done".
  #
  # Fix: the original guard `answer == T | TRUE` parses as
  # `(answer == T) | TRUE` and is therefore always TRUE, so the libraries were
  # attached even when the caller passed FALSE.  isTRUE() restores the
  # intended behavior.
  if (isTRUE(answer)) {
    library("R.matlab")
    library("digest")
    library("rlang")
    library("flowCore")
    library("ks")
    library("flowVS") #
    library("flowViz")
    library("RColorBrewer")
    library("gtools")
    library("gplots")
    library("ggplot2")
    library("openxlsx") #
    library("samr") #
    library("lattice")
    library("flowStats") #
    library("gdata")
    library("Rtsne")
    library("umap")
    library("FlowSOM") #
    library("dplyr")
    library('plyr')
    library("pryr")
    library("doBy") #
    library("scales")
    library("mixOmics") #
    library("reshape2")
    library("plotly") #
    library("Rmisc")
    library("Hmisc") #
    library("EBImage")
    library("magick")
    library("phonTools")
    #https://support.bioconductor.org/p/109128/ --> explains why use Biobase::exprs
    exprs = Biobase::exprs
    # NOTE(review): `exprs` and `c25` are assigned in this function's local
    # scope and are discarded on return; if callers rely on them, they should
    # be assigned at the top level (or with <<-).  Behavior kept as-is here.
    # color palette taken from stackOverflow <- https://stackoverflow.com/questions/9563711/r-color-palettes-for-many-data-classes
    c25 <- c(
      "dodgerblue2", "#E31A1C", # red
      "green4",
      "#6A3D9A", # purple
      "#FF7F00", # orange
      "gold1",
      "skyblue2", "#FB9A99", # lt pink
      "palegreen2",
      "#CAB2D6", # lt purple
      "#FDBF6F", # lt orange
      "gray70", "khaki2",
      "maroon", "orchid1", "deeppink1", "blue1", "steelblue4",
      "darkturquoise", "green1", "yellow4", "yellow3",
      "darkorange4", "brown", "black")
  }
  print("Done")
}
9727e79225efb2f910318a4205f4e89113fdb413 | 0c6ab6d7c471d79391192b3fbca24d959672030e | /R/10-data-integration.R | ac8e45fc32be290e63c7690f8ff907c9f4926f63 | [] | no_license | lcolladotor/osca_playground_leo | 5a517e7ddbe2d6b01a1ad6692957f6a0852c9dd1 | b8dd1a75857c54fa4d0809ed9dc776b9154c4c39 | refs/heads/master | 2021-04-16T05:02:52.500796 | 2020-03-27T16:41:21 | 2020-03-27T16:41:21 | 249,329,224 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 190 | r | 10-data-integration.R | # Notes for 10-data-integration.R
# --------------------------------------
## Copy code from https://github.com/lcolladotor/osca_LIIGH_UNAM_2020/blob/master/10-data-integration.R
## Notes
|
bc6afc6881fbec09f89453435315bd1f4499c3a1 | 1d60d9c02a0a1eb49b7324f4aa287afbe73e4a06 | /rprogramming/assignment1/pollutantmean.R | 0d0c92d05318b8be744338b4ac4cc5757ad48916 | [] | no_license | scottessner/datasciencecoursera | f069a7b4cc4d5312867df24ce261565af98b29d0 | b420aef3d939e5ebae7b8fc56547fe1294d640c5 | refs/heads/master | 2021-01-25T07:34:13.205258 | 2014-09-16T05:31:39 | 2014-09-16T05:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 578 | r | pollutantmean.R | pollutantmean <- function(directory, pollutant, id = 1:332) {
values <- numeric()
#iterate throught the files g
for(monitor in id){
#clean up id and directory and create path
path <- paste(directory, "/", sprintf("%03d", monitor), ".csv", sep = "");
#Open the file
currentFile <- read.csv(path)
#Append the values from this file into the stored variable
values <- c(values, t(currentFile[pollutant]))
}
#Compute the mean without the NA values
mean(values, na.rm = TRUE)
} |
c79bf8b9d307cb7efbad10fe147c158ab721ccc6 | 2df21dc9a08abb1430b467d34ba2f3416909e339 | /man/dcardBoardContent.Rd | 8752a6dd2a947db120eb004bdc5c875afd6acddc | [] | no_license | b05102139/DcardR | 9af1e06079341419ae08fec7c4c6281eadd073d9 | 80a2d97b72b4f4150751d975188eea46d313ef87 | refs/heads/master | 2022-11-22T00:16:28.787712 | 2020-07-19T11:29:12 | 2020-07-19T11:29:12 | 280,819,323 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 513 | rd | dcardBoardContent.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dcardmaincontent.R
\name{dcardBoardContent}
\alias{dcardBoardContent}
\title{DCard Board Content}
\usage{
dcardBoardContent(board, posts, by_popular = F, rate_limit = 1)
}
\arguments{
\item{rate_limit}{}
}
\value{
}
\description{
Returns general post info in a specific board, where the post number and whether or not to sort by popular can be specified. The rate limit can also be altered, at the risk of being blocked by the API.
}
|
8e2dade5ed460b72c42ab7a28d08fda856e21c3d | 253d4a133a8a6e568f30523447358689d182f473 | /R/tidyverse.R | 4954fec5c91d12b4edc294a9826431c19981684a | [
"Apache-2.0"
] | permissive | r-spatial/stars | 081a7fc96089aeb276051107911145d17365de79 | 0e17daf1c49b69f990ec77275e80cfd471210aee | refs/heads/main | 2023-09-04T10:58:52.888008 | 2023-08-19T09:15:37 | 2023-08-19T09:15:37 | 78,360,080 | 489 | 126 | Apache-2.0 | 2023-03-20T09:17:18 | 2017-01-08T17:48:24 | R | UTF-8 | R | false | false | 16,027 | r | tidyverse.R | # convert arrays to data.frame, in long form
# Flatten every array in the list `x` (dropping its dim attribute, keeping all
# other attributes) and bind the resulting vectors into one long-form data.frame.
to_df = function(x) {
	flatten = function(arr) structure(arr, dim = NULL)
	as.data.frame(lapply(x, flatten), stringsAsFactors = FALSE)
}
# Give every element of the list `x` the dim attribute `d`; the element data
# itself is left untouched.  Returns the modified list.
set_dim = function(x, d) {
	lapply(x, function(elem) structure(elem, dim = d))
}
# Rebuild a stars `dimensions` object after a tbl_cube operation: take the
# (possibly subsetted) dimension values in `d_cube` and merge them back into
# the matching entries of the original stars dimensions `d_stars`.
get_dims = function(d_cube, d_stars) {
	# names of the spatial raster dimensions (x/y), needed for is_raster below
	xy = attr(d_stars, "raster")$dimensions
	# keep only (and reorder to) the dimensions present in the cube
	d_stars = d_stars[names(d_cube)]
	for (i in seq_along(d_cube)) {
		d_stars[[i]]$values = if (inherits(d_stars[[i]]$values, "intervals")) {
				# interval-valued dimension: keep the intervals matched by the cube values
				v = d_stars[[i]]$values
				d_stars[[i]]$values = v[ na.omit(find_interval(d_cube[[i]], v)) ]
			} else if (is.list(d_stars[[i]]$values)) {
				# list-valued dimension (e.g. geometries): index by cube position
				d_stars[[i]]$values[ d_cube[[i]] ]
			} else
				# plain vector dimension: the cube values replace the old ones
				d_cube[[i]]
		# recreate the dimension entry so offset/delta etc. are recomputed
		d_stars[[i]] = create_dimension(values = d_stars[[i]]$values, point = d_stars[[i]]$point,
			refsys = d_stars[[i]]$refsys, is_raster = names(d_stars)[i] %in% xy)
	}
	d_stars
}
#' dplyr verbs for stars objects
#'
#' dplyr verbs for stars objects; package dplyr needs to be loaded before these methods can be used for stars objects.
#' @param .data object of class \code{stars}
#' @param ... see \link[dplyr]{filter}
#' @name dplyr
filter.stars <- function(.data, ...) {
	if (!requireNamespace("dplyr", quietly = TRUE))
		stop("package dplyr required, please install it first") # nocov
	if (!requireNamespace("cubelyr", quietly = TRUE))
		stop("package cubelyr required, please install it first") # nocov
	# round-trip through a tbl_cube: filter the cube, then rebuild the stars
	# object with the dimensions matched back by get_dims()
	cb = cubelyr::as.tbl_cube(.data)
	cb = dplyr::filter(cb, ...)
	st_as_stars(cb$mets, dimensions = get_dims(cb$dims, st_dimensions(.data)))
}
#' @name dplyr
filter.stars_proxy = function(.data, ...) {
	# lazy evaluation: append this call to the proxy object's call list
	collect(.data, match.call(), "filter", ".data", env = environment())
}
#' @name dplyr
mutate.stars <- function(.data, ...) {
	# flatten arrays to a long data.frame, mutate, then restore the array dims
	ret = dplyr::mutate(to_df(.data), ...)
	st_as_stars(set_dim(ret, dim(.data)), dimensions = st_dimensions(.data))
}
#' @name dplyr
mutate.stars_proxy = function(.data, ...) {
	# lazy evaluation; note env is the caller's frame here (unlike the others)
	collect(.data, match.call(), "mutate", ".data", env = parent.frame())
}
#' @name dplyr
transmute.stars <- function(.data, ...) {
	# like mutate.stars, but only the transmuted columns are kept
	ret = dplyr::transmute(to_df(.data), ...)
	st_as_stars(set_dim(ret, dim(.data)), dimensions = st_dimensions(.data))
}
#' @name dplyr
transmute.stars_proxy = function(.data, ...) {
	collect(.data, match.call(), "transmute", ".data", env = environment())
}
#' @name dplyr
select.stars <- function(.data, ...) {
	if (!requireNamespace("dplyr", quietly = TRUE))
		stop("package dplyr required, please install it first") # nocov
	# select operates on the attributes (arrays), not on the dimensions
	ret <- dplyr::select(to_df(.data), ...)
	st_as_stars(set_dim(ret, dim(.data)), dimensions = st_dimensions(.data))
}
#' @name dplyr
select.stars_proxy = function(.data, ...) {
	collect(.data, match.call(), "select", ".data", env = environment())
}
#' @name dplyr
rename.stars <- function(.data, ...) {
	if (!requireNamespace("dplyr", quietly = TRUE))
		stop("package dplyr required, please install it first") # nocov
	ret <- dplyr::rename(to_df(.data), ...)
	st_as_stars(set_dim(ret, dim(.data)), dimensions = st_dimensions(.data))
}
#' @name dplyr
rename.stars_proxy = function(.data, ...) {
	collect(.data, match.call(), "rename", ".data", env = environment())
}
#' @param var see \link[dplyr]{pull}
#' @name dplyr
pull.stars = function (.data, var = -1) {
	if (!requireNamespace("dplyr", quietly = TRUE))
		stop("package dplyr required, please install it first") # nocov
	if (!requireNamespace("rlang", quietly = TRUE))
		stop("package rlang required, please install it first") # nocov
	var = rlang::enquo(var)
	# pull the column from the flattened table, then restore the array shape
	structure(dplyr::pull(to_df(.data), !!var), dim = dim(.data))
}
#' @name dplyr
pull.stars_proxy = function(.data, ...) {
	collect(.data, match.call(), "pull", ".data", env = environment())
}
#' @name dplyr
#' @param x object of class \code{stars}
#' @export
as.tbl_cube.stars = function(x, ...) {
	if (!requireNamespace("cubelyr", quietly = TRUE))
		stop("package cubelyr required, please install it first") # nocov
	# list-valued dimensions (e.g. geometries) cannot live in a tbl_cube;
	# replace them by their integer index
	cleanup = function(y) {
		if (is.list(y))
			seq_along(y)
		else
			y
	}
	dims = lapply(expand_dimensions(x), cleanup)
	cubelyr::tbl_cube(dims, c(unclass(x)))
}
#' @name dplyr
#' @param along name or index of dimension to which the slice should be applied
#' @param index integer value(s) for this index
#' @param drop logical; drop dimensions that only have a single index?
#' @examples
#' tif = system.file("tif/L7_ETMs.tif", package = "stars")
#' x1 = read_stars(tif)
#' if (require(dplyr, quietly = TRUE)) {
#' x1 %>% slice("band", 2:3)
#' x1 %>% slice("x", 50:100)
#' }
slice.stars <- function(.data, along, index, ..., drop = length(index) == 1) {
	#stopifnot(length(index) == 1)
	if (!requireNamespace("rlang", quietly = TRUE))
		stop("package rlang required, please install it first") # nocov
	nd <- length(dim(.data))
	# build a `[` call with one (mostly missing) argument per dimension, e.g.
	# .data[, , index]; the extra first slot is the attribute selector
	indices <- rep(list(rlang::missing_arg()), nd + 1)
	# accept `along` given either as a bare symbol or a character name
	along = rlang::expr_text(rlang::ensym(along))
	ix = which(along == names(st_dimensions(.data)))[1]
	indices[[ix + 1]] <- index
	indices[["drop"]] <- drop
	# splice the index list into a subset expression and evaluate it
	eval(rlang::expr(.data[!!!indices]))
}
#' @name dplyr
slice.stars_proxy <- function(.data, along, index, ...) {
	# Eagerly slice a proxy object along a non-file dimension by subsetting its
	# file lists; falls back to lazy (queued) evaluation otherwise.
	# TODO: add adrop argument, this requires an eager implementation of
	# adrop.stars_proxy
	# If there are already operations queued, just add to the queue
	if (!is.null(attr(.data, "call_list")))
		return(collect(.data, match.call(), "slice", ".data",
			env = parent.frame(), ...))
	# figure out which dimensions are part of the files
	vecsize <- rev(cumprod(rev(dim(.data))))
	# NOTE: The first set of dimensions corresponds to the dimensions in the
	# files. The second set of dimensions corresponds to the list of files. It may
	# be undecided where exactly the break is (at least without reading in the
	# files) if there are a singleton dimensions, I am not sure if this matters,
	# for now just assume the maximum index, this should be the safe choice.
	# Singleton dimensions that are part of files probably need some logic
	# somewhere else and cannot just be ignored.
	# Can we assume, that all elements of .data are the same?
	first_concat_dim <- max(which(vecsize == length(.data[[1]])))
	stopifnot(first_concat_dim > 0)
	all_dims <- st_dimensions(.data)
	file_dims <- all_dims[seq_len(first_concat_dim - 1)]
	concat_dims <- all_dims[first_concat_dim:length(dim(.data))]
	d_concat_dims <- dim(concat_dims)
	l_concat_vec <- prod(d_concat_dims)
	# what is the dimension we have to subset
	ix <- which(names(all_dims) == along) - length(file_dims)
	stopifnot(length(ix) == 1)
	# if the slice is on file dimensions we have to queue the operation
	if (ix <= 0)
		return(collect(.data, match.call(), "slice", ".data",
			env = parent.frame(), ...))
	# subset indices for the files, it may be faster to calculate these and not
	# take them from an array.
	d <- array(seq_len(l_concat_vec), d_concat_dims)
	idx <- rep(list(quote(expr = )), length(d_concat_dims))
	idx[[ix]] <- index
	vidx <- as.vector(do.call(`[`, c(list(d), idx)))
	# The actual subsetting of files and dimensions
	file_list_new <- lapply(.data, function(x) x[vidx])
	all_dims[[along]] <- all_dims[[along]][index]
	# construct stars_proxy
	st_stars_proxy(as.list(file_list_new), all_dims,
		NA_value = attr(.data, "NA_value"),
		resolutions = attr(.data, "resolutions"),
		RasterIO = attr(.data, "RasterIO"))
}
#' @name st_coordinates
#' @param .x object to be converted to a tibble
as_tibble.stars = function(.x, ..., add_max = FALSE, center = NA) {
	if (!requireNamespace("tibble", quietly = TRUE))
		stop("package tibble required, please install it first") # nocov
	# one row per cell: the cell coordinates followed by the flattened arrays
	tibble::as_tibble(append(
		st_coordinates(.x, add_max = add_max, center = center),
		lapply(.x, function(y) structure(y, dim = NULL))
		)
	)
}
#' @name dplyr
#' @param data data set to work on
#' @param replace see \link[tidyr]{replace_na}: list with variable=value pairs, where value is the replacement value for NA's
replace_na.stars = function(data, replace, ...) {
	if (!requireNamespace("tidyr", quietly = TRUE))
		stop("package tidyr required, please install it first") # nocov
	if (!requireNamespace("cubelyr", quietly = TRUE))
		stop("package cubelyr required, please install it first") # nocov
	cb = cubelyr::as.tbl_cube(data)
	# remember the array shape, flatten to a data.frame for tidyr::replace_na,
	# then restore the shape on every metric
	d = dim(cb$mets[[1]])
	cb$mets = as.data.frame(lapply(cb$mets, as.vector))
	cb$mets = unclass(tidyr::replace_na(cb$mets, replace, ...))
	for (i in seq_along(cb$mets))
		cb$mets[[i]] = structure(cb$mets[[i]], dim = d)
	st_as_stars(cb$mets, dimensions = get_dims(cb$dims, st_dimensions(data)))
}
#' @name dplyr
replace_na.stars_proxy = function(data, ...) {
	# lazy evaluation: queue the call on the proxy object
	collect(data, match.call(), "replace_na", "data", env = environment())
}
#' ggplot geom for stars objects
#'
#' ggplot geom for stars objects
#' @name geom_stars
#' @param mapping see \link[ggplot2:geom_tile]{geom_raster}
#' @param data see \link[ggplot2:geom_tile]{geom_raster}
#' @param ... see \link[ggplot2:geom_tile]{geom_raster}
#' @param downsample downsampling rate: e.g. 3 keeps rows and cols 1, 4, 7, 10 etc.; a value of 0 does not downsample; can be specified for each dimension, e.g. \code{c(5,5,0)} to downsample the first two dimensions but not the third.
#' @param sf logical; if \code{TRUE} rasters will be converted to polygons and plotted using \link[ggplot2:ggsf]{geom_sf}.
#' @param na.action function; if \code{NA} values need to be removed before plotting use the value \code{na.omit} here (only applies to objects with raster dimensions)
#' @details \code{geom_stars} returns (a call to) either \link[ggplot2:geom_tile]{geom_raster}, \link[ggplot2]{geom_tile}, or \link[ggplot2:ggsf]{geom_sf}, depending on the raster or vector geometry; for the first to, an \link[ggplot2]{aes} call is constructed with the raster dimension names and the first array as fill variable. Further calls to \link[ggplot2:coord_fixed]{coord_equal} and \link[ggplot2]{facet_wrap} are needed to control aspect ratio and the layers to be plotted; see examples. If a \code{stars} array contains hex color values, and no \code{fill} parameter is given, the color values are used as fill color; see the example below.
#'
#' If visual artefacts occur (Moiré-Effekt), then see the details section of \link{plot.stars}
#' @export
#' @examples
#' system.file("tif/L7_ETMs.tif", package = "stars") %>% read_stars() -> x
#' if (require(ggplot2, quietly = TRUE)) {
#' ggplot() + geom_stars(data = x) +
#' coord_equal() +
#' facet_wrap(~band) +
#' theme_void() +
#' scale_x_discrete(expand=c(0,0))+
#' scale_y_discrete(expand=c(0,0))
#' # plot rgb composite:
#' st_as_stars(L7_ETMs)[,,,1:3] |> st_rgb() -> x # x contains colors as pixel values
#' ggplot() + geom_stars(data = x)
#' }
geom_stars = function(mapping = NULL, data = NULL, ..., downsample = 0, sf = FALSE,
		na.action = na.pass) {
	if (!requireNamespace("ggplot2", quietly = TRUE))
		stop("package ggplot2 required, please install it first") # nocov
	if (!requireNamespace("tibble", quietly = TRUE))
		stop("package tibble required, please install it first") # nocov
	if (is.null(data)) stop("argument data should be a stars or stars_proxy object")
	# ggplot2 cannot map unit-valued columns; strip units from all arrays
	for (i in seq_along(data)) {
		if (inherits(data[[i]], "units"))
			data[[i]] = units::drop_units(data[[i]])
	}
	if (inherits(data, "stars_proxy"))
		data = st_as_stars(data, downsample = downsample) # fetches data
	else if (any(downsample > 0))
		data = st_downsample(data, downsample)
	# TRUE when x is a character vector of #RRGGBB / #RRGGBBAA color strings
	all_colors = function (x) {
		is.character(x) && all(nchar(x) %in% c(7, 9) & substr(x, 1, 1) == "#", na.rm = TRUE)
	}
	# if the first array holds color strings and no fill was given, recurse
	# once with fill = those color values
	if (is.null(list(...)$fill) && all_colors(fill <- as.vector(data[[1]])))
		return(geom_stars(mapping = mapping, data = data, sf = sf, na.action = na.action,
			..., fill = fill)) # RETURNS/recurses
	if (is_curvilinear(data) || sf)
		data = st_xy2sfc(data, as_points = FALSE) # removes NA's by default
	d = st_dimensions(data)
	if (has_raster(d) && (is_regular_grid(d) || is_rectilinear(d))) {
		xy = attr(d, "raster")$dimensions
		if (is_regular_grid(d)) {
			# regular grid: geom_raster, x/y = dimension names, fill = first array;
			# a user-supplied mapping overrides these defaults via modifyList()
			mapping = if (is.null(mapping))
				ggplot2::aes(x = !!rlang::sym(xy[1]), y = !!rlang::sym(xy[2]),
					fill = !!rlang::sym(names(data)[1]))
			else
				modifyList( ggplot2::aes(x = !!rlang::sym(xy[1]), y = !!rlang::sym(xy[2]),
					fill = !!rlang::sym(names(data)[1])), mapping)
			data = na.action(tibble::as_tibble(data))
			ggplot2::geom_raster(mapping = mapping, data = data, ...)
		} else { # rectilinear: use geom_rect, passing on cell boundaries
			xy_max = paste0(xy, "_max")
			mapping = if (is.null(mapping))
				ggplot2::aes(xmin = !!rlang::sym(xy[1]), ymin = !!rlang::sym(xy[2]),
					xmax = !!rlang::sym(xy_max[1]), ymax = !!rlang::sym(xy_max[2]),
					fill = !!rlang::sym(names(data)[1]))
			else
				modifyList(ggplot2::aes(xmin = !!rlang::sym(xy[1]), ymin = !!rlang::sym(xy[2]),
					xmax = !!rlang::sym(xy_max[1]), ymax = !!rlang::sym(xy_max[2]),
					fill = !!rlang::sym(names(data)[1])), mapping)
			data = na.action(tibble::as_tibble(data, add_max = TRUE))
			ggplot2::geom_rect(mapping = mapping, data = data, ...)
		}
	} else if (has_sfc(d)) {
		# vector geometry: plot polygons/points via geom_sf
		if (is.null(mapping)) {
			mapping = ggplot2::aes(fill = !!rlang::sym(names(data)[1])) } else {
			mapping = modifyList( ggplot2::aes(fill = !!rlang::sym(names(data)[1])), mapping) }
		ggplot2::geom_sf(data = st_as_sf(data, long = TRUE), color = NA, mapping = mapping, ...)
	} else
		stop("geom_stars only works for objects with raster or vector geometries")
}
#' @name geom_stars
theme_stars = function(...) {
	if (!requireNamespace("ggplot2", quietly = TRUE))
		stop("package ggplot2 required, please install it first") # nocov
	# coord/scale calls below were considered but are kept commented out:
	# coord_equal() +
	# scale_fill_viridis() +
	# scale_x_discrete(expand=c(0,0)) +
	# scale_y_discrete(expand=c(0,0)) +
	ggplot2::theme_void()
}
# Register this package's S3 methods for generics owned by suggested packages
# that may not be installed at build time.
# NOTE(review): presumably invoked from the package's .onLoad -- confirm.
register_all_s3_methods = function() {
	register_s3_method("cubelyr", "as.tbl_cube", "stars") # nocov start
	register_s3_method("dplyr", "filter", "stars")
	register_s3_method("dplyr", "filter", "stars_proxy")
	register_s3_method("dplyr", "select", "stars")
	register_s3_method("dplyr", "select", "stars_proxy")
	register_s3_method("dplyr", "mutate", "stars")
	register_s3_method("dplyr", "mutate", "stars_proxy")
	register_s3_method("dplyr", "pull", "stars")
	register_s3_method("dplyr", "pull", "stars_proxy")
	register_s3_method("dplyr", "rename", "stars")
	register_s3_method("dplyr", "rename", "stars_proxy")
	register_s3_method("dplyr", "slice", "stars")
	register_s3_method("dplyr", "slice", "stars_proxy")
	register_s3_method("dplyr", "transmute", "stars")
	register_s3_method("dplyr", "transmute", "stars_proxy")
	register_s3_method("tidyr", "replace_na", "stars")
	register_s3_method("tidyr", "replace_na", "stars_proxy")
	register_s3_method("lwgeom", "st_transform_proj", "stars")
	register_s3_method("sf", "st_join", "stars")
	register_s3_method("spatstat.geom", "as.owin", "stars")
	register_s3_method("spatstat.geom", "as.im", "stars")
	register_s3_method("tibble", "as_tibble", "stars")
	register_s3_method("xts", "as.xts", "stars") # nocov end
}
# from: https://github.com/tidyverse/hms/blob/master/R/zzz.R
# Thu Apr 19 10:53:24 CEST 2018
#nocov start
# Register an S3 method `generic.class` in the namespace of package `pkg`,
# both immediately (if pkg is loaded) and via an onLoad hook for later loads.
register_s3_method <- function(pkg, generic, class, fun = NULL) {
	stopifnot(is.character(pkg), length(pkg) == 1)
	stopifnot(is.character(generic), length(generic) == 1)
	stopifnot(is.character(class), length(class) == 1)
	# default: look up the method `generic.class` in the caller's environment
	if (is.null(fun)) {
		fun <- get(paste0(generic, ".", class), envir = parent.frame())
	} else {
		stopifnot(is.function(fun))
	}
	# register now if the package is already loaded ...
	if (pkg %in% loadedNamespaces()) {
		registerS3method(generic, class, fun, envir = asNamespace(pkg))
	}
	# Always register hook in case package is later unloaded & reloaded
	setHook(
		packageEvent(pkg, "onLoad"),
		function(...) {
			registerS3method(generic, class, fun, envir = asNamespace(pkg))
		}
	)
}
#nocov end
|
1ebb402b8113eec01ab6685b8bb3a717b45f424d | 3c72657dcf1adbd6c2f7a044bf42d789c94ee5d3 | /OR.R | debf5f6167afbc2e0e98d80e493a745e056f9310 | [
"MIT"
] | permissive | geoffrosen/simple-v-microbiome | 6a2c8071f64b1d247ca983e51570e9dda3ca4f29 | cacd0cda1bc4422d5f1c034a3a13cf4a1bc48fdc | refs/heads/master | 2021-01-11T10:47:33.779104 | 2015-08-13T18:17:33 | 2015-08-13T18:17:33 | 38,334,275 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,401 | r | OR.R | odds.ratio <- function( two_by_two ) {
# --- interior of odds.ratio(two_by_two) ---
# Builds an environment-as-object holding the 2x2 table, Fisher's exact
# test, sensitivity/specificity, the odds ratio with a 95% Wald CI on the
# log scale, and a prose summary sentence.
this <- environment()
# `this` is the function's own evaluation environment, so `this$x <- v`
# and plain `x <- v` write to the same place; the environment itself is
# returned as a record of all results.
this$table <- two_by_two
this$fisher <- fisher.test(two_by_two)
# Cell layout: a,b = first row; c,d = second row.
# Assumes rows = exposure (+/-) and columns = outcome (+/-), with [1,1]
# the exposed, outcome-positive cell -- TODO confirm against callers.
a <- two_by_two[1,1]
b <- two_by_two[1,2]
c <- two_by_two[2,1]
d <- two_by_two[2,2]
this$Sensitivity <- (a)/(a + c)
this$Specificity <- (d)/(d + b)
this$OR <- (a * d)/(b * c)
# 95% CI for the OR: normal approximation on log(OR) with
# SE = sqrt(1/a + 1/b + 1/c + 1/d). NOTE(review): both OR and the SE are
# undefined when any cell is 0 (division by zero / log of Inf).
siglog <- sqrt( ( 1/a ) + ( 1/b ) + ( 1/c ) + ( 1/d ) )
zalph <- qnorm( 0.975 )
logOR <- log( OR )
loglo <- logOR - zalph * siglog
loghi <- logOR + zalph * siglog
this$OR_low <- exp( loglo )
this$OR_high <- exp( loghi )
# Build human-readable labels for exposure/outcome from the table's
# dimension names and first-column/row values.
df.two_by_two <- as.data.frame( two_by_two )
exposure.pos <- paste( colnames( df.two_by_two )[1], as.character( df.two_by_two[1,1] ) )
exposure.neg <- paste( colnames( df.two_by_two )[1], as.character( df.two_by_two[2,1] ) )
outcome <- paste( colnames( df.two_by_two )[2], as.character( df.two_by_two[1,2] ) )
# Assemble a one-sentence plain-language summary of all results.
stmt1 <- 'The odds of'
stmt2 <- outcome
stmt3 <- 'is'
stmt4 <- paste(this$OR, '(', this$OR_low, '-', this$OR_high, ')')
stmt5 <- 'given'
stmt6 <- exposure.pos
stmt7 <- 'compared to'
stmt8 <- exposure.neg
stmt8.5 <- paste('(Fisher\'s test p-value:', this$fisher$p.value, ')')
stmt9 <- '. the sensitivity and specificity are'
stmt10 <- this$Sensitivity
stmt10.5 <- 'and'
stmt11 <- this$Specificity
stmt12 <- 'respectively for'
stmt13 <- exposure.pos
stmt14 <- 'on'
stmt15 <- outcome
this$Statement <- paste( stmt1, stmt2, stmt3, stmt4, stmt5, stmt6, stmt7, stmt8, stmt8.5, stmt9, stmt10, stmt10.5, stmt11, stmt12, stmt13, stmt14, stmt15)
return(this)
}
runner <- function(data, possible_vars, constant_var) {
  # For every element of `data` (a named list of data frames) and every
  # candidate variable in `possible_vars`, cross-tabulate against
  # `constant_var` and print the table, Fisher's exact test p-value, and
  # the odds-ratio summary sentence from odds.ratio().
  #
  # BUG FIX: the original looped over the *global* `d.split`
  # (`attributes(d.split)$names`) instead of the `data` argument, so the
  # function only worked when a global of that exact name happened to
  # exist. It now iterates over the argument it was given.
  for (i in names(data)) {
    for (poss_var in possible_vars) {
      tab <- table(data[[i]][, poss_var], data[[i]][, constant_var])
      ft <- fisher.test(tab)
      o <- odds.ratio(tab)
      # writeLines() prints each element of the character vector on its
      # own line (numeric values are coerced to character by c()).
      writeLines(c('Table for', i, 'Looking at', poss_var))
      print(tab)
      writeLines(c('Fisher test p-value:', ft$p.value))
      # odds.ratio() returns its own environment, which also contains a
      # self-reference `this`; o$Statement and o$this$Statement are the
      # same object -- use the direct form.
      writeLines(c('Odds ratio:', o$Statement))
    }
  }
}
guess_status <- function(guess_col, cutoff = 0.0000001) {
  # Classify each value as "pos" (strictly above `cutoff`) or "neg"
  # (at or below `cutoff`), returning a factor. Values that fail numeric
  # conversion become NA, exactly as before.
  vals <- as.numeric(as.character(guess_col))
  status <- ifelse(vals > cutoff, "pos", "neg")
  as.factor(status)
}
double_positive <- function(col1, col2) {
  # "pos" only where *both* inputs are "pos"; everything else (including
  # NA comparisons, which which() drops) stays "neg". Returns a factor.
  status <- rep("neg", length(col1))
  both_pos <- which(col1 == "pos" & col2 == "pos")
  status[both_pos] <- "pos"
  as.factor(status)
}
pick_dominant_add_total <- function(df, cutoff = 0.5) {
  # For an abundance table (rows = samples, columns = taxa), append:
  #   total_seqs    - row sums
  #   max_abund     - the largest abundance in each row
  #   max_abund_name- the (first) column holding that maximum
  #   dominant      - that column's name if it makes up >= `cutoff` of the
  #                   row total, otherwise "None" (factor)
  # All columns are first coerced to numeric via character (handles
  # factor-encoded counts).
  #
  # IMPROVEMENTS over the original: vectorized (no per-row loop, no
  # vector growing with c()), and rows whose total is 0 or that contain
  # NA no longer crash the `if (NA)` dominance test -- they are now
  # classified as "None".
  df[] <- lapply(df, function(col) as.numeric(as.character(col)))

  abund <- as.matrix(df)
  row.totals <- rowSums(abund)

  # Per-row maximum and the first column achieving it (ties.method =
  # "first" reproduces the original which(...)[1] tie-break).
  top.idx <- max.col(abund, ties.method = "first")
  max.abund <- abund[cbind(seq_len(nrow(abund)), top.idx)]
  max.abund.name <- colnames(abund)[top.idx]

  prop <- max.abund / row.totals
  doms <- ifelse(!is.na(prop) & prop >= cutoff, max.abund.name, "None")

  df$total_seqs <- row.totals
  df$max_abund <- max.abund
  df$max_abund_name <- max.abund.name
  df$dominant <- as.factor(doms)
  df
}
ad2b5a4ea1d9b7533b8999c49c9a727fdcdf15e2 | 264424e51a7c4684cea89a266946c60ec419413b | /man/is.mlnet.Rd | d6e9048be47e8fd6a6bea0f1f4c8e2692595bc39 | [] | no_license | cran/mlergm | fe5e740abf775ee12d43c5a47084e4e94a3a75ac | 467a7a70ddbf5587eb6712f03e3f5359d43c8deb | refs/heads/master | 2021-08-31T20:08:22.327934 | 2021-08-23T15:00:02 | 2021-08-23T15:00:02 | 160,185,882 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 495 | rd | is.mlnet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.mlnet.R
\name{is.mlnet}
\alias{is.mlnet}
\title{Check if object is of class \code{mlnet}}
\usage{
is.mlnet(x)
}
\arguments{
\item{x}{An object to be checked.}
}
\value{
\code{TRUE} if the provided object \code{x} is of class \code{mlnet}, \code{FALSE} otherwise.
}
\description{
Function checks if a provided object is of class \code{mlnet} (see \code{\link{mlnet}} for details).
}
\seealso{
\code{\link{mlnet}}
}
|
82ed04466992ae281918f8f1743564a3f6ea4b07 | a8097ad611f67462ea0bb782087fee9daa4bc8ff | /projects/hematodinium_analysis/scripts/16_running_GO-MWU/16_running_GO-MWU.R | 043d746c36f615868fc97a2e64c558e34623dcc4 | [] | no_license | fish546-2021/aidan-hematodinium | ea0e710f44fada502142c19c10cad70194d307fb | 756a1e67179da4a6459cbd40d23dd51bfcccb7d3 | refs/heads/main | 2023-03-23T21:02:41.575774 | 2021-03-17T03:07:04 | 2021-03-17T03:07:04 | 328,749,147 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 34,266 | r | 16_running_GO-MWU.R | #############################
# Aidan Coyle, afcoyle@uw.edu
# Roberts lab, UW-SAFS
# 2021-02-01
#############################
# This file runs GO-MWU. Commands copied from the GO-MWU.R file
# in the GO-MWU Github repository, available at https://github.com/z0on/GO_MWU
# We will run GO-MWU twice - once for each comparison
# After each analysis, move the output files from the 16_running_GO-MWU directory into an output folder.
# I used /output/GO-MWU_output/[analysis name]
library(ape)
# Need to be in same directory as all other GO-MWU files -
# both data files and analysis files
# NOTE(review): setwd() inside a script hurts reproducibility; prefer
# running R from that directory or using a project-relative path helper.
setwd("16_running_GO-MWU")
#### GO-MWU Run 1: Elevated Day 2 vs. Ambient Days 0+2, Individual Libraries Only --------------------------
# Rank-based GO enrichment (Mann-Whitney U via GO-MWU) on signed log
# p-values for this contrast. Outputs are written to the working
# directory; move them to output/GO-MWU_output/<analysis name> afterwards.
# Edit these to match your data file names:
input="cbaihemat2.0_elev2_vs_amb02_indiv_only_pvals.csv" # two columns of comma-separated values: gene id, continuous measure of significance. To perform standard GO enrichment analysis based on Fisher's exact test, use binary measure (0 or 1, i.e., either sgnificant or not).
goAnnotations="cbaihemat2.0_elev2_vs_amb02_indiv_only_GOIDs_norepeats.txt" # two-column, tab-delimited, one line per gene, multiple GO terms separated by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl prior to running this script.
goDatabase="go.obo" # download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP" # either MF, or BP, or CC
source("gomwu.functions.R")
# ------------- Calculating stats
# It might take a few minutes for MF and BP. Do not rerun it if you just want to replot the data with different cutoffs, go straight to gomwuPlot. If you change any of the numeric values below, delete the files that were generated in previos runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
# NOTE(review): machine-specific absolute path -- edit for your system,
# or remove if perl is already on PATH.
perlPath="C:/Users/acoyl/Documents/GradSchool/RobertsLab/Tools/perl/bin/perl.exe", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms. See README for details.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" of you want to test for "greater" or "less" instead.
# Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not in module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# --------------- Results
# 24 GO terms at 10% FDR
# NOTE(review): windows() is a Windows-only graphics device; use x11()
# or quartz() (or omit) on other platforms.
windows()
results=gomwuPlot(input,goAnnotations,goDivision,
absValue=0.05, # genes with the measure value exceeding this will be counted as "good genes". This setting is for signed log-pvalues. Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
# absValue=1, # un-remark this if you are using log2-fold changes
level1=0.1, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.05, # FDR cutoff to print in regular (not italic) font.
level3=0.01, # FDR cutoff to print in large bold font.
txtsize=1.2, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
# colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral") # these are default colors, un-remar and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try make it more stringent with level1=0.05,level2=0.01,level3=0.001.
# text representation of results, with actual adjusted p-values
results[[1]]
# ------- extracting representative GOs
# this module chooses GO terms that best represent *independent* groups of significant GO terms
pcut=1e-2 # adjusted pvalue cutoff for representative GO
hcut=0.9 # height at which cut the GO terms tree to get "independent groups".
# plotting the GO tree with the cut level (un-remark the next two lines to plot)
# plot(results[[2]],cex=0.6)
# abline(h=hcut,col="red")
# cutting
ct=cutree(results[[2]],h=hcut)
annots=c();ci=1
# For each cluster of GO terms: drop obsolete terms, keep the term with
# the smallest p-value; ties are broken by evaluating the leading "x/y"
# fraction in the row name (presumably good/total genes -- the
# eval(parse()) here is inherited from upstream GO_MWU example code).
for (ci in unique(ct)) {
message(ci)
rn=names(ct)[ct==ci]
obs=grep("obsolete",rn)
if(length(obs)>0) { rn=rn[-obs] }
if (length(rn)==0) {next}
rr=results[[1]][rn,]
bestrr=rr[which(rr$pval==min(rr$pval)),]
best=1
if(nrow(bestrr)>1) {
nns=sub(" .+","",row.names(bestrr))
fr=c()
for (i in 1:length(nns)) { fr=c(fr,eval(parse(text=nns[i]))) }
best=which(fr==max(fr))
}
if (bestrr$pval[best]<=pcut) { annots=c(annots,sub("\\d+\\/\\d+ ","",row.names(bestrr)[best]))}
}
mwus=read.table(paste("MWU",goDivision,input,sep="_"),header=T)
bestGOs=mwus[mwus$name %in% annots,]
bestGOs
#### GO-MWU Run 2: Elevated Day 2 vs. Ambient Day 0+2+17 + Elevated Day 0 + Lowered Day 0 --------------------------
# Same GO-MWU workflow as Run 1, pointed at the broader elevated-day-2
# contrast; only the input/annotation file names differ.
# Edit these to match your data file names:
input="amb0217_elev0_low0_vs_elev2_pvals.csv" # two columns of comma-separated values: gene id, continuous measure of significance. To perform standard GO enrichment analysis based on Fisher's exact test, use binary measure (0 or 1, i.e., either sgnificant or not).
goAnnotations="amb0217_elev0_low0_vs_elev2_GOIDs_norepeats.txt" # two-column, tab-delimited, one line per gene, multiple GO terms separated by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl prior to running this script.
goDatabase="go.obo" # download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP" # either MF, or BP, or CC
source("gomwu.functions.R")
# ------------- Calculating stats
# It might take a few minutes for MF and BP. Do not rerun it if you just want to replot the data with different cutoffs, go straight to gomwuPlot. If you change any of the numeric values below, delete the files that were generated in previos runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="C:/Users/acoyl/Documents/GradSchool/RobertsLab/Tools/perl/bin/perl.exe", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms. See README for details.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" of you want to test for "greater" or "less" instead.
# Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not in module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# --------------- Results
# 2 GO terms at 10% FDR
windows()
results=gomwuPlot(input,goAnnotations,goDivision,
absValue=0.05, # genes with the measure value exceeding this will be counted as "good genes". This setting is for signed log-pvalues. Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
# absValue=1, # un-remark this if you are using log2-fold changes
level1=0.1, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.05, # FDR cutoff to print in regular (not italic) font.
level3=0.01, # FDR cutoff to print in large bold font.
txtsize=1.2, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
# colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral") # these are default colors, un-remar and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try make it more stringent with level1=0.05,level2=0.01,level3=0.001.
# text representation of results, with actual adjusted p-values
results[[1]]
# Only 2 categories represented, and both have p-values above 0.05 (0.0848 for both).
# Stopping analysis here
# NOTE(review): the extraction code below was still left in place even
# though the analysis was stopped; with no significant terms it simply
# yields an empty bestGOs.
# this module chooses GO terms that best represent *independent* groups of significant GO terms
pcut=1e-2 # adjusted pvalue cutoff for representative GO
hcut=0.9 # height at which cut the GO terms tree to get "independent groups".
# plotting the GO tree with the cut level (un-remark the next two lines to plot)
# plot(results[[2]],cex=0.6)
# abline(h=hcut,col="red")
# cutting
ct=cutree(results[[2]],h=hcut)
annots=c();ci=1
for (ci in unique(ct)) {
message(ci)
rn=names(ct)[ct==ci]
obs=grep("obsolete",rn)
if(length(obs)>0) { rn=rn[-obs] }
if (length(rn)==0) {next}
rr=results[[1]][rn,]
bestrr=rr[which(rr$pval==min(rr$pval)),]
best=1
if(nrow(bestrr)>1) {
nns=sub(" .+","",row.names(bestrr))
fr=c()
for (i in 1:length(nns)) { fr=c(fr,eval(parse(text=nns[i]))) }
best=which(fr==max(fr))
}
if (bestrr$pval[best]<=pcut) { annots=c(annots,sub("\\d+\\/\\d+ ","",row.names(bestrr)[best]))}
}
mwus=read.table(paste("MWU",goDivision,input,sep="_"),header=T)
bestGOs=mwus[mwus$name %in% annots,]
bestGOs
#### GO-MWU Run 3: Elevated Day 0 vs. Elevated Day 2, Indiv. Libraries Only ---------------------------------------
# Same GO-MWU workflow as Run 1; only the input/annotation file names differ.
# Edit these to match your data file names:
input="elev0_vs_elev2_indiv_pvals.csv" # two columns of comma-separated values: gene id, continuous measure of significance. To perform standard GO enrichment analysis based on Fisher's exact test, use binary measure (0 or 1, i.e., either sgnificant or not).
goAnnotations="elev0_vs_elev2_indiv_GOIDs_norepeats.txt" # two-column, tab-delimited, one line per gene, multiple GO terms separated by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl prior to running this script.
goDatabase="go.obo" # download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP" # either MF, or BP, or CC
source("gomwu.functions.R")
# ------------- Calculating stats
# It might take a few minutes for MF and BP. Do not rerun it if you just want to replot the data with different cutoffs, go straight to gomwuPlot. If you change any of the numeric values below, delete the files that were generated in previos runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="C:/Users/acoyl/Documents/GradSchool/RobertsLab/Tools/perl/bin/perl.exe", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms. See README for details.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" of you want to test for "greater" or "less" instead.
# Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not in module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# --------------- Results
# 57 GO terms at 10% FDR
windows()
results=gomwuPlot(input,goAnnotations,goDivision,
absValue=0.05, # genes with the measure value exceeding this will be counted as "good genes". This setting is for signed log-pvalues. Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
# absValue=1, # un-remark this if you are using log2-fold changes
level1=0.1, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.05, # FDR cutoff to print in regular (not italic) font.
level3=0.01, # FDR cutoff to print in large bold font.
txtsize=1.2, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
# colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral") # these are default colors, un-remar and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try make it more stringent with level1=0.05,level2=0.01,level3=0.001.
# text representation of results, with actual adjusted p-values
results[[1]]
# ------- extracting representative GOs
# this module chooses GO terms that best represent *independent* groups of significant GO terms
pcut=1e-2 # adjusted pvalue cutoff for representative GO
hcut=0.9 # height at which cut the GO terms tree to get "independent groups".
# plotting the GO tree with the cut level (un-remark the next two lines to plot)
# plot(results[[2]],cex=0.6)
# abline(h=hcut,col="red")
# cutting
ct=cutree(results[[2]],h=hcut)
annots=c();ci=1
for (ci in unique(ct)) {
message(ci)
rn=names(ct)[ct==ci]
obs=grep("obsolete",rn)
if(length(obs)>0) { rn=rn[-obs] }
if (length(rn)==0) {next}
rr=results[[1]][rn,]
bestrr=rr[which(rr$pval==min(rr$pval)),]
best=1
if(nrow(bestrr)>1) {
nns=sub(" .+","",row.names(bestrr))
fr=c()
for (i in 1:length(nns)) { fr=c(fr,eval(parse(text=nns[i]))) }
best=which(fr==max(fr))
}
if (bestrr$pval[best]<=pcut) { annots=c(annots,sub("\\d+\\/\\d+ ","",row.names(bestrr)[best]))}
}
mwus=read.table(paste("MWU",goDivision,input,sep="_"),header=T)
bestGOs=mwus[mwus$name %in% annots,]
bestGOs
#### GO-MWU Run 4: Ambient Day 0 vs. Ambient Day 2, Individual Libraries Only --------------------------
# Same GO-MWU workflow as Run 1; only the input/annotation file names differ.
# Edit these to match your data file names:
input="amb0_vs_amb2_indiv_pvals.csv" # two columns of comma-separated values: gene id, continuous measure of significance. To perform standard GO enrichment analysis based on Fisher's exact test, use binary measure (0 or 1, i.e., either sgnificant or not).
goAnnotations="amb0_vs_amb2_indiv_GOIDs_norepeats.txt" # two-column, tab-delimited, one line per gene, multiple GO terms separated by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl prior to running this script.
goDatabase="go.obo" # download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP" # either MF, or BP, or CC
source("gomwu.functions.R")
# ------------- Calculating stats
# It might take a few minutes for MF and BP. Do not rerun it if you just want to replot the data with different cutoffs, go straight to gomwuPlot. If you change any of the numeric values below, delete the files that were generated in previos runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="C:/Users/acoyl/Documents/GradSchool/RobertsLab/Tools/perl/bin/perl.exe", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms. See README for details.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" of you want to test for "greater" or "less" instead.
# Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not in module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# --------------- Results
# 3 GO terms at 10% FDR
windows()
results=gomwuPlot(input,goAnnotations,goDivision,
absValue=0.05, # genes with the measure value exceeding this will be counted as "good genes". This setting is for signed log-pvalues. Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
# absValue=1, # un-remark this if you are using log2-fold changes
level1=0.1, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.05, # FDR cutoff to print in regular (not italic) font.
level3=0.01, # FDR cutoff to print in large bold font.
txtsize=1.2, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
# colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral") # these are default colors, un-remar and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try make it more stringent with level1=0.05,level2=0.01,level3=0.001.
# text representation of results, with actual adjusted p-values
results[[1]]
# ------- extracting representative GOs
# this module chooses GO terms that best represent *independent* groups of significant GO terms
pcut=1e-2 # adjusted pvalue cutoff for representative GO
hcut=0.9 # height at which cut the GO terms tree to get "independent groups".
# plotting the GO tree with the cut level (un-remark the next two lines to plot)
# plot(results[[2]],cex=0.6)
# abline(h=hcut,col="red")
# cutting
ct=cutree(results[[2]],h=hcut)
annots=c();ci=1
for (ci in unique(ct)) {
message(ci)
rn=names(ct)[ct==ci]
obs=grep("obsolete",rn)
if(length(obs)>0) { rn=rn[-obs] }
if (length(rn)==0) {next}
rr=results[[1]][rn,]
bestrr=rr[which(rr$pval==min(rr$pval)),]
best=1
if(nrow(bestrr)>1) {
nns=sub(" .+","",row.names(bestrr))
fr=c()
for (i in 1:length(nns)) { fr=c(fr,eval(parse(text=nns[i]))) }
best=which(fr==max(fr))
}
if (bestrr$pval[best]<=pcut) { annots=c(annots,sub("\\d+\\/\\d+ ","",row.names(bestrr)[best]))}
}
mwus=read.table(paste("MWU",goDivision,input,sep="_"),header=T)
bestGOs=mwus[mwus$name %in% annots,]
bestGOs
#### GO-MWU Run 5: Ambient Day 0 vs. Ambient Day 17, Individual Libraries Only --------------------------
# NOTE(review): this header previously said "Elevated Day 0 vs. Elevated
# Day 17", but the input files (amb0_vs_amb17_*) are the ambient
# comparison; header corrected to match the data.
# Same GO-MWU workflow as Run 1, but with stricter plotting cutoffs and
# smaller text because many more terms are significant here.
# Edit these to match your data file names:
input="amb0_vs_amb17_indiv_pvals.csv" # two columns of comma-separated values: gene id, continuous measure of significance. To perform standard GO enrichment analysis based on Fisher's exact test, use binary measure (0 or 1, i.e., either sgnificant or not).
goAnnotations="amb0_vs_amb17_indiv_GOIDs_norepeats.txt" # two-column, tab-delimited, one line per gene, multiple GO terms separated by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl prior to running this script.
goDatabase="go.obo" # download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP" # either MF, or BP, or CC
source("gomwu.functions.R")
# ------------- Calculating stats
# It might take a few minutes for MF and BP. Do not rerun it if you just want to replot the data with different cutoffs, go straight to gomwuPlot. If you change any of the numeric values below, delete the files that were generated in previos runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="C:/Users/acoyl/Documents/GradSchool/RobertsLab/Tools/perl/bin/perl.exe", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms. See README for details.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" of you want to test for "greater" or "less" instead.
# Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not in module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# --------------- Results
# 144 GO terms at 10% FDR
windows()
results=gomwuPlot(input,goAnnotations,goDivision,
absValue=0.05, # genes with the measure value exceeding this will be counted as "good genes". This setting is for signed log-pvalues. Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
# absValue=1, # un-remark this if you are using log2-fold changes
level1=0.01, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.005, # FDR cutoff to print in regular (not italic) font.
level3=0.0001, # FDR cutoff to print in large bold font.
txtsize=0.9, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
# colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral") # these are default colors, un-remar and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try make it more stringent with level1=0.05,level2=0.01,level3=0.001.
# text representation of results, with actual adjusted p-values
results[[1]]
# ------- extracting representative GOs
# this module chooses GO terms that best represent *independent* groups of significant GO terms
pcut=1e-2 # adjusted pvalue cutoff for representative GO
hcut=0.9 # height at which cut the GO terms tree to get "independent groups".
# plotting the GO tree with the cut level (un-remark the next two lines to plot)
# plot(results[[2]],cex=0.6)
# abline(h=hcut,col="red")
# cutting
ct=cutree(results[[2]],h=hcut)
annots=c();ci=1
for (ci in unique(ct)) {
message(ci)
rn=names(ct)[ct==ci]
obs=grep("obsolete",rn)
if(length(obs)>0) { rn=rn[-obs] }
if (length(rn)==0) {next}
rr=results[[1]][rn,]
bestrr=rr[which(rr$pval==min(rr$pval)),]
best=1
if(nrow(bestrr)>1) {
nns=sub(" .+","",row.names(bestrr))
fr=c()
for (i in 1:length(nns)) { fr=c(fr,eval(parse(text=nns[i]))) }
best=which(fr==max(fr))
}
if (bestrr$pval[best]<=pcut) { annots=c(annots,sub("\\d+\\/\\d+ ","",row.names(bestrr)[best]))}
}
mwus=read.table(paste("MWU",goDivision,input,sep="_"),header=T)
bestGOs=mwus[mwus$name %in% annots,]
bestGOs
#### GO-MWU Run 6: Ambient Day 2 vs. Ambient Day 17, Individual Libraries Only --------------------------
# Same GO-MWU workflow as Run 1, with the stricter plotting cutoffs used
# for comparisons with many significant terms.
# Edit these to match your data file names:
input="amb2_vs_amb17_indiv_pvals.csv" # two columns of comma-separated values: gene id, continuous measure of significance. To perform standard GO enrichment analysis based on Fisher's exact test, use binary measure (0 or 1, i.e., either sgnificant or not).
goAnnotations="amb2_vs_amb17_indiv_GOIDs_norepeats.txt" # two-column, tab-delimited, one line per gene, multiple GO terms separated by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl prior to running this script.
goDatabase="go.obo" # download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP" # either MF, or BP, or CC
source("gomwu.functions.R")
# ------------- Calculating stats
# It might take a few minutes for MF and BP. Do not rerun it if you just want to replot the data with different cutoffs, go straight to gomwuPlot. If you change any of the numeric values below, delete the files that were generated in previos runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="C:/Users/acoyl/Documents/GradSchool/RobertsLab/Tools/perl/bin/perl.exe", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms. See README for details.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" of you want to test for "greater" or "less" instead.
# Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not in module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# --------------- Results
# 150 GO terms at 10% FDR
windows()
results=gomwuPlot(input,goAnnotations,goDivision,
absValue=0.05, # genes with the measure value exceeding this will be counted as "good genes". This setting is for signed log-pvalues. Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
# absValue=1, # un-remark this if you are using log2-fold changes
level1=0.01, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.005, # FDR cutoff to print in regular (not italic) font.
level3=0.001, # FDR cutoff to print in large bold font.
txtsize=1.2, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
# colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral") # these are default colors, un-remar and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try make it more stringent with level1=0.05,level2=0.01,level3=0.001.
# text representation of results, with actual adjusted p-values
results[[1]]
# ------- extracting representative GOs
# this module chooses GO terms that best represent *independent* groups of significant GO terms
pcut=1e-2 # adjusted pvalue cutoff for representative GO
hcut=0.9 # height at which cut the GO terms tree to get "independent groups".
# plotting the GO tree with the cut level (un-remark the next two lines to plot)
# plot(results[[2]],cex=0.6)
# abline(h=hcut,col="red")
# cutting
ct=cutree(results[[2]],h=hcut)
annots=c();ci=1
for (ci in unique(ct)) {
message(ci)
rn=names(ct)[ct==ci]
obs=grep("obsolete",rn)
if(length(obs)>0) { rn=rn[-obs] }
if (length(rn)==0) {next}
rr=results[[1]][rn,]
bestrr=rr[which(rr$pval==min(rr$pval)),]
best=1
if(nrow(bestrr)>1) {
nns=sub(" .+","",row.names(bestrr))
fr=c()
for (i in 1:length(nns)) { fr=c(fr,eval(parse(text=nns[i]))) }
best=which(fr==max(fr))
}
if (bestrr$pval[best]<=pcut) { annots=c(annots,sub("\\d+\\/\\d+ ","",row.names(bestrr)[best]))}
}
mwus=read.table(paste("MWU",goDivision,input,sep="_"),header=T)
bestGOs=mwus[mwus$name %in% annots,]
bestGOs
#### GO-MWU Run 7: Ambient Day 2 vs. Elevated Day 2, Individual Libraries Only --------------------------
# Edit these to match your data file names:
input="amb2_vs_elev2_indiv_pvals.csv" # two columns of comma-separated values: gene id, continuous measure of significance. To perform standard GO enrichment analysis based on Fisher's exact test, use binary measure (0 or 1, i.e., either significant or not).
goAnnotations="amb2_vs_elev2_indiv_GOIDs_norepeats.txt" # two-column, tab-delimited, one line per gene, multiple GO terms separated by semicolon. If you have multiple lines per gene, use nrify_GOtable.pl prior to running this script.
goDatabase="go.obo" # download from http://www.geneontology.org/GO.downloads.ontology.shtml
goDivision="BP" # either MF, or BP, or CC
source("gomwu.functions.R")
# ------------- Calculating stats
# It might take a few minutes for MF and BP. Do not rerun it if you just want to replot the data with different cutoffs, go straight to gomwuPlot. If you change any of the numeric values below, delete the files that were generated in previous runs first.
gomwuStats(input, goDatabase, goAnnotations, goDivision,
perlPath="C:/Users/acoyl/Documents/GradSchool/RobertsLab/Tools/perl/bin/perl.exe", # replace with full path to perl executable if it is not in your system's PATH already
largest=0.1, # a GO category will not be considered if it contains more than this fraction of the total number of genes
smallest=5, # a GO category should contain at least this many genes to be considered
clusterCutHeight=0.25, # threshold for merging similar (gene-sharing) terms. See README for details.
# Alternative="g" # by default the MWU test is two-tailed; specify "g" or "l" if you want to test for "greater" or "less" instead.
# Module=TRUE,Alternative="g" # un-remark this if you are analyzing a SIGNED WGCNA module (values: 0 for not in module genes, kME for in-module genes). In the call to gomwuPlot below, specify absValue=0.001 (count number of "good genes" that fall into the module)
# Module=TRUE # un-remark this if you are analyzing an UNSIGNED WGCNA module
)
# --------------- Results
# Plot the GO enrichment tree (note the looser FDR cutoffs than in earlier runs).
# 3 GO terms at 10% FDR
windows()   # opens a new graphics device (Windows only; use quartz()/x11() on other platforms)
results=gomwuPlot(input,goAnnotations,goDivision,
absValue=0.05, # genes with the measure value exceeding this will be counted as "good genes". This setting is for signed log-pvalues. Specify absValue=0.001 if you are doing Fisher's exact test for standard GO enrichment or analyzing a WGCNA module (all non-zero genes = "good genes").
# absValue=1, # un-remark this if you are using log2-fold changes
level1=0.1, # FDR threshold for plotting. Specify level1=1 to plot all GO categories containing genes exceeding the absValue.
level2=0.05, # FDR cutoff to print in regular (not italic) font.
level3=0.01, # FDR cutoff to print in large bold font.
txtsize=1.2, # decrease to fit more on one page, or increase (after rescaling the plot so the tree fits the text) for better "word cloud" effect
treeHeight=0.5, # height of the hierarchical clustering tree
# colors=c("dodgerblue2","firebrick1","skyblue2","lightcoral") # these are default colors, un-remark and change if needed
)
# manually rescale the plot so the tree matches the text
# if there are too many categories displayed, try making it more stringent with level1=0.05,level2=0.01,level3=0.001.
# text representation of results, with actual adjusted p-values
results[[1]]
# ------- extracting representative GOs
# this module chooses GO terms that best represent *independent* groups of significant GO terms
pcut=1e-2 # adjusted pvalue cutoff for representative GO
hcut=0.9 # height at which to cut the GO terms tree to get "independent groups".
# plotting the GO tree with the cut level (un-remark the next two lines to plot)
# plot(results[[2]],cex=0.6)
# abline(h=hcut,col="red")
# cutting the clustering tree (results[[2]]) into groups of related GO terms
ct=cutree(results[[2]],h=hcut)
annots=c();ci=1
# For each cluster: drop obsolete terms, then keep the term with the smallest
# adjusted p-value; ties are broken by the largest "good genes" fraction.
for (ci in unique(ct)) {
message(ci)
rn=names(ct)[ct==ci]   # GO term labels belonging to this cluster
obs=grep("obsolete",rn)
if(length(obs)>0) { rn=rn[-obs] }   # remove obsolete GO terms
if (length(rn)==0) {next}
rr=results[[1]][rn,]
bestrr=rr[which(rr$pval==min(rr$pval)),]
best=1
if(nrow(bestrr)>1) {
# row names begin with an "n/N" count (see the sub() pattern below);
# evaluating that text yields the fraction of "good" genes used to break ties.
# NOTE(review): eval(parse(...)) on row-name text is fragile but inherited from
# the GO_MWU reference script.
nns=sub(" .+","",row.names(bestrr))
fr=c()
for (i in 1:length(nns)) { fr=c(fr,eval(parse(text=nns[i]))) }
best=which(fr==max(fr))
}
if (bestrr$pval[best]<=pcut) { annots=c(annots,sub("\\d+\\/\\d+ ","",row.names(bestrr)[best]))}
}
# pull the full MWU results table written by gomwuStats and keep only the
# representative terms selected above
mwus=read.table(paste("MWU",goDivision,input,sep="_"),header=T)
bestGOs=mwus[mwus$name %in% annots,]
bestGOs
|
97654a0838ba7b7c7d901c76463acd3e0db10a43 | 81fc55d5f9b89d599a617e19f832bf1bdf76c6e4 | /man/est.IDRm.sample.Rd | c01eca0a441abbe683d5babf03dbb2cd951d8773 | [] | no_license | YosefLab/scRAD | aefed00c3c95add2507206a791c8860d3a7e461e | 968d2625c1442f6546bdb2ce67101498de48fe2d | refs/heads/master | 2021-08-30T02:50:33.976347 | 2017-12-14T10:17:47 | 2017-12-14T10:17:47 | 96,137,608 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,810 | rd | est.IDRm.sample.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/idrm.R
\name{est.IDRm.sample}
\alias{est.IDRm.sample}
\title{Irreproducible Discovery Rate analysis with Sub-Sampling}
\usage{
est.IDRm.sample(x, mu, sigma, rho, p, frac = 0.7, nsamp = 100,
verbose = FALSE, plot = FALSE, ...)
}
\arguments{
\item{x}{an n by m numeric matrix, where m = num of replicates, n = num of
observations. Numerical values representing the significance of the
observations. Note that significant signals are expected to have large
values of x. In case that smaller values represent higher significance
(e.g. p-value), a monotonic transformation needs to be applied to reverse
the order before using this function, for example, -log(p-value).}
\item{mu}{a starting value for the scalar mean for the reproducible
component.}
\item{sigma}{a starting value for the scalar standard deviation (diagonal
covariance) of the reproducible component.}
\item{rho}{a starting value for the scalar correlation coefficient
(off-diagonal correlation) of the reproducible component.}
\item{p}{a starting value for the proportion of the reproducible component.}
\item{frac}{fraction of observations chosen in each sample. Default 0.7.}
\item{nsamp}{number of samples. Default 100.}
\item{verbose}{If TRUE, print helpful messages. Default FALSE.}
\item{plot}{If TRUE, plot summary figures. Default FALSE.}
\item{...}{additional arguments passed to \code{\link[scider]{est.IDRm}}.}
}
\value{
a list with the following elements: \itemize{ \item{mean_para}{ mean
estimated parameters: p, rho, mu, sigma.}\item{para_bp}{
\code{\link[graphics]{boxplot}} summary for estimated parameters over
samples.} \item{mean_idr}{ a numeric vector of mean local idr for each
observation (i.e. estimated conditional probability for each observation
to belong to the irreproducible component).} \item{sd_idr}{ a numeric
vector of s.d. of local idr for each observation.} \item{IDR}{ a numerical
vector of the expected irreproducible discovery rate for observations that
are as irreproducible or more irreproducible than the given observations.}
\item{times_sampled}{ a numerical vector of counts for each time an
observation was included in a sample.} \item{num_failed}{ number of times
a sampled fit failed for any reason.} }
}
\description{
Fit a multivariate Gaussian copula mixture model to multiple sub-samples of
observations.
}
\examples{
data("simu.idr",package = "idr")
# simu.idr$x and simu.idr$y are p-values
# Transfer them such that large values represent significant ones
x <- cbind(-simu.idr$x, -simu.idr$y)
mu <- 2.6
sigma <- 1.3
rho <- 0.8
p <- 0.7
idr.out <- est.IDRm.sample(x, mu, sigma, rho, p, nsamp = 5)
plot(-log10(idr.out$IDR),idr.out$sd_idr)
abline(v = 2, col = "red", lty = 2)
}
|
8b3ce58e2711e4c43f48b0c630614b6f59e42a66 | 3649f2cf6c8392d1776033becc66370f2412b58e | /app.R | c2505f26196638eafe0c0c763546e6f1cc5e155d | [] | no_license | ctkremer/harvesting | c2fcfbca62f6919a84c6b3e66b241f9522d37d7e | 7c5a869ec5e01b9837ca558c606f550e920462f0 | refs/heads/master | 2020-04-27T02:14:10.341358 | 2019-03-07T14:53:42 | 2019-03-07T14:53:42 | 173,987,938 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,145 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(deSolve)
library(ggplot2)
library(gridExtra)
# Define UI for application that draws a histogram
# UI: slider inputs for the logistic-growth parameters (r, K, N0), an optional
# fixed harvest rate H, and the output plot area.
ui <- fluidPage(
# Application title
titlePanel("Population growth activity: harvesting"),
# Sidebar with sliders for the model parameters and harvesting controls
sidebarLayout(position='left',
sidebarPanel(
sliderInput("r","r, Population growth rate",
min = -0.5,
max = 1.5,
value = 0.8),
sliderInput("K","K, Carrying capacity",
min = 1,
max = 100,
value = 50),
sliderInput("N0","N(0), Initial abundance",
min = 0,
max = 100,
value = 1),
# NOTE(review): prefer FALSE over F (F is reassignable)
checkboxInput("allow.fixed.H", "Allow fixed harvesting?", value = F),
# slider max is slightly above 0.25*r_max*K_max, presumably the largest
# possible maximum sustainable yield (rK/4) given the slider ranges
sliderInput("H","H, fixed harvest rate",
min = 0,
max = 0.25*1.5*100+0.1,
value = 0)
),
# Main panel: population dynamics (and yield) plots rendered by the server
mainPanel(
column(6,plotOutput(outputId="popPlot", width="500px",height="400px"))
#plotOutput("popPlot")
)
)
)
# Define server logic required to draw a histogram
# Server: solves the logistic ODE (with or without a fixed harvest term) via
# deSolve::ode and renders two stacked plots:
#   plot1 - population trajectory N(t) (and cumulative yield when harvesting)
#   plot2 - phase plot dN/dt vs. N, with the harvest rate overlaid when enabled
server <- function(input, output) {
### Run background calculations
plot1<-reactive({
if(!input$allow.fixed.H){
# Differential equation set up: plain logistic growth dN/dt = r*N*(1 - N/K)
Logistic<-function(t,state,parameters){
with(as.list(c(state,parameters)),{
# rate of change
dN<- r*N*(1-N/K)
# return the rate of change
list(c(dN))
}) # end of with(as.list...
}
# define parameters and IC's from the UI sliders
parameters<-c(r=input$r,K=input$K)
state<-c(N=input$N0)
times<-seq(0,100,0.01)
# Solve ODE
out<-ode(y=state,times=times,func=Logistic,parms=parameters,method='ode45')
out<-data.frame(out)
g1<-ggplot(out,aes(x=time,y=N))+
geom_line(colour='blue',size=1)+
scale_y_continuous('Abundance, N',limits=c(0,110))+
scale_x_continuous('Time, t',limits=c(0,100))+
theme_bw()+
ggtitle('Population dynamics')
}else{ # FIXED HARVEST
# Differential equation set up: logistic growth minus a constant harvest H;
# Y accumulates the total yield (dY/dt = H)
LogisticH<-function(t,state,parameters){
with(as.list(c(state,parameters)),{
# rate of change
dN<- r*N*(1-N/K)-H
dY<-H
# return the rate of change
list(c(dN,dY))
}) # end of with(as.list...
}
# define parameters and IC's from the UI sliders (yield starts at 0)
parameters<-c(r=input$r,K=input$K,H=input$H)
state<-c(N=input$N0,Y=0)
times<-seq(0,100,0.01)
# Solve ODE
out<-ode(y=state,times=times,func=LogisticH,parms=parameters,method='ode45')
out<-data.frame(out)
# figure out where, if anywhere, the population crashes (N drops below 0);
# after a crash, freeze the cumulative yield at its crash-time value
tmp1<-unlist(na.omit(out$time[out$N<0]))
if(length(tmp1)>0){
crash.time<-min(tmp1)
out$Y<-ifelse(out$time<crash.time,out$Y,out$Y[out$time==crash.time])
}
# Total yield at the end of the simulation
ty<-out$Y[nrow(out)]
g1a<-ggplot(out,aes(x=time,y=N))+
geom_line(colour='blue',size=1)+
scale_y_continuous('Abundance, N')+
scale_x_continuous('Time, t',limits=c(0,100))+
coord_cartesian(ylim = c(0,110))+
theme_bw()+
ggtitle('Population dynamics')
g1b<-ggplot(out,aes(x=time,y=Y))+
geom_line(colour='red',size=1)+
scale_y_continuous('Cumulative yield')+
scale_x_continuous('Time, t',limits=c(0,100))+
theme_bw()+
ggtitle(paste('Final yield = ',round(ty,2)))
# arrange trajectory and yield side by side
g1<-grid.arrange(g1a,g1b,nrow=1)
}
g1
})
plot2<-reactive({
if(!input$allow.fixed.H){
# growth-rate curve dN/dt = r*N*(1 - N/K) over the full N range
maxK<-100
xs<-seq(0,maxK,0.01)
ys<-input$r*xs*(1-xs/input$K)
g2<-ggplot(data.frame(xs,ys),aes(x=xs,y=ys))+
geom_line(size=1,colour='blue')+
geom_hline(yintercept = 0)+
scale_y_continuous('dN/dt')+
scale_x_continuous('N',limits=c(0,maxK))+
coord_cartesian(ylim=c(0,0.25*1.5*maxK))+
theme_bw()
}else{#FIXED HARVEST
# same curve, with the constant harvest rate drawn as a dashed red line;
# intersections mark the equilibria under harvesting
maxK<-100
xs<-seq(0,maxK,0.01)
ys<-input$r*xs*(1-xs/input$K)
g2<-ggplot(data.frame(xs,ys),aes(x=xs,y=ys))+
geom_line(size=1,colour='blue')+
geom_hline(yintercept = 0)+
geom_hline(yintercept=input$H,colour='red',linetype=2)+
geom_text(aes(x=c(75),y=c(input$H+5),label='Harvest rate'),colour='red')+
scale_y_continuous('dN/dt')+
scale_x_continuous('N',limits=c(0,maxK))+
coord_cartesian(ylim=c(0,0.25*1.5*maxK))+
theme_bw()
}
g2
})
# stack the two reactive plots into the single output slot
output$popPlot <- renderPlot({
plot.list<-list(plot1(),plot2())
grid.arrange(grobs=plot.list)
})
}
# Launch the Shiny application with the UI and server defined above
shinyApp(ui = ui, server = server)
|
203c983d29f79e7da7e2a7dc12d2ad22ef719b46 | 40c65fff3847662ce46d2afd73acf8b68b785107 | /man/check_timestep.Rd | 7a2252cfea76d7ba0ebf33e6c91c17fbee497250 | [
"MIT"
] | permissive | epinowcast/epinowcast | b4d4562603938e9a184d3450d9387f92908cd6bc | 98ec6dbe3c84ecbe3d55ce988e30f8e7cc6b776d | refs/heads/main | 2023-09-05T18:19:10.985900 | 2023-09-05T12:13:49 | 2023-09-05T12:13:49 | 422,611,952 | 23 | 5 | NOASSERTION | 2023-09-14T09:57:09 | 2021-10-29T14:47:06 | R | UTF-8 | R | false | true | 1,926 | rd | check_timestep.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.R
\name{check_timestep}
\alias{check_timestep}
\title{Check timestep}
\usage{
check_timestep(
obs,
date_var,
timestep = "day",
exact = TRUE,
check_nrow = TRUE
)
}
\arguments{
\item{obs}{Any of the types supported by \code{\link[data.table:as.data.table]{data.table::as.data.table()}}.}
\item{date_var}{The variable in \code{obs} representing dates.}
\item{timestep}{The timestep to used. This can be a string ("day",
"week", "month") or a numeric whole number representing the number of days.}
\item{exact}{Logical, if \code{TRUE}, checks if all differences exactly match the timestep. If \code{FALSE}, checks if the sum of the differences modulo the
timestep equals zero. Default is \code{TRUE}.}
\item{check_nrow}{Logical, if \code{TRUE}, checks if there are at least two
observations. Default is \code{TRUE}. If \code{FALSE}, the function returns invisibly
if there is only one observation.}
}
\value{
This function is used for its side effect of stopping if the check
fails. If the check passes, the function returns invisibly.
}
\description{
This function verifies if the difference in dates in the provided
observations corresponds to the provided timestep. If the \code{exact} argument
is set to TRUE, the function checks if all differences exactly match the
timestep; otherwise, it checks if the sum of the differences modulo the
timestep equals zero. If the check fails, the function stops and returns an
error message.
}
\seealso{
Functions used for checking inputs
\code{\link{check_calendar_timestep}()},
\code{\link{check_group_date_unique}()},
\code{\link{check_group}()},
\code{\link{check_modules_compatible}()},
\code{\link{check_module}()},
\code{\link{check_numeric_timestep}()},
\code{\link{check_quantiles}()},
\code{\link{check_timestep_by_date}()},
\code{\link{check_timestep_by_group}()}
}
\concept{check}
|
078af230ec83a50b2c8b2f4308c5182ceb0fcf46 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/VGAMextra/examples/ARMA.studentt.ff.Rd.R | 1b133e56d203de4e1b508fb5d083542d184056e0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,173 | r | ARMA.studentt.ff.Rd.R | library(VGAMextra)
### Name: ARMA.studentt.ff
### Title: VGLTSMs Family Functions: Generalized autoregressive moving
### average model with Student-t errors
### Aliases: ARMA.studentt.ff
### ** Examples
### Simulate an AR(1) series with Student-t errors, then recover the
### error-distribution parameters by fitting an ARMA(1, 0) via vglm().
set.seed(20180218)
nn <- 250
y <- numeric(nn)
ncp <- 0 # Non--centrality parameter
nu <- 3.5 # Degrees of freedom.
theta <- 0.45 # AR coefficient
res <- numeric(250) # Vector of residuals.
y[1] <- rt(1, df = nu, ncp = ncp)
# AR(1) recursion: y_t = theta * y_{t-1} + t-distributed noise
for (ii in 2:nn) {
res[ii] <- rt(1, df = nu, ncp = ncp)
y[ii] <- theta * y[ii - 1] + res[ii]
}
# Remove warm-up values (first 200 points) so the series is near-stationary.
y <- y[-c(1:200)]
res <- res[-c(1:200)]
### Fitting an ARMA(1, 0) with Student-t errors.
AR.stut.er.fit <- vglm(y ~ 1, ARMA.studentt.ff(order = c(1, 0)),
data = data.frame(y = y), trace = TRUE)
summary(AR.stut.er.fit)
Coef(AR.stut.er.fit)
## No test:
# Overlay the fitted values on the simulated series.
plot(ts(y), col = "red", lty = 1, ylim = c(-6, 6), main = "Plot of series Y with Student-t errors")
lines(ts(fitted.values(AR.stut.er.fit)), col = "blue", lty = 2)
abline( h = 0, lty = 2)
## End(No test)
6f3d77fd89df022ae4e3c419394fab767bd61ed5 | 709efc91b94968ca6b3bd823b1da402acd2805f4 | /figures/table-2.R | 26318ad98ead1c68c817d3c1c580da0b0054907c | [] | no_license | nilsreimer/inclusive-identities | 889b64d993ea7a661894b1b891c065821df1cafc | 777eba29cd13fa6990693c280beeea6066f100d5 | refs/heads/master | 2022-12-13T07:00:10.545532 | 2020-08-31T09:03:45 | 2020-08-31T09:03:45 | 143,322,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,991 | r | table-2.R | rm(list = ls())
# Notes -------------------------------------------------------------------
# Library -----------------------------------------------------------------
# Load packages
library(tidyverse); library(loo)
# Prepare -----------------------------------------------------------------
# Import results from k-fold cross-validation (one row per model, with the
# per-observation ELPD contributions stored in the list-column elpd_i)
q1_elpd <- read_rds("results/q1_elpd.rds")
# Calculate pseudo-BMA weights: spread elpd_i into one column per model,
# convert to a matrix, and feed it to loo::pseudobma_weights(). Because `.`
# appears as a named argument, magrittr does NOT also insert it as the first
# argument, so the final step mutates the original q1_elpd, adding elpd_w.
q1_elpd <- q1_elpd %>%
unnest(elpd_i) %>%
group_by(model) %>%
mutate(ii = row_number()) %>%
ungroup() %>%
pivot_wider(
names_from = model,
names_prefix = "M",
values_from = elpd_i
) %>%
select(-ii) %>%
as.matrix() %>%
pseudobma_weights() %>%
mutate(q1_elpd, elpd_w = .)
# Calculate expected log predictive density (ELPD): sum of the pointwise
# contributions, with SE = sd * sqrt(n)
q1_elpd <- q1_elpd %>%
mutate(
elpd = map(elpd_i, sum) %>% unlist(),
elpd_se = map(elpd_i, ~(sd(.) * sqrt(length(.)))) %>% unlist()
)
# Calculate pairwise differences in ELPD between every pair of models and
# express each difference as a z-score (elpd_d / elpd_d_se); the resulting
# M0..M7 columns are joined back onto q1_elpd
q1_elpd <- q1_elpd %>%
select(model, elpd_i) %>%
crossing(comparison_model = 0:7) %>%
left_join(
q1_elpd %>% select(comparison_model = model, comparison_elpd_i = elpd_i),
by = "comparison_model"
) %>%
mutate(
elpd_d = map2_dbl(elpd_i, comparison_elpd_i, ~sum(.x - .y)),
elpd_d_se = map2_dbl(elpd_i, comparison_elpd_i, ~(sd(.x - .y) * sqrt(length(.x)))),
elpd_d_z = if_else(elpd_d == 0, 0, elpd_d/elpd_d_se)
) %>%
select(model, comparison_model, elpd_d_z) %>%
pivot_wider(
names_from = comparison_model,
names_prefix = "M",
values_from = elpd_d_z
) %>%
left_join(q1_elpd, ., by = "model")
# Reformat model ids as labels M0..M7
q1_elpd <- q1_elpd %>% mutate(model = paste0("M", model))
# View the rounded table (NOTE(review): mutate_at is superseded; across() is
# the modern equivalent)
q1_elpd %>%
select(model, elpd_w, matches("M[0-9]")) %>%
mutate_at(vars(matches("M[0-9]")), round, digits = 1) %>%
mutate_at(vars(elpd_w), round, digits = 2)
|
1ca73e180175c9b68c8cba3fb48c629e4e43e72d | a802e08368280ccf5fbf07c764f1b30d7533a971 | /Use_Cases/VPS_Popcorn_Production/Kubernetes/experiments/optimizers.R | 3e755f27a4c98514c46dbf0fcd48147c5575b65b | [
"Apache-2.0"
] | permissive | janstrohschein/KOARCH | d86850d1b5e2fbd13401d93023cde783e14fb158 | 8eaa4a71cd1a33475f7b1618d075037d1446a7e1 | refs/heads/master | 2023-07-25T08:21:06.879591 | 2021-09-17T11:55:03 | 2021-09-17T11:55:03 | 229,251,272 | 4 | 8 | Apache-2.0 | 2023-07-06T21:40:07 | 2019-12-20T11:18:52 | C# | UTF-8 | R | false | false | 34,235 | r | optimizers.R | ###################################################################################
#'
#' File covers several functions related to optimizers and tuning via SPOT
#'
###################################################################################
###################################################################################
#' Get a list of parameters for optimizers, corresponding to
#' the list of feasiblePipelines
#' @param funEvals nr of function evaluations for each parameter list
#'
#' @return list of names parameter values for each optimizer
###################################################################################
getPipelineConfigurations <- function(funEvals = NULL) {
  # Validate the evaluation budget up front: a missing/NULL budget would
  # otherwise propagate zero-length entries into every control list.
  stopifnot(is.numeric(funEvals), length(funEvals) == 1, funEvals >= 1)
  listPipelineControls <- list()
  # Simulated annealing: temperature plus visiting/acceptance parameters.
  listPipelineControls[['Generalized SA']] <- list(temp = 100, qv = 2.56, qa = -5, max.call = funEvals)
  listPipelineControls[['Random Search']] <- list(funEvals = funEvals)
  listPipelineControls[['Lhd']] <- list(funEvals = funEvals, retries = 100)
  # Differential evolution spends `popsize` evaluations on the initial
  # population and `popsize` per generation. itermax is clamped to >= 1 so
  # DEoptim always runs at least one generation (very small budgets may thus
  # be exceeded, matching the correction the DE pipeline applies itself).
  popsize <- 5
  itermax <- max(1, floor((funEvals - popsize) / popsize))
  listPipelineControls[['Differential Evolution']] <- list(funEvals = funEvals, itermax = itermax, popsize = popsize, F = 0.8, CR = 0.5, strategy = 2, c = 0.5)
  # Evolution strategy: the parent population size mue must be a positive
  # integer below the budget (the plain funEvals/2 could be fractional).
  mue <- 10
  if (mue >= funEvals) {
    mue <- max(1, floor(funEvals / 2))
  }
  listPipelineControls[['Evolution Strategy']] <- list(funEvals = funEvals, mue = mue)
  # Surrogate-model pipelines (SPOT): each starts from a 7-point initial
  # design, so funEvals should exceed 7 for these configurations.
  listPipelineControls[['Kriging']] <- list(funEvals = funEvals, model = buildKriging, modelControl = list(target="y"), designControl = list(size=7))
  listPipelineControls[['Kriging EI']] <- list(funEvals = funEvals, model = buildKriging, modelControl = list(target="ei"), designControl = list(size=7))
  listPipelineControls[['Random Forest']] <- list(funEvals = funEvals, model = buildRandomForest, designControl = list(size=7))
  listPipelineControls[['L-BFGS-B']] <- list(funEvals = funEvals, lmm=5)
  listPipelineControls[['Linear Model']] <- list(funEvals = funEvals, model = buildLM, optimizer = optimLBFGSB, designControl = list(size=7))
  return(listPipelineControls)
}
###################################################################################
#' Get a list of optimizers, returned as list of functions
#'
#' @param lower vector of lower bounds for objFunction
#' @param upper vector of upper bounds for objFunction
#'
#' @return This function returns a list with optimizers
#' and following arguments:
#' @param objFunction objective function on which optimizer will be tuned
#' @param ctrl a named list of control parameters
#' @param seed a seed for RNG
#'
###################################################################################
getFeasiblePipelines <- function(lower = NULL, upper = NULL) {
# init result list. Each element is a closure over the search-space bounds
# `lower`/`upper` that runs one optimizer on `objFunction` with control list
# `ctrl` and RNG seed `seed`, returning list(ybest, xbest, count, cpu, mem):
#   ybest/xbest  best objective value / location found
#   count        number of evaluations (as reported by the optimizer)
#   cpu          wall-clock seconds measured via tic()/toc()
#   mem          total allocations during the run (profmem), in MB
listPipelines <- list()
# --- Generalized simulated annealing (GenSA package) ---
listPipelines[['Generalized SA']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("GenSA")
force(seed)
set.seed(seed)
temp <- ctrl$temp
qv <- ctrl$qv # 2.62
qa <- ctrl$qa # -5
maxEval <- ctrl$max.call
res <- NULL
memProfile <- profmem({
res <- GenSA(lower = lower, upper = upper, fn = objFunction,
control=list(max.call=maxEval, temperature=temp, visiting.param=qv, acceptance.param=qa, seed = seed))
# rename result fields consistently with the other pipelines
res <- list(ybest=res$value, xbest=res$par, count=res$counts)
})
# cpu time (difference of tic/toc timestamps, seconds)
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- Pure random search: uniform samples within [lower, upper] ---
listPipelines[['Random Search']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("RandomSearch")
force(seed)
set.seed(seed)
budget <- ctrl$funEvals
res <- NULL
memProfile <- profmem({
yBest <- Inf
xBest <- NULL
for(i in 1:budget) {
# random par values, uniform within the bounds
x <- lower + runif(length(lower)) * (upper-lower)
# evaluate function (passed as a one-column matrix)
y <- objFunction(x = as.matrix(x))
# update incumbent best
if(y < yBest) {
yBest <- y
xBest <- x
}
}
res <- list(ybest=yBest, xbest=xBest, count=budget)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- Latin hypercube design search (SPOT::optimLHD) ---
listPipelines[['Lhd']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("Lhd")
force(seed)
set.seed(seed)
res <- NULL
memProfile <- profmem({
res <- SPOT::optimLHD(fun = objFunction, lower = lower, upper = upper, control = ctrl)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- Differential evolution (DEoptim). itermax <= 0 is corrected to 1,
# which may exceed the nominal budget (a warning is emitted). ---
listPipelines[['Differential Evolution']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("DEoptim")
force(seed)
set.seed(seed)
budget <- ctrl$funEvals
popsize <- ctrl$popsize
c <- ctrl$c
strategy <- ctrl$strategy
Fval <- ctrl$F
CR <- ctrl$CR
itermax <- ctrl$itermax
if(itermax < 1) {
itermax <- 1
warning("Itermax is 1 or smaller: corrected to 1 (but this exceeds the budget)")
}
print(paste("budget", budget, "popsize", popsize, "itermax", itermax, sep = " ", collapse = NULL))
res <- NULL
memProfile <- profmem({
# call DEoptim
res <- DEoptim::DEoptim(fn = objFunction,
lower = lower,
upper = upper,
control = list(NP=popsize, itermax=itermax, c=c, strategy=strategy, F=Fval, CR=CR ,reltol=1e-10, trace=FALSE))
# save interesting result values; evaluations = initial population
# plus popsize per generation
nfEvals <- popsize + (popsize * itermax)
res <- list(ybest=res$optim$bestval, xbest=res$optim$bestmem, count=nfEvals)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- SPOT with a Kriging surrogate predicting the objective ("y") ---
listPipelines[['Kriging']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("KrigingBP")
force(seed)
set.seed(seed)
ctrl['seedSPOT'] <- seed
res <- NULL
memProfile <- profmem({
res <- SPOT::spot(fun = objFunction, lower = lower, upper = upper, control= ctrl)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- SPOT with Kriging + expected improvement infill (target "ei") ---
listPipelines[['Kriging EI']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("KrigingEI")
force(seed)
set.seed(seed)
ctrl['seedSPOT'] <- seed
res <- NULL
memProfile <- profmem({
res <- SPOT::spot(fun = objFunction, lower = lower, upper = upper, control= ctrl)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- SPOT with a random-forest surrogate (ctrl supplies the model) ---
listPipelines[['Random Forest']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("RF")
force(seed)
set.seed(seed)
ctrl['seedSPOT'] <- seed
res <- NULL
memProfile <- profmem({
res <- SPOT::spot(fun = objFunction, lower = lower, upper = upper, control = ctrl)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- Quasi-Newton L-BFGS-B (SPOT::optimLBFGSB); no seedSPOT needed ---
listPipelines[['L-BFGS-B']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("LBFGSB")
force(seed)
set.seed(seed)
#ctrl['seedSPOT'] <- seed
res <- NULL
memProfile <- profmem({
res <- SPOT::optimLBFGSB(fun = objFunction, lower = lower, upper = upper, control = ctrl)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
# --- SPOT with a linear-model surrogate (ctrl supplies model/optimizer) ---
listPipelines[['Linear Model']] <- function(objFunction = NULL, ctrl = NULL, seed = NULL) {
tic("LM")
force(seed)
set.seed(seed)
ctrl['seedSPOT'] <- seed
res <- NULL
memProfile <- profmem({
res <- SPOT::spot(fun = objFunction, lower = lower, upper = upper, control = ctrl)
})
# cpu time
cpu <- toc(quiet = TRUE)
cpu <- cpu$toc[[1]] - cpu$tic[[1]]
cpu <- round(cpu, digits=2)
# memory usage in MB
mem <- sum(memProfile$bytes, na.rm = TRUE)/(1024*1024)
res$cpu <- cpu
res$mem <- mem
return(res)
}
return(listPipelines)
}
###################################################################################
#' Get a list of interface functions for SPOT tuning
#'
#'
#' @return This function returns a list with functions for each optimizer
#' and following arguments:
#' @param algpar matrix of optimizer configurations suggested by SPOT
#' @param objFunction objective function on which optimizer will be tuned
#' @param objFunctionBudget budget for optimizer to solve the objFunction
#' @param lowerObj vector of lower bounds for objFunction
#' @param upperObj vector of upper bounds for objFunction
#'
###################################################################################
getTuningInterfaces <- function() {
# init list of tuning interfaces
listTuningInterfaces <- list()
listTuningInterfaces[['Generalized SA']] <- function(algpar, objFunction, objFunctionBudget, lowerObj, upperObj){
print("Tuning GenSA")
# create result list
resultList <- NULL
# budget for each optimization run
budget <- objFunctionBudget
# algpar is matrix of row-wise settings
for (i in 1:nrow(algpar)) {
temp <- algpar[i,1]
qv <- algpar[i,2]
qa <- algpar[i,3]
# requires random starting point
par <- lowerObj + runif(length(lowerObj)) * (upperObj-lowerObj)
res <- GenSA::GenSA(fn = objFunction,
par = par,
lower = lowerObj,
upper = upperObj,
control = list(threshold.stop = -Inf,
max.call = budget,
temperature = temp,
visiting.param = qv,
acceptance.param = qa))
resultList <- c(resultList, res$value)
}
return(resultList)
}
listTuningInterfaces[['Random Search']] <- function(algpar, objFunction, objFunctionBudget, lowerObj, upperObj) {
}
listTuningInterfaces[['Lhd']] <- function(algpar, objFunction, objFunctionBudget, lowerObj, upperObj) {
print("Tuning LHD")
# print(algpar)
performance <- NULL
for (i in 1:nrow(algpar)) {
nRetries = algpar[i, 1]
result <- SPOT::optimLHD(fun = objFunction, control = list(funEvals = objFunctionBudget, retries = nRetries), lower = lowerObj, upper = upperObj)
performance <- c(performance, result$ybest[1,1])
}
return(matrix(performance, , 1))
}
listTuningInterfaces[['Differential Evolution']] <- function(algpar, objFunction, objFunctionBudget, lowerObj, upperObj) {
print("Tuning DEoptim")
# print(algpar)
resultList <- NULL
budget <- objFunctionBudget
for (i in 1:nrow(algpar)) {
popsize <- algpar[i, 1]
Fval <- algpar[i,2]
CR <- algpar[i,3]
strategy <- algpar[i,4]
c <- algpar[i,5]
# max nr iterations according to budget
itermax = floor((budget - popsize)/popsize)
# check and correct itermax if <= 0
if(itermax <= 0) {
## correct NP as well? at least one iteration MUST be performed
popsize <- floor(budget/2)
itermax = floor((budget - popsize)/popsize)
warning(paste('Corrected popsize:', popsize, 'itermax: ', itermax))
}
res <- DEoptim::DEoptim(fn = objFunction,
lower = lowerObj,
upper = upperObj,
control = list(NP=popsize,
F=Fval,
CR=CR,
c=c,
itermax=itermax,
strategy=strategy,
reltol=1e-10,
trace=0))
resultList <- c(resultList, res$optim$bestval)
}
return(resultList)
}
listTuningInterfaces[['Kriging']] <- function(algpar, lowerObj, upperObj, objFunction, objFunctionBudget) {
resultList <- NULL
budget <- objFunctionBudget
# algpar is matrix of row-wise settings
for (i in 1:nrow(algpar)) {
designSize <- algpar[i,1]
designType <- algpar[i,2] # 1 = designLHD, 2 = designUniformRandom
# set design
design <- designLHD
if(designType == 2) {
design <- designUniformRandom
}
spotConfig <- list(funEvals = budget,
model = buildKriging,
modelControl = list(algTheta=optimizerLikelihood, useLambda=TRUE, reinterpolate=TRUE, target="y"),
optimizer = optimizerForSPOT,
optimizerControl = list(funEvals=150),
design = design,
designControl = list(size=designSize)
)
res <- SPOT::spot(fun = objFunction,
lower = lowerObj,
upper = upperObj,
control = spotConfig)
resultList <- c(resultList, res$ybest)
}
return(resultList)
}
listTuningInterfaces[['Kriging EI']] <- function(algpar, lowerObj, upperObj, objFunction, objFunctionBudget) {
  # Tuning interface for SPOT with a Kriging surrogate using expected
  # improvement (target = "ei") as the infill criterion.
  # Each row of `algpar` is one candidate setting:
  #   column 1 = initial design size
  #   column 2 = design type (1 = designLHD, 2 = designUniformRandom)
  # Returns a numeric vector with the best objective value found per setting.
  results <- NULL
  for (row in seq_len(nrow(algpar))) {
    sizeOfDesign <- algpar[row, 1]
    typeOfDesign <- algpar[row, 2]
    # choose the experimental-design generator
    designFun <- if (typeOfDesign == 2) designUniformRandom else designLHD
    cfg <- list(funEvals = objFunctionBudget,
                model = buildKriging,
                modelControl = list(algTheta=optimizerLikelihood, useLambda=TRUE, reinterpolate=TRUE, target="ei"),
                optimizer = optimizerForSPOT,
                optimizerControl = list(funEvals=150),
                design = designFun,
                designControl = list(size=sizeOfDesign))
    spotRes <- SPOT::spot(fun = objFunction,
                          lower = lowerObj,
                          upper = upperObj,
                          control = cfg)
    results <- c(results, spotRes$ybest)
  }
  results
}
listTuningInterfaces[['Random Forest']] <- function(algpar, objFunction, objFunctionBudget, lowerObj, upperObj) {
  # Tuning interface for SPOT with a random forest surrogate model.
  # Each row of `algpar` is one candidate setting:
  #   column 1 = initial design size
  #   column 2 = design type (1 = designLHD, 2 = designUniformRandom)
  # Returns a numeric vector with the best objective value found per setting.
  results <- NULL
  for (row in seq_len(nrow(algpar))) {
    sizeOfDesign <- algpar[row, 1]
    typeOfDesign <- algpar[row, 2]
    # choose the experimental-design generator
    designFun <- if (typeOfDesign == 2) designUniformRandom else designLHD
    cfg <- list(funEvals = objFunctionBudget,
                model = buildRandomForest,
                optimizer = optimizerForSPOT,
                optimizerControl = list(funEvals=150),
                design = designFun,
                designControl = list(size=sizeOfDesign))
    spotRes <- SPOT::spot(fun = objFunction,
                          lower = lowerObj,
                          upper = upperObj,
                          control = cfg)
    results <- c(results, spotRes$ybest)
  }
  results
}
listTuningInterfaces[['L-BFGS-B']] <- function(algpar, objFunction, objFunctionBudget, lowerObj, upperObj) {
  # Tuning interface for L-BFGS-B: each row of `algpar` supplies one value
  # of `lmm` (number of BFGS correction pairs kept in memory).
  # Returns a numeric vector with the best objective value found per setting.
  print("Tuning L-BFGS-B")
  results <- NULL
  for (row in seq_len(nrow(algpar))) {
    memorySize <- algpar[row, 1]
    cfg <- list(funEvals = objFunctionBudget, lmm = memorySize)
    optimRes <- SPOT::optimLBFGSB(fun = objFunction,
                                  lower = lowerObj,
                                  upper = upperObj,
                                  control = cfg)
    results <- c(results, optimRes$ybest)
  }
  results
}
# Tuning interface stub for the Linear Model: no settings are evaluated and
# NULL is returned implicitly.
# NOTE(review): the other interfaces return a numeric vector with one best
# objective value per row of algpar — confirm callers tolerate NULL here,
# or whether this stub was left unimplemented.
listTuningInterfaces[['Linear Model']] <- function(algpar, objFunction, objFunctionBudget, lowerObj, upperObj) {
}
return(listTuningInterfaces)
}
###################################################################################
#' Get a list of functions which process tuning of given optimizers
#'
#' @param tuningBudget number of evaluations SPOT may spend on tuning
#' @param lowerObj vector defining the lower boundary of the objective's search space
#' @param upperObj vector defining the upper boundary of the objective's search space
#' @param objFunctionBudget objective-function evaluation budget available per tuning run
#'
#' @return This function returns a named list with one tuner function per
#' optimizer; each tuner accepts the following arguments:
#' tuningInterface: function which can be passed to SPOT,
#' objFunction: objective function on which the optimizer will be tuned,
#' seed: seed for the random number generator
###################################################################################
getSpotTuningList <- function(tuningBudget = NULL, lowerObj = NULL, upperObj = NULL, objFunctionBudget = NULL) {
  ## Build a named list of tuner functions, one per optimizer. Each tuner
  ## runs SPOT over the optimizer's hyper-parameter space (via the matching
  ## tuning interface) and returns the best setting found as a named list.
  ##
  ## tuningBudget:      number of evaluations SPOT may spend on tuning
  ## lowerObj/upperObj: search-space boundaries of the objective function
  ## objFunctionBudget: objective-function evaluation budget per tuning run
  # lowerObj <- force(lowerObj)
  # upperObj <- force(upperObj)
  # objFunctionBudget <- force(objFunctionBudget)
  # tuningBudget <- force(tuningBudget)
  # init result list
  listSpotTuningCalls <- list()
  ## Tune GenSA: temperature, qv (visiting) and qa (acceptance) parameters.
  listSpotTuningCalls[['Generalized SA']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tune GenSA')
    force(seed)
    set.seed(seed)
    # configure spot
    spotConfig <- list(funEvals = tuningBudget,
                       types = c("integer", "numeric", "numeric"), # temp is integer
                       model = buildKriging,
                       optimizer = optimizerForSPOT,
                       optimizerControl = list(funEvals=150),
                       designControl = list(size=10),
                       seedSPOT = seed
                       )
    ## https://journal.r-project.org/archive/2013/RJ-2013-002/RJ-2013-002.pdf
    # max.call - funEval
    # between 2 and 3 for qv and any value < 0 for qa
    # defaults: qv 2.65 ; qa -5
    ## temp, qv, qa
    lowerSPO <- c(1, 2, -1000)
    upperSPO <- c(100, 3, 1)
    ## call SPO
    spotResult <- SPOT::spot(fun = tuningInterface,
                             lower = lowerSPO,
                             upper = upperSPO,
                             control = spotConfig,
                             lowerObj = lowerObj,
                             upperObj = upperObj,
                             objFunction = objFunction,
                             objFunctionBudget = objFunctionBudget)
    ## return xBest, as a named list
    result <- list()
    result['temp'] <- spotResult$xbest[1]
    result['qv'] <- spotResult$xbest[2]
    result['qa'] <- spotResult$xbest[3]
    print("Tuned GenSA: ")
    print(paste(mapply(paste, names(result), as.numeric(result)), collapse=" / "))
    return(result)
  }
  ## tune LHD with Spot: number of repetitions only
  listSpotTuningCalls[['Lhd']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print("Tune optimLhd")
    force(seed)
    set.seed(seed)
    spotConfig <- list(funEvals = tuningBudget,
                       types = c("integer"), # integer
                       model = buildKriging,
                       optimizer = optimizerForSPOT,
                       optimizerControl = list(funEvals=150),
                       designControl = list(size=10),
                       seedSPOT = seed
                       )
    # range repetitions
    lowerSPO <- c(1)
    upperSPO <- c(200)
    spotResult <- SPOT::spot(fun = tuningInterface,
                             lower = lowerSPO,
                             upper = upperSPO,
                             control = spotConfig,
                             lowerObj = lowerObj,
                             upperObj = upperObj,
                             objFunction = objFunction,
                             objFunctionBudget = objFunctionBudget)
    result <- list()
    result['retries'] <- spotResult$xbest[1]
    print(paste("Tuned nrRepetitions: ", result['retries'], collapse = NULL, sep = ""))
    return(result)
  }
  ## Tune DEoptim: population size, F, CR, strategy and c.
  listSpotTuningCalls[['Differential Evolution']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print("Tune optimDE")
    force(seed)
    set.seed(seed)
    # popsize bounds: at least 4 individuals, at most half the objective budget
    lowerNP <- 4 # dim * 2
    upperNP <- floor(objFunctionBudget/2)
    # upperNP <- min(dim*15, objFunctionBudget)
    ## popsize, F, CR, strategy, c
    lowerSPO <- c(lowerNP, 0, 0, 1, 0)
    upperSPO <- c(upperNP, 2, 1, 5, 1)
    spotConfig <- list(funEvals = tuningBudget,
                       types = c("integer", "numeric", "numeric", "factor", "numeric"),
                       model = buildKriging,
                       optimizer = optimizerForSPOT,
                       optimizerControl = list(funEvals=150),
                       designControl = list(size=10),
                       seedSPOT = seed
                       )
    ## call SPO
    spotResult <- SPOT::spot(fun = tuningInterface,
                             lower = lowerSPO,
                             upper = upperSPO,
                             control = spotConfig,
                             lowerObj = lowerObj,
                             upperObj = upperObj,
                             objFunction = objFunction,
                             objFunctionBudget = objFunctionBudget)
    ## return xBest, as a named list
    result <- list()
    result['popsize'] <- spotResult$xbest[1]
    result['F'] <- spotResult$xbest[2]
    result['CR'] <- spotResult$xbest[3]
    result['strategy'] <- spotResult$xbest[4]
    result['c'] <- spotResult$xbest[5]
    print("Tuned DEOptim: ")
    print(paste(mapply(paste, names(result), as.numeric(result)), collapse=" / "))
    return(result)
  }
  ## tune Kriging (best predictor) with Spot: design size and design type
  listSpotTuningCalls[['Kriging']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tuning KrigingBP')
    force(seed)  # added for consistency with the other tuners
    set.seed(seed)
    # configure spot
    spotConfig <- list(funEvals = tuningBudget,
                       types = c("integer", "factor"), # design size, design type
                       model = buildKriging,
                       modelControl = list(algTheta=optimizerLikelihood, useLambda=TRUE, reinterpolate=TRUE),
                       optimizer = optimizerForSPOT,
                       optimizerControl = list(funEvals=150),
                       designControl = list(size=10),
                       seedSPOT = seed
                       )
    # design type codes (see the matching tuning interface)
    designTypeLHD <- 1
    designTypeUniform <- 2
    # design size bounds, capped by the objective budget
    minDesignSize <- max(4, floor(objFunctionBudget/4))
    maxDesignSize <- min(50, objFunctionBudget - 3)
    lowerSPO <- c(minDesignSize, designTypeLHD)
    upperSPO <- c(maxDesignSize, designTypeUniform)
    ## call SPO
    spotResult <- SPOT::spot(fun = tuningInterface,
                             lower = lowerSPO,
                             upper = upperSPO,
                             control = spotConfig,
                             lowerObj = lowerObj,
                             upperObj = upperObj,
                             objFunction = objFunction,
                             objFunctionBudget = objFunctionBudget)
    ## return xBest, as a named list
    result <- list()
    result['designSize'] <- spotResult$xbest[1]
    result['designType'] <- spotResult$xbest[2]
    print("Tuned KrigingBP: ")
    print(paste(mapply(paste, names(result), as.numeric(result)), collapse=" / "))
    return(result)
  }
  ## tune Kriging (expected improvement) with Spot: design size and type
  listSpotTuningCalls[['Kriging EI']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tuning KrigingEI')
    force(seed)  # added for consistency with the other tuners
    set.seed(seed)
    # configure spot
    spotConfig <- list(funEvals = tuningBudget,
                       types = c("integer", "factor"), # design size, design type
                       model = buildKriging,
                       optimizer = optimizerForSPOT,
                       optimizerControl = list(funEvals=150),
                       designControl = list(size=10),
                       seedSPOT = seed
                       )
    # design type codes (see the matching tuning interface)
    designTypeLHD <- 1
    designTypeUniform <- 2
    # design size bounds, capped by the objective budget
    minDesignSize <- max(4, floor(objFunctionBudget/4))
    maxDesignSize <- min(50, objFunctionBudget - 3)
    lowerSPO <- c(minDesignSize, designTypeLHD)
    upperSPO <- c(maxDesignSize, designTypeUniform)
    ## call SPO
    spotResult <- SPOT::spot(fun = tuningInterface,
                             lower = lowerSPO,
                             upper = upperSPO,
                             control = spotConfig,
                             lowerObj = lowerObj,
                             upperObj = upperObj,
                             objFunction = objFunction,
                             objFunctionBudget = objFunctionBudget)
    ## return xBest, as a named list
    result <- list()
    result['designSize'] <- spotResult$xbest[1]
    result['designType'] <- spotResult$xbest[2]
    print("Tuned KrigingEI: ")
    print(paste(mapply(paste, names(result), as.numeric(result)), collapse=" / "))
    return(result)
  }
  ## tune RandomForest with Spot: design size and design type
  listSpotTuningCalls[['Random Forest']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tuning RandomForest')
    force(seed)  # added for consistency with the other tuners
    set.seed(seed)
    # configure spot
    spotConfig <- list(funEvals = tuningBudget,
                       types = c("integer", "factor"), # design size, design type
                       model = buildKriging,
                       optimizer = optimizerForSPOT,
                       optimizerControl = list(funEvals=150),
                       designControl = list(size=10),
                       seedSPOT = seed
                       )
    # design type codes (see the matching tuning interface)
    designTypeLHD <- 1
    designTypeUniform <- 2
    # design size bounds, capped by the objective budget
    minDesignSize <- max(4, floor(objFunctionBudget/4))
    maxDesignSize <- min(50, objFunctionBudget - 3)
    lowerSPO <- c(minDesignSize, designTypeLHD)
    upperSPO <- c(maxDesignSize, designTypeUniform)
    ## call SPO
    spotResult <- SPOT::spot(fun = tuningInterface,
                             lower = lowerSPO,
                             upper = upperSPO,
                             control = spotConfig,
                             lowerObj = lowerObj,
                             upperObj = upperObj,
                             objFunction = objFunction,
                             objFunctionBudget = objFunctionBudget)
    ## return xBest, as a named list
    result <- list()
    result['designSize'] <- spotResult$xbest[1]
    result['designType'] <- spotResult$xbest[2]
    # BUG FIX: this previously printed "Tuned KrigingEI: " (copy-paste error)
    print("Tuned RandomForest: ")
    print(paste(mapply(paste, names(result), as.numeric(result)), collapse=" / "))
    return(result)
  }
  ## tune LBFGSB with Spot: lmm (number of BFGS correction pairs)
  listSpotTuningCalls[['L-BFGS-B']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tuning L-BFGS-B lmm parameter')
    force(seed)
    set.seed(seed)
    spotConfig <- list(funEvals = tuningBudget,
                       types = c("integer"), # integer
                       model = buildKriging,
                       optimizer = optimizerForSPOT,
                       optimizerControl = list(funEvals=150),
                       designControl = list(size=10),
                       seedSPOT = seed
                       )
    # range of lmm
    lowerSPO <- c(1)
    upperSPO <- c(10)
    spotResult <- SPOT::spot(fun = tuningInterface,
                             lower = lowerSPO,
                             upper = upperSPO,
                             control = spotConfig,
                             lowerObj = lowerObj,
                             upperObj = upperObj,
                             objFunction = objFunction,
                             objFunctionBudget = objFunctionBudget)
    result <- list()
    result['lmm'] <- spotResult$xbest[1]
    print(paste("Tuned lmm: ", result['lmm'], collapse = NULL, sep = ""))
    return(result)
  }
  ## tune LM with Spot — nothing to tune, return an empty setting
  listSpotTuningCalls[['Linear Model']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tuning LM by doing nothing')
    result <- list()
    return(result)
  }
  ## tune RS with Spot — nothing to tune, return an empty setting
  listSpotTuningCalls[['Random Search']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tuning RS by doing nothing')
    result <- list()
    return(result)
  }
  ## tune ES with Spot — nothing to tune, return an empty setting
  listSpotTuningCalls[['Evolution Strategy']] <- function(tuningInterface = NULL, objFunction = NULL, seed = NULL) {
    print('tuning ES by doing nothing')
    result <- list()
    return(result)
  }
  return(listSpotTuningCalls)
}
###################################################################################
#' Another numerical optimizer. Directly calls nloptr.
#'
#' @param x is a point (vector) in the decision space of fun
#' @param fun is the target function of type y = f(x, ...)
#' @param lower is a vector that defines the lower boundary of search space
#' @param upper is a vector that defines the upper boundary of search space
#' @param control is a list of additional settings, defaults to:
#' list(funEvals=200, method="NLOPT_LN_NELDERMEAD", reltol=1e-4, verbosity=0)
#'
#' @return This function returns a list with:
#' xbest parameters of the found solution
#' ybest target function value of the found solution
#' count number of evaluations of fun
###################################################################################
optimizerForSPOT <- function(x=NULL, fun, lower, upper, control, ...){
  # Numerical optimizer wrapper around nloptr::nloptr, shaped so it can be
  # plugged into SPOT as the `optimizer` entry of the control list.
  # Draw a uniform random start point when none is supplied; otherwise use
  # the first row of the given matrix (nloptr needs a vector start point).
  if (is.null(x)) {
    x <- lower + runif(length(lower)) * (upper - lower)
  } else {
    x <- x[1,]
  }
  # Merge user-supplied settings over the defaults.
  defaults <- list(funEvals=200, method="NLOPT_LN_NELDERMEAD", reltol=1e-4, verbosity=0) # NLOPT_GN_DIRECT_L
  defaults[names(control)] <- control
  control <- defaults
  # SPOT target functions expect a one-row matrix; nloptr passes a plain
  # vector, so wrap the target function accordingly.
  wrappedFun <- function(v) {
    fun(matrix(data = v, nrow = 1))
  }
  extras <- list(...)
  if (length(extras) > 0) {
    cat("The arguments in ... are\n")
    print(extras)
  }
  nloptOpts <- list(algorithm = control$method,
                    maxeval = control$funEvals,
                    ftol_rel = control$reltol,
                    xtol_rel = -Inf,
                    print_level = control$verbosity)
  # call optimizer
  res <- nloptr::nloptr(x, wrappedFun, lb = lower, ub = upper, opts = nloptOpts, ...)
  # Report results in the fields SPOT expects.
  res$count <- res$iterations
  res$xbest <- matrix(data = res$solution, nrow = 1)
  res$ybest <- res$objective
  res
}
###################################################################################
#' Numerical optimizer, useful for MLE (e.g. during Kriging model fit).
#' Calls CEGO::optimInterface
#'
#' @param x is a point (vector) in the decision space of fun
#' @param fun is the target function of type y = f(x, ...)
#' @param lower is a vector that defines the lower boundary of search space
#' @param upper is a vector that defines the upper boundary of search space
#' @param control accepted for interface compatibility but currently ignored;
#'    the call always uses list(method="NLOPT_GN_DIRECT_L",funEvals=400,reltol=1e-8)
#'
#' @return This function returns a list with:
#' xbest parameters of the found solution
#' ybest target function value of the found solution
#' count number of evaluations of fun
###################################################################################
optimizerLikelihood <- function(x = NULL, fun, lower, upper, control, ...){
  # Thin wrapper around CEGO::optimInterface with fixed settings, intended
  # for maximum likelihood estimation during Kriging model fitting.
  # NOTE: the `control` argument is accepted for interface compatibility but
  # ignored — the settings below are always used.
  fixedControl <- list(method = "NLOPT_GN_DIRECT_L", funEvals = 400, reltol = 1e-8)
  CEGO::optimInterface(x, fun, lower, upper, control = fixedControl, ...)
}
|
0ee763699dbdc086d070666a06e16f02e14294d0 | d03924f56c9f09371d9e381421a2c3ce002eb92c | /man/Integer-class.Rd | 0ba4c99cbcc2c1685208e4b463adb9073e602f8c | [] | no_license | cran/distr | 0b0396bbd5661eb117ca54026afc801afaf25251 | c6565f7fef060f0e7e7a46320a8fef415d35910f | refs/heads/master | 2023-05-25T00:55:19.097550 | 2023-05-08T07:10:06 | 2023-05-08T07:10:06 | 17,695,561 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,082 | rd | Integer-class.Rd | \name{Integer-class}
\docType{class}
\alias{Integer-class}
\alias{coerce,numeric,Integer-method}
\title{Internal Class "Integer"}
\description{For the ease of method dispatch, there is an internal
S4 class \code{Integer}, which is a subclass of \code{numeric} and has a
straightforward validity method.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("Integer", ...)}.
}
\section{Slots}{
\describe{
\item{\code{.Data}}{Object of class \code{"numeric"}}
}
}
\section{Extends}{
Class \code{"\linkS4class{numeric}"}, from data part.
Class \code{"\linkS4class{vector}"}, by class "numeric", distance 2.
}
\section{Methods}{
\describe{ \item{coerce}{\code{signature(from = "numeric", to = "Integer")}:
create a \code{"Integer"} object from a \code{"numeric"} vector.}
}}
%\references{ ~put references to the literature/web site here ~ }
\author{
Peter Ruckdeschel \email{peter.ruckdeschel@uni-oldenburg.de}
}
%\note{ ~~further notes~~ }
\seealso{
\code{\link{numeric}}, \code{\link{vector}}
}
%\examples{}
\keyword{classes}
\keyword{internal}
|
88d488b111cdc41dfd89294c75f90ac5194a0152 | 52cd29c18c790d2b46ef98123ca3d3e657db4a08 | /scripts/tune_cnn.R | 68fd890424b235e3a75224f7798d1bc53867bb04 | [] | no_license | DavidPitteloud/Deep_Learning | fa4940ce7f65e4039d81a118f7b5f983e854d775 | 20c83d95941b1b0b963356e050f8dae00222352c | refs/heads/master | 2022-12-17T18:31:57.847691 | 2020-08-10T20:24:11 | 2020-08-10T20:24:11 | 285,857,590 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 459 | r | tune_cnn.R | # Load packages
library("here")
library("keras")
library("reticulate")
library("tensorflow")
library("tfruns")
library(tidyverse)
#training_run
tuning_run(
file = here::here("scripts/train_cnn.R"),
flags = list(L1 = c(0.001),
L2 = c(0.002),
dropout1 = c(0.3,0.4),
dropout2 = c(0.1),
dropout3 = c(0.3,0.4),
filter1 = c(96,196),
filter2 = c(96,196)
)
)
|
083a764446ffa37a1b51081741a40d529ca8f3ba | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/archdata/examples/Michelsberg.Rd.R | aa96ed52c7751ab143bf7eeb1e27d47488414fa4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,090 | r | Michelsberg.Rd.R | library(archdata)
### Name: Michelsberg
### Title: Younger Neolithic Pottery from Central Europe
### Aliases: Michelsberg
### Keywords: datasets
### ** Examples
data(Michelsberg)
str(Michelsberg)
names(Michelsberg)[5:39]
attributes(Michelsberg)$typological_key
library(ca)
# geographical distribution
xy <- as.matrix(Michelsberg[,41:42])/1000
plot(xy, asp=1, pch=16, col=rgb(.3,.3,.3,.5))
text(xy[,1], xy[,2], Michelsberg$id, cex=.7, pos=2)
# Note site 109 to the Northeast;
# preparing the data set for CA
abu <- Michelsberg[, 5:39]
rownames(abu) <- Michelsberg$id
# CA with site 109, Flintbek LA48, as supplementary row
MBK.ca <- ca(abu, ndim=min(dim(abu)-1), suprow=109 )
# asymmetric biplot with row quality and column contribution
plot(MBK.ca, map="rowprincipal", contrib=c("relative", "absolute"))
title(main="Row-isometric Biplot of Michelsberg CA", cex.sub=.7,
sub="color intensity represents quality for sites and contributions for types")
# The arch is a curved trend in 3D; zoom with mouse scroll
library(rgl)
plot3d(MBK.ca, map="rowprincipal", labels=c(0,0))
|
306552d81a9670112f20ff6e8ab84845d540e6d8 | e0b3f76f4fcd8d258b2ee856d3a040f16814b888 | /R/testVariable.R | 9ec6936fb57b86d2be20cb1e1210a51a74dd794a | [] | no_license | IBIC/UdallR | 6095ad1dc0624b280da58aa05a54a9f3a4e2fc40 | 8f0cde10c837bcca933705fe78fab75d1e01464a | refs/heads/master | 2021-03-27T10:35:09.705484 | 2019-05-07T19:41:29 | 2019-05-07T19:41:29 | 81,885,023 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,366 | r | testVariable.R | #' Test demographic variable
#'
#' Given a column and data frame, compares values of column for PD and
#' control/unaffected participants.
#'
#' @param col Column name
#' @param all.subjects Data frame containing information for all subjects.
#' @param grouping Name of the column in \code{all.subjects} holding group labels.
#' @param groups Vector of group labels to compare.
#'
#' @return Named vector with, for each group, mean/count and SD/proportion,
#'    followed by the p-value (ANOVA for numeric columns, chi-square for
#'    factors, when computable) and the overall mean/count and SD/proportion.
#'
#' @export
testVariable <- function(col, all.subjects, grouping, groups){
  # Compare values of column `col` across the given groups.
  #
  # col:          name of the column to test
  # all.subjects: data frame with one row per subject
  # grouping:     name of the column holding the group labels
  # groups:       group labels to compare
  #
  # Returns a named vector with, per group, <group>.m (mean or count of the
  # first factor level) and <group>.sd (SD or proportion), plus "p" (ANOVA
  # p-value for numeric columns, chi-square p-value for factor columns, NA
  # when not computable) and "total.m"/"total.sd" across all groups.
  if (!(grouping %in% colnames(all.subjects)))
  {
    stop(paste("Column", grouping, "is not present in all.subjects"))
  }
  # Select data subsets based on groups
  subsets <- as.list(rep(NA, length(groups)))
  for (i in seq_along(groups))
  {
    subsets[[i]] <- all.subjects[all.subjects[, grouping] == groups[i], col]
  }
  names(subsets) <- groups
  # Preallocate and name the output: <group>.m / <group>.sd pairs, then
  # (reading the tail backwards) p, total.m, total.sd.
  return.vec <- rep(NA, length(subsets) * 2 + 3)
  names(return.vec)[seq(1, length(return.vec) - 3, by = 2)] <- paste0(groups,
                                                                      ".m")
  names(return.vec)[seq(2, length(return.vec) - 3, by = 2)] <- paste0(groups,
                                                                      ".sd")
  names(return.vec)[length(return.vec):(length(return.vec) - 2)] <- c("total.sd",
                                                                      "total.m",
                                                                      "p")
  # Only total subjects that belong to an eligible group
  total <- all.subjects[all.subjects[, grouping] %in% groups, c(grouping, col)]
  total.omitted <- na.omit(total)
  omitted.groups <- unique(total.omitted[, grouping])
  if (length(omitted.groups) != length(groups))
  {
    # BUG FIX: typo "ommitted" corrected
    warning("One or more groups was omitted entirely from ANOVA.")
  }
  if (is.numeric(total[, col]))
  {
    return.vec["total.m"] <- mean(total[, col], na.rm = TRUE)
    return.vec["total.sd"] <- sd(total[, col], na.rm = TRUE)
    if (length(omitted.groups) > 1)
    {
      message(paste("Running ANOVA for group differences:", col))
      anova.results <- aov(total[, col] ~ total[, grouping])
      return.vec["p"] <- summary(anova.results)[[1]][["Pr(>F)"]][[1]]
    }
    else
    {
      warning(paste("Not enough groups to do an ANOVA for", col))
      return.vec["p"] <- NA
    }
  }
  # Track whether the column is categorical so the chi-square fallback below
  # cannot fire for numeric columns whose ANOVA failed (previous behavior
  # could build a nonsense "contingency table" from means/SDs).
  col.is.factor <- FALSE
  for (i in seq_along(subsets))
  {
    s <- subsets[[i]]
    name <- names(subsets)[i]
    if (is.numeric(s))
    {
      if (all(is.na(s)))
      {
        warning("Column is all NA")
        return.vec[c(paste0(name, ".m"), paste0(name, ".sd"))] <- NA
      }
      else
      {
        return.vec[paste0(name, ".m")] <- mean(s, na.rm = TRUE)
        return.vec[paste0(name, ".sd")] <- sd(s, na.rm = TRUE)
      }
    }
    else if (is.factor(s) || is.character(s))
    {
      col.is.factor <- TRUE
      s <- as.factor(s)
      # NOTE(review): levels(s)[1] is recomputed per group; if a group lacks
      # the overall first level, counts may refer to different levels.
      message(paste(col, "is a factor vector. Returning count for",
                    levels(s)[1]))
      if (length(levels(s)) != 2)
      {
        # BUG FIX: was warnings() (which lists past warnings) — warning()
        # is the function that actually signals one.
        warning("Error: More than two factor levels in column.")
      }
      total.c <- sum(total.omitted[, col] == levels(s)[1], na.rm = TRUE)
      return.vec["total.m"] <- total.c
      return.vec["total.sd"] <- total.c / length(total.omitted[, col])
      count <- sum(s == levels(s)[1], na.rm = TRUE)
      return.vec[paste0(name, ".m")] <- count
      return.vec[paste0(name, ".sd")] <- count / length(s)
    }
  }
  # Chi-square test for factor columns: reconstruct the contingency table
  # (rows = groups; columns = first level vs all other levels) from the
  # counts and proportions stored in return.vec.
  if (is.na(return.vec["p"]) && col.is.factor)
  {
    counts <- return.vec[paste0(names(subsets), ".m")]
    sizes <- counts / return.vec[paste0(names(subsets), ".sd")]
    # BUG FIX: contingency cells must be mutually exclusive; previously the
    # second column held the full group size (count + non-count), and the
    # matrix was sized length(groups) x length(groups) with group names on
    # the wrong dimension.
    tbl <- cbind(counts, sizes - counts)
    rownames(tbl) <- names(subsets)
    # BUG FIX: isTRUE() guards against all() returning NA (NaN/NA cells),
    # which previously made the if() condition error out.
    if (isTRUE(all(tbl > 0)))
      p <- chisq.test(tbl)$p.value
    else
      p <- NA
    return.vec["p"] <- p
  }
  return(return.vec)
}
|
9d0e7a0e796510a5763cbddb0d6ba106ff230dd5 | 1f045242458430c26961eede23b49f854540e122 | /code/algo.R | 290a66fb25c2bfc6f068712c075d00240ea5aa43 | [] | no_license | hcui10/intro_sample_recycling | f103ca03235e18c478b762be54c35024a5878315 | 4a9d81c54b83e9baa34d318f0d83b7d7c41a683a | refs/heads/master | 2020-03-31T17:48:46.150162 | 2018-10-10T20:56:16 | 2018-10-10T20:56:16 | 152,435,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,790 | r | algo.R | ## This file contains implementations for relevant algorithms accompanying
## [Introducing Sample Recycling Method]
################################################################################
################################################################################
############## Simulation: Geometric Brownian Motion Sample Path ###############
################################################################################
# simulate one path
sim_path <- function(dt, tau, F0, mu, sigma) {
  # Simulate one geometric Brownian motion path on an equally spaced grid
  # with step dt up to horizon tau, started at F0 with drift mu and
  # volatility sigma. Returns the path including the starting value.
  n_steps <- as.integer(tau/dt)
  brownian <- cumsum(rnorm(n_steps, mean = 0, sd = sqrt(dt)))
  grid <- seq(from = dt, to = tau, by = dt)
  c(F0, F0 * exp((mu - 0.5*sigma^2) * grid + sigma * brownian))
}
################################################################################
################################################################################
### Theoretical Price for European and Geometric Average-Rate Asian Options ####
################################################################################
# European Option
european_option_price_theory <- function(S0, K, sigma, r, d, tau, option.type) {
  # Black-Scholes price of a European call or put with continuous dividend
  # yield d, risk-free rate r, volatility sigma and time to maturity tau.
  vol_sqrt_t <- sigma * sqrt(tau)
  d1 <- (log(S0 / K) + (r - d + 0.5 * sigma^2) * tau) / vol_sqrt_t
  d2 <- d1 - vol_sqrt_t
  disc_spot <- S0 * exp(-d * tau)
  disc_strike <- K * exp(-r * tau)
  # unknown option types fall through and yield NULL, as before
  switch(option.type,
         call = disc_spot * pnorm(d1) - disc_strike * pnorm(d2),
         put  = disc_strike * pnorm(-d2) - disc_spot * pnorm(-d1))
}
# Geometric Continuous Average-Rate Asian Options
asian_option_price_theory <- function(S0, K, sigma, r, d, tau, option.type) {
  # Price of a geometric continuous average-rate Asian option: reuse the
  # European Black-Scholes formula with adjusted volatility (sigma/sqrt(3))
  # and adjusted dividend yield.
  adj_div <- 0.5 * (r + d + sigma^2 / 6)
  adj_vol <- sigma / sqrt(3)
  european_option_price_theory(S0, K, adj_vol, r, adj_div, tau, option.type)
}
# Geometric Average-Rate Asian Options with Discrete Sample Steps
asian_option_price_theory_discrete <- function(S0, K, sigma, r, d, tau,
                                               option.type, n, j, S.arr = NULL) {
  # Closed-form price of a geometric average-rate Asian option sampled at n
  # discrete dates, of which the first j have already been observed (those
  # observations are supplied in S.arr). n == Inf gives the continuous limit.
  if (n == Inf) {
    tau_mu <- tau / 2
    tau_sigma <- tau / 3
    past_geo <- 1
  } else {
    step <- tau / n
    remaining <- 1 - j/n
    tau_mu <- remaining * (tau - step * (n-j-1) / 2)
    tau_sigma <- tau * remaining^2 -
      (n-j) * (n-j-1) * (4 * n - 4 * j + 1) / 6 / n / n * step
    # geometric contribution of the already-observed fixings
    past_geo <- if (j == 0) 1 else prod(S.arr[1:j] / S0)^(1/n)
  }
  # adjusted discounting factor and the d1/d2 terms
  adj <- exp(-r * (tau - tau_mu) - d * tau_mu -
               sigma^2 * (tau_mu - tau_sigma) * 0.5) * past_geo
  d2 <- (log(S0 / K) + (r - d - 0.5 * sigma^2) * tau_mu + log(past_geo)) /
    sigma / sqrt(tau_sigma)
  d1 <- d2 + sigma * sqrt(tau_sigma)
  # omega: +1 for calls, -1 for puts (NULL for anything else, as before)
  omega <- switch(option.type, call = 1, put = -1)
  omega * S0 * adj * pnorm(omega * d1) -
    omega * K * exp(-r * tau) * pnorm(omega * d2)
}
################################################################################
################################################################################
######################## Vanilla Monte Carlo Simulation ########################
################################################################################
# Wrapper to Compute Average: Arithmetic and Geometric
avg <- function(arr, avg.method) {
  # Arithmetic or geometric mean of a numeric vector. The geometric mean
  # ignores non-positive entries (their logs are undefined).
  if (avg.method == "arithmetic") {
    mean(arr)
  } else if (avg.method == "geometric") {
    positive <- arr[arr > 0]
    exp(mean(log(positive)))
  }
}
# Loss Path Functional
eval_path_loss <- function(sample.path, loss.type, option.type, params) {
  # Payoff functional evaluated on one simulated path.
  # params:
  #  - European: K
  #  - Asian: avg.target, avg.method, avg.idx, K (when the price is averaged)
  terminal <- sample.path[length(sample.path)]
  if (loss.type == "European") {
    intrinsic <- switch(option.type,
                        call = terminal - params$K,
                        put  = params$K - terminal)
    return(max(intrinsic, 0))
  }
  if (loss.type == "Asian") {
    path_avg <- avg(sample.path[params$avg.idx], avg.method = params$avg.method)
    if (params$avg.target == "price") {
      price <- path_avg
      strike <- params$K
    } else if (params$avg.target == "strike") {
      price <- terminal
      strike <- path_avg
    }
    if (option.type == "call") return(max(price - strike, 0))
    if (option.type == "put") return(max(strike - price, 0))
  }
}
# Monte Carlo Option Pricing
option_price_MC <- function(S0, K, sigma, r, d, tau,
                            loss.type, option.type,
                            num.MC, dt, avg.target, avg.method, avg.step,
                            ncpus = 1, timeit = TRUE) {
  # Vanilla Monte Carlo option pricing: simulate num.MC GBM paths under the
  # risk-neutral drift r - d, evaluate the (discounted) payoff on each path
  # and average. Returns the estimate, the per-path discounted losses, the
  # simulated paths and (when timeit) the timings of both phases.
  # --- simulate sample paths (one matrix column per path) ---
  sim.time <- system.time({
    sample.paths <-
      simplify2array(
        parallel::mclapply(seq(num.MC),
                           function(i) sim_path(dt, tau, S0, r - d, sigma),
                           mc.cores = ncpus, mc.allow.recursive = FALSE))
  })
  # --- evaluate the discounted loss of every path ---
  loss.eval.time <- system.time({
    # indices of the path values entering the average (starting point excluded)
    avg.idx <- seq(from = 1, to = nrow(sample.paths), by = avg.step)[-1]
    loss.params <- list(avg.target = avg.target,
                        avg.method = avg.method,
                        avg.idx = avg.idx,
                        K = K)
    raw.losses <- parallel::mclapply(
      data.frame(sample.paths),  # iterate column-wise, i.e. path by path
      FUN = function(path)
        eval_path_loss(path, loss.type = loss.type, option.type = option.type,
                       params = loss.params),
      mc.cores = ncpus, mc.allow.recursive = FALSE)
    MC.losses <- as.numeric(simplify2array(raw.losses)) * exp(-r * tau)
  })
  out <- list(est = mean(MC.losses),
              disc.losses = MC.losses,
              samples = sample.paths)
  if (timeit) {
    out$sim.time <- sim.time
    out$loss.eval.time <- loss.eval.time
  }
  out
}
################################################################################
################################################################################
########################### Density Ratio Estimation ###########################
################################################################################
# Density Ratio for Geometric Brownian Motion
GBM_lambda <- function(F.tar, F.ref, sigma, r, d, dt) {
  # True density ratio between two lognormal transition densities of a GBM
  # over horizon dt: numerator started at F.tar, denominator at F.ref
  # (both share the same log-volatility).
  drift <- (r - d - 0.5 * sigma^2) * dt
  common_sdlog <- sigma * sqrt(dt)
  meanlog_tar <- log(F.tar) + drift
  meanlog_ref <- log(F.ref) + drift
  # construct density ratio function
  lambda_true <- function(x) {
    dlnorm(x, meanlog = meanlog_tar, sdlog = common_sdlog) /
      dlnorm(x, meanlog = meanlog_ref, sdlog = common_sdlog)
  }
  list("tar_meanlog" = meanlog_tar, "tar_sdlog" = common_sdlog,
       "ref_meanlog" = meanlog_ref, "ref_sdlog" = common_sdlog,
       "lambda" = lambda_true)
}
# Density Ratio Estimation - Naive Stepwise
lambda_step_approx <- function(x_nu, x_de, n_block, x_min = -Inf, x_max = Inf) {
  # Naive stepwise density-ratio estimate: bin both samples on a grid whose
  # interior breaks are the equiprobable quantiles of the denominator
  # sample, then take per-bin count ratios.
  probs <- seq(from = 0, to = 1, length.out = n_block + 1)[-1]
  # drop the last quantile (the sample maximum) and pad with x_min / x_max
  inner_breaks <- head(as.numeric(quantile(x_de, probs = probs)), -1)
  breaks <- c(x_min, inner_breaks, x_max)
  counts_nu <- hist(x_nu, breaks = breaks, plot = FALSE)$counts
  counts_de <- hist(x_de, breaks = breaks, plot = FALSE)$counts
  ratio <- counts_nu / counts_de
  # empty denominator bins yield NA/NaN/Inf ratios; treat those as zero
  ratio[!is.finite(ratio)] <- 0
  # stepwise-constant interpolant; NA outside [min(breaks), max(breaks)]
  approxfun(x = breaks, y = c(ratio, ratio[n_block]),
            rule = 1,
            method = "constant")
}
# Density Ratio Estimation Wrapper Function
ratio_est <- function(classifier.type, x_nu, x_de, params = NULL) {
  # Dispatch wrapper for density-ratio estimation.
  # params:
  #  - "GBM_true":       F.tar, F.ref, sigma, r, d, dt
  #  - "naive_stepwise": n_block, x_min, x_max
  # Returns a function of x giving the (estimated or exact) density ratio.
  if (classifier.type == "GBM_true") {
    exact <- GBM_lambda(params$F.tar, params$F.ref, params$sigma,
                        params$r, params$d, params$dt)
    return(exact$lambda)
  }
  if (classifier.type == "naive_stepwise") {
    return(lambda_step_approx(x_nu, x_de,
                              n_block = params$n_block,
                              x_min = params$x_min,
                              x_max = params$x_max))
  }
}
# map reference index to target indices (VERSION 1)
find_tar_idx <- function(Ft.ref.axis, Ft.axis) {
  # Map each reference point to the indices of axis points whose nearest
  # reference (absolute distance, first reference wins on ties) is that
  # point, excluding the reference's own index. A reference assigned only
  # to itself maps to NA. List names are the reference indices in Ft.axis.
  dist_to_refs <- sapply(Ft.ref.axis, function(ref_pt) abs(Ft.axis - ref_pt))
  nearest_ref <- apply(dist_to_refs, MARGIN = 1,  # row-wise over axis points
                       FUN = function(d) which.min(d)[1])
  mapping <- list()
  for (ref_i in seq_len(length(Ft.ref.axis))) {
    self_idx <- which(Ft.axis == Ft.ref.axis[ref_i])
    assigned <- which(nearest_ref == ref_i)
    if (self_idx %in% assigned && length(assigned) == 1) {
      mapping[[toString(self_idx)]] <- NA
    } else {
      mapping[[toString(self_idx)]] <- assigned[!(assigned %in% c(self_idx))]
    }
  }
  mapping
}
# # map reference index to target indices (VERSION 2)
# find_tar_idx <- function(Ft.ref.axis, Ft.axis) { # look left references
# # construct return list
# idx.tars.ls <- list()
# for (i in seq_len(length(Ft.ref.axis))) {
# idx.ref <- which(Ft.axis == Ft.ref.axis[i])
#
# if ( i == 1 ) tar.start <- 1
# else tar.start <- which(Ft.axis == Ft.ref.axis[i-1]) + 1
#
# if ( tar.start + 1 == idx.ref ) idx.tars.ls[[toString(idx.ref)]] <- NA
# else idx.tars.ls[[toString(idx.ref)]] <- as.integer(
# seq(from = tar.start, to = idx.ref - 1, by = 1))
# }
# return(idx.tars.ls)
# }
# # map target index to reference index (VERSION 3)
# find_ref_idx <- function(Ft.ref.axis, Ft.axis) { # midpoint references
# # get distance to each ref pt
# dist.mat <- sapply(Ft.ref.axis, function(Ft.ref) abs(Ft.axis-Ft.ref))
# # get the ref point with the min distance
# dist.min.arr <- apply(dist.mat, MARGIN = 1, # apply by row
# FUN = function(row) which.min(row)[1]) # pickfirst if tie
# names(dist.min.arr) <- as.character(seq_len(length(dist.min.arr)))
# return(dist.min.arr)
# }
################################################################################
################################################################################
########################### Sample Recycling Method ############################
################################################################################
option_price_sample_recycle <- function(lambda.est, sample.paths.ref, disc.losses.ref) {
F.test <- matrix(sample.paths.ref[2,], ncol = 1, byrow = TRUE)
ratio.pred <- as.numeric(lambda.est(F.test))
return( mean(ratio.pred * disc.losses.ref, na.rm = TRUE) )
}
################################################################################
################################################################################
######################### Helper Distribution Functions ########################
################################################################################
get_Ft_dist_params <- function(outer.params, contract.params) {
Ft.dist.params <- # theoretical Ft distribution params
GBM_lambda(F.tar = outer.params$F0,
F.ref = outer.params$F0,
sigma = contract.params$sigma,
r = outer.params$mu,
d = contract.params$d,
dt = outer.params$t1)
return(Ft.dist.params)
}
get_Ft_quantile <- function(outer.params, contract.params) {
Ft.dist.params <- get_Ft_dist_params(outer.params, contract.params)
Ft.quantile <- function(p) # theoretical Ft quantiles
qlnorm(p, meanlog = Ft.dist.params$tar_meanlog, sdlog = Ft.dist.params$tar_sdlog)
return(Ft.quantile)
}
get_Ft_PDF <- function(outer.params, contract.params) {
Ft.dist.params <- get_Ft_dist_params(outer.params, contract.params)
Ft.PDF <- function(x) # theoretical Ft PDF
dlnorm(x, meanlog = Ft.dist.params$tar_meanlog, sdlog = Ft.dist.params$tar_sdlog)
return(Ft.PDF)
}
get_Ft_CDF <- function(outer.params, contract.params) {
Ft.dist.params <- get_Ft_dist_params(outer.params, contract.params)
Ft.CDF <- function(q) # theoretical Ft CDF
plnorm(q, meanlog = Ft.dist.params$tar_meanlog, sdlog = Ft.dist.params$tar_sdlog)
return(Ft.CDF)
}
get_Ft_RNG <- function(outer.params, contract.params) {
Ft.dist.params <- get_Ft_dist_params(outer.params, contract.params)
Ft.RNG <- function(n) # random number generator from theoretical Ft distribution
rlnorm(n, meanlog = Ft.dist.params$tar_meanlog, sdlog = Ft.dist.params$tar_sdlog)
return(Ft.RNG)
}
get_loss_func <- function(inner.params, contract.params) {
return(switch( # theoretical loss Lt
contract.params$loss.type,
"European" = function(Ft) european_option_price_theory(
S0 = Ft,
K = contract.params$K,
sigma = contract.params$sigma,
r = contract.params$r,
d = contract.params$d,
tau = contract.params$tau,
option.type = contract.params$option.type),
"Asian" = function(Ft) asian_option_price_theory_discrete(
S0 = Ft,
K = contract.params$K,
sigma = contract.params$sigma,
r = contract.params$r,
d = contract.params$d,
tau = contract.params$tau,
option.type = contract.params$option.type,
n = inner.params$n.dt, j = 0, S.arr = NULL)
))
}
get_Lt2Ft <- function(outer.params, inner.params, contract.params) {
loss_func <- get_loss_func(inner.params, contract.params)
Ft.quantile <- get_Ft_quantile(outer.params, contract.params)
solve_Ft <- function(Lt.arr) sapply(Lt.arr, function(Lt)
uniroot(function(Ft) loss_func(Ft) - Lt,
interval = c(0, Ft.quantile(1-.Machine$double.eps^0.5)))$root )
}
get_Lt_PDF <- function(outer.params, inner.params, contract.params) {
solve_Ft <- get_Lt2Ft(outer.params, inner.params, contract.params)
Ft.PDF <- get_Ft_PDF(outer.params, contract.params)
Lt.PDF <- function(Lt.arr) {
if ( any(Lt.arr == 0) ) { # deal with inputs containing zeros
Lt.PDF.arr <- vector(mode = "numeric", length = length(Lt.arr))
Lt.PDF.arr[Lt.arr == 0] <- 0
if ( any(Lt.arr != 0) ) {
Lt.PDF.arr[Lt.arr != 0] <-
Ft.PDF(solve_Ft(Lt.arr[Lt.arr != 0])) *
abs(numDeriv::grad(solve_Ft, x = Lt.arr[Lt.arr != 0]))
}
return(Lt.PDF.arr)
} else {
return( Ft.PDF(solve_Ft(Lt.arr)) *
abs(numDeriv::grad(solve_Ft, x = Lt.arr)) )
}
}
return(Lt.PDF)
}
get_Lt_CDF <- function(outer.params, inner.params, contract.params) {
solve_Ft <- get_Lt2Ft(outer.params, inner.params, contract.params)
Ft.CDF <- get_Ft_CDF(outer.params, contract.params)
return( function(Lt.arr) Ft.CDF(solve_Ft(Lt.arr)) )
}
get_Lt_quantile <- function(outer.params, inner.params, contract.params) {
loss_func <- get_loss_func(inner.params, contract.params)
Ft.quantile <- get_Ft_quantile(outer.params, contract.params)
return( function(probs) loss_func(Ft.quantile(probs)) )
}
################################################################################
################################################################################
################################# Risk Measures ################################
################################################################################
# Empirical Estimate of Risk Measures
risk_measure_est <- function(est.arr, risk.type, est.params = NULL) {
# params: additional parameters for special risk types
# "Prob of Exceedance (POE)": thres K, array
if ( risk.type == "VaR" ) {
n.axis <- min(101, length(est.arr))
probs <- head(seq(from = 0, to = 1, length.out = n.axis)[-1], -1)
return( as.numeric(quantile(est.arr, probs = probs)) )
} else if ( risk.type == "CTE" ) {
n.axis <- min(101, length(est.arr))
probs <- head(seq(from = 0, to = 1, length.out = n.axis)[-1], -1)
VaRs <- as.numeric(quantile(est.arr, probs = probs))
CTEs <- sapply(VaRs, function(VaR)
weighted.mean(est.arr, est.arr > VaR))
return( CTEs )
} else if ( risk.type == "CVaR" ) {
n.axis <- min(101, length(est.arr))
probs <- head(seq(from = 0, to = 1, length.out = n.axis)[-1], -1)
VaRs <- as.numeric(quantile(est.arr, probs = probs))
CTEs <- sapply(VaRs, function(VaR)
weighted.mean(est.arr, est.arr > VaR))
return( CTEs - VaRs )
} else if ( risk.type == "POE" ) {
return( as.numeric(sapply(est.params$K, function(K)
mean(est.arr > K))) )
}
}
# Theoretical Functions of Risk Measures
get_VaR_func <- function(outer.params, inner.params, contract.params) {
Ft.quantile <- get_Ft_quantile(outer.params, contract.params)
loss_func <- get_loss_func(inner.params, contract.params)
return( function(probs) loss_func(Ft.quantile(probs)) )
}
get_CTE_func <- function(outer.params, inner.params, contract.params) {
VaR_func <- get_VaR_func(outer.params, inner.params, contract.params)
CTE_func <- function(probs) # theoretical conditional tail expectation (CTE)
sapply(probs, function(prob)
ifelse(prob == 1, Inf,
integrate(VaR_func, lower = prob, upper = 1,
rel.tol = .Machine$double.eps^0.5)$value / (1 - prob) ))
return(CTE_func)
}
get_CVaR_func <- function(outer.params, inner.params, contract.params) {
VaR_func <- get_VaR_func(outer.params, inner.params, contract.params)
CTE_func <- get_CTE_func(outer.params, inner.params, contract.params)
# theoretical conditional Value-at-Risk (CVaR)
return( function(probs) CTE_func(probs) - VaR_func(probs) )
}
get_POE_func <- function(outer.params, inner.params, contract.params) {
solve_Ft <- get_Lt2Ft(outer.params, inner.params, contract.params)
Ft.CDF <- get_Ft_CDF(outer.params, contract.params)
# theoretical probability of exceedance (POE)
return( function(K.arr) return( 1 - Ft.CDF(solve_Ft(K.arr)) ) )
}
################################################################################
|
aebc55278d3ca1973931a1edf714ce654a557634 | e7f403c4f61446b342e0a54660a85801201ee9e8 | /plot/plot_legend.R | ffb054aab22e8d4136758c46aa2c0d5c4ed5aa38 | [] | no_license | clslgrnc/fuzzing_scripts | 4742b8b73cf77cc075bf0e380518ff76dc070599 | e4b9cf33662dae57f466f0896adf28e142c58ce8 | refs/heads/master | 2022-03-25T01:34:49.857762 | 2019-12-17T02:37:26 | 2019-12-17T02:37:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,134 | r | plot_legend.R | #!/usr/bin/Rscript
#!/usr/bin/env Rscript
# TAGS: bugs_over_time, coverage_over_time, plot_legend
# Script to plot the overall legend for bugs overtime plots (result of "generate_bug_plot_data.sh").
source(file="plot_utils.R")
option_list <- list(
make_option("--algnames", default="full cmin minset moonshine_size empty random", help="corpus treatment"),
make_option("--output", default="plot.pdf", help="output file name")
)
opt <- parse_args(OptionParser(option_list=option_list))
algnames <- unlist (strsplit(opt$algnames, " ") )
iname = opt$output
for (i in 1:length(algnames)) {
if (algnames[i] %in% names(possible_names)) {
actual_names[[i]] <- possible_names[[algnames[i]]]
} else { actual_names[[i]] <- algnames[i] }
# cat (paste(i,actual_names[[i]], algnames[i], possible_names[[algnames[i]]], sep=" "))
}
pdf(iname, width=8, height=0.75)
par(oma = c(1.5,0,0,0), mar = c(0, 0, 0, 0))
plot(NULL ,xaxt='n',yaxt='n',bty='n',ylab='',xlab='', xlim=0:1, ylim=0:1)
legend("center", legend = actual_names, pch=symbols, lwd=5, lty=lntypes,
col = colours, text.col=colours, horiz=T, text.width=0.11)
dev.off()
|
1b3f49495ec9011af26092fe222e51ed1c05d07e | f60e9200289e480ea10797e4d4929c40e6d75dc0 | /uk/ac/bolton/trailer-analytics/view/server.R | 45eabdd60e132b2ca1802791988b7df914e7e172 | [] | no_license | ps3com/data-analytics | 6167b2a24e51dc2f78af5a5dfadc030a591b70b2 | 088db3c4c3abf337c1a3f0abe5200b17f4efcef3 | refs/heads/master | 2021-01-10T20:22:56.010694 | 2014-03-27T12:07:28 | 2014-03-27T12:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,734 | r | server.R | # TODO: Add comment
#
# Author: paul
###############################################################################
source('../main.R', chdir=T)
# Define server logic
shinyServer(function(input, output) {
bookmarkButtonCount <- 0;
############################ Handle the bookmarks scrape ###########################################
# reactive function to check that the search button was pressed before
# actually doing the search
scrapeBookmarks <- reactive({
if (input$goBookmarkTopics == 0)
return(NULL)
isolate({
output$bookmarkPlot <- renderPlot({
#wordcloud(topicHandler$labels, scale=c(5,0.5), max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
#wordcloud(topicHandler$labels, scale=c(8,.2),min.freq=1, max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
wordcloud(bookmarksController$getLabels(), scale=c(3,.2), min.freq=1, max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
})
return (bookmarksController$getTopics()) ###@@@@@
})
})
output$bookmarkTermsDataset <- renderUI({
#if (input$goBookmarkTopics != 0)
# output$bookmarkTermsLegend <- renderText("Searching, please wait")
dataSet2 <- scrapeBookmarks()
dataSet2 <- suppressWarnings(as.data.frame(sapply(dataSet2, as.character)))
toJSON(as.data.frame(t(dataSet2)), .withNames=FALSE, container = TRUE)
})
#############################################################################################
############################ Handle getting bookmarks ######################################
# reactive function to check that the search button was pressed before
# actually doing the search
getBookmarkResults <- reactive({
#if (input$getBookmarks == 0)
if(bookmarkButtonCount == input$getBookmarks){
cat(paste("A:",input$getBookmarks,bookmarkButtonCount,"\n",sep=" "))
return(NULL)
}
isolate({
bookmarkButtonCount <- input$getBookmarks
cat(paste("B:",input$getBookmarks,bookmarkButtonCount,"\n",sep=" "))
return (bookmarksController$getBookmarks(input$userId, input$tTag))
})
})
output$bookmarkDataset <- renderUI({
out <- tryCatch(
{
cat(paste("*",input$userId, ":", input$tTag,"*\n",sep=""))
dataSet2 <- getBookmarkResults()
dataSet2 <- suppressWarnings(as.data.frame(sapply(dataSet2, as.character)))
toJSON(as.data.frame(t(dataSet2)), .withNames=FALSE)
},
error=function(cond) {
eMessage <- paste("Cannot create url with args", input$userId, input$tTag, sep=" ")
return(paste('{"APPERROR": "',eMessage, '"}' ,sep=""))
},
warning=function(cond) {
message("Here's the original warning message:")
message(cond)
# Choose a return value in case of warning
return(NULL)
},
finally={
}
)
return(out)
})
#############################################################################################
############################ Handle the web search ##########################################
# reactive function to check that the search button was pressed before
# actually doing the search
getSearchResults <- reactive({
if (input$goSearch == 0){
cat(paste("1 inputGoSearch=",input$goSearch,"*\n",sep=""))
return(NULL)
}
isolate({
cat(paste("2 inputGoSearch=",input$goSearch,"*\n",sep=""))
#cat(paste("<",input$searchTerms,">\n",sep=""))
# todo url encode values in controller
return (webSearchController$searchJSON(input$searchTerms))
})
})
output$searchResultDataset <- renderUI({
cat(paste("*",input$searchTerms,"*\n",sep=""))
dataSet2 <- getSearchResults()
dataSet2 <- suppressWarnings(as.data.frame(sapply(dataSet2, as.character)))
toJSON(as.data.frame(t(dataSet2)), .withNames=FALSE)
})
#############################################################################################
############################ Handle the web search scrape ###########################################
# reactive function to check that the search button was pressed before
# actually doing the search
scrapeSearchResults <- reactive({
if (input$goSearchTopics == 0)
return(NULL)
isolate({
output$searchPlot <- renderPlot({
wordcloud(webSearchController$getLabels(), scale=c(3,.2), min.freq=1, max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
})
return (webSearchController$getTopics())
})
})
output$searchTermsDataset <- renderUI({
dataSet2 <- scrapeSearchResults()
dataSet2 <- suppressWarnings(as.data.frame(sapply(dataSet2, as.character)))
toJSON(as.data.frame(t(dataSet2)), .withNames=FALSE, container = TRUE)
})
#output$bookmarkPlot <- renderPlot({
# wordcloud(topicHandler$labels, scale=c(5,0.5), max.words=100, random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
# })
#############################################################################################
}) |
fe9f3a5490d3a84dadd8be74bf595c04ee66af23 | 4d3672136d43264176fe42ea42196f113532138d | /man/Vaccine.Rd | fc904abd771b78cf19e176a8beaee44e9b5e2e3a | [] | no_license | alanarnholt/BSDA | 43c851749a402c6fe73213c31d42c26fa968303e | 2098ae86a552d69e4af0287c8b1828f7fa0ee325 | refs/heads/master | 2022-06-10T10:52:15.879117 | 2022-05-14T23:58:15 | 2022-05-14T23:58:15 | 52,566,969 | 5 | 13 | null | 2017-07-27T02:06:33 | 2016-02-26T00:28:07 | R | UTF-8 | R | false | true | 796 | rd | Vaccine.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Vaccine}
\alias{Vaccine}
\title{Reported serious reactions due to vaccines in 11 southern states}
\format{
A data frame/tibble with 11 observations on two variables
\describe{
\item{state}{U.S. state}
\item{number}{number of reported serious reactions per million doses of a vaccine}
}
}
\source{
Center for Disease Control, Atlanta, Georgia.
}
\usage{
Vaccine
}
\description{
Data for Exercise 1.111
}
\examples{
stem(Vaccine$number, scale = 2)
fn <- fivenum(Vaccine$number)
fn
iqr <- IQR(Vaccine$number)
iqr
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Pacific Grove, CA: Brooks/Cole, a division of Thomson Learning.
}
\keyword{datasets}
|
585b5d3b8d12dc57562f6b6c0faa8a897b68d877 | 3e0c8f2ad6749d24944db2fe107ae5a79a86de54 | /plot3.R | 1bcf9832a7cd5fb053675bc5c66183ab6c871b1d | [] | no_license | TTeemu/CourseraProject1TT | df33782d1545674be6f750caec440bdcd57aba18 | 2a1693e9ed96791b796a90aa24dd023273328ead | refs/heads/master | 2021-01-13T01:44:40.178196 | 2014-10-12T17:48:02 | 2014-10-12T17:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,071 | r | plot3.R | ## Exploratory Data Analys Project 1 ##
## PLOT 3 ##
#######################################
## installing packages ##
install.packages("lubridate")
library(lubridate)
## Reading in the data ##
data <- read.table("household_power_consumption.txt",sep = ";",header = T,na.strings = "?")
# changing the variable class as date
data$Date <- as.Date(data$Date, "%d/%m/%Y")
## Subsetting correct timeframe ##
cor_data <- data[data$Date >= as.Date("2007-02-01") & data$Date <= as.Date("2007-02-02"),]
# making weekday variable
cor_data$wday <- wday(cor_data$Date,label =T)
#removing the unused portion of data
remove(data)
## Making the second plot ##
png(file = "plot3.png", bg = "transparent", width = 480, height = 480,)
matplot(cor_data[,7:9],type="l",xaxt="n",lty=1,col=c("black","red","blue"),ylab="Energy sub metering")
axis(side=1, at=c(1,length(cor_data$Global_active_power)/2,length(cor_data$Global_active_power)), labels=c("Thu","Fri","Sat"))
legend('topright', names(cor_data[7:9]), lty=1, col=c('black', 'red','blue'))
dev.off()
|
1e094ab63e07c6b6e5d40be2310e675ef34c1422 | 6f257dfac5625c2bc5cd0fa418c94b432bac472d | /man/subpixel2bin.Rd | 8d82c9430c375375776d7b9d07399b2e5c64fe05 | [] | no_license | GastonMauroDiaz/caiman | 591ac8fa2d46a291ff2692cd825021ec3970b650 | c37d0e4a0af4774b67c30dc3c22c1b55cbe3f153 | refs/heads/master | 2022-01-25T21:34:30.218488 | 2022-01-21T18:52:43 | 2022-01-21T18:52:43 | 61,065,044 | 10 | 1 | null | null | null | null | UTF-8 | R | false | true | 437 | rd | subpixel2bin.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_MBLT.R
\name{subpixel2bin}
\alias{subpixel2bin}
\alias{subpixel2bin,RasterLayer-method}
\title{todo}
\usage{
subpixel2bin(subpixel, segmentation)
\S4method{subpixel2bin}{RasterLayer}(subpixel, segmentation)
}
\arguments{
\item{subpixel}{todo}
\item{segmentation}{todo}
}
\value{
\code{\linkS4class{BinImage}}
}
\description{
todo
}
\examples{
#todo
}
|
97db3476f7df07432a849ac65194723a3c1ca00a | b06aca03349252e5a3dcaa3d387fd4e59b407e4d | /16S/PhyloseqObjects.r | da01008e54adc0a1dab181471888d59c88f74e38 | [] | no_license | vera-yxu/Thesis-QuantitativeAnalysis | cc2b161c0755a01a688c5b3495cf2583823f2b86 | 575899f62e6a0b229bfc292b59722e79af4afa61 | refs/heads/master | 2021-07-11T20:02:53.420001 | 2020-07-07T22:35:13 | 2020-07-07T22:35:13 | 170,559,291 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,070 | r | PhyloseqObjects.r | library("phyloseq")
packageVersion("phyloseq")
library("biomformat")
packageVersion("biomformat")
biom_data <- import_biom(BIOMfilename = "table-with-taxa.biom",
treefilename = "tree.nwk")
mapping_file <- import_qiime_sample_data(mapfilename = "16s-metadata-with-counts.tsv")
physeq.a <- merge_phyloseq(biom_data, mapping_file)
colnames(tax_table(physeq.a))= c("Kingdom","Phylum","Class","Order","Family","Genus", "Species")
whole.samples <- c("T1R1","T1R4","T1R5","T1R7","T1R9","T1R10","T2R1","T2R4","T2R5","T2R7","T2R9","T2R10","T3R1","T3R4","T3R5","T3R7","T3R9","T3R10","T4R1","T4R4","T4R5","T4R7","T4R9","T4R10","T5R1","T5R4","T5R5","T5R7","T5R9","T5R10")
live.samples <- c("T2R1L","T2R4L","T2R5L","T2R7L","T2R9L","T3R1L","T3R4L","T3R5L","T3R7L","T3R9L","T3R10L","T4R1L","T4R4L","T4R5L","T4R7L","T4R9L","T4R10L","T5R1L","T5R4L","T5R5L","T5R7L","T5R9L","T5R10L")
dead.samples <- c("T2R1D","T2R4D","T2R5D","T2R7D","T2R9D","T2R10D","T3R1D","T3R4D","T3R5D","T3R7D","T3R9D","T3R10D","T4R1D","T4R4D","T4R5D","T4R7D","T4R9D","T4R10D","T5R1D","T5R4D","T5R5D","T5R7D","T5R9D","T5R10D")
live.dead.samples <- c("T2R1L","T2R4L","T2R5L","T2R7L","T2R9L","T3R1L","T3R4L","T3R5L","T3R7L","T3R9L","T3R10L","T4R1L","T4R4L","T4R5L","T4R7L","T4R9L","T4R10L","T5R1L","T5R4L","T5R5L","T5R7L","T5R9L","T5R10L","T2R1D","T2R4D","T2R5D","T2R7D","T2R9D","T2R10D","T3R1D","T3R4D","T3R5D","T3R7D","T3R9D","T3R10D","T4R1D","T4R4D","T4R5D","T4R7D","T4R9D","T4R10D","T5R1D","T5R4D","T5R5D","T5R7D","T5R9D","T5R10D")
physeq.whole <- subset_samples(physeq.a, SampleID %in% whole.samples)
physeq.live <- subset_samples(physeq.a, SampleID %in% live.samples)
physeq.dead <- subset_samples(physeq.a, SampleID %in% dead.samples)
physeq.lnd <- subset_samples(physeq.a, SampleID %in% live.dead.samples)
physeq.whole.percent <- transform_sample_counts(physeq.whole, function(x) 100 * x/sum(x))
physeq.w.percent.gyp <- subset_samples(physeq.whole.percent, Material == "Gypsum")
physeq.w.percent.mdf <- subset_samples(physeq.whole.percent, Material == "MDF")
physeq.l.percent <- transform_sample_counts(physeq.live, function(x) 100 * x/sum(x))
physeq.l.percent.gyp <- subset_samples(physeq.l.percent, Material == "Gypsum")
physeq.l.percent.mdf <- subset_samples(physeq.l.percent, Material == "MDF")
physeq.d.percent <- transform_sample_counts(physeq.dead, function(x) 100 * x/sum(x))
physeq.d.percent.gyp <- subset_samples(physeq.d.percent, Material == "Gypsum")
physeq.d.percent.mdf <- subset_samples(physeq.d.percent, Material == "MDF")
# get counts
#sample_data(physeq.whole.percent)[,9]
count.whole <- as.data.frame(sample_data(physeq.whole.percent))$Count
count.live <- as.data.frame(sample_data(physeq.l.percent))$Count
count.dead <- as.data.frame(sample_data(physeq.d.percent))$Count
# function to convert relative abundance to quantitative abundance
rel_to_quan <- function(physeq, counts) {
for (i in 1:nsamples(physeq)) {
otu_table(physeq)[,i] = get_taxa(physeq, sample_names(physeq)[i]) * counts[i] /100
}
return(otu_table(physeq))
}
physeq.w.quan <- physeq.whole.percent
# replace relative otu table with a new quantitative one by using rel_to_quan function
otu_table(physeq.w.quan) <- rel_to_quan(physeq.w.quan, count.whole)
physeq.w.quan.gyp <- subset_samples(physeq.w.quan, Material == "Gypsum")
physeq.w.quan.mdf <- subset_samples(physeq.w.quan, Material == "MDF")
physeq.l.quan <- physeq.l.percent
# replace relative otu table with a new quantitative one by using rel_to_quan function
otu_table(physeq.l.quan) <- rel_to_quan(physeq.l.quan, count.live)
physeq.l.quan.gyp <- subset_samples(physeq.l.quan, Material == "Gypsum")
physeq.l.quan.mdf <- subset_samples(physeq.l.quan, Material == "MDF")
physeq.d.quan <- physeq.d.percent
# replace relative otu table with a new quantitative one by using rel_to_quan function
otu_table(physeq.d.quan) <- rel_to_quan(physeq.d.quan, count.dead)
physeq.d.quan.gyp <- subset_samples(physeq.d.quan, Material == "Gypsum")
physeq.d.quan.mdf <- subset_samples(physeq.d.quan, Material == "MDF")
|
a983eb8b693e2bc67d6e83db4966978eac03daea | 1f653d44ad299720e7bc75c24d1b207540e11cf3 | /exp4_eyetrackerFribbles/analysis/preProcessing.R | 552dda671992fbf663ca58f22b342566864afa63 | [] | no_license | n400peanuts/leverhulmeNDL | cc9232f5c9fafd751bc93df9529cffa06343d8b2 | f2584b912cf9f20d68123c93c31b11c50fb3f630 | refs/heads/master | 2023-04-23T05:36:05.867653 | 2021-05-11T09:05:56 | 2021-05-11T09:05:56 | 247,944,842 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 28,172 | r | preProcessing.R | #-----------------------------------------------------------#
#---------- this script takes the raw data from Gorilla ----#
#---- it selects the columns and rows necessary for --------#
#--------------------- data analysis -----------------------#
#-----------------------------------------------------------#
# Housekeeping: start from a clean workspace and load the tidyverse.
# NOTE(review): rm(list = ls()) wipes the interactive session of whoever
# sources this script; fine for a standalone pipeline, but worth revisiting.
rm(list = ls())
library(tidyverse)
#----------- select the experiment ------------#
expeType <- "pilot2"
#### paths ####
# project root on the local machine
localDirectory <- "C:/Users/eva_v/Nexus365/Elizabeth Wonnacott - Eva_Liz_Leverhulme/leverhulmeNDL/eyetracker - fribbles/"
# raw Gorilla exports, exactly as downloaded
input <- paste0(localDirectory, "rawdata/", expeType, "/")
# destination for the preprocessed files (after column/row selection)
output <- paste0(localDirectory, "preProcessed_data/", expeType, "/")
#### load behavioural data ####
#--------------- load stimuli ------------------#
stimuli <- read.csv(paste0(localDirectory, "stimuli/stimuli.csv"))
#--------------- load data ------------------#
# list the Gorilla data files sitting in the input folder
# (pattern is a regex applied to the file names, same match as grepl("data", ...))
df <- list.files(input, pattern = "data")
# Gorilla assigns a randomly generated code to every task; these lookup
# tables map each code to the task it belongs to, one table per stimulus list.
taskID_list1 <- data.frame(
  list = 1,
  gorillaCode = c("wcph", "hpz2", "jyd3"),
  task = c("learning", "2AFC", "contingency")
)
taskID_list2 <- data.frame(
  list = 2,
  gorillaCode = c("vm7o", "67pm", "dlw7"),
  task = c("learning", "2AFC", "contingency")
)
taskID_list3 <- data.frame(
  list = 3,
  gorillaCode = c("74sa", "jc49", "tiwi"),
  task = c("learning", "2AFC", "contingency")
)
# Select, for each list, the data file whose name contains that task's code.
# (learning-task files are currently not analysed, hence left disabled)
# learningID <- c(df[grepl(taskID_list1$gorillaCode[taskID_list1$task == "learning"], df)],
#                 df[grepl(taskID_list2$gorillaCode[taskID_list2$task == "learning"], df)],
#                 df[grepl(taskID_list3$gorillaCode[taskID_list3$task == "learning"], df)])
AFCID <- c(
  df[grepl(taskID_list1$gorillaCode[taskID_list1$task == "2AFC"], df)],
  df[grepl(taskID_list2$gorillaCode[taskID_list2$task == "2AFC"], df)],
  df[grepl(taskID_list3$gorillaCode[taskID_list3$task == "2AFC"], df)]
)
contingencyID <- c(
  df[grepl(taskID_list1$gorillaCode[taskID_list1$task == "contingency"], df)],
  df[grepl(taskID_list2$gorillaCode[taskID_list2$task == "contingency"], df)],
  df[grepl(taskID_list3$gorillaCode[taskID_list3$task == "contingency"], df)]
)
# ---- load the raw exports into the global environment ----
# pilot1 stored one file per task; later pilots have one file per task *and*
# per stimulus list, which are stacked into a single data frame per task.
#learning <- NULL
labPic <- NULL
contingency <- NULL
if (expeType == "pilot1"){
  for (i in seq_along(df)){
    # strip the fixed Gorilla export prefix and the .csv suffix -> task code
    gsub("data_exp_45245-v3_task-|.csv$", "", df[i]) -> id
    # translate the Gorilla code into an intuitive object name
    if (id == taskID_list1[taskID_list1$task == "learning", ]$gorillaCode){
      id <- "learning"
      assign("learning", data.frame()) # placeholder under the intuitive name
    } else if (id == taskID_list1[taskID_list1$task == "2AFC", ]$gorillaCode){
      id <- "labPic"
      assign("labPic", data.frame())
    } else if (id == taskID_list1[taskID_list1$task == "contingency", ]$gorillaCode){
      id <- "contingency"
      assign("contingency", data.frame())
    }
    # "" and "NA" cells both become NA; keep strings as factors (legacy choice
    # the downstream code relies on, e.g. droplevels()/as.factor() calls)
    temp <- read.csv(paste0(input, df[i]),
                     na.strings = c("", "NA"),
                     stringsAsFactors = TRUE)
    assign(id, temp)
  }
} else {
  # stack every 2AFC file into labPic, every contingency file into contingency
  for (y in seq_along(AFCID)){
    temp <- read.csv(paste0(input, AFCID[y]),
                     na.strings = c("", "NA"), stringsAsFactors = TRUE)
    labPic <- plyr::rbind.fill(temp, labPic)
  }
  for (z in seq_along(contingencyID)){
    temp <- read.csv(paste0(input, contingencyID[z]),
                     na.strings = c("", "NA"), stringsAsFactors = TRUE)
    contingency <- plyr::rbind.fill(temp, contingency)
  }
}
# Drop only the intermediates that actually exist: rm() on names that were
# never created (learningID is commented out above; x, and i/id in the pilot2
# branch, are never assigned) emits "object not found" warnings, so filter
# the candidate names against ls() first.
rm(list = intersect(
  c("taskID_list1", "taskID_list2", "taskID_list3", "temp", "i", "df", "id",
    "contingencyID", "AFCID", "learningID", "x", "y", "z"),
  ls()
))
# Quick sanity checks (values auto-print when run at top level): which
# stimulus lists are present in the contingency data, and the distinct
# (non-missing) participant IDs that were loaded.
unique(contingency$list)
unique(na.omit(contingency$Participant.Private.ID))
#### columns and rows selection ####
# in Gorilla there are a number of rows that are not necessary for our analysis,
# therefore we're going to select only the columns and rows that we need
#### 2AFC - labPic ####
# Keep only the experimental 2AFC trials (display == "task", response screen
# "Screen 2") and shorten Gorilla's verbose column names. The two branches
# exist because pilot2 exports carry extra columns ('list' for the stimulus
# list and 'Zone.Type') that pilot1 exports do not have.
if (expeType == "pilot2"){
# single-bracket indexing with a character vector keeps those columns only
columnsIwantTokeep<- labPic[c('Task.Name','Participant.Private.ID', 'display','Trial.Number','Zone.Type',
'Screen.Name', 'Response', 'label','frequency','Reaction.Time','list')]
# only "Screen 2" rows contain the participant's actual choice
rowsIwantTokeep <- c("Screen 2")
labPic <- columnsIwantTokeep %>%
filter(Screen.Name == rowsIwantTokeep & display == "task") %>%
rename(subjID = Participant.Private.ID,
task = Task.Name,
resp = Response,
rt = Reaction.Time,
trial = Trial.Number,
labelPresented = label)
labPic$display <- NULL; labPic$Screen.Name <- NULL; labPic$Zone.Type <- NULL #we don't need these columns anymore
rm(rowsIwantTokeep, columnsIwantTokeep)
} else {
# pilot1 layout: no 'list' or 'Zone.Type' column in the export
columnsIwantTokeep<- labPic[c('Task.Name','Participant.Private.ID', 'display','Trial.Number',
'Screen.Name', 'Response', 'label','frequency','Reaction.Time')]
rowsIwantTokeep <- c("Screen 2")
labPic <- columnsIwantTokeep %>%
filter(Screen.Name %in% rowsIwantTokeep &
display %in% "task" ) %>%
rename(subjID = Participant.Private.ID,
task = Task.Name,
resp = Response,
rt = Reaction.Time,
trial = Trial.Number,
labelPresented = label)
labPic$display <- NULL; labPic$Screen.Name <- NULL #we don't need these columns anymore
rm(rowsIwantTokeep, columnsIwantTokeep)
}
labPic <- labPic[labPic$subjID!="3502047",]
#----------------- clean the rows from CSS and HTML metadata ---------------#
# Gorilla embeds the presented label inside an HTML <p> tag; strip the markup
# (and the non-breaking spaces) so only the bare label string remains.
labPic <- droplevels(labPic)
labPic$labelPresented <- gsub('<p style="font-size: 700%;">', "", labPic$labelPresented)
labPic$labelPresented <- gsub('</p>', "", labPic$labelPresented);
labPic$labelPresented <- gsub(' ', "", labPic$labelPresented);
as.factor(labPic$labelPresented)-> labPic$labelPresented
as.factor(labPic$frequency)-> labPic$frequency
# strip the image extension from the response so it can be matched to stimuli
# NOTE(review): '.jpg' is a regex (the dot matches any character); harmless
# for these file names, but fixed = TRUE would be stricter.
labPic$resp <- gsub('.jpg', "", labPic$resp)
as.factor(labPic$resp)-> labPic$resp
#----- map the picture to the corresponding fribble --------------#
# fribble ID in stimuli contains the mapping, we're going to merge the two dataframes
# merging is possible only if the column to merge has the same name
colnames(stimuli)[1] <- 'resp'
# all.y = TRUE keeps every trial, even responses absent from the stimuli table
# (those rows get NA in the stimulus columns, e.g. timed-out trials)
merge(stimuli, labPic, by = c("resp"), all.y = T)-> temp
# NOTE(review): positional renaming -- assumes columns 4 and 9 of the merged
# frame are the chosen fribble's label and the trial frequency; verify if the
# stimuli file or the column selection above ever changes.
colnames(temp)[4] <- 'fribbleSelected'
colnames(temp)[9] <- 'frequency'
temp$frequency.x <-NULL #this is a duplicate
temp -> labPic; rm(temp);
# re-factor after all the character manipulation above
labPic$resp <- as.factor(labPic$resp)
labPic$frequency <- as.factor(labPic$frequency)
labPic$fribbleSelected <- as.factor(labPic$fribbleSelected)
labPic$subjID <- as.factor(labPic$subjID)
#------------- accuracy ----------------#
ifelse(labPic$fribbleSelected == labPic$labelPresented,1,0)-> labPic$acc
aggregate(acc ~ frequency + subjID, data = labPic, mean)
# coding of the type of response
labPic$resp <- as.character(labPic$resp)
labPic$resp[is.na(labPic$resp)] <- "missing"
labPic$resp <- as.factor(labPic$resp)
labPic$type_of_resp <- c("responses") # if I made the column correctly, then we shouldn't find any row names "responses" left.
labPic[labPic$resp=="missing",]$type_of_resp <- "timedOut"
#-------------------control-----------------------------#
# BUG FIX: the original subset rows with na.omit(<logical vector>). na.omit()
# SHORTENS a logical vector when NAs are present, and the shortened mask is
# then recycled against the data frame, silently selecting the wrong rows.
# which() also drops NAs but returns row indices, so the selection stays
# aligned with the data.
labPic[which(labPic$labelPresented == "bim" & labPic$fribbleSelected == "bim"), ]$type_of_resp <- "match"
labPic[which(labPic$labelPresented == "bim" & labPic$fribbleSelected != "bim"), ]$type_of_resp <- "errorControl"
# ------------------correct-----------------------------#
labPic[which(labPic$labelPresented == "tob" & labPic$fribbleSelected == "tob"), ]$type_of_resp <- "match"
labPic[which(labPic$labelPresented == "wug" & labPic$fribbleSelected == "wug"), ]$type_of_resp <- "match"
labPic[which(labPic$labelPresented == "dep" & labPic$fribbleSelected == "dep"), ]$type_of_resp <- "match"
# ------------------mismatch-type1 ---------------------#
#dep selected
labPic[which(labPic$labelPresented == "tob" & labPic$frequency == "low" & labPic$fribbleSelected == "dep"), ]$type_of_resp <- "mismatch-type1"
labPic[which(labPic$labelPresented == "wug" & labPic$frequency == "high" & labPic$fribbleSelected == "dep"), ]$type_of_resp <- "mismatch-type1"
#wug selected
labPic[which(labPic$labelPresented == "dep" & labPic$frequency == "low" & labPic$fribbleSelected == "wug"), ]$type_of_resp <- "mismatch-type1"
labPic[which(labPic$labelPresented == "tob" & labPic$frequency == "high" & labPic$fribbleSelected == "wug"), ]$type_of_resp <- "mismatch-type1"
#tob selected
labPic[which(labPic$labelPresented == "wug" & labPic$frequency == "low" & labPic$fribbleSelected == "tob"), ]$type_of_resp <- "mismatch-type1"
labPic[which(labPic$labelPresented == "dep" & labPic$frequency == "high" & labPic$fribbleSelected == "tob"), ]$type_of_resp <- "mismatch-type1"
#-------------------mismatch-type2----------------------#
labPic[which(labPic$labelPresented == "wug" & labPic$frequency == "high" & labPic$fribbleSelected == "tob"), ]$type_of_resp <- "mismatch-type2"
labPic[which(labPic$labelPresented == "dep" & labPic$frequency == "high" & labPic$fribbleSelected == "wug"), ]$type_of_resp <- "mismatch-type2"
labPic[which(labPic$labelPresented == "tob" & labPic$frequency == "high" & labPic$fribbleSelected == "dep"), ]$type_of_resp <- "mismatch-type2"
labPic[which(labPic$labelPresented == "dep" & labPic$frequency == "low" & labPic$fribbleSelected == "tob"), ]$type_of_resp <- "mismatch-type2"
labPic[which(labPic$labelPresented == "tob" & labPic$frequency == "low" & labPic$fribbleSelected == "wug"), ]$type_of_resp <- "mismatch-type2"
labPic[which(labPic$labelPresented == "wug" & labPic$frequency == "low" & labPic$fribbleSelected == "dep"), ]$type_of_resp <- "mismatch-type2"
#----- (!) these are trials that were not supposed to be control trials, but participants nonetheless choose the control (!)
labPic[which(labPic$labelPresented == "dep" & labPic$fribbleSelected == "bim" & labPic$frequency == "low"), ]$type_of_resp <- "errorControl-low"
labPic[which(labPic$labelPresented == "tob" & labPic$fribbleSelected == "bim" & labPic$frequency == "low"), ]$type_of_resp <- "errorControl-low"
labPic[which(labPic$labelPresented == "wug" & labPic$fribbleSelected == "bim" & labPic$frequency == "low"), ]$type_of_resp <- "errorControl-low"
labPic[which(labPic$labelPresented == "dep" & labPic$fribbleSelected == "bim" & labPic$frequency == "high"), ]$type_of_resp <- "errorControl-high"
labPic[which(labPic$labelPresented == "tob" & labPic$fribbleSelected == "bim" & labPic$frequency == "high"), ]$type_of_resp <- "errorControl-high"
labPic[which(labPic$labelPresented == "wug" & labPic$fribbleSelected == "bim" & labPic$frequency == "high"), ]$type_of_resp <- "errorControl-high"
labPic$type_of_resp <- as.factor(labPic$type_of_resp)
summary(labPic$type_of_resp) # no other response type should be left
labPic$expeType <- as.factor(expeType)
write.csv(labPic, paste0(output, "labPic.csv"), quote = FALSE, row.names = FALSE)
#### contingency ####
# Keep only the slider-response rows and the columns used downstream. The two
# original branches were identical except that pilot2 additionally carries the
# counterbalancing 'list' column, so the column vector is built conditionally.
keep_cols <- c('Task.Name', 'Participant.Private.ID', 'display', 'Trial.Number',
               'fribbleID', 'Zone.Type', 'Response', 'labelPresented',
               'frequency', 'Reaction.Time', 'trialType')
if (expeType == "pilot2") {
  keep_cols <- c(keep_cols, 'list')
}
keep_rows <- c("response_slider_endValue")  # the slider end value = the rating given
contingency <- contingency[keep_cols] %>%
  filter(Zone.Type %in% keep_rows) %>%
  rename(subjID = Participant.Private.ID,
         task = Task.Name,
         resp = Response,
         rt = Reaction.Time,
         trial = Trial.Number,
         fribblePresented = fribbleID)
contingency$Zone.Type <- NULL  # we don't need this column anymore
rm(keep_cols, keep_rows)
#----------------- clean the rows from CSS and HTML metadata ---------------#
contingency <- droplevels(contingency)
# Strip the HTML wrapper Gorilla puts around the label text (and any spaces).
# The patterns are applied in the original order; left-assignment and
# TRUE/FALSE replace the original `->` and `F` shortcuts.
for (junk in c('<p style="font-size: 500%;">', '</p>', ' ')) {
  contingency$labelPresented <- gsub(junk, "", contingency$labelPresented)
}
contingency$labelPresented <- as.factor(contingency$labelPresented)
contingency$fribblePresented <- as.factor(gsub('.jpg', "", contingency$fribblePresented))
contingency$subjID <- as.factor(contingency$subjID)
contingency$expeType <- as.factor(expeType)
write.csv(contingency, paste0(output, "contingency.csv"), quote = FALSE, row.names = FALSE)
aggregate(resp ~ trialType + frequency + subjID, data = contingency, mean)  # per-subject check
#### eye tracker data ####
# Folder with the per-participant eye-tracker exports.
eyetracker_dir <- paste0(input, "eyetracker/")
df <- list.files(eyetracker_dir)
df <- df[grepl("collection", df)]          # experimental (data collection) files
calb <- list.files(eyetracker_dir)
calb <- calb[grepl("calibration", calb)]   # calibration files
# Read every file once and stack the results, instead of growing the data
# frame inside a loop. rev() keeps the row order of the original
# implementation, which prepended each newly read file.
eyeData <- dplyr::bind_rows(lapply(paste0(eyetracker_dir, rev(df)),
                                   readxl::read_xlsx))
eyeData$zone_name <- as.factor(eyeData$zone_name)
# bind_rows() tolerates mismatched columns, unlike the original rbind().
calibrationData <- dplyr::bind_rows(lapply(paste0(eyetracker_dir, rev(calb)),
                                           readxl::read_xlsx))
# The original also tried to rm() an `id` temp that no longer existed
# (already removed after the first loop), which produced a warning.
rm(df, calb, eyetracker_dir)
#these are our region of interest
levels(eyeData$zone_name)
summary(eyeData)
unique(eyeData$participant_id)
# the eyetracking files and the spreadsheet loaded on Gorilla are linked by the
# column "spreadsheet_row": each eyetracking file points to the row of the
# spreadsheet that defined its trial. To trace back what was presented, load
# the Gorilla trial spreadsheets here and look trials up by row number below.
spreadsheet <- read.csv(paste0(localDirectory, "stimuli/spreadsheet Gorilla/finalSpreadsheets/pilot1/spreadsheet.csv"), stringsAsFactors = TRUE)
spreadsheet_list1 <- read.csv(paste0(localDirectory, "stimuli/spreadsheet Gorilla/finalSpreadsheets/learning_list1.csv"), stringsAsFactors = TRUE)
spreadsheet_list2 <- read.csv(paste0(localDirectory, "stimuli/spreadsheet Gorilla/finalSpreadsheets/learning_list2.csv"), stringsAsFactors = TRUE)
spreadsheet_list3 <- read.csv(paste0(localDirectory, "stimuli/spreadsheet Gorilla/finalSpreadsheets/learning_list3.csv"), stringsAsFactors = TRUE)
# Participants per counterbalancing list (left-assignment instead of '->').
# NOTE(review): contingency$list only exists for pilot2; for other runs these
# come out empty, which is fine because they are only used in the pilot2 branch.
subjlist1 <- unique(contingency[contingency$list == 1, ]$subjID)
subjlist2 <- unique(contingency[contingency$list == 2, ]$subjID)
subjlist3 <- unique(contingency[contingency$list == 3, ]$subjID)
# the column spreadsheet_row contains numbers pointing to the row in the actual spreadsheet
head(eyeData$spreadsheet_row)
#we're going to take the fribble presented for that trial and strip away the '.jpg'
# eyeData$spreadsheet_row indexes rows of the Gorilla spreadsheet, so the
# stimulus information can be looked up by plain row indexing.
if (expeType == "pilot2"){
  eyeData$target <- ""
  eyeData$list <- 0
  eyeData$position <- ""
  eyeData$labelPresented <- ""
  eyeData$frequency <- ""
  # One spreadsheet per counterbalancing list; participants are matched to
  # their list via subjlist1..3 (extracted from the contingency task above).
  # This loop replaces fifteen hand-copied assignment lines; the same
  # eyeData[sel, ]$col <- ... form is kept so the semantics are unchanged.
  subj_sets <- list(subjlist1, subjlist2, subjlist3)
  sheets <- list(spreadsheet_list1, spreadsheet_list2, spreadsheet_list3)
  for (k in seq_along(subj_sets)) {
    sel <- eyeData$participant_id %in% subj_sets[[k]]
    trial_info <- sheets[[k]][eyeData[sel, ]$spreadsheet_row, ]
    eyeData[sel, ]$target <- gsub(".jpg$", "", trial_info$ID)
    eyeData[sel, ]$list <- trial_info$list
    eyeData[sel, ]$position <- trial_info$ANSWER
    eyeData[sel, ]$labelPresented <- trial_info$label
    eyeData[sel, ]$frequency <- trial_info$frequency
  }
  rm(subj_sets, sheets, sel, trial_info)
  summary(as.factor(eyeData$target))
  summary(as.factor(eyeData$list))
  summary(as.factor(eyeData$position))
  summary(as.factor(eyeData$labelPresented))
} else {
  # Single spreadsheet, no counterbalancing list column.
  eyeData$target <- ""
  eyeData$position <- ""
  eyeData$labelPresented <- ""
  eyeData$frequency <- ""
  trial_info <- spreadsheet[eyeData$spreadsheet_row, ]
  eyeData$target <- gsub(".jpg$", "", trial_info$ID)
  eyeData$position <- trial_info$ANSWER
  eyeData$labelPresented <- trial_info$label
  eyeData$frequency <- trial_info$frequency
  rm(trial_info)
}
# quick sanity summaries of the freshly filled columns
summary(as.factor(eyeData$target))
summary(as.factor(eyeData$position))
summary(as.factor(eyeData$labelPresented))
summary(as.factor(eyeData$frequency))
# this converts to factor everything that has been listed "as.character";
# vapply() (instead of sapply()) guarantees a logical mask, and computing the
# mask once avoids scanning the columns twice
char_cols <- vapply(eyeData, is.character, logical(1))
eyeData[char_cols] <- lapply(eyeData[char_cols], as.factor)
eyeData <- droplevels(eyeData)
# check that you can make sense of all columns just by looking at the summary
summary(eyeData) # our masterfile with all the eyetracking data
# --------------- column selection --------------#
# Keep only prediction samples and the columns needed downstream. The two
# original branches were identical except that pilot2 additionally carries the
# counterbalancing 'list' column (placed before 'position'), so the column
# vector is built conditionally instead of duplicating the pipeline.
minimal_cols <- c("participant_id", "filename", "spreadsheet_row",
                  "time_elapsed", "type", "screen_index",
                  "x_pred_normalised", "y_pred_normalised",
                  "target", "labelPresented", "frequency")
if (expeType == "pilot2") {
  minimal_cols <- c(minimal_cols, "list")
}
minimal_cols <- c(minimal_cols, "position")
eyeData_minimal <- eyeData %>%
  filter(type %in% "prediction") %>%
  select(all_of(minimal_cols)) %>%
  rename(subjID = participant_id,
         task = filename,
         time = time_elapsed,
         trial = spreadsheet_row,
         x = x_pred_normalised,
         y = y_pred_normalised)
eyeData_minimal <- droplevels(eyeData_minimal)
eyeData_minimal$subjID <- as.factor(eyeData_minimal$subjID)
rm(minimal_cols)
#ok now that we've got our eyetracking data, we need to know the areas of our ROI
#extract zone dimensions -- we need to know where we have presented our images
# in order to do so, we extract the info about the zone areas
roi_pattern <- "fribblezone|leftITI|centerITI|rightITI|leftLabel|rightLabel|centerLabel|buttonLeft|buttonCenter|buttonRight"
zones <- droplevels(eyeData[grepl(roi_pattern, eyeData$zone_name), ])  # zone info rows only
levels(zones$zone_name)
# -------------------- regions of interest (ROIs) --------------------#
# Every Gorilla zone is an axis-aligned rectangle described by its origin
# (zone_x/y_normalised) plus width/height, all in normalised screen units.
# zone_box() turns one named zone into its corner coordinates, replacing the
# ten hand-copied 8-line blocks of the original script.
zone_box <- function(zone_df, zone) {
  # first occurrence of the zone carries the (constant) geometry
  z <- zone_df[zone_df$zone_name == zone, ][1, ]
  c(x1 = z$zone_x_normalised,
    x2 = z$zone_x_normalised + z$zone_width_normalised,
    y1 = z$zone_y_normalised,
    y2 = z$zone_y_normalised + z$zone_height_normalised)
}
zone_names <- c("fribblezone",                               # screen 2: fribble
                "centerITI", "leftITI", "rightITI",          # screen 3: blank ITI
                "centerLabel", "leftLabel", "rightLabel",    # screen 4: label
                "buttonCenter", "buttonLeft", "buttonRight") # screen 5: buttons
boxes <- t(vapply(zone_names, zone_box, c(x1 = 0, x2 = 0, y1 = 0, y2 = 0),
                  zone_df = zones))
# put all these variables in a dataframe for simplicity
ROIs <- data.frame(boxes,
                   ROI = c("fribble", "cITI", "lITI", "rITI",
                           "cLabel", "lLabel", "rLabel",
                           "cButton", "lButton", "rButton"),
                   screen = c(2, 3, 3, 3, 4, 4, 4, 5, 5, 5),
                   row.names = NULL)
ROIs
write.csv(ROIs, paste0(output, "ROIs.csv"), row.names = FALSE, quote = FALSE)
write.csv(eyeData_minimal, paste0(output, "eyeTracker.csv"), row.names = FALSE, quote = FALSE)
|
21c44b5c9681a740d7c1e596425808bc6e829f42 | 700d8121a4e3a9fc4c31e015db643758cb843569 | /inst/registered/NCBI_assemblies/Pan_troglodytes.R | 6ff3b04b9fa495f8fdad88d8a95a79f249793f2f | [] | no_license | Bioconductor/GenomeInfoDb | 727c90f03c289f692999860a12077775f4d65317 | 9dba03f8d2a4f76732e2b12beac7c0ee3230a693 | refs/heads/devel | 2023-08-09T21:33:11.074781 | 2023-06-20T21:40:39 | 2023-06-20T21:40:39 | 102,149,975 | 14 | 15 | null | 2023-03-13T17:45:24 | 2017-09-01T20:19:20 | R | UTF-8 | R | false | false | 913 | r | Pan_troglodytes.R | ORGANISM <- "Pan troglodytes"
### List of assemblies by date.
### Each entry records one Pan troglodytes assembly: its name, submission
### date, NCBI accession, and the circular sequences it contains.
ASSEMBLIES <- list(
    ## panTro2
    list(assembly="Pan_troglodytes-2.1", date="2006/03/16",
         assembly_accession="GCF_000001515.3", circ_seqs="MT"),
    ## panTro3 -- the sequence names in this one are seriously messed up!
    list(assembly="Pan_troglodytes-2.1.3", date="2010/11/15",
         assembly_accession="GCA_000001515.3", circ_seqs=character(0)),
    ## panTro4
    list(assembly="Pan_troglodytes-2.1.4", date="2011/03/25",
         assembly_accession="GCF_000001515.5", circ_seqs="MT"),
    ## panTro5
    list(assembly="Pan_tro 3.0", date="2016/05/03",
         assembly_accession="GCF_000001515.7", circ_seqs="MT"),
    ## panTro6
    list(assembly="Clint_PTRv2", date="2018/01/19",
         assembly_accession="GCA_002880755.3", circ_seqs="MT")
)
|
0ce5f0cfcf7f0fee0af8286d2e63534f50dc4277 | d99e52f963d0b553233eabd9f1103d4562f542f1 | /Common/Utilities.R | c2d8f59f6f73da63aa481a1333fd8cc2875f5911 | [] | no_license | bertcarnell/TrainingResults | c25bbfdc6166ed09e2f55bdc6a13fc8ab747baf8 | 3e3b4e242d9534788fcdc5a3fcd7935e347f3471 | refs/heads/master | 2016-09-10T10:07:44.376424 | 2013-11-29T01:50:02 | 2013-11-29T01:50:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,622 | r | Utilities.R | require(XML)
# Seconds since the Unix epoch at today's local midnight: strptime() with a
# time-only format ("%T") fills in the current date.
# NOTE(review): evaluated once when the file is sourced, so the value goes
# stale if the session crosses midnight.
startToday <- as.numeric(strptime("00:00:00","%T"))
# Origin string for converting numeric seconds back to POSIX date-times.
posixOrigin <- "1970-01-01 00:00:00"
# Histogram of finishing times with three reference lines: "my" time (red),
# the median (solid green) and the mean (dashed green). The legend reports
# the percentile of times[indMe] together with its MM:SS value.
timeHistogram <- function(times, indMe, title)
{
  my_time <- times[indMe]
  my_pct  <- round(ecdf(times)(my_time) * 100, digits = 0)
  hist(times, breaks = 50, freq = TRUE, col = "blue", main = title,
       xlab = "Final Time", ylab = "Frequency")
  abline(v = my_time, col = "red", lwd = 2)
  abline(v = median(times), col = "green", lwd = 1)
  abline(v = mean(times), col = "green", lwd = 1, lty = 2)
  legend("topright",
         legend = c(paste(my_pct, "%", strftime(my_time, format = "%M:%S")),
                    "Median", "Mean"),
         bg = "white", lty = c(1, 1, 2), lwd = c(2, 1, 1),
         col = c("red", "green", "green"))
}
# Boxplot of finishing times with every individual time overlaid as a point,
# highlighting "my" time (red) and the mean (green). The y axis is labelled
# at 5-minute marks from 15:00 to 50:00.
timeBoxplot <- function(times, indMe, title)
{
  my_time <- times[indMe]
  my_pct  <- round(ecdf(times)(my_time) * 100, digits = 0)
  minute_marks <- seq(15, 50, by = 5)
  boxplot(times, main = title, ylab = "Final Time", axes = FALSE)
  points(rep(1, length(times)), times, pch = 1, cex = 0.8)
  points(1, mean(times), pch = 19, cex = 2, col = "green")
  points(1, my_time, pch = 19, cex = 2, col = "red")
  axis(1, at = 1, labels = "Overall")
  axis(2, at = as.numeric(strptime(as.character(minute_marks), format = "%M")),
       labels = paste0(minute_marks, ":00"))
  legend("topright",
         legend = c(paste(my_pct, "%", strftime(my_time, format = "%M:%S")),
                    "Median", "Mean"),
         bg = "white",
         pch = c(19, NA, 19),
         lty = c(NA, 1, NA),
         lwd = c(NA, 3, NA),
         col = c("red", "black", "green"))
}
# Combined panel: a histogram of finishing times on top (70% of the height)
# and a horizontal boxplot sharing the same x range underneath (30%).
# times  : the finishing times; indMe selects "my" time to highlight in red.
# title  : main title of the histogram panel.
# xTimes : tick positions (time values) for the shared bottom axis.
# NOTE(review): layout() and par() are changed without being restored
# afterwards (no on.exit()), so the device state leaks to later plots.
timeHistogramBoxplot <- function(times, indMe, title, xTimes)
{
  layout(matrix(c(1,2), nrow=2), heights=c(0.7, 0.3))
  ## Histogram
  par(mar=c(0,4,2,2))  # no bottom margin: x axis is drawn by the boxplot panel
  h <- hist(times, breaks=50, freq=TRUE, col="blue", main=title,
            xlab="", ylab="Frequency", axes=FALSE)
  axis(2)
  # reference lines: my time (red), median (solid green), mean (dashed green)
  abline(v=times[indMe], col="red", lwd=2)
  abline(v=median(times), col="green", lwd=1)
  abline(v=mean(times), col="green", lwd=1, lty=2)
  # legend reports the percentile of times[indMe] plus its MM:SS value
  legend("topright",
         legend=c(paste(round(ecdf(times)(times[indMe])*100, digits=0), "%",
                        strftime(times[indMe], format="%M:%S")),
                  "Median",
                  "Mean"),
         bg="white",
         lty=c(1,1,2),
         lwd=c(2,1,1),
         col=c("red","green","green"))
  ## Boxplot
  par(mar=c(5,4,0,2))  # no top margin so the two panels sit flush
  # ylim = the histogram's break range keeps both panels on the same x scale
  boxplot(times, main="", xlab="Final Time", axes=FALSE, horizontal=TRUE,
          ylim=range(h$breaks))
  points(rep(1, length(times)), times, pch=1, cex=0.8)
  points(mean(times), 1, pch=19, cex=2, col="green")
  points(times[indMe], 1, pch=19, cex=2, col="red")
  axis(1, at=as.numeric(xTimes),
       labels=format(xTimes, format="%H:%M"))
}
|
9db32da5313858091bcf3bfc6075a13ee608df1d | 9b1a760d45e21998b9d3871a1f4dac3a7a90c05a | /R/magclip.R | 062da2af3fd4be0a1c2d10e73802d70a325a8288 | [] | no_license | asgr/magicaxis | ac10b0b054128025976cb6b51003816cbd2157a9 | 0e3a56587021f8c22f86a3eda87907d8dfbe9e39 | refs/heads/master | 2023-06-21T11:28:06.031052 | 2023-06-19T06:30:03 | 2023-06-19T06:30:03 | 13,343,972 | 9 | 4 | null | 2020-10-22T07:14:05 | 2013-10-05T11:12:59 | R | UTF-8 | R | false | false | 1,723 | r | magclip.R | magclip=function(x, sigma='auto', clipiters=5, sigmasel=1, estimate='both', extra=TRUE){
  # Sort the data and drop non-finite values before clipping. With extra=TRUE
  # the sort order (xord) is kept so a per-element keep/clip logical vector
  # can be returned in the original input order.
  if(extra){
    xord=order(x)
    sel = is.finite(x[xord])
    clipx=x[xord][sel]
  }else{
    sel = is.finite(x)
    clipx=sort(x[sel])
  }
  # Iterative sigma clipping around a rough median of the sorted data.
  if(clipiters>0 & length(clipx)>0){
    newlen=length(clipx)
    sigcut=pnorm(sigmasel)  # quantile fraction corresponding to sigmasel
    for(i in 1:clipiters){
      if(newlen<=1){break}  # nothing sensible left to clip
      oldlen=newlen
      roughmed=clipx[newlen/2]  # middle element (fractional index truncates)
      # sigma='auto': threshold scales with the sample size via
      # qnorm(1 - 2/newlen); otherwise use the user-supplied multiplier.
      if(sigma=='auto'){
        clipsigma=qnorm(1-2/max(newlen,2,na.rm=TRUE))
      }else{
        clipsigma=sigma
      }
      # Spread estimate from the quantiles selected by sigmasel:
      # 'both' = symmetric spread between the two tails,
      # 'lo'/'hi' = one-sided spread relative to the rough median.
      if(estimate=='both'){
        #vallims=clipsigma*diff(quantile(clipx,c(1-sigcut,sigcut)))/2/sigmasel
        vallims=clipsigma*(clipx[sigcut*newlen]-clipx[(1-sigcut)*newlen])/2/sigmasel
      }
      if(estimate=='lo'){
        #vallims=clipsigma*(roughmed-quantile(clipx,1-sigcut))/sigmasel
        vallims=clipsigma*(roughmed-clipx[(1-sigcut)*newlen])/sigmasel
      }
      if(estimate=='hi'){
        #vallims=clipsigma*(quantile(clipx,sigcut)-roughmed)/sigmasel
        vallims=clipsigma*(clipx[sigcut*newlen]-roughmed)/sigmasel
      }
      # Keep only values within roughmed +/- vallims.
      if(extra){
        cliplogic=x[xord]>=(roughmed-vallims) & x[xord]<=(roughmed+vallims) & sel
        clipx=x[xord][which(cliplogic)]
        newlen=length(clipx)
      }else{
        clipx=clipx[clipx>=(roughmed-vallims) & clipx<=(roughmed+vallims)]
        newlen=length(clipx)
      }
      if(oldlen==newlen){break}  # converged: nothing removed this pass
    }
  }else{
    clipx=x  # clipping disabled (or no finite data): pass the input through
    if(extra){
      cliplogic=TRUE
    }
  }
  # Map the keep/clip flags back to original input order and record the
  # surviving range. NOTE(review): when extra=FALSE this else branch also
  # runs after a successful clip, so clip/range come back NA and clipiters
  # is reported as 0 even though clipping happened -- confirm intended.
  if(extra & length(clipx)>0){
    cliplogic[xord]=cliplogic
    range=range(clipx, na.rm = FALSE)
  }else{
    i=0
    cliplogic=NA
    range=NA
  }
  # Invisible so interactive calls don't print; x = surviving values,
  # clip = per-input keep flags (extra=TRUE only), clipiters = passes run.
  invisible(list(x=clipx, clip=cliplogic, range=range, clipiters=i))
} |
279e662c430c83ebb05c9177c21c75775d27c167 | e195ea7aea1de19f148ede1d5e664b10d4353a9a | /extra_scripts/FCM_diversity_Lakes.R | 64d1ebbd773f178142427545ff9112268bc8d0fd | [] | no_license | DenefLab/EnvMicro_Props2017 | 08778a268e4f54c319687f4ce4369273b7cfb33b | 1667d94e770ba540b6e21212193960fba61ae359 | refs/heads/master | 2021-07-12T10:39:58.688230 | 2017-10-13T11:47:26 | 2017-10-13T11:47:26 | 106,823,385 | 1 | 0 | null | 2017-10-13T12:47:26 | 2017-10-13T12:47:26 | null | UTF-8 | R | false | false | 3,123 | r | FCM_diversity_Lakes.R | library("Phenoflow")
library("dplyr")
### Output files will be stored in this directory
path <- c("/data_reference/FCM_MI")
### Import .fcs data; samples are automatically sorted according to name
flowData <- read.flowSet(path = path,
                         transformation = FALSE, pattern = ".fcs")
### Select parameters (standard: two scatters and two FL) and
### transform data using the inverse hyperbolic sine
flowData_transformed <- transform(flowData, `FL1-H` = asinh(`FL1-H`),
                                  `SSC-H` = asinh(`SSC-H`),
                                  `FL3-H` = asinh(`FL3-H`),
                                  `FSC-H` = asinh(`FSC-H`))
param <- c("FL1-H", "FL3-H", "SSC-H", "FSC-H")
flowData_transformed <- flowData_transformed[, param]
remove(flowData)
### Create a PolygonGate for extracting the single-cell information
### Input coordinates for gate in sqrcut1 in format: c(x,x,x,x,y,y,y,y)
sqrcut1 <- matrix(c(8.5, 8.5, 15, 15, 3, 8, 14, 3), ncol = 2, nrow = 4)
colnames(sqrcut1) <- c("FL1-H", "FL3-H")
polyGate1 <- polygonGate(.gate = sqrcut1, filterId = "Total Cells")
### Gating quality check
xyplot(`FL3-H` ~ `FL1-H`, data = flowData_transformed[100], filter = polyGate1,
       scales = list(y = list(limits = c(0, 15)),
                     x = list(limits = c(6, 15))),
       axis = axis.default, nbin = 125,
       par.strip.text = list(col = "white", font = 2, cex = 2), smooth = FALSE, xbins = 750)
### Isolate only the cellular information based on the polyGate1
flowData_transformed <- flowCore::Subset(flowData_transformed, polyGate1)
### Normalize data between [0,1] on average,
### this is required for using the bw=0.01 in the fingerprint calculation.
### Renamed from `summary`/`max` so base summary()/max() are not shadowed.
channel_max <- fsApply(x = flowData_transformed, FUN = function(x) apply(x, 2, max), use.exprs = TRUE)
max_FL1 <- mean(channel_max[, 1])
mytrans <- function(x) x / max_FL1
flowData_transformed <- transform(flowData_transformed, `FL1-H` = mytrans(`FL1-H`),
                                  `FL3-H` = mytrans(`FL3-H`),
                                  `SSC-H` = mytrans(`SSC-H`),
                                  `FSC-H` = mytrans(`FSC-H`))
### optional resample
### Calculate phenotypic diversity
Diversity.Accuri <- Phenoflow::Diversity_rf(flowData_transformed, d = 3, R = 100,
                                            param = param)
### Count nr of cells (the gate re-expressed in the normalised units)
sqrcut1 <- matrix(c(8.5, 8.5, 15, 15, 3, 8, 14, 3) / max_FL1, ncol = 2, nrow = 4)
colnames(sqrcut1) <- c("FL1-H", "FL3-H")
polyGate1 <- polygonGate(.gate = sqrcut1, filterId = "Total Cells")
s <- flowCore::filter(flowData_transformed, polyGate1)
TotalCount <- summary(s)
TotalCount <- toTable(TotalCount)
### Extract the acquisition volumes ($VOL keyword, converted to µL);
### vapply replaces growing a vector inside a for loop
vol.temp <- vapply(seq_along(flowData_transformed),
                   function(i) as.numeric(flowData_transformed[[i]]@description$`$VOL`) / 1000,
                   numeric(1))
### Make count dataframe; counts in cells per µL
Counts.Accuri <- data.frame(Samples = flowData_transformed@phenoData@data$name,
                            counts = TotalCount$true, volume = vol.temp)
### Merge counts/diversity by sample name
tmp <- inner_join(Diversity.Accuri, Counts.Accuri, by = c("Sample_names" = "Samples"))
### Write to file
### BUG FIX: the original wrote the undefined object `results`; the merged
### table produced above is `tmp`.
write.csv2(tmp, file = "files/Lakes_diversityFCM_F.csv")
|
6ea0c7b76774d5357ccb23094c36fadbd26dc344 | 2c2e4085536b36157ac569bc606ce5b42328fad1 | /DE_Chlamy.R | b94a32aad3c4ab7001e20b1c5fd15399d29627f7 | [] | no_license | jpimentabernardes/Transcriptome_Trade-offs | 4d69f4f395cfc843720794547431b59bbdbd5ea9 | 2ece0686f72fc92d0610cdfbe5f3912ddeb8a478 | refs/heads/main | 2023-04-10T04:09:06.135552 | 2021-04-23T08:17:12 | 2021-04-23T08:17:12 | 358,596,539 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,394 | r | DE_Chlamy.R | library(DESeq2)
library(ggplot2)
library(RColorBrewer)
# NOTE(review): setwd() hard-codes a user-specific path; prefer
# project-relative paths for reproducibility.
setwd("~/Documents/MPI/Dynamics/manuscript_multi/Publication_2021/Chlamy_Project_Feb2021/")
# Raw gene counts (genes x samples) and the matching sample metadata table.
Count<-read.csv("TableS2_RawCounts_Average.csv", row.names = 1, sep = ";")
col_data= read.csv("Infotable_Chlamy.csv", sep=";")
# Reorder count columns to match the metadata row order.
count_data=Count[, as.character(col_data$Sample)]
#DESeq norm
dds_counts=DESeqDataSetFromMatrix(countData = count_data, colData =col_data, design = ~ Sample )
dds_counts=dds_counts[ rowSums(counts(dds_counts)) > 1, ]  # drop genes with (near-)zero counts
dds_counts=estimateSizeFactors(dds_counts)
normalized_counts <- counts(dds_counts, normalized=TRUE)
# Regularised-log transform (blind to design) for PCA / visualisation.
rlog_counts <- rlog(dds_counts, blind = TRUE)
rlog.norm.counts <- assay(rlog_counts)
# PCA on samples (hence the transpose: samples become rows).
pc <-prcomp(t(rlog.norm.counts))
pc_df <- as.data.frame(pc$x)
#rownames(Infotable) <- Infotable$Library.Name
pc_df$Sample <- col_data$Sample
pc_df$Predation <- col_data$Predation
pc_df$Treatment <- col_data$Treatment
pdf("pca_plot_new.pdf", width = 10, height = 8)
# Proportion of variance per PC (printed when run interactively).
eigs <- pc$sdev^2
eigs[1] / sum(eigs)
eigs[2] / sum(eigs)
# NOTE(review): the axis labels below hard-code the variance percentages;
# confirm they match the eigs values computed above for the current data.
P <- ggplot(data = pc_df, mapping = aes(x=PC1, y=PC2, color=Predation, shape=Treatment )) +
  geom_point(size=4) + geom_text(aes(label=Treatment), nudge_y = -5000) + xlab("PC1 (45.29% variance)") +
  ylab("PC2 (27.22% variance)")
P <- P + scale_color_manual(values = c("deepskyblue","darkorange1", "darkolivegreen"))
P <- P + scale_size_area(max_size=4)
P <- P + scale_x_continuous(limits=c(-70000, 70000))
# NOTE(review): theme_bw() added after theme() replaces the complete theme,
# which discards the axis text sizes set just before it -- confirm intended.
P <- P + theme(axis.text = element_text(size = 20), axis.title = element_text(size = 22)) + theme_bw()
P
dev.off()
#Differential expression with DESeq2
#Subset accordingly
# (col_data1/count_data1: predation-only subset, kept for the alternative contrast)
col_data1 <- subset(col_data, col_data$Predation %in% c('Predation'))
count_data1 <- count_data[, as.character(col_data1$Sample)]
# Rotifer vs Nitrogen contrast used below
col_data2 <- subset(col_data, col_data$Treatment %in% c('Rotifer', 'Nitrogen'))
count_data2 <- count_data[, as.character(col_data2$Sample)]
dds_counts <- DESeqDataSetFromMatrix(countData = count_data2, colData = col_data2, design = ~ Treatment)
dds_counts <- dds_counts[rowSums(counts(dds_counts)) > 1, ]  # drop (near-)zero genes
dds_counts <- estimateSizeFactors(dds_counts)
dds_norm <- DESeq(dds_counts)
dds_normr <- results(dds_norm)
table(dds_normr$padj < 0.05)  # how many genes pass FDR 5%
# BUG FIX: the original wrote the undefined object `DifExp3`. Export the
# DESeq2 results table instead; written this way the file carries the gene
# ids in column X plus padj/log2FoldChange, which is exactly what the
# heatmap section reads back from 'DifExpression.csv'.
write.csv(as.data.frame(dds_normr), file = "DifExpression.csv")
# Heatmap of the significant genes (padj < 0.05), samples as columns.
data<- read.csv('DifExpression.csv')
data2<- subset(data, data$padj < 0.05)
# Down/Up-regulated gene sets; computed for reference, not used further below
# in this section.
Down <- subset(data2, data2$log2FoldChange<0)
Up <-subset(data2, data2$log2FoldChange>0)
# rlog counts restricted to the significant genes (column X = gene ids).
norm=t(rlog.norm.counts)
count_data1=norm[, as.character(data2$X)]
HM=t(count_data1)  # back to genes x samples for the heatmap
# Order genes by hierarchical clustering on Spearman correlation distance.
data_distance=as.dist(1-cor(t(HM),method='spearman'))
data_hclust=hclust(data_distance)
AveR=HM[c(data_hclust$order),]
# Column side colors: first three samples vs last three samples.
condition_colors <- c(rep("deepskyblue", 3), rep("darkorange1", 3))
pdf("HeatMap_Predation.pdf", width = 10, height = 8)
# NOTE(review): heatmap.2 comes from the gplots package, which is not loaded
# by this script's library() calls -- confirm it is attached elsewhere.
heatmap.2(as.matrix(AveR),
          Rowv = NA, Colv = NA,
          dendrogram = 'none',
          ColSideColors = condition_colors,
          cexRow=0.4, cexCol =1,
          col=rev(brewer.pal(11,"RdBu")),
          scale='row', trace='none',
          labCol=c("Control", "Rotifer", "Nitrogen", "Control", "Rotifer", "Nitrogen"),
          density.info=c("none"),
          margin=c(5,5),
          lhei = c(1,5))
dev.off()
#Table for publishing
Norm<-as.data.frame(rlog.norm.counts)
x<-Norm[,1:3]
Norm$AvePredation<-rowMeans(x)
x<-Norm[,4:6]
Norm$AveNoPredation<-rowMeans(x)
test<-c('MV3', 'MV6')
x<-Norm[test]
Norm$AveNitrogen<-rowMeans(x)
write.csv(Norm, file="TableS3_NormCounts_Average.csv")
|
8e15f6cf2bfaac696a977e1d424e6c9322d2f6d8 | a2019b01eb114767fe19422ecbe3400233284cc3 | /R/modules.R | e0bb2bc3607358f00671d9d0ec0d6eb585569a47 | [
"MIT"
] | permissive | sjspielman/types.of.plots | bfdf760cdaa12cef8583152ce0a726abbfaec12f | 3f21555a061e6020b804931f91bf577a38d02598 | refs/heads/main | 2023-07-18T09:19:10.626635 | 2021-08-27T14:48:48 | 2021-08-27T14:48:48 | 399,623,603 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,912 | r | modules.R | #' Select colors in UI
#' Shiny module UI: choose a coloring strategy
#'
#' Renders a selectInput offering the options in the (file-level) global
#' `color_choices`, plus a colour picker that is revealed only when the user
#' selects "Single color". Pair with `color_module_server()` under the same
#' `id`.
#'
#' @param id Module id (namespaced internally with `NS`).
#' @param label Label shown above the select input.
#' @import shiny
color_module_ui <- function(id, label = "Color all by same color or based on category?" ) {
  ns <- NS(id)
  tagList(
    selectInput(ns("color_style"), label = label,
                choices = color_choices # Single color
    ),
    # The JS condition string must match the input id exactly; `ns =` scopes
    # it to this module's namespace.
    conditionalPanel("input.color_style == 'Single color'", ns = ns,
                     {
                       # `default_color` is a file-level global -- TODO confirm
                       # it is defined wherever this module is used.
                       colourpicker::colourInput(ns("single_color"),
                                                 "Choose color:",
                                                 default_color)
                     }
    )
  )
}
#' Shiny module server: expose the user's color selections
#'
#' Returns a reactive that yields a two-element list holding the currently
#' selected coloring strategy (`color_style`) and single-color value
#' (`single_color`), for consumption by the calling server.
#'
#' @param id Module id matching the one passed to `color_module_ui()`.
#' @import shiny
color_module_server <- function(id) {
  module_body <- function(input, output, session) {
    # The reactive is the module's return value.
    shiny::reactive(
      list(
        color_style = input$color_style,
        single_color = input$single_color
      )
    )
  }
  shiny::moduleServer(id, module_body)
}
#' Shiny module UI: plot with a collapsible "show code" panel
#'
#' Lays out a gear dropdown button (which reveals the plotting code as
#' verbatim text) next to the rendered plot. Pair with
#' `display_plot_code_module_server()` under the same `id`.
#'
#' @param id Module id (namespaced internally with `NS`).
#' @param width,height Dimensions forwarded to `plotOutput()`.
#' @import shiny
display_plot_code_module_ui <- function(id, width = "600px", height = "400px"){
  ns <- NS(id)
  tagList(
    fluidRow(
      # Narrow column: dropdown revealing the code used to build the plot.
      column(1,shinyWidgets::dropdownButton(
        h3("Code:"),
        verbatimTextOutput(ns("plot_code")),
        circle = FALSE, status = "warning",
        icon = icon("gear"), width = "600px"
      )),
      # Wide column: the plot itself.
      column(11, plotOutput(ns("plot"), width = width, height = height))
    ), # fluidRow
    br()
  )
}
#' Shiny module server: render a plot from a code string
#'
#' Evaluates the reactive string of plotting code to draw the plot, and also
#' displays the identical string verbatim so users can copy it.
#'
#' @param id Module id matching `display_plot_code_module_ui()`.
#' @param plot_string A reactive returning the plot code as a single string.
#' @import shiny
display_plot_code_module_server <- function(id, plot_string)
{
  ## MUST REFER TO plot_string as plot_string() (not in function definition, but in body)
  # Otherwise module won't be reactive.
  shiny::moduleServer(
    id,
    function(input, output, session) {
      output$plot <- shiny::renderPlot({
        # NOTE(review): eval(parse(...)) executes arbitrary R code. Acceptable
        # only because plot_string is generated by the app itself; never feed
        # it user-supplied text.
        eval(parse(text = plot_string()))
      })
      output$plot_code <- shiny::renderText({plot_string()})
    }
  )
}
|
c4d6cb62eb892914fd3a10d3d19eb18bc0c54907 | 3774149a542831968202c6d3006b7ae164dd00be | /randomForest_AV.R | 7e4b3c63030c5c66ae829f3bba3110d01f298466 | [] | no_license | SAICHARAN-J/Loan_Default_Prediction | 9ecc9c405af2a72eb1eb0204432cb1c021923924 | 56cbbf7f2377500ca06767aa2c133f84ad35c34a | refs/heads/master | 2021-01-18T07:48:40.882824 | 2017-08-15T07:52:49 | 2017-08-15T07:52:49 | 100,353,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,883 | r | randomForest_AV.R | #Reading the training_set
dataset <- read.csv("train.csv")
#Factor Variables
dataset$Married <- factor(dataset$Married, levels = c("Yes","No"),labels = c(1,0))
dataset$Education <- factor(dataset$Education, levels = c("Graduate","Not Graduate"), labels = c(1,0))
dataset$Loan_Amount_Term <- as.factor(dataset$Loan_Amount_Term)
dataset$Credit_History <- as.factor(dataset$Credit_History)
dataset$Property_Area <- factor(dataset$Property_Area, levels = c("Urban","Rural","Semiurban"), labels = c(0,1,2))
dataset$Loan_Status <- factor(dataset$Loan_Status, levels = c("Y","N"), labels = c(1,0))
write.csv(dataset,"dataset_factored.csv")
#Analysis
colSums(is.na(dataset))
table(dataset$Gender, dataset$Loan_Status)
table(dataset$Education, dataset$Loan_Status)
#Mode parameter to Married column missing values.
married_table <- as.data.frame(table(dataset$Married))
dataset[is.na(dataset$Married),]$Married <- married_table[which.max(married_table$Freq),]$Var1
#Missing values in Credit_history
table(dataset$Credit_History,dataset$Loan_Status)
dataset[is.na(dataset$Credit_History),]$Credit_History <- 1
#Checking Credibility
#Checking Co-Relation
cor.test((as.numeric(as.character(dataset$Credit_History))),as.numeric(as.character(dataset$Loan_Status)))
ggplot(dataset[!is.na(dataset$LoanAmount),], aes(x = sort(dataset[!is.na(dataset$LoanAmount),]$ApplicantIncome), y = sort(dataset[!is.na(dataset$LoanAmount),]$LoanAmount))) + geom_point()
cor.test(dataset$ApplicantIncome,dataset$LoanAmount)
#Polynomial Regression for Missing Values - Loan Amount
poly <- data.frame(LoanAmount = dataset$LoanAmount, Income = dataset$ApplicantIncome)
poly$appincome4 = poly$Income ^ 4
poly$appincome5 = poly$Income ^ 5
poly$appincome6 = poly$Income ^ 6
train <- poly[!is.na(poly$LoanAmount),]
test <- poly[is.na(poly$LoanAmount),]
regressor <- lm(LoanAmount ~ . , data = poly)
summary(regressor)
prediction <- predict(regressor, test[c(-1)])
prediction <- round(prediction)
test$LoanAmount <- prediction
#fitting missing loan amount values to the dataset
dataset[is.na(dataset$LoanAmount),]$LoanAmount <- prediction
Loan_Term_Table <- as.data.frame(table(dataset$Loan_Amount_Term))
dataset[is.na(dataset$Loan_Amount_Term),]$Loan_Amount_Term <- Loan_Term_Table[which.max(Loan_Term_Table$Freq),]$Var1
#------------------------------
data <- dataset
data$year <- (data$LoanAmount * 1000) / as.numeric(as.character(data$Loan_Amount_Term))
data$year <- data$year * 12
data$diff <- data$ApplicantIncome - data$year
data$diff_value <- ifelse(data$diff > 0 , 1 , 0)
data <- data[c(-1,-14,-15)]
data <- data[c(-3,-5,-9)]
data <- data[c(-1)]
classifier <- randomForest(x = data[,-8], y = data$Loan_Status, ntree = 80)
summary(classifier)
#------------------------------
# ---- Test data: mirror the training preprocessing, then predict ------------
# NOTE(review): this block duplicates the training pipeline almost verbatim;
# extracting a shared preprocessing function would prevent the two copies
# from drifting apart.
dataset <- read.csv("test.csv")
#Factor Variables
dataset$Married <- factor(dataset$Married, levels = c("Yes","No"),labels = c(1,0))
dataset$Education <- factor(dataset$Education, levels = c("Graduate","Not Graduate"), labels = c(1,0))
dataset$Loan_Amount_Term <- as.factor(dataset$Loan_Amount_Term)
dataset$Credit_History <- as.factor(dataset$Credit_History)
dataset$Property_Area <- factor(dataset$Property_Area, levels = c("Urban","Rural","Semiurban"), labels = c(0,1,2))
write.csv(dataset,"dataset_factored_test.csv")
#Analysis
colSums(is.na(dataset))
# Impute missing Credit_History with 1, matching the training treatment.
# NOTE(review): unlike the training block, missing Married values are NOT
# imputed here -- verify the test set has none, or add the same mode fill.
dataset[is.na(dataset$Credit_History),]$Credit_History <- 1
#Polynomial Regression for Missing Values - Loan Amount
# Re-fit the income-polynomial model on the TEST data to fill missing amounts.
poly <- data.frame(LoanAmount = dataset$LoanAmount, Income = dataset$ApplicantIncome)
poly$appincome4 = poly$Income ^ 4
poly$appincome5 = poly$Income ^ 5
poly$appincome6 = poly$Income ^ 6
train <- poly[!is.na(poly$LoanAmount),]
test <- poly[is.na(poly$LoanAmount),]
# NOTE(review): as in the training block, the fit uses `poly` rather than
# `train` (equivalent because lm() drops NA responses).
regressor <- lm(LoanAmount ~ . , data = poly)
summary(regressor)
prediction <- predict(regressor, test[c(-1)])
prediction <- round(prediction)
test$LoanAmount <- prediction
#fitting missing loan amount values to the dataset
dataset[is.na(dataset$LoanAmount),]$LoanAmount <- prediction
# Impute missing Loan_Amount_Term with its mode.
Loan_Term_Table <- as.data.frame(table(dataset$Loan_Amount_Term))
dataset[is.na(dataset$Loan_Amount_Term),]$Loan_Amount_Term <- Loan_Term_Table[which.max(Loan_Term_Table$Freq),]$Var1
#------------------------------
# Same feature engineering as for training (yearly repayment vs income).
data <- dataset
data$year <- (data$LoanAmount * 1000) / as.numeric(as.character(data$Loan_Amount_Term))
data$year <- data$year * 12
data$diff <- data$ApplicantIncome - data$year
data$diff_value <- ifelse(data$diff > 0 , 1 , 0)
# NOTE(review): the drop positions differ from the training block
# (c(-1,-13,-14) vs c(-1,-14,-15)) because the test set lacks Loan_Status --
# confirm the resulting columns line up with the classifier's predictors.
data <- data[c(-1,-13,-14)]
data <- data[c(-3,-5,-9)]
data <- data[c(-1)]
# Predict with the forest trained above and map factor labels back to Y/N.
y_pred <- predict(classifier, newdata = data)
y_pred <- ifelse(as.integer(as.character(y_pred)) == 0, "N","Y")
table(y_pred)
y_pred
# Export the submission file (id + predicted status).
av <- data.frame(Loan_ID = dataset$Loan_ID, Loan_Status = y_pred)
write.csv(av,"av_rr.csv",row.names = F)
b56270ccde5488fd0d523267bc5623bcc81574d3 | ed16bfed5c94e8e4b8ddf1e106b3a85358a37099 | /scratch.R | 3d156cb17d125d71ec73b7cef137c6ca4b550ff9 | [] | no_license | andyprice2/10-08-19 | d2b074ca6be6cb1494e93fcf4185a2b573e846a5 | 00fa1e03e66ef31d08afc6361f85605ab6bcc15e | refs/heads/master | 2020-08-08T03:09:12.322920 | 2019-10-10T16:46:03 | 2019-10-10T16:46:03 | 213,690,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,139 | r | scratch.R | library(tidyverse)
dice <- function() {
sample(1:6, size = 1, replace = TRUE)
}
twicedice <- function(n_pairs = 2) {
results <- vector(mode = "integer", length = n_pairs)
for (i in 1:n_pairs) {
results[i] <- (dice() + dice())
}
print(results)
}
mapdice <- function(n_pairs = 2) {
results <- vector(mode = "integer", length = n_pairs)
map_int(results, dice)
print(results)
}
x <- tibble(rolls = twicedice(100000))
ggplot(x, aes(x = rolls)) +
geom_histogram()
roll_dice <- function(n = 1) {
map_int(1:n, ~ dice() + dice())
}
x <- tibble(rolls = roll_dice(100000))
x <- x %>%
mutate(include_7_or_11 = ifelse(rolls %in% c(7, 11), TRUE, FALSE)) %>%
summarize(prop)
list_col <- tibble(
replication = 1:100,
throws=map(1:100, ~ roll_dice(7))
)
with_7_11 <- list_col %>%
mutate(both_7_and_11 = ifelse(throws %>% c(7), TRUE, FALSE))
for
unlist(list_col[i, "throws"])
ggplot(x, aes(x = rolls)) +
geom_histogram()
props <- x %>%
count(rolls) %>%
mutate(prop = n / sum(n)) %>%
props() %>%
filter(rolls == "7") %>%
pull(prop) +
props %>%
filter(rolls == "11") %>%
pull(prop)
|
a9ee791f9376ec96d24da5f0349ab7030d62b120 | 254e700f6a6202e24a66a105cba814856e9e1b30 | /script_emisiones_finca.R | f08f3d2ee8a30b3758f6f9fe12f7649a24779cfd | [] | no_license | FAO-EC/Farm_livestock_direct_emissions_Ecuador | 94f67df0c80e68d64be9e5ceb620a6c2119c5bf4 | c6a84a352111425d2c69879a0898331700a58b51 | refs/heads/master | 2022-01-03T03:36:39.538648 | 2020-05-05T18:20:54 | 2020-05-05T18:20:54 | 250,617,146 | 0 | 1 | null | 2021-12-30T09:52:10 | 2020-03-27T18:42:32 | R | ISO-8859-1 | R | false | false | 51,789 | r | script_emisiones_finca.R | ## SCRIPT FOR EMISSIONS ESTIMATION
## FARM LEVEL
## GANADERIA CLIMATICAMENTE INTELIGENTE
## 2019
## ARMANDO RIVERA
## armando.d.rivera@outlook.com
## BASED ON
## GLEAM 2.0 (FEB. 2017)
## http://www.fao.org/gleam/resources/es/
## The script automate the formulas from
## the GLEAM model for cattle production
##
## The results show:
## production estimation in liters and kg of meat
## Direct emissions:
## CH4 (methane) emissions from enteric fermentation
## CH4 emissions from manure management
## N2O (nitrous oxide) emissions from manure management
## N2O emissions from manure in pastures
## The emissions are converted to CO2-eq
## INITIALS
## AF = ADULT FEMALES (VACAS)
## AM = ADULT MALES (TOROS)
## YF = YOUNG FEMALES (VACONAS)
## YM = YOUNG MALES (TORETES)
## MF = MEAT FEMALES (HEMBRAS DE CARNE)
## MM = MEAT MALES (MACHOS DE CARNE)
## OT = OTHER CATEGORIES ANIMALS
## (OTRAS CATEGORIAS DE ANIMALES
## FUERA DE LAS VACAS)
## Input data is the total number in
## one calendar year selected for the
## evaluation
## In case of weights and ages, it is
## the average in the calendar year.
########################################
## LIBRARIES
########################################
library(xlsx) ## EXCEL FILES MANAGMENT
library(leaflet) ## INTERACTIVE MAPS
library(dplyr) ## MATRIX MANAGMENT
library(raster)## RASTER MANAGMENT
library(rgdal) ## GEODATA MANAGMENT
########################################
##FUNCTIONS
########################################
## -------------------------------------
## DATA CLASSIFICATION
## -------------------------------------
#' Classify a value into one of three categories.
#'
#' @param value_rec Numeric value to classify.
#' @param min1 Lower threshold: values below min1 map to class1.
#' @param min2 Upper threshold: values above min2 map to class3;
#'   values in the closed interval [min1, min2] map to class2.
#' @param class1,class2,class3 Labels returned for each interval.
#' @return One of class1, class2 or class3.
#'
#' Fix: the original final branch tested `min2 < temp_resample`, referencing
#' the undefined global `temp_resample` instead of `value_rec`, so any value
#' above min2 raised an "object not found" error. Since the three intervals
#' are exhaustive, the last case is simply `else`.
reclass <- function(value_rec, min1, min2,
                    class1, class2, class3) {
  if (min1 > value_rec) {
    new_class <- class1
  } else if (min1 <= value_rec & min2 >= value_rec) {
    new_class <- class2
  } else {
    new_class <- class3
  }
  return(new_class)
}
## -------------------------------------
## EMISSIONS ESTIMATION
## -------------------------------------
## Compute emissions from cattle production
## based on the GLEAM model
##
## The results show:
## production estimation in liters and kg of meat
## Direct emissions:
## CH4 (methane) emissions from enteric fermentation
## CH4 emissions from manure management
## N2O (nitrous oxide) emissions from manure management
## N2O emissions from manure in pastures
## The emissions are converted to CO2-eq
farm_emissions = function(
## CSV FILES
## DIGESTIBILITY (PERCENTAGE)
## PROTEIN NITROGEN (gN/kg DRY MATTER)
## MIN = MINIMUM (LITERATURE REVIEW)
## MAX = MAXIMUM (LITERATURE REVIEW)
##
## IF LAB ANALYSIS IS USED, PUT THE
## SAME VALUE IN MAX AND MIN
main_pasture_list, #csv main pasture
mixture_pasture_list, #csv mixture pastures
cut_pasture_list, #csv cut pastures
diet_list, #csv diet supplements
## FARM DATA
farm_name, #string
year, #string
longitude, #float number
latitude, #float number
main_product, #string
## options: Leche, Carne
## Leche = milk, Carne = meat
## number in one year
## including death and sold animals
adult_females, #integer number
adult_females_milk, #integer number
## adult females producing milk
young_females, #integer number
female_calves, #integer number
adult_males, #integer number
young_males, #integer number
male_calves, #integer number
death_adult_females, #integer number
death_female_calves, #integer number
death_adult_males, #integer number
death_male_calves, #integer number
slaughtered_adult_females, #integer number
sold_adult_females, #integer number
slaughtered_adult_males, #integer number
sold_adult_males, #integer number
total_births, #integer number
age_first_calving_months, #float number
## average (kg)
adult_females_weight, #float number
female_calves_weight, #float number
adult_males_weight, #float number
male_calves_weight, #float number
slaughtered_young_females_weight, #float number
slaughtered_young_males_weight, #float number
milk_fat, #float number (percentage)
milk_protein, #float number (percentage)
milk_yield_liters_animal_day, #float number
lactancy_period_months, #float number
pasture_area_ha, #float number (hectares)
adult_females_feed_pasture_age,
other_categories_feed_pasture_age,
## options: 1, 2, 3
## 1 = 0 - 25 days
## 2 = 25 - 60 days
## 3 = more than 60 days
mixture_pasture_ha, #float number (hectares)
## daily kg of cut and carry pasture
adult_females_feed_cut_pasture_kg, #float number (kg)
other_categories_feed_cut_pasture_kg, #float number (kg)
productive_system, #string
## options: MARGINAL, MERCANTIL, COMBINADO, EMPRESARIAL
## MARGINAL = no technology in the farm, the livestock
## production is for family consumption
## MERCANTIL = no technology in the farm, the livestock
## production generates incomes.
## COMBINADO = semi-technical farm}, the livestock
## production generates income, labor is hired
## EMPRESARIAL = full technology in the farm, the livestock
## production goes to the industry or is exported
##
## MAGAP. (2008). Metodología de Valoración de
## Tierras Rurales
## Manure managment
## percentage of the manure on each system
## Check GLEAM for a description of each system
manure_in_pastures, #integer (percentage), no managment
manure_daily_spread, #integer (percentage)
manure_liquid_storage, #integer (percentage)
manure_compost, #integer (percentage)
manure_drylot, #integer (percentage)
manure_solid, #integer (percentage)
manure_anaerobic, #integer (percentage)
manure_uncoveredlagoon, #integer (percentage)
manure_burned #integer (percentage)
){
########################################
## GLEAM VARIABLES
########################################
AFC = age_first_calving_months/12 # age first calving in years
LACT_PER = lactancy_period_months*30.4 # lactancy period in days
## AFKG = adult female weight
## MFSKG = slaughtered young females weight
## -------------------------------------
## Restriction: If AFKG is less than MFSKG, then
## AFKG = slaughtered young females weight
## MFSKG = adult female weight
##
## It avoids that the weight of young females
## are bigger than adult females
## -------------------------------------
if( adult_females > 0 & young_females > 0 &
adult_females_weight < slaughtered_young_females_weight){
AFKG = slaughtered_young_females_weight #live weight of slaughtered young females
MFSKG = adult_females_weight #live weight of adult females
} else{
AFKG = adult_females_weight #live weight of adult females
MFSKG = slaughtered_young_females_weight #live weight of slaughtered young females
}
## AMKG = adult male weight
## MMSKG = slaughtered young males weight
## -------------------------------------
## Restriction: If AMKG is less than MMSKG, then
## AMKG = slaughtered young males weight
## MMSKG = adult male weight
##
## It avoids that the weight of young males
## are bigger than adult males
## -------------------------------------
if(adult_males > 0 & young_males > 0 &
adult_males_weight < slaughtered_young_males_weight){
AMKG = slaughtered_young_males_weight
MMSKG = adult_males_weight
} else{
AMKG = adult_males_weight
MMSKG = slaughtered_young_males_weight
}
## MILK FAT
## Default values per region in Ecuador
## AMAZONIA = 3.17
## COSTA = 3.98
## SIERRA = 3.72
MILK_FAT = milk_fat
## MILK PROTEIN
## Default values per region in Ecuador
## AMAZONIA = 2.91
## COSTA = 3.42
## SIERRA = 3.01
MILK_PROTEIN = milk_protein
MILK_YIELD = milk_yield_liters_animal_day
##Manure managment
MMSDRYLOT = manure_drylot
MMSSOLID = manure_solid
MMSANAEROBIC = manure_anaerobic
MMSUNCOVEREDLAGOON = manure_uncoveredlagoon
MMSBURNED = manure_burned
MMSCOMPOSTING = manure_compost
MMSDAILY = manure_daily_spread
MMSLIQUID = manure_liquid_storage
MMSPASTURE = manure_in_pastures
########################################
## HERD TRACK
########################################
## INITIALSL FROM GLEAM 2.0 (FEB. 2017)
## http://www.fao.org/gleam/resources/es/
## SEE PAGE 9 (GLEAM 2.0)
AF = adult_females
AM = adult_males
YF = young_females
YM = young_males
## SEE PAGE 12 (GLEAM 2.0)
DR1F = ifelse(female_calves == 0, 0, death_female_calves/
(female_calves + death_female_calves)*100) # death rate female calves
DR1M = ifelse(male_calves == 0, 0, death_male_calves/
(male_calves + death_male_calves)*100) # deatha rate male calves
DR2 = ifelse(AF == 0 & AM==0, 0,(death_adult_females + death_adult_males)/
(AF + AM + death_adult_females + death_adult_males +
slaughtered_adult_females + slaughtered_adult_males +
sold_adult_females + sold_adult_males)*100) # death rate adults
## Calves weight correction
## SEE PAGE 12 (GLEAM 2.0)
if(female_calves_weight == 0 & male_calves_weight > 0){
CKG = male_calves_weight
}
if(female_calves_weight > 0 & male_calves_weight == 0){
CKG = female_calves_weight
}
if(female_calves_weight > 0 & male_calves_weight > 0){
CKG = (female_calves_weight + male_calves_weight)/2
}
if(female_calves_weight == 0 & male_calves_weight == 0){
CKG = 0
}
## Rates
## SEE PAGE 12 (GLEAM 2.0)
FRRF = 95 # Rate of fertile replacement females, default value 95
RRF = ifelse(AF == 0, 0, (YF - death_adult_females - slaughtered_adult_females)/
(AF + death_adult_females + slaughtered_adult_females +
sold_adult_females) * 100) # Replacement rate adult females
## SEE PAGE 13 (GLEAM 2.0)
ERF = ifelse(AF == 0, 0, (slaughtered_adult_females + sold_adult_females)/
(AF + death_adult_females + slaughtered_adult_females +
sold_adult_females) * 100) # Exit rate adult females
ERM = ifelse(AM == 0, 0, (slaughtered_adult_males + sold_adult_males)/
(AM + death_adult_males + slaughtered_adult_males +
sold_adult_males) * 100) # Exit rate adult males
## Fertility rate
## For a dairy system, FR is associated to adult females milk
## For other systems, FR is associated to AF
if(main_product == "Leche"){
FR = ifelse(adult_females_milk == 0, 0,
ifelse(total_births > adult_females_milk, 100,
(total_births/adult_females_milk)*100))
} else {
FR = ifelse(AF == 0, 0,
ifelse(total_births > AF, 100,
(total_births/AF)*100))
}
## DIET SUPPLIES TYPES
## -------------------------------------
##
## DIGESTIBILITY OF FOOD
## (PORCENTAJE)
##
## PROTEIN CONTENT
## (gN/kg Dry matter)
## -------------------------------------
## SEE PAGE 52 (GLEAM 2.0)
## Digestible energy percentage
if(productive_system=="MARGINAL"){
DE_percentage = 45
} else if(productive_system=="MERCANTIL"){
DE_percentage = 50
} else if(productive_system=="COMBINADO"){
DE_percentage = 55
} else if(productive_system=="EMPRESARIAL"){
DE_percentage = 60
}
## estimated dietary net energy
if(productive_system=="MARGINAL"){
grow_nema = 3.5
} else if(productive_system=="MERCANTIL"){
grow_nema = 4.5
} else if(productive_system=="COMBINADO"){
grow_nema = 5.5
} else if(productive_system=="EMPRESARIAL"){
grow_nema = 6.5
}
## Estimation of dry matter intake for mature dairy cows
if(main_product == "Leche"){
DMI_AF = ((5.4*AFKG)/500)/((100-DE_percentage)/100)
}
## Estimation of dry matter intake for growing and finishing cattle
if(main_product == "Carne"){
DMI_AF = AFKG^0.75*((0.0119*grow_nema^2+0.1938)/grow_nema)
}
## Growing animals
DMI_YF = MFSKG^0.75*((0.2444*grow_nema-0.0111*grow_nema^2-0.472)/grow_nema)
DMI_YM = MMSKG^0.75*((0.2444*grow_nema-0.0111*grow_nema^2-0.472)/grow_nema)
DMI_female_calves = female_calves_weight^0.75*((0.2444*grow_nema-0.0111*grow_nema^2-0.472)/grow_nema)
DMI_male_calves = male_calves_weight^0.75*((0.2444*grow_nema-0.0111*grow_nema^2-0.472)/grow_nema)
## AM
DMI_AM=AMKG^0.75*((0.0119*grow_nema^2+0.1938)/grow_nema)
## Avergae OT (OTHER CATEGORIES NO AF)
DMI_OT = (DMI_female_calves+DMI_male_calves+DMI_YM+DMI_YF+DMI_AM)/5
## DRY MATTER DIET LIST
diet_list$ms_AF = diet_list$adult_female_feed_kg*(diet_list$dry_matter_percentage/100)
diet_list$ms_OT = diet_list$other_categories_feed_kg*(diet_list$dry_matter_percentage/100)
## DRY MATER PASTURES
## CUT AND TAKE PASTURES
ms_cut_pasture_AF = adult_females_feed_cut_pasture_kg * 0.2316
ms_cut_pasture_OT = other_categories_feed_cut_pasture_kg * 0.2316
cut_pasture_list$cut_d = cut_pasture_list$digestibility_percentage_max
cut_pasture_list$cut_n = cut_pasture_list$nitrogen_content_max
ms_cut_pasture_d = mean(cut_pasture_list$cut_d)
ms_cut_pasture_n = mean(cut_pasture_list$cut_n)
diet_list = rbind(diet_list, c(NA,NA,ms_cut_pasture_d,ms_cut_pasture_n,0,0,0,ms_cut_pasture_AF,ms_cut_pasture_OT))
ms_AF_total = sum(diet_list$ms_AF)
ms_OT_total = sum(diet_list$ms_OT)
DMI_AF_direct_pasture = ifelse((DMI_AF - ms_AF_total)<=0,0,DMI_AF - ms_AF_total)
DMI_OT_direct_pasture = ifelse((DMI_OT - ms_OT_total)<=0,0,DMI_OT - ms_OT_total)
## MIXTURE PASTURE FEEDING
mixture_pasture_percentage1 = ifelse(pasture_area_ha==0,0,mixture_pasture_ha/pasture_area_ha)
mixture_pasture_percentage = ifelse(mixture_pasture_percentage1>1,1,mixture_pasture_percentage1)
ms_mix_pasture_AF = DMI_AF_direct_pasture * mixture_pasture_percentage
ms_mix_pasture_OT = DMI_OT_direct_pasture * mixture_pasture_percentage
mixture_pasture_list$mix_d = mixture_pasture_list$digestibility_percentage_max
mixture_pasture_list$mezcla_n = mixture_pasture_list$nitrogen_content_max
ms_mix_pasture_d = mean(mixture_pasture_list$mix_d)
ms_mix_pasture_n = mean(mixture_pasture_list$mezcla_n)
diet_list = rbind(diet_list, c(NA,NA,ms_mix_pasture_d,ms_mix_pasture_n,0,0,0,ms_mix_pasture_AF,ms_mix_pasture_OT))
## DIRECT PASTURE FEEDING
ms_direct_pasture_AF = ifelse((DMI_AF_direct_pasture - ms_mix_pasture_AF)<=0,0,DMI_AF_direct_pasture - ms_mix_pasture_AF)
ms_direct_pasture_OT = ifelse((DMI_OT_direct_pasture - ms_mix_pasture_OT)<=0,0,DMI_OT_direct_pasture - ms_mix_pasture_OT)
ms_direct_pasture_digestibility_percentage_max = mean(main_pasture_list$digestibility_percentage_max)
ms_direct_pasture_digestibility_percentage_min = mean(main_pasture_list$digestibility_percentage_min)
ms_direct_pasture_nitrogen_content_max = mean(main_pasture_list$nitrogen_content_max)
ms_direct_pasture_nitrogen_content_min = mean(main_pasture_list$nitrogen_content_min)
if(adult_females_feed_pasture_age == 1){
ms_direct_pasture_AF_d = ms_direct_pasture_digestibility_percentage_max
ms_direct_pasture_AF_n = ms_direct_pasture_nitrogen_content_max
} else if(adult_females_feed_pasture_age == 2){
ms_direct_pasture_AF_d = (ms_direct_pasture_digestibility_percentage_max + ms_direct_pasture_digestibility_percentage_min) / 2
ms_direct_pasture_AF_n = (ms_direct_pasture_nitrogen_content_max + ms_direct_pasture_nitrogen_content_min) / 2
} else if(adult_females_feed_pasture_age == 3){
ms_direct_pasture_AF_d = ms_direct_pasture_digestibility_percentage_min
ms_direct_pasture_AF_n = ms_direct_pasture_nitrogen_content_min
}
if(other_categories_feed_pasture_age == 1){
ms_direct_pasture_OT_d = ms_direct_pasture_digestibility_percentage_max
ms_direct_pasture_OT_n = ms_direct_pasture_nitrogen_content_max
} else if(other_categories_feed_pasture_age == 2){
ms_direct_pasture_OT_d = (ms_direct_pasture_digestibility_percentage_max + ms_direct_pasture_digestibility_percentage_min) / 2
ms_direct_pasture_OT_n = (ms_direct_pasture_nitrogen_content_max + ms_direct_pasture_nitrogen_content_min) / 2
} else if(other_categories_feed_pasture_age == 3){
ms_direct_pasture_OT_d = ms_direct_pasture_digestibility_percentage_min
ms_direct_pasture_OT_n = ms_direct_pasture_nitrogen_content_min
}
diet_list = rbind(diet_list, c(NA,NA,ms_direct_pasture_AF_d,ms_direct_pasture_AF_n,0,0,0,ms_direct_pasture_AF,0))
diet_list = rbind(diet_list, c(NA,NA,ms_direct_pasture_OT_d,ms_direct_pasture_OT_n,0,0,0,0,ms_direct_pasture_OT))
if(sum(diet_list$ms_AF)==0){
diet_list$AF = 0
} else (
diet_list$AF = diet_list$ms_AF/sum(diet_list$ms_AF)*100
)
if(sum(diet_list$ms_OT)==0){
diet_list$OT = 0
} else (
diet_list$OT = diet_list$ms_OT/sum(diet_list$ms_OT)*100
)
## DIGESTIBILITY CALCULATION
## SEE PAGE 52 (GLEAM 2.0)
diet_list$AFLCIDE = diet_list$AF*diet_list$digestibility_percentage
diet_list$OTLCIDE = diet_list$OT*diet_list$digestibility_percentage
diet_list$AFLCIN = diet_list$AF*diet_list$nitrogen_content
diet_list$OTLCIN = diet_list$OT*diet_list$nitrogen_content
## FEED VARIABLES
## AVERAGE DIGESTIBILITY OF THE AF DIET
## (DIETDI)
AFLCIDE = ifelse(sum(diet_list$AFLCIDE)==0,1,sum(diet_list$AFLCIDE)/100)
## AVERAGE DIGESTIBILITY OF THE OT DIET
## (DIETDI)
OTLCIDE = ifelse(sum(diet_list$OTLCIDE)==0,1,sum(diet_list$OTLCIDE)/100)
## AVERAGE NITROGEN OF THE AF DIET
## (DIETNCONTENT)
AFLCIN = ifelse(sum(diet_list$AFLCIN)==0,1,sum(diet_list$AFLCIN)/100)
## AVERAGE NITROGEN OF THE OT DIET
##(DIETNCONTENT)
OTLCIN = ifelse(sum(diet_list$OTLCIN)==0,1,sum(diet_list$OTLCIN)/100)
## -------------------------------------
## HERD CALCULATIONS
## -------------------------------------
##
## 2.1.2.1 FEMALE SECTION
## SEE PAGE 13 (GLEAM 2.0)
AFIN = (RRF/ 100) * AF
AFX = AF * (DR2 / 100)
AFEXIT = AF * (ERF / 100)
CFIN = AF * ((1 - (DR2 / 100)) * (FR / 100) + (RRF / 100)) * 0.5 * (1 - (DR1F / 100))
RFIN = (AFIN / (FRRF/100)) / ((1 - (DR2 / 100))^AFC)
MFIN = CFIN - RFIN
RFIN = ifelse((MFIN < 0),RFIN+MFIN,RFIN)
MFIN = ifelse((MFIN < 0),0,MFIN)
RFEXIT = (((RRF / 100) * AF) / (FRRF/100)) - AFIN
RF = (RFIN + AFIN) / 2
ASF = ifelse(AFC == 0, 0, (MFSKG - CKG) / (AFKG - CKG) * AFC)
ASF1 = ifelse(ASF <= 0, 0, ASF) #####AUMENTAR2020
MFEXIT = MFIN * ((1 - (DR2 / 100))^ASF1) #####AUMENTAR2020
MF = (MFIN + MFEXIT) / 2
## 2.1.2.2 MALE SECTION
## SEE PAGE 14 (GLEAM 2.0)
AMX = AM * (DR2 / 100)
RRM = ifelse(AFC == 0, 0, 1 / AFC)
AMEXIT = AF * (ERM / 100) ##AMEXIT = (AM * RRM) - AMX ###AGREGAR
CMIN = AF * ((1 - (DR2 / 100)) * (FR / 100) + (RRF / 100)) * 0.5 * (1 - (DR1M / 100))
AMIN = ifelse(AFC == 0, 0, AM / AFC)
RMIN = AMIN / ((1 - (DR2 / 100))^AFC)
MMIN = CMIN - RMIN
RMIN = ifelse((MMIN < 0),RMIN+MMIN,RMIN)
MMIN = ifelse((MMIN < 0),0,MMIN)
RM = ((RMIN + AMIN) / 2)
ASM = ifelse(AFC == 0, 0, (MMSKG - CKG) / (AMKG - CKG) * AFC)
ASM1 = ifelse(ASM <= 0, 0, ASM) #####AUMENTAR2020
MMEXIT = MMIN * ((1 - (DR2 / 100))^ASM1)#####AUMENTAR2020
MM = (MMIN + MMEXIT) / 2
MILK_YIELD_KG = MILK_YIELD*1.032
## 2.1.2.5 WEIGHT SECTION
## SEE PAGE 16 (GLEAM 2.0)
MFKG = ifelse(MFSKG == 0, 0,(MFSKG - CKG) / 2 + CKG)
MMKG = ifelse(MMSKG == 0, 0,(MMSKG - CKG) / 2 + CKG)
RFKG = ifelse(AFKG == 0, 0,(AFKG - CKG) / 2 + CKG)
RMKG = ifelse(AMKG == 0, 0,(AMKG - CKG) / 2 + CKG)
GROWF = ifelse(AFC == 0, 0, (AFKG - CKG) / (AFC * 365))
GROWM = ifelse(AFC == 0, 0, (AMKG - CKG) / (AFC * 365))
GROWF = ifelse(GROWF < 0, 0, GROWF)
GROWM = ifelse(GROWM < 0, 0, GROWM)
## -------------------------------------
## HERD PROJECTION
## -------------------------------------
## NEGATIVE VALUES CORRECTION
RF = ifelse(RF<0, 0, RF)
RM = ifelse(RM<0, 0, RM)
MF = ifelse(MF<0, 0, MF)
MM = ifelse(MM<0, 0, MM)
## ANIMAL DISTRIBUTION ACCORDING REPORTED
## WEIGHT
## -------------------------------------
## THE PREVIOS CALCULATIONS MAKE A HERD
## PROJECTION.
## FOR THE CORRECTION
## IT IS ASSUMED THAT AN AFKG (AF WEIGHT)
## EQUAL TO ZERO, IMPLIES THAT THERE IS NO
## AF IN THE HERD AND NO REPLACEMENT
## ANIMALS. THEN, THE VALUE OF MF
## (MEAT FEMALE) AND RF (REEPLACEMENT FEMALES)
## ARE ASSIGNED TO MF
##
## IT IS ASSUMED THAT AN MFSKG (MEAT FEMALE
## WEIGHT) EQUAL TO ZERO, IMPLIES THAT
## THERE ARE NO MEAT ANIMALS. THEN THE VALUE
## OF MF (MEAT FEMALES) AND RF (REEPLACEMENT FEMALES)
## ARE ASSIGNED TO RF
## -------------------------------------
if(AFKG == 0 & MFSKG > 0){
MF = MF + RF
RF = 0
}
if (AFKG > 0 & MFSKG == 0){
RF = RF + MF
MF = 0
}
if(AFKG == 0 & MFSKG == 0){
MF = 0
RF = 0
}
if (AMKG == 0 & MMSKG > 0){
MM = MM + RM
RM = 0
}
if (AMKG > 0 & MMSKG == 0){
RM = RM + MM
MM = 0
}
if (AMKG == 0 & MMSKG == 0){
RM = 0
MM = 0
}
## CORRECTION WITH THE REAL NUMBER OF
## YOUNG ANIMALS REPORTED
## -------------------------------------
## THE INPUT DATA INCLUDE VALUES OF
## YOUNG FEMALES (YF) AND YOUNG MALES
## (YM). THE DISTRIBUTION OF RF, RM,
## MF, MM IS ASSIGNED TO THE SUM OF YF
## AND YM.
## THIS CALCULATION DETERMINES HOW MANY
## ANIMALS BELONG TO EACH CATEGORY
## OF THE YOUNG ANIMALS IN THE FARM.
## -------------------------------------
DAIRY = RF + RM + MF + MM
if(DAIRY == 0){
MF = YF
MM = YM
## MEAT ANIMALS EXIT
## SEE PAGE 14 (GLEAM 2.0)
MFEXIT1 = MF * ((1 - (DR2 / 100))^ASF)
MMEXIT1 = MM * ((1 - (DR2 / 100))^ASF)
} else {
MF = ifelse((RF+MF)==0, 0, MF * (YF+YM) / (DAIRY))
RF = ifelse((RF+MF)==0, 0, RF * (YF+YM) / (DAIRY))
MM = ifelse((RM+MM) ==0, 0, MM * (YF+YM) / (DAIRY))
RM = ifelse((RM+MM)==0, 0, RM * (YF+YM) / (DAIRY))
MFEXIT1 = ifelse((RF+MF)==0, 0, MFEXIT * (YF+YM) / (RM+MM+RF+MF))
MMEXIT1 = ifelse((RM+MM)==0, 0, MMEXIT * (YF+YM) / (RM+MM+RF+MF))
}
## REEPLACEMENT ANIMALS EXIT FOR MEAT
## SEE PAGE 14 (GLEAM 2.0)
RFEXIT1 = ifelse((RF+MF)==0, 0, RFEXIT * (YF+YM) / (RM+MM+RF+MF))
## ADULT ANIMALS EXIT FOR MEAT
AFEXIT1 = AFEXIT
AMEXIT1 = AMEXIT
## 9.1.1 MILK PRODUCTIONE
## LITERS
## SEE PAGE 99 (GLEAM 2.0)
Milk_production = MILK_YIELD * LACT_PER * AF
## 9.1.2 MEAT PRODUCTION
## KG CARCASS
## SEE PAGE 99 (GLEAM 2.0)
## MEAT OF GROWING FEMALE ANIMALS
AFEXITKG = ifelse(AFEXIT1 <= 0, 0, (AFEXIT1 * AFKG))
RFEXITKG = ifelse(RFEXIT1 <= 0, 0, (RFEXIT1 * RFKG))
Meat_production_FF = (AFEXITKG + RFEXITKG)*0.5 ##50% PESO A LA CANAL
##MEAT OF GROWING MALE ANIMALS
Meat_production_FM = ifelse(AMEXIT1 <= 0, 0, (AMEXIT1 * AMKG)*0.5 ) ##50% PESO A LA CANAL
##MEAT OF SLAUGHTERED YOUNG ANIMALS
MFEXITKG = ifelse(MFEXIT1 <= 0, 0, (MFEXIT1 * MFKG))
MMEXITKG = ifelse(MMEXIT1 <= 0, 0, (MMEXIT1 * MMKG))
Meat_production_M = (MFEXITKG + MMEXITKG)*0.5 ##50% PESO A LA CANAL
########################################
## SYSTEM TRACK
########################################
## INITIALS
## AF = ADULT FEMALES (VACAS)
## AFN = ADULT FEMALES NO MILK (VACAS
## SECAS)
## AFM = ADULT FEMALES MILK (VACAS
## EN PRODUCCION)
## AM = ADULT MALES (TOROS)
## YF = YOUNG FEMALES (VACONAS)
## YM = YOUNG MALES (TORETES)
## MF = MEAT FEMALES (HEMBRAS DE CARNE)
## MM = MEAT MALES (MACHOS DE CARNE)
## OT = OTHER ANIMALS (OTRAS CATEGORIAS
## DE ANIMALES FUERA DE LAS VACAS)
kg_variables = c("AF","AM","RF","RM","MM","MF")
# Per-class net-energy budget.  Each step stores its result in the enclosing
# environment under "<tipo><quantity>" via assign(), e.g. "AFNEMAIN".
for(tipo in kg_variables){
##-------------------------------------
## ENERGY
##-------------------------------------
## 3.5.1.1 MAINTENANCE
## SEE PAGE 54 (GLEAM 2.0)
# INPUT
# Maintenance coefficients Cf (MJ/day per kg^0.75):
# lactating cows, non-lactating females, bulls.
CfL = 0.386
CfN = 0.322
CfB = 0.370
# CALCULATION
# Pick the coefficient and live weight for this class.
if (tipo == "AF"){
Cf = CfL
KG = AFKG
}
if (tipo == "AM"){
Cf = CfB
KG = AMKG
}
if (tipo == "MM"){
Cf = CfB
KG = MMKG
}
# Replacement classes get a 0.974 discount on the adult coefficient --
# TODO confirm source of this factor.
if (tipo == "RM"){
Cf = CfB * 0.974
KG = RMKG
}
if (tipo == "MF"){
Cf = CfN
KG = MFKG
}
if (tipo == "RF"){
Cf = CfN * 0.974
KG = RFKG
}
# NE for maintenance = Cf * (live weight)^0.75  (MJ/head/day)
tipo_result = (KG ^ 0.75)*Cf
# OUTPUT
assign(paste(tipo,"NEMAIN", sep = ""), tipo_result)
## 3.5.1.7 PREGNANCY
## SEE PAGE 57 (GLEAM 2.0)
# Only breeding females incur a pregnancy cost.
if (tipo == "AF" | tipo == "RF"){
# INPUT
# Pregnancy coefficient: 10% of maintenance NE.
Cp = 0.1
# CALCULATION
if (tipo == "AF"){
outNEMAIN = AFNEMAIN
# FR is presumably the fertility rate in percent (hence /100) -- confirm.
NEPREG = outNEMAIN * Cp * FR / 100.0
}
if (tipo == "RF"){
outNEMAIN = RFNEMAIN
# AFC is presumably age at first calving; /2 spreads the cost -- confirm.
NEPREG = outNEMAIN * Cp* AFC / 2
}
# OUTPUT
assign(paste(tipo,"NEPREG", sep = ""), NEPREG)
}
## 3.5.1.3 GROWTH
## SEE PAGE 55 (GLEAM 2.0)
# Net energy for growth, growing classes only (replacement and meat stock).
# NE_g = 22.02 * (BW / (Cg * mature BW))^0.75 * (daily gain)^1.097.
if (tipo == "RF" | tipo == "RM" | tipo == "MF" | tipo == "MM"){
  # INPUT: growth coefficients -- females 0.8, intact males 1.2,
  # castrates 1.0.
  CgF = 0.8
  CgM = 1.2
  CgC = 1.0 # for castrated animals
  # Select the class-specific live weight, growth coefficient, mature weight
  # of the matching adult sex, and daily weight gain.
  if (tipo == "RF"){
    KG = RFKG
    Cg = CgF
    MATUREKG = AFKG
    GROW = GROWF
  }
  if (tipo == "MF"){
    KG = MFKG
    Cg = CgF
    MATUREKG = AFKG
    GROW = GROWF
  }
  if (tipo == "RM"){
    KG = RMKG
    Cg = CgM
    MATUREKG = AMKG
    GROW = GROWM
  }
  if (tipo == "MM"){
    KG = MMKG
    Cg = CgC  # meat males treated as castrates
    MATUREKG = AMKG
    GROW = GROWM
  }
  # FIX (consistency): the original guarded the RM/MM branches with
  # (CgF * AMKG) == 0 while dividing by CgM * AMKG resp. CgC * AMKG.
  # The guard now tests the exact denominator used in the formula; since
  # every Cg constant above is non-zero this does not change behavior --
  # it only makes the zero-weight guard match the division it protects.
  NEGRO = ifelse((Cg * MATUREKG) == 0, 0,
                 22.02 * ((KG / (Cg * MATUREKG)) ^ 0.75) * (GROW ^ 1.097))
  # OUTPUT: stored as "<tipo>NEGRO" for the total-energy step below.
  assign(paste(tipo, "NEGRO", sep = ""), NEGRO)
}
## 3.5.1.4 MILK PRODUCTION
## SEE PAGE 56 (GLEAM 2.0)
# Lactation energy: NE per kg milk = 1.47 + 0.40 * fat%.
if (tipo == "AF"){
# CALCULATION
NEMILK = MILK_YIELD_KG * (MILK_FAT * 0.40 + 1.47)
# OUTPUT
assign(paste(tipo,"NEMILK", sep = ""), NEMILK)
}
## 3.5.1.2 ACTIVITY (GRAZING) RANGE = 1; GRAZE = 2
## SEE PAGE 55 (GLEAM 2.0)
# INPUT
# Share of manure on pasture is used as a proxy for grazing time --
# TODO confirm this is the intended interpretation of MMSPASTURE here.
MMSpast = MMSPASTURE
# CALCULATIONS
# Activity NE = 0.36% of maintenance NE per percentage point on pasture.
NEACT = tipo_result * (MMSpast * 0.36 / 100.0)
# OUTPUT
assign(paste(tipo,"NEACT", sep = ""), NEACT)
## 3.5.1.10 TOTAL ENERGY
## SEE PAGE 58 (GLEAM 2.0)
# INPUT GRID
# MAKES THE CALCULATIONS
# Adult females get two totals: AFMNETOT1 (lactating, includes milk NE)
# and AFNNETOT1 (dry, without milk NE).
if (tipo == "AF"){
NETOT1 = AFNEMAIN + AFNEACT + AFNEPREG + AFNEMILK
NETOT2 = AFNEMAIN + AFNEACT + AFNEPREG
# OUTPUT GRID
assign(paste(tipo,"MNETOT1", sep = ""), NETOT1)
assign(paste(tipo,"NNETOT1", sep = ""), NETOT2)
}
# Replacement females also carry a pregnancy term; all other classes are
# maintenance + activity only (growth NE is added separately via NEGRO
# in the GE step below).
if (tipo == "RF"){
NETOT1 = RFNEMAIN + RFNEACT + RFNEPREG
# OUTPUT GRID
assign(paste(tipo,"NETOT1", sep = ""), NETOT1)
}
if (tipo == "AM"){
NETOT1 = AMNEMAIN + AMNEACT
# OUTPUT GRID
assign(paste(tipo,"NETOT1", sep = ""), NETOT1)
}
if (tipo == "RM"){
NETOT1 = RMNEMAIN + RMNEACT
# OUTPUT GRID
assign(paste(tipo,"NETOT1", sep = ""), NETOT1)
}
if (tipo == "MM"){
NETOT1 = MMNEMAIN + MMNEACT
# OUTPUT GRID
assign(paste(tipo,"NETOT1", sep = ""), NETOT1)
}
if (tipo == "MF"){
NETOT1 = MFNEMAIN + MFNEACT
# OUTPUT GRID
assign(paste(tipo,"NETOT1", sep = ""), NETOT1)
}
}
## 3.5.1.8 ENERGY RATIO FOR:
## MAINTENANCE (REM)
## GROWTH (REG)
## SEE PAGE 57 (GLEAM 2.0)
# INPUT
# REM/REG are quadratic-plus-inverse fits in diet digestibility (LCIDE, %):
# cow diet (n=1) vs "other categories" diet (n=2).
# NOTE(review): both formulas divide by LCIDE -- they assume LCIDE > 0.
for (group in c("AF","OT")){
if (group == "AF"){
LCIDE = AFLCIDE
n = 1
}
if (group == "OT"){
LCIDE = OTLCIDE
n = 2
}
# CALCULATIONS
tmpREG = 1.164 - (0.00516 * LCIDE) + (0.00001308 * LCIDE * LCIDE) - (37.4 / LCIDE)
tmpREM = 1.123 - (0.004092 * LCIDE) + (0.00001126 * LCIDE * LCIDE) - (25.4 / LCIDE)
# OUTPUT
# Stored as REG1/REM1 (cow diet) and REG2/REM2 (other diet).
assign(paste("REG",n, sep = ""), tmpREG)
assign(paste("REM",n, sep = ""), tmpREM)
}
## 3.5.1.10 TOTAL ENERGY
## SEE PAGE 58 (GLEAM 2.0)
# INPUT
LCIDE1 = AFLCIDE
LCIDE2 = OTLCIDE
# CALCULATIONS & OUTPUT
# Gross energy: GE = (NE_maintenance/REM + NE_growth/REG) / (DE%/100).
# AFMGE = lactating cows, AFNGE = dry cows; growing classes add a NEGRO term.
AFMGE = (AFMNETOT1 / REM1) / (LCIDE1 / 100.0)
AFNGE = (AFNNETOT1 / REM1) / (LCIDE1 / 100.0)
RFGE = ((RFNETOT1 / REM2) + (RFNEGRO / REG2)) / (LCIDE2 / 100.0)
AMGE = (AMNETOT1 / REM2) / (LCIDE2 / 100.0)
RMGE = ((RMNETOT1 / REM2) + (RMNEGRO / REG2)) / (LCIDE2 / 100.0)
MMGE = ((MMNETOT1 / REM2) + (MMNEGRO / REG2)) / (LCIDE2 / 100.0)
MFGE = ((MFNETOT1 / REM2) + (MFNEGRO / REG2)) / (LCIDE2 / 100.0)
## FEED
## SEE PAGE 68 (GLEAM 2.0)
# Dry-matter intake (kg/head/day) = GE / 18.45 MJ per kg dry matter.
LCIGE = 18.45
AFMINTAKE = AFMGE / LCIGE
AFNINTAKE = AFNGE / LCIGE
RFINTAKE = RFGE / LCIGE
AMINTAKE = AMGE / LCIGE
RMINTAKE = RMGE / LCIGE
MMINTAKE = MMGE / LCIGE
MFINTAKE = MFGE / LCIGE
##-------------------------------------
## METHANE CH4 EMISSIONS
##-------------------------------------
## NUM = ANIMALS NUMBER
## 34 CONVERSION FACTOR CH4 TO CO2EQ
## SEE PAGE 100 (GLEAM 2.0)
## 4.2 FROM ENTERIC FERMENTATION
## SEE PAGE 67 (GLEAM 2.0)
# Methane conversion factor Ym (% of GE lost as CH4) as a linear function
# of diet digestibility; YM1 = cow diet, YM2 = other diet.
for (group in c("AF","OT")){
if (group == "AF"){
LCIDE = AFLCIDE
n = 1
}
if (group == "OT"){
LCIDE = OTLCIDE
n = 2
}
# CALCULATION
Ym = 9.75 - (LCIDE * 0.05)
# OUTPUT
assign(paste("YM",n, sep = ""), Ym)
}
## 4.2 / 4.3 CH4 FROM ENTERIC FERMENTATION AND MANURE MANAGEMENT
## SEE PAGE 67 (GLEAM 2.0)
# FIX (performance, loop-invariant hoisting): the climate rasters and the
# manure-system methane conversion factors (MCF) do not depend on the animal
# class, yet the original re-read both GeoTIFFs and recomputed every MCF on
# each of the seven loop iterations.  They are now computed once, before the
# loop; the resulting values are identical.
temp = raster("data/temp.tif")
temp_resample = as.numeric(extract(temp, matrix(c(longitude, latitude), ncol = 2)))
temp_cutoff = raster("data/temp_cutoff.tif")
temp_cutoff_resample = as.numeric(extract(temp_cutoff, matrix(c(longitude, latitude), ncol = 2)))
# MCFs (%) per manure management system; reclass() appears to bucket by
# temperature with thresholds 14/26 C -- reclass() is defined elsewhere,
# TODO confirm its signature.  Lagoon/liquid MCFs are quadratic fits in the
# cutoff temperature.
MCFSOLID = reclass(temp_resample, 14, 26, 2, 4, 5)
MCFCOMPOSTING = reclass(temp_resample, 14, 26, 0.5, 1, 1.5)
MCFANAEROBIC = 10.0
MCFDAILY = reclass(temp_resample, 14, 26, 0.1, 0.5, 1)
MCFUNCOVEREDLAGOON = 44.953 + 2.6993 * temp_cutoff_resample - 0.0527 * temp_cutoff_resample * temp_cutoff_resample
MCFLIQUID = 19.494 - 1.5573 * temp_cutoff_resample + 0.1351 * temp_cutoff_resample * temp_cutoff_resample
MCFBURNED = 10.0
MCFPASTURE = reclass(temp_resample, 14, 26, 1, 1.5, 2)
MCFDRYLOT = reclass(temp_resample, 14, 26, 1, 1.5, 2)
# Farm-level MCF: weighted by the farm's manure-system shares (MMS*, %).
MCFMANURE = MMSANAEROBIC * MCFANAEROBIC + MMSBURNED * MCFBURNED + MMSCOMPOSTING * MCFCOMPOSTING + MMSDAILY * MCFDAILY + MMSLIQUID * MCFLIQUID + MMSPASTURE * MCFPASTURE + MMSSOLID * MCFSOLID + MMSUNCOVEREDLAGOON * MCFUNCOVEREDLAGOON + manure_drylot * MCFDRYLOT
for (tipo in c("AFN","AFM","AM","RF","RM","MM","MF")) {
  # Adult females (lactating AFM / dry AFN) use the cow-diet digestibility
  # and Ym; every other class uses the "other categories" diet.
  if (tipo %in% c("AFM","AFN")) {
    LCIDE = AFLCIDE
    Ym = YM1
  } else {
    LCIDE = OTLCIDE
    Ym = YM2
  }
  # Gross energy intake computed earlier and stored under "<tipo>GE".
  GE = get(paste(tipo, "GE", sep = ""))
  # Enteric CH4 (kg/head/day): GE * Ym% converted with 55.65 MJ per kg CH4.
  CH41 = (GE * Ym / 100) / 55.65
  # Volatile solids excreted (kg/head/day); 1.04 = 1 + urinary-energy
  # fraction 0.04, 0.92 = 1 - ash fraction, LCIGE = 18.45 MJ/kg DM.
  VS = GE * (1.04 - (LCIDE / 100)) * (0.92 / LCIGE)
  # OUTPUT: stored per class for the annual-totals loop below.
  assign(paste(tipo, "CH41", sep = ""), CH41)
  assign(paste(tipo, "VS", sep = ""), VS)
}
##-------------------------------------
## ANNUAL CH4 (kg CO2eq) PER ANIMAL CLASS
##-------------------------------------
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")) {
  # Days per year each class spends in this state: lactating cows count only
  # the lactation period, dry cows the remainder, everyone else 365 days.
  days = switch(var,
                AFM = LACT_PER,
                AFN = 365.0 - LACT_PER,
                365.0)
  # Lactating and dry cows are both drawn from the adult-female herd AF;
  # every other class has a head-count variable named after itself.
  head_count = get(if (var %in% c("AFM","AFN")) "AF" else var)
  # Per-head daily enteric CH4 and volatile solids stored by the loop above.
  enteric_ch4 = get(paste(var, "CH41", sep = ""))
  volatile_solids = get(paste(var, "VS", sep = ""))
  # Manure-management CH4 per head per day:
  # 0.67 kg/m3 CH4 density * B0 = 0.13 m3 CH4 per kg VS * weighted MCF;
  # the 0.0001 rescales MCFMANURE, which carries two percentage factors.
  manure_ch4 = 0.67 * 0.0001 * 0.13 * MCFMANURE * volatile_solids
  # Annual herd totals in kg CO2eq (34 = CH4 global warming potential).
  assign(paste("CH41CO2TOT", var, sep = ""), days * enteric_ch4 * head_count * 34)
  assign(paste("CH42CO2TOT", var, sep = ""), days * manure_ch4 * head_count * 34)
}
##-------------------------------------
## NITROUS OXIDE N2O EMISSIONS
##-------------------------------------
## NUM = ANIMALS NUMBER
## 298 CONVERSION FACTOR N2O TO CO2EQ
## SEE PAGE 100 (GLEAM 2.0)
## 4.4 FROM MANURE MANAGEMENT
## SEE PAGE 69 (GLEAM 2.0)
## 4.4.1 NITROGEN EXCRETION RATE
## SEE PAGE 69 (GLEAM 2.0)
## STEP 1 INTAKE CALCULATION
# Daily N intake per head: diet N content (LCIN, presumably g N per kg dry
# matter, hence /1000) * dry-matter intake.  Stored as "<var>NINTAKE".
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")){
# INPUT
if (var == "AFM" | var == "AFN"){
if (var == "AFM"){
inTAKE = AFMINTAKE
} else if (var == "AFN"){
inTAKE = AFNINTAKE
}
LCIN = AFLCIN
}
else {
if (var == "AM"){
inTAKE = AMINTAKE
} else if (var == "RF"){
inTAKE = RFINTAKE
} else if (var == "RM"){
inTAKE = RMINTAKE
} else if (var == "MM"){
inTAKE = MMINTAKE
} else if (var == "MF"){
inTAKE = MFINTAKE
}
LCIN = OTLCIN
}
# CALCULATION
NINTAKE = (LCIN / 1000) * inTAKE
# OUTPUT
assign(paste(var, "NINTAKE", sep = ""), NINTAKE)
}
## STEP 2 RETENTION CALCULATION
# CALCULATION
# Daily N retained per head: milk protein N (protein/6.38) and/or growth N
# via (268 - 7.03 * NEg/gain) g protein per kg gain, /6.25 to convert
# protein to N.  Guards return 0 when the growth rate is 0 (division).
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")){
if (var == "AFM"){
# NOTE(review): the calf term (CKG/365 * ...) borrows the replacement
# female's NEg (RFNEGRO) -- presumably a proxy for calf growth; confirm.
NRETENTION = ifelse(GROWF==0, 0, (MILK_YIELD_KG * (MILK_PROTEIN/100)/6.38)+(CKG/365 * (268-(7.03 * RFNEGRO/GROWF))*0.001/6.25)) # TODO (original note: "ADICIONAR" = to add)
}
else if (var == "AM" | var == "AFN"){
NRETENTION = 0
}
else if (var == "RF"){
NRETENTION = ifelse(GROWF==0, 0,(GROWF * (268 - (7.03 * RFNEGRO/GROWF)) * 0.001/6.25) + (CKG/365 * (268-(7.03 * RFNEGRO/GROWF))*0.001/6.25) / AFC) # TODO (original note: "ADICIONAR")
}
else if (var == "MF"){
NRETENTION = ifelse(GROWF==0, 0,(GROWF * (268 - (7.03 * MFNEGRO/GROWF)) * 0.001/6.25)) # TODO (original note: "ADICIONAR")
}
else {
# NOTE(review): this branch covers both RM and MM but uses RMNEGRO for
# both -- MM likely should use MMNEGRO; confirm against GLEAM 2.0.
NRETENTION = ifelse(GROWM==0, 0,(GROWM * (268 - (7.03 * RMNEGRO/GROWM)) * 0.001/6.25)) # TODO (original note: "ADICIONAR")
}
# OUTPUT
assign(paste(var, "NRETENTION", sep = ""), NRETENTION)
}
## STEP 3 N EXCRETION
# Annual N excreted per head = days in state * (N intake - N retention).
# Lactating cows count lactation days, dry cows the remainder, others 365.
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")){
# CALCULATIONS
if (var == "AFN"){
Nintake = AFNNINTAKE
Nretention = AFNNRETENTION
Nx = (365.0 - LACT_PER) * (Nintake - Nretention)
}
else if (var == "AFM"){
Nintake = AFMNINTAKE
Nretention = AFMNRETENTION
Nx = (LACT_PER) * (Nintake - Nretention)
}
else{
if (var == "AM"){
Nintake = AMNINTAKE
Nretention = AMNRETENTION
} else if (var == "RF"){
Nintake = RFNINTAKE
Nretention = RFNRETENTION
} else if (var == "RM"){
Nintake = RMNINTAKE
Nretention = RMNRETENTION
} else if (var == "MM"){
Nintake = MMNINTAKE
Nretention = MMNRETENTION
} else if (var == "MF"){
Nintake = MFNINTAKE
Nretention = MFNRETENTION
}
Nx = 365.0 * (Nintake - Nretention)
}
# OUTPUT
assign(paste(var, "NX", sep = ""), Nx)
## 4.4.2 N2O DIRECT EMISSIONS FROM
## MANURE MANAGEMENT
## SEE PAGE 70 (GLEAM 2.0)
# INPUT
# Direct N2O emission factors (kg N2O-N per kg N) per management system.
N2Olagoon = 0
N2Oliquid = 0.005
N2Osolid = 0.005
N2Odrylot = 0.02
N2Opasture = 0
N2Odaily = 0
N2Oburned = 0.02
N2Oanaerobic = 0
N2Ocomposting = 0.1
if (var == "AFM" | var == "AFN"){
LCIDE = AFLCIDE
}
else {
LCIDE = OTLCIDE
}
# CALCULATIONS
# Weighted EF over the farm's manure-system shares; the burned share is
# scaled by the non-digestible fraction (100 - DE)/100.
N2OCFmanure = MMSANAEROBIC * N2Oanaerobic + MMSBURNED * N2Oburned * (100.0 - LCIDE) / 100 +
    MMSCOMPOSTING * N2Ocomposting + MMSDAILY * N2Odaily + MMSLIQUID * N2Oliquid +
    MMSPASTURE * N2Opasture + MMSSOLID * N2Osolid + MMSUNCOVEREDLAGOON * N2Olagoon + manure_drylot * N2Odrylot
# OUTPUT
assign(paste("N2OCFMAN", var, sep = ""), N2OCFmanure)
}
## Direct N2O from manure management, per animal class.
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")) {
  # Earlier loops stored the annual N excretion as "<var>NX" and the
  # MMS-weighted direct emission factor as "N2OCFMAN<var>"; fetch both by
  # name instead of enumerating every class.
  annual_n_excreted = get(paste(var, "NX", sep = ""))
  direct_ef = get(paste("N2OCFMAN", var, sep = ""))
  # 44/28 converts N2O-N to N2O; the extra /100 (hence /2800) undoes the
  # percentage scale of the MMS shares baked into the emission factor.
  assign(paste(var, "NODIR", sep = ""), direct_ef * annual_n_excreted * 44 / 2800)
}
## 4.4.4 INDIRECT N2O EMISSIONS FROM
## VOLATILIZATION
## SEE PAGE 71 (GLEAM 2.0)
# INPUT
# Fraction of excreted N volatilized as NH3/NOx (%) per management system.
VOLliquid = 40
VOLsolid = 30
VOLpasture = 0
VOLdaily = 7
VOLlagoon = 35
VOLanaerobic = 0
VOLcomposting = 40
VOLdrylot = 20
# CALCULATIONS & OUTPUT
# Weighted volatilization fraction over the farm's manure-system shares.
# Both factors are percentages, so downstream code divides by 10000.
CFVOLMANURE = MMSLIQUID * VOLliquid + MMSSOLID * VOLsolid + MMSPASTURE * VOLpasture + MMSDAILY * VOLdaily +
  MMSUNCOVEREDLAGOON * VOLlagoon + MMSANAEROBIC * VOLanaerobic + MMSCOMPOSTING * VOLcomposting + manure_drylot * VOLdrylot
## Indirect N2O from volatilized N, per animal class.
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")) {
  # Annual N excretion stored earlier under "<var>NX".
  annual_n_excreted = get(paste(var, "NX", sep = ""))
  # CFVOLMANURE mixes two percentage scales, hence the /10000.
  n_volatilized = CFVOLMANURE / 10000 * annual_n_excreted
  # EF = 0.01 kg N2O-N per kg N volatilized; 44/28 converts N2O-N to N2O.
  assign(paste(var, "NOVOL", sep = ""), n_volatilized * 0.01 * 44 / 28)
}
## 4.4.4 INDIRECT N2O EMISSION FROM
## LEACHING
## SEE PAGE 71 (GLEAM 2.0)
# INPUT
# Leached-N fraction rasters, sampled at the farm's coordinates.  Only the
# liquid and solid systems are assumed to leach (see CFLEACHMANURE below).
LEACHliquid_total = raster("data/leachliquid.tif")
LEACHliquid = as.numeric(extract(LEACHliquid_total, matrix(c(longitude,latitude), ncol = 2)))
LEACHsolid_total = raster("data/leachsolid.tif")
LEACHsolid = as.numeric(extract(LEACHsolid_total, matrix(c(longitude,latitude), ncol = 2)))
# CALCULATIONS
# Weighted leaching fraction over the farm's manure-system shares.
CFLEACHMANURE = MMSLIQUID * LEACHliquid + MMSSOLID * LEACHsolid
## Indirect N2O from leached N, per animal class.
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")) {
  # Annual N excretion stored earlier under "<var>NX".
  annual_n_excreted = get(paste(var, "NX", sep = ""))
  # CFLEACHMANURE mixes two percentage scales, hence the /10000.
  n_leached = CFLEACHMANURE / 10000 * annual_n_excreted
  # EF = 0.0075 kg N2O-N per kg N leached; 44/28 converts N2O-N to N2O.
  assign(paste(var, "NOLEACH", sep = ""), n_leached * 0.0075 * 44 / 28)
}
## 4.5 TOTAL N2O EMISSIONS PER ANIMAL
## SEE PAGE 73 (GLEAM 2.0)
for (var in c("AFM","AFN","RF","AM","RM","MM","MF")) {
  # Lactating and dry cows are both drawn from the adult-female herd AF;
  # every other class has a head-count variable named after itself.
  head_count = get(if (var %in% c("AFM","AFN")) "AF" else var)
  # Sum the direct, volatilization and leaching pathways computed above.
  per_head_n2o = get(paste(var, "NODIR", sep = "")) +
    get(paste(var, "NOVOL", sep = "")) +
    get(paste(var, "NOLEACH", sep = ""))
  # Herd total in kg CO2eq (298 = N2O global warming potential).
  assign(paste("NOTOTCO2", var, sep = ""), head_count * per_head_n2o * 298)
}
## 6.2.1 N2O EMISSIONS FROM MANURE DEPOSITED ON PASTURES
## SEE PAGE 82 (GLEAM 2.0)
## 90% pasture dry matter GLEAM2.0
## N retention and excretion per animal type
# One pass per animal class replaces seven copy-pasted stanzas; the same
# variables (<var>Nx, <var>_NXTOTAL, <var>_MANURE, <var>_N2OFEEDMAN) are
# still created in the enclosing environment.
for (var in c("AFN","AFM","RF","AM","RM","MM","MF")) {
  # Lactating and dry cows both come from the adult-female herd AF.
  head_count = get(if (var %in% c("AFN","AFM")) "AF" else var)
  per_head_nx = get(paste(var, "NX", sep = ""))
  assign(paste(var, "Nx", sep = ""), per_head_nx)
  herd_nx = head_count * per_head_nx
  assign(paste(var, "_NXTOTAL", sep = ""), herd_nx)
  # Share of excreted N deposited on pasture (MMSPASTURE is a percentage).
  pasture_n = herd_nx * MMSPASTURE / 100
  assign(paste(var, "_MANURE", sep = ""), pasture_n)
  # Direct EF 0.02 plus indirect volatilization (0.2 * 0.01) and leaching
  # (0.3 * 0.0075) pathways; 44/28 converts N2O-N to N2O, 298 = N2O GWP.
  assign(paste(var, "_N2OFEEDMAN", sep = ""),
         pasture_n * (0.02 + 0.2 * 0.01 + 0.3 * 0.0075) * (44 / 28) * 298)
}
########################################
## RESULTS GENERATION
########################################
# Assemble one results row per farm-year.  Each per-class emission total is
# clamped at zero (ifelse(x < 0, 0, x)): upstream herd-balance terms can go
# negative on inconsistent input data.
finallist = data.frame(
  farm_name = paste(farm_name,"-",year),
  CH4_Enteric_AFM = ifelse(CH41CO2TOTAFM<0,0,CH41CO2TOTAFM),
  CH4_Enteric_AFN = ifelse(CH41CO2TOTAFN<0,0,CH41CO2TOTAFN),
  CH4_Enteric_AM = ifelse(CH41CO2TOTAM<0,0,CH41CO2TOTAM),
  CH4_Enteric_RF = ifelse(CH41CO2TOTRF<0,0,CH41CO2TOTRF),
  CH4_Enteric_RM = ifelse(CH41CO2TOTRM<0,0,CH41CO2TOTRM),
  CH4_Enteric_MM = ifelse(CH41CO2TOTMM<0,0,CH41CO2TOTMM),
  CH4_Enteric_MF = ifelse(CH41CO2TOTMF<0,0,CH41CO2TOTMF),
  CH4_Manure_Management_AFM = ifelse(CH42CO2TOTAFM<0,0,CH42CO2TOTAFM),
  CH4_Manure_Management_AFN = ifelse(CH42CO2TOTAFN<0,0,CH42CO2TOTAFN),
  CH4_Manure_Management_AM = ifelse(CH42CO2TOTAM<0,0,CH42CO2TOTAM),
  CH4_Manure_Management_RF = ifelse(CH42CO2TOTRF<0,0,CH42CO2TOTRF),
  CH4_Manure_Management_RM = ifelse(CH42CO2TOTRM<0,0,CH42CO2TOTRM),
  CH4_Manure_Management_MM = ifelse(CH42CO2TOTMM<0,0,CH42CO2TOTMM),
  CH4_Manure_Management_MF = ifelse(CH42CO2TOTMF<0,0,CH42CO2TOTMF),
  N2O_Manure_Management_AFM = ifelse(NOTOTCO2AFM<0,0,NOTOTCO2AFM),
  N2O_Manure_Management_AFN = ifelse(NOTOTCO2AFN<0,0,NOTOTCO2AFN),
  N2O_Manure_Management_AM = ifelse(NOTOTCO2AM<0,0,NOTOTCO2AM),
  N2O_Manure_Management_RF = ifelse(NOTOTCO2RF<0,0,NOTOTCO2RF),
  N2O_Manure_Management_RM = ifelse(NOTOTCO2RM<0,0,NOTOTCO2RM),
  N2O_Manure_Management_MM = ifelse(NOTOTCO2MM<0,0,NOTOTCO2MM),
  N2O_Manure_Management_MF = ifelse(NOTOTCO2MF<0,0,NOTOTCO2MF),
  N2O_Manure_in_pasture_AFM = ifelse(AFM_N2OFEEDMAN<0,0,AFM_N2OFEEDMAN),
  N2O_Manure_in_pasture_AFN = ifelse(AFN_N2OFEEDMAN<0,0,AFN_N2OFEEDMAN),
  N2O_Manure_in_pasture_AM = ifelse(AM_N2OFEEDMAN<0,0,AM_N2OFEEDMAN),
  N2O_Manure_in_pasture_RF = ifelse(RF_N2OFEEDMAN<0,0,RF_N2OFEEDMAN),
  N2O_Manure_in_pasture_RM = ifelse(RM_N2OFEEDMAN<0,0,RM_N2OFEEDMAN),
  N2O_Manure_in_pasture_MM = ifelse(MM_N2OFEEDMAN<0,0,MM_N2OFEEDMAN),
  N2O_Manure_in_pasture_MF = ifelse(MF_N2OFEEDMAN<0,0,MF_N2OFEEDMAN),
  milk = Milk_production,
  meatm = Meat_production_M,
  meatfm = Meat_production_FM,
  meatff = Meat_production_FF)
# Category totals (kg CO2eq per year).
finallist$TOTAL_CH4_Enteric_Fermentation_kg_CO2eq = finallist$CH4_Enteric_AFM + finallist$CH4_Enteric_AFN + finallist$CH4_Enteric_AM +
  finallist$CH4_Enteric_RF + finallist$CH4_Enteric_RM + finallist$CH4_Enteric_MM + finallist$CH4_Enteric_MF
finallist$TOTAL_CH4_Manure_Managment_kg_CO2eq = finallist$CH4_Manure_Management_AFM + finallist$CH4_Manure_Management_AFN + finallist$CH4_Manure_Management_AM +
  finallist$CH4_Manure_Management_RF + finallist$CH4_Manure_Management_RM + finallist$CH4_Manure_Management_MM + finallist$CH4_Manure_Management_MF
finallist$TOTAL_N2O_Manure_Managment_kg_CO2eq = finallist$N2O_Manure_Management_AFM + finallist$N2O_Manure_Management_AFN + finallist$N2O_Manure_Management_AM +
  finallist$N2O_Manure_Management_RF + finallist$N2O_Manure_Management_RM + finallist$N2O_Manure_Management_MM + finallist$N2O_Manure_Management_MF
finallist$TOTAL_N2O_Manure_in_pastures_kg_CO2eq = finallist$N2O_Manure_in_pasture_AFM + finallist$N2O_Manure_in_pasture_AFN + finallist$N2O_Manure_in_pasture_AM +
  finallist$N2O_Manure_in_pasture_RF + finallist$N2O_Manure_in_pasture_RM + finallist$N2O_Manure_in_pasture_MM + finallist$N2O_Manure_in_pasture_MF
finallist$TOTAL_EMISSIONS = finallist$TOTAL_CH4_Enteric_Fermentation_kg_CO2eq + finallist$TOTAL_CH4_Manure_Managment_kg_CO2eq +
  finallist$TOTAL_N2O_Manure_Managment_kg_CO2eq + finallist$TOTAL_N2O_Manure_in_pastures_kg_CO2eq
finallist$TOTAL_MILK = finallist$milk
finallist$TOTAL_MEAT = finallist$meatm + finallist$meatfm + finallist$meatff
# Emission intensities: kg CO2eq per liter of milk / per kg of carcass meat.
# NOTE(review): a farm with zero milk or meat production yields Inf here
# (NaN if total emissions are also 0) -- confirm desired behavior.
finallist$MILK_INTENSITY = finallist$TOTAL_EMISSIONS/finallist$TOTAL_MILK
finallist$MEAT_INTENSITY = finallist$TOTAL_EMISSIONS/finallist$TOTAL_MEAT
return(finallist)
# end of farm_emissions() -- the function definition begins above this excerpt
}
########################################
##INPUT FILES
########################################
## CSV FILES
# Lookup tables for pasture species and supplementary feeds.
main_pasture_list = read.csv("input_pasture_main_list.csv")
mixture_pasture_list = read.csv("input_pasture_mixture_list.csv")
cut_pasture_list = read.csv("input_pasture_cut_list.csv")
diet_list = read.csv("input_feed_supplements_list.csv")
## FARM DATA
# Farm survey with Spanish column names; each column is unpacked into a
# plain variable below (English gloss in the trailing comments).
farm_data = read.csv("input_farm_data.csv")
year = farm_data$fecha # fecha = date/year
farm_name = farm_data$finca # finca = farm
longitude = farm_data$longitud
latitude = farm_data$latitud
main_product = farm_data$producto # main product (milk/meat)
adult_females = farm_data$vacas # cows
adult_females_milk = farm_data$vacas_produccion # cows in production
young_females= farm_data$vaconas # heifers
female_calves= farm_data$terneras
adult_males= farm_data$toros # bulls
young_males= farm_data$toretes # young bulls
male_calves= farm_data$terneros
death_adult_females= farm_data$vacas_muertas # dead cows
death_female_calves= farm_data$terneras_muertas
death_adult_males= farm_data$toros_muertos
death_male_calves= farm_data$terneros_muertos
slaughtered_adult_females= farm_data$vacas_faenadas
sold_adult_females= farm_data$vacas_vendidas
slaughtered_adult_males= farm_data$toros_faenados
sold_adult_males= farm_data$toros_vendidos
total_births= farm_data$partos_totales # total calvings
age_first_calving_months= farm_data$edad_primer_parto_meses
adult_females_weight= farm_data$peso_vacas # live weights (kg)
female_calves_weight= farm_data$peso_terneras
adult_males_weight= farm_data$peso_toros
male_calves_weight= farm_data$peso_terneros
slaughtered_young_females_weight= farm_data$peso_sacrificio_vaconas
slaughtered_young_males_weight= farm_data$peso_sacrificio_toretes
milk_fat= farm_data$grasa_leche # milk fat (%)
milk_protein= farm_data$proteina_leche # milk protein (%)
milk_yield_liters_animal_day= farm_data$produccion_leche_litro_animal_dia
lactancy_period_months= farm_data$periodo_lactancia_meses
pasture_area_ha= farm_data$superficie_pastos_ha
adult_females_feed_pasture_age= farm_data$edad_pasto_vacas
other_categories_feed_pasture_age= farm_data$edad_pasto_otros
mixture_pasture_ha= farm_data$superficie_mezclas
adult_females_feed_cut_pasture_kg = farm_data$pasto_corte_vaca_kg
other_categories_feed_cut_pasture_kg= farm_data$pasto_corte_otros_kg
productive_system= farm_data$sistema_productivo
# Manure management shares ("excretas" = excreta), presumably percentages.
manure_in_pastures= farm_data$excretas_sin_manejo # unmanaged / on pasture
manure_daily_spread= farm_data$excretas_dispersion_diaria
manure_liquid_storage= farm_data$excretas_liquido_fango
manure_compost= farm_data$excretas_compostaje
manure_anaerobic= farm_data$excretas_digestor_anaerobico
manure_drylot= farm_data$excretas_lote_secado
manure_solid= farm_data$excretas_almacenamiento_solido
manure_uncoveredlagoon= farm_data$excretas_laguna_anaerobica
manure_burned= farm_data$excretas_incinera
# Run the emission model.  All arguments are passed POSITIONALLY.
# NOTE(review): the manure_* arguments here are ordered
# (..., compost, drylot, solid, anaerobic, uncoveredlagoon, burned),
# while the variable-definition block above lists
# (..., compost, anaerobic, drylot, solid, uncoveredlagoon, burned).
# Confirm this call order matches the farm_emissions() signature -- a
# positional mix-up here would silently swap manure-system shares.
results = farm_emissions(
  main_pasture_list,
  mixture_pasture_list,
  cut_pasture_list,
  diet_list,
  farm_name,
  year,
  longitude,
  latitude,
  main_product,
  adult_females,
  adult_females_milk,
  young_females,
  female_calves,
  adult_males,
  young_males,
  male_calves,
  death_adult_females,
  death_female_calves,
  death_adult_males,
  death_male_calves,
  slaughtered_adult_females,
  sold_adult_females,
  slaughtered_adult_males,
  sold_adult_males,
  total_births,
  age_first_calving_months,
  adult_females_weight,
  female_calves_weight,
  adult_males_weight,
  male_calves_weight,
  slaughtered_young_females_weight,
  slaughtered_young_males_weight,
  milk_fat,
  milk_protein,
  milk_yield_liters_animal_day,
  lactancy_period_months,
  pasture_area_ha,
  adult_females_feed_pasture_age,
  other_categories_feed_pasture_age,
  mixture_pasture_ha,
  adult_females_feed_cut_pasture_kg,
  other_categories_feed_cut_pasture_kg,
  productive_system,
  manure_in_pastures,
  manure_daily_spread,
  manure_liquid_storage,
  manure_compost,
  manure_drylot,
  manure_solid,
  manure_anaerobic,
  manure_uncoveredlagoon,
  manure_burned
)
########################################
## RESULTS CSV FILE
########################################
# Writes into the working directory; row.names are included by default.
write.csv(results,file = "results.csv")
cd9225d9d097f6e2c1b1445c20877c28339cdfc0 | d5f011fdc8eed076fc6d89684473b7582e4527bf | /R/supportFunc_ensembleEnet.R | bdc589f391a174ce988b3c10d746ebef403cc449 | [] | no_license | singha53-zz/amritr | 60cf8ed5a26e1d4dc7950cbb10b0d85688a3ff8b | 139d9029d3a24ba90e252c642383016b40a9a504 | refs/heads/master | 2022-06-17T21:10:16.535078 | 2019-07-18T18:42:16 | 2019-07-18T18:42:16 | 58,297,895 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,814 | r | supportFunc_ensembleEnet.R | #' Build ensemble enet classification panel
#'
#' @param X.trainList - list of training datasets (nxpi); i number of elements
#' @param y.train - n-vector of class labels (must be a factor)
#' @param alphaList = list of alpha values
#' @param lambdaList = list of lambda values
#' @param family - can be "binomial" or "multinomial"
#' @param X.testList - list of test datasets (nxpi); i number of elements
#' @param y.test - n-vector of class labels (must be a factor)
#' @param filter - pre-filtering of initial datasets - "none" or "p.value"
#' @param topranked - Number of topranked features based on differential expression to use to build classifer
#' @param keepVarList - which variables to keep and not omit (set to NULL if no variables are forced to be kept)
#' @return model
#' @return testPerf
#' @return X.trainList
#' @return y.train
#' @return alphaList
#' @return lambdaList
#' @return family
#' @return X.testList
#' @return y.test
#' @return filter
#' @return topranked
#' @return keepVarList
#' @export
ensembleEnet = function(X.trainList, y.train, alphaList, lambdaList, family = "binomial",
X.testList=NULL, y.test=NULL, filter="none", topranked=50, keepVarList=NULL){
if (class(y.train) == "character")
stop("y.train is not a factor")
## load libraries
library(glmnet); library(limma); library(pROC);
library(OptimalCutpoints); library(tidyverse);
## perform pre-filtering (none, p-value, and keep certain variables)
if (filter == "none") {
X1.trainList <- X.trainList
}
if (filter == "p.value") {
X1.trainList <- lapply(X.trainList, function(i){
design <- model.matrix(~y.train)
fit <- eBayes(lmFit(t(i), design))
top <- topTable(fit, coef = 2, adjust.method = "BH", n = nrow(fit))
i[, rownames(top)[1:topranked]]
})
}
if (is.null(keepVarList)) {
penalty.factorList <- lapply(X1.trainList, function(i){rep(1, ncol(i))})
X2.trainList <- X1.trainList
} else {
X2.trainList <- mapply(function(x, x1, y){
X1 <- x1[, setdiff(colnames(x1), y)]
X2 <- as.matrix(cbind(X1, x[, y]))
colnames(X2) <- c(colnames(X1), y)
X2
}, x = X.trainList, x1 = X1.trainList, y = keepVarList)
penalty.factorList <- mapply(function(x, y){
c(rep(1, ncol(X1)), rep(0, length(keepVar)))
}, x = X1.trainList, y = keepVarList)
}
## build glmnet classifier
model <- mapply(function(X, alpha, lambda, penalty.factor){
if(family == "binomial") {
fit <- glmnet(X, y.train, family = "binomial", alpha = alpha,
penalty.factor = penalty.factor)
cv.fit <- cv.glmnet(X, y.train, family = "binomial")
} else {
fit <- glmnet(X, y.train, family = "multinomial", alpha = alpha,
type.multinomial = "grouped", penalty.factor = penalty.factor)
cv.fit <- cv.glmnet(X, y.train, family = "multinomial")
}
if(is.null(lambda)) {lambda = cv.fit$lambda.min} else {lambda = lambda}
Coefficients <- coef(fit, s = lambda)
if(family == "binomial"){
Active.Index <- which(Coefficients[, 1] != 0)
Active.Coefficients <- Coefficients[Active.Index, ]
} else {
Active.Index <- which(Coefficients[[1]][, 1] != 0)
Active.Coefficients <- Coefficients[[1]][Active.Index, ]
}
enet.panel <- names(Active.Coefficients)[-1]
enet.panel.length <- length(enet.panel)
return(list(fit=fit, Coefficients=Coefficients, Active.Index=Active.Index, lambda = lambda,
Active.Coefficients=Active.Coefficients, enet.panel=enet.panel, enet.panel.length=enet.panel.length))
}, X = X2.trainList, alpha = alphaList, lambda = lambdaList, penalty.factor = penalty.factorList, SIMPLIFY = FALSE)
## Test performance in test dataset
if(!is.null(X.testList)){
if(!all(sapply(1 : length(X.trainList), function(i) any(colnames(X.trainList[[i]]) == colnames(X.testList[[i]])))))
stop("features of the train and test datasets are not in the same order")
if(!any(levels(y.train) == levels(y.test)))
stop("levels of y.train and y.test are not in the same order")
testPerf <- mapply(function(mod, test){
predictResponse <- unlist(predict(mod$fit, newx = test[, rownames(mod$Coefficients)[-1]], s = mod$lambda, type = "class"))
probs <- predict(mod$fit, newx = test[, rownames(mod$Coefficients)[-1]], s = mod$lambda, type = "response") %>% as.numeric
names(probs) <- rownames(predictResponse)
predictResponse <- as.character(predictResponse)
names(predictResponse) <- names(probs)
## compute error rate
mat <- table(pred=factor(as.character(predictResponse), levels = levels(y.train)), truth=y.test)
mat2 <- mat
diag(mat2) <- 0
classError <- colSums(mat2)/colSums(mat)
er <- sum(mat2)/sum(mat)
ber <- mean(classError)
error <- c(classError, er, ber) %>% matrix
rownames(error) <- c(names(classError), "Overall", "Balanced")
colnames(error) <- "Error_0.5"
## compute AUROC
if(length(y.test) > 1) {
if(nlevels(y.train) == 2){
y.test <- factor(as.character(y.test), levels(y.train))
perfTest <- amritr::tperformance(weights = as.numeric(as.matrix(probs)), trueLabels = y.test) %>% as.matrix
colnames(perfTest) <- paste(levels(y.train), collapse = "_vs_")
} else {
perfTest <- NA
}
} else {
perfTest <- NA
}
return(list(probs=probs, predictResponse=predictResponse, error=error, perfTest=perfTest))
}, mod = model, test = X.testList, SIMPLIFY = FALSE)
} else {testPerf <- NA}
return(list(model=model, testPerf=testPerf, X.trainList=X.trainList,
y.train=y.train, alphaList=alphaList, lambdaList=lambdaList, family=family, X.testList=X.testList,
y.test=y.test, filter=filter, topranked=topranked, keepVarList=keepVarList))
}
#' Estimate classification performance using repeated cross-validation using an elastic net classifier
#'
#'
#' @param object - ensembleEnet object
#' @param validation = "Mfold" or "loo"
#' @param M - # of folds
#' @param iter - Number of iterations of cross-validation
#' @param threads - # of cores, running each iteration on a separate node
#' @param progressBar = TRUE (show progress bar or not)
#' @export
perf.ensembleEnet = function(object, validation = "Mfold", M = 5, iter = 5, threads = 5, progressBar = TRUE){
library(tidyverse)
X.trainList=object$X.trainList
y.train=object$y.train
alphaList=object$alphaList
lambdaList=object$lambdaList
family=object$family
filter=object$filter
topranked=object$topranked
keepVarList=object$keepVarList
if (validation == "Mfold") {
folds <- lapply(1:iter, function(i) createFolds(y.train, k = M))
require(parallel)
cl <- parallel::makeCluster(mc <- getOption("cl.cores", threads))
parallel::clusterExport(cl, varlist = c("ensembleEnetCV",
"ensembleEnet", "X.trainList", "y.train", "alphaList", "lambdaList",
"family", "filter", "topranked", "keepVarList",
"M", "folds", "progressBar"),
envir = environment())
cv <- parallel::parLapply(cl, folds, function(foldsi, X.trainList, y.train, alphaList, lambdaList, family, filter, topranked, keepVarList, M, progressBar) {
ensembleEnetCV(X.trainList=X.trainList, y.train=y.train, alphaList=alphaList, lambdaList=lambdaList, family=family, filter=filter, topranked=topranked, keepVarList=keepVarList, M=M, folds=foldsi, progressBar=progressBar)
}, X.trainList, y.train, alphaList, lambdaList, family, filter, topranked,
keepVarList, M, progressBar) %>%
amritr::zip_nPure()
parallel::stopCluster(cl)
error <- do.call(rbind, cv$error) %>% as.data.frame %>%
mutate(ErrName = factor(rownames(.), unique(rownames(.)))) %>%
dplyr::group_by(ErrName) %>%
dplyr::summarise(Mean = mean(Error_0.5), SD = sd(Error_0.5))
perfTest <- do.call(rbind, cv$perfTest) %>% as.data.frame %>%
mutate(ErrName = factor(rownames(.), unique(rownames(.)))) %>%
dplyr::group_by(ErrName) %>%
dplyr::summarise(Mean = mean(perf), SD = sd(perf))
}
else {
n <- length(y.train)
folds = split(1:n, rep(1:n, length = n))
M = n
cv <- ensembleEnetCV(X.trainList, y.train, alphaList, lambdaList, family, filter, topranked,
keepVarList, M, folds, progressBar)
error <- cv$error
perfTest <- cv$perfTest
}
result = list()
result$error = error
result$perfTest = perfTest
method = "enetEnsemble.mthd"
result$meth = "enetEnsemble.mthd"
class(result) = c("perf", method)
return(invisible(result))
}
#' Estimate classification performance using cross-validation using an elastic net classifier
#'
#' Splits the training data into the supplied folds, builds an ensemble elastic
#' net panel on each training split (one glmnet fit per dataset in
#' \code{X.trainList}), averages the per-dataset class probabilities on the
#' held-out fold, and pools the out-of-fold predictions to compute error rates
#' and (for two-class problems) AUROC-style performance measures.
#'
#' @param X.trainList list of training datasets (nxpi); i number of elements
#' @param y.train n-vector of class labels (must be a factor)
#' @param alphaList list of alpha values
#' @param lambdaList list of lambda values
#' @param family can be "binomial" or "multinomial"
#' @param filter pre-filtering of initial datasets - "none" or "p.value"
#' @param topranked Number of topranked features based on differential expression to use to build classifier
#' @param keepVarList which variables to keep and not omit (set to NULL if no variables are forced to be kept)
#' @param M number of folds
#' @param folds list of length M, where each element contains the indices of samples for a given fold
#' @param progressBar (TRUE/FALSE) - show progress bar or not
#' @return error computes error rate (each group, overall and balanced error rate)
#' @return perfTest classification performance measures
#' @export
ensembleEnetCV <- function(X.trainList, y.train, alphaList, lambdaList, family = "binomial",
                           filter = "none", topranked = 50, keepVarList = NULL, M = 5,
                           folds = 5, progressBar = FALSE) {
  J <- length(X.trainList)
  # NOTE(fix): the original called assign("X.training", NULL, pos = 1) and
  # assign("y.training", NULL, pos = 1), writing NULLs into the caller's global
  # environment. Both names are created as ordinary locals immediately below,
  # so the global side effect was unnecessary and has been removed.
  #
  # For each fold, drop the held-out sample indices from every one of the J
  # datasets (drop = FALSE keeps single-row results as matrices).
  X.training <- lapply(folds, function(idx) {
    lapply(seq_len(J), function(j) {
      X.trainList[[j]][-idx, , drop = FALSE]
    })
  })
  y.training <- lapply(folds, function(idx) {
    y.train[-idx]
  })
  X.test <- lapply(folds, function(idx) {
    lapply(seq_len(J), function(j) {
      X.trainList[[j]][idx, , drop = FALSE]
    })
  })
  y.test <- lapply(folds, function(idx) {
    y.train[idx]
  })
  # Preallocate instead of growing the list inside the loop.
  avgProbList <- vector("list", M)
  if (progressBar == TRUE)
    pb <- txtProgressBar(style = 3)
  for (i in seq_len(M)) {
    if (progressBar == TRUE)
      setTxtProgressBar(pb, i / M)
    ## build ensemble panel on the i-th training split
    result <- ensembleEnet(X.trainList = X.training[[i]], y.train = y.training[[i]],
                           alphaList, lambdaList, family = family,
                           X.testList = X.test[[i]], y.test = y.test[[i]],
                           filter, topranked, keepVarList)
    # combine predictions across the J datasets using the average probability
    avgProbList[[i]] <- do.call(cbind, lapply(result$testPerf, function(p) {
      p$probs
    })) %>% rowMeans
  }
  probs <- unlist(avgProbList)
  ## Error and AUROC: classify with a 0.5 probability cut-off, second factor
  ## level being the "positive" class.
  predictResponse <- rep(levels(y.train)[1], length(probs))
  predictResponse[probs >= 0.5] <- levels(y.train)[2]
  ## compute error rate; names of unlist(y.test) look like "foldName.sampleName",
  ## so take the part after the first dot to recover the sample identifiers.
  truth <- sapply(strsplit(names(unlist(y.test)), "\\."), function(i) i[2])
  if (!all(names(probs) == truth))
    stop("predicted probability is not in the same order as the test labels")
  mat <- table(pred = factor(as.character(predictResponse), levels = levels(y.train)),
               truth = unlist(y.test))
  # Zero the diagonal to count only misclassifications.
  mat2 <- mat
  diag(mat2) <- 0
  classError <- colSums(mat2) / colSums(mat)   # per-class error
  er <- sum(mat2) / sum(mat)                   # overall error
  ber <- mean(classError)                      # balanced error rate
  error <- c(classError, er, ber) %>% matrix
  rownames(error) <- c(names(classError), "Overall", "Balanced")
  colnames(error) <- "Error_0.5"
  ## compute AUROC-style measures (only defined for the two-class case)
  if (nlevels(y.train) == 2) {
    perfTest <- amritr::tperformance(weights = probs, trueLabels = unlist(y.test)) %>% as.matrix
    colnames(perfTest) <- "perf"
  } else {
    perfTest <- NA
  }
  return(list(error = error, perfTest = perfTest))
}
|
753ac19515361e38efc225b48d93f6450cc54016 | dae56739731143f4e03991fe47759d2f33fe076d | /SubsetSelection.R | 408b56ae54754ce363dc501a65b9b9908b25b792 | [] | no_license | MarissaMC/Machine-Learning_using-R | 55b406b7e24f063558c100122027a52c1f65c966 | 613897ca0e18a4dfb0ff678c4e7c641a51b687eb | refs/heads/master | 2021-01-01T17:33:10.551116 | 2015-06-11T17:40:41 | 2015-06-11T17:40:41 | 37,275,761 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,023 | r | SubsetSelection.R | library(ISLR)
?Hitters
attach(Hitters)
## check for missing values
sum(is.na(Salary))
model=lm(Salary~.,data=Hitters)
Hitters=na.omit(Hitters)
attach(Hitters)
dim(Hitters)
# the observations with missing values are
# Use subset selection to decide what variables should be in our model
library(leaps)
model=regsubsets(Salary~.,data=Hitters,nvmax=19)
# given due to MSE
model_summary=summary(model)
# the star of the output refers theat the variable is included in the model
# 8 model because of the default 8, us nvmax to define
names(model_summary)
model_summary$adjr2
# based on adj r2
n=1:19
plot(n,model_summary$adjr2,xlab="number of variables",ylab="adjusted R^2",type="o")
which.max(model_summary$adjr2)
points(11,model_summary$adjr2[11],col="red",cex=2,pch=20)
abline(b=11,col="blue")
plot(n,model_summary$cp,xlab="number of variables",ylab="adjusted R^2",type="o")
which.min(model_summary$cp)
par(mfrow=c(1,2))
points(10,model_summary$cp[10],col="red",cex=2,pch=20)
abline(v=10,col="blue")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.