blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2534a0cb3cc38c8d2f661e8d56aa2baca631922f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Sim.DiffProc/examples/fptsde2d.Rd.R | c239dffb4395e8ca199d21f7a945e6e6b9fd5be2 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,343 | r | fptsde2d.Rd.R | library(Sim.DiffProc)
### Name: fptsde2d
### Title: Approximate densities and random generation for first passage
### time in 2-D SDE's
### Aliases: fptsde2d fptsde2d.default summary.fptsde2d print.fptsde2d
### mean.fptsde2d Median.fptsde2d Mode.fptsde2d quantile.fptsde2d
### kurtosis.fptsde2d skewness.fptsde2d moment.fptsde2d cv.fptsde2d
### max.fptsde2d min.fptsde2d dfptsde2d dfptsde2d.default print.dfptsde2d
### plot.dfptsde2d
### Keywords: fpt sde ts mts
### ** Examples
## dX(t) = 5*(-1-Y(t))*X(t) * dt + 0.5 * dW1(t)
## dY(t) = 5*(-1-X(t))*Y(t) * dt + 0.5 * dW2(t)
## x0 = 2, y0 = -2, and barrier -3+5*t.
## W1(t) and W2(t) two independent Brownian motion
## fix the RNG seed so the simulated paths are reproducible
set.seed(1234)
# SDE's 2d: drift and diffusion expressions in (x, y)
fx <- expression(5*(-1-y)*x , 5*(-1-x)*y)
gx <- expression(0.5 , 0.5)
# simulate M = 100 trajectories started at (x0, y0) = (2, -2)
mod2d <- snssde2d(drift=fx,diffusion=gx,x0=c(2,-2),M=100)
# boundary (time-dependent barrier)
# NOTE(review): the header comment above says barrier -3+5*t, but the
# expression below is -1+5*t -- confirm which one is intended
St <- expression(-1+5*t)
# random fpt: first-passage times of the simulated paths through St
out <- fptsde2d(mod2d,boundary=St)
out
summary(out)
# Marginal density of the first-passage times
denM <- dfptsde2d(out,pdf="M")
denM
plot(denM)
# Joint density on the window given by lims = c(xmin, xmax, ymin, ymax)
denJ <- dfptsde2d(out,pdf="J",n=200,lims=c(0.28,0.4,0.04,0.13))
denJ
plot(denJ)
# alternative displays of the same joint density estimate
plot(denJ,display="image")
plot(denJ,display="image",drawpoints=TRUE,cex=0.5,pch=19,col.pt='green')
plot(denJ,display="contour")
plot(denJ,display="contour",color.palette=colorRampPalette(c('white','green','blue','red')))
|
164bf46c1ce0216602b161b15d254e26f979fde0 | fb143d9e0a947bb9e3fd903687e9eb7f03a21e5f | /lib/go.functions.R | be05da0e100428eebc0a8e12d9aeed23c6fe0629 | [
"MIT"
] | permissive | psikon/functional.analysis | 160af61823e247a78b430ca12c64f23f969e1a56 | cfeb648bfb2f2bad4b42878bc8366e93ac607246 | refs/heads/master | 2016-09-16T10:51:38.965362 | 2015-09-09T14:01:07 | 2015-09-09T14:01:07 | 23,146,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,931 | r | go.functions.R | #' pfam2go
#'
#' assign for pfam ids the corresponding go ids, go ontologies and
#' ancestor strings from the mapping object
#'
#' @description convert a pfam count table into a new data.frame
#' with the following fields:
#'              db       - origin of annotation
#'              pfam     - pfam id of the protein
#'              name     - name of the found pfam id
#'              go.id    - GO:ID resulting from the mapping
#'              count    - number of occurences of the pfam id
#'              ontology - ontology of the GO:Id (one of BP, MF or CC)
#'              ancestor - string with all ancestors of the GO:Id
#'
#' Pfam ids without any GO mapping are dropped from the result.
#'
#' @param pfam.count pfam count table (data.frame with at least the
#'   columns `pfam` and `count`)
#' @param pfam2go.mapping pfam2go mapping object (data.frame with the
#'   columns `id`, `name` and `go_id`)
#' @param GO.db GO.db object for faster access; built via
#'   `init.GO.db()` when NULL
#'
#' @return data.frame
#' @export
pfam2GO <- function(pfam.count,
                    pfam2go.mapping,
                    GO.db = NULL) {
  # load required databases from the GO.db package -
  # for speedup the look up process
  if (is.null(GO.db)) GO.db <- init.GO.db()
  # create the ancestor db (one lookup list per GO ontology:
  # molecular function, biological process, cellular component)
  ancestor.db <- list(mf = as.list(GOMFANCESTOR),
                      bp = as.list(GOBPANCESTOR),
                      cc = as.list(GOCCANCESTOR))
  data <- pblapply(pfam.count$pfam, function(x) {
    # find mapped GO:Ids for the pfam id
    # NOTE(review): grep() matches substrings/regexes, so an id such as
    # "PF0001" would also hit "PF00010" -- confirm the ids cannot collide
    go <- pfam2go.mapping[grep(x, as.character(pfam2go.mapping$id)), ]
    # if go is empty -> list element is NULL else:
    if(nrow(go) > 0) {
      # create data.frame with supplementary data:
      # ontology and ancestors for every mapped GO id
      supplemental <- do.call(rbind.data.frame,lapply(go$go_id,
      function(x) {
        data.frame("ontology" = get.GOontology(x, GO.db),
                   "ancestor" = get.GOancestor(x, ancestor.db))
      }))
      # create data.frame from go mapping and combine it
      # with supplementary data.frame and count information
      res <- cbind(data.frame(
        "db" = as.character("pfam"),
        "pfam" = as.character(go$id),
        "name" = as.character(go$name),
        "go.id" = as.character(go$go_id),
        # look the count up again via (substring) match on the id;
        # recycled across all GO ids mapped to this pfam id
        "count" = pfam.count[grep(as.character(x)[1],
                                  pfam.count$pfam),
                             "count"],
        stringsAsFactors = F),
        supplemental)
    }
  })
  # remove NULL elements (pfam ids without GO mapping) and combine
  # all per-id results into one data.frame
  data <- do.call(rbind.data.frame, data[!sapply(data, is.null)])
  return(data)
}
#' go2slim
#'
#' assign a lineage based on the GO slim annotation to the go.ids
#'
#' @description convert the annotated go id table to a new data.frame
#' with the following fields:
#'              pfam.id     - pfam id of the protein
#'              go.id       - GO:ID resulting from the mapping
#'              count       - number of occurences of the pfam id
#'              ontology    - ontology of the GO:Id (one of BP, MF or CC)
#'              slim.linage - lineage from the go slim annotation
#'
#' Rows whose ancestor string contains no GO slim id get NA as their
#' lineage (previously max(which(...)) on an empty match raised an
#' error).
#'
#' @param pfam.go go annotated pfam table (columns pfam, go.id, count,
#'   ontology and ancestor; ancestor is a comma separated GO:ID string)
#' @param go2slim.mapping go2slim mapping object (columns id and linage)
#'
#' @return data.frame
#' @export
go2slim <- function(pfam.go, go2slim.mapping) {
  # resolve the slim lineage for every row of the annotation table
  slim.linage <- vapply(seq_len(nrow(pfam.go)), function(i) {
    # split the comma separated ancestor string into single GO ids
    ancestor <- unlist(strsplit(as.character(pfam.go$ancestor[i]),
                                ",", fixed = TRUE))
    # positions of ancestors that are part of the slim vocabulary
    hits <- which(ancestor %in% go2slim.mapping$id)
    if (length(hits) == 0L) {
      # no slim term among the ancestors -> no lineage assignable
      return(NA_character_)
    }
    # keep the original choice: the last matching ancestor in the string
    slim <- go2slim.mapping[go2slim.mapping$id %in% ancestor[max(hits)], ]
    # only assign one lineage to each go id
    as.character(slim$linage[1])
  }, character(1))
  # build new data.frame with the lineage column appended
  slim.annotation <- data.frame(pfam.go$pfam,
                                pfam.go$go.id,
                                pfam.go$count,
                                pfam.go$ontology,
                                slim.linage,
                                stringsAsFactors = FALSE)
  # adjust column names
  colnames(slim.annotation) <- c("pfam.id", "go.id",
                                 "count", "ontology",
                                 "slim.linage")
  slim.annotation
}
getGOfromSlim <- function(functions, slim_list) {
  # Collect every row of the first slim table whose slim.linage matches
  # at least one of the supplied search terms (regex matching via grep).
  slim.tab <- slim_list[[1]]
  matched.rows <- lapply(functions, grep, x = slim.tab$slim.linage)
  slim.tab[unlist(matched.rows), ]
}
#' aggregateGObySlim
#'
#' Sum the counts of a slim-annotated table per GO id, carrying along
#' that id's slim lineage.
#'
#' @param functions data.frame with columns go.id, count and slim.linage
#'   (e.g. the output of getGOfromSlim)
#' @return data.frame with one row per distinct go.id and the columns
#'   go.id, count (summed) and slim.linage
aggregateGObySlim <- function(functions) {
  # iterate over each distinct GO id once (the original looped over
  # every row, relying on a trailing unique() to drop duplicates)
  per.id <- lapply(unique(functions$go.id), function(id) {
    rows <- functions[functions$go.id == id, ]
    data.frame(go.id = id,
               count = sum(rows$count),
               # use this id's own lineage; the original took
               # unique(functions$slim.linage) over the whole table,
               # which silently cross-joined when >1 lineage was present
               slim.linage = rows$slim.linage[1],
               stringsAsFactors = FALSE)
  })
  unique(do.call(rbind, per.id))
}
|
d995100f004b654f369c9c70b8eb1522ee6012f9 | ab20fc503daf99092253c56731f9c1570fe440cc | /3+-+Graphics+in+R.R | 17b9bbe6b292767695f3a576d26b7d90b39132c7 | [] | no_license | jbschnee/Stat-3080 | 788884cfc36f334c823f1e3f2c59cc009ca8181e | 6781f22b3996214730ac4ffbb5682f210c2826f9 | refs/heads/main | 2023-01-08T00:13:37.318452 | 2020-11-04T22:47:04 | 2020-11-04T22:47:04 | 310,134,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,577 | r | 3+-+Graphics+in+R.R |
#########################################################################
# #
# Graphics in R #
# #
#########################################################################
########### objects/packages needed ###########
library(ggplot2)
# OR library(tidyverse)
library(car)
library(gcookbook)
library(MASS)
###############################################
#############
# General #
#############
# There are several different ways to produce graphics in R. Many #
# exist in the base graphics package. #
## Scatterplot of tree circumference vs. tree age
plot(Orange$age, Orange$circumference)
## Histogram of tree circumference
hist(Orange$circumference)
# A commonly used package for graphics is ggplot2. #
#####################
# ggplot2 package #
#####################
# The main idea behind graphics in ggplot2 is that different layers #
# of the plot are specified by the user and then added together. #
# Layers have one of three purposes: #
# 1. Display the data #
# 2. Display a statistical summary of the data #
# 3. Add metadata, context, and annotations #
# #
# A graph is not displayed until there is at least one layer. #
# Generally, the first layer is specified by using the function #
# ggplot(). In this function, the data to be used for the entire #
# graphic and/or the aesthetic features for the entire graphic can #
# be specified. If these are not given in the ggplot() statement, #
# they must be specified in each layer. Aesthetic features for the #
# entire graphic are specified within the aes() function. #
##################
# Scatterplots #
##################
## Look at the first 5 rows of the Davis data (contained in the car package)
Davis[1:5,]
## Define the first layer of a scatterplot of reported weight vs.
## measured weight
DavisPlot <- ggplot(Davis, aes(x=weight, y=repwt))
DavisPlot
# A layer can contain any of many different elements. One of the most #
# basic elements that will be contained in a graphics layer is a #
# geometric object (ie. points, lines, bars, etc.). These are added #
# using one of many functions of the form geom_"object name"(). A full #
# list of possible geometric objects is given on the ggplot2 website #
# or by typing geom_ into the console of RStudio and pressing Tab. #
# #
# Any layer is added to a graphic by using the + operator. #
## Adding points to the axes
DavisPlot + geom_point()
# The aesthetics that are either default or specified in the aes() #
# function can be overwritten by providing new values for aesthetic #
# options in the geometric object function. Some of the aesthetics #
# that users may be interested in changing are the position, the #
# color, the fill color, the shape, and the size. #
## Modifying aesthetic features of points
DavisPlot + geom_point(shape=21, size=2, color="darkred", fill="blue")
# Additionally, the points can be made more transparent for cases #
# when several points are plotted on top of each other. #
## Adjusting transparency of data points
DavisPlot + geom_point(alpha=0.2)
# Alternatively for the case when several points are obscured #
# together, a layer can be created that groups points into rectangles #
# and shades each rectangle according to how many points are #
# contained in that space. This type of layer is created using the #
# function stat_bin2d(). #
## Binning the data points
DavisPlot + stat_bin2d()
# If a scatterplot contains points belonging to different groups, #
# different shapes or colors can be used to differentiate the groups. #
# Automatically varying shapes or colors for the entire graphic #
# should be specified in the aesthetics function, aes(). #
## Setting varying colors for different groups
ggplot(Davis, aes(x=weight, y=repwt, color=sex)) + geom_point()
## Specifying colors for different groups
ggplot(Davis, aes(x=weight, y=repwt, color=sex)) + geom_point() +
scale_color_manual(values=c("M"="blue","F"="red"))
## Setting varying colors and shapes for different groups
ggplot(Davis, aes(x=weight, y=repwt, color=sex, shape=sex)) + geom_point()
# Differing colors can be used to designate the scale of a continuous #
# variable as well. #
## Setting varying colors for levels of a continuous variable
ggplot(Davis, aes(x=weight, y=repwt, color=repht)) + geom_point()
# The value of another continuous variable can be represented by the #
# size of the point. #
## Setting size of point for value of a continuous variable
ggplot(Davis, aes(x=weight, y=repwt, size=repht)) + geom_point()
# Differing groups can also be plotted in separate grids. #
## Separating groups into plots separated vertically
DavisPlot + geom_point() + facet_grid(sex~.)
## Separating groups into plots separated horizontally
DavisPlot + geom_point() + facet_grid(.~sex)
# For data sets with more than one grouping variable, the plots can #
# be split by two of the grouping variables to create a scatterplot #
# matrix. #
## Look at the first five rows of the mpg data set
mpg[1:5,]
## Create mpg plot of highway mpg vs. engine displacement and add points
mpgPlot <- ggplot(mpg, aes(x=displ, y=hwy)) + geom_point()
mpgPlot
## Separate plots based on type of drive (front, rear, all)
mpgPlot + facet_grid(.~drv)
## Separate plots based on number of cylinders
mpgPlot + facet_grid(cyl~.)
## Separate plots based on both type of drive and # of cylinders
mpgPlot + facet_grid(cyl~drv)
# By default all of the axes will have the same range. Users can allow #
# the horizontal axis, vertical axis, or both to vary. #
## Varying vertical axes
mpgPlot + facet_grid(cyl~drv, scales="free_y")
## Varying horizontal axes
mpgPlot + facet_grid(cyl~drv, scales="free_x")
## Varying vertical and horizontal axes
mpgPlot + facet_grid(cyl~drv, scales="free")
# For grouping variables with too many levels to make side-by-side #
# unfeasible, a facet wrap can be used. #
## Using facet wrap to create subplots based on vehicle class
mpgPlot + facet_wrap(~class)
mpgPlot + facet_wrap(~class, nrow=2)
#######################################
# Scatterplots with regression line #
#######################################
# A smoothing line can be added to any scatterplot using the #
# geom_smooth() function. The default smoothing line will attempt to #
# smooth out the data into a curve that best fits the data. #
## Adding default smoothing line
DavisPlot + geom_point() + geom_smooth()
# Options can be specified in the geom_smooth() function for the #
# smoothing line to use a particular method, such as linear regression.#
## Adding the linear regression line
DavisPlot + geom_point() + geom_smooth(method=lm)
################## ending of 2/11 #######################################
########### objects/packages needed ###########
library(ggplot2)
# OR library(tidyverse)
library(car)
library(gcookbook)
library(MASS)
DavisPlot <- ggplot(Davis, aes(x=weight, y=repwt))
###############################################
###################################################
# Scatterplots with regression line (continued) #
###################################################
# The default color of the smoothing line is blue and can be changed #
# using the color option. #
## Adding a black linear regression line
DavisPlot + geom_point() + geom_smooth(method=lm, color="black")
# The geom_smooth() function default is to include the confidence #
# interval bands in a shaded region around the regression line. These #
# shaded bands are gray by default, but can be changed using the fill #
# option.
## Changing color of confidence bands
DavisPlot + geom_point() + geom_smooth(method=lm, fill="blue")
# The confidence bands can be made less prevalent by changing the #
# transparency of the shading. A transparency setting of zero is fully #
# transparent and a transparency setting of one is fully opaque.        #
## Lessening the prevalence of the confidence bands
DavisPlot + geom_point() + geom_smooth(method=lm)
DavisPlot + geom_point() + geom_smooth(method=lm, alpha=0.2)
# The shaded bands can be removed using the standard error option, se. #
## Adding the linear regression line without the confidence bands
DavisPlot + geom_point() + geom_smooth(method=lm, se=F)
# If the universal settings of a graphic have defined different #
# colors for different groups, separate regression lines and #
# confidence bands will be added for each group. #
## Adding regression lines for groups
ggplot(Davis, aes(x=weight, y=repwt, color=sex)) + geom_point() +
geom_smooth(method=lm)
################## ending of 2/13 #######################################
########### objects/packages needed ###########
library(ggplot2)
# OR library(tidyverse)
library(car)
library(gcookbook)
library(MASS)
###############################################
################
# Line plots #
################
# When data consist of one variable that represent values in a #
# specified order, a line plot can be used to show the trend of the #
# data. One common example of this type of plot is a time series plot. #
## Look at US population data (in the car package)
USPop[1:5,]
## Create line plot
ggplot(USPop, aes(x=year, y=population)) + geom_line()
# The options within the geom_line() function can be used to change #
# the line type, color, and size. #
## Modify aesthetic features of line
ggplot(USPop, aes(x=year, y=population)) +
geom_line(linetype="dashed", size=1, color="blue")
# To also show the points on a line plot, a layer can be added with #
# the data points. #
## Adding data points to line plot
ggplot(USPop, aes(x=year, y=population)) + geom_line() + geom_point()
# There may be situations when it is useful to plot several line #
# plots that distinguish separate groups together. A multiple line      #
# plot can be created by specifying the grouping variable to vary #
# over the line type and/or line color. #
## Look at US population data separated by age group (in gcookbook package)
uspopage[1:16,]
## Creating a multiple line plot
ggplot(uspopage, aes(x=Year, y=Thousands, linetype=AgeGroup)) + geom_line()
ggplot(uspopage, aes(x=Year, y=Thousands, color=AgeGroup)) + geom_line()
################
# Bar graphs #
################
# When conducting an experiment and measuring a response over various #
# treatment groups, bar graphs are a useful way to graphically #
# summarize the data. #
# #
# The geom_bar() function will add a layer of bars to the graphic. An #
# input that is required for this function is the stat option. This #
# input indicates what statistical transformation to use on the data #
# for this layer. By default the geom_bar() function will group and #
# count the number of responses or use a variable that has already #
# been created in the data set if identity is specified for the stat #
# option. #
## Look at pg_mean data set (in gcookbook package)
pg_mean
## Create bar graph to view the means of the treatment groups
bar1 <- ggplot(pg_mean, aes(x=group, y=weight))
bar1 + geom_bar(stat="identity")
# To plot a bar graph that represents a statistical measure other #
# than the count, the stat_summary() function can be used. This #
# function takes at least two inputs - the function to apply to the #
# data and the geometric object to create in the layer. The specified #
# function can be applied to just the y-variable or to the entire #
# data set by using the options fun.y or fun.data. #
## Look at the PlantGrowth data set
PlantGrowth[1:5,]
## Create bar graph to view the means of the treatment groups
bar2 <- ggplot(PlantGrowth, aes(x=group, y=weight))
bar2 + stat_summary(fun.y=mean, geom="bar")
# Regardless of the function used to create the bar layer, aesthetic #
# features (color, border, separation, etc.) can be changed within #
# the layer function. #
## Modifying aesthetics of the bar graph
bar1 + geom_bar(stat="identity", fill="orange", color="blue")
bar2 + stat_summary(fun.y=mean, geom="bar", fill="orange", color="blue")
## Modifying the spacing of the bar graph
bar1 + geom_bar(stat="identity")
bar1 + geom_bar(stat="identity", width=0.5)
bar1 + geom_bar(stat="identity", width=1)
# If the height of the bars represents the means of various groups, #
# error bars can be added to the plot to either represent confidence #
# limits or a single standard error of the mean. These error bars are #
# added as a new layer to the graphic. #
## Adding error bars to the bar graph (CI)
bar2 + stat_summary(fun.y=mean, geom="bar", fill="white", color="black") +
stat_summary(fun.data=mean_cl_normal, geom="errorbar", width=0.2)
## Adding error bars to the bar graph (one standard error)
bar2 + stat_summary(fun.y=mean, geom="bar", fill="white", color="black") +
stat_summary(fun.data=mean_se, geom="errorbar", width=0.2)
# In the case of experiments with more than one variable that defines #
# each treatment group or in the case where there are more than one #
# groups of interest, it is useful to separate the bars of the plot #
# to distinguish the groups. #
## Look at cabbages data set (in MASS package)
cabbages[1:5,]
## Create bar graph for average weight, separated by cultivation
bar3 <- ggplot(cabbages, aes(x=Date, y=HeadWt, fill=Cult))
bar3 + stat_summary(fun.y=mean, geom="bar")
# Even if groups are specified by different fill color in the base #
# layer, bars will be plotted on top of each other unless the user #
# specifies otherwise. The dodge option for the position input #
# indicates that bars should not overlap. #
## Create bar graph for average weight, separated by cultivation
bar3 + stat_summary(fun.y=mean, geom="bar", position="dodge")
# By default, bars within groups will never have space between them, #
# even if the width is adjusted. #
## Adjusting width
bar3 + stat_summary(fun.y=mean, geom="bar", position="dodge", width=0.5)
# In order to add space between the bars within a group, the position #
# needs to be adjusted. When the dodge option is specified, the #
# function is using position_dodge(0.5). Changing the value in the #
# position_dodge() function will adjust the separation between bars #
# within groups. #
## Adding space between bars within groups
bar3 + stat_summary(fun.y=mean, geom="bar", position=position_dodge(0.7),
width=0.5)
# In some plots, it is useful to add the value of the height of each #
# bar onto the plot. If these values are part of the initial data set, #
# a layer is added using the geom_text() function. If these values #
# were calculated using the stat_summary() function, a new layer #
# using a second stat_summary() function is used. #
## Adding bar height values
bar3 + stat_summary(fun.y=mean, geom="bar", position="dodge") +
stat_summary(fun.y=mean, geom="text", aes(label=..y..), vjust=1.5,
position=position_dodge(0.9), color="white")
################## ending of 2/18 #######################################
########### objects/packages needed ###########
library(ggplot2)
# OR library(tidyverse)
library(car)
library(gcookbook)
library(MASS)
###############################################
##############
# Boxplots #
##############
# Boxplots give users the ability to quickly compare population #
# distributions using the information contained in the five-number #
# summary. #
## Look at birthwt data (in MASS package)
birthwt[1:5,]
## Label race values in data
bwdata <- birthwt
bwdata$race <- factor(bwdata$race, labels=c("white","black","other"))
## Create boxplots for birth weights separated by mother's race
bxplot <- ggplot(bwdata, aes(x=race, y=bwt))
bxplot + geom_boxplot()
# The spaces between multiple boxplots can be modified with the #
# width option. #
## Modify the space between the boxplots
bxplot + geom_boxplot(width=0.5)
# Outliers are marked as points on each boxplot. If there are many #
# outliers, the aesthetics of the outliers can be modified to unmask #
# any overlapped points. #
## Modifying outlier aesthetics
bxplot + geom_boxplot(outlier.size=1.5, outlier.shape=21)
# To emphasize the location of the median of each population, notches #
# can be added to the boxplots. #
## Adding notches to boxplots
bxplot + geom_boxplot(notch=TRUE)
# Markers for the mean of each population can be added to the boxplot #
# using the stat_summary() function. #
## Adding markers for the means
bxplot + geom_boxplot() + stat_summary(fun.y="mean", geom="point")
bxplot + geom_boxplot() +
stat_summary(fun.y="mean", geom="point", shape=21, size=3)
################
# Histograms #
################
# Histograms allow us to see the distribution of a sample in more #
# detail by plotting the frequencies over several bins. #
## Look at the faithful data
faithful[1:5,]
## Create histogram of waiting times
hist1 <- ggplot(faithful, aes(x=waiting))
hist1 + geom_histogram()
# The width of bins can be specified with the binwidth option or the #
# number of bins can be specified with the bins option. The bar #
# aesthetics can be modified using the fill and color options. #
## Change number of bins in a histogram
hist1 + geom_histogram(binwidth=5, fill="white", color="black")
hist1 + geom_histogram(bins=9, fill="white", color="black")
# For users who would like to compare histograms for multiple groups, #
# the function facet_grid() can be used to split the frame and #
# display multiple histograms. #
## Look at the birthwt data (in MASS package)
birthwt[1:5,]
## Change labels for smoking and nonsmoking mothers
bwdata <- birthwt
bwdata$smoke <- factor(bwdata$smoke, labels=c("Nonsmoking","Smoking"))
## Create histograms for birth weights separated by mothers' smoking habits
hist2 <- ggplot(bwdata, aes(x=bwt))
hist2 + geom_histogram(binwidth=250, fill="white", color="black") +
facet_grid(smoke~.)
hist2 + geom_histogram(binwidth=250, fill="white", color="black") +
facet_grid(.~smoke)
# Another option for comparing multiple histograms is to overlay them #
# and make them semitransparent. #
## Overlapping histograms
ggplot(bwdata, aes(x=bwt, fill=smoke)) +
geom_histogram(binwidth=250, position="identity", alpha=0.4)
#########################
# Distribution curves #
#########################
# There are times when it may be useful to plot the density curve of #
# a distribution. These can be created as a layer using the #
# stat_function() function and specifying the desired distribution. #
# If the distribution function requires additional parameters, they #
# are specified in the args option. #
## Plotting a normal distribution curve
Xdata1 <- data.frame(X=c(-4,4))
dist1 <- ggplot(Xdata1, aes(x=X))
dist1 + stat_function(fun=dnorm)
## Plotting a normal distribution and t-distribution curve
dist1 + stat_function(fun=dnorm) +
stat_function(fun=dt, args=list(df=3), linetype="dashed")
## Plotting a chi-squared curve
Xdata2 <- data.frame(X=c(0,20))
dist2 <- ggplot(Xdata2, aes(x=X))
dist2 + stat_function(fun=dchisq, args=list(df=4))
# An area under a curve can be shaded using the geom_area() function. #
# Within this function, the density of the distribution must be #
# specified along with the x-values to be shaded. #
## Shading the area above 1.5 in the normal distribution
dist1 + stat_function(fun=dnorm) +
geom_area(stat="function", fun=dnorm, xlim=c(1.5,4))
## Shading the area above and below +/- 1.5 in the normal distribution
dist1 + stat_function(fun=dnorm) +
geom_area(stat="function", fun=dnorm, xlim=c(1.5,4)) +
geom_area(stat="function", fun=dnorm, xlim=c(-4,-1.5))
# The cumulative probability function of a distribution can be shown #
# in a graphic by specifying the distribution function beginning #
# with p instead of d. #
## Plotting the cumulative probability function of the normal distribution
dist1 + stat_function(fun=pnorm)
################## ending of 2/20 #######################################
########### objects/packages needed ###########
library(ggplot2)
# OR library(tidyverse)
library(car)
###############################################
##############
# QQ plots #
##############
# A common way to verify that data follows a normal distribution is #
# by creating a QQ plot or normal probability plot. A QQ plot takes #
# the sample data ordered from smallest to largest and compares each #
# value with what it would be if the data had been drawn from a #
# normal distribution. If the sample data is normally distributed, #
# the points will follow the straight reference line in the QQ plot. #
## Create a QQ plot for the waiting times
ggplot(faithful, aes(sample = waiting)) + stat_qq() + stat_qq_line()
######################################
# Modifying the graphic aesthetics #
######################################
# The ggplot2 package has two preset themes for its graphics - grey #
# and black and white. The grey theme is the default. The theme can #
# be changed for specific plots by adding a layer with the theme name. #
## Default grey theme
DavisScatter <- ggplot(Davis, aes(x=weight, y=repwt)) + geom_point()
DavisScatter
## Changing the theme
DavisScatter + theme_bw()
# If you prefer the black and white theme for all of your graphics, #
# you can use the theme_set() function and specify your chosen theme. #
# This function will apply only to the current R session and will #
# reset every time R is restarted. #
DavisScatter
theme_set(theme_bw())
DavisScatter
theme_set(theme_grey())
DavisScatter
# One aesthetic option is to suppress the gridlines. Users can #
# suppress either the horizontal lines, vertical lines, or both. #
DavisScatter + theme(panel.grid.major=element_blank(),
panel.grid.minor=element_blank())
DavisScatter + theme(panel.grid.major.y=element_blank(),
panel.grid.minor.y=element_blank())
DavisScatter + theme(panel.grid.major.x=element_blank(),
panel.grid.minor.x=element_blank())
# For graphics that will be used in a report or presentation, it is a #
# best practice to have a plot title and axis labels that are easily #
# understood. The function labs() can be used to add a title and/or #
# to change the axis labels. #
DavisScatter + labs(title="Weights of individuals who regularly exercise")
DavisScatter + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight")
# The aesthetics of the graphic labels can be changed using the #
# theme() and element_text() functions. #
DavisScatter + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
theme(plot.title=element_text(face="bold", size=20),
axis.title.y=element_text(size=14),
axis.title.x=element_text(size=14))
DavisScatter + labs(title="Weights of individuals who \n regularly exercise",
x="Measured weight", y="Reported weight") +
theme(plot.title=element_text(face="bold", size=20))
DavisScatter + labs(title="Weights of individuals who \nregularly exercise",
x="Measured weight", y="Reported weight") +
theme(plot.title=element_text(face="bold", size=20))
# The axis values themselves can be modified by using axis.text.y= #
# and axis.text.x= in the theme() function. #
# #
# The range of the axes can be changed using the functions xlim(), #
# ylim(), or expand_limits(). #
DavisScatter + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
xlim(0,170) + ylim(0,130)
DavisScatter + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
expand_limits(x=0, y=0)
# When groups are represented separately in a graphic, changes can      #
# be made to the legend. These changes can be differences in #
# aesthetics or a difference in position. #
DavisScatter2 <- ggplot(Davis, aes(x=weight, y=repwt, color=sex)) +
geom_point()
DavisScatter2 + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
labs(color="Biological sex")
DavisScatter2 + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
labs(color="Biological sex") +
scale_color_discrete(labels=c("Female","Male"))
DavisScatter2 + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
labs(color="Biological sex") +
scale_color_discrete(limits=c("M","F"))
################## ending of 2/25 #######################################
########### objects/packages needed ###########
library(ggplot2)
# OR library(tidyverse)
library(car)
DavisScatter2 <- ggplot(Davis, aes(x=weight, y=repwt, color=sex)) +
geom_point()
###############################################
##################################################
# Modifying the graphic aesthetics (continued) #
##################################################
DavisScatter2 + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
theme(legend.position="top")
# The legend can also be moved inside the plot by using the coordinate #
# system. The bottom left begins at 0,0 and the top right is 1,1. #
DavisScatter2 + labs(title="Weights of individuals who regularly exercise",
x="Measured weight", y="Reported weight") +
theme(legend.position=c(1,1), legend.justification=c(1,1))
# If groups are represented in separate plots, the aesthetics of the #
# facet labels can be modified in the theme() function. #
DavisScatter3 <- ggplot(Davis, aes(x=weight, y=repwt)) + geom_point() +
facet_grid(.~sex)
DavisScatter3 + theme(strip.background=element_rect(fill="darkgreen"),
strip.text.x=element_text(size=14, color="white"))
######################################################################### |
ff7b8e558583c33e0b78f3dbb05484767cb1269a | b220e9b4c9db0a1590358b22480ea00beb3fba0f | /R/PATTERN_AV45_ANCOVA.R | 065b737a1bc1327ec79c6e99fac4745d08360b20 | [] | no_license | catfishy/jagust | 9741a74812c15c4c812b7143404ae1092e76e4d7 | 8877d29b340d6489fbb20542dc5d1188ec512531 | refs/heads/master | 2021-04-19T01:21:36.722907 | 2016-11-09T22:54:44 | 2016-11-09T22:54:44 | 37,677,259 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,238 | r | PATTERN_AV45_ANCOVA.R | library(lme4)
library(coefplot2)
library(ggplot2)
library(lmerTest)
library(pbkrtest)
library(multcomp)
library(contrast)
library(xtable)
library(sjPlot)
library(splines)
library(car)
library(stats)
library(gdata)
library(psych)
library(reshape2)
library(piecewiseSEM)
library(LambertW)
library(nnet)
library(DAAG)
library(relaimpo)
library(caret)
library(cvTools)
library(VGAM)
library(lmtest)
library(languageR)
library(stringr)
library(covTest)
library(scales)
source('R/LM_FUNCS.R')
# CONSTANTS
# Analysis configuration: column groupings and the single covariate tested
# in the per-pattern ANCOVA models below. Exactly one `valid_diags` line and
# one `ancova_factors` line should be uncommented per run.
pattern_prefix = 'NSFA_'
# NOTE(review): 'positive_prior'/'positive_post' appear twice in this vector;
# harmless, but could be de-duplicated.
to_factor = c('RID','ad_prior','ad_post','positive_prior','positive_post',
              'diag_prior','diag_post','APOE4_BIN','APOE2_BIN','Gender',
              'Diag.AV45','positive_prior','positive_post',
              'AV45_NONTP_wcereb_BIN1.11')
to_standardize = c('CORTICAL_SUMMARY_prior','Age.AV45','Edu..Yrs.')
demog_columns = c('RID','APOE4_BIN','Diag.AV45','Age.AV45','Gender','Edu..Yrs.')
av45_columns = c('CORTICAL_SUMMARY_prior','AV45_NONTP_wcereb')
output_folder = 'R/output_av45_ancova/'
# valid_diags = c('N','SMC','EMCI','LMCI','AD')
# valid_diags = c('N','SMC')
# valid_diags = c('EMCI')
valid_diags = c('LMCI')
# valid_diags = c('AD')
# valid_diags = c('EMCI','LMCI','AD')
positive_value=1
# ancova_factors = 'Diag.AV45'
# ancova_factors = 'AV45_NONTP_wcereb_BIN1.11'
# ancova_factors = 'Age.AV45'
# ancova_factors = 'APOE4_BIN'
# ancova_factors = 'Gender'
# ancova_factors = 'Edu..Yrs.'
# ancova_factors = 'AVLT_AV45_1'
# ancova_factors = 'ADAS_AV45_1'
# ancova_factors = 'UW_EF_AV45_1'
ancova_factors = 'UW_MEM_AV45_1'
# ancova_factors = 'CSF_ABETA_closest_AV45_1'
# ancova_factors = 'UCB_FS_HC.ICV_AV45_1'
# ancova_factors = 'AVLT_slope_postAV45'
# ancova_factors = 'ADASslope_postAV45'
# ancova_factors = 'UW_MEM_slope'
# ancova_factors = 'UW_EF_slope'
# ancova_factors = 'CORTICAL_SUMMARY_change'
# IMPORT
# Keep only rows that are complete on the demographics, AV45 measures, and
# the chosen ANCOVA covariate.
df_av45 = read.csv('nsfa/av45_pattern_dataset.csv')
non.na = complete.cases(df_av45[,c(demog_columns,av45_columns,ancova_factors)])
df_av45 = df_av45[non.na,]
# Convert the ID/diagnosis/indicator columns to factors.
# The original looped over every column and tested membership per iteration,
# and wrapped the column name in `eval(i)` -- a no-op on a character scalar.
# Iterating over the intersection and using `[[` is equivalent and clearer.
for (i in intersect(names(df_av45), to_factor)) {
  df_av45[[i]] = as.factor(as.character(df_av45[[i]]))
}
# Identify the NSFA pattern columns and "naive" ROI columns by name
# (isPatternColumn/isNaiveColumn come from R/LM_FUNCS.R, sourced above).
pattern_columns = Filter(isPatternColumn,names(df_av45))
naive_columns = Filter(isNaiveColumn,names(df_av45))
# remove target outliers (keep values within +/- 5 SD of the mean)
target.mean = mean(df_av45[,ancova_factors])
target.sd = sd(df_av45[,ancova_factors])
df_av45 = df_av45[df_av45[,ancova_factors] <= target.mean+target.sd*5,]
df_av45 = df_av45[df_av45[,ancova_factors] >= target.mean-target.sd*5,]
# Filter by diag + positivity
df_av45 = df_av45[which(df_av45$Diag.AV45 %in% valid_diags),]
# df_av45 = df_av45[which(df_av45[,'AV45_NONTP_wcereb_BIN1.11'] == positive_value),]
df_av45 = df_av45[which(df_av45[,'positive_prior'] == positive_value),]
# # standardize predictors
# cross_to_standardize = c(to_standardize,pattern_columns,naive_columns)
# cross_normalization = preProcess(df_av45[,cross_to_standardize])
# df_av45[,cross_to_standardize] = predict(cross_normalization, df_av45[,cross_to_standardize])
# make response normal
#df_av45[,eval(target)] = Gaussianize(df_av45[,eval(target)], type='hh', method='MLE', return.u=TRUE)
#all.addons = lapply(pattern_columns,lm.addvar)
#naive.addons = lapply(naive_columns,lm.addvar)
all.addons = paste('+',paste(pattern_columns,collapse=' + '))
naive.addons = paste('+',paste(naive_columns,collapse=' + '))
# One ANCOVA formula per pattern / AV45 / naive column, each regressing that
# column's value on the selected covariate (lm.createformula from LM_FUNCS.R).
pattern_formula = lapply(pattern_columns, function (x) lm.createformula(x,ancova_factors))
av45_formula = lapply(av45_columns, function (x) lm.createformula(x,ancova_factors))
naive_formula = lapply(naive_columns, function (x) lm.createformula(x,ancova_factors))
pvalues = list()
lmformula = list()
all_formula = c(pattern_formula,av45_formula)
all_columns = c(pattern_columns,av45_columns)
# Fit each model and record the overall model (F-test) p-value.
for (i in 1:NROW(all_formula)) {
  pattern_str = all_columns[i]
  formula = as.formula(paste(all_formula[i]))
  print(formula)
  cur.lm = lm(formula,df_av45)
  cur.lm.summary = summary(cur.lm)
  cur.lm.coef = cur.lm.summary$coefficients
  fstat = cur.lm.summary$fstatistic
  cur.lm.pvalue = pf(fstat[1],fstat[2],fstat[3],lower.tail=F)
  pvalues[pattern_str] = cur.lm.pvalue
  # NOTE(review): this indexes pattern_formula even when i exceeds its length
  # (i.e. for the av45 columns); all_formula[i] was probably intended.
  lmformula[pattern_str] = paste(pattern_formula[i])
  cur.lm.anova = Anova(cur.lm,type='III')
  # save.printout(paste(output_folder,pattern_str,'_',ancova_factors,'_ancova_anova','.txt',sep=''),cur.lm.anova)
  # save.printout(paste(output_folder,pattern_str,'_',ancova_factors,'_ancova_summary','.txt',sep=''),cur.lm.summary)
  # lm.plotfn = function() {par(mfrow=c(2,2));plot(cur.lm);title(pattern_str, outer=T, line=-2);}
  # save.plot(paste(output_folder,pattern_str,'_',ancova_factors,'_lmplot.pdf',sep=''), lm.plotfn)
  # save.plot(paste(output_folder,pattern_str,'_',ancova_factors,'_avplot.pdf',sep=''), function() {avPlots(cur.lm, ask=FALSE)})
}
# Bonferroni-correct across all fitted models and report significant hits.
# NOTE(review): pvalues is a list here; confirm p.adjust receives a plain
# numeric vector (unlist(pvalues)) in the installed R version.
pvalues.corrected = p.adjust(pvalues,method='bonferroni')
pvalues.sig = pvalues.corrected[pvalues.corrected < 0.05]
unlist(pvalues)
pvalues.corrected
pvalues.sig
# Quick exploratory plots of pattern scores against the ANCOVA covariate.
plot(df_av45$NSFA_14,df_av45[,ancova_factors])
ggplot(df_av45,aes_string(x='CORTICAL_SUMMARY_prior',y=ancova_factors,color='NSFA_6')) +
  geom_point(size=3) +
  geom_smooth(method='lm') +
  scale_color_gradient2(limit=c(-2,2), low='#22FF00',mid='white',high='#FF0000',oob=squish) +
  theme(panel.grid=element_blank(),
        panel.background=element_rect(fill='black'))
# Patterns highlighted in the per-group violin/scatter figures below.
to_graph = c('NSFA_6','NSFA_8','NSFA_0')
# Diagnosis graphs
for (pcol in to_graph) {
  p = ggplot(df_av45, aes_string('Diag.AV45',pcol)) +
    geom_violin(trim=TRUE,aes(fill=Diag.AV45),show.legend=FALSE) +
    coord_flip() +
    theme(axis.title.x=element_text(size=20),
          axis.title.y=element_text(size=20),
          axis.text.x=element_text(size=14),
          axis.text.y=element_text(size=14),
          legend.title=element_text(size=14)) +
    ylab(paste(pcol,'Score')) +
    xlab('Diagnosis')
  print(p)
}
# Same figure for the cortical-summary SUVR as a reference panel.
p2 = ggplot(df_av45, aes_string('Diag.AV45','CORTICAL_SUMMARY_prior')) +
  geom_violin(trim=TRUE,aes(fill=Diag.AV45),show.legend=FALSE) +
  coord_flip() +
  theme(axis.title.x=element_text(size=20),
        axis.title.y=element_text(size=20),
        axis.text.x=element_text(size=14),
        axis.text.y=element_text(size=14),
        legend.title=element_text(size=14)) +
  ylab('CS SUVR (whole cereb. ref., PVC)') +
  xlab('Diagnosis')
print(p2)
# Age graphs
for (pcol in to_graph) {
  p = ggplot(df_av45, aes_string(x='Age.AV45',y=pcol)) +
    geom_point(aes_string(color='Diag.AV45'),show.legend=FALSE) +
    geom_smooth(method='lm') +
    coord_flip() +
    theme(axis.title.x=element_text(size=20),
          axis.title.y=element_text(size=20),
          axis.text.x=element_text(size=14),
          axis.text.y=element_text(size=14)) +
    ylab(paste(pcol,'Score')) +
    xlab('Age')
  print(p)
}
p2 = ggplot(df_av45, aes_string(x='Age.AV45',y='CORTICAL_SUMMARY_prior')) +
  geom_point(aes_string(color='Diag.AV45'),show.legend=FALSE) +
  geom_smooth(method='lm') +
  coord_flip() +
  theme(axis.title.x=element_text(size=20),
        axis.title.y=element_text(size=20),
        axis.text.x=element_text(size=14),
        axis.text.y=element_text(size=14)) +
  ylab('CS SUVR (whole cereb. ref., PVC)') +
  xlab('Age')
print(p2)
# APOE4 graphs
for (pcol in to_graph) {
  p = ggplot(df_av45, aes_string('APOE4_BIN',pcol)) +
    geom_violin(trim=TRUE,aes(fill=APOE4_BIN),show.legend=FALSE) +
    coord_flip() +
    theme(axis.title.x=element_text(size=20),
          axis.title.y=element_text(size=20),
          axis.text.x=element_text(size=14),
          axis.text.y=element_text(size=14),
          legend.title=element_text(size=14)) +
    ylab(paste(pcol,'Score')) +
    xlab(expression(paste('APOE ',epsilon,'4',collapse='')))
  print(p)
}
p2 = ggplot(df_av45, aes_string('APOE4_BIN','CORTICAL_SUMMARY_prior')) +
  geom_violin(trim=TRUE,aes(fill=APOE4_BIN),show.legend=FALSE) +
  coord_flip() +
  theme(axis.title.x=element_text(size=20),
        axis.title.y=element_text(size=20),
        axis.text.x=element_text(size=14),
        axis.text.y=element_text(size=14),
        legend.title=element_text(size=14)) +
  ylab('CS SUVR (whole cereb. ref., PVC)') +
  xlab(expression(paste('APOE ',epsilon,'4',collapse='')))
print(p2)
# AV45+ graphs
for (pcol in to_graph) {
  p = ggplot(df_av45, aes_string('AV45_NONTP_wcereb_BIN1.11',pcol)) +
    geom_violin(trim=TRUE,aes(fill=AV45_NONTP_wcereb_BIN1.11),show.legend=FALSE) +
    coord_flip() +
    theme(axis.title.x=element_text(size=20),
          axis.title.y=element_text(size=20),
          axis.text.x=element_text(size=14),
          axis.text.y=element_text(size=14),
          legend.title=element_text(size=14)) +
    ylab(paste(pcol,'Score')) +
    xlab('AV45 SUVR Pos.')
  print(p)
}
p2 = ggplot(df_av45, aes_string('AV45_NONTP_wcereb_BIN1.11','CORTICAL_SUMMARY_prior')) +
  geom_violin(trim=TRUE,aes(fill=AV45_NONTP_wcereb_BIN1.11),show.legend=FALSE) +
  coord_flip() +
  theme(axis.title.x=element_text(size=20),
        axis.title.y=element_text(size=20),
        axis.text.x=element_text(size=14),
        axis.text.y=element_text(size=14),
        legend.title=element_text(size=14)) +
  ylab('CS SUVR (whole cereb. ref., PVC)') +
  xlab('AV45 SUVR Pos.')
print(p2)
|
5fae99e501026c9f788b6814fc10ac600fa48eea | ee4acf485a781d373ae522961713af361464de22 | /R/mod.cd4.R | 1dd78774a6d1fe3d9f6052a83032c542b61c3048 | [] | no_license | dth2/EpiModelHIV_SHAMP | a1f977517a356203b056662be18b3409a3b36d8d | 2dd924d10be81a6aa6d1300dfd60471b076973e8 | refs/heads/master | 2021-01-12T17:32:16.088650 | 2017-09-19T16:05:35 | 2017-09-19T16:05:35 | 71,595,514 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,395 | r | mod.cd4.R |
#' @title CD4 Progression Module
#'
#' @description Module function for simulating progression of CD4 in natural
#' disease dynamics and in the presence of ART.
#'
#' @inheritParams aging_het
#'
#' @keywords module het
#'
#' @export
#'
cd4_het <- function(dat, at) {
  # Module function: advances every infected node's CD4 count one time step.
  # `dat` is the network-model state object (attributes in dat$attr, model
  # parameters in dat$param); `at` is the current time step. Returns `dat`
  # with dat$attr$cd4Count updated.
  status <- dat$attr$status
  time.unit <- dat$param$time.unit
  # Initialize the cd4Count attribute on first call (NA for everyone).
  if (is.null(dat$attr$cd4Count)) {
    dat$attr$cd4Count <- rep(NA, length(status))
  }
  cd4Count <- dat$attr$cd4Count
  # Assign CD4 for newly infected -------------------------------------------
  # Infected nodes with no CD4 yet get an expected count from the natural-
  # history model (see expected_cd4, method = "assign").
  idsAsn <- which(status == 1 & is.na(cd4Count))
  if (length(idsAsn) > 0) {
    cd4Count[idsAsn] <- expected_cd4(method = "assign",
                                     male = dat$attr$male[idsAsn],
                                     age = dat$attr$age[idsAsn],
                                     ageInf = dat$attr$ageInf[idsAsn],
                                     time.unit = time.unit)
  }
  # CD4 natural decline -----------------------------------------------------
  # Untreated, previously infected nodes decline by one step; the newly
  # assigned ids are excluded so they are not updated twice this step.
  txStartTime <- dat$attr$txStartTime
  infTime <- dat$attr$infTime
  idsUpd <- which(status == 1 & infTime < at & is.na(txStartTime))
  idsUpd <- setdiff(idsUpd, idsAsn)
  if (length(idsUpd) > 0) {
    cd4Count[idsUpd] <- expected_cd4(method = "update",
                                     cd4Count1 = cd4Count[idsUpd],
                                     male = dat$attr$male[idsUpd],
                                     age = dat$attr$age[idsUpd],
                                     ageInf = dat$attr$ageInf[idsUpd],
                                     time.unit = time.unit)
  }
  # CD4 increase with ART ---------------------------------------------------
  # Nodes currently on treatment recover at a sex-specific linear rate,
  # capped at the expected CD4 of a newly infected 25-year-old.
  male <- dat$attr$male
  txStat <- dat$attr$txStat
  tx.cd4.recrat.feml <- dat$param$tx.cd4.recrat.feml
  tx.cd4.recrat.male <- dat$param$tx.cd4.recrat.male
  idsTxFeml <- which(status == 1 & male == 0 & txStat == 1)
  idsTxMale <- which(status == 1 & male == 1 & txStat == 1)
  if (length(idsTxFeml) > 0) {
    cd4Cap <- expected_cd4(method = "assign", male = 0, age = 25, ageInf = 25)
    cd4Count[idsTxFeml] <- pmin(cd4Count[idsTxFeml] + tx.cd4.recrat.feml, cd4Cap)
  }
  if (length(idsTxMale) > 0) {
    cd4Cap <- expected_cd4(method = "assign", male = 1, age = 25, ageInf = 25)
    cd4Count[idsTxMale] <- pmin(cd4Count[idsTxMale] + tx.cd4.recrat.male, cd4Cap)
  }
  # CD4 decline post ART ----------------------------------------------------
  # Nodes who started treatment but are currently off it decline at a
  # sex-specific rate, floored at 0.
  tx.cd4.decrat.feml <- dat$param$tx.cd4.decrat.feml
  tx.cd4.decrat.male <- dat$param$tx.cd4.decrat.male
  idsNoTxFeml <- which(status == 1 & male == 0 &
                         !is.na(txStartTime) & txStat == 0)
  idsNoTxMale <- which(status == 1 & male == 1 &
                         !is.na(txStartTime) & txStat == 0)
  if (length(idsNoTxFeml) > 0) {
    cd4Count[idsNoTxFeml] <- pmax(cd4Count[idsNoTxFeml] - tx.cd4.decrat.feml, 0)
  }
  if (length(idsNoTxMale) > 0) {
    cd4Count[idsNoTxMale] <- pmax(cd4Count[idsNoTxMale] - tx.cd4.decrat.male, 0)
  }
  # Sanity check: every infected node must now have a CD4 count.
  if (any(is.na(cd4Count[status == 1]))) {
    stop("NA in cd4Count among infected")
  }
  dat$attr$cd4Count <- cd4Count
  return(dat)
}
expected_cd4 <- function(method, cd4Count1, cd4Count2,
                         male, age, ageInf,
                         at, time.unit = 7) {
  # Natural-history CD4 model on the sqrt(CD4) scale.
  #
  # method:    "assign" -- expected CD4 given time since infection;
  #            "update" -- decline cd4Count1 by one time step;
  #            "timeto" -- time (in steps) from infection to cd4Count1, or
  #                        between cd4Count1 and cd4Count2 if both supplied.
  # male:      0 = female, 1 = male (selects the sqrt-scale intercept).
  # age/ageInf: current age and age at infection (years).
  # time.unit: days per model time step (default weekly).
  # `at` is accepted for interface compatibility but not used here.

  # Elapsed time since infection, expressed in model time steps.
  steps_infected <- (age - ageInf) * (365 / time.unit)
  # Age-at-infection category: [0,30), [30,40), [40,50), [50,Inf)
  age_cat <- cut(ageInf, breaks = c(0, 30, 40, 50, Inf),
                 labels = FALSE, right = FALSE)

  # Sex-specific intercepts on the sqrt(CD4) scale: female = male + 1.11.
  sqrt_base <- c((23.53 - 0.76) + 1.11, 23.53 - 0.76)[male + 1]

  # Yearly sqrt(CD4) decline, steeper for older ages at infection,
  # rescaled to a per-time-step decline.
  yearly_decline <- (-1.49 + 0.34) - c(0, 0.10, 0.34, 0.63)
  step_decline <- yearly_decline[age_cat] * (time.unit / 365)

  if (method == "timeto") {
    # Invert the linear sqrt(CD4) trajectory to get time since infection.
    steps_to <- function(count) (sqrt(count) - sqrt_base) / step_decline
    if (missing(cd4Count2)) {
      return(steps_to(cd4Count1))
    }
    return(steps_to(cd4Count2) - steps_to(cd4Count1))
  }

  if (method == "assign") {
    # Expected sqrt(CD4), floored at 1 so the count never hits zero here.
    sqrt_count <- pmax(1, sqrt_base + step_decline * steps_infected)
  }
  if (method == "update") {
    # One step of decline; values that fall below sqrt(CD4) = 1 drop to 0.
    sqrt_count <- sqrt(cd4Count1) + step_decline
    sqrt_count[sqrt_count < 1] <- 0
  }
  sqrt_count ^ 2
}
|
f716ce728379ef30d9a55aa8b711b0419ac72154 | 399f173e7c80d13e5bdfce8d8c3d27d1d26dad1c | /1062_class/Multivariate_Analysis/HW4_classification/classify_2_linear.R | 7a90deb75fa84bd9df18bbb5273729cf7342dd76 | [] | no_license | Kazeumi/local_depend | 98032d9ac1d05a664efe70793101acf6cd9f59a5 | 30a398836652f1cab34ef69d059e386d5f3e626e | refs/heads/master | 2022-04-18T09:44:46.861423 | 2020-04-11T05:48:51 | 2020-04-11T05:48:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,045 | r | classify_2_linear.R | hemo <- readr::read_table2("T11-8.DAT", col_names = FALSE,
col_types = cols(X1 = col_character())
)
###### APER (lda) #####
library(MASS)
lda <- lda(formula = group ~ activity + antigen,
prior=c(1,1)/2, data = hemo)
lda_prdict <- predict(lda)$class
confus_mt <- table(hemo$group, lda_prdict, dnn = c('Actual Group','Predicted Group'))
(confus_mt[1,2] + confus_mt[2,1])/sum(confus_mt)
####### Holdout Procedure(lda) ######
lda_hold <- lda(formula = group ~ activity + antigen,
CV=TRUE, prior=c(1,1)/2, data = hemo)
confus_mt_hold <- table(hemo$group, lda_hold$class,
dnn = c('Actual Group','Predicted Group'))
## Error Rate: Exp_AER
(confus_mt_hold[1,2] + confus_mt_hold[2,1])/sum(confus_mt_hold)
######## Holdout Procedure (Manual) ######
data <- hemo
n1_m <- 0 # holdouts misclassified as 2 in n1
n2_m <- 0 # holdouts misclassified as 2 in n1
pre_group_vec <- vector(mode = "character",length = nrow(data))
for (i in 1:nrow(data)){
holdout <- data[i,]
data <- data[-i,]
library(MASS)
r2 <- lda(formula = group ~ activity + antigen,
data = data)
predicted_group <- predict(r2, newdata = holdout)$class
pre_group_vec[i] <- predicted_group
if (predicted_group != holdout$group) {
if (holdout$group == "1") n1_m = n1_m + 1
else n2_m = n2_m + 1
}
data <- hemo
}
confu_mt <- table(data$group, pre_group_vec,
dnn = c('Actual Group','Predicted Group'))
Exp_AER <- (n1_m + n2_m)/nrow(data)
####### Calculate discriminant fc (Pure manual) #######
x1_bar <- hemo[hemo$group == "1",2:3] %>% colMeans()
x2_bar <- hemo[hemo$group == "2",2:3] %>% colMeans()
S1 <- hemo[hemo$group == "1", 2:3] %>% cov()
S2 <- hemo[hemo$group == "2", 2:3] %>% cov()
n1 <- hemo[hemo$group == "1",] %>% nrow
n2 <- hemo[hemo$group == "2",] %>% nrow
S_pool <- ((n1 - 1)*S1 + (n2 - 1)*S2)/(n1 + n2 - 2)
a <- solve(S_pool) %*% (x1_bar - x2_bar)
cutoff <- .5 * (x1_bar - x2_bar) %*% solve(S_pool) %*% (x1_bar + x2_bar) |
2137e536bbd8a62130b273ae4869097aa36f23ca | 9343c764ef915aa45b648351568df1f267690f3b | /bar plot.R | f3029829089e73f02e271072601c3e28dbaaa0d9 | [] | no_license | GraceNS/Women_In_Data | 2ec6549d1a547aa850c71f8304500ef2948dfe7e | ead88f389b8cafdf04747b1f3cb858bc212ef539 | refs/heads/main | 2023-03-28T18:58:06.428607 | 2021-03-25T17:44:10 | 2021-03-25T17:44:10 | 328,070,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 177 | r | bar plot.R | library(ggplot2)
# Minimal bar-chart example: five named categories with fixed heights.
data <- data.frame(
  name=c("A", "B", "C", "D", "E"),
  value=c(5,2,7,2,3)
)
# stat = "identity" plots the supplied y values directly instead of counts.
ggplot(data, aes(x=name, y=value)) +
  geom_bar(stat = "identity" )
|
3cace6dbb118ba66a240ac41aca9040167a5426e | cb29054a5a45a6ff3de2d9611e71b4c6e6d24878 | /MEY1.R | 42c118fa948f7df8e23e27c41f8adbf045c1cd4c | [] | no_license | christophercostello/MEY-Code | 852ee7ca336e7ef077719de643ec3eb6c97bc589 | a530a46972866dd199fe1389662eca0e05443574 | refs/heads/master | 2021-01-01T05:17:07.361798 | 2016-04-26T00:41:24 | 2016-04-26T00:41:24 | 57,061,299 | 0 | 2 | null | 2016-04-26T00:17:11 | 2016-04-25T17:21:07 | R | UTF-8 | R | false | false | 2,665 | r | MEY1.R | rm(list=ls())
library(pracma)
library(ggplot2)
library(dplyr)
library(tidyr)
library(grid)
library(gridExtra)
#RAMdata = read.csv('../Data/RAM Stock Parameters.csv',stringsAsFactors=F)
# Prep Data ---------------------------------------------------------------
# Load the policy/parameter data frame (object `for_chris` inside the .Rdata)
# and rename it; one row per fishery-biomass combination.
load('Data/Policies and Params for Chris.Rdata')
DANdata = for_chris
rm(for_chris)
# DANdata = read.csv('Data/Policies and Params for Chris.csv',stringsAsFactors=F)
# ggKobe(DANdata)
# Unique fishery identifiers; the optimisation loop below runs once per id.
Fisheries = unique(DANdata$IdOrig)
# Placeholder only -- MEYdata is rebuilt from the result vectors after the loop.
MEYdata = data.frame(matrix(NA,nrow=length(Fisheries),ncol=7))
# Preallocate one slot per fishery instead of growing zero-length vectors
# element-by-element inside the loop (which reallocates on every assignment).
current_b = numeric(length(Fisheries))      # current B/Bmsy
current_f = numeric(length(Fisheries))      # current F/Fmsy
current_b_mey = numeric(length(Fisheries))  # current B/Bmey
current_f_mey = numeric(length(Fisheries))  # current F/Fmey
b_mey = numeric(length(Fisheries))          # Bmey/Bmsy
f_mey = numeric(length(Fisheries))          # Fmey/Fmsy
grow_disc = numeric(length(Fisheries))      # ((phi+1)/phi)*g; not used downstream
# Dynamic Optim Functions -------------------------------------------------
# Difference between two curves, each interpolated at `b` with a natural
# cubic spline over the shared grid `bgrid`. Root-finding on this function
# finds b* such that f0(b*) = f1(b*) (used below to solve for Bmey).
DiffF = function(b, bgrid, f0, f1) {
  interp_at_b <- function(yvals) {
    spline(bgrid, yvals, xout = b, method = "natural")$y
  }
  interp_at_b(f0) - interp_at_b(f1)
}
# Run Dynamic Optim -------------------------------------------------------
# For each fishery: interpolate the optimal-policy curve, intersect it with
# the open-access fishing-mortality curve (fzero on DiffF), and record the
# implied MEY reference points relative to MSY.
# NOTE(review): seq(1,length(Fisheries),1) would iterate c(1,0) if Fisheries
# were empty; seq_along(Fisheries) is the safe idiom.
for (i in seq(1,length(Fisheries),1))
{
  print(i)
  DANsub=subset(DANdata,DANdata$IdOrig==Fisheries[i])
  bvec = DANsub$b
  fvec = DANsub$Opt
  phi = DANsub$phi[1]
  g = DANsub$g[1]
  # Default Pella-Tomlinson shape parameter when missing
  if (is.na(phi))
  {phi=.188}
  fpt = ((phi+1)/phi)*(1 - bvec^phi/(phi+1))
  # Solve DiffF(b*) = 0 starting from b = 1.2
  bmey = fzero(DiffF,1.2,bgrid=bvec,f0=fvec,f1=fpt)
  fmey = ((phi+1)/phi)*(1 - bmey$x^phi/(phi+1))
  current_b[i] = DANsub$BvBmsy[1] #current B/Bmsy
  current_f[i] = DANsub$FvFmsy[1] #current F/Fmsy
  b_mey[i] = bmey$x #Bmey/Bmsy
  f_mey[i] = fmey #Fmey/Fmsy
  current_b_mey[i] = DANsub$BvBmsy[1]/bmey$x #B/Bmey
  current_f_mey[i] = DANsub$FvFmsy[1]/fmey #F/Fmey
  grow_disc[i] = ((phi+1)/phi)*g
}
# Assemble results and write to disk.
MEYdata = data.frame(IdOrig = Fisheries,current_b,current_f,
                     b_mey,f_mey,current_b_mey, current_f_mey)
write.csv(MEYdata,file='MEY_results.csv')
# Join MEY reference points back onto fishery metadata for plotting.
kobe_dat <- DANdata %>%
  select(IdOrig, Dbase, SciName, CommName, MSY, SpeciesCat, SpeciesCatName, BvBmsy, FvFmsy) %>%
  unique() %>%
  left_join(MEYdata,by = c('IdOrig'))
head(kobe_dat)
# Make Kobe Plot ----------------------------------------------------------
source("MEY_Functions/ggKobe.R")
kobe_mey <- ggKobe(kobe_dat, xvar = 'current_b_mey', yvar = 'current_f_mey' ) +
  labs(x = 'B/Bmey', y = 'F/Fmey')
ggsave('MEY Kobe.pdf', kobe_mey)
kobe_msy <- ggKobe(kobe_dat, xvar = 'current_b', yvar = 'current_f' ) +
  labs(x = 'B/Bmsy', y = 'F/Fmsy')
ggsave('MSY Kobe.pdf', kobe_msy)
# Stack the two panels into a single comparison figure.
kobes <- arrangeGrob(kobe_msy, kobe_mey)
grid.draw(kobes)
ggsave('Kobe Comparison.pdf', kobes)
# Match to NEIs
# NOTE(review): the two assignments below look like leftover scratch values.
thing=7
thing2=9
|
da36ad4692483c17102da198d5609073166a49b9 | e6c393e77b1388bb2510bae54e7d8d3e91aeb867 | /08_RDA.R | 96f1d81da5f73d3235058fb6e172e751f73e3cf5 | [] | no_license | mdellh2o/ETH_barley | 6cce24c878a7be58324dc7d0e52d4da9b8f51e3e | 6e7b2c82220ae829742ee375a90d585f1f985432 | refs/heads/main | 2023-04-17T15:56:13.200681 | 2022-10-10T13:15:32 | 2022-10-10T13:15:32 | 316,163,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,014 | r | 08_RDA.R | #===============================================================================
# author: Leonardo Caproni
# date: 08/2022
#-------------------------------------------------------------------------------
# Reference: DOI
#-------------------------------------------------------------------------------
# Description: partial Redudancy Analysis (pRDA). Variance partitionig and
# outlier detection. Adapted from Capblancq and Forester 2021
# DOI: https://doi.org/10.1111/2041-210X.13722
#===============================================================================
#PRELIMINARY
# NOTE(review): setwd() and rm(list=ls()) in a shared script are fragile --
# rm(list=ls()) here also deletes `wd` itself and any objects from the
# calling session; consider an RStudio project / here::here() instead.
wd<-"~/barley/"
setwd(wd)
rm(list=ls())
options(stringsAsFactors = F)
# load libraries
library(pegas)
library(ggplot2)
library(RColorBrewer)
library(ggpubr)
library(vegan)
library(qvalue)
library(robust)
library(cowplot)
library(corrplot)
library(rgeos)
library(stringr)
library(CMplot)
#load environmental data
load(file = "bioclim.barley.383.Rdata") # load env
passport <- read.delim(file = "output/out.RES.11clust.txt")
hulls <- read.delim(file="output/barley.grain.hulls.txt")
pheno <- read.delim(file = "output/phenotypes.OK.txt", header = T)
#gen <- read.table("input/snp.barley.forGF", header = T, row.names = 1)
gen <- read.table("input/snp.barley.pruned.forGF", header = T, row.names = 1)
# start with geno data, check missingness
perc.miss <- sum(is.na(gen))/(nrow(gen)*ncol(gen)) * 100
perc.miss
# simple imputation
# Replace each SNP's missing calls with that column's most frequent genotype.
gen.imp <- apply(gen, 2, function(x) replace(x, is.na(x), as.numeric(names(which.max(table(x))))))
gen.imp <- as.data.frame(gen.imp)
#if you want to add row type
passport <- merge(passport, hulls, by="ID")
env<- merge(env, passport [, c(1:3,14,4:7,10:13)], by="ID")
#env <-na.omit(env)
# Rename the genetic principal-component columns for use in formulas below.
colnames(env) [32:34] <- c("PC1","PC2","PC3")
# need 239 geno and env data, same order
# Subset genotypes to the accessions present in env and match their order.
ord<-env[,1]
gen.imp<-subset(gen.imp, rownames(gen.imp) %in% ord)
gen.imp<-gen.imp[order(match(rownames(gen.imp), ord)), , drop = FALSE]
# confirm that genotypes and environmental data are in the same order
identical(rownames(gen.imp), env[,1])
# extract geo features
coord <- env[,c("LON","LAT")]
coord <-na.omit(coord)
pcnm <- pcnm(dist(coord)) #this generates the PCNMs, you could stop here if you want all of them
# Keep the first half of the positive-eigenvalue PCNM axes.
keep <- ceiling(length(which(pcnm$value > 0))/2)
# subset just non-colinear environmental predictors + altitude
non.collinear <-c ("bio4", "bio2", "bio15", "bio3","bio12", "bio19", "bio18", "bio14", "bio9") # as from VIF
# prepare data for RDA
pred <- env[, c(non.collinear)]
pred.alt <- env[, c(non.collinear,"altitude")]
row.type <- as.factor(env[,c("row.type")])
grain.hull <- as.factor(env[,c("grain.hulls")])
regions <- as.factor(env[,"region"])
aezs <- as.factor(env[,"AEZ31"])
dapc.clust <- as.factor(env[,"Cluster"])
geo <- scores(pcnm)[,1:keep]
geo.red <- scores(pcnm)[,1:10]
pred.pcagen <- env[, c(non.collinear, "PC1","PC2","PC3")]
row.hull <-cbind(row.type, grain.hull)
geo.regions <- cbind(regions, aezs,geo.red)
# extract phenology data
phenotypes <- subset(pheno, pheno$ID %in% ord)
phenotypes <- phenotypes[, 11:13]
# also extract genetic PCs
gen.pcs <- env[,32:34]
# save and load data
rm(gen)
save.image(file="output/RDA.str.metadata.1.Rdata")
###############################################
# Redundancy Analysis
###############################################
# load metadata prepared above
load(file="output/RDA.str.metadata.1.Rdata")
# should try with pruned
# Clean SNP names (e.g. "XH3_123" read.table artifacts -> "H_3_123").
# NOTE(review): gsub with plain "X"/"H" patterns replaces EVERY occurrence
# anywhere in a name, not only a leading prefix -- confirm against the
# marker naming scheme (anchored patterns like "^X" would be safer).
names(gen.imp) <- gsub("X", "", names(gen.imp))
names(gen.imp) <- gsub("H", "H_", names(gen.imp))
## Environmental RDA conditioned on neutral population structure (PC1-PC3)
RDA_env <- rda(gen.imp ~ bio4 + bio2 + bio15 + bio3 + bio12 + bio19 + bio18 + bio14 + bio9 + Condition(PC1 + PC2 + PC3), env)
RDA_env
RsquareAdj(RDA_env)
screeplot(RDA_env, main="Eigenvalues of constrained axes")
## rdadapt: RDA-based genome scan (adapted from Capblancq & Forester 2021).
## Scales the locus loadings on the first K constrained axes, computes a
## robust Mahalanobis distance per locus, rescales by a genomic inflation
## factor, and returns chi-square p-values plus Storey q-values.
rdadapt <- function(rda, K) {
  loadings <- rda$CCA$v[, 1:as.numeric(K)]
  scaled_loadings <- apply(loadings, 2, scale)
  # Robust Mahalanobis distance of each locus in loading space
  maha <- covRob(scaled_loadings, distance = TRUE, na.action = na.omit,
                 estim = "pairwiseGK")$dist
  # Genomic inflation factor: observed median vs. chi-square(K) median
  gif <- median(maha) / qchisq(0.5, df = K)
  p_values <- pchisq(maha / gif, K, lower.tail = FALSE)
  q_values <- qvalue(p_values)$qvalues
  data.frame(p.values = p_values, q.values = q_values)
}
# use RDA adapt
rdadapt_env<-rdadapt(RDA_env, 2)
## P-values threshold after Bonferroni correction
thres_env <- 0.05/length(rdadapt_env$p.values)
-log10(thres_env)
# Loci below the Bonferroni threshold, with chromosome parsed from the name.
outliers <- data.frame(Loci = colnames(gen.imp)[which(rdadapt_env$p.values<thres_env)], p.value = rdadapt_env$p.values[which(rdadapt_env$p.values<thres_env)], chr = unlist(lapply(strsplit(colnames(gen.imp)[which(rdadapt_env$p.values<thres_env)], split = "_"), function(x) x[1])))
outliers <- outliers[order(outliers$chr, outliers$p.value),]
## List of outlier names (top hit per chromosome)
outliers_rdadapt_env <- as.character(outliers$Loci[!duplicated(outliers$chr)])
Outliers <- rep("Neutral", length(colnames(gen.imp)))
Outliers[colnames(gen.imp)%in%outliers$Loci] <- "Outliers"
TAB_manhatan <- data.frame(pos = 1:length(colnames(gen.imp)),
                           pvalues = rdadapt_env$p.values,
                           Outliers = Outliers)
# make a Manhattan plot
# CMplot expects columns SNP, CHR, POS, then one p-value column per trait.
name.loc<- colnames(gen.imp)
m.plot <- cbind(name.loc, TAB_manhatan)
m.plot[c('CHR', 'POS')] <- str_split_fixed(m.plot$name.loc, '_', 2)
names(m.plot)
m.plot <- m.plot[,c(1,5,6,3)]
colnames(m.plot)[1]<-"SNP"
colnames(m.plot)[4]<-"RDA_env"
CMplot(m.plot,
       plot.type='m', band=1,
       cex = 0.6,
       col=c("grey30","grey60"),
       threshold= thres_env,
       threshold.col=c("darkgreen","darkred"),
       signal.line=1,
       signal.col="red",
       amplify=T, signal.cex = 0.8, signal.pch = 19,
       ylim=NULL, LOG10 = TRUE,
       width=12,height=4,
       file = "pdf",
       memo="full")
# Export the outlier table (SNP, CHR, POS, p-value).
outliers[c('CHR', 'POS')] <- str_split_fixed(outliers$Loci, '_', 2)
colnames(outliers) [1]<- "SNP"
outliers <- outliers[,c(1,4,5,2)]
write.table(outliers, file="output/RDA.outliers.pruned.txt", sep = '\t', quote = F, row.names = F)
## Null model
RDA0 <- rda(gen.imp ~ 1, env[,c(2:22,32:34)])
#RDA full model
RDAfull <- rda(gen.imp ~ PC1 + PC2 + PC3 + LON + LAT + bio1 +
                 bio2 + bio3 + bio4 + bio5 + bio6 + bio7 +
                 bio8 + bio9 + bio10 + bio11 + bio12 + bio13 +
                 bio14 + bio15 + bio16 + bio17 + bio19, env)
# Forward selection of predictors between the null and full models.
mod <- ordiR2step(RDA0, RDAfull, Pin = 0.01, R2permutations = 1000, R2scope = T)
# Variance Partitioning: Partial RDA
## Full model
pRDAfull <- rda(gen.imp ~ PC1 + PC2 + PC3 + LON + LAT + bio4 + bio2 + bio15 + bio3 + bio12 + bio19 + bio18 + bio14 + bio9, env)
pRDAfull
RsquareAdj(pRDAfull)
aov.full <- anova(pRDAfull)
## Pure climate model
pRDAclim <- rda(gen.imp ~ bio4 + bio2 + bio15 + bio3 + bio12 + bio19 + bio18 + bio14 + bio9 + Condition(LON + LAT + PC1 + PC2 + PC3), env)
pRDAclim
RsquareAdj(pRDAclim)
aov.clim <- anova(pRDAclim)
##Pure geography model
pRDAgeog <- rda(gen.imp ~ LON + LAT + Condition(bio4 + bio2 + bio15 + bio3 + bio12 + bio19 + bio18 + bio14 + bio9 + PC1 + PC2 + PC3), env)
pRDAgeog
RsquareAdj(pRDAgeog)
aov.geog <- anova(pRDAgeog)
## Pure neutral population structure model
pRDAstruct <- rda(gen.imp ~ PC1 + PC2 + PC3 + Condition(LON + LAT + bio4 + bio2 + bio15 + bio3 + bio12 + bio19 + bio18 + bio14 + bio9), env)
RsquareAdj(pRDAstruct)
aov.struct <- anova(pRDAstruct)
## Pure geno
pRDAgeno <- rda(gen.imp ~ PC1 + PC2 + PC3 , env)
pRDAgeno
RsquareAdj(pRDAgeno)
aov.geno <- anova(pRDAgeno)
## Environment model conditioned on structure only (refit of RDA_env above)
RDA_env <- rda(gen.imp ~ bio4 + bio2 + bio15 + bio3 + bio12 + bio19 + bio18 + bio14 + bio9 + Condition(PC1 + PC2 + PC3), env)
RDA_env
RsquareAdj(RDA_env)
aov.env <- anova(RDA_env)
save.image(file="output/RDA.results.Rdata")
3555c9568c8bc7b4a7b20a810a85643bbc3bb591 | afd40674ba810c44a1a54cfbb53c0329bb04f4e6 | /man/random_token_generator.Rd | 2ff2606e2684bbd653cb03dc5496044c435f21cc | [
"MIT"
] | permissive | tgstoecker/A2TEA.WebApp | 1be42ff5b6a9ee2d9a9940cf889fdf12e8e542fb | 8272860d4fadedd92dfcd8fddbd8737cb4fdb7a7 | refs/heads/master | 2023-07-07T20:50:58.722386 | 2023-06-25T09:40:17 | 2023-06-25T09:40:17 | 525,064,928 | 1 | 0 | NOASSERTION | 2023-03-19T14:49:19 | 2022-08-15T16:47:31 | R | UTF-8 | R | false | true | 333 | rd | random_token_generator.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/global.R
\name{random_token_generator}
\alias{random_token_generator}
\title{Create a random token}
\usage{
random_token_generator(n = 5000)
}
\arguments{
\item{n}{sample size}
}
\value{
New random token is created
}
\description{
Create a random token
}
|
473378a84b750b2001e86dc35757ba56e17e8b1e | 4ceaa85dee194e818f41bb4da47fe41fd7dbfcf9 | /inst/jags/GLM0.R | 17a34ed8e73f54ded331c9e012c1a79928793533 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | RushingLab/WILD6900 | 6ee242e491817f065f6374e908aced3bfb0ac3dc | 6543c0c4e4108becf65f6577b8d4b862afdadf49 | refs/heads/master | 2021-06-12T18:45:29.365473 | 2021-03-02T20:13:28 | 2021-03-02T20:13:28 | 156,604,120 | 13 | 3 | NOASSERTION | 2021-01-05T19:35:21 | 2018-11-07T20:27:52 | HTML | UTF-8 | R | false | false | 326 | r | GLM0.R | sink("inst/jags/GLM0.jags")
# Write the JAGS model text into the file opened by sink() above
# (inst/jags/GLM0.jags): an intercept-only Poisson GLM where every
# site-by-year count shares one mean on the log scale.
# The quoted text below is emitted verbatim to the model file -- do not
# edit it as if it were R code.
cat("
    model {
    # Prior
    alpha ~ dnorm(0, 0.01)        # log(mean count)
    # Likelihood
    for (i in 1:nyear){
      for (j in 1:nsite){
        C[i,j] ~ dpois(lambda[i,j])
        lambda[i,j] <- exp(log.lambda[i,j])
        log.lambda[i,j] <- alpha
      } #j
    } #i
    }
    ",fill = TRUE)
# Close the sink so output returns to the console.
sink()
|
9a80b77c22ce74be9e4967220f904f5d8880d965 | e56783d554c86165323a2ae3e29f239b4230fd92 | /models/finwhales/man/finwhales.Rd | b25f173eccf4193f531276064b2cecf77b19c77f | [
"CC0-1.0"
] | permissive | ec-ecopotential/Pelagos | 527257fa1414a17e047c67e7701d1d88cdc8700d | 58556420cc4748ffa4c6ad94f03aab14751e6ac5 | refs/heads/master | 2020-12-25T06:15:05.730428 | 2019-07-12T14:51:49 | 2019-07-12T14:51:49 | 62,131,092 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 285 | rd | finwhales.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/finwhales.R
\docType{package}
\name{finwhales}
\alias{finwhales}
\alias{finwhales-package}
\title{finwhales: Pelagos storyline for ECOPOTENTIAL}
\description{
finwhales: Pelagos storyline for ECOPOTENTIAL
}
|
96558ca22a6e079e2566034d545d0a5eea90cb29 | 8baf1e2211226a00af0d7d9eb37d636d36fc065f | /03_analyses/01_pdav3.R | 3f4a30a098d8a24e89a44a0c7763b0665701a62e | [] | no_license | timdisher/pda_dec_tool | 61d4b07800c655d8323863161512aec3d563873e | 6dda3739dc51440846f5864a192564b824e7b811 | refs/heads/master | 2021-10-24T13:51:15.954009 | 2019-03-26T12:44:08 | 2019-03-26T12:44:08 | 139,772,863 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,362 | r | 01_pdav3.R | #User input
library(readxl)
source("./03_analyses/01_clean.R")
source("./04_functions/nma_cont_gemtc.R")
load("./reg_out_resv2.rda")
library(smaa)
library(hitandrun)
#----------------------------------------------------------------------------- -
#----------------------------------------------------------------------------- -
# SMAA in R
#----------------------------------------------------------------------------- -
#----------------------------------------------------------------------------- -
#================================= ==
#Functions for this project====
#================================= ==
# Fit a random-effects binomial network meta-analysis for one outcome.
#
# data   : arm-level event/count data for one outcome (format expected by
#          wb_cont_list(), defined in the sourced project files).
# params : parameters monitored by JAGS.
# model  : path to the BUGS model file.
# tables / est : which summary tables/estimates jags_out() should build.
# treats : treatment code lookup; NOTE the default `all_codes_temp` is read
#          from the global environment at call time (assigned in the loop
#          below this function), so call order matters.
#
# Returns a list with the input data, treatment codes, the fitted jags
# model object and its summary tables.
nma_binom = function(data, params = binom_params_re,
                     model = mf_binom_re_con,
                     tables = c("or","SUCRA"),
                     est = c("or","SUCRA"),
                     treats = all_codes_temp){
  #Output list for WinBUGS
  wb_list = wb_cont_list(data = data, cont = FALSE)
  #Run Model (3 chains, 200k iterations, 10k burn-in, thin by 19)
  model = jags.parallel(wb_list,NULL,params,model.file = model,
                        n.chains = 3, n.iter = 200000, n.burnin = 10000, n.thin = 19)
  con_tabs = jags_out(model_summary = model$BUGSoutput$summary,treatments = treats,
                      tables = tables,est = est)
  out = list(wb_data = data,trts = treats, cons = model,cons_tables = con_tabs)
  out
}
#Binomial models====================================
#write.model(binom_re, "./05_models/binom_re.txt")
#mf_binom_re_con = c("./05_models/binom_re.txt")
# Model file and monitored parameters for the random-effects binomial NMA.
mf_binom_re_con = c("./05_models/binom_re_tight_priors.txt")
binom_params_re = c("d","or","SUCRA","rk", "sd")
#----------------------------------------------------------------------------- -
#----------------------------------------------------------------------------- -
# After looking at the absolute treatment effects implied by the base model, Souvik
# and I decided that there is no value in IVH/Oligo since the probabilities are
# obviously incredible (e.g. 100% risk of IVH). The assumption of exchangeable efficacy
# also meant that placebo had 100% risk of oligo which doesn't make sense. As a result,
# we have decided that it makes more sense to just drop outcomes with missing data (BPD, Oligo)
# and to exclude IVH since most of them would happen before treatment would have started.
# Outcomes excluded per the rationale above.
drop <- c("oligo", "bpd", "ivh", "rpt_rx")
all_outs <- all_outs[!names(all_outs) %in% drop]
all_codes <- all_codes %>% select(-drop)
# Outcome-importance orderings: `ranks` is the base ordering; `ranks2` swaps
# the ordering to (1,4,2,3) -- presumably a sensitivity analysis, confirm.
ranks <- ranks %>% filter(!outcome %in% drop) %>% arrange(rank) %>% mutate(rank = 1:4)
ranks2 <- ranks %>% filter(!outcome %in% drop) %>% arrange(rank) %>% mutate(rank = c(1,4,2,3))
# Fit one NMA per remaining outcome. nma_binom() picks up `all_codes_temp`
# from the global environment via its default `treats` argument, so it must
# be (re)assigned immediately before each call, as done here.
nma_list <- rep(list(NA), length(all_outs))
for(i in seq_along(all_outs)){
  all_codes_temp <- all_codes[,c(1, i+1)] %>% `colnames<-`(c("trt", "t"))
  nma_list[[i]] <- nma_binom(data = all_outs[[i]] %>% mutate(na = 2))
  names(nma_list)[[i]] <- names(all_outs)[[i]]
}
# Extract the posterior samples of the treatment effects (columns "d[...]")
# and ranks ("rk...") for each of the 4 outcomes, relabelling columns with
# the treatment names for that outcome.
d_s <- rep(list(NA), 4)
for(i in seq_along(d_s)){
  temp <- nma_list[[i]]$cons$BUGSoutput$sims.matrix %>% as.data.frame() %>% select(starts_with("d["), starts_with("rk"))
  trts <- all_codes %>% select(trt, names(nma_list)[[i]]) %>% drop_na() %>% .$trt
  colnames(temp) <- c(trts, paste0(trts, "_rk"))
  d_s[[i]] <- temp
  names(d_s)[[i]] <- names(nma_list)[[i]]
}
# Drop the rank columns; only the effect samples are used downstream.
d_s <- d_s %>% map(., ~select(., -ends_with("_rk")))
#save(d_s, file = "reg_out_resv2.rda")
#----------------------------------------------------------------------------- -
#----------------------------------------------------------------------------- -
# Baseline rates
#
# These are just crudely as sums from placebo. Really should either be the random
# effect meta-analysis on proportions or something similar
#----------------------------------------------------------------------------- -
#----------------------------------------------------------------------------- -
# Draw `iter` posterior samples of a baseline event probability on the
# logit scale, using a Beta(event, n - event) posterior for the underlying
# probability (i.e. counts used directly as the beta shape parameters).
p_samp <- function(event, n, iter = 30000){
  shape1 <- event
  shape2 <- n - event
  # sample probabilities, then map each to its log-odds
  qlogis(rbeta(iter, shape1, shape2))
}
# Indo IV
# Baseline arm event counts / totals for each outcome (presumably pooled
# across trials as noted in the comment block above -- confirm sources).
base <- NULL
base[["closure"]] <- p_samp(227, 597)
base[["sx"]] <- p_samp(60, 321)
base[["mort"]] <- p_samp(79, 454)
base[["nec"]] <- p_samp(24, 367)
# Convert relative effects (log-odds) to absolute probabilities by adding
# the baseline logit samples and applying the inverse-logit.
p_s <- rep(list(NA), length(d_s))
for(i in seq_along(d_s)){
  p_s[[i]] <- d_s[[i]] %>% select(-ends_with("_rk")) %>% mutate_all(funs(plogis(base[[names(d_s)[[i]]]] + .))) %>%
    select(names(d_s[[1]] %>% select(-ends_with("_rk"))))
  names(p_s)[[i]] <- names(d_s)[[i]]
}
# Create a probability list for bar plots
p_bar <- map(p_s, ~ as.data.frame(.) %>% sapply(., quantile, probs = c(0.025, 0.5, 0.975)) %>%
               t(.) %>% as.data.frame %>% rownames_to_column %>% purrr::set_names(c("trt", "low", "med", "high")) %>%
               mutate_at(., vars(low:high), funs(. * 100)))
#save(p_bar_dat, file = "p_bar_datv2.rda")
p_s[-1] <- map(p_s[-1],~ 1 - .) # Choices are based on sum of partial values * weight so higher is better
#smaa requires performance across outcomes as an array
ps_array <- array(unlist(p_s), dim = c(nrow(p_s[[1]]), ncol(p_s[[1]]), length(p_s)),
                  dimnames = list(NULL, names(p_s[[1]]), names(p_s)))
n <- 4 # length of weight vector
# Ordinal weight constraints: criterion 1 >= 2 >= 3 >= 4 in importance.
constr <- mergeConstraints(
  list(ordinalConstraint(n,1,2),
       ordinalConstraint(n,2,3),
       ordinalConstraint(n,3,4)
  )
)
transform <- simplex.createTransform(n)
constr <- simplex.createConstraints(transform, constr)
seedPoint <- createSeedPoint(constr, homogeneous=TRUE)
N <- 30000
# Hit-and-run sampling of weight vectors satisfying the ordinal constraints.
w <- har(seedPoint, constr, N=N * (n-1)^3, thin=(n-1)^3,
         homogeneous=TRUE, transform=transform)$samples
#save(w, file = "ordinal_weightsv2.rda")
ps_array_sm <- reorder(ps_array, dim = 3, names = c(ranks %>% arrange(rank) %>% .$outcome)) # Outcome dimension needs to be in same order as weights
ps_array_mcy <- reorder(ps_array, dim = 3, names = c(ranks2 %>% arrange(rank) %>% .$outcome))
pda_smaa <- smaa(ps_array_sm, w)
pda_smaa_mcy <- smaa(ps_array_mcy, w)
# NOTE(review): `values`/`cf` below use the unreordered ps_array while
# pda_smaa above used ps_array_sm -- verify this is intentional.
values <- smaa.values(ps_array, w)
values_mcy <- smaa.values(ps_array_mcy, w)
ranks_smaa <- smaa.ranks(values)
ranks_smaa_mcy <- smaa.ranks(values_mcy)
cw <- smaa.cw(ranks_smaa, w)
cw_mcy <- smaa.cw(ranks_smaa_mcy, w)
cf <- smaa.cf(ps_array, cw)
cf_mcy <- smaa.cf(ps_array_mcy, cw_mcy)
## No preferences
np <- simplex.sample(4, N)[1] %>% as.data.frame()
smaa_np <- smaa(ps_array, np)
values_np <- smaa.values(ps_array, np)
ranks_smaa_np <- smaa.ranks(values_np)
cw_np <- smaa.cw(ranks_smaa_np, np)
# NOTE(review): this overwrites the preference-based `cf` computed above;
# consider a distinct name (e.g. cf_np) if both are needed.
cf <- smaa.cf(ps_array, cw_np)
|
b40b0f1ac5c6e2b964c159bc89e29203be70e939 | 59e684621270e7fe60de4fe4c326c9dd6f5dcec9 | /tests/testthat.R | 8fa631456f872100816c8fdad6a4321f125ee0b2 | [] | no_license | matthew-coad/emc | 8966a59ed8031c6d6ca03f5373e5ecc0aff65041 | 31f9985ad674439cd4ca55d5399f741081967b04 | refs/heads/master | 2020-04-01T23:05:21.279954 | 2018-11-15T05:29:53 | 2018-11-15T05:29:53 | 153,741,296 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50 | r | testthat.R | library(testthat)
library(emc)
# Entry point for R CMD check / devtools::test(): run the whole suite.
test_check("emc")
|
814418c24818349a5aa3d5738ab4503eea7863e2 | 9107f12cffc54c8a23da74643ec888a6f30557f6 | /2_prep/src/yeti_job_utils.R | 58e69fcfb8d9d7afb48afc5957fa09a900046315 | [] | no_license | lindsayplatt/lake-temperature-process-models-old | a30dd2a454a00cfac6437c2fd87acdc7f55297f5 | 8b15022c219df9f7578da0df42962dbcd183c998 | refs/heads/master | 2022-03-15T11:05:12.861719 | 2019-10-30T20:13:13 | 2019-10-30T20:13:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 928 | r | yeti_job_utils.R |
# Upload files from a local directory to a destination directory on the
# USGS Yeti cluster over SSH, connecting as the current local user.
#
# local_dir : local directory containing the files.
# dest_dir  : remote directory to upload into.
# files     : character vector of file names relative to local_dir.
#
# The SSH session is always closed on exit (add = TRUE so any other exit
# handlers are preserved), even if the upload fails. Paths are built with
# file.path() instead of sprintf(), and assignment uses <- consistently.
yeti_put <- function(local_dir, dest_dir, files){
  user <- Sys.info()[["user"]]
  session <- ssh::ssh_connect(sprintf("%s@yeti.cr.usgs.gov", user))
  on.exit(ssh::ssh_disconnect(session = session), add = TRUE)
  file_paths <- file.path(local_dir, files)
  ssh::scp_upload(session = session, files = file_paths, to = dest_dir)
}
# Copy the meteorological driver files referenced by a list of nml
# configurations into the local sync directory, record them via
# sc_indicate() (defined elsewhere in this project -- presumably a
# scipiper indicator; confirm), and upload them to the matching directory
# on the Yeti cluster with yeti_put().
#
# fileout  : path of the indicator file sc_indicate() writes.
# nml_list : list of nml objects, each carrying a $meteo_fl file name.
sync_drivers <- function(fileout, nml_list){
  # one meteo driver file name per nml entry
  nml_meteo_files <- sapply(nml_list, FUN = function(x) x$meteo_fl) %>% as.vector()
  sync_dir <- '2_prep/sync'
  file.copy(from = file.path('../lake-temperature-model-prep/7_drivers_munge/out', nml_meteo_files), to = file.path(sync_dir, nml_meteo_files), overwrite = TRUE)
  sc_indicate(fileout, data_file = file.path(sync_dir, nml_meteo_files))
  dest_dir <- file.path('/cxfs/projects/usgs/water/iidd/data-sci/lake-temp/lake-temperature-process-models', sync_dir )
  yeti_put(dest_dir = dest_dir, local_dir = sync_dir, files = nml_meteo_files)
}
|
ebca41c1ebe7351eacb61b350c289e0f21cbf7b4 | 6769a21b66f03edff3bb70dc5e9573c6e6ecef98 | /format-data/策略会.R | 8a0f77f4357df8f82235faeb850e891923024c77 | [] | no_license | RookieDay/daily_meeting | a25c29810395b8d5023e4470261b2ca8ef6e2568 | afe08ab4d60b7689b26ba969f1b62926406f0b1b | refs/heads/main | 2023-01-23T02:44:45.037110 | 2020-12-07T12:32:12 | 2020-12-07T12:32:12 | 319,039,405 | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 1,129 | r | 策略会.R | setwd("C:\\Users\\GL\\Desktop\\2019年中期策略会\\代码")
library(readxl)
# Read the strategy-meeting roster; column 3 appears to hold the
# salesperson and columns 5+ the companies they cover -- TODO confirm
# against the actual test.xlsx layout.
data=read_excel("test.xlsx",sheet = 1)
company=data[,c(3,5)]
# Stack every (column 3, column i) pair so each row is one person/company pair.
for(i in 6:ncol(data)){
  aa=data[,c(3,i)]
  colnames(aa)=colnames(company)
  company=rbind(company,aa)
  print(i)
}
# The above reshapes the data into long "person, company" form.
# Deduplicate to one row per company, dropping missing entries.
company=na.omit(data.frame("company"=unique(company[,2]),stringsAsFactors = F))
result=list()
num=c()
# For each company, collect every roster row that mentions it (in column 5
# or any later column), then build a per-company attendee table.
for(i in 1:nrow(company)){
  aa=data[which(data[,5]==company[i,1]),]
  for(j in 6:ncol(data)){
    aa=rbind(aa,data[which(data[,j]==company[i,1]),])
  }
  sub=data.frame()
  # columns: 1 = company name (first row only), 2 = attendee count,
  # 3 = salesperson, 4 = "institution - name", 5 = category
  for(j in 1:nrow(aa)){
    sub[1,1]=NA
    sub[1,2]=NA
    sub[j,3]=aa[j,1]
    sub[j,4]=paste(aa[j,2],"-",aa[j,3])
    sub[j,5]=aa[j,4]
  }
  sub=sub[order(sub[,5]),]
  sub[1,1]=company[i,1]
  sub[1,2]=nrow(aa)
  num=c(num,nrow(aa))
  result[[i]]=sub
  print(i)
}
# Concatenate the per-company tables, largest attendee count first.
conclusion=result[[order(num,decreasing = T)[1]]]
for(i in order(num,decreasing = T)[2:length(num)]){
  conclusion=rbind(conclusion,result[[i]])
  print(i)
}
colnames(conclusion)=c("公司","人数","销售","机构-姓名","分类")
write.csv(conclusion,"上市公司交流.csv")
|
0ee658e499ec3d2ba423ea30822c19de36c754a4 | f3a979438f0ed4305cd01be796cb28f53d3bb538 | /Chapter15/vis_cluster_kmean.r | 0909b450661f1962abcf805106e5d2db2831c631 | [] | no_license | scchess/DataMiningApplicationsWithR | 2be6a064106a44c76b4577a5c15a0e065cd89260 | 580f5a9d3c749c2f6d00e437a28cd8705bf6ab76 | refs/heads/master | 2021-06-14T18:21:08.155141 | 2017-01-27T14:38:02 | 2017-01-27T14:38:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,229 | r | vis_cluster_kmean.r | #!/usr/bin/Rscript
# R script to make kmean clusters
# Reads per-FQDN DNS traffic features, keeps the 1000 costliest entries,
# reduces dimensionality with PCA (keeping a fraction r of total variance),
# clusters the scores with k-means, and writes each cluster to /tmp.
# importations
library(Matrix)
source("./plot_functions.r")
# several variables
filename = "cluster_entry.txt" # input filename
# NOTE(review): the assignment below overrides the previous one, so the
# first filename is dead code.
filename = "Cluster3_dnssec.txt"
r = 0.9 # quantity of total magnitude hold (related to tolerance for PCA)
nb_cluster = 4 # cluster for kmean
output_file = paste(format(Sys.time(), "%F-%T"), "-Rout.txt", sep="") # file where to print
par(cex=2)
tmp_file="/tmp/r"
# full feature column names, and the subset actually used for clustering
clab <- c("euQR", "reQR", "tR", "euBR", "reBR", "tBR", "cQRT", "reOCC", "euOCC", "tOCC", "CHR", "eQR", "eCPU", "ePRT", "succRatio", "failRatio", "cltQNbr", "pltQNbr", "Qlen", "Rlen", "Sigcheck", "MIRT", "SDIRT", "MPRT", "SDPRT", "MTTL", "SDTTL")
clabf <- c("euBR", "reBR", "QNbr", "pltQNbr", "CHR", "cQRT", "MIRT", "MPRT", "MTTL")
mat_ent <- read.table(filename, row.names=1, col.names=clab)
# mean +/- one standard deviation columns for the timing features
mat_ent$pIRT = mat_ent$MIRT + mat_ent$SDIRT # addition of column
mat_ent$mIRT = mat_ent$MIRT - mat_ent$SDIRT # addition of column
mat_ent$pPRT = mat_ent$MPRT + mat_ent$SDPRT # addition of column
mat_ent$mPRT = mat_ent$MPRT - mat_ent$SDPRT # addition of column
mat_ent$pTTL = mat_ent$MTTL + mat_ent$SDTTL # addition of column
mat_ent$mTTL = mat_ent$MTTL - mat_ent$SDTTL # addition of column
#mat_ent <- subset(mat_ent, select= -SDIRT) # select column
#mat_ent <- subset(mat_ent, select= -SDPRT) # select column
#mat_ent <- subset(mat_ent, select= -SDTTL) # select column
#sink(output_file)
#X11(width=12, height=12)
mat <- subset(mat_ent, cQRT > 0) # python script return -1 if no request is present and cQRT is used to plot several variables
cor(mat_ent)
# sort
attach(mat_ent)
mat_ent_sorted <- mat_ent[order(tBR),]
detach(mat_ent)
mat <- subset(mat, tBR > mat_ent_sorted[dim(mat_ent)[1] - 1000,]$tBR) # subset the 1000 costliest fqdn
#mat <- subset(mat, tBR > mean(tBR) ) # selection of rows
mat <- subset(mat, MTTL > 0)
mat <- subset(mat, select=clabf)
mat$invTTL = 1/mat$MTTL
# first PCA pass only to measure the total variance...
pca <- prcomp(mat, scale=TRUE, center=TRUE)
mag <- sum(pca$sdev * pca$sdev) # total magnitude
# ...then refit keeping components until fraction r of variance is held
pca <- prcomp(mat, tol= (1-r)*mag/(pca$sdev[1] * pca$sdev[1]), scale=TRUE, center=TRUE)
# round-trip through /tmp/r -- presumably to coerce scores/rotation into
# plain data frames; confirm this is still needed.
write.table(pca$x, file=tmp_file)
d<-read.table(tmp_file, header=TRUE, row.names=1)
write.table(pca$rotation, file=tmp_file)
rot<-read.table(tmp_file, header=TRUE, row.names=1)
k<-kmeans(d,nb_cluster) # kmean
print(pca$rotation) # new vectors
print(k$size) # size of clusters
write.table(k$cluster, file="/tmp/clusters.txt")
g <- read.table ("/tmp/clusters.txt", header=TRUE, row.names=1)
# distance between centers and 0, and elimination of cluster near 0
nmin=Inf # norm min
nmax=0 # norm max
for (i in c(1:nb_cluster)){
  tmp=norm(rbind(k$centers[i,]), type="f")
  if(nmin > tmp) {
    nmin = tmp
    cmin = i }
  if(nmax < tmp) {
    nmax = tmp
    cmax = i }
}
# rows of the full data set outside the near-zero cluster
mat0 <- subset(mat_ent, match(row.names(mat_ent), row.names(subset(g, x!=cmin))) > 0 )
#plot(d, col=k$cluster)
#X11()
#plot(mat, col=k$cluster)
# print clusters for extra manipulations
for (i in c(1:nb_cluster)) {
  g0 <- subset(mat_ent, match(row.names(mat_ent), row.names(subset(g, x==i))) > 0 ) # all dataset parameters for cluster i
  write.table(g0, file=paste("/tmp/cluster_", i, ".txt", sep=""))
}
|
6b7820c36c6146af5d557accb82cc56f19bd192f | ae33c584a4efdd6c337abd69fc9fa98b1c01e64b | /data/mosaic/save-mosaic-data.R | 6ac5aa0e1d1ee694b5e721ceb055d2d3c4e9b598 | [] | no_license | elray1/elray1.github.io | de1ffa6d854a7b58991f73c2ce1c18dc51d6723d | 818db063d1785c09a0da019929d6f5651593f1c1 | refs/heads/master | 2023-03-09T23:38:53.181329 | 2023-02-19T15:13:30 | 2023-02-19T15:13:30 | 91,704,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 157 | r | save-mosaic-data.R | library(mosaicData)
library(readr)
# Export two bundled mosaicData example data sets to CSV for course use.
write_csv(mosaicData::RailTrail, "data/mosaic/rail_trail.csv")
write_csv(mosaicData::HELPrct, "data/mosaic/HELPrct.csv")
|
8ecc06cc62c9c2b4c2baef8bb7d7510d8eb1af3f | 783e2ab6b69287a4c863b748265725a77efd375f | /R/surrogate_nn.R | d5485b64d5b06b9f6a5b3af24809e3c66175b455 | [] | no_license | RezaSadeghiWSU/rlR | 2190b2918544c69447cea80a3608e469b073e18f | 0b18b5b1d5af2f5d599225cad2ea28b384727f28 | refs/heads/master | 2020-04-16T05:04:39.827534 | 2018-12-12T17:42:22 | 2018-12-12T17:42:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,260 | r | surrogate_nn.R | SurroNN = R6::R6Class("SurroNN",
  # Neural-network surrogate for an RL agent's value/policy function,
  # backed by keras/tensorflow. (The class header
  # `SurroNN = R6::R6Class("SurroNN",` precedes this argument list.)
  inherit = Surrogate,
  public = list(
    lr = NULL,          # current learning rate (decayed after each episode)
    arch.list = NULL,   # network architecture spec read from the agent conf
    conf = NULL,        # agent configuration object (may be NULL)
    agent = NULL,       # owning agent; supplies dims, conf, session, task
    custom_flag = NULL, # TRUE once a user-supplied model replaces the default
    action_input = NULL,
    sess = NULL,        # tensorflow session taken from the agent
    # Build the surrogate for `agent`; an `act_cnt` entry in ... overrides
    # the agent's action count (used e.g. for critic networks).
    initialize = function(agent, arch_list_name = "agent.nn.arch", ...) {
      par_list = list(...)
      self$agent = agent
      self$act_cnt = self$agent$act_cnt
      self$custom_flag = FALSE
      if ("act_cnt" %in% names(par_list)) {
        self$act_cnt = par_list[["act_cnt"]]
      }
      self$state_dim = self$agent$state_dim
      self$conf = self$agent$conf
      if (!is.null(self$conf)) {
        self$arch.list = self$conf$get(arch_list_name)
        self$arch.list$lr = self$conf$get("agent.lr")
        self$lr = self$arch.list$lr
      }
      self$model = self$makeModel()
      self$sess = agent$sess
    },
    # Register the network builder functions on the agent: CNN builders for
    # image ("tensor") observations, plain dense keras models otherwise.
    initNetworkCreator = function() {
      if (self$agent$env$flag_tensor) {
        self$agent$network_build_funs[["policy_fun"]] = function(state_dim, act_cnt) {
          makeCnnActor(input_shape = state_dim, act_cnt = act_cnt)
        }
        self$agent$network_build_funs[["value_fun"]] = function(state_dim, act_cnt) {
          makeCnnCritic(input_shape = state_dim, act_cnt = act_cnt)
        }
      } else {
        self$agent$network_build_funs[["value_fun"]] = function(state_dim, act_cnt) {
          makeKerasModel(input_shape = state_dim, output_shape = act_cnt, arch.list = self$arch.list)
        }
        self$agent$network_build_funs[["policy_fun"]] = function(state_dim, act_cnt) {
          makeKerasModel(input_shape = state_dim, output_shape = act_cnt, arch.list = self$arch.list)
        }
      }
    },
    # Build the keras model; continuous-action agents build their own brain.
    makeModel = function() {
      if (self$agent$env$flag_continous) {
        model = self$agent$createBrain() # the agent itself is responsible for creating the brain
        return(model)
      }
      self$initNetworkCreator()
      do.call(self$agent$network_build_funs[[self$agent$task]], args = list(state_dim = self$state_dim, act_cnt = self$act_cnt))
    },
    # calculate gradients with respect to input arm instead of weights
    # (assumes the model has two inputs, action first then state -- see
    # the FIXME below; confirm against the actor-critic wiring).
    calGradients2Action = function(state_input, action_input, output = NULL) {
      output = self$model$output
      # FIXME: hard coded here.
      input_action = self$model$input[[1L]]
      input_action_shape = input_action$shape
      tf_grad = keras::k_gradients(output, input_action)
      aname = input_action$name
      sname = self$model$input[[2L]]$name
      oname = self$model$output$name
      #FIXME: do we need initializer here?
      self$sess$run(tensorflow::tf$global_variables_initializer())
      np = reticulate::import("numpy", convert = FALSE)
      sstate = np$array(state_input)
      saction = np$array(action_input)
      feed_dict = py_dict(c(sname, aname), c(sstate, saction))
      #FIXME: do we need to provide the output as well?
      #feed_dict = py_dict(c(sname, aname, oname), c(sstate, saction, output))
      self$sess$run(tf_grad, feed_dict)
    },
    # Gradients of the model output with respect to trainable weights,
    # evaluated for the given (state, action) feed.
    calGradients = function(state, action) {
      output = self$model$output
      input = self$model$trainable_weights
      tf_grad = keras::k_gradients(output, input)
      iname = self$model$input$name
      oname = self$model$output$name
      self$sess$run(tensorflow::tf$global_variables_initializer())
      np = reticulate::import("numpy", convert = FALSE)
      sstate = np$array(state)
      saction = np$array(action)
      feed_dict = py_dict(c(iname, oname), c(sstate, saction))
      self$sess$run(tf_grad, feed_dict)
    },
    # Predict for `state` then compute weight gradients at that prediction.
    # (The result is returned invisibly via the final assignment.)
    getGradients = function(state) {
      res = self$pred(state)
      grad = self$calGradients(state = state, action = res)
    },
    # Replace the model with a user-supplied one; deep_clone then uses
    # keras::clone_model instead of rebuilding from the architecture spec.
    setModel = function(obj) {
      self$model = obj
      self$custom_flag = TRUE
    },
    getWeights = function() {
      keras::get_weights(self$model)
    },
    setWeights = function(weights) {
      keras::set_weights(self$model, weights)
    },
    # Save the model to an HDF5 file.
    persist = function(file_path) {
      keras::save_model_hdf5(object = self$model, file_path = file_path)
    },
    # One (or more) keras training passes on a batch; silent (verbose = 0).
    train = function(X_train, Y_train, epochs = 1L) {
      keras::fit(object = self$model, x = X_train, y = Y_train, epochs = epochs, verbose = 0)
    },
    pred = function(X) {
      res = keras::predict_on_batch(self$model, X)
      res # FIXME: prediction might be NA from Keras
    },
    # Decay the learning rate and push it into the keras optimizer.
    afterEpisode = function() {
      self$lr = self$lr * self$agent$lr_decay
      #FIXME: adjust learning rate with dataframe nrow?
      keras::k_set_value(self$model$optimizer$lr, self$lr)
      lr = keras::k_get_value(self$model$optimizer$lr)
      self$agent$interact$toConsole("learning rate: %s \n", lr)
    }
  ),
  private = list(
    # When x$clone(deep=TRUE) is called, deep_clone is invoked once per
    # field with its name and value; the keras model cannot be copied by
    # R's cloning, so it is rebuilt/cloned and the weights transferred.
    deep_clone = function(name, value) {
      # With x$clone(deep=TRUE) is called, the deep_clone gets invoked once for each field, with the name and value.
      if (name == "model") {
        weights = self$getWeights()
        if (self$custom_flag) {
          model = keras::clone_model(self$model)
        } else {
          model = self$makeModel()
        }
        keras::set_weights(model, weights)
        return(model)
      } else {
        # For all other fields, just return the value
        value
      }
    }
  ),
  active = list()
)
|
10247bd3c50f0ea9adead416b64e6bf3b6869c26 | 92c73a3e01edb04b61636ae237119350c13a9bc5 | /Rperformance/MakespanSpeed.R | d4275b6acd6350fc6a208d63a3cf621172fb1d9d | [] | no_license | jmsallan/optimization | 6f714ca7096e28d1e861e447a3b20196c4526d0b | a0d099890c69090a55f05b93aa4bd0363f7190f4 | refs/heads/master | 2020-04-28T01:59:37.433611 | 2020-04-01T14:37:39 | 2020-04-01T14:37:39 | 174,881,032 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,196 | r | MakespanSpeed.R | #---- two codings of the makespan function ----
# Makespan (total completion time) of a permutation flow-shop schedule,
# using an explicit completion-time matrix.
#
# M   : m x n matrix of processing times (rows = machines, cols = tasks).
# sol : permutation of task (column) indices giving the processing order.
#
# Returns the completion time of the last task on the last machine.
#
# Fixes: the original `2:n` / `2:m` loops run backwards (c(2, 1)) when
# n or m equals 1, and `M[, sol]` dropped to a vector for a single-column
# selection; guarded sequences and drop = FALSE handle degenerate
# instances correctly.
makespan1 <- function(M, sol){
  m <- nrow(M)
  n <- ncol(M)
  M <- M[ , sol, drop = FALSE]
  t <- matrix(0, m, n)
  t[1, 1] <- M[1, 1]
  # completion times along the first row and first column
  for(j in seq_len(n)[-1]) t[1, j] <- t[1, j - 1] + M[1, j]
  for(i in seq_len(m)[-1]) t[i, 1] <- t[i - 1, 1] + M[i, 1]
  # each task starts when both its machine and its predecessor are free
  for(i in seq_len(m)[-1]){
    for(j in seq_len(n)[-1])
      t[i, j] <- max(t[i - 1, j], t[i, j - 1]) + M[i, j]
  }
  t[m, n]
}
# Makespan of a permutation flow-shop schedule, computed in place: the
# processing-time matrix itself is overwritten with cumulative completion
# times, avoiding the auxiliary matrix used by makespan1().
#
# M   : m x n matrix of processing times (rows = machines, cols = tasks).
# sol : permutation of task (column) indices giving the processing order.
makespan2 <- function(M, sol){
  n_machines <- dim(M)[1]
  n_tasks <- dim(M)[2]
  M <- M[ , sol]
  # accumulate along the first row (machine 1) and first column (task 1)
  for(col in 2:n_tasks) M[1, col] <- M[1, col - 1] + M[1, col]
  for(row in 2:n_machines) M[row, 1] <- M[row - 1, 1] + M[row, 1]
  # interior cells: wait for both the machine and the previous task
  for(row in 2:n_machines){
    for(col in 2:n_tasks){
      M[row, col] <- max(M[row - 1, col], M[row, col - 1]) + M[row, col]
    }
  }
  M[n_machines, n_tasks]
}
#---- which is faster? -----
# Compare the two makespan implementations on 1000 random permutations of
# a fixed random 10x20 instance (seeds fixed for reproducibility).
#building a random instance of 10 machines and 20 tasks
set.seed(2020)
instance <- matrix(sample(10:90, 200, replace = TRUE), 10, 20)
#building a list of 1000 possible solutions
set.seed(1111)
perms <- lapply(1:1000, function(x) sample(1:20, 20))
#testing speed of two makespan functions
library(rbenchmark)
benchmark(sapply(perms, function(x) makespan1(instance, x)), sapply(perms, function(x) makespan2(instance, x)))
|
a429df45483ad031b97d5f749edff90a0d7a9d19 | 448e3fbc77e3290c1c4196c2258b3d444c8c0d45 | /R/facets-wrapper.R | 289dd17cb7f4a73ed97a1f6ae225da15a164f1ef | [] | no_license | mskcc/facets | 1fe15ddabe7b157c4ffdeddb048797c8a1f4f83b | f3c93ee65b09fc57aaed22a2eb9faa05586a9dc0 | refs/heads/master | 2023-06-09T01:38:44.938368 | 2021-10-12T12:04:39 | 2021-10-12T12:04:39 | 35,636,429 | 135 | 71 | null | 2023-06-04T17:04:27 | 2015-05-14T20:54:49 | R | UTF-8 | R | false | false | 12,284 | r | facets-wrapper.R | readSnpMatrix <- function(filename, skip=0L, err.thresh=Inf, del.thresh=Inf, perl.pileup=FALSE) {
    # Body of readSnpMatrix(): returns a data frame with columns
    # Chromosome, Position, NOR.DP, NOR.RD, TUM.DP, TUM.RD (depth and
    # ref-allele read counts for the normal and tumor samples). A leading
    # "chr" prefix is stripped from chromosome names in either format.
    # could have been generated by original VES snp-pileup.pl code (perl)
    if (perl.pileup) {
        # perl output: whitespace-delimited, already in final column form
        rcmat <- scan(filename, what=list(Chromosome="", Position=0, NOR.DP=0, NOR.RD=0, TUM.DP=0, TUM.RD=0), skip=skip)
        if (grepl("chr",rcmat$Chromosome[1])) rcmat$Chromosome <- gsub("chr","",rcmat$Chromosome)
        rcmat <- as.data.frame(rcmat, stringsAsFactors=FALSE)
    } else {
        # read the read count matrix generated by snp-pileup.cpp code
        # (12 columns: chrom, pos, 2 allele columns, then 8 numeric counts
        # File{1,2}{R,A,E,D} = ref/alt/error/deletion per sample file)
        pileup <- read.csv(filename, stringsAsFactors=FALSE, colClasses=rep(c("character", "numeric","character", "numeric"), c(1,1,2,8)))
        # remove chr if present in Chrom
        if (grepl("chr",pileup$Chromosome[1])) {
            pileup$Chromosome <- gsub("chr", "", pileup$Chromosome)
        }
        # remove loci where errors and deletions exceeded thresholds
        ii <- which(pileup$File1E <= err.thresh & pileup$File1D <= del.thresh & pileup$File2E <= err.thresh & pileup$File2D <= del.thresh)
        rcmat <- pileup[ii, 1:2]
        # depth = ref + alt reads; RD = ref reads only
        rcmat$NOR.DP <- pileup$File1R[ii] + pileup$File1A[ii]
        rcmat$NOR.RD <- pileup$File1R[ii]
        rcmat$TUM.DP <- pileup$File2R[ii] + pileup$File2A[ii]
        rcmat$TUM.RD <- pileup$File2R[ii]
    }
    rcmat
}
# Pre-process a read-count matrix (output of readSnpMatrix) for FACETS:
# select/annotate SNPs, compute log-ratio / log-odds-ratio, and run the
# initial joint segmentation. procSnps(), counts2logROR() and segsnps()
# are defined elsewhere in this package.
#
# rcmat     : data frame with Chromosome/Position and NOR/TUM counts.
# gbuild    : genome build; "udef" requires user-supplied GC percent data
#             (ugcpct), whose length then defines the chromosome count.
# Returns a list with pmat, gbuild, nX plus the segmentation output.
preProcSample <- function(rcmat, ndepth=35, het.thresh=0.25, snp.nbhd=250, cval=25, deltaCN=0, gbuild=c("hg19", "hg38", "hg18", "mm9", "mm10", "udef"), ugcpct=NULL, hetscale=TRUE, unmatched=FALSE, ndepthmax=1000) {
    gbuild <- match.arg(gbuild)
    # integer value for chromosome X depends on the genome
    if (gbuild %in% c("hg19", "hg38", "hg18")) nX <- 23
    if (gbuild %in% c("mm9", "mm10")) nX <- 20
    if (gbuild == "udef") {
        if (missing(ugcpct)) {
            stop("GC percent data should be supplied if udef option is used")
        } else {
            nX <- length(ugcpct)
        }
    }
    pmat <- procSnps(rcmat, ndepth, het.thresh, snp.nbhd, nX, unmatched, ndepthmax)
    # only loci with tumor reads contribute to the logR/logOR computation
    if (gbuild == "udef") {
        dmat <- counts2logROR(pmat[pmat$rCountT>0,], gbuild, unmatched, ugcpct)
    } else {
        dmat <- counts2logROR(pmat[pmat$rCountT>0,], gbuild, unmatched)
    }
    tmp <- segsnps(dmat, cval, hetscale, deltaCN)
    out <- list(pmat=pmat, gbuild=gbuild, nX=nX)
    c(out, tmp)
}
# Process a preProcSample() result: re-segment at a (coarser) cval, add
# focal-change segments, cluster segments, find the diploid logR level and
# fit copy-number/cellular-fraction estimates. jointsegsummary(),
# prune.cpt.tree(), clustersegs(), findDiploidLogR() and fitcncf() are
# defined elsewhere in this package.
#
# x        : output of preProcSample (must carry seg.tree).
# cval     : segmentation critical value; must be >= the cval used to
#            build seg.tree (enforced below).
# min.nhet : minimum heterozygous SNPs for a segment cluster.
# dipLogR  : optional fixed diploid logR; if NULL it is estimated.
procSample <- function(x, cval=150, min.nhet=15, dipLogR=NULL) {
    # ensure availability of seg.tree
    if (is.null(x$seg.tree)) stop("seg.tree is not available")
    # get the numeric value of chromosome X
    nX <- x$nX
    # make sure that original cval is smaller than current one
    cval.fit <- attr(x$seg.tree, "cval")
    if (cval.fit > cval) stop("original fit used cval = ", cval.fit)
    # jointseg etc
    jseg <- x$jointseg
    jseg <- jseg[is.finite(jseg$cnlr),]
    # chromosomes with data and their counts
    chrs <- x$chromlevels
    nchr <- length(chrs)
    # get chromlevels from chrs
    chromlevels <- c(1:(nX-1), "X")[chrs]
    # get the segment summary for the fit in seg.tree
    nsegs <- 0
    # jointseg already has a seg variable numbered 1 thru number of segments for each chromosome
    for (i in 1:nchr) {
        jseg$seg[jseg$chrom==chrs[i]] <- nsegs + jseg$seg[jseg$chrom==chrs[i]]
        nsegs <- max(jseg$seg[jseg$chrom==chrs[i]])
    }
    focalout <- jointsegsummary(jseg)
    # cnlr.median to the left and right
    cnlr.med.l <- c(0, focalout$cnlr.median[-nsegs])
    cnlr.med.r <- c(focalout$cnlr.median[-1], 0)
    # mad of cnlr noise
    cnlr.mad <- mad(jseg$cnlr - rep(focalout$cnlr.median, focalout$num.mark))
    # segments that show focal changes have big jump in cnlr.median
    focalout$focal <- 1*(focalout$cnlr.median > pmax(cnlr.med.l, cnlr.med.r)+3*cnlr.mad) + 1*(focalout$cnlr.median < pmin(cnlr.med.l, cnlr.med.r)-3*cnlr.mad)
    # get the segments for the specified cval
    nsegs <- 0
    for (i in 1:nchr) {
        seg.widths <- diff(prune.cpt.tree(x$seg.tree[[i]], cval))
        jseg$seg[jseg$chrom==chrs[i]] <- nsegs + rep(1:length(seg.widths), seg.widths)
        nsegs <- nsegs + length(seg.widths)
    }
    # adding the focal change segments - need a jump at the beginning and end
    jseg$seg0 <- jseg$seg # detected segments
    # jump at the beginning (twice the height)
    jseg$seg <- jseg$seg + rep(cumsum(2*focalout$focal), focalout$num.mark)
    # drop back for the focal segment to get the steps right
    jseg$seg <- jseg$seg - rep(focalout$focal, focalout$num.mark)
    # focal segment could already be in; so change seg indicator
    jseg$seg <- cumsum(c(1, 1*(diff(jseg$seg) > 0)))
    # segment summaries
    out <- jointsegsummary(jseg)
    # cluster the segments
    out <- clustersegs(out, jseg, min.nhet)
    # put in the clustered values for snps
    jseg$segclust[is.finite(jseg$cnlr)] <- rep(out$segclust, out$num.mark)
    # find dipLogR and fit cncf
    if (is.null(dipLogR)) {
        oo <- findDiploidLogR(out, jseg$cnlr)
    } else {
        oo <- list()
        oo$out0 <- "empty"
        oo$dipLogR <- dipLogR
    }
    out <- fitcncf(out, oo$dipLogR, nX)
    c(list(jointseg=jseg, out=out, nX=nX, chromlevels=chromlevels), oo[-1])
}
# Multi-panel diagnostic plot of a processed sample: log-ratio with
# segment medians, log-odds-ratio with +/- sqrt(mafR), and (depending on
# plot.type) naive and/or EM copy-number tracks with cellular-fraction
# color bars. If emfit is missing, only the naive estimates are available.
#
# x         : output of procSample (jointseg, out, dipLogR, chromlevels).
# emfit     : optional EM fit providing cncf with *.em columns.
# clustered : use clustered segment summaries where available.
# sname     : optional sample name printed in the outer margin.
plotSample <- function(x, emfit=NULL, clustered=FALSE, plot.type=c("em","naive","both","none"), sname=NULL) {
    def.par <- par(no.readonly = TRUE) # save default, for resetting...
    # plot.type
    plot.type <- match.arg(plot.type)
    # layout of multi panel figure
    if (plot.type=="none") layout(matrix(1:2, ncol=1))
    if (plot.type=="em") layout(matrix(rep(1:4, c(9,9,6,1)), ncol=1))
    if (plot.type=="naive") layout(matrix(rep(1:4, c(9,9,6,1)), ncol=1))
    if (plot.type=="both") layout(matrix(rep(1:6, c(9,9,6,1,6,1)), ncol=1))
    par(mar=c(0.25,3,0.25,1), mgp=c(1.75, 0.6, 0), oma=c(3,0,1.25,0))
    # raw data used for joint segmentation
    jseg <- x$jointseg
    # chromosome boundaries
    chrbdry <- which(diff(jseg$chrom) != 0)
    if (missing(emfit)) {
        out <- x$out
        if (plot.type=="em" | plot.type=="both") {
            warning("emfit is missing; plot.type set to naive")
            plot.type <- "naive"
        }
    } else {
        out <- emfit$cncf
        # add the naive tcn, lcn and cf to out
        out$tcn <- x$out$tcn
        out$lcn <- x$out$lcn
        out$cf <- x$out$cf
    }
    # determine which of the cnlr.median & mafR to show
    if (clustered) {
        cnlr.median <- out$cnlr.median.clust
        mafR <- out$mafR.clust
        mafR[is.na(mafR)] <- out$mafR[is.na(mafR)]
    } else {
        cnlr.median <- out$cnlr.median
        mafR <- out$mafR
    }
    mafR <- abs(mafR)
    # chromosome colors (alternate by odd/even chromosome number)
    chrcol <- 1+rep(out$chrom-2*floor(out$chrom/2), out$num.mark)
    nn <- cumsum(table(jseg$chrom[is.finite(jseg$cnlr)]))
    segbdry <- cumsum(c(0,out$num.mark))
    segstart <- segbdry[-length(segbdry)]
    segend <- segbdry[-1]
    # plot the logR data and segment medians
    plot(jseg$cnlr[is.finite(jseg$cnlr)], pch=".", cex=2, col = c("grey","lightblue","azure4","slateblue")[chrcol], ylab="log-ratio", xaxt="n")
    abline(v=chrbdry, lwd=0.25)
    abline(h=median(jseg$cnlr, na.rm=TRUE), col="green2")
    abline(h = x$dipLogR, col = "magenta4")
    segments(segstart, cnlr.median, segend, cnlr.median, lwd=1.75, col=2)
    # plot the logOR data and mafR
    plot(jseg$valor[is.finite(jseg$cnlr)], pch=".", cex=2.5, col = c("grey","lightblue","azure4","slateblue")[chrcol], ylab="log-odds-ratio", ylim=c(-4,4), xaxt="n")
    abline(v=chrbdry, lwd=0.25)
    segments(segstart, sqrt(mafR), segend, sqrt(mafR), lwd=1.75, col=2)
    segments(segstart, -sqrt(mafR), segend, -sqrt(mafR), lwd=1.75, col=2)
    # naive copy number and cellular faction pieces
    cfpalette <- c(colorRampPalette(c("white", "steelblue"))(10),"bisque2")
    if (plot.type=="naive" | plot.type=="both") {
        # plot the estimated copy numbers and cf (tcn > 10 / lcn > 5 are
        # compressed on a log10 scale to keep the axis readable)
        out$tcn[out$tcn > 10] <- 9 + log10(out$tcn[out$tcn > 10])
        ii <- which(out$lcn > 5)
        if (length(ii)>0) out$lcn[ii] <- 5 + log10(out$lcn[ii])
        plot(c(0,length(jseg$cnlr)), c(0,max(out$tcn)), type="n", ylab="copy number (nv)", xaxt="n")
        abline(v=chrbdry, lwd=0.25)
        segments(segstart, out$lcn, segend, out$lcn, lwd=1.75, col=2)
        segments(segstart, out$tcn, segend, out$tcn, lwd=1.75, col=1)
        # add the cf
        plot(c(0,length(jseg$cnlr)), 0:1, type="n", ylab="", xaxt="n", yaxt="n")
        mtext("cf-nv", side=2, at=0.5, line=0.3, las=2, cex=0.75)
        cfcol <- cfpalette[round(10*out$cf+0.501)]
        rect(segstart, 0, segend, 1, col=cfcol, border=NA)
    }
    # EM copy number and cellular faction pieces
    if (plot.type=="em" | plot.type=="both") {
        # plot the estimated copy numbers and cf
        out$tcn.em[out$tcn.em > 10] <- 9 + log10(out$tcn.em[out$tcn.em > 10])
        ii <- which(out$lcn.em > 5)
        if (length(ii)>0) out$lcn.em[ii] <- 5 + log10(out$lcn.em[ii])
        plot(c(0,length(jseg$cnlr)), c(0,max(out$tcn.em)), type="n", ylab="copy number (em)", xaxt="n")
        abline(v=chrbdry, lwd=0.25)
        segments(segstart, out$lcn.em, segend, out$lcn.em, lwd=1.75, col=2)
        segments(segstart, out$tcn.em, segend, out$tcn.em, lwd=1.75, col=1)
        # add the cf
        plot(c(0,length(jseg$cnlr)), 0:1, type="n", ylab="", xaxt="n", yaxt="n")
        mtext("cf-em", side=2, at=0.5, line=0.2, las=2, cex=0.75)
        cfcol <- cfpalette[round(10*out$cf.em+0.501)]
        rect(segstart, 0, segend, 1, col=cfcol, border=NA)
    }
    # now add the chromosome ticks on x-axis
    chromlevels <- x$chromlevels
    # just make sure chromlevels actually exists
    if (is.null(chromlevels)) chromlevels <- 1:length(nn)
    axis(labels=chromlevels, side=1, at=(nn+c(0,nn[-length(nn)]))/2, cex=0.65)
    mtext(side=1, line=1.75, "Chromosome", cex=0.8)
    if (!missing(sname)) mtext(sname, side=3, line=0, outer=TRUE, cex=0.8)
    par(def.par) #- reset to default
}
# "Spider" plot of expected (logR - dipLogR, |logOR|) trajectories for
# integer copy-number states (total-minor pairs, labelled "tcn-lcn") as
# cellular fraction rho runs 0..0.95, with the observed segments of `cncf`
# overlaid. Segments smaller than fraction `nfrac` of total SNPs/hets are
# suppressed; point size scales with segment size.
logRlogORspider <- function(cncf, dipLogR=0, nfrac=0.005) {
    rho <- seq(0, 0.95, by=0.01)
    nrho <- length(rho)
    # 19 columns: one per (total, minor) copy-number state drawn below
    logACR <- logCNR <- matrix(0, nrho, 19)
    # initialize index
    l <- 1
    # one copy loss
    logCNR[,l] <- log2(2*(1-rho) + 1*rho) -1
    logACR[,l] <- log(1/(1-rho))
    # integer copy numbers (clonal)
    for(i in 2:7) {
        for(j in 0:floor(i/2)) {
            l <- l+1
            logCNR[,l] <- log2(2*(1-rho) + i*rho) -1 # base-2
            logACR[,l] <- log(1-rho+(i-j)*rho) - log(1-rho+j*rho)
        }
    }
    plot(c(-0.95, 1.8), c(0, 5.2), type="n", xlab="Expected(logR - dipLogR)", ylab=" Expected(|logOR|)")
    l <- 1; i <-1; j <-0
    linecols <- c("black","cyan3","green3","blue")
    lines(logCNR[,l], logACR[,l], lty=1, col=j+1, lwd=1.25)
    text(logCNR[nrho,l]+0.03, logACR[nrho,l], paste(i,j,sep="-"), cex=0.65)
    for(i in 2:7) {
        for(j in 0:floor(i/2)) {
            l <- l+1
            lines(logCNR[,l], logACR[,l], lty=i-1, col=linecols[j+1], lwd=1.25)
            text(logCNR[nrho,l]+0.03, logACR[nrho,l], paste(i,j,sep="-"), cex=0.65)
        }
    }
    # overlay the observed segments, filtering out tiny ones
    nsnps <- sum(cncf$num.mark)
    nhets <- sum(cncf$nhet)
    ii <- cncf$num.mark > nfrac*nsnps & cncf$nhet > nfrac*nhets
    cex <- 0.3 + 2.7*(cncf$num.mark[ii]/sum(0.1*cncf$num.mark[ii]))
    chrcol <- rainbow(24)
    points(cncf$cnlr.median[ii] - dipLogR, sqrt(abs(cncf$mafR[ii])), cex=cex, pch=10, col=chrcol[cncf$chrom[ii]], lwd=1.5)
    legend(-1, 5.25, paste("chr", c(1:22, "X"), sep=""), ncol=4, pch=10, col=chrcol[1:23], cex=0.65)
}
# this function can be used to rerun facets on output from procSample
# usage is xx <- rePreProcSample(out$jointseg[,1:8])
# oo <- procSample(xx, ...) etc.
# Rebuilds the preProcSample-style object from an existing jointseg data
# frame (first 8 columns), recomputing logR/logOR and the segmentation.
# Unlike preProcSample, the "udef" build is not supported here.
rePreProcSample<- function(jseg, cval=25, deltaCN=0, gbuild=c("hg19", "hg38", "hg18", "mm9", "mm10"), hetscale=TRUE, unmatched=FALSE) {
    pmat <- jseg[, c("chrom", "maploc", "rCountT", "rCountN", "vafT", "vafN", "het", "keep")]
    gbuild <- match.arg(gbuild)
    # integer value for chromosome X depends on the genome
    if (gbuild %in% c("hg19", "hg38", "hg18")) nX <- 23
    if (gbuild %in% c("mm9", "mm10")) nX <- 20
    dmat <- counts2logROR(pmat[pmat$rCountT>0,], gbuild, unmatched)
    tmp <- segsnps(dmat, cval, hetscale, deltaCN)
    out <- list(pmat=pmat, gbuild=gbuild, nX=nX)
    c(out, tmp)
}
|
82855c80542cd193ba7b8ab2884f6fee2a3d594d | c88fb2b121a4a3cb2dc2ed9ecc3744ce410e0e68 | /lab-1/assignment2.R | 579c3f132fb6c3db9c636d087d2f01f1702c5439 | [] | no_license | antonmoeriksson/TDDE01 | 7b3752c7eb8d301a9fce6ffd74f985165b471d87 | b318147095226eb59cb8d5cf4939f542cd4984f8 | refs/heads/master | 2021-09-05T18:37:42.102571 | 2018-01-30T09:31:46 | 2018-01-30T09:31:46 | 108,877,357 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,139 | r | assignment2.R | # Anton MO Eriksson
# anter491
# 2017-11-16
# Lab 1 assignment 2: maximum-likelihood and Bayesian estimation of an
# exponential lifetime model for machine life expectancy data.
require(readxl)
library(readxl)
# NOTE(review): this absolute working directory is machine-specific; the
# script only runs from this path.
setwd("/home/ame/git/tdde01/TDDE01/lab-1")
set.seed(12345)
# 2.1 Import data.
life_expectancy <- data.matrix(read_excel("machines.xlsx"))
# Helper functions for all sub-assignments.
# Log-likelihood of an exponential(theta) model for the observed data:
# sum over observations x of log(theta * exp(-theta * x)).
#
# theta      : rate parameter (> 0).
# input_data : numeric vector/matrix of observed lifetimes.
#
# Fix: the original computed `prob_model` but then passed an undefined
# function `probability_model()` to log(), which crashed at first use;
# the computed density vector is now used directly.
log_likelihood <- function(theta, input_data) {
  prob_model <- theta * exp(-theta * input_data)
  sum(log(prob_model))
}
# Log of (likelihood x exponential prior) for the exponential lifetime model:
#   sum_i log( theta * exp(-theta * x_i) * lambda * exp(-lambda * theta) )
#
#   theta      - exponential rate parameter (scalar)
#   input_data - numeric vector/matrix of observed lifetimes
#   lambda     - rate of the exponential prior placed on theta
# Returns the summed log value as a single number.
bayesian_log_likelihood <- function(theta, input_data, lambda) {
  likelihood_terms <- theta * exp((-theta) * input_data)
  prior_density <- lambda * exp((-lambda) * theta)
  sum(log(likelihood_terms * prior_density))
}
# 2.2
# Plots the log-likelihood curve & prints the maximum likelihood value for theta.
theta <- seq(from = 0, to = 10, by = 0.01)
# Full-sample log-likelihood for every candidate theta (vectorised; replaces
# the original grow-in-a-loop construction).
log_lh <- vapply(theta, log_likelihood, numeric(1), input_data = life_expectancy)
# BUG FIX: `max_theta` was used in the plot title below but never computed.
max_theta <- theta[which.max(log_lh)]
plot(
  theta,
  log_lh,
  col = "Red",
  xlab = "Theta",
  ylab = "log(p(x|theta))",
  main = c("Max theta = ", max_theta)
)
# 2.3 with only 6 samples.
theta <- seq(from = 0, to = 10, by = 0.01)
log_lh_6 <- vapply(theta, log_likelihood, numeric(1),
                   input_data = life_expectancy[1:6])
max_theta_6 <- theta[which.max(log_lh_6)]
# BUG FIX: `max_theta` (the full-sample optimum used in the title below) was
# never defined anywhere in the original script; derive it here from the
# full-sample curve `log_lh` computed in section 2.2.
max_theta <- theta[which.max(log_lh)]
plot(
  theta,
  log_lh_6,
  col = "Pink",
  ylim = c(-300, 0),
  xlab = "Theta",
  ylab = "log(p(x|theta))",
  main = c("Max of thetas are ", max_theta, max_theta_6)
)
# Overlay the full-sample curve for comparison.
points(theta, log_lh, col = "black")
legend(
  x = "bottomleft",
  legend = c("All data", "Data 1:6"),
  col = c("Black", "Pink"),
  lty = c(1, 1)
)
# Task 2.4: log of likelihood times an exponential prior on theta (lambda = 10),
# maximised over the same theta grid.
theta <- seq(from = 0, to = 10, by = 0.01)
log_lh_bay <- vapply(theta, bayesian_log_likelihood, numeric(1),
                     input_data = life_expectancy, lambda = 10)
max_theta <- theta[which.max(log_lh_bay)]
plot(
  theta,
  log_lh_bay,
  col = "Red",
  xlab = "Theta",
  ylab = "log(p(x|theta))",
  main = c("Max theta = ", max_theta)
)
# 2.5: draw 50 new observations from the fitted exponential model and compare
# their histogram with the observed data.
# BUG FIX: the original drew the sample twice, the second time from the
# undefined variable `theta_max`; a single draw using `max_theta` is kept.
random_exp_distribution <- rexp(50, max_theta)
hist(random_exp_distribution)
hist(life_expectancy)
|
00deb79d3d440c23edaa50e92d871ac72a16aa2b | 4e508cb5c61643e03bb773ce9e09559219713cc5 | /DefinicaoDePackages.R | 92dc22f7bab1633749f9fd452f5c41eebc189549 | [] | no_license | Galaton/R | 89a71372c2d38109d8e775e9b0f33c54e00a2bfe | 9a28da576ef0e249717b17db1e3302f80619d2d2 | refs/heads/master | 2021-01-23T17:57:16.105611 | 2017-09-07T20:59:19 | 2017-09-07T20:59:19 | 102,785,363 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 347 | r | DefinicaoDePackages.R | # Packages são coleçãoes de funções em r, dados, e codigos compilados em
# um formato bem definido, o diretório onde estes packages sao guardados
# são chamados de bibliotecas
# Para baixar um package é só digitar
# install.packages("NomeDoPackage")
# Não basta só baixar os packages temos que os ativar
# library(NomeDoPackage)
|
73c01906e370b82d6ba6e805ae5a2d285e83594c | f737482c0cf4f721392500a0dda203fd10ff8d64 | /TamasFerenci_BiomedicalApplicationsOfTimeSeriesAnalysis.R | 90a884d2d029201f4c3ce22811bc5af374d30f2b | [] | no_license | tamas-ferenci/BiomedicalApplicationsOfTimeSeriesAnalysis | 897b8a5280f71939eb1d0f7450e1b519a7a453f4 | 18a06f1f95c702ac58ecb40ae5b48d29dba7a822 | refs/heads/master | 2021-05-13T11:41:23.755642 | 2018-11-30T14:18:59 | 2018-11-30T14:18:59 | 117,135,499 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,285 | r | TamasFerenci_BiomedicalApplicationsOfTimeSeriesAnalysis.R | ## ----setup, include=FALSE------------------------------------------------
# Packages used throughout: lattice (trellis graphics), data.table, rms.
library( lattice )
library( data.table )
library( rms )
knitr::opts_chunk$set( cache = TRUE )
## ------------------------------------------------------------------------
# Illustrative ts objects: quarterly (frequency 4) and monthly (frequency 12)
# white-noise series, both starting at the 2nd period of 2010.
ts( rnorm( 20 ), frequency = 4, start = c( 2010, 2 ) )
ts( rnorm( 30 ), frequency = 12, start = c( 2010, 2 ) )
## ---- fig.height=5-------------------------------------------------------
# Monthly UK deaths from lung diseases (built-in dataset): print, then plot.
ldeaths
plot( ldeaths )
## ---- fig.height = 5.5---------------------------------------------------
# Simulated signal: sum of two sinusoids (arguments t*2 and t/10*2) plus
# Gaussian noise with sd 0.1; only the first 200 points are shown.
SimDataFourier <- data.frame( t = 1:1000 )
SimDataFourier <- transform( SimDataFourier, y = 0.5*sin( t*2 ) + sin( t/10*2 ) +
                               rnorm( length( t ), 0, 0.1 ) )
xyplot( y ~ t, data = SimDataFourier, type = "l", xlim = c( 0, 200 ) )
## ------------------------------------------------------------------------
# Periodogram of the simulated signal (log10 y-axis).
xyplot( spec ~ freq, data = spectrum( SimDataFourier$y, plot = FALSE ), type = "l",
        scales = list( y = list( log = 10 ) ) )
## ------------------------------------------------------------------------
# Lattice panel function: draw the series, then mark and label local maxima.
#   x, y       - panel coordinates (in the calls below: frequency / spectrum)
#   width      - rolling-window width for the local-maximum search; forced to
#                an odd value so the window has a unique centre point
#   maxmeddiff - a centre point only counts as a peak when it exceeds the
#                window median by more than this amount (noise filter)
#   rounddigit - digits used for the peak's x-position label
#   ...        - forwarded to panel.xyplot()
locmaxpanel <- function( x, y, width, maxmeddiff = 1, rounddigit = 2, ... ) {
  if( width%%2==0 )
    width <- width+1
  panel.xyplot( x, y, ... )
  # TRUE where the window centre is the window maximum and sufficiently above
  # the window median; NA at the series edges (fill = NA), so indexing with
  # `maxs` yields NA coordinates for those positions.
  # NOTE(review): the inner `x` shadows the panel's `x` argument (here it is
  # the window of y-values) - works, but is confusing to read.
  maxs <- zoo::rollapply( y, width, function(x) (which.max(x)==(width+1)/2)&
                            (max(x)-median(x)>maxmeddiff),
                          align = "center", fill = NA )
  panel.abline( v = x[ maxs ], col = "gray" )
  panel.points( x[ maxs ], y[ maxs ] )
  panel.text( x[ maxs ], y[ maxs ], round( x[ maxs ], rounddigit ), pos = 4 )
}
## ------------------------------------------------------------------------
# Same periodogram, now annotated with locmaxpanel (window 21, threshold 2).
xyplot( spec ~ freq, data = spectrum( SimDataFourier$y, plot = FALSE ), type = "l",
        scales = list( y = list( log = 10 ) ), panel = locmaxpanel, width = 21, maxmeddiff = 2 )
## ----message=FALSE,warning=FALSE,fig.height=5.5--------------------------
## require( tuneR ) ## require( pastecs ) ## devtools::install_github( "mkfs/r-physionet-ptb" )
## https://www.physionet.org/physiobank/database/ptbdb/
## system2( system.file( "exec", "download_ptb.sh", package = "r.physionet.ptb" ) )
## system2( system.file( "exec", "ptb_patient_to_json.rb", package = "r.physionet.ptb" ),
##          args="patient001" )
# Load one ECG lead ("i") for patient001 from the locally prepared PTB JSON
# (see the commented download/conversion steps above).
library( r.physionet.ptb )
ptb <- r.physionet.ptb::ptb.from.file( "patient001.json" )
ptbecg <- r.physionet.ptb::ptb.extract.lead( ptb, "i" )$`1-10010`
xyplot( ptbecg~seq_along( ptbecg ), type = "l", xlim = c( 0, 5000 ), xlab = "Time", ylab = "" )
## ---- message=FALSE,warning=FALSE----------------------------------------
# Smoothed periodogram of the ECG (span smoothing), peaks annotated.
xyplot( spec ~ freq, data = spectrum( ptbecg, plot = FALSE, span = rep( 201, 3 ) ), type = "l",
        scales = list( y = list( log = 10 ) ), panel = locmaxpanel, width = 21,
        maxmeddiff = 2e-4 )
## ---- fig.height = 6-----------------------------------------------------
# Non-stationary signal: one sinusoid for t <= 1000, another afterwards,
# plus noise - the frequency content changes over time.
SimDataWavelet <- data.frame( t = 1:2000 )
SimDataWavelet <- transform( SimDataWavelet,
                             y = ifelse( t<=1000, sin( t*2 ), sin( t/10*2 ) ) +
                               rnorm( length( t ), 0, 0.1 ) )
xyplot( y ~ t, data = SimDataWavelet, type = "l" )
## ------------------------------------------------------------------------
# Periodogram of the whole series (no time information).
xyplot( spec ~ freq, data = spectrum( SimDataWavelet$y, plot = FALSE ), type = "l",
        scales = list( y = list( log = 10 ) ) )
## ------------------------------------------------------------------------
# Wavelet power image: frequency content resolved over time.
WaveletComp::wt.image( WaveletComp::analyze.wavelet( SimDataWavelet, "y",
                                                     verbose = FALSE, make.pval = FALSE ) )
## ---- fig.height = 6-----------------------------------------------------
# Second example: a chirp whose period drifts from 20 to 200, plus noise.
SimDataWavelet <- data.frame( t = 1:2000 )
SimDataWavelet <- transform( SimDataWavelet,
                             y = WaveletComp::periodic.series( start.period = 20,
                                                               end.period = 200,
                                                               length = length( t ) ) +
                               0.1*rnorm( length( t ) ) )
xyplot( y ~ t, data = SimDataWavelet, type = "l" )
## ------------------------------------------------------------------------
xyplot( spec ~ freq, data = spectrum( SimDataWavelet$y, plot = FALSE ), type = "l",
        scales = list( y = list( log = 10 ) ) )
## ------------------------------------------------------------------------
WaveletComp::wt.image( WaveletComp::analyze.wavelet( SimDataWavelet, "y",
                                                     verbose = FALSE, make.pval = FALSE ) )
## ------------------------------------------------------------------------
# Historic UK notifiable-disease annual totals (gov.uk workbook, one sheet
# per period). "*" marks missing values in the workbook.
tmpfile <- tempfile( fileext = ".xlsx" )
download.file( url = paste0( "https://www.gov.uk/government/uploads/system/uploads/",
                             "attachment_data/file/339410/NoidsHistoricAnnualTotals.xlsx" ),
               destfile = tmpfile, mode = "wb" )
res1 <- XLConnect::loadWorkbook( tmpfile )
XLConnect::setMissingValue( res1, value = c( "*" ) )
res1 <- do.call( plyr::rbind.fill, lapply( XLConnect::getSheets( res1 ), function( s ) {
  temp <- XLConnect::readWorksheet( res1, sheet = s, startRow = 4 )
  # keep columns from "Disease" onwards, and rows up to the first NA disease
  temp <- temp[ , grep( "Disease", colnames( temp ) ):ncol( temp ) ]
  temp <- temp[ 1:( if( sum( is.na( temp$Disease ) )==0 ) nrow( temp ) else
    which( is.na( temp$Disease ) )[ 1 ]-1 ), ]
  # strip whitespace, thousand separators and footnote daggers, then coerce
  for( i in 2:ncol( temp ) )
    temp[ , i ] <- as.numeric( gsub( "[[:space:].,‡†]", "", temp[ , i ] ) )
  # transpose so rows are years, columns are diseases; year taken from the
  # row names created by the transpose (e.g. "X1912" -> 1912)
  temp2 <- as.data.frame( t( temp[ , - 1 ] ) )
  colnames( temp2 ) <- temp[ , 1 ]
  temp2$Year <- as.numeric( substring( rownames( temp2 ), 2, 5 ) )
  temp2
} ) )
unlink( tmpfile )
## ------------------------------------------------------------------------
# Annual totals 1982-2016 from a second workbook ("--" marks missing);
# the first sheet is skipped and the last row of each sheet dropped.
tmpfile <- tempfile( fileext = ".xlsx" )
download.file( url = paste0( "https://www.gov.uk/government/uploads/system/uploads/",
                             "attachment_data/file/664864/",
                             "Annual_totals_from_1982_to_2016.xlsx" ),
               destfile = tmpfile, mode = "wb" )
res2 <- XLConnect::loadWorkbook( tmpfile )
XLConnect::setMissingValue( res2, value = c( "--" ) )
res2 <- do.call( plyr::rbind.fill, lapply( XLConnect::getSheets( res2 )[ -1 ], function( s ) {
  temp <- XLConnect::readWorksheet( res2, sheet = s, startRow = 5 )
  temp <- temp[ 1:( nrow( temp )-1 ), ]
  temp2 <- as.data.frame( t( temp[ , - 1 ] ) )
  colnames( temp2 ) <- temp[ , 1 ]
  temp2$Year <- as.numeric( substring( rownames( temp2 ), 2, 5 ) )
  temp2
} ) )
unlink( tmpfile )
## ------------------------------------------------------------------------
# England & Wales population estimates 1838-2014 (ONS spreadsheet) ...
tmpfile <- tempfile( fileext = ".xls" )
download.file( url = paste0( "https://www.ons.gov.uk/file?uri=/",
                             "peoplepopulationandcommunity/populationandmigration/",
                             "populationestimates/adhocs/",
                             "004358englandandwalespopulationestimates1838to2014/",
                             "englandandwalespopulationestimates18382014tcm77409914.xls" ),
               destfile = tmpfile, mode = "wb" )
res3 <- XLConnect::readWorksheetFromFile( tmpfile, sheet = "EW Total Pop 1838-2014", startRow = 2,
                                          endRow = 179 )
unlink( tmpfile )
names( res3 )[ 1 ] <- "Year"
# NOTE(review): values below 100 000 are scaled by 1000 - presumably those
# rows are stored in thousands; verify against the source spreadsheet.
res3$Persons <- ifelse( res3$Persons < 100000, res3$Persons*1000, res3$Persons )
res3 <- res3[ , c( "Year", "Persons" ) ]
# ... plus the ONS time-series API for 2015 onwards.
res4 <- read.csv( paste0( "https://www.ons.gov.uk/generator?format=csv&uri=/",
                          "peoplepopulationandcommunity/populationandmigration/",
                          "populationestimates/timeseries/ewpop/pop" ), skip = 7 )
names( res4 ) <- c( "Year", "Persons" )
res4 <- res4[ res4$Year>=2015, ]
# Merge case counts with population, then compute pertussis incidence
# per 100 000 person-years and drop years without a count.
UKEpid <- merge( plyr::rbind.fill( res1, res2 ), rbind( res3, res4 ) )
UKPertussis <- UKEpid[ , c( "Year", "Whooping cough", "Persons" ) ]
UKPertussis$Inc <- UKPertussis$`Whooping cough`/UKPertussis$Persons*100000
UKPertussis <- UKPertussis[ !is.na( UKPertussis$`Whooping cough` ), ]
## ---- fig.height = 6-----------------------------------------------------
# Daily cardiovascular deaths (season package example data).
data( "CVDdaily", package = "season" )
rownames( CVDdaily ) <- NULL
xyplot( cvd ~ date, data = CVDdaily, type = "l", xlab = "Time", ylab = "Number of deaths" )
## ------------------------------------------------------------------------
# Derived calendar covariates: year, day of week (Monday = 1, as factor) and
# day of year rescaled to [0, 1] by dividing by the length of that year.
# NOTE(review): yearDays() is not namespaced here - presumably provided by a
# package attached above (rms pulls in Hmisc); confirm.
CVDdaily$year <- lubridate::year( CVDdaily$date )
CVDdaily$wday <- as.factor( lubridate::wday( CVDdaily$date, week_start = 1 ) )
CVDdaily$yday <- lubridate::yday( CVDdaily$date )/yearDays( CVDdaily$date )
head( CVDdaily[ , c( "date", "year", "wday", "yday", "cvd" ) ] )
## ---- message=FALSE------------------------------------------------------
# Negative-binomial GAM: smooth long-term trend, weekday effect, and a
# cyclic (bs = "cc") within-year seasonal smooth.
library( mgcv )
fit <- gam( cvd ~ s( as.numeric( date ) ) + wday + s( yday, bs = "cc" ), data = CVDdaily,
            family = nb( link = log ) )
summary( fit )
## ---- fig.height = 6-----------------------------------------------------
# Long-term trend on the incidence-rate-ratio scale (trans = exp), with a
# manually drawn year axis.
plot( fit, select = 1, scale = 0, rug = FALSE, trans = exp, shade = TRUE,
      col = trellis.par.get()$superpose.line$col[1], xaxt = "n", xlab = "Year", ylab = "IRR" )
axis( 1, at = seq( CVDdaily$date[1], tail( CVDdaily$date, 1 )+1, by = "year" ),
      labels = year( CVDdaily$date[1] ):year( tail( CVDdaily$date, 1 )+1 ) )
## ------------------------------------------------------------------------
# Seasonal (within-year) effect, x axis relabelled to approximate days.
plot( fit, select = 2, scale = 0, rug = FALSE, trans = exp, shade = TRUE,
      col = trellis.par.get()$superpose.line$col[1], xaxt = "n", xlab = "Day of year",
      ylab = "IRR" )
axis( 1, at = seq( 0, 1, 1/12 ), labels = seq( 0, 1, 1/12 )*30*12 )
## ---- fig.height = 6-----------------------------------------------------
# Weekday effect via an external termplot variant.
# NOTE(review): sourcing executable code from a pastebin URL is fragile and
# a security risk; pin a local copy instead.
source( "https://pastebin.com/raw/hBmStX4Y" )
termplot2( fit, terms = "wday", se = TRUE, yscale = "exponential",
           col.term = trellis.par.get()$superpose.line$col[1],
           col.se = "gray80", se.type = "polygon", xlab = "Day of week" )
## ---- fig.height = 4-----------------------------------------------------
# Moving-average smoothing of the Fourier demo series at four window orders.
do.call( gridExtra::grid.arrange, lapply( c( 2, 6, 12, 24 ), function( o ) {
  xyplot( y ~ t, groups = grp, data = rbind( data.frame( grp = "data", SimDataFourier ),
                                             data.frame( grp = "smooth", t = SimDataFourier$t,
                                                         y = forecast::ma( SimDataFourier$y,
                                                                           o ) ) ),
          type = "l", xlim = c( 0, 200 ), main = paste0( "Order: ", o ) )
} ) )
## ------------------------------------------------------------------------
# Classical decomposition, STL, and the seasonally adjusted series.
plot( decompose( ldeaths ) )
## ------------------------------------------------------------------------
plot( stl( ldeaths, s.window = "periodic" ) )
## ------------------------------------------------------------------------
plot( forecast::seasadj( stl( ldeaths, s.window = "periodic" ) ) )
## ---- fig.height = 5.5---------------------------------------------------
# TLC trial: lead levels at weeks 0/1/4/6, wide format reshaped to long.
# NOTE(review): relevel() on TLCData$Trt requires a factor; with R >= 4.0
# read.table no longer converts strings to factors by default - confirm this
# still runs, or wrap Trt in factor() first.
TLCData <- read.table( "https://content.sph.harvard.edu/fitzmaur/ala2e/tlc-data.txt",
                       col.names = c( "ID", "Trt", paste0( "Wk", c( 0, 1, 4, 6 ) ) ) )
TLCData <- reshape( TLCData, varying = paste0( "Wk", c( 0, 1, 4, 6 ) ), v.names = "LeadLevel",
                    timevar = "Week", times = c( 0, 1, 4, 6 ), idvar = "ID", direction = "long" )
TLCData$Trt <- relevel( TLCData$Trt, ref = "P" )
TLCData$Week.f <- as.factor( TLCData$Week )
xyplot( LeadLevel ~ Week | Trt, groups = ID, data = TLCData, type = "b" )
## ---- fig.height = 5.5---------------------------------------------------
# Group means with t-based confidence intervals, by treatment and week.
TLCData <- data.table( TLCData )
TLCData$Time <- as.numeric( TLCData$Week.f )
dd <- datadist( TLCData )
options( datadist = "dd" )
xYplot( Cbind( mean, lwr, upr ) ~ Week, groups = Trt, type = "b",
        data = TLCData[ , .( mean = mean( LeadLevel ),
                             lwr = t.test( LeadLevel )$conf.int[1],
                             upr = t.test( LeadLevel )$conf.int[2] ) , .( Trt, Week ) ],
        ylim = c( 10, 30 ), ylab = "Mean lead level" )
## ------------------------------------------------------------------------
# Naive OLS with week-by-treatment interaction (ignores within-subject
# correlation) ...
ols( LeadLevel ~ Week.f*Trt, data = TLCData )
## ------------------------------------------------------------------------
# ... versus GLS with an unstructured correlation across the four weeks and
# week-specific variances.
fit <- Gls( LeadLevel ~ Week.f*Trt, data = TLCData, corr = nlme::corSymm( form = ~ Time | ID ),
            weights = nlme::varIdent( form = ~ 1 | Week.f ) )
fit
## ------------------------------------------------------------------------
# Model-based means with confidence bands; Week.f converted back to numeric
# for plotting on a continuous axis.
temp <- Predict( fit, Trt, Week.f )
temp$Week.f <- as.numeric( levels( temp$Week.f ) )[ temp$Week.f ]
xYplot( Cbind( yhat, lower, upper ) ~ Week.f, groups = Trt, data = temp, type = "b",
        ylim = c( 10, 30 ) )
## ------------------------------------------------------------------------
# Orthodont growth data (females only).
data( "Orthodont", package = "nlme" )
OrthoFem <- Orthodont[ Orthodont$Sex=="Female", ]
plot( OrthoFem )
## ------------------------------------------------------------------------
# Per-subject OLS lines (age centred at 11) and their interval plot.
fit1 <- nlme::lmList( distance ~ I( age - 11 ), data = OrthoFem )
plot( nlme::intervals( fit1 ) )
## ------------------------------------------------------------------------
# Random-intercept mixed model; compare per-subject fits, subject-level
# mixed-model fits (level = 1), and the population line (level = 0).
fit2 <- nlme::lme( distance ~ age, data = OrthoFem, random = ~1|Subject )
xyplot( distance + fitted( fit1 ) + fitted( fit2, level = 1 ) +
          fitted( fit2, level = 0 ) ~ age | Subject, data = OrthoFem,
        type = c( "p", "l", "l", "l" ), distribute.type = TRUE, ylab = "Distance", grid = TRUE,
        auto.key = list( text = c( "Measured", "Fitted (individual models)",
                                   "Fitted (mixed effects)", "Fitted (fixed effects only)" ),
                         columns = 4, points = FALSE, lines = TRUE ) )
|
ce161cd24c1651b64926a3edba8a4c8118f749f9 | 54af41f3f96a678c2281eb761e7024d041542c74 | /input_prep/scripts/dalliance_files.R | f7187abf86ad8e1721a4b04b5a17a4226d88e8cc | [] | no_license | csorianot/godmc-database | 58dd4c12c3bccb71cbc2285ac7920ea28cbe2218 | cbaf37d73f2cff801f6ea6f9c716417d10f68553 | refs/heads/master | 2020-04-23T02:02:49.733715 | 2018-11-21T11:54:41 | 2018-11-21T11:54:41 | 170,832,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,060 | r | dalliance_files.R | library(readr)
#r<-read_tsv("gwas_catalog_180627.txt")
#r<-data.frame(r)
#downloaded from http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/
# --- GWAS catalog -> BED track (genome-wide significant hits only) ---
r2<-read_tsv("gwasCatalog.txt",col_names=FALSE)
r2<-data.frame(r2)
r2<-r2[which(r2$X18<5e-8),]          # keep genome-wide significant associations
r3<-data.frame(r2$X2,r2$X3,r2$X4)    # chrom, chromStart, chromEnd
# Track label: "rsid;trait(author)", spaces replaced with underscores.
# NOTE(review): `p` is built but never added to r3, so the GWAS BED file is
# written without a name column - presumably intended as a 4th column; confirm.
p<-paste0(r2$X5,";",r2$X11,"(",r2$X7,")")
p<-gsub(" ","_",p)
write.table(r3,"gwasCatalog.bed",sep=" ",col.names=FALSE,quote=FALSE,row.names=FALSE)
#sort -k1,1 -k2,2n gwasCatalog.bed >gwasCatalog_sorted.bed
#bedToBigBed gwasCatalog_sorted.bed chrom.sizes gwasCatalog_sorted.bb
# --- EWAS catalog -> BED track ---
r2<-read_tsv("EWAS_Catalog_20-02-2018.txt.gz")
r2<-data.frame(r2)
r2<-r2[which(r2$P<1e-7),]            # keep significant CpG associations
chr<-strsplit(r2$Location,split=":") # "chr:pos" -> chromosome and position
chr<-do.call("rbind",chr)
start<-as.numeric(chr[,2])-1         # BED intervals are 0-based half-open
stop<-chr[,2]
name<-paste0(r2$CpG,";",r2$Trait,"(",r2$Author,")")
name<-gsub(" ","_",name)
bed<-data.frame(chr[,1],start,stop,name)
# BUG FIX: the original wrote `r3` (the GWAS table) into ewasCatalog.bed;
# the freshly built `bed` data frame is what belongs here.
write.table(bed,"ewasCatalog.bed",sep=" ",col.names=FALSE,quote=FALSE,row.names=FALSE)
#sort -k1,1 -k2,2n ewasCatalog.bed >ewasCatalog_sorted.bed
#bedToBigBed ewasCatalog_sorted.bed chrom.sizes ewasCatalog_sorted.bb
|
674f9059faffa6ac2d0d60713da0ab5fae67924a | 5702c29623fd7edf212495b071c736e274760d46 | /Chapter6/Lab1_subset_selection_methods.R | 39d0a353dde7a41db602df1af62f7fba692a8b3e | [] | no_license | GeorgeBatch/introduction_to_sl_with_r | 678f53743d2fd362d274988709f1034c7bde33cc | 52100e00b99c907b0d529731145b8943ef3e9170 | refs/heads/master | 2022-11-24T06:50:36.329939 | 2020-07-23T11:28:35 | 2020-07-23T11:28:35 | 281,922,117 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,024 | r | Lab1_subset_selection_methods.R | ################################################################################
# Best Subset selection
################################################################################
# ISLR supplies the Hitters data; leaps supplies regsubsets().
library(ISLR)
library(car) # companion to applied regression
library(leaps) # variable subset selection
options(scipen=999)
# ------------------------------------------------------------------------------
# First look at the data
# ------------------------------------------------------------------------------
names(Hitters)
# 20 names
str(Hitters)
# numeric and factor variables
dim(Hitters)
# 322 20
sum(is.na(Hitters$Salary))
# 59 NA values
# Drop the 59 players with missing Salary before modelling.
Hitters <- na.omit(Hitters)
dim(Hitters)
# 263 20
sum(is.na(Hitters$Salary))
# 0
# ------------------------------------------------------------------------------
# Best subset selection (nvmax = 8 by default)
# ------------------------------------------------------------------------------
regfit.full <- regsubsets(Salary ~ ., Hitters)
summary(regfit.full)
# ------------------------------------------------------------------------------
# Best subset selection (nvmax = 19 - all variables)
# ------------------------------------------------------------------------------
regfit.very.full <- regsubsets(Salary ~ ., Hitters, nvmax = 19)
reg.summary <- summary(regfit.very.full)
names(reg.summary)
reg.summary$rsq
# Compare model sizes on four criteria in a 2x2 panel layout; the red dot
# marks the optimum of each criterion.
par(mfrow=c(2,2))
plot(reg.summary$rss, xlab="Number of Variables",
     ylab="RSS", type="l")
# Plotting R^2_adj
plot(reg.summary$adjr2, xlab="Number of Variables",
     ylab="Adjusted RSq", type="l")
which.max(reg.summary$adjr2)
# 11
points(11,reg.summary$adjr2[11], col="red",cex=2,pch=20)
# Plotting C_p
plot(reg.summary$cp, xlab="Number of Variables ",
     ylab="Cp", type="l")
which.min(reg.summary$cp )
# 10
points(10, reg.summary$cp[10], col="red", cex=2, pch=20)
# Plotting BIC
plot(reg.summary$bic, xlab="Number of Variables ",ylab="BIC",
     type="l")
which.min(reg.summary$bic )
# 6
points(6,reg.summary$bic[6],col="red",cex=2,pch=20)
# top row shows the best model
# can be used instead of summary (more condence)
par(mfrow = c(1,1))
plot(regfit.very.full, scale="r2")
plot(regfit.very.full, scale="adjr2")
plot(regfit.very.full, scale="Cp")
plot(regfit.very.full, scale="bic")
# looking up the coefficients
coef(regfit.very.full, 6)
################################################################################
# Forward and Backward Stepwise Selection
################################################################################
regfit.fwd <- regsubsets(Salary ~ ., data=Hitters ,
                         nvmax=19, method ="forward")
summary(regfit.fwd)
regfit.bwd <- regsubsets(Salary ~ ., data=Hitters ,
                         nvmax=19, method ="backward")
summary(regfit.bwd)
# Different variables for 7-variable models
coef(regfit.very.full, 7)
coef(regfit.fwd, 7)
coef(regfit.bwd, 7)
################################################################################
# Model selection using validation set
################################################################################
# splitting data into test and train sets
set.seed(1)
train <- sample(x = c(TRUE,FALSE), nrow(Hitters), replace = TRUE)
test <- (!train )
# performing best subset selection
regfit.best <- regsubsets(Salary ~ ., data=Hitters[train, ], nvmax =19)
# ------------------------------------------------------------------------------
# Compute the validation set error for the best model of each model size.
# ------------------------------------------------------------------------------
# We first make a model matrix from the test data.
test.mat <- model.matrix(Salary ~ ., data=Hitters[test, ])
# for each of the model sizes
# take the names of the predictors in the best model of this size
# create a vector of predicted responces
# calculate the MSE for the test set
# record the MSE
val.errors=rep(NA,19)
for (i in 1:19){
  coefi <- coef(regfit.best, id = i)
  pred <- test.mat[, names(coefi)] %*% coefi
  val.errors[i] <- mean((Hitters$Salary[test]-pred)^2)
}
val.errors
which.min(val.errors)
coef(regfit.best, 10)
# Prediction method for leaps::regsubsets fits (the package provides none).
# Rebuilds the model matrix for `newdata` from the formula stored in the
# fit's call, then multiplies by the coefficients of the `id`-variable model.
predict.regsubsets <- function(object, newdata, id,...){
  model_formula <- as.formula(object$call[[2]])
  design <- model.matrix(model_formula, newdata)
  beta <- coef(object, id = id)
  design[, names(beta)] %*% beta
}
# Using validation, we have selected 10 - the best number of variables
# Now it's time to choose best ten-variable model using the full data set
# It is important to use the whole data set:
# we can get differnet 10 variables selected
# the coefficient will be more accurate using more data
regfit.best <- regsubsets(Salary ~ ., data=Hitters, nvmax=19)
coef(regfit.best, 10)
# In fact, we see that the best ten-variable model on the full data set has
# a different set of variables than the best ten-variable model on
# the training set.
################################################################################
# Model selection using cross-validation
################################################################################
# K-fold CV: assign each row a fold label 1..K, shuffled.
# NOTE(review): the name `subset` shadows base::subset() from here on.
K <- 10
set.seed(1)
subset <- rep(1:K, ceiling(nrow(Hitters)/K))[1:nrow(Hitters)]
subset <- sample(subset, size=nrow(Hitters), replace = FALSE)
cv.errors <- matrix(NA, K, 19, dimnames = list(NULL, paste(1:19)))
# For each fold j: fit best-subset models on the other folds, then record the
# test MSE of every model size i on fold j.
for (j in 1:K){
  best.fit <- regsubsets(Salary ~ ., data = Hitters[subset != j, ],
                         nvmax = 19)
  for (i in 1:19){
    pred <- predict.regsubsets(object = best.fit,
                               newdata = Hitters[subset == j, ], id = i)
    cv.errors[j, i] <- mean( (Hitters$Salary[subset == j] - pred)^2)
  }
}
# Average over folds per model size and plot the CV error curve.
mean.cv.errors <- apply(X = cv.errors, MARGIN = 2, FUN = mean)
mean.cv.errors
par(mfrow = c(1,1))
plot(mean.cv.errors, type = "b")
# cross-validation selects 11-variable model
reg.best <- regsubsets(Salary ~., data = Hitters, nvmax = 19)
coef(reg.best, 11) |
1298bd9afbef76ba1d84c9bf695e95b2d24446d7 | fbec4c6031fd96a1273ae510baa8a84906f3961d | /prediction/distances.R | 869a0a12505a2561c485d621527760f517db5b14 | [] | no_license | klos71/Junction2019 | f366b35e25929f32d0fffd78de7d5508dd96eabe | cb6abedea85105c0d84b63cb3e2c20a763565511 | refs/heads/master | 2022-12-12T19:46:09.802339 | 2019-11-17T08:18:34 | 2019-11-17T08:18:34 | 221,941,523 | 0 | 0 | null | 2022-12-11T13:37:57 | 2019-11-15T14:36:05 | TypeScript | UTF-8 | R | false | false | 2,400 | r | distances.R |
# Build every ordered pair of distinct station ids from `stations` (a data
# frame with an "id" column). Returns a list with a single element,
# `combinations`: a data frame with columns "id1" and "id2".
# (`unused` is retained only for call-signature compatibility.)
id_permutations <- function(stations, unused=NULL) {
  ids <- stations$id
  pair_blocks <- lapply(ids, function(origin) {
    data.frame(id1 = origin, id2 = ids[ids != origin])
  })
  list(combinations = do.call(rbind, pair_blocks))
}
# This function takes the output from `id_permutations` and the dataframe of
# all trips. For each (id1, id2) pair it summarises the matching trips with
# the quantile(dur_qt) of Duration and the quantile(dis_qt) of Distance,
# discarding pairs with fewer than 3 recorded trips. The return is a list
# with one element, `distances`: a dataframe with columns "DepartureID",
# "ArrivalID", "Duration" and "Distance".
# (The original header said "ReturnID"; the column is actually "ArrivalID".)
calculate_distances <- function(combinations, trips, dur_qt=0.75, dis_qt=0.2) {
  n_pairs <- nrow(combinations)
  # Preallocate numeric NA so types stay stable; seq_len() fixes the
  # 1:nrow() crash the original had when `combinations` was empty.
  durs <- rep(NA_real_, n_pairs)
  dists <- rep(NA_real_, n_pairs)
  for (i in seq_len(n_pairs)) {
    mask <- which(trips$DepartureID == combinations$id1[[i]] &
                    trips$ArrivalID == combinations$id2[[i]])
    if (length(mask) < 3)
      next()
    durs[i] <- unname(quantile(trips$Duration[mask], dur_qt))
    dists[i] <- unname(quantile(trips$Distance[mask], dis_qt))
  }
  keep <- !is.na(durs)
  return(list(distances=data.frame(
    "DepartureID" = combinations$id1[keep],
    "ArrivalID" = combinations$id2[keep],
    "Duration" = durs[keep],
    "Distance" = dists[keep],
    check.names = FALSE
  )))
}
# Read the monthly trip CSVs for April-October 2019 from data/, keep the
# columns of interest under uniform names, and drop rows with missing values.
read_data_trips <- function() {
  monthly <- lapply(4:10, function(m) {
    read.csv(sprintf("data/trips-2019-%02d.csv", m), header = TRUE)
  })
  raw <- do.call(rbind, monthly)
  trips <- data.frame(
    Departure = raw$Departure, Arrival = raw$Return,
    DepartureID = raw$Departure.station.id, ArrivalID = raw$Return.station.id,
    Distance = raw$Covered.distance..m., Duration = raw$Duration..sec..
  )
  trips[complete.cases(trips), ]
}
# Only run from Rscript: sys.nframe() == 0 means no enclosing call, i.e. the
# file is being executed directly rather than source()d from another script.
if (sys.nframe() == 0L) {
  # Helper scripts supply read_data_stations() and prediction utilities.
  source("prediction/station_status.R")
  source("prediction/predict_bikes.R")
  print("Reading Data")
  stations <- read_data_stations(42)
  trips <- read_data_trips()
  # Persist intermediates under outputs/ (created if missing, no warning).
  dir.create("outputs", FALSE, FALSE)
  write.csv(trips, "outputs/trips.csv", row.names = FALSE)
  print("Calculating Distances")
  combs <- id_permutations(stations)$combinations
  dists <- calculate_distances(combs, trips)$distances
  write.csv(dists, "outputs/distances.csv", row.names = FALSE)
}
|
c192ca4517eb0c54408836f1a5f480b3d3d0c30f | 53683e2a9f5a393c6e873dc2882f733ed861fae7 | /Download MDRs Code.R | a97b187b023b2e96d68d2fdcaed8997ff2fa186f | [] | no_license | nts2pd/Medical-Device-Reports | cb80c906c0387d3a78ae3cfff0583e8632644c7d | e34ce5b97b60c9012b9a3d6eb4ad51c01f7d8e03 | refs/heads/master | 2020-03-11T00:42:40.943350 | 2018-06-14T12:47:46 | 2018-06-14T12:47:46 | 129,670,228 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,910 | r | Download MDRs Code.R |
#unable to list more than 100 MDRs at a time, so must download each set by 100
# Build the paged query URLs: the skip=0 page is intentionally absent because
# the first 100 MDRs are fetched separately below (as `mdrx`).
# BUG/IDIOM FIX: the original filled url[i] in a loop indexed by the skip
# offset itself; url[0] <- ... is a silent no-op in R and the other indices
# left NA gaps that had to be stripped with !is.na(). This vectorised form
# produces the identical final vector.
url <- paste0("https://api.fda.gov/device/event.json?search=essure&limit=100&skip=",
              seq(from = 100, to = 28300, by = 100))
#Get information on each MDR from openFDA
# One GET per 100-record page; mdrs becomes a list of httr responses.
mdrs <- NULL
for (i in seq_along(url)) {
  mdrs[[i]] <- GET(url[i])
}
#getting the first 100 MDRs
# (this request has no skip parameter, i.e. it is the skip=0 page)
mdrx <- GET("https://api.fda.gov/device/event.json?search=essure&limit=100")
# Pull the first mdr_text entry of every record ...
# NOTE(review): httr::content(mdrx) re-parses the response on every loop
# iteration; hoisting it into a local variable would be much faster.
textx <- NULL
for (i in seq_along(httr::content(mdrx)$results)) {
  textx[[i]] <- httr::content(mdrx)$results[[i]]$mdr_text[[1]]
}
# ... and the matching received date of its first device entry.
datex <- NULL
for (i in seq_along(httr::content(mdrx)$results)) {
  datex[i] <- httr::content(mdrx)$results[[i]]$device[[1]]$date_received
}
# Stack the per-record text lists into rows, attach parsed dates.
textx <- list.stack(textx)
textx <- as_tibble(textx)
datex <- as_tibble(datex)
textx$date <- ymd(datex$value)
textx
#see content: These nodes contain the relevant content for the analysis
# Interactive spot checks of the nested response structure.
# NOTE(review): the next two lines on mdrs[[2]] are identical date lookups -
# presumably one was meant to be the mdr_text node, as in the other pairs.
httr::content(mdrs[[1]])$results[[1]]$mdr_text[[1]]
httr::content(mdrs[[2]])$results[[4]]$device[[1]]$date_received
httr::content(mdrs[[2]])$results[[4]]$mdr_text[[1]]
httr::content(mdrs[[2]])$results[[4]]$device[[1]]$date_received
httr::content(mdrs[[3]])$results[[4]]$mdr_text[[1]]
httr::content(mdrs[[3]])$results[[4]]$device[[1]]$date_received
#Extract the text
#The text is quite embedded, so extracting it is a tedious process. Each
#element of `mdr_list` (default: the global `mdrs`) holds up to 100 MDRs;
#for each MDR text the corresponding received date is extracted as well.
# Returns a tibble with one row per MDR text plus a `date` column.
tidyMDR <- function(MDRnum, mdr_list = mdrs) {
  results <- httr::content(mdr_list[[MDRnum]])$results
  # BUG FIX: the original hard-coded 1:100, which failed with "subscript out
  # of bounds" on pages with fewer records or records lacking an mdr_text
  # entry (see the flagged tidyMDR(4)/tidyMDR(7) calls below). Iterate over
  # what is actually present and skip text-less records.
  # NOTE(review): assumes every record that has mdr_text also carries
  # device[[1]]$date_received - confirm against the API responses.
  has_text <- vapply(results, function(r) length(r$mdr_text) > 0, logical(1))
  results <- results[has_text]
  testtext <- lapply(results, function(r) r$mdr_text[[1]])
  date_received <- vapply(results, function(r) r$device[[1]]$date_received,
                          character(1))
  testtext <- as_tibble(list.stack(testtext))
  testtext$date <- ymd(date_received)
  testtext
}
# Bind every downloaded batch onto the first-page results. The original
# repeated `MDRresults <- MDRresults %>% bind_rows(tidyMDR(i))` 283 times by
# hand because a bare for loop aborted at the first batch whose JSON lacked
# the expected fields ("subscript out of bounds"). Wrapping each call in
# tryCatch() reproduces the manual behaviour — a failing batch is skipped
# with a warning, contributing nothing to MDRresults — while letting a
# single loop do the work.
MDRresults <- textx
batches <- vector("list", length(mdrs))
for (i in seq_along(mdrs)) {
  batches[[i]] <- tryCatch(
    tidyMDR(i),
    error = function(e) {
      warning("Skipping batch ", i, ": ", conditionMessage(e), call. = FALSE)
      NULL  # NULL entries are ignored by bind_rows()
    }
  )
}
MDRresults <- bind_rows(MDRresults, batches)
eddfd19d4472e15de1063a0079c56a2ce9bf3b83 | 44d20aa0fefa716ff5b687dbb051de1987ec682c | /plot1.R | 4e96a4d5090b4378b32221400b7056e9ed9270f0 | [] | no_license | Enoana/ExData_Plotting1 | 8618beb1d36738b6f2b1403a61be5ab8af51cb31 | 6f678f40383b4e0262658b38132caa4106c81625 | refs/heads/master | 2021-01-18T13:21:57.924437 | 2016-01-22T15:52:11 | 2016-01-22T15:52:11 | 49,960,151 | 0 | 0 | null | 2016-01-19T15:02:01 | 2016-01-19T15:02:01 | null | UTF-8 | R | false | false | 587 | r | plot1.R | setwd("C:/Adatok/coursera_edX/4_Exploratory Data analysis/Quizes_Assignments/Assignment1")
# Load the full household power consumption data set. Dates/times are read as
# character, the seven measurement columns as numeric; "?" marks missing values.
power <- read.table("household_power_consumption.txt",
                    sep = ";", na.strings = "?", header = TRUE,
                    colClasses = c("character", "character", rep("numeric", 7)))
# Keep only the two days of interest (1-2 Feb 2007, dd/mm/yyyy format).
feb_days <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
# Plot 1: histogram of global active power, then copy the screen device to PNG.
par(mar = c(4.2, 4.2, 1, 1))
hist(feb_days$Global_active_power, breaks = 20, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
36efe811ed887211dd95074df3d00d48efa71b5b | 658650485ab0ef0cefb6ab0e2812cb76addb477f | /R/plugins.r | cd6ea8721c55a5ad502b6757cc831fbcaa983542 | [
"MIT"
] | permissive | jhollist/spocc | bb6fcaae82d6bc1ac8f28477f1e1c88e2fc98f95 | 9d6dc976129ddb63608a493a6fb9a8039bddfab6 | refs/heads/master | 2021-01-17T18:05:22.342694 | 2015-03-23T18:43:04 | 2015-03-23T18:43:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,273 | r | plugins.r | # Plugins for the occ function for each data source
#' Query GBIF for occurrence records.
#'
#' Internal plugin for occ(): translates the generic query/geometry/limit
#' arguments into rgbif::occ_search() options and standardises the result
#' into list(time, found, data, opts). Returns an "empty" list (see
#' emptylist()) when the source is not requested or nothing usable is found.
#'
#' Uses inherits() rather than the fragile `class(x) ==` / `class(x) %in%`
#' comparisons of the original (which break on objects with multiple classes).
#' @noRd
foo_gbif <- function(sources, query, limit, geometry, callopts, opts) {
  if (any(grepl("gbif", sources))) {
    if (!is.null(query)) {
      if (inherits(query, c("ids", "gbifid"))) {  # taxon ids rather than a name
        if (inherits(query, "ids")) {
          query_use <- opts$taxonKey <- query$gbif
        } else {
          query_use <- opts$taxonKey <- query
        }
      } else {
        query_use <- query
        if (is.null(query_use)) {
          warning(sprintf("No GBIF result found for %s", query))
        } else {
          opts$scientificName <- query_use
        }
      }
    } else {
      query_use <- NULL
    }
    if (is.null(query_use) && is.null(geometry)) {
      emptylist(opts)
    } else {
      time <- now()
      if (!'limit' %in% names(opts)) opts$limit <- limit
      opts$fields <- 'all'
      if (!is.null(geometry)) {
        # accept either a WKT polygon string or a bounding-box vector
        opts$geometry <- if (grepl('POLYGON', paste(as.character(geometry), collapse = " "))) {
          geometry
        } else {
          bbox2wkt(bbox = geometry)
        }
      }
      opts$callopts <- callopts
      out <- do.call(occ_search, opts)
      # occ_search() signals some failures by returning bare character strings
      if (inherits(out, "character")) {
        emptylist(opts)
      } else if (inherits(out$data, "character")) {
        emptylist(opts)
      } else {
        dat <- out$data
        dat$prov <- rep("gbif", nrow(dat))
        dat$name <- as.character(dat$name)
        # move the key columns (when present) to the front of the data frame
        cols <- c('name', 'decimalLongitude', 'decimalLatitude', 'issues', 'prov')
        cols <- cols[cols %in% sort(names(dat))]
        dat <- move_cols(x = dat, y = cols)
        dat <- stand_latlon(dat)
        list(time = time, found = out$meta$count, data = dat, opts = opts)
      }
    }
  } else {
    emptylist(opts)
  }
}
# Move the named columns `y` to the front of data frame `x`, keeping the
# remaining columns in their original order.
#
# The original matched names with \b-anchored regexes, which broke on column
# names containing regex metacharacters and errored (via integer(0) grep
# hits inside a negative index) when a requested column was absent; exact
# set arithmetic over names() is both safer and clearer.
move_cols <- function(x, y) {
  x[c(y, setdiff(names(x), y))]
}
emptylist <- function(opts) list(time = NULL, found = NULL, data = data.frame(NULL), opts = opts)
# Standardise coordinate column names: whichever longitude/latitude spelling
# a provider used, rename it to plain "longitude" / "latitude" so downstream
# code can rely on one naming convention.
stand_latlon <- function(x) {
  lon_aliases <- c("decimalLongitude", "Longitude", "lng", "longitude", "decimal_longitude")
  lat_aliases <- c("decimalLatitude", "Latitude", "lat", "latitude", "decimal_latitude")
  nms <- names(x)
  nms[nms %in% lon_aliases] <- "longitude"
  nms[nms %in% lat_aliases] <- "latitude"
  names(x) <- nms
  x
}
#' Query the Berkeley Ecoengine for occurrence records.
#'
#' Internal plugin for occ(): forwards the query/geometry/limit options to
#' ecoengine::ee_observations() and standardises the result into
#' list(time, found, data, opts).
#' @noRd
foo_ecoengine <- function(sources, query, limit, geometry, callopts, opts) {
  if (any(grepl("ecoengine", sources))) {
    # rename the generic "limit" option to ecoengine's "page_size"
    opts <- limit_alias(opts, "ecoengine")
    time <- now()
    opts$scientific_name <- query
    opts$georeferenced <- TRUE
    if(!'page_size' %in% names(opts)) opts$page_size <- limit
    if(!is.null(geometry)){
      # ee_observations() takes a bounding box, so convert WKT polygons
      opts$bbox <- if(grepl('POLYGON', paste(as.character(geometry), collapse=" "))){
       wkt2bbox(geometry) } else { geometry }
    }
    # This could hang things if the request is super large. Will deal with
    # this issue when it arises in a use case.
    # For now the default behavior is to retrieve one page;
    # page = "all" will retrieve all pages.
    if (is.null(opts$page)) {
      opts$page <- 1
    }
    opts$quiet <- TRUE
    opts$progress <- FALSE
    opts$foptions <- callopts
    out_ee <- do.call(ee_observations, opts)
    if(out_ee$results == 0){
      # no hits: warn and fall through to the standard empty result
      warning(sprintf("No records found in Ecoengine for %s", query))
      list(time = NULL, found = NULL, data = data.frame(NULL), opts = opts)
    } else{
      out <- out_ee$data
      # convert factor columns to character — presumably so results can be
      # combined across providers later; confirm against the occ() caller
      fac_tors <- sapply(out, is.factor)
      out[fac_tors] <- lapply(out[fac_tors], as.character)
      out$prov <- rep("ecoengine", nrow(out))
      names(out)[names(out) == 'scientific_name'] <- "name"
      list(time = time, found = out_ee$results, data = out, opts = opts)
    }
  } else {
    # source not requested: standard empty result
    list(time = NULL, found = NULL, data = data.frame(NULL), opts = opts)
  }
}
#' Query AntWeb for occurrence records.
#'
#' Internal plugin for occ(): forwards the query to AntWeb::aw_data() and
#' standardises the result into list(time, found, data, opts).
#' @noRd
foo_antweb <- function(sources, query, limit, geometry, callopts, opts) {
  if (any(grepl("antweb", sources))) {
    time <- now()
    # limit <- NULL
    # NOTE(review): geometry is unconditionally discarded here — presumably
    # AntWeb does not support spatial queries; confirm against aw_data().
    geometry <- NULL
    # strip leading and trailing spaces from the query string
    query <- sub("^ +", "", query)
    query <- sub(" +$", "", query)
    # two words are treated as a full "Genus species" name, otherwise a genus
    if(length(strsplit(query, " ")[[1]]) == 2) {
      opts$scientific_name <- query
    } else {
      opts$genus <- query
      opts$scientific_name <- NULL
    }
    if(!'limit' %in% names(opts)) opts$limit <- limit
    opts$georeferenced <- TRUE
    out <- do.call(aw_data, opts)
    if(is.null(out)){
      # no hits: warn and fall through to the standard empty result
      warning(sprintf("No records found in AntWeb for %s", query))
      list(time = NULL, found = NULL, data = data.frame(NULL), opts = opts)
    } else{
      res <- out$data
      res$prov <- rep("antweb", nrow(res))
      res$name <- query
      res <- stand_latlon(res)
      list(time = time, found = out$count, data = res, opts = opts)
    }
  } else {
    # source not requested: standard empty result
    list(time = NULL, found = NULL, data = data.frame(NULL), opts = opts)
  }
}
#' Query USGS BISON for occurrence records.
#'
#' Internal plugin for occ(). Accepts either a species name or an ITIS TSN
#' (a bare "tsn" object, or the $itis component of an "ids" object) and
#' forwards the options to rbison::bison(). Returns list(time, found, data,
#' opts); an "empty" list (see emptylist()) when the source is not requested
#' or nothing is found.
#'
#' Uses inherits() instead of the original's fragile `class(x) %in%`
#' comparisons, and the shared emptylist() helper for consistency with the
#' other plugins.
#' @noRd
foo_bison <- function(sources, query, limit, geometry, callopts, opts) {
  if (any(grepl("bison", sources))) {
    # rename the generic "limit" option to bison's "count"
    opts <- limit_alias(opts, "bison")
    if (inherits(query, c("ids", "tsn"))) {  # taxon ids rather than a name
      if (inherits(query, "ids")) {
        opts$tsn <- query$itis
      } else {
        opts$tsn <- query
      }
    } else {
      opts$species <- query
    }
    time <- now()
    if (!'count' %in% names(opts)) opts$count <- limit
    opts$config <- callopts
    if (!is.null(geometry)) {
      # accept either a WKT polygon string or a bounding-box vector
      opts$aoi <- if (grepl('POLYGON', paste(as.character(geometry), collapse = " "))) {
        geometry
      } else {
        bbox2wkt(bbox = geometry)
      }
    }
    out <- do.call(bison, opts)
    if (is.null(out$points)) {
      emptylist(opts)
    } else {
      dat <- out$points
      dat$prov <- rep("bison", nrow(dat))
      dat <- stand_latlon(dat)
      list(time = time, found = out$summary$total, data = dat, opts = opts)
    }
  } else {
    emptylist(opts)
  }
}
#' Query iNaturalist for occurrence records.
#'
#' Internal plugin for occ(): forwards the query to spocc_inat_obs() and
#' standardises the result into list(time, found, data, opts).
#' @noRd
foo_inat <- function(sources, query, limit, geometry, callopts, opts) {
  if (any(grepl("inat", sources))) {
    # rename the generic "limit" option to inat's "maxresults"
    opts <- limit_alias(opts, "inat")
    time <- now()
    opts$query <- query
    if(!'maxresults' %in% names(opts)) opts$maxresults <- limit
    opts$meta <- TRUE
    if(!is.null(geometry)){
      # inat's bounds argument is latitude-first, so swap each lon/lat pair
      # (positions 1<->2 and 3<->4 of the bounding box)
      opts$bounds <- if(grepl('POLYGON', paste(as.character(geometry), collapse=" ")))
      {
        # flip lat and long spots in the bounds vector for inat
        temp <- wkt2bbox(geometry)
        c(temp[2], temp[1], temp[4], temp[3])
      } else { c(geometry[2], geometry[1], geometry[4], geometry[3]) }
    }
    out <- do.call(spocc_inat_obs, opts)
    if(!is.data.frame(out$data)){
      # nothing usable returned: standard empty result
      list(time = NULL, found = NULL, data = data.frame(NULL), opts = opts)
    } else{
      res <- out$data
      res$prov <- rep("inat", nrow(res))
      names(res)[names(res) == 'Scientific.name'] <- "name"
      res <- stand_latlon(res)
      list(time = time, found = out$meta$found, data = res, opts = opts)
    }
  } else {
    # source not requested: standard empty result
    list(time = NULL, found = NULL, data = data.frame(NULL), opts = opts)
  }
}
#' Query eBird for occurrence records.
#'
#' Internal plugin for occ(). Dispatches to rebird::ebirdregion() (the
#' default, with region "US") or rebird::ebirdgeo() depending on
#' opts$method, forwarding the remaining options. Returns
#' list(time, found, data, opts); `found` is always NULL because no total
#' hit count is read from the response.
#'
#' Braces the original's fragile multi-line unbraced `if` statements and
#' uses the shared emptylist() helper for consistency with other plugins.
#' @noRd
foo_ebird <- function(sources, query, limit, callopts, opts) {
  if (any(grepl("ebird", sources))) {
    # rename the generic "limit" option to ebird's "max"
    opts <- limit_alias(opts, "ebird")
    time <- now()
    if (is.null(opts$method)) {
      opts$method <- "ebirdregion"
    }
    if (!opts$method %in% c("ebirdregion", "ebirdgeo")) {
      stop("ebird method must be one of ebirdregion or ebirdgeo")
    }
    opts$species <- query
    if (!'max' %in% names(opts)) opts$max <- limit
    opts$config <- callopts
    # "method" itself is not an argument of either rebird function, so drop it
    if (opts$method == "ebirdregion") {
      if (is.null(opts$region)) opts$region <- "US"
      out <- do.call(ebirdregion, opts[!names(opts) %in% "method"])
    } else {
      out <- do.call(ebirdgeo, opts[!names(opts) %in% "method"])
    }
    if (!is.data.frame(out)) {
      emptylist(opts)
    } else {
      out$prov <- rep("ebird", nrow(out))
      names(out)[names(out) == 'sciName'] <- "name"
      out <- stand_latlon(out)
      list(time = time, found = NULL, data = out, opts = opts)
    }
  } else {
    emptylist(opts)
  }
}
# Rename a generic "limit" option to the provider-specific argument name.
#
# Each provider's client spells the record cap differently (ecoengine:
# page_size, bison: count, inat: maxresults, ebird: max). Option lists with
# no "limit" entry, empty lists, and unknown sources are returned unchanged.
#
# Robustness fix: the original errored on an unknown source, because
# switch() returned NULL and `names(x)[i] <- NULL` rejects a zero-length
# replacement.
limit_alias <- function(x, sources) {
  if (length(x) == 0) {
    return(x)
  }
  lim_name <- switch(sources,
    ecoengine = "page_size",
    bison = "count",
    inat = "maxresults",
    ebird = "max"
  )
  if (is.null(lim_name) || !"limit" %in% names(x)) {
    return(x)
  }
  names(x)[names(x) == "limit"] <- lim_name
  x
}
|
aca06588ed4641f2bc05db6868c19e00e9339f49 | 66d8313b2687a7de88bdb8a98833e74857db05ac | /02_R Programming/hw2/others.R | a54f79b7cac460edbfc069f4e20706d7cdc0a666 | [] | no_license | Elmasri-Fathallah/CourseraDataScience | 863bbef53e754c0a6d1ee0be699f81b593197190 | 459255f456df003719db8d639c7aac1b16a9b240 | refs/heads/master | 2021-05-29T15:55:58.795267 | 2015-11-04T01:13:50 | 2015-11-04T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,223 | r | others.R | #Programming Assignment 2
# Programming Assignment 2.
#
# Build a matrix wrapper that can cache its inverse. Returns a list of four
# accessor closures sharing one enclosing environment:
#   set(y)          replace the stored matrix and invalidate the cache
#   get()           return the stored matrix
#   setinv(inverse) store a computed inverse
#   getinv()        return the cached inverse (NULL until one is stored)
#
# Example:
#   m <- matrix(1:4, nrow = 2, ncol = 2)
#   CM <- makeCacheMatrix(m)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # the matrix changed, so drop any stale inverse
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
# Compute (and cache) the inverse of the matrix held in a makeCacheMatrix()
# object. On a cache hit the stored inverse is returned immediately;
# otherwise solve() is called and the result is stored for next time.
# Extra arguments in ... are passed through to solve().
#
# Usage: cacheSolve(CM)
#
# Bug fix: the original assigned `data <- x$get` — the accessor function
# itself, not its value — so solve() received a closure and errored on
# every cache miss. The accessor must be called: x$get().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
242ba85b181ddfaf0ad87ad26e6a69c754f769a7 | 7bdf0282862891db96a1a2784581c508178ee10f | /Simple_Dendritic_function_parameter.R | 9c1fb1de131f62b11a82c045d6da2eee97ee3681 | [] | no_license | M-Atsuhiko/Simple | ff4cd29ac3ec32e6646e9beb8fb4217449556487 | b9f88b3a6f86fbbfb9dd43f7e24ef556560a0fbd | refs/heads/master | 2020-03-30T21:15:57.283272 | 2015-01-13T02:36:08 | 2015-01-13T02:36:08 | 28,582,197 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,850 | r | Simple_Dendritic_function_parameter.R | source("./Simulation_Parameters.R") #パラメータの初期化
cat("@@@@@ Determine @@@@@\n")
Param_Labels <- Simple_Param_Labels
MAX_PARAMS <- length(Param_Labels)
############### 関数include ###############
#[Param_init.R] : paramの初期値を返す関数
source("./Simple_Param_init.R")
#[set_coordinate.R] : Branchの空間座標点を設定する関数
source("./set_coordinate.R")
#[set_synapse.R] : Dendrite上にsynapseを設定する関数
source("./set_synapse.R")
#[display_morphology.R] : 完成したDendriteを3次元空間状に描画する関数
source("./display_conductance_on_morphology.R")
#[display_synapse] : synapseを3次元空間状に描画する関数
source("./display_synapse.R")
#[display_morphology] : morphologyを3次元空間状に描画する関数
source("./display_morphology.R")
#[display_synaptic_zone] : synaptic_zoneを3次元空間状に描画する関数
source("./display_synaptic_zone.R")
#[NEURON_syntax.R] : Hocファイル生成時に用いる関数群
source("./NEURON_functions.R")
#[make_NEURON_morpho_conductance.R] : コンダクタンスを持った形態データの出力をする関数
source("./make_NEURON_morpho_conductance_data.R")
#[make_NEURON_synapse_data.R] : 作成したsynapseをhocファイルの形式で出力する
source("./make_NEURON_synapse_data.R")
#[data_check.R] : 作成したdendriteデータにエラー(Noの設定が正しいか、synapseの位置が正しいかなど)をチェックする関数
source("./data_check.R")
#[liner_estimation.R] : シミュレーション結果の評価
source("./liner_estimate.R")
#[Result_Estimate.R] : シミュレーション結果の評価
source("./Result_Estimate.R")
##[liner_estimation.R] : シミュレーション結果の評価
#source("./liner_estimate.R")
#[return_result.R] : シミュレーション結果の評価
source("./return_result.R")
#[penalty.R] : うまく形態生成ができなかった場合のpenaltyを与える関数
source("./penalty.R")
#[Simple_Morpho_penalty.R] : うまく形態生成ができなかった場合のpenaltyを与える関数
source("./Simple_Morpho_penalty.R")
#[Simple_evolution.R] : 評価値から次の世代を生成する関数
source("./Simple_evolution.R")
#[calc_Conductance_ratio.R] : コンダクタンスの含有率を計算するプログラム
source("./calc_Conductance_ratio.R")
#[calc_Conductance_amount.R] : TREEのコンダクタンスの絶対量と最大値を計算する関数
source("./calc_Conductance_amount.R")
#[output_results.R] : 結果をファイルに出力する関数
source("./output_results.R")
#[make_simul_parameter] : シミュレーションで用いるパラメータのファイルを作成する関数
source("./make_simul_parameter.R")
#[parallel_task] : シミュレーションを行うための関数
source("./Simple_parallel_task.R")
#[sum_length] : 木の大きさ(Dendriteの長さの総和)を返す関数
source("./sum_length.R")
#[calc_Vollume.R] : TREEの体積を計算する関数
source("./calc_volume.R")
#[TREE_modify.R] : TREEにコンダクタンスと太さを分布させる
source("./TREE_modify.R")
#[TREE_clean.R] : TREEのプロパティを初期化する関数
source("./TREE_clean.R")
#[Simple_CanSimulation.R] : Simple TREEの全てのBranchの太さがFOURCE MIN DIAMを超えているかどうかを判定する関数
source("./Simple_CanSimulation.R")
#[set_Upper_or_Lower_or_Other.R] : 上下の枝の名前付けを行う関数
source("set_Upper_or_Lower_or_Other.R")
# create the sample TREE
source("liner_TREE.R") # builds a straight (unbranched) TREE
source("branched_TREE.R") # builds a branched TREE
sample_TREE <- liner_TREE
#sample_TREE <- branched_TREE    # alternative: use the branched sample instead
TREE <- sample_TREE()
N_Segs <- length(TREE[[1]])
# write the sample TREE's synapses out in hoc format
make_NEURON_synapse_data(TREE,SIMPLE_SYNAPSE_FILE_NAME)
## byte-compile the frequently called functions ahead of time (compiler::cmpfun)
Param_init <- cmpfun(Param_init)
set_coordinate <- cmpfun(set_coordinate)
set_synapse <- cmpfun(set_synapse)
make_NEURON_morpho_conductance_data <- cmpfun(make_NEURON_morpho_conductance_data)
make_NEURON_synapse_data <- cmpfun(make_NEURON_synapse_data)
data_check <- cmpfun(data_check)
estimate <- cmpfun(estimate)
Morpho_penalty <- cmpfun(Simple_Morpho_penalty)  # bind the generic name to the Simple variant
penalty <- cmpfun(penalty)
calc_Conductance_ratio <- cmpfun(calc_Conductance_ratio)
calc_Conductance_amount <- cmpfun(calc_Conductance_amount)
Simple_evolution <- cmpfun(Simple_evolution)
parallel_task <- cmpfun(parallel_task)
sum_length <- cmpfun(sum_length)
TREE_modify <- cmpfun(TREE_modify)
TREE_clean <- cmpfun(TREE_clean)
Simple_CanSimulation <- cmpfun(Simple_CanSimulation)
### directories for storing results ###
RESULT_DATA_DIR <- paste("./",paste(include_conductances,collapse="_"),"_","Result/",sep="")
RESULT_GRAPH_DIR <- paste("./",paste(include_conductances,collapse="_"),"_","Result/Graphs/",sep="")
extra_prefix <- ""
if(!(is.na(args_extra_prefix))) extra_prefix <- paste("_",args_extra_prefix,sep="")
# prefix attached to every output file
Prefix <- paste("SEED",RAND_SEED,"_","dt",DELTA_T,"_",paste(include_conductances,collapse="_"),"_",paste("FR",Function_ratio,sep=""),extra_prefix,"_",sep="") # identifier attached to the outputs
### overrides for test runs ###
if(THIS_IS_TEST){
  Prefix <- paste("test_",Prefix,sep="")
  MAX_GENERATION <- 10 # maximum number of GA generations
  ES_RAMDA <- 5 # number of elite individuals used to produce the next generation
  N_INDIVIDUAL <- 20 # population size per generation
  SELECT_PROB <- set_select_prob(ES_RAMDA,1)
}
### end of test-run overrides ###
### save location / file name for the morphology TREE data ###
BEST_Datas_FILE <- paste(RESULT_DATA_DIR,Prefix,"Best_Datas.xdr",sep="")
### save location / file-name prefix for the generation parameter GENERATION ###
#SAVE_GENERATION <- paste(RESULT_DIR,Prefix,"GENERATION",sep="")
### final outputs ###
ESTIMATE_GRAPH <- paste(RESULT_GRAPH_DIR,Prefix,"estimate_graph.eps",sep="") # graph of the best and mean fitness per generation
PERFORMANCE_GRAPH <- paste(RESULT_GRAPH_DIR,Prefix,"performance_graph.eps",sep="") # graph of which fitness function scored each generation
GENERATION_ESTIMATE_FILE <- paste(RESULT_DATA_DIR,Prefix,"MAX_MEAN_SE.xdr",sep="")
LAST_GENERATION_FILE <- paste(RESULT_DATA_DIR,Prefix,"LAST_GENERATION.xdr",sep="")
|
cd7d33068a6f1b076c993a362e8e6633a663cd03 | 8d119d59dedf7994c9b14a637fc069d3a3e0494c | /RPackageSource/man/readCOMETSinput.Rd | 7f3e7105d614a92f408ef03a193710e38db0729c | [] | no_license | CBIIT/R-cometsAnalytics | 3f77bf818050eefbcef4e5a74a5cdab0e17157dc | bc0da393319a7fc2ec53275f9545d24b982faabc | refs/heads/master | 2023-08-24T04:34:16.888409 | 2023-08-04T20:28:26 | 2023-08-04T20:28:26 | 64,139,217 | 8 | 9 | null | 2023-09-13T18:43:04 | 2016-07-25T14:02:36 | HTML | UTF-8 | R | false | true | 4,650 | rd | readCOMETSinput.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01_readCOMETSinput.R
\name{readCOMETSinput}
\alias{readCOMETSinput}
\title{Read in Excel file that contains metabolite data, covariate data,
models, and model options.}
\usage{
readCOMETSinput(file)
}
\arguments{
\item{file}{path of Excel file to be read in. This file must contain sheets
with names \bold{Metabolites}, \bold{SubjectMetabolites}, \bold{SubjectData}, \bold{VarMap},
and optionally \bold{Models}, \bold{Model_Types} (see details).}
}
\value{
a list comprising of data and information needed for \code{\link{getModelData}}.
}
\description{
Read in Excel file that contains metabolite data, covariate data,
models, and model options.
}
\details{
Additional information regarding each sheet in the input Excel file is given below.
\bold{Metabolites} \cr
A table with the columns \code{METABID}, \code{METABOLITE_NAME}, and possibly other columns \cr
of information about the metabolites. The \code{METABID} column is used
for harmonizing the metabolite names across different cohorts when meta-analyses are performed. \cr
\bold{SubjectMetabolites} \cr
A table with the subject ids in the first column and metabolites as the other columns. \cr
\bold{SubjectData} \cr
A table with the subject ids in the first column and covariates as the other columns. \cr
\bold{VarMap} \cr
A table with at least the required columns \code{VARREFERENCE}, \code{COHORTVARIABLE},
\code{VARTYPE}, and \code{VARDEFINITION}. The \code{COHORTVARIABLE} column must contain names that match the
column names in the \bold{SubjectData} table. These names will be renamed to their
corresponding name in the \code{VARREFERENCE} column. The \code{VARTYPE} column
should have values \code{continuous} or \code{categorical} for each row. \cr
\bold{Models} \cr
A table where each row represents a model to be run, with columns \code{MODEL},
\code{OUTCOMES}, \code{EXPOSURE}, \code{ADJUSTMENT},
\code{STRATIFICATION}, \code{WHERE}, and optionally \code{MODEL_TYPE}, \code{TIME}, \code{GROUP}. All variable names in this
table must match variable names in the \code{VARREFERENCE} column of the \bold{VarMap} sheet.
The \code{MODEL} column is a label for the model. The \code{OUTCOMES} and \code{EXPOSURE} columns define the
outcome and exposure variables for the model. Use \code{All metabolites} to specify
that all metabolite variables are to be included as outcomes or exposures, otherwise
use a space separated list of variable names. The \code{ADJUSTMENT} column contains a
space separated list of covariate adjustment variables; use an empty cell for no covariate adjustment.
The \code{STRATIFICATION} column is used for stratified analyses, with a space separated list
of stratification variables. If more than one stratification variable is specified, then the strata
are defined by all unique combinations of the stratification variables that appear in the data.
The \code{WHERE} column is used to define a subset of subjects to include in the analysis,
and has the form \code{variable operator value}, where \code{operator} can be one of the
following \code{>, <, >=, <= !=, =}.
An example \code{WHERE} condition is \code{age > 50}, which will include all subjects older
than 50 in the analysis. Multiple \code{WHERE} conditions must be separated by a comma and are
logically connected with the \code{&} operator.
For example, the \code{WHERE} condition \code{age > 50 , bmi >= 22} will include the subjects older than 50 AND with
bmi >= 22. Values in the \code{MODEL_TYPE} column must match with the \code{MODEL_TYPE} column
in the \bold{Model_Types} sheet. The \code{TIME} column is only required when survival models are run.
This column can contain a single time variable or two time variables separated by a space.
The \code{GROUP} column is only required when conditional logistic regression models are run.
This column can contain only a single variable that defines the groups (sets, strata).
This sheet is not required when running in interactive mode, but is required when
running in batch mode. \cr
\bold{Model_Types} \cr
A table where each row specifies an option and has columns \code{MODEL_TYPE}, \code{FUNCTION},
\code{OPTION}, and \code{VALUE}. For an example sheet and additional information about this
sheet, see the Excel file \code{/extdata/cometsInput.xlsx}.
This sheet is optional, but is required when the \bold{Models} sheet contains the
column \code{MODEL_TYPE}.
}
\examples{
dir <- system.file("extdata", package="RcometsAnalytics", mustWork=TRUE)
csvfile <- file.path(dir, "cometsInputAge.xlsx")
exmetabdata <- readCOMETSinput(csvfile)
}
|
4c70563b4072716d625389cfae511aaa54eac3b4 | 7294f6555183646fc7ae97dce153b99a129bf528 | /R/cph_age_pop1853_raw.R | c30b8a054e0acf7b243c634c215decf5ec8864f6 | [] | no_license | matthew-phelps/CholeraDataDK | 5a0190ce61251d530c41da5dea25435b6c4423df | cc37128c055b50cf268915621afdbfff24abd614 | refs/heads/master | 2020-05-21T12:28:50.478209 | 2017-04-06T10:14:23 | 2017-04-06T10:14:23 | 52,596,192 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 676 | r | cph_age_pop1853_raw.R | #' Copenhagen population by age group 1853
#'
#' A dataset of the population, by gender, in each age group. Data is imputed
#' from the 1850 & 1855 census. The age groupings used are the same groups used
#' in both censuses
#'
#'
#' @format A data frame with 23 rows and 4 variables:
#' \describe{
#' \item{age_range}{The age range}
#' \item{men1853}{Numeric vector of the number of men in the age group}
#' \item{women1853}{Numeric vector of the number of women in the age group}
#' \item{total1853}{Numeric vector of the total number of men & women in the
#' age group}
#' }
#' @source {Copenhagen census 1850 & 1853. Data entered by ___}
"cph_age_pop1853_raw"
|
87f315adae8affb1d52a851d1754ee8f62d87503 | cdb91c6806f0a81706182a9d7d2e2dc4184c56f5 | /languages/R/methods/time_series/forecast package example.R | 4c346d905f30f91237bf8150a1931e5bab613ee4 | [] | no_license | euriion/code_snippets | 86c95f4dba71f1c1d8f0c22456f3ae21486f89ad | 3af3d2df034a1a7d6b5a5363acebc93d580362d8 | refs/heads/master | 2016-09-06T08:21:11.833257 | 2015-01-14T06:41:47 | 2015-01-14T06:41:47 | 17,372,215 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,820 | r | forecast package example.R | # accuracy example and sim plotting
time.data <- EuStockMarkets[1:200, 1] # non-stationary data
model.rwf <- rwf(time.data, h=200) # Random Walk Model fittig and forecasting
model.arima <- forecast(auto.arima(time.data), h=200) # ARIMA Model fitting and forecasting
model.rwf.index <- accuracy(model.rwf)
model.arima.index <- accuracy(model.arima)
rbind(model.rwf.index, model.arima.index) # Model evaluation
# acf, pacf example
# line in graph: Confidence Interval(CI)
# calculation(upper: 2/sqrt(n), lower: -2/sqrt(n))
time.data.acf <- acf(time.data) # exponential decay: non-stationary
time.data.pacf <- pacf(time.data)
# difference (defalut order = 1)
diff.time.data <- diff(time.data) # stationary data
diff.time.data.acf <- acf(diff.time.data)
diff.time.data.pacf <- pacf(diff.time.data)
# arima, auto.arima example
model.arima <- arima(time.data, order = c(0,1,2)) # arime(0,1,2) model fitting
model.auto.arima <- auto.arima(time.data)
plot(forecast(model.arima), h=20)
plot(forecast(model.auto.arima), h=20)
# ets example
model.ets <- ets(time.data) # Exponentail Smoothing Model fitting
plot(forecast(model.ets, h=200)) # plotting
lines(EuStockMarkets[1:400,1])
# rwf example
model.rwf <- rwf(time.data,h=200) # Randow Walk model fitting
plot(model.rwf) # plotting
lines(EuStockMarkets[1:400,1])
# ma example
model.ma <- ma(time.data, order = 5) # Moving Average model fitting
model.ma2 <- ma(time.data, order = 20)
model.ma3 <- ma(time.data, order = 50)
plot(time.data,type="l", lty=1, col="black", main = "Moving Average model (order = 5, 20, 50)") # plotting
lines(model.ma, lty=2, col="red")
lines(model.ma2, lty=3, col="blue")
lines(model.ma3, lty=4, col="green")
legend(145,1600,c("time.data","order=5","order=20","order=50"),lty=1:4,col=c("black","red","blue","green"))
#tslm example
tslm.data <- ts(rnorm(120,0,3) + 1:120 + 20*sin(2*pi*(1:120)/12), frequency=12) # data setting
model.tslm <- tslm(tslm.data ~ trend + season) # Linear model fitting (trend, season)
tslm.fitdata <- forecast(model.tslm) # store the fitting value
plot(tslm.fitdata) # plotting
model.trend <- tslm(tslm.data ~ trend) # trend fitting
del.trend.data <- tslm.data - model.trend$fitted # delete trend (only season)
plot(tslm.fitdata) # plotting data
lines(model.trand$fitted.values, lty=2, col="red") # plotting trend
lines(del.trend.data, lty=3, col="blue") #plotting season
# forecast example
# store the fitting value: ARIMA, Moving Average, Exponential Smoothing...
model.arima <- auto.arima(time.data) # fitting ARIMA Model
model.arima.fit <- forecast(model.arima, h = 100) # store the fitting value(n=100) of ARIMA Model
plot(model.arima.fit) # plotting
# ndiffs, nsdiffs, diff example
# non-stationary data -> stationary data: difference
# number of difference? => ndiffs, nsdiffs
# calculating difference => diff
nonst.data <- time.data # non-stationary data
ndiffs(nonst.data) # calculating difference
st.data <- diff(nonst.data, lag = 1) # stationary data
plot(nonst.data, type="l", main = "Non-stationary")
plot(st.data, type ="l", main = "stationary")
# BoxCox, BoxCox.lambda example
# Transmation method
data <- lynx # non normal distribution
plot(data, type ="l")
hist(data) # Histogram
qqnorm(data) # Normal Probability plot
lambda <- BoxCox.lambda(data) # calculating lambda
trans.data <- BoxCox(data, lambda) # transformation to normal distribution
plot(trans.data, type ="l")
hist(trans.data)
qqnorm(trans.data)
acf(trans.data)
pacf(trans.data)
model.fit1 <- arima(trans.data, order = c(2,0,0)) # ARIMA(0,0,2) Model fitting
model.fit2 <- auto.arima(trans.data) # ARIMA Model fitting (Auto)
plot(forecast(model.fit1, h=10))
plot(forecast(model.fit2, h=10))
rbind(accuracy(model.fit1),accuracy(model.fit2)) # Model evaluation
|
630e3811bc8756a0c8efa4f1b6f0f9c4c148ed21 | 4bc054c915eab17ec3a10d44f80ab3d44dfbaf2c | /ipms/R/07_Preliminary_Plots.R | f2d58bb6ca4023d6639c776a449023215b8c1f10 | [] | no_license | levisc8/carpobrotus_IPMs | c12e3804fbcd2dbae914ffa568f343c5b87c9743 | 138a135c2ee716bb84a45520413c84046b5107bd | refs/heads/master | 2022-12-21T08:23:17.767986 | 2022-12-13T14:45:02 | 2022-12-13T14:45:02 | 131,842,866 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,852 | r | 07_Preliminary_Plots.R |
all_ramets <- readRDS("ipms/Data/all_ramets_di.rds") %>%
mutate(
native = case_when(
site %in% c("Melkboss", "Vogelgat",
"St_Francis", "Struisbaai",
"Springfontein", "Rooisand") ~ 1,
TRUE ~ 0
)
)
surv_mods <- readRDS("ipms/Model_Fits/ramet_survival_list_krig.rds")
flow_mods <- readRDS("ipms/Model_Fits/ramet_flower_n_list_krig.rds")
use_surv_mod <- surv_mods[[1]][["times_sw3_seas"]]
use_grow_mod <- readRDS("ipms/Model_Fits/grow_2_1_lin_gam_mix.rds")
use_repr_mod <- readRDS("ipms/Model_Fits/repro_lin_gam_mix.rds")
use_flow_mod <- flow_mods[[1]][["times_sw2_ann"]]
surv_pred <- fitted(use_surv_mod)
grow_pred <- fitted(use_grow_mod)
repr_pred <- fitted(use_repr_mod)
flow_pred <- fitted(use_flow_mod)
surv_data <- cbind(use_surv_mod$data,
surv_pred = surv_pred[ ,1])
grow_data <- cbind(use_grow_mod$data,
grow_pred = grow_pred[ ,1])
repr_data <- cbind(use_repr_mod$data,
repr_pred = repr_pred[ ,1])
flow_data <- cbind(use_flow_mod$data,
flow_pred = flow_pred[ ,1])
# Loop over populations so that each one gets its own pair of PDFs.
# Iterate over the site names directly (unique() computed once) rather
# than indexing unique(all_ramets$site) twice on every iteration.
for (pop in unique(all_ramets$site)) {

  # Per-site subsets of each vital-rate data set
  surv_temp <- filter(surv_data, site == pop)
  grow_temp <- filter(grow_data, site == pop)
  repr_temp <- filter(repr_data, site == pop)
  flow_temp <- filter(flow_data, site == pop)

  # Survival vs size: fitted curve plus jittered 0/1 observations
  surv_plt <- ggplot(surv_temp) +
    geom_line(aes(x = log_size, y = surv_pred),
              color = "red", size = 1.5) +
    geom_jitter(aes(x = log_size, y = alive), alpha = 0.5,
                height = 0.05, width = 0) +
    theme_bw() +
    ylab("Survival (t + 1)") +
    xlab("Size (t)") +
    ggtitle(pop)

  # Growth: size at t+1 vs size at t, with a 1:1 reference line
  grow_plt <- ggplot(grow_temp) +
    geom_line(aes(x = log_size, y = grow_pred),
              color = "red", size = 1.5) +
    geom_abline(intercept = 0, slope = 1, color = "grey50") +
    geom_point(aes(x = log_size, y = log_size_next), alpha = 0.5) +
    theme_bw() +
    ylab("Size (t + 1)") +
    xlab("Size (t)")

  # Probability of being reproductive vs size
  repr_plt <- ggplot(repr_temp) +
    geom_line(aes(x = log_size, y = repr_pred),
              color = "red", size = 1.5) +
    geom_jitter(aes(x = log_size, y = repro),
                alpha = 0.5, height = 0.05, width = 0) +
    theme_bw() +
    ylab("Pr(Reproductive) (t)") +
    xlab("Size (t)") +
    ggtitle(pop)

  # Flower number vs size
  flow_plt <- ggplot(flow_temp) +
    geom_line(aes(x = log_size, y = flow_pred),
              color = "red", size = 1.5) +
    geom_point(aes(x = log_size, y = flower_n),
               alpha = 0.5) +
    theme_bw() +
    ylab("# of Flowers (t)") +
    xlab("Size (t)")

  # Survival + growth (P kernel) page
  pdf(glue('ipms/Figures/vr_models/gam/{pop}_p_kern_preliminary_plots.pdf'),
      width = 8,
      height = 8)
  grid.arrange(surv_plt, grow_plt, nrow = 2, ncol = 1)
  dev.off()

  # Reproduction + flower number (F kernel) page
  pdf(glue('ipms/Figures/vr_models/gam/{pop}_f_kern_preliminary_plots.pdf'),
      width = 8,
      height = 8)
  grid.arrange(repr_plt, flow_plt, nrow = 2, ncol = 1)
  dev.off()
}
# Sites shown in the cross-site comparison figures
use_sites <- c("Havatselet", "Foxton", "Rooisand", "Colares", "Rarangi")

use_surv <- filter(surv_data, site %in% use_sites)
use_grow <- filter(grow_data, site %in% use_sites)
use_repr <- filter(repr_data, site %in% use_sites)
use_flow <- filter(flow_data, site %in% use_sites)

# Fitted curves for all selected sites on one panel (these overwrite the
# per-site plot objects created in the loop above, which were already
# written to disk).
# BUG FIX: axis labels were truncated copy-paste artifacts
# ("Survival (t + 1" without the closing paren) and the flower-number
# panel was mislabelled "Pr(Reproductive)"; labels now match the
# per-site plots above.
surv_plt <- ggplot(use_surv) +
  geom_line(aes(x = log_size, y = surv_pred, color = site),
            size = 1.5, alpha = 0.7) +
  theme_bw() +
  ylab("Survival (t + 1)") +
  xlab("Size (t)")

grow_plt <- ggplot(use_grow) +
  geom_line(aes(x = log_size, y = grow_pred, color = site),
            size = 1.5, alpha = 0.7) +
  theme_bw() +
  ylab("Size (t + 1)") +
  xlab("Size (t)")

repr_plt <- ggplot(use_repr) +
  geom_line(aes(x = log_size, y = repr_pred, color = site),
            size = 1.5, alpha = 0.7) +
  theme_bw() +
  ylab("Pr(Reproductive) (t)") +
  xlab("Size (t)")

flow_plt <- ggplot(use_flow) +
  geom_line(aes(x = log_size, y = flow_pred, color = site),
            size = 1.5, alpha = 0.7) +
  theme_bw() +
  ylab("# of Flowers (t)") +
  xlab("Size (t)")

# F kernel (reproduction + flower number) comparison page
pdf(glue('ipms/Figures/vr_models/gam/all_pops_f_kern_preliminary_plots.pdf'),
    width = 8,
    height = 8)
grid.arrange(repr_plt, flow_plt, nrow = 2, ncol = 1)
dev.off()

# P kernel (survival + growth) comparison page
pdf(glue('ipms/Figures/vr_models/gam/all_pops_p_kern_preliminary_plots.pdf'),
    width = 8,
    height = 8)
grid.arrange(surv_plt, grow_plt, nrow = 2, ncol = 1)
dev.off()
# Finally, create pp_check plots for everything, for the appendix of the publication.

# Survival: grouped posterior-predictive bar plot by site
pdf("ipms/Figures/vr_models/best_mods/survival.pdf")
pp_check(use_surv_mod,
         type = 'bars_grouped',
         group = 'site',
         freq = FALSE,
         ndraws = 100L)
dev.off()

# Growth: model 2_1 is best overall -- pp_check plus surfaces of its
# conditional smooth terms.
pdf("ipms/Figures/vr_models/best_mods/growth.pdf")
pp_check(use_grow_mod,
         type = 'scatter_avg_grouped',
         group = 'site',
         ndraws = 100L) +
  geom_abline(slope = 1, intercept = 0) # 1:1 line, observed vs predicted

plot(conditional_smooths(use_grow_mod,
                         surface = TRUE),
     stype = 'raster',
     theme = theme_bw(),
     ask = FALSE)
dev.off()

# Reproduction: grouped bar pp_check plus conditional-smooth surfaces
pdf("ipms/Figures/vr_models/best_mods/reproduction.pdf")
pp_check(use_repr_mod,
         type = 'bars_grouped',
         group = 'site',
         freq = FALSE,
         ndraws = 100L)

plot(conditional_smooths(use_repr_mod,
                         surface = TRUE),
     stype = 'raster',
     theme = theme_bw(),
     ask = FALSE)
dev.off()

# Flower number
pdf("ipms/Figures/vr_models/best_mods/flower_n.pdf")
pp_check(use_flow_mod,
         type = 'scatter_avg_grouped',
         group = 'site',
         ndraws = 100L) +
  geom_abline(slope = 1, intercept = 0) # 1:1 line, observed vs predicted
dev.off()
a1bb6c11e4d3583434b598c41480e9123a9db116 | c82b300e42469a9d407488831e80215e8b7f27bc | /run_analysis.R | 39f7bcc021857a78ced956e5ddb7088cbc8f05e2 | [] | no_license | kzcabana/Mod3_Getting-and-Cleaning-Data-Course-Project | ebc4af7ee5d0a5c65892ab4120a4bc82dc3385da | 93211daeedc53b87beaeacf566989a0ed8caad36 | refs/heads/master | 2021-09-05T00:56:33.031399 | 2018-01-23T06:45:32 | 2018-01-23T06:45:32 | 118,548,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,555 | r | run_analysis.R | if (!file.exists('./Module3Project')){
dir.create("./Module3Project")
}
setwd("~/Module3Project")
##Downloading File
fileurl = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists("./UCI HAR Dataset")){
download.file(fileurl,'UCI HAR Dataset.zip', mode = 'wb')
unzip("UCI HAR Dataset.zip", exdir = getwd())
}
##Read and Convert Data
library(data.table)
setwd("~/Module3Project/UCI HAR Dataset")
features <- read.csv('features.txt', header = FALSE, sep = ' ')
features <- as.character(features[,2])
train.X <- read.table('./train/X_train.txt')
train.Y <- read.csv('./train/y_train.txt', header = FALSE, sep = ' ')
trainSubject <- read.csv('./train/subject_train.txt',header = FALSE, sep = ' ')
Train<- data.frame(trainSubject, train.Y, train.X)
names(Train) <- c(c('subject', 'activity'), features)
test.X <- read.table('./test/X_test.txt')
test.Y <- read.csv('./test/y_test.txt', header = FALSE, sep = ' ')
testSubject <- read.csv('./test/subject_test.txt', header = FALSE, sep = ' ')
Test <- data.frame(testSubject, test.Y, test.X)
names(Test) <- c(c('subject', 'activity'), features)
## 1--Merges the Training and Testing Sets into 1 data set
allData <- rbind(Train, Test)
## 2--Extracts only the measurements on the mean and standard deviation for each measurement
mean_std <- grep('mean|std', features)
sub <- allData[,c(1,2,mean_std + 2)]
## 3--Uses descriptive activity names to name the activities in the data set
activityLabels <- read.table('activity_labels.txt', header = FALSE)
activityLabels <- as.character(activityLabels[,2])
sub$activity <- activityLabels[sub$activity]
## 4--Appropriately labels the data set with descriptive variable names.
nameNew <- names(sub)
nameNew <- gsub("[(][)]", "", nameNew)
nameNew <- gsub("^t", "TimeDomain", nameNew)
nameNew <- gsub("^f", "FrequencyDomain", nameNew)
nameNew <- gsub("Acc", "Accelerometer", nameNew)
nameNew <- gsub("Gyro", "Gyroscope", nameNew)
nameNew <- gsub("Mag", "Magnitude", nameNew)
nameNew <- gsub("-mean-", " Mean ", nameNew)
nameNew <- gsub("-std-", " StandardDeviation ", nameNew)
nameNew <- gsub("-", "_", nameNew)
names(sub) <- nameNew
## 5--Create a second, independent tidy data set with the average of each variable for each activity and each subject.
tidyData <- aggregate(sub[,3:81], by = list(activity = sub$activity, subject = sub$subject),FUN = mean)
write.table(x = tidyData, file = "tidydata.txt", row.names = FALSE)
|
f4d5433641d7b7ab72b8c3dbccfbb4909ebcb2dc | 13e362022442df7851bf9fd75da910250e9685b5 | /plot1.R | f724b6be800fe0755bd3dfb58fc05eefc9f320e4 | [] | no_license | cwest93/ExData_Plotting1 | 46b9376eb6c649a8d96a25a776ccafb6af45e73a | 9e2d69b86e227c2a0f5e8907923999a92d894857 | refs/heads/master | 2021-01-11T06:27:21.816632 | 2016-10-24T05:26:01 | 2016-10-24T05:26:01 | 71,740,972 | 0 | 0 | null | 2016-10-24T01:25:50 | 2016-10-24T01:25:50 | null | UTF-8 | R | false | false | 907 | r | plot1.R | #Create column labels and load data
lbl = c(Voltage="numeric", Global_active_power="numeric",Global_intensity="numeric",Sub_metering_1="numeric",Sub_metering_2="numeric",Sub_metering_3="numeric",Global_active_power="numeric",Global_reactive_power="numeric")
data = read.table("household_power_consumption.txt", header=TRUE, sep=";",dec=".", stringsAsFactors=FALSE, na.strings = "?",colClasses=lbl)
#Specify range of data
actData = data[data$Date %in% c("1/2/2007", "2/2/2007"),]
#Change data to date format
as.Date(actData$Date)
#Remove null values
actData = na.omit(actData)
#Create Histogram
hist(actData$Global_active_power, col="red",xlab="Global Active Power (kilowatts)", ylab = "Frequency", main = "Global Active Power")
#Create PNG File
png("plot1.png")
hist(actData$Global_active_power, col="red",xlab="Global Active Power (kilowatts)", ylab = "Frequency", main = "Global Active Power")
dev.off() |
cc8e1b27fae3a7ef8e3e8c3a26103d209f49715c | 76f7afb7dbabfaf53ff7f31caa7f419813cba303 | /server.R | a0a99add3272b438e046688c037639e699352d8e | [] | no_license | YasukazuIKAMI/blendnormal | 91fa5e00765e6e8549d023c99619c9ef02d30e51 | 77e4140800b6f6d14ec27e5b6f37619a194644de | refs/heads/master | 2020-05-20T09:40:11.980312 | 2015-06-13T12:51:21 | 2015-06-13T12:51:21 | 37,371,081 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 822 | r | server.R | library(ggplot2)
library(reshape)
set.seed(132)
shinyServer(
function(input, output) {
output$newHist <- renderPlot({
nsamp <- input$nsamp
mu1 <- input$mu1
mu2 <- input$mu2
sd1 <- input$sd1
sd2 <- input$sd2
input1 <- rnorm(nsamp,mean=mu1,sd=sd1)
input2 <- rnorm(nsamp,mean=mu2,sd=sd2)
average <- (input1+input2)/2
df <- melt(data.frame(input1,input2,average))
g <- ggplot(df,aes(x=value,fill=variable))
g <- g + geom_histogram(alpha=0.5,position="identity")
plot(g)
output$mu3 <- renderPrint({round(mean(average),2)})
output$sd3 <- renderPrint({round(sd(average),2)})
})
}
) |
776b77ff00f92fbaa01b2474bb60e24410efb3c2 | 874d5ee990bb371ba15c4c376af1dba17f7a5f38 | /R/roccer-doctype.r | c42f8e0e6df3e916c3c65a965fe018622b54be70 | [] | no_license | vspinu/roxygen3 | 37a0a3e7cbec26dc2d490e89a4a8a5bc6e55971c | f7ddf377cc6cfa1f7947b80bcd8394745854fb3b | refs/heads/master | 2021-01-18T07:43:59.068801 | 2012-08-28T19:21:50 | 2012-08-28T19:21:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,414 | r | roccer-doctype.r | #' Set object documentation type.
#'
#' @details You can use any doctype, but it will only be included in
#' the Rd file if it is one of the standard R doctypes: data, package,
#' methods and class.
#'
#' @usage
#' @@docType date
#' @@docType package
#' @@docType custom doctype
#'
#' @Section Extension
#' To automatically add new docTypes with their own defaults, implement
#' a method of \code{\link{doctype}} that returns a string, and function
#' \code{doctype_[doctype]} which works in the same way as a \code{one}
#' parser supplied to \code{\link{roc_parsers}}.
roc_doctype <- roccer("docType",
roc_parser(
words_tag(1, 1),
one = function(roc, obj, ...) {
doctype <- roc$docType %||% doctype(obj$value)
if (is.null(doctype)) return()
fname <- str_c("doctype_", doctype)
default <- find_function(fname)
if (is.null(default)) {
message("No default doctype function ", str_c("doctype_", doctype),
" found.")
return()
}
default(roc, obj, ...)
}
),
rd_out(rd_command("docType"))
)
# Format method for docType Rd commands: renders a single \docType{...}
# line for a standard doctype, or "" when the doctype is non-standard
# (custom doctypes are accepted upstream but never written to the Rd file).
#' @export
format.docType_command <- function(x, ...) {
  vals <- unique(x$value)
  if (length(vals) != 1) stop("Documentation can only have single docType")
  ok <- c("data", "package", "methods", "class")
  vals <- intersect(vals, ok)
  if (length(vals) == 0) return("")
  # BUG FIX: use the deduplicated, validated `vals`, not x$value --
  # duplicated raw values (e.g. c("data", "data")) passed the unique()
  # length check but then emitted one \docType string per raw element.
  str_c("\\docType{", vals, "}")
}
|
0f6b62e1d024e93de9de11d1b2c4e56bb1271e73 | 5429fc2b9df1cae03e6c5b1d803d45735693e197 | /R/region.R | b1498e5e12a78180ed96f3ce0923b91caf414853 | [] | no_license | drninjamommy/c3 | b641916b4a1364570de70f98d3f6ff6600b29ff2 | cc0f0154cfb90443baadf88cc37961d1219c1d1c | refs/heads/master | 2021-01-24T03:57:52.834702 | 2017-11-13T00:05:42 | 2017-11-13T00:05:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 930 | r | region.R |
#' Modify region elements on the x and y axes
#'
#' Generic entry point for adding region annotations; dispatches on the
#' class of `x` (S3).
#'
#' @param x Object to add regions to (typically a c3 widget).
#' @param ... Further arguments passed on to methods.
#' @family region
#' @export
region <- function(x, ...) {
  UseMethod("region")
}
#' Add regions to a c3 chart
#' @description Regions are defined on one or more axes by passing a single `data.frame`
#' @param c3 c3 htmlwidget object to modify
#' @param regions data.frame with columns:
#' \itemize{
#'  \item{axis}{: character one of 'x', 'y', 'y2'}
#'  \item{start}{: numeric but must match defined axis type}
#'  \item{end}{: numeric but must match defined axis type}
#'  \item{class}{: character css class (optional)}
#' }
#' @family c3
#' @family region
#' @return c3
#' @export
#'
#' @examples
#' \dontrun{
#' iris %>%
#'   c3(x='Sepal_Length', y='Sepal_Width', group = 'Species') %>%
#'   c3_scatter() %>%
#'   region(data.frame(axis = 'x',
#'                     start = 5,
#'                     end = 6))
#' }
#'
region.c3 <- function(c3, regions){
  # Validate input: must be a data.frame with at least the axis/start/end
  # columns (the css `class` column is optional, as in the example above).
  stopifnot(is.data.frame(regions),
            all(c("axis", "start", "end") %in% names(regions)))
  # Attach the regions to the widget's payload list
  c3$x$regions <- regions
  return(c3)
}
093f7b353dd0c8a0a06ca56fc8bcd0c38feacd18 | 1830f11907bfdbc97172d36bba89e09a345a456c | /Calc_SeasonalAve.R | b365e57d558457f1f1f5568fc7bc7ad2871b80f6 | [] | no_license | jnephin/salishsea-nemo | 44c4cb74badc29e85c57790df95c4dd75c42d5e2 | 7c8d1099fcd6bc2649d13b12e2bf065bbe5fcb29 | refs/heads/master | 2020-12-09T01:35:31.936322 | 2019-05-23T00:25:33 | 2019-05-23T00:25:33 | 233,154,610 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,434 | r | Calc_SeasonalAve.R | # open daily .RData files, calculate tidal and circulation speed and compute seasonal averages for all variables
# packages
require(abind)
# working directory
setwd('..')
# empty list for results
s.list <- list()
#---------------------------------------------------------------------------------#
# Calculate seasonal tidal and circulation current speed
# load u and v data
u.list <- list.files( path = "Data/Output/zbottom/u", pattern = "*.RData$", full.names = T )
v.list <- list.files( path = "Data/Output/zbottom/v", pattern = "*.RData$", full.names = T )
# get months
mnths <- sub(sub("_.*","",u.list)[1], "", u.list)
mnths <- substr(mnths, 5,6)
m.list <- list()
# for each month
for( m in unique(mnths) ){
# load each file into a list
u.dat <- lapply( u.list[mnths == m], function(x) get(load(x)) )
v.dat <- lapply( v.list[mnths == m], function(x) get(load(x)) )
# Get monthly array
u.array <- abind( u.dat, along=3 )
v.array <- abind( v.dat, along=3 )
# Calculate monthly mean tidal current using root mean square (rms)
uv <- u.array^2+v.array^2
m.list[["mean.tidal"]][[m]] <- apply( uv, c(1,2), function(x) sqrt(mean(x)) )
# Calculate monthly mean circulation current
mean.u <- apply( u.array, c(1,2), mean )
mean.v <- apply( v.array, c(1,2), mean )
# Calculate speed from mean monthly u and v
m.list[["mean.circ"]][[m]] <- sqrt( mean.u^2 + mean.v^2 )
}
# convert months to seasons (sum or win)
seasons <- rep("win", length(names(m.list[["mean.tidal"]])))
seasons[names(m.list[["mean.tidal"]]) %in% c("04","05","06","07","08","09")] <- "sum"
# seasonal means from monthly means
for( s in unique(seasons) ){
# Get monthly indices for s
ind <- which(seasons == s)
# Get seasonal array
tidal.array <- abind( m.list[["mean.tidal"]][ind], along=3 )
circ.array <- abind( m.list[["mean.circ"]][ind], along=3 )
# Calculate seasonal mean values
s.list[["tidal"]][[s]] <- apply( tidal.array, c(1,2), mean )
s.list[["circ"]][[s]] <- apply( circ.array, c(1,2), mean )
}
#---------------------------------------------------------------------------------#
# for all other variables
vars <- c("salt", "temp", "nitrogen", "diatoms")
# loop through vars
for( var in vars ){
# load data
var.list <- list.files( path = paste0("Data/Output/zbottom/", var), pattern = "*.RData$", full.names = T )
# get months
mnths <- sub(sub("_.*","",var.list)[1], "", var.list)
mnths <- substr(mnths, 5,6)
m.list <- list()
# monthly means of hourly values
for( m in unique(mnths) ){
# load each file into a list
var.dat <- lapply( var.list[mnths == m], function(x) get(load(x)) )
# Get monthly array
var.array <- abind( var.dat, along=3 )
# Calculate monthly mean values
m.list[[m]] <- apply( var.array, c(1,2), mean, na.rm=T )
}
# convert months to seasons (sum or win)
seasons <- rep("win", length(names(m.list)))
seasons[names(m.list) %in% c("04","05","06","07","08","09")] <- "sum"
# seasonal means from monthly means
for( s in unique(seasons) ){
# Get monthly indices for s
ind <- which(seasons == s)
# Get seasonal array
var.array <- abind( m.list[ind], along=3 )
# Calculate seasonal mean values
s.list[[var]][[s]] <- apply( var.array, c(1,2), mean )
}
}
# Export
save( s.list, file = "Data/Output/ave/seasonal.means.RData" )
|
80891027bbb45b447a3e43fbf9c03e0e2e6f8b8a | da91a4ab25ce5ce1bdcc217ec2e981bb402b1820 | /man/SRE-package.Rd | df14ff46117b77fba0a3c49f808d0a911918701e | [] | no_license | iamamutt/SRE | ba8ece85659d43943ee273d967be87a7dd11811f | f5277ea874006ab32dd3e8e5583e7449eeafe13d | refs/heads/master | 2020-03-15T19:08:10.767882 | 2018-12-14T20:10:15 | 2018-12-14T20:10:15 | 132,301,079 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 525 | rd | SRE-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SRE-package.R
\docType{package}
\name{SRE-package}
\alias{SRE-package}
\title{Self-Recognition Experiment (SRE)}
\description{
The \code{SRE} package (Self-recognition Experiment) provides
supplementary analyses and data for the paper
\emph{The Impact of Autistic Traits on Self-Recognition of Body Movements}.
}
\details{
See \code{help(package = "SRE")} for a list of functions.
View the vignette with \code{browseVignettes(package ="SRE")}.
}
|
9697ac0b3e691216508c682ca263e75233743031 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/longpower/examples/power.mmrm.Rd.R | a78a7bb10e0bb9580fd9047ac0d8864f59952983 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,118 | r | power.mmrm.Rd.R | library(longpower)
### Name: power.mmrm
### Title: Linear mixed model sample size calculations.
### Aliases: power.mmrm
### Keywords: effects mixed power random sample size
### ** Examples
# reproduce Table 1 from Lu, Luo, & Chen (2008)
# reproduce Table 1 from Lu, Luo, & Chen (2008):
# total sample sizes for eight (phi1, phi2, lambda) scenarios at
# two-sided alpha = 0.05, power = 0.90, effect size delta = 0.5.
phi1 <- c(rep(1, 6), 2, 2)
phi2 <- c(1, 1, rep(2, 6))
lambda <- c(1, 2, sqrt(1/2), 1/2, 1, 2, 1, 2)
# Preallocate result vectors instead of growing them with c() in the loop
ztest <- numeric(length(phi1))
ttest1 <- numeric(length(phi1))
for(i in seq_along(phi1)){
  # z-test based sample sizes (Na in group A, Nb = Na / lambda in group B)
  Na <- (phi1[i] + lambda[i] * phi2[i])*(qnorm(0.05/2) + qnorm(1-0.90))^2*(0.5^-2)
  Nb <- Na/lambda[i]
  ztest[i] <- Na + Nb
  # t-test refinement: recompute with t quantiles at df from the z-test size
  v <- Na + Nb - 2
  Na <- (phi1[i] + lambda[i] * phi2[i])*(qt(0.05/2, df = v) + qt(1-0.90, df = v))^2*(0.5^-2)
  Nb <- Na/lambda[i]
  ttest1[i] <- Na + Nb
}
data.frame(phi1, phi2, lambda, ztest, ttest1)
# Power / sample-size calls with a hand-specified 4-visit correlation
# structure (exchangeable, rho = 0.25) and retention vector ra
Ra <- matrix(0.25, nrow = 4, ncol = 4)
diag(Ra) <- 1
ra <- c(1, 0.90, 0.80, 0.70)
sigmaa <- 1
power.mmrm(Ra = Ra, ra = ra, sigmaa = sigmaa, delta = 0.5, power = 0.80)
power.mmrm(N = 174, Ra = Ra, ra = ra, sigmaa = sigmaa, delta = 0.5)
power.mmrm(N = 174, Ra = Ra, ra = ra, sigmaa = sigmaa, power = 0.80)

# Same calls with 2:1 allocation (lambda = 2)
power.mmrm(Ra = Ra, ra = ra, sigmaa = sigmaa, delta = 0.5, power = 0.80, lambda = 2)
power.mmrm(N = 174, Ra = Ra, ra = ra, sigmaa = sigmaa, delta = 0.5, lambda = 2)
power.mmrm(N = 174, Ra = Ra, ra = ra, sigmaa = sigmaa, power = 0.80, lambda = 2)

# Extracting parameters from gls objects with general correlation
# Create time index:
Orthodont$t.index <- as.numeric(factor(Orthodont$age, levels = c(8, 10, 12, 14)))
with(Orthodont, table(t.index, age))

fmOrth.corSym <- gls( distance ~ Sex * I(age - 11),
  Orthodont,
  correlation = corSymm(form = ~ t.index | Subject),
  weights = varIdent(form = ~ 1 | age) )
summary(fmOrth.corSym)$tTable

# Correlation matrix and last-visit SD taken from the fitted gls object
C <- corMatrix(fmOrth.corSym$modelStruct$corStruct)[[1]]
sigmaa <- fmOrth.corSym$sigma *
          coef(fmOrth.corSym$modelStruct$varStruct, unconstrained = FALSE)['14']
# NOTE(review): `length=` relies on partial matching of `length.out` --
# works, but spelling it out would be safer.
ra <- seq(1,0.80,length=nrow(C))
power.mmrm(N=100, Ra = C, ra = ra, sigmaa = sigmaa, power = 0.80)

# Extracting parameters from gls objects with compound symmetric correlation
fmOrth.corCompSymm <- gls( distance ~ Sex * I(age - 11),
  Orthodont,
  correlation = corCompSymm(form = ~ t.index | Subject),
  weights = varIdent(form = ~ 1 | age) )
summary(fmOrth.corCompSymm)$tTable

C <- corMatrix(fmOrth.corCompSymm$modelStruct$corStruct)[[1]]
sigmaa <- fmOrth.corCompSymm$sigma *
          coef(fmOrth.corCompSymm$modelStruct$varStruct, unconstrained = FALSE)['14']
ra <- seq(1,0.80,length=nrow(C))
power.mmrm(N=100, Ra = C, ra = ra, sigmaa = sigmaa, power = 0.80)

# Extracting parameters from gls objects with AR1 correlation
fmOrth.corAR1 <- gls( distance ~ Sex * I(age - 11),
  Orthodont,
  correlation = corAR1(form = ~ t.index | Subject),
  weights = varIdent(form = ~ 1 | age) )
summary(fmOrth.corAR1)$tTable

C <- corMatrix(fmOrth.corAR1$modelStruct$corStruct)[[1]]
sigmaa <- fmOrth.corAR1$sigma *
          coef(fmOrth.corAR1$modelStruct$varStruct, unconstrained = FALSE)['14']
ra <- seq(1,0.80,length=nrow(C))
power.mmrm(N=100, Ra = C, ra = ra, sigmaa = sigmaa, power = 0.80)
# AR(1)-specific shortcut using only the lag-1 correlation
power.mmrm.ar1(N=100, rho = C[1,2], ra = ra, sigmaa = sigmaa, power = 0.80)
be65009e83496ac062985848b20cc04e556780d7 | de719cc6cc53213b4276f031c76389f2d5b2aa6e | /Build Variables-3.0.R | 6876b223f68f13a8bea6c092620fb45997d312b4 | [] | no_license | Libardo1/DSO562-FraudAnalytics | 0524b70a498439e3e7ea7a066ae674a0e0465052 | 15f2cd9d98f400c8ee9c0d4136f564b83215f20e | refs/heads/master | 2020-03-17T04:40:47.324882 | 2017-05-23T02:06:33 | 2017-05-23T02:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,627 | r | Build Variables-3.0.R | load("ny_cleaned.rda")
library(ggplot2)
library(dplyr)
# Change lot area to numeric
ny_cleaned$LOT_AREA = as.double(ny_cleaned$LOT_AREA)
# creating temporary variable...preserving original dataset
ny_temp = ny_cleaned
str(ny_temp)
# Data-quality profiling: writes a numeric summary and a categorical summary
# csv into the current working directory
library(dataQualityR)
num.file <- paste(getwd(), "/dq_num.csv", sep= "")
cat.file <- paste(getwd(), "/dq_cat.csv", sep= "")
checkDataQuality(data= ny_cleaned, out.file.num= num.file, out.file.cat= cat.file)
# echo the report paths
num.file
cat.file
### HYGIENE CHECKS
### Replace zeros with 1 in every value field that is later used in ratios,
### so divisions never hit 0. The same ifelse() statement was duplicated once
### per column; a loop over the column names removes the repetition.
ny_temp = ny_cleaned
value_cols <- c("FULLVAL", "AVLAND", "AVTOT", "EXLAND", "EXTOT",
                "BLD_VOLUME", "LOT_AREA")
for (col in value_cols) {
  # ifelse keeps NA as NA (logical subassignment would error on NA indices)
  ny_temp[[col]] <- ifelse(ny_temp[[col]] == 0, 1, ny_temp[[col]])
  # print the summary explicitly (auto-printing does not happen inside a loop)
  print(summary(ny_temp[[col]]))
}
ny_cleaned = ny_temp
### Creating new ratio variables.
### Convention (same as the raw value fields above): if the denominator is 0
### the numerator is divided by 1, and zero-valued ratios are replaced by 1.
### The identical guard/floor pattern was repeated four times; factor it out.
safe_ratio <- function(num, den) {
  # guard against zero denominators (denominators were already floored to 1
  # above, so this branch is defensive), then floor zero ratios to 1
  r <- ifelse(den == 0, num / 1, num / den)
  ifelse(r == 0, 1, r)
}

### FULLVAL/AVTOT
ny_cleaned$FV_AT <- safe_ratio(ny_cleaned$FULLVAL, ny_cleaned$AVTOT)
sum(is.nan(ny_cleaned$FV_AT)) # All values are valid
summary(ny_cleaned$FV_AT)

### AVTOT/EXTOT
ny_cleaned$AT_ET <- safe_ratio(ny_cleaned$AVTOT, ny_cleaned$EXTOT)
sum(is.nan(ny_cleaned$AT_ET)) # All values are valid
summary(ny_cleaned$AT_ET)

### FULLVAL/EXTOT
ny_cleaned$FV_ET <- safe_ratio(ny_cleaned$FULLVAL, ny_cleaned$EXTOT)
sum(is.nan(ny_cleaned$FV_ET)) # All values are valid
summary(ny_cleaned$FV_ET)

### AVLAND/EXLAND
ny_cleaned$AL_EL <- safe_ratio(ny_cleaned$AVLAND, ny_cleaned$EXLAND)
sum(is.nan(ny_cleaned$AL_EL)) # All values are valid
summary(ny_cleaned$AL_EL)

### Change the column order: RECORD, the 7 grouping columns, value columns, new ratios
ny_cleaned <- ny_cleaned[ , names(ny_cleaned)[c(1,7:13,2:6,14:17)]]
### 9 numerators & 7 denominators
# Numerators: everything after the first 8 columns (RECORD + 7 grouping columns)
numerator = c(names(ny_cleaned)[-1:-8]) # doing -1 for removing RECORD column
str(numerator)
# Denominators: the 7 grouping columns (columns 2-8, e.g. BORO, ZIP, TAXCLASS)
denominator = c(names(ny_cleaned)[2:8])
str(denominator)
## HYGIENE CHECK .. no zeros/ no NaN's anywhere
j=1
names(ny_cleaned)
for (j in seq(names(ny_cleaned))){
  print(colnames(ny_cleaned[j]))
  print(sum(ny_cleaned[j] == 0))
  print(sum(is.nan(ny_cleaned[,j])))
}
# CREATE VARIABLES FOR PCA:
# for each numerator x denominator pair, (1) compute the per-group median of
# the numerator into an avg_<denominator> column, then (2) divide the numerator
# by that group median into a <numerator>_<denominator> column.
# Uses lazyeval::interp plus the (now superseded) underscored dplyr verbs to
# build the mutate calls from column names held in strings.
i=1
j=1
for (i in seq(numerator)) {
  for(j in seq(denominator)){
    name = paste0(numerator[i],"_",denominator[j])
    average = paste0("avg_",denominator[j])
    # CREATES average variables (per-group median of the numerator)
    mutate_call = lazyeval::interp(~median(a), a = as.name(numerator[i]))
    ny_cleaned= ny_cleaned %>%
      group_by_(.dots = denominator[j]) %>%
      mutate_(.dots = setNames(list(mutate_call), average))
    #CREATES MAIN variables (numerator scaled by its group median)
    mutate_call = lazyeval::interp(~a/b, a = as.name(numerator[i]), b = as.name(average))
    ny_cleaned = ny_cleaned %>% mutate_(.dots = setNames(list(mutate_call), name))
  }
}
## V.V. IMP: the dplyr grouped pipeline above returns a grouped tibble;
## downstream code expects a plain data.frame
ny_cleaned = as.data.frame(ny_cleaned)

## THE FINAL HYGIENE CHECK: report zeros and NaNs per column
for (j in seq_along(names(ny_cleaned))){
  print(colnames(ny_cleaned[j]))
  print(sum(ny_cleaned[j] == 0))
  print(sum(is.nan(ny_cleaned[,j])))
}

# Drop the 7 helper avg_* columns plus two degenerate ratio variables.
# (Originally done with seven `$col = NULL` statements and a temp-variable
# dance; a single subset on the names is equivalent and clearer.)
drop_cols <- c("avg_BORO", "avg_EASEMENT", "avg_BLDGCL", "avg_TAXCLASS",
               "avg_ZIP", "avg_LOT_AREA", "avg_BLD_VOLUME",
               "AVLAND_BLD_VOLUME", "EXLAND_BLD_VOLUME")
ny_cleaned <- ny_cleaned[ , !(names(ny_cleaned) %in% drop_cols)]

# Keep the first 100k records as a working subset, then persist both datasets
ny_subset_100000 = ny_cleaned[1:100000,]
save(ny_cleaned, file = "ny_ready.rda")
save(ny_subset_100000, file = "ny_ready_100000.rda")
|
990ab4c524c6eef0c94e80f8a0e1feea9308e51c | 60b9ed5bf4ca20f0617db179ffc87c91cdb1d7bb | /tests/testthat/test_rotateLwls2dV2.R | 174187a51a5971df337604d29fd081b558b9080f | [
"BSD-3-Clause"
] | permissive | Chunjui/tPACE | 1cc5f2ef6404c3f8b49d092b7c30a1a3ee10d66e | b6bc27682b4d883940db5ce8cd7bc0e3e978a998 | refs/heads/master | 2021-01-22T20:59:44.168431 | 2015-07-15T23:03:18 | 2015-07-15T23:03:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 722 | r | test_rotateLwls2dV2.R | devtools::load_all()
# Deterministic synthetic data: 100 Wiener-process trajectories on a grid of
# 21 points in [0, 1], with Gaussian noise, then sparsified to 5-10 points each
set.seed(1)
n <- 100
pts <- seq(0, 1, by=0.05)
outPts <- seq(0, 1, by=0.1)
samp3 <- wiener(n, pts) + rnorm(n * length(pts), sd=0.5)
samp3 <- sparsify(samp3, pts, 5:10)
# raw covariance (with measurement error) -> binned raw covariance -> GCV bandwidth
rcov3 <- GetRawCov(samp3$yList, samp3$tList, pts, rep(0, length(pts)), 'Sparse', error=TRUE)
brcov3 <- BinRawCov(rcov3)
gcv3b <- gcvlwls2dV2(pts, outPts, kern='epan', rcov=brcov3, t=samp3$tList)
# rotateLwls2dV2 must agree with the Rrotatedmullwlsk implementation
# given the same bandwidth, kernel, and binned covariance data
test_that('RotateLwls2dV2.R interface is correct', {
  expect_equal(Rrotatedmullwlsk(c(gcv3b$h, gcv3b$h) , 'epan', t(brcov3$tPairs), brcov3$meanVals, brcov3$count, rbind(outPts, outPts), npoly=1, bwCheck=FALSE), rotateLwls2dV2( gcv3b$h, 'epan', xin=brcov3$tPairs, yin=brcov3$meanVals, win=brcov3$count, xout=cbind(outPts, outPts)))
})
|
8992550f5f696bab7057cf4a51ecfe089f14f707 | c08e6b516a3d341d1fdb893448922082dc3626cf | /R/dimsum__cutadapt_report.R | 3814d63a75fcf3f51617facb1a9ed9d6fe66c17d | [
"MIT"
] | permissive | lehner-lab/DiMSum | eda57459bbb450ae52f15adc95d088747d010251 | ca1e50449f1d39712e350f38836dc3598ce8e712 | refs/heads/master | 2023-08-10T17:20:39.324026 | 2023-07-20T15:29:47 | 2023-07-20T15:29:47 | 58,115,412 | 18 | 5 | null | null | null | null | UTF-8 | R | false | false | 5,698 | r | dimsum__cutadapt_report.R |
#' dimsum__cutadapt_report
#'
#' Generate cutadapt summary plots for all fastq files.
#'
#' @param dimsum_meta an experiment metadata object (required)
#' @param report_outpath cutadapt report output path (required)
#'
#' @return an updated experiment metadata object
#' @export
dimsum__cutadapt_report <- function(
  dimsum_meta,
  report_outpath
  ){
  #Create report directory (if doesn't already exist)
  report_outpath <- gsub("/$", "", report_outpath)
  suppressWarnings(dir.create(report_outpath))

  #Input files: one cutadapt stdout log per read pair
  cutadapt_files <- file.path(dimsum_meta[['exp_design']][,'pair_directory'], paste0(dimsum_meta[['exp_design']][,'pair1'], '.stdout'))

  #Check if all input files exist
  dimsum__check_files_exist(
    required_files = cutadapt_files,
    execute = TRUE,
    exit = FALSE)

  #Parse cutadapt results for all read pairs.
  #For each log we record, per read: reads trimmed at 5' only, at 3' only,
  #at both ends, and the total number of reads.
  cutadapt_read1_list <- list()
  cutadapt_read2_list <- list()
  total_reads_list <- list()
  for(i in seq_along(cutadapt_files)){
    if(dimsum_meta[["paired"]]){
      trim_list <- dimsum__parse_cutadapt_output(
        file_path = cutadapt_files[i],
        ran_cutadapt = dimsum_meta[['exp_design']][i,'run_cutadapt'],
        ran_cutadapt_cutonly = dimsum_meta[['exp_design']][i,'run_cutadapt_cutonly'])
      total_reads_list[[i]] <- trim_list[['total_reads']]
      cutadapt_read1_list[[trim_list[['name_read1']]]] <- c(
        trim_list[['total_read1_a5']]-trim_list[['total_read1_both']],
        trim_list[['total_read1_a3']]-trim_list[['total_read1_both']],
        trim_list[['total_read1_both']],
        trim_list[['total_reads']])
      cutadapt_read2_list[[trim_list[['name_read2']]]] <- c(
        trim_list[['total_read2_a5']]-trim_list[['total_read2_both']],
        trim_list[['total_read2_a3']]-trim_list[['total_read2_both']],
        trim_list[['total_read2_both']],
        trim_list[['total_reads']])
    }else{
      trim_list <- dimsum__parse_cutadapt_output_single_end(
        file_path = cutadapt_files[i],
        ran_cutadapt = dimsum_meta[['exp_design']][i,'run_cutadapt'],
        ran_cutadapt_cutonly = dimsum_meta[['exp_design']][i,'run_cutadapt_cutonly'])
      total_reads_list[[i]] <- trim_list[['total_reads']]
      cutadapt_read1_list[[trim_list[['name_read1']]]] <- c(
        trim_list[['total_read1_a5']]-trim_list[['total_read1_both']],
        trim_list[['total_read1_a3']]-trim_list[['total_read1_both']],
        trim_list[['total_read1_both']],
        trim_list[['total_reads']])
    }
  }

  #Plot the trimming summary for read 1 (and read 2 for paired designs).
  #The summarise-and-plot logic was previously duplicated for both reads;
  #it now lives in the dimsum__cutadapt_trim_plot helper below.
  dimsum__cutadapt_trim_plot(cutadapt_read1_list, report_outpath, 'pair1')
  if(dimsum_meta[["paired"]]){
    dimsum__cutadapt_trim_plot(cutadapt_read2_list, report_outpath, 'pair2')
  }

  #Render report
  dimsum__render_report(dimsum_meta = dimsum_meta)

  #New experiment metadata with total read pair counts added
  dimsum_meta_new <- dimsum_meta
  dimsum_meta_new[['exp_design']][,'total_read_pairs'] <- as.numeric(unlist(total_reads_list))
  return(dimsum_meta_new)
}

#' dimsum__cutadapt_trim_plot
#'
#' Summarise per-fastq trimming counts and save a stacked percentage bar plot.
#' Internal helper for dimsum__cutadapt_report (not exported).
#'
#' @param read_list named list; each element is c(5'-only, 3'-only, both ends, total reads)
#' @param report_outpath report output path
#' @param suffix plot file suffix ('pair1' or 'pair2')
#'
#' @return Nothing (writes a png to report_outpath)
dimsum__cutadapt_trim_plot <- function(
  read_list,
  report_outpath,
  suffix
  ){
  read_df <- as.data.frame(do.call('rbind', read_list))
  colnames(read_df) <- c('five_prime', 'three_prime', 'both', 'total_reads')
  read_df[,'fastq'] <- rownames(read_df)
  #Sum counts over entries belonging to the same fastq file
  read_df_collapse <- plyr::ddply(read_df, "fastq", plyr::summarise,
    five_prime = sum(five_prime),
    three_prime = sum(three_prime),
    both = sum(both),
    total_reads = sum(total_reads))
  #Convert counts (columns 2:4) to percentages of total reads, then drop total_reads
  read_df_collapse_perc <- read_df_collapse
  read_df_collapse_perc[,2:4] <- as.data.frame(t(scale(t(read_df_collapse_perc[,2:4]), center = F, scale = read_df_collapse_perc[,'total_reads'])))*100
  read_df_collapse_perc <- read_df_collapse_perc[,1:4]
  #Stacked bar plot: percentage trimmed per fastq file, split by trimmed region
  plot_df <- reshape2::melt(read_df_collapse_perc, id="fastq")
  plot_df[,'Region_trimmed'] <- factor(plot_df[,'variable'])
  d <- ggplot2::ggplot(plot_df, ggplot2::aes(fastq, value)) +
    ggplot2::geom_col(ggplot2::aes(fill = Region_trimmed)) +
    ggplot2::theme_bw() +
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1)) +
    ggplot2::labs(x = "FASTQ files", y = "Reads trimmed (percentage)")
  dimsum__save_png(file.path(report_outpath, paste0('dimsum__cutadapt_report_', suffix, '.png')), d, width=12, height=8)
}
b9e2d043ca46784c83a7b8e999e54f061760f02b | 172288efc3ea342e191e503f372a6b5b8b5e7465 | /man/knnDataTransformation.Rd | 96d621bf2818d9c5d6cf3a625ace3c8a1252c7bb | [] | no_license | guypwhunt/r_shiny_geo2r_visulisation_package | 062d14e0fd9500bd3133a45828f56f9db498500c | afa27c0a97f8ab9488005160981d61c0bfb76128 | refs/heads/main | 2023-04-09T17:03:27.130019 | 2021-04-07T14:58:47 | 2021-04-07T14:58:47 | 355,532,616 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 693 | rd | knnDataTransformation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataTransformationFunctions.R
\name{knnDataTransformation}
\alias{knnDataTransformation}
\title{A Function to Perform KNN Impute on an Expression Object}
\usage{
knnDataTransformation(ex, knnTransformation)
}
\arguments{
\item{ex}{A GEO expression object, as returned by the extractExpressionData() function.}
\item{knnTransformation}{Whether to apply KNN imputation; either "Yes" or "No".}
}
\description{
A function to impute missing expression data, using nearest neighbor averaging.
}
\examples{
knnDataInput <- knnDataTransformation(dataInput, knnTransformation)
}
\author{
Guy Hunt
}
\keyword{GEO}
|
0fe160ea4c388afb9e686798bbe0abd3ce4120b9 | 78012657605edcf98a4aa6e6539bc2701ac933e8 | /R/h.pr.u.R | a6b574d654f182f2618adb69c932af7c2e15663a | [] | no_license | cran/BHTSpack | adc5eb08ac8867b864fd87fce84d7c0b1db75397 | ab23e62dba2f10f7f2667517d45b558605fb9ac1 | refs/heads/master | 2021-06-03T13:07:14.966652 | 2021-02-08T15:30:13 | 2021-02-08T15:30:13 | 136,819,663 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 202 | r | h.pr.u.R | h.pr.u = function(z, ih, mu, sigma, pk, K, H, n){
  # M: length of the count vector n (presumably one entry per plate/group -- TODO confirm)
  M = length(n)
  # Delegate to the compiled C routine; ih is shifted to 0-based indexing for C.
  # NOTE(review): arguments are passed as (z, ih-1, pk, sigma, mu, n, H) --
  # sigma precedes mu here; verify this matches the C signature of "stick_multnorm_h".
  out <- .Call("stick_multnorm_h", z, ih-1, pk, sigma, mu, n, H, PACKAGE="BHTSpack")
  # Reshape the flat result into an (H*M) x K matrix, filled row by row
  return(matrix(out, H*M, K, byrow=TRUE))
}
|
f8e60fab214066187809e9546b9513a7dbff3e0d | 2ee74ce83d5a80e2b83c29593b21f90b7bcf8440 | /Ch. 13 String Manipulation.R | 1dcbc3177aa55f8d37c82dd0f3a40f747dc77497 | [] | no_license | petershahlevlnow/LearnRproj | 8dbb5cd4a9f38fa65b8cf4b38e6929e34747f382 | 105baa6919edd30437a8645562bdb03f1aef0229 | refs/heads/master | 2021-01-19T04:29:56.344858 | 2016-06-30T17:18:01 | 2016-06-30T17:18:01 | 50,056,972 | 0 | 0 | null | 2016-06-30T16:40:25 | 2016-01-20T20:08:11 | R | UTF-8 | R | false | false | 4,978 | r | Ch. 13 String Manipulation.R | # Ch 13 manipulating strings
# 13.1 paste
# notice spaces automatically added between strings when no sep argument is given
paste("hello", "Jared", "and others")
paste("hello", "Jared", "and others", sep = "/")
# can also paste vectors, paired element by element (shorter arguments recycle)
paste(c("hello", "jared", "winters"), c("meep", "zort", "narf"))
paste("hello", c("jared", "winters", "meep"))
paste("hello", c("jared", "winters", "meep"), "goodbye")
# use paste with collapse to flatten a character vector into a single string
vectorOftext <- c("hello", "everyone", "out there", ".")
paste(vectorOftext, collapse = " ")
paste(vectorOftext, collapse = "*")
#13.2 sprintf: C-style string interpolation with %s placeholders
person <- "Jared"
partysize <- "eight"
waitTime <- 25
paste("Hello ", person, ", your party of ", partysize, " will be seated in ", waitTime, " minutes.",sep = "")
sprintf("Hello %s, your party of %s will be seated in %s minutes.", person, partysize, waitTime)
#sprintf can also be vectorized, but argument lengths must be multiples of each other
sprintf("Hello %s, your party of %s will be seated in %s minutes.", c("Jared", "Bob"), c("eight", 16, "four", 10),
        waitTime)
#13.3 Extracting Text
require(XML)
load("data/presidents.rdata")
theURL <- "http://www.loc.gov/rr/print/list/057_chron.html"
# use readHTMLTable to parse the presidents table out of the page's HTML.
# BUG FIX: the argument was misspelled 'stringAsFactor', so it was silently
# ignored and character columns came back as factors.
presidents1 <- readHTMLTable(theURL, which =3, as.data.frame = TRUE, skip.rows = 1, header = TRUE,
                             stringsAsFactors = FALSE)
head(presidents1)
tail(presidents1$YEAR)
#Only include first 64 rows; the bottom of the table is malformed
presidents1 <- presidents1[1:64,]
# Create separate columns for year start and end in the presidents table:
# use str_split to split on the hyphen, giving a list of character vectors
require(stringr)
yearList <- str_split(string = presidents1$YEAR, pattern = "-")
head(yearList)
#combine the list into one two-column matrix (length-1 entries are recycled by rbind)
yearMatrix <- data.frame(Reduce(rbind, yearList))
names(yearMatrix) <- c("Start", "Stop")
#cbind the year matrix onto the presidents table and convert to numeric
#(via as.character first, in case the columns are factors)
presidents1 <- cbind(presidents1, yearMatrix)
head(presidents1)
presidents1$Start = as.numeric(as.character(presidents1$Start))
presidents1$Stop = as.numeric(as.character(presidents1$Stop))
tail(presidents1)
#finding a subset of strings with str_sub
str_sub(string = presidents1$PRESIDENT, start = 1, end = 3)
str_sub(string = presidents1$PRESIDENT, start = 4, end = 8)
#find presidents elected in a year ending in 0, i.e. whose term starts in a year ending in 1
presidents1[str_sub(string = presidents1$Start, start = 4, end = 4) == 1,
            c("YEAR", "PRESIDENT", "Start", "Stop")]
#13.4 Regular Expressions
# find a president with the name "John", use str_detect
JohnPos <- str_detect(string = presidents1$PRESIDENT, pattern = "John")
presidents1[JohnPos, c("YEAR", "PRESIDENT", "Start", "Stop")]
# case matters: a lowercase pattern will not match
badsearch <- str_detect(presidents1$PRESIDENT, "john")
# BUG FIX: stringr's ignore.case() is defunct in current stringr;
# the supported form is regex(pattern, ignore_case = TRUE)
goodsearch <- str_detect(presidents1$PRESIDENT, regex("JOHn", ignore_case = TRUE))
sum(badsearch)
sum(goodsearch)
#load war times from url
con <- url("http://www.jaredlander.com/data/warTimes.rdata")
load(con)
close(con)
head(warTimes, 10)
#create a column for the start of the war
warTimes[str_detect(string = warTimes, pattern = "-")]
theTimes <- str_split(string = warTimes, pattern = "(ACAEA)|-", n=2)
head(theTimes)
#check for hyphenated separator
which(str_detect(string = warTimes, pattern = "-"))
theTimes[[147]]
theTimes[[150]]
#extract the start date only (vapply is type-stable, unlike sapply)
theStart <- vapply(theTimes, FUN = function(x) x[1], FUN.VALUE = character(1))
head(theStart)
#trim white space
theStart <- str_trim(theStart)
#pull out "January" anywhere it's found, otherwise NA
str_extract(string = theStart, pattern = "January")
#pull just the elements where January is found
theStart[str_detect(string = theStart, pattern = "January")]
#search for years: 4 consecutive digits
head(str_extract(string = theStart, "[0-9][0-9][0-9][0-9]"), 20)
head(str_extract(string = theStart, "[0-9]{4}"), 20) #{4} = number of occurrences of 0-9
head(str_extract(string = theStart, "\\d{4}"), 20) #use \\d for digit
str_extract(string= theStart, "\\d{1,3}") #find any run of 1, 2 or 3 digits
head(str_extract(string= theStart, pattern = "^\\d{4}"), 30) # '^' anchors the match to the start of the text
head(str_extract(string= theStart, pattern = "\\d{4}$"), 30) # '$' anchors the match to the end of the text
head(str_extract(string= theStart, pattern = "^\\d{4}$"), 30)
#str_replace, replace the first digit with x
head(str_replace(string = theStart,pattern = "\\d", replacement = "x"), 30)
#str_replace_all, replace all digits with x
head(str_replace_all(string = theStart, pattern = "\\d", replacement = "x"), 30)
#replace any run of 1 to 4 digits with a single x
head(str_replace_all(string = theStart, pattern = "\\d{1,4}", replacement = "x"), 30)
#search through html tags for text
#create a vector of HTML
commands <- c("<a href = index.html>The link is here</a>", "<b>this is bold text</b>")
#get the text between html tags via a capture group backreference
str_replace(string = commands, pattern = "<.+?>(.+?)<.+>", replacement = "\\1")
79a6ab1de0c2d36f269712e4a6865f8e7f257ab8 | 677c3e302783b1b4388386536589efee6f2bef1c | /R/make_df.R | 9efe6a39977ab77b8d355fd8eee42cdb28885300 | [] | no_license | andrew-stewardson/saturn-r-gnosis | 49921dc97329d445109482326400b5d79f267bfa | ff45c2160732abf1dfef7170185ed86c58f2c1d2 | refs/heads/master | 2021-01-18T19:37:37.280996 | 2016-11-03T11:02:56 | 2016-11-03T11:02:56 | 68,911,682 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,192 | r | make_df.R | ### SET-UP ------------------------
# All patients
# Control, Ciprofloxacin, Nitrofurantoin
# Remove paticipants with missing dates (for now)
# Run script to 'master' dataset
rm(master, df)
# csv file made using:
# 1. source("/Users/andrewstewardson/Dropbox (HumanLabZ)/SATURN/saturnmicro/R/make_master.R")
# 2. write.csv(master, file='master.csv')
# 3. copy to correct folder
### IMPORT & SELECT SUBJECTS ------------------------
master <- read.csv("data/master.csv")
### Select patients
# Keep confirmed ciprofloxacin results for patients in control, nitrofurantoin, or ciprofloxacin households
temp <- master %>%
filter(test=="cip200" & (house.exposure=="control" | house.exposure=="nitrofurantoin" | house.exposure=="ciprofloxacin" | house.exposure=="norfloxacin")) %>%
filter(!is.na(collection.dt)) %>%
select(charID, id_house=houseid, id_site=country, bl_sex=sex, bl_age=age, bl_travel=travel_highrisk, bl_ab12=reported.ab.prev12, bl_residents=houseresidents, exposure, time, state=confirm.bi, sq=screen.sq, den=screen.gth, num=screen.bi, reported.ab.stdt, reported.ab.eddt, collection.dt)
# Remove strange observation (date seems wrong)
temp <- temp %>% filter(!(time=='TP1' & charID=='z-1890-cn-A'))
### TIME ------------------------

### Time scale: Subject (t.subject) — days since the subject's own first sample
# Fix errors (obvious single digit errors in two collection timestamps)
temp$collection.dt <- as.character(temp$collection.dt)
temp$collection.dt[temp$charID=='z-0063-ut-A' & temp$time=='TP1'] <- '2011-09-29 18:00:00'
temp$collection.dt[temp$charID=='v-0289-ut-B' & temp$time=='TP1'] <- '2013-02-25 12:40:00'
# Change to date format
temp$collection.dt <- ymd_hms(temp$collection.dt)
# Define day zero for each subject (=day sample 1 collected)
day.zero <- temp %>%
  arrange(charID, time) %>%
  filter(time=="TP1") %>%
  select(charID, day.zero=collection.dt)
# Create variable for time from day zero for each sample
df <- temp %>%
  left_join(day.zero, by="charID") %>%
  mutate(
    int=interval(day.zero, collection.dt), # interval between recruitment date and collection date
    t=int/ddays(1),
    state=state+1) # shift state up by 1 (confirm.bi presumably 0/1 -> 1/2; verify)
# Tidy
rm(day.zero)
df <- df %>%
  select(-c(int, day.zero)) %>% # remove redundant interval variable
  #filter(!is.na(state) & !is.na(t)) %>% # remove observation if state or time point is missing
  filter(t>=0) # !Note: check if need to fix errors in dates!
# Convert time (t) to integers then display
df$t <- floor(df$t)
hist(df$t)
# Change name to indicate this is day zero for subject
df <- df %>%
  mutate(t.subject = t) %>%
  select(-t)

### Time scale: House (t) — days since the household's first collected sample
# Define day zero for each household (=day first sample collected)
day.zero <- df %>%
  group_by(id_house) %>%
  filter(collection.dt == min(collection.dt)) %>%
  select(id_house, day.zero = collection.dt) %>%
  distinct(id_house, .keep_all = TRUE)
# Create variable for time from day zero for each sample
df <- df %>%
  left_join(day.zero, by="id_house") %>%
  mutate(
    int=interval(day.zero, collection.dt), # interval between recruitment date and collection date
    t=int/ddays(1))
# Tidy
rm(temp, day.zero)
df <- df %>%
  select(-int) %>% # remove interval variable
  filter(!is.na(state) & !is.na(t)) %>% # remove if state or time point is missing
  filter(t>=0) # !Note: check if need to fix errors in dates!
# Convert time (t) to integers then display
df$t <- floor(df$t)
hist(df$t)
# Examine difference between house and subject time
plot(df$t.subject, df$t)
df$difference <- df$t.subject - df$t # view
df <- df %>% select(-difference)
### EXPOSURES & COVARIATES ------------------------
# Define and 'factorise' fixed antibiotic exposure:
# collapse ciprofloxacin/norfloxacin into 'quinolone' and rename nitrofurantoin
cbind(table(df$exposure, useNA = 'always'))
df$exposure <- as.character(df$exposure)
df$exposure[df$exposure=='ciprofloxacin'] <- 'quinolone'
df$exposure[df$exposure=='norfloxacin'] <- 'quinolone'
df$exposure[df$exposure=='nitrofurantoin'] <- 'nitrofuran'
df$exposure <- factor(df$exposure,
                      levels=c('no.antibiotic', 'nitrofuran', 'quinolone'))
cbind(table(df$exposure, useNA = 'always'))
# Define and 'factorise' time-varying antibiotic exposure:
# TP1 = on antibiotic, TP2/TP3 = post-antibiotic
df$ab[df$exposure=="no.antibiotic"]<-"no.antibiotic"
df$ab[df$exposure=="quinolone" & df$time=="TP1"]<-"quinolone"
df$ab[df$exposure=="quinolone" & df$time=="TP2"]<-"post.quinolone"
df$ab[df$exposure=="quinolone" & df$time=="TP3"]<-"post.quinolone"
df$ab[df$exposure=="nitrofuran" & df$time=="TP1"]<-"nitrofuran"
df$ab[df$exposure=="nitrofuran" & df$time=="TP2"]<-"post.nitrofuran"
df$ab[df$exposure=="nitrofuran" & df$time=="TP3"]<-"post.nitrofuran"
df$ab <- factor(df$ab, levels=c("no.antibiotic", "nitrofuran", "post.nitrofuran", "quinolone", "post.quinolone"))
#df$exposure <- factor(df$exposure, levels=c("no.antibiotic", "nitrofurantoin", "ciprofloxacin"))
# Collapse previous antibiotic exposure: recode 'unsure' to 'no'
df %>% group_by(bl_ab12) %>% summarise(n=n()) %>% mutate(prop=round(n/sum(n),2))
df$bl_ab12[df$bl_ab12=='unsure'] <- 'no'
df %>% group_by(bl_ab12) %>% summarise(n=n()) %>% mutate(prop=round(n/sum(n),2))
df$bl_ab12 <- factor(as.character(df$bl_ab12),
                     levels = c('no', 'yes'))
### STATES ------------------------

### Semi-quantitative states: 1 when state==1; 2 when 0 < sq <= 0.001; 3 when sq > 0.001
df$state.sq3[df$state==1] <- 1
df$state.sq3[df$sq<=0.001 & df$sq>0] <- 2
df$state.sq3[df$sq>0.001] <- 3
df %>% group_by(state.sq3) %>% summarise(n=n()) %>% mutate(prop=round(n/sum(n),2))
df %>% filter(is.na(df$state.sq3)) %>% group_by(t) %>% summarise(n=n()) %>% mutate(prop=round(n/sum(n),2))

# Last observation carried forward (LOCF) within subject
# Adapted from: http://stackoverflow.com/questions/23818493/carry-last-observation-forward-by-id-in-r
# FIX: use library() rather than require() so a missing zoo package fails loudly
library(zoo)
df <- df %>% arrange(charID, t)
na.locf.na <- function(x, na.rm = FALSE, ...) na.locf(x, na.rm = na.rm, ...)
df <- df %>% group_by(charID) %>% mutate(state.sq3 = na.locf.na(state.sq3))
cbind(table(df$state.sq3, useNA = 'always'))

# Next observation carried backward (fills leading NAs; the original comment
# wrongly said "last observation carried forward" again)
# Adapted from above with 'fromLast=T'
df <- df %>% arrange(charID, t)
na.nocb.na <- function(x, na.rm = FALSE, ...) na.locf(x, fromLast=T, na.rm = na.rm, ...)
df <- df %>% group_by(charID) %>% mutate(state.sq3 = na.nocb.na(state.sq3))
cbind(table(df$state.sq3, useNA = 'always'))
df <- as.data.frame(df)

#### Three state S/None/R: 1 when growth (den != 0) and not state 2;
#### 2 when no growth (den == 0); 3 when state == 2 (presumably confirmed resistant — verify)
df$state.c3[df$den!=0 & df$state!=2] <- 1
df$state.c3[df$den==0] <- 2
df$state.c3[df$state==2] <- 3
cbind(table(df$state.c3, useNA = 'always'))
### FINAL TIDY -----------------------
# Keep subjects with >1 observation
count <- df %>% group_by(charID) %>% summarise(n_obs=n()) # count observations per subject
df <- df %>%
  left_join(count, by="charID") %>% # join number of observations to df
  filter(n_obs>1) %>% # keep subjects with >1 observation
  select(id_subject=charID, id_house, id_site, bl_sex, bl_age, bl_travel, bl_ab12, bl_residents, exposure, exposure.tv=ab, t.subject, t, state, state.c3, state.sq3, sq, den, num, reported.ab.stdt, reported.ab.eddt, collection.dt) # select relevant fields
### OUTPUT -----------------------
# save R data file
save(df, file="data/df.Rda")
# tidy: remove intermediate objects and helper functions from the workspace
rm(master, count, na.locf.na, na.nocb.na)
|
7fbed00dce0179f60d6639786198067fd300327a | 3510aff4046282d06759a8f2a59feef5df366402 | /SLR_Nithya_R_assignment 3rd Q.R | e10f2433971dc7490ac034620acd468214111c26 | [] | no_license | NithyaSivasubramanian/NitSiva | f231ad407c3936328f90625583d5c73b8e09cc03 | 7820064b23bcfde0a8e5544be096d857e6c8c377 | refs/heads/main | 2023-01-10T19:33:25.714427 | 2020-11-11T13:36:10 | 2020-11-11T13:36:10 | 311,958,236 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,006 | r | SLR_Nithya_R_assignment 3rd Q.R | getwd() # function we use to know the present working directory
setwd("C://Users//Assignments//Simple Linear Regression") # working directory with the assignment data
library(readxl)
library(readr) # (was loaded twice; once is enough)
library("MASS")
# ggplot for adding a regression line to the data
library(dplyr)
library(ggplot2)
# FIX: only install 'moments' when it is missing, instead of reinstalling on every run
if (!requireNamespace("moments", quietly = TRUE)) {
  install.packages("moments")
}
library(moments)
# Load the assignment data: salary hike vs employee churn-out rate
emp_data <- read.csv("C://Users//Assignments//Simple Linear Regression//emp_data.csv")
# Load emp_data.csv dataset
library(readr)
View(emp_data)
# Exploratory data analysis
summary(emp_data)
attach(emp_data)
#Scatter plot of the raw relationship
plot(emp_data$Salary_hike,emp_data$Churn_out_rate) # plot(X,Y)
?plot
attach(emp_data)
#Correlation Coefficient (r)
cor(Salary_hike, Churn_out_rate) # cor(X,Y)
# Simple Linear Regression model
# NOTE(review): here Salary_hike is the response, but every later model uses
# Churn_out_rate as the response — confirm which orientation is intended
reg <- lm(Salary_hike ~ Churn_out_rate) # lm(Y ~ X)
summary(reg)
pred <- predict(reg)
reg$residuals
sum(reg$residuals)
mean(reg$residuals)
sqrt(sum(reg$residuals^2)/nrow(emp_data)) #RMSE
sqrt(mean(reg$residuals^2)) # another way to find RMSE value
confint(reg,level=0.95)
predict(reg,interval="predict")
# ggplot for adding regression line for data
library(ggplot2)
?ggplot2
ggplot(data = emp_data, aes(x = Salary_hike , y = Churn_out_rate)) +
  geom_point(color='red') +
  geom_line(color='blue',data = emp_data, aes(x=Salary_hike, y=pred))
########################
# A simple ggplot code for directly showing the line
# ggplot(wc_at,aes(Waist,AT))+stat_summary(fun.data=mean_cl_normal) + geom_smooth(method='lm')
####################
# Logarithmic Model: Churn_out_rate ~ log(Salary_hike)
# (the Delivery.Time/Sorting.Time names below look like leftovers from another exercise)
# x = log(Delivery.Time); y = Sorting.Time
plot(log(Salary_hike), Churn_out_rate)
cor(log(Salary_hike), Churn_out_rate)
reg_log <- lm(Churn_out_rate ~ log(Salary_hike)) # lm(Y ~ log(X))
summary(reg_log)
predict(reg_log)
reg_log$residuals
sqrt(sum(reg_log$residuals^2)/nrow(emp_data)) #RMSE
confint(reg_log,level=0.95)
predict(reg_log,interval="confidence")
######################
# Exponential Model: log(Y) ~ X; predictions must be back-transformed with exp()
plot(Salary_hike, log(Churn_out_rate))
cor(Salary_hike, log(Churn_out_rate))
reg_exp <- lm(log(Churn_out_rate) ~ Salary_hike) #lm(log(Y) ~ X)
summary(reg_exp)
reg_exp$residuals
sqrt(mean(reg_exp$residuals^2)) # RMSE on the log scale
# predictions are on the log scale; exponentiate to return to the original scale
logat <- predict(reg_exp)
at <- exp(logat)
error = emp_data$Churn_out_rate - at
error
sqrt(sum(error^2)/nrow(emp_data)) #RMSE on the original scale
confint(reg_exp,level=0.95)
predict(reg_exp,interval="confidence")
##############################
# Polynomial model with 2 degree (quadratic model): log(Y) ~ X + X^2
plot(Salary_hike, Churn_out_rate)
plot(Salary_hike*Salary_hike, Churn_out_rate)
cor(Salary_hike*Salary_hike, Churn_out_rate)
plot(Salary_hike*Salary_hike, log(Churn_out_rate))
cor(Salary_hike, log(Churn_out_rate))
cor(Salary_hike*Salary_hike, log(Churn_out_rate))
# lm(Y ~ X + I(X*X) +...+ I(X*X*X...)) — I() protects arithmetic inside a formula
reg2degree <- lm(log(Churn_out_rate) ~ Salary_hike + I(Salary_hike*Salary_hike))
summary(reg2degree)
# back-transform the fitted values from the log scale
logpol <- predict(reg2degree)
expy <- exp(logpol)
err = emp_data$Churn_out_rate - expy
sqrt(sum(err^2)/nrow(emp_data)) #RMSE on the original scale
confint(reg2degree,level=0.95)
predict(reg2degree,interval="confidence")
# visualization
ggplot(data = emp_data, aes(x = Salary_hike + I(Salary_hike^2), y = log(Churn_out_rate))) +
  geom_point(color='blue') +
  geom_line(color='red',data = emp_data, aes(x=Salary_hike+I(Salary_hike^2), y=logpol))
##############################
# Polynomial model with 3 degree: log(Y) ~ X + X^2 + X^3
reg3degree<-lm(log(Churn_out_rate)~Salary_hike + I(Salary_hike*Salary_hike) + I(Salary_hike*Salary_hike*Salary_hike))
summary(reg3degree)
# back-transform the fitted values from the log scale
logpol3 <- predict(reg3degree)
expy3 <- exp(logpol3)
# visualization
ggplot(data = emp_data, aes(x = Salary_hike + I(Salary_hike^2) + I(Salary_hike^3), y = Churn_out_rate)) +
  geom_point(color='blue') +
  geom_line(color='red',data = emp_data, aes(x=Salary_hike+I(Salary_hike^2)+I(Salary_hike^3), y=expy3))
################################
|
95487d50cc511d00f0282681e4878d4bf1980365 | 91eb255fe719c4be1da12a8b30f4d0e35cee01dd | /man/TaxCIsummary.Rd | c21471631965006ea83a0d159e874abc14eaa45b | [] | no_license | eberlejonas/TaxCI | e3220abdcb7f71441cf9862fa4a9f212c4a36c7b | d5cb9efef5c850c402da9a8a3552a7c5e11ec765 | refs/heads/master | 2021-08-17T18:08:07.276642 | 2021-07-20T07:16:42 | 2021-07-20T07:16:42 | 87,522,366 | 3 | 1 | null | 2021-07-20T07:03:21 | 2017-04-07T08:18:29 | R | UTF-8 | R | false | false | 2,012 | rd | TaxCIsummary.Rd | \name{TaxCIsummary}
\alias{TaxCIsummary}
\title{
Statistics of a TaxCI-analysis
}
\description{
Get numbers of species in different kinds of clusters.
}
\usage{
TaxCIsummary(tree, x, sp.col = "binomial", BCCluster.result, plotParamTCI.result, plotParamBCC.result, analysis = "TaxCI.analysis", file=NA)
}
\arguments{
\item{tree}{
The tree that was used for the previous analyses.
}
\item{x}{
The table with taxonomic information (must contain a column with species names which is specified with sp.col). Must be the same one that was used in the previous analyses.
}
\item{sp.col}{
The column of the metadata that contains the species names.
}
\item{BCCluster.result}{
The list that resulted from a \code{\link{BCCluster}}-analysis.
}
\item{plotParamTCI.result}{
The output of \code{\link{plotParamTCI}}.
}
\item{plotParamBCC.result}{
The output of \code{\link{plotParamBCC}}.
}
\item{analysis}{
Optional. A name for the summarized analysis provided as character string.
}
\item{file}{
Optional. If a filename is given the table will be printed to it. Defaults to NA (table is returned as data.frame).
}
}
\value{
A data frame with the following entries:
\item{noMorphospecies}{Number of a priori identified species.}
\item{no.tci.positive.sp}{Number of species with TCI < 1.}
\item{no.cluster}{Number of barcode clusters.}
\item{no.homogen}{Number of homogeneous clusters (Containing only one species).}
\item{no.heterogen}{Number of heterogeneous clusters (Containing multiple species.}
\item{no.species.in.multiple.clusters}{Number of species that appear in more than one cluster.}
\item{no.species.in.multiple.hom.clusters}{Number of species that appear in more than one homogeneous cluster.}
\item{threshold}{The barcode-threshold that was used for clustering. "external" if clustering was done with an external method.}
\item{Q-measure}{A quality measure...}
}
\author{
Jonas Eberle
}
\seealso{
See \code{\link{TaxCI}} for example.
}
|
8dc724771341f3e71bfd74dab2f1f36fd5c198f4 | a64cab2ec424f20976477e228800f6b40be4c47b | /model_test/gminas_to_dev_by_model.R | 3082bff5085c3b3a03c0aadada1d0b1b1065a935 | [] | no_license | Kostee/projekt_interdyscyplinarny | 4a569c5e0bdb4f8ccad57f6611a83218c4f86afb | f0f8988101de51222fc1ae80269a3b3c3e5cf245 | refs/heads/main | 2023-03-23T02:35:35.752637 | 2021-03-15T19:25:33 | 2021-03-15T19:25:33 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 4,001 | r | gminas_to_dev_by_model.R | #set path if needed
#setwd("C:/Users/koste/Studia/07/PI/github/projekt_interdyscyplinarny/model_test")
# Trains a tuned random-forest (ranger) classifier on hand-labelled gminas,
# scores every gmina in the country, and joins the model ranking against
# expert rankings per gmina type, writing two CSV result tables.
library(dplyr)
library(randomForest)
library(mlr)
library(ranger)
library(tuneRanger)
library(ggplot2)
set.seed(1613)  # fixed seed so sampling/tuning results are reproducible
# data for regression model
df <- read.csv("../dochody_i_ludnosc_2.csv", encoding = "UTF-8") # 2522 gminas
train <- read.csv("../model_z_danymi_ze_zdj/new_train.csv") # 72 gminas - developed (1) / undeveloped (0)
# attach socio-economic features to the labelled training gminas
t2 <- train %>% left_join(df, by = c("id" = "Kod"))
#colnames(t2)
# drop identifiers, coordinates and duplicated join columns (.x/.y suffixes)
t3 <- subset(t2, select = -c(X.x, gmina, powiat, id, longitude.x, latitude.x,
                             longitude.y, latitude.y, X.y, Nazwa, X.1,
                             water.y, vegetation.y))
t3 <- na.omit(t3)
#colnames(t3)
# building a model: tune ranger hyperparameters, optimising the g-mean measure
classif_task <- makeClassifTask(data = t3, target = "wynik", positive = 1)
classif_lrn <- makeLearner("classif.ranger", par.vals = list( "num.trees" = 2500), predict.type = "prob")
res_ranger <- tuneRanger(classif_task, measure = list(gmean), num.threads = 6, num.trees = 2500)
# score every gmina in the country with the tuned model
df_test <- na.omit(subset(df, select = -X))
gminy <- df_test[,c("Nazwa")]
df_test <- subset(df_test, select = -c(Nazwa, Kod))
#colnames(df_test)
# rename feature columns so prediction data matches the training column names
colnames(df_test)[colnames(df_test) == 'water'] = 'water.x'
colnames(df_test)[colnames(df_test) == 'vegetation'] = 'vegetation.x'
pred_ranger <- predict(res_ranger$model, newdata = df_test)
#colnames(t3)
#colnames(df_test)
# per-gmina probability of class 1 ("developed"), paired with the gmina name
result <- as.data.frame(cbind(as.numeric(pred_ranger$data$prob.1), gminy))
colnames(result) = c("score", "name")
#result %>% arrange(name)
# Attention please! Some gminas appear more than once with the same name in the country
# However, it's only a few percentages. We will simply not take them into account
# (without loss of generality)
print(paste0("Before deleting repeated onces: ", nrow(result)))
resultFiltered <- result %>%
  group_by(name) %>%
  filter(n() < 2) %>%
  as.data.frame()
print(paste0("After deleting repeated onces: ", nrow(resultFiltered)))
print(paste0("Percent of gminas deleted: ", round((nrow(result)-nrow(resultFiltered))/nrow(result)*100, 2),'%'))
# Yay! Shouldn't make a difference
exDir <- "../rankingi_ekspertow_PW" # expert data folder
cities <- read.csv(paste0(exDir, "/miastaNaPrawachPowiatu.tsv"), sep = '\t', encoding = 'UTF-8')
urban <- read.csv(paste0(exDir, "/gminyMiejskie.tsv"), sep = '\t', encoding = 'UTF-8')
urban_rural <- read.csv(paste0(exDir, "/gminyMiejskoWiejskie.tsv"), sep = '\t', encoding = 'UTF-8')
rural <- read.csv(paste0(exDir, "/gminyWiejskie.tsv"), sep = '\t', encoding = 'UTF-8')
all_gminas <- list(cities, urban, urban_rural, rural)
gminas_types <- c("cities", "urban", "urban_rural", "rural")
# NOTE(review): this loop counter masks base::which for the rest of the script
which <- 1
ourDevNotDevResults <- data.frame()
ourDevProbResults <- data.frame()
for(gmina in all_gminas){
  current_type <- gminas_types[which]
  print(paste0("Analyzing gminas type: ", current_type))
  # keep only gminas of this type that also have a model score
  gmina <- gmina %>%
    select(-'Województwo') %>%
    right_join(resultFiltered, by = c("Gmina" = "name")) %>%
    na.omit()
  nOfRows <- nrow(gmina)
  # "their" = expert rank (input row order); "our" = model rank (recomputed
  # below after sorting by score)
  gmina <- gmina %>%
    mutate(Miejsce.w.rankingu = 1:nOfRows,
           their = 1:nOfRows) %>%
    rename("our" = "Miejsce.w.rankingu", "gmina" = "Gmina")
  gmina <- gmina[order(gmina$score, decreasing=TRUE),] %>%
    mutate(our = 1:nOfRows,
           score = round(as.double(score), 5))
  # binary developed/undeveloped label: threshold the score at 0.5
  thisTypesDevOrNot <- gmina %>%
    mutate(score = ifelse(score <= 0.5, 0, 1),
           gminaType = current_type) %>%
    select(-c(our,their)) %>%
    rename(powiat = Powiat)
  # raw probability version of the same table
  thisTypeDevProb <- gmina %>%
    mutate(gminaType = current_type) %>%
    select(-c(our,their)) %>%
    rename(powiat = Powiat)
  print(sample_n(thisTypeDevProb, 10))
  #print(sample_n(gmina, 10))
  ourDevNotDevResults <- rbind(ourDevNotDevResults, thisTypesDevOrNot)
  ourDevProbResults <- rbind(ourDevProbResults, thisTypeDevProb)
  which <- which + 1
}
#print(ourDevNotDevResults)
write.csv2(ourDevNotDevResults, "gminasDevClasByModel.csv")
write.csv2(ourDevProbResults, "gminasDevRegByModel.csv")
18812b2d6ea4666fee4b496154cec5b47abee2a2 | 78d7f133f4cace12bc40150aa286499d5b4f087d | /metabolomics_toolbox/code/net_ana/scripts/network_simu_parallel_large_partial_mutipeak/simu_network_testcluster.R | 9db87cc84bed323b76d16d2bce5096f5bd64334e | [
"MIT"
] | permissive | HealthVivo/Edison_Lab_Shared_Metabolomics_UGA | 4890a438a6452c5d77932df784a01b78319c3000 | 7d762798486849a5f3a11c9f4944d366c69c38c6 | refs/heads/master | 2023-07-21T19:35:54.050001 | 2021-08-23T13:02:31 | 2021-08-23T13:02:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,203 | r | simu_network_testcluster.R | # script for use simulated network to test/evaluate CausalKinetiX and clustering
# the simulation will produce dynamics based random networks with clusters
# CausalKinetiX is then used to recover the edges and clusters in the network
## to run the code here, the user need proper HPC environments, corresponding submit.sh, and the input data
## For running the bootstrapping locally or on other HPCs, please adapt the script correspondingly
rm(list=ls())
options(warn=1)
options(stringsAsFactors=FALSE)
options(digits=15)
require(stringr)
require(magrittr)
require(deSolve)
require(igraph)
require(CausalKinetiX)
require(ggplot2)
require(reshape2)
require(foreach)
require(doMC)
registerDoMC(cores=20)
source("/PATH/TO/network_simu.R")#the corrresponding functions on network reconstruction
#
dir=getwd()
args=commandArgs(trailingOnly=TRUE)
print(args)
if(length(args)<1){
stop("At least one argument must be supplied for the network number", call.=FALSE)
}
#
inetwork=as.numeric(args)#different random network controled within different parallel scripts
n_node=100#number of nodes
compds=seq(n_node)
cluster_size_vec=c(40,20,20)#cluter size
p_edge_vec=c(0.15,0.015)#probability for within cluter edges and outside cluster edges
p_regu=0.2#probability for regulation
p_comb=0.2#probability for reaction combination
krange=c(0,1)#range of kinetic parameters
ncondiseq=c(2,10,40,100)#number of conditions in simulation
nrep=3#number of replicates
times=seq(from=0,to=5,by=0.2)#the time sequence
ntimes=length(times)
randseed=1
set.seed(randseed)
# multiple peaks for the same compound
nrepeat=sample(c(1,2,3,4,5),n_node,replace=TRUE)
repinfor=list()
peakvec=rep(seq(n_node),times=nrepeat)
repinfor[["peakvec"]]=peakvec
repinfor[["peakfactor"]]=c(0.3,3)
# partial observation of peaks. the user can change the proportion of observation here or even make it completely observable
nobs=floor(length(peakvec)/2)#number of observed peaks
obs_seq=sample(length(peakvec),nobs,replace=FALSE)
#
para=list(max.preds=TRUE,expsize=2,#expected number of term in the summation
interactions=TRUE,pen.degree=3,smooth.Y=TRUE,
integrated.model=FALSE,#derivative based objective function
screening=30,#screening predictor size
silent=TRUE)
featurefilt_pval=0.05#filter the feature score to select connected features
#
listressep=vector(mode="list")
for(ncondi in ncondiseq){##different conditions
templist=vector(mode="list")
inforlist=network_constr(n_node=n_node,cluster_size_vec=cluster_size_vec,p_edge_vec=p_edge_vec,p_regu=p_regu,p_comb=p_comb,krange=krange,randseed=randseed+inetwork)
edgetab=inforlist[["edgetab"]]
templist[["clustseq"]]=rep(inforlist[["clustseq"]],times=nrepeat)
templist[["simu_network"]]=edgetab
templist[["peakvec"]]=peakvec
# df.g=graph.data.frame(d=edgetab[,c("from","to")],directed=TRUE)
# plot(df.g,vertex.label=V(df.g)$name)
function_str=react_constr(edgetab,n_node)
cond_vec=rep(seq(ncondi),each=nrep)
unicond=unique(cond_vec)
# initial conditions
iniarra=matrix(NA,nrow=n_node,ncol=length(unicond))
for(cond_i in seq(length(unicond))){
condvec=runif(n_node,min=krange[1],max=krange[2])
iniarra[,cond_i]=condvec
}
matdata=ode_simu(odefunc=function_str,cond_vec=cond_vec,times=times,iniarra=iniarra,repinfor=repinfor)
obs_ind=unlist(sapply(obs_seq,simplify=FALSE,function(x){
seq(from=ntimes*(x-1)+1,to=ntimes*x)
}))
matdata_obs=matdata[,obs_ind]
# searching for edges
inforlist<-foreach(compdi=seq(nobs))%dopar%{#
compd=obs_seq[compdi]
cat(compd)
ck.fit<-CausalKinetiX(matdata_obs,times-min(times),cond_vec,compdi,pars=para)#
scores=ck.fit[["variable.scores"]]
seleind=which(scores<featurefilt_pval)
selefeature=ck.fit[["ranking"]][seleind]
list(detailres=ck.fit,compd=compd,compdi=compdi,selefeature=selefeature)
}
save(inforlist,file=paste0("./","storedata_network_",inetwork,"_condlen_",ncondi,".RData"))
# templist[["causalkinetix_res"]]=inforlist
## format the data into network link matrix
assoc_mat=matrix(0,nrow=nobs,ncol=nobs)##row Y col X
resstorelist=vector(mode="list")
for(resi in seq(length(inforlist))){
tempifor=inforlist[[resi]]
assoc_mat[tempifor[["compdi"]],tempifor[["selefeature"]]]=1
resstorelist[[tempifor[["compdi"]]]]=tempifor[["detailres"]]
}
# scale the histogram
assoc_mat_sym=(assoc_mat+t(assoc_mat))/2
assoc_mat_sym[lower.tri(assoc_mat_sym)]=0;
sourcenod=c()
targetnode=c()
assovec=c()
for(i in 1:nobs){
edgeind=which(assoc_mat_sym[i,]>0)
sourcenod=c(sourcenod,rep(obs_seq[i],times=length(edgeind)))
targetnode=c(targetnode,obs_seq[edgeind])
assovec=c(assovec,assoc_mat_sym[i,edgeind])
}
nettab=data.frame(source=sourcenod,target=targetnode,association=assovec)
save(nettab,file=paste0("./","net.CausalKinetiX_network_",inetwork,"_condlen_",ncondi,".RData"))
write.table(nettab,file=paste0("./","net.CausalKinetiX_network",inetwork,"_condlen_",ncondi,".txt"),sep="\t",row.names=FALSE,quote=FALSE)
templist[["nettab"]]=nettab
listressep[[paste0(inetwork,"_",ncondi)]]=templist
}
save(listressep,file=paste0("./","net.CausalKinetiX.network_",inetwork,".RData"))
|
ce38f1c850a768968822ad514022690f1f966da8 | 771c05fa7b58f8f2dab7938da389e9e72b3cf3d4 | /Rvasp/R/periodictablehandler.R | 9ac9cc6239fc6539bc87b04c55f3231f6a63d17c | [
"MIT"
] | permissive | gokhansurucu/Rvasp | 56a75b10daa606768791935530bd108204d92f4f | 8983440a96ca8acf017f47af8dbfd3f32faaad22 | refs/heads/master | 2020-04-08T21:14:33.155967 | 2014-03-14T20:08:59 | 2014-03-14T20:08:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,162 | r | periodictablehandler.R | #cat(paste0("elements <- do.call(rbind,list(",paste(apply(elements,1,function(x)paste0("c(\"",paste(x,collapse="\",\""),"\")")),collapse=","),"))"))
#' Draws a periodic table
#'
#' \code{plot.periodictable} Draws a periodic table.
#' There are two ways of changing an entry. Highlight and underlay.
#' Highlight colors the box of an entry and allows additional text in that box.
#' Underlay puts a color beneath that box.
#'
#' For both parameters a vector of (element) numbers or names can be used.
#'
#' For very individual ploting use \code{typ="n"} and see \code{\link{plot.periodictable.addelements}},
#' \code{\link{plot.periodictable.addunderlay}} and \code{\link{periodictable.getremainingelements}}
#'
#' @param highlights vector of (element) numbers or names
#' @param col.highlight color of highlighted elements
#' @param highlighttexts text for highlighted elements list of vectors each containing one or two entries. List is recycled along highlights.
#' @param col.rows color of highlight texts
#' @param underlay vector of (element) numbers or names
#' @param col.underlay color of rectangle underlaying elements
#' @param col color of all not highlighted elements
#' @param col.Lanthanoid underlay color of rectangle underlaying Lanthanoids
#' @param col.Actinoid underlay color of rectangle underlaying Actinoids
#' @param rectmar margin of boxes
#' @param text.cex size of element names
#' @param rows.cex size of highlight texts
#' @param round if highlight texts are numeric type, value used for rounding
#' @param typ "n" for suppress plotting
#' @export
plot.periodictable <- function(
  highlights=NULL,
  highlighttexts=NULL,
  underlay=NULL,
  col="grey80",
  col.highlight="black",
  col.underlay="red",
  col.Lanthanoid="grey60",
  col.Actinoid="grey90",
  rectmar=0.05,
  xlab="",
  ylab="",
  xaxs="i",
  yaxs="i",
  text.cex=1,
  rows.cex=0.8,
  col.rows="black",
  round=NULL,
  typ="",
  ...){
  # recycle the box margin to length 4 (one value per box edge)
  rectmar <- rep(rectmar,length=4)
  # empty canvas: 18 group columns; periods run downward (y is negated, so
  # period 1 sits at the top), with extra space at y < -7.5 for the two
  # detached Lanthanoid/Actinoid rows
  plot(c(0,18),c(-9.5,0),typ="n",xlab=xlab,ylab=ylab,asp=1,bty="n",xaxs=xaxs,yaxs=yaxs,xaxt="n",yaxt="n",...)
  # all elements that are not highlighted get the default style
  normal <- periodictable.getremainingelements(highlights)
  # placeholder cells (column 3 of periods 6/7) and background bands for the
  # detached series rows.
  # NOTE(review): the period-6 placeholder and the first detached band (the
  # Lanthanoid positions) are filled with col.Actinoid and vice versa --
  # possibly swapped; confirm the intended colour assignment.
  rect(2+rectmar[2],-(6-rectmar[1]),3-rectmar[4],-(5+rectmar[[3]]),col= col.Actinoid,border=NA)
  rect(3,-7.5,18,-8.5,col= col.Actinoid,border=NA)
  rect(2+rectmar[2],-(7-rectmar[1]),3-rectmar[4],-(6+rectmar[[3]]),col=col.Lanthanoid,border=NA)
  rect(3,-8.5,18,-9.5,col=col.Lanthanoid,border=NA)
  if(!is.null(underlay))
  {
    # draw coloured backing rectangles beneath the selected elements
    plot.periodictable.addunderlay(underlay,col=col.underlay)
  }
  if(typ!="n")  # typ = "n" suppresses drawing of the element boxes entirely
  {
    plot.periodictable.addelements(normal,col=col,rectmar=rectmar,text.cex=text.cex,rows.cex=rows.cex)
    plot.periodictable.addelements(highlights,rows=highlighttexts,col=col.highlight,rectmar=rectmar,text.cex=text.cex,rows.cex=rows.cex,rows.col=col.rows,round=round)
  }
}
#' Adds elements to an existing periodic table
#'
#' \code{plot.periodictable.addelements} Adds elements to an existing periodic table
#'
#'
#' @param elements vector of (element) numbers or names
#' @param col color of selected elements
#' @param text additional text for selected elements list of vectors each containing one or two entries. List is recycled along selected elements
#' @param rows.col color of selected texts
#' @param fill color of the filling of the box
#' @param rows.cex size of texts
#' @param text.cex size of element name
#' @param round if highlight texts are numeric type, value used for rounding
#' @export
#' @seealso \code{\link{plot.periodictable.addunderlay}} and \code{\link{periodictable.getremainingelements}}
plot.periodictable.addelements <- function(elements,
                                           text=periodictable.getelementnames(elements),
                                           rows=NULL,
                                           rectmar=0.05,
                                           col="black",
                                           fill="white",
                                           rows.col="black",
                                           rows.cex=0.8,
                                           text.cex=1,
                                           round=NULL){
  # recycle the box margin to length 4 (one value per box edge)
  rectmar <- rep(rectmar,length=4)
  positions <- periodictable.getelementpositions(elements)
  # draw one box (plus labels) per requested element
  lapply(1:nrow(positions),FUN=function(i)
  {
    # element box, inset by rectmar so neighbouring boxes do not touch
    rect(positions[i,1]-1+rectmar[2],-(positions[i,2]-rectmar[1]),positions[i,1]-rectmar[4],-(positions[i,2]-1+rectmar[[3]]),border=col,col=fill)
    # default: element name centred in the box (pos = NULL, offset unused)
    offset <- 0.5
    offset2 <- -0.4
    pos <- NULL
    # NOTE(review): rows is indexed per element without recycling; a rows
    # list shorter than `elements` raises "subscript out of bounds" --
    # confirm whether recycling (as the roxygen above claims) was intended.
    if(!is.null(rows[[i]]))
    {
      # extra text rows present: move the element name to the top of the box
      pos <- 3
      offset <- 0.1
      tmpcol <- rep(rows.col,length.out=length(rows[[i]]))
      for(j in 1:length(rows[[i]]))
      {
        # each additional row is drawn a bit further below the centre
        offset2 <- offset2 + 0.7*rows.cex
        r <- rows[[i]][j]
        # purely numeric-looking row text is rounded when `round` is given
        if(!is.null(round)&length(grep("^[[:digit:].[:space:]]*$", r))>0)
        {
          r <- round(as.numeric(r),round)
        }
        text(positions[i,1]-0.5,-positions[i,2]+0.5,r,pos=1,col=tmpcol[[j]],offset=offset2,cex=rows.cex)
      }
    }
    text(positions[i,1]-0.5,-positions[i,2]+0.5,text[i],pos=pos,col=col,offset=offset,cex=text.cex)
  })
}
#' Draws rectangular underlayings in existing periodic table
#'
#' \code{plot.periodictable.addunderlay} Draws rectangles underlaying selected elements in existing periodic table.
#'
#' @param underlay vector of (element) numbers or names
#' @param col color of rectangle underlaying elements
#' @export
#' @seealso \code{\link{plot.periodictable.addelements}} and \code{\link{periodictable.getremainingelements}}
plot.periodictable.addunderlay <- function(underlay, col = "red") {
  # Map the requested elements to their (column, row) cells in the table.
  positions <- periodictable.getelementpositions(underlay)
  # seq_len() is safe when positions has zero rows; the previous
  # 1:nrow(positions) would have produced c(1, 0) and indexed out of range.
  for (i in seq_len(nrow(positions))) {
    # from left-bottom to right-top; pad by 0.001 so the underlay fully
    # covers the cell, with no hairline gap at the box border
    rect(positions[i, 1] - 1 - 0.001, -(positions[i, 2]) - 0.001,
         positions[i, 1] + 0.001, -(positions[i, 2] - 1) + 0.001,
         border = NA, col = col, lwd = 0)
  }
  # called for its plotting side effect only; suppress the useless
  # list-of-NULLs value the old lapply() version returned (and printed)
  invisible(NULL)
}
#' gives you the name of elements
#'
#' \code{periodictable.getelementnames} gives you the name of elements.
#'
#' @param element vector of (element) numbers
#' @export
periodictable.getelementnames <- function(element)
{
  # Element symbols 1..109 in atomic-number order. The original code built a
  # full 109x3 table (number, symbol, electron configuration) here but only
  # ever read the symbol column, so only that column is kept.
  symbols <- c("H","He","Li","Be","B","C","N","O","F","Ne",
               "Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca",
               "Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn",
               "Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr",
               "Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn",
               "Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd",
               "Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb",
               "Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg",
               "Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th",
               "Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm",
               "Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt")
  # resolve numbers/names to indices; NULL in gives character(0) out,
  # unmatched names give NA -- same behaviour as the table-based version
  selector <- periodictable.getelementselector(element)
  return(symbols[selector])
}
#' gives you a selector for specific elements
#'
#' \code{periodictable.getelementselector} gives you a selector for specific elements.
#'
#' @param element vector of (element) numbers or names
#' @export
periodictable.getelementselector <- function(element)
{
  # NULL in -> NULL out, so callers can pass an optional selection through
  if(is.null(element))
  {
    return(NULL)
  }
  # Element symbols 1..109 in atomic-number order. The original code built a
  # full 109x3 table (number, symbol, electron configuration) here but only
  # ever matched against the symbol column, so only that column is kept.
  symbols <- c("H","He","Li","Be","B","C","N","O","F","Ne",
               "Na","Mg","Al","Si","P","S","Cl","Ar","K","Ca",
               "Sc","Ti","V","Cr","Mn","Fe","Co","Ni","Cu","Zn",
               "Ga","Ge","As","Se","Br","Kr","Rb","Sr","Y","Zr",
               "Nb","Mo","Tc","Ru","Rh","Pd","Ag","Cd","In","Sn",
               "Sb","Te","I","Xe","Cs","Ba","La","Ce","Pr","Nd",
               "Pm","Sm","Eu","Gd","Tb","Dy","Ho","Er","Tm","Yb",
               "Lu","Hf","Ta","W","Re","Os","Ir","Pt","Au","Hg",
               "Tl","Pb","Bi","Po","At","Rn","Fr","Ra","Ac","Th",
               "Pa","U","Np","Pu","Am","Cm","Bk","Cf","Es","Fm",
               "Md","No","Lr","Rf","Db","Sg","Bh","Hs","Mt")
  # numeric input is already a selector (atomic numbers); character input is
  # matched against the symbol list (unknown symbols yield NA, as before)
  selector <- element
  if(!is.numeric(selector))
    selector <- match(element, symbols)
  return(selector)
}
#' gives you a selector for all but specific elements
#'
#' \code{periodictable.getremainingelements} gives you a selector for all but specific elements.
#'
#' @param element vector of (element) numbers or names
#' @export
periodictable.getremainingelements <- function(element)
{
  # Total number of elements the table knows about (H .. Mt)
  elementscount <- 109
  # resolve numbers/names to atomic numbers; NULL means "nothing selected"
  selector <- periodictable.getelementselector(element)
  # seq_len() instead of 1:elementscount (safe for any count); the dead
  # `which(element)` branch of the old code is gone: the selector is always
  # numeric or NULL here
  remaining <- seq_len(elementscount)
  if(!is.null(selector))
  {
    # drop names that matched no element (match() returns NA for them);
    # the old code crashed on `remaining[-NA]` in that case
    selector <- selector[!is.na(selector)]
    if(length(selector) > 0)
    {
      remaining <- remaining[-selector]
    }
  }
  return(remaining)
}
#' gives you the positions of specific elements in a periodic table
#'
#' \code{periodictable.getelementpositions} gives you the position of specific elements in a periodic table.
#'
#' @param element vector of (element) numbers or names
#' @export
periodictable.getelementpositions<-function(element)
{
  # Map each requested element to its (column, row) cell of the plotted
  # periodic table by parsing the electron-configuration string in column 3
  # of the table below.
  #
  # BUG FIX: the Lu entry used to contain a hard line break inside the
  # string ("[Xe] 4f14 5d \n6s2"); the token "\n6s2" then parsed to NA and
  # the later `if(row==1&&col>1)` errored with "missing value where
  # TRUE/FALSE needed" for any selection containing element 71. The string
  # is now on one line, matching the format of the other entries.
  elements <- do.call(rbind,list(c("1","H","1s1"),c("2","He","1s2"),c("3","Li","[He] 2s1"),c("4","Be","[He] 2s2"),c("5","B","[He] 2s2 2p1"),c("6","C","[He] 2s2 2p2"),c("7","N","[He] 2s2 2p3"),c("8","O","[He] 2s2 2p4"),c("9","F","[He] 2s2 2p5"),c("10","Ne","[He] 2s2 2p6"),c("11","Na","[Ne] 3s1"),c("12","Mg","[Ne] 3s2"),c("13","Al","[Ne] 3s2 3p1"),c("14","Si","[Ne] 3s2 3p2"),c("15","P","[Ne] 3s2 3p3"),c("16","S","[Ne] 3s2 3p4"),c("17","Cl","[Ne] 3s2 3p5"),c("18","Ar","[Ne] 3s2 3p6"),c("19","K","[Ar] 4s"),c("20","Ca","[Ar] 4s2"),c("21","Sc","[Ar] 3d 4s2"),c("22","Ti","[Ar] 3d2 4s2"),c("23","V","[Ar] 3d3 4s2"),c("24","Cr","[Ar] 3d5 4s"),c("25","Mn","[Ar] 3d5 4s2"),c("26","Fe","[Ar] 3d6 4s2"),c("27","Co","[Ar] 3d7 4s2"),c("28","Ni","[Ar] 3d8 4s2"),c("29","Cu","[Ar] 3d10 4s"),c("30","Zn","[Ar] 3d10 4s2"),c("31","Ga","[Ar] 3d10 4s2 4p"),c("32","Ge","[Ar] 3d10 4s2 4p2"),c("33","As","[Ar] 3d10 4s2 4p3"),c("34","Se","[Ar] 3d10 4s2 4p4"),c("35","Br","[Ar] 3d10 4s2 4p5"),c("36","Kr","[Ar] 3d10 4s2 4p6"),c("37","Rb","[Kr] 5s"),c("38","Sr","[Kr] 5s2"),c("39","Y","[Kr] 4d 5s2"),c("40","Zr","[Kr] 4d2 5s2"),c("41","Nb","[Kr] 4d4 5s"),c("42","Mo","[Kr] 4d5 5s"),c("43","Tc","[Kr] 4d6 5s"),c("44","Ru","[Kr] 4d7 5s"),c("45","Rh","[Kr] 4d8 5s"),c("46","Pd","[Kr] 4d10"),c("47","Ag","[Kr] 4d10 5s"),c("48","Cd","[Kr] 4d10 5s2"),c("49","In","[Kr] 4d10 5s2 5p"),c("50","Sn","[Kr] 4d10 5s2 5p2"),c("51","Sb","[Kr] 4d10 5s2 5p3"),c("52","Te","[Kr] 4d10 5s2 5p4"),c("53","I","[Kr] 4d10 5s2 5p5"),c("54","Xe","[Kr] 4d10 5s2 5p6"),c("55","Cs","[Xe] 6s"),c("56","Ba","[Xe] 6s2"),c("57","La","[Xe] 5d 6s2"),c("58","Ce","[Xe] 4f2 6s2"),c("59","Pr","[Xe] 4f3 6s2"),c("60","Nd","[Xe] 4f4 6s2"),c("61","Pm","[Xe] 4f5 6s2"),c("62","Sm","[Xe] 4f6 6s2"),c("63","Eu","[Xe] 4f7 6s2"),c("64","Gd","[Xe] 4f7 5d 6s2"),c("65","Tb","[Xe] 4f9 6s2"),c("66","Dy","[Xe] 4f10 6s2"),c("67","Ho","[Xe] 4f11 6s2"),c("68","Er","[Xe] 4f12 6s2"),c("69","Tm","[Xe] 4f13 6s2"),c("70","Yb","[Xe] 4f14 6s2"),c("71","Lu","[Xe] 4f14 5d 6s2"),c("72","Hf","[Xe] 4f14 5d2 6s2"),c("73","Ta","[Xe] 4f14 5d3 6s2"),c("74","W","[Xe] 4f14 5d4 6s2"),c("75","Re","[Xe] 4f14 5d5 6s2"),c("76","Os","[Xe] 4f14 5d6 6s2"),c("77","Ir","[Xe] 4f14 5d7 6s2"),c("78","Pt","[Xe] 4f14 5d9 6s"),c("79","Au","[Xe] 4f14 5d10 6s"),c("80","Hg","[Xe] 4f14 5d10 6s2"),c("81","Tl","[Xe] 4f14 5d10 6s2 6p"),c("82","Pb","[Xe] 4f14 5d10 6s2 6p2"),c("83","Bi","[Xe] 4f14 5d10 6s2 6p3"),c("84","Po","[Xe] 4f14 5d10 6s2 6p4"),c("85","At","[Xe] 4f14 5d10 6s2 6p5"),c("86","Rn","[Xe] 4f14 5d10 6s2 6p6"),c("87","Fr","[Rn] 7s"),c("88","Ra","[Rn] 7s2"),c("89","Ac","[Rn] 6d 7s2"),c("90","Th","[Rn] 6d2 7s2"),c("91","Pa","[Rn] 5f2 6d 7s2"),c("92","U","[Rn] 5f3 6d 7s2"),c("93","Np","[Rn] 5f4 6d 7s2"),c("94","Pu","[Rn] 5f6 7s2"),c("95","Am","[Rn] 5f7 7s2"),c("96","Cm","[Rn] 5f7 6d 7s2"),c("97","Bk","[Rn] 5f9 7s2"),c("98","Cf","[Rn] 5f10 7s2"),c("99","Es","[Rn] 5f11 7s2"),c("100","Fm","[Rn] 5f12 7s2"),c("101","Md","[Rn] 5f13 7s2"),c("102","No","[Rn] 5f14 7s2"),c("103","Lr","[Rn] 5f14 6d 7s2"),c("104","Rf","[Rn] 5f14 6d2 7s2"),c("105","Db","[Rn] 5f14 6d3 7s2"),c("106","Sg","[Rn] 5f14 6d4 7s2"),c("107","Bh","[Rn] 5f14 6d5 7s2"),c("108","Hs","[Rn] 5f14 6d6 7s2"),c("109","Mt","[Rn] 5f14 6d7 7s2")))
  selector<- periodictable.getelementselector(element)
  data <- lapply(elements[selector,3],function(x){
    # split the configuration into subshell tokens and drop the leading
    # noble-gas core token (e.g. "[Xe]") if present
    s <- strsplit(x," ")[[1]]
    if(length(grep("[",s[1],fixed=TRUE))>0)
    {
      s <- s[-1]
    }
    # split each token into characters: shell number, subshell letter,
    # occupation; an omitted occupation means 1, two-digit occupations
    # (e.g. "3d10") are recombined into a single number
    s <- strsplit(s,"")
    s <- lapply(s,function(x){if(length(x)<3)
      x[3]<- 1
      if(length(x)>3)
      {
        x[3]<-as.numeric(x[3])*10+as.numeric(x[4])
        x <- x[1:3]
      }
      return(x)})
    s <- do.call(rbind,s)
    # row = shell number of the last listed subshell; when that subshell is
    # a d-shell the element belongs to the next period (e.g. Pd "[Kr] 4d10")
    row <- as.numeric(s[nrow(s),1])
    if(s[nrow(s),2]=="d")
      row <- row+1
    # column = total electrons outside the noble-gas core
    col <- sum(as.numeric(s[,3]))
    # period 1: push He from column 2 to group 18
    if(row==1&&col>1)
    {
      col <- col+16
    }
    # periods 2-3: skip the empty transition-metal block (groups 3-12)
    if((row==2||row==3)&&col>2)
    {
      col <- col+10
    }
    # periods 6-7: the f-block series moves to the detached rows below the
    # main table (row + 2.5); elements after the series shift back by 14
    if(row==6 || row==7)
    {
      if(col>2 && col<18)
      {
        row <- row+2.5
        col <- col+1
      }
      else{
        if(col>17)
          col <- col-14
      }
    }
    return(c(col,row))
  })
  # one (col, row) pair per requested element, as a two-column matrix
  data <- do.call(rbind,data)
  #data<- cbind(data,elements[selector,2])
  return(data)
}
|
df3463ed75cc92d6a936125ecec49e1df10d37a5 | 838293db88714aaab198a6aae6f3a7907c292e9a | /segunda parte/regresion 2 clase/homework.R | c52a38752105ea08c8cc623181a2e1f40b4b1f0e | [] | no_license | David9203/multivariable | f0d51a8826a27dc0518b9cb3cf8ee7b30f3c41d8 | d117810e7fa1b966f965fd6e6bd9886892628dba | refs/heads/master | 2020-05-20T07:04:15.090974 | 2019-08-18T18:47:19 | 2019-08-18T18:47:19 | 185,443,118 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,702 | r | homework.R | library(factanal)
library(factoextra)
library(psy)
library(nFactors)
library(GPArotation)
library(Rgraphviz)
library(stats)
library(lmtest)
library(normtest)
library(randtests)
library(car)
library(EnvStats)
datos<-read.csv(file=file.choose(),header=T,sep=';')
attach(datos)
head(datos)
row.names(datos) <- datos[,c(1)]
# Plot the pairwise-correlation heatmap of a numeric data frame / matrix.
#
# Args:
#   datanum: numeric data accepted by cor() (data frame or matrix).
# Returns:
#   A ggplot object from ggcorrplot::ggcorrplot() with correlation labels.
correlationanalisis <- function(datanum){
  corr_matrix <- cor(datanum)  # pairwise correlation matrix
  # (the old bare `corr_matrix` line was a no-op inside a function -- values
  # are not auto-printed there -- so it has been removed)
  library(ggcorrplot)  # for the correlation heatmap
  ggcorrplot(corr_matrix, lab = TRUE, lab_size = 2)
}
Data <- subset( datos, select = -foreign ) ## drop this variable, it contributes nothing
head(Data)
# numeric columns used for normality checks and factor analysis
values<-Data[,3:length(Data)]
correlationanalisis(values)
# Per-column normality checks; each lam value below was read off a previous
# powerTransform() run and hard-coded, then applied via boxcoxTransform().
shapiro.test(values[,1])
powerTransform(values[,1])
lam= -0.3533898
valu1= boxcoxTransform(values[,1],lam)
shapiro.test(valu1)
shapiro.test(log(values[,2]))
powerTransform(values[,2])
lam= 0.6197182
valu2= boxcoxTransform(values[,2],lam)
shapiro.test(valu2)
ks.test(valu2,pnorm,mean(valu2),sd(valu2))
powerTransform(values[,3])
lam= 0.8142939
valu3= boxcoxTransform(values[,3],lam)
shapiro.test(valu3)
shapiro.test(values[,4])
shapiro.test(log(values[,4]))
powerTransform(values[,4])
lam= 0.5026624
valu4= boxcoxTransform(values[,4],lam)
shapiro.test(valu4)
ks.test(valu4,pnorm,mean(valu4),sd(valu4))
# column 5 kept untransformed
i=5
shapiro.test(values[,i])
valu5=values[,5]
i=6
shapiro.test(values[,i])
shapiro.test(log(values[,i]))
powerTransform(values[,i])
lam= 0.2986733
valu6= boxcoxTransform(values[,i],lam)
shapiro.test(valu6)
ks.test(valu6,pnorm,mean(valu6),sd(valu6))
i=7
shapiro.test(values[,i])
shapiro.test(log(values[,i]))
powerTransform(values[,i])
lam= 0.001216884
valu7= boxcoxTransform(values[,i],lam)
shapiro.test(valu7)
ks.test(valu7,pnorm,mean(valu7),sd(valu7))
i=8
shapiro.test(values[,i])
shapiro.test(log(values[,i]))
powerTransform(values[,i])
lam= 0.09708738
valu8= boxcoxTransform(values[,i],lam)
shapiro.test(valu8)
ks.test(valu8,pnorm,mean(valu8),sd(valu8))
i=9
shapiro.test(values[,i])
shapiro.test(log(values[,i]))
powerTransform(values[,i])
lam= 0.7186669
valu9= boxcoxTransform(values[,i],lam)
shapiro.test(valu9)
ks.test(valu9,pnorm,mean(valu9),sd(valu9))
i=9
ks.test(values[,i],pnorm,mean(values[,i]),sd(values[,i]))
data.frame(log(Consumption),log(Investment),log(Governmemt_expenditure),log(Population_000s))
# assemble the transformed columns into one data frame
valuesbox=data.frame(valu1,valu2,valu3,valu4,valu5,valu6,valu7,valu8,valu9)
correlationanalisis(valuesbox)
summary(valuesbox)
x=valuesbox
for (i in 1:length(x)) { ## rescale each column to the [0, 1] range
#datos[i]=dates[i]-min(dates[i]) # to make all values positive
x[i] = (x[i]-min(x[i]))/(max(x[i])-min(x[i])) # to scale all values to [0, 1]
}
head(x)
summary(x)
correlationanalisis(x)
# factorability checks and the factor-analysis / PCA fits
fa.parallel(x)
KMO(valuesbox)
cortest.bartlett(valuesbox)
fit<-factanal(x,3,method="ml",scores="regression",rotation="varimax")
fit
scores<-fit$scores
scores
cor(scores)
res.pca <- prcomp(x, scale = TRUE)
res.pca
get_pca_var(res.pca)
get_eig(res.pca)
# regression of (transformed) price on the factor scores
ks.test(price,pnorm,mean(price),sd(price))
ks.test(log(price),pnorm,mean(log(price)),sd(log(price)))
simple_mpg <- lm(log(price) ~ scores[,1]+scores[,3])
summary(simple_mpg)
anova(simple_mpg)
confint(simple_mpg)
#1. Model specification (RESET test)
reset(simple_mpg)
2. #Homoscedasticidad
x<-scores[,c(1:2)]
bartlett.test(x)
white.test(x=scores[,1:2],y=log(price))
#3. Autocorrelation: for a time series use dwtest(def_regres)
resid_def<-residuals(simple_mpg)
# NOTE(review): residuals(lm) is a plain vector, so the matrix-style
# `resid_def[,c(1)]` likely errors with "incorrect number of dimensions"
x<-resid_def[,c(1)]
# NOTE(review): `num` and `def_regres` are not defined anywhere in this
# script -- confirm where they were meant to come from
plot(num, resid_def)
par(mfrow = c(2, 2))
plot(def_regres)
runs.test(x)
runs.test(resid_def)
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; consider installing once outside the script
install.packages("snpar")
library(snpar)
runs.test(x)
#4. Normality of the residuals
jb.norm.test(resid_def, nrepl=2000)
x=scores[,1:2]
x= (x-min(x))/(max(x)-min(x)) # to scale all values to [0, 1]
x=x+1
powerTransform(x)
valufit= boxcoxTransform(x,1.5)
# NOTE(review): this immediately overwrites the Box-Cox result with the
# untransformed x -- confirm which of the two lines is intended
valufit=x
shapiro.test(valufit)
powerTransform(price)
valuprice= boxcoxTransform(price,-1.436082)
shapiro.test(valuprice)
# refit the regression on the transformed price
simple_mpg <- lm(valuprice ~ scores[,1]+scores[,3])
summary(simple_mpg)
anova(simple_mpg)
confint(simple_mpg)
bartlett.test(valufit)
# NOTE(review): write.csv() ignores a user-supplied `sep` argument
write.csv(valuprice, file="/Users/nesdav/Documents/estadisticamultivariada/segunda parte/regresion 2 clase/homework/price.csv",row.names=FALSE, sep='.')
write.csv(valufit, file="/Users/nesdav/Documents/estadisticamultivariada/segunda parte/regresion 2 clase/homework/valufit.csv",row.names=FALSE, sep='.')
scores<-read.csv(file=file.choose(),header=T,sep=',')
e17659f16cc224e1ff75949a89752cb5d61c7993 | d5183c300f5aa6f6bbd76a4b12c7afc6f166d7f6 | /Data_Prep.R | 3a1e14cfbf765a5770742f130320d8075e265d6f | [] | no_license | rakeshprusty1/Next_Word_Predictor | 91acba4c8c3ef9a0b15ef3303809781993ee5a0f | 6189932a30a2ff81f2064122cb546093c0c87941 | refs/heads/master | 2020-05-24T17:55:32.669506 | 2017-03-14T18:50:10 | 2017-03-14T18:50:10 | 84,866,600 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,630 | r | Data_Prep.R | #Load Librarries:
library(ngram)
library(stringr)
library(stringi)
library(tm)
#Reading Files:
twitter <- readLines("en_US.twitter.txt",encoding = "UTF-8")
news <- readLines("en_US.news.txt",encoding = "UTF-8")
blogs <- readLines("en_US.blogs.txt",encoding = "UTF-8")
#Sampling:
set.seed(1234)
twit_samp <- sample(twitter,15000,replace = FALSE)
blog_samp <- sample(blogs,15000,replace = FALSE)
news_samp <- sample(news,15000,replace = FALSE)
#Merge samples:
samp <- concatenate(blog_samp,news_samp,twit_samp,collapse = " ",rm.space = FALSE)
#Preprocessing:
##Remove numeric, spcl characters etc..
samp_pro <- gsub("[^a-zA-Z ]","",samp)
##Convert everything to lower caps
samp_pro <- tolower(samp_pro)
##Remove profanity words
badtext <- readLines("profanity.txt")
samp_pro <- removeWords(samp_pro,badtext)
##Remove extra spaces
samp_pro <- stripWhitespace(samp_pro)
#Prepare N-grams:
unigram <- ngram(samp_pro,n=1,sep = " ")
bigram <- ngram(samp_pro,n=2,sep = " ")
trigram <- ngram(samp_pro,n=3,sep = " ")
tetragram <- ngram(samp_pro,n=4,sep = " ")
pentagram <- ngram(samp_pro,n=5,sep = " ")
#Convert ngrams to tables:
uni_df <- get.phrasetable(unigram)
bi_df <- get.phrasetable(bigram)
tri_df <- get.phrasetable(trigram)
tetra_df <- get.phrasetable(tetragram)
penta_df <- get.phrasetable(pentagram)
#Take subset of n-grams-df:(Optional, to improve efficiency)
uni_df <- subset(uni_df,uni_df$freq>1)
bi_df <- subset(bi_df,bi_df$freq>1)
tri_df <- subset(tri_df,tri_df$freq>1)
tetra_df <- subset(tetra_df,tetra_df$freq>1)
penta_df <- subset(penta_df,penta_df$freq>1)
|
53f599fa54fc03d38cf7fc5411888bb3d32da229 | 03a8b5eb4204f4f07f443ce7742c0190a43d8f91 | /_archive/WFA8000x/_Example R codes/cjs_fixing.R | f87fb6ececa61b4d73bcad38d8530aedc087ffeb | [] | no_license | mcolvin/WFA8000-Research-Credits | b6b18c63905f0fa2284cca4b10eb5f3b0ef3b8ce | 96dae9706cb690500e6705e587ccc03d9f64148d | refs/heads/master | 2022-02-21T07:52:45.223428 | 2022-01-25T17:04:55 | 2022-01-25T17:04:55 | 93,789,059 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,104 | r | cjs_fixing.R | ## FIXING PARAMETERS USING RMARK
require(RMark)
Nfish=700
S<-rep(0.6,7)
p<- rep(0.8,7)
p[7]<-1
# simulate some covariates
# [note there is no effect of these covariates on anything...
# just to demo how to handle covariates in data processing]
sex=sample(c("male","female"),Nfish,replace=TRUE)
fl<- runif(Nfish, 100,400) # SIZE AT TAGGING
# SET UP MATRIX FOR SURVIVAL
Z<- matrix(0,nrow=Nfish,ncol=8)
# SIMULATE TRUE SURVIVAL
Z[,1]<-1 # marking occasion at smolt trap
for(i in 2:ncol(Z))
{
Z[,i]<- rbinom(Nfish,1,Z[,i-1]*S[1])# simulate true survival
}
ZZ<- Z
# SIMULATE DETECTION GIVEN FISH IS AVAILABLE (I.E., ALIVE) TO DETECT
for(i in 2:ncol(ZZ))
{
ZZ[,i]<- rbinom(Nfish,1,Z[,i]*p[1])# simulate detection, given fish survived to be detected
}
## end simulation of known data
# make some capture histories
# and bundle up all the data
ch<- data.frame(ch=apply(ZZ,1,paste,collapse=""),sex=sex,fl=fl)
ch$ch<- as.character(ch$ch)# make sure ch is not a factor or Rmark=GRUMPY
# PROCESS THE DATA FOR MARK [SETS UP
dat_proc<- process.data(ch,model="CJS", groups="sex")# this processes the data for use by Program Mark
# MAKE THE DESIGN DATA FILE
dd<-make.design.data(dat_proc) # this makes a design matrix, needed to fix parameters
# how to fix parameters
# need to get right row index from dd
p_fixed_indx<-c(as.numeric(row.names(dd$p[(dd$p$time ==8 ),]))) # fix detection at time '8' to be 1
# vector of values to fix
p_fixed_val<- rep(1,length(p_fixed_indx)) # fix time 8 to be 1 for detection probability
# run the cjs model fit by Program Mark via Rmark
# throw a sham covariate at Phi(sex) and p(fl)
fit<-mark(
data=dat_proc,
ddl=dd,
model.parameters=list(
Phi = list(formula="~ sex"),
p = list(formula= "~ fl", fixed=list(index=p_fixed_indx,
value = p_fixed_val))),
brief=TRUE)
summary(fit, show.fixed=TRUE)
# FIT THE MODEL
# Phi(.) and p(.)
fit<-mark(
data= dat_proc,
ddl=dd,
model.parameters=list(
Phi = list(formula="~ 1"),
p = list(formula= "~1", fixed=list(index=p_fixed_indx,
value = p_fixed_val))),
brief=TRUE)
summary(fit, show.fixed=TRUE)
|
5a57e4283da6a218cbcdc536eb6a6975b1ea9dc2 | bb44671b1a3bee4802e48eb8364af10519c3f94b | /get_tool_pvals.R | 93a9a50af476f3039be6d50f82c3e5ab514dd581 | [
"MIT"
] | permissive | ebi-gene-expression-group/cell-types-analysis | d847ef1e9a7a26a7bca9f7e1d458728ca18fd664 | a8914f3b6af20c460145427cbab9951d1073e9e9 | refs/heads/develop | 2021-11-26T11:16:51.264263 | 2021-08-17T12:17:18 | 2021-08-17T12:17:18 | 227,411,947 | 4 | 0 | MIT | 2021-08-17T12:17:19 | 2019-12-11T16:32:44 | R | UTF-8 | R | false | false | 1,528 | r | get_tool_pvals.R | #!/usr/bin/env Rscript
suppressPackageStartupMessages(require(optparse))
suppressPackageStartupMessages(require(workflowscriptscommon))
### Using empirical CDFs, obtain p-values for statistics calculated in tool performance table
option_list = list(
make_option(
c("-i", "--input-table"),
action = "store",
default = NA,
type = 'character',
help = 'Path to the table of tool statistics from get_tool_performance_table.R'
),
make_option(
c("-d", "--emp-dist-list"),
action = "store",
default = NA,
type = 'character',
help = 'Path to the list of empirical distributions in .rds format'
),
make_option(
c("-o", "--output-table"),
action = "store",
default = NA,
type = 'character',
help = 'Path to the modified output table in text format'
)
)
opt = wsc_parse_args(option_list, mandatory = c("input_table", "emp_dist_list", "output_table"))
distr_list = readRDS(opt$emp_dist_list)
tools_table = read.delim(opt$input_table, stringsAsFactors=FALSE,
check.names=FALSE)
metrics = names(distr_list)
for(metric in metrics){
metric_distr = distr_list[[metric]]
metric_values = tools_table[, metric]
tool_pvals = sapply(metric_values, function(x) round((1-metric_distr(x)), 3))
col_name = paste(metric, "_pval", sep = '')
tools_table[, col_name] = tool_pvals
}
write.table(tools_table, file = opt$output_table, sep="\t", row.names=FALSE)
|
39009a3a6d916359b91ed6cb7f926a4f18d27e30 | e99228b3bde30737df902b6cea7ff3fc32ccad21 | /viz.R | 26315fa10cda80015b2a137cadb97cdd54c10224 | [] | no_license | johnmyleswhite/r_social_dynamics | acc5c9400b0613c645904e41f33dd4c5ad553fff | 562c3919abd6839ff65a57569e9e0fa514e4b434 | refs/heads/master | 2016-09-06T09:50:32.218494 | 2012-08-12T15:18:18 | 2012-08-12T15:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 760 | r | viz.R | library("entropy")
library("plyr")
committers <- read.csv("committers.tsv", sep = "\t")
ggplot(committers,
aes(x = Year, y = Commits, group = Committer, color = Committer)) +
geom_line() +
xlab("Year") +
ylab("Number of Commits per Committer") +
opts(title = "Who Makes Commits to R?")
ggsave("commits.png", height = 7, width = 10)
calculate_entropy <- function(df)
{
probs <- df$Commits / sum(df$Commits)
return(entropy(probs))
}
inequality <- ddply(committers, "Year", function (df) {calculate_entropy(df)})
ggplot(inequality, aes(x = Year, y = V1)) +
geom_line() +
xlab("Year") +
ylab("Entropy of Commits") +
opts(title = "Decreasingly Equal Contributions of R Core Committers")
ggsave("entropy.png", height = 7, width = 10)
|
50d640b4b2e13ead544edb3ca82e55ac3fa12863 | 8e3b68b7cede1eb21bdcfba461b078871307a349 | /UVcalc/R/UVR8.Abs300.R | a7593d806e3f902a4b195714b542422da843f727 | [] | no_license | aphalo/UVcalc | 5324bea93fb511299714213fcde81a40fc5c5770 | 36474de64507fe98e3486d31ade7faaf021969a4 | refs/heads/master | 2021-06-07T18:22:53.377278 | 2021-06-06T06:52:45 | 2021-06-06T06:52:45 | 6,579,747 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | UVR8.Abs300.R | UVR8.Abs300 <-
function(wavelengths){
return(UVR8.Abs(wavelengths)/UVR8.Abs(300))
} |
29f54c3a29fc88fb66cb87e8a051d713403d6d89 | 7844b28405c9dbe9ecc28cbbc165b55ea4d20f79 | /Script for Chapter Four.R | 4adbbf1041a893e5f519c392055451ff0f05503f | [] | no_license | sumon148/Chapter-Four | 6217e069238c2a182b3bd69d72ab7c4ec69e1f5c | 810efb527833f97bae4840c04332a1cb23ee049b | refs/heads/master | 2021-01-10T03:07:10.244442 | 2016-03-16T05:44:16 | 2016-03-16T05:44:16 | 53,993,585 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 110,366 | r | Script for Chapter Four.R | # ------------------------------------------------------------------------------
# Script 2: Chapter Four
# Chapter Title: Development of Robust Mean Squared Error Estimation of ELL Poverty Estimates
# Description: Simulation Work based on Parametric Bootstrap procedure
# considering Homoskedastic (HM) level-specific errors
# -------------------------------------------------------------------------------
rm(list=ls(all=TRUE))
# -------------------------------------------------------------------------------
# Libraries
# -------------------------------------------------------------------------------
library(nlme)
library(mvtnorm)
library(foreach)
library(doMC)
registerDoMC(40)
# -------------------------------------------------------------------------------
# Functions
# -------------------------------------------------------------------------------
tapply.order<-function(value,area,FUN,target.order){
# value: The values
# area: target area
# target.order: The order u wish to see the outcome ....
raw.output<-tapply(value,area,FUN)
data<-data.frame(key=names(raw.output), value=raw.output)
ordered.value<-data[match(target.order, data$key),]$value
return(ordered.value)
}
# -------------------------------------------------------------------------------#
addTrans <- function(color,trans) {
# This function adds transparancy to a color.
# Define transparancy with an integer between 0 and 255
# 0 being fully transparant and 255 being fully visable
# Works with either color and trans a vector of equal length,
# or one of the two of length 1.
if (length(color)!=length(trans)&!any(c(length(color),length(trans))==1)) stop("Vector lengths not correct")
if (length(color)==1 & length(trans)>1) color <- rep(color,length(trans))
if (length(trans)==1 & length(color)>1) trans <- rep(trans,length(color))
num2hex <- function(x)
{
hex <- unlist(strsplit("0123456789ABCDEF",split=""))
return(paste(hex[(x-x%%16)/16+1],hex[x%%16+1],sep=""))
}
rgb <- rbind(col2rgb(color),trans)
res <- paste("#",apply(apply(rgb,2,num2hex),2,paste,collapse=""),sep="")
return(res)
}
# -------------------------------------------------------------------------------#
Var.Com.MM.2<-function(level.2,level.1,res.2,res.1){
# Homoskedasticity at both levels
# level.2: ID number of level 2
# level.1: ID number of level 1
# res.2: Cluster level residuals (average of marginal residuals of respective cluster)
# res.1: HH level residuals (Marginal residuals of HHs / OLS residuals)
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
n.2<-as.vector(table(level.2))
C<-length(n.2)
ID.C<-unique(level.2)
n<-length(level.1)
n0.bar.2<-sum(n.2^2)/sum(n.2)
s1.e<-sum((res.1-mean(res.1))^2)/(n-1)
s2.e<-sum(n.2*(res.2-mean(res.1))^2)/(C-1)
sigma2.2<-max(((n-1)*(C-1))/((n-n0.bar.2)*(n-C))*(-s1.e+s2.e),0)
sigma2.1<-max((n-1)/(n-C)*s1.e-(C-1)/(n-C)*s2.e,0)
result<-list(sigma2.1=sigma2.1,sigma2.2=sigma2.2)
return(result)
}
# -------------------------------------------------------------------------------#
Var.Com.MM.3<-function(level.3,level.2,level.1,res.3,res.2,res.1){
# Homoskedasticity at all three levels
# level.3: ID number of level 3
# level.2: ID number of level 2
# level.1: ID number of level 1
# res.2: Cluster level residuals (average of marginal residuals of respective cluster)
# res.1: HH level residuals (Marginal residuals of HHs / OLS residuals)
level.3<-as.numeric(factor(level.3))
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
n.3<-as.vector(table(level.3))
n.2<-as.vector(table(level.2))
n<-length(level.1)
D<-length(n.3)
C<-length(n.2)
ID.D<-unique(level.3)
ID.C<-unique(level.2)
n0.bar.2<-sum(n.2^2)/sum(n.2)
n0.bar.3<-sum(n.3^2)/sum(n.3)
# Area wise calculation
n.0i<-NULL # sum(n.2^2)/sum(n.2)
for (d in 1:D){
n.i.j<-tapply(level.1[level.3==d],level.2[level.3==d],length)
n.0i<-c(n.0i,sum(n.i.j^2)/sum(n.i.j))
}
# n.0i<-NULL # sum(n.2^2)/sum(n.2)
# for (d in unique(level.3)){
# n.i.j<-tapply(level.1[level.3==d],level.2[level.3==d],length)
# n.0i<-c(n.0i,sum(n.i.j^2)/sum(n.i.j))
# }
sum.n.0i<-sum(n.0i)
s1.e<-sum((res.1-mean(res.1))^2)/(n-1)
s2.e<-sum(n.2*(res.2-mean(res.1))^2)/(C-1)
s3.e<-sum(n.3*(res.3-mean(res.1))^2)/(D-1)
sigma2.1<-(n-1)*s1.e/(n-C)-(C-1)*s2.e/(n-C)
sigma2.2<-(-(n-1)*(C-D)*s1.e+(C-1)*(n-D)*s2.e-(D-1)*(n-C)*s3.e)/((n-sum.n.0i)*(n-C))
sigma2.3<-((n-1)*((C-1)*(sum.n.0i-n0.bar.2)-(D-1)*(n-n0.bar.2))*s1.e+
(C-1)*((D-1)*(n-n0.bar.2)-(n-1)*(sum.n.0i-n0.bar.2))*s2.e+
(D-1)*(n-n0.bar.2)*(n-C)*s3.e)/((n-n0.bar.3)*(n-sum.n.0i)*(n-C))
result<-list(sigma2.1=sigma2.1,sigma2.2=sigma2.2,sigma2.3=sigma2.3)
return(result)
}
# -------------------------------------------------------------------------------#
GLS.EST<-function(n.c,sigma2.ch,sigma2.c,x.matrix,y.s){
# n.c is size of clusters
# sigma2.ch is a vector (or constant) of variance components at HH level
# sigma2.c is variance components at cluster level
# Output: beta estimates with thier variance covariance matrix
library(Matrix)
n<-sum(n.c)
number.cluster.s<-length(n.c)
if (length(sigma2.ch)==1){
sampleList <- list()
for (i in 1:number.cluster.s) sampleList[[i]]<-diag(rep(sigma2.ch,n.c[i]))+sigma2.c*matrix(1,n.c[i],n.c[i])
}
if (length(sigma2.ch)>1){
sampleList <- list()
j<-1
for (i in 1:number.cluster.s) {
sampleList[[i]]<-diag(sigma2.ch[j:(j+n.c[i]-1)])+sigma2.c*matrix(1,n.c[i],n.c[i])
j<-j+n.c[i]
}
}
V<-bdiag(sampleList)
inv.V<-solve(V,sparse=TRUE)
xtx<-(t(x.matrix)%*%inv.V%*%(x.matrix))
xty<-(t(x.matrix)%*%inv.V%*%(y.s))
beta.gls <- solve(xtx)%*%xty
vcov.beta.gls<- solve(t(x.matrix)%*%(x.matrix)) %*% (t(x.matrix)%*%V%*%(x.matrix)) %*% solve(t(x.matrix)%*%(x.matrix))
list(beta.gls=as.matrix(beta.gls),vcov.beta.gls=as.matrix(vcov.beta.gls))
}
# -------------------------------------------------------------------------------#
GLS.EST.2L<-function(level.2,level.1,sigma2.2,sigma2.1,x.matrix,y.s){
# level.3: Area Level ID
# level.2: Cluster Level ID
# level.1: HH Level ID
# sigma2.3, sigma2.2, sigma2.1 level specific VC (Homoskedastic here)
# Output: beta estimates with thier variance covariance matrix
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
library(Matrix)
n.2<-as.vector(table(level.2))
n<-sum(n.2)
number.cluster.s<-length(n.2)
if (length(sigma2.1)==1){
sampleList <- list()
for (i in 1:number.cluster.s) sampleList[[i]]<-diag(rep(sigma2.1,n.2[i]))+sigma2.2*matrix(1,n.2[i],n.2[i])
}
if (length(sigma2.1)>1){
sampleList <- list()
j<-1
for (i in 1:number.cluster.s) {
sampleList[[i]]<-diag(sigma2.1[j:(j+n.2[i]-1)])+sigma2.2*matrix(1,n.2[i],n.2[i])
j<-j+n.2[i]
}
}
V<-bdiag(sampleList)
inv.V<-solve(V,sparse=TRUE)
xtx<-(t(x.matrix)%*%inv.V%*%(x.matrix))
xty<-(t(x.matrix)%*%inv.V%*%(y.s))
beta.gls <- solve(xtx)%*%xty
vcov.beta.gls<- solve(t(x.matrix)%*%(x.matrix)) %*% (t(x.matrix)%*%V%*%(x.matrix)) %*% solve(t(x.matrix)%*%(x.matrix))
list(beta.gls=as.matrix(beta.gls),vcov.beta.gls=as.matrix(vcov.beta.gls),V=V)
}
# -------------------------------------------------------------------------------#
GLS.EST.3L<-function(level.3,level.2,level.1,sigma2.3,sigma2.2,sigma2.1,x.matrix,y.s){
# level.3: Area Level ID
# level.2: Cluster Level ID
# level.1: HH Level ID
# sigma2.3, sigma2.2, sigma2.1 level specific VC (Homoskedastic here)
# Output: beta estimates with thier variance covariance matrix
library(Matrix)
level.3<-as.numeric(factor(level.3))
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
n.3<-as.vector(table(level.3))
n.2<-as.vector(table(level.2))
D<-length(n.3)
sampleList <- list()
for (d in 1:D){
n.i.j<-tapply(level.1[level.3==d],level.2[level.3==d],length)
n.i<-sum(n.i.j)
if (length(sigma2.1)==1) {
clusterlist<-list()
for (i in 1:length(n.i.j)) clusterlist[[i]]<-matrix(1,n.i.j[i],n.i.j[i])
sampleList[[d]]<-diag(rep(sigma2.1,n.i))+bdiag(clusterlist)*sigma2.2+sigma2.3*matrix(1,n.i,n.i)
}
if (length(sigma2.1)>1){
clusterlist<-list()
for (i in 1:length(n.i.j)) clusterlist[[i]]<-matrix(1,n.i.j[i],n.i.j[i])
sampleList[[d]]<-diag(sigma2.1[level.1[level.3==d]])+bdiag(clusterlist)*sigma2.2+sigma2.3*matrix(1,n.i,n.i)
}
}
V<-bdiag(sampleList)
inv.V<-solve(V,sparse=TRUE)
xtx<-(t(x.matrix)%*%inv.V%*%(x.matrix))
xty<-(t(x.matrix)%*%inv.V%*%(y.s))
beta.gls <- solve(xtx)%*%xty
vcov.beta.gls<- solve(t(x.matrix)%*%(x.matrix)) %*% (t(x.matrix)%*%V%*%(x.matrix)) %*% solve(t(x.matrix)%*%(x.matrix))
list(beta.gls=as.matrix(beta.gls),vcov.beta.gls=as.matrix(vcov.beta.gls),V=V)
}
# -------------------------------------------------------------------------------#
rsquared.lmm.mom<-function(beta,Model.Matrix,est.sigma2){
# Get design matrix of fixed effects from model
Fmat <- Model.Matrix
# Get variance of fixed effects by multiplying coefficients by design matrix
VarF <- var(as.vector(t(beta)%*%t(Model.Matrix)))
# Get variance of random effects by extracting variance components
VarRand <- sum(as.numeric(unlist(est.sigma2)[-1]), na.rm=T)
VarResid <- as.numeric(unlist(est.sigma2)[1])
R.squared.M<-VarF/(VarF+VarRand+VarResid)
R.squared.C<-(VarF+VarRand)/(VarF+VarRand+VarResid)
list(VarF=VarF,VarRand=VarRand,VarResid=VarResid,R.squared.M=R.squared.M,R.squared.C=R.squared.C)
}
# -------------------------------------------------------------------------------#
Var.Com.MM.2.H<-function(level.2,level.1,res.2,con.res.1){
# Heteroskedasticity at 1st level
# Homoskedasticity at 2nd level
# Homoskedasticity at both levels
# level.2: ID number of level 2
# level.1: ID number of level 1
# res.2: Cluster level residuals (average of marginal residuals of respective cluster)
# con.res.1: HH level conditional residuals (Marginal residuals of HHs - Cluster level residuals)
n.2<-as.vector(table(level.2))
n.1<-length(level.1)
C<-length(n.2)
ID.C<-unique(level.2)
wc<-n.2/sum(n.2)
tau.2.c<-tapply(con.res.1,level.2,var)/n.2
sigma2.2<-max((sum(wc*(res.2-mean(res.2))^2)-sum(wc*(1-wc)*tau.2.c))/sum(wc*(1-wc)),0)
result<-list(sigma2.2=sigma2.2)
return(result)
}
# -------------------------------------------------------------------------------#
Var.Com.MM.3.H<-function(level.3,level.2,level.1,res.3,res.2,con.res.1){
# Heteroskedasticity at 1st level
# Homoskedasticity at 2nd and 3rd level
# level.3 : ID of level 3
# level.2 : ID of level 2
# level.1 : ID of level 1
# ID will be in ascending orders
# res.3 : Area level residuals (average of marginal residuals of respective area)
# res.2 : Cluster level residuals (average of marginal residuals of respective cluster)
# con.res.1: HH level conditional residuals (Marginal residuals of HHs - Cluster level residuals - Area level residuals)
level.3<-as.numeric(factor(level.3))
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
n.3<-as.vector(table(level.3))
n.2<-as.vector(table(level.2))
n.1<-length(level.1)
D<-length(n.3)
C<-length(n.2)
ID.D<-unique(level.3)
ID.C<-unique(level.2)
# Number of cluster per area
n.2.3<-NULL
for (d in 1: D) n.2.3<-c(n.2.3,length(unique(level.2[level.3==d])))
ID.C.D<-rep(ID.D,n.2.3)
# n0.bar.2<-sum(n.2^2)/sum(n.2)
# n0.bar.3<-sum(n.3^2)/sum(n.3)
# Area wise calculation
# n.0i<-NULL # sum(n.2^2)/sum(n.2)
# for (d in 1:D){
# n.i.j<-tapply(level.1[level.3==d],level.2[level.3==d],length)
# n.0i<-c(n.0i,sum(n.i.j^2)/sum(n.i.j))
# }
# sum.n.0i<-sum(n.0i)
# s1.e<-sum((res.1-mean(res.1))^2)/(n-1)
# s2.e<-sum(n.2*(res.2-mean(res.1))^2)/(C-1)
# s3.e<-sum(n.3*(res.3-mean(res.1))^2)/(D-1)
tau.2.c<-tapply(con.res.1,level.2,var)/n.2
tau.2.d<-tapply(n.2^2*tau.2.c,ID.C.D,sum)/n.3^2
wc<-n.2/sum(n.2)
wd<-n.3/sum(n.3)
sigma2.2<-max( ( sum(wc*(res.2-mean(res.2))^2) - sum(wd*(res.3-mean(res.3))^2) - sum(wc*(1-wc)*tau.2.c) + sum(wd*(1-wd)*tau.2.d) ) /
(sum(wc) - sum (1/wd*tapply(wc^2,ID.C.D,sum))) , 0)
sigma2.3<-max(
( sum( (1/wd-1)*tapply(wc^2,ID.C.D,sum) ) * sum(wc*(1-wc)*tau.2.c)
- sum(wc*(1-wc)) * sum(wd*(1-wd)*tau.2.d)
- sum( (1/wd-1)*tapply(wc^2,ID.C.D,sum) ) * sum(wc*(res.2-mean(res.2))^2)
+ sum(wc*(1-wc)) * sum(wd*(res.3-mean(res.3))^2) ) /
( sum(wd*(1-wd)) * (sum(wc) - sum (1/wd*tapply(wc^2,ID.C.D,sum))) )
)
result<-list(sigma2.2=sigma2.2,sigma2.3=sigma2.3)
return(result)
}
# -------------------------------------------------------------------------------#
FGT.alpha<-function(y,z,alpha){
# Function for FGT indicators # z: Poverty line
if (length(z)==1){
t.z=ifelse(y<z,1,0)
t.z.alpha=t.z*((rep(1,length(y))-y/z)^alpha)
povert=sum(t.z.alpha)/length(y)
}
if (length(z)>1){
povert<-rep(0,length(z))
for (i in 1:length(z)){
t.z=ifelse(y<z[i],1,0)
t.z.alpha=t.z*((rep(1,length(y))-y/z[i])^alpha)
povert[i]=sum(t.z.alpha)/length(y)}
}
povert
}
# -------------------------------------------------------------------------------#
Population<-function(Area,Cluster.Area,HH.Cluster,Mu,Sigma,X,Model=c("Normal","Log-Normal")){
# This function creats population for
# Area=Number of Areas
# Cluster.Area=Vector(Number of clusters per area)
# HH.Cluster=Fixed Number of HHs per cluster
# Mu=Population Mean
# Sigma=Variance of random errors of Two/Three-level Model
No.Area<-Area
No.Cluster<-sum(Cluster.Area)
N.Area<-Cluster.Area*HH.Cluster
N.Cluster<-rep(HH.Cluster,Cluster.Area)
N=sum(N.Area)
if (is.null(X)) X.design<-cbind(rep(1,N))
if (!is.null(X)) X.design<-cbind(rep(1,N),X)
if(length(Sigma)==2){
e1=rnorm(N,0,sqrt(Sigma[1]))
e2=rnorm(No.Cluster,0,sqrt(Sigma[2]))
e.2<-rep(e2,N.Cluster)
y.ijk<-Mu+e1+e.2
if (Model=="Normal") y.ijk<-X.design%*%Mu+e1+e.2
if (Model=="Log-Normal") y.ijk<-(X.design%*%Mu+e1+e.2)
}
if(length(Sigma)==3){
e1=rnorm(N,0,sqrt(Sigma[1]))
e2=rnorm(No.Cluster,0,sqrt(Sigma[2]))
e3=rnorm(No.Area,0,sqrt(Sigma[3]))
e.3<-rep(e3,N.Area)
e.2<-rep(e2,N.Cluster)
if (Model=="Normal") y.ijk<-X.design%*%Mu+e1+e.2+e.3
if (Model=="Log-Normal") y.ijk<-(X.design%*%Mu+e1+e.2+e.3)
}
# ID of Area, Cluster & HHs
ID.Area<-c(1:No.Area)
ID.Cluster<-c(1:No.Cluster)
ID.HH<-c(1:N)
# Construction of Population Data
ID.D=rep(c(1:Area),N.Area)
ID.EA.D=rep(ID.Cluster,N.Cluster)
if (Model=="Normal") pop.data<-data.frame(ID.D,ID.EA.D,ID.HH,y.ijk)
if (Model=="Log-Normal") pop.data<-data.frame(ID.D,ID.EA.D,ID.HH,y.ijk,X)
# Main population census
return(pop.data)
}
# -------------------------------------------------------------------------------#
Sample<-function(Pop.Data,Cluster.Area.s,HH.Cluster.s){
# This function darw sample in two-stage for every area
# Pop.Data<-data.frame(Area ID, Cluster ID, HH ID, Y.ijk)
# Cluster.Area.s<-Vector(Number of Sampled cluster per area)
# HH.Cluster.s<-No. of HHs per Cluster
# Selection of cluster area wise
ID.Cluster.s<-NULL
for (d in unique(Pop.Data[,1])){
ID.Cluster.s<-c(ID.Cluster.s,sample(unique(Pop.Data[,2][Pop.Data[,1]==d]),Cluster.Area.s[d]))
}
# Selection of individuals cluster wise
ID.HH.s<-NULL
for (c in ID.Cluster.s){
ID.HH.s<-c(ID.HH.s,sample(unique(Pop.Data[,3][Pop.Data[,2]==c]),HH.Cluster.s))
}
# Selection of the sampling units from the population
data.s<-Pop.Data[ID.HH.s,] ; dim(data.s)
row.names(data.s) <- NULL
return(data.s)
}
# -------------------------------------------------------------------------------#
# Estimation of Variance component under a three-level null model
# -------------------------------------------------------------------------------#
mme<-function(Sample.Data){
# Function for Estimating Variance Component: Method of Moments --------------------------------------------
# Parameters
n.3<-tapply(Sample.Data[,3],Sample.Data[,1],length)
n.2<-tapply(Sample.Data[,3],Sample.Data[,2],length)
n<-sum(n.3)
D<-length(unique(Sample.Data[,1]))
C<-length(unique(Sample.Data[,2]))
n0.bar.2<-sum(n.2^2)/n
n0.bar.3<-sum(n.3^2)/n
# Area wise calculation
n.0i<-NULL # sum(n.2^2)/sum(n.2)
for (d in 1:D){
n.i.j<-tapply(Sample.Data[,3][Sample.Data[,1]==d],Sample.Data[,2][Sample.Data[,1]==d],length)
n.0i<-c(n.0i,sum(n.i.j^2)/sum(n.i.j))
}
sum.n.0i<-sum(n.0i)
# Means
mean.s<-mean(Sample.Data[,4])
cluster.mean.s<-tapply(Sample.Data[,4],Sample.Data[,2],mean)
area.mean.s<-tapply(Sample.Data[,4],Sample.Data[,1],mean)
# Variances
s1.3<-var(Sample.Data[,4])
s2.3<-sum(as.vector(n.2)*(as.vector(cluster.mean.s)-mean.s)^2)/(C-1)
s3.3<-sum(as.vector(n.3)*(as.vector(area.mean.s)-mean.s)^2)/(D-1)
# variance component under three-level model
lemda.1.3<-(n-1)*s1.3/(n-C)-(C-1)*s2.3/(n-C)
lemda.2.3<-(-(n-1)*(C-D)*s1.3+(C-1)*(n-D)*s2.3-(D-1)*(n-C)*s3.3)/((n-sum.n.0i)*(n-C))
lemda.3.3<-((n-1)*((C-1)*(sum.n.0i-n0.bar.2)-(D-1)*(n-n0.bar.2))*s1.3+
(C-1)*((D-1)*(n-n0.bar.2)-(n-1)*(sum.n.0i-n0.bar.2))*s2.3+
(D-1)*(n-n0.bar.2)*(n-C)*s3.3)/((n-n0.bar.3)*(n-sum.n.0i)*(n-C))
lemda.3.3[lemda.3.3<0]=0
est.lemda.3<-c(lemda.1.3,lemda.2.3,lemda.3.3)
# variance component under two-level model
lemda.1.2<-(n-1)/(n-C)*s1.3-(C-1)/(n-C)*s2.3
lemda.2.2<-(n-1)*(C-1)/((n-n0.bar.2)*(n-C))*(-s1.3+s2.3)
est.lemda.2<-c(lemda.1.2,lemda.2.2)
result<-list(lemda.3=est.lemda.3,lemda.2=est.lemda.2,size.cluster=n.2,size.area=n.3,area=D,cluster=C,n.0i=n.0i)
# names(result)<-c("lemda.3","lemda.2","size.cluster","size.area","area","cluster","n.0i")
return(result)
}
# -------------------------------------------------------------------------------#
Var.Com.MM.2<-function(level.2,level.1,res.2,res.1){
# Homoskedasticity at both levels
# level.2: ID number of level 2
# level.1: ID number of level 1
# res.2: Cluster level residuals (average of marginal residuals of respective cluster)
# res.1: HH level residuals (Marginal residuals of HHs / OLS residuals)
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
n.2<-as.vector(table(level.2))
C<-length(n.2)
ID.C<-unique(level.2)
n<-length(level.1)
n0.bar.2<-sum(n.2^2)/sum(n.2)
s1.e<-sum((res.1-mean(res.1))^2)/(n-1)
s2.e<-sum(n.2*(res.2-mean(res.1))^2)/(C-1)
sigma2.2<-max(((n-1)*(C-1))/((n-n0.bar.2)*(n-C))*(-s1.e+s2.e),0)
sigma2.1<-max((n-1)/(n-C)*s1.e-(C-1)/(n-C)*s2.e,0)
result<-list(sigma2.1=sigma2.1,sigma2.2=sigma2.2)
return(result)
}
# -------------------------------------------------------------------------------#
Var.Com.MM.3<-function(level.3,level.2,level.1,res.3,res.2,res.1){
# Homoskedasticity at all three levels
# level.3: ID number of level 3
# level.2: ID number of level 2
# level.1: ID number of level 1
# res.2: Cluster level residuals (average of marginal residuals of respective cluster)
# res.1: HH level residuals (Marginal residuals of HHs / OLS residuals)
level.3<-as.numeric(factor(level.3))
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
n.3<-as.vector(table(level.3))
n.2<-as.vector(table(level.2))
n<-length(level.1)
D<-length(n.3)
C<-length(n.2)
ID.D<-unique(level.3)
ID.C<-unique(level.2)
n0.bar.2<-sum(n.2^2)/sum(n.2)
n0.bar.3<-sum(n.3^2)/sum(n.3)
# Area wise calculation
n.0i<-NULL # sum(n.2^2)/sum(n.2)
for (d in 1:D){
n.i.j<-tapply(level.1[level.3==d],level.2[level.3==d],length)
n.0i<-c(n.0i,sum(n.i.j^2)/sum(n.i.j))
}
# n.0i<-NULL # sum(n.2^2)/sum(n.2)
# for (d in unique(level.3)){
# n.i.j<-tapply(level.1[level.3==d],level.2[level.3==d],length)
# n.0i<-c(n.0i,sum(n.i.j^2)/sum(n.i.j))
# }
sum.n.0i<-sum(n.0i)
s1.e<-sum((res.1-mean(res.1))^2)/(n-1)
s2.e<-sum(n.2*(res.2-mean(res.1))^2)/(C-1)
s3.e<-sum(n.3*(res.3-mean(res.1))^2)/(D-1)
sigma2.1<-(n-1)*s1.e/(n-C)-(C-1)*s2.e/(n-C)
sigma2.2<-(-(n-1)*(C-D)*s1.e+(C-1)*(n-D)*s2.e-(D-1)*(n-C)*s3.e)/((n-sum.n.0i)*(n-C))
sigma2.3<-((n-1)*((C-1)*(sum.n.0i-n0.bar.2)-(D-1)*(n-n0.bar.2))*s1.e+
(C-1)*((D-1)*(n-n0.bar.2)-(n-1)*(sum.n.0i-n0.bar.2))*s2.e+
(D-1)*(n-n0.bar.2)*(n-C)*s3.e)/((n-n0.bar.3)*(n-sum.n.0i)*(n-C))
result<-list(sigma2.1=sigma2.1,sigma2.2=sigma2.2,sigma2.3=sigma2.3)
return(result)
}
# -------------------------------------------------------------------------------#
GLS.EST.2L<-function(level.2,level.1,sigma2.2,sigma2.1,x.matrix,y.s){
# level.3: Area Level ID
# level.2: Cluster Level ID
# level.1: HH Level ID
# sigma2.3, sigma2.2, sigma2.1 level specific VC (Homoskedastic here)
# Output: beta estimates with thier variance covariance matrix
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
library(Matrix)
n.2<-as.vector(table(level.2))
n<-sum(n.2)
number.cluster.s<-length(n.2)
if (length(sigma2.1)==1){
sampleList <- list()
for (i in 1:number.cluster.s) sampleList[[i]]<-diag(rep(sigma2.1,n.2[i]))+sigma2.2*matrix(1,n.2[i],n.2[i])
}
V<-bdiag(sampleList)
inv.V<-solve(V,sparse=TRUE)
xtx<-(t(x.matrix)%*%inv.V%*%(x.matrix))
xty<-(t(x.matrix)%*%inv.V%*%(y.s))
beta.gls <- solve(xtx)%*%xty
vcov.beta.gls<- solve(t(x.matrix)%*%(x.matrix)) %*% (t(x.matrix)%*%V%*%(x.matrix)) %*% solve(t(x.matrix)%*%(x.matrix))
list(beta.gls=as.matrix(beta.gls),vcov.beta.gls=as.matrix(vcov.beta.gls))
}
# -------------------------------------------------------------------------------#
GLS.EST.2L<-function(level.2,level.1,sigma2.2,sigma2.1,x.matrix,y.s){
# level.3: Area Level ID
# level.2: Cluster Level ID
# level.1: HH Level ID
# sigma2.3, sigma2.2, sigma2.1 level specific VC (Homoskedastic here)
# Output: beta estimates with thier variance covariance matrix
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
library(Matrix)
n.2<-as.vector(table(level.2))
n<-sum(n.2)
number.cluster.s<-length(n.2)
if (length(sigma2.1)==1){
sampleList <- list()
for (i in 1:number.cluster.s) sampleList[[i]]<-diag(rep(sigma2.1,n.2[i]))+sigma2.2*matrix(1,n.2[i],n.2[i])
}
if (length(sigma2.1)>1){
sampleList <- list()
j<-1
for (i in 1:number.cluster.s) {
sampleList[[i]]<-diag(sigma2.1[j:(j+n.2[i]-1)])+sigma2.2*matrix(1,n.2[i],n.2[i])
j<-j+n.2[i]
}
}
V<-bdiag(sampleList)
inv.V<-solve(V,sparse=TRUE)
xtx<-(t(x.matrix)%*%inv.V%*%(x.matrix))
xty<-(t(x.matrix)%*%inv.V%*%(y.s))
beta.gls <- solve(xtx)%*%xty
vcov.beta.gls<- solve(t(x.matrix)%*%(x.matrix)) %*% (t(x.matrix)%*%V%*%(x.matrix)) %*% solve(t(x.matrix)%*%(x.matrix))
list(beta.gls=as.matrix(beta.gls),vcov.beta.gls=as.matrix(vcov.beta.gls),V=V)
}
# -------------------------------------------------------------------------------#
GLS.EST.3L<-function(level.3,level.2,level.1,sigma2.3,sigma2.2,sigma2.1,x.matrix,y.s){
# level.3: Area Level ID
# level.2: Cluster Level ID
# level.1: HH Level ID
# sigma2.3, sigma2.2, sigma2.1 level specific VC (Homoskedastic here)
# Output: beta estimates with thier variance covariance matrix
library(Matrix)
level.3<-as.numeric(factor(level.3))
level.2<-as.numeric(factor(level.2))
level.1<-as.numeric(factor(level.1))
n.3<-as.vector(table(level.3))
n.2<-as.vector(table(level.2))
D<-length(n.3)
sampleList <- list()
for (d in 1:D){
n.i.j<-tapply(level.1[level.3==d],level.2[level.3==d],length)
n.i<-sum(n.i.j)
if (length(sigma2.1)==1) {
clusterlist<-list()
for (i in 1:length(n.i.j)) clusterlist[[i]]<-matrix(1,n.i.j[i],n.i.j[i])
sampleList[[d]]<-diag(rep(sigma2.1,n.i))+bdiag(clusterlist)*sigma2.2+sigma2.3*matrix(1,n.i,n.i)
}
if (length(sigma2.1)>1){
clusterlist<-list()
for (i in 1:length(n.i.j)) clusterlist[[i]]<-matrix(1,n.i.j[i],n.i.j[i])
sampleList[[d]]<-diag(sigma2.1[level.1[level.3==d]])+bdiag(clusterlist)*sigma2.2+sigma2.3*matrix(1,n.i,n.i)
}
}
V<-bdiag(sampleList)
inv.V<-solve(V,sparse=TRUE)
xtx<-(t(x.matrix)%*%inv.V%*%(x.matrix))
xty<-(t(x.matrix)%*%inv.V%*%(y.s))
beta.gls <- solve(xtx)%*%xty
vcov.beta.gls<- solve(t(x.matrix)%*%(x.matrix)) %*% (t(x.matrix)%*%V%*%(x.matrix)) %*% solve(t(x.matrix)%*%(x.matrix))
list(beta.gls=as.matrix(beta.gls),vcov.beta.gls=as.matrix(vcov.beta.gls),V=V)
}
# -------------------------------------------------------------------------------#
ELL.PB.HM<- function(beta,var.beta,var.com.1,var.com.2,ID.D,ID.C,X.U,t){
  # One ELL parametric-bootstrap replicate of the area-level distribution
  # function (FGT with alpha = 0) under the homoskedastic 2-level model
  # (cluster random effect + household error).
  #
  # beta, var.beta : GLS coefficient estimate and its var-cov matrix
  # var.com.1      : household-level variance component
  # var.com.2      : cluster-level variance component
  # ID.D, ID.C     : area and cluster IDs for every population unit
  # X.U            : population covariates (NULL => intercept-only model)
  # t              : threshold, scalar or vector
  #
  # Returns list(F11 = per-area DF estimates); a 1 x D x length(t) array
  # when t is a vector.
  n.pop       <- length(ID.D)
  hh.per.clus <- as.vector(table(ID.C))     # household count per cluster
  n.clus      <- length(unique(ID.C))
  # RNG draw order (beta, cluster effects, household errors) matches the
  # companion ELL functions so seeded runs stay reproducible.
  beta.l <- mvtnorm::rmvnorm(1, beta, var.beta)
  eta.l  <- rnorm(n.clus, 0, sqrt(var.com.2))
  eps.l  <- rnorm(n.pop, 0, sqrt(var.com.1))
  # NOTE(review): rep(eta.l, hh.per.clus) assumes population rows are sorted
  # by ID.C -- TODO confirm against the population generator.
  X.mat <- if (is.null(X.U)) cbind(rep(1, n.pop)) else cbind(rep(1, n.pop), X.U)
  # base::t() is the transpose; the threshold argument t does not shadow it
  # because R looks up a *function* named t.
  y.l <- X.mat %*% base::t(beta.l) + rep(eta.l, hh.per.clus) + eps.l
  if (length(t) == 1) {
    F11 <- tapply(y.l, ID.D, function(x) FGT.alpha(x, t, 0))
  } else if (length(t) > 1) {
    n.area <- length(unique(ID.D))
    F11 <- array(matrix(simplify2array(tapply(y.l, ID.D, function(x) FGT.alpha(x, t, 0))),
                        nrow = n.area, ncol = length(t), byrow = TRUE),
                 dim = c(1, n.area, length(t)))
  }
  list(F11 = F11)
}
# -------------------------------------------------------------------------------#
ELL.PB.HM.Mean<- function(beta,var.beta,var.com.1,var.com.2,ID.D,ID.C,X.U){
  # One ELL parametric-bootstrap replicate of the AREA MEANS under the
  # homoskedastic 2-level model (cluster random effect + household error).
  # Arguments as in ELL.PB.HM but without a threshold.
  # Returns list(F11 = per-area mean of the simulated outcome).
  n.pop       <- length(ID.D)
  hh.per.clus <- as.vector(table(ID.C))     # household count per cluster
  n.clus      <- length(unique(ID.C))
  # Draw order: beta, then cluster effects, then household errors.
  beta.l <- mvtnorm::rmvnorm(1, beta, var.beta)
  eta.l  <- rnorm(n.clus, 0, sqrt(var.com.2))
  eps.l  <- rnorm(n.pop, 0, sqrt(var.com.1))
  X.mat <- if (is.null(X.U)) cbind(rep(1, n.pop)) else cbind(rep(1, n.pop), X.U)
  # base::t() is the matrix transpose (no threshold argument here to confuse it with).
  y.l <- X.mat %*% base::t(beta.l) + rep(eta.l, hh.per.clus) + eps.l
  list(F11 = tapply(y.l, ID.D, mean))
}
# -------------------------------------------------------------------------------#
ELL.PB.HM.FGT<- function(beta,var.beta,var.com.1,var.com.2,ID.D,ID.C,X.U,t){
  # One ELL parametric-bootstrap replicate of the area-level FGT poverty
  # measures (alpha = 0, 1, 2) under the homoskedastic 2-level model.
  # The linear predictor is on the LOG scale and is exponentiated before the
  # FGT computation, so the threshold(s) t must be on the original scale.
  #
  # beta, var.beta : GLS coefficient estimate and its var-cov matrix
  # var.com.1/2    : household- and cluster-level variance components
  # ID.D, ID.C     : area and cluster IDs for every population unit
  # X.U            : population covariates (NULL => intercept-only model)
  # t              : poverty line(s), scalar or vector, original scale
  #
  # Returns list(F00, F11, F22): per-area FGT(0/1/2); each a
  # 1 x D x length(t) array when t is a vector.
  N<-length(ID.D)
  N.c<-as.vector(table(ID.C))        # households per cluster
  C<-length(unique(ID.C))
  # Draw order: beta, cluster effects, household errors (reproducibility).
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  eta.l<-rnorm(C,0,sqrt(var.com.2))
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) z.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  else z.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  y.l<-exp(z.l)                      # back-transform to the original scale
  # Local helper: per-area FGT(alpha), removing the former triplication of
  # the array-construction code for alpha = 0, 1, 2.
  fgt.by.area<-function(alpha){
    by.area<-tapply(y.l,ID.D,function (x) FGT.alpha(x,t,alpha))
    if (length(t)==1) return(by.area)
    D<-length(unique(ID.D))
    array(matrix(simplify2array(by.area),nrow=D,ncol=length(t),byrow=TRUE),
          dim=c(1,D,length(t)))
  }
  list(F00=fgt.by.area(0),F11=fgt.by.area(1),F22=fgt.by.area(2))
}
# -------------------------------------------------------------------------------#
Cons.ELL.PB.HM<- function(beta,var.beta,var.com.1,var.com.2,ID.D,ID.C,X.U,t){
  # "Conservative" ELL bootstrap replicate of the area-level distribution
  # function: the level-2 random effect is drawn per AREA (ID.D) rather than
  # per cluster, which yields wider (more conservative) uncertainty.
  # ID.C is kept in the signature for compatibility with ELL.PB.HM but is
  # not used (the formerly computed N.c/C locals were dead code and removed).
  #
  # Returns list(F11 = per-area DF estimates); 1 x D x length(t) array when
  # t is a vector.
  N<-length(ID.D)
  N.d<-as.vector(table(ID.D))        # households per area
  D<-length(unique(ID.D))
  # Draw order: beta, area effects, household errors (reproducibility).
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  eta.l<-rnorm(D,0,sqrt(var.com.2))  # area-level effects
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) y.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(eta.l,N.d)+eps.l
  else y.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(eta.l,N.d)+eps.l
  if (length(t)==1){
    F11<-tapply(y.l,ID.D,function (x) FGT.alpha(x,t,0))
  } else if (length(t)>1){
    F11<-array(matrix(simplify2array(tapply(y.l,ID.D,function (x) FGT.alpha(x,t,0))),
                      nrow=D,ncol=length(t),byrow=TRUE),
               dim=c(1,D,length(t)))
  }
  list(F11=F11)
}
# -------------------------------------------------------------------------------#
Cons.ELL.PB.HM.Mean<- function(beta,var.beta,var.com.1,var.com.2,ID.D,ID.C,X.U){
  # "Conservative" ELL bootstrap replicate of the AREA MEANS: the level-2
  # random effect is drawn per AREA (ID.D) rather than per cluster.
  # ID.C is kept in the signature for compatibility but is not used (the
  # formerly computed N.c/C locals were dead code and removed).
  # Returns list(F11 = per-area mean of the simulated outcome).
  N<-length(ID.D)
  N.d<-as.vector(table(ID.D))        # households per area
  D<-length(unique(ID.D))
  # Draw order: beta, area effects, household errors (reproducibility).
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  eta.l<-rnorm(D,0,sqrt(var.com.2))  # area-level effects
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) y.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(eta.l,N.d)+eps.l
  else y.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(eta.l,N.d)+eps.l
  F11<-tapply(y.l,ID.D,mean)
  list(F11=F11)
}
# -------------------------------------------------------------------------------#
Cons.ELL.PB.HM.FGT<- function(beta,var.beta,var.com.1,var.com.2,ID.D,ID.C,X.U,t){
  # "Conservative" ELL bootstrap replicate of the area-level FGT poverty
  # measures (alpha = 0, 1, 2): the level-2 random effect is drawn per AREA
  # (ID.D) rather than per cluster. The linear predictor is on the LOG scale
  # and is exponentiated, so t must be on the original scale.
  # ID.C is kept in the signature for compatibility but is not used (the
  # formerly computed N.c/C locals were dead code and removed).
  #
  # Returns list(F00, F11, F22): per-area FGT(0/1/2); each a
  # 1 x D x length(t) array when t is a vector.
  N<-length(ID.D)
  N.d<-as.vector(table(ID.D))        # households per area
  D<-length(unique(ID.D))
  # Draw order: beta, area effects, household errors (reproducibility).
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  eta.l<-rnorm(D,0,sqrt(var.com.2))  # area-level effects
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) z.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(eta.l,N.d)+eps.l
  else z.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(eta.l,N.d)+eps.l
  y.l<-exp(z.l)                      # back-transform to the original scale
  # Local helper: per-area FGT(alpha), removing the former triplication of
  # the array-construction code for alpha = 0, 1, 2.
  fgt.by.area<-function(alpha){
    by.area<-tapply(y.l,ID.D,function (x) FGT.alpha(x,t,alpha))
    if (length(t)==1) return(by.area)
    array(matrix(simplify2array(by.area),nrow=D,ncol=length(t),byrow=TRUE),
          dim=c(1,D,length(t)))
  }
  list(F00=fgt.by.area(0),F11=fgt.by.area(1),F22=fgt.by.area(2))
}
# -------------------------------------------------------------------------------#
ELL.PB.HM.3L<- function(beta,var.beta,var.com.1,var.com.2,var.com.3,ID.D,ID.C,X.U,t){
  # One ELL parametric-bootstrap replicate of the area-level distribution
  # function under the homoskedastic 3-LEVEL model: area effect + cluster
  # effect + household error.
  #
  # var.com.1/2/3 : household-, cluster-, and area-level variance components
  # ID.D, ID.C    : area and cluster IDs for every population unit
  # X.U           : population covariates (NULL => intercept-only model)
  # t             : threshold, scalar or vector
  #
  # Returns list(F11 = per-area DF estimates); 1 x D x length(t) array when
  # t is a vector.
  n.pop       <- length(ID.D)
  hh.per.clus <- as.vector(table(ID.C))
  n.clus      <- length(unique(ID.C))
  hh.per.area <- as.vector(table(ID.D))
  n.area      <- length(unique(ID.D))
  # Draw order: beta, area effects, cluster effects, household errors --
  # kept identical for reproducibility under a fixed seed.
  beta.l <- mvtnorm::rmvnorm(1, beta, var.beta)
  u.l    <- rnorm(n.area, 0, sqrt(var.com.3))
  eta.l  <- rnorm(n.clus, 0, sqrt(var.com.2))
  eps.l  <- rnorm(n.pop, 0, sqrt(var.com.1))
  X.mat <- if (is.null(X.U)) cbind(rep(1, n.pop)) else cbind(rep(1, n.pop), X.U)
  # base::t() is the transpose; the threshold argument t does not shadow it.
  y.l <- X.mat %*% base::t(beta.l) + rep(u.l, hh.per.area) + rep(eta.l, hh.per.clus) + eps.l
  if (length(t) == 1) {
    F11 <- tapply(y.l, ID.D, function(x) FGT.alpha(x, t, 0))
  } else if (length(t) > 1) {
    F11 <- array(matrix(simplify2array(tapply(y.l, ID.D, function(x) FGT.alpha(x, t, 0))),
                        nrow = n.area, ncol = length(t), byrow = TRUE),
                 dim = c(1, n.area, length(t)))
  }
  list(F11 = F11)
}
# -------------------------------------------------------------------------------#
ELL.PB.HM.3L.Mean<- function(beta,var.beta,var.com.1,var.com.2,var.com.3,ID.D,ID.C,X.U){
  # One ELL parametric-bootstrap replicate of the AREA MEANS under the
  # homoskedastic 3-LEVEL model: area effect + cluster effect + household
  # error. Arguments as in ELL.PB.HM.3L but without a threshold.
  # Returns list(F11 = per-area mean of the simulated outcome).
  n.pop       <- length(ID.D)
  hh.per.clus <- as.vector(table(ID.C))
  n.clus      <- length(unique(ID.C))
  hh.per.area <- as.vector(table(ID.D))
  n.area      <- length(unique(ID.D))
  # Draw order: beta, area effects, cluster effects, household errors.
  beta.l <- mvtnorm::rmvnorm(1, beta, var.beta)
  u.l    <- rnorm(n.area, 0, sqrt(var.com.3))
  eta.l  <- rnorm(n.clus, 0, sqrt(var.com.2))
  eps.l  <- rnorm(n.pop, 0, sqrt(var.com.1))
  X.mat <- if (is.null(X.U)) cbind(rep(1, n.pop)) else cbind(rep(1, n.pop), X.U)
  y.l <- X.mat %*% base::t(beta.l) + rep(u.l, hh.per.area) + rep(eta.l, hh.per.clus) + eps.l
  list(F11 = tapply(y.l, ID.D, mean))
}
# -------------------------------------------------------------------------------#
ELL.PB.HM.3L.FGT<- function(beta,var.beta,var.com.1,var.com.2,var.com.3,ID.D,ID.C,X.U,t){
  # One ELL parametric-bootstrap replicate of the area-level FGT poverty
  # measures (alpha = 0, 1, 2) under the homoskedastic 3-LEVEL model:
  # area effect + cluster effect + household error. The linear predictor is
  # on the LOG scale and is exponentiated, so t must be on the original scale.
  #
  # var.com.1/2/3 : household-, cluster-, and area-level variance components
  #
  # Returns list(F00, F11, F22): per-area FGT(0/1/2); each a
  # 1 x D x length(t) array when t is a vector.
  N<-length(ID.D)
  N.c<-as.vector(table(ID.C))        # households per cluster
  C<-length(unique(ID.C))
  N.d<-as.vector(table(ID.D))        # households per area
  D<-length(unique(ID.D))
  # Draw order: beta, area effects, cluster effects, household errors.
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  u.l<-rnorm(D,0,sqrt(var.com.3))
  eta.l<-rnorm(C,0,sqrt(var.com.2))
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) z.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(u.l,N.d)+rep(eta.l,N.c)+eps.l
  else z.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(u.l,N.d)+rep(eta.l,N.c)+eps.l
  y.l<-exp(z.l)                      # back-transform to the original scale
  # Local helper: per-area FGT(alpha), removing the former triplication of
  # the array-construction code for alpha = 0, 1, 2.
  fgt.by.area<-function(alpha){
    by.area<-tapply(y.l,ID.D,function (x) FGT.alpha(x,t,alpha))
    if (length(t)==1) return(by.area)
    array(matrix(simplify2array(by.area),nrow=D,ncol=length(t),byrow=TRUE),
          dim=c(1,D,length(t)))
  }
  list(F00=fgt.by.area(0),F11=fgt.by.area(1),F22=fgt.by.area(2))
}
# -------------------------------------------------------------------------------#
MELL.PB.HM.2L<- function(beta,var.beta,var.com.1,var.com.2,NoClusterBlock,ID.D,ID.C,X.U,t){
  # Modified ELL (MELL) bootstrap replicate of the area-level distribution
  # function: cluster effects are drawn block by block, each block with its
  # own (adjusted) cluster-level variance component.
  #
  # var.com.2      : vector, one cluster-level variance per block
  # NoClusterBlock : vector, number of clusters in each block
  # (The formerly computed C/N.d/D locals were dead code and removed;
  #  the interface is unchanged.)
  #
  # Returns list(F11 = per-area DF estimates); 1 x D x length(t) array when
  # t is a vector.
  NoBlock<-length(var.com.2)
  N<-length(ID.D)
  N.c<-as.vector(table(ID.C))        # households per cluster
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  # lapply over blocks preserves the block-by-block RNG draw order of the
  # original grow-a-list loop, without the O(n^2) list growth.
  eta.l<-unlist(lapply(seq_len(NoBlock),
                       function(b) rnorm(NoClusterBlock[b],0,sqrt(var.com.2[b]))))
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) y.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  else y.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  if (length(t)==1){
    F11<-tapply(y.l,ID.D,function (x) FGT.alpha(x,t,0))
  } else if (length(t)>1){
    D<-length(unique(ID.D))
    F11<-array(matrix(simplify2array(tapply(y.l,ID.D,function (x) FGT.alpha(x,t,0))),
                      nrow=D,ncol=length(t),byrow=TRUE),
               dim=c(1,D,length(t)))
  }
  list(F11=F11)
}
# -------------------------------------------------------------------------------#
MELL.PB.HM.2L.Mean<- function(beta,var.beta,var.com.1,var.com.2,NoClusterBlock,ID.D,ID.C,X.U){
  # Modified ELL (MELL) bootstrap replicate of the AREA MEANS: cluster
  # effects are drawn block by block, each block with its own (adjusted)
  # cluster-level variance component.
  #
  # var.com.2      : vector, one cluster-level variance per block
  # NoClusterBlock : vector, number of clusters in each block
  # (The formerly computed C/N.d/D locals were dead code and removed;
  #  the interface is unchanged.)
  #
  # Returns list(F11 = per-area mean of the simulated outcome).
  NoBlock<-length(var.com.2)
  N<-length(ID.D)
  N.c<-as.vector(table(ID.C))        # households per cluster
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  # lapply over blocks preserves the block-by-block RNG draw order of the
  # original grow-a-list loop.
  eta.l<-unlist(lapply(seq_len(NoBlock),
                       function(b) rnorm(NoClusterBlock[b],0,sqrt(var.com.2[b]))))
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) y.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  else y.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  F11<-tapply(y.l,ID.D,mean)
  list(F11=F11)
}
# -------------------------------------------------------------------------------#
MELL.PB.HM.2L.FGT<- function(beta,var.beta,var.com.1,var.com.2,NoClusterBlock,ID.D,ID.C,X.U,t){
  # Modified ELL (MELL) bootstrap replicate of the area-level FGT poverty
  # measures (alpha = 0, 1, 2): cluster effects are drawn block by block,
  # each block with its own (adjusted) cluster-level variance component.
  # The linear predictor is on the LOG scale and is exponentiated, so the
  # threshold(s) t must be on the original scale.
  #
  # var.com.2      : vector, one cluster-level variance per block
  # NoClusterBlock : vector, number of clusters in each block
  # (The formerly computed C/N.d/D locals were dead code and removed;
  #  the interface is unchanged.)
  #
  # Returns list(F00, F11, F22): per-area FGT(0/1/2); each a
  # 1 x D x length(t) array when t is a vector.
  NoBlock<-length(var.com.2)
  N<-length(ID.D)
  N.c<-as.vector(table(ID.C))        # households per cluster
  beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
  # lapply over blocks preserves the block-by-block RNG draw order of the
  # original grow-a-list loop.
  eta.l<-unlist(lapply(seq_len(NoBlock),
                       function(b) rnorm(NoClusterBlock[b],0,sqrt(var.com.2[b]))))
  eps.l<-rnorm(N,0,sqrt(var.com.1))
  if (is.null(X.U)) z.l<-cbind(rep(1,N))%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  else z.l<-cbind(rep(1,N),X.U)%*%base::t(beta.l)+rep(eta.l,N.c)+eps.l
  y.l<-exp(z.l)                      # back-transform to the original scale
  # Local helper: per-area FGT(alpha), removing the former triplication of
  # the array-construction code for alpha = 0, 1, 2.
  fgt.by.area<-function(alpha){
    by.area<-tapply(y.l,ID.D,function (x) FGT.alpha(x,t,alpha))
    if (length(t)==1) return(by.area)
    D<-length(unique(ID.D))
    array(matrix(simplify2array(by.area),nrow=D,ncol=length(t),byrow=TRUE),
          dim=c(1,D,length(t)))
  }
  list(F00=fgt.by.area(0),F11=fgt.by.area(1),F22=fgt.by.area(2))
}
# -------------------------------------------------------------------------------#
MCS.MELL<-function(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal","Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("MM","REML"),
Parameter=c("Mean","DF","FGT"),ELL.Method=c("ELL.2L","Opt.ELL.2L","Cons.ELL.2L","MELL1.2L","MELL2.2L","MELL3.2L","ELL.3L"),quant,NoSim,No.Boot){
# MCS.MELL : Monte Carlo Simulation
# MELL : Modified ELL
# t: the targeted Quantile (Single value or Vector)
# Block.ID: Strata/Block ID for the area
# Parallel Object Creation (Inner loop) ==============================================================
# f2 is for single Parameter (here Mean and DF for single quantile)
f2 <- function(obj1,obj2) {
z <- list(
F11.True=rbind(obj1$F11.True,obj2$F11.True)
)
}
# f2.array is for multiple Parameter (here DF for multiple quantile)
f2.array <- function(obj1,obj2) {
z <- list(
F11.True=abind(obj1$F11.True,obj2$F11.True,along=1)
)
}
# f2.FGT is for FGT at single quantile
f2.FGT <- function(obj1,obj2) {
# f2 is for single quantile
z <- list(
F11.FGT0=rbind(obj1$F11.FGT0,obj2$F11.FGT0),
F11.FGT1=rbind(obj1$F11.FGT1,obj2$F11.FGT1),
F11.FGT2=rbind(obj1$F11.FGT2,obj2$F11.FGT2)
)
}
# f2.array.FGT is for FGT at multiple quantiles
f2.array.FGT <- function(obj1,obj2) {
# f2.array is for multiple quantile
z <- list(
F11.FGT0=abind(obj1$F11.FGT0,obj2$F11.FGT0,along=1),
F11.FGT1=abind(obj1$F11.FGT1,obj2$F11.FGT1,along=1),
F11.FGT2=abind(obj1$F11.FGT2,obj2$F11.FGT2,along=1)
)
}
# Estimated variance components: 2-L & 3-L
est.lemda.3<-matrix(0,NoSim,3) ; est.lemda.2<-matrix(0,NoSim,2)
# Population Area Mean: True, ELL, Optimistic ELL, Conservative ELL, Adjusted ELL =====================
if (Parameter=="Mean"){
Mean.True<-matrix(0,NoSim,Area);
Mean.ELL<-matrix(0,NoSim,Area);
Mean.ELL.MSE<-matrix(0,NoSim,Area);
Mean.CR.I<-matrix(0,NoSim,Area)
# Mean.Opt.ELL<-matrix(0,NoSim,Area); Mean.Con.ELL<-matrix(0,NoSim,Area); Mean.Adj.ELL.1<-matrix(0,NoSim,Area); Mean.Adj.ELL.2<-matrix(0,NoSim,Area); Mean.Adj.ELL.3<-matrix(0,NoSim,Area)
# Mean.Opt.ELL.var<-matrix(0,NoSim,Area); Mean.Con.ELL.var<-matrix(0,NoSim,Area); Mean.Adj.ELL.1.var<-matrix(0,NoSim,Area); Mean.Adj.ELL.2.var<-matrix(0,NoSim,Area); Mean.Adj.ELL.3.var<-matrix(0,NoSim,Area)
}
# Population Area-specific DF & FGT: True, ELL, Optimistic ELL, Conservative ELL, Adjusted ELL ========
No.Q=length(quant)
if (Parameter=="DF"){
if (No.Q==1) {
DF.True<-array(0,dim=c(NoSim,No.Area))
DF.ELL<-array(0,dim=c(NoSim,No.Area))
DF.ELL.MSE<-array(0,dim=c(NoSim,No.Area))
DF.CR.I<-array(0,dim=c(NoSim,No.Area))
# Estimate of Population Parameter: FGT
# DF.Opt.ELL<-array(0,dim=c(NoSim,No.Area)) # DF.Con.ELL<-array(0,dim=c(NoSim,No.Area)) # DF.Adj.ELL.1<-array(0,dim=c(NoSim,No.Area)) # DF.Adj.ELL.2<-array(0,dim=c(NoSim,No.Area)) # DF.Adj.ELL.3<-array(0,dim=c(NoSim,No.Area))
# Estimated Variance of Population Parameter: FGT
# DF.Opt.ELL.var<-array(0,dim=c(NoSim,No.Area)) # DF.Con.ELL.var<-array(0,dim=c(NoSim,No.Area)) # DF.Adj.ELL.1.var<-array(0,dim=c(NoSim,No.Area)) # DF.Adj.ELL.2.var<-array(0,dim=c(NoSim,No.Area)) # DF.Adj.ELL.3.var<-array(0,dim=c(NoSim,No.Area))
}
if (No.Q>1) {
# True Population Parameter
DF.True<-array(0,dim=c(NoSim,No.Area,No.Q))
DF.ELL<-array(0,dim=c(NoSim,No.Area,No.Q))
DF.ELL.MSE<-array(0,dim=c(NoSim,No.Area,No.Q))
DF.CR.I<-array(0,dim=c(NoSim,No.Area,No.Q))
# Estimate of Population Parameter: FGT
# DF.ELL<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Opt.ELL<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Con.ELL<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Adj.ELL.1<-array(0,dim=c(NoSim,No.Area,No.Q))
# DF.Adj.ELL.2<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Adj.ELL.3<-array(0,dim=c(NoSim,No.Area,No.Q))
# Estimated Variance of Population Parameter: FGT
# DF.ELL.var<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Opt.ELL.var<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Con.ELL.var<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Adj.ELL.1.var<-array(0,dim=c(NoSim,No.Area,No.Q))
# DF.Adj.ELL.2.var<-array(0,dim=c(NoSim,No.Area,No.Q)) # DF.Adj.ELL.3.var<-array(0,dim=c(NoSim,No.Area,No.Q))
}
}
if (Parameter=="FGT"){
if (No.Q==1) {
# True Population Parameter
F0.True<-array(0,dim=c(NoSim,No.Area))
F1.True<-array(0,dim=c(NoSim,No.Area))
F2.True<-array(0,dim=c(NoSim,No.Area))
F0.F11<-array(0,dim=c(NoSim,No.Area)) # ELL estimates FGT 0
F1.F11<-array(0,dim=c(NoSim,No.Area)) # ELL estimates FGT 1
F2.F11<-array(0,dim=c(NoSim,No.Area)) # ELL estimates FGT 2
F0.F11.MSE<-array(0,dim=c(NoSim,No.Area)) # MSE of estimated FGT 0
F1.F11.MSE<-array(0,dim=c(NoSim,No.Area)) # MSE of estimated FGT 1
F2.F11.MSE<-array(0,dim=c(NoSim,No.Area)) # MSE of estimated FGT 2
F0.CR.I<-array(0,dim=c(NoSim,No.Area)) # Coverage Indicator of 95% CI
F1.CR.I<-array(0,dim=c(NoSim,No.Area)) # Coverage Indicator of 95% CI
F2.CR.I<-array(0,dim=c(NoSim,No.Area)) # Coverage Indicator of 95% CI
}
if (No.Q>1) {
# True Population Parameter
F0.True<-array(0,dim=c(NoSim,No.Area,No.Q)) # True estimates: FGT 0
F1.True<-array(0,dim=c(NoSim,No.Area,No.Q)) # True estimates: FGT 1
F2.True<-array(0,dim=c(NoSim,No.Area,No.Q)) # True estimates: FGT 2
F0.F11<-array(0,dim=c(NoSim,No.Area,No.Q)) # ELL estimates FGT 0
F1.F11<-array(0,dim=c(NoSim,No.Area,No.Q)) # ELL estimates FGT 1
F2.F11<-array(0,dim=c(NoSim,No.Area,No.Q)) # ELL estimates FGT 2
F0.F11.MSE<-array(0,dim=c(NoSim,No.Area,No.Q)) # MSE of estimated FGT 0
F1.F11.MSE<-array(0,dim=c(NoSim,No.Area,No.Q)) # MSE of estimated FGT 1
F2.F11.MSE<-array(0,dim=c(NoSim,No.Area,No.Q)) # MSE of estimated FGT 2
F0.CR.I<-array(0,dim=c(NoSim,No.Area,No.Q)) # Coverage Indicator of 95% CI
F1.CR.I<-array(0,dim=c(NoSim,No.Area,No.Q)) # Coverage Indicator of 95% CI
F2.CR.I<-array(0,dim=c(NoSim,No.Area,No.Q)) # Coverage Indicator of 95% CI
}
}
# Store the significant test results: Variance component
test.results<-rep(0,NoSim)
# Number of block/Strata to split the whole population -------------------------------------------
NoBlock<-length(unique(Block.ID))
N<-sum(Cluster.Area*HH.Cluster)
# Simulation start from Here ===========================================================================
for(s in 1:NoSim){
cat(date(),"Iteration number",s,"starting","\n",fill=T)
if (Model==c("Normal")) X=NULL
if (Model==c("Log-Normal")) {
Sigma.Mu<-c(0.5,0.75)
Sigma2<-matrix(c(1.50,0.10,0.10,0.95),2,2,byrow=TRUE)
X<-rmvnorm(N,Sigma.Mu,Sigma2)
}
# Population and Sample generation -------------------------------------------------------------------------
#Pop.Data<-Population(Area,Cluster.Area,HH.Cluster,Mu,Sigma)
if (Model==c("Normal")) Pop.Data<-Population(Area,Cluster.Area,HH.Cluster,Mu,Sigma,X=NULL,Model=c("Normal"))
if (Model==c("Log-Normal")) Pop.Data<-Population(Area,Cluster.Area,HH.Cluster,Mu,Sigma,X,Model=c("Log-Normal"))
Sample.Data<-Sample(Pop.Data,Cluster.Area.s,HH.Cluster.s)
if (Model==c("Normal")) z<-quantile(Pop.Data$y.ijk,quant)
if (Model==c("Log-Normal") & Parameter==c("FGT")) z<-quantile(exp(Pop.Data$y.ijk),quant)
# Population & Sample parameters -------------------------------------------------------------------
No.Cluster<-length(unique(Pop.Data$ID.EA.D))
No.Cluster.s<-length(unique(Sample.Data$ID.EA.D))
N.Area<-tapply(Pop.Data$y.ijk,Pop.Data$ID.D,length)
N.Cluster<-tapply(Pop.Data$y.ijk,Pop.Data$ID.EA.D,length)
N<-sum(N.Area)
n<-length(Sample.Data$y.ijk)
n.i<-tapply(Sample.Data$ID.HH,Sample.Data$ID.D,length)
n.ij<-tapply(Sample.Data$ID.HH,Sample.Data$ID.EA.D,length)
if (is.null(X)) {
lm1<-lm(y.ijk~1,Sample.Data)
X.ols<-model.matrix(lm1)
}
if (!is.null(X)) {
lm1<-lm(y.ijk~X1+X2,Sample.Data)
X.ols<-model.matrix(lm1)
}
# Estimation of VC, regression parametrs and their var-cov matrix: MM ------------------------------
if (Var.Com=="MM") {
# Needs to be change ================#
# fit.mm<-mme(Sample.Data) # Only for mean model
# est.lemda.3[s,]<-fit.mm$lemda.3
# est.lemda.2[s,]<-fit.mm$lemda.2
Sample.Data$u.ch<-resid(lm1)
eta<-as.vector(tapply(Sample.Data$u.ch,Sample.Data$ID.D,mean))
u<-as.vector(tapply(Sample.Data$u.ch,Sample.Data$ID.EA.D,mean))
Sample.Data$eps<-Sample.Data$u.ch-rep(eta,n.i)-rep(u,n.ij) # No application latter
Sample.Data$y.hat<-fitted.values(lm1)
est.sigma2.2<-Var.Com.MM.2(Sample.Data$ID.EA.D,Sample.Data$ID.HH,u,Sample.Data$u.ch)
est.sigma2.3<-Var.Com.MM.3(Sample.Data$ID.D,Sample.Data$ID.EA.D,Sample.Data$ID.HH,eta,u,Sample.Data$u.ch)
est.lemda.2[s,]<-c(est.sigma2.2$sigma2.1,est.sigma2.2$sigma2.2)
est.lemda.3[s,]<-c(est.sigma2.3$sigma2.1,est.sigma2.3$sigma2.2,est.sigma2.3$sigma2.3)
gls.lm2<-GLS.EST.2L(Sample.Data$ID.EA.D,Sample.Data$ID.HH,est.sigma2.2$sigma2.2,est.sigma2.2$sigma2.1,x.matrix=X.ols,Sample.Data$y.ijk)
beta.gls.2<-gls.lm2$beta.gls
var.beta.gls.2<-gls.lm2$vcov.beta.gls
# I have to modify this
gls.lm3<-GLS.EST.3L(Sample.Data$ID.D,Sample.Data$ID.EA.D,Sample.Data$ID.HH,est.sigma2.3$sigma2.3,est.sigma2.3$sigma2.2,est.sigma2.3$sigma2.1,x.matrix=X.ols,Sample.Data$y.ijk)
beta.gls.3<-gls.lm3$beta.gls
var.beta.gls.3<-gls.lm3$vcov.beta.gls
# Need to check whether
}
# Estimation of VC, regression parametrs and their var-cov matrix: REML ------------------------------
if (Var.Com=="REML") {
if (Model=="Normal") fit.lme.2<-lme(fixed=y.ijk~1, random=~1|ID.EA.D,data=Sample.Data)
if (Model=="Log-Normal") fit.lme.2<-lme(fixed=y.ijk~X1+X2, random=~1|ID.EA.D,data=Sample.Data)
lemda.2.lme<-nlme::VarCorr(fit.lme.2)
est.lemda.2[s,]<-as.numeric(c(lemda.2.lme[2,1],lemda.2.lme[1,1]))
beta.gls.2<-fixed.effects(fit.lme.2)
var.beta.gls.2<-summary(fit.lme.2)$varFix
if (Model=="Normal") fit.lme.3<-lme(fixed=y.ijk~1, random=~1|ID.D/ID.EA.D,data=Sample.Data)
if (Model=="Log-Normal") fit.lme.3<-lme(fixed=y.ijk~X1+X2, random=~1|ID.D/ID.EA.D,data=Sample.Data)
lemda.3.lme<-nlme::VarCorr(fit.lme.3)
est.lemda.3[s,]<-as.numeric(c(lemda.3.lme[5,1],lemda.3.lme[4,1],lemda.3.lme[2,1]))
beta.gls.3<-fixed.effects(fit.lme.3)
var.beta.gls.3<-summary(fit.lme.3)$varFix
test.results[s]<-anova.lme(fit.lme.2,fit.lme.3)$ 'p-value'[2]
}
# names(summary(fit.lme.3)) # to see the objests under summary(lm.object)
# Block ID for every HHs
ID.Block.U<-rep(Block.ID,N.Area)
# Adjustment of the 2nd variance component under 2-L ------------------------------------------------
Ki.s<-matrix(0,Area,1) ## Adjustment factor by area from sample structure
Ki.P<-matrix(0,Area,1) ## Adjustment factor by area from population structure
for (d in 1:Area){
nij<-tapply(Sample.Data$ID.HH[Sample.Data$ID.D==d],Sample.Data$ID.EA.D[Sample.Data$ID.D==d],length)
Ki.s[d,]<-sum(nij)^2/sum(nij^2) ## Adjustment factor by area
Nij<-tapply(Pop.Data$ID.HH[Pop.Data$ID.D==d],Pop.Data$ID.EA.D[Pop.Data$ID.D==d],length)
Ki.P[d,]<-sum(Nij)^2/sum(Nij^2) ## Adjustment factor by area from population structure
}
# Blockwise parameter ------------------------------------------------------------------------
Ki.Block<-list(); for(i in 1:NoBlock) Ki.Block[[i]]<-Ki.P[unique(Pop.Data$ID.D[ID.Block.U==i])]
Cluster.B<-list(); for(i in 1:NoBlock) Cluster.B[[i]]<-unique(Pop.Data$ID.EA.D[ID.Block.U==i])
No.Cluster.B<-list(); for(i in 1:NoBlock) No.Cluster.B[[i]]<-length(unique(Pop.Data$ID.EA.D[ID.Block.U==i]))
N.B<-list(); for(i in 1:NoBlock) N.B[[i]]<-length(Pop.Data$ID.EA.D[ID.Block.U==i])
# Blockwise calculations by manually ---------------------------------------------------------
# for(i in 1:NoBlock) assign(paste("Ki.Block", i, sep = "."),Ki.P[unique(Pop.Data$ID.D[ID.Block.U==i])])
# Ki.Block.1<-Ki.P[01:15] ; Ki.Block.2<-Ki.P[16:30]; Ki.Block.3<-Ki.P[31:45]; Ki.Block.4<-Ki.P[46:60]; Ki.Block.5<-Ki.P[61:75]
# for(i in 1:NoBlock) assign(paste("Cluster.B", i, sep = ""),unique(Pop.Data$ID.EA.D[ID.Block.U==i]))
# Cluster.B1<-unique(Pop.Data$ID.EA.D[Pop.Data$ID.D<=15]); Cluster.B2<-unique(Pop.Data$ID.EA.D[Pop.Data$ID.D>=16 & Pop.Data$ID.D<=30]); Cluster.B3<-unique(Pop.Data$ID.EA.D[Pop.Data$ID.D>=31 & Pop.Data$ID.D<=45])
# Cluster.B4<-unique(Pop.Data$ID.EA.D[Pop.Data$ID.D>=46 & Pop.Data$ID.D<=60]); Cluster.B5<-unique(Pop.Data$ID.EA.D[Pop.Data$ID.D>=61 & Pop.Data$ID.D<=75])
# for(i in 1:NoBlock) assign(paste("No.Cluster.B", i, sep = ""),length(unique(Pop.Data$ID.EA.D[ID.Block.U==i])))
# No.Cluster.B1<-length(Cluster.B1); No.Cluster.B2<-length(Cluster.B2); No.Cluster.B3<-length(Cluster.B3); No.Cluster.B4<-length(Cluster.B4); No.Cluster.B5<-length(Cluster.B5)
# for(i in 1:NoBlock) assign(paste("N.B", i, sep = ""),length(Pop.Data$ID.EA.D[ID.Block.U==i]))
# N.B1<-length(Pop.Data[,2][Pop.Data$ID.D<=15]); N.B2<-length(Pop.Data[,2][Pop.Data$ID.D>=16 & Pop.Data$ID.D<=30]); N.B3<-length(Pop.Data[,2][Pop.Data$ID.D>=31 & Pop.Data$ID.D<=45])
# N.B4<-length(Pop.Data[,2][Pop.Data$ID.D>=46 & Pop.Data$ID.D<=60]); N.B5<-length(Pop.Data[,2][Pop.Data$ID.D>=61 & Pop.Data$ID.D<=75])
# All together in a single function: Cluster ID by Block; No. of Cluster by block; No. of HH by block
# for(i in 1:NoBlock){
# assign(paste("Ki.Block", i, sep = "."),Ki.P[unique(Pop.Data$ID.D[ID.Block.U==i])]) # Area-specific weight by block
# assign(paste("Cluster.B", i, sep = ""),unique(Pop.Data$ID.EA.D[ID.Block.U==i])) # Block specific cluster
# assign(paste("No.Cluster.B", i, sep = ""),length(unique(Pop.Data$ID.EA.D[ID.Block.U==i]))) # Block specific number of cluster
# assign(paste("N.B", i, sep = ""),length(Pop.Data$ID.EA.D[ID.Block.U==i])) # Block size
# }
# Adjustment factors --------------------------------------------------------------------------
# 1st adjustment factor
K1<-(est.lemda.3[s,2]+mean(Ki.s)*est.lemda.3[s,3])/est.lemda.2[s,][2]
adj.lemda2.2.K1<-K1*est.lemda.2[s,][2]
# 2nd adjustment factor
K2<-(est.lemda.3[s,2]+mean(Ki.P)*est.lemda.3[s,3])/est.lemda.2[s,][2]
adj.lemda2.2.K2<-K2*est.lemda.2[s,][2]
# 3rd adjustment factor: Block wise
K3<-list(); for(i in 1:NoBlock) K3[[i]]<-(est.lemda.3[s,2]+mean(Ki.Block[[i]])*est.lemda.3[s,3])/est.lemda.2[s,][2]
adj.lemda2.2.K3.i<-list(); for(i in 1:NoBlock) adj.lemda2.2.K3.i[[i]]<-K3[[i]]*est.lemda.2[s,][2]
# Manual calculation of the Weights ========================================================================================
# 3rd adjustment factor: Block wise
# K3.Block.1<-(est.lemda.3[s,2]+mean(Ki.Block.1)*est.lemda.3[s,3])/est.lemda.2[s,][2]
# adj.lemda2.2.K3.1<-K3.Block.1*est.lemda.2[s,][2]
# K3.Block.2<-(est.lemda.3[s,2]+mean(Ki.Block.2)*est.lemda.3[s,3])/est.lemda.2[s,][2]
# adj.lemda2.2.K3.2<-K3.Block.2*est.lemda.2[s,][2]
# K3.Block.3<-(est.lemda.3[s,2]+mean(Ki.Block.3)*est.lemda.3[s,3])/est.lemda.2[s,][2]
# adj.lemda2.2.K3.3<-K3.Block.3*est.lemda.2[s,][2]
# K3.Block.4<-(est.lemda.3[s,2]+mean(Ki.Block.4)*est.lemda.3[s,3])/est.lemda.2[s,][2]
# adj.lemda2.2.K3.4<-K3.Block.4*est.lemda.2[s,][2]
# K3.Block.5<-(est.lemda.3[s,2]+mean(Ki.Block.5)*est.lemda.3[s,3])/est.lemda.2[s,][2]
# adj.lemda2.2.K3.5<-K3.Block.5*est.lemda.2[s,][2]
# adj.lemda2.2.K3<-mean(adj.lemda2.2.K3.1,adj.lemda2.2.K3.2,adj.lemda2.2.K3.3,adj.lemda2.2.K3.4,adj.lemda2.2.K3.5)
# Application of the ELL and MELL Functions ================================================================
if (Parameter=="Mean") {
if (ELL.Method=="ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM.Mean(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="Opt.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM.Mean(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X)$F11
list(F11.True=F11.True) }
}
if (ELL.Method=="Cons.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-Cons.ELL.PB.HM.Mean(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL1.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM.Mean(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL2.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM.Mean(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL3.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-MELL.PB.HM.2L.Mean(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="ELL.3L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM.3L.Mean(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X)$F11
list(F11.True=F11.True)}
}
# ELL.2L<-ELL.PB.HM.Mean(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL)$F11
# Opt.ELL.2L<-ELL.PB.HM.Mean(Opt.beta.gls.2,Opt.var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL)$F11
# Cons.ELL.2L<-Cons.ELL.PB.HM.Mean(Cons.beta.gls.2,Cons.var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL)$F11
# MELL1.2L<-ELL.PB.HM.Mean(beta.gls.k1,var.beta.gls.k1,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL)$F11
# MELL2.2L<-ELL.PB.HM.Mean(beta.gls.k2,var.beta.gls.k2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL)$F11
# MELL3.2L<-MELL.PB.HM.2L.Mean(beta.gls.k3,var.beta.gls.k3,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL)$F11
# ELL.3L<-ELL.PB.HM.3L.Mean(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL)$F11
Mean.True[s,]<-tapply(Pop.Data$y.ijk,Pop.Data$ID.D,mean)
Mean.ELL[s,]<-colMeans(r.FGT$F11.True)
Mean.ELL.MSE[s,]<-apply(r.FGT$F11.True,2,sd)
F11.Q.02.5<-apply(r.FGT$F11.True,2,function(x) quantile(x,0.025,na.rm=TRUE))
F11.Q.97.5<-apply(r.FGT$F11.True,2,function(x) quantile(x,0.975,na.rm=TRUE))
Mean.CR.I[s,]<-(Mean.True[s,]>=F11.Q.02.5 & Mean.True[s,]<=F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
}
if (Parameter=="DF") {
if (No.Q==1) {
if (ELL.Method=="ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="Opt.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True) }
}
if (ELL.Method=="Cons.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-Cons.ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL1.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL2.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL3.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-MELL.PB.HM.2L(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="ELL.3L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
F11.True<-ELL.PB.HM.3L(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
DF.True[s,]<-tapply(Pop.Data$y.ijk,Pop.Data$ID.D,function(x) FGT.alpha(x,z,0))
DF.ELL[s,]<-colMeans(r.FGT$F11.True)
DF.ELL.MSE[s,]<-apply(r.FGT$F11.True,2,sd)
F11.Q.02.5<-apply(r.FGT$F11.True,2,function(x) quantile(x,0.025,na.rm=TRUE))
F11.Q.97.5<-apply(r.FGT$F11.True,2,function(x) quantile(x,0.975,na.rm=TRUE))
DF.CR.I[s,]<-(DF.True[s,]>=F11.Q.02.5 & DF.True[s,]<=F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
}
# ELL.2L<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL,z)
# Opt.ELL.2L<-ELL.PB.HM(Opt.beta.gls.2,Opt.var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL,z)
# Con.ELL.2L<-Cons.ELL.PB.HM(Cons.beta.gls.2,Cons.var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL,z)
# MELL1.2L<-ELL.PB.HM(beta.gls.k1,var.beta.gls.k1,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL,z)
# MELL2.2L<-ELL.PB.HM(beta.gls.k2,var.beta.gls.k2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL,z)
# MELL3.2L<-MELL.PB.HM.2L(beta.gls.k3,var.beta.gls.k3,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL,z)$F11
# ELL.3L<-ELL.PB.HM.3L(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U==NULL,z)
if (No.Q>1) {
if (ELL.Method=="ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="Opt.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True) }
}
if (ELL.Method=="Cons.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array) %dopar% {
F11.True<-Cons.ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL1.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL2.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array) %dopar% {
F11.True<-ELL.PB.HM(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="MELL3.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array) %dopar% {
F11.True<-MELL.PB.HM.2L(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
if (ELL.Method=="ELL.3L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array) %dopar% {
F11.True<-ELL.PB.HM.3L(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)$F11
list(F11.True=F11.True)}
}
DF.True[s,,]<-array(matrix(simplify2array(tapply(Pop.Data$y.ijk,Pop.Data$ID.D,function (x) FGT.alpha(x,z,0))),nrow=No.Area,ncol=length(z),byrow=TRUE),dim=c(1,No.Area,length(z))) # True estimates: FGT 0
DF.ELL[s,,]<-colMeans(r.FGT$F11.True,dims=1)
DF.ELL.MSE[s,,]<-apply(r.FGT$F11.True,c(2,3),sd)
F11.Q.02.5<-apply(r.FGT$F11.True,c(2,3),function(x) quantile(x,0.025,na.rm=TRUE))
F11.Q.97.5<-apply(r.FGT$F11.True,c(2,3),function(x) quantile(x,0.975,na.rm=TRUE))
DF.CR.I[s,,]<-(DF.True[s,,]>=F11.Q.02.5 & DF.True[s,,]<=F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
}
}
if (Parameter=="FGT") {
if (No.Q==1) {
if (ELL.Method=="ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="Opt.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="Cons.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.FGT) %dopar% {
F11.True<-Cons.ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="MELL1.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="MELL2.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="MELL3.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.FGT) %dopar% {
F11.True<-MELL.PB.HM.2L.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="ELL.3L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.FGT) %dopar% {
F11.True<-ELL.PB.HM.3L.FGT(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
F0.True[s,]<-tapply(exp(Pop.Data$y.ijk),Pop.Data$ID.D,function(x) FGT.alpha(x,z,0))
F1.True[s,]<-tapply(exp(Pop.Data$y.ijk),Pop.Data$ID.D,function(x) FGT.alpha(x,z,1))
F2.True[s,]<-tapply(exp(Pop.Data$y.ijk),Pop.Data$ID.D,function(x) FGT.alpha(x,z,2))
F0.F11[s,]<-colMeans(r.FGT$F11.FGT0) ; F0.F11.MSE[s,]<-apply(r.FGT$F11.FGT0,2,sd)
F0.F11.Q.02.5<-apply(r.FGT$F11.FGT0,2,function(x) quantile(x,0.025,na.rm=TRUE))
F0.F11.Q.97.5<-apply(r.FGT$F11.FGT0,2,function(x) quantile(x,0.975,na.rm=TRUE))
F0.CR.I[s,]<-(F0.True[s,]>=F0.F11.Q.02.5 & F0.True[s,]<=F0.F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
F1.F11[s,]<-colMeans(r.FGT$F11.FGT1) ; F1.F11.MSE[s,]<-apply(r.FGT$F11.FGT1,2,sd)
F1.F11.Q.02.5<-apply(r.FGT$F11.FGT1,2,function(x) quantile(x,0.025,na.rm=TRUE))
F1.F11.Q.97.5<-apply(r.FGT$F11.FGT1,2,function(x) quantile(x,0.975,na.rm=TRUE))
F1.CR.I[s,]<-(F1.True[s,]>=F1.F11.Q.02.5 & F1.True[s,]<=F1.F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
F2.F11[s,]<-colMeans(r.FGT$F11.FGT2) ; F2.F11.MSE[s,]<-apply(r.FGT$F11.FGT2,2,sd)
F2.F11.Q.02.5<-apply(r.FGT$F11.FGT2,2,function(x) quantile(x,0.025,na.rm=TRUE))
F2.F11.Q.97.5<-apply(r.FGT$F11.FGT2,2,function(x) quantile(x,0.975,na.rm=TRUE))
F2.CR.I[s,]<-(F2.True[s,]>=F2.F11.Q.02.5 & F2.True[s,]<=F2.F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
}
# ELL.2L<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X==NULL,z)
# Opt.ELL.2L<-ELL.PB.HM.FGT(Opt.beta.gls.2,Opt.var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X==NULL,z)
# Con.ELL.2L<-Cons.ELL.PB.HM.FGT(Cons.beta.gls.2,Cons.var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X==NULL,z)
# MELL1.2L<-ELL.PB.HM.FGT(beta.gls.k1,var.beta.gls.k1,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X==NULL,z)
# MELL2.2L<-ELL.PB.HM.FGT(beta.gls.k2,var.beta.gls.k2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X==NULL,z)
# MELL3.2L<-MELL.PB.HM.2L.FGT(beta.gls.k3,var.beta.gls.k3,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X==NULL,z)$F11
# ELL.3L<-ELL.PB.HM.3L.FGT(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X==NULL,z)
if (No.Q>1) {
if (ELL.Method=="ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=est.lemda.2[s,2],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="Opt.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="Cons.ELL.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array.FGT) %dopar% {
F11.True<-Cons.ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=(est.lemda.3[s,2]+est.lemda.3[s,3]),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="MELL1.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K1,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="MELL2.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array.FGT) %dopar% {
F11.True<-ELL.PB.HM.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=adj.lemda2.2.K2,Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="MELL3.2L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array.FGT) %dopar% {
F11.True<-MELL.PB.HM.2L.FGT(beta.gls.2,var.beta.gls.2,var.com.1=est.lemda.2[s,1],var.com.2=c(unlist(adj.lemda2.2.K3.i)),NoClusterBlock=c(unlist(No.Cluster.B)),Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
if (ELL.Method=="ELL.3L") {
r.FGT <- foreach(icount(No.Boot), .combine=f2.array.FGT) %dopar% {
F11.True<-ELL.PB.HM.3L.FGT(beta.gls.3,var.beta.gls.3,var.com.1=est.lemda.3[s,1],var.com.2=est.lemda.3[s,2],var.com.3=est.lemda.3[s,3],Pop.Data$ID.D,Pop.Data$ID.EA.D,X.U=X,z)
F11.FGT0<-F11.True$F00; F11.FGT1<-F11.True$F11; F11.FGT2<-F11.True$F22
F11.True<-NULL
list(F11.FGT0=F11.FGT0,F11.FGT1=F11.FGT1,F11.FGT2=F11.FGT2)
}
}
F0.True[s,,]<-array(matrix(simplify2array(tapply(exp(Pop.Data$y.ijk),Pop.Data$ID.D,function (x) FGT.alpha(x,z,0))),nrow=No.Area,ncol=length(z),byrow=TRUE),dim=c(1,No.Area,length(z))) # True estimates: FGT 0
F1.True[s,,]<-array(matrix(simplify2array(tapply(exp(Pop.Data$y.ijk),Pop.Data$ID.D,function (x) FGT.alpha(x,z,1))),nrow=No.Area,ncol=length(z),byrow=TRUE),dim=c(1,No.Area,length(z))) # True estimates: FGT 1
F2.True[s,,]<-array(matrix(simplify2array(tapply(exp(Pop.Data$y.ijk),Pop.Data$ID.D,function (x) FGT.alpha(x,z,2))),nrow=No.Area,ncol=length(z),byrow=TRUE),dim=c(1,No.Area,length(z))) # True estimates: FGT 2
F0.F11[s,,]<-colMeans(r.FGT$F11.FGT0,dims=1) ; F0.F11.MSE[s,,]<-apply(r.FGT$F11.FGT0,c(2,3),sd)
F0.F11.Q.02.5<-apply(r.FGT$F11.FGT0,c(2,3),function(x) quantile(x,0.025,na.rm=TRUE))
F0.F11.Q.97.5<-apply(r.FGT$F11.FGT0,c(2,3),function(x) quantile(x,0.975,na.rm=TRUE))
F0.CR.I[s,,]<-(F0.True[s,,]>=F0.F11.Q.02.5 & F0.True[s,,]<=F0.F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
F1.F11[s,,]<-colMeans(r.FGT$F11.FGT1,dims=1) ; F1.F11.MSE[s,,]<-apply(r.FGT$F11.FGT1,c(2,3),sd)
F1.F11.Q.02.5<-apply(r.FGT$F11.FGT1,c(2,3),function(x) quantile(x,0.025,na.rm=TRUE))
F1.F11.Q.97.5<-apply(r.FGT$F11.FGT1,c(2,3),function(x) quantile(x,0.975,na.rm=TRUE))
F1.CR.I[s,,]<-(F1.True[s,,]>=F1.F11.Q.02.5 & F1.True[s,,]<=F1.F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
F2.F11[s,,]<-colMeans(r.FGT$F11.FGT2,dims=1) ; F2.F11.MSE[s,,]<-apply(r.FGT$F11.FGT2,c(2,3),sd)
F2.F11.Q.02.5<-apply(r.FGT$F11.FGT2,c(2,3),function(x) quantile(x,0.025,na.rm=TRUE))
F2.F11.Q.97.5<-apply(r.FGT$F11.FGT2,c(2,3),function(x) quantile(x,0.975,na.rm=TRUE))
F2.CR.I[s,,]<-(F2.True[s,,]>=F2.F11.Q.02.5 & F2.True[s,,]<=F2.F11.Q.97.5)*1 # Converting the logical matrix into numerical matrix
}
}
} # Simulation End
if (Parameter==c("Mean")) Results<-list(Sigma2.HM.3l=est.lemda.3,Sigma2.HM.2l=est.lemda.2,test.results=test.results,Mean.True=Mean.True, Mean.ELL=Mean.ELL, Mean.ELL.MSE=Mean.ELL.MSE,Mean.CR.I=Mean.CR.I)
if (Parameter==c("DF")) Results<-list(Sigma2.HM.3l=est.lemda.3,Sigma2.HM.2l=est.lemda.2,test.results=test.results,DF.True=DF.True, DF.ELL=DF.ELL, DF.ELL.MSE=DF.ELL.MSE,DF.CR.I=DF.CR.I)
if (Parameter==c("FGT")) Results<-list(Sigma2.HM.3l=est.lemda.3,Sigma2.HM.2l=est.lemda.2,test.results=test.results,F0.True=F0.True,F1.True=F1.True,F2.True=F2.True,
F0.F11=F0.F11,F0.F11.MSE=F0.F11.MSE,F0.CR.I=F0.CR.I,F1.F11=F1.F11,F1.F11.MSE=F1.F11.MSE,F1.CR.I=F1.CR.I,F2.F11=F2.F11,F2.F11.MSE=F2.F11.MSE,F2.CR.I=F2.CR.I)
return(Results )
}
# -------------------------------------------------------------------------------#
ELL.PB.HM.2L.FGT.Estimator<- function(beta,var.beta,var.com.1,var.com.2,ID.D,ID.C,X.U,t,HH.Size,No.Boot){
  # Parametric bootstrap (ELL) FGT estimator under a 2-level homoskedastic
  # nested-error model: log-income = X beta + eta_cluster + eps_household.
  #
  # Arguments:
  #   beta, var.beta : GLS coefficient estimate and its covariance matrix
  #                    (intercept coefficient first).
  #   var.com.1      : household-level variance component (eps).
  #   var.com.2      : cluster-level variance component (eta).
  #   ID.D, ID.C     : area and cluster IDs, one entry per household; the
  #                    data are assumed ordered by area/cluster (see note).
  #   X.U            : population covariates WITHOUT the intercept column,
  #                    or NULL for an intercept-only model.
  #   t              : poverty line(s) on the ORIGINAL (not log) scale.
  #   HH.Size        : household sizes for person-weighted FGT, or NULL for
  #                    unweighted household-level FGT.
  #   No.Boot        : number of bootstrap replicates.
  #
  # Returns a list with the area IDs plus, for each of FGT0/FGT1/FGT2, the
  # bootstrap mean, SD (labelled "MSE") and 2.5%/97.5% bootstrap quantiles.
  #
  # NOTE(review): rep(eta.l, N.c) assumes table(ID.C) lists cluster sizes in
  # the order the clusters appear in the data (true when ID.C is sorted);
  # confirm before using with unsorted identifiers.

  # foreach combiner: stack one replicate's per-area vectors as a new row.
  f2 <- function(obj1, obj2) {
    list(
      FGT0 = rbind(obj1$FGT0, obj2$FGT0),
      FGT1 = rbind(obj1$FGT1, obj2$FGT1),
      FGT2 = rbind(obj1$FGT2, obj2$FGT2)
    )
  }
  # Per-area FGT(alpha) for simulated incomes y.l on the original scale;
  # person-weighted when HH.Size is supplied.
  fgt.by.area <- function(y.l, alpha) {
    index <- ifelse(y.l < t, 1, 0) * ((t - y.l) / t)^alpha
    if (is.null(HH.Size)) {
      tapply(index, ID.D, mean)
    } else {
      tapply(HH.Size * index, ID.D, sum) / tapply(HH.Size, ID.D, sum)
    }
  }
  N <- length(ID.D)
  N.c <- as.vector(table(ID.C))   # households per cluster
  C <- length(unique(ID.C))       # number of clusters
  r.FGT <- foreach(icount(No.Boot), .combine = f2) %dopar% {
    # Draw coefficients and random effects for this bootstrap replicate.
    beta.l <- mvtnorm::rmvnorm(1, beta, var.beta)
    eta.l <- rnorm(C, 0, sqrt(var.com.2))   # cluster random effects
    eps.l <- rnorm(N, 0, sqrt(var.com.1))   # household errors
    # Linear predictor on the log scale. t(...) below is base::t (matrix
    # transpose): the poverty-line argument `t` is not a function, so R's
    # function lookup skips it.
    if (is.null(X.U)) {
      z.l <- cbind(rep(1, N)) %*% t(beta.l) + rep(eta.l, N.c) + eps.l
    } else {
      z.l <- as.matrix(cbind(rep(1, N), X.U)) %*% t(beta.l) + rep(eta.l, N.c) + eps.l
    }
    y.l <- exp(z.l)   # back-transform to the original scale
    list(FGT0 = fgt.by.area(y.l, 0),
         FGT1 = fgt.by.area(y.l, 1),
         FGT2 = fgt.by.area(y.l, 2))
  }
  # Bootstrap summaries per area: point estimate, SD, 95% interval bounds.
  summarise.fgt <- function(draws) {
    list(
      est = colMeans(draws),
      mse = apply(draws, 2, sd),
      q025 = apply(draws, 2, function(x) quantile(x, 0.025, na.rm = TRUE)),
      q975 = apply(draws, 2, function(x) quantile(x, 0.975, na.rm = TRUE))
    )
  }
  s0 <- summarise.fgt(r.FGT$FGT0)
  s1 <- summarise.fgt(r.FGT$FGT1)
  s2 <- summarise.fgt(r.FGT$FGT2)
  list(Area.ID = unique(ID.D),
       F0.F11 = s0$est, F0.F11.MSE = s0$mse, F0.F11.Q.02.5 = s0$q025, F0.F11.Q.97.5 = s0$q975,
       F1.F11 = s1$est, F1.F11.MSE = s1$mse, F1.F11.Q.02.5 = s1$q025, F1.F11.Q.97.5 = s1$q975,
       F2.F11 = s2$est, F2.F11.MSE = s2$mse, F2.F11.Q.02.5 = s2$q025, F2.F11.Q.97.5 = s2$q975)
}
# -------------------------------------------------------------------------------#
ELL.PB.HM.3L.FGT.Estimator<- function(beta,var.beta,var.com.1,var.com.2,var.com.3,ID.D,ID.C,X.U,t,HH.Size,No.Boot){
  # Parametric bootstrap (ELL) FGT estimator for a 3-level homoskedastic
  # nested-error model: log-income = X beta + u_area + eta_cluster + eps_hh.
  # `t` holds the poverty line(s) on the original (exponentiated) scale;
  # HH.Size = NULL gives household-level FGT, otherwise person-weighted FGT.
  # Returns per-area bootstrap mean, SD and 2.5%/97.5% quantiles for
  # FGT0, FGT1 and FGT2.

  # Accumulator for foreach: stacks each replicate as a new matrix row.
  combine.rep <- function(a, b) {
    list(FGT0 = rbind(a$FGT0, b$FGT0),
         FGT1 = rbind(a$FGT1, b$FGT1),
         FGT2 = rbind(a$FGT2, b$FGT2))
  }
  n.hh      <- length(ID.D)
  size.clus <- as.vector(table(ID.C))   # households per cluster
  n.clus    <- length(unique(ID.C))
  size.area <- as.vector(table(ID.D))   # households per area
  n.area    <- length(unique(ID.D))
  boot.out <- foreach(icount(No.Boot), .combine = combine.rep) %dopar% {
    # One replicate: draw coefficients and all three error levels.
    b.draw   <- mvtnorm::rmvnorm(1, beta, var.beta)
    area.eff <- rnorm(n.area, 0, sqrt(var.com.3))
    clus.eff <- rnorm(n.clus, 0, sqrt(var.com.2))
    hh.err   <- rnorm(n.hh, 0, sqrt(var.com.1))
    design <- if (is.null(X.U)) {
      cbind(rep(1, n.hh))
    } else {
      as.matrix(cbind(rep(1, n.hh), X.U))
    }
    # Simulated log-income, then back-transform to the original scale.
    log.y <- design %*% t(b.draw) + rep(area.eff, size.area) +
      rep(clus.eff, size.clus) + hh.err
    y.sim <- exp(log.y)
    poor      <- ifelse(y.sim < t, 1, 0)
    shortfall <- (t - y.sim) / t
    if (is.null(HH.Size)) {
      FGT0 <- tapply(poor * shortfall^0, ID.D, mean)
      FGT1 <- tapply(poor * shortfall^1, ID.D, mean)
      FGT2 <- tapply(poor * shortfall^2, ID.D, mean)
    } else {
      persons <- tapply(HH.Size, ID.D, sum)
      FGT0 <- tapply(HH.Size * poor * shortfall^0, ID.D, sum) / persons
      FGT1 <- tapply(HH.Size * poor * shortfall^1, ID.D, sum) / persons
      FGT2 <- tapply(HH.Size * poor * shortfall^2, ID.D, sum) / persons
    }
    list(FGT0 = FGT0, FGT1 = FGT1, FGT2 = FGT2)
  }
  # Column-wise bootstrap summaries (one column per area).
  boot.sd <- function(m) apply(m, 2, sd)
  q.lo <- function(m) apply(m, 2, function(v) quantile(v, 0.025, na.rm = TRUE))
  q.hi <- function(m) apply(m, 2, function(v) quantile(v, 0.975, na.rm = TRUE))
  list(F0.F11 = colMeans(boot.out$FGT0), F0.F11.MSE = boot.sd(boot.out$FGT0),
       F0.F11.Q.02.5 = q.lo(boot.out$FGT0), F0.F11.Q.97.5 = q.hi(boot.out$FGT0),
       F1.F11 = colMeans(boot.out$FGT1), F1.F11.MSE = boot.sd(boot.out$FGT1),
       F1.F11.Q.02.5 = q.lo(boot.out$FGT1), F1.F11.Q.97.5 = q.hi(boot.out$FGT1),
       F2.F11 = colMeans(boot.out$FGT2), F2.F11.MSE = boot.sd(boot.out$FGT2),
       F2.F11.Q.02.5 = q.lo(boot.out$FGT2), F2.F11.Q.97.5 = q.hi(boot.out$FGT2))
}
# -------------------------------------------------------------------------------#
MELL.PB.HM.2L.FGT.Estimator<- function(beta,var.beta,var.com.1,Mod.var.com.2,NoClusterBlock,ID.D,ID.C,X.U,t,HH.Size,No.Boot){
# Modified-ELL (MELL) parametric bootstrap FGT estimator for a 2-level model
# in which the cluster-level variance component varies by "block" of
# clusters: Mod.var.com.2[i] is the cluster variance used for the
# NoClusterBlock[i] clusters of block i. The household-level variance
# var.com.1 is common to all households.
# t is the poverty line (a value or a vector) on the ORIGINAL (not log) scale.
# Mod.var.com.2: vector of length = number of blocks.
# NoClusterBlock: vector of length = number of blocks.
# HH.Size = NULL gives household-level FGT; otherwise person-weighted FGT.
# Returns per-area bootstrap mean, SD and 2.5%/97.5% quantiles for
# FGT0, FGT1 and FGT2.
f2 <- function(obj1,obj2) {
# foreach combiner: stacks each bootstrap replicate as a new matrix row
# (one column per area).
z <- list(
FGT0=rbind(obj1$FGT0,obj2$FGT0),
FGT1=rbind(obj1$FGT1,obj2$FGT1),
FGT2=rbind(obj1$FGT2,obj2$FGT2)
)
}
NoBlock<-length(Mod.var.com.2)
N<-length(ID.D)
# N.c<-as.vector(table(ID.C))
# C<-length(unique(ID.C))
# NOTE(review): as.vector() drops the names of table(), so [unique(ID.C)]
# indexes POSITIONALLY -- this reorders cluster sizes into data order only
# when the cluster IDs are the integers 1..C; confirm for other ID schemes.
N.c<-as.vector(table(ID.C))[unique(ID.C)]
C<-length(unique(ID.C))
# Same positional-indexing assumption for area IDs (1..D expected).
N.d<-as.vector(table(ID.D))[unique(ID.D)]
D<-length(unique(ID.D))
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
# Draw regression coefficients for this replicate.
beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
# Draw cluster effects block by block, each block with its own variance.
# Clusters are assumed ordered so that blocks are contiguous.
# NOTE(review): 1:NoBlock misbehaves if NoBlock == 0 -- seq_len would be
# safer; confirm NoBlock >= 1 always holds.
adj.eta.l<-list()
for(i in 1:NoBlock) adj.eta.l[[i]]<-rnorm(NoClusterBlock[i],0,sqrt(Mod.var.com.2[i]))
eta.l<-c(unlist(adj.eta.l))
# Household-level errors (common variance).
eps.l<-rnorm(N,0,sqrt(var.com.1))
# Linear predictor on the log scale; t(beta.l) resolves to base::t because
# the poverty-line argument `t` is not a function.
if (is.null(X.U)) z.l<-cbind(rep(1,N))%*%t(beta.l)+rep(eta.l,N.c)+eps.l
if (! is.null(X.U)) z.l<-as.matrix(cbind(rep(1,N),X.U))%*%t(beta.l)+rep(eta.l,N.c)+eps.l # z.l is in logarithm scale
y.l<-exp(z.l) # y.l is in original scale
if (is.null(HH.Size)) {
# Unweighted FGT(alpha): area mean of the poverty indicator times the
# normalized shortfall raised to alpha (0 = headcount, 1 = gap, 2 = severity).
index.0<-ifelse(y.l<t,1,0)*((t-y.l)/t)^0
index.1<-ifelse(y.l<t,1,0)*((t-y.l)/t)^1
index.2<-ifelse(y.l<t,1,0)*((t-y.l)/t)^2
FGT0<-tapply(index.0,ID.D,mean)
FGT1<-tapply(index.1,ID.D,mean)
FGT2<-tapply(index.2,ID.D,mean)
}
if (! is.null(HH.Size)) {
# Person-weighted FGT(alpha): household contributions weighted by size,
# divided by the total number of persons in the area.
index.0<-HH.Size*ifelse(y.l<t,1,0)*((t-y.l)/t)^0
index.1<-HH.Size*ifelse(y.l<t,1,0)*((t-y.l)/t)^1
index.2<-HH.Size*ifelse(y.l<t,1,0)*((t-y.l)/t)^2
FGT0<-tapply(index.0,ID.D,sum)/tapply(HH.Size,ID.D,sum)
FGT1<-tapply(index.1,ID.D,sum)/tapply(HH.Size,ID.D,sum)
FGT2<-tapply(index.2,ID.D,sum)/tapply(HH.Size,ID.D,sum)
}
list(FGT0=FGT0,FGT1=FGT1,FGT2=FGT2)
}
# Bootstrap summaries per area: mean, SD and 95% interval bounds.
F0.F11<-colMeans(r.FGT$FGT0)
F0.F11.MSE<-apply(r.FGT$FGT0,2,sd)
F0.F11.Q.02.5<-apply(r.FGT$FGT0,2,function(x) quantile(x,0.025,na.rm=TRUE))
F0.F11.Q.97.5<-apply(r.FGT$FGT0,2,function(x) quantile(x,0.975,na.rm=TRUE))
F1.F11<-colMeans(r.FGT$FGT1)
F1.F11.MSE<-apply(r.FGT$FGT1,2,sd)
F1.F11.Q.02.5<-apply(r.FGT$FGT1,2,function(x) quantile(x,0.025,na.rm=TRUE))
F1.F11.Q.97.5<-apply(r.FGT$FGT1,2,function(x) quantile(x,0.975,na.rm=TRUE))
F2.F11<-colMeans(r.FGT$FGT2)
F2.F11.MSE<-apply(r.FGT$FGT2,2,sd)
F2.F11.Q.02.5<-apply(r.FGT$FGT2,2,function(x) quantile(x,0.025,na.rm=TRUE))
F2.F11.Q.97.5<-apply(r.FGT$FGT2,2,function(x) quantile(x,0.975,na.rm=TRUE))
list(F0.F11=F0.F11,F0.F11.MSE=F0.F11.MSE,F0.F11.Q.02.5=F0.F11.Q.02.5,F0.F11.Q.97.5=F0.F11.Q.97.5,
F1.F11=F1.F11,F1.F11.MSE=F1.F11.MSE,F1.F11.Q.02.5=F1.F11.Q.02.5,F1.F11.Q.97.5=F1.F11.Q.97.5,
F2.F11=F2.F11,F2.F11.MSE=F2.F11.MSE,F2.F11.Q.02.5=F2.F11.Q.02.5,F2.F11.Q.97.5=F2.F11.Q.97.5)
}
# -------------------------------------------------------------------------------#
MELL.NPB.HM.2L.FGT.Estimator<- function(beta,var.beta,eta.s,eps.s,var.com.1,var.com.2,Mod.var.com.2,NoClusterBlock,ID.C.s,ID.D,ID.C,X.U,t,HH.Size,No.Boot,residual.1=c("unconditional","conditional")){
# Non-parametric Bootstrap procedure (ELL, 2002,2003)
# This function is for estimating Poverty Indicators
# Heteroskedastic error variances based on logistic function
# t is here a value but need to be a vector
# Conditional: Draw individual level effects for a population cluster from the sample cluster whose random effect is randomly drawn for the population cluster
# Unconditional: Draw without restriction of drawing cluster random effects
# eta.s should be ordered in ID.C.s
# eps.s and sig2.eps.s should be ordered in ID.C.s
# Sample and population data sets should be ordered by area and clusters
#
# Arguments (as used below):
#   beta, var.beta       : fixed-effect estimates and their covariance (re-drawn each replicate)
#   eta.s, eps.s         : sample cluster-level and individual-level residuals
#   var.com.1, var.com.2 : individual- and cluster-level variance components
#   Mod.var.com.2        : block-specific cluster variances (length = number of blocks)
#   NoClusterBlock       : number of population clusters in each block
#   ID.C.s / ID.D / ID.C : sample cluster IDs / population area IDs / population cluster IDs
#   X.U                  : population design matrix (intercept added below)
#   t                    : poverty line.  Note: this argument shadows base::t only for
#                          non-call uses; the call t(beta.l) below still resolves to
#                          the transpose function (R skips non-function bindings in
#                          call position).
#   HH.Size              : household sizes, or NULL for unweighted indicators
#   No.Boot              : number of bootstrap replicates
# Requires foreach + iterators (icount) with a registered %dopar% backend, and mvtnorm.
# NOTE(review): RNG inside %dopar% is not reproducibly seeded (no doRNG), so results
# can differ between runs/backends even after set.seed().
f2 <- function(obj1,obj2) {
# f2 is for single quantile
# foreach combiner: stacks the FGT0/FGT1/FGT2 rows of successive bootstrap replicates.
z <- list(
FGT0=rbind(obj1$FGT0,obj2$FGT0),
FGT1=rbind(obj1$FGT1,obj2$FGT1),
FGT2=rbind(obj1$FGT2,obj2$FGT2)
)
}
# Population bookkeeping: N individuals; N.d per area; N.c per cluster; C clusters.
# NOTE(review): N.d is computed but never used below.
N<-length(ID.D)
N.d<-as.vector(table(ID.D))[unique(ID.D)]
N.c<-as.vector(table(ID.C))[unique(ID.C)]
C<-length(unique(ID.C))
# standardized the residuals using the individual specific residuals
# eps.std<-eps.s/sqrt(sig2.eps.s)-mean(eps.s/sqrt(sig2.eps.s))
# Rescale raw residuals so their empirical second moment matches the estimated
# variance components: eta.s.scl has variance var.com.2, eps.s.scl has var.com.1.
scale.2<-sqrt(var.com.2/(sum(eta.s^2)/(length(eta.s)-1)))
eta.s.scl<-eta.s*scale.2
scale.1<-sqrt(var.com.1/(sum(eps.s^2)/(length(eps.s)-1)))
eps.s.scl<-eps.s*scale.1
# M2.eta.s<-u/sqrt(est.sigma2.2.HT)*sqrt(M2.est.sigma2.2)
NoBlock<-length(Mod.var.com.2)
r.FGT <- foreach(icount(No.Boot), .combine=f2) %dopar% {
# One bootstrap replicate: draw fixed effects from their sampling distribution.
beta.l<-mvtnorm::rmvnorm(1,beta,var.beta)
if (residual.1=="unconditional") {
adj.eta.l<-list()
#for(i in 1:NoBlock) adj.eta.l[[i]]<-sample(eta.s/sqrt(var.com.2)*sqrt(Mod.var.com.2[i]),NoClusterBlock[i],replace=TRUE)
# Standardize the scaled cluster effects to unit variance, then rescale to the
# block-specific variance before resampling with replacement.
for(i in 1:NoBlock) adj.eta.l[[i]]<-sample(eta.s.scl/sqrt(var.com.2)*sqrt(Mod.var.com.2[i]),NoClusterBlock[i],replace=TRUE)
eta.l<-c(unlist(adj.eta.l))
eps.l<-sample(eps.s.scl,N,replace=TRUE)
}
if (residual.1=="conditional") {
adj.eta.l<-list()
# Draw one sample cluster per population cluster; individual residuals are then
# resampled only from the drawn cluster's residuals (the "conditional" scheme).
ID.C.sampling<-sample(c(1:length(unique(ID.C.s))),C,replace=TRUE)
eta.l.origin<-eta.s.scl[ID.C.sampling]
for(i in 1:NoBlock) adj.eta.l[[i]]<-sample(eta.l.origin/sqrt(var.com.2)*sqrt(Mod.var.com.2[i]),NoClusterBlock[i],replace=TRUE)
eta.l<-c(unlist(adj.eta.l))
ID.C.sampled<-unique(ID.C.s)[ID.C.sampling]
eps.l<-NULL
for (c in 1:C){
eps.U<-sample(eps.s.scl[ID.C.s==ID.C.sampled[c]],N.c[c],replace=TRUE)
# NOTE(review): growing eps.l with c() in a loop is O(C^2); consider preallocating.
eps.l<-c(eps.l,eps.U)
}
}
# Simulated log-welfare for the whole population: intercept + X*beta + cluster
# effect (repeated over each cluster's N.c members) + individual residual.
z.l<-cbind(rep(1,N),X.U)%*%t(beta.l)+rep(eta.l,N.c)+eps.l
y.l<-exp(z.l) # y.l is in original scale
# FGT(alpha) poverty indicators by area, alpha = 0, 1, 2 (person-level version).
if (is.null(HH.Size)) {
index.0<-ifelse(y.l<t,1,0)*((t-y.l)/t)^0
index.1<-ifelse(y.l<t,1,0)*((t-y.l)/t)^1
index.2<-ifelse(y.l<t,1,0)*((t-y.l)/t)^2
FGT0<-tapply(index.0,ID.D,mean)
FGT1<-tapply(index.1,ID.D,mean)
FGT2<-tapply(index.2,ID.D,mean)
}
# Household-size-weighted version when HH.Size is supplied.
if (! is.null(HH.Size)) {
index.0<-HH.Size*ifelse(y.l<t,1,0)*((t-y.l)/t)^0
index.1<-HH.Size*ifelse(y.l<t,1,0)*((t-y.l)/t)^1
index.2<-HH.Size*ifelse(y.l<t,1,0)*((t-y.l)/t)^2
FGT0<-tapply(index.0,ID.D,sum)/tapply(HH.Size,ID.D,sum)
FGT1<-tapply(index.1,ID.D,sum)/tapply(HH.Size,ID.D,sum)
FGT2<-tapply(index.2,ID.D,sum)/tapply(HH.Size,ID.D,sum)
}
list(FGT0=FGT0,FGT1=FGT1,FGT2=FGT2)
}
# Per-area point estimates (bootstrap means), MSE proxies (bootstrap SDs) and
# 95% percentile intervals for each FGT indicator.
F0.F11<-colMeans(r.FGT$FGT0)
F0.F11.MSE<-apply(r.FGT$FGT0,2,sd)
F0.F11.Q.02.5<-apply(r.FGT$FGT0,2,function(x) quantile(x,0.025,na.rm=TRUE))
F0.F11.Q.97.5<-apply(r.FGT$FGT0,2,function(x) quantile(x,0.975,na.rm=TRUE))
F1.F11<-colMeans(r.FGT$FGT1)
F1.F11.MSE<-apply(r.FGT$FGT1,2,sd)
F1.F11.Q.02.5<-apply(r.FGT$FGT1,2,function(x) quantile(x,0.025,na.rm=TRUE))
F1.F11.Q.97.5<-apply(r.FGT$FGT1,2,function(x) quantile(x,0.975,na.rm=TRUE))
F2.F11<-colMeans(r.FGT$FGT2)
F2.F11.MSE<-apply(r.FGT$FGT2,2,sd)
F2.F11.Q.02.5<-apply(r.FGT$FGT2,2,function(x) quantile(x,0.025,na.rm=TRUE))
F2.F11.Q.97.5<-apply(r.FGT$FGT2,2,function(x) quantile(x,0.975,na.rm=TRUE))
list(F0.F11=F0.F11,F0.F11.MSE=F0.F11.MSE,F0.F11.Q.02.5=F0.F11.Q.02.5,F0.F11.Q.97.5=F0.F11.Q.97.5,
F1.F11=F1.F11,F1.F11.MSE=F1.F11.MSE,F1.F11.Q.02.5=F1.F11.Q.02.5,F1.F11.Q.97.5=F1.F11.Q.97.5,
F2.F11=F2.F11,F2.F11.MSE=F2.F11.MSE,F2.F11.Q.02.5=F2.F11.Q.02.5,F2.F11.Q.97.5=F2.F11.Q.97.5)
}
# -------------------------------------------------------------------------------
# Summarise Monte-Carlo simulation results against the known truth.
# Inputs are replicate-by-area matrices (rows = Monte-Carlo replicates,
# columns = small areas):
#   True.Value.object   - true area values per replicate
#   Estimate.object     - point estimates per replicate
#   Estimate.object.MSE - estimated MSEs per replicate
# Returns a list of per-area and averaged diagnostics: relative bias (RB),
# absolute relative bias (ARB) of estimates and of MSE estimates, (R)RMSE of
# the MSE estimator, 95% normal-theory coverage rate (CR) and confidence
# interval width (CIW).  (The unused local No.Area was removed.)
simulation.Results.Robust.Work<-function(True.Value.object,Estimate.object,Estimate.object.MSE){
True.Area.Mean<-apply(True.Value.object,2,mean)
# Empirical ("true") MSE of the estimator: across-replicate variance of truth.
True.Area.Mean.MSE<-apply(True.Value.object,2,var)
Estimate.Mean<-apply(Estimate.object,2,mean)
Estimate.Mean.MSE<-apply(Estimate.object.MSE,2,mean)
# Signed relative bias of the point estimates and of the MSE estimates.
RB.Estimate.Mean<-(Estimate.Mean-True.Area.Mean)/True.Area.Mean
RB.Estimate.Mean.MSE<-(Estimate.Mean.MSE-True.Area.Mean.MSE)/True.Area.Mean.MSE
Average.RB.Estimate.Mean<-mean(RB.Estimate.Mean)
Average.RB.Estimate.Mean.MSE<-mean(RB.Estimate.Mean.MSE)
# Absolute relative bias.
ARB.Estimate.Mean<-abs(Estimate.Mean-True.Area.Mean)/True.Area.Mean
ARB.Estimate.Mean.MSE<-abs(Estimate.Mean.MSE-True.Area.Mean.MSE)/True.Area.Mean.MSE
Average.ARB.Estimate.Mean<-mean(ARB.Estimate.Mean)
Average.ARB.Estimate.Mean.MSE<-mean(ARB.Estimate.Mean.MSE)
# Hoist the replicated truth matrix shared by the RMSE and RRMSE formulas
# (it was built twice in the original).
true.mse.mat<-matrix(True.Area.Mean.MSE,nrow=nrow(Estimate.object.MSE),ncol=ncol(Estimate.object.MSE),byrow=TRUE)
mse.sq.err<-colMeans((Estimate.object.MSE-true.mse.mat)^2)
RMSE.Estimate.Mean.MSE<-sqrt(mse.sq.err)
RRMSE.Estimate.Mean.MSE<-sqrt(mse.sq.err/True.Area.Mean.MSE)
Average.RMSE.Estimate.Mean.MSE<-mean(RMSE.Estimate.Mean.MSE)
Average.RRMSE.Estimate.Mean.MSE<-mean(RRMSE.Estimate.Mean.MSE)
# 95% normal-theory interval: estimate +/- 1.96 * sqrt(estimated MSE).
half.width<-1.96*sqrt(Estimate.object.MSE)
IND<-(True.Value.object>=(Estimate.object-half.width) & True.Value.object<=(Estimate.object+half.width))*1
CR<-apply(IND,2,mean)
Average.CR<-mean(CR)
# Interval width is simply twice the half-width, averaged over replicates.
CIW<-apply(2*half.width,2,mean)
Average.CIW<-mean(CIW)
list(True.Area.Mean.MSE=True.Area.Mean.MSE,Estimate.Mean.MSE=Estimate.Mean.MSE,
RB.Estimate.Mean=RB.Estimate.Mean,Average.RB.Estimate.Mean=Average.RB.Estimate.Mean,
RB.Estimate.Mean.MSE=RB.Estimate.Mean.MSE,Average.RB.Estimate.Mean.MSE=Average.RB.Estimate.Mean.MSE,
ARB.Estimate.Mean=ARB.Estimate.Mean,Average.ARB.Estimate.Mean=Average.ARB.Estimate.Mean,
ARB.Estimate.Mean.MSE=ARB.Estimate.Mean.MSE,Average.ARB.Estimate.Mean.MSE=Average.ARB.Estimate.Mean.MSE,
RMSE.Estimate.Mean.MSE=RMSE.Estimate.Mean.MSE,Average.RMSE.Estimate.Mean.MSE=Average.RMSE.Estimate.Mean.MSE,RRMSE.Estimate.Mean.MSE=RRMSE.Estimate.Mean.MSE,
Average.RRMSE.Estimate.Mean.MSE=Average.RRMSE.Estimate.Mean.MSE,CR=CR,Average.CR=Average.CR,CIW=CIW,Average.CIW=Average.CIW)
}
# -------------------------------------------------------------------------------
# Simulation Study
# -------------------------------------------------------------------------------
# Population 1 : Three-Level mean model with Normal distributed random errors
# -------------------------------------------------------------------------------
# Population layout: 75 areas, 5 areas at each cluster count 15..29 (so 75
# cluster counts in total), with household-per-cluster counts in 96..120.
# Sigma holds the three variance components (ordering as expected by MCS.MELL).
Area=75
Cluster.Area=rep(c(15:29),each=5)
HH.Cluster=c(rep(c(96:100),3),rep(c(101:105),3),rep(c(106:110),3),rep(c(111:115),3),rep(c(116:120),3))
Mu=20
Sigma<-c(0.80,0.15,0.05)
No.Area<-Area
# -------------------------------------------------------------------------------#
# Sample design: 2-4 clusters sampled per area, 10 households per sampled cluster.
Cluster.Area.s<-rep(c(2:4),25)
HH.Cluster.s<-10
D=c(1:Area)
quant<-c(0.10,0.25,0.50,0.75,0.90)
# Five blocks of 15 areas each (presumably used for block-specific variances).
Block.ID<-rep(1:5,each=15)
# -------------------------------------------------------------------------------
# Simulation for Population 1: Three-level Normal Population - Area specific Mean
# -------------------------------------------------------------------------------
# The same seed is set before every run so each ELL variant is evaluated on
# identical simulated populations and samples, making results comparable.
# NOTE(review): NoSim=5 / No.Boot=5 are demonstration sizes only - far too
# small for stable Monte-Carlo summaries.
set.seed(1000)
Sce1.Mean.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.Mean.Opt.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("Opt.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.Mean.Cons.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("Cons.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.Mean.MELL1.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("MELL1.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.Mean.MELL2.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("MELL2.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.Mean.MELL3.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("MELL3.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.Mean.ELL.3L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("ELL.3L"),quant,NoSim=5,No.Boot=5)
# -------------------------------------------------------------------------------
# Simulation for Population 1: Three-level Normal Population - Area-specific DF
# -------------------------------------------------------------------------------
# Same Population 1 setup as above; the target parameter is now the
# area-specific distribution function (Parameter="DF"), evaluated at the
# quantiles in `quant`.  The seed is reset before every variant so all
# methods see identical simulated data.
set.seed(1000)
Sce1.DF.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.DF.Opt.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("Opt.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.DF.Cons.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("Cons.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.DF.MELL1.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("MELL1.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.DF.MELL2.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("MELL2.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.DF.MELL3.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("MELL3.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce1.DF.ELL.3L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("ELL.3L"),quant,NoSim=5,No.Boot=5)
# -------------------------------------------------------------------------------
# Population 2: Two-Level mean model with Normally distributed Random Errors
# -------------------------------------------------------------------------------
# Same area/cluster/household layout as Population 1, but Sigma now holds only
# two variance components (a two-level model).
# NOTE(review): Block.ID is not redefined here - the value assigned for
# Population 1 (rep(1:5, each = 15)) is silently reused in the calls below.
Area=75
Cluster.Area=rep(c(15:29),each=5)
HH.Cluster=c(rep(c(96:100),3),rep(c(101:105),3),rep(c(106:110),3),rep(c(111:115),3),rep(c(116:120),3))
Mu=20
Sigma<-c(0.80,0.20)
No.Area<-Area
# -------------------------------------------------------------------------------#
Cluster.Area.s<-rep(c(2:4),25)
HH.Cluster.s<-10
D=c(1:Area)
quant<-c(0.10,0.25,0.50,0.75,0.90)
# -------------------------------------------------------------------------------
# Simulation for Population 2: Two-level Normal Population - NULL Model - Area specific Mean
# -------------------------------------------------------------------------------
# Seed reset before each variant keeps the simulated data identical across methods.
set.seed(1000)
Sce2.Mean.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.Mean.Opt.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("Opt.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.Mean.Cons.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("Cons.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.Mean.MELL1.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("MELL1.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.Mean.MELL2.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("MELL2.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.Mean.MELL3.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("MELL3.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.Mean.ELL.3L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("Mean"),ELL.Method=c("ELL.3L"),quant,NoSim=5,No.Boot=5)
# -------------------------------------------------------------------------------
# Simulation for Population 2: Two-level Normal Population - NULL MOdel - Area-specific DF
# -------------------------------------------------------------------------------
# Same Population 2 setup; target parameter is the area-specific distribution
# function (Parameter="DF") at the quantiles in `quant`.
set.seed(1000)
Sce2.DF.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.DF.Opt.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("Opt.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.DF.Cons.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("Cons.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.DF.MELL1.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("MELL1.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.DF.MELL2.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("MELL2.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.DF.MELL3.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("MELL3.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce2.DF.ELL.3L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("DF"),ELL.Method=c("ELL.3L"),quant,NoSim=5,No.Boot=5)
# -------------------------------------------------------------------------------
# Population 3 : Three Level full model with log-Normal Population
# -------------------------------------------------------------------------------
# Mu is now a coefficient vector (intercept + two slopes) matching the
# two-column design matrix X generated below; Sigma again holds three
# variance components.
Area=75
Cluster.Area=rep(c(15:29),each=5)
HH.Cluster=c(rep(c(96:100),3),rep(c(101:105),3),rep(c(106:110),3),rep(c(111:115),3),rep(c(116:120),3))
Mu<-c(6,0.5,-0.55)
Sigma<-c(0.20,0.035,0.015)
#Sigma<-c(0.50,0.20,0.05)
N<-sum(Cluster.Area*HH.Cluster)
No.Area<-Area
# Generate Design Matrix (Fixed)
# NOTE(review): X is drawn before any set.seed(), so the "fixed" design
# differs between sessions; also rmvnorm is called without the mvtnorm::
# prefix used elsewhere, so mvtnorm must be attached for this to work.
Sigma.Mu<-c(0.5,0.75)
Sigma2<-matrix(c(1.50,0.10,0.10,0.95),2,2,byrow=TRUE)
X<-rmvnorm(N,Sigma.Mu,Sigma2)
# -------------------------------------------------------------------------------#
Cluster.Area.s<-rep(c(2:4),25)
HH.Cluster.s<-10
D=c(1:Area)
# For FGT indicators only two poverty-line quantiles are evaluated.
quant<-c(0.10,0.25)
# -------------------------------------------------------------------------------
# Simulation for Population 3: Three-level log-normal Population - Area-specific FGT
# -------------------------------------------------------------------------------
# NOTE(review): Block.ID is again reused from Population 1.
set.seed(1000)
Sce3.FGT.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("FGT"),ELL.Method=c("ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce3.FGT.Opt.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("FGT"),ELL.Method=c("Opt.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce3.FGT.Cons.ELL.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("FGT"),ELL.Method=c("Cons.ELL.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce3.FGT.MELL1.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("FGT"),ELL.Method=c("MELL1.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce3.FGT.MELL2.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("FGT"),ELL.Method=c("MELL2.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce3.FGT.MELL3.2L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("FGT"),ELL.Method=c("MELL3.2L"),quant,NoSim=5,No.Boot=5)
set.seed(1000)
Sce3.FGT.ELL.3L<-MCS.MELL(Area,Cluster.Area,HH.Cluster,Mu,Sigma,Model=c("Log-Normal"),Cluster.Area.s,HH.Cluster.s,Block.ID,Var.Com=c("REML"),
Parameter=c("FGT"),ELL.Method=c("ELL.3L"),quant,NoSim=5,No.Boot=5)
# -------------------------------------------------------------------------------
|
d0f849012f37ae1f41966ba887c34ef903480958 | 210d6be771cd1c18ed615bf5d6b6f22aa007180e | /R/bin_diff.R | 56b4406559161d799d11c567d6c39ea1cf9e8058 | [
"MIT"
] | permissive | BlishLab/scriabin | 7be4c911f918f723e2e12f1d00fe5bc28ef2d377 | adf16451c870d710b4a0152f55cb25a941bf15ee | refs/heads/main | 2023-07-13T00:20:22.379959 | 2023-06-23T21:12:21 | 2023-06-23T21:12:21 | 438,698,206 | 75 | 7 | NOASSERTION | 2023-08-25T15:04:32 | 2021-12-15T16:29:22 | R | UTF-8 | R | false | false | 10,710 | r | bin_diff.R |
#' Identify most perturbed bins across >2 samples
#'
#' For every ordered sender-bin/receiver-bin pair, pools the cell-cell
#' interaction scores from each sample's interaction graph and runs a
#' Kruskal-Wallis test for differences between samples.
#'
#' @param seu A binned Seurat object with bin identities in the "bins" column of meta.data
#' @param interaction_graphs List of summarized interaction graphs built from `BuildPriorInteraction` or `BuiltWeightedInteraction`, one per sample
#'
#' @return A list with one element per sender-bin/receiver-bin combination
#'   (in `expand.grid` order over bin indices), each a numeric vector of
#'   length 4: Kruskal-Wallis p-value, test statistic, number of cell-cell
#'   scores tested, and the SD of the per-sample mean interaction scores.
#' @export
#'
#' @examples
PerturbedBins <- function(seu, interaction_graphs = NULL) {
  bins <- unique(seu$bins)
  nbin <- length(bins)
  # All ordered sender/receiver bin pairs, as indices into `bins`.
  bin_combos <- expand.grid(seq_len(nbin), seq_len(nbin))
  bin_pvals <- pblapply(seq_len(nrow(bin_combos)), function(x) {
    # Map pair indices back to the actual bin labels.  The original compared
    # the raw index (as character) against seu$bins, which silently matches
    # no cells whenever bin labels are not exactly "1".."nbin".
    sbin <- bins[bin_combos[x, 1]]
    rbin <- bins[bin_combos[x, 2]]
    scells <- colnames(seu)[seu$bins == sbin]
    rcells <- colnames(seu)[seu$bins == rbin]
    # Long data.frame of scores: `value` = interaction score, `L1` = sample name.
    bin_ig <- reshape2::melt(lapply(interaction_graphs, function(g) {
      as.vector(g[rownames(g) %in% scells, colnames(g) %in% rcells])
    }))
    # Do the samples differ in their interaction-score distributions?
    res <- kruskal.test(value ~ L1, data = bin_ig)
    c(res$p.value,
      res$statistic,
      nrow(bin_ig),
      # Effect-size proxy: SD of the per-sample mean scores.
      sd(aggregate(bin_ig$value, by = list(bin_ig$L1), FUN = mean)$x))
  })
  bin_pvals
}
#' Summary plots of perturbed bins
#'
#' @param seu A binned Seurat object with bin identities in the "bin" column of meta.data
#' @param bin_pvals Bin perturbation significance data, ie. the output of `PerturbedBins`
#' @param cell.type.calls Meta.data slot column corresponding to cell type annotations for summarization
#'
#' @return Returns plots of bin-bin perturbation p-values, cross sample communication standard deviation, and summary by cell type or other meta.data of interest
#' @import ggplot2 dplyr cowplot scales
#' @export
#'
#' @examples
PerturbedBinSummary <- function(seu, bin_pvals, cell.type.calls = "celltype.l2") {
  # Unpack the per-bin-pair vectors produced by PerturbedBins:
  # p-value, KW statistic, number of scores tested, SD of per-sample means.
  bin_pvals_p <- unlist(lapply(bin_pvals, function(x) {x[1]}))
  bin_pvals_stat <- unlist(lapply(bin_pvals, function(x) {x[2]}))
  bin_pvals_size <- unlist(lapply(bin_pvals, function(x) {x[3]}))
  bin_pvals_sd <- unlist(lapply(bin_pvals, function(x) {x[4]}))
  # NOTE(review): the next two ggplot objects are neither printed, assigned,
  # nor returned; inside a function they are built and discarded.  Wrap them
  # in print() (or return them) if these diagnostic plots are wanted.
  ggplot(data.frame(x = bin_pvals_size, y = -log(bin_pvals_p)), aes(x=x,y=y)) +
    geom_bin2d(bins=100) + theme_cowplot() + scale_fill_gradient(trans="log", low = "blue", high = "red")
  ggplot(data.frame(x = bin_pvals_size, y = -log(bin_pvals_p), color = bin_pvals_sd), aes(x=x,y=y,color=color)) +
    geom_point(size=0) + theme_cowplot() + scale_color_gradient(trans="log", low = "blue", high = "red")
  #Correct for number of bins as a function of cell type proportion
  # Cross-tabulate cell types vs bins, then label each bin with its most
  # frequent cell type (ties broken at random by sample_n - consumes the RNG).
  nbct <- as.data.frame(table(as.character(seu@meta.data[,cell.type.calls]),as.character(seu$bins)))
  colnames(nbct) <- c("celltype","nb","freq")
  nbct.f <- nbct %>% group_by(nb) %>% top_n(1,freq) %>% sample_n(1) %>% ungroup() %>% mutate_all(as.character)
  expected_sizes <- as.data.frame(table(seu@meta.data[,cell.type.calls]))
  colnames(expected_sizes) <- c("celltype","celln")
  bin_types <- as.data.frame(table(nbct.f$celltype))
  colnames(bin_types) <- c("celltype","binn")
  expected_sizes <- merge(expected_sizes,bin_types, by = "celltype")
  # Linear fit of bin count on cell count: adjusts for abundant cell types
  # occupying more bins than rare ones.
  expected_sizes$corrected <- lm(binn~celln, data = expected_sizes)$fitted.values
  expected_sizes$adjustment_factor <- expected_sizes$corrected/expected_sizes$binn
  # NOTE(review): `sig_bin_combos` is not defined in this function and is not
  # an argument - this relies on a global of that name (apparently the output
  # of BinPostHoc) and will error if it is absent.  It should be passed in
  # explicitly.
  pbins_plot <- as.data.frame(table(sig_bin_combos$send_celltype,sig_bin_combos$rec_celltype)) %>% dplyr::filter(Freq>0)
  colnames(pbins_plot) <- c("send_celltype","rec_celltype","Freq")
  pbins_plot$send_adjustment <- scriabin::mapvalues(pbins_plot$send_celltype,
                                                    from = expected_sizes$celltype,
                                                    to = expected_sizes$adjustment_factor)
  pbins_plot$rec_adjustment <- scriabin::mapvalues(pbins_plot$rec_celltype,
                                                  from = expected_sizes$celltype,
                                                  to = expected_sizes$adjustment_factor)
  pbins_plot$freq_adjusted <- as.numeric(as.character(pbins_plot$Freq))*
    as.numeric(as.character(pbins_plot$send_adjustment))*
    as.numeric(as.character(pbins_plot$rec_adjustment))
  # Final dot plot (returned as the function's value): adjusted counts of
  # perturbed bin pairs by sender/receiver cell type.
  # NOTE(review): prefer FALSE over F in guides(size=F).
  ggplot(pbins_plot, aes(x = rec_celltype, y = send_celltype)) + geom_point(aes(size=freq_adjusted,color=freq_adjusted)) +
    theme_cowplot() + ggpubr::rotate_x_text() +
    scale_color_gradient(low = "lightgrey",high = "red", limits = c(10,100), oob = scales::squish) +
    labs(x = "Receiving celltype", y = "Sender celltype", color = "Number of\nperturbed\nbin pairs") + guides(size=F)
}
#' Perform post-hoc analysis of perturbed bins
#'
#' @param seu A binned Seurat object with bin identities in the "bin" column of meta.data
#' @param bin_pvals Bin perturbation significance data, ie. the output of `PerturbedBins`
#' @param interaction_graphs List of summarized interaction graphs built from `BuildPriorInteraction` or `BuiltWeightedInteraction`
#' @param split.by Meta.data column name indicating how data was split for interaction graph generation
#' @param cell.type.calls Meta.data slot column corresponding to cell type annotations for summarization
#' @param kw_p.value Bin-bin combinations with a KW p value above this threshold will be discarded. Default: 0.001
#' @param bin_sd.quantile Bin-bin combinations with a summarized interaction standard deviation below this quantile will be discarded. Ensures that bin-bin combinations displaying both statistical significance and effect size of perturbation are analyzed. Default: 0 (no effect-size filtering). NOTE(review): this prose previously claimed a default of 0.9, which did not match the function signature.
#'
#' @return Performs Dunn's Kruskal-Wallis multiple comparison post-hoc test to evaluate which samples within perturbed bins are significantly perturbed
#' @import pbapply FSA dplyr
#' @export
#'
#' @examples
BinPostHoc <- function(seu, bin_pvals, interaction_graphs,
                       split.by = "orig.ident", cell.type.calls = "celltype",
                       kw_p.value = 0.001, bin_sd.quantile = 0) {
  # Re-derive the sender/receiver bin-pair grid in the same order used by
  # PerturbedBins, then attach its per-pair statistics.
  nbin = length(unique(seu$bins))
  bin_combos <- expand.grid(1:nbin,1:nbin)
  bins <- unique(seu$bins)
  bin_pvals_p <- unlist(lapply(bin_pvals, function(x) {x[1]}))
  bin_pvals_stat <- unlist(lapply(bin_pvals, function(x) {x[2]}))
  bin_pvals_size <- unlist(lapply(bin_pvals, function(x) {x[3]}))
  bin_pvals_sd <- unlist(lapply(bin_pvals, function(x) {x[4]}))
  #Perform a post-hoc test to identify which groups differ most in each significant bin
  bin_combos$p_vals <- bin_pvals_p
  bin_combos$kw_stat <- bin_pvals_stat
  bin_combos$size <- bin_pvals_size
  bin_combos$sd <- bin_pvals_sd
  # Keep pairs that are both significant (KW p below threshold) and show a
  # large effect (SD above the requested quantile of all pair SDs).
  sig_bin_combos <- bin_combos %>%
    dplyr::filter(p_vals<kw_p.value) %>%
    dplyr::filter(sd>quantile(bin_combos$sd,bin_sd.quantile))
  # NOTE(review): seq_along(1:nrow(sig_bin_combos)) evaluates to 1:2 when the
  # data frame has zero rows (because 1:0 is c(1, 0)); seq_len(nrow(...))
  # would be the safe spelling.
  bin_posthoc <- pblapply(seq_along(1:nrow(sig_bin_combos)), function(x) {
    sbin <- bins[sig_bin_combos[x,1]]
    rbin <- bins[sig_bin_combos[x,2]]
    scells <- colnames(seu)[seu$bins==sbin]
    rcells <- colnames(seu)[seu$bins==rbin]
    bin_ig <- lapply(interaction_graphs, function(x) {
      as.vector(x[rownames(x) %in% scells,colnames(x) %in% rcells])
    })
    # Long format: one row per cell-cell score, labelled by sample of origin.
    bin_ig.df <- data.frame(name = as.factor(unlist(lapply(seq_along(1:length(bin_ig)),
                                                           function(x) {rep(names(bin_ig)[x],lapply(bin_ig,length)[[x]])}))),
                            score=unlist(bin_ig))
    # Dunn's post-hoc test (FSA::dunnTest), Benjamini-Hochberg adjusted; keep
    # only the significant pairwise sample comparisons.
    p <- dunnTest(score~name, data = bin_ig.df, method = "bh")$res %>%
      separate(Comparison, into = c("tp1","tp2"), sep = " - ") %>%
      dplyr::filter(P.adj<0.05)
    bin_means <- aggregate(bin_ig.df$score, by = list(bin_ig.df$name), FUN=mean)
    # bin mean that is furthest from group mean (mean of all timepoints)
    # bin_means$x <- abs(bin_means$x-mean(bin_means$x))
    # bin mean that is furthest from baseline
    bin_means$x <- abs(bin_means$x-bin_means[1,"x"])
    # bin_means %<>% dplyr::filter(Group.1 %in% unique(c(p$tp1,p$tp2))) %>%
    #   top_n(1,x) %>% pull(Group.1)
    # Report the significantly-different sample whose mean deviates most from
    # the baseline (first factor level).
    return(as.character(bin_means %>% dplyr::filter(Group.1 %in% unique(c(p$tp1,p$tp2))) %>%
                          top_n(1,x) %>% pull(Group.1)))
  })
  sig_bin_combos$tp <- (bin_posthoc)
  #some of these bin-bin combos will return zero statistically significant results between timepoints. Remove those.
  sig_bin_combos <- sig_bin_combos[sig_bin_combos$tp %in% unique(seu@meta.data[,split.by]),]
  #now let's transfer these bin numbers to cell type calls
  sig_bin_combos$Var1 <- scriabin::mapvalues(sig_bin_combos$Var1, from = 1:length(bins), to = bins,warn_missing = F)
  sig_bin_combos$Var2 <- scriabin::mapvalues(sig_bin_combos$Var2, from = 1:length(bins), to = bins,warn_missing = F)
  # Label each bin with its most frequent cell type (ties broken at random by
  # sample_n - consumes the RNG, so results are seed-dependent).
  nbct <- as.data.frame(table(as.character(seu@meta.data[,cell.type.calls]),as.character(seu$bins)))
  colnames(nbct) <- c("celltype","nb","freq")
  nbct.f <- nbct %>% group_by(nb) %>% top_n(1,freq) %>% sample_n(1) %>% ungroup() %>% mutate_all(as.character)
  sig_bin_combos$send_celltype <- as.character(scriabin::mapvalues(sig_bin_combos$Var1,
                                                                  from = nbct.f$nb,
                                                                  to = nbct.f$celltype, warn_missing = F))
  sig_bin_combos$rec_celltype <- as.character(scriabin::mapvalues(sig_bin_combos$Var2,
                                                                  from = nbct.f$nb, to = nbct.f$celltype,
                                                                  warn_missing = F))
  return(sig_bin_combos)
}
#' Assign quality scores to each bin
#'
#' For each bin, computes the proportion of each sample's cells belonging to
#' that sample's dominant cell type, then scores the bin as
#' (1 - sd(proportions across samples)) / (number of distinct cell types in
#' the bin), so cell-type-mixed bins and bins whose composition varies
#' between samples score lower.
#'
#' @param seu A binned Seurat object with bin identities in the "bins" column of meta.data
#' @param celltype.calls Name of meta.data column containing the cell type labels to use for connectivity testing. Must be a finer (more granular) cell type label than used for coarse_cell_types in `BinDatasets`.
#' @param split.by Meta.data column name indicating how data was split for interaction graph generation
#'
#' @return A named numeric vector of bin quality scores
#' @import pbapply dplyr
#' @export
#'
#' @examples
ScoreBins <- function(seu, celltype.calls = NULL, split.by = NULL) {
  bins <- as.character(unique(seu$bins))
  # seq_along() rather than 1:length(bins): the latter yields c(1, 0) when
  # there are no bins and would index out of range.
  scores <- unlist(pblapply(seq_along(bins), function(x) {
    meta <- seu@meta.data[seu$bins == bins[x], c(celltype.calls, "bins", split.by)]
    colnames(meta) <- c("celltype", "bins", "split")
    # Cells per sample, and per sample x cell type, within this bin.
    mp_total <- meta %>% dplyr::count(split)
    mp_count <- meta %>% dplyr::count(split, celltype)
    n_unique <- length(unique(mp_count$celltype))
    # Proportion of each sample's cells taken up by its dominant cell type.
    mp_count <- mp_count %>%
      group_by(split) %>% dplyr::slice(which.max(n)) %>%
      left_join(., mp_total, by = "split") %>% dplyr::mutate(prop = n.x / n.y) %>%
      pull(prop)
    # NOTE(review): if a bin contains cells from only one sample, sd() of a
    # length-1 vector is NA and the bin's score propagates as NA.
    (1 - sd(mp_count)) / n_unique
  }))
  names(scores) <- bins
  return(scores)
}
|
7605d0a3d80058c0f6f1d5b4428140479eb6369e | 0b8a6b48c1bb2d10cca4d4d02b1e3da9331e9252 | /plot4.R | 7639d24be7bed06d6d0eb338e3504fd29c3d76ff | [] | no_license | alexandrateste/ExData_Plotting1 | 87576c67b5c0913a351d151d4eb76d5af62a5757 | 3f27a7922efbb8fffdb28fee33d131c24b919e1f | refs/heads/master | 2021-01-15T14:53:34.398614 | 2015-04-10T05:34:27 | 2015-04-10T05:34:27 | 33,651,390 | 0 | 0 | null | 2015-04-09T06:02:50 | 2015-04-09T06:02:50 | null | UTF-8 | R | false | false | 1,755 | r | plot4.R | # Assumption: the zip file is in the working directory
# Assumption: the zip file is in the working directory.
# unzip() extracts the archive and returns the path(s) of the extracted
# file(s); read from that path rather than hard-coding the file name twice
# (the original assigned ffile and then never used it).
ffile <- unzip("./exdata-data-household_power_consumption.zip")
# Extract only rows for which Date is in [01/02/2007, 02/02/2007]
# Requires: install.packages("sqldf")
library(sqldf)
alldata <- read.csv.sql(ffile[1], sep = ';',
                        sql = "select * from file where Date in ('1/2/2007','2/2/2007')")
# Replace empty values and "?" (the dataset's missing-data marker) by NA.
alldata[alldata == "?"] <- NA
alldata[alldata == ""] <- NA
# If "?" values were present, the measurement columns come back as character;
# coerce the plotted columns to numeric (a no-op when already numeric).
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
alldata[num_cols] <- lapply(alldata[num_cols], function(col) as.numeric(as.character(col)))
# Concatenate the Date and Time columns and parse as a date-time class.
alldata$DateTime <- strptime(paste(alldata$Date, alldata$Time, sep = " "),
                             format = "%d/%m/%Y %H:%M:%S")
# Plot to plot4.png (480 x 480 px), 4 panels:
# 1. Time series of Global active power
# 2. Time series of Voltage
# 3. Time series of the 3 energy sub-meterings
# 4. Time series of Global reactive power
# (The original wrapped the plots in with(alldata, {...}) but still used the
# alldata$ prefix throughout; the redundant wrapper is dropped.)
png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
plot(alldata$DateTime, alldata$Global_active_power, type = 'l', xlab = "", ylab = "Global Active Power")
plot(alldata$DateTime, alldata$Voltage, type = 'l', xlab = "datetime", ylab = "Voltage")
plot(alldata$DateTime, alldata$Sub_metering_1, type = 'l', col = 'black', xlab = "", ylab = "Energy sub metering")
lines(alldata$DateTime, alldata$Sub_metering_2, type = 'l', col = 'red')
lines(alldata$DateTime, alldata$Sub_metering_3, type = 'l', col = 'blue')
legend("topright", lty = c(1, 1), col = c('black', 'red', 'blue'),
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), bty = "n")
plot(alldata$DateTime, alldata$Global_reactive_power, type = 'l', xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
c88e1ecdb7dd39fca2c092fa8162050a5f84a18c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ISLR/examples/Portfolio.Rd.R | 9b1ad042189187a113ccf5d2261e2b6a97e199b0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | Portfolio.Rd.R | library(ISLR)
### Name: Portfolio
### Title: Portfolio Data
### Aliases: Portfolio
### Keywords: datasets
### ** Examples
summary(Portfolio)
# attach() modifies the global search path and can mask other objects;
# with() gives the same plot without that side effect.
with(Portfolio, plot(X, Y))
|
6a7abbbc13e6b347ca10d4adb0f265826b167a1a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RNCEP/examples/NCEP.uv.revert.Rd.R | e14f65ccbab2bb19e69680cbcdd555032a4a399d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 242 | r | NCEP.uv.revert.Rd.R | library(RNCEP)
### Name: NCEP.uv.revert
### Title: Reverts speed and direction to U and V components.
### Aliases: NCEP.uv.revert
### ** Examples
library(RNCEP)
## Using NCEP.uv.revert ##
# Convert a wind of speed 12 with direction 225 degrees (degrees, not
# radians) back into its u and v components; see ?NCEP.uv.revert for the
# exact direction convention used.
NCEP.uv.revert(spd=12, dir=225, radians=FALSE)
|
95f78160bb47d191cf3879b3ccb1264c8bb2b5f6 | 3a2d9786a6ef824d60e840012979a37f73a384d2 | /scratchpad.R | 1f275c8d1f67bf490ae6d2bdb5c5c306d530bc2a | [] | no_license | ms609/hyoliths | 8321413f0139e1383c8713ad156d1cdb2ef500ff | 5ddfe4ea2a4ee62d662af28e206365306bf79ffb | refs/heads/master | 2021-08-16T07:56:20.694322 | 2018-10-04T09:55:54 | 2018-10-04T09:55:54 | 119,563,670 | 1 | 1 | null | 2018-10-04T09:55:54 | 2018-01-30T16:35:08 | Turing | UTF-8 | R | false | false | 1,943 | r | scratchpad.R | ```{R tnt-validate-results, echo=FALSE, eval=FALSE}
# Full text of the TNT output log; everything below indexes into it by line number.
tntLog <- readLines('tnt.log')
# Line numbers at which each per-tree adjusted-homoplasy score table appears.
scoreStarts <- which(grepl("Tree \\d+, total adjusted homoplasy", tntLog))
TntScores <- function (start) {
  # Parse one per-tree score table from the global `tntLog`, beginning at log
  # line `start`.  The table appears to print characters in groups of five,
  # with a score row every third line and the character indices on the line
  # immediately above each score row.
  # NOTE(review): relies on the globals `tntLog` and `n_char` being defined
  # in the calling environment.
  scoreLines <- start + (3 * seq_len(ceiling(n_char / 5)))
  characterLines <- scoreLines - 1L
  firstBlank <- tntLog[characterLines] == " "
  if (any(firstBlank)) {
    # Truncate at the FIRST blank row.  which() can return several indices;
    # the original passed the whole vector to seq_len(), which errors (or
    # warns and uses only the first element in old R) when more than one
    # row is blank - so take element [1] explicitly.
    beforeEnd <- seq_len(which(firstBlank)[1] - 1L)
    scoreLines <- scoreLines[beforeEnd]
    characterLines <- characterLines[beforeEnd]
  }
  # Numeric scores, named by the character indices from the preceding lines.
  scores <- unlist(sapply(tntLog[scoreLines], function (line) {
    as.double(unlist(strsplit(line, "\\s+"))[-1])
  }))
  names(scores) <- unlist(sapply(tntLog[characterLines], function (line) {
    as.integer(unlist(strsplit(line, "\\s+"))[-1])
  }))
  # Return:
  scores
}
# Exploratory scratch work (the enclosing chunk is eval=FALSE): cross-check
# TNT's reported scores against scores recomputed in R.
tntScores <- lapply(scoreStarts, TntScores)
fullScores <- as.double(unlist(lapply(tntScores, sum)))
# Score that TNT printed on the line preceding each table (first number found).
reportedScores <- as.double(gsub(".*?(\\d+(\\.\\d+)?).*", "\\1", tntLog[scoreStarts - 1L]))
k <- 2L
# Presumably inverts an implied-weights rescaling (fit = steps / (steps + k))
# to recover step counts - TODO confirm against the TNT settings used.
tntSteps <- round(k * tntScores[[1]] / (1 - tntScores[[1]]), 2)
# NOTE(review): exact floating-point comparison; a tolerance (all.equal)
# would be more robust than != against as.integer().
if (all(tntSteps != as.integer(tntSteps))) warning("Step counting error?")
sum(tntSteps)
# Recompute step counts with project helpers (FitchSteps, IWScore, fitch_data,
# tnTrees, kValues are defined elsewhere in the project).
minSteps <- attr(fitch_data, 'min.steps')
weights <- attr(fitch_data, 'weight')
tr <- tnTrees[[1]][[1]]
treeSteps <- rep(FitchSteps(tr, fitch_data) - minSteps, weights)
# Interactive inspection: compare recomputed steps against TNT's.
treeSteps - tntSteps
sum(rep(FitchSteps(tr, fitch_data) - minSteps, weights))
rep(FitchSteps(tr, fitch_data) - minSteps, weights)
IWScore
# Score every tree recovered at concavity i under that same concavity.
lapply(seq_along(tnTrees), function (i) unique(unlist(lapply(tnTrees[[i]], IWScore, fitch_data, concavity=i))))
tntNames <- rep(kValues, lapply(tnTrees, length))
# Score matrix: rows = trees (named by the concavity they came from),
# columns = concavity constants.
scores <- vapply(kValues, function (k) {
  ret <- vapply(unlist(tnTrees, recursive = FALSE), IWScore, double(1), fitch_data, concavity=k)
  names(ret) <- tntNames
  return(ret)
}, double(length(tntNames)))
colnames(scores) <- kValues
bestScore <- apply(scores, 2, min)
# Which trees attain the best score at each concavity constant?
apply(scores, 2, function (col) rownames(scores)[col == min(col)])
```
|
0a13a35f050d7e42a792e810903112689ba8a4cc | c3f84f89131fad2a1967892d4cc9234053275419 | /activity7/activity7.R | b9db2614d1a2a1e0b4be2c2a558494bb374968a5 | [] | no_license | hjwydler/ENVST206 | 87f5e96c021f42fca6b1c311dd1237def234aa2f | eb0aa2f77c2eaa70d7a84ad00ad984bb8eb4a378 | refs/heads/master | 2023-01-19T12:38:48.817123 | 2020-11-24T19:06:45 | 2020-11-24T19:06:45 | 291,063,723 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,678 | r | activity7.R | #package for vector data
library(sp)
#package for reading in spatial data
library(rgdal)
#data manangement package
library(dplyr)
#read in shapefiles
#readOGR in rgdal does this
#ice in 1966
# Fixed: path was "Users/..." (relative); added the leading "/" so it matches
# the absolute paths used for the other layers below.
icetotal <- readOGR("/Users/ginnytw/Desktop/EnvstData/a07/sea_ice")
ice1979 <- readOGR("/Users/ginnytw/Desktop/EnvstData/a07/sea_ice/extent_N_197909_polygon_v3.0")
ice2019 <- readOGR("/Users/ginnytw/Desktop/EnvstData/a07/sea_ice/extent_N_201909_polygon_v3.0")
##############################
####SUMMARY STATISTICS (5)####
##############################
seaiceAll <- readOGR("/Users/ginnytw/Desktop/EnvstData/a07/sea_ice_all")
head(seaiceAll@data)
plot(seaiceAll) #looking from the perspective of the north pole
# Fixed: original `plot(seaiceAll@data$year == 1979,])` was a syntax error;
# subset the spatial object to the 1979 polygons before plotting.
plot(seaiceAll[seaiceAll@data$year == 1979, ])
# Mean extent area by year.
aggregate(seaiceAll@data$area, by=list(seaiceAll@data$year), FUN="mean", na.rm=TRUE)
seaiceAll@proj4string  # fixed typo: was `seaiceaAll`
seaiceAll
# Fixed: `area` is a column of the attribute table, not a free-standing object,
# and the attribute column is a plain vector (no matrix-style comma index).
sum(seaiceAll@data$area[seaiceAll@data$year == 1979])
seaiceYears <- seaiceAll@data$year
seaiceAreas <- seaiceAll@data$area
# NOTE(review): dropped `seaiceAreas <- (area(seaiceAll@data))` — `area()` is
# not defined for a data.frame and the line would both error and clobber the
# working assignment above.
##############################
##########PLOT (5)############
##############################
#map showing sea ice extent in 1979 and 2019
#legend designating ice extent year and colors
plot(ice1979, main = "Arctic Sea Ice Extent 1979 vs 2019", col = "brown1", border="grey50")
plot(ice2019, col = "lightblue2", border = "grey50", add=TRUE)
legend("bottomright",
       c("1979", "2019"),
       col=c("brown1", "lightblue2"),
       pch=19,
       lwd=1,
       bty="n")
|
9f8a09c93428085d7eb5c3e24bf58790f156f305 | ee27d10f44373d68477fa3f3c60ab9cecd1851c8 | /man/okboomer.Rd | 254342b0fddafdaceeabcd04efb9a959c4543253 | [
"MIT"
] | permissive | kjhealy/demog | 9cb4c3428f05c748460974e3f8bb92739aae16b2 | 6af6f100e8f600172b09272ba8b582b88efa5f57 | refs/heads/master | 2020-09-05T09:39:30.794038 | 2019-11-09T01:16:33 | 2019-11-09T01:16:33 | 220,059,824 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 723 | rd | okboomer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{okboomer}
\alias{okboomer}
\title{Birth rates in the US and England/Wales 1938-2015}
\format{A tibble
\describe{
\item{\code{year}}{Year}
\item{\code{month}}{Month}
\item{\code{n_days}}{N days in month}
\item{\code{births}}{N births}
\item{\code{total_pop}}{National Population}
\item{\code{births_pct}}{N births / National Population}
\item{\code{births_pct_day}}{Average number of births per million
people per day, for the observed month}
\item{\code{date}}{Date}
}}
\source{
US Census Bureau and UK ONS
}
\usage{
okboomer
}
\description{
Birth rates in the US and England/Wales 1938-2015
}
\keyword{datasets}
|
74099bc020b1927bcaac977380bb2fc492b8470d | 96a92240ed4a82922bc13d0f93b0959fce2af7fd | /ggROC.R | f587f3844530818818c728e94f195ecf84df9743 | [] | no_license | ajphilpotts/DataVisualisationR | df981430e70687f8a9c00453e5c542024062e24e | 2903aad6cad8cec26d88544ae684b07ea3847367 | refs/heads/master | 2020-04-02T06:19:09.309595 | 2018-10-22T15:41:38 | 2018-10-22T15:41:38 | 154,141,401 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,172 | r | ggROC.R | ggROC_plotROC <- function(dt,title=''){
plot <- ggplot(dt,aes(d=TARGET,m=PREDICTION)) +
geom_abline(slope = 1,intercept = 0,color = "#322332", size=.8) +
geom_roc(color = "#FF0000",size=1) +
# Formatting
ylab('True Positive Rate') +
xlab('False Positive Rate') +
labs(title=title,
subtitle=paste(Sys.Date())) +
scale_y_continuous(labels = scales::percent,
expand = c(0,0),
breaks=pretty_breaks(n=20)) + # Modify xy scales. Moved to (0,0) to remove gap between data and axis
scale_x_continuous(labels = scales::percent,
expand = c(0,0),
breaks=pretty_breaks(n=20)) +
#Theming
theme_minimal() +
theme(plot.caption = element_text(family = "Arial",hjust = 0,size=6),
plot.margin = margin(t=12,r=14,l=10,b=10.5),
plot.title = element_text(face='bold',size=12),
plot.subtitle = element_text(face='italic',size=9),
axis.line = element_line(size=.5),
axis.text.x = element_text(size=9),
axis.ticks = element_blank(),
axis.title.x = element_text(face='italic',margin = margin(t=10,b=5,l=5,r=5)),
axis.title.y = element_text(face='italic',margin = margin(t=5,b=5,l=5,r=10)),
#Legend
legend.position = 'bottom',
legend.direction = 'horizontal',
# legend.key.height = unit(2,'line'),
# legend.key.width=unit(.5,'line'),
legend.title = element_text(face='italic')
# legend.box.margin = margin(l=150)
) +
guides(fill = FALSE)
return(plot)
}
ggROC_pROC <- function(roc,title='',caption=""){
plot <- ggroc(roc,color = "#FF0000",size=1) +
geom_abline(slope = 1,intercept = 1,color = "#322332", size=.8,linetype="dotdash") +
# Formatting
#ylab('True Positive Rate') +
#xlab('False Positive Rate') +
labs(title=title,
subtitle = paste(Sys.Date(),sep=" - "),
caption = caption) +
#scale_y_continuous(breaks=pretty_breaks(n=10)) + # Modify xy scales. Moved to (0,0) to remove gap between data and axis
#scale_x_continuous(breaks=pretty_breaks(n=10)) +
#Theming
theme_minimal() +
theme(plot.caption = element_text(family = "Arial",hjust = 0,size=6),
plot.margin = margin(t=12,r=14,l=10,b=10.5),
plot.title = element_text(face='bold',size=12),
plot.subtitle = element_text(face='italic',size=9),
axis.line = element_line(size=.5),
axis.text.x = element_text(size=9),
axis.ticks = element_blank(),
axis.title.x = element_text(face='italic',margin = margin(t=10,b=5,l=5,r=5)),
axis.title.y = element_text(face='italic',margin = margin(t=5,b=5,l=5,r=10)),
#Legend
legend.position = 'bottom',
legend.direction = 'horizontal',
# legend.key.height = unit(2,'line'),
# legend.key.width=unit(.5,'line'),
legend.title = element_text(face='italic')
# legend.box.margin = margin(l=150)
) +
guides(fill = FALSE)
return(plot)
}
|
ad67f9eb492863f4b07d7fbc5dde49d51d44a327 | c5ebc2af6aeb4d5d8e2aa9dfc5e3bcb21deee5ee | /app.R | 9ca878ca4f0ce1f53221cb623015de2ba1cf632e | [] | no_license | ishrat98/SPL-3 | df09f18fb7bbc0d1357980eb45cee47ff8d9a76b | 1a5eac27308421816df20d558293025d8a608a62 | refs/heads/master | 2023-04-01T07:34:58.130880 | 2021-04-05T06:59:35 | 2021-04-05T06:59:35 | 261,375,611 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,869 | r | app.R | library(SingleCellExperiment)
library(SummarizedExperiment)
library(HDF5Array)
library(TSCAN)
library(M3Drop)
library(monocle)
library(destiny)
library(scater)
library(ggplot2)
library(ggthemes)
library(ggbeeswarm)
library(corrplot)
library(Polychrome)
library(slingshot)
library(SLICER)
#library(Seurat)
library(monocle3)
library(matrixStats)
library(Matrix)
# ---- Load data and run PCA ----
# Load the filtered, annotated experiment from HDF5 and coerce to a
# SingleCellExperiment for the trajectory analyses below.
cdSk <- loadHDF5SummarizedExperiment(dir="updated app/cdScFiltAnnotHDF5", prefix="")
#View(cdSk)
cdScFiltAnnot <- as(cdSk, "SingleCellExperiment")
#View(cdScFiltAnnot)
#install.packages("matrixStats")
cellLabels <- cdScFiltAnnot$cellType
cds <- counts(cdScFiltAnnot)
colnames(cds) <- cellLabels
#cdScFiltAnnot <- scater::runPCA(cdScFiltAnnot,ncomponent = 5)
## change color Palette with library(Polychrome)
cdScFiltAnnot
table(cdScFiltAnnot$cellType)
# Run PCA on cds data. Use the runPCA function from the SingleCellExperiment package.
cdScFiltAnnot <- runPCA(cdScFiltAnnot, ncomponents = 50)
# Use the reducedDim function to access the PCA and store the results.
pca <- reducedDim(cdScFiltAnnot, "PCA")
# Describe how the PCA is stored in a matrix. Why does it have this structure?
head(pca)
dim(pca)
# Add PCA data to the cdScFiltAnnot object.
cdScFiltAnnot$PC1 <- pca[, 1]
cdScFiltAnnot$PC2 <- pca[, 2]
#View(cdScFiltAnnot)
# One distinct color per cell type; `my_color` is reused by all plots below.
my_color <- createPalette(14, c("#010101", "#ff0000"), M=1000)
names(my_color) <- unique(as.character(cdScFiltAnnot$cellType))
# Plot PC biplot with cells colored by cellType.
# colData(cdScFiltAnnot) accesses the cell metadata DataFrame object for cdScFiltAnnot.
# Look at Figure 1A of the paper as a comparison to your PC biplot.
ggplot(as.data.frame(colData(cdScFiltAnnot)), aes(x = PC1, y = PC2, color = cellType)) + geom_quasirandom(groupOnX = FALSE) +
  scale_color_tableau() + theme_classic() +
  xlab("PC1") + ylab("PC2") + ggtitle("PC biplot")
##Monocle2
# ---- Pseudotime with monocle (DDRTree) on M3Drop-selected features ----
cds <- counts(cdScFiltAnnot)
m3dGenes <- as.character(
  M3DropFeatureSelection(cds)$Gene
)
# component1
# Subset to the selected genes and strip names so monocle gets plain indices.
d <- cdScFiltAnnot[which(rownames(cdScFiltAnnot) %in% m3dGenes), ]
d <- d[!duplicated(rownames(d)), ]
colnames(d) <- 1:ncol(d)
geneNames <- rownames(d)
rownames(d) <- 1:nrow(d)
pd <- data.frame(timepoint = cellLabels)
pd <- new("AnnotatedDataFrame", data=pd)
fd <- data.frame(gene_short_name = geneNames)
fd <- new("AnnotatedDataFrame", data=fd)
dCellData <- newCellDataSet(counts(d), phenoData = pd, featureData = fd)
#
dCellData <- setOrderingFilter(dCellData, which(geneNames %in% m3dGenes))
dCellData <- estimateSizeFactors(dCellData)
dCellDataSet <- reduceDimension(dCellData,reduction_method = "DDRTree", pseudo_expr = 1)
dCellDataSet <- orderCells(dCellDataSet, reverse = FALSE)
plot_cell_trajectory(dCellDataSet)
# Store the ordering
pseudotime_monocle2 <-
  data.frame(
    Timepoint = phenoData(dCellDataSet)$timepoint,
    pseudotime = phenoData(dCellDataSet)$Pseudotime,
    State = phenoData(dCellDataSet)$State
  )
rownames(pseudotime_monocle2) <- 1:ncol(d)
pseudotime_order_monocle <-
  rownames(pseudotime_monocle2[order(pseudotime_monocle2$pseudotime), ])
# Copy pseudotime back onto the SingleCellExperiment and plot it per cell type.
cdScFiltAnnot$pseudotime_monocle2 <- pseudotime_monocle2$pseudotime
ggplot(as.data.frame(colData(cdScFiltAnnot)),
       aes(x = pseudotime_monocle2,
           y = cellType, colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_manual(values = my_color) + theme_classic() +
  xlab("monocle2 pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by monocle2 pseudotime")
##monocle3
# ---- Pseudotime with monocle3 (UMAP + learned principal graph) ----
gene_meta <- rowData(cdScFiltAnnot)
#gene_metadata must contain a column verbatim named 'gene_short_name' for certain functions.
gene_meta$gene_short_name <- rownames(gene_meta)
cds <- new_cell_data_set(expression_data = counts(cdScFiltAnnot),
                         cell_metadata = colData(cdScFiltAnnot),
                         gene_metadata = gene_meta)
## Step 1: Normalize and pre-process the data
cds <- preprocess_cds(cds,num_dim = 5)
plot_pc_variance_explained(cds)
## Step 3: Reduce the dimensions using UMAP
cds <- reduce_dimension(cds)
## No preprocess_method specified, using preprocess_method = 'PCA'
## Step 4: Cluster the cells
cds <- cluster_cells(cds)
## change the clusters
## cds@clusters$UMAP$clusters <- cdScFiltAnnot$cellType
## Step 5: Learn a graph
cds <- learn_graph(cds,use_partition = TRUE)
## Step 6: Order cells
# Root the trajectory at the zygote cells ("zy" and its duplicated-name variants).
cds <- order_cells(cds, root_cells = c("zy","zy.1","zy.2","zy.3") )
plot_cells(cds, color_cells_by="cellType", graph_label_size = 4, cell_size = 2,
           group_label_size = 6)+ scale_color_manual(values = my_color)
plot_cells(cds, graph_label_size = 6, cell_size = 1,
           color_cells_by="pseudotime",
           group_label_size = 6)
# Extract pseudotime into the phenotype data and plot it per cell type.
pdata_cds <- pData(cds)
pdata_cds$pseudotime_monocle3 <- monocle3::pseudotime(cds)
ggplot(as.data.frame(pdata_cds),
       aes(x = pseudotime_monocle3,
           y = cellType, colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_manual(values = my_color) + theme_classic() +
  xlab("monocle3 pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by monocle3 pseudotime")
#diffusion map1
# ---- Pseudotime from the first diffusion component (destiny) ----
cds <- logcounts(cdScFiltAnnot)
cellLabels <- cdScFiltAnnot$cellType
colnames(cds) <- cellLabels
dm <- DiffusionMap(t(cds)) #library destiny
tmp <- data.frame(DC1 = eigenvectors(dm)[,1],
                  DC2 = eigenvectors(dm)[,2],
                  Timepoint = cdScFiltAnnot$cellType)
ggplot(tmp, aes(x = DC1, y = DC2, colour = Timepoint)) +
  geom_point() + scale_color_manual(values = my_color) +
  xlab("Diffusion component 1") +
  ylab("Diffusion component 2") +
  theme_classic()
#part2
# Rank of DC1 serves as the pseudotime ordering.
cdScFiltAnnot$pseudotime_diffusionmap <- rank(eigenvectors(dm)[,1])
ggplot(as.data.frame(colData(cdScFiltAnnot)),
       aes(x = pseudotime_diffusionmap,
           y = cellType, colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_manual(values = my_color) + theme_classic() +
  xlab("Diffusion map pseudotime (first diffusion map component)") +
  ylab("Timepoint") +
  ggtitle("Cells ordered by diffusion map pseudotime")
# ---- Slingshot on the PCA embedding, without cluster labels ----
# Read the Slingshot documentation (?slingshot) and then run Slingshot below.
# Given your understanding of the algorithm and the documentation, what is one
# major set of parameters we omitted here when running Slingshot?
sce <- slingshot(cdScFiltAnnot, reducedDim = 'PCA') # no clusters
# Plot PC1 vs PC2 colored by Slingshot pseudotime.
colors <- rainbow(50, alpha = 1)
plot(reducedDims(sce)$PCA, col = colors[cut(sce$slingPseudotime_1,breaks=50)], pch=16, asp = 1)
lines(SlingshotDataSet(sce), lwd=2)
# Plot Slingshot pseudotime vs cell stage.
ggplot(as.data.frame(colData(cdScFiltAnnot)), aes(x = sce$slingPseudotime_1, y = cellType,
                                                  colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_tableau() + theme_classic() +
  xlab("Slingshot pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by Slingshot pseudotime")
# NOTE(review): the plot/lines/ggplot calls below repeat the two plots above verbatim.
plot(reducedDims(sce)$PCA, col = colors[cut(sce$slingPseudotime_1,breaks=50)], pch=16, asp = 1)
lines(SlingshotDataSet(sce), lwd=2)
ggplot(as.data.frame(colData(cdScFiltAnnot)), aes(x = sce$slingPseudotime_1, y = cellType,
                                                  colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_tableau() + theme_classic() +
  xlab("Slingshot pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by Slingshot pseudotime")
ggplot(as.data.frame(colData(cdScFiltAnnot)), aes(x = slingPseudotime_2, y = cellType,
                                                  colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) + theme_classic() +
  xlab("Second Slingshot pseudotime") + ylab("cell type") +
  ggtitle("Cells ordered by Slingshot pseudotime")+scale_colour_manual(values = my_color)
##tscan
# ---- Pseudotime with TSCAN: cluster with mclust, then order along the MST ----
procdeng <- TSCAN::preprocess(counts(cdScFiltAnnot))
colnames(procdeng) <- 1:ncol(cdScFiltAnnot)
dengclust <- TSCAN::exprmclust(procdeng, clusternum = 14)
TSCAN::plotmclust(dengclust)
dengorderTSCAN <- TSCAN::TSCANorder(dengclust, orderonly = FALSE)
pseudotime_order_tscan <- as.character(dengorderTSCAN$sample_name)
# Cells not placed on the ordering keep NA pseudotime.
cdScFiltAnnot$pseudotime_order_tscan <- NA
cdScFiltAnnot$pseudotime_order_tscan[as.numeric(dengorderTSCAN$sample_name)] <-
  dengorderTSCAN$Pseudotime
ggplot(as.data.frame(colData(cdScFiltAnnot)),
       aes(x = pseudotime_order_tscan,
           y = cellType, colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_manual(values = my_color) + theme_classic() +
  xlab("TSCAN pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by TSCAN pseudotime")
# NOTE(review): this ggplot repeats the one above verbatim.
ggplot(as.data.frame(colData(cdScFiltAnnot)),
       aes(x = pseudotime_order_tscan,
           y = cellType, colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_manual(values = my_color) + theme_classic() +
  xlab("TSCAN pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by TSCAN pseudotime")
# Second TSCAN run, this time keeping the original column names.
proccds <- TSCAN::preprocess(counts(cdScFiltAnnot))
#colnames(proccds) <- 1:ncol(cdScFiltAnnot)
cdsclust <- TSCAN::exprmclust(proccds, clusternum = 14)
TSCAN::plotmclust(cdsclust)
cdsorderTSCAN <- TSCAN::TSCANorder(cdsclust, orderonly = FALSE)
pseudotime_order_tscan <- as.character(cdsorderTSCAN$sample_name)
cdScFiltAnnot$pseudotime_order_tscan <- NA
cdScFiltAnnot$pseudotime_order_tscan[as.numeric(cdsorderTSCAN$sample_name)] <-
  cdsorderTSCAN$Pseudotime
# Which cell labels fell into cluster 14.
cellLabels[cdsclust$clusterid == 14]
ggplot(as.data.frame(colData(cdScFiltAnnot)),
       aes(x = pseudotime_order_tscan,
           y = cellType, colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_manual(values = my_color) + theme_classic() +
  xlab("TSCAN pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by TSCAN pseudotime")
## slingshot
# ---- Slingshot again, now using cellType as cluster labels ----
cdScFiltAnnot <- slingshot(cdScFiltAnnot, clusterLabels = 'cellType',reducedDim = "PCA",
                           allow.breaks = FALSE)
summary(cdScFiltAnnot$slingPseudotime_1)
# Lineage structure over the PCA embedding, overlaid on the biplot below.
lnes <- getLineages(reducedDim(cdScFiltAnnot,"PCA"),
                    cdScFiltAnnot$cellType)
plot(reducedDims(cdScFiltAnnot)$PCA, col = my_color[as.character(cdScFiltAnnot$cellType)],
     pch=16,
     asp = 1)
legend("bottomleft",legend = names(my_color[levels(cdScFiltAnnot$cellType)]),
       fill = my_color[levels(cdScFiltAnnot$cellType)])
lines(SlingshotDataSet(cdScFiltAnnot), lwd=2, type = 'lineages', col = c("black"))
## Plotting the pseudotime inferred by slingshot by cell types
slingshot_df <- data.frame(colData(cdScFiltAnnot))
ggplot(slingshot_df, aes(x = slingPseudotime_1, y = cellType,
                         colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) + theme_classic() +
  xlab("First Slingshot pseudotime") + ylab("cell type") +
  ggtitle("Cells ordered by Slingshot pseudotime")+scale_colour_manual(values = my_color)
ggplot(slingshot_df, aes(x = slingPseudotime_2, y = cellType,
                         colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) + theme_classic() +
  xlab("Second Slingshot pseudotime") + ylab("cell type") +
  ggtitle("Cells ordered by Slingshot pseudotime")+scale_colour_manual(values = my_color)
ggplot(slingshot_df, aes(x = slingPseudotime_1, y = slingPseudotime_2,
                         colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) + theme_classic() +
  xlab("First Slingshot pseudotime") + ylab("Second Slingshot pseudotime") +
  ggtitle("Cells ordered by Slingshot pseudotime")+scale_colour_manual(values = my_color)
##Heatmap
# ---- Genes associated with slingshot pseudotime, shown as a heatmap ----
# NOTE(review): `t` shadows base::t here, and gam()/lo() come from the `gam`
# package, which is not in the library() block at the top — confirm it is loaded.
t <- cdScFiltAnnot$slingPseudotime_1
# for time, only look at the 100 most variable genes
Y <- log1p(assay(cdScFiltAnnot,"logcounts"))
var100 <- names(sort(apply(Y,1,var),decreasing = TRUE))[1:100]
Y <- Y[var100,]
# fit a GAM with a loess term for pseudotime
gam.pval <- apply(Y,1,function(z){
  d <- data.frame(z=z, t=t)
  suppressWarnings({
    tmp <- gam(z ~ lo(t), data=d)
  })
  # p-value for the pseudotime smooth term from the GAM summary.
  p <- summary(tmp)[3][[1]][2,3]
  p
})
## Plot the top 100 genes' expression
topgenes <- names(sort(gam.pval, decreasing = FALSE))[1:100]
# Order cells by pseudotime, dropping cells with NA pseudotime.
heatdata <- assays(cdScFiltAnnot)$logcounts[topgenes, order(t, na.last = NA)]
heatclus <- cdScFiltAnnot$cellType[order(t, na.last = NA)]
heatmap(heatdata, Colv = NA,
        ColSideColors = my_color[heatclus],cexRow = 1,cexCol = 1)
########## Monocle2 ########
# NOTE(review): this section re-runs the monocle2 analysis from earlier in the
# file verbatim (same feature selection, DDRTree reduction and pseudotime plot).
## Part - 1
library(monocle)
#d <- cdScFiltAnnot[m3dGenes,]
## feature selection
cds <- counts(cdScFiltAnnot)
m3dGenes <- as.character(
  M3DropFeatureSelection(cds)$Gene
)
##part -2
d <- cdScFiltAnnot[which(rownames(cdScFiltAnnot) %in% m3dGenes), ]
d <- d[!duplicated(rownames(d)), ]
colnames(d) <- 1:ncol(d)
geneNames <- rownames(d)
rownames(d) <- 1:nrow(d)
pd <- data.frame(timepoint = cellLabels)
pd <- new("AnnotatedDataFrame", data=pd)
fd <- data.frame(gene_short_name = geneNames)
fd <- new("AnnotatedDataFrame", data=fd)
dCellData <- newCellDataSet(counts(d), phenoData = pd, featureData = fd)
#
dCellData <- setOrderingFilter(dCellData, which(geneNames %in% m3dGenes))
dCellData <- estimateSizeFactors(dCellData)
dCellDataSet <- reduceDimension(dCellData,reduction_method = "DDRTree", pseudo_expr = 1)
dCellDataSet <- orderCells(dCellDataSet, reverse = FALSE)
plot_cell_trajectory(dCellDataSet)
##part-3
# Store the ordering
pseudotime_monocle2 <-
  data.frame(
    Timepoint = phenoData(dCellDataSet)$timepoint,
    pseudotime = phenoData(dCellDataSet)$Pseudotime,
    State = phenoData(dCellDataSet)$State
  )
rownames(pseudotime_monocle2) <- 1:ncol(d)
pseudotime_order_monocle <-
  rownames(pseudotime_monocle2[order(pseudotime_monocle2$pseudotime), ])
cdScFiltAnnot$pseudotime_monocle2 <- pseudotime_monocle2$pseudotime
ggplot(as.data.frame(colData(cdScFiltAnnot)),
       aes(x = pseudotime_monocle2,
           y = cellType, colour = cellType)) +
  geom_quasirandom(groupOnX = FALSE) +
  scale_color_manual(values = my_color) + theme_classic() +
  xlab("monocle2 pseudotime") + ylab("Timepoint") +
  ggtitle("Cells ordered by monocle2 pseudotime")
# ---- SLICER: locally linear embedding of cells ----
cds <- logcounts(cdScFiltAnnot)
colnames(cds) <- cellLabels
View(cds)
dm <- DiffusionMap(t(cds))
cdScFiltAnnot
# SLICER feature selection, neighborhood size selection, then LLE to 2 dims.
slicer_genes <- select_genes(t(cds))
k <- select_k(t(cds[slicer_genes,]), kmin = 30, kmax=60)
slicer_traj_lle <- lle(t(cds[slicer_genes,]), m = 2, k)$Y
# Fixed: the accessor was written `reduceddim` (lowercase d), which does not
# exist — R is case-sensitive and the SingleCellExperiment accessor used
# elsewhere in this file is `reducedDim`.
reducedDim(cdScFiltAnnot, "lle") <- slicer_traj_lle
plot_df <- data.frame(slicer1 = reducedDim(cdScFiltAnnot, "lle")[,1],
                      slicer2 = reducedDim(cdScFiltAnnot, "lle")[,2],
                      cellType = cdScFiltAnnot$cellType)
ggplot(data = plot_df)+geom_point(mapping = aes(x = slicer1,
                                                y = slicer2,
                                                color = cellType))+
  scale_color_manual(values = my_color)+ xlab("lle component 1") +
  ylab("lle component 2") +
  ggtitle("locally linear embedding of cells from slicer")+
  theme_classic()
|
1b1cc8bb7e7875e4f203db97def67769e64306fc | 6c4464440bf42df3df8eb947b3a2798476dfac78 | /PBSmodelling/inst/examples/fib.r | f05563d9477f59638e291bf10f3f31d8a1a308b9 | [] | no_license | pbs-software/pbs-modelling | ad59ca19ced6536d2e44ff705e36a787341f60d7 | 44b14f20af33d5dee51401bad2ff3dce2dfd3cea | refs/heads/master | 2023-01-11T16:18:06.846368 | 2023-01-06T22:45:05 | 2023-01-06T22:45:05 | 37,491,656 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,539 | r | fib.r | local(envir=.PBSmodEnv,expr={
locale = sys.frame(sys.nframe() - 1) # local environment
# ***********************************************************
# fib.C:
# Compute Fibonacci numbers iteratively using a .C call to
# C code
# Arguments:
# n - final nth fibonacci number to calculate
# len - length of output vector with previous fibonacci numbers
# -----------------------------------------------------------
fib.C <- function(n=defaultN, len=defaultLen) {
	# Negative n is invalid; cannot return more values than n.
	if (n<0) return(NA)
	if (len>n) len <- n
	retArr <- numeric(len)
	# Call the compiled "fibonacci" routine; the third argument is the
	# output buffer, returned as the third element of the .C result list.
	out <- .C("fibonacci", as.integer(n), as.integer(len),
	          as.numeric(retArr))
	x <- out[[3]]
	return(x) }
# ***********************************************************
# fib.R:
# A native R version of fib.C, used for comparison
# Arguments:
# n - final nth fibonacci number to calculate
# len - length of output vector with previous fibonacci numbers
# -----------------------------------------------------------
# Pure-R version of fib.C: returns the last `len` Fibonacci numbers
# F(n-len+1)..F(n) (with F(0)=0, F(1)=1), for comparison/benchmarking.
# Fixes vs. the original: uses `<-` instead of `=` for assignment, and the
# output-index guard is `j >= 1` — the original's `j >= 0` only worked because
# R silently ignores assignment to index 0.
fib.R <- function(n=defaultN, len=defaultLen){
	if (n < 0) return(NA)
	if (len > n) len <- n
	retArr <- numeric(len)
	xa <- 0; xb <- 1
	for (i in 0:n) {
		if (i <= 1) {
			xn <- i
		} else {
			xn <- xa + xb
			xa <- xb
			xb <- xn
		}
		# Slot F(i) into the output window of the last `len` values.
		j <- i - n + len
		if (j >= 1) retArr[j] <- xn
	}
	retArr
}
#initialization for testing
# Set default inputs for the fib functions; tput presumably publishes them to
# the PBSmodelling environment — TODO confirm against PBSmodelling docs.
fib.init=function(){
	defaultN <- 200; tput(defaultN)
	defaultLen <- 10; tput(defaultLen)
}
#fib.init()
#print(fib.R(13))
#print(fib.C(13))
}) # end local scope
|
caea2c66773a8b4446e68b1976aabf7d45764421 | e12b786bfbc1cc66d1fee14fd20d1a4faa4ae823 | /main.R | 79f8176e7becf29f76948889386297fe6cd54bac | [] | no_license | Harrisonkamau/r-programming | c0628f3c84687c491d0052aa2af2a429c50d856a | ec690d46cbe8371dfdbe94126e66a9b18b8df1f9 | refs/heads/master | 2022-05-03T20:19:53.461669 | 2020-04-14T00:06:25 | 2020-04-14T00:06:25 | 255,468,686 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 672 | r | main.R | library(bupaR)
library(readr)
library(eventdataR)
library(processmapR)
library(processmonitR)
# import data
data <- read.csv('./new_data.csv', header = TRUE)
head(data)
# reshaping the data
data$timestamp <- as.Date(data$timestamp, format='%Y-%m-%d')
head(data)
log <- eventlog(
eventlog = data,
case_id = 'case_id',
activity_id = 'activity',
activity_instance_id = 'activity_instance_id',
lifecycle_id = 'lifecycle_id',
timestamp = 'timestamp',
resource_id = 'resource'
)
log
# you can attempt to create an event log object manually by uncommenting the line below
# ieventlog(data)
#use log above to plot a process map
processmapR::process_map(log)
|
474d5588d7678b6c6de9ea5cc6f4cb71a51c00c8 | 55e4887da2c65d6ed3a0325a4d7e2a78e38e4a34 | /R/prior_package.R | b0576101a3f0dd9da4c35521d39fd061190982af | [] | no_license | realAkhmed/SparkRext | dfdaa6428a7f4be4a6480192750aee8b7245ba59 | 74adba926cb0b8b4bed1e7199a38db5a48ac0766 | refs/heads/master | 2020-12-25T22:30:28.752940 | 2015-10-31T04:54:10 | 2015-10-31T04:54:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,394 | r | prior_package.R | #' @export
# Detach a package (and whatever depends on it), given as a bare, unquoted name;
# delegates to the string-based unload_package_().
unload_package <- function(pkg_name) {
  unload_package_(as.character(substitute(pkg_name)))
}
#' @export
# Detach the package named by the string `pkg_name`, recursively detaching any
# attached packages that depend on it first. Returns the (unique) character
# vector of all package names that were detached; returns `pkg_name` unchanged
# if it was not attached.
unload_package_ <- function(pkg_name) {
  # Names of currently attached packages, with the "package:" prefix stripped.
  packages <- Filter(function(x) stringr::str_detect(x, "^package:"), search())
  packages <- Map(function(x) stringr::str_replace(x, "^package:", ""), packages)
  packages <- unlist(unname(packages))
  if(!(pkg_name %in% packages)) {
    return(pkg_name)
  }
  result_packages <- pkg_name
  while(TRUE) {
    tryCatch({
      detach(paste0("package:", pkg_name), character.only = TRUE)
      break
    }, error = function(e) {
      # detach() refused because another package requires this one; parse the
      # dependent's name out of the error message (it is backtick-quoted, hence
      # the str_sub trimming) and detach it recursively first.
      required_package <- stringr::str_match(e$message, pattern = "required by ([\\S]+)")[1, 2]
      required_package <- stringr::str_sub(required_package, start = 2, end = -2)
      required_packages <- unload_package_(required_package)
      # `<<-` because this handler runs in its own function scope.
      result_packages <<- c(result_packages, required_packages)
    })
  }
  unique(result_packages)
}
#' @export
# Attach a package (given as a bare, unquoted name) with priority; delegates to
# the string-based prior_library_().
prior_library <- function(pkg_name) {
  prior_library_(as.character(substitute(pkg_name)))
}
#' @export
# Attach the package named by the string `pkg_name` ahead of its dependents:
# first detach it (and anything depending on it), then re-attach everything.
# "SparkRext" is special-cased so SparkR is detached and re-attached first.
prior_library_ <- function(pkg_name) {
  to_attach <- unload_package_(pkg_name)
  if (pkg_name == "SparkRext") {
    unload_package(SparkR)
    to_attach <- c("SparkR", pkg_name)
  }
  for (pkg in to_attach) {
    suppressPackageStartupMessages(library(pkg, character.only = TRUE))
  }
}
|
b09d7d27221bd9ef1eb78a6ffedd8124a9d437d4 | 4cb6210546ecb8bdd54e5c3e287917f73153eb59 | /man/ivlev.Rd | bc7dfff72bda3a22753e40026afd2d8b91244087 | [] | no_license | cran/selectapref | cd631e15b09c36877232f533b2c9d6693d9534e2 | 53d5ac51798b97a36238478bcff3f8c2bf94501b | refs/heads/master | 2021-01-20T01:31:36.580889 | 2020-02-08T18:30:02 | 2020-02-08T18:30:02 | 89,290,234 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 928 | rd | ivlev.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selectivity.R
\name{ivlev}
\alias{ivlev}
\title{Ivlev's electivity (Ivlev 1961).
Returns Ivlev's electivity index from vectors of consumed and available food items.}
\usage{
ivlev(available, consumed, jacob = FALSE)
}
\arguments{
\item{available}{A vector of food items available to the organism in the environment}
\item{consumed}{A vector of food items consumed by the organism}
\item{jacob}{Converts to Jacob's electivity index? Defaults to FALSE.}
}
\description{
Ivlev's electivity (Ivlev 1961).
Returns Ivlev's electivity index from vectors of consumed and available food items.
}
\examples{
availableprey <- c(10,10,10,10,10)
consumedprey <- c(9,0,0,1,5)
ivlev(available = availableprey, consumed = consumedprey, jacob = FALSE)
ivlev(available = availableprey, consumed = consumedprey, jacob = TRUE)
}
\keyword{ivlev}
\keyword{selectivity}
|
c378eaa7c790e2353ca98a51b7628f37f96f4ff8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/jmcm/examples/regressogram.Rd.R | d1a1d9a56241776441e335ea7bcc81c13ea3493a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 335 | r | regressogram.Rd.R | library(jmcm)
### Name: regressogram
### Title: Plot Sample Regressograms and Fitted Curves
### Aliases: regressogram
### ** Examples
cattleA <- cattle[cattle$group=='A', ]
fit.mcd <- jmcm(weight | id | I(ceiling(day/14 + 1)) ~ 1 | 1, data=cattleA,
triple = c(8, 3, 4), cov.method = 'mcd')
regressogram(fit.mcd, time = 1:11)
|
263037e35fd98f4421df276bbce901ec6af0b124 | 8e40eaafd38ccf1731fcbbc2d240551b251ef0eb | /compute_STAD_R_network/2_compute_STAD_R_network.R | 02abd6291f5a4555abde0041148d58ada25882a3 | [] | no_license | vda-lab/ICD_diagnosis_explorer | dec349c3193be03f94555696b0cc6eaaff620c21 | 98a87c606c8aed81acc71a15ad1af7c0e535a923 | refs/heads/master | 2021-07-09T10:46:38.822332 | 2020-11-04T20:57:19 | 2020-11-04T20:57:19 | 210,180,374 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,749 | r | 2_compute_STAD_R_network.R |
# ---- Compute STAD-R network -----
# Description: This an example script of how to compute STAD-R for multiple distance matrices
# Dependencies
require(tidyverse)
require(stad)
require(igraph)
require(usedist)
require(doParallel)
# ----------------------------------------------------------------------------------
# Set the working directory
setwd("icd_list")
# ----------------------------------------------------------------------------------
# Snippet to determine the number of pending codes:
# The data.frame status_to contains the list of ICD codes to execute.
# It reads the files from the working directory.
# Read files in directory
files <- dir()
r_files <- which(!is.na(files %>% str_extract("a_")))
# Define status
status <- as.data.frame(matrix( unlist(strsplit(files[r_files], "_")), byrow = TRUE, ncol = 3))
colnames(status) <- c("n", "icd", "file")
status_s <- status %>%
select(-n) %>%
group_by(icd) %>%
summarise(num = n()) %>%
ungroup()
# ICD to analyze
status_todo <- status_s %>% filter(num == 1)
# Read ICD9 Codes
icd <- read_csv("icd_diagnostic_categories.csv")
names(icd) <- toupper(names(icd))
icd <- icd %>% rename(ICD = ICD_DIAGNOSTIC_CATEGORY, SEQ_NUM = SEQUENCE_NUMBER)
## DIAGNOSES: Exploring the ICD9 for patients with patients
# ----------------------------------------------------------------------------------
### Read DIAGNOSES_ICD: List of patients and diagnoses MIMIC-III database
diagnoses <- "Use a valid function to read the file e.g. read_csv('...')"
# ---------------------------------------------------------------------------------
# Function: Encapsulate computation process in a function to iterate over multiple distance matrices
stad_diagnosis <- function(icd){
require(tidyverse)
require(stad)
require(igraph)
require(usedist)
require(doParallel)
# Filter values
diag <- diagnoses %>% filter(ICD9_CODE == icd)
# Remove duplicates
diag <- diag %>%
group_by(HADM_ID) %>%
mutate(MIN_SEQ = min(SEQ_NUM)) %>%
filter(MIN_SEQ == SEQ_NUM) %>%
select(-MIN_SEQ) %>%
ungroup()
# Spread diagnosis
diag_s <- diagnoses %>%
inner_join(diag %>% select(HADM_ID), by = "HADM_ID") %>%
group_by(HADM_ID, ICD) %>%
summarize(N = min(SEQ_NUM) ) %>%
ungroup() %>%
spread(ICD, N, fill = NA)
# Select IDs to use in nodes
diag_s_values <- diag_s %>% select(-HADM_ID)
diag_s_id <- diag_s %>% select(HADM_ID)
# Ranking
ranking <- diag_s %>%
select(HADM_ID) %>%
inner_join(diag %>% select(HADM_ID, SEQ_NUM), by = "HADM_ID")
# STAD
distance_matrix <- readRDS(paste0("a_",icd,"_distance.rds"))
set.seed(42)
diag_stad <- stad(distance_matrix)
saveRDS(diag_stad, file = paste0("a_",icd,"_stad.rds"))
# Labels
diag_labels <- diagnoses %>%
filter(ICD9_CODE == icd) %>%
group_by(HADM_ID) %>%
summarise(LABEL = paste(ICD, collapse = "-") ) %>%
ungroup()
## Exporting graph
net_stad <- diag_stad$graph
# Edges
net_stad_edges <- as_data_frame(net_stad) %>%
mutate(Id = row_number(),
value = 1/(1+value)) %>%
select(Id, from, to, value) %>%
rename(Source = from, Target = to, Weight = value)
# Nodes
net_stad_vertices <- ranking %>%
inner_join(diag_labels, by = "HADM_ID") %>%
mutate(Id = row_number()) %>%
select(Id, LABEL, SEQ_NUM)
# Save
write_csv(net_stad_vertices, paste0("a_", icd, "_nodes.csv"))
write_csv(net_stad_edges, paste0("a_", icd, "_edges.csv"))
return(paste("Finished", icd))
}
# ----- Parallelize the computation -------
# Run stad_diagnosis for every pending ICD code on all cores but one.
no_cores <- detectCores() - 1
cl <- makePSOCKcluster(no_cores)
registerDoParallel(cl)
foreach(i= as.character(status_todo$icd) ) %dopar% stad_diagnosis(i)
stopCluster(cl)
6ebb1b6977044e033f8bf8a1e8938f227ea49d8a | fdd0a3e1d79be39f43edf9b04c061f04cdd2ac87 | /varexplore/man/Mode.Rd | 6a4a5e6e35a16392f923155f7607d4f692014b62 | [] | no_license | cywei23/varexplore | e5040b419a7d910437516246da1f8123c59ccc04 | 14856142dcd22239122985a096390443cfa43571 | refs/heads/master | 2021-01-10T12:44:52.200326 | 2018-10-25T16:28:23 | 2018-10-25T16:28:23 | 49,693,997 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 297 | rd | Mode.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/varexplore.R
\name{Mode}
\alias{Mode}
\title{Mode function}
\usage{
Mode(x, na.rm = FALSE)
}
\arguments{
\item{x}{Vector}
\item{na.rm}{Remove NA; default = FALSE}
}
\value{
Mode of x
}
\description{
Mode function
}
|
ccdf3523748588e8cbf8b5c6fa36b471b553dd0a | eef234939eeebc2e5dcf2ad9cfd1888ce36259df | /stats253/pig.R | 09d57563f97a507321220c96a02fd71929abc1ea | [] | no_license | snarles/misc | 5d4e138cbb17bfd08143fc4c097fb84417446990 | 246f9fac0130340e44837b528a2f59e9256f2711 | refs/heads/master | 2023-06-26T06:53:53.933752 | 2023-06-12T13:29:17 | 2023-06-12T13:29:17 | 18,860,939 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 698 | r | pig.R | #http://www.mrc-bsu.cam.ac.uk/wp-content/uploads/pigweights.txt
# Bayesian analysis of the pig weights data (see URL in the header) with JAGS.
setwd("stats253/")
# Observed counts per weight bin: s = 21 bins, n = 522 animals in total
# (the y vector sums to 522).
y=c(1,1,0,7,5,10,30,30,41,48,66,72,56,46,45,22,24,12,5,0,1)
n=522
s=21
# Data list handed to JAGS (same counts as above, repeated inline).
data <- list(y=c(1,1,0,7,5,10,30,30,41,48,66,72,56,46,45,22,24,12,5,0,1),n=522,s=21)
# Initial values: one gam entry per bin, all started at -3.
inits <- list(gam=c(-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3))
gam=c(-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3,-3)
library(rjags)
# Compile the model defined in pig.bug and run 5 parallel chains.
model <- jags.model(file="pig.bug", data = data, inits=inits, n.chains=5)
# Draw 1000 posterior samples of the monitored node "Sm".
out <- coda.samples(model, "Sm", n.iter = 1000)
# Interactive inspection of the mcmc.list output (one element per chain).
length(out)
dim(out[[1]])
matplot(out[[1]])
matplot(out[[1]], type = "l")
# Gelman-Rubin convergence diagnostic across the 5 chains.
gd <- gelman.diag(out)
help(gelman.diag)
gd$psrf
names(gd)
# Trace of the first monitored component in chain 1.
traceplot(out[[1]][, 1])
|
46b6d33d25c4960a30d7a2184267105101165700 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/gpk/examples/Fairness.Rd.R | f674715d5b3e515c5ba98c9a42bbfd2d4aa71123 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 211 | r | Fairness.Rd.R | library(gpk)
### Name: Fairness
### Title: Comparison of formulations and sample size determination of a
###   fairness product
### Aliases: Fairness
### Keywords: datasets
### ** Examples
# Load the example Fairness dataset shipped with the gpk package.
data(Fairness)
|
29fd1c0c63b10c038ff282e63c90986911e1bb32 | 7593dbef54dbdae7a0946a97397987738e0e9f74 | /2013/pm10d_laei2013_colours_breaks.R | 1caf82179a5cabeadeefe3c60f177bce0f267d3d | [] | no_license | ICL-ERG/colour_schemes | 368d8e00c240ca18ac10e4a080f942a7459f35f7 | de1ec22fea582901b15b62dc574ca1da0a2c9e8c | refs/heads/master | 2022-01-04T20:37:27.988336 | 2019-05-20T14:45:21 | 2019-05-20T14:45:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 656 | r | pm10d_laei2013_colours_breaks.R | pm10d_laei2013_breaks <- c(0,5,10,15,20,25,30,35,36,40,45,50,55,60,65,70,2000) # 12
# 17 entries: a leading '' followed by 16 band names, so the vector lines up
# with the 17 break points defined above. (The original trailing "# 12"
# count was wrong.)
# NOTE(review): 17 labels vs 16 colours for 16 bands -- presumably the
# leading '' is deliberate padding for the plotting routine; confirm.
pm10d_laei2013_labels <- c('', '<5','5-10','10-15','15-20','20-25','25-30','30-35','35-36','36-40','40-45','45-50', '50-55','55-60','60-65','65-70','>70') # 17 entries
# 16 colours, one per band between consecutive break points.
pm10d_laei2013_colours <- c(rgb(0,40,81, max=255),rgb(1,18,156, max=255),rgb(3,37,211, max=255),rgb(6,74,244, max=255),rgb(12,149,233, max=255),rgb(25,207,210, max=255),rgb(130,253,207, max=255),rgb(255,255,128, max=255),rgb(255,214,0, max=255),rgb(255,173,91, max=255),rgb(249,124,0, max=255),rgb(255,66,0, max=255),rgb(255,0,0, max=255),rgb(210,20,16, max=255),rgb(128,0,64, max=255),rgb(64,0,64, max=255))
|
047540f54db1c0243c94d24cc40c247db4a3e342 | bfa38cd9176d48d337f40f602ba37f51060a188b | /R/xmpp_message_generator.fun.R | fa7d3ba0b612f5e947545bc01a1cf91c0a08eaa2 | [] | no_license | c3h3/XMPPNotifier | f0d7b80cd838a11e8c9cc38c9ac864f4243281d3 | 66217550312bf7d8bc1190e7ad0b6aa451119a83 | refs/heads/master | 2021-01-15T21:19:06.696546 | 2012-12-14T15:37:07 | 2012-12-14T15:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 307 | r | xmpp_message_generator.fun.R | ##' Generate xmpp message
##' Generate an XMPP chat message stanza for Facebook Chat
##'
##' Builds the raw XML \code{<message>} stanza as a single string. The
##' recipient id is prefixed with "-" in the JID, as the original code does
##' for Facebook Chat recipients.
##'
##' Fixed: the previous roxygen block documented a nonexistent
##' \code{subject} argument and gave no parameter descriptions.
##'
##' @export
##' @param from sender's Facebook id (without the @@chat.facebook.com suffix)
##' @param to recipient's Facebook id (without the leading "-" or the
##'   @@chat.facebook.com suffix)
##' @param message body text of the chat message
##' @return a single character string containing the XML stanza
xmpp_message_generator <- function(from, to, message) {
  paste('<message from="',from,'@chat.facebook.com" to="-',to,'@chat.facebook.com" type="chat"><body>',message,'</body></message>', sep='')
}
cd1e7bca72f619b158bf760bc2806d23e4ad9a35 | ab3ba20d8a8d1deeb49a30c10186d44763afeccc | /tests/testthat/test-nl_geocode.R | 7394b32cf2025f83155739c465272f16663111e9 | [] | no_license | cran/nlgeocoder | f6c30f4e3d16226d9a1233e6d1e1a4b81aa6f26c | 97c919b0f651eb01eaba9d521a95344409ba3e6e | refs/heads/master | 2020-03-31T10:13:44.864225 | 2018-10-08T17:30:06 | 2018-10-08T17:30:06 | 152,127,650 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,231 | r | test-nl_geocode.R | context("nl_geocode")
# Integration tests: these call a live remote geocoding service through
# nl_geocode(), so the network-dependent ones are guarded with skip_on_cran().
test_that("Class of object returned is sf", {
skip_on_cran()
res <- nl_geocode("Martinikerkhof 3 Groningen")
expect_true(class(res)[1] == "sf")
})
test_that("type = adres", {
skip_on_cran()
res <- nl_geocode("Martinikerkhof 3 Groningen")
expect_true(res$type == "adres")
})
# Vectorised input: one result row per query address.
test_that("Geocode a vector of adresses", {
skip_on_cran()
res <- nl_geocode(c("Martinikerkhof 3, Groningen", "9712 JN 4", "Henri Faasdreef 312 Den Haag", "Hoofdstraat 1A Een"))
expect_true(nrow(res) == 4)
})
# Deliberately skipped -- aspirational completeness check on all columns.
test_that("No NULL values", {
skip("Nice to have, but not right now")
res <- nl_geocode(c("Martinikerkhof 3, Groningen", "9712 JN 4", "Henri Faasdreef 312 Den Haag", "Hoofdstraat 1A Een"))
cols <- colnames(res)
for (i in seq_along(cols)){expect_true(sum(is.na(res[[cols[i]]])) == 0, label = paste0(cols[i], " does not contain NULL values"))}
})
# Coordinate columns for the two supported projections.
test_that("Location returned is in WGS84", {
skip_on_cran()
res <- nl_geocode("Martinikerkhof 3 Groningen")
expect_true(class(res$centroide_ll)[1] == "sfc_POINT")
})
test_that("Location returned is in RD_New", {
skip_on_cran()
res <- nl_geocode("Martinikerkhof 3 Groningen", output = "rd")
expect_true(class(res$centroide_rd)[1] == "sfc_POINT")
})
test_that("Dataframe returned (not an sf object)", {
res <- nl_geocode("Martinikerkhof 3 Groningen", output = "data.frame")
expect_true(class(res) == "data.frame")
})
# The `output` argument also controls the CRS of the returned sf object
# (EPSG:4326 for WGS84, EPSG:28992 for RD New).
test_that("return as wgs84 is working",{
res <- nl_geocode("Martinikerkhof 3 Groningen", output = "wgs84")
expect_equal(sf::st_crs(res), sf::st_crs(4326))
})
test_that("return as rd is working",{
res <- nl_geocode("Martinikerkhof 3 Groningen", output = "rd")
expect_equal(sf::st_crs(res), sf::st_crs(28992))
})
test_that("return as data.frame is working",{
res <- nl_geocode("Martinikerkhof 3 Groningen", output = "data.frame")
expect_true(is.data.frame(res))
})
# Query refinement parameters: fq filters, fl restricts returned fields.
test_that("restrict search to Groningen", {
res <- nl_geocode("Hoofdstraat", fq = "provincienaam:Groningen")
expect_true(res$provincienaam == "Groningen")
})
test_that("restrict fields", {
res <- nl_geocode("Hoofdstraat", fl = c("woonplaatsnaam", "centroide_ll"))
expect_equal(names(res), c("woonplaatsnaam", "centroide_ll"))
})
|
a21aa137833050f685aacc1014d2af10ae2e4961 | f64346037d600604bcdf0689f950a41712ccd8f9 | /best.R | c0ab618caa2a6b168abf5356367042bcd54f7d80 | [] | no_license | mrusert/Hospital-Quality | 5ac6d6c909487e634159b54e017b78ea943a676d | 4e6e9741d9d1e5c4d2d1b935b7d21247577e26f9 | refs/heads/master | 2020-04-06T04:43:20.018858 | 2017-02-23T06:04:12 | 2017-02-23T06:04:12 | 82,890,257 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,727 | r | best.R | sort_hospital_data <- function(state, outcome) {
if(outcome == "heart attack") { ## map the outcome name to its rate column
colNum <- 11 ## presumed 30-day mortality columns in the CSV -- confirm against header
} else if (outcome == "heart failure") {
colNum <- 17
} else if (outcome == "pneumonia") {
colNum <- 23
} else {
stop("invalid outcome")
}
states <- c(state.abb,"DC") ## valid states: the 50 two-letter codes plus DC
## NOTE(review): territories (e.g. PR, GU, VI) that may appear in the data
## are rejected here -- confirm this is intended.
if (!is.element(state, states)) { ## check if correct state value passed
stop("invalid state")
}
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character") ## read everything as character; the outcome column is converted below
data_by_state <- subset(data, State == state) ## keep only this state's hospitals
data_by_state[,colNum] <- suppressWarnings(as.numeric(data_by_state[,colNum])) ## convert outcome column to numeric; non-numeric entries become NA (warnings suppressed)
data_by_state <- data_by_state[complete.cases(data_by_state[,colNum]),] ## drop hospitals without a reported rate
sorted_data <- data_by_state[order(data_by_state[,colNum],data_by_state[,2]),] ## sort by rate, ties broken by hospital name (col 2); this assignment's value is the function's (invisible) return
}
best <- function(state, outcome) {
  # Rank this state's hospitals for the given outcome (lowest rate first,
  # ties broken alphabetically) and report the top hospital's name.
  ranked <- sort_hospital_data(state, outcome)
  # Hospital name is stored in column 2; row 1 holds the best rate.
  ranked[1, 2]
}
a959a68d38b8f6c39e0814c6f80caaa396e7e7fb | 0ff9b831ec106b5d40165f05d3ff3bd36d8782e9 | /data products/bristoltraffic/munge/01-A.R | 85a0a4c2d8ec7f94ccc1496fcddca6cf3c14decf | [] | no_license | connectedblue/datascience | 3b77ba51b58f3006529300f55b92a673f2d066a2 | 93dcb65b84a631b72bdb7f61255bcab4bd04654e | refs/heads/master | 2020-04-05T14:08:34.947754 | 2016-09-03T12:10:10 | 2016-09-03T12:10:10 | 49,199,982 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,093 | r | 01-A.R |
# Function to clean up a raw dataset
# input is:
# journeys is a dataframe in the form specified by Bristol CC
# in this dataset: https://opendata.bristol.gov.uk/Mobility/Historic-journey-times/jdq4-bmr7
# location_fixes is a dataframe that corrects central location points for some routes
clean_traffic_data <- function(journeys, location_fixes) {
# convert time column to date object
journeys$time <- as.POSIXct(as.character(journeys$time),
format="%m/%d/%Y %I:%M:%S %p %z")
# Create some new columns to split the date object into more useful components
journeys$day <- weekdays(journeys$time)
journeys$month <- months(journeys$time)
journeys$week <- as.numeric(strftime(journeys$time, "%W"))
journeys$hour <- as.numeric(strftime(journeys$time, "%H"))
journeys$doy <- as.numeric(strftime(journeys$time, "%j"))
journeys$distance_miles <- round(journeys$travel.time/(60*60)*journeys$est_speed,1)
journeys$time_period <- ifelse(journeys$hour>=0 & journeys$hour< 6, "Night",
ifelse(journeys$hour>=6 & journeys$hour<10, "Morning",
ifelse(journeys$hour>=10 & journeys$hour<16, "Day",
ifelse(journeys$hour>=16 & journeys$hour<20, "Evening", "Night")
)))
# Rename column to show mph
names(journeys)[names(journeys)=="est_speed"] <- "est_speed_mph"
# fix errors in the data, found through exploratory analysis
# some sections have 2 leading zeros instead of 3
journeys$section_id <- sub("SECTIONTL00([0-9])([0-9])$", "SECTIONTL000\\1\\2",
journeys$section_id)
# this section has the wrong section ID - should be 176, not 130
journeys$section_id <-ifelse(journeys$section_id=="SECTIONTL00130" &
journeys$section_description=="St Michaels OB to Blackboy Hill OB",
"SECTIONTL00176", journeys$section_id )
# these sections have the wrong distance for some lines.
# remove them because we don't know if it's journey time or speed that is
# incorrect (or both even)
journeys <- journeys[!((journeys$section_id=="SECTIONTL00104" &
journeys$distance==1.8) |
(journeys$section_id=="SECTIONTL00079" &
journeys$distance==2.8) |
(journeys$section_id=="SECTIONTL00109" &
journeys$distance==1.7)),]
# correct the sections that have multiple location points
# (see file sent by bristol city council)
journeys<-merge(journeys, location_fixes, by="section_id", all.x=TRUE)
journeys$location<-as.character(journeys$location)
journeys$new_loc<-as.character(journeys$new_loc)
journeys$location<-ifelse(is.na(journeys$new_loc), journeys$location,
journeys$new_loc)
journeys$new_loc<-NULL
# remove lat/long columns since they are now inconsistent
journeys$lat<-NULL
journeys$long<-NULL
# weed out duplicates - keep only a single time record per route
primary <- paste0(journeys$section_id, format(journeys$time, "%m/%d/%Y %H:%M:%S"))
journeys <- journeys[!duplicated(primary),]
# Re-create some factor variables to make subsequent analysis more efficient
journeys$section_id<-as.factor(journeys$section_id)
journeys$location<-as.factor(journeys$location)
journeys$day<-as.factor(journeys$day)
journeys$month<-as.factor(journeys$month)
journeys$time_period<-as.factor(journeys$time_period)
# Return the cleaned up dataframe
journeys
}
|
b5d0b76366293c55c5c613458b2c18c1214fa099 | c22f16bbaa95ca5f4f9956df2f1f2807aab33ed5 | /code/sst.metrics.timecluster_ncc2019.R | 71b805f62925f5ae1d5ae95d5fdcec0e54249109 | [] | no_license | WCS-Marine/2016-bleaching-patterns | 8954a95debe20882c6c3ee698b95a580baf50fd3 | f20c211b8e97215c15c4011e040d6190fddb1ff6 | refs/heads/master | 2020-06-26T18:19:34.531499 | 2019-10-30T21:26:10 | 2019-10-30T21:26:10 | 199,711,787 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 12,029 | r | sst.metrics.timecluster_ncc2019.R | ##prepared by Maina. j Mar 2018
## Takes a temporal subset (90 days ending at each site's survey date) of the
## time-series SST to do temporal clustering and to calculate metrics.
rm(list=ls())
library(hydrostats)
library(modes)
library(diptest)
library(grid)
library(gridExtra)
library(gtable)
library(plyr)
library(ggplot2)
library(pdc)
library(reshape)
## Prepare temperature data
# Observed bleaching data: one row per surveyed site.
#pts1<-read.csv("/Users/josephmaina/Dropbox/Global Bleaching/Paper/Paper 1/data/masterfile.for.analyses.withtaxa_16feb.csv")
pts<-read.csv("/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/data/masterfile_29Oct2017.csv")
# (Kept for reference: alternative ways of locating rows in pts1 absent
# from pts.)
#x<-(pts1$unique.id)
#y<-(pts$unique.id)
#diff<-x[!(x %in% y)]
#diff<-x[is.na(match(x,y))]
#diff<-which(!x%in%y)
## Load and prepare SST data
sst.ts<-read.csv("/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/data/sst.extr.csv")
## remove rows diff (currently disabled)
#sst.ts<-sst.ts[-diff,]
# Transpose so rows become daily observations; rows 2:1392 give 1391 days.
sst.ts.t<-t(sst.ts)
sstDat<- data.frame(sst.ts.t[2:1392,])
dim(sstDat)
rownames(sstDat)<-seq(1:1391)
# Daily date index starting 12 Mar 2013, plus month/year helper columns.
date<-seq(as.Date("2013/03/12"), by = "day", length.out = 1391)
year<-as.numeric(format(date,"%Y"))
month<-as.numeric(format(date,"%m"))
sstData<-cbind(data.frame(date),month,year,sstDat)
# Label the 226 site columns (cols 4:229) as unique.id.location.site.
pts_sub<-pts[,1:29]
labels<-as.list(paste(pts_sub$unique.id, pts_sub$location,pts_sub$site, sep="."))
colnames(sstData)[4:229]<-labels
sst_a<-sstData[,4:229]
## Get per-site threshold: column 2 of the DHW file + 1
## (presumably the site MMM -- confirm against the file).
#mmm.dhw<-read.csv("~/Dropbox/Global Bleaching/PaperVersions2&3/data/Env2017/dhw/dhw.extr.csv")
mmm.dhw<-read.csv("/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/data/dhw.extr.092017.csv")
#mmm.dhw<-mmm.dhw[-diff,]
thresso<-as.list(mmm.dhw[,2])
thres<-lapply(thresso, function(x) x+1)
# Create sampling dates: observation date assembled from cols 7:9 of pts
# (parsed by as.Date, so presumably year-month-day) and the window onset
# 90 days earlier.
bl.dates<-pts[,c(7,8,9)]
bl.dates$Obsdate<-paste(bl.dates[,1],bl.dates[,2],bl.dates[,3], sep="-")
bl.dates$onset<- as.Date(bl.dates$Obsdate)-90
onset<-as.list(bl.dates$onset)
obsdate<-as.list(bl.dates$Obsdate)
### Time-series analyses: per-site metrics over the 90-day pre-survey window
# Accumulators, one element per site column of sst_a.
output <- list()
lowspell=list()
highspell=list()
baseflows_mean=list()
baseflows_annual=list()
sstoutputsub=list()
bimodality.ampl<-list()
bimodality.coeff<-list()
bimodality.ratio<-list()
sst.modes<-list()
dip.Statistic<-list()
dhd<-list()
ave<-list()
stdev<-list()
# All diagnostic plots from the loop go into a single PDF.
mypath <- file.path("/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output",paste("myplots_HighSpell", ".pdf", sep = ""))
pdf(file=mypath)
for(i in seq_along(names(sst_a))){
# Daily series for site i, with hydrostats-style Date/Q column names.
y <- sst_a[,i]
output[[i]] = cbind(date, data.frame(y))
colnames(output[[i]])<-c("Date","Q")
# Restrict to the 90 days between this site's onset and observation date.
sstoutputsub[[i]] <- with(output[[i]], output[[i]][(Date >= onset[[i]] & Date <= obsdate[[i]]), ])
stdev[[i]]<-sd(sstoutputsub[[i]][,2])
ave[[i]]<-mean(sstoutputsub[[i]][,2])
# The remaining metrics are currently disabled; note their accumulators
# (dhd, lowspell, highspell, bimodality.*, sst.modes, dip.Statistic)
# therefore stay empty when the lists are collapsed below.
#lowspell[[i]]<-low.spells(sstoutputsub[[i]], quant=0.1, ann.stats=TRUE,hydro.year=FALSE)
#mypath <- file.path("~/Documents/LatestWorkingFolder/envData/extremes/figures/",paste(i, ".pdf", sep = ""))
#pdf(file=mypath)
#highspell[[i]]<-high.spells(sstoutputsub[[i]], ind.days = 5, volume=TRUE, threshold=thres[[i]])
#highspell[[i]]<-high.spells(sstoutputsub[[i]], ind.days = 5, volume=TRUE, quant = 0.9, threshold=NULL)
#baseflows_mean[[i]]<-baseflows(output[[i]],a=0.975, ts="mean")
#baseflows_annual[[i]]<-baseflows(output[[i]],a=0.975, ts="annual")
#dip.Statistic[[i]]<-dip(sstoutputsub[[i]][,2])
#bimodality.ampl[[i]]<-bimodality_amplitude(sstoutputsub[[i]][,2],TRUE)
#bimodality.coeff[[i]]<-bimodality_coefficient(sstoutputsub[[i]][,2],TRUE)
#bimodality.ratio[[i]]<-bimodality_ratio(sstoutputsub[[i]][,2], list = FALSE)
#sst.modes[[i]]<-modes(sstoutputsub[[i]][,2], type = 1, digits = "NULL", nmore = "NULL")
#dhd[[i]]<-sstoutputsub[[i]] - thres[[i]]
}
dev.off()
# Collapse the per-site lists into data frames / matrices.
# NOTE(review): with the metric lines above commented out, several of these
# objects will be NULL/degenerate.
dhd.df <- do.call("cbind",dhd)
sstoutputsub.df <- do.call("cbind",sstoutputsub)
lowspell.df <- do.call("rbind",lowspell)
highspell.df<- do.call("rbind",highspell)
#baseflows_mean.df <- do.call("rbind", baseflows_mean)
#baseflows_annual.df<- do.call("rbind",baseflows_annual)
bimodality.ampl.sst<-cbind(bimodality.ampl)
bimodality.coeff.sst<-cbind(bimodality.coeff)
bimodality.ratio.sst<-cbind(bimodality.ratio)
sst.modes.sst<-cbind(sst.modes)
dip.Statistic.sst<-rbind(dip.Statistic)
# Persist everything for downstream use (SST_uniqueDates.csv is re-read by
# the clustering section of this script).
write.csv(dhd.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/dhd.df.csv")
write.csv(bimodality.ampl.sst, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/bimodality.ampl.sst.csv")
write.csv(bimodality.coeff.sst, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/bimodality.coeff.ss.csv")
write.csv(bimodality.ratio.sst, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/bimodality.ratio.sst.csv")
write.csv(sst.modes.sst, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/sst.modes.sst.csv")
write.csv(dip.Statistic.sst, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/dip.Statistic.sst.csv")
write.csv(highspell.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/highSpell_sst_90perc.csv")
write.csv(lowspell.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/sst_lowspell.df.csv")
write.csv(sstoutputsub.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/SST_uniqueDates.csv")
### Time-series cluster analyses
## Option A: cluster the entire time series.
sstDat<-data.frame(apply(sstDat, 2, function(x) as.numeric(x)))
## Option B (used below): cluster the 90-day windows written out above.
a<-read.csv("/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/SST_uniqueDates.csv")
dim(a)
# Select the Q, Q.1, ..., Q.234 columns (one windowed series per site).
b<-append("Q",paste("Q", seq(1:234), sep="."))
sstDat<-subset(a, select=b)
dim(sstDat)
colnames(sstDat)<-labels
# Hierarchical clustering of the site series (pdclust, pdc package).
clustering <- pdclust(sstDat)
clustering
#truth.col <- rep(c("red", "blue","black","green","grey"), each = 47)
#truth.col <- rep(c("red", "blue","black"), each = 78)
#plot(clustering, cols = truth.col, type ="triangle")
plot(clustering, labels=F, type="rectangle",timeseries.as.labels = F, p.values=F)
#loo1nn(clustering, truth.col)
# Entropy heuristic over embedding dimensions m = 3..7 (t fixed at 1).
mine <- entropyHeuristic(sstDat,m.min=3, m.max=7, t.min = 1, t.max = 1)
summary(mine)
plot(mine,type = "image", mark.optimum = TRUE,col = heat.colors(12))
#plot(clustering, cols=c(rep("red",5),rep("blue",5)))
# Cut the dendrogram into 4 clusters and attach cluster ids to the data.
clust<-cutree(clustering, k = 4)
sstdatClust<-rbind(clust,sstDat)
sstdatClustFin<-t(sstdatClust)
colnames(sstdatClustFin)[1]<-"cluster"
colnames(sstdatClustFin)[2:92]<-seq(1:91)
sstdatClustFin<-as.data.frame(sstdatClustFin)
sstdatClustFin$cluster<-as.character(sstdatClustFin$cluster)
sstdatClustFin$site<-rownames(sstdatClustFin)
# Long format: one row per site x day, for plotting.
molten<-melt(sstdatClustFin, id.var =c("site","cluster"))
molten$variable<-as.numeric(molten$variable)
molten$x<-as.numeric(molten$variable)
molten.sstClust<-cbind(molten,rep(1:235, 91))
colnames(molten.sstClust)<-c("site","cluster","day","sst","x","stack")
molten.sstClust1<-cbind(molten.sstClust,rep(pts$grp6.check , 91))
colnames(molten.sstClust1)<-c("site","cluster","day","sst","x","stack","siteCoralClust")
write.csv(sstdatClustFin,"/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/sstDistributionCluster.csv")
# Cluster-wise mean and sd of SST for each day of the window.
Clust.Mean<-aggregate(sstdatClustFin[,2:92], by=list(Cluster=sstdatClustFin$cluster),FUN = mean, na.rm=TRUE)
Clust.sd<-aggregate(sstdatClustFin[,2:92], by=list(Cluster=sstdatClustFin$cluster),FUN = sd, na.rm=TRUE)
dat.mean<-as.data.frame(t(Clust.Mean))
dat.mean<-dat.mean[2:91,]
dat.mean.num<-apply(dat.mean, 2, function(x) as.numeric(x))
dat.mean.num<-as.data.frame(cbind(dat.mean.num, seq(1:90)))
dat.sd<-data.frame(t(Clust.sd))
dat.sd<-dat.sd[2:91,]
dat.mean.sd<-apply(dat.sd, 2, function(x) as.numeric(x))
dat.mean.sd<-as.data.frame(dat.mean.sd)
# Stack means and sds side by side for ggplot.
mean.stack<-melt(dat.mean.num, id="V5")
colnames(mean.stack)<-c("day","cluster","ave.sst")
sd.stack<-stack(dat.mean.sd)
colnames(sd.stack)<-c("sd","cluster")
plot.sst<-cbind(mean.stack,sd.stack$sd)
colnames(plot.sst)[4]<-"sd"
# Mean SST trajectory per cluster with a +/- 1 sd ribbon, one panel each.
ggplot(plot.sst, aes(day,ave.sst)) + geom_line()+facet_wrap(~cluster) + geom_ribbon(aes(ymin=ave.sst-sd, ymax=ave.sst+sd, x=day), alpha = 0.3) + ylab("Average SST")
### DHW calculation over the same 90-day pre-survey windows
rm(list=ls())
# Reload both survey masterfiles; `diff` marks rows present in the older
# file but absent from the current one, and is used to trim the DHW table.
pts1<-read.csv("/Users/josephmaina/Dropbox/Global Bleaching/Paper/PaperVersion2/data/masterfile.for.analyses.withtaxa_16feb.csv")
pts<-read.csv("/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/data/masterfile_29Oct2017.csv")
x<-(pts1$unique.id)
y<-(pts$unique.id)
diff<-which(!x%in%y)
##load and prepare dhw data
dhw<-read.csv("/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/data/dhw.extr.092017.csv")
##remove rows diff
dhw<-dhw[-diff,]
# Transpose so rows are daily observations; rows 3:1310 give 1308 days.
dhw.ts.t<-t(dhw)
dhwDat<- data.frame(dhw.ts.t[3:1310,])
dim(dhwDat)
rownames(dhwDat)<-seq(1:1308)
# Daily date index starting 3 Jun 2013.
date<-seq(as.Date("2013/06/03"), by = "day", length.out = 1308)
year<-as.numeric(format(date,"%Y"))
month<-as.numeric(format(date,"%m"))
dhwData<-cbind(data.frame(date),month,year,dhwDat)
# Site labels for the 226 data columns, built as in the SST section.
pts_sub<-pts[,1:29]
labels<-as.list(paste(pts_sub$unique.id, pts_sub$location,pts_sub$site, sep="."))
colnames(dhwData)[4:229]<-labels
dhw_a<-dhwData[,4:229]
## Get per-site threshold: column 2 of the DHW table + 1.
#mmm.dhw<-read.csv("~/Dropbox/PaperVersions2&3/data/Env2017/dhw/dhw.extr.csv")
thresso<-as.list(dhw[,2])
thres<-lapply(thresso, function(x) x+1)
# Create sampling dates (90-day window ending at each observation date).
bl.dates<-pts[,c(7,8,9)]
bl.dates$Obsdate<-paste(bl.dates[,1],bl.dates[,2],bl.dates[,3], sep="-")
bl.dates$onset<- as.Date(bl.dates$Obsdate)-90
onset<-as.list(bl.dates$onset)
obsdate<-as.list(bl.dates$Obsdate)
## Subset the DHW series per site and summarise each window
output <- list()
sstoutputsub=list()
maxdhw<-list()
avedhw<-list()
dhwoutputsub=list()
cummsumdhw=list()
sumdhw=list()
for(i in seq_along(names(dhw_a))){
y <- dhw_a[,i]
output[[i]] = cbind(date, data.frame(y))
colnames(output[[i]])<-c("Date","Q")
dhwoutputsub[[i]] <- with(output[[i]], output[[i]][(Date >= onset[[i]] & Date <= obsdate[[i]]), ])
# Max, mean and sum of DHW within this site's 90-day window.
maxdhw[[i]]<-max(dhwoutputsub[[i]][,2])
avedhw[[i]]<-mean(dhwoutputsub[[i]][,2])
sumdhw[[i]]<-sum(dhwoutputsub[[i]][,2])
}
sumdhw.df <- do.call("rbind",sumdhw)
dhwoutputsub.df <- do.call("cbind",dhwoutputsub)
dhwmax.90days.df <- do.call("rbind",maxdhw)
dhwave.90days.df <- do.call("rbind",avedhw)
# NOTE(review): `dhd` does not exist in this section (the workspace was
# cleared with rm(list=ls()) above), so the next line will error.
dhd.df<-do.call("cbind",dhd)
write.csv(sumdhw.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/cummsumdhw.df.csv")
write.csv(dhwoutputsub.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/dhwoutputsub.df.csv")
write.csv(dhwmax.90days.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/dhwmax.90days.df.csv")
write.csv(dhwave.90days.df, "/Users/josephmaina/OneDrive - Macquarie University/Macquarie/Papers/BleachingPaper/GlobalBleachingVer3/output/dhwave.90days.df.csv")
|
96b46c6ad4514d604aa45fcc5b8bffe0b4fffc13 | 4aa28de9627080f8f145e2ac0e8b1329953fbdb7 | /assign1/getdata.R | 646aec3a31b52ec9bee3c1fe8f5e0a6063e328bc | [] | no_license | kgegner/exploratoryData | 3aa7d59fdf75a3e11b6e2479ad1e1c1ee7dc39e7 | 1d2a412ec8ed6479e7abc7d1c6b8bd047defd29c | refs/heads/master | 2020-05-16T22:18:57.424396 | 2015-08-20T02:25:45 | 2015-08-20T02:25:45 | 38,973,323 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 475 | r | getdata.R | getdata <- function() {
# Read the full household power consumption file from a hard-coded local
# path; the file uses ";" as separator and "?" for missing values.
txt_file <- "~/Documents/Programming/R/coursera/exploratorydata/data/household_power_consumption.txt"
# First two columns (Date, Time) are character; the remaining 7 are numeric.
col_classes <- c(rep("character",2), rep("numeric",7))
pwr_data <- read.table(txt_file, header = TRUE, sep = ";", colClasses = col_classes, na.strings = "?")
# Subset data to include only Feb 1, 2007 through Feb 2, 2007
# (dates are stored as d/m/Y strings, so "1/2/2007" is 1 Feb 2007).
dat <- subset(pwr_data, pwr_data$Date == "1/2/2007" | pwr_data$Date == "2/2/2007")
return(dat)
}
|
301fcc45e610070eb56a85f71aee7bf6ca36a55a | 82cf58e384211134e3e887f8d084acfe950945d4 | /R/plots.R | 1990f9c10fa0652e36b54b3ab010fa0184c9fde4 | [] | no_license | hrk2109/bestNormalize | ae50bcc925de18fed7bfd3b8c1474460a9f0ab5a | ab716529e354b667dbb4dcdf33ab7d95f9992633 | refs/heads/master | 2020-04-02T20:40:39.587857 | 2018-09-25T17:44:43 | 2018-09-25T17:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,756 | r | plots.R | #' Transformation plotting
#'
#' Plots transformation functions for objects produced by the bestNormalize
#' package
#'
#' @name plot.bestNormalize
#' @aliases plot.orderNorm plot.boxcox plot.yeojohnson plot.lambert
#'
#' @details The plots produced by the individual transformations are simply
#' plots of the original values by the newly transformed values, with a line
#' denoting where transformations would take place for new data.
#'
#' For the bestNormalize object, this plots each of the possible
#' transformations run by the original call to bestNormalize. The first
#' argument in the "cols" parameter refers to the color of the chosen
#' transformation.
#'
#' @rdname plot.bestNormalize
#' @param x a fitted transformation
#' @param inverse if TRUE, plots the inverse transformation
#' @param bounds a vector of bounds to plot for the transformation
#' @param cols a vector of colors to use for the transforms (see details)
#' @param methods a vector of transformations to plot
#' @param leg_loc the location of the legend on the plot
#' @param ... further parameters to be passed to \code{plot} and \code{lines}
#' @importFrom graphics legend lines plot points
#' @export
plot.bestNormalize <- function(x, inverse = FALSE, bounds = NULL,
cols = c("green3", 1, 2, 4:6),
methods = c('boxcox', 'yeojohnson', "orderNorm",
"lambert_s", "lambert_h"),
leg_loc = 'top',
...) {
# Choose plotting direction: forward draws x -> g(x); inverse swaps roles.
if(!inverse) {
xvals <- x$x
x_t <- x$x.t
} else {
xvals <- x$x.t
x_t <- x$x
}
# Evaluation grid: span of the observed values unless bounds are given.
if(is.null(bounds)) {
xx <- seq(min(xvals), max(xvals), length = 1000)
} else
xx <- seq(min(bounds), max(bounds), length = 1000)
yy <- predict(x, newdata = xx, inverse = inverse, warn = FALSE)
# Drop the chosen transformation (the one minimising norm_stats) from
# `methods` -- it is drawn separately in cols[1] -- and keep only methods
# actually present in this fit.
methods <- methods[methods != names(x$norm_stats)[which.min(x$norm_stats)]]
methods <- methods[methods %in% names(x$norm_stats)]
# Transformed grid values for each remaining candidate method.
ys <- lapply(methods, function(i) {
obj <- x$other_transforms[[i]]
y_i <- predict(obj, newdata = xx, inverse = inverse, warn = FALSE)
y_i
})
# Chosen transformation first, in cols[1].
plot(xx, yy, ylim = range(yy, ys, na.rm = TRUE),
xlim = range(xx), type = 'l',
col = cols[1], lwd = 2,
xlab = ifelse(inverse, "g(x)", "x"),
ylab = ifelse(!inverse, "g(x)", "x"),
...)
# Overlay each candidate transformation in its own colour.
lapply(1:length(ys), function(i) {lines(xx, ys[[i]], col = cols[i + 1], lwd = 2, ...)})
labs <- c(class(x$chosen_transform), methods)
legend(leg_loc, labs, col = cols, bty = 'n', lwd = 2)
# Rug of the observed data: along the bottom of the forward plot, along
# the left edge of the inverse plot.
if(!inverse)
points(x = xvals, y = rep(min(range(ys, yy, na.rm = T)), length(xvals)), pch = '|')
else
points(x = rep(min(xvals, bounds), length(x$x)), y = x$x, pch = '_')
invisible(x)
}
#' @rdname plot.bestNormalize
#' @importFrom graphics lines plot
#' @export
plot.orderNorm <- function(x, inverse = FALSE, bounds = NULL, ...) {
  # Observed values: the forward plot shows x against x.t; the inverse
  # plot swaps the two roles.
  if (inverse) {
    obs <- x$x.t
    obs_t <- x$x
  } else {
    obs <- x$x
    obs_t <- x$x.t
  }
  # Grid over which the transformation curve is drawn: the span of the
  # observed values unless explicit bounds are supplied.
  grid_x <- if (is.null(bounds)) {
    seq(min(obs), max(obs), length = 1000)
  } else {
    seq(min(bounds), max(bounds), length = 100)
  }
  grid_y <- predict(x, newdata = grid_x, inverse = inverse, warn = FALSE)
  # Scatter of the fitted values with the transformation curve overlaid.
  plot(obs, obs_t, pch = 20, ylim = range(grid_y, na.rm = TRUE), xlim = range(grid_x), ...)
  lines(grid_x, grid_y, col = 'slateblue', lwd = 2, ...)
  invisible(x)
}
# The boxcox, yeojohnson and lambert transformations all reuse the simple
# scatter-plus-transformation-line plot method defined for orderNorm above.
#' @rdname plot.bestNormalize
#' @importFrom graphics lines plot
#' @export
plot.boxcox <- plot.orderNorm
#' @rdname plot.bestNormalize
#' @importFrom graphics lines plot
#' @export
plot.yeojohnson <- plot.orderNorm
#' @rdname plot.bestNormalize
#' @importFrom graphics lines plot
#' @export
plot.lambert <- plot.orderNorm
|
2dfab49808322aea1a9ade0b42343ef904e5e961 | 3128c1fb4314561fc8d0ef509c4faa6cbecba679 | /R/RcppExports.R | 04a5959276752e9ae0784f860d8d3cfdcf030771 | [] | no_license | introRcpp/confinement2 | 94147063ae604123ca00ba260c60cceb599cbd19 | 9af35cd3f51cf14de654f65ef10cbfbaba6b4167 | refs/heads/master | 2021-03-31T04:09:36.319517 | 2020-03-17T21:14:22 | 2020-03-17T21:14:22 | 248,075,623 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 527 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers around compiled routines registered by Rcpp: each one
# forwards to the matching C++ symbol via .Call(). This file is
# auto-generated (see the header) -- change the C++ sources and rerun
# Rcpp::compileAttributes() instead of editing these definitions by hand.
demo_heure <- function() {
invisible(.Call(`_confinement2_demo_heure`))
}
demo_lapin_1 <- function() {
invisible(.Call(`_confinement2_demo_lapin_1`))
}
demo_lapin_2 <- function() {
invisible(.Call(`_confinement2_demo_lapin_2`))
}
demo_rdv <- function() {
invisible(.Call(`_confinement2_demo_rdv`))
}
# Unlike the demos above, this wrapper returns the C++ result visibly.
rcpp_hello_world <- function() {
.Call(`_confinement2_rcpp_hello_world`)
}
|
ed712fc45b20a0c3833e5e453d8e6a2c7711d795 | 0974cce1a019a2fe22b7fd453ac2ef73ce335cbc | /man/linearized_work.Rd | 0436abde27313da23aa401ecb816396cf88b6d09 | [
"MIT"
] | permissive | cavargasru/globalrc | b8ee273ea21271132efaebe83214f7a28ce6939c | 75911c6a0c8c06b80643416f06b0830ab5ee48ce | refs/heads/main | 2023-08-06T09:43:33.344894 | 2021-09-15T20:49:25 | 2021-09-15T20:49:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 518 | rd | linearized_work.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/block_work.R
\name{linearized_work}
\alias{linearized_work}
\title{Takes an input array in multiple dimensions and linearizes and removes NA.
This replaces the outputs back into multiple dimensions after making the call.}
\usage{
linearized_work(input_list, run_func)
}
\description{
Takes an input array in multiple dimensions, linearizes it, and removes NA
values before making the call to \code{run_func}; the outputs are then
replaced back into the original multi-dimensional layout.
}
|
f8f3af9334cdc38b58406acf877ebc8ba027d1f4 | f1251bc1b0068adb269ca1cdf6276a90226c6b9e | /emiliano/code/chalhs.R | 6c0a3becc61d73e5053f900cc6daabc99dc40f23 | [] | no_license | NovaisGabriel/FMTC_2019 | c69f459f1e7f935ae3a33c90df226aceff6652e5 | 7a89dfbe2b4048c345cac5a6843424f89d00506a | refs/heads/master | 2022-11-28T23:40:43.791347 | 2020-08-04T12:17:06 | 2020-08-04T12:17:06 | 284,966,227 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,158 | r | chalhs.R | # Latin hypercube sampling
# Guojun Gan
# Feb 11, 2019
############
# Generate sample
############
mylhs <- function(k, L, H, A) {
  # Latin hypercube sample of k design points over a mixed space:
  # continuous dimensions bounded by L (lower) and H (upper), and
  # categorical dimensions with A[j] levels each.
  # Continuous part: each column is a random permutation of an equally
  # spaced grid of k levels from L[j] to H[j].
  d_cont <- length(L)
  perms <- apply(matrix(runif(k * d_cont), nrow = k), 2, order)
  lows <- matrix(L, nrow = k, ncol = d_cont, byrow = TRUE)
  steps <- matrix(H - L, nrow = k, ncol = d_cont, byrow = TRUE) / (k - 1)
  cont <- lows + (perms - 1) * steps
  # Categorical part: k labels drawn uniformly (with replacement) per dim.
  d_cat <- length(A)
  cats <- matrix(0, nrow = k, ncol = d_cat)
  for (j in seq_len(d_cat)) {
    cats[, j] <- sample(seq_len(A[j]), k, replace = TRUE)
  }
  cbind(cont, cats)
}
# Demo: two 20-point samples over a space with 2 continuous dimensions
# ([1,10] and [10,20]) and 2 categorical dimensions (2 and 5 levels).
L <- c(1,10)
H <- c(10,20)
A <- c(2, 5)
set.seed(1)
m1 <- mylhs(20,L,H,A)
# Level counts of the two categorical columns.
table(m1[,3])
table(m1[,4])
set.seed(2)
m2 <- mylhs(20,L,H,A)
table(m2[,3])
table(m2[,4])
# Side-by-side scatter of the two continuous dimensions of each sample.
dev.new(width=8,height=4)
par(mfrow=c(1,2),mar=c(4,4,1,1))
plot(m1[,1],m1[,2])
plot(m2[,1],m2[,2])
############
# Calculate score
############
calDist <- function(mS, L, H) {
  # Pairwise distance matrix (k x k) between the rows of a mixed design mS:
  # range-normalised L1 distance on the continuous columns plus a 0/1
  # mismatch count on the categorical columns (a Gower-style distance).
  #   mS : design matrix, continuous columns first
  #   L,H: lower/upper bounds of the continuous columns (length d1)
  k <- nrow(mS)
  d1 <- length(L)
  d2 <- ncol(mS) - d1
  mD <- matrix(0, nrow=k, ncol=k)
  # drop = FALSE fixes a crash when d1 == 1 or d2 == 1: the original
  # subsetting collapsed to a vector and mN[j,] / mC[j,] then failed.
  mN <- mS[, 1:d1, drop = FALSE]    # continuous part
  mC <- mS[, -(1:d1), drop = FALSE] # categorical part
  for(j in 1:k) {
    # Column j: distance of every row to row j.
    mD[,j] <- abs(mN - matrix(mN[j,], nrow=k, ncol=d1, byrow=T)) %*%
      matrix( 1/(H-L), ncol=1) +
      rowSums(sign(abs( mC - matrix(mC[j,], nrow=k, ncol=d2, byrow=T) )))
  }
  return(mD)
}
# Pairwise distances for the two demo samples, and a peek at one corner.
mD1 <- calDist(m1, L, H)
mD2 <- calDist(m2, L, H)
mD1[1:5,1:5]
mD2[1:5,1:5]
calScore <- function(mD) {
  # Maximin design score: the smallest pairwise distance in the strict
  # lower triangle of the (symmetric) distance matrix mD.
  min(mD[lower.tri(mD)])
}
# Maximin score of each demo design (larger = better spread).
calScore(mD1)
calScore(mD2)
############
# Select representative policies
############
# Load data (two alternative working directories; the second call wins).
setwd("C:/Users/Guojun Gan/Downloads/vamc/datasets")
setwd("/Volumes/Scratch/gan/MathDepartment/Data/vamc/datasets")
inforce <- read.csv("inforce.csv")
summary(inforce[,1:10])
# Continuous features from the inforce table plus derived age and
# time-to-maturity (both converted from days to years by /365).
vNames <- c("gbAmt", "gmwbBalance", "withdrawal", paste("FundValue", 1:10, sep=""))
age <- with(inforce, (currentDate-birthDate)/365)
ttm <- with(inforce, (matDate - currentDate)/365)
datN <- cbind(inforce[,vNames], data.frame(age=age, ttm=ttm))
# Categorical features, and a numeric matrix combining both kinds.
datC <- inforce[,c("gender", "productType")]
datM <- as.matrix(cbind(datN, data.frame(lapply(datC, as.numeric))))
summary(datN)
summary(datC)
# Get input space: per-dimension bounds and number of categorical levels.
L <- apply(datN, 2, min)
H <- apply(datN, 2, max)
A <- sapply(lapply(datC, levels), length)
round(L,2)
round(H,2)
A
# Generate nSamples candidate designs of k points and keep the one with
# the best (largest) maximin score.
k <- 340
nSamples <- 50
vScore <- c()
maxScore <- 0
bestS <- NULL
set.seed(123)
for(i in 1:nSamples) {
mS <- mylhs(k,L,H,A)
mD <- calDist(mS, L, H)
score <- calScore(mD)
if(score > maxScore) {
maxScore <- score
bestS <- mS
}
vScore <- c(vScore, score)
}
vScore
maxScore
head(bestS)
# Inspect the winning design.
d1 <- length(L)
colnames(bestS) <- c(names(datN), names(datC))
summary(bestS[,1:d1])
table(bestS[,d1+1])
table(bestS[,d1+2])
plot(bestS[,1], bestS[,2])
# find nearest policies
findPolicy <- function(mS, dat, L, H) {
  # For each design point (row of mS), find the index of the nearest row
  # of `dat` under the same mixed distance as calDist, skipping rows
  # already chosen so every design point maps to a distinct policy.
  # Returns the sorted vector of selected row indices.
  k <- nrow(mS)
  n <- nrow(dat)
  d1 <- length(L)
  d2 <- ncol(dat) - d1
  # These do not depend on the loop index; hoisted out of the loop (the
  # original recomputed them on every iteration). drop = FALSE also keeps
  # single-column slices as matrices.
  mN <- dat[, 1:d1, drop = FALSE]      # continuous part
  mC <- dat[, -(1:d1), drop = FALSE]   # categorical part
  wts <- matrix(1 / (H - L), ncol = 1) # range-normalising weights
  ind <- c()
  for(i in 1:k) {
    # Distance from design point i to every policy row.
    vD <- abs(mN - matrix(mS[i, 1:d1], nrow = n, ncol = d1, byrow = TRUE)) %*% wts +
      rowSums(sign(abs(mC - matrix(mS[i, -(1:d1)], nrow = n, ncol = d2, byrow = TRUE))))
    # Nearest policy not already selected.
    tmp <- setdiff(order(vD), ind)
    ind <- c(ind, tmp[1])
  }
  return(sort(ind))
}
# Map each design point to its nearest distinct policy (timed).
{
t1 <- proc.time()
lhs <- findPolicy(bestS, datM, L, H)
proc.time() - t1
}
# Inspect the 340 selected representative policies and save their indices.
lhs[1:50]
summary(datN[lhs,])
table(datC[lhs,])
pairs(datN[lhs,])
write.table(lhs, "lhs.csv", sep=",", quote=F, col.names=F)
# Repeat the whole search with a larger design: 680 representative policies.
k <- 680
nSamples <- 50
vScore <- c()
maxScore <- 0
bestS <- NULL
set.seed(123)
for(i in 1:nSamples) {
mS <- mylhs(k,L,H,A)
mD <- calDist(mS, L, H)
score <- calScore(mD)
if(score > maxScore) {
maxScore <- score
bestS <- mS
}
vScore <- c(vScore, score)
}
vScore
maxScore
head(bestS)
{
t1 <- proc.time()
lhs <- findPolicy(bestS, datM, L, H)
proc.time() - t1
}
lhs[1:50]
summary(datN[lhs,])
table(datC[lhs,])
write.table(lhs, "lhs2.csv", sep=",", quote=F, col.names=F)
|
ce4ec4e0824ece4a87bd2b4f7f2e2d04f5dedc37 | b2b096f8b1658f7b5e4e727a25b29abcf50c1ebe | /HammingDistance.R | e024690b92f4de0a9815cf026e92feacf452ae75 | [] | no_license | ShrutiMarwaha/Bioinformatics-Related-R-codes | 5cf69a292d91f711ff75792196f3857fed90b029 | 67ab1d40de33fdd70946b8a28c1dd2c353ebcde5 | refs/heads/master | 2021-01-19T11:37:42.199005 | 2016-10-24T01:51:40 | 2016-10-24T01:51:40 | 37,564,686 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 526 | r | HammingDistance.R | ## calculated hamming distance between two dna sequences
#s1 <- c("GAGCCTACTAACGGGAT")
#s2 <- c("CATCGTAATGACGGCCT")
dna_string <- readLines("./Dropbox/SHRUTIM/Rscripts/PractiseR/DNAstrings.txt")
# dna_string <- readLines("./Dropbox/SHRUTIM/Rscripts/PractiseR/rosalind_hamm.txt")
dna_string
s1 <- dna_string[1]
s2 <- dna_string[2]
s1_array <- unlist(strsplit(s1,""))
s2_array <- unlist(strsplit(s2,""))
count <- 0
for(i in seq_along(s1_array))
{
if(s1_array[i] != s2_array[i])
{
count <- count+1
}
}
print(count) |
b546435697e6e4196209562ffc15676b4076ca76 | b799e9587950b9225486a2ca5493f15cb7b7dedb | /tests/testthat/test_cvis.R | 6cdbf68741dc9dcd8abc873d01b973cc036415b0 | [] | no_license | Htimstrebor/dtwclust | 7f015cab0c55b3410a9f97ffa1dbbde773336566 | d6ef9b54668ec4319bfcf99b6b30f97a3e27dc18 | refs/heads/master | 2020-07-03T12:04:04.395036 | 2016-11-18T21:50:34 | 2016-11-18T21:50:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,047 | r | test_cvis.R | context("Test CVIs")
# =================================================================================================
# both internal and external
# =================================================================================================
test_that("dtwclust CVI calculations are consistent regardless of quantity or order of CVIs computed", {
pc_mv <- dtwclust(data_multivariate, type = "partitional", k = 4,
distance = "dtw_basic", centroid = "pam",
preproc = NULL, control = list(window.size = 18L), seed = 123,
dist.method = "L1")
base_cvis <- cvi(pc_mv, rep(1:4, each = 5), "valid")
i_cvis <- cvi(pc_mv, type = "internal")
e_cvis <- cvi(pc_mv, rep(1:4, each = 5), type = "external")
expect_identical(base_cvis, c(e_cvis, i_cvis))
cvis <- c(internal_cvis, external_cvis)
expect_true(all(replicate(100L, {
considered_cvis <- sample(cvis, sample(length(cvis), 1L))
this_cvis <- cvi(pc_mv, rep(1:4, each = 5), considered_cvis)
all(base_cvis[considered_cvis] == this_cvis[considered_cvis])
})))
pc_mv@datalist <- list()
pc_mv@distmat <- NULL
expect_warning(cvi(pc_mv, type = "valid"))
skip_on_cran()
expect_equal_to_reference(base_cvis, file_name(base_cvis))
})
# =================================================================================================
# external
# =================================================================================================
test_that("external CVI calculations are consistent regardless of quantity or order of CVIs computed", {
expect_error(cvi(labels_shuffled, type = "external"))
base_cvis <- cvi(labels_shuffled, CharTrajLabels, "external")
expect_true(all(replicate(1000L, {
considered_cvis <- sample(external_cvis, sample(length(external_cvis), 1L))
this_cvis <- cvi(labels_shuffled, CharTrajLabels, considered_cvis)
all(base_cvis[considered_cvis] == this_cvis[considered_cvis])
})))
})
|
7ce3eb710967c536c44c2dfae3628a61c32d6c04 | 6b769ade12829c97f7aa9930265418bede361967 | /R/table10_13.R | d3d80d92d0a2a178f74cb1fd3f3d687926dcf17f | [] | no_license | brunoruas2/gujarati | 67b579a4fde34ae1d57f4a4fd44e5142285663c1 | a532f2735f9fa8d7cd6958e0fc54e14720052fd4 | refs/heads/master | 2023-04-07T01:15:45.919702 | 2021-04-04T15:10:58 | 2021-04-04T15:10:58 | 284,496,240 | 14 | 6 | null | null | null | null | UTF-8 | R | false | false | 415 | r | table10_13.R | #' Table 10_13
#'
#' U.S. Imports, GDP, and CPI, 1975–2005 (For all urban consumers; 1982–84 = 100, except as noted)
#' Source: Department of Labor, Bureau of Labor Statistics
#'
#' @docType data
#' @usage gujarati::Table10_13
#' @format
#'
#' \itemize{
#' \item \strong{Year}
#' \item \strong{CPI: }Consumer Price Index
#' \item \strong{GDP: }Gross domestic product
#' \item \strong{Imports}
#' }
'Table10_13'
|
eba035695637e6bf6347c6eb6a3d50a77214f3de | cd545db34cc4e68b1b5ff7f726d0f4039e9792f8 | /man/res.mod2.Rd | 39a42b1190af2b6002ca2783a481a83ad016277f | [] | no_license | menggf/DEComplexDisease | 3c3598cf7403fda0c0a2fbc4c988196fe229c6cb | c7ad81ba12b3274c4d2d36a77cc22baea16caf60 | refs/heads/master | 2022-07-09T00:53:35.770287 | 2022-06-25T06:13:24 | 2022-06-25T06:13:24 | 95,849,656 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 256 | rd | res.mod2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{res.mod2}
\alias{res.mod2}
\title{clustered module 1}
\format{a list}
\value{
A list
}
\description{
The module analysis results
}
\keyword{datasets}
|
5c0ba5384ac2c44f7646570307ad9e700210a0e6 | d52a2c14988820d9162dbe76d0bc537932209b62 | /R/hydro_metrics.R | 553dd11ea87ed40154d740966e87c0465b66d878 | [] | no_license | srearl/indian-bend-wash | d698261e28fc12bb1a9349ee020e5ea9db6c1267 | 50c1734e73d60891923c7bcf4e86f340fcc846a1 | refs/heads/master | 2022-02-16T23:11:38.020734 | 2019-08-01T20:59:53 | 2019-08-01T20:59:53 | 118,653,921 | 0 | 0 | null | 2019-07-12T23:38:48 | 2018-01-23T18:49:42 | R | UTF-8 | R | false | false | 3,544 | r | hydro_metrics.R |
# README ------------------------------------------------------------------
# This workflow generates storm-analyte indices, including total load, b,
# cumulative Q, and emc, though additional indices, particularly related to
# hysteresis, can and should be included.
# Input to the worflow is a long-form data featuring flow and interpolated
# chemistry, here ibwQchem accessed from Dropbox is being used but any data with
# those characteristics could be used.
# libraries (in addition to TV from import_from_file.R) -------------------
library(zoo)
library(lubridate)
library(here)
# options -----------------------------------------------------------------
options(scipen = 999)
# data import -------------------------------------------------------------
if (!exists("ibwQchem") | !exists("ibwQminute")) {
source(here::here("R", "import_from_file.R"))
}
# assess months in which storms occur -------------------------------------
# distribution of months in which storms with good chem. coverage occur for
# assigning a season index; no June or Oct storms in this set so set monsoon (1)
# to storms in July, August, and September. For exploration only, uncomemnt to
# rerun.
# ibwQchem %>%
# filter(stormMark %in% c(storms_with_chem)) %>%
# distinct(dateTime) %>%
# mutate(month = month(dateTime)) %>%
# ggplot(aes(x = month)) +
# geom_histogram(binwidth = 1) +
# scale_x_continuous(breaks = c(1:12))
# calculate hydro metric(s) -----------------------------------------------
# calculations include:
# stormDuration: duration of storm in hours
# totalLoad: total load of the analyte over the course of a storm
# b: exponent of total load / cumulative Q power function
# cumQ: cumulative discharge
# maxQ: maximum discharge
# emc: Event Mean Concentration
# maxC: maximum concentration
# monsoon: whether storm occurs during monsoon season (1 = mos. 7-9)
# antecedentDay: antecedent dry day
hydroMetrics <- inner_join(
ibwQchem %>%
filter(stormMark %in% c(storms_with_chem)) %>%
group_by(stormMark, analyte) %>%
filter(any(!is.na(concentration))) %>%
mutate(
intpConc = na.approx(concentration, x = dateTime, na.rm = FALSE),
intpConc = na.locf(intpConc, fromLast = TRUE, na.rm = FALSE),
intpConc = na.locf(intpConc, na.rm = FALSE),
load = (intpConc * Qls * (60/1000000)),
normLoad = cumsum(load)/max(cumsum(load)),
monsoon = case_when(
month(dateTime) %in% c(7,8,9) ~ 1,
TRUE ~ 0
)
) %>%
summarise(
stormDuration = difftime(max(dateTime), min(dateTime), units = c("hours")),
totalLoad = round(sum(load), digits = 0),
b = round(coef(nls(cumsum(load)/max(cumsum(load)) ~ I((cumQ/max(cumQ))^b), start=list(b=1), trace = F)), digits = 3),
cumQ = round(max(cumQ), digits = 0),
maxQ = round(max(Qls), digits = 0),
emc = round((sum(load)/max(cumQ))*1000000, digits = 2),
maxC = round(max(concentration, na.rm = TRUE), digits = 2),
monsoon = max(monsoon)
) %>%
ungroup(),
ibwQminute %>%
group_by(stormMark) %>%
summarise(
minDT = min(dateTime),
maxDT = max(dateTime)
) %>%
ungroup() %>%
mutate(antecedentDay = difftime(minDT, lag(maxDT, n = 1L), units = c("days"))) %>%
select(stormMark, antecedentDay),
by = c("stormMark")) %>%
mutate(
monsoon = as.factor(monsoon),
stormDuration = round(as.numeric(stormDuration), digits = 1),
antecedentDay = round(as.numeric(antecedentDay), digits = 2)
)
|
4a249601ef5510278f855ab17810dbe32a2a9524 | 12f3db3b7f75b1ab6a1b7823d2dfec1c7cf5eb5f | /bernoulli.R | 1d756d78bfdf94aa2eca4bdcc54c78c513d6b5d4 | [] | no_license | rupshabagchi/RStan-models | 9a8c195a785f9fc73729688f9663cd2a0ade40cf | 9e4b28023f27d26f588c4bea97a6e04d8bd415fa | refs/heads/master | 2020-12-30T12:23:43.187356 | 2017-06-26T15:37:28 | 2017-06-26T15:37:28 | 91,382,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 442 | r | bernoulli.R | require(rstan)
bernoulli <- "
data {
int alpha;
int beta;
int <lower=0> N;
int <lower=0,upper=1> y[N];
}
parameters {
real <lower=0,upper=1> theta;
}
model {
theta ~ beta(alpha,beta);
y ~ bernoulli(theta);
}"
N <- 100
y <- rbinom(N, 1, .4)
data <- list(y=y, N=N)
m <- stan_model(model_code = bernoulli)
samples <- sampling(m, data=data, iter=1000, chains=1)
mu <- mean(extract(samples)$theta)
print(mu)
|
15f7f40bf1fe0779589d19e9c6ae49672e36193d | 2caa11cba1f9f7486f5aa6a1672225384b372ad4 | /man/data.Rd | 0f05944aa56ba4bda17bffceb34d5dd9be361c57 | [] | no_license | cran/DCEmgmt | 68929ae06ccd099aac24a75b2c6a8b6f307bf8a8 | cd72e53510c6c98b3f3b55ab751426fd92ce0629 | refs/heads/master | 2023-08-23T20:03:42.220636 | 2021-10-26T07:40:02 | 2021-10-26T07:40:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 816 | rd | data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survey.R
\docType{data}
\name{data}
\alias{data}
\title{Survey data from the DCE in Consumers' preferences and WTP for personalised nutrition (https://doi.org/10.1007/s40258-021-00647-3)}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 242 rows and 21 columns.
}
\source{
\doi{10.1007/s40258-021-00647-3}{DOI}
}
\usage{
data(survey)
}
\description{
Survey data from the DCE in Consumers' preferences and WTP for personalised nutrition (https://doi.org/10.1007/s40258-021-00647-3)
}
\examples{
data <- data(survey)
}
\references{
P?rez-Troncoso et al. (2021) Applied Health Economics and Health Policy 19, 757-767
(\href{https://pubmed.ncbi.nlm.nih.gov/33900567}{PubMed})
}
\keyword{datasets}
|
fbf775cb385c764c4d7f240862cdc79ea73548ad | c7edbf71ef5547c91cf5f127565f744c82d0501e | /R_code_AED_pubmed_abstracts_analysis.R | 8fc044321e42f0cc4b354d09b0e6feb6895ee6c1 | [] | no_license | shatrunjai/aed_pubmed | 71a0f0597677e2ff30d28c978cdb88d6f28d47a4 | c0ea41478f6487fab8992fe60d3016e1dac66f82 | refs/heads/master | 2021-04-16T10:07:06.877430 | 2018-01-09T22:55:46 | 2018-01-09T22:55:46 | 116,880,000 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 47,944 | r | R_code_AED_pubmed_abstracts_analysis.R | ##############################################################################
# FileName: R_Epilepsy_Wordcloud.R
# Description: This file has the R code that can be used to get abstracts from PUBMED
# and analyze them with text analytics
# Author(s): Jai Singh
# Version: $Id$
# History:
# 2017/11/20 First Version
###############################################################################
# --- Session setup ---------------------------------------------------------
#get current working dir
getwd()
# Raise the console print limit so long frequency tables are not truncated
options(max.print=1000000)
#Set current working directory to the clustering folder
# NOTE(review): hard-coded absolute path; this breaks on any other machine.
# Consider running the script from the project root instead of setwd().
setwd("//home/a159899/01-Projects/Pubmed_Analysis")
getwd()
##Install packages
# install.packages("/home/a159899/01-Projects/Pubmed_Analysis/felixfan-PubMedWordcloud-v0.3-14-g00674ad.tar.gz", repos = NULL, type="source")
# install.packages("/home/a159899/01-Projects/Pubmed_Analysis/slam_0.1-37.tar.gz", repos = NULL, type="source")
# install.packages("devtools")
# install.packages("twitteR")
# install.packages("reshape2")
# install.packages("sentimentr")
# install.packages("plyr")
# install.packages("ggplot2")
# install.packages("lazyeval")
# install.packages("wordcloud")
# install.packages("RColorBrewer")
# install.packages("ggplot2")
# install.packages("SnowballC")
# install.packages("devtools")
# install.packages("tm")
# install.packages("shiny")
# install.packages("shinythemes")
# install.packages("rsconnect")
# install.packages("NLP")
# install.packages("openNLP")
# install.packages("reshape2")
# install.packages("RColorBrewer")
# install.packages("plotly")
# install.packages("topicmodels")
# install.packages("tidytext")
# install.packages("DT")
# install.packages("sentimentr")
# install.packages("dplyr")
# install.packages("RWeka")
# install.packages("shiny")
# install.packages("shinythemes")
# install.packages("stringr")
# install.packages("RedditExtractoR")
# install.packages("scales")
# install.packages("qdap")
# install.packages("plotly")
# install.packages("magrittr")
# install.packages("shinydashboard")
# install.packages("textstem")
# install.packages("stringr")
# install.packages("splitstackshape")
# install.packages("tidyr")
# --- Load libraries --------------------------------------------------------
# Duplicate library() calls removed (a second call to library() for an
# already-attached package is a no-op, so keep-first order preserves the
# original search-path/masking behavior).  readr is added because
# read_delim() is used below but was never loaded in the original script.
library(PubMedWordcloud)
library(textstem)
library(RColorBrewer)
library(devtools)
library(twitteR)
library(reshape2)
library(sentimentr)
library(plyr)        # attach plyr before dplyr so dplyr masks it, not vice versa
library(ggplot2)
library(lazyeval)
library(wordcloud)
library(SnowballC)
library(tm)
library(shiny)
library(dplyr)
library(shinythemes)
library(rsconnect)
library(NLP)
library(openNLP)
library(plotly)
library(topicmodels)
library(tidytext)
library(DT)
library(RWeka)
library(stringr)
library(RedditExtractoR)
library(scales)
library(qdap)
library(magrittr)
library(shinydashboard)
library(splitstackshape)
library(tidyr)
library(readr)       # provides read_delim() used to load the Drugs@FDA file
# --- Fetch PMIDs and abstracts from PubMed ---------------------------------
# Bug fix: `epilepsy` was passed to getAbstracts() below but was never
# defined anywhere in the script; it is created here analogously to
# `epilepsy_drug` (general "Epilepsy" keyword search, same 2007-2017
# window, up to 10,000 PMIDs).  The saved-RDS name "epilepsy_abstracts.rds"
# supports this intent -- confirm the keyword if re-running.
epilepsy <- getPMIDsByKeyWords(keys = "Epilepsy", journal = NULL, dFrom = 2007,
                               dTo = 2017, n = 10000, https = TRUE)
# PMIDs for the "Antiepileptic Drug" query over the same window
epilepsy_drug <- getPMIDsByKeyWords(keys = "Antiepileptic Drug", journal = NULL,
                                    dFrom = 2007, dTo = 2017, n = 10000,
                                    https = TRUE)
# Download abstracts in batches of 100 PMIDs per request (s = 100)
epilepsy_abs <- getAbstracts(epilepsy, https = TRUE, s = 100)
#saveRDS(epilepsy_abs,"epilepsy_abstracts.rds")
epilepsy_drug_abs <- getAbstracts(epilepsy_drug, https = TRUE, s = 100)
#saveRDS(epilepsy_drug_abs,"epilepsy_drug_abstracts.rds")

# Reload previously saved abstracts instead of re-querying PubMed:
#epilepsy_drug_abs<-readRDS("epilepsy_drug_abstracts.rds")
#epilepsy_abs<-readRDS("epilepsy_abstracts.rds")

# Strip stopwords, numbers, punctuation etc. and tabulate word frequencies
clean_epilepsy <- cleanAbstracts(epilepsy_abs)
clean_epilepsy_drug <- cleanAbstracts(epilepsy_drug_abs)
dim(clean_epilepsy_drug)
head(clean_epilepsy_drug)
write.csv(clean_epilepsy_drug, file = "clean_epilepsy_drug_word_freq.csv")

# Word cloud of the 100 most frequent terms in the AED abstracts
plotWordCloud(clean_epilepsy_drug, scale = c(3, 0.3), min.freq = 1,
              max.words = 100, random.order = FALSE, rot.per = 0.35,
              use.r.layout = FALSE, colors = brewer.pal(8, "Dark2"))
# --- Join abstract word frequencies to the Drugs@FDA product table ---------
#Read in drug names from https://catalog.data.gov/dataset/drugsfda-database
# NOTE(review): read_delim() is from the readr package, which is never
# loaded in the library() block above -- confirm readr is attached before
# running this section.
drugs <- read_delim("~/01-Projects/Pubmed_Analysis/drug_name_full.txt",
                    "\t", escape_double = FALSE, trim_ws = TRUE)
# Quick structural checks on the imported table
head(drugs)
names(drugs)
lapply(drugs,typeof)
# Parse the start-of-marketing date column into Date objects
drugs$stmarkdate<-as.Date(drugs$stmarkdate)
#convert to data.frame for inner join
drugs<-data.frame(drugs)
clean_epilepsy_drug<-data.frame(clean_epilepsy_drug)
# Inner join: keep only abstract words that match an FDA proprietary name.
# NOTE(review): this overwrites `epilepsy_drug`, which previously held the
# PMID vector from getPMIDsByKeyWords() -- rename one if both are needed.
epilepsy_drug<-merge(clean_epilepsy_drug, drugs, by.x="word",by.y="propname")
head(ftable(epilepsy_drug$pharmclas),2)
#Export this file for tableau
write.csv(epilepsy_drug, file = "EpilepsyDrugs.csv", row.names=FALSE)
#Split 'Pharmclas' (a comma-separated list) into multiple columns
epilepsy_drug2<-cSplit(epilepsy_drug, "pharmclas", sep=",")
# Reshape the wide pharmclas_* columns into long form
epilepsy_drug3 <- melt(epilepsy_drug2, id = c("word","freq","ndc","prodtype","npropname","dosename","routename","stmarkdatestr","stmarkdate","markname","appnum","labelname","subname","actnumstr","actingunit"))
# Frequency table of pharmacologic classes, sorted most-common first
pharmaclass<-data.frame(table(epilepsy_drug$pharmclas))
pharmaclass<-pharmaclass[order(pharmaclass[,2],decreasing=TRUE),]
barplot(table(epilepsy_drug$pharmclas),horiz = TRUE)
#remove duplicated rows, keeping one row per drug word
epilepsy_drug = epilepsy_drug[!duplicated(epilepsy_drug$word),]
epilepsy_drug$word
#Order by frequency (descending)
epilepsy_drug = epilepsy_drug[order(-epilepsy_drug[,'freq']),]
dim(epilepsy_drug)
head(epilepsy_drug,20)
#Convert the raw abstract vector to a one-column data.frame for dplyr
epilepsy_drug_abs<-data.frame(epilepsy_drug_abs)
# --- Keep only abstracts mentioning at least one drug of interest ----------
# Bug fix: the original regex was one multi-line string literal, so every
# alternative after the first carried the embedded newline and leading
# indentation (e.g. "\n   levetiracetam") and could never match abstract
# text; additionally the final "topotecan|" left a trailing EMPTY
# alternative, which made the pattern match every abstract.  The pattern is
# now built from a clean character vector.
aed_drugs <- c(
  "gabapentin", "levetiracetam", "topiramate", "lamotrigine", "acetazolamide",
  "analgesic", "riluzole", "zonisamide", "propofol", "ethosuximide",
  "lidocaine", "fluoxetine", "duloxetine", "haloperidol", "tizanidine",
  "acetaminophen", "venlafaxine", "adenosine", "ibuprofen", "risperidone",
  "phosphate", "olanzapine", "aripiprazole", "naproxen", "antibacterial",
  "celecoxib", "nicotine", "indomethacin", "temozolomide", "furosemide",
  "sumatriptan", "testosterone", "bupropion", "etomidate", "nifedipine",
  "modafinil", "temazepam", "ceftriaxone", "diphenhydramine", "pioglitazone",
  "escitalopram", "ondansetron", "simvastatin", "ketoconazole", "paroxetine",
  "cimetidine", "mirtazapine", "paclitaxel", "amantadine", "fenofibrate",
  "zaleplon", "ciprofloxacin", "erythromycin", "meloxicam", "ropinirole",
  "hydrochlorothiazide", "oxaliplatin", "aspirin", "hydrocortisone",
  "isoniazid", "azithromycin", "cefepime", "cytarabine", "decitabine",
  "sirolimus", "doxycycline", "epinephrine", "eszopiclone", "letrozole",
  "carboplatin", "carisoprodol", "clarithromycin", "clopidogrel", "menthol",
  "methazolamide", "ofloxacin", "rifampin", "telmisartan", "atomoxetine",
  "gemcitabine", "metronidazole", "acyclovir", "fluorouracil", "itraconazole",
  "tetrabenazine", "ampicillin", "disposable", "levofloxacin", "valsartan",
  "voriconazole", "omeprazole", "calcitriol", "cefazolin", "cortisone",
  "fluconazole", "linezolid", "lovastatin", "nevirapine", "oxybutynin",
  "bexarotene", "cilostazol", "clotrimazole", "emilia", "expectorant",
  "guanfacine", "hemorrhoidal", "laxative", "leflunomide", "lisinopril",
  "loratadine", "misoprostol", "piroxicam", "ribavirin", "almotriptan",
  "bacitracin", "budesonide", "chlorzoxazone", "cyanocobalamine",
  "ganciclovir", "gatifloxacin", "lansoprazole", "mercaptopurine",
  "minoxidil", "mupirocin", "oxandrolone", "propylthiouracil", "rifabutin",
  "suboxone", "sulfacetamide", "topotecan"
)
# One alternation pattern: "gabapentin|levetiracetam|...|topotecan"
aed_pattern <- paste(aed_drugs, collapse = "|")
# Inside filter(), `epilepsy_drug_abs` resolves to the data frame's own
# abstract-text column (data masking), matching the original call.
epilepsy_drug_abs2 <- dplyr::filter(
  epilepsy_drug_abs,
  grepl(aed_pattern, epilepsy_drug_abs, ignore.case = TRUE)
)
# How many abstracts mention at least one listed drug?
dim(epilepsy_drug_abs2)
# --- Create one indicator column per drug ----------------------------------
# For each drug, add a factor column that is "TRUE" when the abstract text
# contains the drug name (case-sensitive substring match, exactly as the
# original ~130 copy-pasted grepl() assignments did).  The loop replaces
# those repeated lines; column names and values are unchanged.
indicator_drugs <- c(
  "gabapentin", "levetiracetam", "topiramate", "lamotrigine", "acetazolamide",
  "analgesic", "riluzole", "zonisamide", "propofol", "ethosuximide",
  "lidocaine", "fluoxetine", "duloxetine", "haloperidol", "tizanidine",
  "acetaminophen", "venlafaxine", "adenosine", "ibuprofen", "risperidone",
  "phosphate", "olanzapine", "aripiprazole", "naproxen", "antibacterial",
  "celecoxib", "nicotine", "indomethacin", "temozolomide", "furosemide",
  "sumatriptan", "testosterone", "bupropion", "etomidate", "nifedipine",
  "modafinil", "temazepam", "ceftriaxone", "diphenhydramine", "pioglitazone",
  "escitalopram", "ondansetron", "simvastatin", "ketoconazole", "paroxetine",
  "cimetidine", "mirtazapine", "paclitaxel", "amantadine", "fenofibrate",
  "zaleplon", "ciprofloxacin", "erythromycin", "meloxicam", "ropinirole",
  "hydrochlorothiazide", "oxaliplatin", "aspirin", "hydrocortisone",
  "isoniazid", "azithromycin", "cefepime", "cytarabine", "decitabine",
  "sirolimus", "doxycycline", "epinephrine", "eszopiclone", "letrozole",
  "carboplatin", "carisoprodol", "clarithromycin", "clopidogrel", "menthol",
  "methazolamide", "ofloxacin", "rifampin", "telmisartan", "atomoxetine",
  "gemcitabine", "metronidazole", "acyclovir", "fluorouracil", "itraconazole",
  "tetrabenazine", "ampicillin", "disposable", "levofloxacin", "valsartan",
  "voriconazole", "omeprazole", "calcitriol", "cefazolin", "cortisone",
  "fluconazole", "linezolid", "lovastatin", "nevirapine", "oxybutynin",
  "bexarotene", "cilostazol", "clotrimazole", "emilia", "expectorant",
  "guanfacine", "hemorrhoidal", "laxative", "leflunomide", "lisinopril",
  "loratadine", "misoprostol", "piroxicam", "ribavirin", "almotriptan",
  "bacitracin", "budesonide", "chlorzoxazone", "cyanocobalamine",
  "ganciclovir", "gatifloxacin", "lansoprazole", "mercaptopurine",
  "minoxidil", "mupirocin", "oxandrolone", "propylthiouracil", "rifabutin",
  "suboxone", "sulfacetamide", "topotecan"
)
for (drug in indicator_drugs) {
  epilepsy_drug_abs2[[drug]] <- as.factor(
    ifelse(grepl(drug, epilepsy_drug_abs2$epilepsy_drug_abs), "TRUE", "FALSE")
  )
}
# Sanity checks after adding the per-drug flag columns: row/column counts,
# a one-row preview, and the full column name list.
dim(epilepsy_drug_abs2)
head(epilepsy_drug_abs2,1)
names(epilepsy_drug_abs2)
# Add average sentiment analysis.
# sentiment_by() is from the sentimentr package; ave_sentiment is the
# average sentence-level sentiment score per abstract (one value per row).
epilepsy_drug_abs2$sentiment<- sentiment_by(epilepsy_drug_abs2$epilepsy_drug_abs)$ave_sentiment
# Drug names corresponding to the mention-flag columns created above, in the
# same order; used to label the per-drug average-sentiment table below.
drugname <- c(
  "gabapentin", "levetiracetam", "topiramate", "lamotrigine", "acetazolamide",
  "analgesic", "riluzole", "zonisamide", "propofol", "ethosuximide",
  "lidocaine", "fluoxetine", "duloxetine", "haloperidol", "tizanidine",
  "acetaminophen", "venlafaxine", "adenosine", "ibuprofen", "risperidone",
  "phosphate", "olanzapine", "aripiprazole", "naproxen", "antibacterial",
  "celecoxib", "nicotine", "indomethacin", "temozolomide", "furosemide",
  "sumatriptan", "testosterone", "bupropion", "etomidate", "nifedipine",
  "modafinil", "temazepam", "ceftriaxone", "diphenhydramine", "pioglitazone",
  "escitalopram", "ondansetron", "simvastatin", "ketoconazole", "paroxetine",
  "cimetidine", "mirtazapine", "paclitaxel", "amantadine", "fenofibrate",
  "zaleplon", "ciprofloxacin", "erythromycin", "meloxicam", "ropinirole",
  "hydrochlorothiazide", "oxaliplatin", "aspirin", "hydrocortisone", "isoniazid",
  "azithromycin", "cefepime", "cytarabine", "decitabine", "sirolimus",
  "doxycycline", "epinephrine", "eszopiclone", "letrozole", "carboplatin",
  "carisoprodol", "clarithromycin", "clopidogrel", "menthol", "methazolamide",
  "ofloxacin", "rifampin", "telmisartan", "atomoxetine", "gemcitabine",
  "metronidazole", "acyclovir", "fluorouracil", "itraconazole", "tetrabenazine",
  "ampicillin", "disposable", "levofloxacin", "valsartan", "voriconazole",
  "omeprazole", "calcitriol", "cefazolin", "cortisone", "fluconazole",
  "linezolid", "lovastatin", "nevirapine", "oxybutynin", "bexarotene",
  "cilostazol", "clotrimazole", "emilia", "expectorant", "guanfacine",
  "hemorrhoidal", "laxative", "leflunomide", "lisinopril", "loratadine",
  "misoprostol", "piroxicam", "ribavirin", "almotriptan", "bacitracin",
  "budesonide", "chlorzoxazone", "cyanocobalamine", "ganciclovir", "gatifloxacin",
  "lansoprazole", "mercaptopurine", "minoxidil", "mupirocin", "oxandrolone",
  "propylthiouracil", "rifabutin", "suboxone", "sulfacetamide", "topotecan"
)
# Average sentiment per drug: for each drug flag column, the mean of the
# `sentiment` column over abstracts whose flag is "TRUE".
# Replaces 130 near-identical filter()/summarize() pipelines; na.rm = TRUE
# matches the original summarize(Mean = mean(sentiment, na.rm = TRUE)).
# A drug with no "TRUE" rows yields NaN (mean of an empty vector), just as
# the original per-drug summarize did.
avg_sentiment <- lapply(drugname, function(drug) {
  mean(epilepsy_drug_abs2$sentiment[epilepsy_drug_abs2[[drug]] == "TRUE"],
       na.rm = TRUE)
})
# Build the per-drug summary table. The original data.frame(cbind(...)) on a
# character vector and a list produced awkward list-columns; unlist() keeps
# avg_sentiment as a proper numeric column alongside the drug names.
drug_avg_sent <- data.frame(drugname = drugname,
                            avg_sentiment = unlist(avg_sentiment))
##### Topic modelling by drug
# Topic counts per drug, identical to the five original hand-written calls
# (gabapentin 2, levetiracetam 2, topiramate 4, lamotrigine 4,
# acetazolamide 3). Loop replaces five copy-pasted stanzas.
drug_topic_counts <- c(gabapentin = 2, levetiracetam = 2, topiramate = 4,
                       lamotrigine = 4, acetazolamide = 3)
for (drug in names(drug_topic_counts)) {
  # Abstracts mentioning this drug (flag columns hold "TRUE"/"FALSE" factors).
  sub <- epilepsy_drug_abs2 %>% filter(epilepsy_drug_abs2[[drug]] == "TRUE")
  # Clean the text column and attach sentiment (first column = raw text).
  sen_epi <- CleanDataframe(sub[1])
  epi_drug_topics <- show_em_topics(sen_epi$clean.text, drug_topic_counts[[drug]])
  # Explicit print: top-level auto-printing does not happen inside a loop.
  message("Topics for ", drug, ":")
  print(epi_drug_topics)
}
####################################functions#####################################
# Fit an LDA topic model to a character vector of cleaned documents and
# return the top 15 terms of each topic.
#
# Args:
#   final_txt:   character vector of cleaned documents.
#   count.topic: number of topics to fit (coerced with as.numeric()).
#
# Returns:
#   A data frame with one column per topic, each listing its top 15 terms.
show_em_topics <- function(final_txt, count.topic)
{
  # Build a document-term matrix from the documents (tm package).
  myCorpus <- Corpus(VectorSource(final_txt))
  DTM <- DocumentTermMatrix(myCorpus)
  # Drop empty documents: LDA() errors on rows whose term counts are all zero.
  rowTotals <- apply(DTM , 1, sum) # Sum of words in each document
  DTM <- DTM[rowTotals > 0, ]
  # NOTE(review): the original also computed word frequencies (freq/wf) and
  # defined Gibbs-sampling parameters (burnin, iter, thin, seed, nstart, best)
  # that were never used — LDA() was called without them, so it runs with the
  # default VEM method. That dead code is removed here; to actually sample
  # with Gibbs, pass method = "Gibbs" and a control list to LDA().
  ldaOut <- LDA(DTM,as.numeric(count.topic))
  # Top 15 terms for each fitted topic.
  top_terms <- as.data.frame(terms(ldaOut,15))
  return(top_terms)
}
# Map a scalar sentiment score to a polarity label.
# Required for CleanDataframe & ConvertDataframe.
#
# Args:
#   sentiment: a single numeric sentiment score.
#
# Returns:
#   "positive" for scores > 0, "neutral" for exactly 0, "negative" otherwise.
SentimentPolarity <- function(sentiment) {
  if (sentiment > 0) {
    "positive"
  } else if (sentiment == 0) {
    "neutral"
  } else {
    "negative"
  }
}
# Clean a character vector of raw text for sentiment / topic analysis.
#
# Pipeline: strip URLs, retweet entities and @mentions; lower-case with
# error handling; then tm-based removal of punctuation (keeping "."),
# numbers and English stop words (except negations); finally trim spaces.
#
# Args:
#   some_txt: character vector of raw documents.
#
# Returns:
#   Character vector of cleaned documents. Entries that fail tolower()
#   become NA and are dropped, so the result can be SHORTER than the input.
CleanText <- function(some_txt) { # Clean text function
  # remove html links
  some_txt <- gsub("http\\S+\\s*",
                   "",
                   some_txt)
  # remove retweet entities
  some_txt <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)",
                   "",
                   some_txt)
  # remove at people
  some_txt <- gsub("@\\w+",
                   "",
                   some_txt)
  # "tolower error handling" function: tolower() can error on invalid
  # multibyte strings, so wrap it and return NA for such entries.
  try.error <- function(x) {
    # create missing value
    y <- NA
    # tryCatch error
    try_error <- tryCatch(tolower(x),
                          error = function(e) e)
    # if not an error, lower-case for real (tolower runs twice on success)
    if ( !inherits(try_error, "error") ) {
      y <- tolower(x)
    }
    # result
    return(y)
  }
  # lower case using try.error with sapply
  some_txt <- sapply(some_txt, try.error)
  # remove NAs in some_txt (entries that could not be lower-cased)
  some_txt <- some_txt[ !is.na(some_txt) ]
  names(some_txt) <- NULL
  # tm-based cleanup on a corpus built from the surviving documents
  myCorpus <- Corpus(VectorSource(some_txt))
  myCorpus <- tm_map(myCorpus, content_transformer(tolower))
  myCorpus <- tm_map(myCorpus, removePunctuation)
  # NOTE(review): strip() is not a tm function — presumably qdap's strip();
  # confirm that package is attached before this runs.
  myCorpus <- tm_map(myCorpus, content_transformer(strip), char.keep = ".") # Keep period
  myCorpus <- tm_map(myCorpus, removeNumbers)
  # Add words to be excluded from the list of stop words here: negations are
  # kept because removing them would flip sentence sentiment.
  exceptions <- c("not","nor","neither","never")
  my_stopwords <- setdiff(stopwords("en"), exceptions)
  myCorpus <- tm_map(myCorpus, removeWords, my_stopwords)
  #myCorpus <- tm_map(myCorpus, stemDocument)
  # Pull the cleaned text back out of the corpus as a plain character vector.
  some_txt_clean <- as.character(unlist(sapply(myCorpus, `[`, "content")))
  # remove trailing/leading spaces (stringr)
  some_txt_clean <- str_trim(some_txt_clean)
  return(some_txt_clean)
}
# Clean the text held in the first column of a data frame and attach
# sentiment scores and polarity labels.
#
# Args:
#   text.dataframe: data frame whose FIRST column holds the raw text
#                   (callers pass a one-column data frame, e.g. sub[1]).
#
# Returns:
#   The data frame with added columns: clean.text (cleaned text),
#   sentiment (numeric ave_sentiment from sentimentr::sentiment_by) and
#   sentiment.pol (list of "positive"/"neutral"/"negative" labels).
CleanDataframe <- function(text.dataframe) {
  # Bug fix: the original used text.dataframe[1, ] — the first ROW — which
  # cleaned only one document and recycled it across every row. We want the
  # entire first COLUMN of text.
  # NOTE(review): CleanText() can drop entries that fail tolower(); this
  # assumes one cleaned string comes back per input row — confirm on real data.
  text.dataframe$clean.text <- CleanText(text.dataframe[[1]])
  text.dataframe$sentiment <- sentiment_by(text.dataframe$clean.text)$ave_sentiment
  # (Removed the original no-op line: sentiment <- sentiment.)
  text.dataframe$sentiment.pol <- lapply(text.dataframe$sentiment, SentimentPolarity)
  return(text.dataframe)
}
#################################################################################
# NOTE(review): trailing dataset-viewer boilerplate ("Subsets and Splits" /
# "No community queries yet" ...) and a stray "|" were appended after this
# line; they are not R code and have been commented out to keep the file parseable.