blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1ce72c993e399355065d166fc59d217754109e7 | 32435049c2bfd861f05aac44aa65a7b15b2a4165 | /man/ops_aggregate_perf.Rd | f897bceb889634ba0202c92d749dc3a8016a5b0b | [
"MIT"
] | permissive | austl001/MOSLR | 3b9c3c5043c688782ccc6bd6b5cbe4d53b91fe9c | 0186f24e176f17df6e1399a460198ffcfdf706dd | refs/heads/master | 2023-03-21T10:43:38.860067 | 2022-07-06T09:41:11 | 2022-07-06T09:41:11 | 235,149,072 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 626 | rd | ops_aggregate_perf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ops_aggregate_perf.R
\name{ops_aggregate_perf}
\alias{ops_aggregate_perf}
\title{Aggregates Trading Party OPS Performance}
\usage{
ops_aggregate_perf(
my.dir = getwd(),
df = readRDS(paste0(my.dir, "/data/rdata/ops_data_clean.Rda")),
tp.details = utils::read.csv(paste0(my.dir, "/data/inputs/tp_details.csv")),
spid.counts = utils::read.csv(paste0(my.dir, "/data/inputs/spid_counts.csv"))
)
}
\arguments{
\item{my.dir}{character}
\item{df}{dataframe}
\item{tp.details}{dataframe}
\item{spid.counts}{dataframe}
}
\description{
Aggregates Trading Party OPS Performance
}
|
ce56fc090aa33da2ab2ce2d2437f9d4517515dba | 100ef7748001814334951538fa23869ee00f58f7 | /project/project.r | 72e29ef418d4275b8a098b73d666497d430cb444 | [] | no_license | dahewett/geostatistics | c51ca284288cc0a086886879629a852f7e083cab | 3dd1c66391503f88c60160f1f8dfa270fdc2addd | refs/heads/master | 2022-10-19T14:32:47.146003 | 2020-06-03T03:37:34 | 2020-06-03T03:37:34 | 152,347,434 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,426 | r | project.r |
install.packages("plyr")
install.packages("dplyr")
install.packages("spdep")
install.packages("GISTools")
install.packages("raster")
install.packages("maptools")
install.packages("rgdal")
install.packages("spatstat")
install.packages("sp")
install.packages("spatstat")
install.packages("spgwr")
install.packages("tmap")
install.packages("ggmap")
#Geog 418/518 Final Project
library(plyr)
library(dplyr)
library(spdep)
library(GISTools)
library(raster)
library(maptools)
library(rgdal)
library(spatstat)
library(sp)
library(spgwr)
library(tmap)
library(ggmap)
#Set working directory
setwd("/csc/geog418/project")
#Reading in particulate matter dataset
pm25 <- read.csv("PM25.csv") #Read in PM2.5 data
#Select only columns 1 and 2
pm25 <- pm25[,1:2]
#Change the column names
colnames(pm25) <- c("POSTALCODE", "PM25")
#Reading in postal code shapefile
postalcodes <- shapefile("BC_Postal_Codes") #Read in related postal code data
#Join PM2.5 data with postal code data using the POSTALCODE column
pm25.spatial <- merge(postalcodes,pm25,by = "POSTALCODE")
#Plot the points on a map
plot(pm25.spatial)
#Examine the first several rows
head(pm25.spatial)
#You should notice that the dataset contains NA's, so these need to be removed.
pm25.spatial <- pm25.spatial[!is.na(pm25.spatial$PM25),]
#Reading in the income dataset
income <- read.csv("Income.csv") #Read in census income data
#Change the column names
colnames(income) <- c("DAUID", "Income") #Select only ID and Income columns
#Read in the dissemination tract shapefile
census.tracts <- shapefile("BC_DA.shp")
#Merge the income dataset and the DA shapefile
income.tracts <- merge(census.tracts,income, by = "DAUID")
#Remove any NA's from the merged dataset
income.tracts <- income.tracts[!is.na(income.tracts$Income),]
#Study area map
tmap_mode("view")
data("World")
## tmap mode set to interactive viewing
tm_basemap("OpenStreetMap.Mapnik") +tm_shape(income.tracts)+tm_borders("purple", lwd = .4) +tm_scale_bar()+tm_shape(pm.income)+tm_symbols(size=0.01,alpha = 0,border.col = "Green", border.alpha = 0.7) +tm_style("classic", legend.only = TRUE)
memory.limit(size=50000)
#Create choropleth map of income
med.income <- income.tracts$Income
shades <- auto.shading(med.income, n=6, cols = brewer.pal(6, "Blues"))
choropleth(income.tracts, med.income, shades, border="transparent") #map the data with associated colours
choro.legend(-122.7, 49.495, shades) #add a legend (you might need to change the location)
#Perform a spatial intersection on the PM2.5 and Income data
pm.income <- intersect(pm25.spatial,income.tracts)
#Observe the result
head(pm.income)
pm.income$DAUID <- as.numeric(pm.income$DAUID)
pm.income$PM25 <- as.numeric(pm.income$PM25)
#Aggregate the the multiple PM2.5 values for each DA. Here the mean function is used.
pm.income <- aggregate(pm.income$PM25~pm.income$DAUID, FUN=mean)
#Change the column names
colnames(pm.income) <- c("DAUID", "PM25")
#Remove any NA's
pm.income <- na.omit(pm.income)
#Seeing as how the datasets are not properly merged, perform another merge to have PM and income together
pm.income.poly <- merge(income.tracts,pm.income,by = "DAUID")
pm.income.poly <- na.omit(pm.income.poly)
#Remove unwanted columns
pm.income.poly <- pm.income.poly[,-(2:23)]
#Observe the result. Are there still NA's? If so, apply following line to get rid of them.
pm.income.poly <- pm.income.poly[!is.na(pm.income.poly$PM25),]
#Create choropleth map of PM25
avg.pm <- pm.income.poly$PM25
shades <- auto.shading(avg.pm, n=6, cols = brewer.pal(6, 'Reds'))
choropleth(income.tracts, avg.pm, shades, border ="transparent") #map the data with associated colours
choro.legend(-122.7, 49.496, shades) #add a legend (you might need to change the location)
# Testing Spatial autocorrelation of income
#Create spatial neigh weight matrix
income.tracts <- income.tracts[,-(2:23)]
income.nb <- poly2nb(income.tracts)
plot(income.tracts)
plot(income.nb, coordinates(income.tracts), add = TRUE, col = "red")
income.lw <- nb2listw(income.nb, zero.policy = TRUE, style = "W")
print.listw(income.lw, zero.policy = TRUE)
income.a <- income.tracts$Income
income.lagged.means = lag.listw(income.lw, income.a, zero.policy = TRUE)
shades2 <- auto.shading(income.lagged.means, n=6, cols = brewer.pal(6, 'Oranges'))
choropleth(income.tracts, income.lagged.means,shades2, border="transparent")
choro.legend(-122.7, 49.49, shades2)
#Global Moran's I
mi <- moran.test(income.a, income.lw, zero.policy = TRUE)
mi
#To contextualize your Moran's I value, retrieve range of potential Moran's I values.
# Theoretical range of Moran's I implied by a listw weights object: the
# extreme eigenvalues of the symmetrised weights matrix bound the statistic.
moran.range <- function(lw) {
  w <- listw2mat(lw)
  sym <- (w + t(w)) / 2
  range(eigen(sym)$values)
}
moran.range(income.lw)
#Perform the Z-test
#You can get the necessary values from your mi object resulting from your Moran's I test above.
#For example, the Moran's I value is the first value in the output mi, so you call mi$estimate[1] to get the value.
z=((mi$estimate[1]-mi$estimate[2])/(mi$estimate[3]))
z
#Local Moran's I
lisa.test <- localmoran(income.a, income.lw)
lisa.test
lisa.test <- na.omit(lisa.test)
#Create a choropleth map of the LISA values.
lisa.shades <- auto.shading(c(lisa.test[,1],-lisa.test[,1]),cols=brewer.pal(5,"PRGn"))
choropleth(income.tracts, lisa.test[,1],shading=lisa.shades, border="transparent")
choro.legend(-122.7, 49.49,lisa.shades,fmt="%6.2f")
#Create a Moran's I scatterplot
moran.plot(income.a, income.lw, zero.policy=NULL, spChk=NULL, labels=NULL, xlab="Median Income",
ylab="Spatially Lagged Median Income", quiet=NULL)
tm_layout(basemaps = c('OpenStreetMap')) + tm_scale_bar()+ tm_compass()
######Linear Regression##########
head(pm.income.poly)
#Plot income and PM2.5 from the pm.income.poly dataset you created
plot(pm.income.poly$Income~pm.income.poly$PM25)
#Notice that there are a lot of suspect values in this dataset. NOTE(review): this comment mentions removing 0's, but the line below actually drops rows where PM25 == 1 -- confirm which value is intended:
pm.income.poly <- pm.income.poly[pm.income.poly$PM25 != 1, ]
#Now plot the data again
plot(pm.income.poly$Income~pm.income.poly$PM25)
#Perform a linear regression on the two variables. You should decide which one is dependent.
lm.model <- lm(pm.income.poly$Income~pm.income.poly$PM25)
#Add the regression model to the plot you created
abline(lm.model)
#Get the summary of the results
summary(lm.model)
#You want to determine if the model residuals are spatially clustered.
#First obtain the residuals from the model
model.resids <- as.data.frame(residuals.lm(lm.model))
#Then add the residuals to your spatialpolygon dataframe
pm.income.poly$residuals <- residuals.lm(lm.model)
#Observe the result to make sure it looks correct
head(pm.income.poly)
#Now, create choropleth map of residuals
resids <- pm.income.poly$residuals
shades <- auto.shading(resids, n=6, cols = brewer.pal(6, 'Greens'))
choropleth(income.tracts, resids, shades, border ="transparent") #map the data with associated colours
choro.legend(-122.7, 49.49, shades) #add a legend (you might need to change the location)
## Global Moran's I
# Create Neigbourhood Weights Matrix
# Queen's Neigbour
head(pm.income.poly)
tracts <- poly2nb(pm.income.poly)
# Create the spatial weighted neighbour list with queens
tracts.lw <- nb2listw(tracts, zero.policy = TRUE, style = "W")
mi <- moran.test(pm.income.poly$residuals, tracts.lw, zero.policy = TRUE)
mi
#To contextualize your Moran's I value, retrieve range of potential Moran's I values.
# Theoretical range of Moran's I implied by a listw weights object: the
# extreme eigenvalues of the symmetrised weights matrix bound the statistic.
# NOTE(review): this redefines the identical function declared earlier in the
# script; harmless, but a single definition would suffice.
moran.range <- function(lw) {
  w <- listw2mat(lw)
  sym <- (w + t(w)) / 2
  range(eigen(sym)$values)
}
moran.range(tracts.lw)
#Perform the Z-test
#You can get the necessary values from your mi object resulting from your Moran's I test above.
#For example, the Moran's I value is the first value in the output mi, so you call mi$estimate[1] to get the value.
z=((mi$estimate[1]-mi$estimate[2])/(mi$estimate[3]))
z
#Local Moran's I
lisa.test <- localmoran(pm.income.poly$residuals, tracts.lw)
lisa.test
lisa.test <- na.omit(lisa.test)
#Create a choropleth map of the LISA values.
lisa.shades <- auto.shading(c(lisa.test[,1],-lisa.test[,1]),cols=brewer.pal(5,"PRGn"))
choropleth(pm.income.poly, lisa.test[,1],shading=lisa.shades, border ="transparent")
choro.legend(-122.7, 49.49,lisa.shades,fmt="%6.2f")
tracts.lw <- na.omit(tracts.lw)
pm.income.poly$residuals <- na.omit(pm.income.poly$residuals)
#Create a Moran's I scatterplot
moran.plot(pm.income.poly$residuals, tracts.lw, zero.policy=NULL, spChk=NULL, labels=NULL, xlab="Residuals",
ylab="Spatially Lagged Residuals", quiet=NULL)
####Geographically Weighted Regression
#The first thing you need to do is to add the polygon coordinates to the spatialpolygondataframe.
#You can obtain the coordinates using the "coordinates" function from the sp library
pm.income.poly.coords <- sp::coordinates(pm.income.poly)
#Observe the result
head(pm.income.poly.coords)
#Now add the coordinates back to the spatialpolygondataframe
pm.income.poly$X <- pm.income.poly.coords[,1]
pm.income.poly$Y <- pm.income.poly.coords[,2]
head(pm.income.poly)
###Determine the bandwidth for GWR: this will take a while
GWRbandwidth <- gwr.sel(pm.income.poly$Income~pm.income.poly$PM25,
data=pm.income, coords=cbind(pm.income.poly$X,pm.income.poly$Y),adapt=T)
###Perform GWR on the two variables with the bandwidth determined above
###This will take a looooooong while
gwr.model = gwr(pm.income.poly$Income~pm.income.poly$PM25,
data=pm.income.poly, coords=cbind(pm.income.poly$X,pm.income.poly$Y),
adapt=GWRbandwidth, hatmatrix=TRUE, se.fit=TRUE)
#Print the results of the model
gwr.model
#Look at the results in detail
results<-as.data.frame(gwr.model$SDF)
head(results)
#Now for the magic. Let's add our local r-square values to the map
pm.income.poly$localr <- results$localR2
#Create choropleth map of r-square values
local.r.square <- pm.income.poly$localr
shades <- auto.shading(local.r.square, n=6, cols = brewer.pal(6, 'Oranges'))
choropleth(income.tracts, local.r.square, shades, border = "transparent") #map the data with associated colours
choro.legend(-122.7, 49.49,shades) #add a legend (you might need to change the location)
#Time for more magic. Let's map the coefficients
pm.income.poly$coeff <- results$pm.income.poly.PM25
#Create choropleth map of the coefficients
local.coefficient <- pm.income.poly$coeff
shades <- auto.shading(local.coefficient, n=6, cols = brewer.pal(6, 'Oranges'))
choropleth(income.tracts, local.coefficient, shades,border="transparent") #map the data with associated colours
choro.legend(3864000, 1965000, shades) #add a legend (you might need to change the location)
plot(income.tracts)
plot(pm.income.poly)
pm.income.poly.ext <- as.matrix(extent(pm.income.poly))
window <- as.owin(list(xrange=pm.income.poly.ext[1,], yrange = pm.income.poly.ext[2,]))
pm.income.ppp <- ppp(x=pm.income.poly$X, y = pm.income.poly$Y, window = window)
nearestNeighbour <- nndist(pm.income.ppp)
##Convert the nearestNeighbor object into a dataframe.
nearestNeighbour=as.data.frame(as.numeric(nearestNeighbour))
##Change the column name to "Distance"
colnames(nearestNeighbour) = "Distance"
##Calculate the nearest neighbor statistic to test for a random spatial distribution.
#mean nearest neighbour
nnd = sum((nearestNeighbour$Distance))
nnd = nnd/nrow(nearestNeighbour)
#mean nearest neighbour for random spatial distribution
r.nnd = 1/(2*sqrt(nrow(nearestNeighbour)/0.44502))
d.nnd = 1.07453/sqrt(nrow(nearestNeighbour)/0.44502)
r = nnd/r.nnd
sd.nnd = 0.26136/sqrt((nrow(nearestNeighbour)*nrow(nearestNeighbour))/0.44502)
z = (nnd-r.nnd)/sd.nnd
z
# k function
k.fun <- Kest(pm.income.ppp, correction = "Ripley")
plot(k.fun)
#use simulation to test the point pattern against CSR
k.fun.e <- envelope(pm.income.ppp, Kest, nsim = 99, correction = "Ripley")
plot(k.fun.e)
#quadrat analysis
quads <- 10
qcount = quadratcount(pm.income.ppp, nx = quads, ny = quads)
plot(pm.income.ppp, pch = "+", cex = 0.5)
plot(qcount, add = T, col = "red")
qcount.df <- as.data.frame(qcount)
##Second, count the number of quadrats with a distinct number of points.
qcount.df = plyr::count(qcount.df,'Freq')
##Change the column names so that x=number of points and f=frequency of quadrats with x cells.
colnames(qcount.df) = c("x","f")
##Third, create new columns for total number of points and for fx^2.
qcount.df$TotPoints <- qcount.df$x * qcount.df$f
qcount.df$fx2 = (qcount.df$x)^2 * qcount.df$f
qcount.df$xfx2 = qcount.df$fx2 * qcount.df$f #adjusted for the count
##Fourth, calculate the sum of each column, which you will use as inputs into the
##formula for VMR.
f.sum <- sum(qcount.df$f)
TotPoints.sum <- sum(qcount.df$TotPoints)
fx2.sum <- sum(qcount.df$fx2)
# (Removed a stray bare `l` expression that was here: evaluating an undefined
# object would abort the script with "object 'l' not found".)
##Fifth, calculate VAR, MEAN, and VMR.
# NOTE(review): VAR below uses the xfx2 column; the textbook quadrat-count
# variance is (fx2.sum - TotPoints.sum^2 / n.quadrats) / (n.quadrats - 1) --
# confirm which formula is intended before relying on the chi-square result.
VAR <- sum(qcount.df$xfx2) / (sum(qcount.df$f) - 1)
mean.points <- TotPoints.sum / (quads * quads)
VMR <- VAR / mean.points
#Finally, perform the test statistic to test for the existence of a random spatial pattern.
chi.square = VMR*(100-1) |
e609d223210fe36bdadfe2eb413e880d03349cb1 | cef3b5e2588a7377281a8f627a552350059ca68b | /paws/man/wellarchitected_associate_lenses.Rd | 8233a8efdced54c8300e2e68e85919e9e0118fba | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sanchezvivi/paws | b1dc786a9229e0105f0f128d5516c46673cb1cb5 | 2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05 | refs/heads/main | 2023-02-16T11:18:31.772786 | 2021-01-17T23:50:41 | 2021-01-17T23:50:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 571 | rd | wellarchitected_associate_lenses.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wellarchitected_operations.R
\name{wellarchitected_associate_lenses}
\alias{wellarchitected_associate_lenses}
\title{Associate a lens to a workload}
\usage{
wellarchitected_associate_lenses(WorkloadId, LensAliases)
}
\arguments{
\item{WorkloadId}{[required]}
\item{LensAliases}{[required]}
}
\description{
Associate a lens to a workload.
}
\section{Request syntax}{
\preformatted{svc$associate_lenses(
WorkloadId = "string",
LensAliases = list(
"string"
)
)
}
}
\keyword{internal}
|
bc8a39b15e51d17bfc93514df90774053a647d19 | 2457cfad7d128305656d73c79fd0f162d3181608 | /PrettyR/man/spaghetti_mean.Rd | ddba4716d6fd2d28ef71944c084d2e8fdc17b0c1 | [
"MIT"
] | permissive | Jadamso/PrettyR | 6b6bcc8331c45364267e9c4a67a31c594fd579a8 | 16cc947bb2f216297113a3189326072d149635ea | refs/heads/master | 2021-01-15T16:51:41.219506 | 2020-03-22T23:08:35 | 2020-03-22T23:08:35 | 99,728,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 444 | rd | spaghetti_mean.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpaghettiPlot.R
\name{spaghetti_mean}
\alias{spaghetti_mean}
\title{Plot Average of Spaghetti Lines}
\usage{
spaghetti_mean(YList, Xlist, plot_col = 1, lwd = 2, ...)
}
\arguments{
\item{YList, Xlist}{output from PrettyR::spag_loess}
\item{plot_col, lwd}{passed to lines()}
\item{...}{passed to lines()}
}
\value{
No return value; called for its side effect of drawing the mean line via \code{lines()} on an existing plot.
}
\description{
Plot Average of Spaghetti Lines
}
|
4d1fdab0b2465e80b23aee3bbe9c4aea5d862ec2 | b40ea60204f4f3e6c53079d012ef7a2cd8b2bc41 | /day16/day16.R | 52a5504a0c5b6c3f6739a26c8196f9006ae69101 | [] | no_license | johnlocker/adventofcode2018 | f9648fc316e6ce6e61dab38d936a2eb4acde3587 | 3d8cae621c663df3a423b30e0b15e9bf713871c6 | refs/heads/master | 2020-04-09T09:16:36.802088 | 2018-12-16T16:41:03 | 2018-12-16T16:41:03 | 160,227,411 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,315 | r | day16.R | input <- read.table(file.path("day16", "inputA.txt"),
stringsAsFactors = FALSE, sep = "\n")
inputB <- read.table(file.path("day16", "inputB.txt"),
stringsAsFactors = FALSE, sep = "\n")
#' Pull the numeric values out of character strings
#'
#' Thin wrapper around readr's number parser.
#'
#' @param x Character vector containing embedded numbers
#' @return Numeric vector of the parsed values
extractDigits <- function (x) {
  readr::parse_number(x)
}
#' Parse a raw instruction line into a one-row data frame
#'
#' @param instruction Space-separated string: "opcode inputA inputB outputC"
#' @return One-row data.frame with numeric columns opcode, inputA, inputB, outputC
proInstrut <- function(instruction) {
  fields <- as.numeric(unlist(strsplit(instruction, " ")))
  data.frame(
    opcode = fields[1],
    inputA = fields[2],
    inputB = fields[3],
    outputC = fields[4]
  )
}
#' Apply a single device instruction to a register bank
#'
#' Dispatches on the opcode mnemonic and returns the updated registers.
#' Register operands in the instruction are 0-based, hence the `+ 1`
#' offsets into R's 1-based vectors.
#'
#' @param opcode Character scalar, one of the 16 mnemonics ("addr", "addi", ...)
#' @param parsedInstruction One-row data.frame from proInstrut() with
#'   columns inputA, inputB and outputC
#' @param register Numeric register vector
#' @return The register vector with the output slot updated
process <- function(opcode, parsedInstruction, register) {
  a <- parsedInstruction$inputA
  b <- parsedInstruction$inputB
  target <- parsedInstruction$outputC + 1
  # Register-mode views of the operands. They may be NA when a/b is actually
  # used as an immediate value, but switch() only evaluates the branch that is
  # selected, so an unused NA is harmless.
  ra <- register[a + 1]
  rb <- register[b + 1]
  register[target] <- switch(opcode,
    addr = ra + rb,
    addi = ra + b,
    mulr = ra * rb,
    muli = ra * b,
    banr = bitwAnd(ra, rb),
    bani = bitwAnd(ra, b),
    borr = bitwOr(ra, rb),
    bori = bitwOr(ra, b),
    setr = ra,
    seti = a,
    gtir = as.numeric(a > rb),
    gtri = as.numeric(ra > b),
    gtrr = as.numeric(ra > rb),
    eqir = as.numeric(a == rb),
    eqri = as.numeric(ra == b),
    eqrr = as.numeric(ra == rb),
    # The original if-chain silently returned the register unchanged on an
    # unrecognized mnemonic; fail loudly instead so typos cannot corrupt runs.
    stop("Unknown opcode: ", opcode)
  )
  register
}
# The 16 known opcode mnemonics; their numeric codes are unknown and are
# deduced from the Before/After samples below.
opcodes <- c("addr", "addi", "mulr", "muli", "banr", "bani", "borr", "bori", "setr", "seti",
"gtir", "gtri", "gtrr", "eqir", "eqri", "eqrr")
# Lookup table mapping each mnemonic to its (initially unknown) numeric code.
opcodesDF <- data.frame(opcodes,
numbers = NA,
stringsAsFactors = FALSE)
# Per-sample count of how many mnemonics reproduce the observed After state.
opcodesSucces <- c()
# Mnemonics whose numeric code has already been pinned down.
knownCodes <- c()
# Part 1: walk the samples; each sample spans three consecutive lines
# (Before-state, instruction, After-state).
for (i in 1:nrow(input)) {
if (grepl("Before", input[i, ])) {
before <- extractDigits(unlist(strsplit(input[i, ], ",")))
inst <- proInstrut(input[i + 1, ])
after <- extractDigits(unlist(strsplit(input[i + 2, ], ",")))
# Which mnemonics turn `before` into `after` under this instruction?
opcodeTest <- sapply(opcodesDF$opcodes, function(opcode) all(process(opcode, inst, before) == after))
opcodeSum <- sum(opcodeTest)
opcodesSucces <- c(opcodesSucces, opcodeSum)
# Drop mnemonics already identified; if exactly one candidate remains and its
# numeric code is not yet taken, record the mapping (process of elimination).
opcodeTest <- opcodeTest[which(!labels(opcodeTest) %in% knownCodes)]
if (sum(opcodeTest) == 1) {
if (!inst$opcode %in% opcodesDF$numbers[!is.na(opcodesDF$numbers)]) {
opcodesDF$numbers[which(opcodesDF$opcodes == labels(opcodeTest)[which(opcodeTest)])] <- inst$opcode
knownCodes <- opcodesDF$opcodes[!is.na(opcodesDF$numbers)]
}
}
}
}
# Part 1 answer: number of samples consistent with three or more opcodes.
cat(paste0("Solution part 1: ", sum(opcodesSucces >= 3)))
# Part 2: execute the test program with the deduced opcode mapping, starting
# from an all-zero register bank.
curRegister <- c(0, 0, 0, 0)
for (i in 1:nrow(inputB)) {
inst <- proInstrut(inputB$V1[i])
curRegister <- process(opcode = opcodesDF$opcodes[which(opcodesDF$numbers == inst$opcode)],
inst,
register = curRegister)
}
# Part 2 answer: contents of register 0 after the program halts.
cat(paste0("Solution part 2: ", curRegister[1]))
|
22b3feee052fe7be5fb976ec72ee3e8e79a4b5b6 | 54e59bed4a9adddfa1efcb4642b1bd0e0cf6b623 | /E.coli-models/Chen01.R | 760022382e12da6f5fdb816c77321126aad88057 | [] | no_license | evolgeniusteam/nucleotideSkews | e62d831c29658ca47ea3771d45a13e591b9b208b | 01512e59842ac488e636a188e757fd6b05644dda | refs/heads/master | 2020-03-30T16:59:08.967543 | 2019-05-16T01:22:25 | 2019-05-16T01:22:25 | 39,622,600 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,255 | r | Chen01.R | ## -- for testing purpose only --
library(sybil)
library(sybilSBML);
library(glpkAPI)
## load an xml model --
m <- readSBMLmod(filename="~/05_skews_bacteria/7_math_models/Bacillus_subtilis.xml");
# remove the requirement to produce maintenance ATP:
m <- changeBounds(m, "ATPM", lb=0)
# set glucose uptake limit to 1.0:
ex <- findExchReact(m)
m <- changeBounds(m, ex["EX_glc(e)"], lb = -1);
m2 <- addReact(m, "burnATP", c("atp[c]", "h2o[c]", "adp[c]", "pi[c]", "h[c]"), c(-1, -1, 1, 1, 1)) # same as ATPM
m2 <- changeObjFunc(m2, react = "burnATP")
atp.per.glc <- lp_obj( optimizeProb(m2) ) # 23.500
atp.per.glc
upt <- uptReact(ex)
ex[upt]
mg <- changeBounds(m, ex["EX_glc(e)"], lb = -1000) # allow unlimited uptake of glucose
mg <- changeObjFunc(mg, react = "EX_glc(e)", obj_coef = 1) # but minimize this uptake (given what needs to be produced)
glc.per.mol.RNA <- c() # use this to store the results
# how much of glucose is needed per Guanine (gua) de novo synthesis etc.?
# add missing (hypothetical) transport reactions so each base can leave the cell.
# BUG FIX: each addReact() must build on the previous result. The original code
# re-used `mg` on every line, discarding the reactions added before it (only
# csn_tr survived), which breaks the per-nucleotide optimizations below that
# rely on all four transporters being present.
m.nt <- addReact(mg, "ade_tr", c("ade[c]", "ade[e]"), c(-1, 1)) # transport reaction to extracellular
m.nt <- addReact(m.nt, "gua_tr", c("gua[c]", "gua[e]"), c(-1, 1)) # transport reaction to extracellular
m.nt <- addReact(m.nt, "ura_tr", c("ura[c]", "ura[e]"), c(-1, 1)) # transport reaction to extracellular
m.nt <- addReact(m.nt, "csn_tr", c("csn[c]", "csn[e]"), c(-1, 1)) # transport reaction to extracellular
m.nt.o <- changeBounds(m.nt, "EX_ade(e)", lb=1) # require at least 1 mol of this nucleotide to be produced
glc.per.mol.RNA["A"] <- -lp_obj(optimizeProb(m.nt.o))
m.nt.o <- changeBounds(m.nt, "EX_csn(e)", lb=1) # require at least 1 mol of this nucleotide to be produced
glc.per.mol.RNA["C"] <- -lp_obj(optimizeProb(m.nt.o))
m.nt.o <- changeBounds(m.nt, "EX_gua(e)", lb=1) # require at least 1 mol of this nucleotide to be produced
glc.per.mol.RNA["G"] <- -lp_obj(optimizeProb(m.nt.o))
m.nt.o <- changeBounds(m.nt, "EX_ura(e)", lb=1) # require at least 1 mol of this nucleotide to be produced
glc.per.mol.RNA["U"] <- -lp_obj(optimizeProb(m.nt.o))
atp.per.RNA <- glc.per.mol.RNA * atp.per.glc
atp.per.RNA
m2 <- readTSVmod(prefix="iJO1366", quoteChar = "\"")
m2 <- changeBounds(m2, "ATPM", lb=0);
|
63ac019478f3bc3f5684007b2a4322547e2c1c98 | 57cd8fb566404ab9fcda2cac40d13343b5750acb | /Projects/KPI_anomaly/dataanalysic.R | 2f5dd4d2cbb02d76c8145dba0756d4ef959373e1 | [] | no_license | snowisland1/Dews | abbb2179d0856bebbbbd0d4aadc2bb5c4dd24f7e | 0bd75a5bdd7403cfde363025c3ecf61e3157a8c3 | refs/heads/master | 2020-08-07T20:49:49.555889 | 2017-12-29T06:44:58 | 2017-12-29T06:44:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,992 | r | dataanalysic.R | library(xts)
library(lubridate)
library(dplyr)
train<-read.csv("../../dataset/anomaly_detection_1/train.csv")
submit<-read.csv("../../dataset/anomaly_detection_1/test_dataset.csv")
submit$label<-0
KPI_IDs<-unique(train$KPI.ID)
# Simple rule-based approach: by inspection the series is roughly periodic with a one-day cycle, so values at the same hour/minute are compared across days; points that deviate strongly from the per-slot median are flagged as anomalies.
tmp_line1<-train[which(train$KPI.ID == KPI_IDs[27]),]
tmp_line2<-submit[which(submit$KPI.ID == KPI_IDs[27]),]
names(tmp_line1)<-c("id","timestamp","value","label")
names(tmp_line2)<-c("id","timestamp","value","label")
tmp_line<-rbind(tmp_line1,tmp_line2)
tmp_line$datetime<-as.POSIXct(tmp_line$timestamp, origin="1970-01-01")
# Build a per-observation feature table (id, timestamp, label, hour, minute,
# value, calendar date) row by row from tmp_line.
# NOTE(review): growing vectors element-by-element inside a loop is O(n^2);
# a single vectorized data.frame() construction would be much faster.
samples<-NULL
order<-1
for(i in 1:nrow(tmp_line)){
samples$id[order]<-as.character(tmp_line[i,1])
samples$timestamp[order]<-tmp_line[i,"timestamp"]
samples$label[order]<-as.numeric(tmp_line[i,'label'])
samples$hour[order]<-hour(tmp_line[i,"datetime"])
samples$minute[order]<-as.integer(minute(tmp_line[i,"datetime"]))
samples$value[order]<-tmp_line[i,"value"]
# NOTE(review): element-wise assignment likely drops the Date class here, so
# `date` is presumably stored as day numbers -- confirm downstream
# group_by/merge on `date` behaves as intended.
samples$date[order]<-as.Date(tmp_line[i,"datetime"])
order<-order+1
}
# Convert the accumulated list of parallel vectors into a data frame.
samples<-data.frame(samples)
#samples$value<-log(samples$value+1)
group<-group_by(samples,date)
sta1<-summarise(group,mean=mean(value),sd=sd(value))
samples<-merge(samples,sta1,by.x = "date",by.y = "date")
samples$value<-(samples$value-samples$mean)/samples$sd
group<-group_by(samples,hour,minute)
sta2<-summarise(group,median=median(value,na.rm = T))
samples<-merge(samples,sta2,by.x = c("hour","minute"),by.y = c("hour","minute"))
samples$diff1<-abs(samples$median-samples$value)
samples$predict<-0
samples1<-merge(tmp_line1[,c(1,2)],samples,by.x=c("id","timestamp"),by.y = c("id","timestamp"),all.x = T)
samples2<-merge(tmp_line2[,c(1,2)],samples,by.x=c("id","timestamp"),by.y = c("id","timestamp"),all.x = T)
write.csv(samples1,"./samples/samples1.csv",row.names = F,quote = F)
samples2[which(samples2$diff1>0.9),"label"]<-1
write.csv(samples2[,c("id","timestamp","label")],"./results/result_27.csv",row.names = F,quote = F)
# Evaluate detection results (precision / recall / F1 at a given threshold)
# Evaluate anomaly predictions at threshold `gap`: rows with diff1 > gap are
# treated as predicted anomalies. Prints the true-positive count, predicted
# count, precision and recall, and returns the F1 score.
getRecallandPre <- function(tmp_line, gap) {
  flagged <- which(tmp_line$diff1 > gap)
  actual_pos <- sum(tmp_line$label)
  predicted_pos <- length(flagged)
  true_pos <- sum(tmp_line[flagged, "label"])
  pre <- true_pos / predicted_pos
  rec <- true_pos / actual_pos
  print(true_pos)
  print(predicted_pos)
  print(pre)
  print(rec)
  2 * pre * rec / (pre + rec)
}
# Submission: pool the 27 per-KPI result files into one table.
# The file names follow a fixed zero-padded pattern, so generate them instead
# of spelling out all 27 literals.
filenames<-sprintf("result_%02d.csv", 1:27)
# Read every file first, then bind once: avoids the quadratic cost of
# growing `pdata` with rbind() inside the loop.
pdata<-do.call(rbind, lapply(filenames, function(f) {
  read.csv(paste0("./results/", f))
}))
4b1d7e87e6fdbfba0429671848d2810cd5c49ee7 | 61a508d7e5038afcdcdc403f4318b0f75a77dcc8 | /tests/testthat/test-datasets.R | 97f7f06ede9040dfb6c39fc8866bb8a6e5af45f3 | [] | no_license | zhaoxiaohe/datasauRus | f2da677d0bea047d5767461d1e2622cf4fad9615 | 327885e23f039b3ce3f59ad590c95f1134cda1d7 | refs/heads/master | 2021-01-20T08:57:21.434040 | 2017-05-03T18:04:18 | 2017-05-03T18:04:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,256 | r | test-datasets.R | context("datasets")
# Shape assertions shared by all dataset tests: check column count, row
# count and, when `uniquecol` is given, the number of distinct values in
# that column.
datashapetests <- function(df, ncols, nrows, uniquecol = NULL, nuniques = NULL) {
  expect_equal(ncol(df), ncols)
  expect_equal(nrow(df), nrows)
  if (!is.null(uniquecol)) {
    expect_equal(nrow(unique(df[uniquecol])), nuniques)
  }
}
# One shape test per bundled dataset: expected (ncols, nrows) and, for the
# long formats, the grouping column and its expected number of unique values.
test_that("box_plots is correctly shaped",{
  datashapetests(box_plots,6,2484)
})
test_that("datasaurus_dozen is correctly shaped",{
  datashapetests(datasaurus_dozen,3,1846,"dataset",13)
})
test_that("datasaurus_dozen_wide is correctly shaped",{
  datashapetests(datasaurus_dozen_wide,26,142)
})
test_that("simpsons_paradox is correctly shaped",{
  datashapetests(simpsons_paradox,3,444,"dataset",2)
})
test_that("simpsons_paradox_wide is correctly shaped",{
  datashapetests(simpsons_paradox_wide,4,222)
})
test_that("twelve_from_slant_long is correctly shaped",{
  datashapetests(twelve_from_slant_long,3,2184,"dataset",12)
})
test_that("twelve_from_slant_wide is correctly shaped",{
  datashapetests(twelve_from_slant_wide,24,182)
})
test_that("twelve_from_slant_alternate_long is correctly shaped",{
  datashapetests(twelve_from_slant_alternate_long,3,2184,"dataset",12)
})
test_that("twelve_from_slant_alternate_wide is correctly shaped",{
  datashapetests(twelve_from_slant_alternate_wide,24,182)
})
|
68a213243b5a6d7feb48a3a538f776f588fea1bd | 24b9d09b1dea9db88cbf7b9dbb6f3d24c0cd4b88 | /prettyRadio.R | 9132744ee6522ca5279b8161395749686cf498c6 | [] | no_license | pat-alt/stackQuerries | 55f67b78429ea90f981ae3b97d608708a8023cd4 | f0e82e02feb6a9e649a5744836cfdab43a6583bb | refs/heads/master | 2022-11-05T01:20:41.080961 | 2020-06-27T13:33:08 | 2020-06-27T13:33:08 | 272,362,435 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,380 | r | prettyRadio.R | library(shiny)
library(shinyWidgets)
# Choice vectors served by the four conditional select inputs below.
list1 = letters[1:3]
list2 = letters[4:6]
list3 = letters[7:9]
list4 = letters[10:12]
# UI: one radio group, one checkbox group, and a dynamically rendered select.
ui <- fluidPage(
  titlePanel("Dynamic Boxes"),
  fluidRow(
    prettyRadioButtons("check1", "Please Pick", c("Option 1" = "op1", "Option 2" = "op2"), selected=character(0), shape="square", outline = T, inline = T),
    checkboxGroupInput("check2", "Please Pick", c("Option 3" = "op3", "Option 4" = "op4"), inline = T),
    uiOutput("select")
  )
)
# Server: renders a different selectInput depending on the radio choice and,
# for "Option 2", on which (and how many) checkboxes are ticked.
server <- function(input, output){
  output$select = renderUI({
    # req() suppresses the output until the radio button has a selection.
    req(input$check1)
    if (input$check1 == 'op1') {
      selectInput('select_1', 'No.1 Select',choices = list1)
    }
    else if (input$check1 == 'op2') {
      req(input$check2)
      if (length(input$check2) == 1) {
        if(input$check2 == 'op3') {
          selectInput('select_2', 'No.2 Select',choices = list2)
        }
        else if (input$check2 == 'op4') {
          selectInput('select_3', 'No.3 Select',choices = list3)
        }
      }
      else if(length(input$check2) == 2){
        selectInput('select_4', 'No.4 Select',choices = list4)
      }
    }
  })
}
shinyApp(ui, server)
# Continuing from above ----
# Variant: the second chooser is itself a dynamically rendered radio group
# (shown only when "Option 2" is picked) instead of a static checkbox group.
ui <- fluidPage(
  titlePanel("Dynamic Boxes"),
  fluidRow(
    prettyRadioButtons("check1", "Please Pick", c("Option 1" = "op1", "Option 2" = "op2"), selected=character(0), shape="square", outline = T, inline = T),
    uiOutput("check2"),
    uiOutput("select")
  )
)
server <- function(input, output) {
  # Second-level radio buttons appear only after "Option 2" is selected.
  output$check2 = renderUI({
    req(input$check1)
    if (input$check1=='op2') {
      prettyRadioButtons("check2", "Please Pick",
                         c("Option 2.1" = "op3", "Option 2.2" = "op4", "Option 2.3" = "op5"),
                         selected=character(0), shape="square", outline = T, inline = T)
    }
  })
  # The select widget depends on both levels of radio choice.
  output$select = renderUI({
    req(input$check1)
    if (input$check1 == 'op1') {
      selectInput('select_1', 'No.1 Select',choices = list1)
    }
    else if (input$check1 == 'op2') {
      req(input$check2)
      if (input$check2 == 'op3') {
        selectInput('select_2', 'No.2 Select',choices = list2)
      } else if (input$check2== 'op4') {
        selectInput('select_3', 'No.3 Select',choices = list3)
      } else {
        selectInput('select_4', 'No.4 Select',choices = list4)
      }
    }
  })
}
shinyApp(ui, server)
|
a3016a0cd5aa10de192f7d223cc04e41acb033bf | bc3d6d25f38f5651afe9e01bb199c0d125466042 | /Code/Library Import.R | d0693540c8e63a404ebb8fa9eaab3dd73b2fe16f | [] | no_license | LukeRyanCCC/Project1 | 586ebc5055739d10bb46dc5b3ff65ad9915624a1 | 0bb400e205715a3c1544ac10e4356ab02891cb84 | refs/heads/master | 2020-03-29T20:05:05.193017 | 2018-09-26T11:58:11 | 2018-09-26T11:58:11 | 150,294,457 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 567 | r | Library Import.R | #Load librarys
# Package names to attach: spatial, tidy-data, plotting and API packages
# used across the project.
library_load <- c("plyr", "tidyr", "data.table", "readr",
                  "stringr","maps","sp","rgeos","raster",
                  "tmap","ggplot2","ggmap","leaflet",
                  "spatstat","gstat","sf","dplyr","magrittr",
                  "maptools", "rgdal", "lubridate", "corrplot",
                  "httr", "jsonlite", "zoo", "tibble", "broom",
                  "tweenr", "nomisr", "animation", "magick", "shiny",
                  "rsconnect", "geosphere","RColorBrewer")
# character.only = TRUE makes library() accept the package names as strings.
lapply(library_load, library, character.only = TRUE)
|
653bb53b67a1e9618723ca0ab65bbe86bd9880fe | 681ea911df31c6f7204373fcf472af1203cb0026 | /R/get_incidence_matrix.R | b9dbffce75fd4d98b10a4f5ac1c4fc773d2055cd | [
"MIT"
] | permissive | alrobles/geotax | eac0a6b8b711b21b5dbb235b7e7a4af2dc3047a1 | 4375908a4862ba7d36589b03a32eabdf598d2c71 | refs/heads/master | 2023-07-19T17:15:32.556263 | 2023-07-07T14:54:38 | 2023-07-07T14:54:38 | 86,359,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,491 | r | get_incidence_matrix.R | #' Generate an incidence matrix from a interaction table
#'
#' @description Builds a 0/1 incidence matrix from a two-column interaction
#' table, e.g. hosts in the first column and parasites in the second.
#'
#' @param db A data.frame (or tibble) with exactly two columns.
#'
#' @param returnDataFrame Logical; if FALSE (the default) a base matrix is
#' returned, otherwise a wide data.frame (tibble).
#'
#' @return An incidence matrix with one row per first-column entity and one
#' column per second-column entity, filled with ones and zeros, or the
#' equivalent data.frame when \code{returnDataFrame = TRUE}.
#' @export
#'
#' @examples
#' get_incidence_matrix(beetleTreeInteractions)
#' get_incidence_matrix(beetleTreeInteractions,
#'                      returnDataFrame = TRUE)
get_incidence_matrix <- function(db, returnDataFrame = FALSE){
  if (!("data.frame" %in% class(db))) {
    stop("The table is not a data frame.") }
  if (ncol(db) != 2) {
    stop("The table does not have two columns.") }
  if (returnDataFrame) {
    db %>%
      dplyr::mutate(value = 1) %>%
      dplyr::distinct() %>%
      spread(2, 3, fill = 0)
  } else {
    tbl <- as.matrix(db)
    lhs <- unique(tbl[, 1])
    rhs <- unique(tbl[, 2])
    # One column per left-hand entity; 1 wherever the right-hand entity
    # interacts with it.
    inc <- sapply(lhs, function(entity) {
      as.numeric(rhs %in% tbl[tbl[, 1] %in% entity, 2])
    })
    rownames(inc) <- rhs
    colnames(inc) <- lhs
    # Transpose so rows follow the first column of `db`.
    t(inc)
  }
}
|
1d5393785fb5089c5620bc97e715eeccc2058ebc | 5e90fe6a6db2611e73d08dca0245dab7d4039667 | /man/sim.Rd | 27766453b699ae101a3dbbbce2648169aa3d1f75 | [] | no_license | crazybilly/modelr | 4d5a719d4e0090c3ea9843eb6b820e291544907c | 71502aeeb74b7deaab3c2b033e12cdcacb8aa3d3 | refs/heads/master | 2021-01-21T00:02:19.878566 | 2016-09-09T13:25:53 | 2016-09-09T13:25:53 | 67,800,592 | 0 | 0 | null | 2016-09-09T13:19:44 | 2016-09-09T13:19:43 | null | UTF-8 | R | false | true | 494 | rd | sim.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{sim}
\alias{sim}
\alias{sim1}
\alias{sim2}
\alias{sim3}
\alias{sim4}
\title{Simple simulated datasets}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 30 rows and 2 columns.}
\usage{
sim1
sim2
sim3
sim4
}
\description{
These simple simulated datasets are useful for teaching modelling
basics.
}
\keyword{datasets}
|
aefcba924f095ef4aadb579c00f1298040aba9fe | 1dc0ab4e2b05001a5c9b81efde2487f161f800b0 | /experiments/keel/noisy/an/an_nc5.R | 6ea1999e763d9187b4646b1df1e8db2719372af5 | [] | no_license | noeliarico/knnrr | efd09c779a53e72fc87dc8c0f222c0679b028964 | 9f6592d1bbc1626b2ea152fbd539acfe9f9a5ab3 | refs/heads/master | 2020-06-01T02:44:34.201881 | 2020-03-13T13:30:52 | 2020-03-13T13:30:52 | 190,601,477 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,967 | r | an_nc5.R | # ecoli -------------------------------------------------------------------
# All five CV folds of every KEEL dataset follow the same naming scheme
# (<ds>_5an_nc_5_<i>tra / <ds>_5an_nc_5_<i>tst), so the five copy-pasted
# per-dataset blocks are collapsed into one loop.  The original code also
# relied on a trailing empty argument in bind_rows(a, ..., e,), which only
# works because rlang's dots tolerate trailing commas; that fragility is
# removed here.  For each dataset this:
#   1. trains on every fold, keeping each per-fold result under its original
#      variable name (<ds>_5an_nc_5_<i>) for backward compatibility, and
#   2. averages all numeric columns over the five folds into <ds>_5an_nc.
for (ds in c("ecoli", "glass", "ionosphere", "sonar", "wine")) {
  fold_results <- vector("list", 5)
  for (i in seq_len(5)) {
    res <- train_keel(
      get(sprintf("%s_5an_nc_5_%dtra", ds, i)),
      get(sprintf("%s_5an_nc_5_%dtst", ds, i))
    )
    # Keep the per-fold variable so downstream code can still reference it.
    assign(sprintf("%s_5an_nc_5_%d", ds, i), res)
    fold_results[[i]] <- res
  }
  # Mean over folds for every numeric column, grouped by (k, method, type).
  assign(
    sprintf("%s_5an_nc", ds),
    bind_rows(fold_results) %>%
      group_by(k, method, type) %>%
      summarise_if(is.numeric, mean, na.rm = TRUE) %>%
      ungroup()
  )
}
9513fa0c151bbfe206161e3fd2b482466394351d | 3567ad9c30686ff697565e674fb0a9e8567fe8a1 | /plot2.R | 9d4c682b61a7c8a8eb90bf1249b8f37dabe3a56f | [] | no_license | skroonenburg/ExData_Plotting1 | c7303bab8e409c88191bd1da036f82af8362683b | e30ac35fcc0c988d98f3c02736473422eefe682b | refs/heads/master | 2021-01-15T21:30:11.741135 | 2015-02-06T10:24:23 | 2015-02-06T10:24:23 | 30,239,415 | 0 | 0 | null | 2015-02-03T11:25:11 | 2015-02-03T11:25:09 | null | UTF-8 | R | false | false | 1,326 | r | plot2.R | library(data.table)
require(data.table)
# Read only the rows for the target date range from the household power
# consumption file.  The rows are selected with hard-coded skip/nrows values
# so that just the required 2880 lines are ever parsed, keeping the script
# fast.  Returns a data.frame with a Date column (class Date) and a Time
# column holding full POSIX timestamps.
readData <- function()
{
  df <- read.table('household_power_consumption.txt', sep = ';', na.strings="?", skip=66637,nrows=2880, stringsAsFactors = FALSE)
  # Skipping to specific rows also skips the header row, so the column names
  # have to be supplied manually.
  colnames(df) <- c('Date','Time','Global_active_power','Global_reactive_power','Voltage','Global_intensity','Sub_metering_1','Sub_metering_2','Sub_metering_3')
  # Build the full timestamps first, while Date is still a character column.
  df$Time <- strptime(paste(df$Date, df$Time, sep = ' '), "%d/%m/%Y %H:%M:%S")
  df$Date <- as.Date(df$Date, "%d/%m/%Y")
  df
}
# Draw the full time series of global active power as a line chart.
# Expects the data frame produced by readData(): a POSIX `Time` column and a
# numeric `Global_active_power` column (kilowatts).
drawGlobalActivePowerGraph <- function(data)
{
  plot(data$Time, data$Global_active_power, type="l", ylab = 'Global Active Power (kilowatts)', xlab = '')
}
# Open the PNG device
# (480x480 px, transparent background so the image composites onto any page)
png(file = "plot2.png", bg = "transparent", width = 480, height = 480)
# Read the relevant data from a file
data <- readData()
# Render the global active power graph
drawGlobalActivePowerGraph(data)
# Close the PNG device
dev.off()
5aaa04501d1490a4ece3f0c32718c163f1fb0dd9 | 00fbe684e4412050534584d8c21810aea532c4e5 | /code/calculateVE.R | 37ba6fe832cb770feb8bf441a0472b8eb3d6bb3f | [] | no_license | dviraran/covid_analyses | 39000d1937f534202237b8c98271db7784fdad56 | c3b6ed5474643be423fbaa44366329bf2f582f0d | refs/heads/master | 2023-07-31T00:59:02.761826 | 2021-09-19T20:02:37 | 2021-09-19T20:02:37 | 334,678,742 | 8 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,589 | r | calculateVE.R | library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(cowplot)
library(ggrepel)
library(plyr)
library(superheat)
library(ggpubr)
source('~/Documents/covid_analyses/code/vac_functions.R')
### READ DATA
# Analysis window ends on the last date kept for the vaccination series.
last.date='2021-02-22'
# https://data.gov.il/dataset/covid-19/resource/57410611-936c-49a6-ac3c-838171055b1f/download/vaccinated-per-day-2021-02-09.csv
vac_data = read.csv('~/Documents/covid_analyses/data/vaccinated-per-day-2021-02-27.csv',header = T,row.names = NULL,stringsAsFactors = F)
vac_data = vac_data[vac_data$VaccinationDate<=last.date,]
# Daily deaths, restricted to the campaign window (2020-12-20 .. 2021-02-22).
death = read.csv('~/Documents/covid_analyses/data/deadPatientsPerDate.csv',header = T,row.names = NULL,stringsAsFactors = F)
death$date = as.Date(death$date)
death = death[death$date>='2020-12-20' & death$date<='2021-02-22',]
moh = read.csv('~/Documents/covid_analyses/data/moh_data.2.23.2021.csv',header = T,row.names = 1,stringsAsFactors = F)
# National daily outcome vectors (3rd lockdown) from the Rossman et al. data.
rossman = read.csv('~/Documents/covid_analyses/data/Fig2_national_3rd_lockdown_y_vectors_daily.csv',header = T,row.names = NULL,stringsAsFactors = F)
rossman$date = as.Date(rossman$X)
rossman = rossman[rossman$date>='2020-12-20',]
#rossman[rossman$date>'2021-02-15',substr(colnames(rossman),1,1)=='y'] = 0
### ORGANIZE DATA
# One data frame per outcome/age stratum; `amount` holds the daily counts
# pulled from the matching y_* column.
rossman_cases_60plus = rossman
rossman_cases_60plus$amount = (rossman[,'y_pos_3_national_60.'])
rossman_cases_60minus = rossman
rossman_cases_60minus$amount = (rossman[,'y_pos_3_national_0.59'])
rossman_hosp_60plus = rossman
rossman_hosp_60plus$amount = rossman[,'y_hosped_3_national_60.']
rossman_hosp_60minus = rossman
rossman_hosp_60minus$amount = (rossman[,'y_hosped_3_national_0.59'])
rossman_severe_60plus = rossman
rossman_severe_60plus$amount = (rossman[,'y_sevhosped_3_national_60.'])
rossman_severe_60min = rossman
rossman_severe_60min$amount = (rossman[,'y_sevhosped_3_national_0.59'])
# Daily first/second dose counts, split at age 60 (old = 60+, yng = <60).
A = vac_data$age_group %in% c('60-69','70-79','80-89','90+')
vac_1dose_old = aggregate(as.numeric(vac_data$first_dose[A]),by=list(vac_data$VaccinationDate[A]),sum,na.rm=T)
vac_2dose_old = aggregate(as.numeric(vac_data$second_dose[A]),by=list(vac_data$VaccinationDate[A]),sum,na.rm=T)
A = !(vac_data$age_group %in% c('60-69','70-79','80-89','90+'))
vac_1dose_yng = aggregate(as.numeric(vac_data$first_dose[A]),by=list(vac_data$VaccinationDate[A]),sum,na.rm=T)
vac_2dose_yng = aggregate(as.numeric(vac_data$second_dose[A]),by=list(vac_data$VaccinationDate[A]),sum,na.rm=T)
vac_1dose = aggregate(as.numeric(vac_data$first_dose),by=list(vac_data$VaccinationDate),sum,na.rm=T)
vac_2dose = aggregate(as.numeric(vac_data$second_dose),by=list(vac_data$VaccinationDate),sum,na.rm=T)
# All dose tables share the same two-column layout.
colnames(vac_1dose_yng) = c('Date','Counts')
colnames(vac_2dose_yng) = colnames(vac_1dose_yng)
colnames(vac_2dose_old) = colnames(vac_1dose_yng)
colnames(vac_1dose_old) = colnames(vac_1dose_yng)
colnames(vac_1dose) = colnames(vac_1dose_yng)
colnames(vac_2dose) = colnames(vac_1dose_yng)
### CREATE SUPP FIGURE
# Per-group observed-vs-expected panels (getCounts comes from
# vac_functions.R).  Groups: 1st dose days 0-13 / 14-20, 2nd dose days
# 0-6 / 7-13 / 14+.  NOTE(review): `cohort_size` is not defined in this
# script -- presumably set in vac_functions.R; confirm before running
# standalone.
p1 = getCounts(vac_1dose_old,rossman_cases_60plus,'2020-12-20',last.date,0,14,cohort_size = cohort_size,create_plot ='group1',tit='Group 1 - 1st dose 0-13')
p2 = getCounts(vac_1dose_old,rossman_cases_60plus,'2020-12-20',last.date,14,7,cohort_size=cohort_size,create_plot ='group2',tit='Group 2 - 1st dose 14-20')
p3 = getCounts(vac_2dose_old,rossman_cases_60plus,'2021-01-10',last.date,0,7,cohort_size=cohort_size,create_plot ='group3',tit='Group 3 - 2nd dose 0-6')
p4 = getCounts(vac_2dose_old,rossman_cases_60plus,'2021-01-10',last.date,7,7,cohort_size=cohort_size,create_plot ='group4',tit='Group 4 - 2nd dose 7-13')
p5 = getCounts(vac_2dose_old,rossman_cases_60plus,'2021-01-10',last.date,14,30,cohort_size=cohort_size,create_plot ='group5',tit='Group 5 - 2nd dose 14+')
ggarrange(p1$plot,p2$plot,p3$plot,p4$plot,p5$plot)
ggsave('~/Documents/covid_analyses/plots/supp_fig1_old.pdf',width=14,height = 10)
# Same five groups for the under-60 stratum.
p1 = getCounts(vac_1dose_yng,rossman_cases_60minus,'2020-12-20',last.date,0,14,cohort_size = cohort_size,create_plot ='group1',tit='Group 1 - 1st dose 0-13')
p2 = getCounts(vac_1dose_yng,rossman_cases_60minus,'2020-12-20',last.date,14,7,cohort_size=cohort_size,create_plot ='group2',tit='Group 2 - 1st dose 14-20')
p3 = getCounts(vac_2dose_yng,rossman_cases_60minus,'2021-01-10',last.date,0,7,cohort_size=cohort_size,create_plot ='group3',tit='Group 3 - 2nd dose 0-6')
p4 = getCounts(vac_2dose_yng,rossman_cases_60minus,'2021-01-10',last.date,7,7,cohort_size=cohort_size,create_plot ='group4',tit='Group 4 - 2nd dose 7-13')
p5 = getCounts(vac_2dose_yng,rossman_cases_60minus,'2021-01-10',last.date,14,30,cohort_size=cohort_size,create_plot ='group5',tit='Group 5 - 2nd dose 14+')
ggarrange(p1$plot,p2$plot,p3$plot,p4$plot,p5$plot)
ggsave('~/Documents/covid_analyses/plots/supp_fig1_yng.pdf',width=14,height = 10)
### CALCULATE ESTIMATIONS
# For each outcome / age stratum, run the beta-grid analysis
# (runAnalysisForBeta from vac_functions.R) and collect point estimates
# (res[[1]]) plus CI bounds (res[[2]], alternating low/hi columns) into `df`.
res = runAnalysisForBeta(moh$Cases60plus,vac_1dose_old,vac_2dose_old,rossman_cases_60plus,cohort_size=1428000,'cases')
df = melt(res[[1]], id.vars ='beta')
colnames(df)[3] = c('Cases60plus')
x = melt(res[[2]], id.vars ='beta')
df$Cases60plus.low = x$value[grepl('low',x$variable)]
df$Cases60plus.hi = x$value[grepl('hi',x$variable)]
# Positive cases, under 60.
res = runAnalysisForBeta(moh$Cases60min,vac_1dose_yng,vac_2dose_yng,rossman_cases_60minus,cohort_size=7539000,'cases')
x = melt(res[[1]], id.vars ='beta')
df$Cases60minus = x$value
x = melt(res[[2]], id.vars ='beta')
df$Cases60minus.low = x$value[grepl('low',x$variable)]
df$Cases60minus.hi = x$value[grepl('hi',x$variable)]
# Hospitalizations, 60+.
res = runAnalysisForBeta(moh$Hosp60plus,vac_1dose_old,vac_2dose_old,rossman_hosp_60plus,cohort_size=1428000,'hosp')
x = melt(res[[1]], id.vars ='beta')
df$Hosp60plus = x$value
x = melt(res[[2]], id.vars ='beta')
df$Hosp60plus.low = x$value[grepl('low',x$variable)]
df$Hosp60plus.hi = x$value[grepl('hi',x$variable)]
# Hospitalizations, under 60.
res = runAnalysisForBeta(moh$Hosp60min,vac_1dose_yng,vac_2dose_yng,rossman_hosp_60minus,cohort_size=7539000,'hosp')
x = melt(res[[1]], id.vars ='beta')
df$Hosp60minus = x$value
x = melt(res[[2]], id.vars ='beta')
df$Hosp60minus.low = x$value[grepl('low',x$variable)]
df$Hosp60minus.hi = x$value[grepl('hi',x$variable)]
# Severe cases, 60+.
res = runAnalysisForBeta(moh$Severe60plus,vac_1dose_old,vac_2dose_old,rossman_severe_60plus,cohort_size=1428000,'severe')
x = melt(res[[1]], id.vars ='beta')
df$Severe60plus = x$value
x = melt(res[[2]], id.vars ='beta')
df$Severe60plus.low = x$value[grepl('low',x$variable)]
df$Severe60plus.hi = x$value[grepl('hi',x$variable)]
# Severe cases, under 60.
res = runAnalysisForBeta(moh$Severe60min,vac_1dose_yng,vac_2dose_yng,rossman_severe_60min,cohort_size=7539000,'severe')
x = melt(res[[1]], id.vars ='beta')
df$Severe60minus = x$value
x = melt(res[[2]], id.vars ='beta')
df$Severe60minus.low = x$value[grepl('low',x$variable)]
df$Severe60minus.hi = x$value[grepl('hi',x$variable)]
# Deaths, computed against the 60+ dose series.
res = runAnalysisForBeta(moh$Deceased,vac_1dose_old,vac_2dose_old,death,cohort_size=1428000,'severe')
x = melt(res[[1]], id.vars ='beta')
df$Deceased = x$value
x = melt(res[[2]], id.vars ='beta')
df$Deceased.low = x$value[grepl('low',x$variable)]
df$Deceased.hi = x$value[grepl('hi',x$variable)]
# Recode the dose/time-window factor levels into readable group names.
df$variable = mapvalues(df$variable,from=c('Dose2.0', 'Dose2.7', 'Dose2.14', 'Dose1.0', 'Dose1.14'),
                        to=c('2nd dose 0-6','2nd dose 7-13','2nd dose 14+','1st dose 0-13','1st dose 14-20'))
colnames(df)[2] = 'Group'
df$Group <- factor(df$Group,
                   levels = c('1st dose 0-13','1st dose 14-20','2nd dose 0-6','2nd dose 7-13','2nd dose 14+'))
### CREATE PLOTS
# One VE-vs-beta panel per outcome/age stratum (createPlot and
# createPlotBars come from vac_functions.R).
p1 = createPlot(df,'Cases60plus','Positive cases >60')
p2 = createPlot(df,'Cases60minus','Positive cases <60')
p3 = createPlot(df,'Hosp60plus','Hospitalization cases >60')
p4 = createPlot(df,'Hosp60minus','Hospitalization cases <60')
p5 = createPlot(df,'Severe60plus','Severe cases >60')
p6 = createPlot(df,'Severe60minus','Severe cases <60')
p7 = createPlot(df,'Deceased','Deceased')
createPlotBars(df)
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.bars.pdf',width=8,height = 6)
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.bars.jpg',width=8,height = 6)
# Combined 6-panel figure, then pairwise panels per outcome.
ggarrange(p1, p2, p3, p4,p5,p6, ncol=2, nrow=3, common.legend = TRUE, legend="bottom",labels=c('A','B','C','D','E','F'))
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.pdf',width=8,height = 8)
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.jpg',width=8,height = 8)
ggarrange(p1, p2, ncol=2, nrow=1, common.legend = TRUE, legend="bottom",labels=c('A','B'))
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.cases.pdf',width=8,height = 4)
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.cases.jpg',width=8,height = 4)
ggarrange(p3, p4, ncol=2, nrow=1, common.legend = TRUE, legend="bottom",labels=c('A','B'))
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.hosp.pdf',width=8,height = 4)
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.hosp.jpg',width=8,height = 4)
ggarrange(p5, p6, ncol=2, nrow=1, common.legend = TRUE, legend="bottom",labels=c('A','B'))
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.severe.pdf',width=8,height = 4)
ggsave('~/Documents/covid_analyses/plots/figure1.2.23.2021.severe.jpg',width=8,height = 4)
# NOTE(review): the next ggsave has no plot argument, so it writes whichever
# plot was printed last -- presumably a leftover from the 2.10.2021 figure
# set; confirm it is still wanted.
ggsave('~/Documents/covid_analyses/plots/figure1.2.10.2021.pdf',width=7,height = 9)
ggsave('~/Documents/covid_analyses/plots/figure1.2.10.2021.p1.pdf',p1,width=5,height = 3.2)
ggsave('~/Documents/covid_analyses/plots/figure1.2.10.2021.p2.pdf',p2,width=5,height = 3.2)
ggsave('~/Documents/covid_analyses/plots/figure1.2.10.2021.p3.pdf',p3,width=5,height = 3.2)
ggsave('~/Documents/covid_analyses/plots/figure1.2.10.2021.p4.pdf',p4,width=5,height = 3.2)
ggsave('~/Documents/covid_analyses/plots/figure1.2.10.2021.p5.pdf',p5,width=5,height = 3.2)
#### SHIFT TEST
# Sensitivity check with runAnalysis14days (vac_functions.R) for the 60+
# stratum across the three outcomes; `dash` marks the dashed reference line
# below -- presumably the number of days shifted; confirm in vac_functions.R.
d1 = runAnalysis14days(moh$Cases60plus,vac_1dose_old,vac_2dose_old,rossman_cases_60plus,cohort_size=1428000,beta=0.79,p2=0.73,dash=2)
d2 = runAnalysis14days(moh$Hosp60plus,vac_1dose_old,vac_2dose_old,rossman_hosp_60plus,cohort_size=1428000,beta=0.7,p2=0.81,dash=4)
d3 = runAnalysis14days(moh$Severe60plus,vac_1dose_old,vac_2dose_old,rossman_severe_60plus,cohort_size=1428000,beta=0.76,p2=0.81,dash=8)
d = cbind(d1,d2,d3)[c(1:2,4,6)]
colnames(d) = c('Days_Removed','Cases','Hospitalizations','Severe')
d = melt(d,id.vars = c('Days_Removed'))
ggplot(d,aes(Days_Removed,value,color=variable,group=variable))+geom_line()+geom_point()+
  theme_classic()+ylim(0.85,1)+geom_vline(xintercept=2,linetype='dashed')+
  geom_vline(xintercept=4,linetype='dashed')+
  geom_vline(xintercept=8,linetype='dashed')+xlim(c(0,20))
ggsave('~/Documents/covid_analyses/figure1.2.23.2021.shift.pdf',width=6,height = 4)
|
419c5afe841391ffa15fbc5c13783f4749deff84 | 20036893929f5078bf99a066775d5ebcd37cac49 | /code/Cluster_Expression_Level.R | 985d848be7a4265dcd3c637849312615abcc8cd1 | [] | no_license | zehualilab/RNAseq_singlecellfetal | f88da23b8a1b2ba1d1c47d60f51d28f9cd237d21 | fb71ea75c29ea114861105e6f52c81a38df038d4 | refs/heads/master | 2020-09-11T00:45:28.481841 | 2019-01-17T03:50:49 | 2019-01-17T03:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,818 | r | Cluster_Expression_Level.R | # Damon Polioudakis
# 2017-05-09
# Pooled expression level for each gene by cluster
################################################################################
# NOTE(review): rm(list = ls()) wipes the caller's workspace; discouraged in
# shared scripts but kept here for standalone runs.
rm(list = ls())
require(Seurat)
require(reshape2)
require(dplyr)
# Drop-seq
# Loads the clustered Seurat object; provides `fetb` (raw/scaled expression
# matrices and per-cell cluster identities) used throughout this script.
load("../analysis/Cluster_Seurat/Cluster_Seurat_exon_FtMm250_fetb_seurat.Robj")
# Set ggplot2 theme
theme_set(theme_bw())
theme_set(theme_get() + theme(text = element_text(size = 11)))
theme_update(plot.title = element_text(size = 11))
theme_update(axis.line = element_line(colour = "black")
  , panel.border = element_blank()
  )
# All plots are written into one multi-page PDF.
pdf("../analysis/graphs/Cluster_Expression_Level.pdf", width = 12, height = 9)
# --- Raw counts ---
# Split cells by cluster identity, sum raw counts per gene within each
# cluster, and rank genes from highest to lowest summed expression.
ldf <- split(as.data.frame(t(fetb@raw.data)), fetb@ident)
ll <- lapply(ldf, function(df) {rowSums(t(df))})
ll <- lapply(ll, function(l) sort(l, decreasing = TRUE))
df <- data.frame(ll)
df$RANK <- c(1:nrow(df))
ggDF <- melt(df, id.vars = "RANK")
## Assign annotated cluster names to clusters
current.cluster.ids <- c("X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "X9")
new.cluster.ids <- c(
  "Excitatory Upper Layer Neuron 1"
  , "Excitatory Neuron"
  , "Excitatory Upper Layer Neuron 2"
  , "Excitatory Deep Layer Neuron"
  , "Intermediate Progenitors"
  , "Interneuron"
  , "Mitotic Progenitors"
  , "oRG"
  , "Oligodendrocyte Precursor"
  , "Endothelial")
ggDF$variable <- as.character(ggDF$variable)
ggDF$variable <- plyr::mapvalues(ggDF$variable, from = current.cluster.ids
  , to = new.cluster.ids)
ggDF$variable <- factor(ggDF$variable, levels = new.cluster.ids)
# One ranked-expression line per cluster, faceted by cluster name.
ggplot(ggDF, aes(y = value, x = RANK, group = 1)) +
  geom_line() +
  facet_wrap("variable", scales = "free_x") +
  coord_cartesian(ylim = c(0, 500)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  ylab("Raw counts") +
  xlab("Genes ordered by expression") +
  ggtitle(paste0(
    "Cluster_Expression_Level.R"
    , "\n"
    , "\nSummed expression level for each gene for each cluster"
    , "\nRaw counts"
    # Fixed: the title previously claimed a limit of 100, but
    # coord_cartesian() above actually clips the y-axis at 500.
    , "\nY-axis limit set to 500"
    , "\n"
  ))
# --- Scaled expression ---
# Same ranking as above, but on the mean-centered scaled log2-normalized
# matrix.  NOTE(review): unlike the raw-counts section, the cluster IDs are
# NOT renamed here, so the facets show X0..X9 -- confirm this is intended.
ldf <- split(as.data.frame(t(fetb@scale.data)), fetb@ident)
ll <- lapply(ldf, function(df) {rowSums(t(df))})
ll <- lapply(ll, function(l) sort(l, decreasing = TRUE))
df <- data.frame(ll)
df$RANK <- c(1:nrow(df))
ggDF <- melt(df, id.vars = "RANK")
ggplot(ggDF, aes(y = value, x = RANK, group = 1)) +
  geom_line() +
  facet_wrap("variable", scales = "free_x") +
  coord_cartesian(ylim = c(0, 500)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  ylab("Mean centered scaled log2 normalized expression") +
  xlab("Genes ordered by expression") +
  ggtitle(paste0(
    "Cluster_Expression_Level.R"
    , "\n"
    , "\nSummed expression level for each gene for each cluster"
    , "\nMean centered scaled log2 normalized expression"
    # Fixed: the title previously claimed a limit of 100, but
    # coord_cartesian() above actually clips the y-axis at 500.
    , "\nY-axis limit set to 500"
    , "\n"
  ))
dev.off()
|
e048977e637382a8b16f1cd856a96d61bc7ff965 | db6b7886398c4602d858fbb1855ce6d8c91b3e68 | /man/agg_xaxislabel.Rd | 7dd5742e74a0384a1b0a35e398cf24facfd79fd4 | [
"MIT"
] | permissive | angusmoore/arphit | 08c1bec58bf22d29999204480e962547c1486add | 389efbf00b0775d1e1ec6b0f8f2311eff72bb1b0 | refs/heads/master | 2023-02-08T06:20:35.082649 | 2021-02-09T05:27:35 | 2021-02-09T05:27:35 | 104,701,875 | 3 | 4 | MIT | 2021-02-09T05:27:36 | 2017-09-25T04:02:59 | R | UTF-8 | R | false | true | 765 | rd | agg_xaxislabel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gg-constructors.R
\name{agg_xaxislabel}
\alias{agg_xaxislabel}
\title{Add an axis label to the x axis}
\usage{
agg_xaxislabel(axislabel, panel = NULL)
}
\arguments{
\item{axislabel}{A string specifying the axis label}
\item{panel}{(optional) Specify a panel identifier to add to a specific
panel. If blank, axis label will be applied to all panels. You can specify a
vector of panels (e.g. `panel = c("1","3")`) to apply the axis to multiple
panels at once.}
}
\description{
Add an axis label to the x axis
}
\examples{
arphitgg(data) + agg_xaxislabel("year")
}
\seealso{
\code{vignette("plotting-options", package = "arphit")} for a
detailed description of all the plotting options
}
|
35df7289d89fcda9c6df15e66937c41fd22014ce | 5162c82b2a5450b836d28e9c5d2bf18c53ebf815 | /src/plot_struct.R | 2b50f221f72faa42e394ef580008d3be37aca2b1 | [
"MIT"
] | permissive | ggirelli/chromflock-scripts | 188dea03b6fcb779f7a619de69fdc2d2765e2772 | 2daebfde8a151a5176ea70e464327d296cb6356f | refs/heads/master | 2022-04-08T17:08:12.962277 | 2020-03-19T14:45:00 | 2020-03-19T14:45:00 | 195,254,709 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,432 | r | plot_struct.R | #!/usr/bin/env Rscript
# ------------------------------------------------------------------------------
#
# Author: Gabriele Girelli
# Email: gigi.ga90@gmail.com
# Date: 20190704
#
# ------------------------------------------------------------------------------
# DEPENDENCIES =================================================================
suppressMessages(require(argparser))
suppressMessages(require(cowplot))
suppressMessages(require(data.table))
suppressMessages(require(ggplot2))
suppressMessages(require(pbapply))
suppressMessages(require(viridis))
theme_set(theme_cowplot())
setDTthreads(1)
pboptions(type = "timer")
# INPUT ========================================================================
# Command-line interface: two input RDS paths, an output folder, and an
# optional thread count.
script_name = 'plot_struct.R'
parser = arg_parser('Generate plots comparing structures and GPSeq.',
    name = script_name)
parser = add_argument(parser, 'gRDS',
    'Path to input GPSeq rds generated with add_gpseq2rd.R')
parser = add_argument(parser, 'fRDS',
    'Path to input FISH rds generated with add_gpseq2rd.R')
parser = add_argument(parser, 'outDir', 'Path to output folder.')
parser = add_argument(parser, arg = '--threads', short = '-t', type = class(0),
    help = 'Number of threads for parallelization.', default = 1, nargs = 1)
p = parse_args(parser)
# NOTE(review): attach()-ing the parsed arguments exposes gRDS/fRDS/outDir/
# threads via the search path; explicit assignment (e.g. list2env) would be
# less fragile, but the rest of the script relies on this behavior.
attach(p['' != names(p)])
cat(sprintf("
 # %s
 GPSeq RDS : %s
 iFISH RDS : %s
 Output : %s
 Threads : %d
", script_name, gRDS, fRDS, outDir, threads))
# FUNCTIONS ====================================================================
# Save a plot to disk in each requested format and optionally print it.
#
# Args:
#   x: the plot object.
#   bname: output basename; a per-format suffix/extension is appended.
#   width, height: device size (inches) for the non-cowplot path.
#   dpi: raster resolution for PNG output.
#   use.cowplot: if TRUE, delegate sizing/saving to cowplot::save_plot.
#   ncol, nrow, base_height, base_width, base_aspect_ratio: forwarded to
#       cowplot::save_plot (cowplot path only).
#   plot: if TRUE, also print the plot after saving.
#   formats: any subset of "png", "pdf", "eps"; each requested format is
#       written ("eps" writes both a cairo_ps and a postscript variant).
save_and_plot <- function(x, bname, width, height,
    dpi=300, use.cowplot=FALSE, ncol=1, nrow=1,
    base_height=4, base_width=NULL, base_aspect_ratio=1,
    plot=FALSE, formats = c("png", "pdf")){
    # Function to save the plot (and separately its data)
    # to file and show the plot in the notebook
    if( !use.cowplot ){
        if ( "png" %in% formats) {
            png(filename=file.path(paste0(bname, ".png")),
                units="in", height=height, width=width, res=dpi)
            print(x)
            # Close every open graphics device so later plots start clean.
            while(length(dev.list())>0) invisible(dev.off())
        }
        if ( "pdf" %in% formats) {
            cairo_pdf(filename=file.path(paste0(bname, "_cairo_pdf.pdf")),
                onefile = TRUE, height=height, width=width, family="Helvetica",
                pointsize=8, antialias="none")
            print(x)
            while(length(dev.list())>0) invisible(dev.off())
        }
        if ( "eps" %in% formats) {
            cairo_ps(filename=file.path(paste0(bname, "_cairo_ps.eps")),
                onefile = TRUE, height=height, width=width, family="Helvetica",
                pointsize=8, antialias="none")
            print(x)
            while(length(dev.list())>0) invisible(dev.off())
            postscript(file=file.path(paste0(bname, "_postscript.eps")),
                onefile = TRUE, paper="special", height=height, width=width,
                family="Helvetica", pointsize=8, horizontal=FALSE)
            print(x)
            while(length(dev.list())>0) invisible(dev.off())
        }
    }else{
        # cowplot path: save_plot handles the device and sizing itself.
        if ( "png" %in% formats) {
            save_plot(x, filename=file.path(paste0(bname, ".png")),
                ncol = ncol, nrow = nrow, base_height = base_height,
                base_width = base_width, base_aspect_ratio = base_aspect_ratio,
                dpi=dpi)
            while(length(dev.list())>0) invisible(dev.off())
        }
        if ( "pdf" %in% formats) {
            save_plot(x, filename=file.path(paste0(bname, "_cairo_pdf.pdf")),
                ncol = ncol, nrow = nrow, base_height = base_height,
                base_width = base_width, base_aspect_ratio = base_aspect_ratio,
                device=cairo_pdf)
            while(length(dev.list())>0) invisible(dev.off())
        }
        if ( "eps" %in% formats) {
            save_plot(x, filename=file.path(paste0(bname, "_cairo_ps.eps")),
                ncol = ncol, nrow = nrow, base_height = base_height,
                base_width = base_width, base_aspect_ratio = base_aspect_ratio,
                device=cairo_ps)
            save_plot(x, filename=file.path(paste0(bname, "_postscript.eps")),
                ncol = ncol, nrow = nrow, base_height = base_height,
                base_width = base_width, base_aspect_ratio = base_aspect_ratio,
                device="ps")
            while(length(dev.list())>0) invisible(dev.off())
        }
    }
    if( plot ) print(x)
}
# Map a chromosome name ("chr1", "chrX", "chr3:1", ...) to a numeric ID.
# Autosomes keep their own number; heterosomes are appended after the last
# autosome in the order given by `hetero` (defaults: X -> 23, Y -> 24).
# Names containing ":" are parsed as "chrN:sub" and yield the integer N.
chrom2chromID = function(chrom, nchrom = 24, hetero = c("X", "Y")) {
    label = substr(chrom, 4, nchar(chrom))
    if ( grepl(":", chrom) ) {
        # "N:sub" -> "N.sub" -> N
        return(floor(as.numeric(gsub(":", ".", label))))
    }
    if ( label %in% hetero ) {
        # Equivalent to nchrom - which(rev(hetero) == label) + 1.
        chromID = nchrom - length(hetero) + match(label, hetero)
    } else {
        chromID = as.numeric(label)
    }
    stopifnot(!is.na(chromID))
    return(chromID)
}
# Plot, for one chromosome, the GPSeq-derived profile against the
# structure-derived distance-from-lamina profile, one panel per
# (GPSeq-inclusion x contact-set) combination, annotated with
# Pearson/Spearman correlations.
#
# Args:
#   cData: data.table for a single chromosome with columns start, end,
#       prob_g, gpseqLab, contactLab, chrom, and the column named by val.var.
#   val.var: column holding the structure radial statistic (default
#       "rmedian"); correlations use 1 - value so both curves share
#       orientation.
#
# Returns: the assembled ggplot object (nothing is written to disk here).
plot_chrom_profile = function(cData, val.var = "rmedian") {
    # Correlations between prob_g and 1 - structure statistic, computed
    # separately for every panel of the facet grid.
    corData = rbindlist(by(cData,
        cData$gpseqLab, function(gt) {
            rbindlist(by(gt, gt$contactLab, function(ct) {
                pcor = cor(ct$prob_g, 1-unlist(ct[, ..val.var]),
                    use = "pairwise.complete.obs", method = "pearson")
                scor = cor(ct$prob_g, 1-unlist(ct[, ..val.var]),
                    use = "pairwise.complete.obs", method = "spearman")
                data.table(pearson = pcor, spearman = scor,
                    contactLab = ct[1, contactLab], gpseqLab = ct[1, gpseqLab])
            }))
    }))
    p = ggplot(cData, aes(x = (start+end)/2e6)
    ) + geom_line(aes(y = prob_g, color = "GPSeq"), size = 1
    ) + geom_line(aes(y = 1-rmedian, color = "structures"), size = 1
    ) + geom_text(data = corData, aes(label = sprintf(
        "Pearson: %.3f\nSpearman: %.3f\nR2: %.3f", pearson, spearman, pearson**2
    )), x = 0, y = 1.25, hjust = 0, vjust = 1, size = 3, lineheight = .9
    ) + facet_grid(~gpseqLab~contactLab
    ) + scale_color_brewer(palette = "Paired"
    ) + guides(color = guide_legend(title = "Distance from")
    ) + theme(legend.position = "top",
        axis.text.x = element_text(angle = 45, hjust = 1)
    ) + xlab("Bin midpoint genomic coordinate (Mb)"
    ) + ylab("Distance from lamina (a.u.)"
    ) + ggtitle(cData[1, chrom]
    ) + ylim(0, 1.25) + xlim(0, cData[, max(end, na.rm = T)/1e6])
    # NOTE(review): geom_line above hard-codes `rmedian` even though the
    # correlations use the configurable val.var, so a non-default val.var is
    # ignored by the plotted curve -- confirm before reusing with other
    # statistics.
    #save_and_plot(p, file.path(outDir,
    # sprintf("3Dstruct.GPSeq.profile.%s", cData[1, chrom])),
    # format = "png", width = 10, height = 6)
    return(p)
}
# RUN ==========================================================================
# Prepare GPSeq data
cat("Reading GPSeq data...\n")
gsData = readRDS(gRDS)
# Log-transform the GPSeq score and clamp it to [0, 1] so it is directly
# comparable with the normalized structure distances.
gsData[, prob_g := log2(prob_g)]
gsData[prob_g < 0, prob_g := 0]
gsData[prob_g > 1, prob_g := 1]
# Human-readable facet labels for the gpseq/contacts condition columns.
gsData[, gpseqLab := "No GPSeq"]
gsData[(gpseq), gpseqLab := "GPSeq included"]
gsData[, gpseqLab := factor(gpseqLab,
	levels = c("No GPSeq", "GPSeq included"))]
gsData[, contactLab := "Only intra contacts"]
gsData["all" == contacts, contactLab := "All contacts"]
gsData[, contactLab := factor(contactLab,
	levels = c("Only intra contacts", "All contacts"))]
# Sanity checks: all bins must share a single size and a single step, since
# the profile-gap filling below assumes a regular binning.
binSize = gsData[, unique(end-start)]
stopifnot(1 == length(binSize))
binStep = gsData[, .(step = unique(diff(start))),
	by = c("contactLab", "gpseqLab", "label", "chrom")] [, unique(step)]
stopifnot(1 == length(binStep))
# Plot correlation scatter
cat("GPSeq vs Structure correlation plot...\n")
# Per (gpseqLab, contactLab) correlations between GPSeq score and the
# structure-derived lamina distance; bins with unknown chromosome are dropped.
corData = rbindlist(by(gsData[!is.na(chromID)],
	gsData[!is.na(chromID)]$gpseqLab, function(gt) {
		rbindlist(by(gt, gt$contactLab, function(ct) {
			pcor = cor(ct$prob_g, 1-ct$rmedian,
				use = "pairwise.complete.obs", method = "pearson")
			scor = cor(ct$prob_g, 1-ct$rmedian,
				use = "pairwise.complete.obs", method = "spearman")
			data.table(pearson = pcor, spearman = scor,
				contactLab = ct[1, contactLab], gpseqLab = ct[1, gpseqLab])
		}))
}))
# Scatter of GPSeq score vs structure distance, colored by chromosome, with a
# linear fit and the correlation annotation in the top-left of each facet.
p = ggplot(gsData[!is.na(chromID)], aes(x = prob_g, y = 1-rmedian)
) + geom_point(aes(color = reorder(chrom, chromID)), alpha = .5
) + geom_smooth(method = "lm", linetype = "dashed", color = "red", fill = NA
) + geom_text(data = corData, aes(label = sprintf(
	"Pearson: %.3f\nSpearman: %.3f\nR2: %.3f",
	pearson, spearman, pearson**2
)), x = 0, y = 1, hjust = 0, vjust = 1
) + facet_grid(~gpseqLab~contactLab
) + scale_color_viridis(discrete = T
) + guides(color = guide_legend("Chromosome", nrow = 3)
) + theme(legend.position = "top",
	strip.background = element_rect(fill = NA),
	strip.text = element_text(face = "bold")
) + xlab("log2(GPSeq score)") + ylab("Distance from lamina from structures"
) + xlim(0, 1) + ylim(0, 1) + coord_fixed()
save_and_plot(p, file.path(outDir, "3Dstruct.GPSeq.cor"),
	format = "png", height = 10, width = 10)
# Plot chromosome profiles
cat("GPSeq vs Structure chromosome-profiles...\n")
# Fill gaps in each (chromosome, condition) profile with NA bins so that
# geom_line breaks at missing bins instead of interpolating across them.
gsData2 = rbindlist(by(gsData, gsData$chrom, function(ct) {
	rbindlist(by(ct, paste0(ct$gpseq, "~", ct$label, "~", ct$contact),
		function(dt) {
			dt = dt[order(start), .(chrom, start, end, chromID,
				prob_g, rmean, rmedian,
				gpseqLab, contactLab, label)]
			# All expected bin starts on the regular grid (binStep checked above).
			starts = with(dt, seq(min(start, na.rm = T), max(start, na.rm = T),
				by = binStep))
			missing = starts[!starts %in% dt$start]
			if ( 0 == length(missing) ) {
				return(dt)
			}
			# Append the missing bins as NA rows, then restore genomic order.
			dt = rbind(dt, data.table(
				chrom = dt[1, chrom],
				start = missing, end = missing + binSize,
				chromID = dt[1, chromID],
				prob_g = NA, rmean = NA, rmedian = NA,
				gpseqLab = dt[1, gpseqLab],
				contactLab = dt[1, contactLab],
				label = dt[1, label]
			))
			dt[order(start)]
		}
	))
}))
gsData2$chromID = unlist(lapply(gsData2$chrom, chrom2chromID))
# One profile plot per chromosome, rendered in parallel, one page per plot.
pList = pblapply(split(gsData2, gsData2$chromID), plot_chrom_profile, cl = threads)
pdf(file.path(outDir, "3Dstruct.GPSeq.profile.pdf"), width = 10, height = 10)
l = lapply(pList, print)
graphics.off()
# Prepare iFISH data
cat("Reading iFISH data...\n")
fsData = readRDS(fRDS)
# Same log2 transform and [0, 1] clamp as applied to the GPSeq table above.
fsData[, prob_g := log2(prob_g)]
fsData[prob_g < 0, prob_g := 0]
fsData[prob_g > 1, prob_g := 1]
# Long format: one row per (bin, structure-condition); the condition name ends
# up in `variable` and the structure distance in `value`.
fsData = melt(fsData,
	id.vars = c("chrom", "start", "end", "chromID", "mid", "prob_g"))
# Condition labels are parsed out of the melted column name: a "G" marks runs
# with GPSeq included, "intra" marks intra-only contacts.
fsData[, gpseqLab := "No GPSeq"]
fsData[grepl("G", variable), gpseqLab := "GPSeq included"]
fsData[, gpseqLab := factor(gpseqLab,
	levels = c("No GPSeq", "GPSeq included"))]
fsData[, contactLab := "Only intra contacts"]
fsData[!grepl("intra", variable), contactLab := "All contacts"]
fsData[, contactLab := factor(contactLab,
	levels = c("Only intra contacts", "All contacts"))]
# Plot correlation scatter
cat("iFISH vs Structure correlation plot...\n")
# Per-condition correlations between prob_g and the structure distance.
corData = rbindlist(by(fsData, fsData$gpseqLab, function(gt) {
	rbindlist(by(gt, gt$contactLab, function(ct) {
		pcor = cor(ct$prob_g, 1-ct$value,
			use = "pairwise.complete.obs", method = "pearson")
		scor = cor(ct$prob_g, 1-ct$value,
			use = "pairwise.complete.obs", method = "spearman")
		data.table(pearson = pcor, spearman = scor,
			contactLab = ct[1, contactLab], gpseqLab = ct[1, gpseqLab])
	}))
}))
# NOTE(review): the x axis plots `prob_g` but is labelled as the iFISH median
# normalized lamina distance -- confirm that prob_g in fRDS holds the iFISH
# measure (unlike the GPSeq table above) or fix the label.
p = ggplot(fsData, aes(x = prob_g, y = 1-value)
) + geom_point(aes(color = reorder(chrom, chromID)), alpha = .5
) + geom_smooth(method = "lm", linetype = "dashed", color = "red", fill = NA
) + geom_text(data = corData, aes(label = sprintf(
	"Pearson: %.3f\nSpearman: %.3f\nR2: %.3f", pearson, spearman, pearson**2
)), x = 0, y = 1, hjust = 0, vjust = 1
) + facet_grid(~gpseqLab~contactLab
) + scale_color_viridis(discrete = T
) + guides(color = guide_legend("Chromosome", nrow = 3)
) + theme(legend.position = "top",
	strip.background = element_rect(fill = NA),
	strip.text = element_text(face = "bold")
) + xlab("Median normalized distance from lamina (iFISH)"
) + ylab("Distance from lamina from structures"
) + xlim(0, 1) + ylim(0, 1) + coord_fixed()
save_and_plot(p, file.path(outDir, "3Dstruct.iFISH.cor"),
	format = "png", height = 10, width = 10)
# END --------------------------------------------------------------------------
################################################################################
|
68ff6093529d8d2dc906acb15b06009a780b6d19 | ff34d68c0fcc63a4cc447a5200c6fea11cc6f5db | /R/get_plant_tbl.R | 0ce56528723ab7a86e2a38df6a695e52a78b3cda | [] | no_license | soerenpost/poor_countries_simple_products | b921dd772b7db0b3ad7e71182dbdbcc9f6eae394 | 4b30525519c077319fcd83cfbd8634d5dafb9dff | refs/heads/master | 2022-11-17T14:39:14.222951 | 2020-07-15T11:46:07 | 2020-07-15T11:46:07 | 273,551,254 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,219 | r | get_plant_tbl.R | #' This functions creates a a table that contains plant-specific observations
#' (complexity and power shortages information not included). It cleans and
#' filters the observations based on a number of "flags".
#'
#' @param block_list List containing the merged ASI blocks.
#'
#' @return A data frame (plant_tbl) containing plant specific observations.
#' @export
# For testing:
# block_list <- readd("asi_blocks_clean")
# wpi_index <- readd("wpi_tbl")
get_plant_tbl <- function(block_list, wpi_index) {
  # Build a plant-year table from the merged ASI blocks: deflated revenues,
  # electricity use, labor, and intermediate inputs, plus a set of
  # data-quality flags used to drop implausible observations.
  #
  # Args:
  #   block_list: named list of merged ASI blocks; uses block_a_tbl (IDs),
  #     block_e_tbl (labor), block_h_tbl (inputs/electricity),
  #     block_i_tbl (imports), block_j_tbl (revenues).
  #   wpi_index: table with columns `year` and `index_val` (wholesale price
  #     index, base 100 in financial year 2004-2005) used to deflate revenues.
  #
  # Returns: a list with
  #   plant_tbl: plant-year table filtered to open plants with < 2 flags,
  #   flag_tbl:  all quality-flag columns for every plant-year (unfiltered).
  #################################################################
  ## 1: CLEAN DATA ##
  #################################################################
  ##---------------------------------------------------------------
  ## ID -
  ##---------------------------------------------------------------
  block_a_tbl <- block_list$block_a_tbl
  id_tbl <-
    block_a_tbl %>%
    select(-block)
  # Create state information ------------------------------------------------
  # States change a bit during the period. A little bit of messy code is needed
  # to fix it. Correct state names is supplied with the panel data. The 6th and
  # 7th character in factory_id lists the state code. I use this code to assign
  # state names.
  id_tbl <-
    id_tbl %>%
    mutate(
      state_code = str_sub(
        string = factory_id,
        start = 6,
        end = 7
      ) %>% as.numeric()
    )
  # Read state reference up to 2011-2012
  state_names_98_to_11 <-
    read_excel(
      here("data/external/asi/asi_2010_2016/State Master 1998-99 to 2011-12.xls"),
      skip = 2
    ) %>%
    clean_names(case = "snake") %>%
    mutate(codes = as.numeric(codes)) %>%
    rename(old_state_name = state_name)
  # Read state reference from 2012-13 to 2015-16.
  state_names_12_to_15 <-
    read_excel(
      here("data/external/asi/asi_2010_2016/State Master 2012-13 onwards.xls"),
      skip = 2
    ) %>%
    clean_names(case = "snake") %>%
    mutate(codes = as.numeric(codes)) %>%
    rename(new_state_name = state_name)
  # Join method: create index on whether or not to join. If in 99-12 range,
  # "old_states" is equal to 1, 0 otherwise. If in 13-16 range, "new_states" is
  # qual to 1, 0 if not. All entries in state_names_99_to_12 have "old_states"
  # == 1, all entries in state_names_13_to_16 have "new_states" == 1. Join based
  # on this. Clean up mess after.
  state_names_98_to_11 <-
    state_names_98_to_11 %>%
    mutate(old_states = 1)
  state_names_12_to_15 <-
    state_names_12_to_15 %>%
    mutate(new_states = 1)
  id_tbl <-
    id_tbl %>%
    mutate(
      old_states = ifelse(year %in% 1998:2011, 1, 0),
      new_states = ifelse(year %in% 2012:2015, 1, 0)
    )
  id_tbl <-
    id_tbl %>%
    left_join(
      state_names_98_to_11, by = c("state_code" = "codes", "old_states")
    ) %>%
    left_join(
      state_names_12_to_15, by = c("state_code" = "codes", "new_states")
    ) %>%
    mutate(
      state_name = case_when(
        old_states == 1 ~ old_state_name,
        new_states == 1 ~ new_state_name,
        TRUE ~ NA_character_
      )
    ) %>%
    select(-c(old_states, new_states, old_state_name, new_state_name))
  ##----------------------------------------------------------------
  ## REVENUES -
  ##----------------------------------------------------------------
  block_j_tbl <- block_list$block_j_tbl
  # 10-15: Total items = 9995000 or sno = 12
  revenue_tbl <-
    block_j_tbl %>%
    filter(item_code == "9995000") %>%
    select(
      year,
      factory_id,
      unadj_revenue = gross_sale_val
    )
  # Price-adjust revenues to financial year 2004-2005 prices using WPI.
  revenue_tbl <-
    revenue_tbl %>%
    left_join(wpi_index, by = "year")
  revenue_tbl <-
    revenue_tbl %>%
    mutate(
      adj_revenue = unadj_revenue / (index_val / 100)
    ) %>%
    select(year, factory_id, unadj_revenue, adj_revenue)
  ##---------------------------------------------------------------
  ## ELECTRICITY USE -
  ##---------------------------------------------------------------
  block_h_tbl <- block_list$block_h_tbl
  # Get electricity consumed -----------------------------------------
  # Electricity generated (kwh):
  # 10-11: sno = 15, code = 9990400
  # 11-12: sno = 15, code = 9990400
  # 12-13: sno = 15, code = 9990400
  # 13-14: sno = 15, code = 9990400
  # 14-15: sno = 15, code = 9990400
  # 15-16: sno = 15, code = 9990400
  e_self_gen_tbl <-
    block_h_tbl %>%
    filter(item_code == "9990400") %>%
    select(
      year,
      factory_id,
      electricity_self_gen_kwh = qty_consumed
    )
  # Electricity purchased and consumed (kwh):
  # 10-11: sno = 16, code = 9990500
  # 11-12: sno = 16, code = 9990500
  # 12-13: sno = 16, code = 9990500
  # 13-14: sno = 16, code = 9990500
  # 14-15: sno = 16, code = 9990500
  # 15-16: sno = 16, code = 9990500
  e_purch_tbl <-
    block_h_tbl %>%
    filter(item_code == "9990500") %>%
    select(
      year,
      factory_id,
      electricity_purch_kwh = qty_consumed,
      electricity_purch_val = purchase_val
    )
  # Join electricity tables
  # NOTE(review): no explicit `by` -- relies on the implicit join by
  # year + factory_id (the shared column names); confirm no other columns
  # overlap between the two tables.
  electricity_tbl <-
    left_join(e_purch_tbl, e_self_gen_tbl) %>%
    mutate( # If a plant does not make electricity it becomes NA. Make 0 instead.
      electricity_self_gen_kwh = ifelse(
        is.na(electricity_self_gen_kwh), 0, electricity_self_gen_kwh
      ),
      electricity_consumed_kwh = electricity_purch_kwh + electricity_self_gen_kwh
    )
  # Get electricity intensity -----------------------------------------
  # Electricity is measured in kWh, adj_revenue is measured in 2004 Indian Rs.
  # To get electricity intensity of revenues I divide electriciy consumed by
  # adjusted revenues to get kWh/Rs.
  electricity_tbl <-
    left_join(revenue_tbl, electricity_tbl, by = c("year", "factory_id")) %>%
    mutate(
      electricity_intensity = electricity_consumed_kwh / adj_revenue
    ) %>%
    select(-c(unadj_revenue, adj_revenue))
  ##----------------------------------------------------------------
  ## INTERMEDIATE INPUT USE -
  ##----------------------------------------------------------------
  # Indigenous inputs ------------------------------------------------
  # 10-15: total basic items = 9990100 excludes electricity, non-basic
  # chemicals packing items, electricity generated, fuel, coal, etc.
  basic_inputs_tbl <-
    block_h_tbl %>%
    filter(item_code == "9990100") %>%
    rename(unadj_basic_input_val = purchase_val) %>%
    select(year, factory_id, unadj_basic_input_val)
  # 10-15: total inputs = 9993000 (includes everything)
  total_inputs_tbl <-
    block_h_tbl %>%
    filter(item_code == "9993000") %>%
    rename(unadj_total_input_val = purchase_val) %>%
    select(year, factory_id, unadj_total_input_val)
  # Imported inputs -------------------------------------------------
  block_i_tbl <- block_list$block_i_tbl
  # 10-15: total imports consumed = 9995000
  # NOTE(review): the comment says 9995000 but the filter below uses 9994000;
  # confirm which item code marks total imports in block I.
  total_imports_tbl <-
    block_i_tbl %>%
    filter(item_code == "9994000") %>%
    rename(unadj_total_import_val = purchase_val) %>%
    select(year, factory_id, unadj_total_import_val)
  # Combine total inputs, domestic and imports ----------------------
  # Plants with no import rows get 0 imports rather than NA, so the sum stays
  # defined for every plant-year.
  total_inputs_tbl <-
    total_inputs_tbl %>%
    left_join(total_imports_tbl, by = c("year", "factory_id")) %>%
    mutate(
      unadj_total_import_val = ifelse(
        is.na(unadj_total_import_val), 0, unadj_total_import_val
      ),
      unadj_total_input_val = unadj_total_input_val + unadj_total_import_val
    ) %>%
    select(year, factory_id, unadj_total_input_val)
  # Join
  inputs_tbl <-
    left_join(total_inputs_tbl, basic_inputs_tbl, by = c("year", "factory_id"))
  # Get input share of revenue --------------------------------------
  inputs_tbl <-
    left_join(revenue_tbl, inputs_tbl, by = c("year", "factory_id")) %>%
    mutate(
      total_input_share = unadj_total_input_val / unadj_revenue,
      basic_input_share = unadj_basic_input_val / unadj_revenue
    ) %>%
    select(-c(unadj_revenue, adj_revenue))
  ##---------------------------------------------------------------
  ## EMPLOYEES AND WAGES -
  ##---------------------------------------------------------------
  block_e_tbl <- block_list$block_e_tbl
  # 2010-15: Total employees: sno = 9
  labor_tbl <-
    block_e_tbl %>%
    filter(sno == 9) %>%
    select(
      year,
      factory_id,
      avg_total_employees = avg_person_worked,
      unadj_total_wages = wages
    )
  ##----------------------------------------------------------------
  ## WAGE-REVENUE SHARE -
  ##----------------------------------------------------------------
  # To get the wage-revenue share, I just divide wages by the unadjusted
  # revenue. Since all the amounts are in the same monetary units (current Rs)
  # there is no effect of adjusting.
  labor_tbl <-
    left_join(labor_tbl, revenue_tbl, by = c("year", "factory_id")) %>%
    mutate(
      wage_share = unadj_total_wages / unadj_revenue
    ) %>%
    select(-c(unadj_revenue, adj_revenue))
  #################################################################
  ## 2: CREATE FLAGS ##
  #################################################################
  # Revenue share flags: ----------------------------------------------
  # Each flag is 1 for an implausible plant-year and 0 otherwise.
  # Wage share of revenue (not 2+)
  wage_flag_tbl <-
    labor_tbl %>%
    mutate(
      wage_revenue_flag = ifelse(wage_share >=2, 1, 0)
    ) %>%
    select(year, factory_id, wage_revenue_flag)
  # Input share of revenue (not 2+)
  input_flag_tbl <-
    inputs_tbl %>%
    mutate(
      total_input_revenue_flag = ifelse(total_input_share >= 2, 1, 0),
      basic_input_revenue_flag = ifelse(basic_input_share >= 2, 1, 0)
    ) %>%
    select(
      year,
      factory_id,
      total_input_revenue_flag,
      basic_input_revenue_flag
    )
  # Electricity use (not 0)
  # NOTE(review): despite its name, `electricity_val_share` is a 0/1 flag for
  # electricity spending >= revenue, not a share.
  electricity_flag_tbl <-
    left_join(id_tbl, electricity_tbl, by = c("year", "factory_id")) %>%
    left_join(revenue_tbl, by = c("year", "factory_id")) %>%
    mutate(
      zero_electricity_flag = case_when(
        electricity_purch_kwh > 0 ~ 0,
        electricity_self_gen_kwh > 0 ~ 0,
        TRUE ~ 1
      ),
      electricity_val_share = ifelse(electricity_purch_val / unadj_revenue >= 1, 1, 0)
    ) %>%
    select(
      year,
      factory_id,
      zero_electricity_flag,
      electricity_val_share
    )
  # Within plant flags ("change" flags): ------------------------------
  # A plant-year is flagged when its log value jumps by >= 3.5 (about 33x)
  # relative to both neighbors, or when a first/last observation jumps that
  # much relative to its single neighbor -- i.e. likely recording errors.
  # Change in revenue
  revenue_change_flag_tbl <-
    revenue_tbl %>%
    mutate(ln_adj_revenue = log(adj_revenue)) %>%
    group_by(factory_id) %>%
    arrange(year) %>%
    mutate(
      # Positive if higher than prev obs
      change_from_prev = ln_adj_revenue - lag(ln_adj_revenue),
      # Positive if higher than next obs
      change_from_next = ln_adj_revenue - lead(ln_adj_revenue)
    ) %>%
    mutate(
      adj_revenue_change_flag = case_when(
        # if one obs is mistakenly much higher
        change_from_prev >=3.5 & change_from_next >= 3.5 ~ 1,
        # if one obs is mistakenly much lower
        change_from_prev <=-3.5 & change_from_next <= -3.5 ~ 1,
        # if first obs is much higher or lower than next obs
        is.na(lag(ln_adj_revenue)) & (change_from_next >= 3.5 | change_from_next <=-3.5) ~ 1,
        # if first obs is much higher or lower than next obs
        is.na(lead(ln_adj_revenue)) & (change_from_prev >=3.5 | change_from_prev <=-3.5) ~ 1,
        TRUE ~ 0
      )
    ) %>%
    select(
      year,
      factory_id,
      adj_revenue_change_flag
    )
  # Change in employees
  employees_change_flag_tbl <-
    labor_tbl %>%
    mutate(ln_total_employees = log(avg_total_employees)) %>%
    group_by(factory_id) %>%
    arrange(year) %>%
    mutate(
      change_from_prev = ln_total_employees - lag(ln_total_employees), # Positive if higher than prev obs
      change_from_next = ln_total_employees - lead(ln_total_employees) # Positive if higher than next obs
    ) %>%
    mutate(
      employees_change_flag = case_when(
        change_from_prev >=3.5 & change_from_next >= 3.5 ~ 1, # if one obs is mistakenly much higher
        change_from_prev <=-3.5 & change_from_next <= -3.5 ~ 1, # if one obs is mistakenly much lower
        is.na(lag(ln_total_employees)) & (change_from_next >= 3.5 | change_from_next <=-3.5) ~ 1, # if first obs is much higher or lower than next obs
        is.na(lead(ln_total_employees)) & (change_from_prev >=3.5 | change_from_prev <=-3.5) ~ 1, # if first obs is much higher or lower than next obs
        TRUE ~ 0
      )
    ) %>%
    select(
      year,
      factory_id,
      employees_change_flag
    )
  # Change in electricity consumed (purchased and self-generated)
  electricity_change_flag_tbl <-
    electricity_tbl %>%
    mutate(
      ln_electricity_purch = log(electricity_purch_kwh),
      ln_electricity_selfgen = log(electricity_self_gen_kwh)
    ) %>%
    group_by(factory_id) %>%
    arrange(year) %>%
    mutate(
      purch_change_from_prev = ln_electricity_purch - lag(ln_electricity_purch), # Positive if higher than prev obs
      purch_change_from_next = ln_electricity_purch - lead(ln_electricity_purch), # Positive if higher than next obs
      selfgen_change_from_prev = ln_electricity_selfgen - lag(ln_electricity_selfgen), # Positive if higher than prev obs
      selfgen_change_from_next = ln_electricity_selfgen - lead(ln_electricity_selfgen) # Positive if higher than next obs
    ) %>%
    mutate(
      electricity_purch_change_flag = case_when(
        purch_change_from_prev >=3.5 & purch_change_from_next >= 3.5 ~ 1, # if one obs is mistakenly much higher
        purch_change_from_prev <=-3.5 & purch_change_from_next <= -3.5 ~ 1, # if one obs is mistakenly much lower
        is.na(lag(ln_electricity_purch)) & (purch_change_from_next >= 3.5 | purch_change_from_next <=-3.5) ~ 1, # if first obs is much higher or lower than next obs
        is.na(lead(ln_electricity_purch)) & (purch_change_from_prev >=3.5 | purch_change_from_prev <=-3.5) ~ 1, # if first obs is much higher or lower than next obs
        TRUE ~ 0
      ),
      electricity_selfgen_change_flag = case_when(
        selfgen_change_from_prev >=3.5 & selfgen_change_from_next >= 3.5 ~ 1, # if one obs is mistakenly much higher
        selfgen_change_from_prev <=-3.5 & selfgen_change_from_next <= -3.5 ~ 1, # if one obs is mistakenly much lower
        is.na(lag(ln_electricity_selfgen)) & (selfgen_change_from_next >= 3.5 | selfgen_change_from_next <=-3.5) ~ 1, # if first obs is much higher or lower than next obs
        is.na(lead(ln_electricity_selfgen)) & (selfgen_change_from_prev >=3.5 | selfgen_change_from_prev <=-3.5) ~ 1, # if first obs is much higher or lower than next obs
        TRUE ~ 0
      )
    ) %>%
    select(
      year,
      factory_id,
      electricity_purch_change_flag,
      # electricity_selfgen_change_flag <- not really needed. it should change a lot given the shortages change
    )
  # TODO: Change in amount of inputs used
  ##---------------------------------------------------------------
  ## GATHER FLAG TABLES -
  ##---------------------------------------------------------------
  # NOTE(review): these joins have no explicit `by` and rely on the implicit
  # year + factory_id keys; dplyr will print the inferred keys at run time.
  flag_tbl <-
    wage_flag_tbl %>%
    left_join(input_flag_tbl) %>%
    left_join(electricity_flag_tbl) %>%
    mutate(
      rev_share_flags = wage_revenue_flag + basic_input_revenue_flag + electricity_val_share
    )
  change_flag_tbl <-
    revenue_change_flag_tbl %>%
    # TODO: left_join(input_change_flag_tbl)
    left_join(employees_change_flag_tbl) %>%
    left_join(electricity_change_flag_tbl) %>%
    mutate(
      change_flags = adj_revenue_change_flag + employees_change_flag + electricity_purch_change_flag
    )
  plant_tbl <-
    id_tbl %>%
    left_join(revenue_tbl) %>%
    left_join(electricity_tbl) %>%
    left_join(labor_tbl) %>%
    left_join(inputs_tbl) %>%
    left_join(flag_tbl) %>%
    left_join(change_flag_tbl)
  ##################################################################
  ## 3: FILTER DATA ##
  ##################################################################
  plant_tbl <-
    plant_tbl %>%
    filter(unit_status == 1) %>% # Remove non-open factories
    filter((rev_share_flags + change_flags) < 2) # Remove observations that have two or more flags (rev_share and change flags combined)
  # TODO: CONSIDERATIONS: should observations w/o electricity use be included?
  # TODO: CONSIDERATIONS: Make electricity consumption "missing" for all observations that have electricity consumed = 0.
  joined_flag_tbl <- full_join(flag_tbl, change_flag_tbl)
  return(list("plant_tbl" = plant_tbl, "flag_tbl" = joined_flag_tbl))
}
|
fcf91b6c4708f01d56e25c6b33b3095f8b38fa5d | 4183b4ab0554a4bd57b8f097d484f43cccb40142 | /Plot2.R | 0ff1a6c6a1d585cab3ce3a53404f952bab79fb75 | [] | no_license | calliope7/ParticulateMatterInvestigation | 02afb2a6a7f6b142ec58ba9574669b44be0dfb5b | d01f2a031cf0b4ddcbda0ee1d5e9eba1dc66ea7e | refs/heads/master | 2016-09-06T15:39:53.989431 | 2015-08-23T09:01:29 | 2015-08-23T09:01:29 | 41,016,895 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 793 | r | Plot2.R | # National Emissions Inventory (NEI)
#
library(dplyr)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Question 2: Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510")
# from 1999 to 2008?
plot2 <- function() {
polBaltimoreByYear <- filter(NEI, fips == "24510") %>% group_by(year)
totalBaltimoreEmissionsByYear <- summarise(polBaltimoreByYear, sum(Emissions, na.rm=TRUE))
names(totalBaltimoreEmissionsByYear)[2]<-"totalEmissions"
png(filename = "Plot2.png", width = 480, height = 480)
with(totalBaltimoreEmissionsByYear, plot(year, totalEmissions, ylab="Total Emissions (tons)", main="Total Baltimore Emissions By Year"))
dev.off()
} |
78e705d2dd841bf84977ad61af33c621589df3d2 | d2c75e5ff89f2ce4426d9d721810a500abd7b9d1 | /analysis/cross_validation/within_CV.R | 1ef30e4ea937a3362d2989e2ca57dddf129c97fe | [] | no_license | ch728/squash-gs | 03dbcccf80a5bb72f8da0124c1da0a68533c8056 | 0b32b5c0ffb002f2b01cecb5e5422226e56d0e19 | refs/heads/master | 2022-11-21T17:48:24.379497 | 2020-07-16T17:31:49 | 2020-07-16T17:31:49 | 277,221,114 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,053 | r | within_CV.R | library(DBI)
library(tidyverse)
library(asreml)
library(doMC)
source("CV_functions.R")
# Set up parallel backend
registerDoMC(12)
# Read in phenotypic data from project database
con <- dbConnect(RSQLite::SQLite(),"../../pheno/pheno.db") # Connect to pheno database
res <- dbSendQuery(con, "SELECT * FROM trainingData")
pheno <- dbFetch(res)
dbClearResult(res)
res <- dbSendQuery(con, "SELECT Gid, TotalFrtCt, TotalWtKg FROM trainingYield")
YLD <- dbFetch(res)
dbClearResult(res)
res <- dbSendQuery(con, "SELECT Gid, Pop, avg(Brix) AS Brix, avg(DM) AS DM,
avg(a) AS a, avg(b) AS b, avg(L) as L,
avg(Weight) AS Weight, avg(Length) AS Length,
avg(Width) AS Width, avg(Length/Width) AS Shape FROM trainingData GROUP BY Gid")
pheno.avg <- dbFetch(res)
dbClearResult(res)
dbDisconnect(con)
pheno.avg <- plyr::join(pheno.avg, YLD, by="Gid", type="inner")
pheno.avg <- mutate(pheno.avg, Y3=TotalWtKg * (DM/100))
traits <- colnames(pheno.avg)[3:14]
# Start running differnt cross validation test sets
# C0 within
seeds <- sample.int(10000000,50)
C0.geno <- as.matrix(read.table("../../geno/filtered_C0_kinship.txt", skip=3, row.names=1)) # Read in kinship matrix
diag(C0.geno) <- diag(C0.geno) + 0.000001
C0.ginv <- solve(C0.geno) # Get inverse of kinship matrix
rownames(C0.ginv) <- rownames(C0.geno) # Add rownames to inverse matrix
C0.pheno <- pheno.avg %>% filter(Gid %in% rownames(C0.ginv))
C0.res <- foreach(i=1:length(traits), .combine=rbind) %dopar%
asremlWithinUni(pheno=C0.pheno, trait=traits[i], set="C0_uni", ginv=C0.ginv,
weights=NULL, train.prop=0.8, nrep=50, seeds=seeds)
# C2 within
seeds <- sample.int(10000000,50)
C2.geno <- as.matrix(read.table("../../geno/filtered_C2_kinship.txt", skip=3, row.names=1)) # Read in kinship matrix
diag(C2.geno) <- diag(C2.geno) + 0.000001
C2.ginv <- solve(C2.geno) # Get inverse of kinship matrix
rownames(C2.ginv) <- rownames(C2.geno) # Add rownames to inverse matrix
C2.pheno <- pheno.avg %>% filter(Gid %in% rownames(C2.ginv))
C2.res <- foreach(i=1:length(traits), .combine=rbind) %dopar%
asremlWithinUni(pheno=C2.pheno, trait=traits[i], set="C2_uni", ginv=C2.ginv,
weights=NULL, train.prop=0.8, nrep=50, seeds=seeds)
# #T1 within
seeds <- sample.int(10000000,50)
T1.geno <- as.matrix(read.table("../../geno/filtered_T1_kinship.txt", skip=3, row.names=1)) # Read in kinship matrix
diag(T1.geno) <- diag(T1.geno) + 0.00001
T1.ginv <- solve(T1.geno) # Get inverse of kinship matrix
rownames(T1.ginv) <- rownames(T1.geno) # Add rownames to inverse matrix
T1.pheno <- pheno.avg %>% filter(Gid %in% rownames(T1.ginv))
T1.res <- foreach(i=1:length(traits), .combine=rbind) %dopar%
asremlWithinUni(pheno=T1.pheno, trait=traits[i], set="T1_uni", ginv=T1.ginv,
weights=NULL, train.prop=0.8, nrep=50, seeds=seeds)
within.res <- rbind(C0.res, C2.res, T1.res)#
write.csv(within.res, "within_predictions.csv", quote=F, row.names=F)
|
b47777663739b285a5751dfdf2a60925f3896c23 | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.storagegateway/man/create_snapshot.Rd | f6b087f9e07274ed3ca7cad4ffe880e3116217da | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,696 | rd | create_snapshot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.storagegateway_operations.R
\name{create_snapshot}
\alias{create_snapshot}
\title{Initiates a snapshot of a volume}
\usage{
create_snapshot(VolumeARN, SnapshotDescription)
}
\arguments{
\item{VolumeARN}{[required] The Amazon Resource Name (ARN) of the volume. Use the ListVolumes operation to return a list of gateway volumes.}
\item{SnapshotDescription}{[required] Textual description of the snapshot that appears in the Amazon EC2 console, Elastic Block Store snapshots panel in the \strong{Description} field, and in the AWS Storage Gateway snapshot \strong{Details} pane, \strong{Description} field}
}
\description{
Initiates a snapshot of a volume.
}
\details{
AWS Storage Gateway provides the ability to back up point-in-time snapshots of your data to Amazon Simple Storage Service (Amazon S3) for durable off-site recovery, as well as import the data to an Amazon Elastic Block Store (EBS) volume in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway volume on a scheduled or ad hoc basis. This API enables you to take an ad hoc snapshot. For more information, see \href{http://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#SchedulingSnapshot}{Editing a Snapshot Schedule}.

In the CreateSnapshot request, you identify the volume by providing its Amazon Resource Name (ARN). You must also provide a description for the snapshot. When AWS Storage Gateway takes the snapshot of the specified volume, the snapshot and description appear in the AWS Storage Gateway console. In response, AWS Storage Gateway returns you a snapshot ID. You can use this snapshot ID to check the snapshot progress or later use it when you want to create a volume from a snapshot. This operation is only supported in stored and cached volume gateway type.
To list or delete a snapshot, you must use the Amazon EC2 API. For more information, see DescribeSnapshots or DeleteSnapshot in the \href{http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Operations.html}{EC2 API reference}.
Volume and snapshot IDs are changing to a longer length ID format. For more information, see the important note on the \href{http://docs.aws.amazon.com/storagegateway/latest/APIReference/Welcome.html}{Welcome} page.
}
\section{Accepted Parameters}{
\preformatted{create_snapshot(
VolumeARN = "string",
SnapshotDescription = "string"
)
}
}
\examples{
# Initiates an ad-hoc snapshot of a gateway volume.
\donttest{create_snapshot(
SnapshotDescription = "My root volume snapshot as of 10/03/2017",
VolumeARN = "arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABB"
)}
}
|
7d3df845a5a6af7577f6c83cb15f7786ca195911 | d12c721275c0c4627b91fb4de111c45b281cf944 | /AutoReport/ui.R | 0f8e6492d321d472e18450bafc486887820d4eef | [] | no_license | BeyondMultipleChoice/AACRAutoReport | 6a42e5b16a24e3ba15eb87a7d8c3d85e7ad7864d | 54d463db0f8d00edc59725fa0fef56b84a235b77 | refs/heads/master | 2021-09-26T04:29:41.244850 | 2021-09-20T19:08:31 | 2021-09-20T19:08:31 | 49,663,709 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 295 | r | ui.R | #UI file for shiny AACR report generation app
library(shiny)
library(DT)
shinyUI(fluidPage(
#MOdif - horizontal line in black 8-18-2020, see primaryPanelSet.R
tags$head(
tags$style(HTML("hr {border-top: 1px solid #000000;}"))
),
uiOutput("topLevelPanelSet")
)) |
97af48c0d7bed359eb0ee3a6964f5beb9f204daa | 9facb350e63a01b4274f0432b5592ba42f55572c | /Week5.R | 3820f1b286d40ed7c78619b1e88b61d12452b60e | [] | no_license | saurabhstha/Week5 | b43a58e59a4fa9aa81cc20cc4feb824acd39997d | 23b129d78b7a8dcdd468e834a4ddab13b2cc500c | refs/heads/main | 2023-04-10T21:27:25.478237 | 2021-04-18T19:51:16 | 2021-04-18T19:51:16 | 359,238,408 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,866 | r | Week5.R | # Assignment: Housing Dataset
# Name: Shrestha, Saurabh
# Date: 2021-04-14
install.packages("dplyr")
library(readxl)
library(dplyr)
# Set working directory
setwd("C:/Users/Saurabh/Desktop/DSC 520/Week5")
# Load the housing dataset
housing_data = read_excel("week-6-housing.xlsx")
head(housing_data)
# GroupBy, Summarize, Mutate, Filter, Select, and Arrange
colnames(housing_data)[2] <- "Sale_Price"
housing_data %>% group_by(Sale_Price)
housing_data %>% group_by(Sale_Price) %>% summarize(mean(square_feet_total_living))
# Mutate
housing_data %>% mutate(price_per_square_ft = Sale_Price/square_feet_total_living)
housing_data %>% summarize(mean(Sale_Price))
# Filter
housing_data %>% filter(Sale_Price < 300000)
# Select
housing_data %>% select(`Sale Date`,Sale_Price,square_feet_total_living,bedrooms)
# Arrange
housing_data %>% arrange(Sale_Price, square_feet_total_living, bedrooms)
library(purrr)
# purr functions
zip_data <- compact(housing_data$zip5)
low_price <- housing_data$Sale_Price %>% keep(function(x) x < 250000)
head(zip_data)
head(low_price)
# cbind
baths <- cbind(housing_data$bath_full_count, housing_data$bath_half_count,
housing_data$bath_3qtr_count)
head(baths)
#rbind
expensive_house <- housing_data %>% filter(Sale_Price >= 250000)
less_expensive_house <- housing_data %>% filter(Sale_Price < 250000)
# joined these two dfs to one
house_range<- rbind(expensive_house,less_expensive_house)
head(house_range)
# string concatenation(concatenating addr_full, postalctyn and zip5)
library(stringr)
address_categories <- str_split(string = housing_data$addr_full,pattern = ' ')
housing_data$full_address <- paste(housing_data$addr_full,housing_data$postalctyn,housing_data$zip5,sep = ',')
head(housing_data %>% select(`Sale Date`, Sale_Price,full_address))
|
16a2d87612e18130d49908a0a25551a7b8faa6ba | f6c335fe47cd81bf7bd3a576aed7ee871b98eac7 | /Exercise_6.1_3DHome_Range/Raster3dScript.R | b679d99ae28486251a5156e4164bcfb532dcc527 | [] | no_license | walterASEL/Walter-Datasets | b968d58cb5a97e74d98e1a5d851ce61e1d3673e9 | 176b6b0f348e90b2204f913260c0695ef5b08fe8 | refs/heads/master | 2022-10-18T19:46:30.750059 | 2022-09-28T16:40:08 | 2022-09-28T16:40:08 | 84,325,243 | 21 | 51 | null | null | null | null | WINDOWS-1252 | R | false | false | 8,306 | r | Raster3dScript.R | library(rasterVis)
library(raster)
library(rgl)
library(sp)
library(rgdal)
library(maptools)#writeAsciiGrid
library(ks)#hpikde.ud
library(adehabitatHR)
library(adehabitatMA)
library(BBMM)
library(chron)
#Reads and prepares the data
muleys<-read.csv("muleysexample.csv")
str(muleys)
#Remove outlier locations
newmuleys <-subset(muleys, muleys$Long > -110.90 & muleys$Lat > 37.80)
muleys <- newmuleys
newmuleys <-subset(muleys, muleys$Long < -107)
muleys <- newmuleys
muleys$GPSFixTime<-as.POSIXct(muleys$GPSFixTime, format="%Y.%m.%d%H:%M:%S")
muleys$NewDate<-as.POSIXct(muleys$GPSFixTime, format="%Y.%m.%d %H:%M:%S")
#Sort Data
muleys <- muleys[order(muleys$id, muleys$NewDate),]
#TIME DIFF NECESSARY IN BBMM CODE
timediff <- diff(muleys$NewDate)*60
# remove first entry without any difference
muleys <- muleys[-1,]
muleys$timelag <-as.numeric(abs(timediff))
#Remove locations greater than 24 hours apart in time
muleys <- subset(muleys, muleys$timelag < 18000)
#Code separate each animal into a shapefile or text file to use as "List" in Cumming and Cornelis
# get input file
indata <- muleys
innames <- unique(muleys$id)
innames <- innames[1:2]
outnames <- innames
# begin loop to calculate home ranges
for (i in 1:length(innames)){
data <- indata[which(indata$id==innames[i]),]
if(dim(data)[1] != 0){
# export the point data into a shp file
data.xy = data[c("X", "Y")]
coordinates(data.xy) <- ~X+Y
sppt <- SpatialPointsDataFrame(coordinates(data.xy),data)
proj4string(sppt) <- CRS("+proj=utm +zone=12 +datum=WGS84")
#writePointsShape(sppt,fn=paste(outnames[i],sep="/"),factor2char=TRUE)
#sppt <-data[c(-9,-10)]
write.table(sppt, paste(outnames[i],"txt",sep="."), sep="\t", quote=FALSE, row.names=FALSE)
write.table(paste(outnames[i],"txt",sep="."), "In_list.txt",sep="\t", quote=FALSE, row.names=FALSE, col.names=FALSE, append=TRUE)
}
}
#Reads and prepares the data
List<-read.table("In_list.txt",sep="\t",header=F)
head(List) #“List” contains the filenames (e.g. “D4.txt”)of the deer data sets
########################################################################
#Begin loop for home range PKDE
for(i in 1:nrow(List)) {
coords<-read.table(as.character(List[i,]),sep="\t",header=T)
loc<-coords[,c("X", "Y")]
#Reference grid : input parameters
RESO <- 100 # grid resolution (m)
BUFF <- 5000 # grid extent (m) (buffer around location extremes)
XMIN <- RESO*(round(((min(coords$X)-BUFF)/RESO),0))
YMIN <- RESO*(round(((min(coords$Y)-BUFF)/RESO),0))
XMAX <- XMIN+RESO*(round(((max(coords$X)+BUFF-XMIN)/RESO),0))
YMAX <- YMIN+RESO*(round(((max(coords$Y)+BUFF-YMIN)/RESO),0))
NRW <- ((YMAX-YMIN)/RESO)
NCL <- ((XMAX-XMIN)/RESO)
#Generation of refgrid
refgrid<-raster(nrows=NRW, ncols=NCL, xmn=XMIN, xmx=XMAX, ymn=YMIN, ymx=YMAX)
refgrid<-as(refgrid,"SpatialPixels")
#PKDE computation
##convert the SpatialGrid class to a raster
sampRaster <- raster(refgrid)
##set all the raster values to 1 such as to make a data mask
sampRaster[] <- 1
##Get the center points of the mask raster with values set to 1
evalPoints <- xyFromCell(sampRaster, 1:ncell(sampRaster))
##Here we can see how grid has a buffer around the locations and trajectory, if needed. This will ensure that we
#project our home range estimates into a slightly larger extent than the original points extent (bbox) alone.
#plot(sampRaster)
#lines(loc, cex=0.5, lwd=0.1, col="grey")
#points(loc, pch=1, cex=0.5)
##Calculate Hpi from the xy coordinates we used above, Hpi performs bivariate smoothing whereas hpi
#performs univariate. Bivariate is preferred.
Hpi1 <- Hpi(x = loc)
##write out the bandwidth matrix to a file as you might want to refer to it later
#write.table(Hpi1, paste("hpivalue_", range, ".txt", sep=""), row.names=FALSE,sep="\t")
##Create the KDE using the evaluation points
hpikde <- kde(x=loc, H=Hpi1,eval.points=evalPoints)
##Create a template raster based upon the mask and then assign the values from the kde to the template
hpikde.raster <- raster(refgrid)
hpikde.raster <- setValues(hpikde.raster,hpikde$estimate)
##We can take this raster and put it back into an adehabitatHR object
##Cast over to SPxDF
hpikde.px <- as(hpikde.raster,"SpatialPixelsDataFrame")
##create new estUD using the SPxDF
hpikde.ud <- new("estUD", hpikde.px)
##Assign values to a couple slots of the estUD
hpikde.ud@vol = FALSE
hpikde.ud@h$meth = "Plug-in Bandwidth"
##Convert the UD values to volume using getvolumeUD from adehabitatHR and cast over to a raster
udvol <- getvolumeUD(hpikde.ud, standardize=TRUE)
if (require(rgl)) {
r <- raster(udvol)
plot3D(r,zfac=-1, xlim = XMIN, ylim = YMIN,xlab = "x", ylab = "y", zlab = "z", rev=TRUE)
title3d('UD with KDE plug-in')
decorate3d()
}
}
########################################################################
#Now run code for creating KDE using href smoothing for comparison
for(i in 1:nrow(List)) {
coords<-read.table(as.character(List[i,]),sep="\t",header=T)
head(coords)
coords$GPSFixTime<-as.POSIXct(coords$GPSFixTime, format="%Y-%m-%d %H:%M:%S")
loc<-coords[,c("X", "Y")]
coordinates(loc) = c("X", "Y")
#Coordinate system info may not be needed
proj4string(loc) = CRS("+proj=utm +zone=12 +datum=WGS84")
#Generation of a reference grid around the location data
#Reference grid : input parameters
RESO <- 100 # grid resolution (m)
BUFF <- 5000 # grid extent (m) (buffer around location extremes)
XMIN <- RESO*(round(((min(coords$X)-BUFF)/RESO),0))
YMIN <- RESO*(round(((min(coords$Y)-BUFF)/RESO),0))
XMAX <- XMIN+RESO*(round(((max(coords$X)+BUFF-XMIN)/RESO),0))
YMAX <- YMIN+RESO*(round(((max(coords$Y)+BUFF-YMIN)/RESO),0))
NRW <- ((YMAX-YMIN)/RESO)
NCL <- ((XMAX-XMIN)/RESO)
#Generation of refgrid
refgrid<-raster(nrows=NRW, ncols=NCL, xmn=XMIN, xmx=XMAX, ymn=YMIN, ymx=YMAX)
refgrid<-as(refgrid,"SpatialPixels")
#LKDE computation
ud <- kernelUD(loc, grid=refgrid, h="href")
# Volume contours computation
udvol1<-getvolumeUD(ud, standardize = FALSE)
if (require(rgl)) {
r1 <- raster(udvol1)
plot3D(r1,zfac=-2, xlim = XMIN, ylim = YMIN,xlab = "x", ylab = "y", zlab = "z", rev=TRUE)
title3d('UD with KDE href')
decorate3d()
}
}
########################################################################
#Now run code for creating BBMM computation start of loop
for(i in 1:nrow(List)) {
coords<-read.table(as.character(List[i,]),sep="\t",header=T)
head(coords)
loc<-coords[,c("X", "Y")]
coordinates(loc) = c("X", "Y")
#Coordinate system info may not be needed
proj4string(loc) = CRS("+proj=utm +zone=12 +datum=WGS84")
#Generation of a reference grid around the location data
#Reference grid : input parameters
RESO <- 100 # grid resolution (m)
BUFF <- 5000 # grid extent (m) (buffer around location extremes)
XMIN <- RESO*(round(((min(coords$X)-BUFF)/RESO),0))
YMIN <- RESO*(round(((min(coords$Y)-BUFF)/RESO),0))
XMAX <- XMIN+RESO*(round(((max(coords$X)+BUFF-XMIN)/RESO),0))
YMAX <- YMIN+RESO*(round(((max(coords$Y)+BUFF-YMIN)/RESO),0))
NRW <- ((YMAX-YMIN)/RESO)
NCL <- ((XMAX-XMIN)/RESO)
#Generation of refgrid
refgrid<-raster(nrows=NRW, ncols=NCL, xmn=XMIN, xmx=XMAX, ymn=YMIN, ymx=YMAX)
##Get the center points of the mask raster with values set to 1
refgrid <- xyFromCell(refgrid, 1:ncell(refgrid))
#BBMM computation
BBMM <- brownian.bridge(x=coords$X, y=coords$Y, time.lag=coords$timelag[-1], area.grid=refgrid, location.error=22, max.lag=18000)
# Volume contours computation
# Create a data frame from x,y,z values
BBMM.df <- data.frame("x"=BBMM$x,"y"=BBMM$y,"z"=BBMM$probability)
##Make a raster from the x, y, z values, assign projection from above, match the resolution to that of the
#raster mask, note 100 is the cell resolution defined in evalPoints above
bbmm.raster <- rasterFromXYZ(BBMM.df, res=c(100,100), crs=proj4string(loc))
##Cast the data over to an adehabitatHR estUD
bbmm.px <- as(bbmm.raster, "SpatialPixelsDataFrame")
bbmm.ud <- new("estUD",bbmm.px)
bbmm.ud@vol = FALSE
bbmm.ud@h$meth = "BBMM"
##Convert the raw UD values to volume
udvol2 <- getvolumeUD(bbmm.ud, standardize=TRUE)
proj4string(udvol2) = CRS("+proj=utm +zone=12 +datum=WGS84")
if (require(rgl)) {
r2 <- raster(udvol2)
plot3D(r2,zfac=-2, xlim = XMIN, ylim = YMIN,xlab = "x", ylab = "y", zlab = "z", rev=TRUE)
title3d('UD with Brownian Bridge Movement Model ')
decorate3d()
}
}
|
29dae70b2df1c16d6a8b76197b1516b70b8e650d | 789dd3039ae8c7a1b29582e563c66f2f3b573e9b | /GRAPH/MetabNet/R/data_preprocess.R | 7374f4d5d12a1e271af830e07419c946078d9580 | [] | no_license | Aurametrix/R | 44ecb2969e0eb39120176692761304adae7a3539 | affb2b2e06b94ff8a1c8d552aa3b996b0158911f | refs/heads/master | 2023-01-31T22:28:15.893079 | 2023-01-27T01:17:57 | 2023-01-27T01:17:57 | 16,440,534 | 4 | 3 | null | null | null | null | UTF-8 | R | false | false | 20,079 | r | data_preprocess.R | data_preprocess <-
# Preprocess a metabolomics feature table (apLCMS-style: columns 1-2 are
# m/z and retention time, remaining columns are sample intensities).
# Pipeline: optional replicate summarization -> reordering of samples by
# class -> missing-value filtering (overall and per-group signal thresholds)
# -> missing-value imputation -> transformation/normalization
# (log2, quantile, lowess, median centering, z-norm, MAD scaling).
# Writes intermediate tables under <parentoutput_dir>/Stage1/ and returns
# list(data_matrix_afternorm_scaling, data_matrix_prescaling).
# NOTE(review): this function changes the working directory via setwd() and
# globally suppresses warnings via options(warn=-1) without restoring either
# on exit — side effects callers should be aware of.
function(Xmat=NA,Ymat=NA,feature_table_file,parentoutput_dir,class_labels_file,num_replicates=3,feat.filt.thresh=NA,summarize.replicates=TRUE,summary.method="mean",
all.missing.thresh=0.5,group.missing.thresh=0.7,
log2transform=TRUE,medcenter=TRUE,znormtransform=FALSE,quantile_norm=TRUE,lowess_norm=FALSE,
madscaling=FALSE,missing.val=0,samplermindex=NA, rep.max.missing.thresh=0.5,summary.na.replacement="zeros",featselmethod=NA){
options(warn=-1)
#read file; First row is column headers
# NOTE(review): is.na(Xmat==TRUE) evaluates is.na() of an elementwise
# comparison; when Xmat is a matrix, if() uses only the first element.
# Likely intended: test whether Xmat was supplied at all.
if(is.na(Xmat==TRUE)){
data_matrix<-read.table(feature_table_file,sep="\t",header=TRUE)
}else{
data_matrix<-Xmat
#rm(Xmat)
}
#print("signal filter threshold ")
#print(group.missing.thresh)
#print(missing.val)
# An NA overall-missingness threshold disables that filter (threshold 0).
if(is.na(all.missing.thresh)==TRUE){
all.missing.thresh=0
}
# Optionally drop user-specified sample columns (by index).
if(is.na(samplermindex)==FALSE){
data_matrix<-data_matrix[,-c(samplermindex)]
}
#use only unique records
data_matrix<-unique(data_matrix)
# Recode the sentinel missing value (default 0) as NA for downstream logic.
if(is.na(missing.val)==FALSE){
#print("Replacing missing values with NAs.")
data_matrix<-replace(as.matrix(data_matrix),which(data_matrix==missing.val),NA)
}
#print("dim of original data matrix")
#print(dim(data_matrix))
data_matrix_orig<-data_matrix
snames<-colnames(data_matrix)
# All output files for this stage go under <parentoutput_dir>/Stage1/.
dir.create(parentoutput_dir,showWarnings=FALSE)
parentoutput_dir<-paste(parentoutput_dir,"/Stage1/",sep="")
dir.create(parentoutput_dir,showWarnings=FALSE)
fheader="transformed_log2fc_threshold_"
setwd(parentoutput_dir)
# data_m holds only the sample-intensity columns (drop m/z and time).
data_m<-as.matrix(data_matrix[,-c(1:2)])
#Step 2) Average replicates
# Collapses each group of num_replicates adjacent columns into one sample
# using mean or median (delegated to getSumreplicates, defined elsewhere).
if(summarize.replicates==TRUE)
{
if(num_replicates>1)
{
data_m<-getSumreplicates(data_matrix,alignment.tool="apLCMS",numreplicates=num_replicates,numcluster=10,rep.max.missing.thresh=rep.max.missing.thresh,summary.method=summary.method,summary.na.replacement, missing.val=missing.val)
if(summary.method=="mean"){
print("Replicate averaging done")
filename<-paste("Rawdata_averaged.txt",sep="")
}else{
if(summary.method=="median"){
print("Replicate median summarization done")
filename<-paste("Rawdata_median_summarized.txt",sep="")
}
}
data_m_prenorm<-cbind(data_matrix[,c(1:2)],data_m)
write.table(data_m_prenorm, file=filename,sep="\t",row.names=FALSE)
data_matrix<-cbind(data_matrix[,c(1:2)],data_m)
#num_samps_group[[1]]=(1/num_replicates)*num_samps_group[[1]]
#num_samps_group[[2]]=(1/num_replicates)*num_samps_group[[2]]
}
}
data_matrix<-cbind(data_matrix[,c(1:2)],data_m)
data_matrix_orig<-data_matrix
data_subjects<-data_m
# ordered_labels: class label per sample column after reordering by class;
# num_samps_group: per-class sample counts (list indexed by class order).
ordered_labels={}
num_samps_group<-new("list")
if(is.na(class_labels_file)==FALSE)
{
print("Class labels file:")
print(class_labels_file)
data_matrix={}
# Class labels come from a two-column file (SampleID, Class) or from Ymat.
if(is.na(Ymat)==TRUE){
classlabels<-read.table(class_labels_file,sep="\t",header=TRUE)
}else{
classlabels<-Ymat
}
classlabels<-as.data.frame(classlabels)
colnames(classlabels)<-c("SampleID","Class")
f1<-table(classlabels$SampleID)
classlabels<-as.data.frame(classlabels)
# Keep one label row per replicate group (labels are assumed to be listed
# per replicate, in the same order as the columns — TODO confirm).
classlabels<-classlabels[seq(1,dim(classlabels)[1],num_replicates),]
#print(classlabels)
class_labels_levels<-levels(as.factor(classlabels[,2]))
bad_rows<-which(class_labels_levels=="")
if(length(bad_rows)>0){
class_labels_levels<-class_labels_levels[-bad_rows]
}
# Rebuild data_matrix with sample columns grouped by class level.
for(c in 1:length(class_labels_levels))
{
if(c>1){
data_matrix<-cbind(data_matrix,data_subjects[,which(classlabels[,2]==class_labels_levels[c])])
}else{
data_matrix<-data_subjects[,which(classlabels[,2]==class_labels_levels[c])]
}
classlabels_index<-which(classlabels[,2]==class_labels_levels[c])
ordered_labels<-c(ordered_labels,as.character(classlabels[classlabels_index,2]))
num_samps_group[[c]]<-length(classlabels_index)
}
#colnames(data_matrix)<-as.character(ordered_labels)
data_matrix<-cbind(data_matrix_orig[,c(1:2)],data_matrix)
data_m<-as.matrix(data_matrix[,-c(1:2)])
}else
{
# No class-labels file: either treat all samples as one class "A", or use
# the labels supplied directly via Ymat (columns are NOT reordered here).
if(is.na(Ymat)==TRUE)
{
classlabels<-rep("A",dim(data_m)[2])
ordered_labels<-classlabels
num_samps_group[[1]]<-dim(data_m)[2]
class_labels_levels<-c("A")
data_m<-as.matrix(data_matrix[,-c(1:2)])
}else{
classlabels<-Ymat
classlabels<-as.data.frame(classlabels)
colnames(classlabels)<-c("SampleID","Class")
f1<-table(classlabels$SampleID)
classlabels<-as.data.frame(classlabels)
classlabels<-classlabels[seq(1,dim(classlabels)[1],num_replicates),]
#print(classlabels)
class_labels_levels<-levels(as.factor(classlabels[,2]))
bad_rows<-which(class_labels_levels=="")
if(length(bad_rows)>0){
class_labels_levels<-class_labels_levels[-bad_rows]
}
for(c in 1:length(class_labels_levels))
{
#if(c>1){
#data_matrix<-cbind(data_matrix,data_subjects[,which(classlabels[,2]==class_labels_levels[c])])
#}else{
# data_matrix<-data_subjects[,which(classlabels[,2]==class_labels_levels[c])]
#}
classlabels_index<-which(classlabels[,2]==class_labels_levels[c])
ordered_labels<-c(ordered_labels,as.character(classlabels[classlabels_index,2]))
num_samps_group[[c]]<-length(classlabels_index)
}
#colnames(data_matrix)<-as.character(ordered_labels)
#data_matrix<-cbind(data_matrix_orig[,c(1:2)],data_matrix)
#data_m<-as.matrix(data_matrix[,-c(1:2)])
}
}
#Step 3a) Remove features if signal is not detected in at least x% of all samples
##################################################################################
metab_zeros={}
data_clean<-{}
clean_metabs<-{}
#num_samps_group[[3]]<-0
# total_sigs: per-feature count of samples with detected signal
# (> missing.val, or non-NA when no sentinel value is defined).
if(is.na(all.missing.thresh)==FALSE)
{
total_sigs<-apply(data_m,1,function(x){
if(is.na(missing.val)==FALSE){return(length(which(x>missing.val)))
}else{
return(length(which(is.na(x)==FALSE)))
}})
total_sig_thresh<-dim(data_m)[2]*all.missing.thresh
total_good_metabs<-which(total_sigs>total_sig_thresh)
}
#Step 3b) Find features for which the signal is not detected in at least x% of samples in either of the groups
# A feature is kept (clean_metabs) if ANY class reaches the group threshold.
# Separate hand-written branches exist for 1, 2, 3, and >3 classes; the
# 2- and 3-class branches rely on columns being ordered by class (Step 2).
data_m<-data_matrix[,-c(1:2)]
if(is.na(class_labels_file)==FALSE)
{
if(is.na(group.missing.thresh)==FALSE)
{
if(length(class_labels_levels)==2)
{
sig_thresh_groupA<-group.missing.thresh*num_samps_group[[1]]
sig_thresh_groupB<-group.missing.thresh*num_samps_group[[2]]
for(metab_num in 1:dim(data_matrix)[1])
{
#print(missing.val)
if(is.na(missing.val)==FALSE){
num_sigsA<-length(which(data_m[metab_num,1:num_samps_group[[1]]]>missing.val))
num_sigsB<-length(which(data_m[metab_num,(num_samps_group[[1]]+1):(num_samps_group[[1]]+num_samps_group[[2]])]>missing.val))
}else{
num_sigsA<-length(which(is.na(data_m[metab_num,1:num_samps_group[[1]]])==FALSE))
num_sigsB<-length(which(is.na(data_m[metab_num,(num_samps_group[[1]]+1):(num_samps_group[[1]]+num_samps_group[[2]])])==FALSE))
}
if((num_sigsA>=sig_thresh_groupA) || (num_sigsB>=sig_thresh_groupB))
{
clean_metabs<-c(clean_metabs,metab_num)
}
}
#print(length(clean_metabs))
}else{
if(length(class_labels_levels)==3){
sig_thresh_groupA<-group.missing.thresh*num_samps_group[[1]]
sig_thresh_groupB<-group.missing.thresh*num_samps_group[[2]]
sig_thresh_groupC<-group.missing.thresh*num_samps_group[[3]]
for(metab_num in 1:dim(data_matrix)[1])
{
if(is.na(missing.val)==FALSE){
num_sigsA<-length(which(data_m[metab_num,1:num_samps_group[[1]]]>missing.val))
num_sigsB<-length(which(data_m[metab_num,(num_samps_group[[1]]+1):(num_samps_group[[1]]+num_samps_group[[2]])]>missing.val))
num_sigsC<-length(which(data_m[metab_num,(num_samps_group[[1]]+num_samps_group[[2]]+1):(num_samps_group[[1]]+num_samps_group[[2]]+num_samps_group[[3]])]>missing.val))
}else{
num_sigsA<-length(which(is.na(data_m[metab_num,1:num_samps_group[[1]]])==FALSE))
num_sigsB<-length(which(is.na(data_m[metab_num,(num_samps_group[[1]]+1):(num_samps_group[[1]]+num_samps_group[[2]])])==FALSE))
num_sigsC<-length(which(is.na(data_m[metab_num,(num_samps_group[[1]]+num_samps_group[[2]]+1):(num_samps_group[[1]]+num_samps_group[[2]]+num_samps_group[[3]])])==FALSE))
}
if((num_sigsA>=sig_thresh_groupA) || (num_sigsB>=sig_thresh_groupB) || (num_sigsC>=sig_thresh_groupC))
{
clean_metabs<-c(clean_metabs,metab_num)
}
}
}else{
if(length(class_labels_levels)>2){
# Generic branch (>3 classes): select columns by label rather than by
# positional offsets; breaks out as soon as one class passes.
for(metab_num in 1:dim(data_m)[1])
{
for(c in 1:length(class_labels_levels)){
if(is.na(missing.val)==FALSE){
num_cursig<-length(which(data_m[metab_num,which(ordered_labels==class_labels_levels[c])]>missing.val))
}else{
num_cursig<-length(which(is.na(data_m[metab_num,which(ordered_labels==class_labels_levels[c])])==FALSE))
}
sig_thresh_cur<-length(which(ordered_labels==class_labels_levels[c]))*group.missing.thresh
if(num_cursig>=sig_thresh_cur)
{
clean_metabs<-c(clean_metabs,metab_num)
break #for(i in 1:4){if(i==3){break}else{print(i)}}
}
}
}
}
else{
if(length(class_labels_levels)==1){
num_samps_group[[1]]<-num_samps_group[[1]]
sig_thresh_groupA<-group.missing.thresh*num_samps_group[[1]]
for(metab_num in 1:dim(data_matrix)[1])
{
if(is.na(missing.val)==FALSE){
num_sigsA<-length(which(data_m[metab_num,1:num_samps_group[[1]]]>missing.val))
}else{
num_sigsA<-length(which(is.na(data_m[metab_num,1:num_samps_group[[1]]])==FALSE))
}
if((num_sigsA>=sig_thresh_groupA) )
{
clean_metabs<-c(clean_metabs,metab_num)
}
}
}
}
}
}
}
}
####################################################################################
#Step 4) Replace missing values
# Imputation strategies: "zeros", half of per-sample minimum, half of global
# minimum, half of per-feature minimum, or Bayesian PCA (pcaMethods::pca).
# The two arms below are identical except that the else-arm re-extracts
# data_m from data_matrix first.
if(summarize.replicates==TRUE)
{
{
if(summary.na.replacement=="zeros"){
data_m<-replace(data_m,which(is.na(data_m)==TRUE),0)
}else{
if(summary.na.replacement=="halfsamplemin"){
data_m<-apply(data_m,2,function(x){naind<-which(is.na(x)==TRUE); if(length(naind)>0){x[naind]<-min(x,na.rm=TRUE)/2}; return(x)})
}else{
if(summary.na.replacement=="halfdatamin"){
min_val<-min(data_m,na.rm=TRUE)*0.5
data_m<-replace(data_m,which(is.na(data_m)==TRUE),min_val)
#data_m<-apply(data_m,1,function(x){naind<-which(is.na(x)==TRUE); if(length(naind)>0){x[naind]<-min(x,na.rm=TRUE)/2}; return(x)})
}else{
if(summary.na.replacement=="halffeaturemin"){
data_m<-apply(data_m,1,function(x){naind<-which(is.na(x)==TRUE); if(length(naind)>0){x[naind]<-min(x,na.rm=TRUE)/2}; return(x)})
data_m<-t(data_m)
}else{
if(summary.na.replacement=="bpca"){
library(pcaMethods)
pc1 <- pcaMethods::pca(t(data_m), method="bpca", nPcs=3)
data_m<-completeObs(pc1)
data_m<-t(data_m)
min_val<-min(data_m,na.rm=TRUE)*0.5
# Clamp negative BPCA reconstructions to half the data minimum.
data_m<-replace(as.matrix(data_m),which(data_m<0),min_val)
# print(length(which(is.na(data_m)==TRUE)))
#print("bpca done")
#write.table(data_m,file="bpca_res.txt",sep="\t",row.names=FALSE)
}
}
}
}
}
}
}else
{
data_m<-data_matrix[,-c(1:2)]
if(summary.na.replacement=="zeros"){
data_m<-replace(data_m,which(is.na(data_m)==TRUE),0)
}else{
if(summary.na.replacement=="halfsamplemin"){
data_m<-apply(data_m,2,function(x){naind<-which(is.na(x)==TRUE); if(length(naind)>0){x[naind]<-min(x,na.rm=TRUE)/2}; return(x)})
}else{
if(summary.na.replacement=="halfdatamin"){
min_val<-min(data_m,na.rm=TRUE)*0.5
data_m<-replace(data_m,which(is.na(data_m)==TRUE),min_val)
#data_m<-apply(data_m,1,function(x){naind<-which(is.na(x)==TRUE); if(length(naind)>0){x[naind]<-min(x,na.rm=TRUE)/2}; return(x)})
}else{
if(summary.na.replacement=="halffeaturemin"){
data_m<-apply(data_m,1,function(x){naind<-which(is.na(x)==TRUE); if(length(naind)>0){x[naind]<-min(x,na.rm=TRUE)/2}; return(x)})
data_m<-t(data_m)
}else{
if(summary.na.replacement=="bpca"){
library(pcaMethods)
pc1 <- pcaMethods::pca(t(data_m), method="bpca", nPcs=3)
data_m<-completeObs(pc1)
data_m<-t(data_m)
min_val<-min(data_m,na.rm=TRUE)*0.5
data_m<-replace(as.matrix(data_m),which(data_m<0),min_val)
# print(length(which(is.na(data_m)==TRUE)))
# write.table(data_m,file="bpca_res.txt",sep="\t",row.names=FALSE)
}
}
}
}
}
}
#remove bad features based on all missing values criteria
if(length(total_good_metabs)>0){
data_m<-data_m[total_good_metabs,]
data_matrix<-data_matrix[total_good_metabs,]
#print(paste("Dimension of data matrix after overall ",all.missing.thresh,"% signal threshold filtering",sep=""))
print(paste("Dimension of data matrix after using overall ",100*all.missing.thresh, "% signal criteria for filtering:"),sep="")
print(dim(data_matrix))
}else{
stop(paste("None of the m/z features have signal in ",all.missing.thresh*100, "% of samples",sep=""))
}
#group-wise missing values
if(is.na(class_labels_file)==FALSE){
if(length(clean_metabs)>0)
{
# NOTE(review): clean_metabs was computed against the pre-Step-3a row
# numbering; indexing the already overall-filtered matrix with it assumes
# the two filters agree — verify.
data_m<-data_m[clean_metabs,]
data_matrix<-data_matrix[clean_metabs,]
print(paste("Dimension of data matrix after using group-wise ",100*group.missing.thresh, "% signal criteria for filtering:"),sep="")
print(dim(data_matrix))
}
}
####################################################################
#Step 4) Data transformation and normalization
# Applied in fixed order: log2 -> quantile -> lowess -> (snapshot) ->
# median centering -> z-normalization -> MAD scaling.
if(log2transform==TRUE)
{
data_m<-log2(data_m+1)
print("log scale")
print(data_m[1:10,1:10])
}
if(quantile_norm==TRUE)
{
# normalizeQuantiles/normalizeCyclicLoess are limma functions (loaded
# elsewhere in the package).
data_m<-normalizeQuantiles(data_m)
print("quant norm")
print(data_m[1:10,1:10])
}
if(lowess_norm==TRUE)
{
data_m<-normalizeCyclicLoess(data_m)
print("lowess")
}
# Snapshot before centering/scaling; returned alongside the scaled matrix.
data_m_prescaling<-data_m
if(medcenter==TRUE)
{
# Row-wise (per-feature) median centering.
colmedians=apply(data_m,1,function(x){median(x,na.rm=TRUE)})
data_m=sweep(data_m,1,colmedians)
}
if(znormtransform==TRUE)
{
# scale() standardizes columns, so transpose to z-normalize each feature.
data_m<-scale(t(data_m))
data_m<-t(data_m)
}
if(madscaling==TRUE)
{
# Scale each sample by the geometric mean of all samples' MADs / its MAD.
colmedians=apply(data_m,2,function(x){median(x,na.rm=TRUE)})
Y=sweep(data_m,2,colmedians)
mad<-apply(abs(Y),2,function(x){median(x,na.rm=TRUE)})
const<-prod(mad)^(1/length(mad))
scale.normalized<-sweep(data_m,2,const/mad,"*")
data_m<-scale.normalized
}
#print("after")
#print(data_m[1:10,1:10])
#Use this if first column is gene/metabolite name
#for apLCMS:
#data_matrix_temp<-data_matrix[,c(1:2)]
data_matrix<-cbind(data_matrix[,c(1:2)],data_m)
#print(dim(data_matrix))
#print(dim(data_m))
data_m<-as.data.frame(data_m)
num_rows<-dim(data_m)[1]
num_columns<-dim(data_m)[2]
#print("num rows is ")
#print(num_rows)
#for apLCMS:
# Assign stable mzid_<n> row names and write the m/z-to-rowname map.
rnames<-paste("mzid_",seq(1,num_rows),sep="")
rownames(data_m)=rnames
mzid_mzrt<-data_matrix[,c(1:2)]
colnames(mzid_mzrt)<-c("mz","time")
rownames(mzid_mzrt)=rnames
write.table(mzid_mzrt, file="mzid_mzrt.txt",sep="\t",row.names=FALSE)
#filename<-paste("Classlabels_file.txt",sep="")
#write.table(classlabels, file=filename,sep="\t",row.names=FALSE)
filename<-paste("Normalized_sigthreshfilt_averaged_data.txt",sep="")
data_matrix<-cbind(data_matrix[,c(1:2)],data_m)
write.table(data_matrix, file=filename,sep="\t",row.names=FALSE)
data_matrix_prescaling<-cbind(data_matrix[,c(1:2)],data_m_prescaling)
return(list(data_matrix_afternorm_scaling=data_matrix,data_matrix_prescaling=data_matrix_prescaling))
#return(data_matrix)
}
|
899fb82f62d9bbbe7e5dcb65212688c42ebb9e05 | c5d63edeb104e84820a99432ae6c3c3b77b0ed19 | /src/metacluster.bipartite.R | 27f68c0b25af2001abbed4d7cb57eaf7c5d4ccae | [] | no_license | genepattern/FLAMEMetacluster | 5ff41f44ed2d2d912609fe0b368b46c893b34a21 | 3fb2d6f7742df9548e463e66056e6516884d9014 | refs/heads/master | 2021-01-10T11:15:18.961571 | 2016-02-12T20:10:28 | 2016-02-12T20:10:28 | 51,612,900 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,488 | r | metacluster.bipartite.R |
######## Euclidean distance ###########################
# Euclidean (L2) distance between two equal-length numeric vectors.
d <- function(x, y) {
  squared_diffs <- (x - y)^2
  sqrt(sum(squared_diffs))
}
######## Integer Programming ##########################
# Solve a binary (0/1) assignment between source clusters S and template
# clusters T via integer programming (lpSolve::lp), minimizing total
# matching cost sum(dist.ST * match).
#
# dist.ST    - nS x nT matrix of pairwise distances (cost of matching S_i to T_j)
# cap.S/cap.T - "capacities" (cluster proportions) for the S and T sides
# delta      - slack added to the capacity upper bounds
# constraint - which constraint set to use, encoded as two flags:
#   "11": capacity limits on both sides AND every S and every T matched >= once
#   "10": capacity limits on both sides AND every S matched >= once
#   "01": no capacity limits; every S and every T matched >= once
#   "00": only: every S matched >= once
# Returns the nS x nT 0/1 assignment matrix (lp may return all zeros when
# infeasible; the caller checks sum(match) before using it).
IP = function(dist.ST, cap.S, cap.T, delta, constraint) {
  nS = dim(dist.ST)[1]; nT = dim(dist.ST)[2]; n=nS+nT
  # Decision variables x_ij laid out row-major: objective = vec(t(dist.ST)).
  f.obj = as.vector(t(dist.ST))
  # Constraint matrices (initialized to zero via * 0):
  #   f.con.1: per-S capacity rows (coefficients cap.T)
  #   f.con.2: per-T capacity rows (coefficients cap.S)
  #   f.con.3: per-S "matched at least once" rows (coefficients 1)
  #   f.con.4: per-T "matched at least once" rows (coefficients 1)
  f.con.1 = f.con.3 = array(1:nS*nS*nT, dim=c(nS, nS*nT)) * 0
  f.con.2 = f.con.4 = array(1:nT*nS*nT, dim=c(nT, nS*nT)) * 0
  # Fill row i of the S-side constraints over its block of nT variables.
  s=seq(1,nT)
  for (i in 1:nS) { f.con.1[i,s]=cap.T; f.con.3[i,s]=rep(1,nT); s=s+nT }
  # Fill row i of the T-side constraints over every nT-th variable.
  t=seq(1,nS*nT,nT)
  for (i in 1:nT) { f.con.2[i,t]=cap.S; f.con.4[i,t]=rep(1,nS); t=t+1 }
  if (constraint=="11") {
    f.con = rbind(f.con.1,f.con.2,f.con.3,f.con.4)
    f.rhs = c(cap.S+delta, cap.T+delta, rep(1, n))
    f.dir = c(rep("<=", n), rep(">=", n))
  }
  if (constraint=="10") {
    f.con = rbind(f.con.1,f.con.2,f.con.3)
    f.rhs = c(cap.S+delta, cap.T+delta, rep(1, nS))
    f.dir = c(rep("<=", n), rep(">=", nS))
  }
  if (constraint=="01") {
    f.con = rbind(f.con.3, f.con.4)
    f.rhs = c(rep(1, nS),rep(1, nT))
    f.dir = rep(">=", n)
  }
  if (constraint=="00") {
    f.con = f.con.3
    f.rhs = rep(1, nS)
    f.dir = rep(">=", nS)
  }
  # all.bin=T forces a 0/1 solution; reshape the flat solution vector back
  # into an nS x nT matrix (row-major, matching the objective layout).
  IP=lp ("min", f.obj, f.con, f.dir, f.rhs, all.bin=T)
  IP.solution=IP$solution
  matrix(IP$solution, nrow=nS, byrow=T)
}
# Meta-cluster per-sample FLAME cluster results for one class via bipartite
# matching. Reads one clustered data file per row of `concatfiles` from the
# working directory, pools the per-cluster mean locations, chooses the number
# of metaclusters with pamk.2 (defined elsewhere), runs PAM, then matches each
# sample's clusters to the PAM medoids with the IP() bipartite solver.
#
# classname   - class label used as a prefix for all output files
# concatfiles - data frame whose first column holds the per-sample file names
# density     - file-name suffix separating sample name from extension
# dim         - number of data dimensions (leading columns of each file)
# max.reiter, classnum - accepted but not used in this function body
# kmin, kmax  - range of candidate metacluster counts passed to pamk.2
# classified  - whether input files are pre-classified (affects name parsing)
# Side effects: writes medoids, assignment tables, per-sample membership
# files, and silhouette/PCA PNG plots into the working directory.
# Returns list-like vector with pam.k, pooled-locations file name, and the
# number of samples.
metacluster.bp <- function(classname, concatfiles, density, dim, max.reiter = 2, kmin, kmax, classnum = 0,classified) {
 suppressMessages(library(lpSolve))
 suppressMessages(library(cluster))
 ##make gfile & list of filenames##
 cat("Class:",classname,'\n')
 cat("Meta-clustering",' ........\n')
 filenames <- c()
 classfilenames <- c()
 for (i in 1:nrow(concatfiles)) {
  filename <- strsplit(concatfiles[i,1],paste('.',density,sep=''))[[1]][1]
  filenames <- c(filenames, filename)
  classfilename <- paste(classname, i,sep = '.')
  if (classified == T) {
   classfilename <- strsplit(concatfiles[i,1],"\\.")[[1]][1]
  }
  classfilenames <- c(classfilenames, classfilename)
 }
 #generate meansmatrix#
 # gfile[i] = number of clusters in sample i (files must contain a
 # "cluster" column; read from the current working directory).
 gfile <- c()
 for (i in 1:nrow(concatfiles)) {
  file <- dir("./", pattern = as.character(concatfiles[i,1]))
  datafile <- read.table(file, header = T, sep = "\t")
  g <- length(unique(datafile$cluster))
  gfile <- c(gfile, g)
 }
 # meansmatrix: one row per (sample, cluster); columns = per-dimension mean
 # location plus the cluster's proportion of its sample.
 meansmatrix <- matrix(ncol = dim+1, nrow = sum(gfile), dimnames = list(rep(filenames, gfile), c(paste("v",1:dim,sep=""),"props")))
 col.names = colnames(datafile)[1:dim]
 for (i in 1:nrow(concatfiles)) {
  #print(i)
  file <- dir("./", pattern = as.character(concatfiles[i,1]))
  datafile <- read.table(file, header = T, sep = "\t")
  total.size <- dim(datafile)[1]
  g <- unique(datafile$cluster)
  for (m in 1:length(g)) {
   n <- g[m]
   clus<- subset(datafile, datafile$cluster == n)
   size <- dim(clus)[1]
   prop <- size/total.size
   for (d in 1:dim) {
    meansmatrix[(m+sum(gfile[0:(i-1)])), d] <- mean(clus[,d])
   }
   # NOTE(review): `d` here is the loop index left over from the inner loop
   # (shadowing the distance function d() in this scope); d+1 == dim+1.
   meansmatrix[(m+sum(gfile[0:(i-1)])), d+1] <- prop
  }
 }
 allfiles <- rep(as.character(filenames),gfile)
 pooledlocation<-paste(classname,"withprops.pooledlocations.txt", sep = ".")
 write.table(meansmatrix, sep = "\t", quote = F, row.names = allfiles, file = pooledlocation)
 meanfile <- meansmatrix[,1:dim]
 # Choose the number of metaclusters (pamk.2 is defined elsewhere in the
 # package) and run PAM on the pooled cluster means.
 c <- pamk.2(meanfile, k = kmin:kmax,classname=classname)
 pam.k <- c$nc
 pamclus <- pam(meanfile, k = pam.k)
 optimal.k = pam.k
 Number.samples = length(concatfiles)
 # k.NN: neighborhood size (20% of samples) used to estimate each
 # metacluster's typical proportion from clusters nearest its medoid.
 k.NN = max(1,trunc(Number.samples * 0.2))
 delta = 0.1
 # pch codes used to tag rows in matched.points: 20 = sample mode, 22 = medoid.
 modes.pch=20
 medoids.pch=22
 # Input file
 data=meansmatrix
 modes = data[,1:dim]
 prop = data[,(dim+1)]
 sample = allfiles
 unique.sample <- unique(allfiles)
 pam.out = pam(modes, k=optimal.k)
 indices = pam.out$clustering
 medoids = pam.out$medoids
 # median.prop[i]: median proportion among the k.NN cluster modes nearest
 # medoid i — acts as the template "capacity" cap.T for the IP matching.
 median.prop = rep(0, optimal.k)
 for (i in 1:optimal.k)
 {
  pick = which(indices == i)
  prop.cluster = prop[pick]
  sample.cluster = sample[pick]
  modes.cluster = as.matrix(modes[pick,])
  dist = rep(Inf, length(pick))
  for (j in 1:length(pick)) dist[j] = d(modes.cluster[j,], medoids[i,])
  dist.NN = sort(dist)[1:k.NN]
  indices.NN = which(dist %in% dist.NN)
  median.prop[i] = median(prop.cluster[indices.NN])
 }
 ###### to compare sample S's modes with the medoid
 # start with medoid info
 # matched.points rows: [dim coords | up to optimal.k metacluster labels | pch]
 ncol=dim+optimal.k+1
 matched.points=c(medoids[1,],1,rep(0,optimal.k-1),medoids.pch)
 for (i in 2:optimal.k) matched.points=rbind(matched.points,t(c(medoids[i,],i,rep(0,optimal.k-1),medoids.pch)))
 for (S in 1:Number.samples) {
  delta = 0.05
  sample.id = which(sample==unique.sample[S])
  cap.S = prop[sample.id]
  modes.sample = modes[sample.id,]
  # template proportions
  cap.T = median.prop
  # distances between modes and medoids
  nS = length(sample.id); nT = optimal.k
  dist.ST = array(1:nS*nT, dim=c(nS,nT))
  for (i in 1:nS) { for (j in 1:nT) {
   dist.ST[i,j] = d(modes.sample[i,], medoids[j,])
  }}
  # call to IP: try the constraint sets from most to least restrictive and
  # keep the feasible matching with the lowest total cost.
  constraint.choices=c("11", "10", "01", "00")
  min.obj = Inf
  for (constraint in constraint.choices) {
   try.match = IP(dist.ST, cap.S, cap.T, delta, constraint)
   check = sum(try.match)
   if (check) {
    try.obj = sum(dist.ST * try.match)
    # NOTE(review): min.obj is never updated inside this loop, so `match`
    # ends up being the last feasible matching rather than the cheapest —
    # likely a latent bug; confirm intended behavior.
    if (try.obj < min.obj) { match = try.match }
   }}
  # rownames(match) = paste( as.character(S), ".", seq(1:nS) )
  # colnames(match) = paste( "T.", seq(1:nT) )
  # Record each sample mode with its matched metacluster label(s), ordered
  # nearest-medoid first when a mode matched more than one metacluster.
  for (k in 1:nS) {
   colors = rep(0, nT)
   labels=which(match[k,]==1)
   l.l=length(labels)
   if (l.l>1) {
    dist = rep(Inf, l.l)
    for (j in 1:l.l) dist[j]=d(modes.sample[k,], medoids[labels[j],])
    dist.order=order(dist)
    labels=labels[dist.order]
   }
   colors[1:l.l]=labels
   p = t(c(modes.sample[k,],colors,modes.pch))
   vec=1:ncol; for (m in 1:ncol) vec[m]=p[[m]]
   matched.points=rbind(matched.points,vec)
  }
 }
 # Write the medoid coordinates with their metacluster numbers.
 write.table(matched.points[1:optimal.k,1:(dim+1)], file=paste(classname,"medoids.txt",sep='.'),quote = FALSE, sep = "\t", row.names = FALSE, col.names = c(col.names,"metacluster#"))
 # Drop medoid rows (pch 22), keeping only the sample-mode assignments.
 out <- matched.points
 out <- subset(out,out[,(dim+optimal.k+1)]!=22)[,1:(dim+optimal.k)]
 metafile <- out
 mcluster <- (metafile[,(dim+1):(dim+optimal.k)])
 metafile <- cbind(meanfile[,1:dim], mcluster)
 columns <- paste("member",1:optimal.k,sep='')
 write.table (metafile, file = paste(classname,"medoids.metacluster.assignments.txt", sep = "."),quote = F, col.names = c(col.names,columns),row.names =allfiles,sep="\t")
 #plot results of metacluster
 #silhouette plot (Cairo is used on non-Windows platforms)
 if (.Platform$OS.type == "windows")
 {
  png(filename = paste(classname,"metacluster.silhouette.png",sep = "."),width = 960,height = 960)
 }
 else
 {
  library(Cairo)
  CairoPNG(filename = paste(classname,"metacluster.silhouette.png",sep = "."),width = 960,height = 960)
 }
 plot(pamclus, ask = FALSE, which.plots = 2)
 dev.off()
 #clusplot (PCA)
 mtitle <- paste(classname,"metacluster.pca", sep = ".")
 if (.Platform$OS.type == "windows")
 {
  png(filename = paste(mtitle, "png", sep = "."), width = 960, height = 960)
 }
 else
 {
  library(Cairo)
  CairoPNG(filename = paste(mtitle, "png", sep = "."), width = 960, height = 960)
 }
 clusplot(pamclus, main = mtitle, lines = 0, labels = 4)
 dev.off()
 #make new concat file, collect patterns#
 # For each sample file, append its per-event metacluster membership columns
 # and write a *.metacluster.membership file.
 for (i in 1:nrow(concatfiles)) {
  file <- dir("./", pattern = as.character(concatfiles[i,1]))
  tfile <- read.table(file, header = T, sep = "\t")
  g <- unique(tfile$cluster)
  row.start <- sum(as.numeric(gfile[0:(i-1)]))+1
  row.end <- row.start+length(g)-1
  assign <- mcluster[row.start:row.end,1:optimal.k] #m-clusters assignment for each cluster
  count <- cbind(g, assign)
  newfile <- c()
  for (tclus in g[1:length(g)]) {
   clus<-subset(tfile, tfile$cluster == tclus)
   mclus <- count[,2:(optimal.k+1)][count[,1] == tclus]
   mclus <- matrix(rep(mclus,each=nrow(clus)),ncol=optimal.k)
   clus <- cbind(clus, mclus)
   newfile<-rbind(newfile, clus)
  }
  #if (classname == "final" & classified == T) {
  # colnames = c(names(newfile)[1:dim],"o.cluster", paste("c.cluster",1:optimal.k,sep=''), paste("cluster",1:optimal.k,sep=''))
  #} else {
  colnames = c(names(newfile)[1:dim], "o.cluster", paste("cluster",1:optimal.k,sep=''))
  #}
  filename <- paste(classname, filenames[i],density, pam.k,"metacluster.membership", sep = ".")
  write.table(newfile, file = filename,
  row.names = F, col.names = colnames, quote = F, sep = "\t")
 }
 cat('\n')
 results <- c()
 results$pam.k <- pam.k
 results$matrix <- pooledlocation
 results$numberfiles <- Number.samples
 return(results)
}
1abdd98ab044ca33519a2a3a12d69e03c48d75bb | 07cbeb7cffaa00d21734c486c793e4ed4fe698db | /plot3.R | 156adf71644a73175aaa2b6c5c04699129cf0518 | [] | no_license | ssuresh8/ExData_Plotting1 | 0897bd40a96cb1769c270a640e196496fa28f558 | 376b0d95156015f90de680d346d831aac8a8233a | refs/heads/master | 2020-05-29T11:45:56.200047 | 2015-12-13T22:19:51 | 2015-12-13T22:19:51 | 47,864,087 | 0 | 0 | null | 2015-12-12T04:25:19 | 2015-12-12T04:25:19 | null | UTF-8 | R | false | false | 1,206 | r | plot3.R | # read in the text file
# plot3.R -- energy sub-metering plot for 2007-02-01 and 2007-02-02.
# The raw file is semicolon-delimited, has a header row, and uses "?" to mark
# missing values.
power <- read.csv("household_power_consumption.txt", header = TRUE,
                  sep = ";", na.strings = "?")
# Parse the Date column into Date objects (day/month/year in the raw file).
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
# Keep only the two target days (logical indexing, same rows as before).
feb <- power[(power$Date == "2007-2-1" | power$Date == "2007-2-2"), ]
# Make sure Global_active_power is numeric.
feb$Global_active_power <- as.numeric(as.character(feb$Global_active_power))
# Combine Date and Time into a single POSIX timestamp.
feb$DateTime <- strptime(paste(feb$Date, feb$Time), "%Y-%m-%d %H:%M:%S")
# Render the figure to a 480x480 PNG.
png(filename = 'plot3.png', width = 480, height = 480, units = 'px')
# Sub_metering_1 in black, then overlay meters 2 (red) and 3 (blue).
plot(feb$DateTime, as.numeric(as.character(feb$Sub_metering_1)), type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(feb$DateTime, feb$Sub_metering_2, type = "l", col = "red")
lines(feb$DateTime, feb$Sub_metering_3, type = "l", col = "blue")
# Legend uses the sub-metering column names (columns 7-9).
legend("topright", legend = names(feb[7:9]), lty = 1,
       col = c("black", "red", "blue"))
dev.off()
9e22c8af7d207ff29339e18a2a007a51b41c65d0 | 0872b355956686bd4e68384d3b83f342ec02bd31 | /R/patientView/db.R | 4f01c95c24caa32cea2d71a3144772b322721808 | [] | no_license | smartscalpel/MSui | ff211af5a0f8eb54cf9bcf488e81a8b06f11e74f | 8de2d77ecd368834c50ec717137f28569d025ac0 | refs/heads/master | 2021-06-08T11:49:55.908612 | 2019-07-16T11:44:42 | 2019-07-16T11:44:42 | 161,582,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,827 | r | db.R | library(pool)
library(rlang)
library(pander)
library(MonetDBLite)
# Shared connection pool to the embedded MonetDBLite 'msinvent' database,
# used by every query helper below.
# NOTE(review): credentials are hard-coded; fine for an embedded local DB,
# but confirm this never points at a shared server.
pool <- dbPool(MonetDBLite::MonetDB(),
               dbname = 'msinvent',
               user='msinvent',password='msinvent')
#### Tissue part #########
# Fetch a whole table/view from the database as an in-memory data frame.
#
# con     : a dplyr/dbplyr-compatible connection (or pool).
# tabName : name of the table or view to fetch.
#
# BUG FIX: `tabName` was previously ignored and the 'patisue' view was always
# returned.  The argument is now honoured, with 'patisue' kept as the default
# so existing callers behave exactly as before.
getTissueTable<-function(con,tabName='patisue'){
  as_data_frame(con %>% tbl(tabName))
}
# Build a one-row "blank" tissue record used to seed new-entry forms.
# Sentinel values: -1 for numeric id/year/age fields, "" for text fields.
# stringsAsFactors = FALSE keeps the text columns as character on every R
# version (before R 4.0 they would silently become factors).
makeNewTissue<-function(){
  data.frame(id=-1,emsid="",yob=-1,
             age=-1,sex="",label="",location="",
             diagnosis="",grade="",coords="",dt="",
             stringsAsFactors = FALSE
  )
}
# Debug/trace helper: prints the record name plus pander-rendered previews of
# the old and new tissue data to stdout.
# NOTE(review): nothing is persisted yet -- `con` is unused; presumably the
# real UPDATE statement is still to be written.
updateTissue<-function(con,name,olddata,newdata){
  cat(name,'\n',pander(head(olddata),caption = "old"),'\n=====\n',pander(head(newdata),caption = "new"),'\n====\n')
}
#### Patient part #########
# Fetch a whole table from the database as an in-memory data frame.
#
# con     : a dplyr/dbplyr-compatible connection (or pool).
# tabName : name of the table to fetch.
#
# BUG FIX: `tabName` was previously ignored and the 'patient' table was
# always returned.  The argument is now honoured, with 'patient' kept as the
# default so existing callers behave exactly as before.
getPatientTable<-function(con,tabName='patient'){
  as_data_frame(con %>% tbl(tabName))
}
# Build a one-row "blank" patient record used to seed new-entry forms.
# Sentinel values: -1 for numeric fields, "" for text fields.
# stringsAsFactors = FALSE keeps text columns as character on every R
# version (before R 4.0 they would silently become factors).
makeNewPatient<-function(){
  data.frame(id=-1,emsid="",yob=-1,
             age=-1,sex="",
             stringsAsFactors = FALSE)
}
# Debug/trace helper: prints the record name plus pander-rendered previews of
# the old and new patient data to stdout.
# NOTE(review): nothing is persisted yet -- `con` is unused; same stub style
# as updateTissue() above.
updatePatient<-function(con,name,olddata,newdata){
  cat(name,'\n',pander(head(olddata),caption = "old"),'\n=====\n',pander(head(newdata),caption = "new"),'\n====\n')
}
# TODO: unimplemented stub -- intended to insert a new tissue record via `con`.
insertTissue<-function(con,data){
}
# ---- SQL templates -----------------------------------------------------------
# Each statement is assembled with paste0(), so every fragment must end with a
# space before the next clause begins.  Several fragments were previously
# missing that space (producing invalid SQL such as "? and ?group by" or
# "?and intensity"); fixed below.  '?' placeholders are bound positionally by
# dbGetQuery().

# Total ion current (TIC) per retention time for every spectrum (rt <= 600 s).
sqlTICall<-paste0('select rt,sum(intensity) as tic,spectrid ',
                  'from peak ',
                  'where rt<= 600 ',
                  'group by spectrid,rt ',
                  'order by spectrid,rt')
# TIC for a contiguous range of spectrum ids (placeholders: low id, high id).
sqlTICset<-paste0('select rt,sum(intensity) as tic,spectrid ',
                  'from peak ',
                  'where rt<= 600 and spectrid between ? and ? ',
                  'group by spectrid,rt ',
                  'order by spectrid,rt')
# TIC for a spectrum-id range restricted to an m/z window
# (placeholders: low id, high id, low mz, high mz).
sqlTICsetMZ<-paste0('select rt,sum(intensity) as tic,spectrid ',
                    'from peak ',
                    'where rt<= 600 and spectrid between ? and ? ',
                    'and mz between ? and ? ',
                    'group by spectrid,rt ',
                    'order by spectrid,rt')
# TIC for a single spectrum (placeholder: spectrum id).
sqlTICone<-paste0('select rt,sum(intensity) as tic,spectrid ',
                  'from peak ',
                  'where rt<= 600 and spectrid=? ',
                  'group by spectrid,rt ',
                  'order by spectrid,rt')
# All peaks of one spectrum (placeholder: spectrum id).
sqlGetMZdata<-paste0('select id, mz,rt,scan,intensity,spectrid ',
                     'from peak ',
                     'where spectrid=? ')
# Peaks of one spectrum within an m/z window and above an intensity floor
# (placeholders: spectrum id, low mz, high mz, minimum intensity).
sqlGetMZdataRange<-paste0('select id, mz,rt,scan,intensity,spectrid ',
                          'from peak ',
                          'where spectrid=? ',
                          'and mz between ? and ? ',
                          'and intensity >= ?')
# Peaks for a range of spectrum ids (placeholders: low id, high id).
# (The original source defined this constant twice with identical text; one
# definition is sufficient.)
sqlGetMZset<-paste0('select id, mz,rt,scan,intensity,spectrid ',
                    'from peak ',
                    'where spectrid between ? and ? ')
# All spectra metadata.
sqlSpectra<-'select * from spectra '
# Pull the full `spectra` table into a data.table.
# Side effect: prints the query's elapsed-time vector (from system.time) to
# stdout via cat.
# NOTE(review): getCon() is defined elsewhere in the project -- presumably it
# resolves a pool handle into a usable DBI connection; confirm.
getSpectra<-function(con){
  con<-getCon(con)
  # system.time() both times the query and yields the timing vector cat prints.
  cat(system.time(p<-
                    data.table(
                      dbGetQuery(con,
                                 sqlSpectra))
  ),
  '\n')
  return(p)
}
# Fetch the peaks of one spectrum within an m/z window, then derive two
# columns in place on the data.table:
#   bin   : index of each peak's 0.01-wide m/z bin (via findInterval)
#   rcomp : per-spectrum value from rcomp() -- rcomp() is defined elsewhere;
#           presumably it rescales intensities to a 1e6 total; confirm.
# Side effects: prints query timing and the result's dimensions to stdout.
#
# con       : connection (resolved through getCon(), defined elsewhere)
# spID      : spectrum id bound to the '?' in sqlGetMZdataRange
# mzRange   : length-2 numeric, inclusive m/z window (default 0..5000)
# threshold : minimum intensity to keep (default 1e2)
getMZ<-function(con,spID,mzRange=c(0,5000),threshold=1e2){
  con<-getCon(con)
  cat(system.time(p<-
                    data.table(
                      dbGetQuery(con,
                                 sqlGetMZdataRange,
                                 spID,
                                 mzRange[1],
                                 mzRange[2],
                                 threshold))
  ),
  '\n')
  cat(dim(p),'\n')
  # 0.01-wide m/z bins spanning the observed m/z range.
  binz<-seq(min(p$mz),max(p$mz),by=0.01)
  p[,bin:=findInterval(mz, binz)]
  p[,rcomp:=rcomp(intensity,total=1e6),by=.(spectrid)]
  return(p)
}
# Views for patient
#create view patisue as select t.id,emsid,yob,age,sex,label,location,d.name as diagnosis,grade,coords,dt from ms.Patient p join ms.Tissue t on p.id=t.patientid join ms.diagnosis d on t.diagnosis=d.id;
|
4d0bd0e29e1fb9c4947b1cc1532a96c3f0c42c0e | 8a780feb331e64a7aa098ea00ff15e572b831ad9 | /R/pie.gg.R | 2393e7b957ef68c2aab5e909094650b017466325 | [] | no_license | kwlee58/Poll_1987_kr | 53f9fad18f9a86772d2f87ff498629835499ee30 | bfe963a1c3a95e757eba51bede9b9cc098b46164 | refs/heads/master | 2022-10-26T16:27:45.695526 | 2022-10-14T16:58:49 | 2022-10-14T16:58:49 | 43,960,444 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,249 | r | pie.gg.R | pie.gg <-
function(df, ggtitle = "", font.family = ""){
  # Draw a pie chart of df$Freq split by df$vote, stacked clockwise from the
  # top.  Each slice is labelled "<vote level>\n<formatted count>".
  # `ggtitle` sets the plot title; `font.family` is applied to all text.
  slice.count <- length(names(df))   # number of palette colours
  stack.tops <- cumsum(df$Freq)      # running totals -> label y positions
  slice.labs <- paste(levels(df$vote), format(df$Freq, big.mark = ","),
                      sep = "\n")
  ggplot(df, aes(x = "", y = Freq, fill = vote)) +
    geom_bar(width = 1,
             stat = "identity",
             position = position_stack(reverse = TRUE)) +
    coord_polar(theta = "y",
                start = 3 * pi / 2,
                direction = -1) +
    scale_y_continuous(name = "", breaks = NULL) +
    scale_x_discrete(name = "") +
    scale_fill_manual(name = "",
                      values = rainbow(slice.count)[slice.count:1]) +
    theme_void(base_family = font.family) +
    guides(fill = "none") +
    geom_text(aes(y = stack.tops / 2),
              label = slice.labs,
              family = font.family,
              position = position_stack(reverse = TRUE)) +
    ggtitle(ggtitle) +
    theme(plot.margin = unit(c(1, 1, 1.5, 1), "lines"),
          plot.title = element_text(hjust = 0.5))
}
|
2a890fc570224f8945d5c42b734231b9cac67615 | e20ae46b59c09099a3b49e1ea018dfc855c11541 | /R/k-circles.R | c77a412769f108a6cce8f0acf770a9e7283fcdc6 | [] | no_license | Daenecompass/eliter | a081eb7f89ab34e9f2e34c3937e791c60a776873 | 884885a2e747e6cbb9560d4b11851ddc4e7c640b | refs/heads/master | 2023-03-17T18:19:04.883599 | 2020-11-16T21:25:50 | 2020-11-16T21:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,053 | r | k-circles.R |
#' Minimal members decomposition
#'
#' @param incidence a sparse incidence matrix
#' @param minimum.memberships the minimum number of memberships for the individuals (rows)
#'
#' @return a list of incidence matrices
#' @export
#'
#' @examples
#' data(den)
#' den <- den[den$SOURCE != "Events",]
#' incidence <- xtabs(~NAME + AFFILIATION, droplevels(den), sparse = TRUE)
#' l.inc <- minimal.members.decomposition(incidence, 3)
#' level.membership(l.inc)
#' l.inc[[5]] %>% colSums() %>% sort() %>% as.matrix()
#' l.inc[[5]] %>% rowSums() %>% sort() %>% as.matrix()
# NOTE(review): this function is an older, near-identical twin of k.circles()
# below (same algorithm, but returns a plain list without a class tag).
minimal.members.decomposition <- function(incidence, minimum.memberships = 3, check.for.nested = TRUE){
  ##############
  # Tests
  # Is it a sparse matrix?
  if(!inherits(incidence, "dgCMatrix")) stop("incidence has to be a sparse matrix of class dgCMatrix. With xtabs you can set sparse to TRUE and get a valid matrix.")
  # Check for multiple memberships and odd values
  if(any(incidence@x != 1)) warning("incidence has values other than 1 and . (the sparse version of 0). Try table(incidence@x) to see them.")
  # Backup of the incidence matrix
  inc <- incidence
  # k is the minimum number of members per affiliation (column)
  k <- 1
  # l.inc is a list of incidence matrices, one per level k
  l.inc <- list()
  # j is the minimum number of memberships for any individual (row).
  # level.up prunes rows/columns until every row has >= j memberships and
  # every column has >= k members, optionally merging nested affiliations.
  level.up <- function(inc, k, j = 3, check.for.nested = TRUE){
    test.mat <- function(inc, j, k){
      cond <- !is.null(dim(inc))
      if(cond){
        cond <- any(
          c(
            any(Matrix::rowSums(inc) < j), # Is there any individuals with less than j positions
            any(Matrix::colSums(inc) < k) # Is there any affiliations with less than k members
          ))
      }
      cond
    }
    # Levelling up
    while(test.mat(inc, j, k)){
      # Removing rows
      inc.t <- inc[Matrix::rowSums(inc) >= j, ]
      if(is.null(dim(inc.t))) break
      inc <- inc.t # Keep only those members with j or more positions
      # Removing columns
      inc.t <- inc[, Matrix::colSums(inc) >= k] # Keep only those affiliations with more than k members
      if(is.null(dim(inc.t))) break
      inc <- inc.t
      # Merging overlapping affiliations
      if(identical(check.for.nested, TRUE)){
        inc <- merge.perfect.overlap(inc, combine.labels = "&")
      }
    }
    inc
  }
  # Elementwise `&` is used here on two scalars; `&&` would be the
  # conventional scalar operator, behaviour is the same for length-1 inputs.
  while(
    k <= suppressWarnings(min(Matrix::colSums(inc))) & ncol(inc) > minimum.memberships # While k is smaller than the lowest number of members and the number of affiliations is larger than the minimum number of memberships
  ){
    k <- k + 1
    tmp <- level.up(inc, k, j = minimum.memberships, check.for.nested = check.for.nested)
    inc <- tmp
    #if(identical(duplicate.check, TRUE)) inc <- unique.matrix(inc, MARGIN = 2)
    l.inc[[k]] <- inc
  }
  # It gives us an annoying warning because level.up doesn't use a proper test of whether the inc is valid for further operation
  # compact() (purrr) drops the NULL slots left by assigning into l.inc[[k]].
  l.inc <- c(incidence, l.inc)
  compact(l.inc)
}
#' K-circles decomposition
#'
#' @param incidence a sparse incidence matrix
#' @param minimum.memberships the minimum number of memberships for the individuals (rows)
#'
#' @return an object of class "k.circles"
#' @export
#'
#' @examples
#' data(den)
#' den <- den[den$SOURCE != "Events",]
#' incidence <- xtabs(~NAME + AFFILIATION, droplevels(den), sparse = TRUE)
#' l.inc <- k.circles(incidence, 3, check.for.nested = TRUE)
#' level.membership(l.inc)
#' l.inc[[5]] %>% colSums() %>% sort() %>% as.matrix()
#' l.inc[[5]] %>% rowSums() %>% sort() %>% as.matrix()
# k-circles decomposition: iteratively prune the bipartite incidence matrix so
# that at level k every affiliation (column) has at least k members and every
# individual (row) keeps at least `minimum.memberships` positions.  Returns
# the list of pruned incidence matrices, classed "k.circles".
k.circles <- function(incidence, minimum.memberships = 3, check.for.nested = TRUE){
  ##############
  # Input checks
  # Is it a sparse matrix?
  if(!inherits(incidence, "dgCMatrix")) stop("incidence has to be a sparse matrix of class dgCMatrix. With xtabs you can set sparse to TRUE and get a valid matrix.")
  # Check for multiple memberships and odd values
  if(any(incidence@x != 1)) warning("incidence has values other than 1 and . (the sparse version of 0). Try table(incidence@x) to see them.")
  # Backup of the incidence matrix
  inc <- incidence
  # k is the minimum number of members per affiliation (column)
  k <- 1
  # l.inc accumulates one pruned incidence matrix per level k
  l.inc <- list()
  # level.up prunes rows/columns until every row has >= j memberships and
  # every column has >= k members, optionally merging nested affiliations.
  level.up <- function(inc, k, j = 3, check.for.nested = TRUE){
    test.mat <- function(inc, j, k){
      cond <- !is.null(dim(inc))
      if(cond){
        cond <- any(
          c(
            any(Matrix::rowSums(inc) < j), # any individuals with fewer than j positions?
            any(Matrix::colSums(inc) < k) # any affiliations with fewer than k members?
          ))
      }
      cond
    }
    # Levelling up
    while(test.mat(inc, j, k)){
      # Removing rows
      inc.t <- inc[Matrix::rowSums(inc) >= j, ]
      if(is.null(dim(inc.t))) break
      inc <- inc.t # Keep only those members with j or more positions
      # Removing columns
      inc.t <- inc[, Matrix::colSums(inc) >= k] # Keep only those affiliations with at least k members
      if(is.null(dim(inc.t))) break
      inc <- inc.t
      # Merging perfectly overlapping (nested) affiliations
      if(identical(check.for.nested, TRUE)){
        inc <- merge.perfect.overlap(inc, combine.labels = "&")
      }
    }
    inc
  }
  while(
    k <= suppressWarnings(min(Matrix::colSums(inc))) & ncol(inc) > minimum.memberships # While k is smaller than the lowest number of members and the number of affiliations is larger than the minimum number of memberships
  ){
    k <- k + 1
    tmp <- level.up(inc, k, j = minimum.memberships, check.for.nested = check.for.nested)
    inc <- tmp
    #if(identical(duplicate.check, TRUE)) inc <- unique.matrix(inc, MARGIN = 2)
    l.inc[[k]] <- inc
  }
  # level.up can emit a warning here because it does not fully validate inc
  # before the final min()/ncol() test.
  # Clean up and class
  l.inc <- c(incidence, l.inc)
  l.inc <- compact(l.inc)
  # BUG FIX: the class tag was previously "k.circle" (singular), which matched
  # neither the documented return class nor the print.k.circles() S3 method,
  # so method dispatch never fired.
  class(l.inc) <- append("k.circles", class(l.inc))
  l.inc
}
merge.perfect.overlap <- function(incidence, combine.labels = "&"){
  # This functions throws an error if any of the affiliations are empty
  # Goal: Merge perfectly overlapping affiliations
  # Combine their labels and remove one of the columns.
  # They merge into either to largest affiliation or to the first in the order
  # It is run when the incidence has been pruned. So affiliations with just a single member will have disappeared
  # The merged affiliation will have all its values set to 0
  # adj[i, j] = number of members affiliations i and j share.
  adj <- Matrix::crossprod(incidence)
  affil.members <- Matrix::diag(adj)
  names(affil.members) <- rownames(adj)
  # adj.s[i, j] == 1 exactly when affiliation j contains all of i's members.
  adj.s <- adj / affil.members
  diag(adj.s) <- 0
  merge.ind <- Matrix::which(adj.s == 1, arr.ind = TRUE) %>% as_tibble() # Row and column indices
  s <- merge.ind %>% apply(1, sort) %>% t()
  if(nrow(s) > 1) merge.ind <- merge.ind %>% filter(!duplicated(s)) # Check if two of equal size are there.
  s <- merge.ind$col %in% merge.ind$row # col must not appear in row - we must never delete a column that something else has already been merged into.
  merge.ind <- merge.ind %>% filter(!s)
  if(nrow(merge.ind) == 0) return(incidence)
  # Unless label combining is disabled, splice the absorbed affiliation's name
  # onto the surviving column's name, separated by `combine.labels`.
  if(identical(combine.labels, FALSE) == FALSE){
    for(i in 1:nrow(merge.ind)){
      cr <- merge.ind$row[i]
      cc <- merge.ind$col[i]
      label <- paste(colnames(incidence)[cc], combine.labels, colnames(incidence)[cr])
      colnames(incidence)[cc] <- label
    }
  }
  # Zero out the absorbed columns and drop the explicit zeros from the
  # sparse representation.
  incidence[, merge.ind$row] <- 0
  drop0(incidence)
}
#' Level membership from minimal membership decomposition
#'
#' @param l.inc a list of nested incidence matrices
#'
#' @return a tibble with rownames and level membership
#' @export
#'
#' @examples
# For each individual (mode = "ind"), affiliation (mode = "affil"), or both
# (mode = "two-mode"), report the deepest decomposition level it survives to.
#
# l.inc  : list of nested incidence matrices (from k.circles() /
#          minimal.members.decomposition())
# mode   : which membership table to return
# levels : subset of levels to consider (defaults to all)
level.membership <- function(l.inc, mode = c("ind", "affil", "two-mode"), levels = seq_along(l.inc)){
  # When we merge affiliations the naming of the affil and two-mode output
  # becomes more complicated (merged labels contain "&").
  mode <- match.arg(mode)
  l.inc <- l.inc[levels]
  l <- length(l.inc)
  # Membership for individuals: highest level at which the row name survives.
  membership <- map(l.inc, rownames) %>% imap(~ tibble(Name = .x, Level = .y)) %>%
    bind_rows() %>% arrange(Name)
  mem <- membership %>% group_by(Name) %>% summarise(Level = max(Level))
  mem.ind <- tibble(Name = rownames(l.inc[[1]])) %>% left_join(., mem, by = "Name")
  if(mode == "ind") return(mem.ind)
  # Membership for affiliations: highest level at which the affiliation still
  # has at least `level` members among the surviving individuals.
  inc <- l.inc[[1]]
  f <- function(x, inc) Matrix::colSums(inc[x,]) %>% as_tibble(rownames = "Name")
  level.mem <- map(l.inc, rownames) %>% map(., f, inc = inc) %>% set_names(1:l) %>% bind_rows(.id = "level")
  level.mem <- level.mem %>% mutate(level = as.numeric(level)) %>% filter(value >= level)
  level.mem <- level.mem %>% group_by(Name) %>% summarise(Level = max(level))
  mem.affil <- tibble(Name = colnames(inc)) %>% left_join(., level.mem, by = "Name")
  # BUG FIX: this branch previously returned mem.ind (the individuals table)
  # instead of the affiliation table it had just computed.
  if(mode == "affil") return(mem.affil)
  # Membership for two-mode
  # We assume that igraph and friends sort vertices first by rows, then columns.
  mem.two <- bind_rows(mem.ind, mem.affil, .id = "type")
  if(mode == "two-mode") return(mem.two)
}
# Summarise each decomposition level: returns the mean degree of the
# individual-mode (first bipartite) projection at every level.
# NOTE(review): the `map_dbl(l.cl, "no")` line computes the number of graph
# components per level but its result is discarded -- presumably it was meant
# to be returned alongside the mean degrees; confirm intent.
level.summary <- function(l.inc){
  l.inc <- compact(l.inc)
  l.g <- map(l.inc, ~graph_from_incidence_matrix(incidence = .x))
  l.cl <- map(l.g, clusters)
  map_dbl(l.cl, "no")
  l.g %>% map(~bipartite.projection(.x)[[1]]) %>% map(degree) %>% map_dbl(mean)
}
# S3 print method for "k.circles" objects (as returned by k.circles()).
#
# BUG FIX: the old body was just `x`, which displayed nothing -- a print
# method's return value is not auto-printed (and the class tag set in
# k.circles() was "k.circle", singular, so dispatch never reached it anyway).
# Now prints a one-line summary and returns the object invisibly, per the
# print() convention.
print.k.circles <- function(x, ...) {
  cat("k-circles decomposition:", length(x), "level(s)\n")
  invisible(x)
}
|
02b074d7abc18431159898eaba56f6861b8fe419 | 84e5f53dbd80c43ed1ae0e1a822761f7ca4fa9e8 | /R/plotPerms.R | d17a3b145b1cf4c47569ccd1b43fa8cde3404b37 | [] | no_license | vjcitn/bceQTL | d9b21887af6c7ca0f331c3372cb965ca161a6d46 | 49c018ab6f632d41a23c5a0b2d2a273719bec089 | refs/heads/master | 2021-05-11T17:04:23.347947 | 2018-01-17T05:46:52 | 2018-01-17T05:46:52 | 117,783,319 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 866 | r | plotPerms.R |
# Scatter plot comparing limma-based FDR estimates (x) against permutation-
# based FDR from 501 permutations (y) on log-log axes, labelling the SNPs
# whose permutation FDR exceeds 0.1.  Relies on the packaged `allpermFDR`
# data set, which carries columns limfdr, permfdr, type and snp.
plotPerms = function() {
 data(allpermFDR)
 # Floor near-zero permutation FDRs so they stay visible on a log axis.
 allpermFDR[which(allpermFDR$permfdr < 1e-6), "permfdr"] = 5e-3 # don't let 0 permutation FDR spoil log-log
 with(allpermFDR, plot(limfdr, permfdr, log="xy", xlim=c(1e-4,.15),
  xlab="FDR by limma", ylab="FDR by 501 permutations of adjusted expr. against genotype", col=factor(type), pch=19, axes=FALSE))
 axis(2)
 # Custom x-axis ticks with plain-decimal labels instead of scientific notation.
 axis(1, at=c(1e-4, 5e-4, 5e-3, 5e-2, 1e-1), labels=c(".0001", ".0005", ".005", ".05", ".1"))
 # Identity line: points above it have permutation FDR exceeding limma FDR.
 abline(0,1)
 abline(h=.1, lty=2)
 # Label the discordant SNPs (permutation FDR > 0.1), offsetting the text
 # more for the worst offenders so labels do not overlap their points.
 bi= which(allpermFDR$permfdr>.1)
 bad = allpermFDR[bi,]
 with(bad[bad$permfdr>.2,], text(limfdr, permfdr+.04, as.character(snp), cex=.6))
 with(bad[bad$permfdr < .2 & bad$permfdr > .1,], text(limfdr, permfdr+.01, as.character(snp), cex=.6))
 legend(1e-4, .25, pch=19, legend=levels(factor(allpermFDR$type)), col=
 as.numeric(factor(levels(factor(allpermFDR$type)))))
 abline(v=.1, lty=2)
}
|
7baaed13ddfd263ded7d6a8fa0b004e791d4312f | 1e49bd4391160a2504422750ba401bb90a019fef | /Workshop_Scripts/Workshop_5-Data_IO_and_Packages.R | 2eae2a64fadb25dd66c39fb8589e5d894062f799 | [] | no_license | asunghong/SoDA-Workshop-Series-Introduction-to-Data-Science | 54d62c049409d268f0c99aff28425ddfc950ef16 | f0de55d12c76866513a7af64194c783de449cae5 | refs/heads/master | 2021-01-24T01:00:28.688914 | 2018-02-17T01:32:48 | 2018-02-17T01:32:48 | 122,788,956 | 1 | 0 | null | 2018-02-24T23:25:17 | 2018-02-24T23:25:17 | null | UTF-8 | R | false | false | 5,040 | r | Workshop_5-Data_IO_and_Packages.R | #### Data I/O and Packages ####
# In this workshop, we are going to write our school children data to a .csv file
# and then read the data back in to another R object. We are also going to learn
# how to save R objects in R's native binary format (very space efficient).
### Setting Your Working Directory ###
# The easiest way to set your working directory is to go to:
# Session -> Set Working Directory -> Choose Directory... and select the folder
# where you would like to set it. This will look different on each computer, so
# you will need to follow the directions for your computer. For example, if I
# wanted to set my working directory to my "Desktop" folder on my computer, I
# could follow the instructions above, set my working directory to the desktop
# folder, and RStudio would enter the command in my R Console. For me, this
# would look like:
setwd("~/Desktop")
# If I wanted to save some data using the save() function, this is where it
# would go:
my_vec <- 1:1000
save(my_vec, file = "my_vec.RData")
# Your working directory is also the place where R goes to look for data when
# you try to load it in. If I were to now clear my Environment and then try to
# load in the data, we see that it would work. If I were to do the same thing
# but now change my working directory, it would not work:
# Works!
rm(list = ls())
load("my_vec.RData")
# Does not work!
rm(list = ls())
# set my working directory somewhere else:
setwd("~/Dropbox")
# Trying to load in the data fails:
load("my_vec.RData")
# Try setting your working directory to a folder that does not exist:
setwd("~/Desktop/sdkfhskdhfllgkjsddf")
### Working With .csv Files Using Base R ###
# Create some fake data!
student_id <- c(1:10)
grades <- c("A","B","C","A","C","F","D","B","B","A")
class <- c(rep(0,times = 5),rep(1,times = 5))
free_lunch <- rep(TRUE,times = 10)
# Put it in a data.frame
my_data <- data.frame(student_id,
                      grades,
                      class,
                      free_lunch,
                      stringsAsFactors = FALSE)
# Set column and row names
colnames(my_data) <- c("Student_ID", "Grades","Class","Free_Lunch")
rownames(my_data) <- LETTERS[11:20]
# We make use of the 'write.csv()' function here. Make sure you do not write row
# names, this can really mess things up as it adds an additional column and is
# generally confusing:
write.csv(x = my_data,
          file = "school_data.csv",
          row.names = FALSE)
# Now we are going to read the data back in from the .csv file we just created.
# You should make sure that you specify the correct separator (the 'write.csv()'
# function defaults to using comma separation). I also always specify
# 'stringsAsFactors = FALSE' to preserve any genuine string variables I read in.
school_data <- read.csv(file = "school_data.csv",
                        stringsAsFactors = FALSE, # Always!!!
                        sep = ",")
## Other Data Formats ##
# We will need to load a package in order to read in excel data. This will
# extend the usefulness of R so that we can now read in .xlsx files among other
# types.
# First we need to download the 'rio' package, we can either do this manually
# or by using the package manager in base R. You can check this package out by
# visiting the development Github page: https://github.com/leeper/rio. You need
# to make sure you select 'dependencies = TRUE' so that you download the other
# packages that your package depends on, otherwise it will not work! Here is the
# manual way of installing an R package (you only need to install a package
# once per machine):
install.packages("rio", dependencies = TRUE)
# Now we have to actually load the package so we can use it. We do this using
# the library() command:
library(rio)
# Write our school children data to an .xlsx file:
export(my_data, "school_data.xlsx")
# Now we can read in our data from the excel file:
excel_school_data <- import("school_data.xlsx")
# We can do the same thing for Stata .dta files:
# Write data to a .dta file:
export(my_data, "school_data.dta")
# Then read it back in:
stata_school_data <- import("school_data.dta")
## RData files ##
# Finally we may want to read and write our data to an .RData file that can hold
# everything in our workspace, or just a single variable. This is a very good
# strategy for saving all of your files after a day of working so you can pick
# back up where you left off:
# Save one object:
save(my_data, file = "Object.RData")
# Save just a few objects:
save(list = c("my_data", "school_data"), file = "Two_objects.RData")
# Save your whole workspace (every object returned by ls()):
save(list = ls(), file = "MyData.RData")
# Now let's test it out by clearing our whole workspace:
rm(list = ls())
# Now we can load the data back in! It is good practice to set our working
# directory again first (remember to change this to the folder location where
# you downloaded the workshop materials or saved this script file!):
setwd("~/Desktop")
# Load in the two objects
load(file = "Two_objects.RData")
# Load in everything
load(file = "MyData.RData")
|
09d1f289d3dae5949d4567843869ff0c50b41d05 | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/diepoxybutane.R | e3b5856833cc373240793c5119ab886e875d3d7d | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 234 | r | diepoxybutane.R | library("knitr")
library("rgl")
# Older two-step render pipeline, kept for reference.
# NOTE(review): "use_xhml" below looks like a typo for the markdownToHTML
# option "use_xhtml" -- worth fixing if this path is ever revived.
#knit("diepoxybutane.Rmd")
#markdownToHTML('diepoxybutane.md', 'diepoxybutane.html', options=c("use_xhml"))
#system("pandoc -s diepoxybutane.html -o diepoxybutane.pdf")
# One-step render of the R Markdown document straight to HTML.
knit2html('diepoxybutane.Rmd')
|
f50b620ec3159791b0c6a96e21493bfa95f9d476 | a0b699fc45352735746a0da7db60b3e5be0a3820 | /00 Doc/R_ETL.project3.R | a05945e1bafd41409e31722e3f0f0789f24e4b2f | [] | no_license | Dingkzhang/s17dvproject4-dvproject-4-crider-gonzales-zhang | c782881064b478f6da3eae7ccd374cef3c852ff6 | 2435644a5fbc131a52bccb3a13b5acea3a96e352 | refs/heads/master | 2021-01-23T06:50:22.209327 | 2017-03-28T02:42:30 | 2017-03-28T02:42:30 | 86,404,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,913 | r | R_ETL.project3.R | require(readr)
#-----------------------FILE ONE------------------------------------------
# ETL for the confidence/home-stats extract: read the raw CSV, strip
# non-ASCII characters everywhere, clean the text (dimension) columns,
# coerce the numeric (measure) columns, then write the cleaned CSV next to
# the original with the "PreETL_" prefix dropped.
file_path = "../01 Data/PreETL_ConfidenceHomeStats.csv"
chs <- readr::read_csv(file_path)
names(chs)
df <- chs
names(df)
str(df) # Inspect column types here when choosing the list of measures below.
# Numeric measure columns; every other column is treated as a dimension.
measures <- c("Home_Sales_Avg", "Year_Avgs", "Home_Price_Avg", "CCI_AVG")
dimensions <- setdiff(names(df), measures)
dimensions
# Get rid of special (non-printable-ASCII) characters in each column.
# Google "ASCII table" to understand the "[^ -~]" pattern.
for(n in names(df)) {
  df[n] <- data.frame(lapply(df[n], gsub, pattern="[^ -~]",replacement= ""))
}
str(df)
# Helper: replace NA with the empty string (for dimension columns).
na2emptyString <- function (x) {
  x[is.na(x)] <- ""
  return(x)
}
# Clean the dimension (text) columns.
if( length(dimensions) > 0) {
  for(d in dimensions) {
    # Change NA to the empty string.
    df[d] <- data.frame(lapply(df[d], na2emptyString))
    # Get rid of " and ' in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern="[\"']",replacement= ""))
    # Change & to and in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern="&",replacement= " and "))
    # Change : to ; in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern=":",replacement= ";"))
  }
}
# Helper: replace NA with 0 (for measure columns).
na2zero <- function (x) {
  x[is.na(x)] <- 0
  return(x)
}
# Keep only digits, '-' and '.' in measures, change NA to 0, and coerce back
# to numeric (gsub turns the columns into strings).
# BUG FIX: this loop previously iterated over `dimensions` (and required
# length > 1), which wiped the text columns and never cleaned the measures;
# it now iterates over `measures` as the comments always intended.
if( length(measures) > 0) {
  for(m in measures) {
    print(m)
    df[m] <- data.frame(lapply(df[m], gsub, pattern="[^--.0-9]",replacement= ""))
    df[m] <- data.frame(lapply(df[m], na2zero))
    df[m] <- lapply(df[m], function(x) as.numeric(as.character(x)))
    # The lapply above is needed to turn measures back to numeric because gsub
    # turns them into strings.
  }
}
str(df)
write.csv(df, gsub("PreETL_", "", file_path), row.names=FALSE, na = "")
#-----------------------FILE TWO----------------------------------------
file_path = "../01 Data/PreETL_KEI_Avg.csv"
kavg <- readr::read_csv(file_path)
names(kavg)
df <- kavg
names(df)
str(df) # Uncomment this line and run just the lines to here to get column types to use for getting the list of measures.
# Numeric (measure) columns; every remaining column is treated as a dimension.
measures <- c("Year_Avg", "Unemployment_U_S__Avg", "CCI_US__Avg", "Consumer_Confidence_Index_US", "Unemployment_U_S_")
#measures <- NA # Do this if there are no measures.
dimensions <- setdiff(names(df), measures)
dimensions
# Get rid of special characters in each column.
# Google ASCII Table to understand the following: [^ -~] matches anything
# outside the printable ASCII range (space through tilde).
for(n in names(df)) {
df[n] <- data.frame(lapply(df[n], gsub, pattern="[^ -~]",replacement= ""))
}
str(df)
# The following is an example of dealing with special cases like making state abbreviations be all upper case.
# df["State"] <- data.frame(lapply(df["State"], toupper))
# (Duplicate example line retained from the original.)
# df["State"] <- data.frame(lapply(df["State"], toupper))
# Replace every NA in x with the empty string, leaving other values intact.
na2emptyString <- function (x) {
  replace(x, is.na(x), "")
}
# Clean the dimension (text) columns for downstream ETL.
if( length(dimensions) > 0) {
for(d in dimensions) {
# Change NA to the empty string.
df[d] <- data.frame(lapply(df[d], na2emptyString))
# Get rid of " and ' in dimensions.
df[d] <- data.frame(lapply(df[d], gsub, pattern="[\"']",replacement= ""))
# Change & to " and " in dimensions.
df[d] <- data.frame(lapply(df[d], gsub, pattern="&",replacement= " and "))
# Change : to ; in dimensions.
df[d] <- data.frame(lapply(df[d], gsub, pattern=":",replacement= ";"))
}
}
# Replace every NA in x with 0, leaving other values intact.
na2zero <- function (x) {
  replace(x, is.na(x), 0)
}
# Clean the *measure* columns: keep only digits, the minus sign and the
# decimal point, change NA to 0, and convert back to numeric.
# BUG FIX: this loop previously iterated over `dimensions`, so the measures
# were never converted (the manual Year_Avg conversion below was a
# workaround for that); it now iterates over `measures`.  The guard also
# skips the 'measures <- NA' sentinel convention used in these scripts.
if (length(measures) > 0 && !all(is.na(measures))) {
  for (m in measures) {
    print(m)
    df[m] <- data.frame(lapply(df[m], gsub, pattern="[^--.0-9]",replacement= ""))
    df[m] <- data.frame(lapply(df[m], na2zero))
    # gsub turns the values into strings, so convert measures back to numeric.
    df[m] <- lapply(df[m], function(x) as.numeric(as.character(x)))
  }
}
# Now redundant (Year_Avg is handled by the loop above), kept as a harmless
# belt-and-braces conversion.
df["Year_Avg"] <- lapply(df["Year_Avg"], function(x) as.numeric(as.character(x)))
str(df)
write.csv(df, gsub("PreETL_", "", file_path), row.names=FALSE, na = "")
#-----------------------FILE THREE----------------------------------------
file_path = "../01 Data/PreETL_unemploymentTXandUSA_Ding.csv"
ding <- readr::read_csv(file_path)
names(ding)
df <- ding
names(df)
str(df) # Uncomment this line and run just the lines to here to get column types to use for getting the list of measures.
# NOTE(review): "ddate" is listed as a measure even though the name looks
# date-like -- presumably it is a numeric encoding; confirm against the data.
measures <- c("Unemployment_TX", "Unemployment_US", "ddate")
dates <- c("date")
# The "date" column falls into `dimensions` here, so it also passes through
# the text-cleaning loop below before being parsed as a Date.
dimensions <- setdiff(names(df), measures)
dimensions
# Get rid of special characters in each column.
# Google ASCII Table to understand the following: [^ -~] matches anything
# outside the printable ASCII range (space through tilde).
for(n in names(df)) {
df[n] <- data.frame(lapply(df[n], gsub, pattern="[^ -~]",replacement= ""))
}
str(df)
# The following is an example of dealing with special cases like making state abbreviations be all upper case.
# df["State"] <- data.frame(lapply(df["State"], toupper))
# (Duplicate example line retained from the original.)
# df["State"] <- data.frame(lapply(df["State"], toupper))
# Replace every NA in x with the empty string, leaving other values intact.
na2emptyString <- function (x) {
  replace(x, is.na(x), "")
}
# Clean the dimension (text) columns for downstream ETL.
if( length(dimensions) > 0) {
for(d in dimensions) {
# Change NA to the empty string.
df[d] <- data.frame(lapply(df[d], na2emptyString))
# Get rid of " and ' in dimensions.
df[d] <- data.frame(lapply(df[d], gsub, pattern="[\"']",replacement= ""))
# Change & to " and " in dimensions.
df[d] <- data.frame(lapply(df[d], gsub, pattern="&",replacement= " and "))
# Change : to ; in dimensions.
df[d] <- data.frame(lapply(df[d], gsub, pattern=":",replacement= ";"))
}
}
# Replace every NA in x with 0, leaving other values intact.
na2zero <- function (x) {
  replace(x, is.na(x), 0)
}
# Clean the measure columns: keep only digits, the minus sign and the decimal
# point, change NA to 0, and convert back to numeric.
# NOTE(review): the `> 1` guard skips the case of exactly one measure --
# presumably it protects the 'measures <- NA' sentinel convention used in
# the earlier sections; confirm this is intentional.
if( length(measures) > 1) {
for(m in measures) {
print(m)
df[m] <- data.frame(lapply(df[m], gsub, pattern="[^--.0-9]",replacement= ""))
df[m] <- data.frame(lapply(df[m], na2zero))
df[m] <- lapply(df[m], function(x) as.numeric(as.character(x))) # This is needed to turn measures back to numeric because gsub turns them into strings.
}
}
# Parse the date columns from m/d/Y strings into Date objects.
if( length(dates) > 0) {
for(d in dates) {
print(d)
df[d] <- data.frame(lapply(df[d],function(x) as.Date(x, "%m/%d/%Y")))
}
}
str(df)
# NOTE(review): columns are selected by position (2:5) here -- fragile if the
# input layout changes; presumably this drops an index/first column. Confirm.
d_subset = df[,c(2,3,4,5)]
write.csv(d_subset, gsub("PreETL_", "", file_path), row.names=FALSE, na = "")
|
14cc9d84cb73f725ae86c3ef3795916f50d5f7f0 | 97cecd6ed08dc44aafcf7edbe7d3cbec2ab89451 | /R/simulation.study.R | 82a62b78f17e6a87a6ca3871f1b400b4ff8c2bde | [] | no_license | Polkas/ICsims | 433d01e09933c4720df4516dfe5e72823f441e25 | 1630e7b94bd6930759329a5e4835552ad4492452 | refs/heads/master | 2020-03-27T05:37:06.417788 | 2016-01-06T18:34:51 | 2016-01-06T18:34:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,661 | r | simulation.study.R | #' Run simulations allowing for between-sample heterogeneity
#'
#' \code{simulation.study} implements a simulation framework sampling repeatedly
#' from linear regression models and GLMs, allowing for between-sample heterogeneity.
#' The purpose is to allow the study of AIC and related statistics in the
#' context of model selection, with prediction quality as target.
#'
#' @param type Character string determining what type of model to fit. At present,
#' available model types are "lm" and "glm", with the former the default.
#' @param nsims Number of simulated data sets to analyse for each sample size
#' @param nsamples Vector of integers containing the sample sizes
#' @param nX Number of "real" covariates
#' @param nZ Number of "spurious" covariates
#' @param alpha Intercept for simulation model
#' @param beta.x Either: vector of slopes for the X covariates; or a single numeric values
#' for a constant slope for all X's
#' @param meanX Either: vector of means for the X covariates; or a single numeric value
#' for a constant mean across all X's
#' @param meanZ As for meanX but for the Z covariates
#' @param XZCov Covariance matrix of the X's and Z's. Must be of dimension (nX+nZ) by
#' (nX+nZ). Ignored if \code{simulate.from.data==TRUE} or if \code{is.null(rho)==FALSE}
#' @param varmeanX Either: vector of variances for the means of the X covariates; or a
#' single numeric value for a constant mean across all X's. Non-zero
#' values will produce a different set of covariate means for each
#' individual simulated data set
#' @param varmeanZ As for varmeanX but for the Z covariates
#' @param simulate.from.data Logical. If \code{TRUE}, function takes actual covariate data to
#' use as the basis of simulations; if \code{FALSE} (the default) the function uses the
#' distributions defined by the model parameters given as input to the function
#' @param X Matrix of "real" covariates; only used if \code{simulate.from.data==TRUE}
#' @param Y Vector of "real" response variables; only used if \code{simulate.from.data==TRUE},
#' and if given the values of alpha and beta above will be ignored,
#' but instead derived from a regression model of Y against X
#' @param var.res Residual variance of the simulation model
#' @param var.RE.Intercept Random effect variance for the intercept
#' @param var.RE.X Either: vector of random effect variances for the X covariate slopes;
#' or a single numeric value for no random slopes in the X's
#' @param rho A numeric constant specifying the mean correlation between the X's and the
#' Z's
#' @param epsilon A numeric constant specifying the level of variability around the mean
#' correlation rho; note that a necessary condition is \code{max(abs(rho+epsilon))<=1}, so
#' combinations of rho and epsilon which break this constraint will cause and error (as
#' the simcor function would produce correlations outside the range [-1,1]). If not
#' supplied but \code{is.null(rho)==TRUE}, then epsilon is set to zero
#' @param corsim.var If generating the covariance matrices using rho and epsilon, we
#' to specify the variances (which otherwise are in the leading
#' diagonal of XZCov)
#' @param noise.epsilon A numeric constant used to specify whether XZCov is to vary from
#' sample to sample. Higher values indicate more variability; note that this cannot be
#' greater than 1 minus the largest absolute value of (off-diagonal) correlations in the
#' corresponding correlation matrix
#' @param step.k Numeric value of the AIC criterion in the stepwise analysis; defaults
#' to about 3.84, corresponding to a p-value of 0.05 for both adding and removing variables
#' @param keep.dredge Logical constant on whether to keep the dredge outputs
#' (\code{TRUE==yes}); required if \code{simulate.from.data==TRUE}
#' @param Xin.or.out Vector of length nX (or \code{nrow(X)}) of logicals, specifying whether
#' an X is made available as data (\code{TRUE} for yes; \code{FALSE} for no)
#' @param glm.family If a GLM is to be fitted, the error distribution must be supplied
#' (to the standard family argument to glm).
#' @param glm.offset An (optional) offset can be supplied if fitting a GLM. (Not
#' currently implemented.)
#' @param filename Character string providing the root for the output files. Intermediate files are
#' saved as "filenameX.RData" where X is an incremental count from 1 to length(nsamples). The final
#' output is in "filename.RData".
#' @param binomial.n If fitting a binomial GLM, the number of trials per sample. Must be
#' either a scalar (in which case the same number of trials are used for each sample) or a
#' vector of length nsamples. (Default is 1)
#' @return If \code{keep.dredge==FALSE} (the default), the output is a list of length equal to the length
#' of nsamples, each containing two matrices, \code{reg.bias} (prediction bias for each
#' sample) and \code{reg.rmse} (root mean square error of prediction for each sample). Each
#' of these two matrices has length \code{nsims} and four columns, corresponding to model
#' selection by AICc, AIC, BIC and stepwise regression. If \code{keep.dredge==TRUE}, then the output
#' is a list of lists, with a top level list with length equal to the length of nsamples as before, and
#' with the next level having length equal to \code{nsims}; this inner list contains the full model set
#' output from \code{dredge}, converted to a matrix for storage efficiency.
#' @export
simulation.study <- function(type="lm",nsims=1000,
                             nsamples=c(20,50,100,200,500,1000,2000,5000,10000),
                             alpha=0,beta.x=1,nX=10,nZ=5,meanX=0,meanZ=0,
                             XZCov=diag(nX+nZ),varmeanX=0,varmeanZ=0,simulate.from.data=FALSE,
                             X=NULL,Y=NULL,var.res=1,var.RE.Intercept=0,var.RE.X=0,
                             rho=NULL,epsilon=NULL,corsim.var=NULL,noise.epsilon=NULL,
                             step.k=qchisq(0.05,1,lower.tail=FALSE),keep.dredge=FALSE,
                             Xin.or.out=rep(TRUE,nX),glm.family=NULL,glm.offset=NULL,
                             binomial.n=1,filename="results"){
  ## ---- Input validation --------------------------------------------------
  if(!(type %in% c("lm","glm"))){
    stop("Error: type not recognised; should be either \"lm\" or \"glm\".")
  }
  if(type=="glm"){
    if(is.null(glm.family)){
      stop("Error: please supply \"glm.family\" to specify error distribution for GLM.")
    }
    if(!(substr(glm.family,1,3) %in% c("poi","bin"))){
      stop("Error: GLM errors currently restricted to \"binomial\" and \"poisson\"")
    }
  }
  ## Prediction RMSE on the response scale; used as the custom rank when the
  ## dredge output is kept.  (The original defined an identical copy of this
  ## function in both branches of the binomial check; one definition suffices.)
  rmse.calc <- function(model,newdata,y){
    sqrt(mean((predict(model,newdata=newdata,type="response")-y)^2))
  }
  if( type=="glm" && substr(glm.family,1,3)=="bin" ){
    ## A vector binomial.n (trials per observation) is only meaningful for a
    ## single sample size, and must match it in length.
    if(length(binomial.n)>1.5){
      if(length(nsamples)>1.5){
        stop("Error: can only run simulations for a single value of nsamples if a vector binomial.n is supplied.")
      }
      if(length(binomial.n)!=nsamples){
        stop("Error: if a vector binomial.n is supplied, it must have length nsamples")
      }
    }
  }
  ## ---- Optionally derive the simulation model from real data -------------
  if(simulate.from.data){
    if(is.null(X)){
      stop("Error: X required if simulating from data.\n")
    }
    X <- as.matrix(X)
    dimnames(X) <- NULL
    nX <- ncol(X)
    meanX <- colMeans(X)
    XZCov <- cov(X)
    corsim.var <- diag(XZCov)
    nZ <- 0
    if(!is.null(Y)){
      ## Replace alpha/beta.x with estimates from a regression of Y on X.
      if(type=="lm"){
        reg.temp <- lm(Y~X)
      }else{
        if( is.null(glm.offset) ){
          reg.temp <- glm(Y~X,family=glm.family)
        }else{
          reg.temp <- glm(Y~X,family=glm.family,offset=glm.offset)
        }
      }
      alpha <- coef(reg.temp)[1]
      beta.x <- coef(reg.temp)[-1]
    }
    keep.dredge <- TRUE
  }
  if(keep.dredge){
    dredge.out <- list()
  }
  ## ---- Covariance structure for the covariates ---------------------------
  XZCor <- cov2cor(XZCov)
  if(!is.null(rho)){
    ## Generate the correlation matrix from (rho, epsilon) instead of XZCov.
    if(is.null(epsilon)){
      epsilon <- 0
    }else{
      if(abs(epsilon+rho)>1){
        stop("Error: abs(epsilon+rho)>1\n")
      }
    }
    if(is.null(corsim.var)){
      corsim.var <- 1
    }
    if(length(corsim.var)==1){
      corsim.var <- rep(corsim.var,nX+nZ)
    }
    XZCor <- simcor(k=1,size=nX+nZ,rho=rho,epsilon=epsilon)
    ## Scale the correlation matrix back to a covariance matrix.
    XZCov <- sqrt(corsim.var)*XZCor*rep(sqrt(corsim.var),each=nX+nZ)
  }
  if(!is.null(noise.epsilon)){
    if(is.null(corsim.var)){
      corsim.var <- 1
    }
    if(length(corsim.var)==1){
      corsim.var <- rep(corsim.var,nX+nZ)
    }
    ## Cap the noise so perturbed off-diagonal correlations stay in [-1, 1].
    maxeps <- 0.9999-max(abs(XZCor*(upper.tri(XZCor)+lower.tri(XZCor))))
    noise.epsilon <- min(noise.epsilon,maxeps)
  }
  ## ---- Constants used in the simulation loops ----------------------------
  n.sample.sizes <- length(nsamples)
  criteria <- c("AICc","AIC","BIC","stepwise")
  ncriteria <- length(criteria)
  sd.res <- sqrt(var.res)
  sdmeanX <- sqrt(varmeanX)
  sdmeanZ <- sqrt(varmeanZ)
  ## Spurious covariates (Z's) have zero true effect by construction.
  beta.z <- rep(0,nZ)
  Sigma.RE.X <- var.RE.X*diag(nX)
  sd.RE.Intercept <- sqrt(var.RE.Intercept) # should perhaps be correlated with RE.X?
  if(length(beta.x)==1){
    beta.x <- rep(beta.x,nX)
  }
  results <- list()
  for(i in 1:n.sample.sizes){
    print(date())
    if(keep.dredge){
      dredge.out[[i]] <- list()
    }
    n <- nsamples[i]
    ## Recycle a scalar offset to the current sample size.
    ## BUG FIX: the original performed this check once at the top of the
    ## function, before 'n' existed, so any non-NULL glm.offset errored.
    if(!is.null(glm.offset) && length(glm.offset)!=n){
      glm.offset <- rep(glm.offset[1],n)
    }
    all.x.names <- paste("x.",1:nX,sep="")
    z.names <- paste("z.",1:nZ,sep="")
    ## Only the X's flagged by Xin.or.out are made available to the fitted models.
    x.names <- all.x.names[Xin.or.out]
    reg.bias <- array(NA,dim=c(nsims,ncriteria))
    colnames(reg.bias) <- criteria
    reg.rmse <- array(NA,dim=c(nsims,ncriteria))
    colnames(reg.rmse) <- criteria
    if(nZ > 0.5){
      reg.eqn <- as.formula(paste("y",paste(paste("",x.names,sep="",collapse="+"),
        paste("",z.names,sep="",collapse="+"),sep="+",collapse="+"),sep="~"))
    }else{
      reg.eqn <- as.formula(paste("y",paste(paste("",x.names,sep="",collapse="+"),
        sep="+",collapse="+"),sep="~"))
    }
    options(na.action = "na.fail") # Necessary for dredge()
    reg.preds <- array(NA,dim=c(n,ncriteria))
    for(j in 1:nsims){
      cat(paste(i,":",j,":"))
      ## ---- Generate the training sample ("true" response) ----------------
      x.sim <- rnorm(nX,meanX,sdmeanX)
      z.sim <- rnorm(nZ,meanZ,sdmeanZ)
      if(!is.null(noise.epsilon)){
        ## Perturb the covariance so it varies from sample to sample.
        XZCor.temp <- noisecor(XZCor,epsilon=noise.epsilon)
        XZCov <- sqrt(corsim.var)*XZCor.temp*rep(sqrt(corsim.var),each=nX+nZ)
      }
      x.and.z <- mvrnorm(n=n,mu=c(x.sim,z.sim),Sigma=XZCov)
      x <- x.and.z[,1:nX]
      if(nZ > 0.5){
        z <- x.and.z[,(1+nX):(nZ+nX)]
      }
      ## Random effects: sample-specific intercept and slopes.
      alpha.sim <- rnorm(1,alpha,sd.RE.Intercept)
      beta.sim <- mvrnorm(n=1,mu=beta.x,Sigma=Sigma.RE.X)
      if(type=="lm"){
        y <- rnorm(n,0,sd.res)
        y <- as.numeric(y+alpha.sim+x%*%beta.sim)
      }else{
        if(substr(glm.family,1,3)=="bin"){
          y1.prob <- as.numeric(alpha.sim+x%*%beta.sim)
          y1.prob <- exp(y1.prob)/(1+exp(y1.prob))
          y1 <- rbinom(n,binomial.n,y1.prob)
          y2 <- binomial.n-y1
          y <- cbind(y1,y2)
        }else{ # hence Poisson
          y.mean <- exp(as.numeric(alpha.sim+x%*%beta.sim))
          if(!is.null(glm.offset)){
            y.mean <- exp(glm.offset)*y.mean
          }
          y <- rpois(n,y.mean)
        }
      }
      if(nZ > 0.5){
        reg.data <- data.frame(y=y,x=x,z=z)
      }else{
        reg.data <- data.frame(y=y,x=x)
      }
      if(type=="lm"){
        reg.model <- lm(reg.eqn,data=reg.data)
      }else{
        if(is.null(glm.offset)){
          reg.model <- glm(reg.eqn,family=glm.family,data=reg.data)
        }else{
          reg.model <- glm(reg.eqn,family=glm.family,offset=glm.offset,data=reg.data)
        }
      }
      ## All-subsets model selection (MuMIn::dredge) plus classic stepwise.
      reg.dredge <- dredge(reg.model,extra=c("AIC","BIC"))
      capture.output(reg.stepwise <- step(reg.model,k=step.k))
      ## ---- Generate an independent test sample ---------------------------
      x.sim <- rnorm(nX,meanX,sdmeanX)
      z.sim <- rnorm(nZ,meanZ,sdmeanZ)
      if(!is.null(noise.epsilon)){
        XZCor.temp <- noisecor(XZCor,epsilon=noise.epsilon)
        XZCov <- sqrt(corsim.var)*XZCor.temp*rep(sqrt(corsim.var),each=nX+nZ)
      }
      x.and.z <- mvrnorm(n=n,mu=c(x.sim,z.sim),Sigma=XZCov)
      x.new <- x.and.z[,1:nX]
      if(nZ > 0.5){
        z.new <- x.and.z[,(1+nX):(nZ+nX)]
      }
      alpha.sim <- rnorm(1,alpha,sd.RE.Intercept)
      beta.sim <- mvrnorm(n=1,mu=beta.x,Sigma=Sigma.RE.X)
      if(type=="lm"){
        y.new <- rnorm(n,0,sd.res)
        y.new <- as.numeric(y.new+alpha.sim+x.new%*%beta.sim)
      }else{
        if(substr(glm.family,1,3)=="bin"){
          y1.prob <- as.numeric(alpha.sim+x.new%*%beta.sim)
          y1.prob <- exp(y1.prob)/(1+exp(y1.prob))
          y1 <- rbinom(n,binomial.n,y1.prob)
          y2 <- binomial.n-y1
          y.new <- cbind(y1,y2)
        }else{ # hence Poisson
          y.mean <- exp(as.numeric(alpha.sim+x.new%*%beta.sim))
          if(!is.null(glm.offset)){
            y.mean <- exp(glm.offset)*y.mean
          }
          y.new <- rpois(n,y.mean)
        }
      }
      if(nZ > 0.5){
        newdata <- data.frame(x=x.new,z=z.new)
      }else{
        newdata <- data.frame(x=x.new)
      }
      if( type=="glm" && substr(glm.family,1,3)=="poi" ){
        newdata$glm.offset <- glm.offset
      }
      ## For binomial responses, compare predicted probabilities to observed
      ## proportions; otherwise compare to the raw response.
      if( type=="glm" && substr(glm.family,1,3)=="bin" ){
        y.calc <- y.new[,1]/rowSums(y.new)
      }else{
        y.calc <- y.new
      }
      ## ---- Evaluate predictions under each selection criterion -----------
      reg.best <- get.models(reg.dredge,subset=which.min(AICc))[[1]]
      reg.preds[,1] <- predict(reg.best,newdata=newdata,type="response")
      reg.best <- get.models(reg.dredge,subset=which.min(AIC))[[1]]
      reg.preds[,2] <- predict(reg.best,newdata=newdata,type="response")
      reg.best <- get.models(reg.dredge,subset=which.min(BIC))[[1]]
      reg.preds[,3] <- predict(reg.best,newdata=newdata,type="response")
      reg.preds[,4] <- predict(reg.stepwise,newdata=newdata,type="response")
      reg.bias[j,] <- colMeans(reg.preds-y.calc)
      reg.rmse[j,] <- sqrt(colMeans((reg.preds-y.calc)^2))
      if(keep.dredge){
        capture.output(dredge.out[[i]][[j]] <- as.matrix(model.sel(reg.dredge,rank=rmse.calc,rank.args=list(newdata=newdata,y=y.calc),extra=alist(AICc,AIC,BIC))))
      }
    }
    options(na.action = "na.omit")
    print(date())
    results[[i]] <- list(reg.bias,reg.rmse)
    names(results[[i]]) <- c("reg.bias","reg.rmse")
    ## BUG FIX: save() deparses its arguments to object names, so the original
    ## save(results[[i]], ...) failed with "object 'results[[i]]' not found".
    ## Bind the element to a named object before saving.
    results.i <- results[[i]]
    save(results.i,file=paste(filename,i,".RData",sep=""))
  }
  save(results,file=paste(filename,".RData",sep=""))
  if(keep.dredge){
    return(dredge.out)
  }else{
    return(results)
  }
}
|
984a1c103b5b901a531755e5d0da33777924fff9 | 0892ab6c7e4e157afb923afce615a32d718f0f61 | /QuantST/R/ST-F-ADX.R | bf551f0891fc308028cb90d2a742e723bdeb028f | [
"Apache-2.0"
] | permissive | NFS002/QuantST | 8e8b7bcd061bbf21abaae9bfa97a93b3d83bbbb9 | 129b4eaf6d257219cb9969f4eee7a4e89aed8316 | refs/heads/master | 2022-01-22T17:36:28.582250 | 2022-01-02T19:17:43 | 2022-01-02T19:17:43 | 104,096,127 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,025 | r | ST-F-ADX.R | #' Title "ST.F.ADX"
#' Compute the ADX (DIp, DIn, ADX) of a price series
#' and bind the rounded results to the original data frame.
#' The intermediate DX column is dropped and NAs are replaced with 0.
#'
#' @param dataframe time series on which the ADX is calculated
#' @param n time period (days)
#'
#' @return dataframe with DIp, DIn and ADX columns appended (returned
#'   unchanged when no suitable price columns are present)
#' @export
#'
#' @examples
ST.F.ADX <- function(dataframe, n = 14) {
  cols <- colnames(dataframe)
  # Pick the price columns to feed into ADX, preferring Close over Last.
  if (all(c("High", "Low", "Close") %in% cols)) {
    adx_vals <- ADX(dataframe[, c("High", "Low", "Close")], n)
  } else if (all(c("High", "Low", "Last") %in% cols)) {
    adx_vals <- ADX(dataframe[, c("High", "Low", "Last")], n)
  } else if (ncol(dataframe) > 2) {
    adx_vals <- ADX(dataframe, n)
  } else {
    # Not enough price information to compute ADX; return input untouched.
    return(dataframe)
  }
  dataframe <- cbind(dataframe, adx_vals)
  dataframe$"DX" <- NULL
  dataframe[is.na(dataframe)] <- 0
  dataframe$"DIp" <- round(dataframe$"DIp", 3)
  dataframe$"DIn" <- round(dataframe$"DIn", 3)
  dataframe$"ADX" <- round(dataframe$"ADX", 3)
  return(dataframe)
}
f544371e578506d9cb6072865a01809a74cb5abd | e178e45be0015f54397b4813205e0f5281169e2b | /horseshoe_jl_post.R | e3956f3736756de7c1990e82edc0ce98ce5519df | [] | no_license | jamesjohndrow/horseshoe_jo | 936cb80e2ed80c72186ae64f1272ddf0d7837b62 | 4f9cc96b4c2bf297bb525b16c810f70d25b8ed95 | refs/heads/master | 2021-01-20T04:25:26.289365 | 2019-10-01T20:31:53 | 2019-10-01T20:31:53 | 89,685,247 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 636 | r | horseshoe_jl_post.R | setwd('~/Documents/GitHub/horseshoe_jo/')
# Install pacman on first use; pacman::p_load then installs/attaches the rest.
if (!require(pacman)) {install.packages('pacman')}
pacman::p_load(readr, tidyr, ggplot2, dplyr)

# Posterior draws: one column per coefficient (100), one row per iteration.
df <- read_csv('Outputs/julia_test.csv')
names(df) <- seq(100)
df$iter <- seq(20000)

# Reshape to long format (iter, variable, value), keeping coefficient order.
df <- df %>% gather(variable, value, -iter)
df$variable <- factor(df$variable, levels = seq(100))

# Post-burn-in (iter > 5000) summaries for the first 25 coefficients.
keep <- as.numeric(df$variable) <= 25 & df$iter > 5000
ggplot(df[keep, ], aes(x = variable, y = value)) + geom_boxplot()
ggplot(df[keep, ], aes(x = variable, y = value)) + geom_violin()
ggplot(df[keep, ], aes(x = value)) + geom_density() + facet_wrap(~variable, scales = 'free')
|
492c15e3e7f95f38bb69742f68d211169b4a037f | 819e060e57ea525fe50d94d1edfed3686da4f8a6 | /Moran_model.R | 3e7cd0bf8ead317a4a576dc4acfb5481ebfefa2f | [] | no_license | yanglaboratory/2019_Reply_Matters_Arising_Nature-Communications | cb38b356955f72e00d4bc94157fd03ddee7b866c | 19361e6a558ddef72beba67d8fef85303c864c9f | refs/heads/master | 2020-05-30T23:56:06.614530 | 2019-06-03T15:01:54 | 2019-06-03T15:01:54 | 190,026,520 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 936 | r | Moran_model.R | rm(list = ls())
# Fig. 1 Moran effect (CCM): two populations forced by the same environmental
# noise series eT, simulated from a delayed Ricker-type recruitment model.
seed = 15486
set.seed(seed)
nsim = 10000
eT = R1 = R2 = N1 = N2 = rep(0, nsim)
# Alternative (asymmetric) parameterisation, kept for reference:
#r1=3.4;r2=2.9
#s1=0.4;s2=0.35
r1 = 3.4; r2 = 3.4    # maximum recruitment rates
s1 = 0.4; s2 = 0.4    # adult survival rates
ph1 = 0.5; ph2 = 0.6  # sensitivity to the shared noise
D1 = 3; D2 = 3        # recruitment delays (time steps)
# Initial conditions.
# BUG FIX: the last assignment was written as N1[1] twice, so N2 was never
# initialised and stayed identically zero for the whole simulation.
R1[1] = 1; R2[1] = 1; N1[1] = 0.5; N2[1] = 0.5
# Re-seed immediately before the stochastic loop (alternative seeds retained).
#set.seed(2301)
set.seed(15486)
#set.seed(3486)
for(t in 1:nsim){
  eT[t] = rnorm(1)  # shared environmental noise (the Moran driver)
  R1[t+1] = N1[t]*(r1*(1-N1[t]))*exp(-ph1*eT[t])
  # NOTE(review): for t <= D1 the index t-D1 is non-positive (negative indices
  # drop elements in R); max(..., 0) masks this, but the intended delayed term
  # for the first D1 steps is unclear -- confirm against the paper.
  N1[t+1] = s1*N1[t] + max(R1[t-D1], 0)
  R2[t+1] = N2[t]*(r2*(1-N2[t]))*exp(-ph2*eT[t])
  N2[t+1] = s2*N2[t] + max(R2[t-D2], 0)
}
# Keep the last n steps and write each series out for downstream CCM analysis.
n = 10000
dam = data.frame(Time = 1:n, cbind(R1, R2, N1, N2)[(nsim-n+1):nsim, ])
cor(dam[,'N1'], dam[,'N2'])
write.table(dam[,'N1'], "N1.txt", sep='\t', row.names=FALSE, col.names=FALSE)
write.table(dam[,'N2'], "N2.txt", sep='\t', row.names=FALSE, col.names=FALSE)
write.table(dam[,'R1'], "R1.txt", sep='\t', row.names=FALSE, col.names=FALSE)
write.table(dam[,'R2'], "R2.txt", sep='\t', row.names=FALSE, col.names=FALSE)
write.table(dam[,'Time'], "time_moran.txt", sep='\t', row.names=FALSE, col.names=FALSE)
|
79180e0c1a1c8895e7e8a43f819800086578b44c | 3e1d11e08794d616d881ac044a9d57a59cce0f43 | /Compute the Perplexity.R | 50cdcf30409f52093038aff1985663530d89d06d | [] | no_license | PaulineNimo/HackerRank | ce512a5b29012e4ad30996b5c75a3bedfe547d1b | 8f40f5022926234113c8402948ea44a364ccf6c5 | refs/heads/master | 2023-01-24T09:06:25.224783 | 2020-11-26T09:40:33 | 2020-11-26T09:40:33 | 282,261,538 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 128 | r | Compute the Perplexity.R | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Perplexity from a cross-entropy of 9.91 bits: PP = 2^H, rounded to the
# nearest integer.  (The original stored this in a variable named `c`,
# masking base::c; renamed to avoid the shadowing.)
perplexity <- round(2^9.91)
write(as.numeric(perplexity), stdout())
|
aa83ca321c9ef6ebc380423fd4b047710cf2d8d4 | 0e76443b6de1312c8d3988d2538263db0cd7385b | /分析及画图/堆叠柱状图各成分连线.R | aefcb833096b4a7610f43ea02ab362b6e4dd7b6e | [] | no_license | mrzhangqjankun/R-code-for-myself | 0c34c9ed90016c18f149948f84503643f0f893b7 | 56f387b2e3b56f8ee4e8d83fcb1afda3d79088de | refs/heads/master | 2022-12-30T08:56:58.880007 | 2020-10-23T03:20:17 | 2020-10-23T03:20:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,391 | r | 堆叠柱状图各成分连线.R | ##堆叠柱状图各成分连线
##2018-02-06 朱微金 李陈浩 宏基因组
##https://mp.weixin.qq.com/s?__biz=MzUzMjA4Njc1MA==&mid=2247484986&idx=1&sn=5885daab9fd0fd74871e9d22f9043e99&chksm=fab9ec8bcdce659d618179a28b2f4bb45488e327c0fe5cd10e80ed6155855bc0f09fd34f586a&scene=0#rd
# 安装和加载tidyverse包
install.packages("tidyverse")
library(tidyverse)
?tidyverse
#devtools::install_github("hadley/tidyverse")
#library(tidyverse)
# 生成测试数据
df=data.frame(
Phylum=c("Ruminococcaceae","Bacteroidaceae","Eubacteriaceae","Lachnospiraceae","Porphyromonadaceae"),
GroupA=c(37.7397,31.34317,222.08827,5.08956,3.7393),
GroupB=c(113.2191,94.02951,66.26481,15.26868,11.2179)
)
df
# 计算连线起始点Y轴坐标,即累计丰度的值
#http://blog.csdn.net/flyfrommath/article/details/79013582
#R中的管道操作。
#df %>% arrange(by=desc(Phylum)) 等价于arrange(df,by=desc(Phylum))。即%>%左边的值作为右边表达式函数的第一个参数。
#arrange是plyr包的一个函数。arrange(df,var1,var2,...)按照列给数据框排序。df为数据框,var是变量.desc(Phylum)降序排列。
?arrange
#mutate是dplyr包的一个函数,可以增加新的列。这里是更新原来的列。cumsum累计求和
?mutate
#把下面三行分解开即为:
# a = df %>% arrange(by=desc(Phylum));a
# b = a %>% mutate(GroupA=cumsum(GroupA), GroupB=cumsum(GroupB)) ;b
link_dat <- df %>%
arrange(by=desc(Phylum)) %>%
mutate(GroupA=cumsum(GroupA), GroupB=cumsum(GroupB))
# 数据格式转换,宽表格转换为ggplot2使用的长表格
df.long <- df %>% gather(group, abundance, -Phylum)
df.long
?gather
## 或者使用reshape2的melt函数
## df.long <- reshape2::melt(df, value.name='abundance', variable.name='group')
# 绘图,堆叠柱状图+组间连线
#geom_segment添加直线
p <- ggplot(df.long, aes(x=group, y=abundance, fill=Phylum));p
p <- p+ geom_bar(stat = "identity", width=0.5, col='black');p
p <- p+ geom_segment(data=link_dat, aes(x=1.25, xend=1.75, y=GroupA, yend=GroupB));p
############################################### Three groups
# Compare three groups
library(reshape2)
# Generate a wide-format test data set
df=data.frame(
Phylum=c("Ruminococcaceae","Bacteroidaceae","Eubacteriaceae","Lachnospiraceae","Porphyromonadaceae"),
GroupA=c(37.7397,31.34317,222.08827,5.08956,3.7393),
GroupB=c(113.2191,94.02951,66.26481,15.26868,11.2179),
GroupC=c(123.2191,94.02951,46.26481,35.26868,1.2179)
)
# melt converts to the long format that ggplot2 expects.
# geom_segment adds the connecting lines; arrange sorts phyla by name in
# descending order; cumsum accumulates the values which mutate writes back,
# giving the stacked heights for each pair of groups; the x positions
# (1.25/1.75 and 2.25/2.75) place each segment between adjacent bars.
ggplot(melt(df), aes(x=variable, y=value, fill=Phylum)) +
geom_bar(stat = "identity", width=0.5, col='black') + theme_classic()+
geom_segment(data=df %>% arrange(by=desc(Phylum)) %>% mutate(GroupA=cumsum(GroupA)) %>% mutate(GroupB=cumsum(GroupB)), aes(x=1.25, xend=1.75, y=GroupA, yend=GroupB))+
geom_segment(data=df %>% arrange(by=desc(Phylum)) %>% mutate(GroupB=cumsum(GroupB)) %>% mutate(GroupC=cumsum(GroupC)), aes(x=2.25, xend=2.75, y=GroupB, yend=GroupC))
# theme_classic() switches to the classic theme (a nicer default here).
# x and xend are the start/end positions: 1.25/1.75 between groups 1 and 2,
# 2.25/2.75 between groups 2 and 3.
############################################### Three or more groups
# For three or more groups, just add more data columns.
library(tidyverse)
df <- data.frame(
Phylum=c("Ruminococcaceae","Bacteroidaceae","Eubacteriaceae","Lachnospiraceae","Porphyromonadaceae"),
GroupA=c(37.7397,31.34317,222.08827,5.08956,3.7393),
GroupB=c(113.2191,94.02951,66.26481,15.26868,11.2179),
GroupC=c(123.2191,94.02951,46.26481,35.26868,1.2179),
GroupD=c(37.7397,31.34317,222.08827,5.08956,3.7393)
)
df
df.long <- df %>% gather(group, abundance, -Phylum)
df.long
## Data for the connecting lines between groups:
## assumes the first column is Phylum.
link_dat <- df %>%
arrange(by=desc(Phylum)) %>%
mutate_if(is.numeric, cumsum) ;link_dat
bar.width <- 0.7
# Duplicate the interior group columns so each bar edge appears twice
# (once as a segment end, once as a segment start).
link_dat <- link_dat[, c(1,2,rep(3:(ncol(link_dat)-1),each=2), ncol(link_dat))];link_dat
# Pair the columns up into (y.1, y.2) segment endpoints.
link_dat <- data.frame(y=t(matrix(t(link_dat[,-1]), nrow=2)));link_dat
# x positions: right edge of bar k and left edge of bar k+1.
link_dat$x.1 <- 1:(ncol(df)-2)+bar.width/2;link_dat$x.1
link_dat$x.2 <- 1:(ncol(df)-2)+(1-bar.width/2);link_dat$x.2
ggplot(df.long, aes(x=group, y=abundance, fill=Phylum)) +
geom_bar(stat = "identity", width=bar.width, col='black') +
geom_segment(data=link_dat,
aes(x=x.1, xend=x.2, y=y.1, yend=y.2), inherit.aes = F)
#### This plot suits ordered, continuous groups (time series or gradients).
#### Only adjacent groups can be connected, so think carefully about which
#### groups to compare; all pairwise comparisons are not possible this way.
##2018.12.17
# Ordering the segments of a stacked bar chart and adding well-placed block labels:
# https://mp.weixin.qq.com/s?__biz=MzUzMjYyMDE2OQ==&mid=2247484273&idx=1&sn=be46f67bba36dbb18aed658866404445&chksm=fab13597cdc6bc81654843d6d6c0a2760f26b17fca5cbacb796ea2f9c4d3d8b63aeba2ed1d89&mpshare=1&scene=1&srcid=1217NljIB6S4iIvHptT1QuaK&pass_ticket=gm1wJVkQVFHm73S6JXr2zI0WqrIap0TM1KzKUvgEk0xrYUJ2jqyvev8kEieOtB%2Bh#rd
|
7d6759d69f0c6f42fa064ae4a7995c47b5aa9f60 | 4c457b7fa98f798e2814a69d481ccc50df2b92a5 | /calsim-text-to-dss/cs_text_to_dss_lib.R | 254629d9e74a8a765f95b29cb613e76937eaf2de | [] | no_license | usbr/visualization | 4342994647826d478f55a3ce0a8b6982ae580430 | 64b2d60ceee522f5a7c65092ad5e03e047900b9f | refs/heads/master | 2021-01-01T18:03:16.348871 | 2015-05-27T17:30:23 | 2015-05-27T17:30:23 | 31,608,812 | 2 | 5 | null | null | null | null | UTF-8 | R | false | false | 7,607 | r | cs_text_to_dss_lib.R | calsim_cplex_text_to_dt <- function(fn, date_, cycle_){
require(data.table)
require(stringr)
# Extract the objective-function section of a CalSim CPLEX text dump:
# everything between the 'Maximize' header line and the 'constraint' section.
lines = readLines(fn)
start = grep('Maximize',lines) + 1
end = grep('constraint',lines) - 2
keep_lines = lines[start:end]
# Drop the '+' continuation signs so each line parses as "value variable".
clean = gsub('\\+','',keep_lines)
dt = data.table(read.table(textConnection(clean)))
setnames(dt,c('value','variable'))
dt[,variable:=str_trim(variable)]
# Tag every row with the model date and cycle supplied by the caller.
dt[,date:=date_]
dt[,cycle:=cycle_]
dt
}
# Parse a CalSim "variable : value" text report into a data.table.
#
# Args:
#   fn:     path to the text file; lines look like "VARNAME : 1,234.5",
#           with '/' introducing comment lines
#   date_:  model date to tag every row with
#   cycle_: model cycle to tag every row with
#
# Returns a data.table with columns variable, value, date, cycle.
calsim_text_to_dt <- function(fn, date_, cycle_){
  require(data.table)
  require(stringr)
  raw <- read.table(fn, sep = ':', comment.char = '/', stringsAsFactors = FALSE)
  dt <- data.table(raw)
  setnames(dt, c('variable', 'value'))
  dt[, variable := str_trim(variable)]
  # Strip thousands separators before the numeric conversion.
  dt[, value := as.numeric(gsub(",", "", value))]
  dt[, date := date_]
  dt[, cycle := cycle_]
  dt
}
# Split underscore-delimited file names into a one-row-per-file data.table.
#
# Args:
#   files:       character vector of file paths
#   names:       optional column names for the name parts
#   name_in_col: if TRUE, append the original path in a 'filename' column
file_name_parts_to_dt <- function(files, names=NULL, name_in_col=TRUE){
  require(tools)
  require(data.table)
  # Drop directory and extension, then split each base name on '_'.
  base_parts <- strsplit(file_path_sans_ext(basename(files)), '_')
  rows <- lapply(base_parts, function(p) data.frame(t(as.matrix(p)), stringsAsFactors = FALSE))
  dt <- rbindlist(rows)
  if(!is.null(names)) setnames(dt, names)
  if(name_in_col) dt[, filename := files]
  dt
}
# Determine if range of vector is FP 0.
# Returns TRUE when all elements of `x` are equal to within the relative
# tolerance `tol` (i.e. the vector has effectively zero range).
#
# Args:
#   x:   numeric vector of length >= 1
#   tol: relative tolerance (default: square root of machine epsilon)
#
# BUG FIXES vs the original:
#  * an exactly constant vector of zeros (e.g. c(0, 0)) produced NaN from the
#    division by mean(x) and incorrectly returned FALSE;
#  * any non-constant vector with mean(x) == 0 hit a division by zero (it now
#    falls back to an absolute comparison).
zero_range <- function(x, tol = .Machine$double.eps ^ 0.5) {
  if (length(x) == 1) return(TRUE)
  rng <- range(x)
  # Exactly constant (covers the all-zero case without dividing by zero).
  if (rng[1] == rng[2]) return(TRUE)
  if (mean(x) == 0) {
    # Relative scaling is undefined; compare the endpoints directly.
    return(isTRUE(all.equal(rng[1], rng[2], tolerance = tol)))
  }
  rng <- rng / mean(x)
  isTRUE(all.equal(rng[1], rng[2], tolerance = tol))
}
# Join the first six elements of `row` into a DSS-style pathname of the form
# "/A/B/C/D/E/F/" (leading and trailing slash included).
concatSinglePath <- function(row){
  paste0('/', paste(row[1:6], collapse = '/'), '/')
}
# Apply concatSinglePath() to every row of `parts` (a matrix or data frame of
# DSS path parts), returning a character vector of pathnames.
concatPathParts <- function(parts){
  apply(parts, MARGIN = 1, FUN = concatSinglePath)
}
# Fetch the full time-series collection from an open DSS file for every
# pathname matching `path` (which may contain wildcard parts).
#
# Args:
#   path: a DSS pathname pattern, possibly with wildcards
#   dss:  an open DSS file handle (from dssrip)
#
# BUG FIX: the original body referenced an undefined variable `paths` instead
# of the `path` argument; it now expands the wildcard against the file's own
# path list, matching how fullPathByWildcard() is used elsewhere in this file.
get_wildcard_ts <- function(path,dss){
  require(dssrip)
  getFullTSC(dss, fullPathByWildcard(getAllPaths(dss), path))
}
# Convert POSIXct (seconds since 1970-01-01 UTC) to DSS time: minutes since
# 1899-12-31 00:00.  2209075200 is the number of seconds between the two
# epochs.  NOTE(review): as.integer() overflows to NA for instants more than
# ~68 years from 1970 -- confirm the expected date range.
posixct_to_dss_time <- function(x){
  secs <- as.integer(x)
  secs / 60 + 2209075200 / 60
}
# Convert DSS time (minutes since 1899-12-31 00:00) back to POSIXct in UTC.
dss_time_to_posixct <- function(x){
  seconds <- x * 60
  as.POSIXct(seconds, origin = "1899-12-31 00:00", tz = "UTC")
}
# Build a HEC-DSS TimeSeriesContainer (a Java object, via rJava) from monthly
# data, padding any gaps so the stored series is contiguous.
#
# Args:
#   values:      numeric vector of monthly values
#   times:       timestamps matching `values` -- assumed to be POSIXct at the
#                first of each month (TODO confirm)
#   dssMetadata: optional one-row data.frame supplying extra TSC fields
#   ...:         additional TSC fields (e.g. watershed, location, parameter,
#                version) merged straight into the metadata list
monthly_data_to_tsc <- function(values, times, dssMetadata=NULL, ...){
library(lubridate)
# this next bit is convoluted, but the goal is to make a list of
# dates that spans from the first date in the series to the last
# date in the series with no missing values in between. The reason
# things are so complicated is that months are not a fixed length
# The other reason is that dss reports the end time for a monthly
# time step as the first day of the following month...
first = year(times) == year(min(times))
last = year(times) == year(max(times))
# Months remaining in the first calendar year, full years in between, and
# months elapsed in the last calendar year.
first_year = rep(year(min(times)), abs(month(min(times)) - 12)+1)
middle_years = rep(unique(year(times[!first & !last])), each=12)
last_year = rep(year(max(times)), month(max(times)))
years = c(first_year, middle_years, last_year)
# Month numbers cycling from the starting month; %% maps 12 to 0, fixed below.
mons = (month(min(times))+0:(length(years)-1)) %% 12
mons = ifelse(mons==0,12,mons)
fullTimes = round_date(ymd(sprintf('%04d-%02d-01',years,mons)),'day')
#blankTimes = fullTimes[!(fullTimes %in% times)]
#empties = xts(rep(J("hec/script/Constants")$UNDEFINED, length(blankTimes)), order.by=blankTimes)
#colnames(empties) = colnames(tsObject)
#tsObject = rbind(tsObject, empties)
# Left-join onto the contiguous date list; missing months get NA values.
df = merge(data.frame(times=fullTimes),data.frame(times=times,values=values),all.x=TRUE)
## Configure slots for TimeSeriesContainer object
times_java = posixct_to_dss_time(fullTimes)
values = as.numeric(df$values)
#browser()
metadata = list(
times = .jarray(as.integer(times_java), contents.class="java/lang/Integer"), #, as.integer(times)), new.class="java/lang/Integer")
values = .jarray(values, contents.class="java/lang/Double"),
endTime = max(times_java),
startTime = min(times_java),
numberValues = length(values),
storedAsdoubles = TRUE,
modified=FALSE,
fileName="",
...
)
# Fill any TSC fields supplied via dssMetadata that were not already set.
# NOTE(review): first() is not base R (presumably dplyr or xts) and is not
# loaded in this function -- confirm which package supplies it at run time.
if(!is.null(dssMetadata)){
for(mdName in colnames(dssMetadata)){
if(mdName %in% names(metadata)){
next
}
metadata[[mdName]] = first(dssMetadata[[mdName]])
}
}
#browser()
# Assemble the DSS pathname /A/B/C/D/E/F/: D = start date, E = 1MON interval.
ePart = "1MON"
dPart = paste0("01JAN", lubridate::year(times[1]))
metadata$fullName = paste("", metadata$watershed, metadata$location, metadata$parameter, dPart, ePart, metadata$version, "", sep="/")
tsc = .jnew("hec/io/TimeSeriesContainer")
# Field-name/type table cached by dssrip; used to coerce values to int fields.
tscFieldsDF = get("tscFieldsDF", envir=dssrip:::hecJavaObjectsDB)
for(n in names(metadata)){
#print(sprintf("%s:", n))
#print(metadata[[n]])
writeVal = metadata[[n]]
# Skip empty scalar fields (Java objects are S4 and bypass this check).
# NOTE(review): is.na(writeVal) | writeVal == "" assumes scalar fields;
# a length->1 value here would error inside if() -- confirm.
if(typeof(writeVal)!='S4'){
if(is.na(writeVal) | writeVal == ""){
#print("Value is NA, not writing.")
next
}
}
if(is.factor(writeVal)){
writeVal = as.character(writeVal)
}
if(tscFieldsDF$CLASS[tscFieldsDF$SHORTNAME == n] %in% c("int")){
#print("Converting to integer.")
writeVal = as.integer(writeVal)
}
.jfield(tsc, n) = writeVal
}
return(tsc)
}
# Extract one variable from an open HEC-DSS file and align a set of
# overlapping model runs onto a common "position analysis" (PA) year so
# they can be plotted on the same time window.
#
# Arguments (as used below; confirm against callers):
#   variable    - DSS path C-part (variable name) to extract
#   dss         - an open DSS file handle (dssrip)
#   pa_year     - calendar year every run is shifted onto for plotting
#   start_year  - starting year of the first model run
#   end_year    - starting year of the last model run
#   start_month - month (1-12) in which each model run starts
#   run_length  - length of each model run in years (default 1)
#   year_labels - accepted but not used in this body
#
# Returns: a data.table with columns datetime, value, units,
#   group (the run's starting year), modifier (the applied year shift)
#   and pa_date (the shifted date used for plotting).
get_pa_variable_dt <- function(variable, dss, pa_year, start_year, end_year, start_month, run_length=1, year_labels=TRUE){
require(dssrip)
require(dplyr)
require(data.table)
# the last month of each run is the month immediately before start_month
end_month = ifelse(start_month == 1, 12, start_month-1)
start_date = ymd(sprintf('%d-%02d-01',start_year,start_month))
# does the model run end in the following calendar year or not
year_modifier = ifelse(start_month == 1, run_length-1, run_length)
init_month = end_month
# Range of dates for the first model run period
first_date_range = seq(start_date, start_date + months(12*run_length) - months(1), by='months')
# the starting years of each model run
run_start_year = seq(start_year, end_year, by=run_length)
# open the DSS file and fiddle with the paths to allow bulk extraction of a
# single variable. Internally, DSS stores monthly data in 10 year chunks (why?!?!)
# Wildcarding the D (date-block) part collapses those chunks into one path group.
paths = getAllPaths(dss)
parts = separatePathParts(paths)
parts$D = '*'
wildcard_paths = unique(concatPathParts(parts))
wildcard_path = grep(sprintf('/%s/',variable),wildcard_paths,value=TRUE)
path_group = fullPathByWildcard(paths, wildcard_path)
# get a single variable as a data.table
dt = getFullDT(dss,path_group)
# dss reports the end of month timestamp as the last day of the month timestamp with
# hour 24:00, which is not actually a real time (damn you dss), and so gets converted to
# the first day of the next month with an hour of 00:00 by R, so move back a month so that
# now all the timestamps are at them beginning of each month
dt[,datetime:=datetime-months(1)]
dt$group=NA_real_
# create a new data colum equal to the year the run was started,
# this is used to group the data, hence the name
for(i in 1:length(run_start_year))
dt[datetime %in% (first_date_range+years(i-1)),group:=run_start_year[i]]
# rows that belong to no run period keep group == NA and are dropped here
dt = na.omit(dt)
# the init value for all the runs is on the start date because of the shifted month,
# it's convoluted I know
init_value = dt[datetime == start_date]$value
# year_mod is the number of years each run should be shifted so that they can all be
# plotted on the same time window
year_mod = summarise(group_by(dt,group),modifier=min(pa_year-year(datetime)))
dt = merge(dt,year_mod,by='group')
# create the new dates with the year shift, used for plotting
dt[,pa_date := datetime+years(modifier)]
# add in the data for the init point (one synthetic row per run, all
# carrying the same init_value, mapped onto the month before the PA start)
init_dt = data.table(datetime=ymd(sprintf('%d-%02d-01',start_year:end_year-year_modifier,init_month)),
                       pa_date=ymd(sprintf('%d-%02d-01',pa_year-year_modifier,init_month)),
                       value=init_value, units=dt$units[1], group=start_year:end_year, modifier=0)
dt = rbind(dt,init_dt,use.names=TRUE)
return(dt)
}
1439699764551bad7b1edb0a5e64c35a436657ec | d20145e080798319e0028a4c9d8851f619285c29 | /ch09/R/phd-halfnorm.R | d0beb17ba2c21c4d3f9a1ab14f5be41259846593 | [] | no_license | friendly/VCDR | da441b8306969fd6469a3bbc6a4475e028c61e29 | 818156ee0a26943a656ae915d924773c87654cec | refs/heads/master | 2023-02-18T08:40:28.634897 | 2014-12-05T13:24:55 | 2014-12-05T13:24:55 | 16,708,002 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,618 | r | phd-halfnorm.R | # halfnormal plot example
# Half-normal plot with a simulated envelope for a negative binomial
# model of publication counts (PhdPubs data, vcdExtra).
data("PhdPubs", package="vcdExtra")
library(MASS)
phd.nbin <- glm.nb(articles ~ ., data=PhdPubs)

library(car)
qqPlot(rstudent(phd.nbin), xlab="Normal quantiles",
       ylab="Studentized residuals", id.n=3)

# examine distribution of residuals
res <- rstudent(phd.nbin)
plot(density(res), lwd=2, col="blue", main="Density of studentized residuals")
rug(res)

# why the bimodality?  residuals vs. (jittered, logged) response
plot(jitter(log(PhdPubs$articles+1), factor=1.5), res,
     xlab="log (articles+1)", ylab="Studentized residual")

# Half-normal plot: ordered |studentized residuals| against
# half-normal order-statistic scores.
observed <- sort(abs(rstudent(phd.nbin)))
n <- length(observed)
expected <- qnorm((1:n + n - 1/8)/(2*n + 1/2))

# Simulated envelope: draw S response vectors from the fitted model,
# refit, and collect the ordered absolute residuals of each refit.
S <- 100
sims <- simulate(phd.nbin, nsim=S)
simdat <- cbind(PhdPubs, sims)

# calculate residuals for one simulated data set
resids <- function(y)
  rstudent(glm.nb(y ~ female + married + kid5 + phdprestige + mentor,
                  data=simdat, start=coef(phd.nbin)))

simres <- matrix(0, nrow(simdat), S)
for (i in seq_len(S)) {
  # BUG FIX: the original indexed a non-existent `dat`; the simulated
  # responses live in `simdat` as columns sim_1 ... sim_S.
  simres[, i] <- sort(abs(resids(simdat[, paste("sim", i, sep="_")])))
}

envelope <- 0.95
# Pointwise mean and envelope bounds across the simulations.
# (Renamed from `mean`/`lower`/`upper` so base::mean is not masked.)
sim.mean  <- apply(simres, 1, mean)
sim.lower <- apply(simres, 1, quantile, prob=(1 - envelope)/2)
sim.upper <- apply(simres, 1, quantile, prob=(1 + envelope)/2)

op <- par(mar=c(4,4,1,1)+.1)
plot(expected, observed,
     xlab='Expected value of half-normal order statistic',
     ylab='Absolute value of studentized residual')
lines(expected, sim.mean,  lty=1, lwd=2, col="blue")
lines(expected, sim.lower, lty=2, lwd=2, col="red")
lines(expected, sim.upper, lty=2, lwd=2, col="red")
identify(expected, observed, labels=names(observed), n=3)
par(op)

# BUG FIX: `cd()` is not an R function; use setwd() before copying the
# current graphics device to PDF.
setwd("C:/Dropbox/Documents/VCDR/ch09/fig")
dev.copy2pdf(file="phd-halfnorm.pdf")
d74a7cd6ccb3a9258a16ca7b467fe885a8e638bd | b7c0f8e75f4261c462d4d5980c0901543040c382 | /server.R | 4fdba28bd3a38a70850a4de2d5b327302d34a6e7 | [] | no_license | icetornado/ds_capstone_shiny | a66950d0aeebabdb51888babbbb9bbf937606409 | 06f10cc7a88807c1abeb36962c36c1644d599904 | refs/heads/master | 2016-09-13T11:11:34.049552 | 2016-05-31T19:18:00 | 2016-05-31T19:18:00 | 59,299,481 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,776 | r | server.R | library(shiny)
# Package setup: tm/stringr for text handling, doParallel to register a
# parallel backend used by the prediction code.
library(tm)
library(stringr)
library(doParallel)
registerDoParallel()
# models2.R is expected to define FilterInput, FilterOutput, PredictWord,
# PredictKN2, PredictKN3 and contractionsDF used by the server below.
source(file.path("models2.R"))
# Pre-computed word-count tables (presumably 1/2/3-gram frequency tables,
# plus an unstemmed unigram table used for spelling suggestions — confirm
# against models2.R).
wordCount1 <- readRDS(file.path("data3", "wordCount1.Rds"))
wordCount2 <- readRDS(file.path("data3", "wordCount2.Rds"))
wordCount3 <- readRDS(file.path("data3", "wordCount3.Rds"))
wordCount1NoStem <- readRDS(file.path("data3", "wordCount1NoStem.Rds"))
# Shiny server for the next-word prediction app.
#
# The UI provides a text input ("text"), three prediction buttons
# (action1-3, rendered into output slots choice1-3), three spelling
# suggestion buttons (spelling1-3 / spell1-3) and a "back" button.
# Prediction and spelling lookups are delegated to the functions sourced
# from models2.R, using the wordCount* tables loaded at the top of this
# file.
shinyServer(function(input, output, clientData, session) {
  globalWords <- list()  # NOTE(review): never read below; retained as-is

  # For the current input text, compute up to three next-word predictions
  # and up to three spelling suggestions for the last token typed.
  # Returns a data.frame with columns `spelling` and `predicted`; empty
  # slots hold "" / "..." so the buttons can be rendered disabled.
  getPredictedWord <- function(txt){
    myArr <- FilterInput(txt)
    print("server getPredictedWord func")  # console trace for debugging
    predictArr <- rep("...", 3)
    spellingArr <- rep("", 3)
    # Spelling suggestions: based on the last token, looked up in the
    # unstemmed unigram table.
    if(length(myArr) > 0) {
      suggestedWords <- PredictWord(myArr[length(myArr)], wordCount1NoStem)
      if(length(suggestedWords) > 0) {
        for(i in seq_along(suggestedWords)) {
          spellingArr[i] <- FilterOutput(suggestedWords[i], contractionsDF)
        }
      }
    }
    ## Next-word prediction: trigram model when at least two tokens are
    ## available, bigram model for a single token.
    if(length(myArr) > 1) {
      # FIX: removed the dangling ", cutoff = " (an explicitly-missing
      # argument) from the original call.
      # NOTE(review): this conditions on the FIRST two tokens
      # (myArr[1], myArr[2]); confirm in models2.R that the last two
      # tokens were not intended instead.
      predicted <- PredictKN3(myArr[1], myArr[2], wordCount1, wordCount2,
                              wordCount3, limit = 3)
      if(length(predicted) > 0) {
        for(i in seq_along(predicted)) {
          predictArr[i] <- FilterOutput(predicted[i], contractionsDF)
        }
      }
    } else if(length(myArr) == 1) {
      predicted <- PredictKN2(myArr[1], wordCount1, wordCount2)
      if(length(predicted) > 0) {
        for(i in seq_along(predicted)) {
          predictArr[i] <- FilterOutput(predicted[i], contractionsDF)
        }
      }
    }
    return(as.data.frame(list(spelling = spellingArr, predicted = predictArr)))
  }

  # Single reactive doing the expensive work; the small getters below
  # just pick individual elements out of its cached result.
  predictionReactive <- reactive(getPredictedWord(input$text))
  getWord1 <- reactive({
    w <- predictionReactive()
    return(w$predicted[1])
  })
  getWord2 <- reactive({
    w <- predictionReactive()
    return(w$predicted[2])
  })
  getWord3 <- reactive({
    w <- predictionReactive()
    return(w$predicted[3])
  })
  getSpelling1 <- reactive({
    w <- predictionReactive()
    return(w$spelling[1])
  })
  getSpelling2 <- reactive({
    w <- predictionReactive()
    return(w$spelling[2])
  })
  getSpelling3 <- reactive({
    w <- predictionReactive()
    return(w$spelling[3])
  })

  # Re-render the six buttons whenever the predictions change; a
  # placeholder value ("..."/""/NA) renders the button disabled.
  observe({
    w1 <- getWord1()
    w2 <- getWord2()
    w3 <- getWord3()
    s1 <- getSpelling1()
    s2 <- getSpelling2()
    s3 <- getSpelling3()
    output$choice1 <- renderUI({
      if(w1 == "..." || is.na(w1)) {
        actionButton("action1", class="my_predictive_btn", label = w1, disabled = TRUE)
      } else {
        actionButton("action1", class="my_predictive_btn", label = w1)
      }
    })
    output$choice2 <- renderUI({
      if(w2 == "..." || is.na(w2)) {
        actionButton("action2", class="my_predictive_btn", label = w2, disabled = TRUE)
      } else {
        actionButton("action2", class="my_predictive_btn", label = w2)
      }
    })
    output$choice3 <- renderUI({
      if(w3 == "..." || is.na(w3)) {
        actionButton("action3", class="my_predictive_btn", label = w3, disabled = TRUE)
      } else {
        actionButton("action3", class="my_predictive_btn", label = w3)
      }
    })
    output$spell1 <- renderUI({
      if(s1 == "" || is.na(s1)) {
        actionButton("spelling1", class="my_spelling_btn", label = s1, disabled = TRUE)
      } else {
        actionButton("spelling1", class="my_spelling_btn", label = s1)
      }
    })
    output$spell2 <- renderUI({
      if(s2 == "" || is.na(s2)) {
        actionButton("spelling2", class="my_spelling_btn", label = s2, disabled = TRUE)
      } else {
        actionButton("spelling2", class="my_spelling_btn", label = s2)
      }
    })
    output$spell3 <- renderUI({
      if(s3 == "" || is.na(s3)) {
        actionButton("spelling3", class="my_spelling_btn", label = s3, disabled = TRUE)
      } else {
        actionButton("spelling3", class="my_spelling_btn", label = s3)
      }
    })
  })

  # Prediction buttons: append the chosen word to the input text.
  observeEvent(input$action1, {
    isolate({
      updateTextInput(session, "text", value = paste(str_trim(input$text), getWord1()))
    })
  })
  observeEvent(input$action2, {
    isolate({
      updateTextInput(session, "text", value = paste(str_trim(input$text), getWord2()))
    })
  })
  observeEvent(input$action3, {
    isolate({
      updateTextInput(session, "text", value = paste(str_trim(input$text), getWord3()))
    })
  })

  # Spelling buttons: replace the last word with the chosen suggestion.
  observeEvent(input$spelling1, {
    isolate({
      txtVal <- gsub("\\s(\\w+)$", "", str_trim(input$text))
      txtVal <- paste(txtVal, getSpelling1())
      updateTextInput(session, "text", value = txtVal)
    })
  })
  observeEvent(input$spelling2, {
    isolate({
      txtVal <- gsub("\\s(\\w+)$", "", str_trim(input$text))
      txtVal <- paste(txtVal, getSpelling2())
      updateTextInput(session, "text", value = txtVal)
    })
  })
  observeEvent(input$spelling3, {
    isolate({
      txtVal <- gsub("\\s(\\w+)$", "", str_trim(input$text))
      txtVal <- paste(txtVal, getSpelling3())
      updateTextInput(session, "text", value = txtVal)
    })
  })

  # "Back" button: drop the last word from the input text.
  observeEvent(input$back, {
    isolate({
      updateTextInput(session, "text", value = gsub("\\s(\\w+)$", "", input$text))
    })
  })
})
|
be4bf10042f6b1cdd735a92febea142204d48450 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/chillR/examples/Date2YEARMODA.Rd.R | 5bda3acb77f09d8694c2f350c6d2398419eb4976 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 234 | r | Date2YEARMODA.Rd.R | library(chillR)
### Name: Date2YEARMODA
### Title: Date to YEARMODA conversion
### Aliases: Date2YEARMODA
### Keywords: utility

### ** Examples

# Round-trip check: converting YEARMODA -> Date -> YEARMODA should
# reproduce the original integer date codes.
Date2YEARMODA(YEARMODA2Date(20001205))
Date2YEARMODA(YEARMODA2Date(19901003))
|
2f1c2467fdc5550c351a5fc36fe134a516febeb3 | 4eca6e4383e23ce0408db002bc9087b7ba28a089 | /complete.R | 58079748616b28e366c2e7eaf7df96b8afc8833d | [] | no_license | coolbig/datasciencecoursera | 57989910f0880c318e6ae48dcd9656911be6bc81 | ae2ed94974ef7ae181c7caaa331f07078155d4a8 | refs/heads/master | 2021-01-02T23:08:14.739022 | 2014-06-17T08:02:21 | 2014-06-17T08:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 645 | r | complete.R | complete <-function(directory, id = 1:332) {
# Body of complete(directory, id): for each monitor id, count the number
# of completely-observed rows (no NAs) in its CSV file.
# Files sort as 001.csv ... 332.csv, so the i-th file corresponds to
# monitor id i — hence indexing allfiles by id.
allfiles <- list.files(path=directory, pattern=".csv", full.names=TRUE)
# One complete-case count per requested id. vapply replaces the original
# grow-by-rbind loop, and the dead `xid` accumulator has been removed.
nobs <- vapply(id,
               function(i) sum(complete.cases(read.csv(allfiles[i]))),
               numeric(1))
# One row per monitor: its id and its count of complete cases.
finedata <- data.frame(id=id, nobs=nobs)
finedata
}
|
8b3eef4dfd2feccebccaa51a5a5d777866f54878 | 32d6189a56ea7d0b66a9b01bee60c3d61fbe0eed | /myfile.R | d9f31134e182e83a575da59c8b54d6175737cb43 | [] | no_license | EwersAquaGenomics/git-boot | 0e67b32b3b545d1f001def5c885000ba2085b040 | 1bdacff365ad95377fef25feff5e7c41b4a929bd | refs/heads/master | 2023-02-08T19:33:14.404581 | 2015-03-17T19:27:45 | 2015-03-17T19:27:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 53 | r | myfile.R | # this is my first line
# am a fake collaborator...
|
b351428e2a58c389fe2d4aaee2d04e56c1cd6397 | ea7a3cb08c2a242abaacd880e84caf6513886353 | /prepare_calib_sedi.R | 9dc11a84389d9323dc433c3275e93568e6371fe1 | [
"Unlicense"
] | permissive | ecustwy/MMEMO | f3b1b7b3c83951c296548bb38f620badce174c91 | 05cd3bb2ddbe9d471d5dff746dfd5cfd15d1491f | refs/heads/master | 2022-12-03T15:45:56.712242 | 2020-08-20T12:32:39 | 2020-08-20T12:32:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,752 | r | prepare_calib_sedi.R | #WASA calibration using PSO / DDS
#to be called after calibration for water has finished
# use existing directory "thread1_best" (created by view_progress.R)
# run on cluster or
# when called locally before copying to cluster: delete dds.* and thread.* in run directories
#
# For each selected calibration configuration (param_ids), this script
# rebuilds the run directory for the sediment calibration phase:
# archives the water-calibration files, seeds a fresh init_config_sed
# from the best water run, enables sediment modelling in the WASA input,
# copies sediment templates, and rewrites the PBS job script.
setwd("E:/till/uni/parameterisierung/Esera_2014/runs3")
# Configurations to process; commented entries are alternatives that can
# be toggled on as needed.
param_ids=c(
# "A_u_24",
# "A+1_u_24",
# "A+2_u_24",
# "A+3_u_24",
# "A+4_u_24",
# "A+5_u_24",
# "A+6_u_24",
#"A+7_u_24"
#"A_u_1",
#"A+1_u_1",
# "A+2_u_1",
# "A+3_u_1"
# "A+4_u_1",
# "A+5_u_1",
# "A+7_u_1"
# "B_u_24",
# "B-1_u_24",
# "B-2_u_24",
# "B-3_u_24",
# "B-4_u_24",
# "B-5_u_24",
# "B-7_u_24"
# "B_u_1",
# "B-1_u_1",
# "B-2_u_1",
"B-3_u_1"
# "B-4_u_1",
# "B-5_u_1",
# "B-7_u_1"
)
for (param_id in param_ids)
{
setwd(param_id)
#param_id="A_u_24"
# thread1_best holds the best water-calibration run and must exist first
if (!file.exists("thread1_best"))
stop(paste0(param_id, ": thread1_best not found. Run view_progress.R to create it."))
#save files of water calibration
if (!file.exists("water_calib_files"))
{
dir.create("water_calib_files")
movefiles=dir(pattern = "\\..*")
file.rename(from = movefiles, paste0("water_calib_files/",movefiles))
} else
unlink(x = dir(pattern="\\.", include.dirs = FALSE), recursive = FALSE) #empty files in directory
#restore files that are still needed for sediment calibration
copyfiles=dir(path = "water_calib_files", pattern = "\\.*.R|bat|pbs|exe|lin|psox")
file.copy(from = paste0("water_calib_files/",copyfiles), "./", copy.date=TRUE)
#delete old thread dirs
old_threaddirs=dir(pattern="thread[0-9]*$")
unlink(old_threaddirs, recursive = TRUE)
#delete old lock files
unlink(dir(pattern="thread[0-9]*\\.lock$"), recursive = TRUE)
#create new initialisation directory
unlink("init_config_sed", recursive=TRUE)
dir.create("init_config_sed")
file.copy(from="thread1_best/.", to="init_config_sed", overwrite=TRUE, recursive=TRUE, copy.date = TRUE)
#update initial conditions
outdir="init_config_sed/output/isabena_2010-2013/"
movefiles=dir(pattern = ".*_start$", path = outdir) #find storage files
file.remove(dir(path="init_config_sed/input/isabena_2010-2013/init_conds/", full.names = TRUE))
file.rename(from=paste0(outdir,movefiles), to=paste0("init_config_sed/input/isabena_2010-2013/init_conds/", sub(movefiles, pattern = "_start$", repl="")))
#remove time series output of template run and pre-start model state files
old_outfiles=dir(path = "init_config_sed", pattern = "\\.out|stat.*_start", recursive = TRUE)
file.remove(paste0("init_config_sed/",old_outfiles))
#remove logfiles of previous runs
old_logfiles=dir(recursive=FALSE, path="init_config_sed/", include.dirs=FALSE, pattern="\\.+")
file.remove(paste0("init_config_sed/",old_logfiles))
#copy general files for sediment calibration
file.copy(from=paste0("../templates_sed/all/."), to=".", overwrite=TRUE, recursive=TRUE, copy.date = TRUE)
file.rename("init_config_sed/input/isabena_2010-2013/x_erosion.ctl","init_config_sed/input/isabena_2010-2013/erosion.ctl")
#enable sediment modelling
source("modify_wasa_input.R")
modify_wasa_input(wasa_input_dir = "init_config_sed/input/isabena_2010-2013/", parameters = data.frame(dosediment=".t."))
#set files modified in water calibration as new standard, i.e. delete old templates
modified_inputfiles=dir(path = "init_config_sed", pattern="calib_bak", recursive = TRUE) #get names of input files modified in water calibration
modified_inputfiles=paste0("init_config_sed/",modified_inputfiles)
file.remove(modified_inputfiles)
#copy configuration-specific files
# a template dir applies when its name is a substring of this config name
sed_templates = dir(path="../templates_sed/")
cur_config = sub(pattern = ".*/", x = getwd(), rep="")
to_copy=sapply(X = sed_templates, FUN = grepl, x=cur_config, fixed = TRUE)
for (tdir in sed_templates[to_copy])
file.copy(from=paste0("../templates_sed/",tdir,"/."), to=".", overwrite=TRUE, recursive=TRUE, copy.date = TRUE)
if (grepl(pattern = "B", cur_config) && !grepl(pattern = "B-2", cur_config)) #use subbasin data
file.copy(from=paste0("../templates_sed/A+2/."), to=".", overwrite=TRUE, recursive=TRUE, copy.date = TRUE)
#modify jobscript
# switch the job to the sediment calibration driver, tag the job name,
# and reduce PBS mail events from abe to ae
tt=scan("job_parallel.pbs", what = character(), sep = "\n")
tt=sub(x = tt, pattern="calibrate_main_dds_mpi", replacement = "calibrate_main_dds_mpi_sed")
tt=sub(x = tt, pattern="(-N .*)", replacement = "\\1_sed")
tt=sub(x = tt, pattern="#PBS -m abe", replacement = "#PBS -m ae")
write(tt, file="job_parallel.pbs")
setwd("../")
}
print("Don't forget to convert linebreaks of job_parallel.pbs manually!")
f9f814f8ac78d40a8d1931d0b6b8b64163ed7469 | d6e3f8759f52fee91d1ee7dcd813751593aa906e | /Code/PLSC503-2021-WeekSeven.R | 45135572cb727419cbe547d4c340e4f32675ff41 | [] | no_license | nsflemming/PLSC503-2021-git | cf822efac01886f9294305b84c3085bd2023de1e | 55ad1a8efd901fd040724029e3d1e3f7293b161a | refs/heads/master | 2023-02-26T09:24:51.821351 | 2021-04-28T12:21:11 | 2021-04-28T12:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,924 | r | PLSC503-2021-WeekSeven.R | #######################################################
# PLSC 503 - Spring 2021
#
# Model specification / omitted variable bias,
# plus multiplicative interactions.
#######################################################
# Packages, my dude:
# car: scatterplotMatrix/scatterplot/linearHypothesis;
# RCurl: getURL for fetching remote CSV data;
# lattice: wireframe/levelplot 3-D and contour graphics.
library(car)
library(RCurl)
library(lattice)
# setwd() here:
#
# setwd("~/Dropbox (Personal)/PLSC 503/Notes")
#
# Options:
options(scipen = 99) # bias against scientific notation
options(digits = 4) # show fewer decimal places
#######################################################
# Model specification, etc.
#######################################################
# Simulate data:
# DGP: Y depends on X1 and X2 (which are correlated); Z is correlated
# with X1 but irrelevant to Y, for the over/under-specification demos.
set.seed(7222009)
N <- 100
X1<-rnorm(N) # <- X1
X2<-(-X1)+1.5*(rnorm(N)) # <- correlated w/X1
Y<-X1-(2*X2)+(2*(rnorm(N))) # <- Y
Z<- (-2*X1) + rnorm(N) # <- correlated w/X1 but irrelevant
data <- data.frame(Y=Y,X1=X1,X2=X2,Z=Z)
# Scatterplot matrix:
pdf("MisspecificationExampleScatterplotMatrixR.pdf",7,7)
scatterplotMatrix(data)
dev.off()
# "Correct" model:
correct<-lm(Y~X1+X2)
summary(correct)
# "Overspecified" model:
overspec<-lm(Y~X1+X2+Z)
summary(overspec)
# "Underspecified" model:
incorrect<-lm(Y~X1)
summary(incorrect)
# Omitted variable plot:
# residuals of the underspecified model against the omitted X2
pdf("MisspecifiedResidualsR.pdf",6,6)
plot(data$X2,incorrect$residuals,pch=20,
     xlab="Omitted Variable (X2)",ylab="Residuals")
abline(lm(incorrect$residuals~data$X2),lwd=2)
abline(h=0,lty=2)
abline(v=0,lty=2)
dev.off()
#######################################################
# INTERACTIONS....
#######################################################
# Simulations for pictures....
#
# Two dummy predictors:
# DGP includes a D1*D2 interaction; the two box plots highlight the
# effect of changing D1 (first plot) vs. D2 (second plot).
set.seed(7222009)
N<-400
D1<-rep(c(0,1),times=N/2)
D2<-rep(c(0,0,1,1),times=N/4)
Y <- rnorm(N,(20-10*D2+10*D1+20*D1*D2),2)
df<-data.frame(D1=D1,D2=D2,Y=Y)
pdf("TwoDummyBoxPlotsRD1.pdf",6,6)
par(mar=c(4,4,2,2))
with(df, boxplot(Y~D2+D1,xaxt="n",xlab="Values of D1,D2"))
axis(1,at=c(1,2,3,4),
     labels=c("D1=0, D2=0","D1=0, D2=1",
              "D1=1, D2=0","D1=1, D2=1"))
arrows(1,median(df$Y[which(df$D1==0 & df$D2==0)]),
       3,median(df$Y[which(df$D1==1 & df$D2==0)]),
       lwd=2,length=0.10,col="red")
arrows(2,median(df$Y[which(df$D1==0 & df$D2==1)]),
       4,median(df$Y[which(df$D1==1 & df$D2==1)]),
       lwd=2,length=0.10,col="red")
legend("topleft",bty="n",legend="E(Y) | change in D1",col="red")
dev.off()
pdf("TwoDummyBoxPlotsRD2.pdf",6,6)
par(mar=c(4,4,2,2))
with(df, boxplot(Y~D2+D1,xaxt="n",xlab="Values of D1,D2"))
axis(1,at=c(1,2,3,4),
     labels=c("D1=0, D2=0","D1=0, D2=1",
              "D1=1, D2=0","D1=1, D2=1"))
arrows(1,median(df$Y[which(df$D1==0 & df$D2==0)]),
       2,median(df$Y[which(df$D1==0 & df$D2==1)]),
       lwd=2,length=0.10)
arrows(3,median(df$Y[which(df$D1==1 & df$D2==0)]),
       4,median(df$Y[which(df$D1==1 & df$D2==1)]),
       lwd=2,length=0.10)
legend("topleft",bty="n",legend="E(Y) | change in D2")
dev.off()
# Dummy + continuous:
# Four DGPs: same line for both groups (Y1), intercept shift only (Y2),
# slope shift only (Y3), and both intercept and slope shifts (Y4).
set.seed(7222009)
N<-200
D<-rep(c(0,1),times=N/2)
X<-rnorm(N,0,5)
color<-ifelse(D==0,"black","red")
df2<-data.frame(D=D,X=X,color=color,
                stringsAsFactors=FALSE)
df2$Y1 <- 50+2*df2$X+3*rnorm(N)
df2$Y2 <- 50+2*df2$X+30*df2$D+3*rnorm(N)
df2$Y3 <- 50+2*df2$X-(4*df2$D*df2$X)+3*rnorm(N)
df2$Y4 <- 50+2*df2$X+30*df2$D-(4*df2$D*df2$X)+3*rnorm(N)
pdf("ScatterInterSameR.pdf",6,6)
par(mar=c(4,4,2,2))
with(df2, plot(X,Y1,pch=D+16,col=color,ylab="Y"))
legend("topleft",bty="n",legend=c("D=0","D=1"),
       pch=c(16,17),col=c("black","red"))
abline(with(df2[df2$D==0,],lm(Y1~X)),col="black",lwd=2)
abline(with(df2[df2$D==1,],lm(Y1~X)),col="red",lwd=2)
abline(v=0,lty=2)
dev.off()
pdf("ScatterInterInterceptR.pdf",6,6)
par(mar=c(4,4,2,2))
with(df2, plot(X,Y2,pch=D+16,col=color,ylab="Y"))
legend("topleft",bty="n",legend=c("D=0","D=1"),
       pch=c(16,17),col=c("black","red"))
abline(with(df2[df2$D==0,],lm(Y2~X)),col="black",lwd=2)
abline(with(df2[df2$D==1,],lm(Y2~X)),col="red",lwd=2)
abline(v=0,lty=2)
dev.off()
pdf("ScatterInterSlopeR.pdf",6,6)
par(mar=c(4,4,2,2))
with(df2, plot(X,Y3,pch=D+16,col=color,ylab="Y"))
legend("topright",bty="n",legend=c("D=0","D=1"),
       pch=c(16,17),col=c("black","red"))
abline(with(df2[df2$D==0,],lm(Y3~X)),col="black",lwd=2)
abline(with(df2[df2$D==1,],lm(Y3~X)),col="red",lwd=2)
abline(v=0,lty=2)
dev.off()
pdf("ScatterInterBothR.pdf",6,6)
par(mar=c(4,4,2,2))
with(df2, plot(X,Y4,pch=D+16,col=color,ylab="Y"))
legend("topright",bty="n",legend=c("D=0","D=1"),
       pch=c(16,17),col=c("black","red"))
abline(with(df2[df2$D==0,],lm(Y4~X)),col="black",lwd=2)
abline(with(df2[df2$D==1,],lm(Y4~X)),col="red",lwd=2)
abline(v=0,lty=2)
dev.off()
# Two continuous: Wireframe plots...
# Deterministic response surfaces with and without an X1*X2 product term.
df3<-expand.grid(X1=seq(0,10,1),
                 X2=seq(0,10,1))
df3$YNoInt<-10 + 2*df3$X1 + 2*df3$X2
df3$YInt <-(10 - 2*df3$X1 - 2*df3$X2 + 4*df3$X1*df3$X2)/5
trellis.par.set("axis.line",list(col="transparent"))
pdf("TwoContinuousNoInteractiveR.pdf",6,6)
par(mar=c(4,4,2,2))
with(df3, wireframe(YNoInt~X1+X2,
                    drape=TRUE,
                    xlab=list("X1",rot=30),
                    ylab=list("X2",rot=-40),
                    zlab=list("Y",rot=90),
                    scales=list(arrows=FALSE,col="black"),
                    zoom=0.85,pretty=TRUE,
                    col.regions=colorRampPalette(c("blue","red"))(100)))
dev.off()
pdf("TwoContinuousInteractiveR.pdf",6,6)
par(mar=c(4,4,2,2))
with(df3, wireframe(YInt~X1+X2,
                    drape=TRUE,
                    xlab=list("X1",rot=30),
                    ylab=list("X2",rot=-40),
                    zlab=list("Y",rot=90),
                    scales=list(arrows=FALSE,col="black"),
                    zoom=0.85,pretty=TRUE,
                    col.regions=colorRampPalette(c("blue","red"))(100)))
dev.off()
# Polynomials...
# Two quadratic DGPs (opposite curvature) and one cubic, each fit with
# the matching polynomial regression; data are sorted on X so fitted()
# values can be drawn with lines().
N<-200
set.seed(7222009)
df4 <- data.frame(X = runif(N,-5,5))
df4$Y2A <- 10 + 1*df4$X - 5*(df4$X^2) + rnorm(N,0,10) # Quad #1
df4$Y2B <- -50 - 1*df4$X + 3*(df4$X^2) + rnorm(N,0,10) # Quad #2
df4$Y3 <- -8 - 6*df4$X + 3*(df4$X^2) + 1*(df4$X^3) + rnorm(N,0,10) # Cubic
df4 <- df4[order(df4$X),]
fitA<-with(df4, lm(Y2A~X+I(X^2)))
fitB<-with(df4, lm(Y2B~X+I(X^2)))
fit3<-with(df4, lm(Y3~X+I(X^2)+I(X^3)))
pdf("TwoQuadraticsR.pdf",7,6)
par(mar=c(4,4,2,2))
with(df4, plot(X,Y2B,pch=16,col="black",ylab="Y",
               ylim=c(min(df4$Y2B),max(df4$Y2A))))
points(df4$X,df4$Y2A,pch=17,col="red")
lines(df4$X,fitted(fitA),lwd=3,col="red")
lines(df4$X,fitted(fitB),lwd=3,col="black")
dev.off()
pdf("CubicR.pdf",7,6)
par(mar=c(4,4,2,2))
with(df4, plot(X,Y3,pch=16,col="black",ylab="Y"))
lines(df4$X,fitted(fit3),lwd=3,col="black")
dev.off()
# Three-way interaction sim:
# One data set per (D1,D2) cell, each with a different slope on X;
# the 2x2 panel illustrates a slope that varies with both dummies.
N <- 100
X <- runif(N,-5,5)
df00<-data.frame(X=X)
df01<-data.frame(X=X)
df10<-data.frame(X=X)
df11<-data.frame(X=X)
set.seed(7222009)
df00$Y<-0.5*df00$X+rnorm(N)
df01$Y<-2*df01$X+rnorm(N)
df10$Y<- -0.5*df10$X+rnorm(N)
df11$Y<- -2*df11$X+rnorm(N)
fit00<-lm(Y~X,data=df00)
fit01<-lm(Y~X,data=df01)
fit10<-lm(Y~X,data=df10)
fit11<-lm(Y~X,data=df11)
hi<-12
lo<- -12
pdf("TwoDummyOneContinuousR.pdf",7,6)
par(mfrow=c(2,2))
par(mar=c(4,4,4,2))
with(df00, plot(X,Y,main="D1=0,D2=0",ylab="Y",
                ylim=c(lo,hi),pch=20))
abline(h=0,lty=2)
abline(reg=fit00,lwd=2)
with(df01, plot(X,Y,main="D1=0,D2=1",ylab="Y",
                ylim=c(lo,hi),pch=20))
abline(h=0,lty=2)
abline(reg=fit01,lwd=2)
with(df10, plot(X,Y,main="D1=1,D2=0",ylab="Y",
                ylim=c(lo,hi),pch=20))
abline(h=0,lty=2)
abline(reg=fit10,lwd=2)
with(df11, plot(X,Y,main="D1=1,D2=1",ylab="Y",
                ylim=c(lo,hi),pch=20))
abline(h=0,lty=2)
abline(reg=fit11,lwd=2)
dev.off()
#######################################################
# Actual data:
# ANES-style Clinton feeling-thermometer data, fetched from GitHub.
temp<-getURL("https://raw.githubusercontent.com/PrisonRodeo/PLSC503-2021-git/master/Data/ClintonTherm.csv")
ClintonTherm<-read.csv(text=temp, header=TRUE)
rm(temp)
summary(ClintonTherm)
summary(with(ClintonTherm, lm(ClintonTherm~RConserv+GOP)))
# Interactive model: effect of conservatism allowed to differ by party.
fit1<-with(ClintonTherm, lm(ClintonTherm~RConserv+GOP+RConserv*GOP))
summary(fit1)
# Plot of thermometer scores vs. conservatism:
pdf("ClinThermScatterR.pdf",6,6)
scatterplot(ClintonTherm$ClintonTherm~ClintonTherm$RConserv|as.factor(ClintonTherm$GOP),
            legend.plot=FALSE,
            xlab="Respondent Conservatism",
            ylab="Clinton Thermometer Score",
            smooth=FALSE,boxplots=FALSE,
            pch=c(4,16),col=c("red","blue","red","blue"),
            lwd=2,grid=FALSE)
dev.off()
# Separate regressions:
NonReps<-subset(ClintonTherm,GOP==0)
summary(with(NonReps, lm(ClintonTherm~RConserv)))
Reps<-subset(ClintonTherm,GOP==1)
summary(with(Reps, lm(ClintonTherm~RConserv)))
# psi_1:
# marginal effect of RConserv when GOP = 1, with its delta-method SE
Psi1<-fit1$coeff[2]+fit1$coeff[4]
Psi1
SPsi1<-sqrt(vcov(fit1)[2,2] + (1)^2*vcov(fit1)[4,4] + 2*1*vcov(fit1)[2,4])
SPsi1
Psi1 / SPsi1 # <-- t-statistic
# psi_2 | RConserv = 1
fit1$coeff[3]+(1 * fit1$coeff[4])
sqrt(vcov(fit1)[3,3] + (1)^2*vcov(fit1)[4,4] + 2*1*vcov(fit1)[3,4])
# Implies t is approximately 2
# psi_2 | RConserv = 7
fit1$coeff[3]+(7 * fit1$coeff[4])
sqrt(vcov(fit1)[3,3] + (7)^2*vcov(fit1)[4,4] + 2*7*vcov(fit1)[3,4])
# t is approximately 11
# Using linearHypothesis:
linearHypothesis(fit1,"RConserv+RConserv:GOP")
# Note: Same as t-test:
sqrt(72.99)
# psi_2 | RConserv = 7:
linearHypothesis(fit1,"GOP+7*RConserv:GOP")
# MFX / psi plots:
# marginal effect of GOP across respondent conservatism, with 95% bounds
ConsSim<-seq(1,7,1)
psis<-fit1$coeff[3]+(ConsSim * fit1$coeff[4])
psis.ses<-sqrt(vcov(fit1)[3,3] +
    (ConsSim)^2*vcov(fit1)[4,4] + 2*ConsSim*vcov(fit1)[3,4])
pdf("ClinMFX1.pdf",7,6)
par(mar=c(4,4,2,2))
plot(ConsSim,psis,t="l",lwd=2,xlab="Respondent Conservatism",
     ylab="Estimated Marginal Effect",ylim=c(-40,0))
lines(ConsSim,psis+(1.96*psis.ses),lty=2,lwd=2)
lines(ConsSim,psis-(1.96*psis.ses),lty=2,lwd=2)
abline(h=0,lwd=1,lty=2)
dev.off()
# Continuous covariates:
# Interaction between two continuous variables: respondent's and
# (perceived) Clinton's conservatism.
fit2<-with(ClintonTherm,
           lm(ClintonTherm~RConserv+ClintonConserv+RConserv*ClintonConserv))
summary(fit2)
# Hypothesis tests:
fit2$coef[2]+(1*fit2$coef[4])
sqrt(vcov(fit2)[2,2] + (1)^2*vcov(fit2)[4,4] + 2*1*vcov(fit2)[2,4])
linearHypothesis(fit2,"RConserv+1*RConserv:ClintonConserv")
# More hypothesis tests:
# psi_1 | ClintonConserv = mean
fit2$coef[2]+((mean(ClintonTherm$ClintonConserv))*fit2$coef[4])
sqrt(vcov(fit2)[2,2] + (mean(ClintonTherm$ClintonConserv)^2*vcov(fit2)[4,4] +
   2*(mean(ClintonTherm$ClintonConserv))*vcov(fit2)[2,4]))
pt(((fit2$coef[2]+(2.985*fit2$coef[4])) / sqrt(vcov(fit2)[2,2] +
  (2.985)^2*vcov(fit2)[4,4] + 2*2.985*vcov(fit2)[2,4])),df=1293)
# psi_2 | RConserv = 1
fit2$coef[3]+(1*fit2$coef[4])
# psi_2 | RConserv = 6
fit2$coef[3]+(6*fit2$coef[4])
# Marginal Effect Plot II:
psis2<-fit2$coef[3]+(ConsSim*fit2$coef[4])
psis2.ses<-sqrt(vcov(fit2)[3,3] + (ConsSim)^2*vcov(fit2)[4,4]
   + 2*ConsSim*vcov(fit2)[3,4])
pdf("ClinMFX2.pdf",6,6)
plot(ConsSim,psis2,t="l",lwd=2,xlab="Respondent's Conservatism",
     ylab="Marginal Effect of Clinton's
     Conservatism",ylim=c(-10,20))
lines(ConsSim,psis2+(1.96*psis2.ses),lty=2,lwd=2)
lines(ConsSim,psis2-(1.96*psis2.ses),lty=2,lwd=2)
abline(h=0,lty=2,lwd=1,col="red")
dev.off()
# Contour Plot:
# predicted thermometer scores over the 7x7 grid of both covariates
grid<-expand.grid(RConserv=seq(1,7,1),
                  ClintonConserv=seq(1,7,1))
hats<-predict(fit2,newdata=grid)
pdf("ClinContour.pdf",6,6)
levelplot(hats~grid$RConserv*grid$ClintonConserv,
          contour=TRUE,
          cuts=12,pretty=TRUE,xlab="Respondent's Conservatism",
          ylab="Clinton's Conservatism",
          col.regions=heat.colors)
dev.off()
# Wireframe plot:
trellis.par.set("axis.line",list(col="transparent"))
pdf("ClinWireframe.pdf",7,7)
wireframe(hats~grid$RConserv*grid$ClintonConserv,
          drape=TRUE,
          xlab=list("Respondent's Conservatism",rot=30),
          ylab=list("Clinton's Conservatism",
          rot=-40),zlab=list("Predictions",rot=90),
          scales=list(arrows=FALSE,col="black"),
          zoom=0.85,pretty=TRUE,
          col.regions=colorRampPalette(c("blue","red"))(100))
dev.off()
|
85999b47eeb56b40543b3684372f3a248d2ed874 | f0c7b2b3589a9f0e52c9f6e8b0982020cec80f19 | /codes/shim.wo.quad.original.r | 9a4c4edae4a13f9df444bdc125bfe09e1a8bd5a7 | [] | no_license | Kedong/VSSH | 1e91198412b6483a342e8000406c735bd20f09fd | 094221a0e65a6c31af2c102fa1be4108161e1629 | refs/heads/master | 2021-01-10T16:45:04.218840 | 2017-11-16T23:10:51 | 2017-11-16T23:10:51 | 50,731,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,131 | r | shim.wo.quad.original.r | ##
## NOTE:
## Please see "Example" at the bottom before running the code.
##
##
## main function: shim
##
## ---- Input Variables ----
## x: a matrix containing main terms
## lambda1: regularization parameter for betas
## lambda2: regularization parameter for gammas
## phi.weight: a vector containing adaptive weights
## phi.prev: can plug in coefficient estimates obtained before if any.
## e.g. when fitting a model on a grid of lambdas,
## can use the coeff estimates obtained for the previous pair of (lambda1, lambda2) for faster convergence.
## This will be used as initial values for the iteration.
## Default is to use the OLS estimates from simple linear regression models with each individual predictor.
##
## ---- Stopping criterion ----
## The iteration stops when
## 1) differ = abs(crit.new - crit.old) / abs(crit.old) < eps2
## (when it converged)
## OR,
## 2) the number of iterations reached at max.iter
## (when it didn't converge by then.. an warning will appear in this case)
##
##
## main function: shim
##
# Fit the SHIM (strong heredity interaction model) by alternating
# approximations: interactions are parameterized as
# delta_{jk} = gamma_{jk} * beta_j * beta_k, with an (adaptive) L1
# penalty lambda1 on the betas and lambda2 on the gammas.
#
# Arguments:
#   x          - n x p matrix of main-effect predictors
#   y          - response vector (length n)
#   lambda1    - penalty on main-effect coefficients (beta)
#   lambda2    - penalty on interaction multipliers (gamma)
#   phi.weight - optional length p+K vector of adaptive weights
#   phi.prev   - optional warm-start coefficients c(beta, gamma), e.g.
#                from the previous point on a lambda grid
#   max.iter   - iteration cap (warns if reached)
#   eps1       - numeric floor to avoid division by ~0
#   eps2       - relative-change convergence tolerance on the criterion
#
# Returns: list(phi, phi.prev, iter) where phi = c(intercept, beta,
#   delta) on the ORIGINAL data scale, phi.prev = c(beta, gamma) on the
#   standardized scale (for warm starts), iter = iterations used.
shim <- function(x, y, lambda1, lambda2, phi.weight=NULL, phi.prev=NULL, max.iter=10^4, eps1 = 10^(-10), eps2 = 10^(-4)){
np <- dim(x)
n <- np[1]
p <- np[2]
K <- (p-1)*p/2
## index set for interactions ##
# jk1[i], jk2[i] are the main-effect indices of the i-th interaction
jk1 <- jk2 <- numeric(K)
i <- 1
for(j in 1:(p-1)){
for(k in (j+1):p){
jk1[i] <- j; jk2[i] <- k
i <- i + 1
}
}
## calculating interaction terms ##
xx = NULL
for(j in 1:K){
xx = cbind(xx, x[,jk1[j]]*x[,jk2[j]])
}
## Center y ##
meany <- mean(y)
y <- y - meany
## Standardize x ##
# center and scale both main effects and interaction columns; the
# centers/norms are kept to undo the standardization at the end
meanx <- apply(x, 2, mean)
x <- scale(x, center=meanx, scale=F)
normx <- sqrt(apply(x^2, 2, mean))
x <- scale(x, center=F, scale=normx)
meanxx <- apply(xx, 2, mean)
xx <- scale(xx, center=meanxx, scale=F)
normxx <- sqrt(apply(xx^2, 2, mean))
xx <- scale(xx, center=F, scale=normxx)
## Initial Estimate ##
# default start: marginal OLS slopes; otherwise use the warm start
if(is.null(phi.prev)){
beta.old <- t(x) %*% y / apply(x^2, 2, sum)
delta.old <- t(xx) %*% y / apply(xx^2, 2, sum)
bb <- beta.old[jk1] * beta.old[jk2]
bb[ abs(bb) <= eps1 ] <- eps1
gamma.old <- delta.old / bb
}else{
beta.old <- phi.prev[1:p]
gamma.old <- phi.prev[(p+1):(p+K)]
delta.old <- gamma.old * beta.old[jk1] * beta.old[jk2]
}
## Adaptive Weights ##
# weights for gamma are derived from the delta weights; floors at eps1
# keep the penalty ratios finite
if(is.null(phi.weight)){
phi.weight <- rep(1, p+K)
}
beta.w <- phi.weight[1:p]
delta.w <- phi.weight[(p+1):(p+K)]
beta.w[ beta.w <= eps1 ] <- eps1
bb.w <- beta.w[jk1] * beta.w[jk2]
gamma.w <- delta.w / bb.w
gamma.w[ gamma.w <= eps1 ] <- eps1
## Index sets needed for subfunction entire.beta ##
# For each main effect J: NEWIDX1[,J] = interactions involving J,
# NEWIDX2[,J] = interactions NOT involving J, with IDX1/IDX2 holding
# their main-effect index pairs.
NEWIDX1 <- matrix(NA, p-1, p)
NEWIDX2 <- matrix(NA, K-p+1, p)
IDX1 <- IDX2 <- matrix(NA, K-p+1, p)
idx <- seq(1:p)
for(J in 1:p){
if(J==1){
idx1 <- rep(1, p-1)
idx2 <- c(2:p)
} else if(J==p){
idx1 <- c(1:(p-1))
idx2 <- rep(p, p-1)
} else{
idx1 <- c(1:J, rep(J, p-1-J))
idx2 <- c(rep(J, J-1),c((J+1):p))
}
NEWIDX1[,J] <- (idx1-1)*(p-idx1/2) + (idx2-idx1)
idxJ <- idx[-J]
i <- 1
for(j in 1:(p-2)){
for(k in (j+1):(p-1)){
IDX1[i,J] <- idxJ[j]
IDX2[i,J] <- idxJ[k]
i <- i + 1
}
}
idx1 <- IDX1[,J]
idx2 <- IDX2[,J]
NEWIDX2[,J] <- (idx1-1)*(p-idx1/2) + (idx2-idx1)
}
##### Iterative Approximation ######
# Alternate: (1) update gamma given beta via solve.gam, (2) update each
# beta_J given the rest via entire.beta, until the penalized criterion
# changes by less than eps2 (relative).
iter <- 0
differ <- 1
crit.old <- crossprod(y - x%*%beta.old - xx%*%delta.old) + lambda1*sum(abs(beta.old)/beta.w) + lambda2*sum(abs(gamma.old)/gamma.w)
while(differ > eps2){
################################
### STEP 1: solve for \gamma ###
################################
bb <- beta.old[jk1] * beta.old[jk2]
bb[ abs(bb) < eps1 ] <- eps1
## Construct a new X ##
newx <- rep(bb, each=n) * xx
## Construct a new Y ##
newy <- y - x %*% beta.old
## Calculate a new gamma ##
gamobj <- solve.gam(newx, newy, lambda2, gamma.w)
gamma.new <- gamobj$phi.new
###############################
### STEP 2: solve for \beta ###
###############################
beta.new <- beta.old
beta.new <- sapply(1:p, entire.beta, beta.new, gamma.new, x, xx, y, lambda1, beta.w, NEWIDX1, NEWIDX2, IDX1, IDX2, n)
delta.new <- gamma.new*beta.new[jk1]*beta.new[jk2]
crit.new <- crossprod(y - x%*%beta.new - xx%*%delta.new) + lambda1*sum(abs(beta.new)/beta.w) + lambda2*sum(abs(gamma.new)/gamma.w)
differ <- abs(crit.new - crit.old) / abs(crit.old)
crit.old <- crit.new
beta.old <- beta.new
gamma.old <- gamma.new
iter <- iter + 1
if(iter >= max.iter){
cat("Warning: the whole iteration did not converge.. \n")
break
}
} ## end while
# keep the un-thresholded coefficients for warm-starting the next fit
phi.prev <- c(beta.new, gamma.new)
gamma.new[abs(gamma.new) <= eps1] <- 0
beta.new[abs(beta.new) <= eps1] <- 0
## Rescale the coeff estimates back into the original scale ##
beta <- beta.new
delta <- gamma.new * beta.new[jk1] * beta.new[jk2]
beta <- beta / normx
delta <- delta / normxx
beta0 <- meany - sum(beta*meanx) - sum(delta*meanxx)
phi <- c(beta0, beta, delta)
return( list(phi=phi, phi.prev=phi.prev, iter=iter) )
}
##
## subfunctions
##
solve.gam <- function(x, y, lambda, gam, max.iter=10^4, eps1=10^(-10), eps2=10^(-4)) {
  # Gamma-step solver for SHIM: minimizes ||y - x phi||^2 +
  # lambda * sum(|phi_j| / gam_j) by a local quadratic (ridge-style)
  # approximation of the weighted L1 penalty, iterating
  #   phi <- (x'x + lambda * D)^{-1} x'y,  D = diag(1 / |phi_j * gam_j|),
  # until the relative change in the objective falls below eps2.
  gram <- t(x) %*% x
  xy <- t(x) %*% y
  # start from the marginal (one-predictor-at-a-time) LS estimates
  est <- xy / apply(x^2, 2, sum)
  penalty.mat <- diag( length(gam) )
  obj.prev <- crossprod(y - x%*%est) + lambda*sum(abs(est)/gam)
  step <- 1
  repeat {
    wts <- est*gam
    wts[ abs(wts) < eps1 ] <- eps1   # floor to avoid dividing by ~0
    diag(penalty.mat) <- as.vector( 1 / abs(wts) )
    est.next <- qr.solve(gram + lambda * penalty.mat, tol=1e-10) %*% xy
    obj.cur <- crossprod(y - x%*%est.next) + lambda*sum(abs(est.next)/gam)
    rel.change <- sum(abs(obj.cur - obj.prev)) / sum(abs(obj.prev))
    obj.prev <- obj.cur
    est <- est.next
    step <- step + 1
    if (step >= max.iter) {
      cat("... gamma did not converge ...\n")
      break
    }
    if (rel.change <= eps2) break
  }
  list(phi.new = est.next)
}
entire.beta <- function(J, beta.new, gamma.new, x, xx, y, lambda1, beta.w, NEWIDX1, NEWIDX2, IDX1, IDX2, n){
  # Coordinate-descent update for a single main-effect coefficient beta_J,
  # holding the other betas and the interaction coefficients (gamma) fixed.
  # The IDX/NEWIDX matrices map main-effect index J to the interaction
  # columns of xx that involve J (NEWIDX1) or exclude J (NEWIDX2/IDX1/IDX2).
  #
  # Returns: the updated scalar beta_J from the 1-D adaptive-lasso solve.
  ## calculate newx: working covariate that multiplies beta_J in the model ##
  newidx <- NEWIDX1[,J]
  # drop = FALSE keeps matrix shape when an index vector has length 1
  # (e.g. p == 2), where plain [, idx] would collapse to a vector and
  # rowSums() below would error.
  newx <- rep((gamma.new[newidx]*beta.new[-J]), each=n)*xx[, newidx, drop = FALSE]
  newx <- x[,J] + rowSums(newx)
  ## calculate newy: residual with every beta_J-free term removed ##
  idx1 <- IDX1[,J]
  idx2 <- IDX2[,J]
  newidx2 <- NEWIDX2[,J]
  bb.mJ <- beta.new[idx1]*beta.new[idx2]
  gamma.mJ <- gamma.new[newidx2]
  xx.mJ <- xx[, newidx2, drop = FALSE]
  newy <- y - rowSums( rep(beta.new[-J], each=n) * x[, -J, drop = FALSE] )
  newy <- newy - rowSums( rep((gamma.mJ*bb.mJ), each=n) * xx.mJ )
  ## one-dimensional adaptive-lasso solve for beta_J ##
  beta.newJ <- solve.betaJ(newx, newy, lambda1, beta.w[J])
  return(beta.newJ)
}
solve.betaJ <- function(x, y, lambda, bet){
  # One-dimensional adaptive-lasso update: soft-threshold the inner product
  # <x, y> at lambda / (2 * bet) and scale by ||x||^2.
  inner <- sum(x * y)
  norm2 <- sum(x^2)
  thresh <- lambda / (2 * bet)
  if (abs(inner) <= thresh) {
    return(0)
  }
  sign(inner) * (abs(inner) - thresh) / norm2
}
|
6b18a7317f7808b943d2fc7b99ab4344114b555b | a5ebd749bbaa2e377ee810334bdded8e3d9794c6 | /helper.R | 3058d4ee4d1b5348dc0f8cc59ad643fffedbedc0 | [] | no_license | aariani/AnnotWizR | 085426a7ce5ac4282faaa9f494c0408702cf0f2a | 8cbd6b25df04269b21d4f48cb03218adf3682f4f | refs/heads/master | 2021-01-23T13:30:57.423767 | 2017-11-09T21:02:58 | 2017-11-09T21:02:58 | 102,667,516 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 691 | r | helper.R | # helper functions
get_SNPs_Ranges = function(file_in){
  # Build a GRanges object from a comma-separated SNP association file.
  #
  # Args:
  #   file_in: path to a CSV with (at least) columns CHR, POS and P.
  #
  # Returns: a GRanges with one single-position range per SNP
  #   (start == end == POS) and the p-values attached as metadata "scores".
  #   NOTE(review): requires GenomicRanges/IRanges to be attached by the
  #   caller; they are not loaded here.
  SNPs_info = read.table(file_in, sep = ',', header = TRUE,   # TRUE, not T: T is reassignable
                         stringsAsFactors = FALSE)
  SNPs_data = GRanges(seqnames = SNPs_info$CHR,
                      ranges = IRanges(start = SNPs_info$POS, end = SNPs_info$POS),
                      scores = SNPs_info$P)
  SNPs_data
}
get_Windows_range = function(file_in){
  # Build a GRanges object of genomic windows from a comma-separated file.
  #
  # Args:
  #   file_in: path to a CSV with columns CHR, START and END.
  #
  # Returns: a GRanges spanning [START, END] per window.
  #   NOTE(review): requires GenomicRanges/IRanges to be attached by the caller.
  #
  # Fix: the original called read.table(file_in, sep-',', header=T), which
  # evaluates `sep - ','` (numeric minus character) and errors at runtime;
  # the intended argument is sep = ','.
  window_info = read.table(file_in, sep = ',', header = TRUE,
                           stringsAsFactors = FALSE)
  window_data = GRanges(seqnames = window_info$CHR,
                        ranges = IRanges(start = window_info$START, end = window_info$END))
  window_data
}
615b44f664f8f17605275e1f4a8b4ef0a6a9062c | bf93f409d6da8f1050c1c4d4b9be9bcb5634e685 | /code/mSimple.R | ca6758db2ebbd502722ddef5f4b901167fb4aa15 | [] | no_license | zouyuxin/GTEx | b28baa22b7f6338c60d573ea713f26e966469f44 | 20d45f3e15427ab6129395af5a4211202726ba61 | refs/heads/master | 2020-04-12T17:13:20.452499 | 2020-02-20T15:25:19 | 2020-02-20T15:25:19 | 162,638,387 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 432 | r | mSimple.R | library(mashr)
# Fit a mash model to GTEx v6 summary statistics and save the fitted object.
gtex <- readRDS('gtexv6.rds')
# NOTE(review): data.strong is built but never used below -- presumably kept
# for a companion script; confirm before removing.
data.strong = mash_set_data(gtex$strong.b, gtex$strong.s)
data.random = mash_set_data(gtex$random.b, gtex$random.s)
# Covariance collection: canonical matrices plus precomputed ED matrices.
U.c = cov_canonical(data.random)
U.ed = readRDS('Ued.rds')
# "Simple" null-correlation estimate from the random subset, applied to the
# data object before fitting.
V.simple = estimate_null_correlation_simple(data.random)
data.random.V.simple = mash_update_data(data.random, V = V.simple)
# outputlevel = 1: keep only the fitted model (no posterior summaries).
saveRDS(mash(data.random.V.simple, c(U.c, U.ed), outputlevel = 1), 'm_simple.rds')
|
0e2c5a3d075611d99c19b8f66693aa87ac9c629c | 0ea96e7b4d16c6595415d657103f7a2391421706 | /tests/testthat/test_make_hicexp.R | 058d246dc0c9f21af14b41d5157aae025f844820 | [
"MIT"
] | permissive | dozmorovlab/multiHiCcompare | c913334612e9b6fc8807341d370f11715ed51da0 | dcfe4aaa8eaef45e203f3d7f806232bb613d2c9b | refs/heads/master | 2022-04-29T02:56:04.486050 | 2022-04-22T01:43:22 | 2022-04-22T01:43:22 | 144,622,761 | 4 | 7 | NOASSERTION | 2020-03-30T13:53:16 | 2018-08-13T19:10:22 | R | UTF-8 | R | false | false | 1,442 | r | test_make_hicexp.R | test_that('make_hicexp works', {
  # Fixture: six replicate Hi-C matrices bundled with the package.
  data("HCT116_r1", "HCT116_r2", "HCT116_r3", "HCT116_r4", "HCT116_r5","HCT116_r6")
  groups <- c(1, 1, 1, 2, 2, 2)
  # NOTE(review): 'mobi' looks like a typo for 'mboi' (MboI enzyme); it only
  # affects the covariate label, not what this test asserts -- confirm.
  covariates <- data.frame(enzyme = c('mobi', 'mboi', 'mboi', 'dpnii',
                                     'dpnii', 'dpnii'),
                           batch = c(1, 2, 1, 2, 1, 2))
  # Happy path: constructor returns a Hicexp object.
  hicexp_new <- make_hicexp(HCT116_r1, HCT116_r2, HCT116_r3, HCT116_r4, HCT116_r5, HCT116_r6,
                        groups = groups, covariates = covariates)
  expect_is(hicexp_new, "Hicexp")
  # test for errors on wrong input: groups length must match the number of
  # data objects (checked both with too few objects and too few groups)
  expect_error(make_hicexp(HCT116_r2, HCT116_r3, HCT116_r4, HCT116_r5, HCT116_r6,
                           groups = groups, covariates = covariates),
               "Length of groups must equal the number of Hi-C data objects entered")
  expect_error(make_hicexp(HCT116_r1, HCT116_r2, HCT116_r3, HCT116_r4, HCT116_r5, HCT116_r6,
                           groups = c(1,1,1,2,2),
                           covariates = covariates),
               "Length of groups must equal the number of Hi-C data objects entered")
  # providing list or data give same results
  dat <- list(HCT116_r1, HCT116_r2, HCT116_r3, HCT116_r4)
  hicexp1 <- make_hicexp(HCT116_r1, HCT116_r2, HCT116_r3, HCT116_r4,
                         groups = c(1,1,2,2))
  hicexp2 <- make_hicexp(data_list = dat, groups = c(1,1,2,2))
  expect_equal(hicexp1, hicexp2)
})
|
603faa00ce1b5f92b82c42275e1bcd4a9f745b95 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/eha/examples/toTime.Rd.R | 3fc29164ac6d424604daca84799ac727708d5cbb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | toTime.Rd.R | library(eha)
### Name: toTime
### Title: Calculate duration in years from "0000-01-01" to a given date
### Aliases: toTime
### Keywords: survival
### ** Examples
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
toTime(c("1897-05-16", "1901-11-21"))
|
81359f50b37f50aa54e706222d7d949ddc992153 | dda965daf7a75a5738ae28a5c598953d552f9dcd | /R/EM.R | a7bbef5152b69371ddc03cc601b08a57ec26cb1c | [] | no_license | weidewind/evolikelihood | db56aff18296f74bab9ef3781da2897479d0b2cc | aa144e3d6361c778b0dbbf0935668c3210cd9a65 | refs/heads/master | 2020-05-20T23:51:05.500547 | 2016-08-04T13:13:31 | 2016-08-04T13:13:31 | 56,065,055 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,034 | r | EM.R | list.of.packages <- c("scatterplot3d", "parallel")
# Install any missing CRAN dependencies (list.of.packages is defined above).
new.packages <- setdiff(list.of.packages, installed.packages()[,"Package"])
if(length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org')
#Don’t use library() or require(). These modify the search path, affecting what functions are available from the global environment.
#It’s better to use the DESCRIPTION to specify your package’s requirements
# NOTE(review): tracer() below calls scatterplot3d() unqualified; with no
# library() call here, the package must be attached by the caller (or the
# call should be scatterplot3d::scatterplot3d) -- confirm.
#library(scatterplot3d)
## prepare data
## clusterize observed MLE parameters
## construct rk vectors (1/0) for EM (based on cluster membership)
## find initial a (and b) parameters for EM (maximisation)
## compute initial weights based on (1/0) rk vectors (expectation)
## returns list with two values: vector of weights and matrix of parameters
initialize_by_clustering <- function (data, params, mutation_position = "middle", cluster.number = 4, model = NULL){
  # EM initialization via k-means on the per-node MLE parameters.
  # Hard cluster memberships (0/1) become the initial responsibility vectors,
  # from which per-category parameters and mixture weights are derived.
  if (model == "weibull"){
    # Rescale lambda so both coordinates span a comparable range for k-means.
    rate <- params[, "lambda_weib_root"]
    shape <- params[, "p_root"]
    features <- data.frame(lambda_weib_root = (max(shape) / max(rate)) * rate,
                           p_root = shape)
  }
  else {
    features <- data.frame(lambda_exp_root = params[, c("lambda_exp_root")])
  }
  km <- kmeans(features, cluster.number)
  # Hard membership matrix: rows = nodes, columns = categories, entries 0/1.
  rkvectors <- 1 * outer(km$cluster, seq_len(cluster.number), `==`)
  rownames(rkvectors) <- params$node
  init_params <- compute_params(data = data, rkvectors = rkvectors, model = model, mutation_position = mutation_position)
  init_weights <- compute_weights(rkvectors)
  list(iparameters = init_params, iweights = init_weights)
}
## alternative: choose random a (and b)
## set equal weights for all categories
## returns list with two values: vector of weights and matrix of parameters
initialize_random <- function (params, cluster.number = 4, model = NULL){
  # EM initialization with random starting points: each parameter is drawn
  # log-uniformly between the smallest and largest per-node MLE value.
  # Weights start equal across categories.
  draw_loguniform <- function(values) {
    exp(runif(cluster.number, min = min(log(values)), max = max(log(values))))
  }
  if (model == "weibull"){
    # Draw order (p first, then lambda) matches the original implementation
    # so results are identical under a fixed RNG seed.
    p_draw <- draw_loguniform(params[, "p_root"])
    lambda_draw <- draw_loguniform(params[, "lambda_weib_root"])
    init_params <- cbind(lambda = lambda_draw, p = p_draw)
  }
  else if (model == "exponential"){
    init_params <- cbind(lambda = draw_loguniform(params[, "lambda_exp_root"]))
  }
  init_weights = rep(1/cluster.number, cluster.number)
  list(iparameters = init_params, iweights = init_weights)
}
initialize_by <- function (init_params, init_weights, model = NULL, cluster.number = 4){
  # EM initialization from user-supplied parameters and weights.
  #
  # Args:
  #   init_params:  numeric vector; for "weibull", length 2*cluster.number
  #                 laid out row-wise as (lambda, p) pairs; for
  #                 "exponential", length cluster.number of lambda values.
  #   init_weights: numeric vector of length cluster.number summing to 1.
  #   model:        "weibull" or "exponential".
  #   cluster.number: number of mixture categories.
  #
  # Returns: list(iparameters = named matrix, iweights = init_weights).
  #
  # Fixes relative to the original:
  #  - type check uses is.numeric() instead of class(.) == "numeric", which
  #    wrongly rejected integer vectors;
  #  - the weight-sum check uses a tolerance instead of exact floating-point
  #    equality, so user-entered decimals that sum to 1 up to rounding pass;
  #  - removed an unused `categories` variable.
  if (!is.numeric(init_params)) {
    stop ("Invalid argument params: expected numeric vector")
  }
  if (model == "weibull"){
    if (length(init_params) != 2*cluster.number) {
      stop ("Invalid params length: expected vector of length 2*cluster.number")
    }
  }
  else if (model == "exponential"){
    if (length(init_params) != cluster.number) {
      stop ("Invalid params length: expected vector of length cluster.number")
    }
  }
  else {
    stop (paste0("Invalid model name: expected 'weibull' or 'exponential', received ", model))
  }
  if (length(init_weights) != cluster.number) {
    stop ("Invalid weights length: expected vector of length cluster.number")
  }
  if (abs(sum(init_weights) - 1) > 1e-8){
    stop ("Invalid weights value: sum of weights must be equal to 1")
  }
  if (model == "weibull"){
    # byrow = TRUE: init_params is interpreted as (lambda, p) per category.
    init_params <- matrix(init_params, ncol = 2, nrow = cluster.number, byrow = TRUE)
    colnames(init_params) = c("lambda", "p")
  }
  else {
    init_params <- matrix(init_params, ncol = 1, nrow = cluster.number, byrow = TRUE)
    colnames(init_params) = c("lambda")
  }
  list(iparameters = init_params, iweights = init_weights)
}
filter_unsolved <- function(data, params){
  # Keep only nodes whose shape-parameter solve converged: p_precision must
  # be non-NA and below 1e-5. The raw data list is subset to match.
  keep <- !is.na(params$p_precision) & params$p_precision < 1e-5
  fparams <- params[keep, ]
  list(fdata = data[fparams$node], fparams = fparams)
}
filter_unsolved_and_single <- function(data, params){
  # Like filter_unsolved(), but additionally drops nodes that carry only a
  # single mutation event (events must be > 1).
  keep <- !is.na(params$p_precision) & params$p_precision < 1e-5 & params$events > 1
  fparams <- params[keep, ]
  list(fdata = data[fparams$node], fparams = fparams)
}
#' @export
em_procedure <-function(data, params, model = NULL, iter = 100, cluster.number= 4, init_method = c("cluster", "random", "by"), init_params = NULL, init_weights = NULL, mutation_position = "middle", filtering = c("single", "unsolved"), trace = TRUE, trackfile = NULL, trackcount = 10){
  # Top-level EM driver: filter the per-node data/parameter tables, build an
  # initial set of parameters and weights, then run the EM loop via em().
  #
  # Args:
  #   data, params:   per-node data list and MLE parameter table.
  #   model:          "weibull" or "exponential" (passed through).
  #   init_method:    "cluster" (k-means), "random", or "by" (user-supplied
  #                   init_params/init_weights).
  #   filtering:      "single" drops unsolved and single-event nodes;
  #                   "unsolved" drops only unsolved nodes.
  #   Remaining arguments are forwarded to em().
  #
  # Returns: the result of em() (parameters, rkvectors, weights, lnL, bic).
  #
  # Fixes relative to the original:
  #  - match.arg() resolves filtering/init_method, so the documented
  #    choice-vector defaults actually work (a length > 1 condition in if()
  #    errors in modern R) and invalid values get a clear error;
  #  - the unknown-init_method error referenced an undefined variable
  #    `method`; match.arg now reports the problem itself.
  filtering <- match.arg(filtering)
  init_method <- match.arg(init_method)
  if (filtering == "single"){
    fi <- filter_unsolved_and_single(data = data, params = params)
  } else {
    fi <- filter_unsolved(data = data, params = params)
  }
  fdata <- fi$fdata
  fparams <- fi$fparams
  if (init_method == "cluster") {
    init <- initialize_by_clustering(data = fdata, params = fparams, model = model, mutation_position = mutation_position, cluster.number = cluster.number)
  } else if (init_method == "random"){
    init <- initialize_random(params = fparams, model = model, cluster.number = cluster.number)
  } else {
    # init_method == "by": the caller supplies the starting point explicitly.
    if (is.null(init_params) || is.null(init_weights)){
      stop ("Chosen initialization method requires init_params and init_weights arguments")
    }
    init <- initialize_by(init_params, init_weights, model = model, cluster.number = cluster.number)
  }
  iparameters <- init$iparameters
  iweights <- init$iweights
  print ("Initial parameters:")
  print (iparameters)
  print (iweights)
  em_results <- em(data = fdata, parameters = iparameters, model = model, weights = iweights, iter= iter, mutation_position = mutation_position, cluster.number = cluster.number, trackfile = trackfile, trackcount = trackcount, trace = trace)
}
# Core EM loop. Each iteration runs an E-step (per-node responsibilities)
# and an M-step (per-category parameter refit plus mixture weights), then
# checks the model log-likelihood for convergence.
# Stops when the lnL gain drops below 1e-4 or `iter` iterations are reached.
# Returns list(parameters, rkvectors, weights, lnL, bic).
# NOTE(review): bic(), tracer() and the compute_* helpers are defined
# elsewhere in this file.
em <- function(data, model = NULL, parameters, weights, iter = 100, cluster.number= 4, mutation_position = "middle", trackfile = NULL, trackcount = 10, trace = TRUE){
  if (trace){
    myplot <- tracer(parameters, weights, cluster.number, init = TRUE)
  }
  old_lnL <- NULL
  for (i in seq(1,iter,1)){
    print (paste(c("------------Step ", i), collapse=""))
    # E-step: responsibilities under the current parameters/weights
    rkvectors <- compute_rkvectors(data=data, parameters=parameters, model = model, weights=weights)
    # M-step: refit per-category parameters and recompute mixture weights
    parameters <- compute_params(data=data, rkvectors=rkvectors, model = model, mutation_position = mutation_position)
    weights <- compute_weights(rkvectors)
    print(rkvectors)
    print(weights)
    if (trace){
      myplot <- tracer(parameters, weights, cluster.number, myplot = myplot, init = FALSE)
    }
    model_lnL <- compute_model_lnL(data=data, model = model,parameters=parameters, weights=weights)
    print ("model lnL")
    print(model_lnL)
    # Every `trackcount`-th iteration, checkpoint the lnL to "<trackfile>_<i>"
    if (!is.null(trackfile) && round(i/trackcount) == i/trackcount){
      sink(paste(c(trackfile, "_", i), collapse=""))
      print(model_lnL)
      sink()
    }
    # Convergence: improvement below 1e-4 (EM lnL is expected non-decreasing,
    # hence no abs() around the difference)
    if (!is.null(old_lnL) && model_lnL - old_lnL < 0.0001){
      break
    }
    else {old_lnL <- model_lnL}
  }
  model_bic <- bic(lnL = model_lnL, model = model, cluster.number = cluster.number, n = length(data))
  print ("data length")
  print (length(data))
  print ("model bic")
  print(model_bic)
  list(parameters=parameters, rkvectors=rkvectors, weights=weights, lnL = model_lnL, bic = model_bic)
}
# Visual trace of the EM trajectory. For the Weibull model (2 parameter
# columns) points are drawn in 3D (lambda, p, weight) via scatterplot3d;
# for the exponential model (1 column) a 2D (lambda, weight) plot is used.
# `init = TRUE` creates the plot; subsequent calls add points to it.
# Returns the plot handle (to be passed back in via `myplot`).
# NOTE(review): only 7 colors are defined, so cluster.number > 7 indexes NA
# colors; axis limits are hard-coded; scatterplot3d() is called unqualified
# and must be attached by the caller. In the exponential branch plot()
# returns NULL, so `myplot` carries no state there -- the later points()
# calls rely on the active graphics device instead. Confirm all intended.
tracer <- function (parameters, weights, cluster.number, myplot, init = FALSE){
  colors <- c("red", "blue", "green", "black", "orange", "gray", "violet")
  if (ncol(parameters) == 2){ #weibull model
    if (init){
      # First category creates the 3D plot; remaining categories are added
      myplot <- scatterplot3d(parameters[1,"lambda"], parameters[1,"p"], weights[1], color= colors[1], type="h", xlim = c(0, 0.1), ylim = c(0,10), zlim = c(0,1), pch=19)
      if (cluster.number > 1){
        for (i in seq(2,cluster.number,1)){
          myplot$points3d(parameters[i,"lambda"], parameters[i,"p"], weights[i], col= colors[i], pch=19, type="h")
        }
      }
    }
    else {
      for (i in seq(1,cluster.number,1)){
        print ("weights")
        print(weights)
        # NOTE(review): self-assignment below is a no-op; confirm leftover
        myplot <- myplot
        myplot$points3d(parameters[i,"lambda"], parameters[i,"p"], weights[i], col= colors[i], type="h")
      }
    }
  }
  else if (ncol(parameters) == 1){ #exponential model
    if (init){
      myplot <- plot(parameters[1,"lambda"], weights[1], col= colors[1], xlim = c(0, 0.1), ylim = c(0,1), xlab = "lambda", ylab = "weight", pch=19)
      if (cluster.number > 1){
        for (i in seq(2,cluster.number,1)){
          points(parameters[i,"lambda"], weights[i], col= colors[i], pch=19)
        }
      }
    }
    else {
      for (i in seq(1,cluster.number,1)){
        points(parameters[i,"lambda"], weights[i], col= colors[i])
      }
    }
  }
  myplot
}
compute_weights <- function(rkvectors){
  # Mixture weights from responsibility vectors: the weight of category k is
  # the mean responsibility assigned to k across all nodes (a column mean).
  #
  # Args:
  #   rkvectors: numeric matrix, rows = nodes, columns = categories.
  #
  # Returns: unnamed numeric vector of length ncol(rkvectors).
  #
  # Fix: the original tested class(rkvectors) != "matrix". Since R 4.0 a
  # matrix has class c("matrix", "array"), so that comparison produces a
  # length-2 logical and if() errors on every valid input in modern R.
  if (!is.matrix(rkvectors)){
    stop (paste0("Invalid type of rkvectors: expected matrix, got ", class(rkvectors)[1]))
  }
  # colMeans(col) == sum(col) / nrow(col): exactly what the original sapply
  # computed per category; unname() preserves the original unnamed result.
  unname(colMeans(rkvectors))
}
# E-step: responsibility matrix. Entry [node, k] is the posterior probability
# that the node belongs to category k:
#   weights[k] * L(node | params_k) / sum_c weights[c] * L(node | params_c).
# NOTE(review): lnlikelihood_weibull/lnlikelihood_exp are defined elsewhere;
# only element [1] of their return is used here (the lnL). The full cat_probs
# vector is recomputed for every k, i.e. O(K^2) likelihood evaluations per
# node -- harmless for small K, but worth hoisting if K grows.
compute_rkvectors <- function(data, model = NULL, parameters, weights){
  cluster.number = length(weights)
  categories <- seq(from = 1, to = cluster.number, by = 1)
  rkvectors <- sapply(categories, function(k){
    rk <-sapply(names(data), function(nodename){
      # Unnormalized posterior mass of each category for this node
      cat_probs <- sapply ( categories, function (cat) {
        if (model == "weibull"){
          lnL_dat <- lnlikelihood_weibull(data[[nodename]], parameters[cat,"lambda"], parameters[cat,"p"], fishy = TRUE)
        }
        else {
          lnL_dat <- lnlikelihood_exp(data[[nodename]], parameters[cat,"lambda"], fishy = TRUE)
        }
        lnL <- lnL_dat[1]
        weights[cat] * exp(lnL)
      }
      )
      # Normalize to get the responsibility of category k
      cat_probs[k]/sum(cat_probs)
    })
  })
  rownames(rkvectors) <- names(data)
  rkvectors
}
# Parallel variant of compute_params(): refits each category's parameters
# (via find_single_root, defined elsewhere) using mclapply on Unix, a PSOCK
# cluster on Windows, or the serial mysapply() fallback.
# NOTE(review): the Windows path looks broken as written -- parLapply is
# later called without the cluster object `cl` as its first argument, and
# `mc.cores` is never defined on that path, so the func(...) call would
# error. Confirm intended usage before relying on parallel = TRUE there.
compute_params_insane <- function(data, model = NULL, rkvectors, mutation_position = "middle", parallel = FALSE ){
  cluster.number = ncol(rkvectors)
  categories <- seq(from = 1, to = cluster.number, by = 1)
  if (parallel){
    if (Sys.info()["sysname"] == "Windows"){
      # Windows: no fork(), so build a socket cluster and ship the inputs
      # plus the evolike package to each worker.
      count_cores <- detectCores() - 1
      cl <- makeCluster(count_cores)
      clusterExport(cl, list("data", "rkvectors", "mutation_position", "model"), envir = environment())
      clusterCall(cl, function() library(evolike))
      func <- parLapply
    } else {
      func <- mclapply
      mc.cores <- cluster.number
    }
  } else {
    func <- mysapply
    mc.cores <- 0 # mock variable
  }
  # One weighted-MLE solve per category, using column k of rkvectors as
  # observation weights.
  params_list <- func(categories, function(k){
    k_params <- find_single_root(data = data, mutation_position=mutation_position, rkvector = rkvectors[, k], jack = FALSE, pack = "rootsolve", verbose=TRUE)
    if (model == "weibull"){
      c(k_params["lambda_weib_root"], k_params["p_root"])
    } else {
      c(k_params["lambda_exp_root"])
    }
  }, mc.cores = mc.cores)
  if (parallel && Sys.info()["sysname"] == "Windows"){
    stopCluster(cl)
  }
  # Reassemble the per-category results into a named parameter matrix.
  if(model == "weibull"){
    new_params <- matrix(unlist(params_list), ncol = 2, byrow = TRUE)
    colnames(new_params) = c("lambda", "p")
  } else {
    new_params <- matrix(unlist(params_list), ncol = 1, byrow = TRUE)
    colnames(new_params) = c("lambda")
  }
  new_params
}
mysapply <- function(X, FUN, mc.cores = 1){
  # Serial stand-in for mclapply()/parLapply(): accepts (and ignores) an
  # mc.cores argument so callers can swap it in without changing the call.
  sapply(X = X, FUN = FUN)
}
# M-step: refit the per-category model parameters. Each category k is fitted
# by find_single_root (defined elsewhere) with column k of rkvectors serving
# as per-node observation weights. Serial counterpart of
# compute_params_insane().
# Returns a matrix with columns ("lambda", "p") for "weibull" or ("lambda")
# for the exponential model, one row per category.
compute_params <- function(data, model = NULL, rkvectors, mutation_position = "middle"){
  cluster.number = ncol(rkvectors)
  categories <- seq(from = 1, to = cluster.number, by = 1)
  params_list <- sapply(categories, function(k){
    k_params <- find_single_root(data = data, mutation_position=mutation_position, rkvector = rkvectors[, k], jack = FALSE, pack = "rootsolve", verbose=TRUE)
    if (model == "weibull"){
      c(k_params["lambda_weib_root"], k_params["p_root"])
    } else {
      c(k_params["lambda_exp_root"])
    }
  })
  # byrow = TRUE: each category's parameters form one row.
  if(model == "weibull"){
    new_params <- matrix(unlist(params_list), ncol = 2, byrow = TRUE)
    colnames(new_params) = c("lambda", "p")
  } else {
    new_params <- matrix(unlist(params_list), ncol = 1, byrow = TRUE)
    colnames(new_params) = c("lambda")
  }
  new_params
}
# Observed-data log-likelihood of the mixture: for every node, sum the
# weighted category likelihoods, then sum the logs over nodes.
# NOTE(review): lnlikelihood_weibull/lnlikelihood_exp are defined elsewhere;
# the last expression is an assignment, so the value is returned invisibly.
compute_model_lnL <- function(data, model = NULL, parameters, weights){
  cluster.number = length(weights)
  categories <- seq(from = 1, to = cluster.number, by = 1)
  likelihood_vector <-sapply(names(data), function(nodename){
    # Mixture likelihood of a single node: sum_k w_k * exp(lnL_k)
    cat_probs <- sapply ( categories, function (cat) {
      if (model == "weibull"){
        lnL_dat <- lnlikelihood_weibull(data[[nodename]], parameters[cat,"lambda"], parameters[cat,"p"], fishy = TRUE)
      }
      else {
        lnL_dat <- lnlikelihood_exp(data[[nodename]], parameters[cat,"lambda"], fishy = TRUE)
      }
      lnL <- lnL_dat[1]
      weights[cat] * exp(lnL)
    }
    )
    sum(cat_probs)
  })
  lnL <- sum(log(likelihood_vector))
}
## EM: E - compute rk vectors and weights of each category
## M - given rk, compute new sets of parameters for each category
### Procedures
#prot <- "h1"
#prot_data <- read.csv(paste(c(getwd(), "/input/" ,prot,"_for_LRT.csv"), collapse=""),stringsAsFactors=FALSE)
#splitted <- split(prot_data, list(prot_data$site, prot_data$ancestor_node), drop=TRUE)
#params <-parameters(splitted, mutation_position = "middle", filter = TRUE, jack = FALSE, pack = "rootsolve", verbose = FALSE)
#params <- data.frame(matrix(unlist(params), nrow=length(params), byrow=T),stringsAsFactors=FALSE)
#names(params) <- c("node", "lambda_exp_root", "lambda_weib_root", "p_root", "p_precision" )
#params <- transform(params, lambda_exp_root = as.numeric(lambda_exp_root), lambda_weib_root = as.numeric(lambda_weib_root), p_root = as.numeric(p_root), p_precision = as.numeric(p_precision))
#filtered <-params[!is.na(params$p_precision) ,]
#filtered <-filtered[filtered$p_precision< 1e-5 ,]
##filtered <-filtered[filtered$p_root< 30 ,]
#df <-filtered[,c("lambda_weib_root", "p_root")]
#plot(df$p_root, df$lambda_weib_root, main = "n2")
#plot(df$p_root, df$lambda_weib_root, xlim = c(0, 1.5), ylim = c(0, 0.1), main = "n2")
#h1_kmeans <-kmeans(df, 3)
|
19af645a05f6298c2897ecc272411580a61b5c74 | e522efd67f51c373a6df13ce4a683e6c95fa802d | /AQI.R | a91eca0df2172cd8d1d3b0a9c99fc85c016376b0 | [] | no_license | gcatt/gcatt | 9ede6622bc993a55f202d5e6aa54f6c2b381415f | 8393ac6df874d5bde2fac1cd4945f25df0b0e622 | refs/heads/master | 2022-04-09T22:13:09.388129 | 2020-03-30T07:37:00 | 2020-03-30T07:37:00 | 250,362,391 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,517 | r | AQI.R | #Air Quality Data
# Libraries
library(ggplot2)
library(ggthemes)
library(stats)
library(lubridate)
# Read in data: daily AQICN exports, one CSV per city (absolute local paths).
Air_Beijing<-read.csv("C:/Users/MrGaM/Documents/GitHub/gcatt/Data_International/beijing-air-quality.csv",stringsAsFactors = FALSE)
Air_Berlin<-read.csv("C:/Users/MrGaM/Documents/GitHub/gcatt/Data_International/berlin,-germany-air-quality.csv",stringsAsFactors = FALSE)
Air_Milano<-read.csv("C:/Users/MrGaM/Documents/GitHub/gcatt/Data_International/milano-senato, lombardia, italy-air-quality.csv",stringsAsFactors = FALSE)
Air_NY<-read.csv("C:/Users/MrGaM/Documents/GitHub/gcatt/Data_International/new-york, usa-air-quality.csv",stringsAsFactors = FALSE)
Air_Wuhan<-read.csv("C:/Users/MrGaM/Documents/GitHub/gcatt/Data_International/wuhan-air-quality.csv",stringsAsFactors = FALSE)
# Calculate overall AQI: row-wise maximum over the per-pollutant columns
# (every column after the date column).
head(Air_Beijing)  # NOTE(review): leftover interactive inspection; no visible effect when sourced
Air_Beijing$AQI<-apply(Air_Beijing[,2:ncol(Air_Beijing)],1,max)
Air_Berlin$AQI<-apply(Air_Berlin[,2:ncol(Air_Berlin)],1,max)
# NOTE(review): Milano excludes its last column from the max -- presumably a
# non-pollutant column in that export; confirm against the CSV header.
Air_Milano$AQI<-apply(Air_Milano[,2:(ncol(Air_Milano)-1)],1,max)
Air_NY$AQI<-apply(Air_NY[,2:ncol(Air_NY)],1,max)
Air_Wuhan$AQI<-apply(Air_Wuhan[,2:ncol(Air_Wuhan)],1,max)
# Format as Date ("Datum" in the original comment)
Air_Beijing$date <- as.Date(Air_Beijing$date , format = "%Y/%m/%d")
Air_Berlin$date <- as.Date(Air_Berlin$date , format = "%Y/%m/%d")
Air_Milano$date <- as.Date(Air_Milano$date , format = "%Y/%m/%d")
Air_NY$date <- as.Date(Air_NY$date , format = "%Y/%m/%d")
Air_Wuhan$date <- as.Date(Air_Wuhan$date , format = "%Y/%m/%d")
#Plots over all Years
ggplot(data = Air_Beijing,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Beijing")+
theme_fivethirtyeight()
ggplot(data = Air_Berlin,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Berlin")+
theme_fivethirtyeight()
ggplot(data = Air_Milano,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Milano")+
theme_fivethirtyeight()
ggplot(data = Air_Wuhan,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Wuhan")+
theme_fivethirtyeight()
# Plot since 2019: restrict each city to 2019-2020 for the recent-period
# plots below. (year() returns numeric; %in% against the character vector
# works via coercion.)
AQI20_Beijing=subset(Air_Beijing, year(Air_Beijing$date) %in% c("2020","2019"))
AQI20_Berlin=subset(Air_Berlin, year(Air_Berlin$date) %in% c("2020","2019"))
AQI20_Milano=subset(Air_Milano, year(Air_Milano$date) %in% c("2020","2019"))
AQI20_NY=subset(Air_NY, year(Air_NY$date) %in% c("2020","2019"))
AQI20_Wuhan=subset(Air_Wuhan, year(Air_Wuhan$date) %in% c("2020","2019"))
ggplot(data = AQI20_Beijing,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Beijing")+
theme_fivethirtyeight()
ggplot(data = AQI20_Berlin,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Berlin")+
theme_fivethirtyeight()
ggplot(data = AQI20_Milano,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Milano")+
theme_fivethirtyeight()
ggplot(data = AQI20_Wuhan,
mapping = aes(y = AQI,
x = date
)) +
geom_line(color="darkblue")+
geom_smooth(color="red")+
ggtitle("Air Quality Index Wuhan")+
theme_fivethirtyeight()
#Plots Januar-März
AQI20_Beijing20=subset(Air_Beijing, year(Air_Beijing$date) %in% "2020")
AQI20_Beijing19=subset(Air_Beijing, year(Air_Beijing$date) %in% "2019")
AQI20_Beijing19_123=subset(AQI20_Beijing19, !month(AQI20_Beijing19$date) %in% c("4","5","6","7","8","9","10","11","12"))
AQI20_Beijing18=subset(Air_Beijing, year(Air_Beijing$date) %in% "2018")
AQI20_Beijing18_123=subset(AQI20_Beijing18, !month(AQI20_Beijing18$date) %in% c("4","5","6","7","8","9","10","11","12"))
AQI20_Wuhan20=subset(Air_Wuhan, year(Air_Wuhan$date) %in% "2020")
AQI20_Wuhan19=subset(Air_Wuhan, year(Air_Wuhan$date) %in% "2019")
AQI20_Wuhan19_123=subset(AQI20_Wuhan19, !month(AQI20_Wuhan19$date) %in% c("4","5","6","7","8","9","10","11","12"))
AQI20_Wuhan18=subset(Air_Wuhan, year(Air_Wuhan$date) %in% "2018")
AQI20_Wuhan18_123=subset(AQI20_Wuhan18, !month(AQI20_Wuhan18$date) %in% c("4","5","6","7","8","9","10","11","12"))
AQI20_Berlin20=subset(Air_Berlin, year(Air_Berlin$date) %in% "2020")
AQI20_Berlin19=subset(Air_Berlin, year(Air_Berlin$date) %in% "2019")
AQI20_Berlin19_123=subset(AQI20_Berlin19, !month(AQI20_Berlin19$date) %in% c("4","5","6","7","8","9","10","11","12"))
AQI20_Berlin18=subset(Air_Berlin, year(Air_Berlin$date) %in% "2018")
AQI20_Berlin18_123=subset(AQI20_Berlin18, !month(AQI20_Berlin18$date) %in% c("4","5","6","7","8","9","10","11","12"))
AQI20_Milano20=subset(Air_Milano, year(Air_Milano$date) %in% "2020")
AQI20_Milano19=subset(Air_Milano, year(Air_Milano$date) %in% "2019")
AQI20_Milano19_123=subset(AQI20_Milano19, !month(AQI20_Milano19$date) %in% c("4","5","6","7","8","9","10","11","12"))
AQI20_Milano18=subset(Air_Milano, year(Air_Milano$date) %in% "2018")
AQI20_Milano18_123=subset(AQI20_Milano18, !month(AQI20_Milano18$date) %in% c("4","5","6","7","8","9","10","11","12"))
ggplot() +
geom_smooth(data = AQI20_Beijing20, aes(x = date, y = AQI, color = "2020")) +
geom_smooth(data = AQI20_Beijing19_123, aes(x = (date + 1*365), y = AQI, color = "2019")) +
geom_smooth(data = AQI20_Beijing18_123, aes(x = (date + 2*365), y = AQI, color = "2018")) +
xlab('Dates') +
ylab('AQI')+
ggtitle("Air Quality Index Beijing")+
scale_colour_manual(name="Legend", values=c("orange", "red", "blue"))+
theme(legend.position="right")
ggplot() +
geom_smooth(data = AQI20_Wuhan20, aes(x = date, y = AQI, color = "2020")) +
geom_smooth(data = AQI20_Wuhan19_123, aes(x = (date + 1*365), y = AQI, color = "2019")) +
geom_smooth(data = AQI20_Wuhan18_123, aes(x = (date + 2*365), y = AQI, color = "2018")) +
xlab('Dates') +
ylab('AQI')+
ggtitle("Air Quality Index Wuhan")+
scale_colour_manual(name="Legend", values=c("orange", "red", "blue"))+
theme(legend.position="right")
ggplot() +
geom_smooth(data = AQI20_Berlin20, aes(x = date, y = AQI, color = "2020")) +
geom_smooth(data = AQI20_Berlin19_123, aes(x = (date + 1*365), y = AQI, color = "2019")) +
geom_smooth(data = AQI20_Berlin18_123, aes(x = (date + 2*365), y = AQI, color = "2018")) +
xlab('Dates') +
ylab('AQI')+
ggtitle("Air Quality Index Berlin")+
scale_colour_manual(name="Legend", values=c("orange", "red", "blue"))+
theme(legend.position="right")
ggplot() +
geom_smooth(data = AQI20_Milano20, aes(x = date, y = AQI, color = "2020")) +
geom_smooth(data = AQI20_Milano19_123, aes(x = (date + 1*365), y = AQI, color = "2019")) +
geom_smooth(data = AQI20_Milano18_123, aes(x = (date + 2*365), y = AQI, color = "2018")) +
xlab('Dates') +
ylab('AQI')+
ggtitle("Air Quality Index Milano")+
scale_colour_manual(name="Legend", values=c("orange", "red", "blue"))+
theme(legend.position="right")
|
370238853a18934e83b1bd1dd77f90a3561bb220 | dd68bc3bf1fc13036551563399bb161bde8ff93d | /WATS_USU/snotel_single.R | 7db391f4ac4fd368c9947d1e783096b27cd744ba | [] | no_license | davebetts/Data-management-scripts | fba0941c5689ffd7d3771f1624b6085282d51df4 | f6e4a980e0d938c589071299007009111463bc86 | refs/heads/master | 2021-01-21T22:19:16.028802 | 2017-10-04T22:13:18 | 2017-10-04T22:13:18 | 102,146,427 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,761 | r | snotel_single.R | rm(list = ls())
gc()
# One input CSV per SNOTEL site in ./snotel
filenames <- list.files("snotel", pattern="*.csv", full.names=TRUE)
# Loop over each site file: compute goodness-of-fit stats and write a
# SNOTEL-vs-WEAP comparison plot per site.
OvsE <- vector(mode = "list", length = length(filenames))
for (i in 1:length(filenames)) {
filename <- filenames[i]
# Site ID = file name without directory prefix and .csv suffix
title <- gsub(".csv", "", filename)
title <- gsub("snotel/", "", title)
snotel_all <- read.csv(filename, skip = 5, check.names = FALSE)
colnames(snotel_all) <- c("Year", "Month", "depth","gauge")
# Column 4 = observed SNOTEL SWE ("gauge"), column 3 = WEAP-modelled ("depth")
obs <- snotel_all[,4]
weap <- snotel_all[,3]
# Monthly series: date on the 1st of each Year/Month
date <- as.Date(paste(snotel_all[,1],snotel_all[,2],01, sep = "-"))
snotel_all[,1] <- date
snotel_all <- snotel_all[,-2]
colnames(snotel_all)[1] <- "Date"
# GoF: goodness-of-fit of simulated vs observed via hydroGOF::gof()
require(hydroGOF)
nxt_col <- gof(sim = weap, obs = obs)
colnames(nxt_col)=title
OvsE[[i]]=nxt_col
# names(OvsE)[[i]]=title
g_range <- range(obs, weap, na.rm = TRUE)
ID <- as.character(title)
# create folder for output from R if folder doesn't already exist
ifelse(!dir.exists(file.path(getwd(), "output_R/snotel")),
dir.create(file.path(getwd(), "output_R/snotel"), recursive = TRUE),
FALSE)
mainDir <- "."
subDir <- paste("output_R/snotel", title, sep = "_")
# Per-site output directory; stop if a same-named *file* is in the way
if (!file_test("-d", file.path(mainDir, subDir))) {
if(file_test("-f", file.path(mainDir, subDir))) {
stop ("Plots will be stored in ", subDir)
} else {
dir.create(file.path(mainDir, subDir))
}
}
# Time-series comparison plot: SNOTEL observed (blue) vs WEAP modelled (red)
png(filename = paste(mainDir, "/", subDir, "/", ID, ".png", sep =""), width = 960, height = 600, units = "px", pointsize = 12)
plot(date, obs, type="l", lwd=2,
col="blue",
ylim = g_range,
xaxs="i",
xlab = "Date", ylab = "Snow Water Equivalent [mm]",
main = ID)
lines(date, weap, lwd=2, col="red")
legend("topright", legend=c("SNOTEL","WEAP"), lwd=c(2,2),
col=c("blue","red"), inset = .05)
dev.off()
}
# Collate per-site GoF columns and keep six summary statistics.
svw1=do.call(cbind,OvsE)
svw2 <- as.data.frame(svw1)
# Row labels come from the last loop iteration's gof() table (same rows for
# every site).
svw3 <- cbind(as.character(row.names(nxt_col)), svw2)
svw4 <- svw3[c("PBIAS %", "RSR", "RMSE", "NSE", "d", "MAE"),]
colnames(svw4)[1]<-"GoF"
# svw5 <- svw4
# svw6 <- svw5[,-3]
# NOTE(review): the output file name reuses `title` from the final loop
# iteration -- confirm that is intended rather than a combined-run name.
write.csv(svw4, paste("./output_R/snotel/SNOTEL_", "GoF_", title, ".csv", sep = ""), row.names = FALSE)
########################################################################################
### List files within the subfolder "./sweGoF" and ###
### process each file using the function snotelgof() ###
########################################################################################
##
########################################################################################
### Function: GoF_tables - Collates all the GoF tables produced by ###
### snotelgof() into a set of combined tables ###
### Source: Each GoF table produced by snotelgof() as found in "output/swe" folder ###
########################################################################################
## !!! !!!
## !!! requires "dirloc" to be defined (below) !!!
## !!! Overwrites previous iterations of output !!!
## !!! !!!
##
## Produces a single combined table of all of the goodness of fit measures selected
## within snotelgof(), in addition to separate tables for each of the individual measures.
##
## All tables are saved within the "output_swe" folder within the working directory
##
GoF_tables <- function(dirloc) {
  # Collate the per-station GoF CSVs found in `dirloc` into one table per
  # statistic plus a single combined table.
  #
  # Args:
  #   dirloc: directory holding the "*GoF_*.csv" files written earlier.
  #
  # Side effects: writes swe_sites.csv (working dir), one
  # output_R/swe_GoF.<stat>.csv per statistic, and output_R/all_swe.csv.
  GoF_files <- list.files(dirloc, pattern="*.csv", full.names=TRUE)
  # Station label = text between "GoF_" and the "_201x" date suffix.
  # NOTE(review): the "_201.*" pattern assumes 2010-2019 file names; files
  # stamped 2020+ will not be trimmed -- confirm.
  gofnames <- gsub(".*GoF_", "", GoF_files)
  gofnames <- gsub("_201.*","", gofnames)
  x <- c("PBIAS %", "RSR", "RMSE", "NSE", "d", "MAE")
  # Site metadata columns are taken from the first file and assumed identical
  # (same rows, same order) across all files.
  y <- read.csv(GoF_files[1], check.names = FALSE)
  z <- vector(mode = "list", length = length(x))
  names(z) <- x
  sites <- y[,!(colnames(y) %in% x)]
  write.csv(sites, "swe_sites.csv", row.names = FALSE)
  # One accumulator data frame per statistic, seeded with the site columns.
  # NOTE(review): assumes exactly two non-statistic columns (site, catchment).
  for (i in seq_along(z)) {
    z[[i]] <- as.data.frame(sites)
    colnames(z[[i]]) <- c("site","catchment")
  }
  # Append each file's value column to every statistic's table.
  for (i in seq_along(GoF_files)) {
    w <- read.csv(GoF_files[i], check.names = FALSE)
    for (j in x) {
      z[[j]] <- cbind(z[[j]], as.data.frame(w[,j]))
      colnames(z[[j]])[ncol(z[[j]])] <- paste(j, gofnames[i], sep = "_")
    }
    # NOTE(review): this write sits inside the file loop, so each per-stat
    # CSV is rewritten after every input file; only the last pass matters.
    sapply(names(z), function (zz) write.csv(z[[zz]], file=paste("output_R/swe_GoF",zz, "csv", sep="."), row.names = FALSE))
  }
  final <- do.call(cbind, z)
  write.csv(final, "output_R/all_swe.csv", row.names = FALSE)
}
# define "dirloc": the folder holding the per-run GoF csv files produced by snotelgof()
dirloc <- "output_R/sweGoF"
# Collate every GoF table found in dirloc into the combined summary tables
GoF_tables(dirloc)
|
ab65298a16c42fa2ff4625eeed7fd2c66a1cfeb3 | b9a5b874df0e0d1bd97c56e9f29584c510a2e73d | /pre-season-forecast/pre-season-forecast.R | 6f8fefaa5307dbdfc3b1f0c970a6083144fe1f59 | [] | no_license | amoeba/2020-yukon-forecasting | 6543a8df425235c79985e6d14acd7035c6a09d4b | cb9a5b10eb81ddd9a0000fd945bf8840f0ed004e | refs/heads/master | 2022-11-22T05:50:08.833552 | 2020-07-22T01:49:22 | 2020-07-22T01:49:22 | 259,212,703 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,761 | r | pre-season-forecast.R | #' may_forecast.R
#'
#' 15/25/50 %tiles of timing using
library(dplyr)
library(ggplot2)
theme_set(theme_bw())
library(readr)
library(patchwork)
# Data loading/cleanup
######################
# Yukon run-timing data: one row per year with timing percentiles
# (fifdj = 15%, qdj = 25%, mdj = 50%) and the environmental predictors
# used below (amatc, msstc, pice)
yuk <- read_csv("data/yukon.csv")
# Year being forecast; rows with year < forecast_year form the training set
forecast_year <- 2020
# Plots
# Scatter plots of median run timing (mdj) against each candidate predictor,
# with a vertical line marking the forecast year's predictor value.
# FIX: ggsave() defaults to last_plot(), which is only set when a plot is
# *printed*. These plots are assigned, never printed, so without an explicit
# plot= argument each ggsave() could silently save a stale figure.
p_msstc <- ggplot(yuk, aes(msstc, mdj)) +
  geom_point(shape=1) +
  geom_vline(xintercept = yuk[which(yuk$year == forecast_year),"msstc"][[1]]) +
  labs(x = expression("MSSTC,"*~degree*"C"), y = "Median Run Timing (June)")
ggsave("pre-season-forecast/figures/mdj_against_msstc.png", plot = p_msstc, width = 4, height = 4)
p_pice <- ggplot(yuk, aes(pice, mdj)) +
  geom_point(shape=1) +
  geom_vline(xintercept = yuk[which(yuk$year == forecast_year),"pice"][[1]]) +
  scale_x_continuous(limits = c(0, 1.0)) +
  labs(x = "PICE", y = "Median Run Timing (June)")
ggsave("pre-season-forecast/figures/mdj_against_pice.png", plot = p_pice, width = 4, height = 4)
p_amatc <- ggplot(yuk, aes(amatc, mdj)) +
  geom_point(shape=1) +
  geom_vline(xintercept = yuk[which(yuk$year == forecast_year),"amatc"][[1]]) +
  labs(x = expression("AMATC,"*~degree*"C"), y = "Median Run Timing (June)")
ggsave("pre-season-forecast/figures/mdj_against_amatc.png", plot = p_amatc, width = 4, height = 4)
# Three panel plot (patchwork composition of the three scatters)
theme_set(theme_bw())
three_panel <- p_amatc + p_msstc + p_pice
three_panel
ggsave("pre-season-forecast/figures/three_panel.png", plot = three_panel, width = 12, height = 4)
# Time series plot: each predictor over the full record, with the forecast
# year highlighted as a point and the pre-forecast median as a horizontal line
theme_set(theme_classic() + theme(axis.title = element_blank(),
                                  panel.border = element_rect(size = 1, fill = NA),
                                  axis.line = element_blank(),
                                  title = element_text(size = 10)))
p1 <- ggplot(yuk, aes(year, amatc)) +
  geom_line() +
  geom_point(data = subset(yuk, year == forecast_year)) +
  geom_hline(yintercept = median(yuk[yuk$year < forecast_year, "amatc"][[1]])) +
  labs(title = "April Average Air Temperature (°C) at the Nome, AK airport") +
  theme(axis.title.x = element_blank())
p2 <- ggplot(yuk, aes(year, msstc)) +
  geom_line() +
  geom_point(data = subset(yuk, year == forecast_year)) +
  geom_hline(yintercept = median(yuk[yuk$year < forecast_year, "msstc"][[1]])) +
  labs(title = "May Average Sea Surface Temperature (°C), 62–64°N by 165–169°W") +
  theme(axis.title.x = element_blank())
# pice has missing years, hence na.rm = TRUE for its median
p3 <- ggplot(yuk, aes(year, pice)) +
  geom_line() +
  geom_point(data = subset(yuk, year == forecast_year)) +
  geom_hline(yintercept = median(yuk[yuk$year < forecast_year, "pice"][[1]], na.rm = TRUE)) +
  scale_y_continuous(limits = c(0, 1)) +
  labs(title = "Spring Proportion of Ice Cover, 62–64°N by 165–169°W") +
  theme(axis.title.x = element_blank())
# Stack the three panels vertically (patchwork "/" operator)
timeseries_3p <- p1 / p2 / p3
# NOTE(review): output file name is spelled "timseries" -- likely a typo, but
# left unchanged since downstream references may depend on this exact path
ggsave("pre-season-forecast/figures/timseries_3p.png", timeseries_3p, width = 6, height = 5)
# Models ----
# Fit one linear model per run-timing percentile (15% = fifdj, 25% = qdj,
# 50% = mdj) on all years before the forecast year, then produce a floored
# point prediction of each timing date for the forecast year itself.
hindcast_years <- subset(yuk, year < forecast_year)
target_year <- yuk[yuk$year == forecast_year, ]

# Build and fit the model for one percentile; all three percentiles share
# the same predictor set (air temperature, sea surface temperature, ice cover)
fit_timing_model <- function(percentile) {
  lm(reformulate(c("amatc", "msstc", "pice"), response = percentile),
     data = hindcast_years)
}

# Floor the prediction to a whole day of June, matching the rounding
# convention used for the published forecast
predict_timing <- function(model) {
  floor(predict(model, newdata = target_year))
}

# 15%
model_fifdj <- fit_timing_model("fifdj")
summary(model_fifdj)
prediction_fifdj <- predict_timing(model_fifdj)
prediction_fifdj
# 25%
model_qdj <- fit_timing_model("qdj")
summary(model_qdj)
prediction_qdj <- predict_timing(model_qdj)
prediction_qdj
# 50%
model_mdj <- fit_timing_model("mdj")
summary(model_mdj)
prediction_mdj <- predict_timing(model_mdj)
prediction_mdj

# Collect the three point forecasts and write them out
predictions <- data.frame(percentile = c("fifdj", "qdj", "mdj"),
                          prediction = as.integer(c(prediction_fifdj,
                                                    prediction_qdj,
                                                    prediction_mdj)))
write_csv(predictions, path = "pre-season-forecast/output/predictions.csv")
|
b164282c9c8c14831565b9e49677ed0fbf806d89 | c114cb86ac335b056175550a77501e8217c9032c | /man/gate_draw-GatingSet-method.Rd | b88bc695a2ada0c954dadb31aa0f06c5ef4b5661 | [] | no_license | Bia10/CytoRSuite | 7ffdfdb241e4d83a54c319ecffdaa482be2f2b5f | baf716187fe2d5c2543cf5524b90265190137466 | refs/heads/master | 2020-06-20T19:33:34.688082 | 2019-03-07T08:44:01 | 2019-03-07T08:44:01 | 197,223,695 | 1 | 0 | null | 2019-07-16T15:49:42 | 2019-07-16T15:49:42 | null | UTF-8 | R | false | true | 3,895 | rd | gate_draw-GatingSet-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gate_draw-methods.R
\docType{methods}
\name{gate_draw,GatingSet-method}
\alias{gate_draw,GatingSet-method}
\title{gate_draw GatingSet Method}
\usage{
\S4method{gate_draw}{GatingSet}(x, group_by = NULL, select = NULL,
parent = "root", alias = NULL, channels = NULL, type = "polygon",
gatingTemplate = NULL, display = NULL, axis = "x",
density_smooth = 1.5, label = TRUE, plot = TRUE, ...)
}
\arguments{
\item{x}{object of class
\code{\link[flowWorkspace:GatingSet-class]{GatingSet}}.}
\item{group_by}{vector of pData column names (e.g.
c("Treatment","Concentration") indicating how the samples should be grouped
prior to gating, set to the length of x by default to construct a single
gate for all samples. If group_by is supplied a different gate will be
constructed for each group.}
\item{select}{vector containing the indices of samples within each group to
use for plotting.}
\item{parent}{name of the \code{parent} population to extract for gating.}
\item{alias}{the name(s) of the populations to be gated. If multiple
population names are supplied (e.g. \code{c("CD3,"CD4)}) multiple gates
will be returned. \code{alias} is \code{NULL} by default which will halt
the gating routine.}
\item{channels}{vector of channel names to use for plotting, can be of length
1 for 1-D density histogram or length 2 for 2-D scatter plot.}
\item{type}{vector of gate type names used to construct the gates. Multiple
gate types are supported but should be accompanied with an \code{alias}
argument of the same length (i.e. one \code{type} per \code{alias}).
Supported gate types are \code{polygon, rectangle, ellipse, threshold,
boundary, interval, quadrant and web} which can be abbreviated as upper or
lower case first letters as well. Default \code{type} is \code{"interval"}
for 1D gates and \code{"polygon"} for 2D gates.}
\item{gatingTemplate}{name of \code{gatingTemplate} csv file to be saved.}
\item{display}{numeric [0,1] to control the percentage of events to be
plotted. Specifying a value for \code{display} can substantial improve
plotting speed for less powerful machines.}
\item{axis}{indicates whether the \code{"x"} or \code{"y"} axis should be
gated for 2-D interval gates.}
\item{density_smooth}{smoothing factor passed to
\code{\link[stats:density]{density}} for 1-D plots (defaults to 1.5).}
\item{label}{logical indicating whether to include
\code{\link{cyto_plot_label}} for the gated population(s), \code{TRUE} by
default.}
\item{plot}{logical indicating whether a plot should be drawn, set to
\code{TRUE} by default.}
\item{...}{additional arguments for \code{\link{cyto_plot,GatingSet-method}}.}
}
\value{
drawn gates are applied to the
\code{\link[flowWorkspace:GatingSet-class]{GatingSet}} and saved to a
\code{\link[openCyto:gatingTemplate-class]{gatingTemplate}}.
}
\description{
Manually draw gates around populations for analysis of flow cytometry data.
}
\examples{
\dontrun{
library(CytoRSuiteData)
# Load in samples
fs <- Activation
gs <- GatingSet(fs)
# Apply compensation
gs <- compensate(gs, fs[[1]]@description$SPILL)
# Transform fluorescent channels
trans <- estimateLogicle(gs[[4]], cyto_fluor_channels(fs))
gs <- transform(gs, trans)
# Gate using gate_draw
gating(Activation_gatingTemplate, gs)
# draw gates using gate_draw - add contour lines & overlay control
gate_draw(gs,
parent = "Dendritic Cells",
channels = c("Alexa Fluor 488-A", "Alexa Fluor 700-A"),
alias = c("CD8+ DC", "CD4+ DC"),
gatingTemplate = "Example-gatingTemplate.csv",
type = "rectangle",
contour_lines = 15
)
# Constructed gate applied directly to GatingSet
getNodes(gs)
}
}
\seealso{
\code{\link{cyto_plot,GatingSet-method}}
\code{\link{gate_draw,flowFrame-method}}
\code{\link{gate_draw,flowSet-method}}
}
\author{
Dillon Hammill, \email{Dillon.Hammill@anu.edu.au}
}
|
9f489cd840c001f746422318e2037c5e9e3f95ca | b32545a4bbb7d5d07d24bddcfb23c7a76bcaae21 | /plot_dim_reduce.R | e6ef0d618bd6c76835130174d2845884d2384af5 | [] | no_license | igledaniel/lit2vec | 1ab76dba7d6ae513f67bdcca8c85437737f5ed54 | 9ac34e2e079be9cffc557c296550d1aa64186ab9 | refs/heads/master | 2023-03-17T14:37:06.653242 | 2016-06-08T18:43:34 | 2016-06-08T18:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,029 | r | plot_dim_reduce.R | model_name <- "docs50"
# Configuration: data directory, plotting mode, and per-document labels.
# Use <- for top-level assignment per R convention (was "=").
fp <- "data/"
wordAndcategory <- FALSE  # TRUE plots words and categories together (see below)
# One label per document row in the vector file
labs <- read.csv(paste0(fp, "names_", model_name, ".csv"), header = FALSE, stringsAsFactors = FALSE)[,1]
# pca
x1 <- read.csv(paste0(fp, "docvecs_", model_name, ".csv"), header = FALSE) # ndocs by vec-dim np array
# Guard: every document vector must be unique
stopifnot(nrow(unique(x1)) == nrow(x1))
# convert to a numeric matrix for prcomp
X <- as.matrix(x1)
# PCA on scaled data; keep only the first three principal components
pca_result <- prcomp(X, retx=TRUE, scale. = TRUE)
pca <- pca_result$x[,1:3]
# Cache the 3-component scores so the plotting step can load() them
save(pca, file = paste0(fp, "pca", model_name, ".Rda"))
# plot: 2-D scatter of the first two PCs, colored by the third, with
# repelled text labels; written to a PDF under figs/
library(ggplot2)
library(ggrepel)
# Reload the cached PCA scores saved above (object name: pca)
load(paste0(fp, "pca", model_name, ".Rda"))
pcadf <- data.frame(X = pca[,1], Y = pca[,2], Z = pca[,3])
if(wordAndcategory){
  # NOTE(review): this branch appears broken/dead -- `Branch` and `keywords`
  # are never defined in this file, and pcadf has columns X/Y/Z, not
  # tspX/tspY as used in the aes() below. It only runs if wordAndcategory
  # is set to TRUE; confirm intent before enabling.
  Word <- read.csv(paste0(fp,"word_names.csv"), header = FALSE, stringsAsFactors = FALSE)[,1]
  pcadf$Labels <- c(Branch, Word)
  pcadf$Type <- c(rep("Branch", length(Branch)), rep("Word", length(Word)))
  pcadf$size <- c(rep(3, length(Branch)), rep(1, length(Word)))
  embedding_plot <- ggplot(pcadf[pcadf$Type=="Branch" & ! pcadf$Labels %in% "court" | pcadf$Labels %in% keywords,], aes(x = tspX, y = tspY, color = Type, label= Labels)) +
    geom_text() + theme_bw() + xlab("") + ylab("") + theme(legend.position="none")
} else {
  # Default path: label every document and strip all axis decoration
  pcadf$Labels <- labs
  p <- ggplot(pcadf, aes(x = X, y = Y, color = Z, label= Labels)) +
    geom_text_repel() + geom_point() +
    theme_classic() + xlab("") + ylab("") +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          legend.position="none")
  pdf(paste0("figs/pca_", model_name, ".pdf"), width=15, height=15)
  print(p)
  dev.off()
}
# Hierarchical clustering of the raw document vectors, drawn as a
# ggplot dendrogram with labels colored by cluster membership
x1 <- read.csv(paste0(fp, "docvecs_", model_name, ".csv"), header = FALSE) # ndocs by vec-dim np array
rownames(x1) <- labs
# Complete-linkage clustering (hclust default) on Euclidean distances
hc <- stats::hclust(dist(x1))
library(ggplot2)
library(ggdendro)
dendr <- dendro_data(hc, type="rectangle") # convert for ggplot
clust <- cutree(hc,k=11) # k clusters
clust.df <- data.frame(label=names(clust), cluster=factor(clust))
# dendr[["labels"]] has the labels, merge with clust.df based on label column
dendr[["labels"]] <- merge(dendr[["labels"]],clust.df, by="label")
# plot the dendrogram; note use of color=cluster in geom_text(...)
p <- ggplot() +
  geom_segment(data=segment(dendr), aes(x=x, y=y, xend=xend, yend=yend)) +
  geom_text(data=label(dendr), aes(x, y, label=label, hjust=0, color=cluster),
            size=2.5) + ylab("") +
  coord_flip() + scale_y_reverse(expand=c(0.2, 0)) +
  theme(axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.ticks.x=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.title.y=element_blank(),
        panel.background=element_rect(fill="white"),
        panel.grid=element_blank()) +
  scale_fill_brewer(palette = "Dark2")
pdf(paste0("figs/cluster_", model_name, ".pdf"), width=10, height=10)
print(p)
dev.off()
a0e6e36fc3c5cb07772bac45bef9ae889a90b402 | 8e62b56339ad30b31343a5067fee29b2fa4dfed7 | /hate_crimes_final_Fuentes_Villhauer_Nicklas.R | d862d9302de88b90e569eef5938da6dd2223a4ca | [] | no_license | UVA-MSDS-STAT-6021-HateCrimeProject/Nick | c07b89958fe56e0577a68af835e73bc9feac1b9f | f74e98f0abb018f19deae7d9d70154b655c3969f | refs/heads/main | 2023-02-03T04:19:59.378457 | 2020-12-09T03:31:35 | 2020-12-09T03:31:35 | 313,777,417 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 64,056 | r | hate_crimes_final_Fuentes_Villhauer_Nicklas.R | #STAT 6021 Project 2 (Hate Crimes in America)
#David Fuentes, Matt Villhauer, Matt Nicklas
#Using data from FiveThirtyEight, RAND Corp, and other sources to see which variables contribute to hate crimes measured as rate per 100K residents
# #########################################################################
# We will produce a model using our full suite of methods pertaining to multiple
# linear regression (MLR). We will focus on variable and data exploration, including
# performing any data transformations necessary to meet the assumptions of MLR;
# outlier/influential-point analysis; ANOVA and partial F tests to determine predictive power
# and inclusion of variables; and general model selection procedures learned throughout the semester.
# #########################################################################
library(glmnet)
library(faraway)
library(leaps)
library(tidyverse)
# Read in csv file from the data (((((((CLeansing file name here)))))))
hc_df<-read.csv('hate_crimes_full_vfin.csv', row.names = 'state_abbrev')
colnames(hc_df)
# #########################################################################
# DATA ANALYSIS FOR DATA WITH DC INCLUDED
# #########################################################################
# We don't really want to use the SPLC data since those data were collected for a short period (little over a week) directly after the 2016 election.
# SPLC also collects HC data in a very specific manner vastly different than how the FBI reports
# Need to set categorical variables so R can recognize them as such
# Convert indicator columns to factors so R treats them as categorical
hc_df$confederate<-factor(hc_df$confederate)
hc_df$permit<-factor(hc_df$permit) #State has permit to purchase law 1 = yes
hc_df$universl<-factor(hc_df$universl) #State has universal background checks 1 = yes
hc_df$con_uni_combo<-factor(hc_df$con_uni_combo)
# make police killings per 100k pop
hc_df$pk_per100k <- hc_df$pk_percap*100000
# remove some variables from the hc_df dataframe in which we aren't interested
# (2016 frame drops the 2019 response; 2019 frame drops the 2016 response)
fbi_df_2016<-hc_df[ , !(names(hc_df) %in% c('fbi_2019_per100k', 'hate_crimes_per_100k_splc', 'state_full', 'FIP', 'Year', 'confederate', 'universl',
                                            'hate_group_count_2019', 'FIP Year', 'HFR_se', 'BRFSS', 'GALLUP', 'GSS', 'PEW', 'HuntLic', 'GunsAmmo',
                                            'BackChk', 'PewQChng', 'BS1', 'BS2', 'BS3', 'pk_count', 'Number.of.participating.agencies',
                                            'Agencies.submitting.incident.reports', 'pop_covered', 'population', 'incidents', 'pk_percap'))]
fbi_df_2019<-hc_df[ , !(names(hc_df) %in% c('average_hatecrimes_per_100k_fbi', 'hate_crimes_per_100k_splc', 'state_full', 'FIP', 'Year', 'confederate', 'universl',
                                            'hate_group_count_2019', 'FIP Year', 'HFR_se', 'BRFSS', 'GALLUP', 'GSS', 'PEW', 'HuntLic', 'GunsAmmo',
                                            'BackChk', 'PewQChng', 'BS1', 'BS2', 'BS3', 'pk_count', 'Number.of.participating.agencies',
                                            'Agencies.submitting.incident.reports', 'pop_covered', 'population', 'incidents', 'pk_percap'))]
colnames(fbi_df_2016)
# rename hate crime columns to standardize across the two data frames
fbi_df_2016<-rename(fbi_df_2016, hc_per100k=avg_hatecrimes_per_100k_fbi)
fbi_df_2019<-rename(fbi_df_2019, hc_per100k=fbi_2019_per100k)
# drop NAs from HC values (only Hawaii is missing)
fbi_df_2016<-fbi_df_2016 %>% drop_na(hc_per100k)
# make 'Neither' the reference level of the confederate/background-check
# combination factor in both data frames, printing levels before and after
# as a sanity check. (The original code releveled fbi_df_2016 twice -- once
# before and once after drop_na() -- the first relevel was redundant since
# drop_na() preserves factor levels.)
levels(fbi_df_2016$con_uni_combo)
fbi_df_2016$con_uni_combo<-relevel(fbi_df_2016$con_uni_combo, ref = "Neither")
levels(fbi_df_2016$con_uni_combo)
levels(fbi_df_2019$con_uni_combo)
fbi_df_2019$con_uni_combo<-relevel(fbi_df_2019$con_uni_combo, ref = "Neither")
levels(fbi_df_2019$con_uni_combo)
# #########################################################################
# create box plots of categorical data. Notice difference in shape of confederate data -- confederate states had fewer HCs
# remaining two variables do not show the same difference
# Side-by-side panels: 2016 vs 2019 hate-crime rate by factor level
par(mfrow=c(1,2))
boxplot(fbi_df_2016$hc_per100k~fbi_df_2016$con_uni_combo, main = '2016 FBI: HC Rate by Levels w DC', xlab = 'Confederate-Background Check Combo',
        ylab = 'Hate Crimes per 100k (2016)', ylim=c(0,32))
grid()
boxplot(fbi_df_2019$hc_per100k~fbi_df_2019$con_uni_combo, main = '2019 FBI: HC Rate by Levels w DC', xlab = 'Confederate-Background Check Combo',
        ylab ='Hate Crimes per 100k (2019)', ylim=c(0,32))
grid()
# DC is a huge outlier on the avg hatecrimes
# #########################################################################
# not much to see in the scatter matrix due to # of variables. DC sticks out a lot as far as hate crimes, and we notice
# some heavy correlation between variables. Will want to explore DC during outlier analysis
pairs(fbi_df_2016, lower.panel=NULL)
# fit the full data; not helpful except to look at correlation/multicollinearity
model_2016<-lm(hc_per100k~., data = fbi_df_2016)
# run VIF. notice high values (like HFR, suicide rates, share_population_in_metro_areas). We will remove these
vif(model_2016)
# remove correlated variables (those with high VIF above)
fbi_df_2016<-fbi_df_2016[ , !(names(fbi_df_2016) %in% c('pk_per100k', 'share_non_citizen', 'median_household_income', 'Fem_FS_S', 'Male_FS_S', 'share_population_in_metro_areas', 'HFR', 'share_population_with_high_school_degree'))]
# fit models again and run VIF to confirm multicollinearity is resolved
model_2016<-lm(hc_per100k~., data = fbi_df_2016)
vif(model_2016)
# check model summary
summary(model_2016)
# produce scatter matrices again after removing variables that were highly correlated
pairs(fbi_df_2016, lower.panel=NULL, main = 'FBI 2016, post Multicollinearity Analysis')
# #########################################################################
# MODEL SELECTION
# #########################################################################
#intercept-only model (lower bound of the stepwise search)
regnull <- lm(hc_per100k~1, data=fbi_df_2016)
#full model (upper bound of the stepwise search)
regfull <- lm(hc_per100k~., data=fbi_df_2016)
#forward selection, backward elimination, and stepwise regression (AIC-based)
step(regnull, scope=list(lower=regnull, upper=regfull), direction="forward")
step(regfull, scope=list(lower=regnull, upper=regfull), direction="backward")
step(regnull, scope=list(lower=regnull, upper=regfull), direction="both")
# selection methods all showing the same results
for_model <- lm(formula = hc_per100k ~ share_voters_voted_trump + gini_index +
                  con_uni_combo + share_non_white + elasticity, data = fbi_df_2016)
back_model <- lm(formula = hc_per100k ~ gini_index + share_non_white + share_voters_voted_trump +
                   elasticity + con_uni_combo, data = fbi_df_2016)
both_model <- lm(formula = hc_per100k ~ share_voters_voted_trump + gini_index +
                   con_uni_combo + share_non_white + elasticity, data = fbi_df_2016)
summary(for_model)
summary(back_model)
summary(both_model)
step_wise<-both_model
summary(step_wise)
# all produce the same model. Relatively high adj R^2 and significant t-vals
# Diagnostic panel for the selected model (2x2 layout)
par(mfrow=c(2,2))
# Plots to check whether linear assumptions hold true
plot(step_wise$fitted.values,step_wise$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
# Box-Cox suggests a power transformation of the response
boxcox(step_wise, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
# Acf plot of residuals (checks independence assumption)
acf(step_wise$residuals, main="ACF Lag Plot")
# Normal Q-Q plot of residuals (checks normality assumption)
qqnorm(step_wise$residuals)
qqline(step_wise$residuals, col="red")
# Graphs indicate that transformation must occur
# #########################################################################
# TRANSFORMATIONS
# #########################################################################
# Try a sqrt transform (lambda ~ 0.5 per Box-Cox above) and then check to see if the linear assumptions are then met
sqrt_model <- lm(formula = sqrt(hc_per100k) ~ share_voters_voted_trump + gini_index +
                   con_uni_combo + share_non_white + elasticity, data = fbi_df_2016)
summary(sqrt_model)
# Same 2x2 diagnostic panel as before, now for the transformed response
par(mfrow=c(2,2))
plot(sqrt_model$fitted.values,sqrt_model$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(sqrt_model, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
# Acf plot of residuals
acf(sqrt_model$residuals, main="ACF Lag Plot")
qqnorm(sqrt_model$residuals)
qqline(sqrt_model$residuals, col="red")
# Results look better but we might want to rerun the model selection with the newly-transformed data
# #########################################################################
# RERUN MODEL SELECTION
# #########################################################################
# transform variable in place: hc_per100k now holds sqrt(rate) from here on
fbi_df_2016$hc_per100k<-sqrt(fbi_df_2016$hc_per100k)
# rerun code from above
# no need to rerun correlation/VIF analysis since the transformation doesn't change that analysis
pairs(fbi_df_2016, lower.panel=NULL, main = 'FBI 2016')
#intercept only model
regnull <- lm(hc_per100k~1, data=fbi_df_2016)
#full model
regfull <- lm(hc_per100k~., data=fbi_df_2016)
#forward selection, backward elimination, and stepwise regression
step(regnull, scope=list(lower=regnull, upper=regfull), direction="forward")
step(regfull, scope=list(lower=regnull, upper=regfull), direction="backward")
step(regnull, scope=list(lower=regnull, upper=regfull), direction="both")
for_model <- lm(formula = hc_per100k ~ share_voters_voted_trump + share_non_white +
                  gini_index + con_uni_combo + elasticity, data = fbi_df_2016)
back_model <- lm(formula = hc_per100k ~ gini_index + share_non_white + share_voters_voted_trump +
                   elasticity + con_uni_combo, data = fbi_df_2016)
both_model <- lm(formula = hc_per100k ~ share_voters_voted_trump + share_non_white +
                   gini_index + con_uni_combo + elasticity, data = fbi_df_2016)
summary(for_model)
summary(back_model)
summary(both_model)
# they're all the same! decent adj R^2. Also the same as the model found above,
# just with transformed response
full_sqrt<-lm(hc_per100k~., data = fbi_df_2016)
vif(full_sqrt)
# low VIFs, which makes sense since the transformation shouldn't have changed that
# Candidate final predictor set (response listed first)
fin_vars<-c('hc_per100k', 'gini_index', 'share_non_white',
            'share_voters_voted_trump', 'con_uni_combo', 'elasticity')
# correlation matrix with the subset of variables
pairs(fbi_df_2016[fin_vars], lower.panel=NULL, main = 'Final Variables')
fin_data<-fbi_df_2016[fin_vars]
model_full<-lm(hc_per100k~., data = fin_data)
summary(model_full)
# Reduced model drops elasticity; partial F test below assesses whether it belongs
red<-lm(hc_per100k~gini_index+share_voters_voted_trump+share_non_white+con_uni_combo, fin_data)
anova(red, model_full) # elasticity is not significant with alpha = 0.05
# #########################################################################
# Use the best-criteria processes to produce similar models
# #########################################################################
#perform all possible regressions (1st order), keeping the 9 best of each size
allreg <- regsubsets(hc_per100k ~., data=fbi_df_2016, nbest=9) # sqrt
##create a "data frame" that stores the predictors in the various models considered as well as their various criteria
best <- as.data.frame(summary(allreg)$outmat)
best$p <- as.numeric(substr(rownames(best),1,1))+1
best$r2 <- summary(allreg)$rsq
best$adjr2 <- summary(allreg)$adjr2
# MSE denominator must use the number of rows actually modeled
# (fbi_df_2016, which excludes Hawaii) -- the original used dim(hc_df)[1],
# the row count of the raw data frame, over-counting n by one
best$mse <- (summary(allreg)$rss)/(nrow(fbi_df_2016)-best$p)
best$cp <- summary(allreg)$cp
best$bic <- summary(allreg)$bic
#best
##sort by various criteria
best[order(best$r2,decreasing = TRUE),] # our full_model
best[order(best$adjr2, decreasing = TRUE),] # our full_model
best[order(best$mse),] # our full_model
best[order(best$cp),] # our full_model
best[order(best$bic),] # not the full model. confederate, and Gini index
# mostly produce the same model as above (with elasticity, which we've determined isn't significant)
# same model with hate group might be decent. will check
hategroup_mod<-lm(formula = hc_per100k ~ share_voters_voted_trump + share_non_white + hate_group_count_2016 +
                    gini_index + con_uni_combo + elasticity, data = fbi_df_2016)
summary(hategroup_mod)
hategroup_mod_red<-lm(formula = hc_per100k ~ share_voters_voted_trump + share_non_white + hate_group_count_2016 +
                        gini_index + con_uni_combo, data = fbi_df_2016)
# removed elasticity
summary(hategroup_mod_red)
# Partial F: does elasticity add anything given the other predictors?
anova(hategroup_mod, hategroup_mod_red)
# can keep elasticity out. check about hate group
hategroup_mod_red<-lm(formula = hc_per100k ~ share_voters_voted_trump + share_non_white +
                        gini_index + con_uni_combo, data = fbi_df_2016)
# Joint (2 df) partial F dropping both hate_group_count_2016 and elasticity
anova(hategroup_mod, hategroup_mod_red)
# partial F says it's not significant (cannot reject H_0, that its corresponding beta is 0)
# Tells us the best model is the one found with step-wise selection process so that will be called best_mod
best_mod <- red
# #########################################################################
# OUTLIER ANALYSIS
# #########################################################################
# residuals
res_full <- unname(best_mod[['residuals']]) # need to remove the field headers
#res_2016 # uncomment to see residuals
# outlier, influential & leverage points analysis
# studentized residuals (internally studentized)
student.res<-rstandard(best_mod)
# externally studentized residuals
ext.student.res<-rstudent(best_mod)
sort(ext.student.res)
# plot residuals vs standardized residuals found above
n<-length(fin_data$hc_per100k)
p<-length(best_mod$coefficients)
##critical value using Bonferroni procedure
qt(1-0.05/(2*n), n-p-1)
par(mfrow=c(1,3))
plot(best_mod$fitted.values,res_full,main="Residuals")
plot(best_mod$fitted.values,student.res,main="Studentized Residuals")
#plot(best_mod$fitted.values,ext.student.res,main="Externally Studentized Residuals")
# calc values: flag observations beyond the Bonferroni-adjusted cutoff
plot(ext.student.res,main="Externally Studentized Residuals", ylim=c(-4,4))
abline(h=qt(1-0.05/(2*n), n-p-1), col="red")
abline(h=-qt(1-0.05/(2*n), n-p-1), col="red")
# no outliers in the response...
ext.student.res[abs(ext.student.res)>qt(1-0.05/(2*n), n-p-1)]
##leverages (hat values); cutoff 2p/n
lev_full<-lm.influence(best_mod)$hat
sort(lev_full)
par(mfrow=c(1,1))
plot(lev_full, main="Leverages", ylim=c(0,0.6))
abline(h=2*p/n, col="red")
# get leverage points
lev_full[lev_full>2*p/n]
# DC and VT leveraged
# influential observations: DFFITS, DFBETAS, and Cook's distance with the
# standard cutoffs 2*sqrt(p/n), 2/sqrt(n), and F(0.5; p, n-p) respectively
DFFITS<-dffits(best_mod)
DFFITS[abs(DFFITS)>2*sqrt(p/n)]
DFBETAS<-dfbetas(best_mod)
DFBETAS[abs(DFBETAS)>2/sqrt(n)]
COOKS<-cooks.distance(best_mod)
COOKS[COOKS>qf(0.5,p,n-p)]
# influential points by DFFITS, but not Cooks
# #########################################################################
# SHRINKAGE MODELS
# #########################################################################
# check shrinkage regression to see if we can find a challenger model
# begin shrinkage analysis
x_2016<-model.matrix(hc_per100k~., fbi_df_2016)[,-1] # remove the first column of 1s representing the intercept
y_2016<-fbi_df_2016$hc_per100k
###### Lasso
# should produce MLR results (lambda = 0 means no shrinkage)
lasso.r<-glmnet(x_2016,y_2016,alpha=1, lambda=0, thresh = 1e-14)
coefficients(lasso.r)
# MLR - produce the same thing as above
result_2016<-lm(hc_per100k~.,fbi_df_2016)
summary(result_2016)
# split data into 50/50 train/test sets
set.seed(12)
train<-sample(1:nrow(x_2016), nrow(x_2016)/2)
test<-(-train)
y.test<-y_2016[test]
# use CV to find optimal lambda based on training set
set.seed(12)
cv.out<-cv.glmnet(x_2016[train,],y_2016[train],alpha=1) # lasso regression
bestlam<-cv.out$lambda.min # value of lambda that minimizes MSE (the optimal value)
bestlam
plot(cv.out)
# fit lasso regression using training data
lasso.mod<-glmnet(x_2016[train,],y_2016[train],alpha=1,lambda=bestlam, thresh = 1e-14)
# test MSE with the CV-optimal lambda (bestlam)
lasso.pred.0<-predict(lasso.mod,newx=x_2016[test,])
mean((lasso.pred.0-y.test)^2)
# fit lasso regression using training data, fixed lambda = 1 for comparison
lasso.mod.1<-glmnet(x_2016[train,],y_2016[train],alpha=1,lambda=1, thresh = 1e-14)
# test MSE with lambda=1
lasso.pred.1<-predict(lasso.mod.1,newx=x_2016[test,])
mean((lasso.pred.1-y.test)^2)
# MSE = 0.3581547
# perform ridge (alpha = 0)
ridge.r<-glmnet(x_2016,y_2016,alpha=0, lambda=0, thresh = 1e-14)
coefficients(ridge.r)
summary(result_2016)
# Same 50/50 split (same seed) for the ridge comparison
set.seed(12)
train<-sample(1:nrow(x_2016), nrow(x_2016)/2)
test<-(-train)
y.test<-y_2016[test]
set.seed(12)
cv.out.ridge<-cv.glmnet(x_2016[train,],y_2016[train],alpha=0) # ridge regression
bestlam.r<-cv.out.ridge$lambda.min # value of lambda that minimizes MSE (the optimal value)
bestlam.r
plot(cv.out.ridge)
##fit ridge regression using training data
ridge.mod<-glmnet(x_2016[train,],y_2016[train],alpha=0,lambda=bestlam.r, thresh = 1e-14)
##test MSE with the CV-optimal lambda (bestlam.r)
ridge.pred.0<-predict(ridge.mod,newx=x_2016[test,])
mean((ridge.pred.0-y.test)^2)
##fit ridge regression using training data, fixed lambda = 1 for comparison
ridge.mod.1<-glmnet(x_2016[train,],y_2016[train],alpha=0,lambda=1, thresh = 1e-14)
##test MSE with lambda=1
ridge.pred.1<-predict(ridge.mod.1,newx=x_2016[test,])
mean((ridge.pred.1-y.test)^2)
# lasso produced a model with slightly lower MSE
##Compare ridge with OLS using best lambda and all observations
out.lasso<-glmnet(x_2016,y_2016,alpha=1,lambda=bestlam,thresh = 1e-14)
out.ridge<-glmnet(x_2016,y_2016,alpha=0,lambda=bestlam.r,thresh = 1e-14)
out.ols<-glmnet(x_2016,y_2016,alpha=0, lambda=0, thresh = 1e-14)
cbind(coefficients(out.lasso), coefficients(out.ridge), coefficients(out.ols))
residuals.glm(out.lasso, type = 'response')
# we favor the first-order model found using MLR analyses above over shrinkage regression
# LOOCV with first-order model.
# FIX: the original loops computed train_data but never used it -- every
# prediction came from a model fit on the FULL data, so the printed quantity
# was in-sample MSE, not leave-one-out cross-validation error. True LOOCV
# refits the model on the remaining n-1 rows before predicting the held-out row.
n <- nrow(fbi_df_2016)
sum_ressqr <- 0 # running sum of squared prediction errors
for (i in seq_len(n)) # one held-out observation per iteration
{
  testrow <- fbi_df_2016[i, , drop = FALSE]     # held-out row
  train_data <- fbi_df_2016[-i, ]               # everything except row i
  fit_i <- lm(formula(best_mod), data = train_data)   # refit without row i
  preds <- predict(fit_i, testrow, type="response")
  sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
  print(preds - testrow$hc_per100k)             # leave-one-out residual
}
print(sum_ressqr/n) # LOOCV mean squared prediction error
# check with hate group, found from MSE, adjR^2, etc. analysis
sum_ressqr <- 0
for (i in seq_len(n))
{
  testrow <- fbi_df_2016[i, , drop = FALSE]
  train_data <- fbi_df_2016[-i, ]
  fit_i <- lm(formula(hategroup_mod), data = train_data)  # refit challenger
  preds <- predict(fit_i, testrow, type="response")
  sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
  print(preds - testrow$hc_per100k)
}
print(sum_ressqr/n) # LOOCV mean squared prediction error for challenger
# w elasticity and hate group does slightly better by MSE
# both produce pretty good MSEs, so both predict pretty well
summary(best_mod)
summary(hategroup_mod)
summary(red)
##perform levene's test. Null states the variances are equal for all classes.
library(lawstat)
levene.test(fbi_df_2016$hc_per100k,fbi_df_2016$con_uni_combo) # variances are equal
#levene.test(hc_df$fbi_2019_per100k,hc_df$permit) # variances are equal
#levene.test(hc_df$fbi_2019_per100k,hc_df$universl) # variances are equal
summary(best_mod)
##perform Tukey's multiple comparisons across the con_uni_combo levels
library(multcomp)
pairwise<-glht(model_full, linfct = mcp(con_uni_combo= "Tukey"))
summary(pairwise)
levels(fbi_df_2016$con_uni_combo)
# #########################################################################
# INTERACTION ANALYSIS
# #########################################################################
# Visual interaction check: split the data by con_uni_combo level, fit one
# simple regression per level, and overlay the fitted lines. Clearly
# non-parallel lines suggest the slope depends on the factor level.
# consider each cut as a subset
neither<-subset(fbi_df_2016,con_uni_combo=="Neither")
con<-subset(fbi_df_2016,con_uni_combo=="Confederate Only")
law<-subset(fbi_df_2016,con_uni_combo=="Background Check Only")
# fit separate regressions (hate-crime rate vs Gini, per level)
gini_neither <- lm(hc_per100k~gini_index,data=neither)
gini_con <- lm(hc_per100k~gini_index,data=con)
gini_law <- lm(hc_per100k~gini_index,data=law)
# Create scatters:
plot(fbi_df_2016$gini_index, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and Gini Index", xlab = 'Gini', ylab = 'Hate Crimes per 100k')
points(neither$gini_index, neither$hc_per100k, pch=2, col="blue")
points(con$gini_index, con$hc_per100k, pch=3, col="red")
points(law$gini_index, law$hc_per100k, pch=4, col="orange")
abline(gini_neither,lty=1, col="blue")
abline(gini_con,lty=2, col="red")
abline(gini_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
summary(red)
# #########################################################################
# fit separate regressions (same per-level check for Trump vote share)
trump_neither <- lm(hc_per100k~share_voters_voted_trump,data=neither)
trump_con <- lm(hc_per100k~share_voters_voted_trump,data=con)
trump_law <- lm(hc_per100k~share_voters_voted_trump,data=law)
# Create scatters:
plot(fbi_df_2016$share_voters_voted_trump, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and Share Voters for Trump (2016)", xlab = 'Share Voted for Trump (2016)', ylab = 'Hate Crimes per 100k')
points(neither$share_voters_voted_trump, neither$hc_per100k, pch=2, col="blue")
points(con$share_voters_voted_trump, con$hc_per100k, pch=3, col="red")
points(law$share_voters_voted_trump, law$hc_per100k, pch=4, col="orange")
abline(trump_neither,lty=1, col="blue")
abline(trump_con,lty=2, col="red")
abline(trump_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
# #########################################################################
# fit separate regressions (same per-level check for % non-white)
nw_neither <- lm(hc_per100k~share_non_white,data=neither)
nw_con <- lm(hc_per100k~share_non_white,data=con)
nw_law <- lm(hc_per100k~share_non_white,data=law)
# Create scatters:
plot(fbi_df_2016$share_non_white, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and % Non White", xlab = '% Non White', ylab = 'Hate Crimes per 100k')
points(neither$share_non_white, neither$hc_per100k, pch=2, col="blue")
points(con$share_non_white, con$hc_per100k, pch=3, col="red")
points(law$share_non_white, law$hc_per100k, pch=4, col="orange")
abline(nw_neither,lty=1, col="blue")
abline(nw_con,lty=2, col="red")
abline(nw_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
# #########################################################################
# fit separate regressions (same per-level check for political elasticity)
el_neither <- lm(hc_per100k~elasticity,data=neither)
el_con <- lm(hc_per100k~elasticity,data=con)
el_law <- lm(hc_per100k~elasticity,data=law)
# Create scatters:
plot(fbi_df_2016$elasticity, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and Political Elasticity", xlab = 'Elasticity Score', ylab = 'Hate Crimes per 100k')
points(neither$elasticity, neither$hc_per100k, pch=2, col="blue")
points(con$elasticity, con$hc_per100k, pch=3, col="red")
points(law$elasticity, law$hc_per100k, pch=4, col="orange")
abline(el_neither,lty=1, col="blue")
abline(el_con,lty=2, col="red")
abline(el_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
# #########################################################################
# BEST INTERACTION MODEL WITH PARTIAL F ANALYSIS
# #########################################################################
# Backward-style search over interaction terms: start from a large
# interaction model, drop terms, and use partial F (anova of nested fits)
# to decide whether the reduced model is supported.
# NOTE: the leading "+" on continuation lines below is a unary plus; it is
# harmless but unconventional formatting.
summary(best_mod)
# full two-way interaction model: .^2 expands to all pairwise interactions
inter_full<-lm(formula = hc_per100k ~ .^2, data = fin_data)
summary(inter_full)
inter_1<-lm(formula = hc_per100k ~ gini_index*con_uni_combo +
+ share_non_white*con_uni_combo
+ share_voters_voted_trump*con_uni_combo +
+ elasticity*con_uni_combo + share_voters_voted_trump*elasticity
+ share_non_white*elasticity, data = fin_data)
summary(inter_1)
# start removing preds and check partial F
red_inter_1 <-lm(formula = hc_per100k ~ gini_index*con_uni_combo +
+ share_non_white*con_uni_combo
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fin_data)
summary(red_inter_1)
anova(inter_1, red_inter_1)
# can support red_inter_1 from partial F. whittle down some more:
red_inter_1 <-lm(formula = hc_per100k ~ share_non_white*con_uni_combo
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fin_data)
summary(red_inter_1)
anova(inter_1, red_inter_1)
# keep reducing
red_inter_1 <-lm(formula = hc_per100k ~ share_non_white
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fin_data)
summary(red_inter_1)
anova(inter_1, red_inter_1)
red_inter_1 <-lm(formula = hc_per100k ~ share_non_white
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fin_data)
# no-interaction counterpart, for a final partial-F comparison
no_inter<-lm(formula = hc_per100k ~ share_non_white
+ share_voters_voted_trump + con_uni_combo +
+ elasticity, data = fin_data)
summary(no_inter)
summary(red_inter_1)
anova(no_inter, red_inter_1)
best_mod<-red_inter_1
summary(red_inter_1) # hierarchical principle: higher order (interaction) terms are significant, so must leave lower-order terms in
vif(no_inter)
# no multicolinearity
# #########################################################################
# check if it predicts 2019 HC data well
# sqrt-transform the 2019 response so it is on the same scale as the
# (transformed) 2016 response used by best_mod. NOTE: in-place -- run once.
fbi_df_2019$hc_per100k<-sqrt(fbi_df_2019$hc_per100k)
# LOOCV on the 2016 data.
# FIX: the original predicted each held-out row from a model fit on ALL rows
# (train_data was never used), reporting in-sample error as "LOOCV MSE".
# Refit on the n-1 training rows before predicting.
n <- nrow(fbi_df_2016)
sum_ressqr <- 0 # running sum of squared LOO prediction errors
for (i in seq_len(n))
{
testrow <- fbi_df_2016[i, , drop = FALSE] # held-out observation
train_data <- fbi_df_2016[-i, ] # training data: every row except the ith
loo_fit <- update(best_mod, data = train_data) # refit without row i
preds <- predict(loo_fit, testrow, type = "response")
sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
print(preds - testrow$hc_per100k)
print(preds)
}
print(sum_ressqr/n) # avg MSE for the LOOCV on 2016
# 2019: external validation -- score every 2019 row with the 2016-fit model.
# No refitting here: the point is to see how the 2016 model transfers, so
# predicting from best_mod is correct (the original's train_data was dead code).
n <- nrow(fbi_df_2019)
sum_ressqr <- 0
for (i in seq_len(n))
{
testrow <- fbi_df_2019[i, , drop = FALSE]
preds <- predict(best_mod, testrow, type = "response")
sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
print(preds - testrow$hc_per100k)
print(preds)
}
print(sum_ressqr/n) # avg squared prediction error on 2019
# not the best, but not terrible
summary(red_inter_1)
# Residual diagnostics for the interaction model: residual-vs-fitted,
# Box-Cox transform check, residual autocorrelation, and normal Q-Q.
par(mfrow=c(2,2))
plot(red_inter_1$fitted.values,red_inter_1$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(red_inter_1, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
##acf plot of residuals
acf(red_inter_1$residuals, main="ACF Lag Plot")
qqnorm(red_inter_1$residuals)
qqline(red_inter_1$residuals, col="red")
# check outliers again
# outlier analysis #
# residuals
res_full <- unname(red_inter_1[['residuals']]) # need to remove the field headers
#res_2016 # uncomment to see residuals
# outlier, influential & leverage points analysis
# studentized residuals
student.res<-rstandard(red_inter_1)
# externally studentized residuals
ext.student.res<-rstudent(red_inter_1)
sort(ext.student.res)
# plot residuals vs standardized residuals found above
n<-length(fin_data$hc_per100k) # sample size
p<-length(red_inter_1$coefficients) # number of model parameters
##critical value using Bonferroni procedure (outlier test cutoff)
qt(1-0.05/(2*n), n-p-1)
par(mfrow=c(1,3))
plot(red_inter_1$fitted.values,res_full,main="Residuals")
plot(red_inter_1$fitted.values,student.res,main="Studentized Residuals")
#plot(best_mod$fitted.values,ext.student.res,main="Externally Studentized Residuals")
# calc values
plot(ext.student.res,main="Externally Studentized Residuals", ylim=c(-4,4))
abline(h=qt(1-0.05/(2*n), n-p-1), col="red")
abline(h=-qt(1-0.05/(2*n), n-p-1), col="red")
# no outliers in the response...
ext.student.res[abs(ext.student.res)>qt(1-0.05/(2*n), n-p-1)]
##leverages (hat values); rule-of-thumb cutoff is 2p/n
lev_full<-lm.influence(red_inter_1)$hat
sort(lev_full)
par(mfrow=c(1,1))
plot(lev_full, main="Leverages", ylim=c(0,0.8))
abline(h=2*p/n, col="red")
# get leverage points
lev_full[lev_full>2*p/n]
# DC and VT leveraged in full
# influential observations: DFFITS, DFBETAS, and Cook's distance with the
# standard rule-of-thumb cutoffs
DFFITS<-dffits(red_inter_1)
DFFITS[abs(DFFITS)>2*sqrt(p/n)]
DFBETAS<-dfbetas(red_inter_1)
DFBETAS[abs(DFBETAS)>2/sqrt(n)]
COOKS<-cooks.distance(red_inter_1)
COOKS[COOKS>qf(0.5,p,n-p)]
# #########################################################################
# MODEL ANALYSIS WITHOUT DC IN THE MODEL
# #########################################################################
# Repeat of the analysis with the DC row excluded (DC was a high-leverage
# point above). Requires the raw data frames to be reloaded first.
# Would need to rerun lines 1-20 in this code prior to running below
# remove DC
fbi_df_2016<-fbi_df_2016[!(row.names(fbi_df_2016) %in% c('DC')),]
fbi_df_2019<-fbi_df_2019[!(row.names(fbi_df_2019) %in% c('DC')),]
# rename hate crime columns to standardize
fbi_df_2016<-rename(fbi_df_2016, hc_per100k=avg_hatecrimes_per_100k_fbi)
fbi_df_2019<-rename(fbi_df_2019, hc_per100k=fbi_2019_per100k)
# drop rows with a missing hate-crime rate (only that column is checked)
fbi_df_2016<-fbi_df_2016 %>% drop_na(hc_per100k)
fbi_df_2019<-fbi_df_2019 %>% drop_na(hc_per100k)
# #########################################################################
# create box plots of categorical data. Notice difference in shape of confederate data -- confederate states had fewer HCs
# remaining two variables do not show the same difference
par(mfrow=c(1,2))
boxplot(fbi_df_2016$hc_per100k~fbi_df_2016$con_uni_combo, main = '2016 FBI: HC Rate by Levels', xlab = 'Confederate-Background Check Combo',
ylab = 'Hate Crimes per 100k (2016)', ylim=c(0,8))
grid()
boxplot(fbi_df_2019$hc_per100k~fbi_df_2019$con_uni_combo, main = '2019 FBI: HC Rate by Levels', xlab = 'Confederate-Background Check Combo',
ylab ='Hate Crimes per 100k (2019)', ylim=c(0,8))
grid()
pairs(fbi_df_2016, lower.panel=NULL)
# fit the full data -- not helpful except to look at correlation/multicollinearity
model_2016<-lm(hc_per100k~., data = fbi_df_2016)
# run VIF. notice high values (like HFR, suicide rates, share_population_in_metro_areas). We will remove these
vif(model_2016)
# remove correlated variables
fbi_df_2016<-fbi_df_2016[ , !(names(fbi_df_2016) %in% c('share_non_citizen', 'median_household_income', 'Fem_FS_S', 'Male_FS_S', 'share_population_in_metro_areas', 'HFR', 'share_population_with_high_school_degree'))]
# fit models again and run VIF
model_2016<-lm(hc_per100k~., data = fbi_df_2016)
vif(model_2016)
# check model summaries
summary(model_2016)
# produce scatter matrices again
pairs(fbi_df_2016, lower.panel=NULL, main = 'FBI Hate Crimes: 2016')
# #########################################################################
# MODEL SELECTION
# #########################################################################
# Stepwise selection (AIC-based) between the intercept-only and full models,
# run in all three directions; the chosen model is then refit explicitly.
##intercept only model
regnull <- lm(hc_per100k~1, data=fbi_df_2016)
##model with all predictors
regfull <- lm(hc_per100k~., data=fbi_df_2016)
##forward selection, backward elimination, and stepwise regression
step(regnull, scope=list(lower=regnull, upper=regfull), direction="forward")
step(regfull, scope=list(lower=regnull, upper=regfull), direction="backward")
step(regnull, scope=list(lower=regnull, upper=regfull), direction="both")
# refit the selected models (formulas transcribed from the step() output)
for_model <- lm(formula = hc_per100k ~ con_uni_combo + gini_index + share_non_white,
data = fbi_df_2016)
back_model <- lm(formula = hc_per100k ~ gini_index + share_non_white + con_uni_combo,
data = fbi_df_2016)
both_model <- lm(formula = hc_per100k ~ con_uni_combo + gini_index + share_non_white,
data = fbi_df_2016)
step_wise<-lm(formula = hc_per100k ~ con_uni_combo + gini_index + share_non_white,
data = fbi_df_2016)
summary(for_model)
summary(back_model)
summary(both_model)
# all produce the same models; low adj R^2
# residual diagnostics for the stepwise model
par(mfrow=c(2,2))
plot(step_wise$fitted.values,step_wise$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(step_wise, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
##acf plot of residuals
acf(step_wise$residuals, main="ACF Lag Plot")
qqnorm(step_wise$residuals)
qqline(step_wise$residuals, col="red")
# doesn't need a transformation, but the final model w/ interaction does later
# notice that 1 is barely in the confidence band. Data aren't really normal, but that's okay for our purposes
# #########################################################################
# Keep only the response and the selected predictors for the final fits.
fin_vars<-c('hc_per100k', 'gini_index', 'share_non_white',
'con_uni_combo')
# correlation matrix with the subset of variables
pairs(fbi_df_2016[fin_vars], lower.panel=NULL, main = 'Final Variables')
fin_data<-fbi_df_2016[fin_vars]
model_full<-lm(hc_per100k~., data = fin_data)
summary(model_full)
red<-lm(hc_per100k~gini_index+con_uni_combo, fin_data) # tested without share_non_white and trump share, but they're significant; remove elasticity
anova(red, model_full) # elasticity is not significant with alpha = 0.05
# #########################################################################
# Use the best-criteria processes to produce similar models
# #########################################################################
# All-subsets regression (leaps::regsubsets), ranking candidate models by
# R^2, adjusted R^2, MSE, Mallows' Cp, and BIC.
##perform all possible regressions (1st order)
allreg <- regsubsets(hc_per100k ~., data=fbi_df_2016, nbest=9) # sqrt
##create a "data frame" that stores the predictors in the various models considered as well as their various criteria
best <- as.data.frame(summary(allreg)$outmat)
best$p <- as.numeric(substr(rownames(best),1,1))+1 # parameters = predictors + intercept
best$r2 <- summary(allreg)$rsq
best$adjr2 <- summary(allreg)$adjr2
# FIX: the MSE denominator used dim(hc_df)[1] -- a different data frame from
# another section of this file. The candidate models were fit on
# fbi_df_2016, so n must come from fbi_df_2016 or every MSE here is computed
# with the wrong sample size.
best$mse <- (summary(allreg)$rss)/(dim(fbi_df_2016)[1]-best$p)
best$cp <- summary(allreg)$cp
best$bic <- summary(allreg)$bic
#best
##sort by various criteria
best[order(best$r2,decreasing = TRUE),] # our full_model
best[order(best$adjr2, decreasing = TRUE),] # our full_model
best[order(best$mse),] # our full_model
best[order(best$cp),] # our full_model
best[order(best$bic),] # not the full model. confederate, and Gini index
# same model with hate group might be decent. will check
challenger_mod<-lm(formula = hc_per100k ~ share_voters_voted_trump + share_non_white + gini_index +
con_uni_combo + elasticity, data = fbi_df_2016)
# same as model w DC in the previous section
summary(challenger_mod)
ch_mod_red<-lm(formula = hc_per100k ~ share_non_white + gini_index +
con_uni_combo, data = fbi_df_2016)
# removed elasticity and trump share
# tried removing share_non_white, but adj R^2 is very low. We should keep in for our purposes
summary(ch_mod_red)
anova(challenger_mod, ch_mod_red)
summary(red)
# keep prior model
best_mod <- ch_mod_red
summary(best_mod)
vif(best_mod)
# #########################################################################
# OUTLIER ANALYSIS
# #########################################################################
# residuals
res_full <- unname(best_mod[['residuals']]) # need to remove the field headers
#res_2016 # uncomment to see residuals
# outlier, influential & leverage points analysis
# studentized residuals
student.res<-rstandard(best_mod)
# externally studentized residuals
ext.student.res<-rstudent(best_mod)
# plot residuals vs standardized residuals found above
par(mfrow=c(1,3))
plot(best_mod$fitted.values,res_full,main="Residuals")
plot(best_mod$fitted.values,student.res,main="Studentized Residuals")
#plot(best_mod$fitted.values,ext.student.res,main="Externally Studentized Residuals")
# calc values
# NOTE(review): n is taken from fin_data, but best_mod in this section was
# fit on fbi_df_2016 -- confirm the two have the same row count here.
n<-length(fin_data$hc_per100k)
p<-length(best_mod$coefficients) # number of model parameters
##critical value using Bonferroni procedure (outlier test cutoff)
qt(1-0.05/(2*n), n-p-1)
sort(ext.student.res)
plot(ext.student.res,main="Externally Studentized Residuals", ylim=c(-4,4))
abline(h=qt(1-0.05/(2*n), n-p-1), col="red")
abline(h=-qt(1-0.05/(2*n), n-p-1), col="red")
ext.student.res[abs(ext.student.res)>qt(1-0.05/(2*n), n-p-1)]
par(mfrow=c(1,1))
##leverages (hat values); rule-of-thumb cutoff is 2p/n
lev_full<-lm.influence(best_mod)$hat
sort(lev_full)
plot(lev_full, main="Leverages", ylim=c(0,0.6))
abline(h=2*p/n, col="red")
# get leverage points
lev_full[lev_full>2*p/n]
# NY is a leverage point
# influential observations: DFFITS, DFBETAS, Cook's distance with the
# standard rule-of-thumb cutoffs
DFFITS<-dffits(best_mod)
DFFITS[abs(DFFITS)>2*sqrt(p/n)]
# KY, PA, and WA are influential
DFBETAS<-dfbetas(best_mod)
DFBETAS[abs(DFBETAS)>2/sqrt(n)]
COOKS<-cooks.distance(best_mod)
COOKS[COOKS>qf(0.5,p,n-p)]
# #########################################################################
# SHRINKAGE ANALYSIS
# #########################################################################
# check shrinkage models to see if they align with model_full and have decent predictive power
# begin shrinkage analysis
x_2016<-model.matrix(hc_per100k~., fbi_df_2016)[,-1] # remove the first column of 1s representing the intercept
y_2016<-fbi_df_2016$hc_per100k
###### Lasso
##alpha=0 for ridge, alpha=1 for LASSO
##threshold value should be very small if multicollinearity is present. see what happens if thresh was set to a larger value
##we know theoretically the coeffs should be the same as lm when lambda is 0
lasso.r<-glmnet(x_2016,y_2016,alpha=1, lambda=0, thresh = 1e-14) #MLR
coefficients(lasso.r)
##MLR - produce the same thing as above **as long as thresh is small enough
result_2016<-lm(hc_per100k~.,fbi_df_2016)
summary(result_2016)
##split data into a 50/50 train/test partition (seeded for reproducibility)
set.seed(12)
train<-sample(1:nrow(x_2016), nrow(x_2016)/2)
test<-(-train)
y.test<-y_2016[test]
##use CV to find optimal lambda based on training set
set.seed(12)
cv.out<-cv.glmnet(x_2016[train,],y_2016[train],alpha=1) # lasso regression
bestlam<-cv.out$lambda.min # value of lambda that minimizes MSE (the optimal value)
bestlam
plot(cv.out)
##fit lasso regression using training data
lasso.mod<-glmnet(x_2016[train,],y_2016[train],alpha=1,lambda=bestlam, thresh = 1e-14)
##test MSE with the CV-optimal lambda (bestlam)
lasso.pred.0<-predict(lasso.mod,newx=x_2016[test,])
mean((lasso.pred.0-y.test)^2)
##fit lasso regression using training data
lasso.mod.1<-glmnet(x_2016[train,],y_2016[train],alpha=1,lambda=1, thresh = 1e-14)
##test MSE with lambda=1
lasso.pred.1<-predict(lasso.mod.1,newx=x_2016[test,])
mean((lasso.pred.1-y.test)^2)
# MSE is pretty high
# perform ridge
##alpha=0 for ridge, alpha=1 for LASSO
##threshold value should be very small if multicollinearity is present. see what happens if thresh was set to a larger value
##we know theoretically the coeffs should be the same as lm when lambda is 0
ridge.r<-glmnet(x_2016,y_2016,alpha=0, lambda=0, thresh = 1e-14)
coefficients(ridge.r)
##MLR - produce the same thing as above **as long as thresh is small enough
summary(result_2016)
##split data (same seed, so the same train/test partition as the lasso run)
set.seed(12)
train<-sample(1:nrow(x_2016), nrow(x_2016)/2)
test<-(-train)
y.test<-y_2016[test]
##use CV to find optimal lambda based on training set
set.seed(12)
cv.out.ridge<-cv.glmnet(x_2016[train,],y_2016[train],alpha=0) # ridge regression
bestlam.r<-cv.out.ridge$lambda.min # value of lambda that minimizes MSE (the optimal value)
bestlam.r
plot(cv.out.ridge)
##fit ridge regression using training data
ridge.mod<-glmnet(x_2016[train,],y_2016[train],alpha=0,lambda=bestlam.r, thresh = 1e-14)
##test MSE with the CV-optimal lambda (bestlam.r)
ridge.pred.0<-predict(ridge.mod,newx=x_2016[test,])
mean((ridge.pred.0-y.test)^2)
##fit ridge regression using training data
ridge.mod.1<-glmnet(x_2016[train,],y_2016[train],alpha=0,lambda=1, thresh = 1e-14)
##test MSE with lambda=1
ridge.pred.1<-predict(ridge.mod.1,newx=x_2016[test,])
mean((ridge.pred.1-y.test)^2)
# lasso produced a slightly lower MSE
##Compare ridge with OLS using best lambda and all observations
out.lasso<-glmnet(x_2016,y_2016,alpha=1,lambda=bestlam,thresh = 1e-14)
out.ridge<-glmnet(x_2016,y_2016,alpha=0,lambda=bestlam.r,thresh = 1e-14)
out.ols<-glmnet(x_2016,y_2016,alpha=0, lambda=0, thresh = 1e-14)
cbind(coefficients(out.lasso), coefficients(out.ridge), coefficients(out.ols))
# lasso produced intercept-only model...
# once again, we can ignore the shrinkage methods; the reasons for using them don't really fit our context (high variance, high correlation)
# LOOCV
# cross val on the model we produced.
# FIX: the original predicted each held-out row from a model fit on ALL rows
# (train_data was computed but never used), so the reported "LOOCV MSE" was
# in-sample error. Refit on the n-1 training rows before predicting.
n <- nrow(fbi_df_2016)
sum_ressqr <- 0 # running sum of squared LOO prediction errors
for (i in seq_len(n))
{
testrow <- fbi_df_2016[i, , drop = FALSE] # held-out observation
train_data <- fbi_df_2016[-i, ] # training data: every row except the ith
loo_fit <- update(best_mod, data = train_data) # refit without row i
preds <- predict(loo_fit, testrow, type = "response")
sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
print(preds - testrow$hc_per100k)
}
print(sum_ressqr/n) # avg MSE for the LOOCV
# compare with the shrinkage-method MSEs above; since we don't have
# multicollinearity we don't really need the shrinkage methods anyway
# same LOOCV for the challenger model
sum_ressqr <- 0
for (i in seq_len(n))
{
testrow <- fbi_df_2016[i, , drop = FALSE]
train_data <- fbi_df_2016[-i, ]
loo_fit <- update(challenger_mod, data = train_data) # refit without row i
preds <- predict(loo_fit, testrow, type = "response")
sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
print(preds - testrow$hc_per100k)
}
print(sum_ressqr/n) # avg MSE for the LOOCV
# compare the two LOOCV MSEs printed above to pick between the models
summary(challenger_mod)
summary(best_mod)
# test the above solution
##perform levene's test. Null states the variances are equal for all classes.
library(lawstat)
levene.test(fbi_df_2016$hc_per100k,fbi_df_2016$con_uni_combo) # variances aren't equal
#levene.test(hc_df$fbi_2019_per100k,hc_df$permit) # variances are equal
#levene.test(hc_df$fbi_2019_per100k,hc_df$universl) # variances are equal
summary(best_mod)
##perform Tukey's multiple comparisons of the con_uni_combo level means
# NOTE(review): model_full here may still be the fit from the with-DC
# section unless it was re-run after removing DC -- confirm before citing.
library(multcomp)
pairwise<-glht(model_full, linfct = mcp(con_uni_combo= "Tukey"))
summary(pairwise)
levels(fbi_df_2016$con_uni_combo)
# #########################################################################
# INTERACTION ANALYSIS
# #########################################################################
# Same visual interaction check as the with-DC section: split by
# con_uni_combo level, fit one simple regression per level, and overlay the
# fitted lines; non-parallel lines suggest an interaction.
# consider neither confederate and background check as a subset
neither<-subset(fbi_df_2016,con_uni_combo=="Neither")
con<-subset(fbi_df_2016,con_uni_combo=="Confederate Only")
law<-subset(fbi_df_2016,con_uni_combo=="Background Check Only")
# fit separate regressions (hate-crime rate vs Gini, per level)
gini_neither <- lm(hc_per100k~gini_index,data=neither)
gini_con <- lm(hc_per100k~gini_index,data=con)
gini_law <- lm(hc_per100k~gini_index,data=law)
# Create scatters:
plot(fbi_df_2016$gini_index, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and Gini Index", xlab = 'Gini', ylab = 'Hate Crimes per 100k')
points(neither$gini_index, neither$hc_per100k, pch=2, col="blue")
points(con$gini_index, con$hc_per100k, pch=3, col="red")
points(law$gini_index, law$hc_per100k, pch=4, col="orange")
abline(gini_neither,lty=1, col="blue")
abline(gini_con,lty=2, col="red")
abline(gini_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
summary(red)
# #########################################################################
# fit separate regressions (same per-level check for Trump vote share)
trump_neither <- lm(hc_per100k~share_voters_voted_trump,data=neither)
trump_con <- lm(hc_per100k~share_voters_voted_trump,data=con)
trump_law <- lm(hc_per100k~share_voters_voted_trump,data=law)
# Create scatters:
plot(fbi_df_2016$share_voters_voted_trump, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and Share Voters for Trump (2016)", xlab = 'Share Voted for Trump (2016)', ylab = 'Hate Crimes per 100k')
points(neither$share_voters_voted_trump, neither$hc_per100k, pch=2, col="blue")
points(con$share_voters_voted_trump, con$hc_per100k, pch=3, col="red")
points(law$share_voters_voted_trump, law$hc_per100k, pch=4, col="orange")
abline(trump_neither,lty=1, col="blue")
abline(trump_con,lty=2, col="red")
abline(trump_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
# #########################################################################
# fit separate regressions (same per-level check for % non-white)
nw_neither <- lm(hc_per100k~share_non_white,data=neither)
nw_con <- lm(hc_per100k~share_non_white,data=con)
nw_law <- lm(hc_per100k~share_non_white,data=law)
# Create scatters:
plot(fbi_df_2016$share_non_white, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and % Non White", xlab = '% Non White', ylab = 'Hate Crimes per 100k')
points(neither$share_non_white, neither$hc_per100k, pch=2, col="blue")
points(con$share_non_white, con$hc_per100k, pch=3, col="red")
points(law$share_non_white, law$hc_per100k, pch=4, col="orange")
abline(nw_neither,lty=1, col="blue")
abline(nw_con,lty=2, col="red")
abline(nw_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
# #########################################################################
# fit separate regressions (same per-level check for political elasticity)
el_neither <- lm(hc_per100k~elasticity,data=neither)
el_con <- lm(hc_per100k~elasticity,data=con)
el_law <- lm(hc_per100k~elasticity,data=law)
# Create scatters:
plot(fbi_df_2016$elasticity, fbi_df_2016$hc_per100k, main="Hate-Crime Rate and Political Elasticity", xlab = 'Elasticity Score', ylab = 'Hate Crimes per 100k')
points(neither$elasticity, neither$hc_per100k, pch=2, col="blue")
points(con$elasticity, con$hc_per100k, pch=3, col="red")
points(law$elasticity, law$hc_per100k, pch=4, col="orange")
abline(el_neither,lty=1, col="blue")
abline(el_con,lty=2, col="red")
abline(el_law,lty=3, col="orange")
legend("bottomright", c("Neither","Confederate Only","Background-Check Only"), lty=c(1,2,3), pch=c(2,3,4), col=c("blue","red","orange"))
# #########################################################################
# BEST INTERACTION MODEL WITHOUT DC INCLUDED
# #########################################################################
# Backward-style search over interaction terms on the no-DC data, using
# partial F tests (anova of nested fits) at each reduction.
# NOTE: the leading "+" on continuation lines below is a unary plus; it is
# harmless but unconventional formatting.
summary(best_mod)
# full two-way interaction model: .^2 expands to all pairwise interactions
inter_full<-lm(formula = hc_per100k ~ .^2, data = fbi_df_2016)
summary(inter_full)
inter_1<-lm(formula = hc_per100k ~ gini_index*con_uni_combo +
+ share_non_white*con_uni_combo
+ share_voters_voted_trump*con_uni_combo +
+ elasticity*con_uni_combo + share_voters_voted_trump*elasticity
+ share_non_white*elasticity, data = fbi_df_2016)
summary(inter_1)
# start removing preds and check partial F
red_inter_1 <-lm(formula = hc_per100k ~ gini_index*con_uni_combo +
+ share_non_white*con_uni_combo
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fbi_df_2016)
summary(red_inter_1)
anova(inter_1, red_inter_1)
# can support red_inter_1 from partial F. whittle down some more:
red_inter_1 <-lm(formula = hc_per100k ~ share_non_white*con_uni_combo + gini_index
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fbi_df_2016)
summary(red_inter_1)
anova(inter_1, red_inter_1)
# keep reducing
red_inter_1 <-lm(formula = hc_per100k ~ share_non_white + gini_index
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fbi_df_2016)
summary(red_inter_1)
anova(inter_1, red_inter_1)
# no-interaction counterpart for comparison
no_inter<-lm(formula = hc_per100k ~ share_non_white
+ share_voters_voted_trump + con_uni_combo +
+ elasticity, data = fbi_df_2016)
# keep Gini in since it helps adj R^2
summary(no_inter)
summary(red_inter_1)
anova(inter_1, red_inter_1)
best_mod<-red_inter_1
summary(red_inter_1) # hierarchical principle: higher order (interaction) terms are significant, so must leave lower-order terms in
# additive version of the same predictor set, to test the interaction terms
red_inter_sum <-lm(formula = hc_per100k ~ share_non_white + gini_index
+ share_voters_voted_trump+con_uni_combo +
+ elasticity, data = fbi_df_2016)
summary(red_inter_sum)
anova(red_inter_sum, red_inter_1)
# favor model with interaction since we reject H_0 stating to remove
#vcov(red_inter_1)
vif(no_inter)
vif(red_inter_sum)
# check if it predicts 2019 HC data well
#fbi_df_2019$hc_per100k<-fbi_df_2019$hc_per100k
# LOOCV on the 2016 data for the interaction model.
# FIX: the original predicted each held-out row from red_inter_1, which was
# fit on ALL rows (train_data was never used) -- that reports in-sample
# error, not LOOCV. Refit on the n-1 training rows before predicting.
n <- nrow(fbi_df_2016)
sum_ressqr <- 0 # running sum of squared LOO prediction errors
for (i in seq_len(n))
{
testrow <- fbi_df_2016[i, , drop = FALSE] # held-out observation
train_data <- fbi_df_2016[-i, ] # training data: every row except the ith
loo_fit <- update(red_inter_1, data = train_data) # refit without row i
preds <- predict(loo_fit, testrow, type = "response")
sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
print(preds - testrow$hc_per100k)
print(preds)
}
print(sum_ressqr/n) # avg MSE for the LOOCV on 2016
# 2019: external validation -- score every 2019 row with the 2016-fit model.
# No refitting here: the point is how the 2016 model transfers to 2019, so
# predicting from red_inter_1 is correct (the original's train_data was dead code).
n <- nrow(fbi_df_2019)
sum_ressqr <- 0
for (i in seq_len(n))
{
testrow <- fbi_df_2019[i, , drop = FALSE]
preds <- predict(red_inter_1, testrow, type = "response")
sum_ressqr <- sum_ressqr + (preds - testrow$hc_per100k)^2
print(preds - testrow$hc_per100k)
print(preds)
}
print(sum_ressqr/n) # avg squared prediction error on 2019
# not the best predictive transfer to 2019
# Residual diagnostics for the interaction model, then sqrt-response
# refits (suggested by the Box-Cox plot) and their diagnostics.
par(mfrow=c(2,2))
plot(red_inter_1$fitted.values,red_inter_1$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(red_inter_1, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
##acf plot of residuals
acf(red_inter_1$residuals, main="ACF Lag Plot")
qqnorm(red_inter_1$residuals)
qqline(red_inter_1$residuals, col="red")
# sqrt-response versions of the reduced interaction model, with and without
# gini_index; partial F below tests whether Gini can be dropped
sqrt_red<-lm(formula = sqrt(hc_per100k) ~ share_non_white + gini_index
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fbi_df_2016)
sqrt_red_exGini<-lm(formula = sqrt(hc_per100k) ~ share_non_white
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fbi_df_2016)
anova(sqrt_red_exGini, sqrt_red)
summary(sqrt_red)
summary(sqrt_red_exGini)
par(mfrow=c(2,2))
plot(sqrt_red_exGini$fitted.values,sqrt_red_exGini$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(sqrt_red_exGini, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
##acf plot of residuals
acf(sqrt_red_exGini$residuals, main="ACF Lag Plot")
qqnorm(sqrt_red_exGini$residuals)
qqline(sqrt_red_exGini$residuals, col="red")
# rerun final model on 2019 data
sqrt_red_exGini<-lm(formula = sqrt(hc_per100k) ~ share_non_white
+ share_voters_voted_trump*con_uni_combo +
+ share_non_white*elasticity, data = fbi_df_2019)
summary(sqrt_red_exGini)
# LOOCV on the 2019 fit.
# FIX 1: refit on the n-1 training rows each iteration; the original
# predicted from the all-rows fit (in-sample error, not LOOCV).
# FIX 2: the model's response is sqrt(hc_per100k), so predict() returns
# sqrt-scale fitted values; the original compared them to the RAW response,
# mixing scales and inflating the reported MSE. Compare on the sqrt scale.
# NOTE(review): assumes hc_per100k in fbi_df_2019 is untransformed here (the
# in-place sqrt earlier in the file belongs to the with-DC run) -- confirm.
n <- nrow(fbi_df_2019)
sum_ressqr <- 0 # running sum of squared LOO prediction errors (sqrt scale)
for (i in seq_len(n))
{
testrow <- fbi_df_2019[i, , drop = FALSE] # held-out observation
train_data <- fbi_df_2019[-i, ] # training data: every row except the ith
loo_fit <- update(sqrt_red_exGini, data = train_data) # refit without row i
preds <- predict(loo_fit, testrow, type = "response")
sum_ressqr <- sum_ressqr + (preds - sqrt(testrow$hc_per100k))^2
print(preds - sqrt(testrow$hc_per100k))
print(preds)
}
print(sum_ressqr/n) # avg MSE for the LOOCV (on the sqrt response scale)
# doesn't predict well. Would need to redo the whole analysis for 2019. So the model doesn't seem to have the best predictive ability
# #########################################################################
# SMALLER MODEL ANALYSIS WITH NO INTERACTION
# #########################################################################
# Load the full data set, using state abbreviations as row names.
hate_DF<-read.csv('hate_crimes_full_vfin.csv', row.names = 'state_abbrev')
colnames(hate_DF)
#Need to set categorical variables so R can recognize them as such
hate_DF$confederate<-factor(hate_DF$confederate)
hate_DF$permit<-factor(hate_DF$permit) #State has permit to purchase law 1 = yes
hate_DF$universl<-factor(hate_DF$universl) #State has universal background checks 1 = yes
hate_DF$con_uni_combo<-factor(hate_DF$con_uni_combo)
#Looking at making a dataframe only including the variables left after removing these below
# Drop columns that are out of scope for this analysis (2019 outcomes,
# survey/licensing covariates, and the two factors folded into con_uni_combo).
hate_DF<-hate_DF[ , !(names(hate_DF) %in% c('fbi_2019_per100k', 'hate_crimes_per_100k_splc', 'state_full', 'FIP', 'Year',
                                            'hate_group_count_2019', 'FIP Year', 'HFR_se', 'BRFSS', 'GALLUP', 'GSS', 'PEW', 'HuntLic', 'GunsAmmo',
                                            'BackChk', 'PewQChng', 'BS1', 'BS2', 'BS3', 'pk_count', 'Number.of.participating.agencies',
                                            'Agencies.submitting.incident.reports', 'pop_covered', 'population', 'incidents', 'pk_percap',
                                            'confederate', 'universl'))]
#Setting the reference level to be neither for universal and confederate combo. Should have 3 sepereate equations from this
levels(hate_DF$con_uni_combo)
hate_DF$con_uni_combo<-relevel(hate_DF$con_uni_combo, ref = "Neither")
levels(hate_DF$con_uni_combo)
#Used to remove all the rows that include NA values anywhere
# Keep only rows where the response variable is observed.
hate_DF<-hate_DF %>% drop_na(avg_hatecrimes_per_100k_fbi)
#hate_DF<-hate_DF[!(row.names(hate_DF) %in% c('DC')),] #REMOVE THE DC NEED TO DO THAT
nrow(hate_DF)
# Pairwise scatterplots across all remaining predictors.
pairs(hate_DF, lower.panel=NULL, main="Scatterplots of Predictors")
#Can see that a lot of variables have large amount of correlation
boxplot(hate_DF$avg_hatecrimes_per_100k_fbi~hate_DF$permit)
boxplot(hate_DF$avg_hatecrimes_per_100k_fbi~hate_DF$con_uni_combo)
# #########################################################################
#Run a full model to look at predictors to see how correlated they are
result<-lm(avg_hatecrimes_per_100k_fbi~., data = hate_DF)
vif(result) # HFR along with male and female suicide rate is highly correlated (18+ VIF)
summary(result)
#Remove the variables I don't want cause of high correlation from VIF calculation
hate_DF<-hate_DF[ , !(names(hate_DF) %in% c('median_household_income', 'share_population_with_high_school_degree', 'Fem_FS_S', 'HFR', 'share_voters_voted_trump',
                                            'share_population_in_metro_areas', 'share_non_white', 'share_non_citizen', 'Male_FS_S'))]
# #########################################################################
#Intercept only model to set up for selection process
regnull <- lm(avg_hatecrimes_per_100k_fbi~1, data=hate_DF)
#Model with all 17 predictors for our model
regfull <- lm(avg_hatecrimes_per_100k_fbi~., data=hate_DF)
##forward selection, backward elimination, and stepwise regression
# AIC-based stepwise search in three directions; the selected formulas are
# copied into the fitted models below.
step(regnull, scope=list(lower=regnull, upper=regfull), direction="forward")
step(regfull, scope=list(lower=regnull, upper=regfull), direction="backward")
step(regnull, scope=list(lower=regnull, upper=regfull), direction="both")
#Forward selection model
forward_result <- lm(formula = avg_hatecrimes_per_100k_fbi ~ gini_index + con_uni_combo +
                       hate_group_count_2016 + elasticity + share_unemployed_seasonal,
                     data = hate_DF)
summary(forward_result)
#Backward selection model suicide rate becomes important when going with this approach
backward_result <- lm(formula = avg_hatecrimes_per_100k_fbi ~ share_unemployed_seasonal +
                        gini_index + elasticity + hate_group_count_2016 + con_uni_combo,
                      data = hate_DF)
summary(backward_result)
#Both selections model
both_result<-lm(formula = avg_hatecrimes_per_100k_fbi ~ gini_index + con_uni_combo +
                  hate_group_count_2016 + elasticity + share_unemployed_seasonal,
                data = hate_DF)
summary(both_result)
# #########################################################################
#Full Model
result<-lm(avg_hatecrimes_per_100k_fbi~., data = hate_DF)
summary(result)
#Model chosen from backwards selection
resultsmall<-lm(formula = avg_hatecrimes_per_100k_fbi ~ gini_index + con_uni_combo +
                  hate_group_count_2016 + elasticity + share_unemployed_seasonal,
                data = hate_DF)
summary(resultsmall)
# ANSWER: The null hypothesis says that all missing variables are equal to 0. The alternative
# states that each of their slopes are nonzero so good for our model fit.
# Partial F test: compare the selected model against the full model.
anova(resultsmall,result)
anova(result)
#Every sum of squares is given based on the fact that the previous predictor is in the model
#ANSWER: Go with the smaller model because you fail to reject the null since p value
#is greater than your significance level of 0.05
# #########################################################################
#Check to see if the linear assumptions are met if 1 falls within the range
par(mfrow=c(2,2))
plot(resultsmall$fitted.values,resultsmall$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(resultsmall, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
# Acf plot of residuals
acf(resultsmall$residuals, main="ACF Lag Plot")
qqnorm(resultsmall$residuals)
qqline(resultsmall$residuals, col="red")
#Need to perform a transformation since the linear assumptions are not met when looking at boxcox
#Took the square root because the boxcox and residual plot indicate the linear transformations aren't met
resulttrans <- lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo +
                    hate_group_count_2016 + elasticity + share_unemployed_seasonal,
                  data = hate_DF)
summary(resulttrans)
# Plots to check whether linear assumptions Hold true
# Same 2x2 diagnostic panel, now for the sqrt-transformed response.
par(mfrow=c(2,2))
plot(resulttrans$fitted.values,resulttrans$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(resulttrans, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
# Acf plot of residuals
acf(resulttrans$residuals, main="ACF Lag Plot")
qqnorm(resulttrans$residuals)
qqline(resulttrans$residuals, col="red")
# #########################################################################
#Partial F test round two after the transformation
result2<-lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo +
              hate_group_count_2016 + elasticity + share_unemployed_seasonal,
            data = hate_DF)
summary(result2)
#Model chosen from backwards selection
# Candidate reduction: drop share_unemployed_seasonal.
resultsmall2<-lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo +
                   hate_group_count_2016 + elasticity, data = hate_DF)
summary(resultsmall2)
# ANSWER: The null hypothesis says that all missing variables are equal to 0. The alternative
# states that each of their slopes are nonzero so good for our model fit.
anova(resultsmall2,result2)
anova(result2)
final1 <- lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo +
               hate_group_count_2016 + elasticity, data = hate_DF)
summary(final1)
# #########################################################################
#Next take the transformation data and perform another partial F test
#Full Model
final1 <- lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo +
               hate_group_count_2016 + elasticity, data = hate_DF)
summary(final1)
#Model chosen from backwards selection
# Candidate reduction: drop elasticity.
final2 <- lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo +
               hate_group_count_2016, data = hate_DF)
summary(final2)
# ANSWER: The null hypothesis says that all missing variables are equal to 0. The alternative
# states that each of their slopes are nonzero so good for our model fit.
anova(final2,final1)
#ANOTHER PARTIAL F TEST
final2 <- lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo +
               hate_group_count_2016, data = hate_DF)
summary(final2)
#Model withe hategroupcount removed from the data
final3 <- lm(formula = sqrt(avg_hatecrimes_per_100k_fbi) ~ gini_index + con_uni_combo, data = hate_DF)
summary(final3)
# ANSWER: The null hypothesis says that all missing variables are equal to 0. The alternative
# states that each of their slopes are nonzero so good for our model fit.
anova(final3,final2)
# #########################################################################
# Plots to check whether linear assumptions Hold true
# Final 2x2 diagnostic panel for the chosen model (final3).
par(mfrow=c(2,2))
plot(final3$fitted.values,final3$residuals, main="Plot of Residuals against Fitted Values")
abline(h=0,col="red")
library(MASS)
boxcox(final3, lambda = seq(-1.25, 3, 1/10), main="Box-Cox Lambda Transform")
# Acf plot of residuals
acf(final3$residuals, main="ACF Lag Plot")
qqnorm(final3$residuals)
qqline(final3$residuals, col="red")
#Final Model Number 3 has gini_index and con_unit combo meaning you will have 3 equations with just Gini and the three different scenarios
#for the reference class.
|
95842e6bdf3829e57c710c6d597ed79e6fcf1fef | 2ba22f489011cfb61d6727ab522bf3904f78eefc | /man/plot.catatis.Rd | 039044c6d9996d6ea933d38ced644149888b27e2 | [] | no_license | cran/ClustBlock | 847297472d9cc6f05bad33b23fd78a48938dead7 | eed656e469929805b6c72f465912c965fe9f580f | refs/heads/master | 2023-07-07T05:26:15.786420 | 2023-06-29T17:00:02 | 2023-06-29T17:00:02 | 174,553,356 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,518 | rd | plot.catatis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.catatis.R
\name{plot.catatis}
\alias{plot.catatis}
\title{Displays the CATATIS graphs}
\usage{
\method{plot}{catatis}(x, Graph=TRUE, Graph_weights=TRUE, Graph_eig=TRUE,
axes=c(1,2), tit="CATATIS", cex=1, col.obj="blue", col.attr="red", ...)
}
\arguments{
\item{x}{object of class 'catatis'}
\item{Graph}{logical. Show the graphical representation? Default: TRUE}
\item{Graph_weights}{logical. Should the barplot of the weights be plotted? Default: TRUE}
\item{Graph_eig}{logical. Should the barplot of the eigenvalues be plotted? Only with Graph=TRUE. Default: TRUE}
\item{axes}{numerical vector (length 2). Axes to be plotted}
\item{tit}{string. Title for the graphical representation. Default: 'CATATIS'}
\item{cex}{numerical. Numeric character expansion factor; multiplied by par("cex") yields the final character size. NULL and NA are equivalent to 1.0.}
\item{col.obj}{numerical or string. Color for the objects points. Default: "blue"}
\item{col.attr}{numerical or string. Color for the attributes points. Default: "red"}
\item{...}{further arguments passed to or from other methods}
}
\value{
the CATATIS map
}
\description{
This function plots the CATATIS map and CATATIS weights
}
\examples{
\donttest{
data(straw)
res.cat=catatis(straw, nblo=114)
plot(res.cat, Graph_weights=FALSE, axes=c(1,3))
}
}
\seealso{
\code{\link{catatis}}
}
\keyword{CATA}
\keyword{RATA}
|
e8c6a4e04d421bcba80a285e58f0afa661fc646c | f5bc3a93a5482ded18b635c0c5ad173e07fcf6d2 | /sircomp.R | d6eb35a0ec29da68cfbe2d5f0adab2cbad84fe2e | [] | no_license | madhurima-nath/epi_model | 8b7155d0032a69a60e010a489025edf10b7daf45 | 7d9ad016e359eb87b5de995f80ef3408346c22d8 | refs/heads/master | 2023-08-16T13:06:27.841182 | 2021-03-16T19:43:23 | 2021-03-16T19:43:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,299 | r | sircomp.R | ##install packages
install.packages('deSolve')
## Load deSolve package
library(deSolve)
## Create an SIR function
## SIR derivative function in the form expected by deSolve::ode():
## given the current time, the named state vector c(S, I, R) and the
## named parameter vector c(beta, gamma), return a one-element list
## containing the derivatives (dS, dI, dR).
sir <- function(time, state, parameters) {
  S <- state[["S"]]
  I <- state[["I"]]
  beta  <- parameters[["beta"]]
  gamma <- parameters[["gamma"]]
  new_infections <- beta * S * I  # flow S -> I
  recoveries     <- gamma * I     # flow I -> R
  list(c(-new_infections, new_infections - recoveries, recoveries))
}
### Set parameters
## Fraction in each compartment:
## Total population size = N = 1, Infected = 1*10^-5, Susceptibles = N - I0, Recovered = 0
N_total = 1
I0 = 1e-05
# BUG FIX: compute the susceptible fraction from I0 (the initial infected
# fraction). The original used the undefined object `I`, which resolves to
# base R's identity function I() and makes `N_total - I` error out.
S0 = N_total - I0
R0 = 0.0
init = c(S = S0, I = I0, R = R0)
## beta: infection rate; gamma: recovery rate
parameters = c(beta = 0.5, gamma = 0.05)
## Time frame
days = seq(0, 200, by = 1)
## Solve using ode (General Solver for Ordinary Differential Equations)
out = ode(y = init, times = days, func = sir, parms = parameters)
## change to data frame
out = as.data.frame(out)
## Show data
head(out, 10)
out$time = NULL
## remove the time column from the dataframe, to make it easier to plot
## Plot the three compartment trajectories as lines
fig = matplot(x = days, y = out, type = "l",
              xlab = "Time", ylab = "Fraction of People",
              lwd = 3, lty = 1, bty = "l", col = 2:4)
box(which = "plot")
## Add legend
legend(140, 0.7, c("Susceptible", "Infected", "Recovered"),
       pch = c(NA, NA, NA), lty = 1, col = 2:4, bty = "o", lwd = 3)
|
f197ec0f0b24b5eff2ccf1ca540778eeaa5afc6b | 6bb82b6ec315060653cc675155321b8b53e85ed9 | /03 R Syntax 1 (Data Typs and Strings)/03_2_List_Matrix_Array.R | 0982f5b7ac09b182aa85c696ea25662e4d781b60 | [] | no_license | inkyscope/R-for-Data-Analytics | d9d66a58fd67a5c4731c186010de281657bd82d6 | fd9e04e4184e7edee6ee08ca238ac4c24a83a5c7 | refs/heads/master | 2023-01-31T12:43:00.186966 | 2020-12-11T07:11:54 | 2020-12-11T07:11:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,003 | r | 03_2_List_Matrix_Array.R | # Part 2-1: Data Handling (List)
# Example of a list: lists can mix element types (numbers and strings here).
listA <- list(1, 2, "a")
print(listA)
listA[[1]]
listA[c(1,2)]
names(listA)
names(listA) <- c("First", "Second", "Third")
listA[["Third"]]
listA$Third
# Data Handling: List
A <- list(name="Kang", salary = 10000, union = TRUE)
A
A$name
B <- list("Kang", 10000, TRUE)
B
B[[1]]
# Build up a named list incrementally, starting from an empty list.
C <- vector(mode="list")
C[["name"]] <- "Kang"
C[["salary"]] <- 10000
C[["union"]] <- TRUE
C
C$name
C[["name"]]
C[[1]]
# [[ ]] extracts the element itself; [ ] returns a one-element sub-list.
C1 <- C[[1]]
class(C1)
C1
C2 <- C[1]
class(C2)
C2
# Assigning to a new name appends; assigning NULL removes the element.
C$office <- "frontier"
C
C$salary <- NULL
C
tmplist <- list(a = list(1:5, c("a","b","c")), b = "Z", c = NA)
tmplist
# unlist() flattens nested lists into a single (character, here) vector.
unlist(tmplist)
unlist(tmplist, use.names = FALSE)
A <- list(1:3,25:29)
A
# lapply() returns a list of results; sapply() simplifies to a vector.
lapply(A,median)
sapply(A,median)
# Part 2-2: Data Handling (Matrix)
# Example of a matrix
A <- 1:6
dim(A)
print(A)
# Assigning dim() reshapes the vector into a 2x3 matrix (column-major fill).
dim(A) <- c(2,3)
print(A)
B <- list(1,2,3,4,5,6)
print(B)
dim(B)
# dim() can also be set on a list, producing a list with matrix-like shape.
dim(B) <- c(2,3)
print(B)
# A three-dimensional array: 2 rows x 3 columns x 2 layers.
D <- 1:12
dim(D) <- c(2,3,2)
print(D)
# Part 2-3: Data Handling (Matrix & Array)
A = matrix(1:15, nrow=5, ncol=3)
A
# byrow = TRUE fills the matrix row by row instead of column by column.
B = matrix(1:15, nrow=5, byrow = T)
B
# An uninitialized matrix can be filled cell by cell.
C = matrix(nrow=2,ncol=2)
C[1,1] = 1
C[1,2] = 2
C[2,1] = 3
C[2,2] = 4
C
A = matrix(1:4, nrow=2, ncol=2)
B = matrix(seq(from=2,to=8,by=2), nrow=2, ncol=2)
A
B
A*B # Element-wise matrix multiplication
A %*% B # Matrix multiplication
A*3 # Matrix*Constant
A+B # Matrix Addition
# Indexing: single cells, whole rows/columns, ranges, and negative (drop) indices.
C = matrix(1:15, nrow=5, ncol=3)
C
C[3,2]
C[2,]
C[,3]
C[2:4,2:3]
C[-1,]
C[1,] <- c(10, 11, 12)
C
A <- matrix(c(1:6), nrow=3, ncol=2)
A
# Logical row filtering and locating elements by condition.
A[A[,2]>=5,]
which(A>3)
# apply() over margin 1 = rows, margin 2 = columns.
A <- matrix(c(1:6), nrow=3, ncol=2)
apply(A,1,mean)
apply(A,2,mean)
# Stacking matrices by rows (rbind) and columns (cbind).
A <- matrix(c(1:6), nrow=3, ncol=2)
B <- matrix(c(11:16), nrow=3, ncol=2)
A
B
rbind(A,B)
cbind(A,B)
cbind(A[,1],B[,2])
# Row and column names; drop=FALSE keeps the matrix class on single-column selection.
A <- matrix(c(1:6), nrow=3, ncol=2)
colnames(A)
rownames(A)
colnames(A) <- c("1st","2nd")
colnames(A)
rownames(A) <- c("First","Second","Third")
rownames(A)
A[,"1st",drop=FALSE]
# Combining two matrices into a 3x2x2 array.
A <- matrix(c(1:15), nrow=5, ncol=3)
B <- matrix(c(11:25), nrow=5, ncol=3)
A
B
C <- array(data=c(A,B),dim=c(3,2,2))
C
42fddd771e453b3fd6026320bc2fa0af1346d708 | b9f2e470d568cde6eac087d771f9adbee117dd1a | /R/dbPars.R | c7df1938ddf600522d008ff9ef670261f07c451f | [] | no_license | cran/megaptera | 0e3852c12ab3d2ec0050d94bf82de74cf8ccc71f | 83855c9a44ba3f0280cd39c1bfdde1fccd25478f | refs/heads/master | 2020-05-16T15:12:09.188082 | 2014-10-30T00:00:00 | 2014-10-30T00:00:00 | 26,581,099 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,900 | r | dbPars.R | setClass("dbPars",
representation = list(
host = "character",
port = "numeric",
dbname = "character",
user = "character",
password = "character")
)
## Constructor for a 'dbPars' connection-parameter object.
## Connects to the PostgreSQL server, creates the database if it does not
## exist yet, and returns the parameter object for later use by dbconnect().
"dbPars" <- function(host = "localhost",
                     port = 5432,
                     dbname,
                     user = "postgres",
                     password){

  dbname <- tolower(dbname)  # PostgreSQL folds unquoted identifiers to lower case

  ## check if database exists ...
  conn <- dbConnect(PostgreSQL(),
                    user = user, port = port)
  ## FIX: release the maintenance connection even if a query below fails;
  ## the original code leaked the connection on error.
  on.exit(dbDisconnect(conn), add = TRUE)
  sql <- paste("SELECT 1 FROM pg_database WHERE",
               sql.wrap(dbname, term = "datname"))
  if ( nrow(dbGetQuery(conn, sql)) == 1 ){
    cat("\ndatabase '", dbname, "' exists", sep = "")
  } else {
    ## .. and create it if it does not exist
    cat("\ndatabase '", dbname, "' created", sep = "")
    sql <- paste("CREATE DATABASE", dbname,
                 "WITH ENCODING='UTF8'",
                 "CONNECTION LIMIT=-1;")
    dbSendQuery(conn, sql)
  }

  new("dbPars",
      host = host, port = port,
      dbname = dbname,
      user = user, password = password
  )
}
## show() method: pretty-print the stored connection parameters, including
## the password in plain text (intended for interactive inspection only).
setMethod("show",
          signature(object = "dbPars"),
          function (object)
          {
            cat("PostgreSQL connection parameters:",
                "\n host =", object@host,
                "\n port =", object@port,
                "\n dbname =", object@dbname,
                "\n user =", object@user,
                "\n password =", object@password
            )
          }
)
## Open a PostgreSQL connection using the settings held in a 'dbPars' object.
## Stops with an error when the argument is not of class 'dbPars'.
dbconnect <- function(dbPars) {
  if (!inherits(dbPars, "dbPars")) {
    stop("dbPars is not of class 'dbPars'")
  }
  connection_args <- list(
    PostgreSQL(),
    host = dbPars@host,
    port = dbPars@port,
    user = dbPars@user,
    password = dbPars@password,
    dbname = dbPars@dbname
  )
  do.call(dbConnect, connection_args)
}
e18eb1bcf50e28c85d3a532d762144ed26054c05 | e8c138d1b839752698e9ae7220287644e1f99424 | /man/closest_to.Rd | 091e10a3aa7d6babcb01d64546514cd97cf4a09d | [] | no_license | dtkaplan/checkr-v-0.1 | 7496f95367290b9313f81b07308b688b5a5e9aa1 | 71e73eaaddda73a26d6cacafee3fd9d9abd1f809 | refs/heads/master | 2021-09-03T18:21:21.054638 | 2018-01-11T01:40:01 | 2018-01-11T01:40:01 | 73,754,306 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,070 | rd | closest_to.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/closest_to.R
\name{closest_to}
\alias{closest_to}
\title{Locators based on solution code}
\usage{
closest_to(what)
}
\arguments{
\item{what}{either an object name or an identifying character string from the code generating
the object}
}
\value{
a function that takes a capture as input and returns a capture as output
}
\description{
\code{closest_to()} finds a matching line in the solution code. It then uses the
value of that line (in the solution code) to look for the best-matching
value in the student code. The function is not to be used stand-alone, but
rather in the \code{\link{soln_test}} function, which calls it once for the solution
code and again for the user code. For the solution code, it returns the matching line
but for the user code it returns the line whose value matches the most closely the
value of the line in the solution code. So long as there is some
value in the student code, no matter how different from the solution code's value,
a value will be returned.
}
|
0e4aa15ab1df0d21e99b4c684ecf5fd100736cf9 | 29e07daf4ff0277d07d44b1bde79a696c597d744 | /imp_dur.R | 0d9fd62e95f26accd27660a36fca35c5425a3408 | [
"CC0-1.0"
] | permissive | LimesLimits/Settlement_hierarchy | 43dbd6c9b296d4c52223a4b6c7e7a0e6923d9778 | be2cb222661145a416a7e7170c0a785ff9c1bd37 | refs/heads/main | 2023-04-18T21:20:15.209828 | 2022-09-09T12:40:20 | 2022-09-09T12:40:20 | 534,600,984 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 498 | r | imp_dur.R | # script imp_dur.R
# read the datafile (semicolon-separated; columns IMP, FRQ, DUR are used below)
imp_dur <- read.csv("./IMP_DUR.txt", header=TRUE, sep=";")
# Stacked proportional bar chart: frequency share of each duration (DUR)
# within each ceramic class (IMP), in greyscale.
ggplot(imp_dur, aes(x = IMP, y = FRQ, fill = DUR)) +
  geom_bar(position = "fill", stat='identity') +
  guides(fill = guide_legend(reverse=TRUE)) +
  scale_fill_grey(start = 0.9, end = 0.1) +
  ggtitle("Duration and ceramic classes") +
  xlab("Ceramic class") +
  ylab("Proportion") +
  theme(panel.background = element_rect(fill="white")) +
  theme(axis.ticks.x=element_blank())
|
9ff5c037a6f8f20a08e3a1250010ab8d80dd889f | 79b0d43549a50d0dfc950e420e23f921a7bde12a | /Phylogenetic Analyses/archived files/sCoRRE_calculating_functional_diversity.R | 4fcf5d1e1d25bbf2e5c0a191f9ec4591c7513165 | [] | no_license | mavolio/scorre | 55de4ef0c9bfd946b9257a9e876147a5d24462dd | d67a78da2cc627eae9ab25ee4cd8f931a5102200 | refs/heads/master | 2023-07-26T22:51:21.000447 | 2023-07-12T15:26:27 | 2023-07-12T15:26:27 | 226,844,825 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,970 | r | sCoRRE_calculating_functional_diversity.R | ###
###
### Calculate Functional dispersion for CoRRE data
###
### Last updated Dec 13, 2021

### Set up workspace
rm(list=ls())
# Working-directory paths for the two machines this script is run on;
# only the last setwd() that succeeds takes effect.
setwd("C:\\Users\\wilco\\Dropbox\\shared working groups\\sDiv_sCoRRE_shared\\") #kevin's laptop
setwd('C:\\Users\\lapie\\Dropbox (Smithsonian)\\working groups\\CoRRE\\sDiv\\sDiv_sCoRRE_shared\\') #kim's laptop

library(FD)
library(tidyverse)
memory.limit(size=50000)
### Trait data
# Read in imputed continuous traits, drop bookkeeping columns, and average
# to one row per matched species.
contTraits <- read.csv('CoRRE data\\trait data\\Final TRY Traits\\Imputed Continuous_Traits\\data to play with\\imputed_continuous_20220620.csv')%>%
  select(-X.1, -X, -family, -genus, -observation)%>%
  group_by(species_matched)%>%
  # FIX: funs() is deprecated in dplyr; pass the function directly.
  summarise_all(mean)%>%
  ungroup()

# Join categorical traits with the averaged continuous traits; keep only
# species with complete trait records.
traits <- read.csv('CoRRE data\\trait data\\sCoRRE categorical trait data - traits_complete_pre spot check_03102021.csv')%>%
  full_join(contTraits) %>%
  drop_na()
# Remove trait outliers: two uncommon leaf types and three named species.
traitsOutliersRemoved <- traits %>%
  filter(!leaf_type %in% c("microphyll","frond")) %>%
  filter(!species_matched %in% c("Centrolepis aristata", "Centrolepis strigosa", "Acorus calamus"))

traitsScaled <- traitsOutliersRemoved %>% ## only scales continuous traits
  mutate_at(vars(seed_dry_mass:seed_number), scale)

# Read in relative abundance data
# Key mapping raw CoRRE species names to matched (TRY) species names.
sp_name_key <- read.csv("CoRRE data\\trait data\\corre2trykey.csv") %>%
  dplyr::select(genus_species, species_matched) %>%
  unique(.)

# Collapse duplicate rows per plot/species to the maximum relative cover.
rel_abun_df_raw <- read.csv("CoRRE data\\CoRRE data\\community composition\\CoRRE_RelativeCover_Dec2021.csv") %>%
  group_by(site_code, project_name, community_type, calendar_year, treatment_year, treatment, block,
           plot_id, data_type, version, genus_species) %>%
  summarize(relcov=max(relcov))

# Attach matched names, drop unmatched species, and sum cover of raw names
# that map to the same matched species.
rel_abun_df <- rel_abun_df_raw %>%
  left_join(sp_name_key, by="genus_species") %>%
  filter(!is.na(species_matched)) %>%
  dplyr::select(-genus_species) %>%
  group_by(site_code, project_name, community_type, calendar_year, treatment_year, treatment, block,
           plot_id, data_type, version, species_matched) %>%
  summarize(relcov=sum(relcov))

# filter(rel_abun_df, site_code=="SIU" & plot_id==48 & treatment_year==5 & genus_species=="acalypha virginica")
# abund_species_vector <- unique(rel_abun_df$species_matched)

# Ad-hoc inspection of two specific rows (left over from debugging).
rel_abun_df[c(13080, 13081),]
# Wide-format split: plot metadata columns only ...
rel_abun_env_df <- rel_abun_df %>%
  spread(key=species_matched, value=relcov) %>%
  dplyr::select(site_code:version)

# ... and the species-by-plot abundance matrix only.
rel_abun_sp_wide <- rel_abun_df %>%
  spread(key=species_matched, value=relcov) %>%
  dplyr::select(-site_code:-version)

# pplot_sp_wide <- pplot_abun_df %>%
#   dplyr::select(-genus_species) %>%
#   spread(key=species_matched, value=relcov) %>%
#   dplyr::select(-site_code:-version)
#
# pplot_env_df <- pplot_abun_df %>%
#   dplyr::select(-genus_species) %>%
#   spread(key=species_matched, value=relcov) %>%
#   dplyr::select(site_code:version)
#
# write.csv(pplot_sp_wide, file="paper 2_PD and FD responses\\data\\pplots species abundance wide.csv")
# write.csv(pplot_env_df, file="paper 2_PD and FD responses\\data\\pplots environmental data.csv")

# Sanity check for duplicated plot/species combinations after aggregation.
dups <- rel_abun_df %>%
  group_by(site_code, project_name, community_type, calendar_year, treatment_year, treatment, block,
           plot_id, data_type, version, species_matched) %>%
  summarize(length=length(relcov)) %>%
  filter(length==2)
# write.csv(dups, file="paper 2_PD and FD responses\\data\\duplicates.csv") #no duplicates anymore

#Experimental information
# C:\Users\wilco\Dropbox\shared working groups\sDiv_sCoRRE_shared\CoRRE data\CoRRE data\community composition\CoRRE_ExperimentInfoMar2021.csv
# for matched_names (key for matching CORRE and TRY species names)
#C:\Users\wilco\Dropbox\shared working groups\sDiv_sCoRRE_shared\CoRRE data\CoRRE data\trait data\corre2trykey.csv
###
# Subset: relative abundances for the pplots project with matched names.
pplot_abun_df <- read.csv("CoRRE data\\CoRRE data\\community composition\\CoRRE_RelativeAbundanceMar2021.csv") %>%
  filter(project_name=="pplots") %>%
  left_join(sp_name_key, by="genus_species") %>%
  drop_na(species_matched)
a163b037df0cbec585b5a76bdd34989d9fa9e875 | 1741d47aac4157ca801d60c0e2de064d8c31ba04 | /Test 4.R | d7b607d93e839d2d306e32d38808373cff0d9f88 | [] | no_license | ThejaswiniNayak/Using-Apriori-Algorithm | b6754495eb9f333a993421bdfafafe62a022fa3a | 6441d5f07f200ac1881c65f85f6ac28de2abcb6d | refs/heads/master | 2020-04-05T20:26:07.531929 | 2018-11-12T08:26:19 | 2018-11-12T08:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 737 | r | Test 4.R | setwd("~/Personal/Study/Crosssell and Upsell/Upsell_model/Initial Data")
library('arules')
# Read transactions in "single" format: column 1 = transaction id, column 2 = item.
txn= read.transactions("MBAEMCdata.csv",rm.duplicates = FALSE,format="single",sep=",",cols=c(1,2))
#inspect transaction data
head(txn,n=5)
txn
txn@itemInfo
image(txn)
#mine association rules
# Rules with minimum support 0.1 and confidence 0.8, restricted to
# consequents of "ISILON".
basket_rules = apriori(txn,parameter = list(sup=0.1,conf=0.8),
                       appearance = list (default="lhs",rhs="ISILON"),
                       control = list (verbose=F))
inspect(basket_rules)
#extract the top rules with high lift
rules_high_lift = head(sort(basket_rules,by="lift"))
inspect(rules_high_lift)
# All rules ordered by confidence (descending).
rules_conf <- sort (basket_rules, by="confidence", decreasing=TRUE)
inspect(rules_conf)
help(apriori)
d8623122ab17a6fe937bbe71f795bdd64db3218e | 93aff2e87580e6c358d42ccbb390aa328ddc275f | /plot4.R | 1bf23d09b12f0ff4e34a3085e9f0d6391a03c1b5 | [] | no_license | awulfes/ExData_Plotting1 | 7f007ed0a97df4dc7e8845ee08c4971eed9876a4 | 7a19c41f76608a429d9b38bbc1cb64ad853a37a5 | refs/heads/master | 2021-01-20T12:09:58.875513 | 2017-03-19T20:19:49 | 2017-03-19T20:19:49 | 85,499,922 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,452 | r | plot4.R |
# Produce the four-panel household power consumption figure (plot 4) for
# 01/02/2007-02/02/2007 and write it to a 480x480 PNG.
#
# Args:
#   inputfile:  path to the household_power_consumption.txt data set
#               (semicolon-separated, "?" marks missing values).
#   outputfile: path of the PNG file to create.
producePlot4 <- function (inputfile = "./data/household_power_consumption.txt",
                          # FIX: the default was "plot2.png", a copy-paste
                          # leftover from producePlot2; this function draws plot 4.
                          outputfile = "plot4.png"){

  # constants: the two-day window to plot
  date.start <- strptime(c("01/02/2007 00:00:00"),"%d/%m/%Y %H:%M:%S")
  date.end <- strptime(c("02/02/2007 23:59:59"),"%d/%m/%Y %H:%M:%S")

  # read data
  print("Reading dataset...")
  ds <- read.table(inputfile, header=TRUE, sep = ";", stringsAsFactors = FALSE, na.strings = "?")

  # create new column: combine Date and Time and convert to POSIXlt
  print("Convert date and time columns...")
  ds$DateTime <- paste(ds$Date, ds$Time)
  ds$DateTime <- strptime(ds$DateTime,"%d/%m/%Y %H:%M:%S")
  ds$Date <- NULL
  ds$Time <- NULL

  # select only data from relevant dates and remove NAs
  print("Select dates 01/02/2007-02/02/2007...")
  ds <- ds[ds$DateTime >= date.start & ds$DateTime <= date.end & !is.na(ds$DateTime),]

  # define png device
  print("Plot line graph")
  png(file=outputfile,
      width = 480,
      height = 480)

  # force English weekday labels on the time axis
  Sys.setlocale("LC_TIME", "C")

  # set up a 2x2 grid, filled column-wise
  par(mfcol=c(2,2))

  # panel 1 (top-left): global active power over time
  plot(ds$DateTime, ds$Global_active_power,
       type="l",
       ylab = "Global Active Power (kilowatts)",
       xlab = "")

  # panel 2 (bottom-left): the three sub-metering series
  # create initial plot and draw black line for metering_1
  plot(ds$DateTime, ds$Sub_metering_1,
       type="l",
       ylab = "Energy sub metering",
       xlab = "",
       col = "black"
  )
  # draw red line for metering_2
  lines(ds$DateTime, ds$Sub_metering_2,
        type="l",
        col = "red"
  )
  # draw blue line for metering_3
  lines(ds$DateTime, ds$Sub_metering_3,
        type="l",
        col = "blue"
  )
  # add legend (no box)
  legend("topright",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lwd=1,
         col = c("black", "red", "blue"),
         bty = "n"
  )

  # panel 3 (top-right): voltage over time
  plot(ds$DateTime, ds$Voltage,
       type = "l",
       xlab = "datetime",
       ylab = "Voltage")

  # panel 4 (bottom-right): global reactive power over time
  plot(ds$DateTime, ds$Global_reactive_power,
       type = "l",
       xlab = "datetime",
       ylab = "Global_reactive_power")

  # close device so the PNG is written to disk
  dev.off()
  print("Finished")
}
# Create plot 4 and write to png file.
# Please override with location of data set and desired output file.
# Both arguments are passed explicitly here, so the function defaults are not used.
producePlot4(inputfile = "./data/household_power_consumption.txt", outputfile = "plot4.png")
e80c6fb284b4d77d171e1a7d08dd80e780aa7937 | 3fc582b5ab0a1d2f778fd2582ce7c0bb670aa11d | /man/get_day.Rd | 3561f0883cede7ec1c8bdb0460ba1af4177ceae4 | [] | no_license | bluegulcy/aquacrop | 43b56a8becb9244fe3028af853f3c19c99bdd5dd | 50a1d815d04259e8792aba763fc53e0f896bf1a0 | refs/heads/master | 2020-09-02T10:55:40.510230 | 2019-10-08T10:03:12 | 2019-10-08T10:03:12 | 219,206,208 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 330 | rd | get_day.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Aqua_library.R
\name{get_day}
\alias{get_day}
\title{get parameter day from date string}
\usage{
get_day(x)
}
\arguments{
\item{x}{date, where year = x[[1]], DOY = x[[2]]}
}
\value{
day of the month
}
\description{
get parameter day from date string
}
|
b64163b0b6a575d64e1ecd908c0a974e665faaa0 | 6dff9f5e4c651f70cdd28d07d2fa5a7d54f0056b | /tests/testthat/test-save.R | d4f0ceda41c02e0227a30a8ffb8e627312566c79 | [
"MIT"
] | permissive | Programming-The-Next-Step-2021/Email_Agent_Template | 2513276fc92814ca42ae22989784ae9aa8ece0d3 | 765e19899d1f0ee0f8d4e7c77142804908105f67 | refs/heads/main | 2023-05-02T20:58:39.158020 | 2021-05-30T20:39:21 | 2021-05-30T20:39:21 | 364,247,285 | 0 | 0 | NOASSERTION | 2021-05-30T20:39:22 | 2021-05-04T12:23:18 | R | UTF-8 | R | false | false | 781 | r | test-save.R | library(shiny)
library(testthat)
library(shinytest)

### I extracted a code snippet of my server code and saved it in R/save.R
### I named the function save_file()
## the function is supposed to save the input data in a data frame and write this data frame into a csv file.
## If I test the function using a path to an empty, temporary csv file
## it should create the same file as the user_inputs1.csv file
## (the file that is created when saving the default inputs of the sidebar panel)

test_that("save_file() saves inputs", {
  # create an empty temporary csv file to write into
  path_csv <- tempfile(fileext = ".csv")
  # BUG FIX: the original call was write.csv(path_csv, row.names = FALSE),
  # which writes the path *string* to standard output and never creates a
  # file at path_csv. Write an empty data frame to the temporary path instead.
  write.csv(data.frame(), file = path_csv, row.names = FALSE)
  expect_identical(save_file(path_csv),read.csv("~/UVA/programming next step/Email_Agent_Template/R/user_inputs1.csv"))
})
d954d70bd8431618a3d330feadfd2a88e841bd47 | 2c4dbf42a157b8691ad66da48a34c98e92407d18 | /R/scrap.R | 80d76269bbc280f888aa72fc89fbf99fe26c6e08 | [] | no_license | timkiely/spatially-conscious-ml-model | 05f829b8efb181fe4f0f1454427589a3443f0d1a | 3a81a9ce61a48dd8d34aca427370968f9580c2bd | refs/heads/master | 2021-10-10T12:19:08.269686 | 2019-01-10T16:39:12 | 2019-01-10T16:39:12 | 95,896,422 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,690 | r | scrap.R |
# for testing things out
# pluto1 <- read_rds("data/processing steps/p01_pluto_raw.rds")
# pad1 <- read_rds("data/processing steps/p02_pad_raw.rds")
# sales1 <- read_rds("data/processing steps/p03_sales_raw.rds")
# sales2 <- read_rds("data/processing steps/p04_sales_and_pad.rds")
# pluto2 <- read_rds("data/processing steps/p05_pluto_with_sales.rds")
# base1 <- read_rds("data/processing steps/p06_base_model_data.rds")
# zip_level <- read_rds("data/processing steps/p07_zipcode_model_data.rds")
# radii <- read_rds("data/processing steps/p08_radii_model_data.rds")
# "data/processing steps/p09_prob_of_sale_model_base.rds"
# "data/processing steps/p10_prob_of_sale_model_zipcode.rds"
# "data/processing steps/p11_prob_of_sale_model_radii.rds"
# "data/processing steps/p12_sale_price_model_base.rds"
# "data/processing steps/p13_sale_price_model_zipcode.rds"
# "data/processing steps/p14_sale_price_model_radii.rds"
# "data/processing steps/p15_prob_model_evaluations.rds"
# "data/processing steps/p16_sales_model_evaluations.rds"
source("R/helper/load-packages.R")
source("R/helper/source-files.R")
# One-off extraction of a BoroCode==1 sample of the base model data:
# base1 <- read_rds("data/processing steps/p06_base_model_data.rds")
# base_samp <- base1 %>% filter(BoroCode==1)
# write_rds(base_samp, "data/aux data/sample_p06_base_model_data.rds")
base1 <- read_rds("data/aux data/sample_p06_base_model_data.rds")
prob_evals <- read_rds("data/processing steps/p15_prob_model_evaluations.rds")
sale_evals <- read_rds("data/processing steps/p16_sales_model_evaluations.rds")
prob_base <- read_rds("data/processing steps/p09_prob_of_sale_model_base.rds")
# "data/processing steps/p10_prob_of_sale_model_zipcode.rds"
# "data/processing steps/p11_prob_of_sale_model_radii.rds"
sale_base <- read_rds("data/processing steps/p12_sale_price_model_base.rds")
sale_zip <- read_rds("data/processing steps/p13_sale_price_model_zipcode.rds")
sale_radii <- read_rds("data/processing steps/p14_sale_price_model_radii.rds")
# Summary table of SALE PRICE for the paper; drops the NA-count row that
# summary() appends when there are missing values.
data_frame(`a` = names(summary(base1$`SALE PRICE`))) %>%
  bind_cols(data_frame(`Sale Price per SF` = scales::comma(round(summary(base1$`SALE PRICE`),2)))) %>%
  filter(a!="NA's") %>%
  rename(` ` = a) %>%
  write_rds("Writing/Sections/tables and figures/sale_price_summary_table4.rds")
# Same style of summary table for the Sold indicator.
data_frame(`a` = names(summary(base1$Sold))) %>%
  bind_cols(data_frame(`Sold` = scales::comma(round(summary(base1$Sold),2)))) %>%
  filter(a!="NA's") %>%
  rename(` ` = a) %>%
  write_rds("Writing/Sections/tables and figures/sold_summary_table5.rds")
base1$`SALE PRICE` %>% summary()
# NOTE(review): `radii` is only read in a line that is commented out near the
# top of this file, so everything below errors unless `radii` already exists
# in the workspace -- confirm before running this section non-interactively.
# Min / median / mean / max of every radii feature, one summary per object.
min1 <-
  radii %>%
  select(Radius_Total_Sold_In_Year:Percent_Change_EMA_5_basic_mean_perc_change) %>%
  summarise_all(min, na.rm = T) %>%
  transpose() %>%
  rename("Min" = V1)
median1 <-
  radii %>%
  select(Radius_Total_Sold_In_Year:Percent_Change_EMA_5_basic_mean_perc_change) %>%
  summarise_all(median, na.rm = T) %>%
  transpose() %>%
  rename("Median" = V1)
mean1 <-
  radii %>%
  select(Radius_Total_Sold_In_Year:Percent_Change_EMA_5_basic_mean_perc_change) %>%
  summarise_all(mean, na.rm = T) %>%
  transpose() %>%
  rename("Mean" = V1)
max1 <-
  radii %>%
  select(Radius_Total_Sold_In_Year:Percent_Change_EMA_5_basic_mean_perc_change) %>%
  summarise_all(max, na.rm = T) %>%
  transpose() %>%
  rename("Max" = V1)
# Assemble the feature-summary table and format the numbers for display.
table_1 <-
  data_frame(Feature = names(select(radii, Radius_Total_Sold_In_Year:Percent_Change_EMA_5_basic_mean_perc_change))) %>%
  bind_cols(min1) %>%
  bind_cols(median1) %>%
  bind_cols(mean1) %>%
  bind_cols(max1) %>%
  mutate_at(vars(Min:Max), round, 2) %>%
  mutate_at(vars(Min:Max), scales::comma)
write_rds(table_1, "Writing/Sections/tables and figures/table3.rds")
|
729eca5431e934ea3dc60065e9b79099f0699291 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/AlphaPart/tests/testthat/test-write-csv.R | d7f2947df15cc5a738030c9a87230a5b5f32b2d9 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,068 | r | test-write-csv.R | context("test-write-csv")
test_that("Check writing input for write.csv.AlphaPart", {
  ## write.csv.AlphaPart() must reject objects that are not the right class
  ## and must be given exactly one file name.
  expect_error(write.csv.AlphaPart(data.frame(x=1:10)))
  expect_error(write.csv.AlphaPart(data.frame(x=1:10), file=c("a", "b")))
})
test_that("Check writing process for write.csv.AlphaPart", {
  ## Partition additive genetic values by country for trait bv1.
  res <- AlphaPart(x=AlphaPart.ped, colPath="country", colBV="bv1")
  ## Write the partition to disk and collect the saved file names.
  dirT <- tempdir()
  fileName <- file.path(dirT, "AlphaPart")
  retF <- write.csv(x=res, file=fileName)
  ## Re-read the first saved file and check the partition columns round-trip.
  tmp <- read.csv2(file=retF[1])
  expect_equal(tmp$agv1, res$agv1$agv1)
  expect_equal(tmp$agv1_1, res$agv1$agv1_1)
  ## Clean up.  NOTE(review): dir() returns bare file names, so unlink()
  ## looks in the working directory rather than dirT -- the temp files are
  ## likely not actually removed; confirm whether full.names=TRUE was meant.
  files <- dir(path=dirT, pattern="AlphaPart*")
  unlink(x=files)
})
###############################################################
###############################################################
###############################################################
###############################################################
#write.csv.summaryAlphaPart
test_that("Check writing input for write.csv.summaryAlphaPart", {
  ## write.csv.summaryAlphaPart() must reject objects of the wrong class and
  ## more than one file name.
  expect_error(write.csv.summaryAlphaPart(data.frame(x=1:10)))
  expect_error(write.csv.summaryAlphaPart(data.frame(x=1:10), file=c("a", "b")))
})
test_that("Check writing process for write.csv.summaryAlphaPart", {
  ## Partition additive genetic values by country for trait bv1.
  res <- AlphaPart(x=AlphaPart.ped, colPath="country", colBV=c("bv1"))
  ## Summarize population by generation (=trend).
  ret <- summary(res, by="gen")
  ## Write the summary to disk and collect the saved file names.
  dirT <- tempdir()
  fileName <- file.path(dirT, "AlphaPart")
  retF <- write.csv(x=ret, file=fileName)
  ## Re-read the first saved file and check it round-trips the summary.
  ## NOTE(review): `col` is defined but never used below -- confirm whether a
  ## column-name assertion was intended here.
  col <- c("gen", "N", "Sum", "1", "2")
  tmp <- read.csv2(file=retF[1])
  expect_equal(tmp, ret$bv1)
  ## Clean up.  NOTE(review): dir() returns bare file names, so unlink()
  ## removes files relative to the working directory, not dirT.
  files <- dir(path=dirT, pattern="AlphaPart*")
  unlink(x=files)
})
|
9b10edfa16c2ade8c1e0314ad39ed4de41da9c90 | 40e46bdd1a3c128917625f7d691f253f632778a1 | /Task-3.R | 10b143ccf99a11e1ac7b223dea28508c4a3c1cfc | [] | no_license | AlexMartin-Eire/Consumer-Analytics-Big-Data- | 3c452f8ad8d0675fb0f1f587bde1bda1338912d1 | 98668d12d9d7210a3891c2b4d02055505318a018 | refs/heads/master | 2021-04-06T19:39:58.528711 | 2018-03-15T02:02:30 | 2018-03-15T02:02:30 | 125,298,431 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,040 | r | Task-3.R | # test for package existance and install
# Install tidyverse on first run, then attach it.
if (!is.element("tidyverse", installed.packages())) {
  install.packages("tidyverse", dependencies = TRUE)  # was dep = T (partial match)
}
library(tidyverse)

# Dunnhumby lookup and transaction tables (read from the working directory).
cl <- as_tibble(read.csv("dh_causal_lookup.csv"))
pl <- as_tibble(read.csv("dh_product_lookup.csv"))
sl <- as_tibble(read.csv("dh_store_lookup.csv"))
tr <- as_tibble(read.csv("dh_transactions.csv"))

# Transactions where pasta was purchased with a coupon.
r1 <- tr %>%
  left_join(pl) %>%
  filter(coupon == 1, commodity == "pasta")
# Households that ever bought pasta with a coupon.
h <- unique(r1$household)

# All pasta transactions, with or without a coupon.
r2 <- tr %>%
  left_join(pl) %>%
  filter(commodity == "pasta")
# Pasta transactions restricted to the coupon households
# (replaces the !is.na(match(...)) idiom with %in%).
tmp <- filter(r2, household %in% h)

# Households whose FIRST pasta purchase (earliest day; coupon rows first
# within a day) used a coupon, and who then bought pasta at least once more.
h2 <- list()
for (i in h) {
  temp1 <- tmp %>%
    filter(household == i) %>%
    arrange(day, -coupon)
  if (temp1$coupon[1] == 1 && nrow(temp1) > 1) {
    h2[[length(h2) + 1]] <- i
  }
}
length(h2) # Number of qualifying households
#3009 for pasta.
#7378 pasta sauce
#3726 syrups
#1112 pancake mixes
|
af1617ae41d82c2ba38cbc24340d7ed0f4c1f4e7 | ef0dd25fc50a1912248ab962f21c3c44cf932ea8 | /simulation_class_1.R | 492191af0b9838314d5adb29b9ea4ae9063c70fc | [] | no_license | Sebastianjohncj/Rscript_practice_files | c36e2bf322f463c29a8eb80d549d0e415e828397 | ac127792af4faa602a78681a768c2f954c7d6d8b | refs/heads/main | 2023-04-06T15:13:43.184079 | 2021-04-14T14:25:23 | 2021-04-14T14:25:23 | 357,932,532 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 505 | r | simulation_class_1.R | cat("\014") # cleans screen
rm(list=ls(all=TRUE)) # clear the workspace (NOTE(review): avoid in shared scripts -- wipes the user's session)
setwd("C:/Users/dell/Downloads") # NOTE(review): machine-specific path
# NOTE(review): `ipl_data` is never read in this script -- presumably loaded
# beforehand (e.g. via the RStudio import dialog); confirm before running.
ipl_data
summary(ipl_data)
# Model 1: sold price on base price, runs, wickets, nationality and L25 flag.
MODEL1 <- lm(`Sold Price(US$)` ~ `Base Price(US$)` + `RUNS-S` + WKTS + INDIA + L25, data=ipl_data)
summary(MODEL1)
# Rename the backticked response so later formulas are easier to write.
names(ipl_data)[names(ipl_data)=="Sold Price(US$)"]<-"SoldPrice"
# Model 2: adds ODI wickets, strike rate and bowler/wicket-keeper columns.
MODEL2 <- lm(`SoldPrice` ~ `Base Price(US$)` + `RUNS-S` + `ODI-WKTS` + INDIA + L25 + `SR -B` + `BOW*WK-O`+`BOW*WK-I`, data=ipl_data)
summary(MODEL2)
|
374a88d218387e26c3f3a0c410f17f70ed5045e0 | a3c78700a65f10714471a0d307ab984e8a71644d | /base/db/man/pft.add.spp.Rd | 6eda5d84ea0e33ce2103f27813de39a4faed83b6 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PecanProject/pecan | e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f | ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c | refs/heads/develop | 2023-08-31T23:30:32.388665 | 2023-08-28T13:53:32 | 2023-08-28T13:53:32 | 6,857,384 | 187 | 217 | NOASSERTION | 2023-09-14T01:40:24 | 2012-11-25T23:48:26 | R | UTF-8 | R | false | true | 1,395 | rd | pft.add.spp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pft.add.spp.R
\name{pft.add.spp}
\alias{pft.add.spp}
\title{Associate species with a PFT.}
\usage{
pft.add.spp(pft, acronym = NULL, ID = NULL, test = TRUE, con = NULL, ...)
}
\arguments{
\item{pft}{String name of the PFT in the database}
\item{acronym}{Species' symbols; see \url{http://plants.usda.gov}}
\item{ID}{Species IDs in Bety. You can provide either IDs or Symbols as input, if you provide both ID and acronym, only acronym will be used.}
\item{test}{Runs the function in test mode. No species are actually added, but checks are run for existing species-PFT pairs, unmatched acronyms, missing species, or duplicate species}
\item{con}{Database connection object.}
\item{...}{optional arguments for connecting to the database (e.g. password, user name, database)}
}
\value{
Function does not return a value but does print out diagnostic statements.
}
\description{
adds a list of species to a pft based on USDA Plants acronyms
}
\details{
This function is used to add PFT-Species pairs to the database table 'pfts_species'. In the initial implementation the PFT has to be defined already and the species are added based on their USDA Symbol (genus/species acronym). Multiple species can be added at once but only one PFT at a time.
The Symbols object are
}
\author{
Michael C. Dietze, Dongchen Zhang
}
|
f941077d12bf93c1a7fd9e609f9e278db96c0274 | a1c3aea577f7636f53386dde9ead621dce49a688 | /previous-analysis/every-species-2.R | 72ad9efb42f5a048a839235263392d2189299198 | [] | no_license | jacobpassfield/final-year-project | 512a4e121108cb093dcd01ae67a89018bd583075 | 8c95e303b44968e02334bb0f623fb5909eff2ba2 | refs/heads/main | 2023-04-22T15:09:25.074479 | 2021-05-10T09:12:28 | 2021-05-10T09:12:28 | 354,914,369 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,597 | r | every-species-2.R | library(tidyverse)
library(broom.mixed)
library(lme4)
library(patchwork)
library(ggeffects)
# Loads an object named `data` (fish survey records) into the workspace.
load(file = "data/data.RData")
# Centre and scale mean sea-surface temperature for use as the predictor.
data <- data %>% mutate(ScaledMeanSST = scale(MeanSST, center = T, scale = T))
data$Geogroup <- factor(data$Geogroup)
data$SurveyID <- factor(data$SurveyID)
# One row per species; list-column `data` holds that species' survey records.
by_species <- data %>%
  group_by(TaxonomicName) %>%
  nest()
dim(by_species)
head(by_species)
# Geogroup / SurveyID
# Mixed model for one species: size class vs. scaled SST, with random
# intercepts for survey nested within geographic cell (1 | Geogroup/SurveyID).
# `df`: one species' rows from the nested `by_species` table; must contain
# SizeClass, ScaledMeanSST, Geogroup and SurveyID columns.
species_model <- function(df) {
  # REML = TRUE written out: `T` is an ordinary, reassignable variable.
  lmer(SizeClass ~ ScaledMeanSST + (1 | Geogroup / SurveyID), REML = TRUE, data = df)
}
by_species1 <- by_species %>%
mutate(model = map(data, species_model)) #byspecies$data
# SurveyID
# Simpler random-effects structure: survey-level intercepts only (no Geogroup).
# `df`: one species' rows from the nested `by_species` table.
species_model2 <- function(df) {
  # REML = TRUE written out: `T` is an ordinary, reassignable variable.
  lmer(SizeClass ~ ScaledMeanSST + (1 | SurveyID), REML = TRUE, data = df)
}
by_species2 <- by_species %>%
mutate(model = map(data, species_model2)) #byspecies$data
head(by_species2)
# Use only SurveyID
# Are assumptions still met for PW?
PW_data <- data %>% filter(TaxonomicName %in% "Halichoeres margaritaceus")
PW_data <- PW_data %>% mutate(ScaledMeanSST = scale(MeanSST, center = T, scale = T))
PW_data$Geogroup <- factor(PW_data$Geogroup)
PW_data$SurveyID <- factor(PW_data$SurveyID)
PW.mm2 <- lmer(SizeClass ~ ScaledMeanSST + (1|SurveyID), REML = T, data = PW_data)
PW.pred.mm2 <- ggpredict(PW.mm2, terms = c("ScaledMeanSST")) # this gives overall predictions for the model
PWmmPlot2 <- ggplot(PW.pred.mm2) +
geom_line(aes(x = x, y = predicted), size = 2, colour = "blue") + # slope
geom_point(data = PW_data, # adding the raw data (scaled values)
aes(x = ScaledMeanSST, y = SizeClass), alpha = 0.1, size = 3) +
labs(y="Size class (cm)", x="Scaled Mean SST (°C)",
title = "Temperature versus size class",
subtitle = "Using a linear mixed-effects model") +
theme_minimal() +
theme(legend.position="none")
# Homegeneity.
homoPW <- ggplot(PW.mm2, aes(x= .fitted, y= .resid)) +
geom_point(shape=1, size=2) +
geom_hline(yintercept=0, col="red", linetype="dashed") +
geom_smooth(method = "loess", se=F) +
labs(title = "Residuals versus fitted values", y="Residuals", x="Fitted values") +
theme_classic()
# Normality.
normPW <- ggplot(PW.mm2, aes(x = .resid)) +
geom_histogram(bins = 10, fill = "white", colour = "black") +
labs(title = "Histogram of residuals", x = "Residuals", y = "Frequency") +
theme_classic()
# Independence.
indPW <- ggplot(PW_data, aes(x = ScaledMeanSST, y = resid(PW.mm2))) +
geom_point(shape = 1, size = 2) +
labs(title = "Explanatory variable vs residuals", x = "Scaled Mean SST (°C)", y = "Residuals") +
theme_classic()
summary(PW.mm2)
pdf(file = "figures/Figure9.pdf")
PWmmPlot2 + (homoPW / normPW / indPW) +
plot_annotation(tag_levels = c("A", "B", "C")) &
theme(plot.tag = element_text(face = 2, size = 15)) # & operator applies tag style to all plots
dev.off()
# Phew.
tidy <- by_species2 %>%
mutate(tidy = map(model, broom.mixed::tidy)) %>%
unnest(tidy, .drop = T)
head(tidy)
# Create data frame including species name and the estimate of the slope.
SST_est <- tidy %>%
select(TaxonomicName, effect, term, estimate) %>%
filter(term %in% c("ScaledMeanSST"))
head(SST_est)
dim(SST_est)
# Red numbers are in red.
# Show the value of the estimate.
index <- c(1:335) # Create index
# SSTestPlot <- SST_est %>%
# ggplot(aes(index, estimate)) + geom_point() + geom_hline(yintercept = 0, colour = "red") +
# labs(y = "Estimated coefficient for the sea surface temperature", x = "Index") +
# theme_classic()
#
# pdf(file = "figures/Figure11.pdf")
# SSTestPlot
# dev.off()
# What are the two points with an estimate greater than 15?
# Shown on plot
grt15 <- SST_est %>%
ggplot(aes(index, estimate, label=TaxonomicName)) +
geom_point() +
geom_hline(yintercept = 0, colour = "red") +
geom_text(aes(label = ifelse(estimate > 15, TaxonomicName, '')), size = 3, hjust= -0.1) +
labs(y = "Estimated coefficient for sea surface temperature", x = "Index") +
theme_classic()
pdf(file = "figures/Figure10.pdf")
grt15
dev.off()
# Checking their summaries
DN_data <- filter(data, TaxonomicName %in% "Dactylophora nigricans")
AG_data <- filter(data, TaxonomicName %in% "Achoerodus gouldii")
DN.mm <- lmer(SizeClass ~ ScaledMeanSST + (1|SurveyID), REML=T, data = DN_data)
AG.mm <- lmer(SizeClass ~ ScaledMeanSST + (1|SurveyID), REML=T, data = AG_data)
# Homegeneity.
homoDN <- ggplot(DN.mm, aes(x= .fitted, y= .resid)) +
geom_point(shape=1, size=2) +
geom_hline(yintercept=0, col="red", linetype="dashed") +
geom_smooth(method = "loess", span=100000, se=F) +
labs(title = "Residuals versus fitted values", y="Residuals", x="Fitted values") +
theme_classic()
homoAG <- ggplot(AG.mm, aes(x= .fitted, y= .resid)) +
geom_point(shape=1, size=2) +
geom_hline(yintercept=0, col="red", linetype="dashed") +
geom_smooth(method = "loess", se=F) +
labs(title = "Residuals versus fitted values", y="Residuals", x="Fitted values") +
theme_classic()
# Normality.
normDN <- ggplot(DN.mm, aes(x = .resid)) +
geom_histogram(bins = 10, fill = "white", colour = "black") +
labs(title = "Histogram of residuals", x = "Residuals", y = "Frequency") +
theme_classic()
normAG <- ggplot(AG.mm, aes(x = .resid)) +
geom_histogram(bins = 10, fill = "white", colour = "black") +
labs(title = "Histogram of residuals", x = "Residuals", y = "Frequency") +
theme_classic()
# Independence.
indDN <- ggplot(DN_data, aes(x = ScaledMeanSST, y = resid(DN.mm))) +
geom_point(shape = 1, size = 2) +
labs(title = "Explanatory variable vs residuals", x = "Scaled Mean SST (°C)", y = "Residuals") +
theme_classic()
indAG <- ggplot(AG_data, aes(x = ScaledMeanSST, y = resid(AG.mm))) +
geom_point(shape = 1, size = 2) +
labs(title = "Explanatory variable vs residuals", x = "Scaled Mean SST (°C)", y = "Residuals") +
theme_classic()
pdf(file = "figures/Figure11.pdf")
(homoDN + homoAG) / (normDN + normAG) / (indDN + indAG) &
plot_annotation(tag_levels = c("A", "B", "C", "D", "E", "F")) &
theme(plot.tag = element_text(face = 2, size = 15)) # & operator applies tag style to all plots
dev.off()
# Keep.
# Cautious to remove as observations with extreme values in ecology is interesting.
# Normality and independent assumptions are good.
# ROUNDED ESTIMATES TO THE NEAREST TENTH
SST_est$round_est <- round(SST_est$estimate, digits = 0.5)
nrow(subset(SST_est, round_est > 0)) # 129
nrow(subset(SST_est, round_est == 0)) # 7
nrow(subset(SST_est, round_est < 0)) # 199
# INCREASE
(129/335)*100 # 38.50746
# NEITHER INCREASE NOR DECREASE
(7/335)*100 # 2.089552
# DECREASE
(199/335)*100 # 59.40299
|
e168361d203cf6de8342224aae11979234349059 | 922aa270fa30066044e7ae475f31e4426b59cfac | /man-roxygen/arg_edgelist.R | 8ae89c54a7df0055ef68a0070c849221858ec281 | [] | permissive | jakobbossek/mcMST | 361a3708a3413126fbfe61f6ae930e3ee326356b | 4d5a18dfb79a9949c99fadf3a93c6f0f44b0cba3 | refs/heads/master | 2023-03-16T12:54:59.937066 | 2023-03-13T18:49:28 | 2023-03-13T18:49:28 | 96,212,733 | 2 | 3 | BSD-2-Clause | 2019-10-16T11:48:01 | 2017-07-04T11:51:54 | R | UTF-8 | R | false | false | 89 | r | arg_edgelist.R | #' @param edgelist [\code{matrix(2, k)}]
#' Matrix of edges (each column is one edge).
|
93325104349477cce8f593c0ce9e568d347d2dd1 | 0aea28942b6da9d02c5cb85fa1f840ae291c20bd | /GBMmulticlass.R | d6509939965b8cab98a7e47a6653fae630de2b7d | [] | no_license | orioli/triads | d98fcff09843836e7934d7a1a32c843fbd6f8941 | bb99df7fef8e360c0aa0cfaa0a516dbbf424705c | refs/heads/master | 2021-01-02T09:16:23.175030 | 2017-08-03T02:51:26 | 2017-08-03T02:51:26 | 99,175,480 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,936 | r | GBMmulticlass.R | # TAIWAN POLICE - using GBM to multiclass triads
#
# change this to your working directory
setwd("/Users/jse/DEsktop/TAIWAN POLICE")
library(gbm)
library(ggplot2)

## LOAD id file
id <- read.csv("id.csv")
dim(id)
id[1:4, ]
summary(id)

## LOAD case file
crp <- read.csv("CRP.csv", encoding = "utf-8")
dim(crp)
crp[1:4, ]
as.character(crp[4, ])
summary(crp) # took it as levels ok
# NOTE: a stray `df[1,]` was removed here -- `df` is only created by the
# merge below, so referencing it at this point stopped the script with
# "object 'df' not found".
colnames(crp)[3] <- "ID"

### LEFT JOIN id.csv + crp.csv
# keep the first CRP row per ID; keep every row of `id` (all.y = TRUE)
df <- merge(x = crp[!duplicated(crp$ID), ], y = id, by = "ID", all.y = TRUE)
dim(df)
dim(id)
dim(crp) # OK
df[1:22, 1:9]
length(unique(id$ID))

## VIZ
hist(id$target)
qplot(id[!id$target == "Non", ]$target, stat = "count")
# fixed: ggplot() requires an aes() mapping -- `ggplot(id, target)` errors
ggplot(id, aes(x = target)) + geom_bar()
g <- ggplot(df, aes(Plane)) + geom_bar() + coord_flip()
g <- ggplot(df, aes(CRP)) + geom_bar() + coord_flip()
g <- ggplot(df, aes(TypeIIIDrugs)) + geom_bar() + coord_flip()
g <- ggplot(df, aes(target)) + geom_bar() + coord_flip()
g <- ggplot(df, aes(Role, fill = Role)) + geom_bar() + coord_flip()
g

# PREPARE
df <- df[, -1]   # remove index
df <- df[, -16]  # remove address: too many levels, GBM cannot handle
df <- df[!(df$target == "Non"), ]
df$Group <- NULL  # data leak
df$caseno <- NULL # data leak
dim(df)
df[1:3, ]

# SPLIT
df <- df[sample(nrow(df)), ] # shuffle
df.train <- df[1:150, ]
df.test <- df[151:329, ]

## TRAIN
BST <- gbm(target ~ ., data = df.train,
           distribution = 'multinomial',
           n.trees = 200,
           interaction.depth = 4,
           #cv.folds=5,
           shrinkage = 0.005)
summary(BST)
df.train$target
predBST <- predict(BST, n.trees = 200, newdata = df.test, type = 'response')
predBST[1:6, , ]
df.test[1:6, ]

## COMPUTE PRECISION
# predicted class = column with the highest predicted probability per row
solution <- as.factor(colnames(predBST)[apply(predBST, 1, which.max)])
df.test$target
solution.num <- rep(0, length(solution))
solution.num[as.character(solution) == as.character(df.test$target)] <- 1
mean(solution.num) # overall accuracy
head(predBST)
# id.csv + crp.csv (first case only) 46.9%
# id.csv 40%
|
a207e888a6f1c32716b478e3d15ebb95c215ff7f | fa70b2a61f6c1e760151c3c04d0c7d55098231af | /tests/testthat/test-combinevariablesetsasbinary.R | 3f55f3e47a24a4b5c84ba7969d07c480a2f42608 | [] | no_license | Displayr/flipData | 383735ede9065ceec10b84b9d63a8bece6122257 | 9c24004e6e8dc70f856efc773a6a4a6f95db1489 | refs/heads/master | 2023-06-22T18:27:09.861659 | 2023-06-19T03:06:09 | 2023-06-19T03:06:51 | 59,714,768 | 0 | 4 | null | 2023-08-23T04:43:16 | 2016-05-26T02:54:37 | R | UTF-8 | R | false | false | 5,932 | r | test-combinevariablesetsasbinary.R | library(testthat)
library(verbs)
context("Combine variable sets as binary")
data(phone, package = "flipExampleData")
data(colas, package = "flipExampleData")
phone.names <- c("AAPT", "New Tel", "One-tel", "Optus", "Orange", "Telstra", "Virgin", "Vodafone", "Other 1", "Other 2", "Don't know")
unaided <- data.frame(phone[, paste0("Q5_", 1:11)])
colnames(unaided) <- phone.names
unaided[1, ] <- NA
unaided = (unaided == "Yes") * 1
attr(unaided, "questiontype") <- "PickAny"
attr(unaided, "dataset") <- "phone"
aided = data.frame(phone[, paste0("Q6_", 1:11)])
colnames(aided) <- phone.names
aided = (aided == "Yes") * 1
attr(aided, "questiontype") <- "PickAny"
attr(aided, "dataset") <- "phone"
Q4 = as.data.frame(colas[, c("q4a", "q4b", "q4c", "q4d", "q4e", "q4f")])
attr(Q4, "questiontype") <- "PickOneMulti"
attr(Q4, "dataset") <- "colas"
Q4.small = as.data.frame(colas[, c("q4a", "q4b", "q4c", "q4d")])
attr(Q4.small, "questiontype") <- "PickOneMulti"
attr(Q4.small, "dataset") <- "colas"
Q4.pepsi.light = colas[, "q4e"]
attr(Q4.pepsi.light, "questiontype") <- "PickOne"
attr(Q4.pepsi.light, "dataset") <- "colas"
Q4.pepsi.max = colas[, "q4f"]
attr(Q4.pepsi.max, "questiontype") <- "PickOne"
attr(Q4.pepsi.max, "dataset") <- "colas"
Q4.binary = CombineVariableSetsAsBinary(Q4)
Q4.binary = cbind(Q4.binary, "NET" = rep(TRUE, nrow(Q4.binary)))
attr(Q4.binary, "codeframe") <- list("Hate" = 1, "Dislike" = 2, "Neither like not dislike" = 3, "Love" = 4, "Like" = 5, "NET" = c(1,2,3,4,5))
attr(Q4.binary, "questiontype") <- "PickAny"
attr(Q4.binary, "dataset") <- "colas"
Q4.binary.small = CombineVariableSetsAsBinary(Q4)
attr(Q4.binary.small, "questiontype") <- "PickAny"
attr(Q4.binary.small, "dataset") <- "colas"
test_that("Single PickOne", {
  # Build the expected result by hand: one-hot encode the single categorical
  # variable, label the columns with its levels, then coerce the 0/1 matrix
  # to logical via `== 1`.
  asnumeric = flipTransformations::AsNumeric(Q4.pepsi.light, binary = TRUE)
  colnames(asnumeric) = levels(Q4.pepsi.light)
  asnumeric = asnumeric == 1
  # A lone Pick One question should combine to exactly that logical matrix.
  expect_equal(CombineVariableSetsAsBinary(Q4.pepsi.light), asnumeric)
})
test_that("Two PickOnes", {
pepsi.light.numeric = flipTransformations::AsNumeric(Q4.pepsi.light, binary = TRUE)
colnames(pepsi.light.numeric) = levels(Q4.pepsi.light)
pepsi.max.numeric = flipTransformations::AsNumeric(Q4.pepsi.max, binary = TRUE)
colnames(pepsi.max.numeric) = levels(Q4.pepsi.max)
input.args = list(pepsi.light.numeric, pepsi.max.numeric)
input.args[["match.elements"]] <- "Yes"
input.args[["elements.to.count"]] <- list(numeric = 1, categorical = NULL)
input.args[["ignore.missing"]] <- TRUE
pepsi.light.or.max = do.call(AnyOf, input.args)
expect_equal(CombineVariableSetsAsBinary(Q4.pepsi.light, Q4.pepsi.max), pepsi.light.or.max)
})
test_that("Many PickOnes are equivalent to a PickOneMulti", {
expect_equal(CombineVariableSetsAsBinary(Q4),
CombineVariableSetsAsBinary(colas[, "q4a"], colas[, "q4b"], colas[, "q4c"], colas[, "q4d"], colas[, "q4e"], colas[, "q4f"]))
})
test_that("Multliple PickOneMulti where one is the subset of the other", {
expect_equal(CombineVariableSetsAsBinary(Q4, Q4.small), CombineVariableSetsAsBinary(Q4))
})
test_that("Combining PickOnes and Pick Any", {
expect_equal(Q4.binary[, -ncol(Q4.binary)], CombineVariableSetsAsBinary(Q4.binary.small, Q4.pepsi.light, Q4.pepsi.max), check.attributes = FALSE)
})
test_that("Pick Any returns same data", {
expect_equal(CombineVariableSetsAsBinary(Q4.binary), Q4.binary[, -ncol(Q4.binary)], check.attributes = FALSE)
expect_equal(CombineVariableSetsAsBinary(Q4.binary, Q4.binary), Q4.binary[, -ncol(Q4.binary)], check.attributes = FALSE)
})
test_that("Missing data", {
input.args <- list(aided, unaided)
input.args[["match.elements"]] <- "Yes"
input.args[["elements.to.count"]] <- list(numeric = NA, categorical = NULL)
input.args[["ignore.missing"]] <- TRUE
n.missing <- do.call(Count, input.args)
expect_true(all(is.na(CombineVariableSetsAsBinary(aided, unaided)[n.missing == 2])))
expect_true(all(is.na(CombineVariableSetsAsBinary(aided, unaided, compute.for.incomplete = FALSE)[n.missing > 0])))
})
test_that("Filling in unmatched columns correctly", {
aided.2 <- aided
attr(aided.2, "originalquestiontype") <- "Pick Any"
expect_equal(fillInCategoriesWhenNotPresent(aided.2, colnames(aided.2)), aided.2)
expect_true(all(is.na(fillInCategoriesWhenNotPresent(aided.2, c(colnames(aided.2), "Hello"))[, "Hello"])))
Q4.pepsi.light.2 <- Q4.pepsi.light
Q4.pepsi.light.2[c(1,2,3)] <- NA
Q4.pepsi.light.binary <- CombineVariableSetsAsBinary(Q4.pepsi.light.2)
attr(Q4.pepsi.light.binary, "originalquestiontype") <- "Pick One"
expect_equal(which(is.na(fillInCategoriesWhenNotPresent(Q4.pepsi.light.binary, c(colnames(Q4.pepsi.light.binary), "Hello"))[, "Hello"])), c(1,2,3))
})
test_that("Unmatched columns included", {
aided.2 <- aided
colnames(aided.2)[11] <- "Hello"
combined <- CombineVariableSetsAsBinary(aided.2, unaided)
unique.cols <- unique(c(colnames(aided.2), colnames(unaided)))
expect_true(all(colnames(combined) %in% unique.cols))
expect_true(all(unique.cols %in% colnames(combined)))
expect_equal(as.numeric(combined[, "Hello"]), aided.2[, "Hello"])
})
test_that("Error messages", {
aided.2 <- aided
colnames(aided.2)[11] <- "Telstra"
expect_error(CombineVariableSetsAsBinary(aided.2, unaided), "duplicate")
test.case.1 <- factor(c("", "A", "B", "C","A", "B", "C"))
test.case.2 <- factor(c("A", "B", "C","A", "B", "C"), levels = c("", "A", "B", "C"))
expect_error(CombineVariableSetsAsBinary(test.case.1, test.case.2), "cases")
})
test_that("Blank factor labels", {
test.case.1 <- factor(c("", "A", "B", "C","A", "B", "C"))
test.case.2 <- factor(c("A", "A", "B", "C","A", "B", "C"), levels = c("", "A", "B", "C"))
expect_equal(colnames(CombineVariableSetsAsBinary(test.case.1, test.case.2)), c("", "A", "B", "C"))
})
|
20ea7246724a5e5a0a36c36f54de14dc3d6eadc1 | b8356867f878b89db8c9498abe81be00032f9b4f | /R/test.R | 6210fb997b77a4ab6fdb08f57c9f7da3df5017ad | [] | no_license | DavisVaughan/testpkgdowntitle | 81151a3a5bc920caa9fa159eaf16244aef78cc9f | d69c614bbd56682e4192dbcee1e285cbd9e7dffd | refs/heads/master | 2022-11-12T14:31:30.200352 | 2020-07-01T13:31:24 | 2020-07-01T13:31:24 | 276,385,800 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 121 | r | test.R | #' \Sexpr[stage=render,results=rd]{"hi"} Test
#'
#' @description
#' Hi there
#'
#' @export
# Trivial stub that always evaluates to 1.
test_fn <- function() 1
|
60fd1038a63d76e6a4dea220aa9e6b67e994a824 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/tm/examples/DirSource.Rd.R | 321ebd1034748c84a252ad8cdb056c7cdb6f335c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 161 | r | DirSource.Rd.R | library(tm)
### Name: DirSource
### Title: Directory Source
### Aliases: DirSource
### ** Examples
DirSource(system.file("texts", "txt", package = "tm"))
|
72cf4ac50af6a4a301dab0d392c4fe9740d9720e | 91afa3c4e8cca9dcb9b5146cafbf158c9865b3bd | /Rqtl2_ped.genABEL.R | 5891bef4b98c1b126ddfcf92ffa8cf01c917b9de | [] | no_license | jtlovell/eqtlanalysis | 5d995e14a270e61133a36f07fda7540c554ad7b9 | bc1d9d7b3b0c6dceb7aee71ea4ca8297deeacfec | refs/heads/master | 2020-04-06T06:40:19.501164 | 2015-07-22T16:17:21 | 2015-07-22T16:17:21 | 29,939,969 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,345 | r | Rqtl2_ped.genABEL.R | Rqtl2_ped.genABEL<-function(cross, phys,
ped.filename="out.ped",
map.filename="out.map",
ga.phefilename="phe.txt",
return.ga=T){
g<-pull.geno(cross)
m<-data.frame(pull.map(cross, as.table=T))
m$gene<-rownames(m)
phys<-phys[,c("gene","start")]
map<-merge(phys,m, by="gene")
map<-map[,c("chr","gene","pos","start")]
write.table(map, file=map.filename, sep="\t", row.names=F, col.names=F)
i<-data.frame(getid(cross)); colnames(i)<-"sample_id"
ped<-data.frame(cbind("FAM1",i$sample_id))
colnames(ped)<-c("family_id","sample_id")
ped$paternal_id<-0
ped$maternal_id<-0
ped$sex<-0
ped$affection=0
g[is.na(g)]<-"0/0"
g[g==1]<-"A/A"
g[g==2]<-"A/T"
g[g==3]<-"T/T"
ped<-cbind(ped,g)
write.table(ped, file=ped.filename, sep="\t", row.names=F, col.names=F, quote=F)
phe<-pull.pheno(cross)
phe<-phe[,c("id","Treatment",m$gene)]
phe$sex<-0
write.table(phe, file=ga.phefilename, sep="\t", quote=F, row.names=F)
if(return.ga){
convert.snp.ped(ped=ped.filename, mapfile=map.filename, outfile="genos.raw", wslash=T)
ga<-load.gwaa.data(phenofile = "phe.txt", genofile = "genos.raw",
force = TRUE, makemap = FALSE, sort = TRUE, id = "id")
return(ga)
}
}
|
2554f974328f81f0e36a666313366ac024619adf | 4f9e90b514964d836cd3ab21a0ae8d77e4dd1a31 | /courseproject/part1-shiny/weatherEventHarm/server.R | 0b3ee61f4202d47802b1d8c4c1bc59893797c911 | [] | no_license | Samhith/coursera-dataScientists-9-Developing-Data-Products | b58a02ec8740719c6c1b1bcdd06fa91813dcd087 | 8c25640f39d21db663186fff955c2bf12637e845 | refs/heads/master | 2021-01-24T14:46:36.310143 | 2014-12-25T07:30:12 | 2014-12-25T07:30:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,368 | r | server.R | library(latticeExtra)
library(maps)
library(choroplethrMaps)
# Presumably defines the helpers used below (loadAndStat_data, topN_*, pie_*,
# influnce_*) -- confirm against the sourced file.  NOTE(review): the file
# name "wheatherAnalyse.R" is spelled this way on disk; do not "fix" it here.
source("wheatherAnalyse.R")
Sys.setlocale('LC_ALL', 'English')
# Load/pre-aggregate the weather-event data once at app start-up.
loadAndStat_data()
data(state.regions)
shinyServer(
  function(input, output) {
    # Table of the top-N event types for the selected harm dimension
    # (population harm vs. economic cost).
    output$table_topEventType<-renderTable({
      data<-switch(input$harmType,
                   "population" = topN_harm_health(input$topn),
                   "economic" = topN_cost(input$topn)
      )
      data
    })
    # Pie chart of the same top-N breakdown.
    output$plot_pie<-renderPlot({
      if (input$harmType=="population") {
        data<-pie_health(input$topn)
        pie(data$sum, labels=data$valid_type,col=rainbow(length(data$valid_type)), main="FATALITIES & INJURIES")
      } else {
        data<-pie_cost(input$topn)
        pie(data$sum, labels=data$valid_type,col=rainbow(length(data$valid_type)), main="PROPDMG & CROPDMG")
      }
    })
    # US state map (latticeExtra::mapplot), one panel per event type.
    output$plot_state<-renderPlot({
      if (input$harmType=="population") {
        data<-influnce_health(input$topn)
        mapplot(region~harm_popu|valid_type, data = data,xlab="",map = map("state",plot = FALSE, fill = TRUE), colramp = colorRampPalette(c("blue","red")))
      } else {
        data<-influnce_cost(input$topn)
        mapplot(region~cost_sum|valid_type, data = data,xlab="", map = map("state",plot = FALSE, fill = TRUE), colramp = colorRampPalette(c("blue","red")))
      }
    })
  }
) |
a6fbd2ae89b18635bae8d83d803bc26ddcb368f5 | a2d6ef79ff389985df15520e176750c7a0fb8c04 | /R code/Parm Generation.R | e57615e2d86ffa59bff0497f11657983a2605ac9 | [] | no_license | akaboshi900306/hd-not-useful | 8c00b8ade16cc7b675c1cc3ca5cf234e611baac0 | 95a564882783ebd96377e15affef1a632ff61a81 | refs/heads/main | 2023-06-16T07:33:07.581278 | 2021-07-09T15:46:34 | 2021-07-09T15:46:34 | 381,160,246 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,696 | r | Parm Generation.R | library(bigrquery)
library(data.table)
library(bigQueryR)
library(stringr)
library(XML)
library(tidyverse)
#devtools::install_github("hadley/tidyverse")
setwd("//at2a5/vol4/DEPTS/TRAFFIC/PROJECTS/SCD/Supply Chain Analytics/Projects/Seasonal ASL (DO NOT DELETE - Nirjhar Raina)/Parm Upload/")
project = "analytics-supplychain-thd"
dataset = "SSNL_ASL_PROD"
# Pull this week's Seasonal-ASL parameter rows from BigQuery.
# The CASE expression caps Parm_Value at 9999, and the join keeps only rows
# whose UPLOAD_DT equals the calendar date with DAY_OF_WK_NBR = 1 in the
# current fiscal week.
upload_2 <- query_exec(project = project,
                       #dataset = dataset,
                       #destination_table = "IXL0858_ANALYTICS.JLQ_UPLOAD",
                       #write_disposition = "WRITE_TRUNCATE",
                       query = "
                       SELECT
                       Trumping_Level
                       , Type
                       , Store_Group
                       , Volume_Id
                       , Velocity_Id
                       , SKU
                       , SKU_Grp
                       , Store
                       , Parm_code
                       , CASE WHEN Parm_Value > 9999 THEN 9999 ELSE Parm_Value end as Parm_Value
                       , Eff_Begin_date
                       , Eff_End_date
                       , Start_Fscl_Wk_nbr
                       , End_Fscl_Wk_nbr
                       , Param_Desc --CHANGE DATE!
                       , OOTL_Reason_Code
                       FROM `analytics-supplychain-thd.SSNL_ASL_PROD.PARM_UPLOAD_WKLY` A
                       JOIN (
                       SELECT
                       A.CAL_DT, A.FSCL_WK_NBR
                       FROM `pr-edw-views-thd.SHARED.CAL_PRD_HIER_FD` A
                       JOIN (SELECT FSCL_YR_WK_KEY_VAL FROM `pr-edw-views-thd.SHARED.CAL_PRD_HIER_FD` A WHERE CAL_DT = CURRENT_DATE('America/New_York')) B
                       ON A.FSCL_YR_WK_KEY_VAL = B.FSCL_YR_WK_KEY_VAL
                       WHERE A.DAY_OF_WK_NBR = 1
                       GROUP BY 1,2
                       ) F
                       ON A.UPLOAD_DT = F.CAL_DT
                       --WHERE TESTING_FIELD IS NULL
                       " ,
                       use_legacy_sql = FALSE,
                       max_pages = Inf,
                       billing = project)
upload <- as.data.table(upload_2)
# The upload template expects human-readable headers: every underscore in the
# BigQuery column names maps to a single space (e.g. "Trumping_Level" ->
# "Trumping Level"); columns without underscores ("Type", "SKU", "Store") are
# unaffected.  One vectorised rename replaces the previous 13 column-by-column
# `colnames(upload)[colnames(upload) == ...]` assignments.
setnames(upload, gsub("_", " ", names(upload), fixed = TRUE))
# Write `upload` out in chunks of at most 999,999 rows per CSV (presumably to
# stay under a downstream consumer's row limit -- confirm), one file per chunk,
# suffixed with today's date and a 1-based part index.  The loop variable
# previously named `write` shadowed base::write and is renamed here.
rows_left <- nrow(upload)
part <- 1L
while (rows_left > 0) {
  n_rows <- min(999999L, rows_left)
  write.csv(upload[1:n_rows],
            str_c("//at2a5/vol4/DEPTS/TRAFFIC/PROJECTS/SCD/Supply Chain Analytics/Projects/Seasonal ASL (DO NOT DELETE - Nirjhar Raina)/Parm Upload/ssn_sfty_stk_soln_min_upload_",str_trim(str_replace_all(Sys.Date(),pattern = ":",replacement = "-")),"_",part,".csv"), row.names=FALSE, quote=FALSE, na="")
  # Drop the rows just written so the next iteration starts at the top.
  upload <- upload[-1:-n_rows]
  rows_left <- rows_left - n_rows
  part <- part + 1L
}
|
684557828b7a531c05369e53e9f62aa629eb4d4e | 340caf01c809a50b738188b4dfc9067522bd377a | /man/z.stats.Rd | 756a10c2342bd09697efc9edad9c5e29d9465d1b | [] | no_license | sunilgarg1/zFactor | 542180e69125cda88788b13eea2f0f70027771d2 | 145ab68d6ffe233764cd24449a0dafb7ac31dd09 | refs/heads/master | 2021-01-23T18:22:03.142561 | 2017-07-03T16:05:19 | 2017-07-03T16:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 832 | rd | z.stats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{z.stats}
\alias{z.stats}
\title{Get error summary statistics for any given compressibility correlation}
\usage{
z.stats(correlation = "DAK", pprRange = "lp", interval = "coarse")
}
\arguments{
\item{correlation}{identifier. Can be "HY", "DAK", "DPR" "N10", "SH"}
\item{pprRange}{low (lp) or high (hp) chart area of the Standing-Katz chart}
\item{interval}{quality of the Ppr scale. Coarse: every 1.0; Fine: every 0.5}
}
\description{
A quick way to show an error summary between any of the indicated correlations and
the Standing-Katz chart
}
\examples{
# error statistics for the Dranchuk-AbouKassem correlation
z.stats("DAK")
# error statistics for Hall-Yarborough correlation at steps of 0.5 in Ppr
z.stats("HY", interval = "fine")
}
|
45ec9df04e1ea7c83301cbf3dd837920e96cd756 | a8a8075aee934b380e4041fe38a7433761aab4c3 | /R/cpop_model.R | 6ae701367b97cb4bb71234902293f221f351ec14 | [] | no_license | kevinwang09/CPOP | 14d77f61cb9981e3718066921062dba4eb2c4a2c | 836260be426a65bf85d3fbd224ecae407d8408bf | refs/heads/master | 2022-05-22T19:25:29.365448 | 2022-04-18T11:55:11 | 2022-04-18T11:55:11 | 186,576,277 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 6,166 | r | cpop_model.R | #' @title CPOP modelling
#' @description CPOP consists of three steps. Step 1 is to select features common to
#' two transformed data. Note the input must be pairwise-differences between the original data columns.
#' Step 2 is to select features in constructed models that shared similar characteristics. Step 3 is to
#' construct a final model used for prediction.
#' @param x1 A data matrix of size n (number of samples) times p (number of features)
#' @param x2 A data matrix of size n (number of samples) times p (number of features)
#' Column names should be identical to z1.
#' @param y1 A vector of response variable. Same length as the number of rows of x1.
#' @param y2 A vector of response variable. Same length as the number of rows of x2.
#' @param w A vector of weights. Default to NULL, which uses `identity_dist`.
#' @param n_iter Number of iterations in Step 1 and 2. Default to 20.
#' @param alpha The alpha parameter for elastic net models. See the alpha argument in glmnet::glmnet. Default to 1.
#' @param n_features Breaking the CPOP-Step 1 loop if a certain number of features is reached. Default to 50.
#' @param s CV-Lasso lambda choice. Default to "lambda.min", see cv.glmnet in the glmnet package.
#' @param family family of glmnet
#' @param cpop1_method CPOP step 1 selection method. See documentations on `cpop1`. Default to "Normal".
#' @param cpop2_break Should CPOP-step2 loop be broken the first time. Default to TRUE.
#' @param cpop2_type Should CPOP-step2 select features based on sign of features or magnitude? Either "sign" (default) or "mag".
#' @param cpop2_mag a threshold for CPOP-step2 when selecting features based on coefficient difference magnitude;
#' features whose differential betas exceed this threshold are removed
#' @param intercept Default to FALSE
#' @param ... Extra parameter settings for cv.glmnet in the glmnet package.
#' @param z1 (Deprecated) a data matrix, columns are pairwise-differences between the original data columns.
#' @param z2 (Deprecated) a data matrix, columns are pairwise-differences between the original data columns.
#' @return A CPOP object containing:
#' \itemize{
#' \item model: the CPOP model as a glmnet object
#' \item coef_tbl: a tibble (data frame) of CPOP feature coefficients
#' \item cpop1_features: a vector of CPOP
#' }
#' @export
#' @examples
#' data(cpop_data_binary, package = 'CPOP')
#' ## Loading simulated matrices and vectors
#' x1 = cpop_data_binary$x1
#' x2 = cpop_data_binary$x2
#' y1 = cpop_data_binary$y1
#' y2 = cpop_data_binary$y2
#' set.seed(1)
#' cpop_result = cpop_model(x1 = x1, x2 = x2, y1 = y1, y2 = y2, alpha = 1, n_features = 10)
#' cpop_result
cpop_model <- function(
  x1, x2, y1, y2, w = NULL,
  n_features = 50, n_iter = 20, alpha = 1,
  family = "binomial",
  s = "lambda.min", cpop2_break = TRUE, cpop2_type = "sign", cpop2_mag = 1,
  cpop1_method = "normal", intercept = FALSE, z1, z2, ...) {
  ## Input handling: when z1/z2 (the deprecated interface) were not supplied,
  ## derive the pairwise-difference matrices from x1/x2.
  if (missing(z1) | missing(z2)) {
    prep_result <- prep_cpop(x1 = x1, x2 = x2)
    z1 <- prep_result$z1
    z2 <- prep_result$z2
    assertthat::assert_that(nrow(x1) == length(y1))
    assertthat::assert_that(nrow(x2) == length(y2))
  }
  if (missing(x1) | missing(x2)) {
    warning(
      "Arguments `z1` and `z2` are deprecated. CPOP can still be performed.
      Please use `x1` and `x2` in the future.")
    assertthat::assert_that(nrow(z1) == length(y1))
    assertthat::assert_that(nrow(z2) == length(y2))
  }
  ## For a binomial response, both y's must be factors on identical levels.
  if (family == "binomial") {
    assertthat::assert_that(is.factor(y1))
    assertthat::assert_that(is.factor(y2))
    assertthat::assert_that(identical(levels(y1), levels(y2)))
    factor_levels <- levels(y1)
  } else {
    factor_levels <- NULL
  }
  ## Step 1: iteratively select features shared by the two data sets.
  cpop1_result <- cpop1_iterate(
    z1 = z1, z2 = z2, y1 = y1, y2 = y2, w = w,
    n_features = n_features, n_iter = n_iter,
    alpha = alpha, s = s,
    family = family, cpop1_method = cpop1_method, ...)
  cpop1_features <- cpop1_result$cpop1_features
  if (length(cpop1_features) == 0) {
    warning("No predictive features were selected in Step 1. Return NULL.")
    return(NULL)
  }
  ## Step 2: keep features whose fitted coefficients agree across the two
  ## data sets, judged either by sign or by coefficient-difference magnitude.
  if (cpop2_type == "sign") {
    cpop2_result <- cpop2_sign(z1 = z1, z2 = z2, y1 = y1, y2 = y2,
                               cpop1_features = cpop1_features, s = s,
                               n_iter = n_iter, family = family,
                               cpop2_break = cpop2_break, intercept = intercept)
  }
  if (cpop2_type == "mag") {
    cpop2_result <- cpop2_mag(z1 = z1, z2 = z2, y1 = y1, y2 = y2,
                              cpop1_features = cpop1_features, s = s,
                              n_iter = n_iter, family = family,
                              cpop2_break = FALSE, mag = cpop2_mag,
                              intercept = intercept)
  }
  if (length(cpop2_result) == 0) {
    warning("No predictive features were selected in Step 2. Return NULL.")
    return(NULL)
  }
  ## Step 3: fit the final pair of models on the surviving features.
  cpop3_result <- cpop3(z1 = z1, z2 = z2, y1 = y1, y2 = y2,
                        cpop2_result = cpop2_result, family = family,
                        intercept = intercept)
  ## Collect coefficients from both fitted models into a single tibble;
  ## glmnet fits need the lambda choice `s`, plain fits use stats::coefficients.
  if (cpop3_result$cpop_mode == "glmnet") {
    coef1 <- glmnet::coef.glmnet(cpop3_result$model1, s = s)
    coef2 <- glmnet::coef.glmnet(cpop3_result$model2, s = s)
    coef_tbl <- tibble::tibble(coef_name = rownames(coef1),
                               coef1 = as.vector(coef1),
                               coef2 = as.vector(coef2))
  } else {
    coef1 <- stats::coefficients(cpop3_result$model1)
    coef2 <- stats::coefficients(cpop3_result$model2)
    coef_tbl <- tibble::tibble(coef_name = names(coef1),
                               coef1 = as.vector(coef1),
                               coef2 = as.vector(coef2))
  }
  ## Assemble the returned "cpop" object.
  result <- c(cpop3_result,
              coef_tbl = list(coef_tbl),
              cpop1_features = list(cpop1_features),
              step_features = list(cpop1_result$step_features),
              family_params = list(list(family = family,
                                        factor_levels = factor_levels)),
              z1 = list(z1),
              z2 = list(z2))
  class(result) <- c("cpop")
  result
}
#' @export
print.cpop <- function(x, ...) {
  # Console summary: feature count on the first line, then the coefficient
  # table.
  # NOTE(review): none of the elements visibly stored by `cpop_model()` begin
  # with "feature" (it stores `cpop1_features`), so `x$feature` relies on `$`
  # partial matching and may be NULL unless `cpop3()` adds such an element --
  # confirm the intended element name.
  n_features <- length(x$feature)
  cat("CPOP model with ", n_features, "features \n")
  print(x$coef_tbl)
}
|
af0a1339b00390aa31c6b317cf078c5f39223f5f | 175fac3fd67978b72f7e79717bfd52cd2e964b4f | /pracs/DMDK-s.R | f4bab164ae243b55d04758be9dd09e2c5f4cf36f | [] | no_license | SPE-R/SPE | 0905425c09e567a720c90d9018d36405aff43cb7 | 7fcd5eebc4326d741c049af7edf9f009beb1fb3b | refs/heads/master | 2023-06-08T15:43:44.976096 | 2023-06-07T08:44:56 | 2023-06-07T08:44:56 | 93,295,452 | 5 | 4 | null | 2023-05-30T09:07:09 | 2017-06-04T06:45:23 | TeX | UTF-8 | R | false | false | 12,804 | r | DMDK-s.R | ### R code from vignette source 'DMDK-s.rnw'
###################################################
### code chunk number 1: DMDK-s.rnw:20-24
###################################################
## Global output width and Sweave figure-margin defaults for all chunks below.
options(width = 90,
        prompt = " ", continue = " ",
        SweaveHooks = list(fig = function()
          par(mar = c(3, 3, 1, 1), mgp = c(3, 1, 0) / 1.6, las = 1, bty = "n")))

###################################################
### code chunk number 2: DMDK-s.rnw:55-60
###################################################
options(width = 90)
library(Epi)
library(popEpi)
library(tidyverse)
library(mgcv)

###################################################
### code chunk number 3: DMDK-s.rnw:63-67
###################################################
## Danish late-onset diabetes cohort shipped with the Epi package.
data(DMlate)
str(DMlate)
head(DMlate)
summary(DMlate)

###################################################
### code chunk number 4: DMDK-s.rnw:92-94
###################################################
## Death is recorded iff dodth is non-missing; cross-check with the exit date.
with(DMlate, table(dead = !is.na(dodth),
                   same = (dodth == dox), exclude = NULL))

###################################################
### code chunk number 5: DMDK-s.rnw:98-105
###################################################
## Follow-up as a Lexis object on age (A), calendar time (P), duration (dur).
LL <- Lexis(entry = list(A = dodm - dobth,
                         P = dodm,
                         dur = 0),
            exit = list(P = dox),
            exit.status = factor(!is.na(dodth),
                                 labels = c("Alive", "Dead")),
            data = DMlate)

###################################################
### code chunk number 6: DMDK-s.rnw:115-123
###################################################
## Same Lexis object, but also keep the records dropped for missing dates.
LL <- Lexis(entry = list(A = dodm - dobth,
                         P = dodm,
                         dur = 0),
            exit = list(P = dox),
            exit.status = factor(!is.na(dodth),
                                 labels = c("Alive", "Dead")),
            data = DMlate,
            keep.dropped = TRUE)

###################################################
### code chunk number 7: DMDK-s.rnw:126-127
###################################################
attr(LL, 'dropped')

###################################################
### code chunk number 8: DMDK-s.rnw:131-133
###################################################
summary(LL)
head(LL)
###################################################
### code chunk number 9: DMDK-s.rnw:150-156
###################################################
## Deaths, person-years and mortality rate (per 1000 PY) by sex.
stat.table(sex,
           list(D = sum(lex.Xst == "Dead"),
                Y = sum(lex.dur),
                rate = ratio(lex.Xst == "Dead", lex.dur, 1000)),
           margins = TRUE,
           data = LL)

###################################################
### code chunk number 10: DMDK-s.rnw:195-200
###################################################
## Split follow-up into half-year age bands; splitMulti is the faster variant.
system.time(SL <- splitLexis(LL, breaks = seq(0, 125, 1/2), time.scale = "A"))
summary(SL)
class(SL)
system.time(SL <- splitMulti(LL, A = seq(0, 125, 1/2)))
summary(SL)
class(SL)
summary(LL)

###################################################
### code chunk number 11: DMDK-s.rnw:243-247
###################################################
## Age-only mortality: penalized spline in age, fitted separately by sex.
r.m <- gam(cbind(lex.Xst == "Dead", lex.dur) ~ s(A, k = 20),
           family = poisreg,
           data = subset(SL, sex == "M"))
r.f <- update(r.m, data = subset(SL, sex == "F"))

###################################################
### code chunk number 12: DMDK-s.rnw:257-259
###################################################
## The same models via the gam.Lexis convenience wrapper.
r.m <- gam.Lexis(subset(SL, sex == "M"), ~ s(A, k = 20))
r.f <- gam.Lexis(subset(SL, sex == "F"), ~ s(A, k = 20))

###################################################
### code chunk number 13: DMDK-s.rnw:304-308
###################################################
## Predicted rates (with CIs) on an age grid from 20 to 80.
nd <- data.frame(A = seq(20, 80, 0.5))
p.m <- ci.pred(r.m, newdata = nd)
p.f <- ci.pred(r.f, newdata = nd)
head(p.m)

###################################################
### code chunk number 14: a-rates
###################################################
matplot(nd$A, cbind(p.m, p.f) * 1000,
        type = "l", col = rep(c("blue", "red"), each = 3), lwd = c(3, 1, 1), lty = 1,
        log = "y", xlab = "Age", ylab = "Mortality of DM ptt per 1000 PY")

###################################################
### code chunk number 15: A-rates
###################################################
matshade(nd$A, cbind(p.m, p.f) * 1000, plot = TRUE,
         col = c("blue", "red"), lty = 1, lwd = 3,
         log = "y", xlab = "Age", ylab = "Mortality among DM ptt per 1000 PY")
###################################################
### code chunk number 16: DMDK-s.rnw:378-388
###################################################
## Mortality by age, period and duration of diabetes: cubic-regression
## splines on all three time scales, fitted separately by sex.
Mcr <- gam.Lexis(subset(SL, sex == "M"),
                 ~ s(A, bs = "cr", k = 10) +
                   s(P, bs = "cr", k = 10) +
                   s(dur, bs = "cr", k = 10))
summary(Mcr)
Fcr <- gam.Lexis(subset(SL, sex == "F"),
                 ~ s(A, bs = "cr", k = 10) +
                   s(P, bs = "cr", k = 10) +
                   s(dur, bs = "cr", k = 10))
summary(Fcr)

###################################################
### code chunk number 17: plgam-default
###################################################
par(mfcol = c(3, 2))
plot(Fcr, ylim = c(-3, 3), col = "red")
plot(Mcr, ylim = c(-3, 3), col = "blue",
     lwd = 2, shade = TRUE, shade.col = adjustcolor("blue", alpha = 0.15))

###################################################
### code chunk number 18: DMDK-s.rnw:426-428
###################################################
## Does the 3-timescale model improve on the age-only model?
anova(Mcr, r.m, test = "Chisq")
anova(Fcr, r.f, test = "Chisq")

###################################################
### code chunk number 19: DMDK-s.rnw:480-485
###################################################
## Prediction frame following one person diagnosed at age 50 in 1995
## for 12 years, in quarter-year steps.
pts <- seq(0, 12, 1/4)
nd <- data.frame(A = 50 + pts,
                 P = 1995 + pts,
                 dur = pts)
head(cbind(nd$A, ci.pred(Mcr, newdata = nd) * 1000))
###################################################
### code chunk number 20: rates
###################################################
## Predicted mortality curves for diagnosis ages 50/60/70 in 1995 and 2005.
plot(NA, xlim = c(50, 85), ylim = c(5, 400), log = "y",
     xlab = "Age", ylab = "Mortality rate for DM patients")
for (ip in c(1995, 2005))
  for (ia in c(50, 60, 70)) {
    nd <- data.frame(A = ia + pts,
                     P = ip + pts,
                     dur = pts)
    matshade(nd$A, ci.pred(Mcr, nd) * 1000, col = "blue", lty = 1 + (ip == 1995))
    matshade(nd$A, ci.pred(Fcr, nd) * 1000, col = "red", lty = 1 + (ip == 1995))
  }

###################################################
### code chunk number 21: rates5
###################################################
## Refit with fewer knots (k = 5) on the duration scale, plus model checks.
Mcr <- gam.Lexis(subset(SL, sex == "M"),
                 ~ s(A, bs = "cr", k = 10) +
                   s(P, bs = "cr", k = 10) +
                   s(dur, bs = "cr", k = 5))
summary(Mcr)
Fcr <- gam.Lexis(subset(SL, sex == "F"),
                 ~ s(A, bs = "cr", k = 10) +
                   s(P, bs = "cr", k = 10) +
                   s(dur, bs = "cr", k = 5))
summary(Fcr)
gam.check(Mcr)
gam.check(Fcr)

###################################################
### code chunk number 22: rates-5
###################################################
## Male and female rates for diagnosis ages 50-70 in 2005, plus the M/F ratio.
plot(NA, xlim = c(50, 80), ylim = c(0.9, 100), log = "y",
     xlab = "Age", ylab = "Mortality rate for DM patients")
abline(v = c(50, 55, 60, 65, 70), col = gray(0.8))
# for(ip in c(1995,2005))
ip <- 2005
for (ia in seq(50, 70, 5)) {
  nd <- data.frame(A = ia + pts,
                   P = ip + pts,
                   dur = pts)
  rate_m <- ci.pred(Mcr, nd) * 1000
  rate_f <- ci.pred(Fcr, nd) * 1000
  matshade(nd$A, rate_m, col = "blue", lwd = 2)
  matshade(nd$A, rate_f, col = "red", lwd = 2)
  matshade(nd$A, ci.ratio(rate_m, rate_f), lwd = 2)
}
abline(h = 1, lty = "55")
###################################################
### code chunk number 23: DMDK-s.rnw:690-692
###################################################
## Integer midpoint age and period of each half-year interval, used as the
## merge keys against the population mortality table.
SL$Am <- floor(SL$A + 0.25)
SL$Pm <- floor(SL$P + 0.25)

###################################################
### code chunk number 24: DMDK-s.rnw:696-704
###################################################
## Danish population mortality rates, aligned to the same (sex, Am, Pm) keys.
data(M.dk)
str(M.dk)
M.dk <- transform(M.dk, Am = A,
                        Pm = P,
                        sex = factor(sex, labels = c("M", "F")))
head(M.dk)
str(SL)
str(M.dk)

###################################################
### code chunk number 25: DMDK-s.rnw:709-712
###################################################
SLr <- merge(SL, M.dk[, c("sex", "Am", "Pm", "rate")])
dim(SL)
dim(SLr)

###################################################
### code chunk number 26: DMDK-s.rnw:721-724
###################################################
## dplyr equivalent of the merge above.
SLi <- inner_join(SL, M.dk[, c("sex", "Am", "Pm", "rate")])
dim(SL)
dim(SLi)

###################################################
### code chunk number 27: DMDK-s.rnw:737-755
###################################################
## Expected deaths E = person-years * population rate / 1000; SMR = D / E.
SLr$E <- SLr$lex.dur * SLr$rate / 1000
stat.table(sex,
           list(D = sum(lex.Xst == "Dead"),
                Y = sum(lex.dur),
                E = sum(E),
                SMR = ratio(lex.Xst == "Dead", E)),
           data = SLr,
           margin = TRUE)
stat.table(list(sex,
                Age = cut(A,
                          breaks = c(0, 4:9 * 10, 100),
                          right = FALSE)),
           list(D = sum(lex.Xst == "Dead"),
                Y = sum(lex.dur),
                E = sum(E),
                SMR = ratio(lex.Xst == "Dead", E)),
           margin = TRUE,
           data = SLr)

###################################################
### code chunk number 28: DMDK-s.rnw:777-781
###################################################
## SMR by sex as a Poisson model with log(E) as offset.
msmr <- glm((lex.Xst == "Dead") ~ sex - 1 + offset(log(E)),
            family = poisson,
            data = subset(SLr, E > 0))
ci.exp(msmr)

###################################################
### code chunk number 29: DMDK-s.rnw:805-809
###################################################
## Same model via the poisreg family, which takes (events, exposure) directly.
msmr <- glm(cbind(lex.Xst == "Dead", E) ~ sex - 1,
            family = poisreg,
            data = subset(SLr, E > 0))
ci.exp(msmr)

###################################################
### code chunk number 30: DMDK-s.rnw:813-817
###################################################
## Contrasts: SMR for men, for women, and the M/F ratio.
(CM <- rbind(M = c(1, 0),
             W = c(0, 1),
             'M/F' = c(1, -1)))
round(ci.exp(msmr, ctr.mat = CM), 2)
###################################################
### code chunk number 31: SMReff
###################################################
## SMR modelled smoothly on age, period and duration, separately by sex.
Msmr <- gam(cbind(lex.Xst == "Dead", E) ~ s(A, bs = "cr", k = 10) +
                                          s(P, bs = "cr", k = 10) +
                                          s(dur, bs = "cr", k = 5),
            family = poisreg,
            data = subset(SLr, E > 0 & sex == "M"))
Fsmr <- update(Msmr, data = subset(SLr, E > 0 & sex == "F"))
summary(Msmr)
summary(Fsmr)
par(mfcol = c(3, 2))
plot(Msmr, ylim = c(-1, 2), col = "blue")
plot(Fsmr, ylim = c(-1, 2), col = "red")

###################################################
### code chunk number 32: SMRsm
###################################################
## SMR curves by age at diagnosis (in 2005), with the M/F ratio in grey.
plot(NA, xlim = c(50, 80), ylim = c(0.5, 5), log = "y",
     xlab = "Age", ylab = "SMR relative to total population")
abline(v = c(50, 55, 60, 65, 70), col = gray(0.8))
# for(ip in c(1995,2005))
ip <- 2005
for (ia in c(50, 60, 70)) {
  nd <- data.frame(A = ia + pts,
                   P = ip + pts,
                   dur = pts)
  smr_m <- ci.pred(Msmr, nd)
  smr_f <- ci.pred(Fsmr, nd)
  matshade(nd$A, smr_m, col = "blue", lwd = 2)
  matshade(nd$A, smr_f, col = "red", lwd = 2)
  matshade(nd$A, ci.ratio(smr_m, smr_f), lwd = 2, col = gray(0.5))
}
abline(h = 1, lty = "55")

###################################################
### code chunk number 33: DMDK-s.rnw:913-921
###################################################
## Parametric SMR model: sex-specific linear effects of age and period
## plus a smooth duration effect; % change per unit shown below.
Asmr <- gam(cbind(lex.Xst == "Dead", E) ~ sex +
                                          sex:I(A - 60) +
                                          sex:I(P - 2005) +
                                          s(dur, k = 5),
            family = poisreg,
            data = subset(SLr, E > 0))
summary(Asmr)
round((ci.exp(Asmr, subset = "sex") - 1) * 100, 1)

###################################################
### code chunk number 34: SMRsl
###################################################
## SMR from the parametric model for diagnosis ages 50-70 in 2005.
plot(NA, xlim = c(50, 80), ylim = c(0.8, 5), log = "y",
     xlab = "Age", ylab = "SMR relative to total population")
abline(v = c(50, 55, 60, 65, 70), col = gray(0.8))
# for(ip in c(1995,2005))
ip <- 2005
for (ia in c(50, 55, 60, 65, 70)) {
  nd <- data.frame(A = ia + pts,
                   P = ip + pts,
                   dur = pts)
  smr_m <- ci.pred(Asmr, cbind(nd, sex = "M"))
  smr_f <- ci.pred(Asmr, cbind(nd, sex = "F"))
  matshade(nd$A, smr_m, col = "blue", lwd = 2)
  matshade(nd$A, smr_f, col = "red", lwd = 2)
  matshade(nd$A, ci.ratio(smr_m, smr_f), lwd = 2, col = gray(0.5))
}
abline(h = 1, lty = "55")
|
ad3cf23e7e151f01c542c6a101cf8e4d0b8d8c07 | 29585dff702209dd446c0ab52ceea046c58e384e | /mem/R/calcular.map.R | 9b3a19660ac9d08aeb8423d653e3c88935c0a655 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 645 | r | calcular.map.R | calcular.map <-
function(i.datos){
  # For each window width s (1..n weeks), find the maximum sum over s
  # consecutive values, where that window starts/ends, and its share of the
  # annual total.  Returns a matrix with one row per window width:
  #   col 1: window width s
  #   col 2: maximum sum as % of the annual total
  #   col 3: maximum sum
  #   col 4: first week of the maximising window (earliest, on ties)
  #   col 5: last week of that window
  datos <- as.vector(as.matrix(i.datos))
  semanas <- length(datos)
  maxsumasemanas <- array(dim = c(semanas, 5))
  # seq_len() (rather than 1:semanas) behaves correctly for empty input.
  for (s in seq_len(semanas)) {
    inicio <- seq_len(semanas + 1 - s)
    # Preallocated vapply instead of growing the vector with c() inside the
    # loop; each entry is the na.rm sum over one s-week window.
    sumasemanas <- vapply(inicio,
                          function(i) sum(datos[i:(i + s - 1)], na.rm = TRUE),
                          numeric(1))
    maxsumasemanas[s, 1] <- s
    maxsumasemanas[s, 3] <- max.fix.na(sumasemanas)
    maxsumasemanas[s, 4] <- min(inicio[maxsumasemanas[s, 3] == sumasemanas])
    maxsumasemanas[s, 5] <- maxsumasemanas[s, 4] + s - 1
  }
  sumaanual <- sum(datos, na.rm = TRUE)
  maxsumasemanas[, 2] <- 100 * maxsumasemanas[, 3] / sumaanual
  return(maxsumasemanas)
}
|
c6aa21a552bb082c801b45595641dbe0bcb1c0d0 | 3999dab9955e6978db09161e3acb21200e4dd4d5 | /R/wrappers.R | a7afc1d484c40ca58d75aa2fa54d21ff4ed36252 | [] | no_license | moodymudskipper/reactibble | b2170fe0693700d42d75d896e68e1eaaa1c0b313 | 471112ce8a3a0d8e8bd917fb9f58cff2f61125f1 | refs/heads/master | 2023-02-11T00:11:08.038979 | 2021-01-12T11:38:24 | 2021-01-12T11:38:24 | 327,343,193 | 38 | 1 | null | 2021-01-08T20:17:27 | 2021-01-06T14:53:47 | R | UTF-8 | R | false | false | 7,089 | r | wrappers.R |
#' @export
`[[<-.reactibble` <- function(x, name, value) {
  # Assign the column on the bare data frame, then refresh any reactive
  # columns and restore the reactibble class.
  bare <- strip_reactibble_class(x)
  bare[[name]] <- value
  as_reactibble(refresh_if_relevant(bare))
}
#' @export
`$<-.reactibble` <- function(x, name, value) {
  # Column assignment via `$` delegates to the `[[<-` method (dispatched on
  # the reactibble class of `x`), so both assignment forms behave identically.
  x[[name]] <- value
  x
}
#' @export
`[<-.reactibble` <- function(x, ..., value) {
  # Perform the subassignment on the bare data frame, refresh reactive
  # columns, then restore the original class vector.
  original_class <- class(x)
  bare <- strip_reactibble_class(x)
  bare[...] <- value
  refreshed <- refresh_if_relevant(bare)
  class(refreshed) <- original_class
  refreshed
}
#' @export
transform.reactibble <- function (`_data`, ...) {
  # Downgrades the reactibble to a static data.frame (after materializing
  # reactive columns) and warns about the loss of reactivity.
  # NOTE(review): the `...` transform expressions are accepted but never
  # applied -- confirm whether they should be forwarded (e.g. to
  # transform.data.frame) or whether dropping them is intentional.
  warning("`transform` converts a 'reactibble' object to a static data frame, ",
          "use `mutate.reactibble` to preserve reactive columns")
  as.data.frame(materialize(`_data`))
}
#' @export
within.reactibble <- function (data, expr, ...) {
  # Evaluate `expr` via within() on the stripped data frame, then refresh
  # reactive columns and restore the class.  Discouraged because `expr` may
  # desynchronize reactive columns; `mutate.reactibble` is the safe path.
  warning(
    "Using `within` on a 'reactibble' object is discouraged and ",
    "potentially unsafe, use `mutate.reactibble` instead")
  cl <- class(data)
  x <- strip_reactibble_class(data)
  # substitute()/eval.parent() rebuild the `within(x, expr, ...)` call
  # unevaluated and run it in the caller's frame, so `expr` keeps its normal
  # non-standard-evaluation semantics.
  x <- eval.parent(substitute(within(x, expr, ...), environment()))
  x <- refresh_if_relevant(x)
  class(x) <- cl
  x
}
#' @export
with.reactibble <- function (data, expr, ...) {
  # this just makes sure the output is not a reactive column
  warning(
    "Using `with` on a 'reactibble' object is discouraged and potentially ",
    "unsafe, use `mutate.reactibble` instead")
  # Rebuild the `with.default(data, expr, ...)` call unevaluated and run it
  # in the caller's frame so `expr` keeps its normal NSE semantics.
  data <- eval.parent(substitute(with.default(data, expr, ...), environment()))
  strip_reactive_col(data)
}
#' @export
`[.reactibble` <- function(x, ...){
  # Subset via the parent method, then refresh so reactive columns stay in
  # sync with the (possibly reduced) data.
  subsetted <- NextMethod()
  refresh_if_relevant(subsetted)
}
#' @export
#' @importFrom stats setNames
`names<-.reactibble` <- function(x, value) {
  # Renaming must also rewrite every reactive column definition, because
  # those definitions are unevaluated expressions referring to columns by
  # their (old) names.
  cl <- class(x)
  x <- strip_reactibble_class(x)
  # renaming arg for `substitute`: maps each old column name to its new symbol
  args <- setNames(lapply(value, as.symbol), names(x))
  for (i in seq_along(x)) {
    if(inherits(x[[i]], "reactive_col")) {
      # do.call is needed because substitute() does not evaluate its first
      # argument; this rewrites old names to new symbols inside the stored
      # definition.
      attr(x[[i]], "reactibble_col_def") <-
        do.call(substitute, c(list(attr(x[[i]], "reactibble_col_def"), args)))
    }
  }
  names(x) <- value
  class(x) <- cl
  x
}
# #' @export
# #' @method as.data.frame reactibble
# as.data.frame.reactibble <- function(
# x, row.names = NULL, optional = FALSE, ...) {
# x <- strip_reactibble_class(x)
# x[] <- lapply(x, strip_reactive_col)
# NextMethod()
# }
#
# #' Convert to tibble
# #'
# #' @param x react tibble object
# #' @param ... forwarded to tibble::as_tibble
# #' @param .rows forwarded to tibble::as_tibble
# #' @param .name_repair Treatment of problematic column names
# #' @param rownames rownames How to treat existing row names of a data frame or matrix
# #' @export
# as_tibble.reactibble <- function(
# x, ..., .rows = NULL,
# .name_repair = c("check_unique", "unique", "universal", "minimal"),
# rownames = pkgconfig::get_config("tibble::rownames", NULL)) {
# x <- strip_reactibble_class(x)
# x[] <- lapply(x, strip_reactive_col)
# NextMethod()
# }
#
#
# #' @export
# #' @method as.data.table reactibble
# as.data.table.reactibble <- function(x, keep.rownames = FALSE, ...) {
# x <- strip_reactibble_class(x)
# x[] <- lapply(x, strip_reactive_col)
# NextMethod()
# }
#
# # to avoid error with devtools::load_all()
# as.data.table <- NULL
#
#' @export
`[[.reactibble` <- function(x, ...) {
  # Extract without S3 dispatch, then drop the reactive wrapper so callers
  # receive a plain column.
  column <- .subset2(x, ...)
  strip_reactive_col(column)
}
#' @export
`$.reactibble` <- function(x, ...) {
  # Same contract as the `[[` method: bypass dispatch with .subset2 and
  # unwrap reactive columns.
  column <- .subset2(x, ...)
  strip_reactive_col(column)
}
# This is necessary so dplyr::bind_rows reconstruct the reactibble and refreshes
# it right
#' @export
#' @param template template
#' @param data data
#' @rdname dplyr_methods
dplyr_reconstruct.reactibble <- function (data, template) {
  # Recover the reactive column definitions from the inputs of the calling
  # dplyr verb.  This reaches into dplyr's evaluation frame ("dots"), so it
  # may break if dplyr's internals change.
  dots <- get("dots", parent.frame(2))
  reactive_col_attrs <- unlist(lapply(dots, function(x) {
    lapply(x, attr, "reactibble_col_def")
  }), FALSE)
  # Keep the first definition seen for each column name.  (The previously
  # computed, unused local `nms` has been removed.)
  reactive_col_attrs <- reactive_col_attrs[!duplicated(names(reactive_col_attrs))]
  # Re-attach each definition to the corresponding column of the result.
  data[] <- Map(function(x, y) {
    attr(x, "reactibble_col_def") <- y
    x
  }, data, reactive_col_attrs)
  class(data) <- class(template)
  refresh_if_relevant(data)
}
#' @export
rbind.reactibble <- function(..., deparse.level = 1) {
  # Row-bind reactibbles, verifying that every input carries the same
  # reactive column definitions as the first one.  Discouraged entry point:
  # `rt_bind_rows()` is the supported equivalent.
  warning(
    "Using `rbind()` on a 'reactibble' object is discouraged and ",
    "potentially unsafe, use `rt_bind_rows` instead")
  # Forward `deparse.level` instead of hard-coding 1, so the caller's choice
  # is honoured (previously the argument was accepted but ignored).
  data <- rbind.data.frame(..., deparse.level = deparse.level)
  # rbind.data.frame has already validated shapes; here we only check that
  # the reactive column definitions agree across all inputs.
  dots <- list(...)
  rcs <- vapply(dots[[1]], inherits, logical(1), "reactive_col")
  nms <- names(which(rcs))
  exprs1 <- lapply(.subset(dots[[1]], nms), attr, "reactibble_col_def")
  for (input in dots[-1]) {
    exprs <- lapply(.subset(input, nms), attr, "reactibble_col_def")
    if (!identical(exprs, exprs1))
      stop("Tried to bind a `reactive_col` to an incompatible object.")
  }
  refresh_if_relevant(data)
}
#' @export
cbind.reactibble <- function(..., deparse.level = 1) {
  # Column-bind onto a reactibble; the result is re-classed and refreshed.
  # Discouraged entry point: `rt_bind_cols()` is the supported equivalent.
  warning(
    "Using `cbind()` on a 'reactibble' object is discouraged and ",
    "potentially unsafe, use `rt_bind_cols` instead")
  # Forward `deparse.level` instead of hard-coding 1 (previously the
  # argument was accepted but ignored).
  data <- cbind.data.frame(..., deparse.level = deparse.level)
  data <- as_reactibble(data)
  refresh_if_relevant(data)
}
#' @export
#' @inheritParams dplyr::slice
#' @rdname dplyr_methods
slice.reactibble <- function(.data, ..., .preserve = FALSE) {
  # Row-slice a reactibble: slice it as a plain tibble, then restore the
  # reactive column definitions and the original class before refreshing.
  cl <- class(.data)
  # Save each column's reactive definition (NULL for static columns).
  attrs <- lapply(.data, attr, "reactibble_col_def")
  # NOTE(review): the `.preserve` argument from the signature is not
  # forwarded; the call hard-codes `.preserve = TRUE` -- confirm intent.
  .data <- dplyr::slice(tibble::as_tibble(.data), ..., .preserve = TRUE)
  # Re-attach the saved definitions column-by-column.
  .data[] <- Map(function(x, y) {
    attr(x, "reactibble_col_def") <- y
    x
  }, .data, attrs)
  class(.data) <- cl
  refresh_if_relevant(.data)
}
#' Efficiently bind multiple data frames by row and column
#'
#' Counterpart of `dplyr::bind_rows` that works efficiently on *"reactibble"*
#' objects. While `bind_rows()` can be used on "reactibbles" (at time of writing),
#' it is brittle and inefficient, as it triggers more refreshes than necessary.
#'
#' @inheritParams dplyr::bind_rows
#'
#' @export
rt_bind_rows <- function(..., .id = NULL) {
  # Demote every input to a plain tibble first so a single refresh runs at
  # the end instead of one per bound piece.
  plain <- lapply(list(...), tibble::as_tibble)
  bound <- dplyr::bind_rows(!!!plain, .id = .id)
  refresh_if_relevant(as_reactibble(bound))
}
#' Efficiently bind multiple data frames by row and column
#'
#' Counterpart of `dplyr::bind_cols` that works efficiently on *"reactibble"*
#' objects. `bind_cols()` will fail "reactibbles" so this new function was
#' required.
#'
#' @inheritParams dplyr::bind_cols
#' @param .id Ignored. `dplyr::bind_cols()` has no `.id` argument; the
#'   parameter is kept only for backward compatibility of the signature.
#'
#' @export
rt_bind_cols <- function(..., .id = NULL) {
  # `bind_cols()` does not support `.id`; previously a non-NULL value was
  # spliced into `...` and bound as a bogus column. Warn and ignore instead.
  if (!is.null(.id)) {
    warning("`.id` is not supported by `rt_bind_cols()` and was ignored.",
            call. = FALSE)
  }
  dots <- lapply(list(...), tibble::as_tibble)
  data <- dplyr::bind_cols(!!!dots)
  data <- as_reactibble(data)
  refresh_if_relevant(data)
}
#' Add rows to a reactibble
#'
#' Counterpart of `tibble::add_row` that works efficiently on *"reactibble"*
#' objects. Beware of using `add_row()` instead as it would return an out of sync `reactibble`
#'
#' @inheritParams tibble::add_row
#'
#' @export
rt_add_row <- function(.data, ..., .before = NULL, .after = NULL) {
  .data <- tibble::as_tibble(.data)
  # Forward `.before`/`.after`: the previous code hard-coded NULL here,
  # silently ignoring the caller's row-placement request.
  .data <- tibble::add_row(.data, ..., .before = .before, .after = .after)
  .data <- as_reactibble(.data)
  refresh_if_relevant(.data)
}
|
23cb51d00e5bd22792b62d5803230116cce7030f | 80b8f237d1faed2e1786aada66bfdd08bf2fec68 | /man/dot-compute.wss.Rd | 48a8409c922ee7541da5e866c2d02bc0b8b06174 | [
"MIT"
] | permissive | TankredO/ckmeans | 17bb71210f4d28adb82cf5dbeb5401c658d42bd6 | f97d378e1a6ed328f263507dc0e5d83cae664478 | refs/heads/master | 2020-06-18T17:51:49.817535 | 2020-04-02T10:14:09 | 2020-04-02T10:14:09 | 196,389,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 395 | rd | dot-compute.wss.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ckmeans.R
\name{.compute.wss}
\alias{.compute.wss}
\title{Helper function for bic_kmeans2}
\usage{
.compute.wss(x, f)
}
\arguments{
\item{x}{matrix with samples as rows and features as columns}
\item{f}{vector of cluster memberships for the samples}
}
\description{
Helper function for bic_kmeans2
}
|
5a47f94079844ca835717ed8c9a6d3a3685ebadc | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /Umatrix/R/esomTrainOnline.R | 29af873d2d52b10a1f52a50b7325ee5a94362164 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,220 | r | esomTrainOnline.R | esomTrainOnline<-function(WeightVectors,Data, StartRadius=10, EndRadius=1, Columns=80, Lines=50, StartLearningRate=1, EndLearningRate=1,
NeighbourhoodFunction="cone", Toroid=TRUE, Epochs=10, NeighbourhoodCooling="linear",
LearningRateCooling="linear", ShinyProgress=NULL){
# esomTrainOnline(WeightVectors, Data)
# Trains the WeightVectors based on Data
#
# INPUT
# WeightVectors(1:m,1:n) WeightVectors that will be trained
# n weights with m components each
# Data(1:m,1:n) vectors to be projected with WeightVectors
# n datapoints with m components each
# OPTIONAL
# StartRadius Start Value for the Radius in which will be searched for neighbours
# EndRadius End Value for the Radius in which will be searched for neighbours
# Lines Height of the grid
# Columns Width of the grid
# StartLearningRate startvalue for LearningRate
# EndLearningRate endvalue for LearningRate
# NeighbourhoodFunction Method of training / kind of neighbourhood
# Toroid should the grid be considered as a Toroid
# Epochs number of Epochs in which every DataPoint will be used for training once
# NeighbourhoodCooling cooling method for radius. "linear" is the only available option at the moment.
# LearningRateCooling cooling method for learningRate. "linear" is the only available option at the moment.
# ShinyProgress generate progress output for shiny if Progress Object is given
# OUTPUT
# result: WeightVectors(1:m,1:n) the adjusted Weight Vectors
# author: Florian Lerch
# esomTrainOnline(WeightVectors, Data)
distance = "euclidC"
# load the Rcpp package
#if(!require(Rcpp)){
# install.packages('Rcpp')
# library(Rcpp)
#}
#else{
# library(Rcpp)
#}
# load the cpp Functions that will be used
#path = paste0(.pfad,'/Umatrix/')
#source(paste0(path,'R/RcppExports.R'))
#sourceCpp(paste0(path,'src/addRowWiseC.cpp'))
#sourceCpp(paste0(path,'src/bestmatchC.cpp'))
#sourceCpp(paste0(path, 'src/esomTrainedWeightVectorsConeC.cpp'))
#sourceCpp(paste0(path, 'src/esomTrainedWeightVectorsGaussC.cpp'))
#sourceCpp(paste0(path, 'src/esomTrainedWeightVectorsMexicanHatC.cpp'))
# initialize Radius and LearningRate
Radius = StartRadius
LearningRate = StartLearningRate
# initialize Progress Object for shiny
if(!is.null(ShinyProgress))
ShinyProgress$set(message = "Train Esom", value = 0)
for(i in 1:Epochs){
# permutation of Data
ind <- sample(1:nrow(Data),nrow(Data))
Data <- Data[ind,]
# cool down Radius and LearningRate at the beginning of the epoch
if(NeighbourhoodCooling=="linear") Radius <- coolDownLinear(StartRadius,EndRadius,Epochs,i)
else if(NeighbourhoodCooling == "Lead In Lead Out") Radius <- coolDownLeadInLeadOut(StartRadius,EndRadius,Epochs,i)
else stop("The neighbourhoodcooling is not recognized")
# cool down learning rate
if(LearningRateCooling=="linear") LearningRate <- coolDownLinear(StartLearningRate,EndLearningRate,Epochs,i)
else if(LearningRateCooling=="Lead In Lead Out") LearningRate <- coolDownLeadInLeadOut(StartLearningRate,EndLearningRate,Epochs,i)
else stop("The learningratecooling ist not recognized")
# give feedback to user about the current status of execution
print(paste0("Epoch: ",i," started"))
# calculate neighbourhood pattern
Pattern <- gridNeighbourhoodPattern(Radius = Radius)
# find out if the same neighbour over both sides of the torus are possible
RadiusBiggerThanTorus <- ((Radius*2) >= Columns) | ((Radius*2) >= Lines)
for(DataPoint in 1:nrow(Data)){
# execute the actual epoch
WeightVectors <- esomTrainStep(WeightVectors, Data[DataPoint,], Radius, LearningRate, Pattern, NeighbourhoodFunction, Columns, Lines, Toroid, RadiusBiggerThanTorus)
}
# perecent of necessary epochs done
if(!is.null(ShinyProgress))
ShinyProgress$inc(1/Epochs, detail = paste("Epoch",i,"done"))
}
# give user feedback that training ist finished
print("---- Esom Training Finished ----")
WeightVectors
}
|
d42255f6bc220ffa6beae696a7f51232a546c2c4 | e9b35ef501ba66abe73dcac94c122a9b120d0e4e | /plot3.R | c952d5e2d9aa169050a821bfd2ac959b184704a2 | [] | no_license | danielle-adams/ExData_Plotting1 | 50087ed055003fb53ebfc92acdd12b0f0421eff4 | c196565768185cc3c0508fdc60aa54af66ea253d | refs/heads/master | 2020-12-26T00:53:10.591103 | 2015-05-09T05:57:10 | 2015-05-09T05:57:10 | 35,214,120 | 0 | 0 | null | 2015-05-07T10:34:27 | 2015-05-07T10:34:27 | null | UTF-8 | R | false | false | 1,241 | r | plot3.R | plot3 <- function()
{
# Reproduces "Plot 3" of the Electric Power Consumption assignment:
# the three energy sub-metering series over 1-2 Feb 2007, saved to plot3.png.
library(sqldf)
# Read only the two target dates; read.csv.sql filters with SQL while
# reading, so the full (large) file never has to be held in memory.
data <- read.csv.sql(file = "household_power_consumption.txt",
sep = ";",
sql = "select * from file where Date = '1/2/2007' or Date = '2/2/2007'",
header = TRUE, row.names = NULL,
colClasses = c("character","character",
"numeric","numeric","numeric","numeric",
"numeric","numeric","numeric"))
# Force English day names on the x-axis regardless of the system locale.
Sys.setlocale("LC_TIME", "English")
# Combine the separate Date and Time columns into one POSIXlt timestamp.
# NOTE(review): the format string has no space between %Y and %H while the
# pasted input does -- this appears to rely on strptime skipping the blank;
# verify on the target platform.
data$DateAndTime <- strptime(paste(data$Date,data$Time),"%d/%m/%Y%H:%M:%S")
# Draw directly to a 480x480 PNG device.
png(filename = "plot3.png", width = 480, height = 480)
# Sub_metering_1 as a black line, then overlay series 2 (red) and 3 (blue).
plot(data$DateAndTime,data$Sub_metering_1,
type = "l",col = "black",
xlab = "",
ylab = "Energy sub metering")
lines(data$DateAndTime,data$Sub_metering_2,
col = "red")
lines(data$DateAndTime,data$Sub_metering_3,
col = "blue")
# Legend matching the three line colours.
legend("topright",lty = 1,
col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# Close the device so the PNG file is flushed to disk.
dev.off()
} |
33edb2eb1b3ad08d09554baedbb232d501ff5c9a | 49ff0bc7c07087584b907d08e68d398e7293d910 | /mbg/mbg_core_code/mbg_central/LBDCore/man/accumulate.Rd | 5698fa758b20b6e9ea784805093bb2833ad6333b | [] | no_license | The-Oxford-GBD-group/typhi_paratyphi_modelling_code | db7963836c9ce9cec3ca8da3a4645c4203bf1352 | 4219ee6b1fb122c9706078e03dd1831f24bdaa04 | refs/heads/master | 2023-07-30T07:05:28.802523 | 2021-09-27T12:11:17 | 2021-09-27T12:11:17 | 297,317,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 558 | rd | accumulate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accumulate.R
\name{accumulate}
\alias{accumulate}
\title{FUNCTION_TITLE}
\usage{
accumulate(mean, sd, months = c(1, 11, 24, 24), nsample = 1000)
}
\arguments{
\item{mean}{PARAM_DESCRIPTION}
\item{sd}{PARAM_DESCRIPTION}
\item{months}{PARAM_DESCRIPTION, Default: c(1, 11, 24, 24)}
\item{nsample}{PARAM_DESCRIPTION, Default: 1000}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if (interactive()) {
# EXAMPLE1
}
}
}
|
42ccf391cf3a5d473c9f670061a94abd46de5421 | 456d48a8739a347f8c8f5abfbfd6c1f3fb46a4a5 | /tests/testthat/test-set-at-median.R | c115e60f9cea72c77507f23305c2880f3828d6e2 | [
"MIT"
] | permissive | carlislerainey/separation | f9f27741854dd0b21175f753e0cdf8443a2a0a5b | cbe6e1fa12ff4507465e98fbe58740ae928bc9da | refs/heads/master | 2021-01-18T21:28:10.445895 | 2016-04-14T16:15:11 | 2016-04-14T16:15:11 | 25,212,781 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 896 | r | test-set-at-median.R |
context("set_at_median")
test_that("set_at_median returns a list for a data frame input", {
data(politics_and_need)
d <- politics_and_need
f <- oppose_expansion ~ gop_leg*percent_favorable_aca
X_pred_list <- set_at_median(f, d)
expect_true(is.list(X_pred_list))
})
# test_that("set_at_median returns a list for a named matrix input", {
# x <- cbind(rnorm(10), rnorm(10))
# colnames(x) <- c("one", "two")
# X_pred_list <- set_at_median(x)
# expect_true(is.list(X_pred_list))
# })
#
# test_that("set_at_median returns an error for an unnamed matrix input", {
# x <- cbind(rnorm(10), rnorm(10))
# expect_error(set_at_median(x))
# })
#
# test_that("set_at_median returns an error for a vector", {
# x <- rnorm(10)
# expect_error(set_at_median(x))
# })
#
# test_that("set_at_median returns an error for a scalar", {
# x <- 10
# expect_error(set_at_median(x))
# }) |
c04e2034ad6012d38295133bbcb611064889157d | 6b0a4605ff25543a9c5cc5a75486f2dc2a09d732 | /create_rnaseq.R | c2628c8b67d7baec734c634472fa77bba89b1528 | [] | no_license | iuabhmalat/cbio_peds | a288297e0f0cf6620c924dfcb2e06069b0fdb04f | 0e7ce1b551843d0d4ace14eed98b5077541137bb | refs/heads/main | 2023-05-22T00:00:31.740286 | 2021-06-10T13:54:11 | 2021-06-10T13:54:11 | 300,356,725 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,492 | r | create_rnaseq.R | library(dplyr)
# create_rnaseq.R -- build cBioPortal RNA-seq expression study files.
#
# Reads the Treehouse polyA log2(TPM) expression compendium, maps HGNC gene
# symbols to Entrez gene IDs via biomaRt (GRCh37 Ensembl), and writes:
#   * data_expression_rnaseq.txt          (expression matrix)
#   * meta_expression_rnaseq.txt          (cBioPortal metadata descriptor)
#   * case_lists/cases_rna_seq_mrna.txt   (sample case list)
library(tidyr)
library(stringr)
library(biomaRt)

# Input and output locations (author's Windows machine).
dataset <- "c:/Users/abhmalat/OneDrive - Indiana University/RI cBioPortal/PEDS_brrhelm/TREEHOUSE"
cbio_study <- "c:/Users/abhmalat/OneDrive - Indiana University/cbio_PEDSTreehouse/study"

setwd(dataset)

# Read the expression matrix. header = FALSE keeps the sample-ID header line
# as data row 1; it is restored verbatim when the output is written below.
# BUG FIX: the original read into `raw_genes_old` while every later step used
# the undefined `raw_genes`; a single consistent name is used throughout now.
# Also spelled out `na.strings` (the original `na = "NA"` only worked through
# partial argument matching).
# NOTE(review): nrows = 1000 reads only the first 1000 lines of a ~58k-gene
# file -- this looks like a debugging leftover; confirm before a full run.
raw_genes <- read.csv(file = "TumorCompendium_v11_PolyA_hugo_log2tpm_58581genes_2020-04-09.tsv",
                      sep = "\t", header = FALSE, na.strings = "NA", nrows = 1000)
colnames(raw_genes)[1] <- "Hugo_Symbol"

# GRCh37 Ensembl mart used for symbol -> Entrez ID mapping.
ensembl <- useMart(host = 'grch37.ensembl.org',
                   biomart = 'ENSEMBL_MART_ENSEMBL',
                   dataset = 'hsapiens_gene_ensembl')

# Map every gene symbol to its Entrez ID (row 1 holds sample IDs, so skip it).
# A symbol with several Entrez IDs produces several rows here.
entrez <- getBM(c("hgnc_symbol", "entrezgene_id"), filters = "hgnc_symbol",
                values = raw_genes$Hugo_Symbol[2:nrow(raw_genes)],
                mart = ensembl, verbose = TRUE)

# Synonym lookup: resolve external synonyms to official HGNC symbols.
# NOTE(review): `synonym` is never used downstream; the query is kept so the
# script's observable behaviour (including the biomaRt call) is unchanged,
# but it could be dropped.
syn <- unique(raw_genes$Hugo_Symbol[2:nrow(raw_genes)])
synonym <- getBM(c("external_synonym", "hgnc_symbol"), filters = "external_synonym",
                 values = syn, ensembl)

# Deduplicate the symbol -> Entrez mapping and rename to cBioPortal headers.
entrez[, "hgnc_symbol"] <- as.factor(entrez[, "hgnc_symbol"])
entrez_map <- unique(arrange(entrez, hgnc_symbol, entrezgene_id))
names(entrez_map) <- c("Hugo_Symbol", "Entrez_Gene_Id")

# Attach Entrez IDs to the expression matrix (row count can grow when a
# symbol maps to several Entrez IDs).
genes_with_entrez <- left_join(raw_genes, entrez_map, by = "Hugo_Symbol")

# Row 1 is the header row (header = FALSE above), so label the two ID columns.
genes_with_entrez[1, "Hugo_Symbol"] <- "Hugo_Symbol"
genes_with_entrez[1, "Entrez_Gene_Id"] <- "Entrez_Gene_Id"

# Reorder so the two gene-ID columns lead, then drop duplicate rows.
genes_final <- genes_with_entrez %>% dplyr::select(Hugo_Symbol, Entrez_Gene_Id, everything())
genes_final <- unique(genes_final)

setwd(cbio_study)

# --- Expression data file ---------------------------------------------------
rnaDataFile <- "data_expression_rnaseq.txt"

if (file.exists(rnaDataFile))
  file.remove(rnaDataFile)
file.create(rnaDataFile)

# col.names = FALSE because row 1 of `genes_final` already carries the header.
write.table(genes_final, rnaDataFile, sep="\t", col.names = FALSE, row.names = FALSE,
            quote = FALSE, append = TRUE, na = "NA")
print("RNA Expression data file completed")

# --- Metadata descriptor ----------------------------------------------------
rnaMetaFile <- "meta_expression_rnaseq.txt"

f <- file(rnaMetaFile)
writeLines(c(
  "cancer_study_identifier: peds_treehouse",
  "genetic_alteration_type: MRNA_EXPRESSION",
  "datatype: CONTINUOUS",
  "stable_id: rna_seq_mrna",
  "show_profile_in_analysis_tab: false",
  "profile_name: mRNA expression",
  "profile_description: RNA Gene Expression Log2TPM [Continuous]",
  paste("data_filename: ", rnaMetaFile <- rnaDataFile)
), f
)
close(f)
print("RNA Expression metafile completed")

# --- Case list --------------------------------------------------------------
# Sample IDs live in row 1 of the expression matrix (all columns but the 1st).
sampleIds <- raw_genes[1, 2:ncol(raw_genes)]

rnaCaseListFile <- "cases_rna_seq_mrna.txt"
setwd('case_lists')

if (file.exists(rnaCaseListFile)) {
  file.remove(rnaCaseListFile)
}
file.create(rnaCaseListFile)

f <- file(rnaCaseListFile)
writeLines(c(
  "cancer_study_identifier: peds_treehouse",
  "stable_id: peds_treehouse_rna_seq_mrna",
  "case_list_name: RNA Seq Log2TPM",
  "case_list_description: RNA Seq Log2TPM [Continuous]",
  paste("case_list_ids: ", paste(sampleIds, collapse = '\t'))
), f
)
close(f)
print("Case lists completed")
#raw_clinical <- read.csv(file = "clinical_TumorCompendium_v11_PolyA_2020-04-09.tsv",
# sep = "\t", header = TRUE, na.strings = c("N/A", "", "unavailable"))
#
#clinical <- raw_clinical
## colnames(clinical) <- toupper(colnames(clinical))
#
## Set site_donor_id as substring of th_sampleid before last _
#clinical$site_donor_id <- ifelse(is.na(clinical$site_donor_id) & startsWith(clinical$site_id, "TH"),
# substr(clinical$th_sampleid, 0, regexpr("_[^_]*$", clinical$th_sampleid) - 1),
# clinical$site_donor_id)
#
## find location of third underscore for each sample_id
## Turs out its 17 for 'Target' site_id and 13 for TCGA uniformly
##clinical$dashes <- str_locate_all(clinical$th_sampleid, "-")
#
#clinical$site_donor_id <- ifelse(is.na(clinical$site_donor_id) & tolower(clinical$site_id) == "target",
# substr(clinical$th_sampleid, 0, 17 - 1),
# clinical$site_donor_id)
#
#clinical$site_donor_id <- ifelse(is.na(clinical$site_donor_id) & tolower(clinical$site_id) == "tcga",
# substr(clinical$th_sampleid, 0, 13 - 1),
# clinical$site_donor_id)
#
## Update verbose values of yes to Yes
#clinical$pedaya <- ifelse(clinical$pedaya == "Yes, age < 30 years", "Yes", clinical$pedaya)
#
## clinical <- unique(clinical[, !(names(clinical) %in% "th_sampleid")])
## Final clinical data DF
#header <- data.frame(site_donor_id = c("#Patient ID", "#Patient ID", "#STRING", "#1", "PATIENT_ID"),
# age_at_dx = c("Age at Diagnosis", "Age at Diagnosis", "NUMBER", "1", "AGE"),
# gender = c("Gender", "Gender", "STRING", "1", "GENDER"),
# site_id = c("Site ID", "Site ID", "STRING", "1", "SITE_ID"),
# pedaya = c("Pediatric Adolescent and Young Adult", "Pediatric Adolescent and Young Adult", "STRING", "1", "PEDAYA"))
#
#clinical_final <- rbind(header, unique(clinical %>% dplyr::select(c("site_donor_id", "age_at_dx",
# "pedaya", "gender", "site_id"))))
#
#setwd(cbio_code)
#clinicalFile <- "data_clinical_patient.txt"
#
#if(file.exists(clinicalFile))
# file.remove(clinicalFile)
#file.create(clinicalFile)
#
## Write to file
#write.table(clinical_final, clinicalFile, sep="\t", col.names = FALSE, row.names = FALSE,
# quote = FALSE, append = TRUE, na = "NA")
#
#clinicalMetaFile <- "meta_clinical_patient.txt"
#
#f <- file(clinicalMetaFile)
#writeLines(c(
# "cancer_study_identifier: peds_treehouse",
# "genetic_alteration_type: CLINICAL",
# "datatype: PATIENT_ATTRIBUTES",
# paste("data_filename: ", clinicalFile)
#), f
#)
#close(f)
#print("Clinical completed")
# Write to file cancer types and subtypes
#write.table(unique(samples[, c("disease", "CANCER_TYPE")]), "cancer_types.tsv", sep="\t", col.names = TRUE, row.names = FALSE,
# quote = FALSE, append = TRUE, na = "")
#
#
#samples_genes <- as.data.table(colnames(raw_genes[2:ncol(raw_genes)]))
#
## Get samples with blank site_donor_id
#no_site_id <- raw_clinical %>% dplyr::filter(is.na(raw_clinical$site_donor_id))
#
## Set site_donor_id as substring of th_sampleid before last _
#no_site_id$site_donor_id <- substr(no_site_id$th_sampleid, 0, regexpr("_[^_]*$", no_site_id$th_sampleid) - 1)
#
#df_clinical <- rbind(raw_clinical %>% dplyr::filter(is.na(raw_clinical$site_donor_id)))
|
0bb6ec37f98543960d2be4eeeeb5c60a956e3b12 | 3f7e561091ac744e5119ffc4cde3d0d0a6f7b14b | /app.R | df05f921ab54876b52f9bf3aec170e9568625d5e | [] | no_license | rtaph/h2h | 55d4e12939f945307cc15b030fb36017337c79fd | 0d4c08faa9d7feead09675ad5d706947748a1d90 | refs/heads/main | 2023-04-17T20:02:08.705624 | 2021-04-30T19:18:19 | 2021-04-30T19:47:57 | 347,271,179 | 0 | 3 | null | 2021-04-30T19:18:59 | 2021-03-13T04:24:50 | HTML | UTF-8 | R | false | false | 10,202 | r | app.R | # Load libraries
library(dash)
library(dashHtmlComponents)
library(dashCoreComponents)
library(dashBootstrapComponents)
library(dashTable)
library(dplyr)
library(purrr)
library(stringr)
library(devtools)
library(here)
# Load custom functions and data from the local package (helpers used below
# such as custom_css(), viz_graph(), closest_business_name(), ...).
devtools::load_all(".")
# Column ids (as they appear in the data) and the display names shown in the
# "Inter-corporate Relationships" table.
tbl_col_ids <- c("NAME", "LEVEL", "CRES")
tbl_col_names <- c("Name", "Level", "Country")
# Load CSS Styles (project-defined list of style fragments; see custom_css()).
css <- custom_css()
# Sidebar legend text; rendered verbatim as Markdown by dccMarkdown() below.
legend <- "
**Join Rules for Graph Links**
- **R1:** Exact match on postal code, business name, business type, and business sub-type.
- **R2:** Exact match on postal code, business name, and business type.
- **R3:** Exact match on business name, business type, and business sub-type.
- **R4:** Exact match on business name and GPS coordinates.
- **R5:** Exact match on trade name and GPS coordinates.
- **R6:** Exact match on business name and trade name.
- **R7:** Exact match on business name only.
- **R8:** Business name in one year matches trade name in the next year (or vice versa).
- **R9:** Exact match on postal code with a phonetic match on the business name.
- **R10:** Exact match on postal code with a phonetic match on the trade name.
- **R11:** Exact match on postal code, phonetic match between business name and trade name.
- **R12:** Match to the StatsCan Inter-Company Ownership database.
"
# Disk-backed cache used to memoise the slower callbacks further down.
h2h_cache <- cachem::cache_disk(rappdirs::user_cache_dir("h2h"))
# App layout: a Bootstrap page with an input sidebar (left column) and a
# tabbed results panel (right column) whose components are populated by the
# callbacks defined further down in this file.
app <- Dash$new(external_stylesheets = dbcThemes$BOOTSTRAP)
app$title("CoV Business Risk Dashboard")
app$layout(
dbcContainer(
list(
htmlH1("Company Risk Dashboard",
style = css$header
),
dbcRow(
list(
# Left column: company-name input, submit button and the join-rule legend.
dbcCol( # INPUT CONTROLS
id = "options-panel",
style = css$box,
md = 4,
list(
htmlBr(),
dbcLabel("Company Name:"),
# dccDropdown(id = "typed_bname",
# options = map(bnames, ~ list(label = ., value = .)),
# value = "Gyoza Bar Ltd",
# clearable = TRUE,
# searchable = FALSE # TODO: test this out for speed
dccInput(
id = "typed_bname",
# value = "Listel Canada Ltd" # for testing
# value = "Westfair Foods Ltd"
value = "Gyoza Bar Ltd" # for testing
# value = "VLC Leaseholds Ltd" # for testing
),
htmlBr(),
htmlButton("Submit", n_clicks = 0, id="btn_submit"),
htmlBr(),
# dbcLabel("Address:"),
# dccInput(),
# Hidden div: holds the resolved business name; acts as the shared
# input for all downstream callbacks.
htmlDiv(id="input_bname", children = "Gyoza Bar Ltd", hidden = TRUE),
htmlHr(),
dccMarkdown(legend, style = css$small)
)
),
htmlBr(),
# Right column: two tabs -- "License History" and "Business Details".
dbcCol( # PLOTTING PANEL
list(
dccTabs(id = "tabs", children = list(
# Tab 1: overall risk score, per-factor comments and the network plot.
dccTab(label = "License History", children = list(
htmlDiv(
list(
dbcCard(
list(
dbcRow(
list(
dbcCol(
list(
htmlH5("Risk Score", style=list("text-align"="center")),
dbcAlert(id = "overall_risk_score", style=list("text-align"="center"))
)
),
dbcCol(
list(
htmlH5(id = "card_score", style=list("text-align"="center")),
htmlDiv(id = "missing_data_comment"),
htmlDiv(id = "history_comment"),
htmlDiv(id = "location_comment"),
htmlDiv(id = "operative_score")
)
)
)
),
dbcCardBody(
# The license-history network is rendered as an embedded HTML page
# (srcDoc is filled in by the network_plot callback).
htmlDiv(id = "network_div", children = list(
htmlIframe(
height = 500, width = 500,
id = "network_plot",
style = css$noborder
)
))
)
)
),
dbcCard(
list(
dbcCardBody()
)
)
)
)
)),
# Tab 2: business summary, company-size plot and related companies.
dccTab(label = "Business Details", children = list(
htmlDiv(
list(
htmlDiv(list(
dbcCard(
list(
dbcCardHeader('Business Summary'),
dbcCardBody(list(
# Business Type Table
dashDataTable(
id = 'co-type',
columns = list(label = "Primary Business Type", value = "PrimaryBusinessType"),
page_size = 10,
style_cell = css$tbl_fonts,
style_header = css$tbl_hrow,
style_cell_conditional = css$bs_tbl_align,
style_as_list_view = TRUE
)
)
)
))), style=css$horiz_split),
htmlDiv(list(
dbcCard(
list(
dbcCardHeader("Company Size"),
dbcCardBody(list(
dccGraph(id = "num_emp_plot")
))
)
)
), style=css$horiz_split),
htmlDiv(list(
dbcCard(
list(
dbcCardHeader('Inter-corporate Relationships'),
dbcCardBody(
list(
# Related-companies table; columns pair the data ids in tbl_col_ids
# with the display names in tbl_col_names (defined at the top).
dashDataTable(
id = "related_co_table",
page_size = 10,
data = df_to_list(coi_data),
columns = map2(tbl_col_ids, tbl_col_names, function(col, col2) list(id = col, name = col2)),
style_cell_conditional = css$rc_tbl_colw,
fixed_columns = css$fixed_headers,
css = css$tbl_ovrflw,
style_data = css$ovrflow_ws,
style_as_list_view = TRUE,
style_header = css$tbl_hrow,
style_cell = css$tbl_fonts
))
)
)
)
))),
)
))
))
),
)
),
style = css$no_left_pad
),
htmlBr()
)
)
)
# Implement Submit button: on click, resolve the typed text to the closest
# known business name and store it in the hidden div "input_bname", which
# all other callbacks listen to.
app$callback(
output("input_bname", "children"),
list(
input("btn_submit", "n_clicks"),
state("typed_bname", "value")
),
function (n_click, value) {
closest_business_name(value)
}
)
# update related companies table on "Business Details" tab
app$callback(
list(output("related_co_table", "data")),
list(input("input_bname", "children")),
memoise::memoize(
make_related_co_table,
cache = h2h_cache
)
)
# update business summary on "Business Details" tab
app$callback(
list(
output("co-type", "data"),
output("co-type", "columns")
),
list(input("input_bname", "children")),
memoise::memoize(
make_co_type,
cache = h2h_cache
)
)
# plot number of employees for industry on "Business Details" tab
app$callback(
output("num_emp_plot", "figure"),
list(input("input_bname", "children")),
num_emp_plot
)
# update network plot on "License History" tab: render the graph widget to a
# temporary HTML file and feed its contents into the iframe's srcDoc.
app$callback(
output("network_plot", "srcDoc"),
list(
input("input_bname", "children")
),
memoise::memoize(function(x = "Gyoza Bar Ltd") {
viz <- viz_graph(x)
# workaround: saveWidget needs a file path, so write then read back
tempfile <- here::here("network.html")
htmlwidgets::saveWidget(viz, file = tempfile)
paste(readLines(tempfile), collapse = "")
},
cache = h2h_cache)
)
# show the resolved business name as the score-card heading
app$callback(
output("card_score", "children"),
list(
input("input_bname", "children")
),
closest_business_name
)
# per-factor risk comments (each memoised against the disk cache)
app$callback(
output("missing_data_comment", "children"),
list(
input("input_bname", "children")
),
memoise::memoize(get_missing_data_comment,
cache = h2h_cache)
)
app$callback(
output("history_comment", "children"),
list(
input("input_bname", "children")
),
memoise::memoize(
get_history_comment,
cache = h2h_cache
)
)
app$callback(
output("location_comment", "children"),
list(
input("input_bname", "children")
),
memoise::memoize(
get_location_comment,
cache = h2h_cache
)
)
app$callback(
output("operative_score", "children"),
list(
input("input_bname", "children")
),
memoise::memoize(
get_operative_comment,
cache = h2h_cache
)
)
# overall risk score: sets both the alert's text and its colour
app$callback(
list(
output("overall_risk_score", "children"),
output("overall_risk_score", "color")
),
list(
input("input_bname", "children")
),
memoise::memoize(
get_overall_risk_score_card,
cache = h2h_cache
)
)
# Run locally with defaults; on Heroku (DYNO env var set) bind to all
# interfaces so the platform router can reach the app.
if (Sys.getenv("DYNO") == "") {
app$run_server(
debug = FALSE,
dev_tools_hot_reload = FALSE
)
} else {
app$run_server(host = "0.0.0.0")
}
|
88d8dcf065807ba0ff70cf591b729c8c12bf54a1 | bb172dfa75ed6d31687f95e71732b2478da82ae2 | /HW1-LawOfLargeNumbers.R | d75f53bfee40dc16a0029389ada1e3bd7a784820 | [] | no_license | ryanching93/RProgrammingPractice | 5b329579b15370d219191464c867e019e04f5521 | 2c1feebd06ee9856c3fd0517b504c8045deeb4dc | refs/heads/main | 2023-07-11T06:36:09.384644 | 2021-08-13T02:21:54 | 2021-08-13T02:21:54 | 395,461,474 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 736 | r | HW1-LawOfLargeNumbers.R | N <- 100 # input/specify sample size
# Law of Large Numbers demo: estimate P(-1 < X < 1) for X ~ N(0, 1).
# N (the sample size) is defined on the preceding line of the script.
x <- rnorm(N) # draw N standard-normal variates
# Vectorized count of observations strictly inside (-1, 1). This replaces the
# original element-by-element for-loop: idiomatic R operates on whole vectors,
# which is both shorter and faster, and yields exactly the same count.
counter <- sum(x > -1 & x < 1)
answer <- counter/N # calculate hit-ratio; approaches ~0.6827 as N grows
answer # print answer in console
Reflections:
- As the sample size N increases (100, 1000, 10000), the computed hit-ratio (counter/N) gets closer to its expected value.
- This illustrates the Law of Large Numbers (LLN): the ratio converges to about 68.27%, the probability that a standard normal variable falls within one standard deviation of the mean.
|
93dc4f9d6c3f7fc8737c7d9f528273b776d35f30 | 88780697a1ab8f19673a62163c660fa25d6a73e9 | /R/fplyr-package.R | 965ead1ce05c538f158a319e15d97e3dc72beec4 | [] | no_license | fmarotta/fplyr | 6b5ce95042ea07d13f70a850c4933d5748fecb7c | 422c714802a144db3a93521c80fa0c7dfd034d86 | refs/heads/master | 2023-09-05T20:36:17.560908 | 2023-08-24T12:35:43 | 2023-08-24T12:35:43 | 220,303,015 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,293 | r | fplyr-package.R | #' fplyr: Read, Process and Write
#'
#' This package provides a set of functions to quickly read files chunk by
#' chunk, apply a function to each chunk, and return the result. It is
#' especially useful when the files to be processed don't fit into the
#' RAM. Familiarity with the \code{data.table} package is essential in order
#' to use \code{fplyr}.
#'
#' @section Definitions:
#' \describe{
#' \item{Chunked file:}{A delimited file where many contiguous rows have
#' the same value on the first field. See the example below.}
#' \item{Block:}{Any portion of the chunked file such that the first field
#' does not change.}
#' \item{Chunk:}{Chunks are used internally; they consist of one or more block, but
#' regular users should not be concerned with them, and can consider chunks
#' and blocks as synonyms.}
#' }
#'
#' @section Main functions:
#' The main functions are \code{ffply} and \code{flply}. The former writes the processed
#' data into a file, while the latter returns it as a list. The former is also much faster.
#' There is also \code{fdply}, which returns a \code{data.table} and is useful to only read
#' a certain number of chunks from the file (one by default). \code{fmply} is useful
#' when the original file needs to be processed in many ways and each outcome must
#' be written to a different file.
#'
#' @section Note:
#' Throughout the documentation of this package, the word 'file' actually means
#' 'chunked file.'
#'
#' @section Examples:
#' A chunked file may look as follows:
#'
#' |**V1**|**V2**| **V3** |**V4**|
#' |------|------|--------|------|
#' | ID01 | ABC | Berlin | 0.1 |
#' | ID01 | DEF | London | 0.5 |
#' | ID01 | GHI | Rome | 0.3 |
#' | ID02 | ABC | Lisbon | 0.2 |
#' | ID02 | DEF | Berlin | 0.6 |
#' | ID02 | LMN | Prague | 0.8 |
#' | ID02 | OPQ | Dublin | 0.7 |
#' | ID03 | DEF | Lisbon | -0.1 |
#' | ID03 | LMN | Berlin | 0.01 |
#' | ID03 | XYZ | Prague | 0.2 |
#'
#' The important thing is that the first field has some contiguous lines that
#' take the same value. The values of the other fields are unimportant. This
#' package is useful to process this kind of files, block by block.
#'
#' @keywords internal
"_PACKAGE"
## usethis namespace: start
#' @import data.table
## usethis namespace: end
NULL
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.