blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
085838df474ffeb7de221668ce269a5d71ed05a6 | 8cf8a38b34336822ac53776f8a95afb2eef74afe | /Plot1.R | b5bbdb388193a92b7e2ced0bea7a09a3896ad1ef | [] | no_license | DoanTrangNguyen/ExData_Plotting1 | 129a73123a3e789a981efb5e5a91cf34afd20fce | aa5fe2a586a5316dc07a96007b990d16e4eee7c0 | refs/heads/master | 2021-01-15T12:04:50.517188 | 2014-10-12T18:50:49 | 2014-10-12T18:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 273 | r | Plot1.R | ## Plot 1 : Histogram of the Global_reactive_power
# Coerce the factor column to numeric via character (as.numeric() applied
# directly to a factor would return level codes, not the stored values),
# then draw the red histogram of household global active power.
# NOTE(review): assumes Energy_dates is already loaded in the session -
# this snippet does not read the data itself.
hist(as.numeric(as.character(Energy_dates$Global_active_power)), xlab = "Global Active Power (kilowatts)", main = "Global Active Power", col= "Red")
# Copy the on-screen plot to a 480x480 PNG device, then close that device.
dev.copy(png, file ="plot1.png", width = 480, height = 480)
dev.off()
|
289062e66d298e1268d4006d4c3311ec294aba7c | 7d8523b9d5474283200a9443dc9230c5beb73bf0 | /man/create_package.Rd | 42122fbfda82444c5b6ae63a24d290fb091d2075 | [
"MIT"
] | permissive | frictionlessdata/frictionless-r | 0bac64947354d65d55d8dd81a2f809c2b9e4a6b6 | 7dba18d98afe46480367ef2f878160efecb7e629 | refs/heads/main | 2023-09-05T04:36:33.046962 | 2023-06-20T14:10:45 | 2023-06-20T14:10:45 | 373,431,134 | 13 | 8 | NOASSERTION | 2023-09-14T07:07:02 | 2021-06-03T08:13:42 | R | UTF-8 | R | false | true | 857 | rd | create_package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_package.R
\name{create_package}
\alias{create_package}
\title{Create an empty Data Package}
\usage{
create_package()
}
\value{
List describing a Data Package.
}
\description{
Initiates a list describing a \href{https://specs.frictionlessdata.io/data-package/}{Data Package}.
This empty Data Package can be extended with metadata and resources (see
\code{\link[=add_resource]{add_resource()}}).
Added resources will make the Data Package meet \href{https://specs.frictionlessdata.io/tabular-data-package/}{Tabular Data Package}
requirements, so \code{profile} is set to \code{tabular-data-package}.
}
\examples{
# Create a Data Package
package <- create_package()
str(package)
}
\seealso{
Other create functions:
\code{\link{create_schema}()}
}
\concept{create functions}
|
7d987cbfe412c2751f115503913b52f8547fcbc1 | c401a32ff61704b1b04a28a1f3195144a1b5febd | /man/eq_create_label.Rd | b7f0bb3bd90f12cd8475ad3fe265e5a79f65ab98 | [] | no_license | MaxSunshine/Earthquake | dd43c38a881463743daa5a756791cc4bd06d4f51 | 757a828957d2c16ae856b02036b4dbd920ea1436 | refs/heads/master | 2021-07-11T21:55:28.092265 | 2017-10-17T11:53:42 | 2017-10-17T11:53:42 | 107,183,703 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,100 | rd | eq_create_label.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eq_map.R
\name{eq_create_label}
\alias{eq_create_label}
\title{Function eq_create_label}
\usage{
eq_create_label(df)
}
\arguments{
\item{df}{- A dataframe of containing 3 columns LABELS,EQ_PRIMARY and DEATHS.}
}
\description{
eq_create_label() takes the dataset as an argument and creates an HTML label
that can be used as the annotation text in the leaflet map. This function puts together a
character string for each earthquake that will show the cleaned location (as cleaned by the eq_location_clean()
function created in Module 1), the magnitude (EQ_PRIMARY), and the
total number of deaths (TOTAL_DEATHS), with boldface labels for
each ("Location", "Total deaths", and "Magnitude"). If an earthquake is missing values for any of these,
both the label and the value should be skipped for that element of the tag.
}
\examples{
library(magrittr)
labels<-readr::read_delim(system.file("extdata", "signif.txt", package="Earthquake"), delim = "\\t")
labels<-labels \%>\% eq_clean_data \%>\% eq_create_label
head(labels)
}
|
440eb5c66d50e4fb945fc4a8498668b32d15bfe0 | 23409c7f5b24e392437210bae4c1a82fc107168e | /lecture_13/fun_RCurl_gender.R | e6421d79ad9b00a24caea939bb22c7a6d9cc1992 | [
"Apache-2.0"
] | permissive | MagdaHa/MB2_R | bdbcdf68ecc505803b3d6201b2c2245ea3cfecb1 | 202f7273e8e6844151a49b0172ae11b809891d12 | refs/heads/master | 2020-04-02T11:05:49.858466 | 2019-02-16T15:48:59 | 2019-02-16T15:48:59 | 154,370,893 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,798 | r | fun_RCurl_gender.R | #############
####RCurl####
#############
library(RCurl)
library(bitops)
# get the data
# Fetch a published Google Sheet (CSV export) straight from the web:
# getURL() downloads the document and textConnection() feeds it to read.csv().
x<- read.csv(textConnection(getURL("https://docs.google.com/spreadsheets/d/e/2PACX-1vTbXxJqjfY-voU-9UWgWsLW09z4dzWsv9c549qxvVYxYkwbZ9RhGE4wnEY89j4jzR_dZNeiWECW9LyW/pub?gid=0&single=true&output=csv")))
#inspect the data
x; summary(x)
#------------------------------------------------
library(reshape)
# Reshape wide -> long (one row per column/value pair) for ggplot.
x2 <- melt(data=x)
library(ggplot2)
ggplot(x2, aes(x=variable, y=value))+geom_boxplot()
#-------------------------------------------------
#plot points plus boxplot and add jitter, adding variable"cumsum"
# Column totals: last row of the column-wise cumulative sums, one per column.
x.cs <- data.frame(variable=names(x), cs=t(cumsum(x)[nrow(x),]))
names(x.cs) <- c("variable", "cumsum")
x2 <- melt(data=x)
# Attach each column's total to every long-format row so it can drive colour.
x3 <- merge(x.cs, x2, by.x="variable", all=T)
ggplot(x3, aes(x=variable, y=value, color=cumsum))+geom_point()
ggplot(x3, aes(x=variable, y=value, color=cumsum))+geom_boxplot(alpha=.5)+geom_point(alpha=.7, size=1.5, position=position_jitter(width=.25, height=.5))
#----------------------------------------------------
# NOTE(review): install.packages() inside a script reinstalls on every run;
# consider guarding with requireNamespace() instead.
install.packages("gender")
library(gender)
library(genderdata)
# Infer a gender for each column name (the columns are first names).
x.g <- gender(names(x))
#-----------------------------------------------------
# Rename the name column so it matches x3 for the merge below.
colnames(x.g)[1] <- "variable"
x4 <- merge(x3, x.g, by.x="variable", all=T)
a <- ggplot(x4, aes(x=variable, y=value, color=cumsum))+geom_boxplot()+facet_wrap(~gender)
a
#---------------------------------------------------------
#adjust the graph
a + coord_flip()
a+theme(axis.text.x=element_text(angle = 45, vjust=1, hjust=1))
#-------------------------------------------------------
#removing male names from the female plot and vice versa (free x scale per facet)
a <- ggplot(x4, aes(x=variable, y=value, color=cumsum))+geom_boxplot()+facet_wrap(~gender, scales="free_x")
a+theme(axis.text.x=element_text(angle = 45, vjust=1, hjust=1))
|
5ebff73a39db54d811c83ecd5776e8f6c2dea78a | 2b1add69981a1cc7d4c32fe08b3b30a43a0dafb3 | /stillborns_asexuals_170103.R | d66e7dc946bb47fc865c69751ebee6a532cb931e | [] | no_license | Redsiana/Hexatopia | f4830ede8f66c6c6aa11e38af099551389bb1f4d | 3c42140901d0d2d3525825aa6012d40350365b69 | refs/heads/master | 2021-01-11T14:23:09.072745 | 2017-02-08T17:02:51 | 2017-02-08T17:02:51 | 81,350,086 | 0 | 0 | null | 2017-02-08T17:02:52 | 2017-02-08T16:26:21 | R | UTF-8 | R | false | false | 1,678 | r | stillborns_asexuals_170103.R | ### where viability selection occurs depending on the levels of homozygosity
# INPUT: babygenome, babysex, babyR, babyB, G
# OUTPUT: newbabygenome, newbabysex, newbabyR, newbabyB
# the relationship between heterozygosity and survival is:
# curve(expr = Ka /( 1 + 1*exp( -B*(x-M) ) ), from=0, to=1)
# Viability selection on juveniles: each juvenile survives with a probability
# that is a logistic function of its heterozygosity (inbreeding depression);
# surviving sexual females may then mutate to asexual reproduction.
#
# Args:
#   babygenome - genotype matrix, one row per juvenile; loci coded 1 are
#                heterozygous (entries == 1 are counted below)
#   babysex    - sex per juvenile ("fem" is the value used below)
#   babyR, babyB - per-juvenile trait vectors, filtered in parallel
#   B, M, Ka, bsline - logistic survival parameters: slope, midpoint,
#                asymptote and baseline survival probability
#   pmut       - probability a surviving sexual female mutates to asexual
#   babyrepro  - reproduction mode per juvenile ("s" sexual / "a" asexual)
#   G          - number of loci (denominator for the heterozygosity fraction)
#
# Returns: list of the survivors' genome, sex, R, B and repro vectors.
stillborn <- function( babygenome,
babysex,
babyR,
babyB,
B,
M,
Ka,
bsline,
pmut,
babyrepro,
G ){
# each juvenile's proportion of heterozygous loci
heteroz <- apply( babygenome == 1, 1 , sum ) / G
# each juvenile's survival probability to inbreeding depression
# (logistic in heterozygosity, shifted up by the baseline bsline)
inbreeding <- Ka /( 1 + 1*exp( -B*(heteroz-M) )) + bsline
# each juvenile's actual survival
# (one Bernoulli draw per juvenile; mapply invokes rbinom element-by-element,
# so the RNG stream order is part of this function's reproducible behaviour)
babysurvival <- as.logical( mapply( FUN = rbinom, prob = inbreeding, size = 1, n = 1 ) )
# Keep only survivors in every parallel structure.
# NOTE(review): with exactly one survivor, babygenome[ babysurvival, ]
# drops to a vector - confirm downstream code tolerates that.
newbabygenome <- babygenome[ babysurvival, ]
newbabysex <- babysex[ babysurvival ]
newbabyR <- babyR[ babysurvival ]
newbabyB <- babyB[ babysurvival ]
newbabyrepro <- babyrepro[ babysurvival ]
# Number of surviving sexual females that mutate to asexuality, then which
# ones (indices within the fem & "s" subset); flag those as "a".
# NOTE(review): sample(1:n, ...) is unsafe at n == 0 (1:0 is c(1, 0)), but
# nmutants is then also 0, so no mutation is applied in that case.
nmutants <- rbinom(n = 1, size = sum( newbabysex == "fem" & newbabyrepro == "s" ), prob = pmut)
idmutants <- sample( 1: sum( newbabysex == "fem" & newbabyrepro == "s" ), nmutants)
newbabyrepro[ newbabysex == 'fem' & newbabyrepro == "s"][ idmutants ] <- "a"
return( list( newbabygenome = newbabygenome, newbabysex = newbabysex, newbabyR = newbabyR, newbabyB = newbabyB, newbabyrepro = newbabyrepro ))
}
|
1496f4db292ef002f519b926b03e3d81ad00849d | 09b01558d9439fea76b1b40f6931820484bf7731 | /practice_3pars.R | 956f7877bd6cf862a33b54beb9a476a1d896eabf | [] | no_license | dtsh2/LBV | 30c3d5c66537f31f6ba8341899e23dfa4bafb0c7 | 6a1e05211b90d2b32a279e0c5cc1efc9a153ac96 | refs/heads/master | 2021-01-01T19:38:18.497956 | 2017-11-13T03:52:39 | 2017-11-13T03:52:39 | 13,231,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 785 | r | practice_3pars.R | result1<-rep(1:3,3)
# Practice script: build a 3-parameter grid, attach a random "score" to every
# combination, locate the best (minimum) combination, then image-plot the
# per-(Var1, Var2) minima. (result1 is defined just above, alongside these.)
result2<-rep(1:3,3)
result3<-rep(1:3,3)
# All combinations of the three parameter vectors (columns Var1..Var3).
pars<-expand.grid(result1,result2,result3)
dim(pars)
# One random draw per parameter combination stands in for a model result.
res<-rnorm(length(pars[,1]))
results<-cbind(pars,res)
# Parameter values at the overall minimum of the score (column 4).
results$Var1[which(results[,4]==min(results[,4]))]
results$Var2[which(results[,4]==min(results[,4]))]
results$Var3[which(results[,4]==min(results[,4]))]
parsplot <- expand.grid(result1,result2)
parsplotres<-cbind(parsplot,result1)*NA
library(data.table)
d <- data.table(results)
# Minimum score within each (Var1, Var2) cell, i.e. profiled over Var3;
# data.table names the aggregate column V1.
rest<-d[, min(res, na.rm=TRUE), by=c("Var1","Var2")]
library(akima)
library(lattice)
library(tgp)
library(rgl)
library(fields)
# Greek axis labels via plotmath expressions.
rholab<-expression(symbol(rho))
betalab<-expression(symbol(beta))
# Interpolate the gridded minima onto a regular surface; image + contours.
zzg <- interp(rest$Var1,rest$Var2,rest$V1)
image(zzg,ann=T,ylab=rholab,xlab=betalab)
contour(zzg,add=T,labcex=1,drawlabels=T,nlevels=10)
|
a8efcca8920dc6f10055b785f9885374b2e8648c | 0d688e663e82ca38de40a8adcf601194d241555a | /R-packages/evalcast/man-roxygen/predictions_cards-template.R | 9a196f2d9a46241ac6550e03cbcde3e9a78c993a | [] | no_license | maskegger/covidcast | 386ddc3b430c7aa26e739188c7c37c919a31a17d | 51754be1e859f1f46ae664e4326dacaabd86868e | refs/heads/main | 2023-09-05T23:34:18.994038 | 2021-10-23T01:33:57 | 2021-10-23T01:33:57 | 440,409,824 | 1 | 0 | null | 2021-12-21T06:10:56 | 2021-12-21T06:10:56 | null | UTF-8 | R | false | false | 453 | r | predictions_cards-template.R | #' @return Long data frame of forecasts with a class of `predictions_cards`.
#' The first 4 columns are the same as those returned by the forecaster. The
#' remainder specify the prediction task, 10 columns in total:
#' `ahead`, `geo_value`, `quantile`, `value`, `forecaster`, `forecast_date`,
#' `data_source`, `signal`, `target_end_date`, and `incidence_period`. Here
#' `data_source` and `signal` correspond to the response variable only.
|
be0efeaaf2be54d0494c145be5cd1ddfa62b0f60 | 378d8fcfa2227494e8dad0d46e954d7de553c510 | /ipserver/ReportDaily.R | 03499ab5ed7d33cc32d2fe9b93bb5d461100882e | [] | no_license | lhfei/R-Programming | fbff7f1488a0beb696b5267c4293e81308c77fed | 63565ee9bbf029fcbac2f5425845e9d8438320a5 | refs/heads/master | 2021-08-15T00:41:51.474596 | 2017-11-17T02:06:27 | 2017-11-17T02:06:27 | 111,045,374 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,174 | r | ReportDaily.R | library(sqldf)
# Daily error report: load two days of headerless tab-separated logs, keep
# only error codes 303000 and 208000 per CT group, write each day out as CSV.
report_0621 <- read.delim("E:/R_Workspace/ipserver/data/2015-06-21/000000_0", header=FALSE)
report_0623 <- read.delim("E:/R_Workspace/ipserver/data/2015-06-23/000000_0", header=FALSE)
# The raw files carry no header row; apply a common set of column names.
report_title <- c("Err", "Total", "CT", "ORIGIN")
names(report_0621) <- report_title
names(report_0623) <- report_title
# Distinct CT values for 06-21 (sqldf runs SQL against the data frames).
report_0621_ct <- sqldf('select ct from report_0621 group by ct')
report_0621_daily <- sqldf('SELECT a.ERR, a.TOTAL, a.CT, a.ORIGIN FROM report_0621 a INNER JOIN report_0621_ct b ON a.CT = b.CT WHERE a.ERR IN ("303000", "208000")')
# NOTE(review): the disabled query below filters on "30300" (five digits),
# which cannot match the "303000" code used above - verify before reviving.
#report_daily_21 <- sqldf('select a.err, a.total, b.err, b.total, a.total/b.total, a.ct from report_0621_daily a, report_0621_daily b where a.ct = b.ct and a.err="30300" and b.err="208000"')
# Same extraction for 06-23.
report_0623_ct <- sqldf('select ct from report_0623 group by ct')
report_0623_daily <- sqldf('SELECT a.ERR, a.TOTAL, a.CT, a.ORIGIN FROM report_0623 a INNER JOIN report_0623_ct b ON a.CT = b.CT WHERE a.ERR IN ("303000", "208000")')
write.csv(report_0621_daily, file="E:/R_Workspace/ipserver/data/report/report_0621_daily.csv")
write.csv(report_0623_daily, file="E:/R_Workspace/ipserver/data/report/report_0623_daily.csv") |
fa29fe9b6309777f7c120104832936bb1bc56ef5 | 9b4a5a10226c96670dcc9fcbb33cfeb06b561bf2 | /scripts/film_coordinates.R | 9c64166e80811467dba08508e9d9a0ef89e54a69 | [] | no_license | stephen-bird/ames_dem | f3f5ec95c9c1552d21c6c86ec95ed48a26931fad | faad7795a3431c3d312cf93febb9404299b93d3c | refs/heads/master | 2020-07-29T14:44:35.437343 | 2019-11-05T00:09:31 | 2019-11-05T00:09:31 | 209,848,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,124 | r | film_coordinates.R |
# This script calculates the film coordinates of the fiducial marks based on the calibration report.
# This is a Packrat project.
library(here)
library(dplyr)
source("scripts/pp_functions.R")
film_coord <-
data.frame(
fids = c("1", "2", "3", "4"),
x = c(-105.993, 106.001, -106.015, 106.015),
y = c(-106.006, 106.002, 105.982, -106.006)
)
fid_dist <-
data.frame(
fids = c("1_2", "1_3", "1_4", "2_3", "2_4", "3_4"),
# For example, 1_2 is distance between fiducial 1 and 2
dist_mm = c(299.814, 211.988, 212.015, 212.008, 212.007, 299.826)
)
# Enter the error in the film measurements as stated in the report. If none is
# given, estimate a reasonable value:
dist_err <- 0.003 # Stated error between two points
point_err <- sqrt((dist_err ^ 2) / 2) # Error of placing a single point
# Enter the angle between lines intersecting the fiducial marks
fid_angle <- 90 + 13 / 3600
# ------------------------------------------------------------------------------
# How many samples to generate?
n <- 10000
# Generate normal random samples of each x,y coordinate as given in the
# calibration report using the stated measurement error (e.g. sfilm_1x = sample
# film, fiducial 1, x):
set.seed(42)
sfilm_1x <- rnorm(n, mean = film_coord$x[1], sd = point_err)
sfilm_2x <- rnorm(n, mean = film_coord$x[2], sd = point_err)
sfilm_3x <- rnorm(n, mean = film_coord$x[3], sd = point_err)
sfilm_4x <- rnorm(n, mean = film_coord$x[4], sd = point_err)
sfilm_1y <- rnorm(n, mean = film_coord$y[1], sd = point_err)
sfilm_2y <- rnorm(n, mean = film_coord$y[2], sd = point_err)
sfilm_3y <- rnorm(n, mean = film_coord$y[3], sd = point_err)
sfilm_4y <- rnorm(n, mean = film_coord$y[4], sd = point_err)
# Gather into a df:
sfilm_coord <-
data.frame(sfilm_1x,
sfilm_2x,
sfilm_3x,
sfilm_4x,
sfilm_1y,
sfilm_2y,
sfilm_3y,
sfilm_4y)
# Make distance measurements among all fiducial points using the sample coordinates generated above:
sfilm_coord$sdist_12 <- hypo(sfilm_coord$sfilm_1x,sfilm_coord$sfilm_1y,sfilm_coord$sfilm_2x,sfilm_coord$sfilm_2y)
sfilm_coord$sdist_13 <- hypo(sfilm_coord$sfilm_1x,sfilm_coord$sfilm_1y,sfilm_coord$sfilm_3x,sfilm_coord$sfilm_3y)
sfilm_coord$sdist_14 <- hypo(sfilm_coord$sfilm_1x,sfilm_coord$sfilm_1y,sfilm_coord$sfilm_4x,sfilm_coord$sfilm_4y)
sfilm_coord$sdist_23 <- hypo(sfilm_coord$sfilm_2x,sfilm_coord$sfilm_2y,sfilm_coord$sfilm_3x,sfilm_coord$sfilm_3y)
sfilm_coord$sdist_24 <- hypo(sfilm_coord$sfilm_2x,sfilm_coord$sfilm_2y,sfilm_coord$sfilm_4x,sfilm_coord$sfilm_4y)
sfilm_coord$sdist_34 <- hypo(sfilm_coord$sfilm_3x,sfilm_coord$sfilm_3y,sfilm_coord$sfilm_4x,sfilm_coord$sfilm_4y)
# Generate normal random samples based on film measured distances and the calibrated measurement error:
set.seed(24)
sfilm_coord$fdist_12 <- rnorm(n, mean = fid_dist$dist_mm[1], sd = 0.003)
sfilm_coord$fdist_13 <- rnorm(n, mean = fid_dist$dist_mm[2], sd = 0.003)
sfilm_coord$fdist_14 <- rnorm(n, mean = fid_dist$dist_mm[3], sd = 0.003)
sfilm_coord$fdist_23 <- rnorm(n, mean = fid_dist$dist_mm[4], sd = 0.003)
sfilm_coord$fdist_24 <- rnorm(n, mean = fid_dist$dist_mm[5], sd = 0.003)
sfilm_coord$fdist_34 <- rnorm(n, mean = fid_dist$dist_mm[6], sd = 0.003)
# Calculate residuals^2 between distances measured on the film and distances sampled above:
sfilm_coord$res_12 <- (sfilm_coord$sdist_12 - sfilm_coord$fdist_12) ^ 2
sfilm_coord$res_13 <- (sfilm_coord$sdist_13 - sfilm_coord$fdist_13) ^ 2
sfilm_coord$res_14 <- (sfilm_coord$sdist_14 - sfilm_coord$fdist_14) ^ 2
sfilm_coord$res_23 <- (sfilm_coord$sdist_23 - sfilm_coord$fdist_23) ^ 2
sfilm_coord$res_24 <- (sfilm_coord$sdist_24 - sfilm_coord$fdist_24) ^ 2
sfilm_coord$res_34 <- (sfilm_coord$sdist_34 - sfilm_coord$fdist_34) ^ 2
# Remove any rows that exceedes the angle threhold between intersecting fiducial lines (remove if > 1 SD from calibration reported value):
sfilm_coord$angle <- 180 - intersect.angle(sfilm_coord)
angle_error <- sd(sfilm_coord$angle)
sfilm_coord <- filter(sfilm_coord,angle > fid_angle - angle_error & angle < fid_angle + angle_error)
# Minimize RSS and select the best row of sample data:
sfilm_coord$rss <- apply(select(sfilm_coord,res_12,res_13,res_14,res_23,res_24,res_34), 1, sum)
best_row <- which(sfilm_coord$rss == min(sfilm_coord$rss), arr.ind = TRUE)
# Subset the image_fids df by the best row:
best_data <- sfilm_coord[best_row,]
# Collect results:
derived_coord <- data.frame(x_revised = c(best_data$sfilm_1x,best_data$sfilm_2x,best_data$sfilm_3x,best_data$sfilm_4x),
y_revised = c(best_data$sfilm_1y,best_data$sfilm_2y,best_data$sfilm_3y,best_data$sfilm_4y))
film_coord <- cbind(film_coord,derived_coord)
# Derive residuals:
film_coord$xres <- film_coord$x - film_coord$x_revised
film_coord$yres <- film_coord$y - film_coord$y_revised
# Caluclate RMS of final solution:
film_rms <- sqrt(mean(c(film_coord$xres^2,film_coord$yres^2)))
film_rms
|
59f4465c2df9a0bb911cfd738de759393adabf35 | 32f2c862c462c6dcb41fa83e0eb7a9ab8a054e44 | /scripts/03_runBigRR.R | c5d55f31fea92253795ff703ec6457ba98ed302e | [] | no_license | JosueVega/Hwaviness | 5e36bfb26ee524d2eb4bc6bf9f9c95d0a3b07a58 | 5074a768e90e414791c6f2a1df7861aa9866cb2b | refs/heads/master | 2021-09-07T19:47:10.122962 | 2018-02-28T03:49:38 | 2018-02-28T03:49:38 | 112,789,780 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,150 | r | 03_runBigRR.R | #Josue Vega
#from script by Jason A Corwin, Modified by Rachel Fordyce
#to run bigRR on Linux GPU for GWAS
#---------------------------------------------------------------
# NOTE(review): clearing the workspace and hard-coding setwd() make this
# script non-portable and destructive when sourced interactively; prefer
# project-relative paths (e.g. here::here()) and a fresh R session instead.
rm(list=ls())
setwd("~/Documents/GitRepos/Hwaviness/data/")
#NVIDIA nvcc
#########################
# This makes the bigRR_update run through the GPU
# You need to do this first to mask the native 'bigRR_update' in the bigRR package
# one alternative to family = gaussian(link = identity) is family = poisson(link = log)
## RAN first time WITH POISSON. Lesion size expected to be Gaussian
# Masks bigRR::bigRR_update so the iterative HEM refit runs on the GPU
# (GPU = TRUE) and tolerates missing genotype calls (impute = TRUE).
# obj is a previous bigRR fit (supplies y, X, u and leverages); Z is the
# marker (genotype) matrix. Signature matches the package original.
bigRR_update <- function(obj, Z, family = gaussian(link = identity), tol.err = 1e-06,
                         tol.conv = 1e-08) {
  # Heteroscedasticity weights from the previous fit's random effects and
  # leverages, floored at tol.err so no weight is zero or negative.
  refit_weights <- pmax(as.numeric(obj$u^2 / (1 - obj$leverage)), tol.err)
  # Refit with the updated weights; y and X ride along from the prior fit.
  bigRR(
    y = obj$y, X = obj$X, Z = Z, family = family, weight = refit_weights,
    tol.err = tol.err, tol.conv = tol.conv, GPU = TRUE, impute = TRUE
  )
}
########################
#NOTE1 FROM RACHEL: we need bigRR1.3-9 to get GPU option
# must download R-Forge version bigRR1.3-9tar.gz and manually install
# https://r-forge.r-project.org/R/?group_id=1301
# install.packages("bigRR", repos="http://R-Forge.R-project.org")
#NOTE2 FROM RACHEL: need package 'gputools' but CRAN version fails to install
# must first install Nvidia's CUDA toolkit -current version is 7.5
# installed from developer.nvidia.com/cuda-downloads
library(bigRR) #check if version is 1.3-9
#Get genotype data
SNPs <- read.csv("03_bigRRinput/Domestication/hpbinSNP_bigRR_trueMAF20_50NA.csv", row.names = 1)
FullSNPs <- SNPs
SNPs <- FullSNPs
#add a column with position as chr.base
SNPs$Chr.Base <- do.call(paste, c(SNPs[c("X.CHROM","POS")], sep="."))
rownames(SNPs) <- SNPs[,96] #set the new column of chrom.base as rownames - this could maybe be written as: rownames(SNPs) <- SNPs$Chr.Base?
any(duplicated(SNPs$Chr.Base))#check that none are duplicated
SNPs <- SNPs[,4:95] #take out first three cols (X.CHROM, POS, REF) and new last col (Chr.Base). dim(SNPs) should now be [345485, 91], colnames(SNPs) are all Bc Isolates, rownames(SNPs) are all Chr.Base
ogSNPs <- SNPs
SNPs <- ogSNPs
#makes SNP states numeric (also transposes SNP matrix)
SNPs <- as.matrix(t(SNPs))
for(i in 1:dim(SNPs)[1]) {
SNPs[i,] <- as.numeric(SNPs[i,])
}
#read in phenotype data
Phenos <- read.csv("03_bigRRinput/Domestication/Sl_Pheno_bigRR_trueMAF20_50NA.csv", row.names = 1)
dat <- as.data.frame((Phenos[4])) #INSERT PHENOTYPE COLUMNS HERE
#e.g. LesionGreen as.data.frame(c(Phenos[,31:32],Phenos[,34:35]))
#should I remove reference (B05.10 I assume) phenotypes and genotypes from list?
#no: this is a T4 reference
# B05.10.Phenos <- dat[64,]
# dat <- dat[-64,]
outpt.HEM <- colnames(SNPs)
thresh.HEM <- list("pos0.95Thresh" = NA, "pos0.975Thresh" = NA, "pos0.99Thresh" = NA, "pos0.999Thresh" = NA, "neg0.95Thresh" = NA, "neg0.975Thresh" = NA, "neg0.99Thresh" = NA, "neg0.999Thresh" = NA)
con <- file("04_bigRRoutput/trueMAF20_20NA/test.log")
sink(con, append=TRUE)
sink(con, append=TRUE, type="message")
#Calculate HEMs for all phenotypes
for(i in 1:dim(dat)[2]) { #i will be each isolate
print(colnames(dat)[i])
MyX <- matrix(1, dim(dat)[1], 1) #good to here
#added try here
#testing with impute=T
Pheno.BLUP.result <- try(bigRR(y = dat[,i], X = MyX, Z = SNPs, GPU = TRUE, impute=TRUE))
# Pheno.BLUP.result <- try(bigRR(y = dat[,i], X = MyX, Z = SNPs, GPU = TRUE, impute=FALSE))
#can add try here as well
Pheno.HEM.result <- try(bigRR_update(Pheno.BLUP.result, SNPs))
outpt.HEM <- cbind(outpt.HEM, Pheno.HEM.result$u)
#Permute Thresholds for Phenos - this is what takes forever
perm.u.HEM <- vector()
for(p in 1:1000) {
if(p %% 10 == 0) {print(paste("Thresh sample:", p, "--", Sys.time()))}
try(temp.Pheno <- sample(dat[,i], length(dat[,i]), replace = FALSE))
try(temp.BLUP <- bigRR(y = temp.Pheno, X = MyX, Z = SNPs, GPU = TRUE, impute=TRUE),silent = TRUE)
try(temp.HEM <- bigRR_update(temp.BLUP, SNPs)) #REF change- was bigRR_update(Pheno.BLUP.result...
perm.u.HEM <- c(perm.u.HEM, temp.HEM$u)
}
#write.csv(perm.u.HEM, paste("PermEffects_",colnames(dat)[i],".csv",sep=""))
thresh.HEM$"pos0.95Thresh"[i] <- quantile(perm.u.HEM,0.95)
thresh.HEM$"pos0.975Thresh"[i] <- quantile(perm.u.HEM,0.975)
thresh.HEM$"pos0.99Thresh"[i] <- quantile(perm.u.HEM,0.99)
thresh.HEM$"pos0.999Thresh"[i] <- quantile(perm.u.HEM,0.999)
thresh.HEM$"neg0.95Thresh"[i] <- quantile(perm.u.HEM,0.05)
thresh.HEM$"neg0.975Thresh"[i] <- quantile(perm.u.HEM,0.025)
thresh.HEM$"neg0.99Thresh"[i] <- quantile(perm.u.HEM,0.01)
thresh.HEM$"neg0.999Thresh"[i] <- quantile(perm.u.HEM,0.001)
colnames(outpt.HEM)[i+1] <- paste(colnames(dat)[i],"HEM",sep=".")
}
# Restore output to console
sink()
sink(type="message")
#Give column names to the thresholds from the HEM list
for(j in 1:length(thresh.HEM)) {
names(thresh.HEM[[j]]) <- colnames(dat)
}
#RF-give row names to thresh.HEM and thresh.BLUP so that threshhold values will line up correctly with phenotypes, and you can see which threshold value is displayed
thresh.HEM$"pos0.95Thresh" <- c("pos 0.95 Thresh", thresh.HEM$"pos0.95Thresh")
thresh.HEM$"pos0.975Thresh" <- c("pos 0.975 Thresh", thresh.HEM$"pos0.975Thresh")
thresh.HEM$"pos0.99Thresh" <- c("pos 0.99 Thresh", thresh.HEM$"pos0.99Thresh")
thresh.HEM$"pos0.999Thresh" <- c("pos 0.999 Thresh", thresh.HEM$"pos0.999Thresh")
thresh.HEM$"neg0.95Thresh" <- c("neg 0.95 Thresh", thresh.HEM$"neg0.95Thresh")
thresh.HEM$"neg0.975Thresh" <- c("neg 0.975 Thresh", thresh.HEM$"neg0.975Thresh")
thresh.HEM$"neg0.99Thresh" <- c("neg 0.99 Thresh", thresh.HEM$"neg0.99Thresh")
thresh.HEM$"neg0.999Thresh" <- c("neg 0.999 Thresh", thresh.HEM$"neg0.999Thresh")
#Write results to output
write.csv(rbind(thresh.HEM$"pos0.95Thresh",thresh.HEM$"pos0.975Thresh",thresh.HEM$"pos0.99Thresh",thresh.HEM$"pos0.999Thresh",thresh.HEM$"neg0.95Thresh",thresh.HEM$"neg0.975Thresh",thresh.HEM$"neg0.99Thresh",thresh.HEM$"neg0.999Thresh",outpt.HEM),"04_bigRRoutput/trueMAF20_20NA/HWavi_trueMAF20_20NA.HEM.csv")
|
41434e6eddff83b72eba28a82fb56ad1c048f548 | d243b1765958531a7a5c7893f218f86c75d3cee0 | /код для графиков.r | f8fc9b42992c90075d505e5b91bab38d78dc18b3 | [] | no_license | EvgeniyaY/Project | 4bc134f45e4c5eada61558c90842bad4417b773a | bc70376ff38a554494700afbdfa2e89673080ca9 | refs/heads/main | 2023-01-30T10:09:59.003387 | 2020-12-11T07:26:14 | 2020-12-11T07:26:14 | 318,398,899 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,142 | r | код для графиков.r | library("corrplot", warn.conflicts = FALSE )
library("car", warn.conflicts = FALSE )
library("ggplot2", warn.conflicts = FALSE )
library("stargazer", warn.conflicts = FALSE )
library("magrittr", warn.conflicts = FALSE )
library("knitr", warn.conflicts = FALSE )
library("dplyr", warn.conflicts = FALSE )
library("tidyr", warn.conflicts = FALSE )
library("lmtest", warn.conflicts = FALSE )
library("olsrr", warn.conflicts = FALSE)
library("sandwich", warn.conflicts = FALSE )
library("Matching", warn.conflicts = FALSE )
library("tableone", warn.conflicts = FALSE )
library("kableExtra", warn.conflicts = FALSE )
library("xtable", warn.conflicts = FALSE )
library("magick", warn.conflicts = FALSE )
library("glmnet", warn.conflicts = FALSE )
library("grf", warn.conflicts = FALSE )
library("randomForest", warn.conflicts = FALSE )
library("pwt9", warn.conflicts = FALSE )
library("readxl", warn.conflicts = FALSE )
library("foreign", warn.conflicts = FALSE )
library("Synth", warn.conflicts = FALSE )
library("gridExtra", warn.conflicts = FALSE )
Data = read.csv("C://Users/Kate/Downloads/CIAN.csv", encoding = "UTF-8")[,-c(1,8,15)]
levels(Data$Комнат)=c("1-комн.","2-комн.","3-комн.","4-комн.","5-комн.","Апарт.своб.планировки", "Кв.своб.планировки", "Многокомнатная", "Студия")
levels(Data$Район)[5] = "р-н Ново-Савиновский"
data_frame("Переменная" = names(Data),
"Класс" = sapply(Data, class),
"Пример значений" = sapply(Data, function(x) paste0(x[20:30], collapse = "; ")),
row.names = NULL) %>%
kable(format = "latex", longtable = T) %>%
column_spec(2, width = "4em") %>%
column_spec(3, width = "25em")
CORR = cor(Data[,c(1:4,10)],use = "na.or.complete")
corrplot(CORR, type = "lower", tl.col = "black", tl.srt = 37, cl.cex = 0.55, tl.cex = 0.8, diag = F, order="FPC")
corrplot(CORR, type = "lower", tl.col = "black", tl.srt = 37, cl.cex = 0.55, tl.cex = 0.8, diag = F, order="FPC", method ="number")
ggplot(Data, aes(x = Цена, fill = Комнат)) +
geom_density(alpha=.6) +
theme(text = element_text(size=30)) +
labs(title="Плотность распределения цен по количеству комнат", fill="Комнатность" )
ggplot(filter(Data, Цена > 10000000), aes(x = Цена)) +
geom_density() +
theme(text = element_text(size=30)) +
labs(title="Плотность распределения цен" )
ggplot(Data, aes(x = Цена, fill = Тип.жилья)) +
geom_density(alpha=.6) +
theme(text = element_text(size=30)) +
labs(title="Плотность распределения цен по типу жилья", fill="Тип жилья" )
ggplot(Data, aes(x = Цена, fill = Район)) +
geom_density(alpha=.6) +
theme(text = element_text(size=30)) +
labs(title="Плотность распределения цен по районам", fill="Район" )
ggplot(Data, aes(x = Цена, fill = Отделка)) +
geom_density(alpha=.6) +
theme(text = element_text(size=30)) +
labs(title="Плотность распределения цен по отделке", fill="Отделка" )
ggplot(Data, aes(x = Цена, fill = Ремонт)) +
geom_density(alpha=.6) +
theme(text = element_text(size=30)) +
labs(title="Плотность распределения цен по ремонту", fill="Ремонт" )
ggplot(Data, aes(x = Общая, y = Цена, color = factor(Комнат))) +
geom_point() +
theme(text = element_text(size=20)) +
labs(title="График разброса цен от общей площади", color="Комнатность", x="Общая площадь, м2") +
geom_smooth(method = lm)
ggplot(Data, aes(x = Общая, y = Цена, color = Район)) +
geom_point() +
theme(text = element_text(size=20)) +
labs(title="График разброса цен от общей площади", color="Район", x="Общая площадь, м2") +
geom_smooth(method = lm)
ggplot(Data, aes(x=Район, y=Цена, color = Район)) +
geom_boxplot() +
theme(text = element_text(size=20),
axis.text.x = element_text(angle=17,hjust = 0.85),
legend.position = "none")
ggplot(Data, aes(x=Район, y=Общая, color = Район)) +
geom_boxplot() +
theme(text = element_text(size=20),
axis.text.x = element_text(angle=17,hjust = 0.85),
legend.position = "none") +
labs(y = "Общая площадь, м2")
ggplot(Data, aes(x=as.factor(Ртаж), y=Цена, color = as.factor(Ртаж))) +
geom_boxplot() +
theme(text = element_text(size=20),
axis.text.x = element_text(hjust = 0.85),
legend.position = "none") +
labs(x = "Ртаж")
ggplot(Data, aes(x=Тип.жилья, y=Цена, color = Ремонт)) +
geom_boxplot() +
theme(text = element_text(size=20),
axis.text.x = element_text(hjust = 0.85,angle=17)) +
labs(x = "Тип жилья")
ggplot(Data, aes(x=Комнат, y=Цена, color = Комнат)) +
geom_boxplot() +
theme(text = element_text(size=20),
axis.text.x = element_text(hjust = 0.85,angle=17),
legend.position = "none") +
labs(x = "Количество комнат")
reg1 = lm(Data, formula = log(Цена) ~ log(Общая) + log(Жилая) + log(РљСѓС…РЅСЏ) + log(Ртаж) + Комнат + Район + Построен + Ремонт + Санузел + Балкон.лоджия + РўРёРї.жилья)
reg1 = lm(Data, formula = log(Цена) ~ log(Общая) + log(Жилая) + log(РљСѓС…РЅСЏ) + log(Ртаж))
reg2 = lm(Data, formula = log(Цена) ~ log(Общая) + log(Жилая) + log(РљСѓС…РЅСЏ) + log(Ртаж) + Комнат + Район + Построен)
reg3 = lm(Data, formula = log(Цена) ~ log(Общая) + log(Жилая) + log(РљСѓС…РЅСЏ) + log(Ртаж) + Ремонт + Санузел + Балкон.лоджия)
stargazer(reg1,reg2,reg3, font.size="footnotesize", header=FALSE, no.space=TRUE, single.row=TRUE, column.labels = c("Модель 1", "Модель 2", " Модель 3"), column.sep.width = "-5pt", table.placement = "H")
ols_plot_resid_lev(reg1)
ols_plot_resid_stud_fit(reg1)
p = ols_plot_resid_lev(reg1)
c = pull(p$leverage[,1])
reg1 = lm(Data[-c, ], formula = log(Цена) ~ log(Общая) + log(Жилая) + log(РљСѓС…РЅСЏ) + log(Ртаж))
ols_plot_resid_lev(reg1)
ols_plot_resid_stud_fit(reg1)
high.value <- which(Data$Цена > 10000000)
DataNEW <- Data[-high.value, ]
reg1 = lm(Data, formula = log(Цена) ~ log(Общая) + log(Жилая) + log(РљСѓС…РЅСЏ) + log(Ртаж))
ols_plot_resid_lev(reg1)
ols_plot_resid_stud_fit(reg1)
qnt <- quantile(x$Цена, probs=c(.25, .75), na.rm = T)
H <- 1.5 * IQR(x$Цена, na.rm = T)
y[x$Цена > (qnt[2] + H),] <- NA
y
``` |
5d8083943346f293b11469583b38409f93b41eb3 | d1304a227d448491211daea45994152d8ebf1fbc | /05_DataTranformation.R | 45bdc8abc2966221fba235835d3910971a1ad7f9 | [] | no_license | jonleslie/R_for_Data_Science | 0a60cf015a9ac2b913fe0b437ec69e07b7c6b934 | ce0f3dcb8481cec017895bf3e624508f9a84772e | refs/heads/master | 2021-09-04T19:19:52.360656 | 2018-01-21T17:04:58 | 2018-01-21T17:04:58 | 82,564,396 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,971 | r | 05_DataTranformation.R | library(tidyverse)
library(nycflights13)
?flights
head(flights)
table(flights$carrier)
# 5.2 Filter
filter(flights, month == 1, day == 1)
df <- tibble(x = c(1, NA, 3))
df
# 5.2.4 Exercises
# 1
filter(flights, arr_delay >= 120)
filter(flights, dest == 'IAH' | dest == 'HOU')
filter(flights, carrier %in% c("UA", "AA", "DL"))
filter(flights, month %in% c(7,8,9))
filter(flights, arr_delay > 120 & dep_delay == 0)
filter(flights, dep_delay >= 60 & (dep_delay - arr_delay <=30))
filter(flights, dep_time <= 600)
# 2
?between
filter(flights, between(month, 7, 9))
table(is.na(flights$dep_time))
# 3
is.na(flights)
filter(flights, is.na(dep_time))
# 4
NA * 0
# 5.3.1 Exercises
# 1
arrange(flights, desc(is.na(dep_time)))
arrange(flights, desc(dep_time))
# 2
arrange(flights, desc(dep_delay))
arrange(flights, arr_time)
# 3
arrange(flights, air_time)
# 4
arrange(flights, distance)
arrange(flights, desc(distance))
# 5.4.1 Exercises
# 1
select(flights, c(dep_time, dep_delay, arr_time, arr_delay))
# 2
select(flights, arr_time, arr_time)
# 3
?one_of
# 5.5 Mutate
View(flights)
flights_sml <- select(flights,
year:day,
ends_with("delay"),
distance,
air_time)
flights_sml
mutate(flights_sml,
gain = arr_delay - dep_delay,
speed = distance / air_time * 60)
x <- 1:10
x
lag(x)
x != lag(x)
x - lag(x)
x
cumsum(x)
cummean(x)
y <- c(1, 2, 2, NA, 3, 4)
min_rank(y)
min_rank(c(1, 2, 2, 4, 5))
min_rank(c(1, 2, 2, 6, 5))
# 5.5.2 Exercises
flights_ex1 <- select(flights, ends_with('time'))
mutate(flights_ex1,
dep_hour = dep_time %/% 100,
dep_min = dep_time %% 100)
mutate(flights_ex1,
delta_time = arr_time - dep_time)
# 3
flights
# 4 Find the 10 most delayed flights using a ranking function.
# How do you want to handle ties? Carefully read the documentation for min_rank().
arrange(flights, desc(min_rank(dep_delay)))
# 5
1:3 + 1:10
1:3 + 1:9
# 5.6 Summarise
summarise(flights, delay = mean(dep_delay, na.rm = T))
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
)
delay <- filter(delay, count > 20, dest != "HNL")
# It looks like delays increase with distance up to ~750 miles
# and then decrease. Maybe as flights get longer there's more
# ability to make up delays in the air?
ggplot(data = delay, mapping = aes(x = dist, y = delay)) +
geom_point(aes(size = count), alpha = 1/3) +
geom_smooth(se = FALSE)
not_cancelled <- flights %>%
filter(!is.na(dep_delay), !is.na(arr_delay))
# 5.6.3 Counts
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarise(
delay = mean(arr_delay)
)
ggplot(data = delays, mapping = aes(x = delay)) +
geom_freqpoly(binwidth = 10)
delays <- not_cancelled %>%
group_by(tailnum) %>%
summarise(
delay = mean(arr_delay, na.rm = TRUE),
n = n()
)
ggplot(data = delays, mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
delays %>%
filter(n > 25) %>%
ggplot(mapping = aes(x = n, y = delay)) +
geom_point(alpha = 1/10)
not_cancelled %>%
count(tailnum)
not_cancelled %>%
count(tailnum, wt = distance)
# 5.6.7 Exercises
# 2 Come up with another approach that will give you the same
# output as not_cancelled %>% count(dest) and not_cancelled %>% count(tailnum,
# wt = distance) (without using count()).
not_cancelled %>%
count(dest)
not_cancelled %>%
group_by(dest) %>%
summarise(n = n())
not_cancelled %>%
count(tailnum, wt = distance)
not_cancelled %>%
group_by(tailnum) %>%
summarise(n = sum(distance))
# 4
flights %>%
group_by(year, month, day) %>%
filter(is.na(dep_time)) %>%
summarise(n = n())
flights %>%
group_by(year, month, day) %>%
filter(is.na(dep_time)) %>%
count(dep_time)
flights %>%
group_by(year, month, day) %>%
summarise(cancelled = sum(is.na(dep_time)),
n = n(),
mean_dep_delay = mean(dep_delay, na.rm = T),
prop_cancelled = cancelled/n) %>%
ggplot(mapping = aes(x = prop_cancelled, y = mean_dep_delay)) +
geom_point(alpha = 0.3)
# Which carrier has the worst delays
flights %>%
group_by(carrier) %>%
summarise(av_delays = mean(dep_delay, na.rm = T)) %>%
arrange(desc(av_delays))
flights %>%
group_by(carrier, dest) %>%
summarise(n())
# 6 For each plane, count the number of flights before the first delay of greater than 1 hour.
flights %>%
select(tailnum, dep_delay) %>%
filter(dep_delay > 0 & dep_delay <= 60) %>%
group_by(tailnum) %>%
count(sort = T)
# 5.7.1 Exercises
# 1
# 2
flights %>%
group_by(tailnum) %>%
mutate(prop_late = mean(dep_delay > 0, na.rm = T)) %>%
select(year:day, tailnum, prop_late) %>%
arrange(desc(prop_late))
# 3
flights %>%
group_by(dest) %>%
summarise(sum_delays = sum(dep_delay, na.rm = T)) %>%
arrange(desc(sum_delays))
|
6f4b6f96793952ced087a81e1e2ca335e85b6444 | 2f32bfb9cffd8f5847995bf6800422db99984ab2 | /man/heteMLE.Rd | 9d403d397829279f6f8a2fc155553a0e17ffc751 | [] | no_license | adenosinew/AMS597 | 750b8a09cb2134f0aee17494661e15734deeacc9 | 398f024eafb5b1aa27977bb1cb6eee6e1265753b | refs/heads/master | 2021-06-16T18:19:05.898122 | 2017-05-02T04:30:08 | 2017-05-02T04:30:08 | 84,480,955 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 853 | rd | heteMLE.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hete MLE.R
\name{heteMLE}
\alias{heteMLE}
\title{MLE based test of Lin and Stivers under heteroscedasticity}
\usage{
heteMLE(x, y, alternative = "two.sided")
}
\arguments{
\item{x}{a (non-empty) numeric vector of data values. For tumor data.}
\item{y}{a (non-empty) numeric vector of data values. For normal data.}
\item{alternative}{a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less". You can specify just the initial letter.}
}
\value{
A list containing the following components
\code{statistic} the value of the corrected Z-test statistic.
\code{p.value} the p-value for the test.
}
\description{
MLE based test of Lin and Stivers under heteroscedasticity
}
\examples{
heteMLE(x,y,alternative="greater")
}
|
e2b9979dc0efd6f043adbc9f89e8a0ec0ea86a94 | e8d9d38e9f3a7b2d0a843d1a707479b4eeddf9e4 | /tests/testthat.R | 3bbd540e3dde24c0b5b38cd2064c50034eabfda5 | [
"MIT"
] | permissive | charliejhadley/gene.alignment.tables | db64294ee6ff80a6dc1d7c2ea43633c52a47db13 | 1542dc88f27ecb4acf4826267842fb21c2738275 | refs/heads/master | 2022-06-24T14:26:38.390903 | 2022-06-09T11:40:00 | 2022-06-09T11:40:00 | 110,682,612 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 86 | r | testthat.R | library(testthat)
library(gene.alignment.tables)
test_check("gene.alignment.tables")
|
e9d27cdedaf1f0874c76386ef80821cc51018f0c | d473a271deb529ed2199d2b7f1c4c07b8625a4aa | /TimeSeries/GARCH_Examples.R | 5ad208b1456297326f1ae8b2336699b48fdb814d | [] | no_license | yangboyubyron/DS_Recipes | e674820b9af45bc71852ac0acdeb5199b76c8533 | 5436e42597b26adc2ae2381e2180c9488627f94d | refs/heads/master | 2023-03-06T05:20:26.676369 | 2021-02-19T18:56:52 | 2021-02-19T18:56:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,465 | r | GARCH_Examples.R |
# http://shishirshakya.blogspot.com/2015/07/garch-model-estimation-backtesting-risk.html
# https://www.r-bloggers.com/a-practical-introduction-to-garch-modeling/
#----------------------------------------------------------
# BASIC EXAMPLE 1
gspec.ru <- ugarchspec(mean.model=list(
armaOrder=c(0,0)), distribution="std")
gfit.ru <- ugarchfit(gspec.ru, sp5.ret[,1])
coef(gfit.ru)
plot(sqrt(252) * gfit.ru$fit$sigma, type='l') # plot in-sample volatility estimates
#----------------------------------------------------------
# BASIC EXAMPLE 2
# requires Rmetrics suite
gfit.fg <- garchFit(data=sp5.ret[,1], cond.dist="std")
coef(gfit.fg)
plot(sqrt(252) * gfit.fg$sigma.t, type="l") # plot in-sample volatility estimates
#----------------------------------------------------------
# BASIC EXAMPLE 3
gfit.ts <- garch(sp5.ret[,1]) # It is restricted to the normal distribution
coef(gfit.ts)
plot(sqrt(252) * gfit.ts$fitted.values[, 1], type="l") # plot in-sample volatility estimates
#----------------------------------------------------------
# BASIC EXAMPLE 4
# This package fits an EGARCH model with t distributed errors
gest.te <- tegarch.est(sp5.ret[,1])
gest.te$par
gfit.te <- tegarch.fit(sp5.ret[,1], gest.te$par)
# The plotting function is pp.timeplot is an indication that
# the names of the input returns are available on the output —
# unlike the output in the other packages up to here.
pp.timeplot(sqrt(252) * gfit.te[, "sigma"])
|
47d9d82ca1ac2585b69aa2ef835b2c83633b0243 | 29585dff702209dd446c0ab52ceea046c58e384e | /PopGenKit/R/PICalc.R | 1d42f1ff3f45a55415161e89f24f5eadc84151b3 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,525 | r | PICalc.R | PICalc <-
function(datafile,ndigit=3)
{
input=read.table(file=datafile, sep='\t', colClasses='character')
noext= gsub("[.].*$","",datafile)
nloci=ncol(input)-2
#transformer pour trouver npops et popsizes
whereNbSamples=grep('NbSamples',input[,1])
npops=gsub( "[^0123456789]", "", input[whereNbSamples,1])
npops=as.numeric(npops)
whereSampleSize =grep('SampleSize',input[,1])
popsizes=as.numeric(c(gsub('[^0123456789]', "", input[whereSampleSize,1])))
#reconnaître les noms des pops
whereSampleName =grep('SampleName',input[,1])
popnames=(c(gsub('SampleName=', "", input[whereSampleName,1])))
#créer une matrice nind x nloci seulement
matinput=input[,3:ncol(input)]
xsums=rep(NA,times=nrow(matinput))
for (i in 1:nrow(matinput)){ xsums[i]=sum(nchar(matinput[i,])) }
emptyrows=which(xsums==0)
matinput=matinput[-emptyrows,]
#déterminer le nombre dallèles/locus pour formatter output
kvec=vector(mode='numeric',nloci)
for (i in 1:nloci)
{
alleles=matinput[,i]
vec=unique(alleles)
vec=paste(vec,collapse='')
vec=gsub( "[^[:alnum:]]", "", vec)
k=nchar(vec)/ndigit
kvec[i]=k
}
MAX=max(kvec)
#créer le tableau de résultats
results=matrix(NA,2*nloci,MAX)
missing=rep(NA, times=nloci)
nbk=rep(NA, times=nloci)
n.alleles=rep(NA, times=nloci)
PIC=rep(NA,times=nloci)
for (j in 1:nloci)
{
alleles=matinput[,j]
totaln=length(alleles)
vec=unique(alleles)
vec=paste(vec,collapse='')
vec=gsub( "[^[:alnum:]]", "", vec)
k=nchar(vec)/ndigit
sampsize=paste(alleles,collapse='')
sampsize=gsub( "[^[:alnum:]]", "", sampsize)
sampsize=(nchar(sampsize)/ndigit)
missingABS=length(grep('[?]',alleles))
missing[j]=round((100*(missingABS/totaln)),2)
nbk[j]=k
n.alleles[j]=sampsize/2
PICterm1=0
PICterm2=0
for (m in 1:k)
{
alleleID=substr(vec,(m*ndigit)-(ndigit-1),m*ndigit)
results[(2*j)-1,m]=alleleID
count=0
for (z in 1:length(alleles))
{
if (alleles[z]==alleleID) count=count+1
}
results[2*j,m]=round(count/sampsize,3)
PICterm1=(as.numeric(results[2*j,m])^2)+PICterm1
}
for (m in 1:(k-1))
{
for (n in (m+1):k)
{
PICterm2=(as.numeric(results[2*j,m])^2)*(as.numeric(results[2*j,n])^2)+PICterm2
}
}
PIC[j]=1-PICterm1-(2*PICterm2)
}
#trier les allèles en ordre croissant dans le output
for (j in 1:nloci)
{
ordre=order(results[(2*j)-1,])
results[(2*j)-1,]=results[(2*j)-1,ordre]
results[(2*j),]=results[(2*j),ordre]
}
#ajouter une colonne au début avec le no de locus et le % de données manquantes
loc.col=NULL
missing.col=NULL
k.col=NULL
n.alleles.col=NULL
for (i in 1:nloci) {
loc.col=c(loc.col,i,NA)
missing.col=c(missing.col,missing[i],NA)
k.col=c(k.col,nbk[i],NA)
n.alleles.col=c(n.alleles.col,n.alleles[i],NA)
}
table.results=cbind(loc.col,n.alleles.col,missing.col, k.col, results)
#mettre les cellules NA vides pour lesthétisme !
for (r in 1:nrow(table.results))
{
for (c in 1:ncol(table.results))
{
if (is.na(table.results[r,c])==T) table.results[r,c]=''
}
}
col.name=rep('',times=ncol(table.results))
col.name[1]= 'Locus#'
col.name[2]= 'n'
col.name[3]= 'Miss.%'
col.name[4]= 'k'
col.name[5]= 'Allele frequencies'
colnames(table.results)=col.name
filename=paste(noext,'_Overall_freq.txt',sep='')
write.table(table.results, file=filename, quote=F, row.names=F, col.names=T, sep='\t')
locrow=c(1:nloci)
PICtable=cbind(locrow,PIC)
return('PIC'=PICtable)
}
|
bcc4f1213ad79d8b087e6ef5d8e1e54a106f4097 | 8ff0c0346e79db661e29e763358c5ed799e5aad3 | /Data Mining Lab/FinalExam (deprecated, for now)/PartA/10/10.R | 792fd9296ee3c80872b4688de7481fd6ffe8165b | [] | no_license | vybhavjain/7thSemIse | c44e9ec92c24ce82c1338405f45d47510ea63c0f | 92d9bf39735fa01d10a994d69313f7d24a0c9874 | refs/heads/master | 2021-07-08T19:26:44.638136 | 2021-03-23T15:31:00 | 2021-03-23T15:31:00 | 229,064,968 | 1 | 1 | null | 2021-03-23T15:31:02 | 2019-12-19T13:54:38 | Java | UTF-8 | R | false | false | 405 | r | 10.R | #loading the data
data1 <- read.csv('climate.csv')
temp<- data1$avg_temp
temp_uncert <- data1$avg_temp_uncertain
#Y vs X
features <- temp_uncert~ temp # Y~X
#create the linear model
linearModel <- lm(features,data1)
#plot the linear model
plot(features)
abline(linearModel)
#predict value for some given data
test <- data.frame(temp=c(24))
predValue <- predict(linearModel,test)
print(predValue)
|
588d70c514886d928b75edf19939d4cbedb23689 | bfa86ffcf1438f737895a716254df49a503589e2 | /INLA.Lab/Laboratory/BUC4.Laboratory.R | 9c2d8a4256b0df4452b1a0ca2ec4192be5e5ee3d | [
"MIT"
] | permissive | jrvc/food-price-warning-indicator | a48dfa7bfe96ba6e0d58162c1ec5d8c152aa3efe | 62bfced2ce9f85ff8a655ba034fe0e69fa8850fc | refs/heads/master | 2021-05-16T16:27:25.249393 | 2018-01-19T18:05:55 | 2018-01-19T18:05:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,545 | r | BUC4.Laboratory.R | #
#
# Date: 4 June 2016
#
# R version: 3.2.2
#
rm(list=ls())
# install.packages("INLA", repos="https://www.math.ntnu.no/inla/R/stable")
# install.packages('sp')
# install.packages('spatstat')
# install.packages('mvtnorm')
# install.packages('lattice')
# install.packages('mgcv')
# install.packages('pixmap')
# install.packages('numDeriv')
# install.packages('fields')
wpath <- 'C:/JCMO.Trabajo/Seminars,Visits&Talks/16-06.BUC4/Laboratory/'
library('sp')
library('INLA')
library('spatstat')
library('mvtnorm')
library('lattice')
library('mgcv')
library('pixmap')
library('numDeriv')
library('fields')
source(paste(wpath,'Data_1/functions.r',sep=''))
# 2. A simple point process
# Reading in and gridding the data
# We read in the data as:
paul <- read.delim(paste(wpath,'Data_1/paul.txt',sep=''))
# type 5 is Andersonia heterophylla
data <- paul[paul$type=="5",]
x <- data$x/10
y <- data$y/10
# We transform the data into a point pattern object (using several commands from the library
# spatstat, for details check the library help files). Ignore the warning about duplicated
# points.
x.area <- 22
x.win <- owin(c(0, x.area),c(0, x.area))
data.pp=ppp(x,y,window=x.win)
plot(data.pp, main= " Andersonia heterophylla")
# We now need to transform the data, i.e. construct a grid with 30 x 30 cells
nrow <- 30
ncol <- nrow
x.grid <- quadrats(x.win,ncol,nrow)
# and count the number of points in each grid cell; note that this will be our response variable.
count.grid <- quadratcount(data.pp, tess=x.grid)
plot(count.grid)
# (b) Running a first model
# We have to transform the grid of counts into a vector (and we now use the notation from the
# slides for the response variable):
Y <- as.vector(count.grid)
# The number of grid cells
n <- ncol*nrow
# And calculate the area of the grid cells:
cell.area <- x.area^2/n
E <- rep(cell.area, n)
# INLA requires separate indices for each of the spatial vector and the error term.
I <- 1:n
J <- 1:n
# We have to specify a prior for the spatial vector
prior.spat <- c(1,0.00005) #Default!
hyper.spat <- list(prec=list(param=prior.spat))
# We can no specify the model formula
formula <- Y ~ 1+
f(I, model="rw2d", nrow=nrow, ncol=ncol, hyper=hyper.spat)+
f(J, model="iid")
# and run the model (this should take only a few seconds at most)
result <- inla( formula,data=data.frame(Y,I,J),
family="poisson",E=E, verbose=TRUE,
control.compute=list(dic=TRUE))
# We can look at a summary and a plot of the results
summary(result)
plot(result)
#
# -- END: BUC4.Laboratory.R -- |
115c4b1805c49c2a88a2d895d8b63d2b9aeddd2a | 5a76f3b4c6f46ecf335aa3685b2367852989ae3a | /cemitooldb/R/annot_functions.R | 74a49321457bbef353fd4c8be25fdede4c55ead5 | [] | no_license | pedrostrusso/cemitooldb | f1d0cd270c0356b65886f1d329d06f5ae7917c91 | 87328ce618e8aba98f8dd9ae2b8023c510d563da | refs/heads/master | 2021-01-25T12:36:56.617930 | 2018-08-30T16:23:07 | 2018-08-30T16:23:07 | 123,485,870 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,641 | r | annot_functions.R |
make_gds_annot <- function(gds){
eset <- GEOquery::GDS2eSet(gds)
annot <- pData(phenoData(eset))
annot$sample <- NULL
annot <- select_class(annot)
annot$SampleName <- rownames(annot)
return(annot)
}
select_class <- function(annot){
priority_list <- c("disease.state", "protocol", "agent", "genotype/variation")
if(any(priority_list %in% names(annot))){
for(col_name in priority_list){
if(col_name %in% names(annot)){
annot <- dplyr::rename(annot, "Class" = col_name)
break
}
}
}else{
if(ncol(annot) > 2){
# se numero de colunas for maior que dois, pegar a primeira coluna desde que nao seja "individual"
if(names(annot[, 1, drop=FALSE]) == "individual"){
annot <- dplyr::rename(annot, "Class" = names(annot[, 2, drop=FALSE]))
}else{
annot <- dplyr::rename(annot, "Class" = names(annot[, 1, drop=FALSE]))
}
}else{
annot <- dplyr::rename(annot, "Class" = names(annot[, 1, drop=FALSE]))
}
}
annot$Class <- gsub("'", "", annot$Class)
return(annot)
}
make_gse_annot <- function(gse){
annot <- data.frame("Sample_geo_accession"=gse[[1]]$geo_accession,
"Sample_title"=gse[[1]]$title,
"Sample_source_name_ch1"=gse[[1]]$source_name_ch1)
tmp <- pData(phenoData(gse[[1]]))
tmp <- tmp[, grepl("characteristics_ch1*", names(tmp))]
annot$Sample_characteristics_ch1 <- Reduce(function(x, y) paste(x, y, sep="; "), tmp)
return(annot)
}
|
3a69e5f4409f2a4bdf9ff87af69d7390bb58c0a2 | 7773a08777ff4774311644e991528e8c7e7666d0 | /plot2.R | f7397444f292e1fe3dd92371ceed8897b0b80aeb | [] | no_license | ArBaDaCarBa/ExData_Plotting1 | f7f9b6c7f2b4fe460fd1727501a96657e075bafd | ea4d8324d3e801079b5115a48027fc3089ed2240 | refs/heads/master | 2021-01-15T16:04:51.677721 | 2015-03-06T22:48:26 | 2015-03-06T22:48:26 | 31,544,870 | 0 | 0 | null | 2015-03-02T14:44:51 | 2015-03-02T14:44:49 | null | UTF-8 | R | false | false | 957 | r | plot2.R | # Making sure the labels are in english and not localized
Sys.setlocale("LC_TIME", "C")
# Loads the project data, assuming it's in data/household_power_consumption.txt
library(sqldf) # To avoid loading the whole file in memory
# Loading data of interest from file
hpc.data <<- read.csv.sql(
file = 'data/household_power_consumption.txt',
sql="SELECT Date, Time, Global_active_power
FROM file
WHERE Date in ('1/2/2007','2/2/2007')",
sep=';')
hpc.data$Date <- as.Date(hpc.data$Date, format('%d/%m/%Y'))
hpc.data$Time <- strptime(paste(hpc.data$Date, hpc.data$Time),
format='%Y-%m-%d %H:%M:%S')
# And there is no longer need of Date
hpc.data$Date <- NULL
png('plot2.png', width=480, height=480, bg='transparent')
# Plot of Global active power over time
plot(
hpc.data$Time, hpc.data$Global_active_power,
type='l',
xlab='',
ylab='Global Active Power (kilowatts)',
)
dev.off()
|
d8582ae6697c83682f14eb9a5b45c84fb37475a1 | ef2942bd239ff7c4c676fe65867566cdafd5efdd | /Scripts/BR/baton_rouge_concentration.R | c8972e083f1e6d0f6f3684e2d28f9d8da89b2761 | [] | no_license | jldbc/criminology_of_place | 237224daf8ead99249cb864a9aad29191c255212 | 61a9d5f6f80539cdc98b52544d46d8146615b6b0 | refs/heads/master | 2021-06-16T18:38:32.171811 | 2017-05-11T04:17:38 | 2017-05-11T04:17:38 | 72,176,328 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,139 | r | baton_rouge_concentration.R | ## remaining:
# - sacramento_county_2007_present
# - pitt_crime (can get pct. relationship, but can't plot or regress)
# - Oakland (some lat/long parsing needed)
# - Cincinnati (no plotting, but the block level data is good)
# - Austin (lat/long unreliable, but block level data looks good) -- this seems too concentrated. what am i missing?
# - Baton Rouge (geojoin)
# - NYC (geojoin)
# - Boston (geojoin) ** arcGIS. geocode to address. coerce to segs by given address ranges.
#####################################################################
# Baton Rouge
# to get segments, get location and geojoin shapefile for segment IDs. This format is too specific currently.
#####################################################################
setwd("/Users/jamesledoux/Documents/Research/Thesis/Data/Baton_Rouge")
library(rgdal)
library(data.table)
library(dplyr)
library(ggplot2)
#cleaned and merged DataFrame
#all incidents reported in seattle xxxx to present (2011 for now, it appears)
#note: this is incomplete data until I find a way to fix the failed merge from earlier
df = fread("baton_rouge_crime.csv", data.table=FALSE) |
d056066b0967b4e4afc4d728b899f603412b9c86 | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /myTAI/inst/testfiles/cpp_geom_mean/AFL_cpp_geom_mean/cpp_geom_mean_valgrind_files/1615839347-test.R | 2017bdf9eb7badac887eb3a04990517b76c96a11 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 153 | r | 1615839347-test.R | testlist <- list(x = c(1.39098954479748e-309, 2.85846620057912e-319, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(myTAI:::cpp_geom_mean,testlist)
str(result) |
e99dfff592169e5b999c87e31473e8f06f74e861 | 9c6359526c2c18bbbbeda7a8c1a1431f2e33cf09 | /R/functions.r | fbb83cca09f516b105b0964fa6c2d2f4bfe09dd6 | [
"MIT"
] | permissive | lsaravia/fireNL | 622855b76510e1edc39012df833fc12fedb4997e | 0e23b8e8a116e52db0427706c274ad4a01797755 | refs/heads/main | 2023-07-20T17:00:47.501844 | 2023-07-14T20:06:54 | 2023-07-14T20:06:54 | 301,216,269 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,610 | r | functions.r |
#' Extract images from DynamicFire NetLogo Model saved view
#'
#' The images were saved with the NetLogo extension CSV each 30 steps (ticks) after 7200 steps
#'
#' @param fname
#' @param plot
#'
#' @return
#' @export
#'
#' @examples
extract_patch_distr_nl <- function(fname,plot=FALSE){
#
# Extract parameters encoded in names
#
ss <- data.frame(str_split(tools::file_path_sans_ext(fname),"_",simplify=TRUE),stringsAsFactors = FALSE) %>% mutate_at(2:7,as.numeric)
plan(multisession)
p_df <- future_lapply( 2:length(fname), function(h){
png <- read_csv(paste0("Data/",fname[h-1]),col_names = c("i","j","value"), col_types = cols()) %>% filter(value!=55 & value!=0) %>% mutate(value= value>0)
png1 <- read_csv(paste0("Data/",fname[h]),col_names = c("i","j","value"), col_types = cols()) %>% filter(value!=55 & value!=0) %>% mutate(value= value>0)
dif <- anti_join(png1,png, by=c("i","j"))
#ggplot(dif, aes(y=i,x=j,fill=value)) +geom_raster() + theme_void()
#ggplot(png, aes(y=i,x=j,fill=value)) +geom_raster() + theme_void()
#ggplot(png1, aes(y=i,x=j,fill=value)) +geom_raster() + theme_void()
if( nrow(dif)>0) {
sm <- sparseMatrix(i=dif$i+1,j=dif$j+1,x=dif$value)
pl <- patchdistr_sews(as.matrix(sm))
if(plot) print(plot_distr(pl,best_only = FALSE) + ggtitle(paste("Days",ss[h,5])))
pl <- tibble::remove_rownames(data.frame(pl))
patch_distr <- patchsizes(as.matrix(sm))
pl <- pl %>% mutate(max_patch = max(patch_distr),size=as.numeric(ss[h,7])*as.numeric(ss[h,8]),tot_patch=sum(patch_distr),days = ss[h,6],
initial_forest_density= ss[h,2], fire_probability = ss[h,3], forest_dispersal_distance = ss[h, 4],
forest_growth= ss[h,5]
)
}
}, future.seed = TRUE)
plan(sequential)
patch <- bind_rows(p_df)
return(patch)
}
#' Evaluate patch distribution in a raster brick
#'
#' @param br raster with distribution data >0 is TRUE
#' @param returnEWS if TRUE returns the early warnings, FALSE returns the patch distribution
#'
#' @return a data frame with results
#' @export
#'
#' @examples
evaluate_patch_distr <- function(br,returnEWS=TRUE){
if( class(br)!="RasterLayer")
stop("Paramter br has to be a RasteLayer")
## Convert to TRUE/FALSE matrix
#
brTF <- as.matrix(br)
brTF <- brTF>0
# Extract Date from name of the band
#
brName <- str_sub( str_replace_all(names(br), "\\.", "-"), 2)
if( returnEWS ){
patch_distr <- patchdistr_sews(brTF)
patch_df <- tibble::remove_rownames(data.frame(patch_distr)) %>% mutate(date=brName)
} else {
patch_distr <- patchsizes(brTF)
patch_df <- tibble(size=patch_distr) %>% mutate(date=brName)
}
return(patch_df)
}
convert_to_sparse <- function(fire_bricks,region_name){
future::plan(multiprocess)
on.exit(future::plan(sequential))
require(Matrix)
p_df <- lapply( seq_along(fire_bricks), function(ii){
br <- brick(paste0("Data/",fire_bricks[ii]))
df <- future_lapply(seq_len(nbands(br)), function(x){
brName <- stringr::str_sub( stringr::str_replace_all(names(br[[x]]), "\\.", "-"), 2)
mm <- as.matrix(br[[x]]>0)
message(paste(x,"-", brName ,"Suma de fuegos", sum(mm)))
sm <- as(mm,"sparseMatrix")
summ <- as_tibble(summary(sm))
names(summ) <- c("i","j","data")
summ <- summ %>% mutate(t=x,date=brName) %>% dplyr::select(t,i,j,data,date)
})
#yy <- str_sub(names(br)[1],2,5)
df <- do.call(rbind,df) %>% mutate(region=region_name)
})
p_df <- do.call(rbind,p_df)
} |
42272104302887c556041246f15648c1b095f9c1 | 95cd8aee9871f5114553ea1388ec6123490c6b3c | /plot3.R | fb92344767d37ddf70ec9b0282fa50aa8323a8bd | [] | no_license | binaspora/ExData_Plotting1 | f4b67b1658a4596e87cc253985b5569d057fc7a1 | c811fef80b9b7ce662675a59f070b9f5e2374914 | refs/heads/master | 2021-01-01T06:33:26.515110 | 2014-06-08T20:39:48 | 2014-06-08T20:39:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 620 | r | plot3.R | # Load data
data <- read.csv2("./data/small_data.txt", dec=".");
data$utz <- paste(data$Date, data$Time, sep="-")
png(file="plot3.png")
dates <- strptime(data$utz, "%d/%m/%Y-%H:%M:%S")
# Print the plot
plot(dates, data$Sub_metering_1,
type="n",
xlab="",
ylab="Energy submetering"
)
points(dates, data$Sub_metering_1, type="l", col="black")
points(dates, data$Sub_metering_2, type="l", col="red")
points(dates, data$Sub_metering_3, type="l", col="blue")
legend("topright",
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black","blue","red"),
lwd=3
)
dev.off() |
01bf86857b63b75a2e29d553aa796526dc3a814e | 2724f6e874c9500b0df726c2d6903fe486b8cc84 | /R/to_overlay_data.R | 62fa1bbb180465bc8f714bf2b82dddb77343c6f7 | [
"MIT"
] | permissive | lagelab/Genoppi | 62006e9f123a1857580e5ebd88885c945483f6e3 | f78e7b3e523650e7ae211fc93b79dff3e28dd03f | refs/heads/master | 2023-08-31T09:51:59.007937 | 2022-10-12T14:17:17 | 2022-10-12T14:17:17 | 83,465,982 | 23 | 10 | null | null | null | null | UTF-8 | R | false | false | 1,628 | r | to_overlay_data.R | #' @title to overlay data
#' @description Adds columns to a data.frame (e.g. one used for overlay)
#' that are required for downstream functionality in genoppi/
#' @param df a data.frame
#' @param dataset optional string, if dataset is not provided in df.
#' @param rm.sig boolean. should signicicant items be removed?
#' @export
#' @family interactive
to_overlay_data <- function(df, dataset=NULL, rm.sig = F) {
cnames = colnames(df)
# remove all non-significant rows
if (!is.null(df$significant) & rm.sig){
df <- df[df$significant, ]
}
# if the following columns are not specified in the reference
# they are set to the default in this function.
if ('dataset' %nin% cnames) df$dataset = dataset
if ('label' %nin% cnames) df$label <- TRUE
if ('stroke' %nin% cnames) df$stroke <- TRUE
if ('col_significant' %nin% cnames) df$col_significant <- 'yellow'
if ('col_other' %nin% cnames) df$col_other <- 'grey'
if ('col_border' %nin% cnames) df$col_border <- 'black'
if ('alt_label' %nin% cnames) df$alt_label <- NA
if ('label_size' %nin% cnames) df$label_size = 3
if ('pLI' %nin% cnames) df$pLI <- NA
if ('shape' %nin% cnames) df$shape <- 21
if ('opacity' %nin% cnames) df$opacity <- 1
#if ('gg.size' %nin% cnames) df$gg.size <- 3.5 # deprecated
if ('size_gg' %nin% cnames) df$size_gg <- 3.5
if ('gene' %nin% cnames) df$gene <- NA
if ('size' %nin% cnames) df$size <- 9
if ('legend_order' %nin% cnames) df$legend_order <- NA
if ('symbol' %nin% cnames) df$symbol <- shape_to_symbol(df$shape)
#if ('symbol' %nin% cnames) df$symbol <- 'circle'
return(as.data.frame(df))
}
|
f9a84c2e302090b2efddc355be90dbb070cc3cb5 | 5b6a014264b1aed12d6e5894ee49bf3db2fe7750 | /man/dc.MergeTransactionsOnSameDate.Rd | 98b8e4812e5b7cff53fca3df0e9510c3e474336d | [
"MIT"
] | permissive | cnasher/BTYD | 5f91fc8b217aeae19c1ef0e01e0fa438a0f49c81 | af96f294cedaa892d563e6bcde945267b63229d8 | refs/heads/master | 2021-01-14T10:30:53.864487 | 2014-01-07T06:03:14 | 2014-01-07T06:03:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 638 | rd | dc.MergeTransactionsOnSameDate.Rd | \name{dc.MergeTransactionsOnSameDate}
\alias{dc.MergeTransactionsOnSameDate}
\title{Merge Transactions on Same Day}
\usage{
dc.MergeTransactionsOnSameDate(elog)
}
\arguments{
\item{elog}{event log, which is a data frame with columns
for customer ID ("cust"), date ("date"), and optionally
other columns such as "sales". Each row represents an
event, such as a transaction.}
}
\value{
Event log with transactions made by the same customer on
the same day merged into one transaction.
}
\description{
Updates an event log; any transactions made by the same
customer on the same day are combined into one
transaction.
}
|
59b3c3b109c3020514d53cd31e1c4901fbd53d76 | 12b6e3ae125d1e684ca91576dc0e29f6577dc2cb | /inst/extdata/template.R | b426fcab7ca224286620b4db1a7b8f6684aece16 | [] | no_license | simschul/my.utils | dbe92e721fe414a57fe6e0356439bc0458344aaf | 6c353f9fb88ed60f451ed249f7b910f4b00e2504 | refs/heads/master | 2023-06-21T16:55:06.964546 | 2023-06-20T09:05:15 | 2023-06-20T09:05:15 | 168,372,744 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,045 | r | template.R | #'
#'
#'
#' @author {{author}}
#' Date: {{date}}
#'
#' Content:
#'
############################################################################## #
##### load packages ############################################################
############################################################################## #
library(data.table)
library(tidyverse)
library(units)
library(ggforce)
library(my.utils)
############################################################################## #
##### settings #################################################################
############################################################################## #
options("datatable.print.class" = TRUE)
theme_set(theme_bw())
############################################################################## #
##### load data #############################################################
############################################################################## #
# THE END ---------------------------------------------------------------------
|
296964c13ff2ac872a90268621813cb2de7c1ecd | c1d5a107e2947b9d60c4fc6bc33426312dfded1b | /ui.R | 471c5bc1819174b9dc391c0df5c5d5195b449730 | [] | no_license | JorisVdBos/leningenvergelijker | f2756f07e2e6943cb4cf5d81cad7ac7223029511 | 096b5ab09ed937fb07f1d60a9690cb94e69b3819 | refs/heads/master | 2021-01-19T00:54:06.909925 | 2017-08-21T20:24:11 | 2017-08-21T20:24:11 | 87,216,330 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,824 | r | ui.R | # Header ----
dbHeader <- dashboardHeader(title = "Leningen vergelijker")
sidebar <- dashboardSidebar(
sidebarMenu(
menuItem(text = "Inleiding",
tabName = "inleiding",
icon = icon("home")),
menuItem(text = "Simuleer lening",
tabName = "simLen",
icon = icon("calculator")),
menuItem(text = "Vergelijk leningen",
tabName = "vergLen",
icon = icon("line-chart")),
menuItem(text = "Meer informatie",
tabName = "meerInfo",
icon = icon("question"))
)
)
# Body ----
body <- dashboardBody(
shinyjs::useShinyjs(),
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "shinydashboard-0.5.1/shinydashboard.css"),
tags$link(rel = "stylesheet", type = "text/css", href = "AdminLTE-2.0.6/AdminLTE.min.css"),
tags$link(rel = "stylesheet", type = "text/css", href = "AdminLTE-2.0.6/_all-skins.min.css")
),
tabItems(
tabItem(
tabName = "inleiding",
h1("Welkom!"),
p("Welkom op mijn applicatie! Als je hier bent, wil dat zeggen dat je een lening wil aangaan bij de bank. Net als jij, wou ik de beste lening op de markt te pakken krijgen en samen met mijn partner klopte ik aan bij verschillende banken voor informatie. Om de verschillende leningen en banken onderling te vergelijken, maakten we een excelbestand met alle leningvoorstellen. We merkten dat zelfs met enkele excel truukjes we moeilijk vat kregen op de waarde van de verschillende voorstellen en we vaak teruggrepen naar de aflostabellen van de banken. Daarom maakte ik een leningsimulator in de computertaal 'R'. Zo konden we de leningen vergelijken hoe we het zelf wilden en begon ik ook vragen te beantwoorden in verband met inflatie, verzekeringskosten en beleggen. Ondertussen hebben we een lening afgesloten, en kwam het idee om mijn code in een applicatie te gieten, zodat andere mensen er gebruik van kunnen maken!."),
p("Om te beginnen, ga naar het tabblad 'Simuleer lening' om leningen toe te voegen en aflostabellen te simuleren. Om de verschillende leningen naast elkaar te vergelijken, ga naar het tabblad 'Vergelijk leningen'. Om meer over deze applicatie te weten te komen, ga naar het tabblad 'Meer informatie'. Veel succes met de zoektocht naar je perfecte lening!"),
h2("Privacy"),
p("Geen enkele informatie ingegeven in deze applicatie-website wordt ergens opgeslagen. Hij maakt geen gebruik van cookies of iets dergelijks. Om je gegevens te bewaren, kan je je bankvoorstellen exporteren in een '.feather' bestand en opslaan op je computer. Later kan je dit bestand opnieuw inladen om verder te werken. Opgepast: Dit betekent ook dat als je de pagina ververst, alle ingevoerde gegevens worden gewist!"),
HTML("<p>De code van deze applicatie is volledig openbaar en terug te vinden op <a href=\"https://github.com/JorisVdBos/leningenvergelijker\">mijn github account</a>.</p>")
),
# Lening simulatie ----
tabItem(
tabName = "simLen",
h1("Simuleer een lening"),
#p("Een huis of appartement gekocht? Proficiat! Maar hola, de zoektocht is nog niet afgelopen! Een goede lening vinden kan je duizenden euro's besparen, dus een nieuwe zoektocht gaat van start. Algouw ligt je keukentafel vol met papieren met letterlijk duizenden cijfertjes. Bank A geeft een betere rentevoet, maarja bank B heeft dan weer goedkopere verzekeringen! Economisch gezien moet je denken aan inflatie en zo weinig mogelijk lenen, maar fiscaal gezien moet je dan weer zo lang mogelijk lenen. Vriend 1 zegt dit en vriend 2 zegt dat, maar welke lening is nu de beste?"),
h2("Voorbeeld"),
p("Onderaan zie je al drie leningen als voorbeeld ingevuld. Zij stemmen overeen met het volgende volledig fictieve voorbeeld:"),
p("Tine heeft een appartement gekocht en wil een lening van 150.000 euro aangaan over 25 jaar. Ze verdient 1500 euro netto per maand en houdt 800 euro per maand over voor de lening en om te sparen. Na de aankoop van het huis heeft ze nog 3.000 euro aan spaargeld over, wat ze voor 65% in een beleggingsportefeille houdt. De laatste jaren brachten haar beleggingen haar een gemiddelde rente van 5% per jaar op."),
p("In bank 1 wordt haar een lening op vaste rentevoet aangeboden aan 2,5%. In de bank 2 raden ze een lening aan 1,9% aan op variabele rentevoet, met herziening om de drie jaar. Hun verzekeringen zijn goedkoper dan bank 1. Bank 3 is duurder dan de andere twee banken, maar zij hebben lagere dossierkosten. Zij raden een gecombineerde lening aan, van 100.000 euro aan 2,5% vast over 25 jaar en 50.000 euro variabel aan 1,9%, herzien om de drie jaar over 15 jaar."),
p("Selecteer een lening in de tabel en klik op de knop 'Start simulatie' om de aflostabel van de lening te bekijken. Bekijk ook zeker de grafieken onder het tabblad 'Grafiek'. Als je een vergelijking van de drie grafieken wil bekijken, ga dan naar 'vergelijk leningen'. Dit kan je terugvinden door op de drie streepjes te klikken bovenaan de pagina."),
h2("Zelf aan de slag"),
p("In de tab 'nieuwe lening' kan je zelf leningen aan deze tabel toevoegen."),
# Invoer simulator ----
tabsetPanel(
tabPanel(
"Opgeslagen leningen",
wellPanel(
p("In deze tabel vind je alle informatie over de leningen terug. Van links naar rechts vind je:"),
HTML("<ul>
<li>Het te lenen bedrag</li>
<li>Het type lening: Vast of variabel</li>
<li>Indien variabel hoeveel jaar tot herziening</li>
<li>De rentevoet</li>
<li>looptijd van de lening</li>
<li>Eenmalige kosten van de lening, zoals de dossierkosten</li>
<li>Maandelijkse kosten zijn de kosten van de rekeningen, bankkaarten en ook maandelijkse verzekeringen zoals bijvoorbeeld de schuldsaldoverzekering</li>
<li>Jaarlijkse kosten zijn bijvoorbeeld de brandverzekering</li>
<li>De inflatie. In België was deze 1,97 % en 2,20 % respectievelijk in 2016 en 2017</li>
<li>Je vermogen bij de start van de lening (na de aankoop van je huis.)</li>
<li>Je maandelijks inkomsten min de vaste kosten is het bedrag dat je overhoudt na het aftrekken van je vaste kosten zoals eten en elektriciteit van je maandelijkse loon. Dit is het bedrag dat je zal gebruiken om je lening af te betalen en de extra kosten te bekostigen. Het overschot van dit bedrag wordt gespaard en eventueel belegd.</li>
<li>Hoeveel procent van je spaarpot je in beleggingen zal steken</li>
<li>Hoeveel deze beleggingen zullen opbrengen. Er wordt aangenomen dat geld op de spaarrekening niets opbrengt!</li></ul>"),
br(),
br(),
dataTableOutput("leningenDT"),
fluidRow(
column(
width = 4
),
column(
width = 4,
align = "right",
br(),
actionButton(
"leningenVerw",
"Verwijder geselecteerde lening"
)
),
column(
width = 4,
align = "left",
br(),
div(id = "leningenVerwAllesDiv",
actionButton(
"leningenVerwAlles",
"Verwijder alle opgeslagen leningen")
),
div(id = "leningenVerwAlles2Div",
actionButton(
"leningenVerwAlles2",
"Ben je zeker?",
styleclass = "danger")
)
)
),
br(),
br(),
p("Sla deze tabel op, zodat je later verder kan werken:"),
fluidRow(
column(
width = 4,
""
),
column(
width = 4,
br(),
downloadButton(
"leningenExp",
"Exporteer tabel"
)
),
column(
width = 4,
br(),
fileInput(
"leningenImp",
"Importeer tabel",
multiple = FALSE,
accept = "RData"
),
div(id = "leningenImpError",
p(em(HTML("<font color='red'>Gelieve een naam in te voeren.</font>"))))
)
)
),
fluidRow(
column(
width = 8,
align="center",
br(),
br(),
actionButton(
"lenBereken2",
label = "Start simulatie",
styleclass = "success"),
br(),
div(id = "lenBereken2Error",
p(em(HTML("<font color='red'>Gelieve een lening aan te duiden in bovenstaande tabel.</font>")))),
br(),
br(),
br()
)
)
),
# Nieuwe lening ----
tabPanel(
"Nieuwe lening",
wellPanel(
fluidRow(
column(
width = 6,
textInput("lenBedr",
"Te lenen bedrag in euro:",
placeholder = "150000"),
div(id = "lenBedrError",
em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")),
br()),
br(),
radioButtons("lenVastOfVar",
"Variabel of vaste rentevoet",
choices = c("Vast", "Variabel")),
div(id = "lenVariabelOptie",
textInput("lenVarType",
"Herziening jaren:",
placeholder = "3"),
div(id = "lenVarTypeError",
em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")),
br()),
paste0("Opmerking: De simulatie gaat steeds van het slechste scenario uit: Dat bij de ",
"eerste herziening van de rentevoet, deze verdubbelt met een maximum van 2%.")),
textInput("lenRV",
"Rentevoet in %:",
placeholder = "2,5"),
div(id = "lenRVError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>"))),
br()),
textInput("lenJaar",
"Jaar:",
placeholder = "25"),
div(id = "lenJaarError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
br(),
actionButton(
"lenVoegToe",
"Voeg toe",styleclass = "warning"),
actionButton(
"lenLaatsteWeg",
"Verwijder laatse invoer"),
actionButton(
"lenAllesWeg",
"Verwijder alles"),
br(),
br(),
br(),
br(),
dataTableOutput("lenInputDT"),
div(id = "lenSamError",
p(em(HTML("<font color='red'>Voeg een lening toe met de knop 'Voeg toe'.
Op deze manier kan je je lening opdelen in verschillende delen!</font>"))))
),
column(
width = 6,
p(paste0("Optionele extra informatie waar de ",
"simulatie rekening mee kan houden:")),
checkboxInput(
"kostenCheck",
"Kosten"),
div(
id = "lenKostendiv",
textInput("lenKost1",
"Eenmalige kosten, zoals bijvoorbeeld dossierkosten: ",
placeholder = "500"),
div(id = "lenKost1Error",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
textInput("lenKostM",
paste0("Maandlijke kosten, zoals bijvoorbeeld ",
"bankrekeningkosten of schuldsaldo verzekering: "),
placeholder = "162,62"),
div(id = "lenKostMError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
textInput("lenKostJ",
paste0("Jaarlijkse kosten, zoals bijvoorbeeld ",
"brandverzekerning: "),
placeholder = "256,3")),
div(id = "lenKostJError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
checkboxInput(
"inflCheck",
"Inflatie",
value = FALSE),
div(
id = "lenInfldiv",
paste0("Naast extra berekingen worden",
"volgende waarden per maand aangepast aan de inflatie: ",
"Extra kosten van de lening, je maandelijks sparen. ",
"Deze worden normaal door de jaren wel aangepast door ",
"de bank en jezelf."),
textInput("lenInfl",
"Inflatie in percent per jaar: ",
placeholder = "2,0")),
div(id = "lenInflError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
checkboxInput(
"vermogenCheck",
"Vermogen bijhouden",
value = FALSE),
div(
id = "lenVermdiv",
textInput("lenVermStart",
"Vermogen bij start ingang lening:",
placeholder = "5000"),
div(id = "lenVermStartError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
textInput("lenVermInk",
"Beschikbaar maandelijks bedrag na vaste kosten:",
placeholder = "800"),
p(paste0("Voorbeeld: Je verdient 1500 euro netto. Na je vaste kosten zoals electriciteit, eten ",
"en andere diverse maandelijkse kosten, blijft er nog 800 euro over voor je lening en te sparen. ",
"Als de gesimuleerde lening een afbetaling van 600 euro uitkomt, zal het de overige 200 euro ",
"gerekend worden als spaargeld. Hieronder kan je nog specifieren of je dit bedrag belegt of niet.")),
div(id = "lenVermInkError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
textInput("lenVermBelPerc",
"Percentage van gespaard vermogen in beleggingen:",
value = 0,
placeholder = "45"),
div(id = "lenVermBelPercError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>")))),
textInput("lenVermBelOpbrPerc",
"Opbrengstpercentage van belegd vermogen per jaar:",
placeholder = "2.0"),
div(id = "lenVermBelOpbrPercError",
p(em(HTML("<font color='red'>Gelieve een correct getal in te geven.</font>"))))
)
)
)
),
fluidRow(
column(
width = 8,
align="center",
br(),
br(),
br(),
actionButton(
"lenBereken",
label = " Start simulatie",
styleclass = "success"),
br(),
br()
),
column(
width = 4,
"Deze lening opslaan:",
textInput("lenBank",
"Naam van de bank:"),
div(id = "lenBankError",
p(em(HTML("<font color='red'>Gelieve een naam in te voeren.</font>")))),
div(id = "lenBankError2",
p(em(HTML("<font color='red'>Deze naam bestaat al!</font>")))),
actionButton(
"lenOpslaan",
"Opslaan"),
div(id = "lenBankSucces",
p(em(HTML("<font color='green'>Je lening werd aan de tabel toegevoegd!</font>")))),
br(),
br()
)
)
)
),
# Lening resultaat ----
fluidPage(
div(id = "leningBerekenBds",
"Berekenen..."),
div(
id = "leningResultaat",
wellPanel(
uiOutput("lenBeschrijving")
),
tabsetPanel(
tabPanel(
"Aflostabel",
dataTableOutput("lenAflossingstabel"),
downloadButton(
"lenAflossingstabelExport",
"Exporteer aflossingstabel (.csv)"
)
),
tabPanel(
"Grafiek",
wellPanel(
uiOutput("grafiekKolommenUI"),
uiOutput("grafiekStartDatumUI"),
checkboxInput("grafiekInflatie", "Inflatie inrekenen"),
sliderInput("grafiekInflatiePerc", "Inflatie percentage:",
min = -10, max = 10, value = 2, step = 0.1),
checkboxInput("grafiekCumulatief", "Per maand")
),
wellPanel(
plotOutput("grafiekPlot")
),
dataTableOutput("grafiekTabel"),
downloadButton(
"grafiekExport",
"Exporteer grafiek data (.csv)"
)
)
)
)
)
),
# Vergelijk leningen ----
tabItem(
tabName = "vergLen",
h1("Vergelijk leningen"),
p("Op deze pagina worden al je ingegeven leningen naast elkaar gelegd en vergeleken. Wij vonden de grafiek die het vermogen weergeeft doorheen de jaren de meest doorslaggevende. Dit is namelijk het geld dat je in je handen overhoudt doorheen de jaren. Voor Tine lijkt in dat geval Bank 3 de beste keuze. Omdat ze in het begin meer afbetaalt, zal ze naar het einde van de 25 jaren veel kunnen sparen en uiteindelijk veel meer overhouden. Voor we deze colclusie kunnen trekken moeten we echter even stilstaan bij de aannames die bij de simulatie horen."),
h2("Belangrijke opmerkingen"),
p("De simulatie gaat er van uit dat het beschikbare bedrag er zal zijn doorheen de looptijd van de lening en dat het gespaarde geld nooit wordt aangesproken. We nemen zelfs aan dat je maandelijks bedrag meegroeit met de inflatie! Dit kan niet altijd het geval zijn, door ziekte, veranderen van werk, etc. kunnen maandelijkse inkomsten plots veranderen in goede of slechte zin. Ook kan het zijn dat het spaargeld wordt aangesproken voor een vakantie of dure aankoop. In onderstaande grafiek is het duidelijk dat Tine de eerste 15 jaar zeer weinig zal kunnen spenderen aan andere dingen dan aan de lening. Dit is een risico waar zeker aandacht aan gespendeerd moet worden. Om het risico te verkleinen zou ze nieuwe simulatie kunnen maken waarin het aandeel in de lening op 15 jaar kleiner is, wat het risico zou verkleinen."),
p("Een tweede aanname is dat het beleggingsopbrengstpercentage en de inflatie hetzelfde zal blijven gedurende de looptijd van de lening. Een slechte belegging kan hierdoor roet in het eten gooien. Ook hier is het aan te raden om een appel voor de dorst achter te houden om het risico te verkleinen."),
p("De variable rentevoeten worden aangenomen steeds naar het maximum te stijgen na de eerste herziening (banken noemen dit het 'worst-case scenario'). In de tijd van dit schrijven, in 2017, was dit de verwachting. In de vermogen grafiek van Tine zie je de aanpassing van de rentevoet in de 'knik' in de grafiek na drie jaar. (Deze is ook aanwezig in het voorstel van Bank 3, maar niet zo goed zichtbaar.) Het is onwaarschijnlijk, maar wel mogelijk dat de rentevoeten toch laag blijven, en Tine de volledige looptijd aan een lagere rentevoet terugbetaalt en dus beter uitkomt dan in de simulatie berekend werd."),
p("Een laatste aanname die de simulatie maakt is dat er geen vervroegde leningsafbetalingen gebeuren tijdens de looptijd. Vergeet niet dat je op eender welk moment een deel van je lening vervroegd kan aflossen. Soms kan het voordelig zijn je lening af te betalen."),
wellPanel(
dataTableOutput("vergLenInputDT"),
br(),
actionButton("vergLenButton", label = "Start vergelijking!", styleclass = "success")
),
div(id = "lenBerekenBds",
"Berekenen..."),
div(
id = "lenResultaat",
wellPanel(
dataTableOutput("vergLenOutputDT"),
downloadButton(
"vergLenAflossingstabelExport",
"Exporteer vergelijkingstabel (.csv)"
)
),
wellPanel(
uiOutput("vergGrafiekKolommenUI"),
uiOutput("vergGrafiekStartDatumUI"),
checkboxInput("vergGrafiekInflatie", "Inflatie inrekenen"),
sliderInput("vergGrafiekInflatiePerc", "Inflatie percentage:",
min = -10, max = 10, value = 2, step = 0.1),
checkboxInput("vergGrafiekCumulatief", "Per maand")
),
wellPanel(
plotOutput("vergGrafiekPlot")
),
dataTableOutput("vergGrafiekTabel"),
downloadButton(
"vergGrafiekExport",
"Exporteer grafiek data (.csv)"
)
)
),
tabItem(
tabName = "meerInfo",
h1("Meer informatie over deze applicatie"),
fluidPage(
fluidRow(HTML(paste0(
"<p>In de zomer van 2017 bouwde ik deze applicatie, geïnspireerd door mijn eigen zoektocht naar een lening. Alle vragen, aanbevelingen of opmerkingen over deze applicatie zijn uiterst welkom. Contacteer mij via <a href=\"mailto:joris.bdbossche@gmail.com\">mijn email</a> of via onderstaande kanalen.</p>",
"<p>De code van deze applicatie is volledig openbaar en terug te vinden op <a href=\"https://github.com/JorisVdBos/leningenvergelijker\">mijn github account</a>.</p>",
"<p>Mijn LinkedIn account:</p>",
"<br>",
"<script src=\"//platform.linkedin.com/in.js\" type=\"text/javascript\"></script>
<script type=\"IN/MemberProfile\" data-id=\"https://www.linkedin.com/in/joris-van-den-bossche-8a12b943\" data-format=\"inline\" data-related=\"false\"></script>",
"<br>",
"<p>Volg mij op Twitter!<br>
<a href=\"https://twitter.com/Joris_VdB_\" class=\"twitter-follow-button\" data-show-count=\"false\" data-size=\"large\">Follow @Joris_VdB_</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script></p>
"
)))
)
)
)
)
# Assemble the final shinydashboard UI from the pieces built earlier in this
# file (dbHeader, sidebar, body); "green" selects the dashboard colour skin.
ui <- dashboardPage(skin = "green",
                    header = dbHeader,
                    sidebar = sidebar,
                    body = body)
|
3bf30b441972c93fff7e4b8931568fc579139b75 | 2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89 | /man/ProbeLevelTransform3.Rd | c4c864c1fc9a303f9e890798896a4b4e663f5b0f | [] | no_license | HenrikBengtsson/aroma.affymetrix | a185d1ef3fb2d9ee233845c0ae04736542bb277d | b6bf76f3bb49474428d0bf5b627f5a17101fd2ed | refs/heads/master | 2023-04-09T13:18:19.693935 | 2022-07-18T10:52:06 | 2022-07-18T10:52:06 | 20,847,056 | 9 | 4 | null | 2018-04-06T22:26:33 | 2014-06-15T03:10:59 | R | UTF-8 | R | false | false | 3,521 | rd | ProbeLevelTransform3.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ProbeLevelTransform3.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{ProbeLevelTransform3}
\docType{class}
\alias{ProbeLevelTransform3}
\title{The ProbeLevelTransform3 class}
\description{
Package: aroma.affymetrix \cr
\bold{Class ProbeLevelTransform3}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.core]{AromaTransform}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{Transform}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{ProbeLevelTransform}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\emph{\code{ProbeLevelTransform3}}\cr
\bold{Directly known subclasses:}\cr
\emph{\link[aroma.affymetrix]{AbstractProbeSequenceNormalization}}, \link[aroma.affymetrix]{BaseCountNormalization}, \emph{\link[aroma.affymetrix]{BasePositionNormalization}}, \emph{\link[aroma.affymetrix]{LinearModelProbeSequenceNormalization}}, \link[aroma.affymetrix]{MatNormalization}, \link[aroma.affymetrix]{ScaleNormalization3}, \link[aroma.affymetrix]{UnitTypeScaleNormalization}\cr
public abstract static class \bold{ProbeLevelTransform3}\cr
extends \emph{\link[aroma.affymetrix]{ProbeLevelTransform}}\cr
This abstract class is specialized from \code{\link{ProbeLevelTransform}} and
provides methods to identify subsets and types of probes that are used
for fitting and/or updating the signals.
}
\usage{
ProbeLevelTransform3(dataSet=NULL, ..., unitsToFit="-XY", typesToFit=typesToUpdate,
unitsToUpdate=NULL, typesToUpdate="pm", shift=0)
}
\arguments{
\item{dataSet}{A \code{\link{AffymetrixCelSet}}.}
\item{...}{Arguments passed to the constructor of
\code{\link{ProbeLevelTransform}}.}
\item{unitsToFit}{The units from which the normalization curve should
be estimated. If \code{\link[base]{NULL}}, all are considered.}
\item{typesToFit}{Types of probes to be used when fitting the model.}
\item{unitsToUpdate}{The units to be updated.
If \code{\link[base]{NULL}}, all are considered.}
\item{typesToUpdate}{Types of probes to be updated.}
\item{shift}{An optional amount to shift data before fitting and updating.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\emph{No methods defined}.
\bold{Methods inherited from ProbeLevelTransform}:\cr
getRootPath
\bold{Methods inherited from Transform}:\cr
getOutputDataSet, getOutputFiles
\bold{Methods inherited from AromaTransform}:\cr
as.character, findFilesTodo, getAsteriskTags, getExpectedOutputFiles, getExpectedOutputFullnames, getFullName, getInputDataSet, getName, getOutputDataSet, getOutputDataSet0, getOutputFiles, getPath, getRootPath, getTags, isDone, process, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
24478034553978b1ef42cf8da01dd7b9ac24f4b8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/popkin/examples/fst.Rd.R | 2398d0375939705acd19bafbeb392d02350e95e7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,035 | r | fst.Rd.R | library(popkin)
### Name: fst
### Title: Extract FST from a population-level kinship matrix or vector of
### inbreeding coefficients
### Aliases: fst
### ** Examples
## Get FST from a genotype matrix
## Construct toy data
X <- matrix(c(0,1,2,1,0,1,1,0,2), nrow=3, byrow=TRUE) # genotype matrix
subpops <- c(1,1,2) # subpopulation assignments for individuals
## NOTE: for BED-formatted input, use BEDMatrix!
## "file" is path to BED file (excluding .bed extension)
# library(BEDMatrix)
# X <- BEDMatrix(file) # load genotype matrix object
## estimate the kinship matrix "Phi" from the genotypes "X"!
Phi <- popkin(X, subpops) # calculate kinship from X and optional subpop labels
w <- weightsSubpops(subpops) # can weigh individuals so subpopulations are balanced
Fst <- fst(Phi, w) # use kinship matrix and weights to calculate fst
Fst <- fst(Phi) # no weights implies uniform weights
inbr <- inbr(Phi) # if you extracted inbr for some other analysis...
Fst <- fst(inbr, w) # ...use this inbreeding vector as input too!
|
efa30023e3509e0678c6745f591d099db7d471d7 | faad2c40617e265b5b0dd393e5d11a117e79afbc | /Regression/script.R | a26177e32860c8eb71a4c51c68d7192349b2b197 | [] | no_license | Ankur3107/Vortex17-machine-learning | 73b653aed34bd616de056cad10e6df52357e43cf | be339fb03a67563dbaf318c906562d54d216d96e | refs/heads/master | 2021-01-09T05:52:21.936429 | 2017-02-04T11:55:00 | 2017-02-04T11:55:00 | 80,853,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,115 | r | script.R |
train_data <- read.csv("Dataset-2/competition_second_train.csv",header = FALSE)
test_data <- read.csv("Dataset-2/competition_second_test.csv",header = FALSE)
sample_submission <- read.csv("Dataset-2/competition_second_sample.csv")
str(train_data)
str(test_data)
names(train_data)
names(test_data)
names(sample_submission)
summary(train_data)
head(test_data)
test_data$V76 <- NA
all_data <- rbind(train_data,test_data)
###### NA imputation #########
# First function
# Return the names of all columns of `train` that contain at least one NA.
# Yields character(0) when no column has missing values.
naCol <- function(train){
  na_counts <- colSums(is.na(train))
  names(which(na_counts > 0))
}
all_na_col <- naCol(all_data)
# Second function
# Split a set of column names into integer-typed and non-integer-typed groups.
#
# df   : data.frame whose columns are inspected.
# nadf : character vector of column names to classify (typically the columns
#        containing missing values, as returned by naCol()).
# n    : number of leading entries of `nadf` to classify; defaults to all of
#        them. Bug fix: the original hard-coded `1:18` in the loop, silently
#        ignoring `n` and producing NA entries whenever length(nadf) != 18.
#
# Returns list(intType = <integer columns>, factorType = <other columns>);
# as in the original c()-based version, an empty group comes back as NULL.
missingTypeVariable <- function(df, nadf, n = length(nadf)){
  cols <- nadf[seq_len(n)]
  # inherits() is the robust way to test a column's class
  is_int <- vapply(cols, function(nm) inherits(df[[nm]], "integer"), logical(1))
  intType <- cols[is_int]
  factorType <- cols[!is_int]
  if (length(intType) == 0) intType <- NULL
  if (length(factorType) == 0) factorType <- NULL
  return(list(intType = intType, factorType = factorType))
}
all_NA_Missing_Type <- missingTypeVariable(all_data,all_na_col)
all_NA_int_type <- unlist(all_NA_Missing_Type[1])
all_NA_factor_type <- unlist(all_NA_Missing_Type[2])
#integer type correlation with target
cor(train_data[,all_NA_int_type[1:3]],train_data$V76,use="pairwise.complete.obs")
#factor type correlation with target
library(ggplot2)
ggplot(train_data,aes(train_data$V76,train_data[,all_NA_factor_type[2]])) + geom_boxplot()
#imputing int type variable
all_data$V23[is.na(all_data[23])] <- 0
all_data$V55[is.na(all_data$V55)] <- 1980
qplot(all_data[4])
all_data$V4[is.na(all_data$V4)] <- 70
all_data$V4 <- ifelse(all_data$V4>150,70,all_data$V4)
#imputing factor type variable
summary(all_data[all_NA_factor_type])
#mice work
library(mice)
Dat1 <- subset(all_data, select=c(V7,V27,V28,V29,V30,
V32,V39,V54,V56,V59,
V60,V68,V69,V70))
imp <- mice(Dat1, m=3, maxit=10)
all_data$V7[is.na(all_data$V7)] <- imp$imp$V7$`3`
all_data$V27[is.na(all_data$V27)] <- imp$imp$V27$`3`
all_data$V28[is.na(all_data$V28)] <- imp$imp$V28$`3`
all_data$V29[is.na(all_data$V29)] <- imp$imp$V29$`3`
all_data$V30[is.na(all_data$V30)] <- imp$imp$V30$`3`
all_data$V32[is.na(all_data$V32)] <- imp$imp$V32$`3`
all_data$V39[is.na(all_data$V39)] <- imp$imp$V39$`3`
all_data$V54[is.na(all_data$V54)] <- imp$imp$V54$`3`
all_data$V56[is.na(all_data$V56)] <- imp$imp$V56$`3`
all_data$V59[is.na(all_data$V59)] <- imp$imp$V59$`3`
all_data$V60[is.na(all_data$V60)] <- imp$imp$V60$`3`
all_data$V68[is.na(all_data$V68)] <- imp$imp$V68$`3`
all_data$V69[is.na(all_data$V69)] <- imp$imp$V69$`3`
all_data$V70[is.na(all_data$V70)] <- imp$imp$V70$`3`
####### modelling ######
m_train_data <- all_data[1:1050,]
m_test_data <- all_data[1051:1460,]
m_test_data$V76 <- NULL
feature.names <- names(m_train_data)
feature.names <- feature.names[feature.names!= "V1" & feature.names!="V76"]
library(xgboost)
set.seed(1960)
h<-sample(nrow(m_train_data),floor(0.3*nrow(m_train_data)))
train_sample <- m_train_data[-h,]
train_val <- m_train_data[h,]
dval<-xgb.DMatrix(data=data.matrix(train_val[,feature.names]),label=train_val[,76])
dtrain<-xgb.DMatrix(data=data.matrix(train_sample[,feature.names]),label=train_sample[,76])
watchlist<-list(val=dval,train=dtrain)
xg.test <- m_test_data[,feature.names]
# xgboost hyper-parameters for the regression model.
# Note: objective "reg:linear" is evaluated with RMSE, a loss to be MINIMISED.
param <- list( objective = "reg:linear",
               booster = "gbtree",
               eta = 0.48,
               max_depth = 4, #7
               subsample = 0.9,
               colsample_bytree = 0.9
               )
set.seed(1429)
# Train with early stopping monitored on the validation set (dval in
# `watchlist`). Bug fix: maximize was TRUE, but the default metric for
# reg:linear is RMSE; maximizing it makes early stopping keep the WORST
# rounds and stop almost immediately. It must be FALSE here.
clf <- xgb.train( params = param,
                  data = dtrain,
                  nrounds = 2000,
                  verbose = 1,
                  early.stop.round = 100,
                  watchlist = watchlist,
                  maximize = FALSE
                  )
pred_noexp=predict(clf,data.matrix(m_test_data[,feature.names]))
solutionXgBoost<- data.frame(Id = m_test_data$V1, Prediction = pred_noexp)
write.csv(solutionXgBoost, file = 'solutionXgBoost.csv', row.names = F)
|
998cb79e32cfadef44d31950c61a64e4997a73a6 | 32886d6690229de10c70952678bc085af1395ff6 | /Hackerrank/Viral_Advertising.R | d35ffe242f7465e0429caecfac867e1c047c01aa | [] | no_license | jimmyyih518/ChallengeR | 4334276a24713b49d3af1aad28d73b6842e7096a | 3b9e0ca0cf5a2e4432f279296a6212c6a136f6ca | refs/heads/master | 2022-12-07T22:58:13.744239 | 2020-08-19T03:26:44 | 2020-08-19T03:26:44 | 280,044,701 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 409 | r | Viral_Advertising.R | # Enter your code here. Read input from STDIN. Print output to STDOUT
f <- file("stdin")
open(f)
n = as.numeric(readLines(f, n = 1))
# HackerRank "Viral Advertising": on day 1 an ad is shared with 5 people;
# each day floor(shared/2) of the recipients like it, and each liker shares
# it with 3 new people the next day. Returns the cumulative number of likes
# after `n` days (n >= 1).
#
# Bug fix: the original looped over 2:n, which for n == 1 expands to
# c(2, 1) and runs the loop twice (returning 9) instead of zero times
# (correct answer: 2). seq_len(n - 1) yields an empty sequence for n == 1.
viralAdvertising <- function(n){
    shared <- 5
    cumulative <- floor(shared/2)
    for(i in seq_len(n - 1)){
        shared <- floor(shared/2)*3
        cumulative <- cumulative + floor(shared/2)
    }
    return(cumulative)
}
cat(viralAdvertising(n)) |
f7f2bf7d217f3a3d2d66096a22c1d44fbbf47a0d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Ake/examples/kpmfe.fun.Rd.R | 706dc5fa412589fe714ad1f19dab840864cdeed6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 526 | r | kpmfe.fun.Rd.R | library(Ake)
### Name: kpmfe.fun
### Title: Function for associated kernel estimation of p.m.f.
### Aliases: kpmfe.fun kpmfe.fun.default
### ** Examples
## A sample data with n=60.
V<-c(10,0,1,0,4,0,6,0,0,0,1,1,1,2,4,4,5,6,6,6,6,7,1,7,0,7,7,
7,8,0,8,12,8,8,9,9,0,9,9,10,10,10,10,0,10,10,11,12,12,10,12,12,
13,14,15,16,16,17,0,12)
##The bandwidth can be the one obtained by cross validation.
h<-0.081
## We choose Binomial kernel.
est<-kpmfe.fun(Vec=V,h,"discrete","bino")
##To obtain the normalizing constant:
est
|
81fe8ff4fefe33a8e42992e6694fbc9cccca1c53 | 39038eab7d051410289342195d84c79ca8afb692 | /man/wrt_data.Rd | 9d77f4cd3da179f7801400bf8873dbb63b80ab44 | [] | no_license | nathanvaughan1/DST | 4b57f41f1c6b9b05eabba9bdb0784e36cba88160 | 3bb6cd8c5466d301053391da577184e28b95a463 | refs/heads/master | 2021-06-19T18:34:43.163605 | 2021-01-14T18:45:46 | 2021-01-14T18:45:46 | 159,189,519 | 2 | 1 | null | 2020-02-04T00:26:17 | 2018-11-26T15:18:28 | R | UTF-8 | R | false | true | 870 | rd | wrt_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_ss_files.R
\name{wrt_data}
\alias{wrt_data}
\title{write data file}
\usage{
wrt_data(datlist, outfile, overwrite = TRUE, verbose = FALSE)
}
\arguments{
\item{datlist}{List object created by \code{\link{rd_data}}.}
\item{outfile}{Filename for where to write new data file.}
\item{overwrite}{Should existing files be overwritten? Default=TRUE.}
\item{verbose}{Should there be verbose output while running the file?}
}
\description{
write Stock Synthesis data file from list object in R which was probably
created using \code{\link{rd_data}}
}
\seealso{
\code{\link{rd_starter}},
\code{\link{rd_forecast}}, \code{\link{rd_ctl}},
\code{\link{wrt_starter}}, \code{\link{wrt_forecast}},
\code{\link{wrt_data}}, \code{\link{wrt_ctl}}
}
\author{
Ian Taylor
}
\keyword{data}
\keyword{export}
|
ac85b6e4d04cc6a54fc9195ccb9b51ce8a882707 | eacaccac552e598e5e9b84a390458031888cf25d | /tests/testthat.R | 20bfd6d9ff980f2a77a8429d0b3bee4e1cd32830 | [
"MIT"
] | permissive | konradzdeb/ClasslessFun | 895444967cc12c6b6d898e74fad3692418fc4630 | 23fa79ad2c09b31a0be52c41fbd687f8b7481a80 | refs/heads/master | 2020-04-10T21:35:28.116770 | 2018-12-22T14:32:38 | 2018-12-22T14:32:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 68 | r | testthat.R | library(testthat)
library(ClasslessFun)
test_check("ClasslessFun")
|
ed5cff50fcbfc49df772ffe906e1d513f779842c | 29a2daaa4aadaa7e04987cf1aaed0501362028ae | /additional_functions/mcmc.spatial.time.energy.full.marginal.R | 3dfdbda23793fb2e4712e38f860bb3ba498d0d47 | [] | no_license | lfcampos/Space-and-time-with-fixed-K | e7a58d5efcb4b280fe355431a096679149b3c836 | 50b15dfdb8dcc63251362a825e6303d3e037dcf5 | refs/heads/master | 2022-10-28T08:24:54.517237 | 2020-06-08T07:47:28 | 2020-06-08T07:47:28 | 101,098,105 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,758 | r | mcmc.spatial.time.energy.full.marginal.R | # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# input:
# w: source intensity (current) of length k_curr + 1
# allocate_curr: vector of allocations (current) of length obs_num
# mu_curr: matrix of source locations (current)
# eparas: energy parameters one per source
# ewt: energy parameters one per source
# bk: list of breakpoints (one vec of ln num_time_breaks + 1 for each souce)
# num_time_breaks: number of breaks considered
# lambda: current rate vectors for time arrival dirichlet
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Testing: plot(energy, (ewti*dgamma(energy,eparasi3,eparasi3/eparasi1)+(1-ewti)*dgamma(energy,eparasi4,eparasi4/eparasi2)), col = time_bin[[i]], pch = 19, cex = 0.2)
# plot(spatial, col = rgb(0, 0, 0, alpha = 2*dpsf), pch = 19)
# hist(energy, prob = TRUE, breaks = 100)
# tmp = seq(min(energy), max(energy), 1)
# lines(tmp, dgamma(tmp, eparasi1[1],eparasi2[1]))
# fix_runs = mcmc_runs
# rjmcmc_run = tt
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# One fixed-dimension MCMC sweep for the spatial/time/energy finite-mixture
# model ("marginal" variant: a single spectral (mean, shape) pair is fitted
# per source and replicated across all of that source's time bins, keeping
# the output layout compatible with the per-bin parameterisation).
#
# Arguments (see the banner comment above for the data layout):
#   fix_runs        - number of inner Gibbs/Metropolis sweeps to perform
#   online_ordering - "reference" => relabel sources by proximity to mu_guess
#   rjmcmc_run      - outer RJMCMC iteration index (drives adaptive proposals)
#   w               - current mixture weights (k_curr sources + 1 background)
#   allocate_curr   - obs_num x mix_num 0/1 photon-allocation matrix
#   mu_curr         - 2 x k_curr matrix of source positions
#   eparas, ewt_all - energy parameters/weights (ewt_all is passed through)
#   k_curr, mix_num - number of sources; mix_num = k_curr + 1
#   bk, num_time_breaks, lambda, time_bin - per-component time-bin structure
#
# NOTE(review): `eparas` appears unused; the spectral parameters are read
# from `eparas_all` in the enclosing scope instead -- confirm intentional.
# NOTE(review): relies on many non-local objects (obs_num, energy, spatial,
# arrival_time, psf_cpp, rdirichlet, spectral_post, priors, proposal sds,
# ...), so it can only run inside the full sampler environment.
#
# Returns list(c(k_curr, mu_curr, w), allocate_curr, eparas_all, ewt_all,
#              lambda, time_bin, bk, num_time_breaks).
mcmc.spatial.time.energy.full <- function(fix_runs,online_ordering,rjmcmc_run,w,allocate_curr,mu_curr,eparas,ewt_all,k_curr,mix_num,bk,num_time_breaks, lambda, time_bin){
  # Number of time breaks is given for all sources
  if(length(num_time_breaks) == 1){
    num_time_breaks = rep(num_time_breaks, k_curr)
  }
  # # checks
  # length(num_time_breaks) == k_curr
  # length(bk) == num_time_breaks + 1
  # Standard MCMC updates
  for (t2 in 1:fix_runs){
    # Update photon allocations
    # probs[j, p] = unnormalised probability that photon p belongs to component j
    probs <- matrix(NA,mix_num,obs_num)
    for(i in 1:k_curr){
      dlambda = lambda[[i]][time_bin[[i]]]
      # extract parameters for each photon according to their time bin
      eparas_photon = eparas_all[[i]][time_bin[[i]]]
      eparasi1 = unlist(sapply(eparas_photon, function(x){x[1,1]}))
      eparasi2 = unlist(sapply(eparas_photon, function(x){x[2,1]}))
      # calculate likelihoods
      # component likelihood = weight * spatial PSF * gamma energy density * time intensity
      dpsf = psf_cpp(off.angle,ellip,slope,psf.norm,r0,spatial,mu_curr[,i])
      dE = dgamma(energy,eparasi2,eparasi2/eparasi1)
      probs[i,] = w[i]*dpsf*dE*dlambda
    }
    # update background probabilities
    # subset background lambda accourding to photon time arrival then if enery is bounded above by the max allowed energy.
    dlambda_back = lambda[[mix_num]][time_bin[[mix_num]]][energy <= max_back_energy]
    # prob(background photon) = (mixture weight) * (psf equivalent unif) * (uniform energy dist'n) * (relative time intensity)
    probs[mix_num,energy <= max_back_energy] <- w[mix_num]*(1/img_area)*(1/max_back_energy)*dlambda_back
    probs[mix_num,energy > max_back_energy] <- 0
    # Multinomial draw of a fresh allocation per photon from its column of probs
    allocate_curr <- t(matrix(unlist(lapply(1:obs_num, function(i) rmultinom(1, 1, probs[,i]))),ncol=obs_num)) # Don't need to normalize as rmultinom function does it automatically
    # Counts vector
    mix_num <- k_curr+1
    count_vector <- matrix(NA,mix_num,1)
    count_vector[1:mix_num] <- apply(allocate_curr[,1:mix_num],2,sum)
    # Update positions
    # Random-walk Metropolis on each source position; during adaptation the
    # step size shrinks with the number of photons assigned to the source.
    mu_prop <- mu_curr
    for (i in 1:k_curr){
      index <- allocate_curr[,i]==1
      if (count_vector[i]>0){
        # Adaptive version (eventually ended to ensure convegence)
        if (rjmcmc_run < adapt_end){
          mu_prop[,i] <- rnorm(2,mu_curr[,i],mu_adapt_prop_sd/sqrt(count_vector[i]))
        }
        # Non-adaptive version
        if (rjmcmc_run >= adapt_end){
          mu_prop[,i] <- rnorm(2,mu_curr[,i],mu_fixed_prop_sd)
        }
        # log acceptance ratio: PSF likelihood of assigned photons only
        logr <- sum(log(psf_cpp(off.angle,ellip,slope,psf.norm,r0,spatial[index,],mu_prop[,i])))-sum(log(psf_cpp(off.angle,ellip,slope,psf.norm,r0,spatial[index,],mu_curr[,i])))
        u <- runif(1,0,1)
        if(is.na(logr)==0){
          if (log(u) < logr){
            mu_curr[,i] <- mu_prop[,i]
          }
        }
      }
      # Have to make sure that sources without phoons assigned move (doesn't effect likelihood)
      if (count_vector[i]==0){
        if (rjmcmc_run < adapt_end){
          mu_curr[,i] <- c(runif(1,xlow,xup),runif(1,ylow,yup))
        } else {
          mu_curr[,i] <- rnorm(2,mu_curr[,i],mu_fixed_prop_sd)
        }
      }
    }
    # Order parameters by suspected sources intensities (associated with particular positions)
    # Greedy relabelling: match each reference position in mu_guess to its
    # nearest remaining source, then permute all per-source structures.
    if (k_curr > 1 & online_ordering =="reference"){
      to_order <- min(no_guess,k_curr)
      next_index <- which.min(apply((mu_curr-mu_guess[,1])^2,2,sum))
      next_index_store <- next_index
      # NOTE(review): if to_order == 1, 2:to_order expands to c(2, 1) and this
      # loop still runs -- confirm no_guess > 1 is guaranteed upstream.
      for (i in 2:to_order){
        next_order <- order(apply((mu_curr-mu_guess[,i])^2,2,sum))
        next_index_all <- setdiff(next_order,next_index_store)
        next_index_store <- c(next_index_store,next_index_all[1])
      }
      indexmu <- c(next_index_store,setdiff(1:k_curr,next_index_store))
      mu_curr <- mu_curr[,indexmu]
      count_vector[1:k_curr] <- count_vector[indexmu]
      allocate_curr[,1:k_curr] <- allocate_curr[,indexmu]
      eparas_all <- eparas_all[indexmu]
      num_time_breaks[1:k_curr] <- num_time_breaks[indexmu]
      bk[1:k_curr] <- bk[indexmu]
      time_bin[1:k_curr] <- time_bin[indexmu]
    }
    # Update weights
    # Conjugate Dirichlet draw given the current allocation counts
    alpha <- rep(wprior,mix_num)
    w <- rdirichlet(1,alpha+count_vector)
    # Update lambda weights
    # Per-component Dirichlet draw for the time-bin intensities, using the
    # counts of assigned photon arrival times in each bin of bk[[i]]
    for(i in 1:mix_num){
      lambda0 <- rep(lambdaprior,num_time_breaks[i])
      count_vector_lambda = table(cut(arrival_time[which(allocate_curr[,i] == 1)], bk[[i]]))
      lambda[[i]] = rdirichlet(1,lambda0 + count_vector_lambda)
    }
    # Update spectral parameters (full model)
    # Note, we keep energy parameters one per time-bin, for comparison
    # to `mcmc.spatial.time.energy.full' but we make them all the same
    # Metropolis update of each source's gamma (mean, shape); an accepted
    # draw is copied into every time bin of that source.
    for (i in 1:k_curr){
      index <- which(allocate_curr[,i] == 1)
      cspectral <- energy[index]
      gmcurr <- eparas_all[[i]][[1]][1,1]
      gacurr <- eparas_all[[i]][[1]][2,1]
      gmprop <- rnorm(1,gmcurr,i*specm_sd) # proposal
      gaprop <- rnorm(1,gacurr,speca_sd)
      if ((gmprop > emean.min) & (gmprop < emean.max) & (gaprop > 0)){
        logr <- spectral_post(gmprop,gaprop,NA,NA,NA,cspectral) - spectral_post(gmcurr,gacurr,NA,NA,NA,cspectral)
        u <- runif(1,0,1)
        if (log(u) < logr){
          for(k in 1:num_time_breaks[i]){
            eparas_all[[i]][[k]][,1] <- c(gmprop,gaprop)
          }
        }
      }
    } # end time break
  }
  # Output parameters and log-posterior
  value <- list(c(k_curr,c(mu_curr),c(w)),allocate_curr, eparas_all, ewt_all,lambda, time_bin, bk, num_time_breaks)
  return(value)
}
|
5bb98d9fada6219586158c95e56510c6f99a1bcd | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/lepton.R | aebf308d2489c46a07611ae8d90e73d740072c58 | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 192 | r | lepton.R | library("knitr")
library("rgl")
# Earlier two-step pipeline, kept for reference:
# knit .Rmd -> .md, convert the markdown to HTML, then pandoc to PDF.
#knit("lepton.Rmd")
#markdownToHTML('lepton.md', 'lepton.html', options=c("use_xhml"))
#system("pandoc -s lepton.html -o lepton.pdf")
# Current pipeline: knit lepton.Rmd and convert straight to HTML in one call.
knit2html('lepton.Rmd')
|
55b0f61d6161029edc299fd8e296b024e7981b3d | baecfb4396b382c11bdb559caa514a96aef93cf3 | /plot4.R | 938787b76659dcea4f45f5a65a37dc6e4e485523 | [] | no_license | EllenB/Exploratory-Data-Analysis-Coursera- | fccf08fd2500fb00f0fa4ecca32e22b3332abe4e | 3f18cfdeaae754696cd1290add454f9b8a072055 | refs/heads/master | 2020-06-05T09:13:39.608952 | 2015-02-06T08:06:13 | 2015-02-06T08:06:13 | 30,400,949 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,106 | r | plot4.R | #######################################
## Project 1: Exploratory Data Analysis
## Ellen Brock
## Plot 4: 2x2 panel of household power measurements for the two days
## 2007-02-01 and 2007-02-02, written to plot4.png (480x480 pixels).
#######################################
## 1. Set the working directory.
## NOTE(review): machine-specific path; consider running the script from the
## data directory instead so this line can be dropped.
setwd("E:/Ellen/coursera/ExpDatAn/project_1")
## 2. Read in the data. Missing values are coded as "?" in the source file.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?", stringsAsFactors = FALSE)
names(data)  # Gives the names of the variables in the dataset
dim(data)    # 2075259 9: to check that the correct number of obs was read
str(data)
## 3. Convert the Date column from character to Date.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
## 4. Subset the data to the period 2007-02-01 and 2007-02-02.
## Comparing Date against Date values (via as.Date) avoids the fragile
## Date == "character" coercion of the original which()-based subset.
data_short <- data[data$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
## Reset the row names after subsetting
row.names(data_short) <- NULL
## Remove the original dataset from the workspace to free memory
rm(data)
## 5. Build one POSIXct variable holding both the day and the time.
data_short$DateTime <- as.POSIXct(paste(data_short$Date, data_short$Time))
## 6. Create the 2x2 plot and save it into a png file.
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
## Top-left: global active power over time
plot(data_short$Global_active_power ~ data_short$DateTime, type = "l",
     ylab = "Global Active Power", xlab = "")
## Top-right: voltage over time (fixed axis label: was misspelled "datatime")
plot(data_short$Voltage ~ data_short$DateTime, type = "l", ylab = "Voltage",
     xlab = "datetime")
## Bottom-left: the three sub-metering series with a legend
plot(data_short$Sub_metering_1 ~ data_short$DateTime, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(data_short$Sub_metering_2 ~ data_short$DateTime, col = "Red")
lines(data_short$Sub_metering_3 ~ data_short$DateTime, col = "Blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Bottom-right: global reactive power over time
plot(data_short$Global_reactive_power ~ data_short$DateTime,
     type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
ca575e56959a4eece7c49e0a3d3b4bd97d930ca4 | 6464efbccd76256c3fb97fa4e50efb5d480b7c8c | /paws/R/networkmanager_interfaces.R | 88c7129885b36460525334bace167091ea77e932 | [
"Apache-2.0"
] | permissive | johnnytommy/paws | 019b410ad8d4218199eb7349eb1844864bd45119 | a371a5f2207b534cf60735e693c809bd33ce3ccf | refs/heads/master | 2020-09-14T23:09:23.848860 | 2020-04-06T21:49:17 | 2020-04-06T21:49:17 | 223,286,996 | 1 | 0 | NOASSERTION | 2019-11-22T00:29:10 | 2019-11-21T23:56:19 | null | UTF-8 | R | false | false | 43,584 | r | networkmanager_interfaces.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include networkmanager_service.R
NULL
# --- Associate*/Create* shape templates (generated by make.paws). ---
# Each *_input function collects its call arguments into `args` and pairs them
# with a tagged shape skeleton; paws.common::populate() fills the skeleton from
# the arguments. The *_output functions define the matching response skeletons.
# Request shape for AssociateCustomerGateway.
.networkmanager$associate_customer_gateway_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CustomerGatewayArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for AssociateCustomerGateway.
.networkmanager$associate_customer_gateway_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CustomerGatewayAssociation = structure(list(CustomerGatewayArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for AssociateLink.
.networkmanager$associate_link_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for AssociateLink.
.networkmanager$associate_link_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(LinkAssociation = structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string")), LinkAssociationState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for CreateDevice.
.networkmanager$create_device_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Vendor = structure(logical(0), tags = list(type = "string")), Model = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SiteId = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for CreateDevice.
.networkmanager$create_device_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Device = structure(list(DeviceId = structure(logical(0), tags = list(type = "string")), DeviceArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Vendor = structure(logical(0), tags = list(type = "string")), Model = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SiteId = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for CreateGlobalNetwork.
.networkmanager$create_global_network_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Description = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for CreateGlobalNetwork.
.networkmanager$create_global_network_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetwork = structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), GlobalNetworkArn = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for CreateLink.
.networkmanager$create_link_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Bandwidth = structure(list(UploadSpeed = structure(logical(0), tags = list(type = "integer")), DownloadSpeed = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Provider = structure(logical(0), tags = list(type = "string")), SiteId = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for CreateLink.
.networkmanager$create_link_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Link = structure(list(LinkId = structure(logical(0), tags = list(type = "string")), LinkArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), SiteId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Bandwidth = structure(list(UploadSpeed = structure(logical(0), tags = list(type = "integer")), DownloadSpeed = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Provider = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for CreateSite.
.networkmanager$create_site_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), Description = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for CreateSite.
.networkmanager$create_site_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Site = structure(list(SiteId = structure(logical(0), tags = list(type = "string")), SiteArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# --- Delete* shape templates (generated by make.paws). ---
# Input skeletons map identifiers onto URI path components via location/
# locationName tags; output skeletons describe the deleted resource returned.
# Request shape for DeleteDevice.
.networkmanager$delete_device_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), DeviceId = structure(logical(0), tags = list(location = "uri", locationName = "deviceId", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DeleteDevice.
.networkmanager$delete_device_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Device = structure(list(DeviceId = structure(logical(0), tags = list(type = "string")), DeviceArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Vendor = structure(logical(0), tags = list(type = "string")), Model = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SiteId = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for DeleteGlobalNetwork.
.networkmanager$delete_global_network_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DeleteGlobalNetwork.
.networkmanager$delete_global_network_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetwork = structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), GlobalNetworkArn = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for DeleteLink.
.networkmanager$delete_link_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), LinkId = structure(logical(0), tags = list(location = "uri", locationName = "linkId", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DeleteLink.
.networkmanager$delete_link_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Link = structure(list(LinkId = structure(logical(0), tags = list(type = "string")), LinkArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), SiteId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Bandwidth = structure(list(UploadSpeed = structure(logical(0), tags = list(type = "integer")), DownloadSpeed = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Provider = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for DeleteSite.
.networkmanager$delete_site_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), SiteId = structure(logical(0), tags = list(location = "uri", locationName = "siteId", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DeleteSite.
.networkmanager$delete_site_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Site = structure(list(SiteId = structure(logical(0), tags = list(type = "string")), SiteArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# --- Deregister/Describe/Disassociate shape templates (generated by make.paws). ---
# querystring-tagged fields are sent as URL query parameters; uri-tagged
# fields are substituted into the request path.
# Request shape for DeregisterTransitGateway.
.networkmanager$deregister_transit_gateway_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), TransitGatewayArn = structure(logical(0), tags = list(location = "uri", locationName = "transitGatewayArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DeregisterTransitGateway.
.networkmanager$deregister_transit_gateway_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransitGatewayRegistration = structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), TransitGatewayArn = structure(logical(0), tags = list(type = "string")), State = structure(list(Code = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for DescribeGlobalNetworks (paginated via MaxResults/NextToken).
.networkmanager$describe_global_networks_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "globalNetworkIds", type = "list")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DescribeGlobalNetworks.
.networkmanager$describe_global_networks_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworks = structure(list(structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), GlobalNetworkArn = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for DisassociateCustomerGateway.
.networkmanager$disassociate_customer_gateway_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), CustomerGatewayArn = structure(logical(0), tags = list(location = "uri", locationName = "customerGatewayArn", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DisassociateCustomerGateway.
.networkmanager$disassociate_customer_gateway_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CustomerGatewayAssociation = structure(list(CustomerGatewayArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for DisassociateLink.
.networkmanager$disassociate_link_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), DeviceId = structure(logical(0), tags = list(location = "querystring", locationName = "deviceId", type = "string")), LinkId = structure(logical(0), tags = list(location = "querystring", locationName = "linkId", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for DisassociateLink.
.networkmanager$disassociate_link_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(LinkAssociation = structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string")), LinkAssociationState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# --- Get* shape templates (generated by make.paws). ---
# All Get* operations are paginated: MaxResults/NextToken travel as query
# string parameters on input and NextToken is returned in the response.
# Request shape for GetCustomerGatewayAssociations.
.networkmanager$get_customer_gateway_associations_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), CustomerGatewayArns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "customerGatewayArns", type = "list")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for GetCustomerGatewayAssociations.
.networkmanager$get_customer_gateway_associations_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(CustomerGatewayAssociations = structure(list(structure(list(CustomerGatewayArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string")), State = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for GetDevices.
.networkmanager$get_devices_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), DeviceIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "deviceIds", type = "list")), SiteId = structure(logical(0), tags = list(location = "querystring", locationName = "siteId", type = "string")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for GetDevices.
.networkmanager$get_devices_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Devices = structure(list(structure(list(DeviceId = structure(logical(0), tags = list(type = "string")), DeviceArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Vendor = structure(logical(0), tags = list(type = "string")), Model = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SiteId = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for GetLinkAssociations.
.networkmanager$get_link_associations_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), DeviceId = structure(logical(0), tags = list(location = "querystring", locationName = "deviceId", type = "string")), LinkId = structure(logical(0), tags = list(location = "querystring", locationName = "linkId", type = "string")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for GetLinkAssociations.
.networkmanager$get_link_associations_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(LinkAssociations = structure(list(structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), DeviceId = structure(logical(0), tags = list(type = "string")), LinkId = structure(logical(0), tags = list(type = "string")), LinkAssociationState = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for GetLinks.
.networkmanager$get_links_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), LinkIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "linkIds", type = "list")), SiteId = structure(logical(0), tags = list(location = "querystring", locationName = "siteId", type = "string")), Type = structure(logical(0), tags = list(location = "querystring", locationName = "type", type = "string")), Provider = structure(logical(0), tags = list(location = "querystring", locationName = "provider", type = "string")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for GetLinks.
.networkmanager$get_links_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Links = structure(list(structure(list(LinkId = structure(logical(0), tags = list(type = "string")), LinkArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), SiteId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Bandwidth = structure(list(UploadSpeed = structure(logical(0), tags = list(type = "integer")), DownloadSpeed = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Provider = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request shape for GetSites.
.networkmanager$get_sites_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), SiteIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "siteIds", type = "list")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Response shape for GetSites.
.networkmanager$get_sites_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Sites = structure(list(structure(list(SiteId = structure(logical(0), tags = list(type = "string")), SiteArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.networkmanager$get_transit_gateway_registrations_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), TransitGatewayArns = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "transitGatewayArns", type = "list")), MaxResults = structure(logical(0), tags = list(location = "querystring", locationName = "maxResults", type = "integer")), NextToken = structure(logical(0), tags = list(location = "querystring", locationName = "nextToken", type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.networkmanager$get_transit_gateway_registrations_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TransitGatewayRegistrations = structure(list(structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), TransitGatewayArn = structure(logical(0), tags = list(type = "string")), State = structure(list(Code = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Request-shape constructor for ListTagsForResource: builds the wire-format
# template (ResourceArn travels in the URI) and lets populate() -- defined
# elsewhere in the package -- fill it from the arguments supplied in `...`.
.networkmanager$list_tags_for_resource_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      ResourceArn = structure(
        logical(0),
        tags = list(location = "uri", locationName = "resourceArn", type = "string")
      )
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response template for ListTagsForResource: a list of Key/Value tag pairs.
# populate() (defined elsewhere in the package) fills it from `...`.
.networkmanager$list_tags_for_resource_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request-shape constructor for RegisterTransitGateway: GlobalNetworkId is a
# URI path component, TransitGatewayArn a plain body string.  populate()
# (defined elsewhere in the package) fills the template from `...`.
.networkmanager$register_transit_gateway_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      GlobalNetworkId = structure(
        logical(0),
        tags = list(location = "uri", locationName = "globalNetworkId", type = "string")
      ),
      TransitGatewayArn = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response template for RegisterTransitGateway: a single registration record
# with its state Code/Message.  populate() fills it from `...`.
.networkmanager$register_transit_gateway_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(TransitGatewayRegistration = structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), TransitGatewayArn = structure(logical(0), tags = list(type = "string")), State = structure(list(Code = structure(logical(0), tags = list(type = "string")), Message = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request template for TagResource: ResourceArn in the URI plus a list of
# Key/Value tag pairs in the body.
.networkmanager$tag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response-shape constructor for TagResource.  The operation returns no
# response fields, so the template is an empty structure; populate() (defined
# elsewhere in the package) is still applied for uniformity with the other
# constructors.
.networkmanager$tag_resource_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  empty_template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, empty_template)
}
# Request template for UntagResource: ResourceArn in the URI, the tag keys to
# remove as a query-string list.  populate() fills it from `...`.
.networkmanager$untag_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(ResourceArn = structure(logical(0), tags = list(location = "uri", locationName = "resourceArn", type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(location = "querystring", locationName = "tagKeys", type = "list"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response-shape constructor for UntagResource.  Like TagResource, the
# operation has no response fields, so the template is an empty structure
# run through populate() (defined elsewhere in the package).
.networkmanager$untag_resource_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  empty_template <- structure(list(), tags = list(type = "structure"))
  populate(supplied, empty_template)
}
# Request template for UpdateDevice: network and device ids travel in the
# URI; the remaining descriptive fields (vendor, model, location, ...) in the
# body.  populate() (defined elsewhere in the package) fills it from `...`.
.networkmanager$update_device_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), DeviceId = structure(logical(0), tags = list(location = "uri", locationName = "deviceId", type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Vendor = structure(logical(0), tags = list(type = "string")), Model = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SiteId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response template for UpdateDevice: the full updated Device record.
.networkmanager$update_device_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Device = structure(list(DeviceId = structure(logical(0), tags = list(type = "string")), DeviceArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Vendor = structure(logical(0), tags = list(type = "string")), Model = structure(logical(0), tags = list(type = "string")), SerialNumber = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SiteId = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request-shape constructor for UpdateGlobalNetwork: the network id is a URI
# path component; Description is the only body field.  populate() (defined
# elsewhere in the package) fills the template from `...`.
.networkmanager$update_global_network_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      GlobalNetworkId = structure(
        logical(0),
        tags = list(location = "uri", locationName = "globalNetworkId", type = "string")
      ),
      Description = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Response template for UpdateGlobalNetwork: the updated GlobalNetwork
# record.  populate() (defined elsewhere in the package) fills it from `...`.
.networkmanager$update_global_network_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(GlobalNetwork = structure(list(GlobalNetworkId = structure(logical(0), tags = list(type = "string")), GlobalNetworkArn = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request template for UpdateLink: network and link ids in the URI; type,
# bandwidth (upload/download speeds) and provider in the body.
.networkmanager$update_link_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), LinkId = structure(logical(0), tags = list(location = "uri", locationName = "linkId", type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Bandwidth = structure(list(UploadSpeed = structure(logical(0), tags = list(type = "integer")), DownloadSpeed = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Provider = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response template for UpdateLink: the full updated Link record.
.networkmanager$update_link_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Link = structure(list(LinkId = structure(logical(0), tags = list(type = "string")), LinkArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), SiteId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Bandwidth = structure(list(UploadSpeed = structure(logical(0), tags = list(type = "integer")), DownloadSpeed = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), Provider = structure(logical(0), tags = list(type = "string")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Request template for UpdateSite: network and site ids in the URI;
# description and location (address/lat/long) in the body.
.networkmanager$update_site_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(GlobalNetworkId = structure(logical(0), tags = list(location = "uri", locationName = "globalNetworkId", type = "string")), SiteId = structure(logical(0), tags = list(location = "uri", locationName = "siteId", type = "string")), Description = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
# Response template for UpdateSite: the full updated Site record.
.networkmanager$update_site_output <- function(...) {
  args <- c(as.list(environment()), list(...))
  shape <- structure(list(Site = structure(list(SiteId = structure(logical(0), tags = list(type = "string")), SiteArn = structure(logical(0), tags = list(type = "string")), GlobalNetworkId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), Location = structure(list(Address = structure(logical(0), tags = list(type = "string")), Latitude = structure(logical(0), tags = list(type = "string")), Longitude = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CreatedAt = structure(logical(0), tags = list(type = "timestamp")), State = structure(logical(0), tags = list(type = "string")), Tags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  return(populate(args, shape))
}
|
64120445c1039053c1728fec50438d8a6bfa769a | 9e1308ea7e52098691c6ecccf48a1d47ea8665b2 | /man/geoflow_right.Rd | 892aeb008fea9556d5ceac6f35ca7a5283aeea77 | [] | no_license | gitter-badger/geoflow | 634c0d07f9dd02191d8c3a0c8f1777310867ab4d | abc49ed61f0c52c52b1577215954ced31e53e097 | refs/heads/master | 2020-08-20T17:23:14.026344 | 2019-10-18T07:34:00 | 2019-10-18T07:34:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 313 | rd | geoflow_right.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geoflow_right.R
\docType{data}
\name{geoflow_right}
\alias{geoflow_right}
\title{geoflow_right}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
geoflow_right
}
\description{
geoflow_right
}
\keyword{datasets}
|
b40195b3b7992760ab1062668aaaa8ee764db576 | e8e58947ae3193313e86f103b97d44c9ec862e39 | /man/plot.p_dependencies.Rd | c551f7c57478d42e15886179386510ecdfb3e4d8 | [] | no_license | trinker/pacmanDependencies | d9ae1837c66ef3592baf686fcf34efb9f1f89aa8 | b64479d295556bef903c33aacae82cbb585479d0 | refs/heads/master | 2021-01-01T17:16:14.897358 | 2014-10-21T04:04:10 | 2014-10-21T04:04:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,216 | rd | plot.p_dependencies.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{plot.p_dependencies}
\alias{plot.p_dependencies}
\title{Plots a p_dependencies Object}
\usage{
\method{plot}{p_dependencies}(x, legend = TRUE, legend.x = -1.5,
legend.y = -1.05, legend.cex = 0.8,
title = paste("Dependencies for the", attributes(x)[["package"]],
"Package"), ...)
}
\arguments{
\item{x}{The p_dependencies object.}
\item{legend}{logical. If \code{TRUE} a legend is plotted corresponding to
the dependency types.}
\item{legend.x}{the x co-ordinate to be used to position the legend. Can be
specified by keyword or in any way which is accepted by
\code{\link[grDevices]{xy.coords}}.}
\item{legend.y}{the y co-ordinate to be used to position the legend. Can be
specified by keyword or in any way which is accepted by
\code{\link[grDevices]{xy.coords}}.}
\item{legend.cex}{Character expansion factor relative to current
\code{par("cex")}.}
\item{title}{The title of the plot. Use \code{NULL} to not include a title.}
\item{\ldots}{Arguments passed to \code{\link[graphics]{legend}}.}
}
\description{
Plots a p_dependencies object.
}
\references{
Adapted from mran's Dependencies Graphs \url{http://mran.revolutionanalytics.com/}
}
|
9f1cf3166973e021b45c663d28516da5213e8c34 | 9969b02c26fa5388ac971b8212c761c6abf98efb | /inst/helperCode/compileConfLimits.r | 5b0189753f49cc5f67fd969ee8370e5d1b01c57f | [] | no_license | tmcd82070/CAMP_RST | 0cccd7d20c8c72d45fca31833c78cd2829afc169 | eca3e894c19936edb26575aca125e795ab21d99f | refs/heads/master | 2022-05-10T13:33:20.464702 | 2022-04-05T21:05:35 | 2022-04-05T21:05:35 | 10,950,738 | 0 | 0 | null | 2017-05-19T20:42:56 | 2013-06-25T21:24:52 | R | UTF-8 | R | false | false | 5,871 | r | compileConfLimits.r |
# we want to compile all of the confidence intervals in a sample with an exploding right endpoint.
# # 1. fill out this stuff.
# thePlatform <- 'CAMP_RST20160201'
# theRiver <- 'Sacramento River'
# #files <- list.files(paste0('//lar-file-srv/Data/PSMFC_CampRST/ThePlatform/',thePlatform,'/Outputs/',theRiver))
# get the before
# 2. fill out stemB
# NOTE(review): thePlatform and theRiver are referenced by the getTheData()
# calls below, but their assignments above are commented out -- define them
# before sourcing this script.
stemB <- '//lar-file-srv/Data/PSMFC_CampRST/ThePlatform/CAMP_RST20160201/Outputs/Sacramento River/After Knot Adj'
# Discover candidate passage-table CSVs in the "before" output folder.
filesB <- list.files(stemB)
ls_passageB <- filesB[grep('lifestage_passage_table.csv',filesB)]
# NOTE(review): 'passage_table.csv' is a substring of
# 'lifestage_passage_table.csv', so passageB also matches the lifestage
# files; the unique() below removes the resulting duplicates.
passageB <- filesB[grep('passage_table.csv',filesB)]
# One row per distinct file to open.
openTheseB <- unique(data.frame(file=c(ls_passageB,passageB),stringsAsFactors=FALSE))
# Classify each candidate file by the kind of passage table it holds, keyed
# off fixed offsets from the end of the filename ('life...' for lifestage
# tables, 'run' for run tables, everything else 'summary').
# Vectorised replacement for the original row-by-row loop: substr()/nchar()
# operate elementwise, and this also behaves correctly when openTheseB has
# zero rows (the old `for (i in 1:nrow(...))` would have iterated over
# c(1, 0) and errored).
openTheseB$type <- with(openTheseB, ifelse(
  substr(file, nchar(file) - 26, nchar(file) - 23) == 'life', 'life',
  ifelse(substr(file, nchar(file) - 20, nchar(file) - 18) == 'run', 'run', 'summary')))
# Compile the "before" confidence-limit table.  getTheData() is defined
# elsewhere in this package's helper code.
bigDFB <- getTheData(openThese=openTheseB,thePlatform=thePlatform,theRiver=theRiver,stem=stemB,before=TRUE)
# Each `by` value is "<test>--<by>": splitting on "--" and unlisting yields
# an alternating vector, so the odd positions (c(TRUE,FALSE) recycled) are
# the test labels and the even positions the true `by` labels.  Assumes every
# `by` value contains exactly one "--" separator -- TODO confirm.
testByB <- unlist(strsplit(bigDFB$by,"--",fixed=TRUE))
bigDFB$testi <- testByB[c(TRUE,FALSE)]
bigDFB$by <- testByB[c(FALSE,TRUE)]
# Standardise missing time / life-stage values to the placeholder "--".
bigDFB$time <- ifelse(is.na(bigDFB$time),'--',bigDFB$time)
bigDFB$lifeStage <- as.character(droplevels(bigDFB$lifeStage))
bigDFB$lifeStage <- ifelse(is.na(bigDFB$lifeStage),'--',bigDFB$lifeStage)
# get the after
# Same file discovery as the "before" section, now for the "after" folder.
stemA <- '//lar-file-srv/Data/PSMFC_CampRST/ThePlatform/CAMP_RST20160201/Outputs/Sacramento River/After Times 2 for Eff'
filesA <- list.files(stemA)
ls_passageA <- filesA[grep('lifestage_passage_table.csv',filesA)]
# As in the "before" section, this pattern also matches the lifestage files;
# the unique() below de-duplicates.
passageA <- filesA[grep('passage_table.csv',filesA)]
openTheseA <- unique(data.frame(file=c(ls_passageA,passageA),stringsAsFactors=FALSE))
# Classify the "after" files by filename suffix, mirroring the "before"
# section.  Vectorised replacement for the original row-by-row loop:
# substr()/nchar() operate elementwise, and this also works when openTheseA
# has zero rows (the old `for (i in 1:nrow(...))` would have iterated over
# c(1, 0) and errored).
openTheseA$type <- with(openTheseA, ifelse(
  substr(file, nchar(file) - 26, nchar(file) - 23) == 'life', 'life',
  ifelse(substr(file, nchar(file) - 20, nchar(file) - 18) == 'run', 'run', 'summary')))
# Compile the "after" confidence-limit table (before=FALSE) and apply the
# same `by`-splitting and placeholder handling as for bigDFB above.
bigDFA <- getTheData(openThese=openTheseA,thePlatform=thePlatform,theRiver=theRiver,stem=stemA,before=FALSE)
# Odd positions = test labels, even positions = true `by` labels; assumes
# exactly one "--" per value -- TODO confirm.
testByA <- unlist(strsplit(bigDFA$by,"--",fixed=TRUE))
bigDFA$testi <- testByA[c(TRUE,FALSE)]
bigDFA$by <- testByA[c(FALSE,TRUE)]
bigDFA$time <- ifelse(is.na(bigDFA$time),'--',bigDFA$time)
bigDFA$lifeStage <- as.character(droplevels(bigDFA$lifeStage))
bigDFA$lifeStage <- ifelse(is.na(bigDFA$lifeStage),'--',bigDFA$lifeStage)
# Tabulate the values of `x`, keeping NA as its own category
# (the default table() behaviour would silently drop missing values).
bb <- function(x) {
  freq <- table(x, exclude = NULL)
  freq
}
# Quick NA-aware frequency summaries (via bb() above) of the key columns in
# the "before" and "after" tables -- used interactively to eyeball the join
# keys used by the merge below.
bb(bigDFB$by)
bb(bigDFB$river)
bb(bigDFB$siteName)
bb(bigDFB$min.date)
bb(bigDFB$max.date)
bb(bigDFB$file)
bb(bigDFB$run)
bb(bigDFB$lifeStage)
bb(bigDFB$time)
bb(bigDFA$by)
bb(bigDFA$river)
bb(bigDFA$siteName)
bb(bigDFA$min.date)
bb(bigDFA$max.date)
bb(bigDFA$file)
bb(bigDFA$run)
bb(bigDFA$lifeStage)
bb(bigDFA$time)
# double folder compilation
# Full outer join of the "before" (a-prefixed / .x) and "after" (b-prefixed /
# .y) results on the shared key columns.
bigDF <- merge(bigDFB,bigDFA,by=c('by','river','siteName','min.date','max.date','file','run','lifeStage','time'),all.x=TRUE,all.y=TRUE) # 'test.i' removed
# Drop rows with no estimate on either side, then keep rows where every
# available estimate is positive.
bigDF <- bigDF[!is.na(bigDF$aEst) | !is.na(bigDF$bEst),]
# NOTE(review): the trailing `> 0` binds only to the last parenthesised
# term; on a logical vector it is a no-op, so the filter still works as
# intended -- but the expression is misleading and worth simplifying.
bigDF <- bigDF[ ( (bigDF$aEst > 0 & !is.na(bigDF$aEst)) & (bigDF$bEst > 0 & !is.na(bigDF$bEst)) ) | ( is.na(bigDF$aEst) & (bigDF$bEst > 0 & !is.na(bigDF$bEst)) ) | ( (bigDF$aEst > 0 & !is.na(bigDF$aEst)) & is.na(bigDF$bEst) ) > 0,]
# Ratio of the two estimates (an earlier percent-change formula is kept in
# the trailing comment for reference).
bigDF$passC <- bigDF$aEst/bigDF$bEst #round((bigDF$aEst - bigDF$bEst) / bigDF$bEst * 100,2)
bigDF$diffMag <- bigDF$bMag - bigDF$aMag
rownames(bigDF) <- NULL
write.csv(bigDF,'//lar-file-srv/Data/PSMFC_CampRST/ThePlatform/CAMP_RST20160201/Outputs/Sacramento River/bigTimes2DF.csv')
# single folder compilation
# NOTE(review): this section overwrites bigDF from the section above -- the
# two appear to be alternative workflows run interactively, not sequentially.
bigDF <- bigDFB
# Keep only rows with a positive, non-missing bEst.
bigDF <- bigDF[!is.na(bigDF$bEst),]
bigDF <- bigDF[ bigDF$bEst > 0,]
rownames(bigDF) <- NULL
write.csv(bigDF,'//lar-file-srv/Data/PSMFC_CampRST/ThePlatform/CAMP_RST20160201/Outputs/Sacramento River/bigGapsDF.csv')
# Drop the b-side and bookkeeping columns before relabelling.
# NOTE(review): several columns removed here (aEst, bLCL, sequence.y, ...)
# exist only on the merged "double folder" bigDF, not on the bigDFB copy made
# in the "single folder" section -- confirm which bigDF this is meant to
# follow.
bigDF$bEst <- bigDF$bLCL <- bigDF$bUCL <- bigDF$bMag <- bigDF$bOOL <- bigDF$sequence.y <- bigDF$testi.y <- bigDF$passC <- bigDF$diffMag <- NULL
bigDF$sequence.x <- bigDF$testi.x <- NULL
# (Retired renames from an earlier 200-sample run, kept for reference.)
# names(bigDF)[names(bigDF) == 'bEst'] <- 'aEst200'
# names(bigDF)[names(bigDF) == 'bLCL'] <- 'aLCL200'
# names(bigDF)[names(bigDF) == 'bUCL'] <- 'aUCL200'
# names(bigDF)[names(bigDF) == 'bMag'] <- 'aMag200'
# names(bigDF)[names(bigDF) == 'bOOL'] <- 'aOOL200'
# Tag the current run's columns with the 5000-sample suffix.
names(bigDF)[names(bigDF) == 'aEst'] <- 'aEst5000'
names(bigDF)[names(bigDF) == 'aLCL'] <- 'aLCL5000'
names(bigDF)[names(bigDF) == 'aUCL'] <- 'aUCL5000'
names(bigDF)[names(bigDF) == 'aMag'] <- 'aMag5000'
names(bigDF)[names(bigDF) == 'aOOL'] <- 'aOOL5000'
# Earlier 100-sample results, relabelled with the 100 suffix on both sides.
bigDF0 <- read.csv('//lar-file-srv/Data/PSMFC_CampRST/ThePlatform/CAMP_RST20160201/Outputs/Sacramento River/before/bigDF.csv')
names(bigDF0)[names(bigDF0) == 'bEst'] <- 'bEst100'
names(bigDF0)[names(bigDF0) == 'bLCL'] <- 'bLCL100'
names(bigDF0)[names(bigDF0) == 'bUCL'] <- 'bUCL100'
names(bigDF0)[names(bigDF0) == 'bMag'] <- 'bMag100'
names(bigDF0)[names(bigDF0) == 'bOOL'] <- 'bOOL100'
names(bigDF0)[names(bigDF0) == 'aEst'] <- 'aEst100'
names(bigDF0)[names(bigDF0) == 'aLCL'] <- 'aLCL100'
names(bigDF0)[names(bigDF0) == 'aUCL'] <- 'aUCL100'
names(bigDF0)[names(bigDF0) == 'aMag'] <- 'aMag100'
names(bigDF0)[names(bigDF0) == 'aOOL'] <- 'aOOL100'
bigDF2 <- bigDF # 5000
# Join the 100-sample and 5000-sample tables on the key columns.
# NOTE(review): BigDF2 merges in bigDF2, which is the same table as bigDF
# already merged on the previous line -- confirm the duplication is intended.
BigDF <- merge(bigDF0,bigDF,by=c('by','river','siteName','min.date','max.date','file','run','lifeStage','time'),all.x=TRUE,all.y=TRUE)
BigDF2 <- merge(BigDF,bigDF2,by=c('by','river','siteName','min.date','max.date','file','run','lifeStage','time'),all.x=TRUE,all.y=TRUE)
write.csv(BigDF2,'//lar-file-srv/Data/PSMFC_CampRST/ThePlatform/CAMP_RST20160201/Outputs/Sacramento River/before/bigDF5000.csv')
|
30c9ae7db0780515ca009e8b558fcb769713982f | 21ce426513bad64d46ee887e7aeccb606a96ccda | /Projeto_PAEII_Script.R | 0dae560ad3ba79e7f78ccdf5e0a6db488b02bc2b | [] | no_license | victormmp/EC02 | d19314ed3edc06ed3d6d5954cd3dba9e28bd1f02 | b3aa0bd6174c1fda9dcfc612c9ca41bec405dc5f | refs/heads/master | 2020-08-01T10:18:19.540563 | 2019-10-01T02:42:08 | 2019-10-01T02:42:08 | 210,963,471 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,151 | r | Projeto_PAEII_Script.R | data_2016<-read.csv("imc_20162.csv");
# Read the 2017 survey (semicolon-separated); the 2016 file is read above.
data_2017 <- read.csv("CS01_20172.csv", sep = ";")
# 2016: keep only PPGEE-program records (column 2), then split by sex
# (column 3).  Columns 4 and 5 are height and weight respectively; units are
# assumed to be metres and kilograms given the BMI formula below -- TODO
# confirm against the raw file.  (Idiom fix: top-level `=` assignments and
# trailing semicolons replaced with conventional `<-` style; variable names,
# including the "Heigh"/"Maculino" typos, are kept since later code uses
# them.)
PPGEE_dados <- data_2016[data_2016[2] == 'PPGEE', ]
Dados_Masculino_2016 <- PPGEE_dados[PPGEE_dados[3] == 'M', ]
Dados_Feminino_2016 <- PPGEE_dados[PPGEE_dados[3] == 'F', ]
Heigh_Maculino_2016 <- Dados_Masculino_2016[, 4]
Heigh_Feminino_2016 <- Dados_Feminino_2016[, 4]
Weight_Masculino_2016 <- Dados_Masculino_2016[, 5]
Weight_Feminino_2016 <- Dados_Feminino_2016[, 5]
# BMI (IMC) for the 2016 male and female groups: weight / height^2.
IMC_masculino_2016 <- Weight_Masculino_2016 / (Heigh_Maculino_2016 * Heigh_Maculino_2016)
IMC_Feminino_2016 <- Weight_Feminino_2016 / (Heigh_Feminino_2016 * Heigh_Feminino_2016)
#### 2017 ####
# 2017: split by sex (column 3).  Note the column layout differs from 2016:
# here column 2 is height and column 1 is weight (assumed from the BMI
# formula below -- TODO confirm).
Dados_Masculino_2017 <- data_2017[data_2017[3] == 'M', ]
Dados_Feminino_2017 <- data_2017[data_2017[3] == 'F', ]
Heigh_Maculino_2017 <- Dados_Masculino_2017[, 2]
Heigh_Feminino_2017 <- Dados_Feminino_2017[, 2]
Weight_Masculino_2017 <- Dados_Masculino_2017[, 1]
Weight_Feminino_2017 <- Dados_Feminino_2017[, 1]
# BMI (IMC) for the 2017 male and female groups: weight / height^2.
IMC_masculino_2017 <- Weight_Masculino_2017 / (Heigh_Maculino_2017 * Heigh_Maculino_2017)
IMC_Feminino_2017 <- Weight_Feminino_2017 / (Heigh_Feminino_2017 * Heigh_Feminino_2017)
# Shapiro-Wilk normality tests for each sex/year BMI sample.
teste_normalidade_2016_feminino <- shapiro.test(as.numeric(unlist(IMC_Feminino_2016)))
teste_normalidade_2017_feminino <- shapiro.test(as.numeric(unlist(IMC_Feminino_2017)))
teste_normalidade_2016_masculino <- shapiro.test(as.numeric(unlist(IMC_masculino_2016)))
teste_normalidade_2017_masculino <- shapiro.test(as.numeric(unlist(IMC_masculino_2017)))
# Two-sample t-test comparing mean BMI of the 2016 vs 2017 male groups
# (unpaired, two-sided, 95% confidence level).
t.test(as.numeric(unlist(IMC_masculino_2016)), as.numeric(unlist(IMC_masculino_2017)),
       alternative = 'two.sided', mu = 0, paired = FALSE, conf.level = 0.95)
# Wilcoxon rank-sum test comparing the medians of the 2016 vs 2017 female
# groups (used instead of the t-test, presumably because normality was
# rejected for those samples -- check the shapiro.test results above).
wilcox.test(as.numeric(unlist(IMC_Feminino_2016)), as.numeric(unlist(IMC_Feminino_2017)))
|
5194699e09f298782e4f8761d97753aa9c1376ac | 5fd3ddd30766a4eae04069b44bfe4f85f9dfaa40 | /man/get.MayPrevent.Rd | 0ee87c72b96ce7bb4a767e869bd12e8eec71d2c9 | [] | no_license | Angelacheese/pharm | 1a18155194cbc6551f12e28083f2a01a347dd0fb | 9f7d600752641edb30353f4575d89a9db6cc67ab | refs/heads/master | 2022-10-06T03:13:18.425075 | 2019-07-18T08:09:56 | 2019-07-18T08:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 428 | rd | get.MayPrevent.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getMayPrevent.R
\name{get.MayPrevent}
\alias{get.MayPrevent}
\title{Get may prevent based on RxCui}
\usage{
get.MayPrevent(df, RxCuiColName = RxCui, cores = 16)
}
\arguments{
\item{df}{data.frame include RxCui}
\item{RxCuiColName}{A column for RxCui of df}
\item{cores}{number of parallel operations}
}
\description{
Get may prevent based on RxCui
}
|
25fc551dbc533ce50a9154405880df9f35cf75d4 | aa6b6bf70b316244ff92673456daac851f1c35ee | /ultraseq/man/merge_sheets.Rd | e19af32ed3447c5a408b883e5c1aab33330123c1 | [] | no_license | flow-r/ultraseq | 742035de86d55019d5c52b1b9a2c1795ec6ac5b9 | 58f72866c5b2504c18fd3e472ae187b5471826c8 | refs/heads/master | 2022-10-05T01:22:44.717599 | 2016-05-12T03:55:19 | 2016-05-12T03:55:19 | 42,262,784 | 0 | 1 | null | 2016-05-25T15:57:52 | 2015-09-10T18:28:31 | R | UTF-8 | R | false | true | 668 | rd | merge_sheets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge_sheets.R
\name{merge_sheets}
\alias{merge_sheets}
\title{merge tables by row, and filter rows}
\usage{
merge_sheets(x, outfile, .filter = NA, ...)
}
\arguments{
\item{x}{a character vector of files to be merged}
\item{outfile}{path to the merged output file}
\item{.filter}{a filter string. EXPERIMENTAL}
\item{...}{other arguments supplied to params::read_sheet}
}
\description{
merge tables by row, and filter rows
}
\examples{
\dontrun{
df = merge_sheets(c("mutect.chr1.txt", "mutect.chr2.txt"),
outfile = "mutect.merged.txt", .filter = "judgement==KEEP")
dim(df)
}
}
|
cda9c1261d4778acd4856e1a6a83d2b9019d8dc3 | bc37ece62b92314aa42dca5d04eaed3409d49e41 | /R/plosauthor.R | b528bfbe7a4594c8058b5f21b73c549ff79a3157 | [] | no_license | bbolker/rplos | 9416af079729291b1816c7e61bcd18b0600fed78 | 325ec5de9577d6f00f2615a374a722792a702958 | refs/heads/master | 2021-01-15T17:55:48.968305 | 2013-09-16T14:12:26 | 2013-09-16T14:12:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 840 | r | plosauthor.R | #' Search PLoS Journals authors.
#'
#' @template plos
#' @return Author names, in addition to any other fields requested in a
#'    data.frame.
#' @examples \dontrun{
#' plosauthor('Smith', 'id', limit = 100)
#' plosauthor(terms = 'Jones', fields = 'author', limit = 10)
#' }
#' @export
plosauthor <- function(terms = NA, fields = 'id', toquery = NA, start = 0,
  limit = NA, returndf = TRUE, sleep = 6, ..., curl = getCurlHandle(),
  key = getOption("PlosApiKey", stop("need an API key for PLoS Journals")))
{
  # Restrict the query to the author field and delegate to searchplos().
  # Fixes: (1) the examples above previously showed plosfigtabcaps();
  # (2) the caller-supplied sleep, curl handle, and API key are now forwarded
  # instead of being ignored and rebuilt inside the call.
  searchplos(terms = paste('author:', '"', terms, '"', sep = ""), fields = fields,
    toquery = toquery, start = start, limit = limit,
    returndf = returndf, sleep = sleep, ..., curl = curl,
    key = key)
}
1487c9aef755b4335d39bd69c0d7f8856dae3f39 | 2140b0f4a3556b5dc76a4e1749cbcb7ea1d0a61b | /scripts/packages.R | 2ee43c54b4e081e9730fad9c8ef59b2451b6e9e2 | [] | no_license | sashahafner/NH3MTC | aa53173461029f3c4446a69aa2bc1d1d50f48f60 | ae913b6036b9956205bda6fa3b93c0ed32b67e7a | refs/heads/master | 2023-04-08T10:20:44.339685 | 2021-04-08T11:40:24 | 2021-04-08T11:40:24 | 300,247,350 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 52 | r | packages.R |
library(reshape2)
library(ggplot2)
library(dplyr)
|
fcbb2098008bfbf47e5aa89d6455ece04ac632e7 | ed1392d548de962abd2dd64f698c22115031c046 | /test_database.R | 99e5e6de55fd0ebad0ba78c6b800cc8807b1ed94 | [] | no_license | marzuf/TAD_DE_pipeline_v2_TopDom | bc479b0a1b6ea105e0743b4075319b68d6f6cacf | 5cdfe398c7948b3d2b74ff1754fb152d5b3b44af | refs/heads/master | 2020-04-07T20:27:37.669622 | 2018-12-08T21:25:48 | 2018-12-08T21:25:48 | 158,689,069 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,297 | r | test_database.R | suppressPackageStartupMessages(library(GOSemSim, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
# Quietly attach the annotation database and the parallel iteration backend
# (GOSemSim is loaded on the script's first line).
suppressPackageStartupMessages(library(org.Hs.eg.db, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
# Register 30 parallel workers for the %dopar% loops below.
registerDoMC(30)
# Entrez gene IDs scored against each other in every benchmark below.
my_genes <- c("835", "5261", "241")
# mgeneSim() settings (idiom fix: these two used `=` for top-level
# assignment, inconsistent with the rest of the script).
combineSemSimMethod <- "BMA"
semSimMetric <- "Wang"
# GO Biological Process semantic data; computeIC = FALSE skips the
# information-content precomputation (presumably unnecessary for the Wang
# measure -- confirm).
hsGO <- godata('org.Hs.eg.db', ont="BP", computeIC = FALSE)
# Benchmark 1: run the identical GO-similarity computation 100 times through
# lapply().  The iteration index is unused; every element of the result is
# the similarity matrix for the fixed gene trio.
cat("... test in lapply\n")
topTADs_semSim <- lapply(seq_len(100), function(idx) {
  mgeneSim(
    genes = my_genes,
    semData = hsGO,
    combine = combineSemSimMethod,
    measure = semSimMetric,
    verbose = FALSE
  )
})
cat("***lapply done\n")
# Benchmark 2: the same computation dispatched through foreach/%dopar%
# (parallel backend registered above); the loop variable is unused and the
# result overwrites the lapply result from Benchmark 1.
cat("... test in foreach\n")
topTADs_semSim <- foreach(i = seq_len(100)) %dopar% {
  mgeneSim(
    genes = my_genes,
    semData = hsGO,
    combine = combineSemSimMethod,
    measure = semSimMetric,
    verbose = FALSE
  )
}
cat("***foreach done\n")
cat("... test foreach in lapply \n")
# Benchmark 3: a foreach(%dopar%) loop nested inside each lapply iteration
# (100 x 100 identical similarity computations).  Each lapply callback first
# computes tad_semSim, then runs the inner foreach; the inner assignment's
# value (a local `topTADs_semSim` that shadows the global of the same name)
# is the callback's last expression and therefore the element returned for
# that iteration.
topTADs_semSim <- lapply(c(1:100), function(x) {
  # cat("... compute TRUE semantic similarity for TAD:", x, "\n")
  # tad_genes <- topTADs_genes[[x]]
  tad_semSim <- mgeneSim(genes=my_genes,
                         semData=hsGO,
                         combine=combineSemSimMethod,
                         measure=semSimMetric,
                         verbose=FALSE)
  topTADs_semSim <- foreach(i=c(1:100)) %dopar% {
    # cat("... compute TRUE semantic similarity for TAD:", x, "\n")
    # tad_genes <- topTADs_genes[[x]]
    tad_semSim <- mgeneSim(genes=my_genes,
                           semData=hsGO,
                           combine=combineSemSimMethod,
                           measure=semSimMetric,
                           verbose=FALSE)
  }
})
|
af23e5072fdd08009d884c1d33ced320a3aef411 | ef8d66ebaeaf27fa1aed1cf01ebd70ce8224c5cd | /man/get_results_in_folder.Rd | 739d9bd6a6ebe01b9ec042bf1cc30491cac1e6fb | [] | no_license | Alice-MacQueen/CDBNgenomics | dd6c8026156d91be7f12a9857d0ebeb89c32c384 | 6b00f48eb1c6eec848f11416d7a5fd752cd778bd | refs/heads/master | 2021-07-08T06:15:56.774003 | 2020-08-12T19:28:32 | 2020-08-12T19:28:32 | 178,261,021 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 776 | rd | get_results_in_folder.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cdbn_bigsnp2mashr.R
\name{get_results_in_folder}
\alias{get_results_in_folder}
\title{Identify phenotype names from bigsnpr results in a folder.}
\usage{
get_results_in_folder(path = ".", pattern = "*.rds")
}
\arguments{
\item{path}{File path to the files from bigsnpr, a character string.
Defaults to the current working directory.}
\item{pattern}{Pattern within the filename to match. Default is "*.rds".}
}
\value{
A vector of phenotype names.
}
\description{
Creates a vector of phenotype names from bigsnpr results.
}
\examples{
\dontrun{get_results_in_folder(path = system.file("extdata",
package = "CDBNgenomics"))}
\dontrun{get_results_in_folder(path = "path/to/gwas/results")}
}
|
725191bd899f4f38eaedabc310579efe16f24eea | 932e32833a2969f146c05074be3d742b1cc8a566 | /code/preproc_aggregates.R | 24b601311f362baa54ec1f8b0339df8a8925e3e7 | [] | no_license | jibietr/expedia_case_study | 9548e0b61004ae912fafa0b64b9842fbbe6de94f | 59d1e7fb4a835f59a478814b0d7a678d23a970a2 | refs/heads/master | 2021-01-23T08:10:54.910312 | 2017-02-01T21:16:36 | 2017-02-01T21:16:36 | 80,533,744 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,629 | r | preproc_aggregates.R |
# Compute partner and market aggregates:
# num bookings, total booking, mean booking value
# market share, partner share, etc.
library(plyr)
library(reshape)
# Input CSV of raw bookings, read from the app's www/ folder.
PATH <- "www/"
fname <- paste(PATH,'expedia_casestudy_20170127.csv',sep='')
bookings <- read.csv(fname,sep=",",header=TRUE,fill=TRUE)
# NOTE(review): columns are renamed positionally -- this assumes the CSV
# column order is (partner id, market, booking value); verify against the
# raw file's header.
colnames(bookings) <- c("partner_id","mkt","bkg_value")
bookings$bkg_id <- seq(nrow(bookings))  # surrogate per-booking id
# compute aggregates per (partner, market) pair
aggr.prt.mkt <- ddply(bookings,.(partner_id,mkt),summarize,
                  tot_prt_mkt_bkg_value = sum(bkg_value),
                  num_bkgs = length(bkg_value),
                  mean_bkg_value=mean(bkg_value))
# compute market aggregates
# (mean_bkg_value below is a mean of per-partner means, not a
# booking-weighted mean -- presumably intentional)
aggr.mkt <- ddply(aggr.prt.mkt,.(mkt),summarize,
                  num_partners = length(partner_id),
                  tot_mkt_bkg_value = sum(tot_prt_mkt_bkg_value),
                  mean_bkg_value=mean(mean_bkg_value),
                  num_bkgs=sum(num_bkgs))
# compute per-partner aggregates (same caveat on mean_bkg_value)
aggr.prt <- ddply(aggr.prt.mkt,.(partner_id),summarize,
                  num_markets = length(mkt),
                  tot_prt_bkg_value = sum(tot_prt_mkt_bkg_value),
                  mean_bkg_value=mean(mean_bkg_value),
                  num_bkgs=sum(num_bkgs))
# compute market share: partner's value in a market / total market value
total_mkt_value <- aggr.mkt[,c("mkt","tot_mkt_bkg_value")]
aggr.prt.mkt <- merge(aggr.prt.mkt,total_mkt_value,all.x=TRUE)
aggr.prt.mkt$mkt_share <- aggr.prt.mkt$tot_prt_mkt_bkg_value/aggr.prt.mkt$tot_mkt_bkg_value
# compute partner share: fraction of a partner's total value earned in a market
total_prt_value <- aggr.prt[,c("partner_id","tot_prt_bkg_value")]
aggr.prt.mkt <- merge(aggr.prt.mkt,total_prt_value,all.x=TRUE)
aggr.prt.mkt$prt_share <- aggr.prt.mkt$tot_prt_mkt_bkg_value/aggr.prt.mkt$tot_prt_bkg_value
# write output (note: PATH is reassigned -- results go to ../data/, not www/)
PATH <- "../data/"
fname <- paste(PATH,'aggregates.rda',sep='')
save(aggr.prt.mkt,aggr.mkt,aggr.prt,file=fname)
|
7b5b6cf86fda30a3f1abf70fdc9ab0a715955c11 | d541ac8c98d952b3c7ef0593469a468645f904c2 | /R/maf_tab.R | e7d896db653eaaa2a56b4fdede8b6b117b4f7a45 | [] | no_license | pdiakumis/varpr | a65a5c29580a5a2902dd31eb9645914c72c10f93 | 2d84597a45765cf3a8a0a5754ab29d703ddfb350 | refs/heads/master | 2021-04-26T22:18:22.954030 | 2016-05-11T08:05:28 | 2016-05-11T08:05:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,172 | r | maf_tab.R | #' Returns a summary of a MAF variable (i.e. 1KG or ESP6500)
#' Returns a summary of a MAF variable (i.e. 1KG or ESP6500)
#'
#' \code{maf_tab} reads in a MAF vector (i.e. 1KG or ESP6500) and outputs the
#' number of variants satisfying certain availability/frequency conditions.
#'
#' @param maf_vec Numeric vector containing the MAF values; \code{NA} marks
#'   variants with no frequency available ("novel" variants).
#' @return A named integer vector with the counts of each condition:
#'   \code{total} (all variants), \code{avail} (non-missing MAF),
#'   \code{novel} (missing MAF), \code{novelpc5} (novel or MAF <= 5\%), and
#'   \code{pc1}, \code{pc5}, \code{pc10} (MAF <= 1\%, 5\%, 10\%).
#' @seealso \code{\link{sum}} and \code{\link{setNames}}.
#' @examples
#' \dontrun{
#' maf_tab(vars$aaf.1KG) # assumes you have a vars data frame
#' maf_tab(vars$esp6500_all) # assumes you have a vars data frame
#' }
#' @export
maf_tab <- function(maf_vec) {
  stopifnot(is.atomic(maf_vec), is.numeric(maf_vec))
  novel <- sum(is.na(maf_vec))
  avail <- sum(!is.na(maf_vec))
  total <- novel + avail
  # counts of variants at or below each frequency cutoff (1%, 5%, 10%);
  # vapply (rather than sapply) guarantees an integer result even for
  # degenerate input
  perc <- c(1, 5, 10)
  perc_vec <- vapply(perc, function(pc) {
    sum(maf_vec <= (pc / 100), na.rm = TRUE)
  }, integer(1))
  names(perc_vec) <- paste0("pc", perc)
  # novel variants plus those at or below 5%; equivalent to the former
  # sum(is.na(maf_vec) | (!is.na(maf_vec) & maf_vec <= 0.05))
  novel_pc5 <- novel + perc_vec[["pc5"]]
  # now join all together
  c(total = total, avail = avail, novel = novel,
    novelpc5 = novel_pc5, perc_vec)
}
|
3d59bf788857ee895fa9dc1ea6e905df340abade | fbf2073978337d333e6b88b35812ac12e1401020 | /tmp_tests/measles2.R | 1caf5521faad553358a3d61c3253648aa3a39c68 | [] | no_license | allistho/spatPomp | 615918f643afcd1cdd91d6f0dc1b453cdcd4923f | 3041a01f56ab0ae73b1742030a54a57ff1a793d6 | refs/heads/master | 2021-05-21T18:18:10.889759 | 2020-09-27T01:05:42 | 2020-09-27T01:05:42 | 252,750,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,254 | r | measles2.R | png(filename="measles2-%02d.png",res=100)
library(magrittr)
library(plyr)
library(reshape2)
library(ggplot2)
library(spatPomp3)
options(
  stringsAsFactors=FALSE,
  encoding="UTF-8"
)
## ----read_data-----------------------------------------------------------
## Keep only the UK series; rank cities by mean population (largest first).
read.csv("measlesUKUS.csv",stringsAsFactors=FALSE) %>% subset(country=="UK") -> x
ddply(x,~loc,summarize,mean.pop=mean(pop)) %>% arrange(-mean.pop) -> meanpop
x2 <- mutate(x,loc=ordered(loc,levels=meanpop$loc))
## Reshape cases to wide (one column per city), then back to long keyed by unit.
measles_wide <- dcast(x2,decimalYear~loc,value.var="cases")
measles_long <- measles_wide %>% tidyr::gather(LONDON:IPSWICH, key = "unit", value = "cases") %>% arrange(decimalYear)
## Lookup vector: names "1","2",... -> unit (city) names; used for covariates.
unit_index <- unique(measles_long[["unit"]])
names(unit_index) <- 1:length(unit_index)
## ----plot_data,fig.width=9,fig.height=8,cache=TRUE,echo=FALSE------------
## Diagnostic plot: biweekly case counts for the 20 largest cities.
subset(x,x$loc %in% meanpop$loc[1:20]) %>% mutate(loc=ordered(loc,levels=meanpop$loc)) %>% ggplot(aes(x=decimalYear,y=cases))+
geom_line()+
scale_y_continuous(breaks=c(0,4,40,400,4000),trans=scales::log1p_trans())+
facet_wrap(~loc,ncol=4)+theme(text=element_text(size=7))
## ----spatPomp_object-----------------------------------------------------
## Restrict the panel to the D largest cities and to years after 1950.
D <- 3
obs_names <- "cases"
measles_long <- measles_long %>% dplyr::filter(unit %in% unique(measles_long[['unit']])[1:D])
colnames(measles_long)[1] <- c("year")
measles_long <- subset(measles_long,measles_long$year>1949.99)
## ----covar---------------------------------------------------------------
## Covariates: city population and a lagged annual birth rate, per unit.
pop_wide <- dcast(x2,decimalYear~loc,value.var="pop")[,1:(D+1)]
colnames(pop_wide) <- c("year",paste0("pop",1:D))
births_wide <- dcast(x2,decimalYear~loc,value.var="rec")[,1:(D+1)]
birthrate_wide <- births_wide[,-1]*26 ## total annual birth rate for each city
lag <- 3*26 ## lag for birthrate, in number of biweeks
## Shift birth rates down by `lag` biweeks, padding the first rows with NA.
tmp <- matrix(NA,nrow=lag,ncol=ncol(birthrate_wide))
colnames(tmp) <- colnames(birthrate_wide)
lag_birthrate_wide <- rbind(tmp,birthrate_wide[1:(nrow(birthrate_wide)-lag),])
colnames(lag_birthrate_wide) <- paste0("birthrate",1:D)
rownames(lag_birthrate_wide) <- rownames(birthrate_wide)
## Long covariate table with one row per (year, unit).
## NOTE(review): the gather() range pop1:birthrate3 hard-codes D = 3.
measles_covar <- cbind(pop_wide,lag_birthrate_wide)
measles_covar <- measles_covar %>% tidyr::gather(pop1:birthrate3, key = 'cov', value = 'val')
measles_covar <- measles_covar %>% mutate(unit = stringr::str_extract(cov,"[0123456789]+$"))
measles_covar <- measles_covar %>% mutate(cov = stringr::str_extract(cov,"^[a-z]+"))
measles_covar <- measles_covar %>% tidyr::spread(key = cov, value = val)
measles_covar <- measles_covar %>% mutate(unit = unit_index[unit])
## ----dist----------------------------------------------------------------
## Pairwise great-circle distances (in miles) between the D cities.
library(geosphere)
s2 <- subset(x,x$biweek==1& x$year==1944 & x$country=="UK")
s3 <- subset(s2,select=c("lon","lat"))
rownames(s3) <- s2$loc
s4 <- s3[meanpop$loc,]
long_lat <- s4[1:D,]
dmat <- matrix(0,D,D)
for(d1 in 1:D) {
for(d2 in 1:D) {
dmat[d1,d2] <- round(distHaversine(long_lat[d1,],long_lat[d2,]) / 1609.344,1)
}
}
## Gravity-model coupling weights (v/g): symmetric, zero on the diagonal,
## scaled by populations and inversely by distance.
p <- meanpop[1:D,2]
v_by_g <- matrix(0,D,D)
dist_mean <- sum(dmat)/(D*(D-1))
p_mean <- mean(p)
for(d1 in 2:D){
for(d2 in 1:(d1-1)){
v_by_g[d1,d2] <- (dist_mean*p[d1]*p[d2]) / (dmat[d1,d2] * p_mean^2)
v_by_g[d2,d1] <- v_by_g[d1,d2]
}
}
## Serialize v_by_g as a C array literal for use inside the Csnippets below.
to_C_array <- function(v)paste0("{",paste0(v,collapse=","),"}")
v_by_g_C_rows <- apply(v_by_g,1,to_C_array)
v_by_g_C_array <- to_C_array(v_by_g_C_rows)
v_by_g_C <- Csnippet(paste0("const double v_by_g[",D,"][",D,"] = ",v_by_g_C_array,"; "))
v_by_g_C
## ----rprocess------------------------------------------------------------
## Latent state names: S,E,I,R plus accumulators C (incidence) and W (noise),
## one copy per unit (S1..SD, E1..ED, ...).
states <- c("S","E","I","R","C","W")
state_names <- paste0(rep(states,each=D),1:D)
## initial value parameters
ivp_names <- paste0(state_names[1:(4*D)],"_0")
## regular parameters
he10_rp_names <- c("alpha","iota","R0","cohort","amplitude","gamma","sigma","mu","sigmaSE","rho","psi")
rp_names <- c(he10_rp_names,"D","g")
## all parameters
param_names <- c(rp_names,ivp_names)
## Euler-step SEIR process model: term-time seasonal forcing, cohort-entry
## births, gravity coupling between units via v_by_g, and multiplicative
## gamma white noise (rgammawn) on the force of infection.  Comments inside
## the snippet are the author's; the snippet text must stay verbatim since
## it is compiled as C by pomp.
rproc <- Csnippet("
double beta, br, seas, foi, dw, births;
double rate[6], trans[6];
double *S = &S1;
double *E = &E1;
double *I = &I1;
double *R = &R1;
double *C = &C1;
double *W = &W1;
const double *pop = &pop1;
const double *birthrate = &birthrate1;
int d,e;
// term-time seasonality
t = (t-floor(t))*365.25;
if ((t>=7&&t<=100) || (t>=115&&t<=199) || (t>=252&&t<=300) || (t>=308&&t<=356))
seas = 1.0+amplitude*0.2411/0.7589;
else
seas = 1.0-amplitude;
// transmission rate
beta = R0*(gamma+mu)*seas;
for (d = 0 ; d < D ; d++) {
// cohort effect
if (fabs(t-floor(t)-251.0/365.0) < 0.5*dt)
br = cohort*birthrate[d]/dt + (1-cohort)*birthrate[d];
else
br = (1.0-cohort)*birthrate[d];
// expected force of infection
foi = pow( (I[d]+iota)/pop[d],alpha);
// Do we still need iota in a spatPomp version?
// See also discrepancy between Joonha and Daihai versions
// Daihai didn't raise pop to the alpha power
for (e=0; e < D ; e++) {
if(e != d)
foi += g * v_by_g[d][e] * (pow(I[e]/pop[e],alpha) - pow(I[d]/pop[d],alpha)) / pop[d];
}
// white noise (extrademographic stochasticity)
dw = rgammawn(sigmaSE,dt);
rate[0] = beta*foi*dw/dt; // stochastic force of infection
// These rates could be outside the d loop if all parameters are shared between units
rate[1] = mu; // natural S death
rate[2] = sigma; // rate of ending of latent stage
rate[3] = mu; // natural E death
rate[4] = gamma; // recovery
rate[5] = mu; // natural I death
// Poisson births
births = rpois(br*dt);
// transitions between classes
reulermultinom(2,S[d],&rate[0],dt,&trans[0]);
reulermultinom(2,E[d],&rate[2],dt,&trans[2]);
reulermultinom(2,I[d],&rate[4],dt,&trans[4]);
S[d] += births - trans[0] - trans[1];
E[d] += trans[0] - trans[2] - trans[3];
I[d] += trans[2] - trans[4] - trans[5];
R[d] = pop[d] - S[d] - E[d] - I[d];
W[d] += (dw - dt)/sigmaSE; // standardized i.i.d. white noise
C[d] += trans[4]; // true incidence
}
")
## ----initializer---------------------------------------------------------
## Initial states: rescale each unit's S_0/E_0/I_0/R_0 fractions so the
## compartments sum to that unit's population; zero the C and W accumulators.
measles_initializer <- Csnippet("
double *S = &S1;
double *E = &E1;
double *I = &I1;
double *R = &R1;
double *C = &C1;
double *W = &W1;
const double *S_0 = &S1_0;
const double *E_0 = &E1_0;
const double *I_0 = &I1_0;
const double *R_0 = &R1_0;
const double *pop = &pop1;
double m;
int d;
for (d = 0; d < D; d++) {
m = pop[d]/(S_0[d]+E_0[d]+I_0[d]+R_0[d]);
S[d] = nearbyint(m*S_0[d]);
E[d] = nearbyint(m*E_0[d]);
I[d] = nearbyint(m*I_0[d]);
R[d] = nearbyint(m*R_0[d]);
W[d] = 0;
C[d] = 0;
}
")
## ----he_mles-------------------------------------------------------------
## Per-city maximum-likelihood estimates, embedded as CSV text (labelled
## "he10" -- presumably from He, Ionides & King 2010; verify source).
read.csv(text="
town,loglik,loglik.sd,mu,delay,sigma,gamma,rho,R0,amplitude,alpha,iota,cohort,psi,S_0,E_0,I_0,R_0,sigmaSE
LONDON,-3804.9,0.16,0.02,4,28.9,30.4,0.488,56.8,0.554,0.976,2.9,0.557,0.116,0.0297,5.17e-05,5.14e-05,0.97,0.0878
BIRMINGHAM,-3239.3,1.55,0.02,4,45.6,32.9,0.544,43.4,0.428,1.01,0.343,0.331,0.178,0.0264,8.96e-05,0.000335,0.973,0.0611
LIVERPOOL,-3403.1,0.34,0.02,4,49.4,39.3,0.494,48.1,0.305,0.978,0.263,0.191,0.136,0.0286,0.000184,0.00124,0.97,0.0533
MANCHESTER,-3250.9,0.66,0.02,4,34.4,56.8,0.55,32.9,0.29,0.965,0.59,0.362,0.161,0.0489,2.41e-05,3.38e-05,0.951,0.0551
LEEDS,-2918.6,0.23,0.02,4,40.7,35.1,0.666,47.8,0.267,1,1.25,0.592,0.167,0.0262,6.04e-05,3e-05,0.974,0.0778
SHEFFIELD,-2810.7,0.21,0.02,4,54.3,62.2,0.649,33.1,0.313,1.02,0.853,0.225,0.175,0.0291,6.04e-05,8.86e-05,0.971,0.0428
BRISTOL,-2681.6,0.5,0.02,4,64.3,82.6,0.626,26.8,0.203,1.01,0.441,0.344,0.201,0.0358,9.62e-06,5.37e-06,0.964,0.0392
NOTTINGHAM,-2703.5,0.53,0.02,4,70.2,115,0.609,22.6,0.157,0.982,0.17,0.34,0.258,0.05,1.36e-05,1.41e-05,0.95,0.038
HULL,-2729.4,0.39,0.02,4,42.1,73.9,0.582,38.9,0.221,0.968,0.142,0.275,0.256,0.0371,1.2e-05,1.13e-05,0.963,0.0636
BRADFORD,-2586.6,0.68,0.02,4,45.6,129,0.599,32.1,0.236,0.991,0.244,0.297,0.19,0.0365,7.41e-06,4.59e-06,0.964,0.0451
",stringsAsFactors=FALSE) -> he10_mles
if(D>10) stop("Code only designed for D<=10")
## Shared parameters taken from the first row (LONDON); initial-value
## parameters taken city-by-city for the first D cities.  g=100 fixes the
## coupling strength.
test_params <- c(
unlist(he10_mles[1,he10_rp_names]),
D=D,
g=100,
he10_mles[1:D,"S_0"],
he10_mles[1:D,"E_0"],
he10_mles[1:D,"I_0"],
he10_mles[1:D,"R_0"]
)
names(test_params) <- param_names
## ----dmeasure------------------------------------------------------------
## Joint measurement log density: sum over units of a discretized-normal
## approximation with reporting rate rho and overdispersion psi; tol keeps
## the log argument strictly positive.
measles_dmeas <- Csnippet("
const double *C = &C1;
const double *cases = &cases1;
double m,v;
double tol = pow(1.0e-18,D);
int d;
lik = 0;
for (d = 0; d < D; d++) {
m = rho*C[d];
v = m*(1.0-rho+psi*psi*m);
if (cases[d] > 0.0) {
lik += log(pnorm(cases[d]+0.5,m,sqrt(v)+tol,1,0)-pnorm(cases[d]-0.5,m,sqrt(v)+tol,1,0)+tol);
} else {
lik += log(pnorm(cases[d]+0.5,m,sqrt(v)+tol,1,0)+tol);
}
}
if(!give_log) lik = exp(lik);
")
## ----rmeasure------------------------------------------------------------
## Measurement simulator matching measles_dmeas: draw a normal, round to the
## nearest integer, and truncate negative draws at zero.
measles_rmeas <- Csnippet("
const double *C = &C1;
double *cases = &cases1;
double m,v;
double tol = pow(1.0e-18,D);
int d;
for (d = 0; d < D; d++) {
m = rho*C[d];
v = m*(1.0-rho+psi*psi*m);
cases[d] = rnorm(m,sqrt(v)+tol);
if (cases[d] > 0.0) {
cases[d] = nearbyint(cases[d]);
} else {
cases[d] = 0.0;
}
}
")
## Assemble the spatPomp object (joint dmeasure only; the per-unit
## dunit_measure variant is built further below).
measles <- spatPomp(measles_long,
units = "unit",
times = "year",
t0 = min(measles_long$year)-1/26,
unit_statenames = c('S','E','I','R','C','W'),
global_statenames = c('P'),
covar = measles_covar,
tcovar = "year",
rprocess=euler.sim(rproc, delta.t=2/365),
zeronames = c(paste0("C",1:D),paste0("W",1:D)),
paramnames=param_names,globals=v_by_g_C,
initializer=measles_initializer,
dmeasure=measles_dmeas,
rmeasure=measles_rmeas)
## ----sim_test------------------------------------------------------------
set.seed(8375621)
sim <- simulate(measles,params=test_params)
## ----sim_plot,fig.width=9,fig.height=8,eval=T----------------------------
## Plot all simulated states/observations except the noise accumulators W*.
sim2 <- as.data.frame(sim)
subset(sim2,select=!grepl("^W",colnames(sim2))) %>% melt(id.vars="time") -> sim3
ggplot(sim3, aes(x=time,y=value))+
geom_line()+
facet_wrap(~variable,ncol=D)+theme(text=element_text(size=10))+
scale_y_continuous(breaks=c(0,100,10000,1e6),trans=scales::log1p_trans())
## ----vec_dmeasure--------------------------------------------------------
## R-level vectorized measurement density: one likelihood term per unit,
## mirroring the C snippet dmeasure (discretized normal with reporting rate
## rho and overdispersion psi).
##
##   y      named observation vector ("cases1", ..., "casesD")
##   x      named state vector (must contain "C1", ..., "CD")
##   t      observation time (unused; required by the dmeasure signature)
##   params named parameter vector; "rho", "psi" and "D" are read here
##   log    if TRUE return per-unit log densities, otherwise densities
##
## Returns a numeric vector of length D.
vec_dmeas <- function(y, x, t, params, log = FALSE, ...){
  # Number of units comes from params ("D" is in param_names), so the
  # function no longer depends on the global D.
  D <- as.integer(params[["D"]])
  tol <- (1e-18)^D  # same numerical floor as the C snippet dmeasure
  lik <- numeric(D)
  for (i in seq_len(D)) {
    # Bug fix: the original indexed y[obs_names[i]], but obs_names has
    # length 1 ("cases"), so every unit beyond the first indexed y[NA].
    # The observation columns are "cases1".."casesD" (see measles_rmeas).
    obs <- y[[paste0("cases", i)]]
    m <- params[["rho"]] * x[[paste0("C", i)]]
    v <- m * (1.0 - params[["rho"]] + params[["psi"]]^2 * m)
    s <- sqrt(v) + tol
    if (obs > 0.0) {
      lik[i] <- log(pnorm(obs + 0.5, mean = m, sd = s) -
                    pnorm(obs - 0.5, mean = m, sd = s) + tol)
    } else {
      lik[i] <- log(pnorm(obs + 0.5, mean = m, sd = s) + tol)
    }
  }
  # Note: `log` (the logical argument) does not shadow base::log in calls
  # above because R skips non-function bindings when resolving log(...).
  if (log) lik else exp(lik)
}
## ----dunit_measure-------------------------------------------------------
## Per-unit measurement density (C level): same model as measles_dmeas but
## for a single unit; note tol here is a flat 1e-18, not (1e-18)^D.
unit_dmeas <- Csnippet("
double m = rho*C;
double v = m*(1.0-rho+psi*psi*m);
double tol = 1.0e-18;
if (cases > 0.0) {
lik = pnorm(cases+0.5,m,sqrt(v)+tol,1,0)-pnorm(cases-0.5,m,sqrt(v)+tol,1,0)+tol;
} else {
lik = pnorm(cases+0.5,m,sqrt(v)+tol,1,0)+tol;
}
")
## Rebuild the spatPomp object, now also supplying dunit_measure.
measles <- spatPomp(measles_long,
units = "unit",
times = "year",
t0 = min(measles_long$year)-1/26,
unit_statenames = c('S','E','I','R','C','W'),
global_statenames = c('P'),
covar = measles_covar,
tcovar = "year",
rprocess=euler.sim(rproc, delta.t=2/365),
zeronames = c(paste0("C",1:D),paste0("W",1:D)),
paramnames=param_names,globals=v_by_g_C,
initializer=measles_initializer,
dmeasure=measles_dmeas,
dunit_measure=unit_dmeas,
rmeasure=measles_rmeas)
## ----naive_pfilter3, eval = T--------------------------------------------
pfilter3(measles, params = test_params, Np=1000, tol = (1e-17)^3) -> pf1
## ----naive_pfilter, eval = T---------------------------------------------
pfilter(measles, params = test_params, Np=1000, tol = (1e-17)^3) -> pf2
dev.off()
|
67d3d21b54c5731b862bf4fe26743d0e7b49c1e1 | e66f339cd2b6277f104c228470c97987148b1915 | /CASHW/include.R | 1a5b0d313ec83f0ac07df01e0e223fd6eeb7b7dc | [
"MIT"
] | permissive | scoyote/ViyaRace | cec12d6c87046138bed31305a0ae92cc349cd871 | 503c76e9904780c99961e0051a74c679047468c2 | refs/heads/master | 2021-01-18T17:32:54.044049 | 2017-06-09T21:43:51 | 2017-06-09T21:43:51 | 70,178,492 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 801 | r | include.R | library(swat)
library(tidyr)
# Open a CAS (SAS Viya) session on the local host.
# NOTE(review): authfile='~./authinfo' looks like a typo for '~/.authinfo'
# (the conventional authinfo location) -- confirm before changing.
s <- CAS('localhost', 5570, authfile='~./authinfo')
# Disabled helper kept for reference: listed file names (column 4) of a
# caslib, returning NULL when the caslib lookup failed.
#files <- reactiveValues()
#list_files <- function(sessionID,caslibID){
#  files <- tryCatch({
#    return(unnest(data.frame(cas.table.fileInfo(sessionID,caslib=caslibID))))
#  },
#  error = function(err){return(NULL)}
#  )
#  return(files[,4])
#}
# Placeholder reactive container (presumably shiny::reactiveValues; shiny
# must be attached by the calling app -- it is not loaded here).
tables <- reactiveValues()
# List the in-memory CAS tables for a caslib.
#
# sessionID  an open CAS session (see `s` above)
# caslibID   name of the caslib to inspect
#
# Returns a data frame of table metadata with the "TableInfo." prefix
# stripped from the first matching part of each column name, or NULL if the
# tableInfo call fails (e.g. unknown caslib).
list_tables <- function(sessionID,caslibID) {
  tables <- tryCatch({
    tb <- unnest(data.frame(cas.table.tableInfo(sessionID,caslib=caslibID)))
    # NOTE(review): the pattern "TableInfo." is a regex -- the unescaped dot
    # matches any character; harmless for these column names but fragile.
    names(tb) <- sub("TableInfo.", "", names(tb))
    return(tb)
  },
  error = function(err){ return(NULL)}
  )
  print(tables)   # NOTE(review): debug leftover; prints the whole result
  return(tables)
}
# Placeholder reactive container (presumably shiny::reactiveValues; shiny
# must be attached by the calling app).
caslibs <- reactiveValues()

# Return the first metadata column (the caslib names) for every caslib
# visible in the given CAS session.
list_caslibs <- function(sessionID) {
  info <- data.frame(cas.table.caslibInfo(sessionID))
  info[[1]]
}
|
d169eaf953627e2de55464fdc31b154c987042ba | 888eb6041144ac34c7ed0d17684f856a4e3b95fd | /tests/testthat/test-compare-designs.R | 57a250357cc2e77cc0919a6962bcb3be12302979 | [] | no_license | reuning/DeclareDesign | c5ae645ae7e661469ff4c9a54f252c69619b2e51 | b089b97397c6d95f334c129fdc0fb8fccb00d4d6 | refs/heads/master | 2023-08-19T19:30:45.244887 | 2021-10-17T13:11:15 | 2021-10-17T13:11:15 | 417,950,314 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,652 | r | test-compare-designs.R | context("Compare Designs")
my_population <- declare_population(N = 50, noise = rnorm(N))
my_potential_outcomes <-
declare_potential_outcomes(Y_Z_0 = noise, Y_Z_1 = noise + rnorm(N, mean = 2, sd = 2))
my_assignment <- declare_assignment(Z = complete_ra(N, m = 25))
pate <- declare_inquiry(pate = mean(Y_Z_1 - Y_Z_0))
sate <- declare_inquiry(sate = mean(Y_Z_1 - Y_Z_0))
pate_estimator <- declare_estimator(Y ~ Z, inquiry = pate)
sate_estimator <- declare_estimator(Y ~ Z, inquiry = sate)
reveal <- declare_reveal()
my_design_1 <- my_population +
my_potential_outcomes +
pate +
my_assignment +
reveal +
pate_estimator
my_design_2 <- my_population +
my_potential_outcomes +
sate +
my_assignment +
reveal +
sate_estimator
test_that("compare_designs works", {
diagnosis_1 <- diagnose_design(my_design_1, sims = 2, bootstrap_sims = FALSE)
diagnosis_2 <- diagnose_design(my_design_2, sims = 2, bootstrap_sims = FALSE)
# designs not in list, no names, names are imputed
comparison <- diagnose_design(my_design_1, my_design_2, sims = 2, bootstrap_sims = FALSE)
expect_equal(as.character(comparison$diagnosands$design), c("my_design_1", "my_design_2"))
# designs in list, no names, names are imputed
comparison <- diagnose_design(list(my_design_1, my_design_2), sims = 2, bootstrap_sims = FALSE)
expect_equal(as.character(comparison$diagnosands$design), c("design_1", "design_2"))
# designs not in list, all names, names used
comparison <- diagnose_design(d1 = my_design_1, d2 = my_design_2, sims = 2, bootstrap_sims = FALSE)
expect_equal(as.character(comparison$diagnosands$design), c("d1", "d2"))
# designs in list, all names, names used
comparison <- diagnose_design(list(d1 = my_design_1, d2 = my_design_2), sims = 2, bootstrap_sims = FALSE)
expect_equal(as.character(comparison$diagnosands$design), c("d1", "d2"))
# designs not in list, some names, available names used
comparison <- diagnose_design(my_design_1, a_design_2 = my_design_2, sims = 2, bootstrap_sims = FALSE)
expect_true(all(as.character(comparison$diagnosands$design) %in% c("my_design_1", "a_design_2")))
# designs not in list, duplicated names used, error
expect_error(comparison <- diagnose_design(d1 = my_design_1, d1 = my_design_2, sims = 2, bootstrap_sims = FALSE))
# designs in list, duplicated names used, error
expect_error(comparison <- diagnose_design(list(d1 = my_design_1, d1 = my_design_2), sims = 2, bootstrap_sims = FALSE))
})
my_population <- declare_population(N = 50, noise = rnorm(N))
my_potential_outcomes <-
declare_potential_outcomes(Y_Z_0 = noise, Y_Z_1 = noise + rnorm(N, mean = 2, sd = 2))
my_assignment <- declare_assignment(Z = complete_ra(N, m = 25))
pate <- declare_inquiry(pate = mean(Y_Z_1 - Y_Z_0))
sate <- declare_inquiry(sate = mean(Y_Z_1 - Y_Z_0))
pate_estimator <- declare_estimator(Y ~ Z, inquiry = pate)
sate_estimator <- declare_estimator(Y ~ Z, inquiry = sate)
reveal <- declare_reveal()
my_special_step <- declare_inquiry(ATE = 5)
my_design_3 <- my_population +
my_potential_outcomes +
pate +
my_special_step +
my_assignment +
reveal +
pate_estimator
my_design_4 <- my_population +
my_potential_outcomes +
sate +
my_assignment +
reveal +
sate_estimator
test_that("compare works", {
a <- compare_design_code(my_design_3, my_design_4)
b <- compare_design_summaries(my_design_3, my_design_4)
c <- compare_design_data(my_design_3, my_design_4)
d <- compare_design_inquiries(my_design_3, my_design_4)
e <- compare_design_estimates(my_design_3, my_design_4)
f <- compare_designs(my_design_3, my_design_4)
})
|
2140fdc7f14737eb6c27e73e36cccb9e8bfbaeb8 | 558246c24bf0a611246c7343af3f391ff2485d9c | /man/proffer-package.Rd | 3ba8b1aea2ac951f41f6d407bb69dd3ec0be3aa3 | [
"MIT"
] | permissive | krlmlr/proffer | ae7eecbd53d8e3d7b586e4bfd88a199b533e7002 | 97bb0c81a45a498f4b7ab42f616ccc8c9863d1ba | refs/heads/master | 2020-12-01T16:28:26.788070 | 2019-12-27T02:30:46 | 2019-12-27T02:30:46 | 230,698,058 | 0 | 0 | NOASSERTION | 2019-12-29T03:33:47 | 2019-12-29T03:33:46 | null | UTF-8 | R | false | true | 1,034 | rd | proffer-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{proffer-package}
\alias{proffer-package}
\alias{proffer}
\title{proffer: profile R code with pprof}
\description{
It can be challenging to find sources of
slowness in large workflows, and the proffer package can help.
Proffer runs R code and displays summaries
to show where the code is slowest. Proffer leverages
the pprof utility to create highly efficient, clear, easy-to-read
interactive displays that help users find ways to reduce runtime.
The package also contains helpers to convert profiling data
to and from pprof format and visualize existing profiling data files.
For documentation, visit \url{https://r-prof.github.io/proffer}.
}
\examples{
# TBD
\dontrun{
# Start a pprof virtual server in the background.
px <- pprof(replicate(1e2, sample.int(1e4)))
# Terminate the server.
px$kill()
}
}
\references{
\url{https://github.com/r-prof/proffer}
}
\author{
William Michael Landau \email{will.landau@gmail.com}
}
|
12266fd996c9523cca4778ee9057d0fcd1a722a1 | f92da6d51a6630f87c980b081fedf85910029c18 | /testdata/list.v2.rd | b1778692a76138eab88ce254b4e96c6a21d337ab | [] | no_license | hydrocul/roonda | 9d643edf89bbc3ab29dbbbf70ced484d18757882 | 44ae7e9179ffbd92993a1e26735b2032a26b6140 | refs/heads/master | 2021-01-13T02:06:11.249588 | 2014-03-05T16:04:45 | 2014-03-05T16:04:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 367 | rd | list.v2.rd | sh v2
(println "perl")
(perl
(assign lst (list 1 2 3 "abc"))
(dump lst))
(println "ruby")
(ruby
(assign lst (list 1 2 3 "abc"))
(dump lst))
(println "python2")
(python2
(assign lst (list 1 2 3 "abc"))
(dump lst))
(println "python3")
(python3
(assign lst (list 1 2 3 "abc"))
(dump lst))
(println "php")
(php
(assign lst (list 1 2 3 "abc"))
(dump lst))
|
c17973aef54fa39edd53815b409f2ee9001299c7 | 3c8428519e5fe135d3fb9a4da1db37c4557e2251 | /WetlandModel/modelManyRuns.R | a7d5982638458a16d7d838ed1c072f8967863e7b | [] | no_license | ecospatial/NAS_2016 | 5e8c4e7c6d6fe0b107f4078d0f9922257a256398 | f0982d3af944c2da9f165687e9539f86dd0255ae | refs/heads/master | 2022-11-23T16:42:43.966845 | 2020-07-30T21:54:37 | 2020-07-30T21:54:37 | 109,192,347 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,812 | r | modelManyRuns.R | setwd("WetlandModel/")
# Dependencies for the wetland JAGS model runs: JAGS bindings plus spatial
# packages; RPostgreSQL/postGIStools support the (currently commented-out)
# database path inside runModel.
library(rjags)
library(sp)
library(magrittr)
library(raster)
library(rgdal)
library(rgeos)
library(RPostgreSQL)
library(postGIStools)
# Project helpers: model-set generation and coda sampling with DIC.  The
# relative paths assume the working directory set at the top of the script.
source("../RUtilityFunctions/createModels.R")
source("../RUtilityFunctions/codaSamplesDIC.R")
source("loadTHK99.R")
# Loads the THK99 wetland-change dataset (defined in loadTHK99.R).
loadTHK99data(local=T, regions="ALL")
# Run the full wetland-change Bayesian modeling pipeline for one settings
# combination.
#
# modParams  list with elements $vegIdx (vegetation index name), $response
#            (response column, e.g. "logWET"), $regions ("ALL", "2" or "3"),
#            and $intercept (logical: random intercept)
# getData    if TRUE, stop after data prep and return the JAGS data list
#            instead of fitting anything
#
# Side effects (when getData = FALSE): reads shapefiles from hard-coded
# Windows paths, plots the study region, writes model files via
# createModels(), fits each model with JAGS, appends DIC results to a text
# file, saves each model's coda output to Results/<folder>/<i>.RData, and
# sends Pushbullet phone notifications.
runModel = function(modParams, getData = F)
{
  # CONFIG ------------------------------------------------------------------
  vegIdx = modParams$vegIdx
  params = c("RSLR","WH","TR","CS", vegIdx)
  response = modParams$response
  regions = modParams$regions
  barrierIslands = F #Include barrier islands
  randomIntercept = modParams$intercept
  properRandomEffects = T
  phoneNotifications = !getData
  if (phoneNotifications)
  {
    library(RPushbullet)
    pbPost("note", "NAS_2016", sprintf("Starting data prep for a %s region model with parameters %s, response %s, and %srandom intercept.", regions, paste(params,collapse=","), response, if (!randomIntercept) "no" else ""))
  }
  # # Database Connection and Loading -----------------------------------------
  # source("../../config/postgresqlcfg.R")
  # if(exists("user") || exists("pw")) {
  # con <- dbConnect(PostgreSQL(), dbname = db, user = user,
  # host = host, port = port,
  # password = pw)
  # rm(pw);rm(user)
  # }
  #
  # huc2 = get_postgis_query(con, "SELECT * FROM huc2 WHERE huc2.HUC2 IN ('12','13','08','03')", geom_name = "geom")
  #
  # dbDisconnect(con)
  # Load Local Data (TODO: Make DB) -----------------------------------------
  # Region granularity: "ALL" uses HUC4 subregions; "2"/"3" collapse to
  # HUC2-based groupings defined further below.
  if (regions == "2" | regions == "3")
  {
    HUClevel = "HUC2"
  } else if (regions == "ALL") {
    HUClevel = "HUC4"
  } else {
    if (phoneNotifications){pbPost("note", "NAS_2016", "MODEL RUN ERROR: Unsupported number of regions.")}
    stop("UNSUPPORTED NUMBER OF REGIONS: Use either ALL (HUC4), 2 or 3 (HUC2).")
  }
  HUCfilename = gsub("HUC(\\d*)", "WBDHU\\1", HUClevel)
  HUC = readOGR(sprintf("C:/DATA/HUC/HUC_shapes/%s.shp", HUCfilename), HUCfilename)
  thk99buff = readOGR("C:/DATA/EarthEngine/T1/thk99buff.shp", "thk99buff")
  # # Visualize removing wetland changes of 0
  # plot(thk99buff, col=NA, border=NA)
  # plot(huc4, add=T)
  # plot(thk99buff, add=T, col="green", border=NA)
  # plot(thk99buff[thk99buff$WET > 0,], add=T, col="red", border=NA)
  # Remove buffers without wetland change
  thk99buff = thk99buff[thk99buff$WET > 0,]
  # Remove barrier islands if chosen (keep only buffers overlapping the
  # mainland shoreline polygon)
  if (!barrierIslands)
  {
    shoreline = readOGR("C:/Users/GCRLWuHardy/Documents/General Maps/Coastlines/USCoast_h_L1.shp", "USCoast_h_L1")
    shoreline = spTransform(shoreline, proj4string(thk99buff))
    shoreline = crop(shoreline, thk99buff)
    thk99buff = thk99buff[!is.na(over(thk99buff, geometry(shoreline))),]
  }
  # Extract HUC and region to each buffer
  HUC = spTransform(HUC, proj4string(thk99buff))
  hucZone = over(thk99buff,HUC[,HUClevel])
  thk99buff[[HUClevel]] = factor(hucZone[[HUClevel]])
  if (HUClevel == "HUC4") {
    thk99buff$region = as.numeric(thk99buff[[HUClevel]])
  } else if (HUClevel == "HUC2") {
    if (regions == 2)
    {
      thk99buff$region = sapply(thk99buff$HUC2, function(x){
        if (x == "03" | x == "12" | x == "13")
          return(1)
        else
          return(2)
      })
    } else if (regions == 3) {
      thk99buff$region = sapply(thk99buff$HUC2, function(x){
        if (x == "12" | x == "13") # West Gulf
          return(1)
        else if (x == "08") # LA
          return(2)
        else
          return(3) # East Gulf (03)
      })
    }
  }
  #Visualize regions
  colF = function(x){
    rainbow(length(unique(thk99buff[[HUClevel]])))[x]
  }
  plot(thk99buff, col=NA, border=NA)
  plot(HUC[HUC[[HUClevel]] %in% unique(thk99buff[[HUClevel]]),], add=T)
  plot(thk99buff, add=T, col=sapply(thk99buff$region, colF), border=NA)
  #plot(thk99buff[thk99buff@data$ORIG_FID == 1845,], add=T, col="white", border="black", lwd=3)
  # Normalize Data ----------------------------------------------------------
  # z-score the predictors; carry region and the candidate responses along
  # unscaled.
  thk99buff_n = data.frame(sapply(thk99buff@data[c(params)], function(x){scale(x)}))
  thk99buff_n = cbind(thk99buff_n, region=thk99buff$region)
  thk99buff_n = cbind(thk99buff_n, logWET=thk99buff$logWET)
  thk99buff_n = cbind(thk99buff_n, logPCT=thk99buff$logPCT)
  thk99buff_n = cbind(thk99buff_n, WET=thk99buff$WET)
  thk99buff_n = cbind(thk99buff_n, PCT=thk99buff$PCT)
  # Guard: indexing by an unknown response column errors; surface a clearer
  # message (and a phone notification) instead.
  tryCatch({
    is.null(thk99buff_n[response])
  }, error= function(e){
    if (phoneNotifications){pbPost("note", "NAS_2016", "MODEL RUN ERROR: Response not included in data.")}
    stop("RESPONSE NOT INCLUDED IN DATA, SEE 'Normalize Data' SECTION IN CODE")
  })
  # Arrange Data for JAGS ---------------------------------------------------
  # NOTE: `regions` is repurposed here, from a settings string to the count
  # of distinct regions actually present.
  regions = length(unique(thk99buff_n$region))
  data = append(list(Nobs=nrow(thk99buff_n), Nregion=regions), thk99buff_n)
  if (getData)
  {
    return(data)
  }
  # Create Models -----------------------------------------------------------
  folderName = sprintf("%s-%sR-%s", response, regions, vegIdx)
  if (barrierIslands)
  {
    folderName = paste0(folderName, "-BaIs")
  }
  if (randomIntercept)
  {
    folderName = paste0(folderName, "-rB0")
  }
  if (!properRandomEffects)
  {
    # NOTE(review): "FRE)" looks like a typo (unbalanced parenthesis),
    # perhaps meant as a "FRE-" prefix -- confirm before changing, since
    # existing result folders may already use this name.
    folderName = paste0("FRE)", folderName)
  }
  models = createModels(response, params, randomIntercept, folderName, properRandomEffects = properRandomEffects)
  # Run Each Model in JAGS --------------------------------------------------
  if (!dir.exists("Results"))
  {
    dir.create("Results")
  }
  resultsDir = sprintf("Results/%s", folderName)
  if (!dir.exists(resultsDir))
  {
    dir.create(resultsDir)
  }
  write.table("modelNo\tfixed\trandom\tDIC", sprintf("%s/DIC_%s.txt", resultsDir, folderName), row.names=F, quote=F, sep="\t")
  modelFiles = list.files(paste0("Models/", folderName), pattern="^\\d*.txt")
  if (phoneNotifications){pbPost("note", "NAS_2016", sprintf("Started running a %s region model with parameters %s, response %s, and %srandom intercept.", regions, paste(params,collapse=","), response, if (!randomIntercept) "no " else ""))}
  Sys.time()
  # Fit each generated model; models already fit (RData present) are skipped,
  # so interrupted runs can be resumed.
  for(modelFile in modelFiles)
  {
    i = as.numeric(gsub("(\\d*)\\.txt", "\\1", modelFile))
    if (file.exists(sprintf("%s/%s.RData", resultsDir, i)))
    {
      print(sprintf("Skipping model %s; it already has been ran", i))
      next()
    }
    model = jags.model(sprintf("Models/%s/%s.txt", folderName, i),
                       data = data,
                       n.chains=3,
                       n.adapt=2000)
    output = coda.samples.dic(model = model,
                              variable.names=c("b0", paste0("b", params)),
                              n.iter=20000,
                              thin=1)
    fixed = paste(na.omit(models[i,1:length(params)]),collapse=",")
    random = paste(na.omit(models[i,(length(params)+1):(length(params)*2)]),collapse=",")
    # Append this model's fixed/random effect sets and DIC to the log.
    write(sprintf("%s\t%s\t%s\t%s", i, fixed, random, output$dic$deviance + output$dic$penalty),
          file = sprintf("%s/DIC_%s.txt", resultsDir, folderName),
          append = T)
    save(output,file=sprintf("%s/%s.RData", resultsDir, i))
  }
  Sys.time()
  if (phoneNotifications){pbPost("note", "NAS_2016", "MODEL RUN COMPLETE!!!")}
}
#response, regions, vegidx, randomIntercept
# Build every combination of model settings and run each through runModel()
# (defined above).
responses = c("logWET", "logPCT")
vegIdxs = c("NDMI")
regionses = c("ALL", "3")
intercepts = c(TRUE, FALSE)
l = list(response=responses, regions=regionses, vegIdx=vegIdxs, intercept=intercepts)
combos = as.data.frame(do.call(expand.grid, l))
# expand.grid() returns factors; convert the character settings back to
# plain character vectors for runModel().
combos$response = as.character(combos$response)
combos$vegIdx = as.character(combos$vegIdx)
# BUG FIX: this previously assigned to combos$region (no trailing "s"),
# which silently created a NEW column and left the real "regions" column
# as a factor (assignment with $ does not partial-match).
combos$regions = as.character(combos$regions)
for(i in seq_len(nrow(combos)))
{
  runModel(c(combos[i,]))
}
|
c40bfb55a132a1d96ecb12e83cb38864d4bb8141 | ef1d6fa0df37fa552c4c4625e6e9cb974e8482f0 | /man/stab.fs.Rd | a0859e52286f900022b91bc136a84ba9e2bdf85f | [] | no_license | bhklab/genefu | 301dd37ef91867de8a759982eb9046d3057723af | 08aec9994d5ccb46383bedff0cbfde04267d9c9a | refs/heads/master | 2022-11-28T09:22:02.713737 | 2022-05-30T15:35:53 | 2022-05-30T15:35:53 | 1,321,876 | 17 | 15 | null | 2022-11-07T11:52:05 | 2011-02-02T21:06:25 | R | UTF-8 | R | false | true | 2,658 | rd | stab.fs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stab.fs.R
\name{stab.fs}
\alias{stab.fs}
\title{Function to quantify stability of feature selection}
\usage{
stab.fs(fsets, N, method = c("kuncheva", "davis"), ...)
}
\arguments{
\item{fsets}{list of sets of selected features, each set of selected
features may have different size.}
\item{N}{total number of features on which feature selection is performed.}
\item{method}{stability index (see details section).}
\item{...}{additional parameters passed to stability index (penalty
that is a numeric for Davis' stability index, see details section).}
}
\value{
A numeric that is the stability index.
}
\description{
This function computes several indexes to quantify feature selection
stability. This is usually estimated through perturbation of the original
dataset by generating multiple sets of selected features.
}
\details{
Stability indices may use different parameters. In this version only the
Davis index requires an additional parameter that is penalty, a numeric
value used as penalty term.
Kuncheva index (kuncheva) lies in [-1, 1]. An index of -1 means no
intersection between sets of selected features, +1 means that all the
same features are always selected, and 0 is the expected stability of a
random feature selection.
Davis index (davis) lies in [0, 1]. With a penalty term equal to 0, an
index of 0 means no intersection between sets of selected features
and +1 means that all the same features are always selected. A penalty
of 1 is usually used so that a feature selection performed with no or
all features has a Davis stability index equal to 0. No estimate of
the expected Davis stability index of a random feature selection has
been published.
}
\examples{
set.seed(54321)
# 100 random selection of 50 features from a set of 10,000 features
fsets <- lapply(as.list(1:100), function(x, size=50, N=10000) {
return(sample(1:N, size, replace=FALSE))} )
names(fsets) <- paste("fsel", 1:length(fsets), sep=".")
# Kuncheva index
stab.fs(fsets=fsets, N=10000, method="kuncheva")
# close to 0 as expected for a random feature selection
# Davis index
stab.fs(fsets=fsets, N=10000, method="davis", penalty=1)
}
\references{
Davis CA, Gerick F, Hintermair V, Friedel CC, Fundel K, Kuffner R, Zimmer R
(2006) "Reliable gene signatures for microarray classification: assessment
of stability and performance", Bioinformatics, 22(19):356-2363.
Kuncheva LI (2007) "A stability index for feature selection", AIAP'07:
Proceedings of the 25th conference on Proceedings of the 25th IASTED
International Multi-Conference, pages 390-395.
}
\seealso{
\link{stab.fs.ranking}
}
|
9c235c0d7f4fd33a0f7231d788d410cbbc75a950 | bced527f03bc21c12e98633b75554aecb592191c | /Reduce_AcousticStationsData.R | 67d8ebb86641887cefc8c3f9fd51532ddd007acc | [] | no_license | mezo-research/tagga | ea5af2914397f2c8fc15f64fc10de9ed54855615 | b4d661f13024df0f2dfe5bdf73ef620fdb26469c | refs/heads/master | 2021-01-16T23:05:42.150126 | 2016-10-12T05:23:10 | 2016-10-12T05:23:10 | 69,429,968 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,028 | r | Reduce_AcousticStationsData.R | ## Date: 4 October 2016
## Creator: Christina Smith
## Purpose: Reduce data in AcousticStations_reduced_2hr.csv to contain the min
##          data required for the mapping and time series plots and save in
##          AcousticStations_maptime.csv
##
## NOTES:   Date is not read in as an R date; it is parsed below.
library(dplyr)
## Read in reduced Acoustic Station Data.
station_data <- read.csv("AcousticStations_reduced_2hr.csv")
## Transform data: parse the date, group by date, life stage and station
## (keeping station region and coordinates), then count the records for
## each life stage at each station on a particular day.
## n() replaces the old mutate(number = 1) ... sum(number) idiom; summing
## a column of 1s is exactly the per-group row count.
min_data <- station_data %>%
  mutate(Date = as.Date(Date, "%d/%m/%Y")) %>%
  group_by(Date, Life_stage, StationID, Station_Region, Latitude, Longitude) %>%
  summarise(Number_fish = n())
## Write data to comma separated file
write.table(min_data, file = "AcousticStations_maptime.csv", sep = ",",
            row.names = FALSE)
5e4bb23eb8a030bb63c67ff7da7a04bd4015651c | 85a3a70cdf78f11d180282ec540b5d6572db1f21 | /Analysis/6b_modelling.R | aa40d4d1faf4fa01ffa1cb2b4c421d42d4a8cae6 | [] | no_license | warren-james/Motivation_and_Decisions | 8d5fa8c8890da885fbd36e90a20a6e7e4ffdd010 | aedba9ed7bd1cbc3ca25de0e27ac71b9864af1e8 | refs/heads/master | 2020-05-17T09:45:49.261037 | 2020-02-26T16:49:31 | 2020-02-26T16:49:31 | 183,638,354 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,340 | r | 6b_modelling.R | #### modelling penguin ####
# This script is to work on modelling the data
# from the penguin version of the task with other
# control versions (using instructed and practice from
# the transfer paper)
#### Library ####
library(brms)
library(rethinking)
library(rstan)
library(tidybayes)
library(tidyverse)
#### constants ####
# Display geometry; ppcm converts between pixels and physical screen width
# (assumes x_res in px and x_width in cm -- TODO confirm units).
Screen_dist <- 60
x_res <- 1920
x_width <- 54
ppcm <- x_res/x_width
# NB: setting seed to make results reproducible
set.seed(12345)
#### Functions ####
# Convert a physical size at a given viewing distance into visual angle
# in degrees: 2 * atan2(size, 2 * distance), then radians -> degrees.
get_VisDegs <- function(size, distance) {
  angle_rad <- 2 * atan2(size, 2 * distance)
  (angle_rad * 180) / pi
}
#### load in data ####
# loads `df_all` (trial-level data with participant, group, correct, accuracy, ...)
load("scratch/model_data")
#### STAN: Beta ####
#### STAN: Accuracy ~ group ####
# replicating the BRMS version essentially
# One observed-accuracy value per participant; `group` is the only predictor.
model_data <- df_all %>%
  group_by(participant, group) %>%
  summarise(accuracy = mean(correct)) %>%
  ungroup()
# Design matrix (intercept + group dummies) passed to Stan as X.
m_matrix <- model.matrix(accuracy ~ group, data = model_data)
# Data list in the layout the beta-regression Stan programs expect.
stan_df <- list(
  N = nrow(model_data),
  K = ncol(m_matrix),
  y = model_data$accuracy,
  X = m_matrix
)
# Fit 1: the "no priors" ("_np") Stan program.
m_stan_group <- stan(
  file = "modelling/models/stan_model_np.stan",
  data = stan_df,
  chains = 1,
  warmup = 2000,
  iter = 4000,
  refresh = 100
)
# save above
save(model_data, file = "modelling/model_data/betaacc_1")
save(m_stan_group, file = "modelling/model_outputs/m_stan_group_beta_acc")
# same again with normalising priors
m_stan_group_p <- stan(
  file = "modelling/models/stan_model.stan",
  data = stan_df,
  chains = 1,
  warmup = 2000,
  iter = 4000,
  refresh = 100
)
# save
save(m_stan_group_p, file = "modelling/model_outputs/m_stan_group_beta_acc_p")
# same again with skewed priors
m_stan_group_pdata <- stan(
  file = "modelling/models/stan_model_pfdata2.stan",
  data = stan_df,
  chains = 1,
  warmup = 2000,
  iter = 4000,
  refresh = 100
)
# save above
save(m_stan_group_pdata, file = "modelling/model_outputs/m_stan_group_beta_acc_pdata")
#### STAN: Predicted Accuracy ####
# same as above but now on expected accuracy
# (mean of the `accuracy` column per participant x group cell).
model_data <- df_all %>%
  group_by(participant, group) %>%
  summarise(pred_accuracy = mean(accuracy)) %>%
  ungroup()
m_matrix <- model.matrix(pred_accuracy ~ group, data = model_data)
stan_df <- list(
  N = nrow(model_data),
  K = ncol(m_matrix),
  y = model_data$pred_accuracy,
  X = m_matrix
)
# No-prior fit, mirroring the observed-accuracy model above.
m_stan_group_exp <- stan(
  file = "modelling/models/stan_model_np.stan",
  data = stan_df,
  chains = 1,
  warmup = 2000,
  iter = 4000,
  refresh = 100
)
save(model_data, file = "modelling/model_data/beta_exp_np")
save(m_stan_group_exp, file = "modelling/model_outputs/m_stan_group_beta_exp_np")
# same again with new (skewed) priors
m_stan_group_exp_pdata <- stan(
  file = "modelling/models/stan_model_pfdata2.stan",
  data = stan_df,
  chains = 1,
  warmup = 2000,
  iter = 4000,
  refresh = 100
)
save(m_stan_group_exp_pdata, file = "modelling/model_outputs/m_stan_group_beta_exp_pdata")
#### STAN: acc ~ group * acc_type ####
# Long format: one row per participant x group x accuracy type
# (Raw = mean of `correct`, Predicted = mean of `accuracy`).
model_data <- df_all %>%
  group_by(participant, group) %>%
  summarise(Raw = mean(correct),
            Predicted = mean(accuracy)) %>%
  gather(c(Raw, Predicted),
         key = "acc_type",
         value = "accuracy") %>%
  ungroup()
# model_matrix
# ^2 expands to main effects plus the group:acc_type interaction.
m_matrix <- model.matrix(accuracy ~ (group + acc_type)^2,
                         data = model_data)
# stan_df
stan_df <- list(
  N = nrow(model_data),
  K = ncol(m_matrix),
  y = model_data$accuracy,
  X = m_matrix
)
m_stan_both <- stan(
  file = "modelling/models/stan_model.stan",
  data = stan_df,
  chains = 1,
  warmup = 1000,
  iter = 2000,
  refresh = 100
)
# save
save(model_data, file = "modelling/model_data/beta_3")
save(m_stan_both, file = "modelling/model_outputs/m_stan_both")
#### STAN: try bernoulli? ####
# real model
# Trial-level (0/1) outcomes rather than participant means.
model_data <- df_all %>%
  select(participant, group, correct)
m_matrix <- model.matrix(correct ~ group, data = model_data)
stan_df <- list(
  N = nrow(model_data),
  K = ncol(m_matrix),
  y = model_data$correct,
  X = m_matrix
)
# WIP, takes far too long, not sure why
m_stan_berno <- stan(
  file = "modelling/models/stan_berno.stan",
  data = stan_df,
  chains = 1,
  warmup = 1000,
  iter = 2000,
  refresh = 100
)
save(model_data, file = "modelling/model_data/berno_1")
save(m_stan_berno, file = "modelling/model_outputs/m_stan_berno_1")
#### STAN: add in dist_type ####
#### STAN: Actual Accuracy ####
# Per-participant observed accuracy split by distance type; the
# (x + 1e-5) * 0.9999 squeeze keeps values strictly inside (0, 1) for the
# beta likelihood.
model_data <- df_all %>%
  group_by(participant, dist_type, group) %>%
  summarise(Accuracy = mean(correct)) %>%
  mutate(Accuracy = (Accuracy + 1e-5)*0.9999)
m_matrix <- model.matrix(Accuracy ~ (group + dist_type)^2, data = model_data)
# NOTE(review): model_data_new is built here but never used in this
# section, and the fitted model below is never save()d -- presumably
# work in progress; confirm before relying on this fit.
model_data_new <- model_data %>%
  rownames_to_column(var = "row_num")
stan_df <- list(
  N = nrow(model_data),
  K = ncol(m_matrix),
  y = model_data$Accuracy,
  X = m_matrix
)
m_stan_group_dist <- stan(
  file = "modelling/models/stan_model.stan",
  data = stan_df,
  chains = 1,
  warmup = 1000,
  iter = 2000,
  refresh = 100
)
#### STAN: Expected Accuracy and dist_type ####
# Expected accuracy (mean of `accuracy`) per participant x group x dist_type.
model_data_new <- df_all %>%
  group_by(participant, group, dist_type) %>%
  summarise(pred_accuracy = mean(accuracy)) %>%
  ungroup() %>%
  rownames_to_column(var = "row_num")
m_matrix <- model.matrix(pred_accuracy ~ (group + dist_type)^2, data = model_data_new)
stan_df <- list(
  # BUG FIX: N previously counted rows of `model_data` (the data frame from
  # the *previous* section) while y and X come from model_data_new; all
  # three entries must describe the same data set.
  N = nrow(model_data_new),
  K = ncol(m_matrix),
  y = model_data_new$pred_accuracy,
  X = m_matrix
)
m_stan_group_dist_exp <- stan(
  file = "modelling/models/stan_model.stan",
  data = stan_df,
  chains = 1,
  warmup = 1000,
  iter = 2000,
  refresh = 100
)
#### STAN: Add in Delta ####
# setup the data: scale separation
# scaled_sep rescales separation to (0, 1] by dividing by its maximum;
# act_acc is squeezed into (0, 1) for the beta likelihood.
model_data_scaled <- df_all %>%
  mutate(scaled_sep = separation/max(separation)) %>%
  group_by(participant, scaled_sep, separation, group) %>%
  summarise(act_acc = (mean(correct)+ 1e-5)*0.9999,
            exp_acc = mean(accuracy)) %>%
  ungroup()
#### STAN: Actual Acc ~ (distance + group)^2 ####
m_matrix <- model.matrix(act_acc ~ (group + scaled_sep)^2, data = model_data_scaled)
stan_df <- list(
  N = nrow(model_data_scaled),
  K = ncol(m_matrix),
  y = model_data_scaled$act_acc,
  X = m_matrix
)
# Fit with the default (normalising) priors.
m_stan_group_scaled_acc <- stan(
  file = "modelling/models/stan_model.stan",
  data = stan_df,
  chains = 1,
  warmup = 1000,
  iter = 2000,
  refresh = 100
)
# save data and model
save(model_data_scaled, file = "modelling/model_data/model_data_scaled")
save(m_stan_group_scaled_acc, file = "modelling/model_outputs/m_stan_group_scaled_acc")
# same again with priors based on data
# BUG FIX: this second fit previously overwrote m_stan_group_scaled_acc and
# re-saved to the same output file, clobbering the first model. It now uses
# its own object/file with the "_pdata" suffix that the earlier sections
# use for priors-from-data fits.
m_stan_group_scaled_acc_pdata <- stan(
  file = "modelling/models/stan_model_pfdata.stan",
  data = stan_df,
  chains = 1,
  warmup = 1000,
  iter = 2000,
  refresh = 100
)
save(m_stan_group_scaled_acc_pdata, file = "modelling/model_outputs/m_stan_group_scaled_acc_pdata")
#### STAN: Exp acc ~ (distance + group)^2 ####
# Same design as above but with expected accuracy as the outcome.
m_matrix <- model.matrix(exp_acc ~ (group + scaled_sep)^2, data = model_data_scaled)
stan_df <- list(
  N = nrow(model_data_scaled),
  K = ncol(m_matrix),
  y = model_data_scaled$exp_acc,
  X = m_matrix
)
m_stan_group_scaled_exp <- stan(
  file = "modelling/models/stan_model.stan",
  data = stan_df,
  chains = 1,
  warmup = 1000,
  iter = 2000,
  refresh = 100
)
# save model
# BUG FIX: previously saved m_stan_group_scaled_acc (the wrong object) to
# this file; the expected-accuracy fit is m_stan_group_scaled_exp.
save(m_stan_group_scaled_exp, file = "modelling/model_outputs/m_stan_group_scaled_exp")
|
e29ad72b1ef122d021e4f3a11818019bc3ed3c94 | 2e3d6b8b717ed1bd34bf97f96eb2784e2ab8e8be | /src/preprocessing_gls2000_yt.R | 575f455bb4b7734e189ea6a3a2fb9f02dea198c4 | [
"MIT"
] | permissive | environmentalinformatics-marburg/orthoptera_prediction | 54f9553ce359c9d5e0f6d14f197fb91f7327a509 | 856553c048b07c21761ef3ad84ab451b74de5b5e | refs/heads/master | 2020-12-24T07:06:14.958051 | 2018-06-14T11:54:38 | 2018-06-14T11:54:38 | 37,680,583 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,476 | r | preprocessing_gls2000_yt.R | # Preprocess dataset by combining field and satellite observations
if(Sys.info()["sysname"] == "Windows"){
  source("D:/orthoptera/orthoptera_prediction/src/00_set_environment.R")
} else {
  source("/media/tnauss/myWork/analysis/orthoptera/orthoptera_prediction/src/00_set_environment.R")
}
# Prepare GLS2000 dataset ------------------------------------------------------
# Landsat GLS2000 stack; NDVI = (band4 - band3) / (band4 + band3).
gls <- stack(paste0(filepath_landsat, "gls2000.tif"))
# mapview(gls) + obsv_shp_arc
ndvi <- (gls[[4]] - gls[[3]]) / (gls[[4]] + gls[[3]])
# Prepare orthoptera observations ----------------------------------------------
obsv <- read_excel(paste0(filepath_obsv, "Grasshopper-Data.xlsx"))
obsv <- as.data.frame(obsv)
# Year + day-of-year stamp ("%Y-%j") for each field observation.
obsv$date_observation <- format(as.Date(obsv$date, "%d/%m/%Y"), "%Y-%j")
# Two spatial copies of the observations: one interpreting coordW/coordN
# as EPSG:32737, one as EPSG:21037; both are then reprojected to the
# raster's CRS for extraction.
obsv_shp_wgs <- obsv
coordinates(obsv_shp_wgs) <- ~coordW+coordN
projection(obsv_shp_wgs) <- CRS("+init=epsg:32737")
obsv_shp_arc <- obsv
coordinates(obsv_shp_arc) <- ~coordW+coordN
projection(obsv_shp_arc) <- CRS("+init=epsg:21037")
obsv_shp_wgs <- spTransform(obsv_shp_wgs, crs(ndvi))
obsv_shp_arc <- spTransform(obsv_shp_arc, crs(ndvi))
# mapview(obsv_shp_wgs)+obsv_shp_arc
# Extract GLS2000 data ---------------------------------------------------------
# For each coordinate interpretation (WGS then ARC): point-extract NDVI,
# then summarise NDVI within a 60 m buffer around every plot.
ndvi_obs <- lapply(c(obsv_shp_wgs, obsv_shp_arc), function(obsv_shp){
  # sp = TRUE appends the extracted value as a new (last) column
  ndvi_plots <- extract(ndvi, obsv_shp, sp = TRUE)
  colnames(ndvi_plots@data)[ncol(ndvi_plots@data)] <- "NDVI"
  ndvi_plots_buffer <- extract(ndvi, obsv_shp, buffer = 60.0)
  # One row of buffer statistics per plot, keyed by its ID.
  ndvi_plots_buffer_stat <- lapply(seq(length(ndvi_plots_buffer)), function(i){
    data.frame(ID = obsv_shp@data[i,"ID"],
               NDVI_mean = mean(ndvi_plots_buffer[[i]]),
               NDVI_median = median(ndvi_plots_buffer[[i]]),
               NDVI_sd = sd(ndvi_plots_buffer[[i]]),
               NDVI_min = min(ndvi_plots_buffer[[i]]),
               NDVI_max = max(ndvi_plots_buffer[[i]]))
  })
  ndvi_plots_buffer_stat <- do.call("rbind", ndvi_plots_buffer_stat)
  merge(ndvi_plots, ndvi_plots_buffer_stat)
})
# Suffix the NDVI columns by coordinate system and join both versions.
# NOTE(review): the hard-coded column range 28:33 assumes a fixed input
# schema -- confirm if Grasshopper-Data.xlsx changes.
colnames(ndvi_obs[[1]]@data)[28:33] <- paste0(colnames(ndvi_obs[[1]]@data)[28:33], "_WGS")
colnames(ndvi_obs[[2]]@data)[28:33] <- paste0(colnames(ndvi_obs[[2]]@data)[28:33], "_ARC")
ndvi_plots_final <- merge(ndvi_obs[[1]], ndvi_obs[[2]]@data)
head(ndvi_plots_final@data)
saveRDS(ndvi_plots_final,
        file = paste0(filepath_results, "ndvi_plots_final.RDS"))
saveRDS(as.data.frame(ndvi_plots_final),
        file = paste0(filepath_results, "ndvi_plots_final_df.RDS"))
|
e8e6e815beab1b240b6a09cc06c3a7609e3026e0 | b9577e6d62bcdfba4c142a2d4b7bd3c207face6d | /plot4.R | f307e818b937e8febe196634269d4757caffacfc | [] | no_license | simon-zhang36/ExData_Plotting1 | 36ad8eb3634dde1a1c20759c3f592dea7bb9b085 | 20794c4ed4cfba3c509c57ec366a35a74930df8e | refs/heads/master | 2021-01-16T20:29:43.808058 | 2015-03-05T11:23:19 | 2015-03-05T11:23:19 | 22,815,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,050 | r | plot4.R | unzip("./data/exdata-data-household_power_consumption.zip",files='./data',unzip = 'internal')
# Read the semicolon-separated power data; '?' marks missing values.
data <- read.delim('./household_power_consumption.txt',sep=";",na.strings='?')
# Keep only the two target days: a single %in% filter replaces the old
# rbind() of two separate subset() calls, and the redundant data.frame()
# re-wrap (a no-op) is dropped.
data1 <- subset(data, Date %in% c('1/2/2007', '2/2/2007'))
# Combine the separate Date and Time fields into one date-time column.
data1$"Date/Time" <- paste(data1$Date,data1$Time)
data1$"Date/Time" <- strptime(data1$"Date/Time","%d/%m/%Y %H:%M:%S")
# Four-panel figure written straight to plot4.png.
png(filename="plot4.png",width=480,height=480,units="px")
par(mfrow=c(2,2))
plot(data1$"Date/Time",data1$Global_active_power,type='l',main='',xlab='',ylab = 'Global Active Power')
plot(data1$"Date/Time",data1$Voltage,type='l',main='',xlab='datatime',ylab='Voltage')
{
  plot(data1$"Date/Time",data1$Sub_metering_1,type='l',col='black',ylab='Energy sub metering',xlab='')
  lines(data1$"Date/Time",data1$Sub_metering_2,type='l',col='red')
  lines(data1$"Date/Time",data1$Sub_metering_3,type='l',col='blue')
}
plot(data1$"Date/Time",data1$Global_reactive_power,type='l',xlab='datatime',ylab="Global_reactive_power")
dev.off()
4de34263586f3a0421208a86036512b828dad65e | 9d764bdee702291a1d522dd9426cb3dccdfa4023 | /R/deprecated.R | 5b82e895f6add7e043abd50a684021dd0216cfed | [] | no_license | cran/osmose | 8290c74decb5d8501af64c1683345e7c9bc27b85 | a31c8bafe5afe93a7d231c76b9f77b01f0e395ea | refs/heads/master | 2022-04-25T22:47:05.623763 | 2020-04-28T12:00:03 | 2020-04-28T12:00:03 | 113,894,240 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,088 | r | deprecated.R |
# to keep back compatibility for a while
#' @rdname read_osmose
#' @export
osmose2R = function(path = NULL, version = "v3r2", species.names = NULL, ...) {
  # Deprecated alias: warn, then forward every argument to read_osmose().
  .Deprecated("read_osmose")
  read_osmose(path = path, version = version,
              species.names = species.names, ...)
}
#' @rdname get_var
#' @export
getVar = function(object, what, how, ...) {
  # Deprecated alias for get_var(); forwards all arguments unchanged.
  .Deprecated("get_var")
  get_var(object = object, what = what, how = how, ...)
}
#' @rdname run_osmose
#' @export
runOsmose = function(input, parameters=NULL, output="output", log="osmose.log",
                     version="4.1.0", osmose=NULL, java="java",
                     options=NULL, verbose=TRUE, clean=TRUE) {
  # Deprecated alias for run_osmose(); forwards every argument unchanged.
  .Deprecated("run_osmose")
  run_osmose(input = input,
             parameters = parameters,
             output = output,
             log = log,
             version = version,
             osmose = osmose,
             java = java,
             options = options,
             verbose = verbose,
             clean = clean)
}
#' @rdname write_osmose
#' @export
write.osmose = function(x, file) {
  # Deprecated alias kept for backward compatibility: writes `x` as a
  # comma-separated file with row names and a blank corner header cell
  # (col.names = NA), without quoting.
  .Deprecated("write_osmose")
  write.table(x, file = file, quote = FALSE, sep = ",", col.names = NA)
}
|
c8957710979c2e0ad7556581c21a985dee8c56c2 | 2d5496f7d71050c42555df50f6ee002104544e70 | /cachematrix.R | df0618ab64b373474e98c7bb41178e86fd1f1fcb | [] | no_license | joseroshan/ProgrammingAssignment2 | d9b6baa6e40f0d35491bcf5f56655ba0c832e6f6 | b0b36babb0c5bd354418c5e9281fd474c17d9c0b | refs/heads/master | 2021-01-15T09:56:42.374307 | 2014-11-22T03:06:06 | 2014-11-22T03:06:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 813 | r | cachematrix.R | # Description for makeCacheMatrix
# makeCacheMatrix: wrap a matrix so its inverse can be cached.
# Returns a list of four accessors (set/get/setinv/getinv); the matrix and
# the cached inverse live in this function's closure. Setting a new matrix
# invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
# cacheSolve: return the inverse of the special "matrix" created by
# makeCacheMatrix, computing it with solve() only on the first call and
# serving the cached copy (with a message) on subsequent calls.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
96e9412569d4454a550cecbe42fa7df185d549d6 | 0a3c8bb331b01471ea0d581741d7049a6f6bc380 | /plot3.R | 7acb45004cf22059729e54bb0696010493090e0e | [] | no_license | bbrewington/Coursera-EDA-Plotting1 | f85cdcf67ff1e7877c9bee9aac6469df5e5cdd56 | 858388e3dd11025d27f10cc82c74b5a5161d4248 | refs/heads/master | 2021-01-12T22:12:13.470089 | 2015-08-03T15:09:59 | 2015-08-03T15:09:59 | 33,826,903 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,633 | r | plot3.R | ## Coursera - Exploratory Data Analysis
## Course Project # 1
## Author: Brent Brewington, (github: bbrewington)
## Plot3.R
# Get data from file "household_power_consumption", and save to data frame "DF"
# (file.exists() replaces the old `%in% list.files()` membership test)
if(!file.exists("household_power_consumption.txt")){
  temp <- tempfile()
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
  DF <- read.table(unz(temp, "household_power_consumption.txt"),
                   header=TRUE,sep=";",na.strings="?",stringsAsFactors=FALSE)
  unlink(temp)
} else{
  DF <- read.table("household_power_consumption.txt",
                   header=TRUE,sep=";",na.strings="?",stringsAsFactors=FALSE)
}
# Create new data frame "DF_subset", which only includes Feb 1, 2007 - Feb 2, 2007
# (a single %in% filter replaces the pair of == comparisons)
DF_subset <- subset(DF, Date %in% c("1/2/2007", "2/2/2007"))
# Convert DF_subset$Date to POSIXct and save to new column called "DateTime"
DateTime <- as.POSIXct(paste(as.Date(DF_subset$Date,"%d/%m/%Y"), DF_subset$Time), format="%Y-%m-%d %H:%M:%S")
DF_subset <- cbind(DF_subset, DateTime = DateTime)
# Create line plot and save to png file "plot3.png" in the working directory
png(file="plot3.png", width=480, height=480)
with(DF_subset, plot(DateTime, Sub_metering_1, ylab="Energy sub metering", xlab="",type = "n"))
with(DF_subset, lines(DateTime, Sub_metering_1, col = "black"))
with(DF_subset, lines(DateTime, Sub_metering_2, col = "red"))
with(DF_subset, lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright",legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=c(1,1,1), col=c("black","red","blue"))
dev.off()
0ca29104f475f9a6acf6ef3e57939a01d6295df1 | f5f142e469ba0526a2768a509630c8b5156b1fcb | /man/assign_values_to_leaves_nodePar.Rd | e2b84e9d4e144ce6c83416f4512de4fec2b59211 | [] | no_license | JohnMCMa/dendextend | 350ca633b439b8964eec739ba9247c9527ae37f4 | 1e25e5bf786d943b3aa651f4257336462187d43c | refs/heads/master | 2021-01-18T16:05:01.686085 | 2017-03-30T14:15:29 | 2017-03-30T14:15:29 | 86,709,713 | 0 | 0 | null | 2017-03-30T14:06:03 | 2017-03-30T14:06:03 | null | UTF-8 | R | false | true | 1,837 | rd | assign_values_to_leaves_nodePar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attr_access.R
\name{assign_values_to_leaves_nodePar}
\alias{assign_values_to_leaves_nodePar}
\title{Assign values to nodePar of dendrogram's leaves}
\usage{
assign_values_to_leaves_nodePar(dend, value, nodePar,
warn = dendextend_options("warn"), ...)
}
\arguments{
\item{dend}{a dendrogram object}
\item{value}{a new value vector for the nodePar attribute. It should be
the same length as the number of leaves in the tree. If not, it will recycle
the value and issue a warning.}
\item{nodePar}{the value inside nodePar to adjust.}
\item{warn}{logical (default from dendextend_options("warn") is FALSE).
Set if warning are to be issued, it is safer to keep this at TRUE,
but for keeping the noise down, the default is FALSE.}
\item{...}{not used}
}
\value{
A dendrogram, after adjusting the nodePar attribute in all of its leaves.
}
\description{
Goes through the dendrogram leaves and updates the values inside their nodePar.
If a value is Inf, the corresponding value in nodePar will not be changed.
}
\examples{
\dontrun{
dend <- USArrests[1:5,] \%>\% dist \%>\% hclust("ave") \%>\% as.dendrogram
# reproduces "labels_colors<-"
# although it does force us to run through the tree twice,
# hence "labels_colors<-" is better...
plot(dend)
dend <- assign_values_to_leaves_nodePar(dend=dend, value = c(3,2), nodePar = "lab.col")
plot(dend)
dend <- assign_values_to_leaves_nodePar(dend, 1, "pch")
plot(dend)
# fix the annoying pch=1:
dend <- assign_values_to_leaves_nodePar(dend, NA, "pch")
plot(dend)
# adjust the cex:
dend <- assign_values_to_leaves_nodePar(dend, 19, "pch")
dend <- assign_values_to_leaves_nodePar(dend, 2, "lab.cex")
plot(dend)
str(unclass(dend))
get_leaves_attr(dend, "nodePar", simplify=FALSE)
}
}
\seealso{
\link{get_leaves_attr}
}
|
f0785e6a020a35fc8eaceb2297a31466ffb5363f | af72407b36c1ee3182f3a86c3e73071b31456702 | /R/one_hot.R | 91f5f037367ddea560dfbd45cde03f5bba83a741 | [
"MIT"
] | permissive | bcjaeger/ipa | f89746d499500e0c632b8ca2a03904054dc12065 | 2e4b80f28931b8ae6334d925ee8bf626b45afe89 | refs/heads/master | 2021-07-12T20:52:23.778632 | 2020-04-26T16:44:01 | 2020-04-26T16:44:01 | 207,016,384 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,808 | r | one_hot.R |
#' One hot encoding
#'
#' A faster implementation of [mltools::one_hot] with less options.
#'
#' @param data a data frame, tibble, matrix, or `data.table`.
#'
#' @return an object of the same class as `data` with each factor (or
#'   character) column replaced by one binary indicator column per level.
#'
#' @note One-hot-encoding converts an unordered categorical vector
#'   (i.e. a factor) to multiple binarized vectors where each binary
#'   vector of 1s and 0s indicates the presence of a class (i.e. level)
#'   of the original vector.
#'
#' @export
#'
#' @examples
#' n <- 10
#'
#' data <- data.frame(
#'   V1 = seq(n),
#'   V2 = factor(sample(letters[1:3], n, replace = TRUE)),
#'   V3 = seq(n) / 10,
#'   V4 = factor(sample(letters[5:6], n, replace = TRUE))
#' )
#'
#' data$V1[1] <- NA
#' data$V3[c(6,7)] <- NA
#' data$V2[1:2] <- NA
#' data$V4[2] <- NA
#'
#' one_hot(data)
one_hot <- function (data){
  # Remember how to convert the result back to the class of the input;
  # unsupported classes error out immediately.
  output_fun <- switch (class(data)[1],
    'data.frame' = as.data.frame,
    'matrix' = as.matrix,
    'tbl_df' = tibble::as_tibble,
    'data.table' = function(x) x,
    stop("unrecognized type for data", call. = FALSE)
  )
  # Work on a data.table copy so the caller's object is never modified.
  if(!is.data.table(data)){
    DT <- as.data.table(data)
  } else {
    DT <- copy(data)
  }
  # Character columns are converted to factors so they get encoded too.
  if(any(sapply(DT, is.character))){
    chr_cols <- names(DT)[sapply(DT, is.character)]
    for(col in chr_cols)
      data.table::set(DT, j = col, value = as.factor(DT[[col]]))
  }
  # Will use these original names to help re-order the output
  DT_names <- names(DT)
  # will use the factor info about DT to connect
  # one-hot columns to original factors
  fctr_info <- get_factor_info(DT)
  for(i in seq_along(fctr_info$cols)){
    # the idea is to make a matrix for each factor
    # with nrow = nrow(DT) and ncol = length of factor levels.
    mat <- matrix(0,
      nrow = nrow(DT),
      ncol = length(fctr_info$lvls[[i]])
    )
    colnames(mat) <- fctr_info$keys[[i]]
    # missing values of the factor become missing rows
    mat[is.na(DT[[fctr_info$cols[i]]]), ] <- NA_integer_
    # we will one-hot encode the matrix and then bind it to DT,
    # replacing the original factor column. Go through the matrix
    # column by column, where each column corresponds to a level
    # of the current factor (indexed by i). Flip the values
    # of the j'th column to 1 whenever the current factor's value
    # is the j'th level.
    for (j in seq(ncol(mat))) {
      # find which rows to turn into 1's. These should be the
      # indices in the current factor where its value is equal
      # to the j'th level.
      hot_rows <- which(
        DT[[fctr_info$cols[i]]] == fctr_info$lvls[[i]][j]
      )
      # after finding the rows, flip the values from 0 to 1
      if(!purrr::is_empty(hot_rows)){
        mat[hot_rows , j] <- 1
      }
    }
    DT[, fctr_info$cols[i]] <- NULL
    DT <- cbind(DT, mat)
  }
  # Rebuild the original column order, expanding each factor name into
  # its one-hot key names in place.
  OH_names <- DT_names
  for (i in seq_along(fctr_info$cols)){
    OH_names <- insert_vals(
      vec = OH_names,
      where = which(fctr_info$cols[i] == OH_names),
      what = fctr_info$keys[[i]]
    )
  }
  data.table::setcolorder(DT, OH_names)
  output_fun(DT)
}
# one_hot_vec: one-hot encode a vector of zero-based category codes.
# Returns a length(x) x ncats numeric matrix with a 1 in column (x[i] + 1)
# of row i and 0 elsewhere; codes outside [0, ncats - 1] leave their row
# all-zero.
one_hot_vec <- function(x, ncats){
  x <- x + 1
  mat <- matrix(0, ncol = ncats, nrow = length(x))
  # BUG FIX: seq_len() (not seq()) so ncats == 0 yields an empty loop
  # instead of iterating over c(1, 0) and failing on a zero-column matrix.
  for(i in seq_len(ncats)) mat[x==i, i] <- 1
  mat
}
# insert_vals: replace the element of `vec` at position `where` with the
# (possibly multi-element) vector `what`, keeping everything else in order.
insert_vals <- function(vec, where, what){
  stopifnot(
    typeof(what) == typeof(vec),
    where >= 1,
    where <= length(vec)
  )
  # Drop the replaced element, then splice `what` in at its old position.
  append(vec[-where], what, after = where - 1)
}
# one_hot_chr: one-hot encode a character vector against an explicit set of
# levels. Returns a length(x) x length(lvls) numeric matrix; values of x
# not present in lvls (or NA) leave their row all-zero.
one_hot_chr <- function(x, lvls){
  mt <- matrix(0, nrow = length(x), ncol = length(lvls))
  for(i in seq_along(lvls)){
    indx <- which(x == lvls[i])
    # base-R length check instead of purrr::is_empty() -- this helper has
    # no other reason to depend on purrr.
    if(length(indx) > 0) mt[indx, i] <- 1
  }
  mt
}
|
9d89c19f85842b24c5e3a1b61ee4110e404bfd66 | 5cfcd8980a97d63c97eb2bba6b662e8278c901e9 | /test_simon/aktuelle_version/aktuelle_version/R_test/tabs_select_lag.R | 95b09d29183f8f21cd5667e4a4c7831a52dbd2f9 | [] | no_license | lubrunn/DSP_App | e040de46407515c4cdfad50c9706d405b111a0d6 | e477454e59d90749d3af203ba4b1a3ddb7e7946e | refs/heads/main | 2023-03-21T13:02:01.383511 | 2021-03-17T23:50:35 | 2021-03-17T23:50:35 | 348,858,603 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,000 | r | tabs_select_lag.R |
# Hidden tabset holding the per-variable feature controls (moving-average
# window and autoregressive lag length). Input IDs are unchanged; only the
# user-facing label typos ("varaible", "Chose") are fixed.
numeric_features <- tabsetPanel(
  id = "tabs_for_var",
  type = "hidden",
  tabPanel("1",
           selectInput("var_1", "Select variable", choices = ""),
           numericInput("num_1","Choose length of moving average",min=1,value = 1),
           numericInput("num_2","Choose Autoregressive lags for",min=1,value = 1),
           actionButton("addButton", "UPLOAD!"),
           actionButton("finish", "Finish!"),
           actionButton("reset_cus", "Reset!")
  )
  # tabPanel("2",
  #          selectInput("var_2", "Select varaible", choices = ""), #could I use var_1 here?
  #          numericInput("num_3","Chose length of moving average",min=1,value = 1),
  #          numericInput("num_4","Chose Autoregressive lags for",min=1,value = 1),
  #          selectInput("var_3", "Select varaible", choices = ""),
  #          numericInput("num_5","Chose length of moving average",min=1,value = 1),
  #          numericInput("num_6","Chose Autoregressive lags for",min=1,value = 1)
  #          )
)
# Hidden tabset with boosting-model settings: "default" (no inputs),
# "custom" (full hyperparameter set) and "hyperparameter_tuning"
# (tree count + grid size) -- presumably switched from the server side.
model_specification <- tabsetPanel(
  id = "mod_spec",
  type = "hidden",
  tabPanel("default"),
  tabPanel("custom",
           numericInput("mtry","number of predictors that will be randomly sampled",min = 2,max=30,step = 1,value = 20),
           numericInput("trees","number of trees contained in the ensemble",min = 50,max=1000,step = 10,value = 200),
           numericInput("min_n","minimum number of data points in a node",min = 1,max=20,step = 1,value = 3),
           numericInput("tree_depth","maximum depth of the tree",min = 1,max=50,step = 1,value = 8),
           numericInput("learn_rate","rate at which the boosting algorithm adapts",min = 0.005,max=0.1,step = 0.001,value = 0.01),
           numericInput("loss_reduction","reduction in the loss function required to split further",min = 0.005,max=0.1,step = 0.001,value = 0.01),
           numericInput("sample_size","amount of data exposed to the fitting routine",min = 0.1,max=1,step = 0.1,value = 0.7)
  ),
  tabPanel("hyperparameter_tuning",
           # LABEL FIX: this input mirrors "trees" (same min/max/step/value),
           # but its label wrongly said "number of predictors ...".
           numericInput("trees_hyp","number of trees contained in the ensemble",min = 50,max=1000,step = 10,value = 200),
           numericInput("grid_size","size of tuning grid",min = 10,max=100,step = 5,value = 30)
  )
)
# Duplicate of model_specification with "1"-suffixed input IDs
# (presumably for the forecasting panel -- confirm against the server).
model_specification_for <- tabsetPanel(
  id = "mod_spec_for",
  type = "hidden",
  tabPanel("default"),
  tabPanel("custom",
           numericInput("mtry1","number of predictors that will be randomly sampled",min = 2,max=30,step = 1,value = 20),
           numericInput("trees1","number of trees contained in the ensemble",min = 50,max=1000,step = 10,value = 200),
           numericInput("min_n1","minimum number of data points in a node",min = 1,max=20,step = 1,value = 3),
           numericInput("tree_depth1","maximum depth of the tree",min = 1,max=50,step = 1,value = 8),
           numericInput("learn_rate1","rate at which the boosting algorithm adapts",min = 0.005,max=0.1,step = 0.001,value = 0.01),
           numericInput("loss_reduction1","reduction in the loss function required to split further",min = 0.005,max=0.1,step = 0.001,value = 0.01),
           numericInput("sample_size1","amount of data exposed to the fitting routine",min = 0.1,max=1,step = 0.1,value = 0.7)
  ),
  tabPanel("hyperparameter_tuning",
           # LABEL FIX: mirrors "trees1"; label wrongly said "number of predictors ...".
           numericInput("trees_hyp1","number of trees contained in the ensemble",min = 50,max=1000,step = 10,value = 200),
           numericInput("grid_size1","size of tuning grid",min = 10,max=100,step = 5,value = 30)
  )
)
# Hidden tabset for the custom-lag workflow: correlation-type picker,
# a server-rendered plot chooser, plus the numeric_features tabset above.
custom_lag_tab <- tabsetPanel(
  id = "lag_tab",
  type = "hidden",
  tabPanel("default"),
  tabPanel("custom",
           # "Choose" fixes the "Chose" typo in the user-facing label
           selectInput("correlation_type", "Choose type of correlation:", choices = c("ACF","PACF")),
           uiOutput("correlation_plot_choice"),
           numeric_features
           # actionButton("reset_arma", "clear selected")
  )
)
|
d767e8379ee3467ed656e9356b205dcefda80621 | d329dc15440c05c56e7858724261548d748da89e | /readData.R | 088d56f714c5b6175de34dd038726b49613dd926 | [] | no_license | tuangd/ExData_Plotting1 | 67de40e301881c5e3b49c9c7ed63c69034ed09d9 | 2b05ec1778f915f8712a22e1dd55ea936a226301 | refs/heads/master | 2021-01-18T11:28:52.325350 | 2014-11-08T09:05:49 | 2014-11-08T09:05:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,500 | r | readData.R | ##Name: readDate.R
##Author: Tuang Dheandhanoo
##For Coursera Exploratory Data Analysis: Project One
##Loading data from the UC Irvine Machine Learning Repository
## “Individual household electric power consumption Data Set”
## Read in the data files only the 2 days that we want to make the plot
## that dates are 2007-02-01 and 2007-02-02
## With a little bit of calculation and trial and error, I can pinpoint those lines
## NOTE(review): skip/nrows are hard-coded for this exact data file; 2880 rows
## covers the two target dates (presumably minute-resolution: 2 x 1440 rows —
## confirm against the source file before reusing).
hpc <- read.csv (file = "household_power_consumption.txt",
                 na.strings = "?",
                 stringsAsFactors = FALSE,
                 skip = 66636,
                 nrows = 2880,
                 sep = ";",
                 col.names = c("Date",
                               "Time",
                               "Global_active_power",
                               "Global_reactive_power",
                               "Voltage",
                               "Global_intensity",
                               "Sub_metering_1",
                               "Sub_metering_2",
                               "Sub_metering_3"))
## create a copy to manipulate Date and Time, keeping the raw read in `hpc`
hpc2 <- hpc
## modify Date format in hpc2 (source file stores dates as day/month/year)
hpc2$Date <- as.Date(hpc$Date, "%d/%m/%Y")
## prepare another vector to store Date_Time format
x <- paste(hpc2$Date, hpc2$Time)
Date_Time <- strptime(x, format = "%Y-%m-%d %H:%M:%S")
## cbind Date_Time with hpc2 to create another data.frame with a combined
## timestamp column for time-series plotting
hpc3 <- cbind(Date_Time, hpc2)
#then these (hpc, hpc2, hpc3) should be able to use to plot
|
dafedbd8e2001626b9aa621acdd1fcf2ae5e084d | aa9375b62a31700c22b989d8163ba674389151a3 | /real_data_analysis[1].R | 09303ff7f5fac3da035e37a9368e2a742880eecc | [] | no_license | uthjz/none | 929628391e727d5d1d7dcbac674130ab18321540 | 249cfdbac2b20fe61336e45d9c726d617a78ee0f | refs/heads/master | 2021-06-05T09:00:29.739766 | 2018-06-13T20:21:44 | 2018-06-13T20:21:44 | 34,471,390 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,776 | r | real_data_analysis[1].R | # prepare data and run missing data model
## ---------------------------------------------------------------------
## Data preparation: PPMI MDS-UPDRS item responses for the missing-data
## model fitted with rstan below. Loads the cleaned .RData, shifts the
## ordinal items to 1-based levels, validates the stored missingness
## indicator `rr`, and recodes missing rows as -1 for Stan.
## ---------------------------------------------------------------------
rm(list=ls())
wkdir="C:/Users/JZhang/Dropbox/Missing_Data" # home
wkdir="C:/Users/jz250/Dropbox/Missing_Data" # duke
setwd(wkdir)
load("../PPMI/Clean_Data/data0118.RData")
#####################################
# four file name MDS_UPDRS_surv, event_table, screen_post, ID_surv
head(MDS_UPDRS_surv)
UPDRS_temp=MDS_UPDRS_surv
########################################
# delete several column
UPDRS_temp$INFODT=NULL
head(UPDRS_temp)
dim(UPDRS_temp) #4921 69
dim(UPDRS_temp[,c(3:61)]) # 4921 59
##########################
# force level start from 1
# (columns 3:61 are the 59 item responses; +1 makes categories 1-based)
UPDRS=UPDRS_temp[,c(3:61)]+1
# recompute a row-level missingness flag: 1 if any item in the row is NA
test_NA=rep(0, nrow(UPDRS))
sum(UPDRS[1,])
for(i in 1: nrow(UPDRS)){
if(is.na(sum(UPDRS[i,]))) test_NA[i]=1
}
# compare the recomputed flag against the stored indicator `rr`
test_NA-UPDRS_temp$rr
sum(abs(test_NA-UPDRS_temp$rr)) # 1
UPDRS_temp[which(test_NA-UPDRS_temp$rr !=0 ),] # patno 4070 has baseline NA
UPDRS_temp[UPDRS_temp$PATNO==4070,]
###############################################################
# need delete 4070's baseline
# UPDRS_valid keeps only rows where the recomputed flag agrees with `rr`
dim(MDS_UPDRS_surv) # 4921 70
UPDRS_valid=MDS_UPDRS_surv[which(abs(test_NA-MDS_UPDRS_surv$rr) ==0 ),]
dim(UPDRS_valid) # 4920 70
UPDRS_valid$INFODT=NULL
dim(UPDRS_valid) # 4920 69
UPDRS=UPDRS_valid[,c(3:61)]+1
dim(UPDRS) # 4920 59
head(UPDRS)
# re-check: the flag must now match `rr` exactly
test_NA=rep(0, nrow(UPDRS))
# sum(UPDRS[1,])
for(i in 1: nrow(UPDRS)){
if(is.na(sum(UPDRS[i,]))) test_NA[i]=1
}
sum(abs(test_NA-UPDRS_valid$rr) ) # 0 good
sum(UPDRS_valid$rr) # 351, 351 missing
################################
# replace NA as -1, otherwise STAN will not recognize
for(i in 1: nrow(UPDRS)){
if(is.na(sum(UPDRS[i,]))) UPDRS[i,]= -1
}
summary(unlist(UPDRS)) # no na good
# dimensions and long-format indices passed to Stan
num_subject=length(unique(UPDRS_valid$PATNO)); num_subject # 421
num_obs= nrow(UPDRS_valid); num_obs # 4920
num_item=ncol(UPDRS); num_item # 59
num_ordi=5
subj_long=UPDRS_valid$subject
length(subj_long)
Y_ordi=UPDRS
# spot-check the distribution of the first item (including -1 = missing)
length(which(Y_ordi[,1]==1)); length(which(Y_ordi[,1]==2)); length(which(Y_ordi[,1]==3));length(which(Y_ordi[,1]==-1))
p0=length(which(Y_ordi[,1]==1))/num_obs; p0 # 0.6132114
## logit of the baseline category proportion (sanity check for priors)
log(p0/(1-p0)); # 0.4608314
a0=0.5
# subject-level covariates from the screening table
age_subj=screen_post$age
head(screen_post)
gender_subj=screen_post$gender
#######################################
# tee and time use year
# (tee = survival time, time_obs = visit time; both converted months -> years)
tee=event_table$tee/12
head(tee)
time_obs=UPDRS_valid$time/12
head(time_obs, 20)
event=event_table$status
head(event)
rr_obs=UPDRS_valid$rr
head(rr_obs,20)
tail(rr_obs, 20)
first_last_obs=UPDRS_valid$first_last
head(first_last_obs, 20)
tail(first_last_obs, 20)
## -------------------- Stan model fitting --------------------
library(rstan)
# data list matching the declarations in missing_11_real.stan
data <- list(num_obs=num_obs, num_subject=num_subject,
             num_item=num_item, num_ordi=num_ordi, subj_long=subj_long, Y_ordi=Y_ordi,
             a0=a0, time_obs=time_obs, age_norm=age_subj, gender_subj=gender_subj,
             tee=tee, event=event, rr_obs=rr_obs, first_last_obs=first_last_obs)
# parameters to monitor
pars <- c("beta", "alpha", "a_ordi", "b_ordi",
          "Omega", "Var_U", "Var_e", "w", "eta",
          "sd_U", "sd_e",
          "gam", "nu","h0" )
# two dispersed initial-value sets, one per chain, to assess convergence
inits01 <- list(U=matrix(0.1, num_subject, 2), Omega= diag(2), Var_e=1,
                Var_U=rep(2, 2), ee=rep(0, num_obs),
                beta=rep(0,2), alpha=0.1,
                a_random=rep(0.9, num_item), b_random= rep(0.3, num_item),
                delta=matrix(1, nrow=num_item, ncol=num_ordi-2), gam=-0.1, nu=0.3, h0=0.005, w=-8, eta=1 )
inits01 <- list(c1=inits01)
inits02 <- list(U=matrix(0.2, num_subject, 2), Omega= diag(2), Var_e=2,
                Var_U=rep(1, 2), ee=rep(0.1, num_obs),
                beta=rep(0.5,2), alpha=0.2,
                a_random=rep(1, num_item), b_random= rep(0.5, num_item),
                delta=matrix(0.5, nrow=num_item, ncol=num_ordi-2), gam=0.1, nu=0.2, h0=0.002, w=-5, eta=2 )
inits02 <- list(c1=inits02)
#############################################
model_file<-"./missing_11_real.stan"
# fit chain 1 (long-running; wall-clock noted in comments)
time0<-Sys.time()
fit1<- stan(file=model_file, data=data, pars=pars, init=inits01, thin=1, chains=1, iter=3500, warmup=2500, seed=1234)
Sys.time()-time0 # 6.527301 hours
print(fit1, digits=3)
# fit chain 2 with the second initial-value set
time0<-Sys.time()
fit2<- stan(file=model_file, data=data, pars=pars, init=inits02, thin=1, chains=1, iter=3500, warmup=2500, seed=1234)
Sys.time()-time0 # 2.031872 days
print(fit2, digits=3)
# summarize the main structural parameters and export as a LaTeX table
pars_est= c("beta", "alpha", "Omega", "Var_U", "Var_e", "w", "eta",
            "sd_U", "sd_e",
            "gam", "nu","h0" )
main_rst=summary(fit2, pars=pars_est, probs=c(0.025,0.975))$summary
library(xtable)
xtable(main_rst[,c(1,3:5)], digits=3)
|
18d8b24141c1e33369ed54d8d673ca368c30d891 | a17cf22be2304c96d267fc1b68db7b7279c4a293 | /man/addMapSet.Rd | 977d55bdb6584c3b1f852f41f15fe64c16dbce51 | [] | no_license | robertdouglasmorrison/DuffyTools | 25fea20c17b4025e204f6adf56c29b5c0bcdf58f | 35a16dfc3894f6bc69525f60647594c3028eaf93 | refs/heads/master | 2023-06-23T10:09:25.713117 | 2023-06-15T18:09:21 | 2023-06-15T18:09:21 | 156,292,164 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 910 | rd | addMapSet.Rd | \name{addMapSet}
\alias{addMapSet}
\title{
Add Organism Annotation MapSet to DuffyTools environment
}
\description{
Loads an additional organism annotation to the set of knowm organisms.
}
\usage{
addMapSet(mapset)
}
\arguments{
\item{mapset}{
a MapSet object, as created by 'importMapSet'
}
}
\details{
A MapSet is the complete description of an organism's annotation, covering
chromosomes, gene, and exons. Adding a MapSet for an already loaded
organism overwrites the previous annotation.
See \code{\link{MapSets}} for an overview of organism annotations.
}
\value{
If successfully loaded, the speciesID of this new organism.
}
\seealso{
\code{\link{addTarget}}, for combining multiple organisms
\code{\link{exportCurrentMapSet}}, for saving an annotation to text files
\code{\link{importMapSet}}, for bundling annotation text files back into a MapSet object
}
|
605940107082662742f2fb955a617227abe8c0de | 707c1a8e277d5ea19b9957eef5bc358da7033400 | /R/header.R | 1c3a9f8dae566f669b9f0154bbaa5c2d5f311b32 | [] | no_license | cran/oro.dicom | 19ff8090388460f8dc70f0af1b0df8a43f2db625 | e97981f364d9cd684cf07d8b11473cd495b51712 | refs/heads/master | 2020-04-15T22:47:46.810824 | 2019-10-28T18:40:05 | 2019-10-28T18:40:05 | 17,698,129 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 12,340 | r | header.R | ##
## Copyright (c) 2010-2011 Brandon Whitcher
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following
## disclaimer in the documentation and/or other materials provided
## with the distribution.
## * Neither the name of Rigorous Analytics Ltd. nor the names of
## its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## $Id: $
##
#' Construct Data Frame from DICOM Headers
#'
#' A data frame is created given the valid DICOM fields provided by the user.
#'
#'
#' @param hdrs List object of DICOM headers.
#' @param stringsAsFactors Logical variable to be passed to \code{data.frame}.
#' @param collapse Character string used to \code{paste} DICOM group, element
#' and value fields.
#' @param colSort Logical variable (default = \code{TRUE}) to sort column names
#' in the table.
#' @param verbose Flag to provide text-based progress bar (default =
#' \code{FALSE}).
#' @param debug Logical variable (default = \code{FALSE}) that regulates to
#' display of intermediate processing steps.
#' @return Data frame where the rows correspond to images and the columns
#' correspond to the UNION of all DICOM fields across all files in the list.
#' @author Brandon Whitcher \email{bwhitcher@@gmail.com}
#' @references Whitcher, B., V. J. Schmid and A. Thornton (2011). Working with
#' the DICOM and NIfTI Data Standards in R, \emph{Journal of Statistical
#' Software}, \bold{44} (6), 1--28. \url{http://www.jstatsoft.org/v44/i06}
#'
#' Digital Imaging and Communications in Medicine (DICOM)\cr
#' \url{http://medical.nema.org}
#' @keywords misc
#' @export dicomTable
#' @importFrom utils setTxtProgressBar txtProgressBar write.table
dicomTable <- function(hdrs, stringsAsFactors=FALSE, collapse="-",
                       colSort=TRUE, verbose=FALSE, debug=FALSE) {
  ## Row-bind two one-column-per-tag data frames whose column sets may
  ## differ: drop duplicated tag columns (with a warning), pad each frame
  ## with NA columns for tags only present in the other, then rbind.
  myMerge <- function(df1, df2) {
    if (anyDuplicated(names(df1)) != 0) {
      warning("Duplicated group-element tags have been removed!")
      df1 <- df1[, ! duplicated(names(df1))]
    }
    if (anyDuplicated(names(df2)) != 0) {
      warning("Duplicated group-element tags have been removed!")
      df2 <- df2[, ! duplicated(names(df2))]
    }
    ## Add to df1 any tag columns that only exist in df2, filled with NA.
    if (! all(names(df2) %in% names(df1))) {
      newCols <- names(df2)[! names(df2) %in% names(df1)]
      ## newcols <- setdiff(names(df2), names(df1)) # removes duplicates!
      newDf <- as.data.frame(lapply(newCols, function(i, x) rep(NA, x),
                                    x = nrow(df1)))
      names(newDf) <- newCols
      df1 <- cbind(df1, newDf)
    }
    ## Symmetrically, add to df2 any tag columns that only exist in df1.
    if (! all(names(df1) %in% names(df2))) {
      newCols <- names(df1)[! names(df1) %in% names(df2)]
      ## newCols <- setdiff(names(df1), names(df2)) # removes duplicates!
      newDf <- as.data.frame(lapply(newCols, function(i, x) rep(NA, x),
                                    x = nrow(df2)))
      names(newDf) <- newCols
      df2 <- cbind(df2, newDf)
    }
    rbind(df1, df2)
  }
  ## Use first record to establish data.frame: one row, one column per tag.
  csv <- data.frame(matrix(hdrs[[1]]$value, 1, nrow(hdrs[[1]])),
                    stringsAsFactors=stringsAsFactors)
  ## Column names: numeric sequence-depth prefix (non-digits collapsed to
  ## "-") pasted to the "group-element-name" triple from columns 1:3.
  names(csv) <-
    paste(sub("^-", "", gsub("[^0-9]+", "-", hdrs[[1]]$sequence)),
          as.vector(apply(hdrs[[1]][,1:3], 1, paste, collapse=collapse)),
          sep="")
  ## Loop through all records and "merge" them, one row per DICOM file.
  if ((nhdrs <- length(hdrs)) > 1) {
    if (verbose) {
      cat(" ", nhdrs, "files to be processed by dicomTable()", fill=TRUE)
      tpb <- txtProgressBar(min=0, max=nhdrs, style=3)
    }
    for (l in 2:nhdrs) {
      if (debug) {
        cat(" l =", l, fill=TRUE)
      }
      if (verbose) {
        setTxtProgressBar(tpb, l)
      }
      ## Build the one-row frame for header l exactly as for the first.
      temp <- data.frame(matrix(hdrs[[l]]$value, 1, nrow(hdrs[[l]])),
                         stringsAsFactors=stringsAsFactors)
      names(temp) <-
        paste(sub("^-", "", gsub("[^0-9]+", "-", hdrs[[l]]$sequence)),
              as.vector(apply(hdrs[[l]][,1:3], 1, paste, collapse=collapse)),
              sep="")
      old.nrow <- nrow(csv)
      csv <- myMerge(csv, temp)
      ## Guard: if rbind silently dropped the new row, pad with an NA row
      ## so row count stays aligned with the list of headers.
      if (nrow(csv) == old.nrow) {
        warning("Duplicate row was _not_ inserted in data.frame (csv)")
        csv <- rbind(csv, NA)
      }
    }
    if (verbose) {
      close(tpb)
    }
    row.names(csv) <- names(hdrs)
  }
  ## Optionally sort columns alphabetically by tag name.
  if (colSort) {
    return(csv[, order(names(csv))])
  } else {
    return(csv)
  }
}
#' Extract Single Field from DICOM Headers
#'
#' A particular DICOM field is extracted for a collection of DICOM headers.
#'
#' The DICOM field is extracted from each DICOM header and placed into a
#' vector.
#'
#' @param hdrs List object of DICOM headers.
#' @param string DICOM field name.
#' @param numeric Logical; values are converted to numbers when \code{TRUE}.
#' @param names Logical; file names are kept with elements of the vector.
#' @param inSequence Logical; whether or not to look into SequenceItem
#' elements.
#' @return Vector of values from the requested DICOM field.
#' @author Brandon Whitcher \email{bwhitcher@@gmail.com}
#' @seealso \code{\link{readDICOM}}
#' @references Digital Imaging and Communications in Medicine (DICOM)\cr
#' \url{http://medical.nema.org}
#' @keywords misc
#' @examples
#'
#' x <- readDICOMFile(system.file("dcm/Abdo.dcm", package="oro.dicom"))
#' seriesDescription <- extractHeader(x$hdr, "SeriesDescription", numeric=FALSE)
#' IOP <- extractHeader(x$hdr, "ImageOrientationPatient", numeric=FALSE)
#'
#' @export extractHeader
extractHeader <- function(hdrs, string, numeric=TRUE, names=FALSE,
                          inSequence=TRUE) {
  ## Pull a single named DICOM field out of each header and return the
  ## values as a vector; headers lacking the field contribute NA.  When
  ## inSequence = FALSE, fields nested inside SequenceItem elements (i.e.
  ## with a non-empty `sequence` entry) are ignored.
  if (is.data.frame(hdrs)) {
    hdrs <- list(hdrs)
  }
  pullField <- function(hdr) {
    if (inSequence) {
      inSeq <- FALSE
    } else {
      inSeq <- nchar(hdr$sequence) > 0
    }
    hit <- which(hdr$name %in% string & !inSeq)
    if (length(hit) > 0) {
      hdr$value[hit]
    } else {
      NA
    }
  }
  fields <- lapply(hdrs, pullField)
  labels <- names(fields)
  result <- unlist(fields)
  if (numeric) {
    result <- as.numeric(result)
  }
  if (names) {
    ## Keep one name per header (the file name from the input list).
    names(result) <- labels
  } else {
    result <- as.vector(result)
  }
  result
}
#' Converts DICOM Header Field to a Matrix
#'
#' Converts a vector of DICOM header information, assuming there are multiple
#' entries per element of the vector, into a matrix.
#'
#'
#' @param hdr is the result from extracting information from a DICOM header
#' field; e.g., using \code{extractHeader}.
#' @param ncol is the number of columns.
#' @param sep is the character string required to split entries in the header
#' field.
#' @param byrow is a logical variable (default = \code{TRUE}) telling the
#' routine to populate the matrix by rows then columns.
#' @return Matrix with \code{length(hdr)} rows and \code{ncol} columns.
#' @author Brandon Whitcher \email{bwhitcher@@gmail.com}
#' @seealso \code{\link{extractHeader}}, \code{\link{matrix}}
#' @references Digital Imaging and Communications in Medicine (DICOM)\cr
#' \url{http://medical.nema.org}
#' @keywords misc
#' @examples
#'
#' x <- readDICOMFile(system.file("dcm/Abdo.dcm", package="oro.dicom"))
#' pixelSpacing <- extractHeader(x$hdr, "PixelSpacing", numeric=FALSE)
#' pSmat <- header2matrix(pixelSpacing, ncol=2)
#' IOP <- extractHeader(x$hdr, "ImageOrientationPatient", numeric=FALSE)
#' IOPmat <- header2matrix(IOP, ncol=6)
#'
#' @export header2matrix
header2matrix <- function(hdr, ncol, sep=" ", byrow=TRUE) {
  ## Split each header string on `sep`, coerce the pieces to numeric and
  ## arrange them into a matrix with `ncol` columns (row-major by default,
  ## so each input string fills one row when it holds `ncol` values).
  pieces <- strsplit(hdr, sep)
  values <- as.numeric(unlist(pieces))
  matrix(values, ncol = ncol, byrow = byrow)
}
#' Match String to DICOM Header Field
#'
#' A convenient wrapper function that utilizes internal functions to match
#' character strings with the DICOM header information.
#'
#'
#' @param hdr is the result from extracting information from a DICOM header
#' field; e.g., using \code{extractHeader}.
#' @param string is a character string to be matched with the DICOM header.
#' @return A logical vector of length \code{length(hdr)}.
#' @author Brandon Whitcher \email{bwhitcher@@gmail.com}
#' @seealso \code{\link{extractHeader}}
#' @references Digital Imaging and Communications in Medicine (DICOM)\cr
#' \url{http://medical.nema.org}
#' @examples
#'
#' x <- readDICOMFile(system.file("dcm/Abdo.dcm", package="oro.dicom"))
#' modality <- extractHeader(x$hdr, "Modality", numeric=FALSE)
#' matchHeader(modality, "mr") # case insensitive by default
#'
#' @export matchHeader
matchHeader <- function(hdr, string) {
  ## TRUE wherever `string` matches the header text (case-insensitive
  ## regular-expression match); missing entries count as non-matches
  ## rather than propagating NA.
  found <- grepl(string, hdr, ignore.case = TRUE)
  ifelse(is.na(hdr), FALSE, found)
}
#' Write DICOM Table to ASCII File
#'
#' A wrapper to \code{write.table} specifically for DICOM tables.
#'
#' This function is a straightforward wrapper to \code{write.table}.
#'
#' @param dtable The DICOM table.
#' @param filename Name of the file to be created.
#' @param ... Additional parameters to be passed to \code{write.table}.
#' @return None.
#' @author Brandon Whitcher \email{bwhitcher@@gmail.com}
#' @seealso \code{\link{write.table}}
#' @references Digital Imaging and Communications in Medicine (DICOM)\cr
#' \url{http://medical.nema.org}
#' @keywords file
#' @export writeHeader
writeHeader <- function(dtable, filename, ...) {
  ## Thin wrapper around write.table(): tab-separated, unquoted output.
  ## Extra arguments in `...` are forwarded to write.table().
  write.table(dtable, filename, quote=FALSE, sep="\t", ...)
}
#' Check String Against DICOM Header Field to Produce Error Message or NEXT
#'
#' A function designed to \code{break} out of loops given information (or the
#' lack thereof) contained in the DICOM header.
#'
#'
#' @param dcm is the DICOM list structure.
#' @param string is a character string to be matched with the DICOM header.
#' @param reference is the scalar/vector of character strings to check against
#' the DICOM header output.
#' @param str.warning is a text string for the warning.
#' @param htmlfile is the \pkg{hwriter} object for the HTML file (default =
#' \code{NULL}.
#' @param heading is the HTML tag <H?> (default = \code{3}).
#' @param numeric is the argument to be passed to \code{matchHeader}.
#' @return An expression to be evaluated and HTML content.
#' @author Brandon Whitcher \email{bwhitcher@@gmail.com}
#' @seealso \code{\link{extractHeader}}, \code{\link{matchHeader}}
#' @references Digital Imaging and Communications in Medicine (DICOM)\cr
#' \url{http://medical.nema.org}
#' @export nextHeader
nextHeader <- function(dcm, string, reference, str.warning,
                       htmlfile=NULL, heading=3, numeric=FALSE) {
  ## Extract the requested field and, if any reference string matches it,
  ## emit the warning (to the HTML file via hwriter when one is supplied,
  ## otherwise as an R warning) and return expression(next) for the caller
  ## to eval() so its loop skips this file.
  header <- extractHeader(dcm$hdr, string=string, numeric=numeric)
  for (i in 1:length(reference)) {
    if (any(matchHeader(header, string=reference[i]))) {
      if (! is.null(htmlfile)) {
        requireNamespace("hwriter", quietly=TRUE)
        ## NOTE(review): the `heading` argument is not forwarded here;
        ## hwrite() is always called with the literal 3 — confirm intent.
        hwriter::hwrite(str.warning, htmlfile, heading=3)
      } else {
        warning(str.warning)
      }
      return(expression(next))
    }
  }
  ## No match: return invisibly (NULL), caller continues normally.
  invisible()
}
|
Scale <- function(Data){
  ## Min-max scale each column of `Data` to the [0, 1] range.
  ##
  ## Args:
  ##   Data: numeric matrix (or data frame coercible to one); each column
  ##         is scaled independently.
  ## Returns:
  ##   A matrix with the same dimensions (and dimnames) where every
  ##   column's minimum maps to 0 and its maximum to 1.  A constant
  ##   column yields NaN (0/0), matching the original formulation.
  Data <- as.matrix(Data)
  #Find the Max of Each column
  MaxVec <- apply(Data, 2, max)
  #Find the Min of each Column
  MinVec <- apply(Data, 2, min)
  #Do Max-Min Scaling: sweep() applies the per-column shift and rescale
  #directly, avoiding the double transpose of the previous implementation.
  ScaledData <- sweep(Data, 2, MinVec, "-")
  ScaledData <- sweep(ScaledData, 2, MaxVec - MinVec, "/")
  return(ScaledData)
}
0c86849aa403d02e3094fb887a24d628c01a23d9 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /gRbase/R/xxx_gmwr-depends.R | 564f7744215edd0a58696723f8178e1009a3e161 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 581 | r | xxx_gmwr-depends.R | ######################################################################
##
#' @title Functions from Graphical Modelling with R book
#'
#' @description Functions that must be retained to make code from
#' gmwr-book work
#'
#' @name gmwr_book
##
######################################################################
## Note to self: Check xxx_downstream content for function from gmwr
## book that must be retained.
#'
#' @param object An object to be coerced.
#' @param result The format to be coerced to.
#'
NULL
#' @export
#' @rdname gmwr_book
as.adjMAT <- g_gn2xm_
|
b022e0ece92f4e99f74ff93f435614c5295cc8c8 | c75c35563e781a037c88e6c1cf1a7e6161890aee | /plumber-job/api-check.R | 5c8592c256f2556ac9786219b6205551ddee30e1 | [
"Apache-2.0"
] | permissive | chsafouane/background-jobs | 40d18692082bb446707bf7ad304755d483110b1d | 75c87a6543c7e2949715d02864c0858051476861 | refs/heads/master | 2020-07-29T15:35:47.074690 | 2019-08-15T17:19:00 | 2019-08-15T17:19:00 | 209,862,530 | 0 | 0 | Apache-2.0 | 2019-09-20T19:06:54 | 2019-09-20T19:05:06 | null | UTF-8 | R | false | false | 840 | r | api-check.R | # A script to check Plumber endpoints
## A script to check Plumber endpoints.  Assumes a Plumber API is already
## being served locally at http://127.0.0.1:5762.
library(httr)
library(testthat)  # BUG FIX: test_that()/expect_*() come from testthat, not httr

# Check echo endpoint: JSON body echoing the query-string message
test_that("/echo endpoint", {
  echo_resp <- GET("http://127.0.0.1:5762/echo?msg=heyo")
  expect_equal(status_code(echo_resp), 200)
  expect_equal(headers(echo_resp)[["content-type"]], "application/json")
  expect_equal(content(echo_resp)[["msg"]][[1]], "The message is: 'heyo'")
})

# Check plot endpoint: should return a PNG image
test_that("/plot endpoint", {
  plot_resp <- GET("http://127.0.0.1:5762/plot")
  expect_equal(status_code(plot_resp), 200)
  expect_equal(headers(plot_resp)[["content-type"]], "image/png")
})

# Check sum endpoint: POST two numbers, expect their sum back as JSON
test_that("/sum endpoint", {
  sum_resp <- POST("http://127.0.0.1:5762/sum?a=4&b=2")
  expect_equal(status_code(sum_resp), 200)
  expect_equal(headers(sum_resp)[["content-type"]], "application/json")
  expect_equal(content(sum_resp)[[1]], 6)
})
|
3a15cca66035a05222a9d9fa43bbce19e2d61f69 | b0968118cc2f1ddcdc5e355c1f74acde64345f67 | /validationPacBio.R | 2a4a377b1c3145290207c2ed0f7a3f8a0bf456a8 | [] | no_license | SebGuelfi/annotationAgnosticeQTL | 98cb132cbfc7d50d3ec0d553cffbcb651c120627 | ef6d9e8e24130952eb2d05345360469c7ba8a179 | refs/heads/master | 2021-05-04T00:47:51.470075 | 2018-06-18T16:43:12 | 2018-06-18T16:43:12 | 120,352,965 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,446 | r | validationPacBio.R | ## annotation.
## Validate expressed regions (derfinder output) against PacBio long-read
## transcript annotations: for each annotation category, report the
## percentage of regions that fall entirely within an annotated PacBio exon.
library(rtracklayer)
library(dplyr)  # BUG FIX: filter() and %>% below require dplyr to be attached

load("/home/sguelfi/projects/R/hipp/data/results/final_derfinder.rda")

## Split the expressed regions into purely intronic / intergenic / exonic
## sets based on the annotation counts in `ann.reg`.
ann.reg <- cbind(ann.reg, regNames = rownames(ann.reg))
intronic.reg   <- ann.reg %>% filter(intron >= 1 & intergenic == 0 & exon == 0)
intergenic.reg <- ann.reg %>% filter(intron == 0 & intergenic >= 1 & exon == 0)
exonic.reg     <- ann.reg %>% filter(intron == 0 & intergenic == 0 & exon >= 1)

expr.gr.exonic     <- expr.gr[as.character(exonic.reg$regNames)]
expr.gr.intronic   <- expr.gr[as.character(intronic.reg$regNames)]
expr.gr.intergenic <- expr.gr[as.character(intergenic.reg$regNames)]

## The PacBio GFFs use UCSC-style chromosome names ("chr1"); prefix ours.
seqlevels(expr.gr.exonic)     <- paste0('chr', seqlevels(expr.gr.exonic))
seqlevels(expr.gr.intronic)   <- paste0('chr', seqlevels(expr.gr.intronic))
seqlevels(expr.gr.intergenic) <- paste0('chr', seqlevels(expr.gr.intergenic))

## Percentage of `regions` fully contained within at least one PacBio exon.
overlap.pct <- function(regions, pacbio.exons) {
    (table(countOverlaps(regions, pacbio.exons, type = "within") > 0) /
         length(regions)) * 100
}

## Run the identical validation against both PacBio annotation sets
## (previously duplicated verbatim for each file).
gff.files <- c("/home/sguelfi/pacBio/brain.all_size.5merge.collapsed.gff",
               "/home/sguelfi/pacBio/IsoSeq_Alzheimer_2016edition_polished.confident.fusion.hg38.gff")
for (gff.path in gff.files) {
    brain.pacBio <- import.gff(gff.path)
    brain.pacBio <- brain.pacBio[as.character(brain.pacBio$type) == "exon", ]
    print(overlap.pct(expr.gr.exonic,     brain.pacBio))
    print(overlap.pct(expr.gr.intronic,   brain.pacBio))
    print(overlap.pct(expr.gr.intergenic, brain.pacBio))
}
|
f7f21497ed76857b770a476a53ea810d996738be | 7a479824d7397ad9e02578e942ea9b0a714edb3e | /man/grpregOverlap.Rd | 0b5bbaaf608a096875964b88eafe372de2077f5a | [] | no_license | YaohuiZeng/grpregOverlap | cb30341875b3a792acd638c840a7002b904580b2 | d82b001b2c9c3e535137435d686925b4ee7c7f0f | refs/heads/master | 2023-03-05T18:44:07.002528 | 2020-08-09T17:30:03 | 2020-08-09T17:30:03 | 36,340,861 | 18 | 7 | null | 2023-02-17T15:10:03 | 2015-05-27T03:40:48 | R | UTF-8 | R | false | false | 10,241 | rd | grpregOverlap.Rd | \name{grpregOverlap}
\alias{grpregOverlap}
\title{
Fit penalized regression models with overlapping grouped variables
}
\description{
Fit the regularization paths of linear, logistic, Poisson or Cox models with
overlapping grouped covariates based on the latent group lasso approach
(Jacob et al., 2009; Obozinski et al., 2011). Latent group MCP/SCAD as well as
bi-level selection methods, namely the group exponential lasso (Breheny, 2015)
and the composite MCP (Huang et al., 2012) are also available.
This function is a useful wrapper to the \code{grpreg} package's
\code{grpreg} and \code{grpsurv} (depending on the \code{family})
functions. Arguments can be passed through to these functions using
\code{...}, see \code{\link[grpreg]{grpreg}} and
\code{\link[grpreg]{grpsurv}} for usage and more details.
}
\usage{
grpregOverlap(X, y, group,
family=c("gaussian","binomial", "poisson", "cox"),
returnX.latent = FALSE, returnOverlap = FALSE, ...)
}
\arguments{
\item{X}{
The design matrix, without an intercept. \code{grpregOverlap} calls
\code{grpreg}, which standardizes the data and includes an intercept by default.
}
\item{y}{
The response vector, or a matrix in the case of multitask learning. For survival analysis, \code{y} is the time-to-event outcome - a two-column matrix or
\code{\link[survival]{Surv}} object. The first column is the
time on study (follow up time); the second column is a binary
variable with 1 indicating that the event has occurred and 0
indicating (right) censoring. See \code{\link[grpreg]{grpreg}} and \code{\link[grpreg]{grpsurv}} for more details.
}
\item{group}{
Different from that in \code{grpreg}, \code{group} here must be a list of vectors,
each containing integer indices or character names of variables in the group.
  Variables that do not belong to any group will be discarded.
}
\item{family}{
  Either "gaussian", "binomial", "poisson", or "cox", depending on the response. If
\code{family} is missing, it is set to be 'gaussian'. Specify \code{family} = 'cox' for survival analysis (Cox models).
}
\item{returnX.latent}{
Return the new expanded design matrix? Default is FALSE. Note the storage size
of this new matrix can be very large. Note: the name of this argument
was recently changed so that returnX can be passed through to
\code{\link[grpreg]{grpreg}} (in which case it will return the
group-orthonormalized design.
}
\item{returnOverlap}{
Return the matrix containing overlapps? Default is FALSE. It is a square matrix
\eqn{C} such that \eqn{C[i, j]} is the number of overlapped variables between
group i and j. Diagonal value \eqn{C[i, i]} is therefore the number of
variables in group i.
}
\item{...}{
Used to pass options (e.g., `group.multiplier`) to
\code{\link[grpreg]{grpreg}}. Note: the \code{returnX} argument will
not be passed through, since this will cause \code{grpregOverlap} to
store X.latent in the fitted model object.
}
}
\details{
The latent group lasso approach extends the group lasso to group variable selection
with overlaps. The proposed \emph{latent group lasso} penalty is formulated in a
way such that it's equivalent to a classical non-overlapping group lasso problem
in an new space, which is expanded by duplicating the columns of overlapped variables.
For technical details, see (Jacob et al., 2009) and (Obozinski et al., 2011).
\code{grpregOverlap} takes input design matrix \code{X} and grouping information
\code{group}, and expands \code{X} to the new, non-overlapping space. It then calls
\code{grpreg} for model fitting based on the group descent algorithm. Unlike
in \code{grpreg}, the interface for the group bridge-penalized method is not implemented.
The expanded design matrix is named \code{X.latent}. It is a returned value in the fitted
object, provided \code{returnX.latent} is TRUE. The latent coefficient (or norm) vector then
corresponds to that. Note that when constructing \code{X.latent}, the columns in \code{X}
corresponding to those variables not included in \code{group} will be removed automatically.
For more detailed explanation for the penalties and algorithm, see \code{\link[grpreg]{grpreg}}.
}
\value{
An object with S3 class \code{"grpregOverlap"} or \code{"grpsurvOverlap"} (for Cox models), which inherits \code{"grpreg"},
with following variables.
\item{beta}{
The fitted matrix of coefficients. The number of rows is equal to the number
of coefficients, and the number of columns is equal to \code{nlambda}.
}
\item{family}{Same as above.}
\item{group}{Same as above.}
\item{lambda}{
The sequence of \code{lambda} values in the path.
}
\item{alpha}{Same as above.}
\item{loss}{
A vector containing either the residual sum of squares (\code{"gaussian"}) or
negative log-likelihood (\code{"binomial"}) or negative partial log-likelihood (\code{"cox"}) of the fitted model at each value of \code{lambda}.}
\item{n}{Number of observations.}
\item{penalty}{Same as above.}
\item{df}{
A vector of length \code{nlambda} containing estimates of effective
number of model parameters all the points along the regularization path.
For details on how this is calculated, see Breheny and Huang (2009).
}
\item{iter}{
A vector of length \code{nlambda} containing the number of iterations until
convergence at each value of \code{lambda}.
}
\item{group.multiplier}{
A named vector containing the multiplicative constant applied to each group's
penalty.
}
\item{beta.latent}{
The fitted matrix of latent coefficients. The number of rows is equal to the number
of coefficients, and the number of columns is equal to \code{nlambda}.
}
\item{incidence.mat}{
Incidence matrix: I[i, j] = 1 if group i contains variable j; otherwise 0.
}
\item{grp.vec}{
A vector of consecutive integers indicating grouping information of variables. This
is equivalent to argument \code{group} in \code{\link[grpreg]{grpreg}}.
}
\item{overlap.mat}{
A square matrix \eqn{C} where \eqn{C[i, j]} is the number of overlapped
variables between group i and j. Diagonal value \eqn{C[i, i]} is therefore the
number of variables in group i. Only returned if \code{returnOverlap} is TRUE.
}
\item{X.latent}{
The new expanded design matrix for the latent group lasso formulation. The
variables are reordered according to the order of groups. Only returned if
\code{returnX.latent} is TRUE.
}
\item{W}{Matrix of \code{exp(beta)} values for each subject over all
\code{lambda} values. (For Cox models only)}
\item{time}{Times on study. (For Cox models only)}
\item{fail}{Failure event indicator. (For Cox models only)}
}
\references{
\itemize{
\item Zeng, Y., and Breheny, P. (2016). Overlapping Group Logistic Regression with Applications to Genetic Pathway Selection. \emph{Cancer Informatics}, \strong{15}, 179-187. \url{http://doi.org/10.4137/CIN.S40043}.
\item Jacob, L., Obozinski, G., and Vert, J. P. (2009, June). Group lasso with overlap and graph lasso. \emph{In Proceedings of the 26th annual international conference on machine learning, ACM}: 433-440. \url{http://www.machinelearning.org/archive/icml2009/papers/471.pdf}
\item Obozinski, G., Jacob, L., and Vert, J. P. (2011). Group lasso with overlaps: the latent group lasso approach. \url{http://arxiv.org/abs/1110.0413}.
\item Breheny, P. and Huang, J. (2009) Penalized methods for bi-level variable selection. \emph{Statistics and its interface}, \strong{2}: 369-380. \url{http://myweb.uiowa.edu/pbreheny/publications/Breheny2009.pdf}
\item Huang J., Breheny, P. and Ma, S. (2012). A selective review of group selection in high dimensional models. \emph{Statistical Science}, \strong{27}: 481-499. \url{http://myweb.uiowa.edu/pbreheny/publications/Huang2012.pdf}
\item Breheny P and Huang J (2015). Group descent algorithms for nonconvex penalized linear and logistic regression models with grouped predictors. \emph{Statistics and Computing}, \strong{25}: 173-187.\url{http://myweb.uiowa.edu/pbreheny/publications/group-computing.pdf}
\item Breheny P and Huang J (2009). Penalized methods for bi-level variable selection. \emph{Statistics and Its Interface}, \strong{2}: 369-380. \url{http://myweb.uiowa.edu/pbreheny/publications/Breheny2009.pdf}
\item Breheny P (2014). R package 'grpreg'. \url{https://CRAN.R-project.org/package=grpreg/grpreg.pdf}
}
}
\author{
Yaohui Zeng and Patrick Breheny
Maintainer: Yaohui Zeng <yaohui-zeng@uiowa.edu>
}
\seealso{
\code{\link{cv.grpregOverlap}}, \code{\link{cv.grpsurvOverlap}}, \code{\link[=plot.grpregOverlap]{plot}},
\code{\link[=select.grpregOverlap]{select}}, \code{\link[grpreg]{grpreg}}, \code{\link[grpreg]{grpsurv}}.
}
\examples{
## linear regression, a simulation demo.
set.seed(123)
group <- list(gr1 = c(1, 2, 3), gr2 = c(1, 4), gr3 = c(2, 4, 5),
gr4 = c(3, 5), gr5 = c(6))
beta.latent.T <- c(5, 5, 5, 0, 0, 0, 0, 0, 5, 5, 0) # true latent coefficients.
# beta.T <- c(5, 5, 10, 0, 5, 0), true variables: 1, 2, 3, 5; true groups: 1, 4.
X <- matrix(rnorm(n = 6*100), ncol = 6)
X.latent <- expandX(X, group)
y <- X.latent \%*\% beta.latent.T + rnorm(100)
fit <- grpregOverlap(X, y, group, penalty = 'grLasso')
# fit <- grpregOverlap(X, y, group, penalty = 'grMCP')
# fit <- grpregOverlap(X, y, group, penalty = 'grSCAD')
head(coef(fit, latent = TRUE)) # compare to beta.latent.T
plot(fit, latent = TRUE)
head(coef(fit, latent = FALSE)) # compare to beta.T
plot(fit, latent = FALSE)
cvfit <- cv.grpregOverlap(X, y, group, penalty = 'grMCP')
plot(cvfit)
head(coef(cvfit))
summary(cvfit)
## logistic regression, real data, pathway selection
data(pathway.dat)
X <- pathway.dat$expression
group <- pathway.dat$pathways
y <- pathway.dat$mutation
fit <- grpregOverlap(X, y, group, penalty = 'grLasso', family = 'binomial')
plot(fit)
str(select(fit))
str(select(fit,criterion="AIC",df="active"))
\dontrun{
cvfit <- cv.grpregOverlap(X, y, group, penalty = 'grLasso', family = 'binomial')
coef(cvfit)
predict(cvfit, X, type='response')
predict(cvfit, X, type = 'class')
plot(cvfit)
plot(cvfit, type = 'all')
summary(cvfit)
}
}
\keyword{grpregOverlap}
\keyword{models}
|
baa142026f0ec3b7bbbf15735e4e7ff522fb7eb3 | 1bcf2b2e09ed1912c316670a1e63d1ac6b150454 | /automatisation .R | ccc125a743c48dbd8efa1c2d66f21c3e2f797fe6 | [] | no_license | Jpaete/automatisation-traitement-base-de-donn-es | c87e356b5c96185c94a146defe0cdb8a13177878 | 3387934d1ef772737218813718afb2e546155af1 | refs/heads/main | 2023-05-05T14:21:09.618422 | 2021-05-26T11:18:54 | 2021-05-26T11:18:54 | 370,638,538 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 55,366 | r | automatisation .R | #packages ----
library(readxl)
#imports ----
# NOTE(review): the right-hand sides of the assignments below are incomplete —
# the read_excel() calls / file paths appear to have been stripped out, and
# `base_conditions_proj_dep = )` is a syntax error as written. The intended
# inputs (presumably Excel sheets read with readxl) need to be restored —
# TODO confirm the original file paths.
base_dep =
# NOTE(review): attach() is an anti-pattern (pollutes the search path and
# makes name resolution implicit); later code relies on `base_a_dep$...`,
# so prefer keeping the data frame itself and using explicit `$` access.
base_a_dep = attach(base_dep)
base_conditions_com_dep =
base_conditions_proj_dep = )
#selection etablissements publics ----

# Keywords (exact cell values) that identify a public-sector beneficiary in
# the free-text condition table.  A single vector + %in% replaces the
# original chain of ~70 `==` comparisons joined with `|`; duplicates from
# the original list ("villes", "département"/"departement") are collapsed.
mots_cles_publics_dep <- c(
  "public", "publics", "publique", "publiques",
  "departement", "département", "centrale", "mines", "cfa",
  "agglo", "agglomération", "agglomeration",
  "d'agglomération", "d'agglomeration",
  "chu", "lycée", "lycee", "collège", "college",
  "commune", "communes", "communal", "communale", "communales", "communaux",
  "ecole", "école", "cnrs", "recherche", "ifremer", "INRA", "intra", "inrae",
  "gip", "GIP", "inserm", "irt", "metropole", "oniris",
  "région", "region",
  "régional", "régionale", "régionales", "régionaux",
  "regional", "regionale", "regionales", "regionaux",
  "SCNF", "scnf", "gare", "gares", "mixte",
  "université", "universite", "ville", "villes", "chambre", "cci",
  "greta", "office", "emploi", "edf", "gdf"
)

## 1/0 indicator with the same shape (and dimnames) as the condition table:
## 1 where the cell equals a public-sector keyword, 0 otherwise.
## NA cells become 0 directly (`%in%` never returns NA), which matches the
## original code's explicit NA -> 0 replacement step.
coeff_etablissements_publics_dep <- array(
  as.numeric(unlist(base_conditions_com_dep) %in% mots_cles_publics_dep),
  dim = dim(base_conditions_com_dep),
  dimnames = dimnames(base_conditions_com_dep)
)

## kept for safety / parity with the original: with %in% there can be no NA
## left, so this line is now a no-op
coeff_etablissements_publics_dep[is.na(coeff_etablissements_publics_dep)] <- 0

## number of keyword hits per row (i.e. per beneficiary)
coeff_etablissements_publics_2_dep <- rowSums(coeff_etablissements_publics_dep)

## beneficiary names from the main table (read via the attach()ed environment)
beneficiaires_dep <- base_a_dep$`Nom du bénéficiaire`

## verification table: one row per beneficiary with its keyword-hit count
etablissements_public_select_dep <- data.frame(beneficiaires_dep,
                                               coeff_etablissements_publics_2_dep)
View(etablissements_public_select_dep)

## export as a semicolon-separated CSV (French locale conventions)
write.csv2(etablissements_public_select_dep, "/etablissements_public_select_dep.csv")
#selection projets verts ----
##condition
coeff_projets_verts_dep= ifelse( base_conditions_proj_dep== "développement"| base_conditions_proj_dep== "développement,"| base_conditions_proj_dep== "développement."| base_conditions_proj_dep== "développement)"| base_conditions_proj_dep== "(développement"| base_conditions_proj_dep== "(développement)"| base_conditions_proj_dep== "'développement"|
base_conditions_proj_dep== "énergétique"| base_conditions_proj_dep== "énergétique,"| base_conditions_proj_dep== "énergétique."| base_conditions_proj_dep== "énergétique)"| base_conditions_proj_dep== "(énergétique"| base_conditions_proj_dep== "(énergétique)"| base_conditions_proj_dep== "'énergétique"|
base_conditions_proj_dep== "rénovation"| base_conditions_proj_dep== "rénovation,"| base_conditions_proj_dep== "rénovation."| base_conditions_proj_dep== "rénovation)"| base_conditions_proj_dep== "(rénovation"| base_conditions_proj_dep== "(rénovation)"| base_conditions_proj_dep== "'rénovation"|
base_conditions_proj_dep== "natura"| base_conditions_proj_dep== "natura,"| base_conditions_proj_dep== "natura."| base_conditions_proj_dep== "natura)"| base_conditions_proj_dep== "(natura"| base_conditions_proj_dep== "(natura)"| base_conditions_proj_dep== "'natura"|
base_conditions_proj_dep== "amélioration"| base_conditions_proj_dep== "amélioration,"| base_conditions_proj_dep== "amélioration."| base_conditions_proj_dep== "amélioration)"| base_conditions_proj_dep== "(amélioration"| base_conditions_proj_dep== "(amélioration)"| base_conditions_proj_dep== "'amélioration"|
base_conditions_proj_dep== "restauration"| base_conditions_proj_dep== "restauration,"| base_conditions_proj_dep== "restauration."| base_conditions_proj_dep== "restauration)"| base_conditions_proj_dep== "(restauration"| base_conditions_proj_dep== "(restauration)"| base_conditions_proj_dep== "'restauration"|
base_conditions_proj_dep== "marais"| base_conditions_proj_dep== "marais,"| base_conditions_proj_dep== "marais."| base_conditions_proj_dep== "marais)"| base_conditions_proj_dep== "(marais"| base_conditions_proj_dep== "(marais)"| base_conditions_proj_dep== "'marais"|
base_conditions_proj_dep== "conservation"| base_conditions_proj_dep== "conservation,"| base_conditions_proj_dep== "conservation."| base_conditions_proj_dep== "conservation)"| base_conditions_proj_dep== "(conservation"| base_conditions_proj_dep== "(conservation)"| base_conditions_proj_dep== "'conservation"|
base_conditions_proj_dep== "ressources"| base_conditions_proj_dep== "ressources,"| base_conditions_proj_dep== "ressources."| base_conditions_proj_dep== "ressources)"| base_conditions_proj_dep== "(ressources"| base_conditions_proj_dep== "(ressources)"| base_conditions_proj_dep== "'ressources"|
base_conditions_proj_dep== "réhabilitation"| base_conditions_proj_dep== "réhabilitation,"| base_conditions_proj_dep== "réhabilitation."| base_conditions_proj_dep== "réhabilitation)"| base_conditions_proj_dep== "(réhabilitation"| base_conditions_proj_dep== "(réhabilitation)"| base_conditions_proj_dep== "'réhabilitation"|
base_conditions_proj_dep== "remplacement"| base_conditions_proj_dep== "remplacement,"| base_conditions_proj_dep== "remplacement."| base_conditions_proj_dep== "remplacement)"| base_conditions_proj_dep== "(remplacement"| base_conditions_proj_dep== "(remplacement)"| base_conditions_proj_dep== "'remplacement"|
base_conditions_proj_dep== "préservation"| base_conditions_proj_dep== "préservation,"| base_conditions_proj_dep== "préservation."| base_conditions_proj_dep== "préservation)"| base_conditions_proj_dep== "(préservation"| base_conditions_proj_dep== "(préservation)"| base_conditions_proj_dep== "'préservation"|
base_conditions_proj_dep== "parcs "| base_conditions_proj_dep== "parcs ,"| base_conditions_proj_dep== "parcs ."| base_conditions_proj_dep== "parcs )"| base_conditions_proj_dep== "(parcs "| base_conditions_proj_dep== "(parcs )"| base_conditions_proj_dep== "'parcs "|
base_conditions_proj_dep== "énergie"| base_conditions_proj_dep== "énergie,"| base_conditions_proj_dep== "énergie."| base_conditions_proj_dep== "énergie)"| base_conditions_proj_dep== "(énergie"| base_conditions_proj_dep== "(énergie)"| base_conditions_proj_dep== "'énergie"|
base_conditions_proj_dep== "favoriser"| base_conditions_proj_dep== "favoriser,"| base_conditions_proj_dep== "favoriser."| base_conditions_proj_dep== "favoriser)"| base_conditions_proj_dep== "(favoriser"| base_conditions_proj_dep== "(favoriser)"| base_conditions_proj_dep== "'favoriser"|
base_conditions_proj_dep== "industrielle"| base_conditions_proj_dep== "industrielle,"| base_conditions_proj_dep== "industrielle."| base_conditions_proj_dep== "industrielle)"| base_conditions_proj_dep== "(industrielle"| base_conditions_proj_dep== "(industrielle)"| base_conditions_proj_dep== "'industrielle"|
base_conditions_proj_dep== "naturels"| base_conditions_proj_dep== "naturels,"| base_conditions_proj_dep== "naturels."| base_conditions_proj_dep== "naturels)"| base_conditions_proj_dep== "(naturels"| base_conditions_proj_dep== "(naturels)"| base_conditions_proj_dep== "'naturels"|
base_conditions_proj_dep== "quais"| base_conditions_proj_dep== "quais,"| base_conditions_proj_dep== "quais."| base_conditions_proj_dep== "quais)"| base_conditions_proj_dep== "(quais"| base_conditions_proj_dep== "(quais)"| base_conditions_proj_dep== "'quais"|
base_conditions_proj_dep== "transfromation"| base_conditions_proj_dep== "transfromation,"| base_conditions_proj_dep== "transfromation."| base_conditions_proj_dep== "transfromation)"| base_conditions_proj_dep== "(transfromation"| base_conditions_proj_dep== "(transfromation)"| base_conditions_proj_dep== "'transfromation"|
base_conditions_proj_dep== "voie "| base_conditions_proj_dep== "voie ,"| base_conditions_proj_dep== "voie ."| base_conditions_proj_dep== "voie )"| base_conditions_proj_dep== "(voie "| base_conditions_proj_dep== "(voie )"| base_conditions_proj_dep== "'voie "|
base_conditions_proj_dep== "(ges)"| base_conditions_proj_dep== "(ges),"| base_conditions_proj_dep== "(ges)."| base_conditions_proj_dep== "(ges))"| base_conditions_proj_dep== "((ges)"| base_conditions_proj_dep== "((ges))"| base_conditions_proj_dep== "'(ges)"|
base_conditions_proj_dep== "améliorer"| base_conditions_proj_dep== "améliorer,"| base_conditions_proj_dep== "améliorer."| base_conditions_proj_dep== "améliorer)"| base_conditions_proj_dep== "(améliorer"| base_conditions_proj_dep== "(améliorer)"| base_conditions_proj_dep== "'améliorer"|
base_conditions_proj_dep== "cyclable"| base_conditions_proj_dep== "cyclable,"| base_conditions_proj_dep== "cyclable."| base_conditions_proj_dep== "cyclable)"| base_conditions_proj_dep== "(cyclable"| base_conditions_proj_dep== "(cyclable)"| base_conditions_proj_dep== "'cyclable"|
base_conditions_proj_dep== "durable"| base_conditions_proj_dep== "durable,"| base_conditions_proj_dep== "durable."| base_conditions_proj_dep== "durable)"| base_conditions_proj_dep== "(durable"| base_conditions_proj_dep== "(durable)"| base_conditions_proj_dep== "'durable"|
base_conditions_proj_dep== "itinéraire"| base_conditions_proj_dep== "itinéraire,"| base_conditions_proj_dep== "itinéraire."| base_conditions_proj_dep== "itinéraire)"| base_conditions_proj_dep== "(itinéraire"| base_conditions_proj_dep== "(itinéraire)"| base_conditions_proj_dep== "'itinéraire"|
base_conditions_proj_dep== "protection"| base_conditions_proj_dep== "protection,"| base_conditions_proj_dep== "protection."| base_conditions_proj_dep== "protection)"| base_conditions_proj_dep== "(protection"| base_conditions_proj_dep== "(protection)"| base_conditions_proj_dep== "'protection"|
base_conditions_proj_dep== "valorisation"| base_conditions_proj_dep== "valorisation,"| base_conditions_proj_dep== "valorisation."| base_conditions_proj_dep== "valorisation)"| base_conditions_proj_dep== "(valorisation"| base_conditions_proj_dep== "(valorisation)"| base_conditions_proj_dep== "'valorisation"|
base_conditions_proj_dep== "vélo"| base_conditions_proj_dep== "vélo,"| base_conditions_proj_dep== "vélo."| base_conditions_proj_dep== "vélo)"| base_conditions_proj_dep== "(vélo"| base_conditions_proj_dep== "(vélo)"| base_conditions_proj_dep== "'vélo"|
base_conditions_proj_dep== "eau"| base_conditions_proj_dep== "eau,"| base_conditions_proj_dep== "eau."| base_conditions_proj_dep== "eau)"| base_conditions_proj_dep== "(eau"| base_conditions_proj_dep== "(eau)"| base_conditions_proj_dep== "'eau"|
base_conditions_proj_dep== "estuaire"| base_conditions_proj_dep== "estuaire,"| base_conditions_proj_dep== "estuaire."| base_conditions_proj_dep== "estuaire)"| base_conditions_proj_dep== "(estuaire"| base_conditions_proj_dep== "(estuaire)"| base_conditions_proj_dep== "'estuaire"|
base_conditions_proj_dep== "performance"| base_conditions_proj_dep== "performance,"| base_conditions_proj_dep== "performance."| base_conditions_proj_dep== "performance)"| base_conditions_proj_dep== "(performance"| base_conditions_proj_dep== "(performance)"| base_conditions_proj_dep== "'performance"|
base_conditions_proj_dep== "aménagement"| base_conditions_proj_dep== "aménagement,"| base_conditions_proj_dep== "aménagement."| base_conditions_proj_dep== "aménagement)"| base_conditions_proj_dep== "(aménagement"| base_conditions_proj_dep== "(aménagement)"| base_conditions_proj_dep== "'aménagement"|
base_conditions_proj_dep== "isolation"| base_conditions_proj_dep== "isolation,"| base_conditions_proj_dep== "isolation."| base_conditions_proj_dep== "isolation)"| base_conditions_proj_dep== "(isolation"| base_conditions_proj_dep== "(isolation)"| base_conditions_proj_dep== "'isolation"|
base_conditions_proj_dep== "amélioration"| base_conditions_proj_dep== "amélioration,"| base_conditions_proj_dep== "amélioration."| base_conditions_proj_dep== "amélioration)"| base_conditions_proj_dep== "(amélioration"| base_conditions_proj_dep== "(amélioration)"| base_conditions_proj_dep== "'amélioration"|
base_conditions_proj_dep== "responsable"| base_conditions_proj_dep== "responsable,"| base_conditions_proj_dep== "responsable."| base_conditions_proj_dep== "responsable)"| base_conditions_proj_dep== "(responsable"| base_conditions_proj_dep== "(responsable)"| base_conditions_proj_dep== "'responsable"|
base_conditions_proj_dep== "thermique"| base_conditions_proj_dep== "thermique,"| base_conditions_proj_dep== "thermique."| base_conditions_proj_dep== "thermique)"| base_conditions_proj_dep== "(thermique"| base_conditions_proj_dep== "(thermique)"| base_conditions_proj_dep== "'thermique"|
base_conditions_proj_dep== "bois"| base_conditions_proj_dep== "bois,"| base_conditions_proj_dep== "bois."| base_conditions_proj_dep== "bois)"| base_conditions_proj_dep== "(bois"| base_conditions_proj_dep== "(bois)"| base_conditions_proj_dep== "'bois"|
base_conditions_proj_dep== "changement"| base_conditions_proj_dep== "changement,"| base_conditions_proj_dep== "changement."| base_conditions_proj_dep== "changement)"| base_conditions_proj_dep== "(changement"| base_conditions_proj_dep== "(changement)"| base_conditions_proj_dep== "'changement"|
base_conditions_proj_dep== "chaudières"| base_conditions_proj_dep== "chaudières,"| base_conditions_proj_dep== "chaudières."| base_conditions_proj_dep== "chaudières)"| base_conditions_proj_dep== "(chaudières"| base_conditions_proj_dep== "(chaudières)"| base_conditions_proj_dep== "'chaudières"|
base_conditions_proj_dep== "développement"| base_conditions_proj_dep== "développement,"| base_conditions_proj_dep== "développement."| base_conditions_proj_dep== "développement)"| base_conditions_proj_dep== "(développement"| base_conditions_proj_dep== "(développement)"| base_conditions_proj_dep== "'développement"|
base_conditions_proj_dep== "énergies"| base_conditions_proj_dep== "énergies,"| base_conditions_proj_dep== "énergies."| base_conditions_proj_dep== "énergies)"| base_conditions_proj_dep== "(énergies"| base_conditions_proj_dep== "(énergies)"| base_conditions_proj_dep== "'énergies"|
base_conditions_proj_dep== "modernisation"| base_conditions_proj_dep== "modernisation,"| base_conditions_proj_dep== "modernisation."| base_conditions_proj_dep== "modernisation)"| base_conditions_proj_dep== "(modernisation"| base_conditions_proj_dep== "(modernisation)"| base_conditions_proj_dep== "'modernisation"|
base_conditions_proj_dep== "revitalisation"| base_conditions_proj_dep== "revitalisation,"| base_conditions_proj_dep== "revitalisation."| base_conditions_proj_dep== "revitalisation)"| base_conditions_proj_dep== "(revitalisation"| base_conditions_proj_dep== "(revitalisation)"| base_conditions_proj_dep== "'revitalisation"|
base_conditions_proj_dep== "sanitaire"| base_conditions_proj_dep== "sanitaire,"| base_conditions_proj_dep== "sanitaire."| base_conditions_proj_dep== "sanitaire)"| base_conditions_proj_dep== "(sanitaire"| base_conditions_proj_dep== "(sanitaire)"| base_conditions_proj_dep== "'sanitaire"|
base_conditions_proj_dep== "vélo"| base_conditions_proj_dep== "vélo,"| base_conditions_proj_dep== "vélo."| base_conditions_proj_dep== "vélo)"| base_conditions_proj_dep== "(vélo"| base_conditions_proj_dep== "(vélo)"| base_conditions_proj_dep== "'vélo"|
base_conditions_proj_dep== "ailmentation"| base_conditions_proj_dep== "ailmentation,"| base_conditions_proj_dep== "ailmentation."| base_conditions_proj_dep== "ailmentation)"| base_conditions_proj_dep== "(ailmentation"| base_conditions_proj_dep== "(ailmentation)"| base_conditions_proj_dep== "'ailmentation"|
base_conditions_proj_dep== "eau"| base_conditions_proj_dep== "eau,"| base_conditions_proj_dep== "eau."| base_conditions_proj_dep== "eau)"| base_conditions_proj_dep== "(eau"| base_conditions_proj_dep== "(eau)"| base_conditions_proj_dep== "'eau"|
base_conditions_proj_dep== "biopolymères"| base_conditions_proj_dep== "biopolymères,"| base_conditions_proj_dep== "biopolymères."| base_conditions_proj_dep== "biopolymères)"| base_conditions_proj_dep== "(biopolymères"| base_conditions_proj_dep== "(biopolymères)"| base_conditions_proj_dep== "'biopolymères"|
base_conditions_proj_dep== "aquatique"| base_conditions_proj_dep== "aquatique,"| base_conditions_proj_dep== "aquatique."| base_conditions_proj_dep== "aquatique)"| base_conditions_proj_dep== "(aquatique"| base_conditions_proj_dep== "(aquatique)"| base_conditions_proj_dep== "'aquatique"|
base_conditions_proj_dep== "chauffage"| base_conditions_proj_dep== "chauffage,"| base_conditions_proj_dep== "chauffage."| base_conditions_proj_dep== "chauffage)"| base_conditions_proj_dep== "(chauffage"| base_conditions_proj_dep== "(chauffage)"| base_conditions_proj_dep== "'chauffage"|
base_conditions_proj_dep== "cyclables"| base_conditions_proj_dep== "cyclables,"| base_conditions_proj_dep== "cyclables."| base_conditions_proj_dep== "cyclables)"| base_conditions_proj_dep== "(cyclables"| base_conditions_proj_dep== "(cyclables)"| base_conditions_proj_dep== "'cyclables"|
base_conditions_proj_dep== "efficiente"| base_conditions_proj_dep== "efficiente,"| base_conditions_proj_dep== "efficiente."| base_conditions_proj_dep== "efficiente)"| base_conditions_proj_dep== "(efficiente"| base_conditions_proj_dep== "(efficiente)"| base_conditions_proj_dep== "'efficiente"|
base_conditions_proj_dep== "lutte"| base_conditions_proj_dep== "lutte,"| base_conditions_proj_dep== "lutte."| base_conditions_proj_dep== "lutte)"| base_conditions_proj_dep== "(lutte"| base_conditions_proj_dep== "(lutte)"| base_conditions_proj_dep== "'lutte"|
base_conditions_proj_dep== "prévention"| base_conditions_proj_dep== "prévention,"| base_conditions_proj_dep== "prévention."| base_conditions_proj_dep== "prévention)"| base_conditions_proj_dep== "(prévention"| base_conditions_proj_dep== "(prévention)"| base_conditions_proj_dep== "'prévention"|
base_conditions_proj_dep== "ventilation"| base_conditions_proj_dep== "ventilation,"| base_conditions_proj_dep== "ventilation."| base_conditions_proj_dep== "ventilation)"| base_conditions_proj_dep== "(ventilation"| base_conditions_proj_dep== "(ventilation)"| base_conditions_proj_dep== "'ventilation"|
base_conditions_proj_dep== "améliorer"| base_conditions_proj_dep== "améliorer,"| base_conditions_proj_dep== "améliorer."| base_conditions_proj_dep== "améliorer)"| base_conditions_proj_dep== "(améliorer"| base_conditions_proj_dep== "(améliorer)"| base_conditions_proj_dep== "'améliorer"|
base_conditions_proj_dep== "biodiversité"| base_conditions_proj_dep== "biodiversité,"| base_conditions_proj_dep== "biodiversité."| base_conditions_proj_dep== "biodiversité)"| base_conditions_proj_dep== "(biodiversité"| base_conditions_proj_dep== "(biodiversité)"| base_conditions_proj_dep== "'biodiversité"|
base_conditions_proj_dep== "baie"| base_conditions_proj_dep== "baie,"| base_conditions_proj_dep== "baie."| base_conditions_proj_dep== "baie)"| base_conditions_proj_dep== "(baie"| base_conditions_proj_dep== "(baie)"| base_conditions_proj_dep== "'baie"|
base_conditions_proj_dep== "canaux"| base_conditions_proj_dep== "canaux,"| base_conditions_proj_dep== "canaux."| base_conditions_proj_dep== "canaux)"| base_conditions_proj_dep== "(canaux"| base_conditions_proj_dep== "(canaux)"| base_conditions_proj_dep== "'canaux"|
base_conditions_proj_dep== "conforme"| base_conditions_proj_dep== "conforme,"| base_conditions_proj_dep== "conforme."| base_conditions_proj_dep== "conforme)"| base_conditions_proj_dep== "(conforme"| base_conditions_proj_dep== "(conforme)"| base_conditions_proj_dep== "'conforme"|
base_conditions_proj_dep== "douce"| base_conditions_proj_dep== "douce,"| base_conditions_proj_dep== "douce."| base_conditions_proj_dep== "douce)"| base_conditions_proj_dep== "(douce"| base_conditions_proj_dep== "(douce)"| base_conditions_proj_dep== "'douce"|
base_conditions_proj_dep== "écologique"| base_conditions_proj_dep== "écologique,"| base_conditions_proj_dep== "écologique."| base_conditions_proj_dep== "écologique)"| base_conditions_proj_dep== "(écologique"| base_conditions_proj_dep== "(écologique)"| base_conditions_proj_dep== "'écologique"|
base_conditions_proj_dep== "énergétique "| base_conditions_proj_dep== "énergétique ,"| base_conditions_proj_dep== "énergétique ."| base_conditions_proj_dep== "énergétique )"| base_conditions_proj_dep== "(énergétique "| base_conditions_proj_dep== "(énergétique )"| base_conditions_proj_dep== "'énergétique "|
base_conditions_proj_dep== "energie"| base_conditions_proj_dep== "energie,"| base_conditions_proj_dep== "energie."| base_conditions_proj_dep== "energie)"| base_conditions_proj_dep== "(energie"| base_conditions_proj_dep== "(energie)"| base_conditions_proj_dep== "'energie"|
base_conditions_proj_dep== "fleuve"| base_conditions_proj_dep== "fleuve,"| base_conditions_proj_dep== "fleuve."| base_conditions_proj_dep== "fleuve)"| base_conditions_proj_dep== "(fleuve"| base_conditions_proj_dep== "(fleuve)"| base_conditions_proj_dep== "'fleuve"|
base_conditions_proj_dep== "kwhep/m²/an"| base_conditions_proj_dep== "kwhep/m²/an,"| base_conditions_proj_dep== "kwhep/m²/an."| base_conditions_proj_dep== "kwhep/m²/an)"| base_conditions_proj_dep== "(kwhep/m²/an"| base_conditions_proj_dep== "(kwhep/m²/an)"| base_conditions_proj_dep== "'kwhep/m²/an"|
base_conditions_proj_dep== "marine"| base_conditions_proj_dep== "marine,"| base_conditions_proj_dep== "marine."| base_conditions_proj_dep== "marine)"| base_conditions_proj_dep== "(marine"| base_conditions_proj_dep== "(marine)"| base_conditions_proj_dep== "'marine"|
base_conditions_proj_dep== "mer"| base_conditions_proj_dep== "mer,"| base_conditions_proj_dep== "mer."| base_conditions_proj_dep== "mer)"| base_conditions_proj_dep== "(mer"| base_conditions_proj_dep== "(mer)"| base_conditions_proj_dep== "'mer"|
base_conditions_proj_dep== "multimodal"| base_conditions_proj_dep== "multimodal,"| base_conditions_proj_dep== "multimodal."| base_conditions_proj_dep== "multimodal)"| base_conditions_proj_dep== "(multimodal"| base_conditions_proj_dep== "(multimodal)"| base_conditions_proj_dep== "'multimodal"|
base_conditions_proj_dep== "normes"| base_conditions_proj_dep== "normes,"| base_conditions_proj_dep== "normes."| base_conditions_proj_dep== "normes)"| base_conditions_proj_dep== "(normes"| base_conditions_proj_dep== "(normes)"| base_conditions_proj_dep== "'normes"|
base_conditions_proj_dep== "nucléaire"| base_conditions_proj_dep== "nucléaire,"| base_conditions_proj_dep== "nucléaire."| base_conditions_proj_dep== "nucléaire)"| base_conditions_proj_dep== "(nucléaire"| base_conditions_proj_dep== "(nucléaire)"| base_conditions_proj_dep== "'nucléaire"|
base_conditions_proj_dep== "performances"| base_conditions_proj_dep== "performances,"| base_conditions_proj_dep== "performances."| base_conditions_proj_dep== "performances)"| base_conditions_proj_dep== "(performances"| base_conditions_proj_dep== "(performances)"| base_conditions_proj_dep== "'performances"|
base_conditions_proj_dep== "protéger"| base_conditions_proj_dep== "protéger,"| base_conditions_proj_dep== "protéger."| base_conditions_proj_dep== "protéger)"| base_conditions_proj_dep== "(protéger"| base_conditions_proj_dep== "(protéger)"| base_conditions_proj_dep== "'protéger"|
base_conditions_proj_dep== "protégés"| base_conditions_proj_dep== "protégés,"| base_conditions_proj_dep== "protégés."| base_conditions_proj_dep== "protégés)"| base_conditions_proj_dep== "(protégés"| base_conditions_proj_dep== "(protégés)"| base_conditions_proj_dep== "'protégés"|
base_conditions_proj_dep== "renouvelables"| base_conditions_proj_dep== "renouvelables,"| base_conditions_proj_dep== "renouvelables."| base_conditions_proj_dep== "renouvelables)"| base_conditions_proj_dep== "(renouvelables"| base_conditions_proj_dep== "(renouvelables)"| base_conditions_proj_dep== "'renouvelables"|
base_conditions_proj_dep== "sensibilation"| base_conditions_proj_dep== "sensibilation,"| base_conditions_proj_dep== "sensibilation."| base_conditions_proj_dep== "sensibilation)"| base_conditions_proj_dep== "(sensibilation"| base_conditions_proj_dep== "(sensibilation)"| base_conditions_proj_dep== "'sensibilation"|
base_conditions_proj_dep== "sensibiliser"| base_conditions_proj_dep== "sensibiliser,"| base_conditions_proj_dep== "sensibiliser."| base_conditions_proj_dep== "sensibiliser)"| base_conditions_proj_dep== "(sensibiliser"| base_conditions_proj_dep== "(sensibiliser)"| base_conditions_proj_dep== "'sensibiliser"|
base_conditions_proj_dep== "thermostatiques"| base_conditions_proj_dep== "thermostatiques,"| base_conditions_proj_dep== "thermostatiques."| base_conditions_proj_dep== "thermostatiques)"| base_conditions_proj_dep== "(thermostatiques"| base_conditions_proj_dep== "(thermostatiques)"| base_conditions_proj_dep== "'thermostatiques"|
base_conditions_proj_dep== "ventilation"| base_conditions_proj_dep== "ventilation,"| base_conditions_proj_dep== "ventilation."| base_conditions_proj_dep== "ventilation)"| base_conditions_proj_dep== "(ventilation"| base_conditions_proj_dep== "(ventilation)"| base_conditions_proj_dep== "'ventilation"|
base_conditions_proj_dep== "verte"| base_conditions_proj_dep== "verte,"| base_conditions_proj_dep== "verte."| base_conditions_proj_dep== "verte)"| base_conditions_proj_dep== "(verte"| base_conditions_proj_dep== "(verte)"| base_conditions_proj_dep== "'verte"|
base_conditions_proj_dep== "vertes"| base_conditions_proj_dep== "vertes,"| base_conditions_proj_dep== "vertes."| base_conditions_proj_dep== "vertes)"| base_conditions_proj_dep== "(vertes"| base_conditions_proj_dep== "(vertes)"| base_conditions_proj_dep== "'vertes"|
base_conditions_proj_dep== "électricité"| base_conditions_proj_dep== "électricité,"| base_conditions_proj_dep== "électricité."| base_conditions_proj_dep== "électricité)"| base_conditions_proj_dep== "(électricité"| base_conditions_proj_dep== "(électricité)"| base_conditions_proj_dep== "'électricité"|
base_conditions_proj_dep== "énergie"| base_conditions_proj_dep== "énergie,"| base_conditions_proj_dep== "énergie."| base_conditions_proj_dep== "énergie)"| base_conditions_proj_dep== "(énergie"| base_conditions_proj_dep== "(énergie)"| base_conditions_proj_dep== "'énergie"|
base_conditions_proj_dep== "environnement"| base_conditions_proj_dep== "environnement,"| base_conditions_proj_dep== "environnement."| base_conditions_proj_dep== "environnement)"| base_conditions_proj_dep== "(environnement"| base_conditions_proj_dep== "(environnement)"| base_conditions_proj_dep== "'environnement"|
base_conditions_proj_dep== "insectes"| base_conditions_proj_dep== "insectes,"| base_conditions_proj_dep== "insectes."| base_conditions_proj_dep== "insectes)"| base_conditions_proj_dep== "(insectes"| base_conditions_proj_dep== "(insectes)"| base_conditions_proj_dep== "'insectes"|
base_conditions_proj_dep== "(ges)"| base_conditions_proj_dep== "(ges),"| base_conditions_proj_dep== "(ges)."| base_conditions_proj_dep== "(ges))"| base_conditions_proj_dep== "((ges)"| base_conditions_proj_dep== "((ges))"| base_conditions_proj_dep== "'(ges)"|
base_conditions_proj_dep== "transition"| base_conditions_proj_dep== "transition,"| base_conditions_proj_dep== "transition."| base_conditions_proj_dep== "transition)"| base_conditions_proj_dep== "(transition"| base_conditions_proj_dep== "(transition)"| base_conditions_proj_dep== "'transition"|
base_conditions_proj_dep== "biomarqueurs"| base_conditions_proj_dep== "biomarqueurs,"| base_conditions_proj_dep== "biomarqueurs."| base_conditions_proj_dep== "biomarqueurs)"| base_conditions_proj_dep== "(biomarqueurs"| base_conditions_proj_dep== "(biomarqueurs)"| base_conditions_proj_dep== "'biomarqueurs"|
base_conditions_proj_dep== "bioregate"| base_conditions_proj_dep== "bioregate,"| base_conditions_proj_dep== "bioregate."| base_conditions_proj_dep== "bioregate)"| base_conditions_proj_dep== "(bioregate"| base_conditions_proj_dep== "(bioregate)"| base_conditions_proj_dep== "'bioregate"|
base_conditions_proj_dep== "biothérapies"| base_conditions_proj_dep== "biothérapies,"| base_conditions_proj_dep== "biothérapies."| base_conditions_proj_dep== "biothérapies)"| base_conditions_proj_dep== "(biothérapies"| base_conditions_proj_dep== "(biothérapies)"| base_conditions_proj_dep== "'biothérapies"|
base_conditions_proj_dep== "chaleur"| base_conditions_proj_dep== "chaleur,"| base_conditions_proj_dep== "chaleur."| base_conditions_proj_dep== "chaleur)"| base_conditions_proj_dep== "(chaleur"| base_conditions_proj_dep== "(chaleur)"| base_conditions_proj_dep== "'chaleur"|
base_conditions_proj_dep== "co2"| base_conditions_proj_dep== "co2,"| base_conditions_proj_dep== "co2."| base_conditions_proj_dep== "co2)"| base_conditions_proj_dep== "(co2"| base_conditions_proj_dep== "(co2)"| base_conditions_proj_dep== "'co2"|
base_conditions_proj_dep== "CO2"| base_conditions_proj_dep== "CO2,"| base_conditions_proj_dep== "CO2."| base_conditions_proj_dep== "CO2)"| base_conditions_proj_dep== "(CO2"| base_conditions_proj_dep== "(CO2)"| base_conditions_proj_dep== "'CO2"|
base_conditions_proj_dep== "condensation"| base_conditions_proj_dep== "condensation,"| base_conditions_proj_dep== "condensation."| base_conditions_proj_dep== "condensation)"| base_conditions_proj_dep== "(condensation"| base_conditions_proj_dep== "(condensation)"| base_conditions_proj_dep== "'condensation"|
base_conditions_proj_dep== "cyclistes"| base_conditions_proj_dep== "cyclistes,"| base_conditions_proj_dep== "cyclistes."| base_conditions_proj_dep== "cyclistes)"| base_conditions_proj_dep== "(cyclistes"| base_conditions_proj_dep== "(cyclistes)"| base_conditions_proj_dep== "'cyclistes"|
base_conditions_proj_dep== "déplacements"| base_conditions_proj_dep== "déplacements,"| base_conditions_proj_dep== "déplacements."| base_conditions_proj_dep== "déplacements)"| base_conditions_proj_dep== "(déplacements"| base_conditions_proj_dep== "(déplacements)"| base_conditions_proj_dep== "'déplacements"|
base_conditions_proj_dep== "diversité"| base_conditions_proj_dep== "diversité,"| base_conditions_proj_dep== "diversité."| base_conditions_proj_dep== "diversité)"| base_conditions_proj_dep== "(diversité"| base_conditions_proj_dep== "(diversité)"| base_conditions_proj_dep== "'diversité"|
base_conditions_proj_dep== "doux "| base_conditions_proj_dep== "doux ,"| base_conditions_proj_dep== "doux ."| base_conditions_proj_dep== "doux )"| base_conditions_proj_dep== "(doux "| base_conditions_proj_dep== "(doux )"| base_conditions_proj_dep== "'doux "|
base_conditions_proj_dep== "eaux"| base_conditions_proj_dep== "eaux,"| base_conditions_proj_dep== "eaux."| base_conditions_proj_dep== "eaux)"| base_conditions_proj_dep== "(eaux"| base_conditions_proj_dep== "(eaux)"| base_conditions_proj_dep== "'eaux"|
base_conditions_proj_dep== "électrique"| base_conditions_proj_dep== "électrique,"| base_conditions_proj_dep== "électrique."| base_conditions_proj_dep== "électrique)"| base_conditions_proj_dep== "(électrique"| base_conditions_proj_dep== "(électrique)"| base_conditions_proj_dep== "'électrique"|
base_conditions_proj_dep== "énergétiques"| base_conditions_proj_dep== "énergétiques,"| base_conditions_proj_dep== "énergétiques."| base_conditions_proj_dep== "énergétiques)"| base_conditions_proj_dep== "(énergétiques"| base_conditions_proj_dep== "(énergétiques)"| base_conditions_proj_dep== "'énergétiques"|
base_conditions_proj_dep== "énergies"| base_conditions_proj_dep== "énergies,"| base_conditions_proj_dep== "énergies."| base_conditions_proj_dep== "énergies)"| base_conditions_proj_dep== "(énergies"| base_conditions_proj_dep== "(énergies)"| base_conditions_proj_dep== "'énergies"|
base_conditions_proj_dep== "environnemental"| base_conditions_proj_dep== "environnemental,"| base_conditions_proj_dep== "environnemental."| base_conditions_proj_dep== "environnemental)"| base_conditions_proj_dep== "(environnemental"| base_conditions_proj_dep== "(environnemental)"| base_conditions_proj_dep== "'environnemental"|
base_conditions_proj_dep== "environnementaux"| base_conditions_proj_dep== "environnementaux,"| base_conditions_proj_dep== "environnementaux."| base_conditions_proj_dep== "environnementaux)"| base_conditions_proj_dep== "(environnementaux"| base_conditions_proj_dep== "(environnementaux)"| base_conditions_proj_dep== "'environnementaux"|
base_conditions_proj_dep== "gares"| base_conditions_proj_dep== "gares,"| base_conditions_proj_dep== "gares."| base_conditions_proj_dep== "gares)"| base_conditions_proj_dep== "(gares"| base_conditions_proj_dep== "(gares)"| base_conditions_proj_dep== "'gares"|
base_conditions_proj_dep== "green"| base_conditions_proj_dep== "green,"| base_conditions_proj_dep== "green."| base_conditions_proj_dep== "green)"| base_conditions_proj_dep== "(green"| base_conditions_proj_dep== "(green)"| base_conditions_proj_dep== "'green"|
base_conditions_proj_dep== "humides"| base_conditions_proj_dep== "humides,"| base_conditions_proj_dep== "humides."| base_conditions_proj_dep== "humides)"| base_conditions_proj_dep== "(humides"| base_conditions_proj_dep== "(humides)"| base_conditions_proj_dep== "'humides"|
base_conditions_proj_dep== "lutter"| base_conditions_proj_dep== "lutter,"| base_conditions_proj_dep== "lutter."| base_conditions_proj_dep== "lutter)"| base_conditions_proj_dep== "(lutter"| base_conditions_proj_dep== "(lutter)"| base_conditions_proj_dep== "'lutter"|
base_conditions_proj_dep== "mobilités"| base_conditions_proj_dep== "mobilités,"| base_conditions_proj_dep== "mobilités."| base_conditions_proj_dep== "mobilités)"| base_conditions_proj_dep== "(mobilités"| base_conditions_proj_dep== "(mobilités)"| base_conditions_proj_dep== "'mobilités"|
base_conditions_proj_dep== "mobilité"| base_conditions_proj_dep== "mobilité,"| base_conditions_proj_dep== "mobilité."| base_conditions_proj_dep== "mobilité)"| base_conditions_proj_dep== "(mobilité"| base_conditions_proj_dep== "(mobilité)"| base_conditions_proj_dep== "'mobilité"|
base_conditions_proj_dep== "renouvelables"| base_conditions_proj_dep== "renouvelables,"| base_conditions_proj_dep== "renouvelables."| base_conditions_proj_dep== "renouvelables)"| base_conditions_proj_dep== "(renouvelables"| base_conditions_proj_dep== "(renouvelables)"| base_conditions_proj_dep== "'renouvelables"|
base_conditions_proj_dep== "restaurer"| base_conditions_proj_dep== "restaurer,"| base_conditions_proj_dep== "restaurer."| base_conditions_proj_dep== "restaurer)"| base_conditions_proj_dep== "(restaurer"| base_conditions_proj_dep== "(restaurer)"| base_conditions_proj_dep== "'restaurer"|
base_conditions_proj_dep== "substances"| base_conditions_proj_dep== "substances,"| base_conditions_proj_dep== "substances."| base_conditions_proj_dep== "substances)"| base_conditions_proj_dep== "(substances"| base_conditions_proj_dep== "(substances)"| base_conditions_proj_dep== "'substances"|
base_conditions_proj_dep== "alimentation"| base_conditions_proj_dep== "alimentation,"| base_conditions_proj_dep== "alimentation."| base_conditions_proj_dep== "alimentation)"| base_conditions_proj_dep== "(alimentation"| base_conditions_proj_dep== "(alimentation)"| base_conditions_proj_dep== "'alimentation"|
base_conditions_proj_dep== "norme"| base_conditions_proj_dep== "norme,"| base_conditions_proj_dep== "norme."| base_conditions_proj_dep== "norme)"| base_conditions_proj_dep== "(norme"| base_conditions_proj_dep== "(norme)"| base_conditions_proj_dep== "'norme"|
base_conditions_proj_dep== "adaptation"| base_conditions_proj_dep== "adaptation,"| base_conditions_proj_dep== "adaptation."| base_conditions_proj_dep== "adaptation)"| base_conditions_proj_dep== "(adaptation"| base_conditions_proj_dep== "(adaptation)"| base_conditions_proj_dep== "'adaptation"|
base_conditions_proj_dep== "recyclage"| base_conditions_proj_dep== "recyclage,"| base_conditions_proj_dep== "recyclage."| base_conditions_proj_dep== "recyclage)"| base_conditions_proj_dep== "(recyclage"| base_conditions_proj_dep== "(recyclage)"| base_conditions_proj_dep== "'recyclage"|
base_conditions_proj_dep== "biologique"| base_conditions_proj_dep== "biologique,"| base_conditions_proj_dep== "biologique."| base_conditions_proj_dep== "biologique)"| base_conditions_proj_dep== "(biologique"| base_conditions_proj_dep== "(biologique)"| base_conditions_proj_dep== "'biologique"|
base_conditions_proj_dep== "biologiques"| base_conditions_proj_dep== "biologiques,"| base_conditions_proj_dep== "biologiques."| base_conditions_proj_dep== "biologiques)"| base_conditions_proj_dep== "(biologiques"| base_conditions_proj_dep== "(biologiques)"| base_conditions_proj_dep== "'biologiques"|
base_conditions_proj_dep== "biologie"| base_conditions_proj_dep== "biologie,"| base_conditions_proj_dep== "biologie."| base_conditions_proj_dep== "biologie)"| base_conditions_proj_dep== "(biologie"| base_conditions_proj_dep== "(biologie)"| base_conditions_proj_dep== "'biologie"|
base_conditions_proj_dep== "bioressources"| base_conditions_proj_dep== "bioressources,"| base_conditions_proj_dep== "bioressources."| base_conditions_proj_dep== "bioressources)"| base_conditions_proj_dep== "(bioressources"| base_conditions_proj_dep== "(bioressources)"| base_conditions_proj_dep== "'bioressources"|
base_conditions_proj_dep== "climatique"| base_conditions_proj_dep== "climatique,"| base_conditions_proj_dep== "climatique."| base_conditions_proj_dep== "climatique)"| base_conditions_proj_dep== "(climatique"| base_conditions_proj_dep== "(climatique)"| base_conditions_proj_dep== "'climatique"|
base_conditions_proj_dep== "écologiques"| base_conditions_proj_dep== "écologiques,"| base_conditions_proj_dep== "écologiques."| base_conditions_proj_dep== "écologiques)"| base_conditions_proj_dep== "(écologiques"| base_conditions_proj_dep== "(écologiques)"| base_conditions_proj_dep== "'écologiques"|
base_conditions_proj_dep== "énergie/environnement"| base_conditions_proj_dep== "énergie/environnement,"| base_conditions_proj_dep== "énergie/environnement."| base_conditions_proj_dep== "énergie/environnement)"| base_conditions_proj_dep== "(énergie/environnement"| base_conditions_proj_dep== "(énergie/environnement)"| base_conditions_proj_dep== "'énergie/environnement"|
base_conditions_proj_dep== "éoliens"| base_conditions_proj_dep== "éoliens,"| base_conditions_proj_dep== "éoliens."| base_conditions_proj_dep== "éoliens)"| base_conditions_proj_dep== "(éoliens"| base_conditions_proj_dep== "(éoliens)"| base_conditions_proj_dep== "'éoliens"|
base_conditions_proj_dep== "eurovélo"| base_conditions_proj_dep== "eurovélo,"| base_conditions_proj_dep== "eurovélo."| base_conditions_proj_dep== "eurovélo)"| base_conditions_proj_dep== "(eurovélo"| base_conditions_proj_dep== "(eurovélo)"| base_conditions_proj_dep== "'eurovélo"|
base_conditions_proj_dep== "éolien"| base_conditions_proj_dep== "éolien,"| base_conditions_proj_dep== "éolien."| base_conditions_proj_dep== "éolien)"| base_conditions_proj_dep== "(éolien"| base_conditions_proj_dep== "(éolien)"| base_conditions_proj_dep== "'éolien"|
base_conditions_proj_dep== "eurovéloroute"| base_conditions_proj_dep== "eurovéloroute,"| base_conditions_proj_dep== "eurovéloroute."| base_conditions_proj_dep== "eurovéloroute)"| base_conditions_proj_dep== "(eurovéloroute"| base_conditions_proj_dep== "(eurovéloroute)"| base_conditions_proj_dep== "'eurovéloroute"|
base_conditions_proj_dep== "hydraulique"| base_conditions_proj_dep== "hydraulique,"| base_conditions_proj_dep== "hydraulique."| base_conditions_proj_dep== "hydraulique)"| base_conditions_proj_dep== "(hydraulique"| base_conditions_proj_dep== "(hydraulique)"| base_conditions_proj_dep== "'hydraulique"|
base_conditions_proj_dep== "hydrauliques"| base_conditions_proj_dep== "hydrauliques,"| base_conditions_proj_dep== "hydrauliques."| base_conditions_proj_dep== "hydrauliques)"| base_conditions_proj_dep== "(hydrauliques"| base_conditions_proj_dep== "(hydrauliques)"| base_conditions_proj_dep== "'hydrauliques"|
base_conditions_proj_dep== "l'écosystème"| base_conditions_proj_dep== "l'écosystème,"| base_conditions_proj_dep== "l'écosystème."| base_conditions_proj_dep== "l'écosystème)"| base_conditions_proj_dep== "(l'écosystème"| base_conditions_proj_dep== "(l'écosystème)"| base_conditions_proj_dep== "'l'écosystème"|
base_conditions_proj_dep== "l'environnement"| base_conditions_proj_dep== "l'environnement,"| base_conditions_proj_dep== "l'environnement."| base_conditions_proj_dep== "l'environnement)"| base_conditions_proj_dep== "(l'environnement"| base_conditions_proj_dep== "(l'environnement)"| base_conditions_proj_dep== "'l'environnement"|
base_conditions_proj_dep== "quai"| base_conditions_proj_dep== "quai,"| base_conditions_proj_dep== "quai."| base_conditions_proj_dep== "quai)"| base_conditions_proj_dep== "(quai"| base_conditions_proj_dep== "(quai)"| base_conditions_proj_dep== "'quai"|
base_conditions_proj_dep== "température"| base_conditions_proj_dep== "température,"| base_conditions_proj_dep== "température."| base_conditions_proj_dep== "température)"| base_conditions_proj_dep== "(température"| base_conditions_proj_dep== "(température)"| base_conditions_proj_dep== "'température"|
base_conditions_proj_dep== "températures"| base_conditions_proj_dep== "températures,"| base_conditions_proj_dep== "températures."| base_conditions_proj_dep== "températures)"| base_conditions_proj_dep== "(températures"| base_conditions_proj_dep== "(températures)"| base_conditions_proj_dep== "'températures"|
base_conditions_proj_dep== "végétale"| base_conditions_proj_dep== "végétale,"| base_conditions_proj_dep== "végétale."| base_conditions_proj_dep== "végétale)"| base_conditions_proj_dep== "(végétale"| base_conditions_proj_dep== "(végétale)"| base_conditions_proj_dep== "'végétale"|
base_conditions_proj_dep== "végétaux"| base_conditions_proj_dep== "végétaux,"| base_conditions_proj_dep== "végétaux."| base_conditions_proj_dep== "végétaux)"| base_conditions_proj_dep== "(végétaux"| base_conditions_proj_dep== "(végétaux)"| base_conditions_proj_dep== "'végétaux"|
base_conditions_proj_dep== "végétal"| base_conditions_proj_dep== "végétal,"| base_conditions_proj_dep== "végétal."| base_conditions_proj_dep== "végétal)"| base_conditions_proj_dep== "(végétal"| base_conditions_proj_dep== "(végétal)"| base_conditions_proj_dep== "'végétal"|
base_conditions_proj_dep== "végétales"| base_conditions_proj_dep== "végétales,"| base_conditions_proj_dep== "végétales."| base_conditions_proj_dep== "végétales)"| base_conditions_proj_dep== "(végétales"| base_conditions_proj_dep== "(végétales)"| base_conditions_proj_dep== "'végétales"|
base_conditions_proj_dep== "végétation"| base_conditions_proj_dep== "végétation,"| base_conditions_proj_dep== "végétation."| base_conditions_proj_dep== "végétation)"| base_conditions_proj_dep== "(végétation"| base_conditions_proj_dep== "(végétation)"| base_conditions_proj_dep== "'végétation"|
base_conditions_proj_dep== "vélo"| base_conditions_proj_dep== "vélo,"| base_conditions_proj_dep== "vélo."| base_conditions_proj_dep== "vélo)"| base_conditions_proj_dep== "(vélo"| base_conditions_proj_dep== "(vélo)"| base_conditions_proj_dep== "'vélo"|
base_conditions_proj_dep== "véloroutes"| base_conditions_proj_dep== "véloroutes,"| base_conditions_proj_dep== "véloroutes."| base_conditions_proj_dep== "véloroutes)"| base_conditions_proj_dep== "(véloroutes"| base_conditions_proj_dep== "(véloroutes)"| base_conditions_proj_dep== "'véloroutes"|
base_conditions_proj_dep== "véloroute"| base_conditions_proj_dep== "véloroute,"| base_conditions_proj_dep== "véloroute."| base_conditions_proj_dep== "véloroute)"| base_conditions_proj_dep== "(véloroute"| base_conditions_proj_dep== "(véloroute)"| base_conditions_proj_dep== "'véloroute"|
base_conditions_proj_dep== "veloroute"| base_conditions_proj_dep== "veloroute,"| base_conditions_proj_dep== "veloroute."| base_conditions_proj_dep== "veloroute)"| base_conditions_proj_dep== "(veloroute"| base_conditions_proj_dep== "(veloroute)"| base_conditions_proj_dep== "'veloroute"|
base_conditions_proj_dep== "éolien"| base_conditions_proj_dep== "éolien,"| base_conditions_proj_dep== "éolien."| base_conditions_proj_dep== "éolien)"| base_conditions_proj_dep== "(éolien"| base_conditions_proj_dep== "(éolien)"| base_conditions_proj_dep== "'éolien" ,1,0)
## Replace NAs in the keyword-indicator matrix by 0 ----
coeff_projets_verts_dep[is.na(coeff_projets_verts_dep)] <- 0
## Row sums: number of "green" keyword hits per project
coeff_projets_verts_2_dep <- rowSums(coeff_projets_verts_dep)
## Pull the project title and summary columns from the main table
projets_dep <- base_a_dep$`Intitulé du projet`
descr_dep <- base_a_dep$`Résumé de l'opération`
## Verification table: title, summary and keyword score side by side
projets_select_dep <- data.frame(projets_dep, descr_dep, coeff_projets_verts_2_dep)
## Export the verification table
write.csv2(projets_select_dep,"/projet_select_dep.csv" )
# Merge the different tables ----
## Import the manually corrected pre-selection of public institutions.
## BUG FIX: this assignment previously had no right-hand side, so R silently
## parsed it together with the next statement as a chained assignment
## (etablissements_public_select_corr_dep = tableau_select_dep = data.frame(...)),
## which then failed because the columns read from
## etablissements_public_select_corr_dep did not exist yet.
## TODO(review): confirm the file name/path of the corrected CSV export.
etablissements_public_select_corr_dep <-
  read.csv2("/etablissements_public_select_corr_dep.csv")
## Merge ----
## Combine the corrected beneficiary flags with the raw project columns
## and the keyword-score table built above.
tableau_select_dep <- data.frame(etablissements_public_select_corr_dep$beneficiaires_dep,
                                 etablissements_public_select_corr_dep$oui_non_1,
                                 base_a_dep$`Code postal du bénéficaire`,
                                 base_a_dep$`Date de début de l'opération`,
                                 base_a_dep$`Date de fin de l'opération`,
                                 base_a_dep$`Montant UE programmé`,
                                 base_a_dep$`Total des dépenses éligibles`,
                                 projets_select_dep
                                 )
View(tableau_select_dep)
## Export the final summary table
write.csv2(tableau_select_dep, "/tableau_recapitulatif_dep.csv")
|
2f819e2f5a5beda15255bb80c820d36b927cfb0e | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /immer/R/immer_ccml.R | cef03851934ff6c0a31cd595da2653ca3e0c6884 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,998 | r | immer_ccml.R | ## File Name: immer_ccml.R
## File Version: 0.17
# Composite (pairwise) conditional maximum likelihood estimation for a
# polytomous IRT model; returns an object of class "immer_ccml".
#
# Arguments:
#   dat      : N x I matrix/data frame of item responses in categories 0..K (NA allowed)
#   weights  : optional vector of N person weights (defaults to unit weights)
#   irtmodel : model label, default "PCM" -- NOTE(review): not referenced in this body
#   A        : design array for item parameters; dim(A)[3] basis parameters,
#              named via dimnames(A)[[3]]
#   b_fixed  : optional I x (K+1) matrix of fixed item intercepts (defaults to zeros)
#   control  : control list passed through to stats::nlminb()
immer_ccml <- function( dat, weights=NULL, irtmodel="PCM", A=NULL, b_fixed=NULL, control=NULL )
{
    time <- list( start=Sys.time() )
    CALL <- match.call()
    #-- data processing
    dat0 <- dat    # NOTE(review): copy of the input kept but unused below
    I <- ncol(dat)
    dat <- as.matrix(dat)
    dat_resp <- 1 - is.na(dat)    # response indicator matrix: 1 observed, 0 missing
    dat[ is.na(dat) ] <- 0        # recode missing to 0; masked by dat_resp where needed
    maxK <- apply( dat, 2, max )  # maximum observed category per item
    K <- max(maxK)                # overall maximum category
    N <- nrow(dat)
    if (is.null(weights)){
        weights <- rep(1,N)       # default: unit weight for every person
    }
    W <- sum(weights)             # total weight (effective sample size)
    #-- count frequencies: pairwise sufficient statistics for the composite likelihood
    dfr <- immer_ccml_proc_freq( dat=dat, dat_resp=dat_resp, K=K, weights=weights )
    dfr <- as.data.frame(dfr)
    colnames(dfr) <- c("ll_index", "item1", "item2", "score", "cat1", "cat2", "n")
    dfr <- dfr[ dfr$ll_index > 0, ]    # keep only rows that contribute to the likelihood
    sz <- rowsum( dfr$n, dfr$ll_index )
    dfr$ntot <- sz[dfr$ll_index]       # total count within each likelihood term
    #-- create design matrix if not provided
    #... INCLUDE IT LATER
    #-- initial values xsi: all basis parameters start at zero
    xsi <- rep( 0, dim(A)[3] )
    names(xsi) <- dimnames(A)[[3]]
    if ( is.null(b_fixed) ){
        b_fixed <- matrix(0, nrow=I, ncol=K+1)
    }
    #-- preparation optimization: flatten the design array and shift indices
    #   to 0-based form for the helper routines
    ll_index1 <- dfr$ll_index - 1
    par0 <- as.vector(xsi)
    A_ <- as.vector(A)
    item10 <- dfr$item1 - 1
    item20 <- dfr$item2 - 1
    cat1 <- dfr$cat1
    cat2 <- dfr$cat2
    max_ll_index <- max(dfr$ll_index)
    n <- dfr$n
    ntot <- dfr$ntot
    #- define optimization function (objective minimized by nlminb;
    #  the deviance below is computed as 2 * objective)
    opt_fct <- function(par){
        immer_ccml_opt_function_par( b_fixed=b_fixed, A_=A_, par=par, ll_index1=ll_index1,
                item10=item10, item20=item20, cat1=cat1, cat2=cat2, n=n, ntot=ntot,
                max_ll_index=max_ll_index )
    }
    #- define gradient of the objective for the same parameter vector
    grad_fct <- function(par){
        immer_ccml_gradient_par( b_fixed=b_fixed, A_=A_, par=par, ll_index1=ll_index1, item10=item10, item20=item20,
                    cat1=cat1, cat2=cat2, n=n, ntot=ntot, max_ll_index=max_ll_index )
    }
    #--- optimization via stats::nlminb with analytic gradient
    res <- nlminb_result <- stats::nlminb( start=par0, objective=opt_fct, gradient=grad_fct, control=control )
    par <- coef <- res$par
    names(par) <- names(coef) <- names(xsi)
    objective <- res$objective
    #-- calculate intercept matrix b from the estimated basis parameters
    b <- immer_ccml_calc_item_intercepts( b_fixed=b_fixed, A_=A_, par=par )
    #-- calculate standard errors: J (cross-product) and H (observed) matrices
    #   evaluated at the estimate, with numerical step h
    res <- immer_ccml_se( b_fixed=b_fixed, A_=A_, par=par, ll_index1=ll_index1, item10=item10, item20=item20,
                cat1=cat1, cat2=cat2, n=n, ntot=ntot, max_ll_index=max_ll_index, h=1e-4 )
    J <- res$xpd_mat
    H <- res$obs_mat
    colnames(J) <- rownames(J) <- names(xsi)
    colnames(H) <- rownames(H) <- names(xsi)
    J1 <- MASS::ginv(J)
    V <- H %*% J1 %*% H    # sandwich-form covariance H %*% ginv(J) %*% H
    se <- sqrt(diag(V))
    #-- information criteria (composite-likelihood AIC/BIC with trace penalty)
    ic <- list( objective=objective, np=length(xsi), N=N, I=I )
    H1 <- MASS::ginv(H)
    tr_HJ <- immer_trace( J %*% H1 )
    ic$dev <- 2*ic$objective
    ic$CLAIC <- ic$dev + 2*tr_HJ
    ic$CLBIC <- ic$dev + log(ic$N)*tr_HJ
    ic$R <- NA
    ic$ND <- NA
    #-- output xsi parameters: estimates with standard errors
    xsi_out <- data.frame( est=coef, se=se )
    rownames(xsi_out) <- names(xsi)
    #-- output item parameters: per-category intercepts plus weighted item means
    item <- b[,-1,drop=FALSE]
    colnames(item) <- paste0("Cat", 1:K)
    M <- colSums( dat * dat_resp * weights ) / colSums( dat_resp * weights )
    item <- data.frame( item=colnames(dat), M=M, item )
    #--- output: collect results and timing into the return object
    time$end <- Sys.time()
    time$diff <- time$end - time$start
    description <- "Composite Conditional Maximum Likelihood Estimation"
    res <- list( coef=coef, vcov=V, se=se, b=b, objective=objective, nlminb_result=nlminb_result, A=A,
            weights=weights, b_fixed=b_fixed, K=K, maxK=maxK, N=N, W=W, ic=ic, suff_stat=dfr,
            xsi_out=xsi_out, item=item, time=time,
            CALL=CALL, description=description)
    class(res) <- "immer_ccml"
    return(res)
}
|
3370ef96039acfdb342a9b6422290ef66713d6fa | 2df6af2843c19136e94f126045d47b4f0bbc4749 | /log_sad.R | c93221b80efe5ca0d9a4f0f71d55c7b66102941c | [] | no_license | Bio3SS/Exponential_figures | 1e79041ffb0d1037250dd1a6172f2074c4958f09 | 42ae3a0134d6f275ff78dafc25a540abfde1fd9e | refs/heads/master | 2023-04-07T12:20:35.608485 | 2023-03-22T09:52:52 | 2023-03-22T09:52:52 | 48,778,134 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 53 | r | log_sad.R | geometricPlot(lam=0.75, ylab="Moths", logscale=TRUE)
|
02281ff56c6311aee9c2659850d567843f261f13 | d330864571f3214030efda9e061e075788da7e61 | /Clase 04-20/Ejercicios.R | a237946468d8266702140a32a53c60cd53175071 | [] | no_license | daniel-mahmoodi/EstadisticaII | b8cbe4ed2412e404a8a3b72ec7909c9d57064679 | 9ee4fbfa208675db8ca1821e8e9b62f9c4186a2f | refs/heads/main | 2023-06-09T03:31:57.291223 | 2021-06-24T20:43:53 | 2021-06-24T20:43:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,741 | r | Ejercicios.R | # 1- Remítase al conjunto de datos 04 – Births (disponible en el aula virtual
# (continued) Refer to the "04 - Births" data set and run the tests
# needed to answer the questions below.
# a) Are newborn weights normally distributed?
library(readxl)
library(nortest)
Births <- read_excel("Documents/2021-1/EstadisticaII/Clase 04-20/04 - Births.xlsx")
View(Births)
# Lilliefors (Kolmogorov-Smirnov) normality test on birth weight
lillie.test(Births$`BIRTH WEIGHT`)
# b) Is the proportion of female babies equal to 50%?
# Recode 0 as "Mujer" (female) and 1 as "Hombre" (male)
Births$`GENDER (1=M)` <- factor(Births$`GENDER (1=M)`, levels = c(0, 1), labels = c("Mujer", "Hombre"))
total <- length(Births$`GENDER (1=M)`)
print(Births$`GENDER (1=M)`)
# Count the female births.
# BUG FIX: the original for-loop tested `if (val == "Mujer")` element by
# element, which errors on a missing value; the vectorized sum gives the
# same count and tolerates NA.
fem <- sum(Births$`GENDER (1=M)` == "Mujer", na.rm = TRUE)
print(fem)
# Two-sided one-sample proportion test against p = 0.5
prop.test(fem, total, p = 0.50, alternative = "two.sided", conf.level = 0.95)
# c) Do births occur with the same frequency on every day?
# Under the null hypothesis the frequency is the same each day,
# so the relative frequency per day is 1/7.
probs <- rep(1 / 7, 7)
# Frequency table of admission days, as required by chisq.test()
# (renamed from `table` so base::table() is not masked)
freq_admitted <- with(Births, table(ADMITTED))
print(freq_admitted) # observed frequencies
chisq.test(freq_admitted, p = probs) # chi-squared goodness-of-fit test
# d) Are insurance and admission day independent variables?
# Build the contingency table
tabla_contingencia <- xtabs(~INSURANCE+ADMITTED, data = Births)
print(tabla_contingencia)
# Chi-squared test of independence (no continuity correction)
test <- chisq.test(tabla_contingencia, correct = FALSE)
print(test)
print("Expected counts: ")
print(test$expected)
|
809540d0e8748ca454081110581d4b45ce42ffef | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/RobustGaSP/man/pred_rgasp.Rd | 96b136ed4f651f37535795516485850fe6d3c14a | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,302 | rd | pred_rgasp.Rd | \name{pred_rgasp}
\alias{pred_rgasp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Prediction for robust GaSP model
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
A function to make prediction on robust GaSP models after the robust GaSP model has been constructed.
}
\usage{
pred_rgasp(beta, nu, input, X, zero_mean,output, testing_input,
X_testing, L, LX, theta_hat, sigma2_hat,
q_025, q_975, r0, kernel_type, alpha,method,interval_data)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{beta}{
%% ~~Describe \code{beta} here~~
inverse-range parameters.
}
\item{nu}{
%% ~~Describe \code{nu} here~~
noise-variance ratio parameter.
}
\item{input}{
%% ~~Describe \code{input} here~~
input matrix.
}
\item{X}{
%% ~~Describe \code{X} here~~
the mean basis function i.e. the trend function.
}
\item{zero_mean}{
%% ~~Describe \code{zero_mean} here~~
The mean basis function is zero or not.
}
\item{output}{
%% ~~Describe \code{output} here~~
output matrix.
}
\item{testing_input}{
%% ~~Describe \code{testing_input} here~~
testing input matrix.
}
\item{X_testing}{
%% ~~Describe \code{X_testing} here~~
mean/trend matrix of testing inputs.
}
\item{L}{
%% ~~Describe \code{L} here~~
a lower triangular matrix for the Cholesky decomposition of \code{R}, the correlation matrix.
}
\item{LX}{
%% ~~Describe \code{LX} here~~
a lower triangular matrix for the Cholesky decomposition of
\ifelse{html}{\out{X<sup>t</sup>R<sup>-1</sup>X}}{\eqn{X^t R^{-1} X}{X^t R^{-1} X}}.
}
\item{theta_hat}{
%% ~~Describe \code{theta_hat} here~~
estimated mean/trend parameters.
}
\item{sigma2_hat}{
%% ~~Describe \code{sigma2_hat} here~~
estimated variance parameter.
}
\item{q_025}{
%% ~~Describe \code{qt_025} here~~
0.025 quantile of \code{t} distribution.
}
\item{q_975}{
%% ~~Describe \code{qt_975} here~~
0.975 quantile of \code{t} distribution.
}
\item{r0}{
%% ~~Describe \code{r0} here~~
a matrix of absolute difference between inputs and testing inputs.
}
\item{kernel_type}{
%% ~~Describe \code{kernel_type} here~~
type of kernel. \code{matern_3_2} and \code{matern_5_2} are \code{Matern kernel} with roughness parameter 3/2 and 5/2 respectively. \code{pow_exp} is power exponential kernel with roughness parameter alpha. If \code{pow_exp} is to be used, one needs to specify its roughness parameter alpha.
}
\item{alpha}{
%% ~~Describe \code{alpha} here~~
Roughness parameters in the kernel functions.
}
\item{method}{
method of parameter estimation. \code{post_mode} means the marginal posterior mode is used for estimation. \code{mle} means the maximum likelihood estimation is used. \code{mmle} means the maximum marginal likelihood estimation is used. The \code{post_mode} is the default method.
}
\item{interval_data}{
a boolean value. If \code{T}, the interval of the data will be calculated. If \code{F}, the interval of the mean of the data will be calculated.
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
A list of 4 elements. The first is a vector for predictive mean for testing inputs. The second is a vector for lower quantile for 95\% posterior credible interval and the third is the upper quantile for 95\% posterior credible interval for these testing inputs. The last is a vector of standard deviation of each testing inputs.
}
\references{
Mengyang Gu. (2016). Robust Uncertainty Quantification and Scalable Computation for Computer Models with Massive Output. Ph.D. thesis. Duke University.
}
\author{
%% ~~who you are~~
\packageAuthor{RobustGaSP}
Maintainer: \packageMaintainer{RobustGaSP}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{predict.rgasp}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{internal} |
00ff3b0162b0e6d0bca6db04fde8a9e90c3b076f | 866156074cc04714bd1063dfb057e07b9643cb14 | /R/add_batchim.R | 774bb8d05f65db85b25b04c59dfaa2a236c68856 | [
"MIT"
] | permissive | mrchypark/glueko | 2d0525f5eac43c0a41fef8d6d17b101605ddb23e | 87ecb606bf246dac888f7e2d744b5f7ecce0a189 | refs/heads/master | 2020-04-04T05:14:04.793602 | 2018-11-05T20:40:53 | 2018-11-05T20:40:53 | 155,738,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 154 | r | add_batchim.R | #' add liul batchim
#'
#' @param textko text in korean utf8
#' @return the result of \code{bathim_done(textko, 8)}
#' @export
add_liul_done <- function(textko){
  # final consonant code 8 corresponds to "liul"; delegate to the
  # generic batchim helper
  bathim_done(textko, 8)
}
|
80deb6b7419d4fd5cb4a34ac8e8ea2c55905885b | 63e94f95af05f1ba975f04a18d87f87369f6f577 | /02 Data Wrangling/DataWrangling6 - chart.R | a845f8b05e6a83c494f3098b08684af1c4cc8597 | [] | no_license | ryudkin0/DV_FinalProject | ac98b1d010c837ea63f940b3ece81ace39e14152 | 703031b63cbbdb8d064df847871b3d4eeda857c0 | refs/heads/master | 2021-01-02T22:32:00.264918 | 2015-05-13T20:52:42 | 2015-05-13T20:52:42 | 35,530,648 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 363 | r | DataWrangling6 - chart.R | DATA3 <- MBSALL %>% mutate(RATE_DIST = cume_dist(ORIG_RATE)) %>% filter(RATE_DIST >= .5) %>%
arrange(RATE_DIST) %>% left_join(UNEMPLOYMENT,by = 'STATE') %>%
select(STATE,INTEREST_RATE = ORIG_RATE,UNEMPLOYMENT_RATE) %>% tbl_df %>%
group_by(STATE) %>% summarise(INTEREST_RATE_AVG = mean(INTEREST_RATE),UNEMPLOYMENT_RATE_AVG = mean(UNEMPLOYMENT_RATE))
|
5edbe8c7e04d7119ac1f3382ee638be4d799d8d4 | f25da8bde5fbb1b47a44038a17df25ae755f57f3 | /R/lcmw.R | e089bfebd4111e48a2e81de58983a37d0a156281 | [] | no_license | jjvanderwal/SDMTools | 1141fbe616dc2aa5e99825f24c9f2a1c803b926b | 2eee12d07557e2b22cd31a302c92ddb820eb4859 | refs/heads/master | 2021-05-15T01:28:49.306927 | 2014-08-05T09:30:34 | 2014-08-05T09:30:34 | 15,891,285 | 6 | 5 | null | 2018-10-10T13:11:53 | 2014-01-14T03:57:10 | R | UTF-8 | R | false | false | 4,120 | r | lcmw.R | #' Least Cost Moving Windows Calculation
#'
#' This is a moving window that for each cell returns the minimum 'cost' based
#' on surrounding data cells and some dispersal distance cost.
#'
#' This method moves over the matrix of values, summing the moving window cost
#' \code{mw} and the matrix \code{mat}, returning the minimum cost value. This
#' was created to estimate the least cost path through time for all cells in a
#' matrix (see example).
#'
#' @param mat a matrix of values that can be based on a raster dataset. Lower
#' values should represent lower cost. The matrix can be a raster of class
#' 'asc' (adehabitat package), 'RasterLayer' (raster package) or
#' 'SpatialGridDataFrame' (sp package)
#' @param mw a distance-cost matrix to be applied to each cell of 'mat'. This
#' matrix can be dispersal costs. Lower values should represent lower cost.
#' @param mnc an integer value representing the radius for 'mw' in number of
#' cells.
#' @return A matrix of values of the same dimensions and class as input
#' \code{mat}
#' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com}
#' @examples
#'
#'
#' #create a simple object of class 'asc'
#' tasc = as.asc(matrix(1:100,nr=10,nc=10)); print(tasc)
#'
#' #show the input matrix
#' print(tasc[1:10,1:10])
#'
#' #vary the moving windows
#'
#' ###no cost window of 2 cell radius
#' tcost = matrix(0,nr=5,nc=5); print(tcost)
#' out = lcmw(tasc, tcost, 2); print(out[1:10,1:10])
#'
#' ###no cost with a circular radius of 2
#' tcost = matrix(NA,nr=5,nc=5)
#' #populate the distances
#' for (y in 1:5){
#' for (x in 1:5){
#' tcost[y,x] = sqrt((3-y)^2 + (3-x)^2)
#' }
#' }
#'
#' #remove distance values > max.num.cells
#' tcost[which(tcost>2)]=NA
#'
#' #no cost matrix
#' tcost1 = tcost; tcost1[is.finite(tcost1)]=1; print(tcost1)
#' out = lcmw(tasc, tcost1, 2); print(out[1:10,1:10])
#'
#' #linear cost
#' tcost = tcost/2; print(tcost)
#' out = lcmw(tasc, tcost, 2); print(out[1:10,1:10])
#'
#'
#' @export
#' @useDynLib SDMTools getmin movewindow
# Least-cost moving window: for each cell of 'mat', the C routine
# "movewindow" returns the minimum over all window shifts of
# (shifted cell value + shift cost). Interface unchanged: lcmw(mat, mw, mnc).
lcmw <-
function(mat,mw,mnc) {
	# Check the input class so the result can be returned in the same form.
	# FIX: inherits() replaces class(mat) == 'asc' -- class() may return a
	# vector of length > 1, which is an error inside if() since R 4.2.
	if (inherits(mat, 'asc')) {
		attrib = attributes(mat)
	} else if (inherits(mat, 'RasterLayer')) {
		attrib = mat; mat = asc.from.raster(mat)
	} else if (inherits(mat, 'SpatialGridDataFrame')) {
		attrib = mat; mat = asc.from.sp(mat)
	} else {
		attrib = attributes(mat)
	}
	# Enumerate every (Y, X) shift covered by the moving window...
	vals = expand.grid(Y=-mnc:mnc,X=-mnc:mnc)
	# ...read the cost of each shift from the window matrix and drop
	# shifts whose cost is NA (outside the window footprint)
	vals$cost = mw[(mnc+1)+cbind(vals$Y,vals$X)]
	vals = na.omit(vals)
	nrow.vals = nrow(vals)
	if (nrow.vals < 5000) {
		# Small windows: a single C call does everything.
		# NOTE(review): this branch returns the .Call result directly and
		# skips the class restoration below -- presumably the C routine
		# preserves the input's attributes; confirm against the C source.
		return(.Call("movewindow",mat,as.integer(vals$X),as.integer(vals$Y),as.numeric(vals$cost),PACKAGE='SDMTools'))
	} else {
		# Large windows: process the shifts in chunks of 2000 and keep the
		# element-wise minimum across chunks to bound memory use.
		num.subsets = nrow.vals%/%2000
		# first chunk of 2000 shifts
		tmin = 1; tmax = 2000
		# print a progress status
		cat('0%...')
		out = .Call("movewindow",mat,as.integer(vals$X[tmin:tmax]),as.integer(vals$Y[tmin:tmax]),as.numeric(vals$cost[tmin:tmax]),PACKAGE='SDMTools')
		# remaining chunks: 2001 .. nrow.vals
		for (i in 1:num.subsets){
			if (i<num.subsets){
				tmin = i*2000+1; tmax = (i+1)*2000
			} else {
				tmin = i*2000+1; tmax = nrow.vals
			}
			cat(round(tmin/nrow.vals*100,1),'%...',sep='')
			out2 = .Call("movewindow",mat,as.integer(vals$X[tmin:tmax]),as.integer(vals$Y[tmin:tmax]),as.numeric(vals$cost[tmin:tmax]),PACKAGE='SDMTools')
			# fold this chunk's result into the running minimum
			out = .Call("getmin",out,out2,PACKAGE='SDMTools')
			# sanity check: the C call must not change the matrix dimensions
			if (dim(out)[1] != dim(mat)[1] || dim(out)[2] != dim(mat)[2]) print('error in dimensions...check output')
		}
		cat('done\n')
	}
	# Return the result in the same class as the input
	if (inherits(attrib, 'RasterLayer')) {
		attrib = setValues(attrib, as.vector(t(t(unclass(out))[dim(out)[2]:1,]))); return(attrib)
	} else if (inherits(attrib, 'SpatialGridDataFrame')) {
		attrib@data[1] = as.vector(unclass(out)[,dim(out)[2]:1]); return(attrib)
	} else {
		attributes(out) = attrib; return(out)
	}
}
|
857f7d571d964516c2135ebb6fbef9e624a63202 | fd2a324a9505ed29e6136a06216edce999fa97a1 | /R/GLMM_MCMCwrapper.R | 7ebf93dd0fb954a57d1e1756448740819e206f5c | [] | no_license | cran/mixAK | 995c88ac9b1f70ab2dac51b4fc1347b9b1356eed | adc4c2229d8ad3573e560fd598158e53e5d1da76 | refs/heads/master | 2022-09-27T10:45:02.953514 | 2022-09-19T13:46:13 | 2022-09-19T13:46:13 | 17,697,529 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,228 | r | GLMM_MCMCwrapper.R | ##
## PURPOSE: Generalized linear mixed model with possibly several response variables
## and normal mixtures in the distribution of random effects
## - wrapper to main simulation to allow vectorized call and parallel computation
##
## AUTHOR: Arnost Komarek (LaTeX: Arno\v{s}t Kom\'arek)
## arnost.komarek[AT]mff.cuni.cz
##
## LOG: 20111102 created
## 20170315 .C call uses registered routines
##
## FUNCTIONS: GLMM_MCMCwrapper
##
## ======================================================================
## *************************************************************
## GLMM_MCMCwrapper
## *************************************************************
## -------------------------------------------------------------------------
## GLMM_MCMCwrapper: run ONE MCMC chain for a GLMM with a normal-mixture
## distribution of random effects and post-process the sampled chains into
## an object of class "GLMM_MCMC". Intended to be called (possibly in
## parallel) once per chain by GLMM_MCMC.
##
## chain        index of the chain: selects the chain-specific elements of
##              init.alpha, init.b, init.eps
## data         list with design/dimension information used here: dimb, LTb,
##              lalpha, R, dist, p, q, fixed.intercept, random.intercept,
##              naamLTb, name.response, y
## prior.alpha, init.alpha  prior and per-chain initials for fixed effects
## scale.b, prior.b, init.b shift/scale, prior and per-chain initials for
##              random effects (prior.b$Kmax = max number of mixture
##              components, prior.b$priorK = "fixed" or trans-dimensional)
## prior.eps, init.eps      prior and per-chain initials for residual
##              std. deviations (continuous responses only)
## Cpar         data pre-formatted for the C routine (Y_c, Y_d, X, Z, I, n,
##              R_cd, sumCn, ...)
## nMCMC        named vector; elements "keep" and "thin" are used below
## store        logical vector; store["b"] keeps the full chain of random
##              effects
## keep.chains  keep the full MCMC chains in the returned object?
## silent       suppress progress output?
## -------------------------------------------------------------------------
GLMM_MCMCwrapper <- function(chain=1, data,
                             prior.alpha, init.alpha,
                             scale.b, prior.b, init.b,
                             prior.eps, init.eps,
                             Cpar, nMCMC, store, keep.chains, silent)
{
  thispackage <- "mixAK"
  ########## ========== Parameters from inits ========== ##########
  ########## =========================================== ##########
  ## Pull the chain-specific initial values and flatten them into the
  ## vectors expected by the C sampler (mixture parameters padded with
  ## zeros up to Kmax components).
  Csigma_eps <- init.eps[[chain]]$sigma
  CgammaInv_eps <- init.eps[[chain]]$gammaInv
  if (data$dimb){
    CK_b <- init.b[[chain]]$K
    Cw_b <- c(init.b[[chain]]$w, rep(0, prior.b$Kmax - init.b[[chain]]$K))
    if (data$dimb == 1){
      Cmu_b <- c(init.b[[chain]]$mu, rep(0, prior.b$Kmax - init.b[[chain]]$K))
      CLi_b <- c(init.b[[chain]]$Li, rep(0, prior.b$Kmax - init.b[[chain]]$K))
    }else{
      Cmu_b <- c(t(init.b[[chain]]$mu), rep(0, data$dimb*(prior.b$Kmax - init.b[[chain]]$K)))
      CLi_b <- c(init.b[[chain]]$Li, rep(0, data$LTb*(prior.b$Kmax - init.b[[chain]]$K)))
    }
    CgammaInv_b <- init.b[[chain]]$gammaInv
    Cdf_b <- init.b[[chain]]$df
    ## component allocations are passed to C 0-based
    Cr_b <- init.b[[chain]]$r - 1
    Cbb <- as.numeric(t(init.b[[chain]]$b))
  }else{
    ## no random effects: pass scalar placeholders
    CK_b <- 0
    Cw_b <- 0
    Cmu_b <- 0
    CLi_b <- 0
    CgammaInv_b <- 0
    Cdf_b <- 0
    Cr_b <- 0
    Cbb <- 0
  }
  Calpha <- init.alpha[[chain]]
  ########## ========== Some additional parameters ##########
  ########## ===================================== ##########
  ## length of the allocation-count accumulators (only meaningful for a
  ## fixed number of mixture components)
  if (prior.b$priorK == "fixed") lsum_Ir_b <- Cpar$I * CK_b
  else lsum_Ir_b <- 1
  CshiftScale_b <- c(scale.b$shift, scale.b$scale)
  ########## ========== MCMC simulation ========== ##########
  ########## ================================================================== ##########
  if (!silent){
    cat(paste("\nChain number ", chain, "\n==============\n", sep=""))
    cat(paste("MCMC sampling started on ", date(), ".\n", sep=""))
  }
  ## One C call runs the whole chain (burn-in + sampling); sampled chains
  ## come back flattened in the ch* components, iteration-major.
  MCMC <- .C(C_GLMM_MCMC,
             Y_c = as.double(Cpar$Y_c),
             Y_d = as.integer(Cpar$Y_d),
             nonSilent_keepChain_nMCMC_R_cd_dist = as.integer(c(as.integer(!silent), store, nMCMC, Cpar$R_cd, Cpar$dist)),
             I_n = as.integer(c(Cpar$I, Cpar$n)),
             X = as.double(Cpar$X),
             #XtX = as.double(ifit$CXtX),     ### REMOVED ON 21/10/2009, XtX is computed directly in C++
             Z = as.double(Cpar$Z),
             #ZitZi = as.double(ifit$CZitZi), ### REMOVED ON 20/10/2009, ZitZi is computed directly in C++
             p_fI_q_rI = as.integer(Cpar$p_fI_q_rI),
             shiftScale_b = as.double(CshiftScale_b),
             priorDouble_eps = as.double(Cpar$priorDouble_eps),
             priorInt_b = as.integer(Cpar$priorInt_b),
             priorDouble_b = as.double(Cpar$priorDouble_b),
             priorDouble_alpha = as.double(Cpar$priorDouble_alpha),
             tune_scale_alpha_b = as.double(Cpar$tune_scale_alpha_b),
             sigma_eps = as.double(Csigma_eps),
             gammaInv_eps = as.double(CgammaInv_eps),
             K_b = as.integer(CK_b),
             w_b = as.double(Cw_b),
             mu_b = as.double(Cmu_b),
             Q_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax, 1)),
             Sigma_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax, 1)),
             Li_b = as.double(CLi_b),
             gammaInv_b = as.double(CgammaInv_b),
             df_b = as.double(Cdf_b),
             r_b = as.integer(Cr_b),
             r_b_first = integer(Cpar$I),
             alpha = as.double(Calpha),
             b = as.double(Cbb),
             b_first = double(length(Cbb)),
             chsigma_eps = double(ifelse(Cpar$R_cd["R_c"], Cpar$R_cd["R_c"] * nMCMC["keep"], 1)),
             chgammaInv_eps = double(ifelse(Cpar$R_cd["R_c"], Cpar$R_cd["R_c"] * nMCMC["keep"], 1)),
             chK_b = integer(ifelse(data$dimb, nMCMC["keep"], 1)),
             chw_b = double(ifelse(data$dimb, prior.b$Kmax * nMCMC["keep"], 1)),
             chmu_b = double(ifelse(data$dimb, data$dimb * prior.b$Kmax * nMCMC["keep"], 1)),
             chQ_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax * nMCMC["keep"], 1)),
             chSigma_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax * nMCMC["keep"], 1)),
             chLi_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax * nMCMC["keep"], 1)),
             chgammaInv_b = double(ifelse(data$dimb, data$dimb * nMCMC["keep"], 1)),
             chdf_b = double(ifelse(data$dimb, prior.b$Kmax * nMCMC["keep"], 1)),
             chorder_b = integer(ifelse(data$dimb, prior.b$Kmax * nMCMC["keep"], 1)),
             chrank_b = integer(ifelse(data$dimb, prior.b$Kmax * nMCMC["keep"], 1)),
             chMeanData_b = double(ifelse(data$dimb, data$dimb * nMCMC["keep"], 1)),
             chCorrData_b = double(ifelse(data$dimb, data$LTb * nMCMC["keep"], 1)),
             chalpha = double(ifelse(data$lalpha, data$lalpha * nMCMC["keep"], 1)),
             chb = double(ifelse(data$dimb, ifelse(store["b"], Cpar$I * data$dimb * nMCMC["keep"], Cpar$I * data$dimb), 1)),
             chGLMMLogL = double(nMCMC["keep"]),
             chLogL = double(nMCMC["keep"]),
             naccept_alpha = integer(Cpar$R_cd["R_c"] + Cpar$R_cd["R_d"]),
             naccept_b = integer(Cpar$I),
             pm_eta_fixed = double(Cpar$sumCn),
             pm_eta_random = double(Cpar$sumCn),
             pm_meanY = double(Cpar$sumCn),
             pm_stres = double(Cpar$sumCn),
             pm_b = double(ifelse(data$dimb, data$dimb * Cpar$I, 1)),
             pm_w_b = double(ifelse(data$dimb, prior.b$Kmax, 1)),
             pm_mu_b = double(ifelse(data$dimb, data$dimb * prior.b$Kmax, 1)),
             pm_Q_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax, 1)),
             pm_Sigma_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax, 1)),
             pm_Li_b = double(ifelse(data$dimb, data$LTb * prior.b$Kmax, 1)),
             pm_indGLMMLogL = double(Cpar$I),
             pm_indLogL = double(Cpar$I),
             pm_indLogpb = double(Cpar$I),
             sum_Ir_b = integer(lsum_Ir_b),
             sum_Pr_b_b = double(lsum_Ir_b),
             iter = as.integer(0),
             err = as.integer(0),
             PACKAGE=thispackage)
  if (!silent) cat(paste("MCMC sampling finished on ", date(), ".\n", sep=""))
  if (MCMC$err) stop("Something went wrong.")
  ########## ========== State of MCMC (last and first kept) ========== ##########
  ########## ========================================================= ##########
  ## Extract the last sampled state (MCMC$<par>) and the first kept state
  ## (head of MCMC$ch<par>) into named R structures; allocations and orders
  ## come back from C 0-based and are shifted to 1-based here.
  if (data$dimb){
    state.w_b <- as.numeric(MCMC$w_b[1:MCMC$K_b])
    state_first.w_b <- as.numeric(MCMC$chw_b[1:MCMC$chK_b[1]])
    names(state.w_b) <- paste("w", 1:MCMC$K_b, sep="")
    names(state_first.w_b) <- paste("w", 1:MCMC$chK_b[1], sep="")
    state.r_b <- as.numeric(MCMC$r_b + 1)
    state_first.r_b <- as.numeric(MCMC$r_b_first + 1)
    names(state.r_b) <- names(state_first.r_b) <- paste("r", 1:Cpar$I, sep="")
    state.gammaInv_b <- as.numeric(MCMC$gammaInv_b)
    state_first.gammaInv_b <- as.numeric(MCMC$chgammaInv_b[1:data$dimb])
    names(state_first.gammaInv_b) <- names(state.gammaInv_b) <- paste("gammaInv", 1:data$dimb, sep="")
    if (data$dimb == 1){
      ## univariate random effects: Li is a scalar per component and
      ## Sigma = Li^{-2}
      state.mu_b <- as.numeric(MCMC$mu_b[1:MCMC$K_b])
      state_first.mu_b <- as.numeric(MCMC$chmu_b[1:MCMC$chK_b[1]])
      names(state.mu_b) <- paste("mu", 1:MCMC$K_b, sep="")
      names(state_first.mu_b) <- paste("mu", 1:MCMC$chK_b[1], sep="")
      state.Li_b <- as.numeric(MCMC$Li_b[1:MCMC$K_b])
      state_first.Li_b <- as.numeric(MCMC$chLi_b[1:MCMC$chK_b[1]])
      names(state.Li_b) <- paste("Li", 1:MCMC$K_b, sep="")
      names(state_first.Li_b) <- paste("Li", 1:MCMC$chK_b[1], sep="")
      state.Sigma_b <- (1 / state.Li_b)^2
      state_first.Sigma_b <- (1 / state_first.Li_b)^2
      names(state.Sigma_b) <- paste("Sigma", 1:MCMC$K_b, sep="")
      names(state_first.Sigma_b) <- paste("Sigma", 1:MCMC$chK_b[1], sep="")
      state.Q_b <- as.numeric(MCMC$Q_b[1:MCMC$K_b])
      state_first.Q_b <- as.numeric(MCMC$chQ_b[1:MCMC$chK_b[1]])
      names(state.Q_b) <- paste("Q", 1:MCMC$K_b, sep="")
      names(state_first.Q_b) <- paste("Q", 1:MCMC$chK_b[1], sep="")
      state.b <- as.numeric(MCMC$b)
      state_first.b <- as.numeric(MCMC$b_first)
      names(state.b) <- names(state_first.b) <- 1:Cpar$I
    }else{
      ## multivariate random effects: Li holds the lower triangle of the
      ## Cholesky factor of Q = Sigma^{-1}, packed column-wise per component;
      ## Sigma is recovered as (Li Li')^{-1}
      state.mu_b <- matrix(MCMC$mu_b[1:(data$dimb*MCMC$K_b)], ncol=data$dimb, byrow=TRUE)
      state_first.mu_b <- matrix(MCMC$chmu_b[1:(data$dimb*MCMC$chK_b[1])], ncol=data$dimb, byrow=TRUE)
      rownames(state.mu_b) <- paste("j", 1:MCMC$K_b, sep="")
      rownames(state_first.mu_b) <- paste("j", 1:MCMC$chK_b[1], sep="")
      colnames(state.mu_b) <- colnames(state_first.mu_b) <- paste("m", 1:data$dimb, sep="")
      state.Li_b <- as.numeric(MCMC$Li_b[1:(data$LTb*MCMC$K_b)])
      state_first.Li_b <- as.numeric(MCMC$chLi_b[1:(data$LTb*MCMC$chK_b[1])])
      names(state.Li_b) <- paste("Li", rep(1:MCMC$K_b, each=data$LTb), rep(data$naamLTb, MCMC$K_b), sep="")
      names(state_first.Li_b) <- paste("Li", rep(1:MCMC$chK_b[1], each=data$LTb), rep(data$naamLTb, MCMC$chK_b[1]), sep="")
      state.Sigma_b <- matrix(NA, ncol=data$dimb, nrow=data$dimb*MCMC$K_b)
      rownames(state.Sigma_b) <- paste("j", rep(1:MCMC$K_b, each=data$dimb), ".", rep(1:data$dimb, MCMC$K_b), sep="")
      colnames(state.Sigma_b) <- paste("m", 1:data$dimb, sep="")
      for (j in 1:MCMC$K_b){
        tmpSigma <- matrix(0, nrow=data$dimb, ncol=data$dimb)
        tmpSigma[lower.tri(tmpSigma, diag=TRUE)] <- state.Li_b[((j-1)*data$LTb+1):(j*data$LTb)]
        tmpSigma <- tmpSigma %*% t(tmpSigma)
        tmpSigma <- chol2inv(chol(tmpSigma))
        state.Sigma_b[((j-1)*data$dimb+1):(j*data$dimb),] <- tmpSigma
      }
      state_first.Sigma_b <- matrix(NA, ncol=data$dimb, nrow=data$dimb*MCMC$chK_b[1])
      rownames(state_first.Sigma_b) <- paste("j", rep(1:MCMC$chK_b[1], each=data$dimb), ".", rep(1:data$dimb, MCMC$chK_b[1]), sep="")
      colnames(state_first.Sigma_b) <- paste("m", 1:data$dimb, sep="")
      for (j in 1:MCMC$chK_b[1]){
        tmpSigma <- matrix(0, nrow=data$dimb, ncol=data$dimb)
        tmpSigma[lower.tri(tmpSigma, diag=TRUE)] <- state_first.Li_b[((j-1)*data$LTb+1):(j*data$LTb)]
        tmpSigma <- tmpSigma %*% t(tmpSigma)
        tmpSigma <- chol2inv(chol(tmpSigma))
        state_first.Sigma_b[((j-1)*data$dimb+1):(j*data$dimb),] <- tmpSigma
      }
      state.Q_b <- as.numeric(MCMC$Q_b[1:(data$LTb*MCMC$K_b)])
      state_first.Q_b <- as.numeric(MCMC$chQ_b[1:(data$LTb*MCMC$chK_b[1])])
      names(state.Q_b) <- paste("Q", rep(1:MCMC$K_b, each=data$LTb), rep(data$naamLTb, MCMC$K_b), sep="")
      names(state_first.Q_b) <- paste("Q", rep(1:MCMC$chK_b[1], each=data$LTb), rep(data$naamLTb, MCMC$chK_b[1]), sep="")
      state.b <- matrix(MCMC$b, ncol=data$dimb, nrow=Cpar$I, byrow=TRUE)
      state_first.b <- matrix(MCMC$b_first, ncol=data$dimb, nrow=Cpar$I, byrow=TRUE)
      colnames(state.b) <- colnames(state_first.b) <- paste("b", 1:data$dimb, sep="")
      rownames(state.b) <- rownames(state_first.b) <- 1:Cpar$I
    }
    nCompTotal_b<- sum(MCMC$chK_b)
    freqK_b <- table(MCMC$chK_b)
    propK_b <- prop.table(freqK_b)
  }else{
    ## NOTE(review): 'state.gamma_b' here vs 'state.gammaInv_b' in the branch
    ## above -- the names differ; only the dimb > 0 branch values are used later.
    state.w_b <- state.r_b <- state.gamma_b <- state.mu_b <- state.Li_b <- state.Sigma_b <- state.Q_b <- state.b <- 0
    state_first.w_b <- state_first.r_b <- state_first.gamma_b <- state_first.mu_b <- state_first.Li_b <- state_first.Sigma_b <- state_first.Q_b <- state_first.b <- 0
  }
  if (data$lalpha){
    state.alpha <- as.numeric(MCMC$alpha)
    state_first.alpha <- as.numeric(MCMC$chalpha[1:data$lalpha])
    names(state.alpha) <- names(state_first.alpha) <- paste("alpha", 1:data$lalpha, sep="")
  }else{
    state.alpha <- state_first.alpha <- 0
  }
  if (Cpar$R_cd["R_c"]){
    state.sigma_eps <- as.numeric(MCMC$sigma_eps)
    state_first.sigma_eps <- as.numeric(MCMC$chsigma_eps[1:Cpar$R_cd["R_c"]])
    names(state.sigma_eps) <- names(state_first.sigma_eps) <- paste("sigma", 1:Cpar$R_cd["R_c"], sep="")
    state.gammaInv_eps <- as.numeric(MCMC$gammaInv_eps)
    state_first.gammaInv_eps <- as.numeric(MCMC$chgammaInv_eps[1:Cpar$R_cd["R_c"]])
    names(state.gammaInv_eps) <- names(state_first.gammaInv_eps) <- paste("gammaInv", 1:Cpar$R_cd["R_c"], sep="")
  }else{
    state.sigma_eps <- state.gammaInv_eps <- 0
    state_first.sigma_eps <- state_first.gammaInv_eps <- 0
  }
  ########## ========== Performance of MCMC ========== ##########
  ########## ========================================= ##########
  ## Metropolis-Hastings acceptance proportions over all (kept * thin)
  ## sampled iterations.
  prop.accept.alpha <- MCMC$naccept_alpha / (nMCMC["keep"] * nMCMC["thin"])
  if (data$R > 1) names(prop.accept.alpha) <- data$name.response
  prop.accept.b <- MCMC$naccept_b / (nMCMC["keep"] * nMCMC["thin"])
  ########## ========== Create a list to be returned ========== ##########
  ########## ================================================== ##########
  RET <- list(iter = MCMC$iter,
              nMCMC = nMCMC,
              dist = data$dist,
              R = c(Rc=as.numeric(Cpar$R_cd["R_c"]), Rd=as.numeric(Cpar$R_cd["R_d"])),
              p = data$p,
              q = data$q,
              fixed.intercept = data$fixed.intercept,
              random.intercept = data$random.intercept,
              lalpha = data$lalpha,
              dimb = data$dimb,
              prior.alpha = prior.alpha,
              prior.b = prior.b,
              prior.eps = prior.eps)
  if (data$lalpha){
    RET$init.alpha <- init.alpha[[chain]]
    RET$state.first.alpha <- state_first.alpha
    RET$state.last.alpha <- state.alpha
    RET$prop.accept.alpha <- prop.accept.alpha
  }
  if (data$dimb){
    RET$init.b <- init.b[[chain]]
    RET$state.first.b <- list(b = state_first.b,
                              K = as.numeric(MCMC$chK_b[1]),
                              w = state_first.w_b,
                              mu = state_first.mu_b,
                              Sigma = state_first.Sigma_b,
                              Li = state_first.Li_b,
                              Q = state_first.Q_b,
                              gammaInv = state_first.gammaInv_b,
                              r = state_first.r_b)
    RET$state.last.b <- list(b = state.b,
                             K = as.numeric(MCMC$K_b),
                             w = state.w_b,
                             mu = state.mu_b,
                             Sigma = state.Sigma_b,
                             Li = state.Li_b,
                             Q = state.Q_b,
                             gammaInv = state.gammaInv_b,
                             r = state.r_b)
    RET$prop.accept.b <- prop.accept.b
    RET$scale.b <- scale.b
    RET$freqK_b <- freqK_b
    RET$propK_b <- propK_b
  }
  if (Cpar$R_cd["R_c"]){
    RET$init.eps <- init.eps[[chain]]
    RET$state.first.eps <- list(sigma = state_first.sigma_eps,
                                gammaInv = state_first.gammaInv_eps)
    RET$state.last.eps <- list(sigma = state.sigma_eps,
                               gammaInv = state.gammaInv_eps)
  }
  ########## ========== Posterior means of quantities computed in C++ ========== ##########
  ########## =================================================================== ##########
  ## Build one data frame per response with observed values and posterior
  ## means of fitted values / linear predictors. Continuous responses come
  ## first (indexing into Y_c via 'used'), discrete responses follow
  ## (indexing into Y_d via the separate counter 'used2').
  RET$poster.mean.y <- list()
  used <- 0
  s <- 1
  while (s <= Cpar$R_cd["R_c"]){
    ns <- Cpar$n[((s-1)*Cpar$I+1):(s*Cpar$I)]
    index <- (used+1):(used + sum(ns))
    used <- index[length(index)]
    RET$poster.mean.y[[s]] <- data.frame(id = rep(1:Cpar$I, ns),
                                         observed = Cpar$Y_c[index],
                                         fitted = as.numeric(MCMC$pm_meanY[index]),
                                         stres = as.numeric(MCMC$pm_stres[index]),
                                         eta.fixed = as.numeric(MCMC$pm_eta_fixed[index]),
                                         eta.random = as.numeric(MCMC$pm_eta_random[index]))
    s <- s + 1
  }
  used2 <- 0
  while (s <= Cpar$R_cd["R_c"] + Cpar$R_cd["R_d"]){
    ns <- Cpar$n[((s-1)*Cpar$I+1):(s*Cpar$I)]
    index <- (used+1):(used + sum(ns))
    used <- index[length(index)]
    index2 <- (used2+1):(used2 + sum(ns))
    used2 <- index2[length(index2)]
    RET$poster.mean.y[[s]] <- data.frame(id = rep(1:Cpar$I, ns),
                                         observed = Cpar$Y_d[index2],
                                         fitted = as.numeric(MCMC$pm_meanY[index]),
                                         stres = as.numeric(MCMC$pm_stres[index]),
                                         eta.fixed = as.numeric(MCMC$pm_eta_fixed[index]),
                                         eta.random = as.numeric(MCMC$pm_eta_random[index]))
    s <- s + 1
  }
  names(RET$poster.mean.y) <- colnames(data$y)
  if (data$dimb){
    MCMC$pm_b <- matrix(MCMC$pm_b, ncol=data$dimb, byrow=TRUE)
    RET$poster.mean.profile <- as.data.frame(MCMC$pm_b)
    colnames(RET$poster.mean.profile) <- paste("b", 1:data$dimb, sep="")
    RET$poster.mean.profile$Logpb <- as.numeric(MCMC$pm_indLogpb)
    RET$poster.mean.profile$Cond.Deviance <- as.numeric(-2 * MCMC$pm_indLogL)
    RET$poster.mean.profile$Deviance <- as.numeric(-2 * MCMC$pm_indGLMMLogL)
    if (prior.b$priorK == "fixed"){
      ##### I am not sure whether the posterior means (especially of variance components) are useful!
      ##### In any case, they should be used with care
      ##### -----------------------------------------------------------------------------------------
      RET$poster.mean.w_b <- as.numeric(MCMC$pm_w_b)
      names(RET$poster.mean.w_b) <- paste("w", 1:prior.b$Kmax, sep="")
      RET$poster.mean.mu_b <- matrix(MCMC$pm_mu_b, nrow=prior.b$Kmax, ncol=data$dimb, byrow=TRUE)
      rownames(RET$poster.mean.mu_b) <- paste("j", 1:prior.b$Kmax, sep="")
      colnames(RET$poster.mean.mu_b) <- paste("m", 1:data$dimb, sep="")
      RET$poster.mean.Q_b <- RET$poster.mean.Sigma_b <- RET$poster.mean.Li_b <- list()
      for (j in 1:prior.b$Kmax){
        ## unpack the packed lower triangles into full symmetric matrices
        ## (Li stays lower-triangular)
        tmpQ <- matrix(0, nrow=data$dimb, ncol=data$dimb)
        tmpQ[lower.tri(tmpQ, diag=TRUE)] <- MCMC$pm_Q_b[((j-1)*data$LTb+1):(j*data$LTb)]
        tmpQ[upper.tri(tmpQ, diag=FALSE)] <- t(tmpQ)[upper.tri(t(tmpQ), diag=FALSE)]
        RET$poster.mean.Q_b[[j]] <- tmpQ
        tmpSigma <- matrix(0, nrow=data$dimb, ncol=data$dimb)
        tmpSigma[lower.tri(tmpSigma, diag=TRUE)] <- MCMC$pm_Sigma_b[((j-1)*data$LTb+1):(j*data$LTb)]
        tmpSigma[upper.tri(tmpSigma, diag=FALSE)] <- t(tmpSigma)[upper.tri(t(tmpSigma), diag=FALSE)]
        RET$poster.mean.Sigma_b[[j]] <- tmpSigma
        tmpLi <- matrix(0, nrow=data$dimb, ncol=data$dimb)
        tmpLi[lower.tri(tmpLi, diag=TRUE)] <- MCMC$pm_Li_b[((j-1)*data$LTb+1):(j*data$LTb)]
        RET$poster.mean.Li_b[[j]] <- tmpLi
      }
      names(RET$poster.mean.Q_b) <- names(RET$poster.mean.Sigma_b) <- names(RET$poster.mean.Li_b) <- paste("j", 1:prior.b$Kmax, sep="")
    }
  }else{
    RET$poster.mean.profile <- data.frame(LogL = as.numeric(MCMC$pm_indLogL),
                                          Deviance = as.numeric(-2 * MCMC$pm_indGLMMLogL))
  }
  ########## ========== Clustering based on posterior P(alloc = k | y) or on P(alloc = k | theta, b, y) ========== ##########
  ########## ======================================================================================================== ##########
  ## Per-subject component membership probabilities, available only when the
  ## number of components is fixed: from allocation indicators (sum_Ir_b)
  ## and from full-conditional probabilities (sum_Pr_b_b).
  if (data$dimb){
    if (prior.b$priorK == "fixed"){
      if (CK_b == 1){
        RET$poster.comp.prob_u <- RET$poster.comp.prob_b <- matrix(1, nrow = Cpar$I, ncol = 1)
      }else{
        ### Using mean(I(r=k))
        MCMC$sum_Ir_b <- matrix(MCMC$sum_Ir_b, ncol = CK_b, nrow = Cpar$I, byrow = TRUE)
        Denom <- apply(MCMC$sum_Ir_b, 1, sum)
        RET$poster.comp.prob_u <- MCMC$sum_Ir_b / matrix(rep(Denom, CK_b), ncol = CK_b, nrow = Cpar$I)
        ### Using mean(P(r=k | theta, b, y))
        MCMC$sum_Pr_b_b<- matrix(MCMC$sum_Pr_b_b, ncol = CK_b, nrow = Cpar$I, byrow = TRUE)
        RET$poster.comp.prob_b <- MCMC$sum_Pr_b_b/ matrix(rep(Denom, CK_b), ncol = CK_b, nrow = Cpar$I)
      }
    }
  }
  ########## ========== Additional posterior summaries ========== ##########
  ########## =================================================================== ##########
  ## Mean, SD and quantile summaries of deviances, alpha, mixture
  ## characteristics of b, and residual std. deviations.
  qProbs <- c(0, 0.025, 0.25, 0.5, 0.75, 0.975, 1)
  nSumm <- c("Mean", "Std.Dev.", "Min.", "2.5%", "1st Qu.", "Median", "3rd Qu.", "97.5%", "Max.")
  mean.Deviance <- -2 * mean(MCMC$chGLMMLogL, na.rm=TRUE)
  quant.Deviance <- 2 * quantile(-MCMC$chGLMMLogL, prob=qProbs, na.rm=TRUE)
  sd.Deviance <- 2 * sd(MCMC$chGLMMLogL, na.rm=TRUE)
  summ.Deviance <- c(mean.Deviance, sd.Deviance, quant.Deviance)
  mean.Cond.Deviance <- -2 * mean(MCMC$chLogL, na.rm=TRUE)
  quant.Cond.Deviance <- 2 * quantile(-MCMC$chLogL, prob=qProbs, na.rm=TRUE)
  sd.Cond.Deviance <- 2 * sd(MCMC$chLogL, na.rm=TRUE)
  summ.Cond.Deviance <- c(mean.Cond.Deviance, sd.Cond.Deviance, quant.Cond.Deviance)
  RET$summ.Deviance <- data.frame(Deviance = summ.Deviance, Cond.Deviance = summ.Cond.Deviance)
  rownames(RET$summ.Deviance) <- nSumm
  if (data$lalpha){
    MCMC$chalpha <- matrix(MCMC$chalpha, ncol=data$lalpha, byrow=TRUE)
    colnames(MCMC$chalpha) <- paste("alpha", 1:data$lalpha, sep="")
    if (data$lalpha == 1){
      mean.alpha <- mean(MCMC$chalpha, na.rm=TRUE)
      quant.alpha <- quantile(MCMC$chalpha, prob=qProbs, na.rm=TRUE)
      sd.alpha <- sd(as.numeric(MCMC$chalpha), na.rm=TRUE)
      RET$summ.alpha <- c(mean.alpha, sd.alpha, quant.alpha)
      names(RET$summ.alpha) <- nSumm
    }else{
      mean.alpha <- apply(MCMC$chalpha, 2, mean, na.rm=TRUE)
      quant.alpha <- apply(MCMC$chalpha, 2, quantile, prob=qProbs, na.rm=TRUE)
      sd.alpha <- apply(MCMC$chalpha, 2, sd, na.rm=TRUE)
      RET$summ.alpha <- rbind(mean.alpha, sd.alpha, quant.alpha)
      RET$summ.alpha <- as.data.frame(RET$summ.alpha)
      rownames(RET$summ.alpha) <- nSumm
    }
  }
  if (data$dimb){
    MCMC$chMeanData_b <- matrix(MCMC$chMeanData_b, ncol=data$dimb, byrow=TRUE)
    MCMC$chCorrData_b <- matrix(MCMC$chCorrData_b, ncol=data$LTb, byrow=TRUE)
    colnames(MCMC$chMeanData_b) <- paste("b.Mean.", 1:data$dimb, sep="")
    colnames(MCMC$chCorrData_b) <- paste("b.Corr", data$naamLTb, sep="")
    ## diagonal positions of the packed lower triangle hold SDs, not
    ## correlations; rename them accordingly
    colnames(MCMC$chCorrData_b)[((0:(data$dimb-1))*(2*data$dimb - (0:(data$dimb-1)) + 1))/2 + 1] <- paste("b.SD.", 1:data$dimb, sep="")
    if (data$dimb == 1){
      meanb.Mean <- mean(MCMC$chMeanData_b, na.rm=TRUE)
      quantb.Mean <- quantile(MCMC$chMeanData_b, prob=qProbs, na.rm=TRUE)
      sdb.Mean <- sd(as.numeric(MCMC$chMeanData_b), na.rm=TRUE)
      RET$summ.b.Mean <- c(meanb.Mean, sdb.Mean, quantb.Mean)
      names(RET$summ.b.Mean) <- nSumm
      meanb.SDCorr <- mean(MCMC$chCorrData_b, na.rm=TRUE)
      quantb.SDCorr <- quantile(MCMC$chCorrData_b, prob=qProbs, na.rm=TRUE)
      sdb.SDCorr <- sd(as.numeric(MCMC$chCorrData_b), na.rm=TRUE)
      RET$summ.b.SDCorr <- c(meanb.SDCorr, sdb.SDCorr, quantb.SDCorr)
      names(RET$summ.b.SDCorr) <- nSumm
    }else{
      meanb.Mean <- apply(MCMC$chMeanData_b, 2, mean, na.rm=TRUE)
      quantb.Mean <- apply(MCMC$chMeanData_b, 2, quantile, prob=qProbs, na.rm=TRUE)
      sdb.Mean <- apply(MCMC$chMeanData_b, 2, sd, na.rm=TRUE)
      RET$summ.b.Mean <- rbind(meanb.Mean, sdb.Mean, quantb.Mean)
      rownames(RET$summ.b.Mean) <- nSumm
      meanb.SDCorr <- apply(MCMC$chCorrData_b, 2, mean, na.rm=TRUE)
      quantb.SDCorr <- apply(MCMC$chCorrData_b, 2, quantile, prob=qProbs, na.rm=TRUE)
      sdb.SDCorr <- apply(MCMC$chCorrData_b, 2, sd, na.rm=TRUE)
      RET$summ.b.SDCorr <- rbind(meanb.SDCorr, sdb.SDCorr, quantb.SDCorr)
      rownames(RET$summ.b.SDCorr) <- nSumm
    }
  }
  if (Cpar$R_cd["R_c"]){
    MCMC$chsigma_eps <- matrix(MCMC$chsigma_eps, ncol=Cpar$R_cd["R_c"], byrow=TRUE)
    colnames(MCMC$chsigma_eps) <- paste("sigma", 1:Cpar$R_cd["R_c"], sep="")
    if (Cpar$R_cd["R_c"] == 1){
      mean.sigma_eps <- mean(MCMC$chsigma_eps, na.rm=TRUE)
      quant.sigma_eps <- quantile(MCMC$chsigma_eps, prob=qProbs, na.rm=TRUE)
      sd.sigma_eps <- sd(as.numeric(MCMC$chsigma_eps), na.rm=TRUE)
      RET$summ.sigma_eps <- c(mean.sigma_eps, sd.sigma_eps, quant.sigma_eps)
      names(RET$summ.sigma_eps) <- nSumm
    }else{
      mean.sigma_eps <- apply(MCMC$chsigma_eps, 2, mean, na.rm=TRUE)
      quant.sigma_eps <- apply(MCMC$chsigma_eps, 2, quantile, prob=qProbs, na.rm=TRUE)
      sd.sigma_eps <- apply(MCMC$chsigma_eps, 2, sd, na.rm=TRUE)
      RET$summ.sigma_eps <- rbind(mean.sigma_eps, sd.sigma_eps, quant.sigma_eps)
      rownames(RET$summ.sigma_eps) <- nSumm
    }
  }
  ########## ========== Chains for model parameters ========== ##########
  ########## ================================================= ##########
  ## Only stored when keep.chains is TRUE; the flat C output is reshaped
  ## into matrices (one row per kept iteration) and the originals are
  ## NULL-ed out to free memory.
  if (keep.chains){
    RET$Deviance <- as.numeric(-2 * MCMC$chGLMMLogL)
    RET$Cond.Deviance <- as.numeric(-2 * MCMC$chLogL)
    if (data$dimb){
      ##### Chains for parameters of mixture distribution of b
      ##### -----------------------------------------------------
      RET$K_b <- as.numeric(MCMC$chK_b)
      ## NOTE(review): this clears MCMC$K_b (the last state), not MCMC$chK_b
      ## (the chain just copied) -- possibly intended to be chK_b; confirm.
      MCMC$K_b <- NULL
      RET$w_b <- as.numeric(MCMC$chw_b[1:nCompTotal_b])
      MCMC$chw_b <- NULL
      RET$mu_b <- as.numeric(MCMC$chmu_b[1:(data$dimb*nCompTotal_b)])
      MCMC$chmu_b <- NULL
      RET$Li_b <- as.numeric(MCMC$chLi_b[1:(data$LTb*nCompTotal_b)])
      MCMC$chLi_b <- NULL
      RET$Q_b <- as.numeric(MCMC$chQ_b[1:(data$LTb*nCompTotal_b)])
      MCMC$chQ_b <- NULL
      RET$Sigma_b <- as.numeric(MCMC$chSigma_b[1:(data$LTb*nCompTotal_b)])
      MCMC$chSigma_b <- NULL
      RET$gammaInv_b <- matrix(MCMC$chgammaInv_b, ncol=data$dimb, byrow=TRUE)
      colnames(RET$gammaInv_b) <- paste("gammaInv", 1:data$dimb, sep="")
      MCMC$chgammaInv_b <- NULL
      RET$order_b <- as.numeric(MCMC$chorder_b[1:nCompTotal_b] + 1)
      MCMC$chorder_b <- NULL
      RET$rank_b <- as.numeric(MCMC$chrank_b[1:nCompTotal_b] + 1)
      MCMC$chrank_b <- NULL
      if (prior.b$priorK == "fixed"){
        ## fixed K: chains can be reshaped into rectangular matrices with
        ## one column (block) per mixture component
        RET$w_b <- matrix(RET$w_b, ncol=prior.b$Kmax, byrow=TRUE)
        colnames(RET$w_b) <- paste("w", 1:prior.b$Kmax, sep="")
        RET$mu_b <- matrix(RET$mu_b, ncol=data$dimb*prior.b$Kmax, byrow=TRUE)
        colnames(RET$mu_b) <- paste("mu.", rep(1:prior.b$Kmax, each=data$dimb), ".", rep(1:data$dimb, prior.b$Kmax), sep="")
        RET$Li_b <- matrix(RET$Li_b, ncol=data$LTb*prior.b$Kmax, byrow=TRUE)
        colnames(RET$Li_b) <- paste("Li", rep(1:prior.b$Kmax, each=data$LTb), rep(data$naamLTb, prior.b$Kmax), sep="")
        RET$Q_b <- matrix(RET$Q_b, ncol=data$LTb*prior.b$Kmax, byrow=TRUE)
        colnames(RET$Q_b) <- paste("Q", rep(1:prior.b$Kmax, each=data$LTb), rep(data$naamLTb, prior.b$Kmax), sep="")
        RET$Sigma_b <- matrix(RET$Sigma_b, ncol=data$LTb*prior.b$Kmax, byrow=TRUE)
        colnames(RET$Sigma_b) <- paste("Sigma", rep(1:prior.b$Kmax, each=data$LTb), rep(data$naamLTb, prior.b$Kmax), sep="")
        RET$order_b <- matrix(RET$order_b, ncol=prior.b$Kmax, byrow=TRUE)
        colnames(RET$order_b) <- paste("order", 1:prior.b$Kmax, sep="")
        RET$rank_b <- matrix(RET$rank_b, ncol=prior.b$Kmax, byrow=TRUE)
        colnames(RET$rank_b) <- paste("rank", 1:prior.b$Kmax, sep="")
      }
      ##### Chains for characteristics of the mixture distribution of b
      ##### --------------------------------------------------------------
      RET$mixture_b <- as.data.frame(cbind(MCMC$chMeanData_b, MCMC$chCorrData_b))
      MCMC$chMeanData_b <- NULL
      MCMC$chCorrData_b <- NULL
      ##### Chains for random effects b
      ##### ------------------------------
      if (store["b"]){
        RET$b <- matrix(MCMC$chb, ncol=data$dimb*Cpar$I, byrow=TRUE)
        MCMC$chb <- NULL
        colnames(RET$b) <- paste("b.", rep(1:Cpar$I, each=data$dimb), ".", rep(1:data$dimb, Cpar$I), sep="")
      }
    }
    if (data$lalpha){
      ##### Chains for regression coefficients alpha
      ##### ------------------------------------------
      RET$alpha <- MCMC$chalpha
      MCMC$chalpha <- NULL
    }
    if (Cpar$R_cd["R_c"]){
      ##### Chains for parameters of distribution of residuals
      ##### -----------------------------------------------------
      RET$sigma_eps <- MCMC$chsigma_eps
      MCMC$chsigma_eps <- NULL
      RET$gammaInv_eps <- matrix(MCMC$chgammaInv_eps, ncol=Cpar$R_cd["R_c"], byrow=TRUE)
      colnames(RET$gammaInv_eps) <- paste("gammaInv", 1:Cpar$R_cd["R_c"], sep="")
      MCMC$chgammaInv_eps <- NULL
    }
  }
  ########## ========== Additional objects (added on 08/26/2010) ========== ##########
  ########## ============================================================== ##########
  RET$relabel_b <- list(type="mean", par=1) #### default re-labeling is performed using the first margin of the mixture means
  RET$Cpar <- Cpar
  class(RET) <- "GLMM_MCMC"
  return(RET)
}
|
99391e1d65087672a2c85aa203d9822da9bcfede | 23617caf7005786cd089b7f73f7da604b5ae3d6f | /man/spawning_no.Rd | afc529f06120aa70598e8d2460b2be05ac82771b | [] | no_license | poissonconsulting/lexr | bedc665d09be4856a92a982d3cd912af5975a025 | 8d6cdd07af61a2ef7b8bf635500ecccb52700e92 | refs/heads/master | 2021-07-19T13:11:13.160924 | 2021-02-16T20:37:35 | 2021-02-16T20:37:35 | 48,457,451 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 902 | rd | spawning_no.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spawning.R
\name{spawning_no}
\alias{spawning_no}
\title{No Spawning}
\usage{
spawning_no(detection, period)
}
\arguments{
\item{detection}{A data.frame of the detection data for the capture.}
\item{period}{A data.frame of the periods.}
}
\description{
A function that simply returns FALSE for every time period.
}
\details{
To identify spawning events when making analysis data pass a custom
function in place of spawning_no. The function has to take the same
arguments and return a logical vector even if there are no detections
for an individual. It should do this by returning FALSE for all
periods outside the spawning window and NA for all periods inside
the spawning window if no information is available.
To see the columns and types in detection and period, view
the function definition for \code{spawning_no}.
}
|
5d6b500d321387bdc5c8b365fadba625c9ab551f | 3c2715e0dfade25fbedb65aaa21b99a677c2e1d2 | /Implementation_data.R | 0c170077e33cbf53036af862d286caddf2abeef5 | [] | no_license | AakashAhuja30/Topic-Modelling-using-Latent-Dirichlet-Allocation-Algorithm | 02be9586f013cfcf332e3249e1e5cbc532643e7c | f75cc4b7687287dc2cfbb7f536e091e1b549d26b | refs/heads/main | 2023-01-14T05:54:25.086423 | 2020-11-10T15:24:08 | 2020-11-10T15:24:08 | 311,695,379 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,457 | r | Implementation_data.R | #Setting the working directory
setwd(dirname(rstudioapi::callFun("getActiveDocumentContext")$path))
#Load files into corpus
setwd('pp4data/artificial/')
filenames<-list.files(path = getwd(), pattern = "")
temp1<-list.files(path = getwd(), pattern = "*.csv")
filenames<-setdiff(filenames, temp1)
myFiles.sorted <- sort(filenames)
split <- strsplit(myFiles.sorted, ' ')
split <- as.numeric(split)
myFiles.correct.order <- myFiles.sorted[order(split)]
# Read files into a list of docs
artificial_data<-suppressWarnings(lapply(myFiles.correct.order, readLines))
Whole_code_starts_artificial <- Sys.time()
result_final_artificial<-Main_function(artificial_data,K=2,top_words = 3)
Whole_code_ends_artificial <- Sys.time()
run_time_artificial<- Whole_code_ends_artificial - Whole_code_starts_artificial
#Loading 20 Newsgroups Data
setwd(dirname(rstudioapi::callFun("getActiveDocumentContext")$path))
setwd('pp4data/20newsgroups/')
filenames<-list.files(path = getwd(), pattern = "")
temp1<-list.files(path = getwd(), pattern = "*.csv")
filenames<-setdiff(filenames, temp1)
myFiles.sorted <- sort(filenames)
split <- strsplit(myFiles.sorted, ' ')
split <- as.numeric(split)
myFiles.correct.order <- myFiles.sorted[order(split)]
# Read files into a list of docs
Twenty_newsgroup_data<-suppressWarnings(lapply(myFiles.correct.order, readLines))
Whole_code_starts_Twenty_newsgroup <- Sys.time()
result_final_Twenty_newsgroup<-Main_function(Twenty_newsgroup_data,K=20,top_words = 5)
Whole_code_ends_Twenty_newsgroup <- Sys.time()
run_time_Twenty_newsgroup<- Whole_code_ends_Twenty_newsgroup - Whole_code_starts_Twenty_newsgroup
#Task 2: Classification
#Importing data for logistic regression
label_data<-read.csv('index.csv', header = F)
label_data<-label_data[2]
#LDA CLASSIFICATION using this label data
start_time_lr1 <- Sys.time()
sums_lr1<-replicate(30,W_Map_test(result_final_Twenty_newsgroup[[1]],label_data))
end_time_lr1 <- Sys.time()
#Plots LDA
setwd(dirname(rstudioapi::callFun("getActiveDocumentContext")$path))
png(file="DocumentTopicRepresentation.png")
GraphPlot(sums_lr1,label_data, "Document Topic Representation")
graphics.off()
#BOW CLASSIFICATION using this label data
start_time_lr2 <- Sys.time()
sums_lr2<-replicate(30,W_Map_test(result_final_Twenty_newsgroup[[2]],label_data))
end_time_lr2 <- Sys.time()
#Plots bag of words
png(file="BagOfWords.png")
GraphPlot(sums_lr2,label_data,"Bag of words Representation")
graphics.off()
|
a3593ba98b288c328b3c2a8cdf86af824b0f5d1e | e528e7eff68ef09776510840c154ccbc581b67c4 | /plot3.R | 70cd8a7bad041f43ed54b1cbee71639082d5eaf7 | [] | no_license | thehighepopt/ExData_Plotting1 | 2609758d6fb0d1a5c1a27d7f50b31b4a5eca39b6 | 5eb0f39f6dda439374f2cb7358f6115319447ead | refs/heads/master | 2020-05-29T11:08:01.484443 | 2015-01-11T22:37:44 | 2015-01-11T22:37:44 | 29,101,906 | 0 | 0 | null | 2015-01-11T18:50:12 | 2015-01-11T18:50:12 | null | UTF-8 | R | false | false | 1,130 | r | plot3.R | ## directory is a character string representing the directory where
## you unpacked the power consumption data set, assuming you didn't
## rename the folders.
## Reproduce plot 3 of the power-consumption assignment: the three
## sub-metering series for 2007-02-01/02, written to plot3.png in
## `directory`.
##
## @param directory character; directory containing the unpacked
##   "exdata-data-household_power_consumption" folder
plotting <- function(directory) {
  # restore the caller's working directory when done
  old_wd <- setwd(directory)
  on.exit(setwd(old_wd), add = TRUE)
  # bug fix: with skip= and header=TRUE the first row of the wanted window
  # was consumed as (garbage) column names, shifting the window by one row;
  # read headerless and attach the real names from the file's first line
  electric <- read.table("./exdata-data-household_power_consumption/household_power_consumption.txt",
                         header = FALSE, sep = ";", na.strings = "?",
                         nrows = 2880, skip = 66636)
  colnames(electric) <- unlist(read.table("./exdata-data-household_power_consumption/household_power_consumption.txt",
                                          sep = ";", nrows = 1))
  electric$Date <- as.Date(electric$Date, format = "%d/%m/%Y")
  # combined timestamp for the x axis
  electric$DateTime <- as.POSIXct(with(electric, paste(Date, Time)), tz = "GMT")
  png(filename = "plot3.png", width = 480, height = 480)
  with(electric, {
    plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy Sub metering")
    lines(DateTime, Sub_metering_2, col = "red")
    lines(DateTime, Sub_metering_3, col = "blue")
    # bug fix: the legend showed "-" glyphs (pch = 45) instead of real
    # line samples; lty = 1 draws proper coloured lines
    legend("topright", lty = 1, col = c("black", "red", "blue"),
           legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  })
  dev.off()
}
|
d37eb47ca728ed381a63afabad1feaa9720794ec | f6eec6a9ef806648387faa1f3cdb45ff5e2aad9b | /DataAnalysis/scripts/R/automotive02/eval_automotive02.r | fef0d8dd61e45a3208946c9410d765313c615c08 | [] | no_license | PettTo/Measuring-Stability-of-Configuration-Sampling | 60bddf9ce7dd0a202729b1402cee0ef5db9080d2 | 38605256ee857c07a6e765a936fd007788ee9d18 | refs/heads/master | 2021-07-02T20:14:17.203544 | 2021-01-15T07:31:38 | 2021-01-15T07:31:38 | 213,128,169 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,577 | r | eval_automotive02.r | ########## Color Def ############
# tubs blue light
tubsBlueLight <- rgb(102,180,211,255, maxColorValue = 255)
# tubs green light
tubsGreenLight <- rgb(172,193,58,255, maxColorValue = 255)
# tubs orange
tubsOrange <- rgb(255,109,0,255, maxColorValue = 255)
# tubs purple
tubsPurple <- rgb(138,48,127,255, maxColorValue = 255)
# tubs yellow
tubsYellow <- rgb(255,200,42,255, maxColorValue = 255)
tubsRed <- rgb(190,30,60,255, maxColorValue = 255)
#TUBS blue dark
tubsBlue <- rgb(0,63,87,255, maxColorValue = 255)
########## Automotive2 ############
setwd("C:/Users/t.pett/Documents/Repositories/Measuring-Stability-of-Configuration-Sampling/DataAnalysis/data/automotive02/stability_csv/")
wd <- getwd()
show(wd)
### Preparations msoc data
msoc <- read.csv(file="./msoc/combined_procedures.csv",header=TRUE, sep=";",colClasses=c("NULL",NA,NA))
show(msoc)
msocRandom <- c(msoc$random)
msocIncling <- c(msoc$Incling)
### Preparations roic data
roic <- read.csv(file="./roic/combined_procedures.csv",header=TRUE, sep=";",colClasses=c("NULL",NA,NA))
show(roic)
roicRandom <- c(roic$random)
roicIncling <- c(roic$Incling)
### Preparations icst data
icst <- read.csv(file="./icst/combined_procedures.csv",header=TRUE, sep=";",colClasses=c("NULL",NA,NA))
show(icst)
icstRandom <- c(icst$Random)
icstIncling <- c(icst$IncLing)
### Preparation Placeholder
placeholder <- c(rep(NaN,length(roicIncling)))
### preparation for scatter
y <- c(
placeholder,
roicRandom,
roicIncling,
placeholder,
msocRandom,
msocIncling,
placeholder,
icstRandom,
icstIncling,
placeholder
)
# x positions of the scatter groups; must line up one-to-one with the
# y vector built above (same group order, same lengths)
x <- c(
  rep(0, length(placeholder)),
  rep(1, length(roicRandom)),
  rep(2, length(roicIncling)),
  rep(3, length(placeholder)),
  rep(4, length(msocRandom)),
  rep(5, length(msocIncling)),
  rep(6, length(placeholder)),   # bug fix: was 5, overlapping the IncLing column
  rep(7, length(icstRandom)),
  rep(8, length(icstIncling)),
  rep(9, length(placeholder))    # bug fix: y uses placeholder here, so x must too
)
show(x)
show(y)
pdf(file='./plots/automo_scatter.pdf', width=9, height=6)
plot(x,y, las=2, ylim=c(0,1),main="Automotive02 Scatterplot",
axes=FALSE,
#par(mar = c(7, 8, 6, 2) + 0.1),
#pch=c(19,19,19,17,17,17),
#col=c(rep(tubsBlue,length(incling)), rep(tubsRed,length(random))),
pch=c(
rep(0,length(placeholder)),
rep(2,length(roicRandom)),
rep(5,length(roicIncling)),
rep(0,length(placeholder)),
rep(2,length(msocRandom)),
rep(5,length(msocIncling)),
rep(0,length(placeholder)),
rep(2,length(icstRandom)),
rep(5,length(icstIncling)),
rep(0,length(placeholder))
),
col=c(
rep(tubsBlue,length(placeholder)),
rep(tubsBlue,length(roicRandom)),
rep(tubsBlue,length(roicIncling)),
rep(tubsBlue,length(placeholder)),
rep(tubsRed,length(msocRandom)),
rep(tubsRed,length(msocIncling)),
rep(tubsBlue,length(placeholder)),
rep(tubsOrange,length(icstRandom)),
rep(tubsOrange,length(icstIncling)),
rep(tubsOrange,length(placeholder))
),
cex=c(
rep(1.3,length(placeholder)),
rep(1.3,length(roicRandom)),
rep(1.3,length(roicIncling)),
rep(1.3,length(placeholder)),
rep(1.3,length(msocRandom)),
rep(1.3,length(msocIncling)),
rep(1.3,length(placeholder)),
rep(1.3,length(icstRandom)),
rep(1.3,length(icstIncling)),
rep(1.3,length(placeholder))
)
)
axis(1,
pos=c(0,0),
at=c(0:9),
labels=c("","Random","IncLing","","Random","IncLing","","Random","IncLing",""),
las=2)
axis(2,
pos=c(0,0),
at=c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1),
labels=c("0","0.1","0.2","0.3","0.4","0.5","0.6","0.7","0.8","0.9","1"),
las=1 )
axis(3,
pos=c(1,0),
at=c(0,1.5,4.5,7.5,9),
labels=c("","simple matching","1:1 Matching","N:M Matching",""),
las=1 )
axis(2,
pos=c(3,0),
lwd.ticks = 0,
labels = FALSE,
#at=c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1),
#labels=c("0","0.1","0.2","0.3","0.4","0.5","0.6","0.7","0.8","0.9","1"),
las=1 )
axis(2,
pos=c(6,0),
lwd.ticks = 0,
labels = FALSE,
#at=c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1),
#labels=c("0","0.1","0.2","0.3","0.4","0.5","0.6","0.7","0.8","0.9","1"),
las=1 )
axis(2,
pos=c(9,0),
lwd.ticks = 0,
labels = FALSE,
#at=c(0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1),
#labels=c("0","0.1","0.2","0.3","0.4","0.5","0.6","0.7","0.8","0.9","1"),
las=1 )
grid(col=c("gray60"),lty="dotted", lwd = 0.6)
dev.off()# |
573ab16743c234e0f12e8bd4c7b2e1012bc2f1db | c21a2001e5e9618ef9432c92883a9fe21be05009 | /RShiny/global.R | cb3d64181150465407b3875572f55e6f2607f880 | [] | no_license | yannick-yf/ipfy-dashboard | 69ec26589d2aaa0db387e52a3e460a0e3af50903 | ebf40e1d8bd1cf5ba73871d84106a75d02f2130c | refs/heads/main | 2023-03-28T00:01:48.889091 | 2021-03-04T20:39:27 | 2021-03-04T20:39:27 | 309,138,835 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 238 | r | global.R | # global.R
library(shiny)
library(dplyr)
library(tidyr)
library(grid)
library(ggplot2)
library(scales)
library(shiny.i18n)
i18n <- Translator$new(translation_json_path='translations/translation.json')
i18n$set_translation_language('en') |
b08359852ec125f889be6f3e0ad68219816e7769 | 0ffa59c5cc992c00d0ffd6cc7e21f47d1fb5c5b9 | /filtering.R | a276c7291e611347e498fe91fea0746eaf2261c4 | [] | no_license | mariabuechner/GI_Optimization | eaf2cfc59431d82d7936161b116de0f8e161213c | 34f41e3a2e2140d2963517381098672b9c882ef6 | refs/heads/master | 2021-01-18T23:44:42.537669 | 2015-12-11T15:06:25 | 2015-12-11T15:06:25 | 47,817,098 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,003 | r | filtering.R | filters = list.files(path="filters", pattern="*.csv")
## Read one filter definition from the ./filters directory.
##
## @param inputFile file name of the CSV (e.g. "Al.csv"); expected columns:
##   'energy' [keV], 'mu' [cm2/g] and 'density' [g/cm3]
## @return data frame with those columns
filtering.readFilter <- function(inputFile) {
  # file.path() is the portable way to join path components
  # (was sprintf("filters/%s", ...)); also use <- for assignment
  filePath <- file.path("filters", inputFile)
  inputFilter <- read.csv(filePath)
  return(inputFilter)
}
## Linearly interpolate a filter's attenuation coefficients onto the
## energy grid of the input spectrum.
##
## @param filter data frame with columns 'energy' and 'mu'
## @param energies numeric vector of target energies [keV]
## @return data frame with columns 'energy' and 'mu' on the new grid
filtering.interpolateFilter <- function(filter, energies) {
  interp <- approx(filter$energy, filter$mu, energies)
  out <- data.frame(energy = interp$x, mu = interp$y)
  return(out)
}
## Apply Beer-Lambert attenuation of one filter to a spectrum.
##
## @param filter data frame with columns 'energy', 'mu' and 'density'
## @param filterThickness filter thickness (same length unit as 1/(mu*density))
## @param energies energy grid of the spectrum [keV]
## @param photons photon counts per energy bin
## @return data frame with columns 'energy' and the attenuated 'photons'
filtering.filterEnergies <- function(filter, filterThickness, energies, photons) {
  mu <- filtering.interpolateFilter(filter, energies)$mu
  attenuation <- exp(-mu * filter$density[1] * filterThickness)
  return(data.frame(energy = energies, photons = photons * attenuation))
}
5c83c069b6f4578b305c0f2d1606df5845fa957e | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /FENmlm/man/FENmlm-package.Rd | fce67db45130ccb18bf4336344435b6d4f8d22e1 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,541 | rd | FENmlm-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FENmlm.R
\docType{package}
\name{FENmlm-package}
\alias{FENmlm}
\alias{FENmlm-package}
\title{Fixed Effects Nonlinear Maximum Likelihood Models}
\description{
Efficient estimation of multiple fixed-effects maximum likelihood models with, possibly, non-linear in parameters right hand sides. Standard-errors can easily be clustered. It also includes tools to seamlessly export (to Latex) the results of various estimations.
}
\details{
This package efficiently estimates maximum likelihood models with multiple fixed-effect (i.e. large factor variables).
The core function is \code{\link[FENmlm]{femlm}} which estimates maximum likelihood models with, possibly, non-linear in parameters right hand sides. The ML families available are: poisson, negative binomial, logit and Gaussian.
Several features are also included such as the possibility to easily compute different types of standard-errors (including multi-way clustering).
It is possible to compare the results of severeal estimations by using the function \code{\link[FENmlm]{res2table}}, and to export them to Latex using \code{\link[FENmlm]{res2tex}}.
}
\references{
Bergé, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
}
\author{
\strong{Maintainer}: Laurent Berge \email{laurent.berge@uni.lu}
}
|
eb29919ff76113b6c1781a448081412ba8db4e07 | 688a5d9f3f4f076df650fb8bd1f4454840316ff9 | /R/functions_master_thesis.R | 0263ae3779496428f8e3f6f8306e7a062d2b10d3 | [] | no_license | nikosbosse/epipredictr | 8edbec2b28cea705c26a3af021cb3235c3ec3ee5 | 4ca20704632d1b4c97679415b4c552490379b679 | refs/heads/master | 2021-01-03T02:31:41.101027 | 2020-04-01T15:03:25 | 2020-04-01T15:03:25 | 239,878,247 | 1 | 1 | null | 2020-04-01T15:03:26 | 2020-02-11T22:31:41 | C++ | UTF-8 | R | false | false | 13,696 | r | functions_master_thesis.R | #' setup work environment
#'
#' not needed later on.
#'
#' @return nothing
#' @examples
#' \notrun{
#' inc <- my_load_data()
#' }
#'
#'
## One-time interactive session setup for the thesis analyses.
## Side effects only: console options, graphics fonts, package loading.
## Statement order matters -- the later options(max.print = 36000)
## deliberately overrides the earlier options(max.print = 2000).
my_setup <- function(){
options(max.print = 2000)
# serif font for base graphics
par(family = "Serif")
options(width=as.integer(160))
options(max.print = 36000)
#library(tidyverse)
library(ggplot2)
# serif font for ggplot2 output as well
theme_set(theme_get() + theme(text = element_text(family = 'Serif')))
library(rstan)
#options(mc.cores = parallel::detectCores())
# run 4 MCMC chains in parallel; auto_write caches compiled Stan models
options(mc.cores = 4)
rstan_options(auto_write = TRUE)
library(extraDistr)
#library(EpiEstim)
library(shinystan)
library(bsts)
library(matrixStats)
}
# ================================================= #
# ================================================= #
# ================================================= #
# ================================================= #
## Re-estimate the EpiEstim reproduction number R in Stan.
##
## @param past_incidences vector of daily incidence counts
## @return list with two elements:
##   R       - data frame with columns mean/low/high (posterior mean and
##             2.5%/97.5% quantiles), the estimate label and a row id
##   stanfit - the underlying rstan fit object
my_EpiEstim_stan <- function(past_incidences){
  inc <- past_incidences
  t <- length(inc)
  # tau = 7: weekly sliding estimation window, as in EpiEstim
  l <- list(t = t, past_incidences = inc, tau = 7)
  stanfit2 <- rstan::stan(file = "../stan/estimate_R_EpiEstim_rebuild.stan",
                          data = l,
                          iter = 4000, warmup = 800, thin = 1,
                          control = list(adapt_delta = 0.97))
  # keep only the R[.] parameters from the posterior summary;
  # columns 1/4/8/11 are mean, 2.5%, 97.5% and the added estimate label
  s1 <- summary(stanfit2)$summary %>%
    as.data.frame() %>%
    rownames_to_column("var") %>%
    filter(grepl("^R", var)) %>%
    select(-var) %>%
    mutate(estimate="stan") %>%
    select(c(1,4,8,11)) %>%
    mutate(id=1:n())
  # drop the first 16 entries -- presumably the initial stretch with no
  # reliable window estimate (TODO confirm against the Stan model)
  s1 <- s1[17:nrow(s1),] %>%
    mutate(id=1:n())
  colnames(s1) <- c("mean", "low", "high", "estimate", "id")
  # bug fix: the original ended on an assignment, so the result was only
  # returned invisibly; return it explicitly
  result <- list(R = s1, stanfit = stanfit2)
  return(result)
}
# ================================================= #
# ================================================= #
## Fit the Bayesian structural time-series model for R in Stan and
## forecast n_pred future values.
##
## @param past_r numeric vector of previously estimated reproduction numbers
## @param n_pred number of future time steps to forecast (default 10)
## @return list with
##   params    - posterior summary rows for sigma_epsilon, sigma_eta, phi, D
##   predicted - posterior summary rows for the forecast r_pred[.]
##   stanfit   - the underlying rstan fit object
## Side effect: displays a diagnostic ribbon plot of the forecasts.
my_stan_bsts <- function(past_r, n_pred = 10){
  t <- length(past_r)
  l <- list(t = t, past_r = past_r, n_pred = n_pred)
  stanfit2 <- rstan::stan(file = "../stan/bayesian_structural_time_series_model_r.stan" ,
                          data = l,
                          iter = 4000, warmup = 800, thin = 1,
                          control = list(adapt_delta = 0.97))
  sum <- summary(stanfit2)$summary
  sum <- sum %>% as.data.frame(rownames(sum)) %>% mutate(var = rownames(sum))
  # model hyper-parameters
  params <- sum %>% filter(sum$var %in% c("sigma_epsilon", "sigma_eta", "phi", "D"))
  rownames(params) <- params$var
  # forecast rows r_pred[1..n_pred]
  predicted <- sum %>% filter(grepl("^r_pred", var))
  rownames(predicted) <- predicted$var
  res <- list(params = params, predicted = predicted, stanfit = stanfit2)
  # diagnostic plot of posterior mean and the 2.5%/97.5% band.
  # bug fix: inside a function a ggplot object is not auto-printed, so
  # the original plot was silently discarded -- print() it explicitly
  r <- res$predicted
  r <- r[, c(1, 4, 8)]
  colnames(r) <- c("mean", "low", "high")
  print(ggplot(r, aes(x = 1:n_pred, y = mean, ymin = low, ymax = high)) +
          geom_line() + geom_ribbon(alpha = 0.5))
  return(res)
}
## Empirical CDF of a sample of predictive draws evaluated at k:
## the share of draws that are <= k.
##
## @param predictions numeric vector of posterior predictive draws
## @param k evaluation point
## @return proportion in [0, 1]
my_F <- function(predictions, k){
  mean(predictions <= k)
}
# ================================================= #
# ================================================= #
## Excess probability mass of the values u inside the central 50%
## interval (0.25, 0.75) relative to the nominal 0.5.
## 0 indicates the nominal share; positive values mean the draws cluster
## in the centre, negative values mean they avoid it.
my_centrality <- function(u){
  mean(u > 0.25 & u < 0.75) - 0.5
}
# ================================================= #
# ================================================= #
## Ranked probability score of posterior predictive samples against
## observed counts, one score per time point (lower = better).
##
## @param true_values vector of observed values (length t)
## @param samples matrix of posterior predictive draws; column j holds
##   the draws for time point j
## @return numeric vector of RPS values, one per time point
my_RPS <- function(true_values, samples){
  t <- length(true_values)
  rps <- numeric(t)
  for (j in seq_len(t)){
    draws <- samples[, j]
    # sum over the outcome grid 1..max(draws); seq_len() also fixes the
    # original 1:m construct, which ran backwards (1, 0) when max(draws) < 1
    for (i in seq_len(max(draws))){
      # empirical CDF of the draws at i vs. the step CDF of the observation
      rps[j] <- rps[j] + (mean(draws <= i) - (i >= true_values[j]))^2
    }
  }
  return(rps)
}
# ================================================= #
# ================================================= #
## Dawid-Sebastiani score: a proper scoring rule that uses only the
## predictive mean and standard deviation, one score per time point
## (lower = better).
##
## @param true_values vector of observed values (length t)
## @param samples matrix of posterior predictive draws; column j holds
##   the draws for time point j
## @return numeric vector of DSS values, one per time point
my_DSS <- function(true_values, samples){
  mus <- colMeans(samples)
  sds <- apply(samples, 2, sd)
  as.numeric(((true_values - mus) / sds)^2 + 2 * log(sds))
}
# ================================================= #
# ================================================= #
## Function to get the infectiousness. Already
## implemented in stan, but I want to double
## check the results
## Compute the infectiousness series implied by past incidences and a
## discretized Gamma serial interval (duplicates the Stan implementation
## so results can be cross-checked in R).
##
## @param past_incidences vector of daily case counts
## @param n_pred number of additional future time steps the weight vector
##   should cover
## @return list with
##   weights                  - serial-interval weights w (zero after 40 days)
##   infectiousness           - daily infectiousness (index 1 keeps the
##                              small positive placeholder 1e-6)
##   infectiousness_one_ahead - one-step-ahead infectiousness forecast
##   infectiousness_weekly    - weekly sums of the daily infectiousness
##                              (assumes t is a multiple of 7 -- TODO confirm)
my_infectiousness <- function(past_incidences, n_pred = 0){
  t <- length(past_incidences)
  # discretized Gamma(2.706556, 0.1768991) weights, truncated after 40 days
  days <- seq_len(t + n_pred)
  w <- ifelse(days > 40, 0,
              pgamma(days + 0.5, 2.706556, 0.1768991) -
                pgamma(days - 0.5, 2.706556, 0.1768991))
  infectiousness <- rep(0.000001, times = t)
  if (t >= 2) {                       # guard: 2:t ran backwards for t == 1
    for (s in 2:t){
      infectiousness[s] <- 0
      for (i in 1:(s - 1)){
        infectiousness[s] <- infectiousness[s] + past_incidences[i] * w[s - i]
      }
    }
  }
  # bug fix: the weekly aggregation and the one-step-ahead value were
  # nested inside the s-loop above and needlessly recomputed on every
  # iteration; they only depend on the final vectors
  infectiousness_weekly <- rep(0, times = t / 7)
  for (i in seq_along(infectiousness_weekly)){
    infectiousness_weekly[i] <- sum(infectiousness[(7 * (i - 1) + 1):(7 * i)])
  }
  infectiousness_pred <- 0
  for (i in 1:t){
    infectiousness_pred <- infectiousness_pred + past_incidences[i] * w[t + 1 - i]
  }
  return(list(weights = w,
              infectiousness = infectiousness,
              infectiousness_one_ahead = infectiousness_pred,
              infectiousness_weekly = infectiousness_weekly))
}
# ================================================= #
# ================================================= #
## get next expected incidence
## my_infectiousness(inc) = my_next_expected_incidence(inc[-length(inc)])
## now also implemented in my_infectiousness)()
## Infectiousness expected at time t+1 given incidences up to time t:
## the incidence series weighted by the discretized Gamma serial interval
## at lags t+1-s (note: no 40-day cut-off here, unlike my_infectiousness;
## the integer discretization pgamma(x+0.5)-pgamma(x-0.5) is used).
##
## @param past_incidences vector of daily case counts
## @return scalar weighted sum
my_next_expected_incidence <- function(past_incidences){
  t <- length(past_incidences)
  lags <- t + 1 - seq_len(t)
  weights <- pgamma(lags + 0.5, 2.706556, 0.1768991) -
    pgamma(lags - 0.5, 2.706556, 0.1768991)
  sum(past_incidences * weights)
}
# ================================================= #
# ================================================= #
## Convenience table combining incidences, infectiousness and the
## serial-interval weights for side-by-side inspection.
## Note: the weights column lists w for lags 1..t and is NOT row-aligned
## with the incidence column (kept as in the original).
my_infection_overview <- function(past_incidences){
  inf <- my_infectiousness(past_incidences)
  data.frame(incidences = past_incidences,
             infectiousness = inf$infectiousness,
             weights = inf$weights)
}
# ================================================= #
# ================================================= #
## unsure what this does. delete?
## Plot the posterior mean and the 5%-95% band of predicted R over time.
##
## @param predicted matrix of posterior draws: rows = days, cols = draws
## @return the ggplot object. Bug fix: the original assigned the plot to a
##   local variable as its last statement, so it was only returned
##   invisibly and never displayed when called at top level.
my_plot_r_pred <- function(predicted){
  mean_R <- rowMeans(predicted)
  quantiles <- rowQuantiles(predicted, probs = c(0.05, 0.95))
  days <- 1:nrow(predicted)
  ggplot() +
    geom_line(aes(x = days, y = mean_R)) +
    geom_ribbon(aes(x = days, ymin = quantiles[, 1],
                    ymax = quantiles[, 2]), alpha = 0.3)
}
# ================================================= #
# ================================================= #
## plot 2 histograms. useful for comparing prior and posterior
## Overlay two relative-frequency histograms on shared break points,
## e.g. to compare a prior against a posterior sample.
##
## @param vector1 first sample (drawn in translucent blue)
## @param vector2 second sample (drawn in translucent pink)
## @param breaks  suggested number of break intervals passed to pretty()
## @param upper_limit optional cut-off; values >= it are discarded first
## Side effects: sets par(family) and draws on the active device.
my_plot_two_histograms <- function(vector1, vector2,
breaks = 100,
upper_limit = NULL){
if(!is.null(upper_limit)){
vector1 <- vector1[vector1 < upper_limit]
vector2 <- vector2[vector2 < upper_limit]
}
## set breakpoints and define minimum
## breakpoint a and maximum breakpoint b
a <- min(c(vector1, vector2))
b <- max(c(vector1, vector2))
## define axis
ax <- pretty(a:b, n = breaks)
# pretty() may return breaks that do not cover [a, b]; widen the range by
# one bin width at a time until every observation falls inside a bin
# (hist() errors when data lie outside the supplied breaks)
while(min(ax) > a | max(ax) < b){
if (min(ax) > a){
a <- a - (ax[2] - ax[1])
}
if (max(ax) < b){
b <- b + (ax[2] - ax[1])
}
ax <- pretty(a:b, n = breaks)
}
## make histogram A and B
plot1 <- hist(vector1, breaks = ax, plot = FALSE)
plot1$density = plot1$counts/sum(plot1$counts)
plot2 <- hist(vector2, breaks = ax, plot = FALSE)
plot2$density = plot2$counts/sum(plot2$counts)
## set correct font
par(family = "Serif")
## define two colors (alpha = 75 keeps the overlap region readable)
col1 <- rgb(168,209,225,max = 255, alpha = 75)
col2 <- rgb(248,183,193, max = 255, alpha = 75)
## plot and add 2nd plot to first
plot(plot1, col = col1, xlab = "vec1 is blue, vec2 is pink", xlim = c(a, b))
plot(plot2, col = col2, add = TRUE)
}
# ================================================= #
# ================================================= #
## diagnostic functions to visualize the evolution of
## delta and R under different circumstances
## Simulate the local-trend component delta of the BSTS model:
## delta_t = D + phi * (delta_{t-1} - D) (+ Gaussian noise if random).
##
## @param delta0 starting value delta_1 (bug fix: this argument was
##   silently ignored and delta_1 was always 0)
## @param D long-run mean of the trend
## @param phi AR(1)-style pull towards D (0 = jump straight to D)
## @param n number of time steps to simulate
## @param random add N(0, sigma) innovations at every step?
## @param sigma standard deviation of the innovations
## @return numeric vector of length n
my_evolution_delta <- function(delta0 = 0, D = -0.02, phi = 0, n = 100,
                               random = FALSE, sigma = 0.5){
  deltas <- rep(0, n)
  deltas[1] <- delta0
  if (n >= 2) {   # guard: 2:n runs backwards (then errors) for n == 1
    for (i in 2:n){
      deltas[i] <- D + phi * (deltas[i - 1] - D)
      if (random){
        deltas[i] <- deltas[i] + rnorm(1, 0, sigma)
      }
    }
  }
  return(deltas)
}
## Simulate a reproduction-number path driven by the trend from
## my_evolution_delta(): R_t ~ N(R_{t-1} + delta_t, sigma_epsilon),
## redrawn until positive. Extra arguments (...) are forwarded to
## my_evolution_delta().
my_evolution_R <- function(n = 100, sigma_epsilon = 0.5, ...){
  R <- rep(0, n)
  R[1] <- 1
  delta <- my_evolution_delta(n = n, ...)
  for (i in 2:n){
    mu <- R[i - 1] + delta[i]
    # rejection-sample until a positive value is drawn (R[i] starts at 0)
    while (R[i] <= 0){
      R[i] <- rnorm(1, mu, sigma_epsilon)
    }
  }
  return(R)
}
## results:
## the phi determines how quickly the thing will revert to a constant
## trend. See this paper: http://www.unofficialgoogledatascience.com/2017/07/fitting-bayesian-structural-time-series.html
# ================================================= #
# ================================================= #
## functions to extract and to plot the posterior predictive
## values against the values that were actually observed
## Pull one generated quantity out of a stanfit object and average the
## posterior draws column-wise (one posterior-mean value per time point).
my_extract <- function(stanfitobject, var = "Inc_post"){
  draws <- getElement(extract(stanfitobject), var)
  apply(draws, MARGIN = 2, FUN = mean)
}
## Plot observed incidences (grey) against predicted incidences (red),
## optionally with dashed vertical guides at the refit positions.
##
## @param y_true observed incidence series
## @param y_pred predicted incidence series (drawn on top)
## @param vert_lines x positions for dashed guides; NULL or non-positive
##   values (e.g. the sentinel -1 used by callers) suppress them
my_pred_vs_true_inc_plot <- function(y_true, y_pred, vert_lines = NULL){
  rng <- range(c(y_true, y_pred))
  plot(y_true, type = "l", col = "grey", family = "Serif", ylim = rng)
  lines(y_pred, col = "red", lwd = 3)
  # bug fix: `vert_lines > 0` inside && errors for length > 1 vectors in
  # R >= 4.3; any() keeps the "skip only for the -1 sentinel" behaviour
  if (!is.null(vert_lines) && any(vert_lines > 0)){
    abline(v = vert_lines, col = "blue", lty = 2)
  }
}
# ================================================= #
# ================================================= #
## fit the stan model iteratively
## Refit the Stan model on a growing window of the incidence series:
## first on observations 1..start_period, then extending the window by
## `interval` observations per run until the series is exhausted.
##
## @param past_incidences full incidence series
## @param n_pred forecast horizon passed to the Stan model
## @param interval window growth per run (0 means: use n_pred)
## @param start_period length of the first fitting window
## @param tau sliding-window width passed to the Stan model
## @param stanfile path to the Stan model file
## @return list of stanfit objects, one per run
my_iterative_fit <- function(past_incidences,
                             n_pred = 14,
                             interval = 0,
                             start_period = 30,
                             tau = 7,
                             stanfile = "../stan/combined_EpiEstim_bsts_only_sigma_eta.stan"){
  start_time <- Sys.time()
  if (interval == 0) interval <- n_pred
  total_n <- length(past_incidences)
  current_n <- start_period
  ## calculate how many fits are needed
  runs <- ceiling((total_n - start_period) / interval)
  res <- list()
  i <- 0
  while (current_n < total_n){
    # bug fix: print("run ", x, ...) passes the extra strings as positional
    # arguments of print() (digits, ...) instead of concatenating them;
    # message() concatenates its arguments
    message("run ", i + 1, " of ", runs)
    index <- 1:current_n
    # NOTE(review): this fallback looks wrong -- `i` is the run counter,
    # not a row index; kept as in the original, confirm the intent
    if (length(index) > total_n) {index <- i:total_n}
    inc <- past_incidences[index]
    l <- list(T = length(inc), past_incidences = inc, tau = tau, n_pred = n_pred)
    stanfit <- rstan::stan(file = stanfile,
                           data = l,
                           iter = 2000, warmup = 1000, thin = 1,
                           control = list(adapt_delta = 0.99))
    i <- i + 1
    res[[i]] <- stanfit
    current_n <- start_period + i * interval
  }
  # bug fix: elapsed time was computed as time - Sys.time() (negative)
  print(Sys.time() - start_time)
  return(res)
}
# ================================================= #
# ================================================= #
## use iterative fits of the stan model to plot
## predicted vs. actual cases of Ebola
## Run the iterative Stan fits over the incidence series, collect the
## one-window-ahead predictions ("I_pred") from every fit, plot them
## against the held-out observations and return both predictions and fits.
##
## @param inc full incidence series
## @param n_pred forecast horizon per fit
## @param interval window growth per refit (0 means: use n_pred)
## @param start_period length of the first fitting window
## @param tau sliding-window width passed to the Stan model
## @param stanfile path to the Stan model file
## @return list(predictions = concatenated posterior-mean predictions,
##              stanfitobjects = list of stanfit objects)
my_iter_pred_vs_true <- function(inc,
n_pred = 14,
interval = 0,
start_period = 30,
tau = 7,
stanfile = "../stan/combined_EpiEstim_bsts_only_sigma_eta.stan"){
if (interval == 0) interval <- n_pred
l <- my_iterative_fit(past_incidences = inc,
n_pred = n_pred,
interval = interval,
start_period = start_period,
tau = tau,
stanfile = stanfile)
n_total <- length(inc)
# posterior-mean I_pred of every fit, concatenated in fit order
predictions <- lapply(l, my_extract, var = "I_pred")
predictions <- unlist(predictions, use.names = FALSE)
# vertical guides at the refit positions; -1 is the "none" sentinel
# understood by my_pred_vs_true_inc_plot
if ((n_total - start_period) > interval){
vert_lines = seq(interval, n_total - start_period, interval)
} else {
vert_lines = -1
}
# try(): a plotting failure should not discard the fitted objects
try(my_pred_vs_true_inc_plot(y_true = inc[(start_period + 1):n_total],
y_pred = predictions,
vert_lines = vert_lines))
return(list(predictions = predictions, stanfitobjects = l))
}
# ================================================= #
# ================================================= #
## Load the conflict-event CSV for the DR Congo, keep Nord-Kivu events
## and aggregate them to a daily event-count series.
##
## @return data frame with columns event_date (Date) and counts
my_load_conflict_data <- function(){
  conflicts <- read.csv("../data/2018-08-03-2020-01-19-Democratic_Republic_of_Congo.csv",
                        stringsAsFactors = FALSE)
  cols_to_keep <- c("event_date",
                    "event_type",
                    "sub_event_type",
                    "admin1",
                    "admin2")
  conflicts <- conflicts[, colnames(conflicts) %in% cols_to_keep]
  # parse English month names regardless of the user's locale;
  # bug fix: restore the caller's LC_TIME instead of clobbering it globally
  old_locale <- Sys.getlocale("LC_TIME")
  on.exit(Sys.setlocale("LC_TIME", old_locale), add = TRUE)
  Sys.setlocale("LC_TIME", "C")
  conflicts$event_date <- as.Date(conflicts$event_date, format = "%d %B %Y")
  nkivu <- conflicts[conflicts$admin1 == "Nord-Kivu", ]
  nkivu$counts <- 1
  confl_inc <- aggregate(counts ~ event_date, data = nkivu, FUN = "sum")
  return(confl_inc)
}
# ================================================= #
# ================================================= #
## aggregate data by week
## Aggregate a daily series into weekly sums.
##
## @param vector daily series (parameter name kept for compatibility)
## @return numeric vector of sums over complete 7-day stretches.
##   Bug fix: a trailing partial week is now dropped cleanly; the original
##   produced a single NA entry (it summed past the end of the input)
##   when length(vector) was not a multiple of 7 and below 7.
my_make_weekly <- function(vector){
  n_weeks <- length(vector) %/% 7L
  vapply(seq_len(n_weeks),
         function(w) sum(vector[(7 * (w - 1) + 1):(7 * w)]),
         numeric(1))
}
142226bd5a175177802852914c508dff835274a6 | 3d06553cd3588cfe2b1c862a2c2331c6cd8ec5d6 | /code/misc_plots/twodim_regression.r | ab91425c531ab97b6729ad5d89e97b07f5325127 | [] | no_license | wfithian/optimal-inference | 696eb914bb89c75135996cdc6eb31a169510b06a | 7c249c4fa0082b92f14de7b59a27484e315461df | refs/heads/master | 2020-05-30T15:49:12.143712 | 2017-04-15T08:24:37 | 2017-04-15T08:24:37 | 34,808,308 | 11 | 7 | null | null | null | null | UTF-8 | R | false | false | 2,538 | r | twodim_regression.r |
# Fig 5a
pdf("fullvred.pdf",height=4.3,width=4)
par(mar=c(4.1,4.1,3.1,0.1))
y <- t(c(2.9,2.5))
plot(y,xlim=c(-5,5),ylim=c(-5,5),xlab=expression(Y[1]),ylab=expression(Y[2]),
main="Conditioning Sets")#"Full vs. Reduced Model: First Step",asp=1)
polygon(c(0,10,10),c(0,10,-10),lty=2,col="#F4E918")
polygon(c(0,-10,-10),c(0,10,-10),lty=2,col="#F4E918")
abline(h=0)
abline(v=0)
text(2,.5,"A")
text(y+c(.3,-.4),labels="Y")
lines(c(y[2],10),c(y[2],y[2]),lwd=2,col="brown")
lines(c(-y[2],-10),c(y[2],y[2]),lwd=2,col="brown")
points(y,pch=16)
dev.off()
# Unnormalized density of Y1 on the selection event |Y2| < |Y1|:
# P(|Y2| < |x|) * phi(x) for independent standard normals.
unnorm.cond.dens <- function(x) {
  (1 - 2 * pnorm(-abs(x))) * dnorm(x)
}
# Normalized conditional density of Y1 given |Y1| > |Y2|; the constant is
# obtained by numerical integration over the positive half-line (cut at 10,
# where the tail mass is negligible).
cond.dens <- function(x) {
  norm.const <- integrate(function(u) unnorm.cond.dens(u), 0, 10)$value
  unnorm.cond.dens(x) / 2 / norm.const
}
# Fig 5b
pdf("fullvredNulls.pdf",height=4.3,width=4)
par(mar=c(4.1,4.1,3.1,0.1),yaxs="i")
x <- seq(-6,6,.01)
plot(x,(abs(x)>2.5)*dnorm(x)/2/pnorm(-2.5),ylim=c(0,1.4),lty=1,
col="brown",type="l",
main="Conditional Null Distributions",
ylab="Density",xlab=expression(Y[1]))
polygon(c(x,0),c(cond.dens(x),0),lty=2,col="#F4E918")
lines(x,(abs(x)>2.5)*dnorm(x)/2/pnorm(-2.5),col="brown")
legend("topleft",legend=c("Saturated Model","Selected Model","Observed Value"),lty=1:3,bg="white", col=c("brown","black","black"))
#norm.y <- sqrt(sum(y^2))
#curve((abs(x)>2.5)*dbeta((x/norm.y)^2,.5,.5)*abs(x/norm.y)/norm.y/2,-norm.y,norm.y,add=T)
abline(v=2.9,lty=3)
dev.off()
# p-values for selected and saturated models
2*integrate(function(x) cond.dens(x), 2.9,10)$value
pnorm(-2.9)/pnorm(-2.5)
## Monte-Carlo comparison of the two p-value constructions at mu = (5, 5):
## simulate Y, keep draws on the selection event |Y1| > |Y2|, compute both
## p-values and report the empirical rejection rates at level 0.05.
B <- 10000
mu <- c(5, 5)
# collect rows in a preallocated list and bind once at the end instead of
# growing `pvals` with rbind() inside the loop (which copies the whole
# matrix on every append -- O(B^2) work)
pval_rows <- vector("list", B)
for (b in seq_len(B)) {
  y <- mu + rnorm(2)
  if (abs(y[1]) > abs(y[2])) {
    pval_rows[[b]] <- c(
      2 * integrate(function(x) cond.dens(x), abs(y[1]), 10)$value,  # selected model
      pnorm(-abs(y[1])) / pnorm(-abs(y[2]))                          # saturated model
    )
  }
}
pvals <- do.call(rbind, pval_rows)   # NULL entries (unselected draws) are dropped
# empirical size of the two tests
mean(pvals[, 1] < .05)
mean(pvals[, 2] < .05)
#hist(cos(2*pi*runif(1000000)),freq=F,breaks=seq(-1,1,.025))
#curve(dbeta(x^2,.5,.5)*abs(x),-1,1,add=T)
pdf("fullvredXty.pdf",height=4.3,width=4)
par(mar=c(4.1,4.1,2.1,0.1))
y <- t(c(2.9,2.5))
plot(y,xlim=c(-5,5),ylim=c(-5,5),
xlab=expression(paste(X[1],"' Y",sep="")),
ylab=expression(paste(X[2],"' Y",sep="")),
main="Full vs. Reduced Model: First Step",asp=1)
polygon(c(0,10,10),c(0,10,-10),lty=2,col="#F0F0FF")
polygon(c(0,-10,-10),c(0,10,-10),lty=2,col="#F0F0FF")
abline(h=0)
abline(v=0)
text(2,.5,"A")
text(y+c(.3,-.4),labels="X'Y")
lines(c(y[2],10),c(y[2],y[2]),lwd=2,col="blue")
lines(c(-y[2],-10),c(y[2],y[2]),lwd=2,col="blue")
points(y)
dev.off()
|
76ef02df83ed15876232315cf9a2c0bdb7e2c12d | ed2601d98727354cd8f5bdd7dcac7bc06197dfbe | /linear-classification/labo/Labo3-Sol.R | c7007ce0a4703fb644c500c6b1e4e5e3f79f0d78 | [] | no_license | agserrano/bdma-2017 | f9c8805ee7ffae08c4cfa5fb4c3d9ba2027e56ab | 9dbdbbf54e3a605329c5c0b8f61f1e7bf85a5bbd | refs/heads/master | 2021-01-19T12:23:26.817000 | 2017-04-04T19:11:03 | 2017-04-04T19:11:03 | 82,307,188 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 76 | r | Labo3-Sol.R | # Labo3
setwd("/Users/tonet/Documents/posgrado/linear-classification/labo")
|
216f804247fa0af10db885e723ee88d6e18ae98a | d0def2fa0dc0d9de733965604256d7659932269d | /巨人前车/ggplot2/chapter5——ColorBrewer.r | 76814a810a2462b8dee31cc5249dcf4c643cf6a6 | [] | no_license | wangpanqiao/NoteBook-1 | a26f9ebad43a29ff2143c457c383795f879259f9 | 3beceebaa9a982c57537fe2cb4415f6bb5a904ce | refs/heads/master | 2020-12-09T05:08:08.428661 | 2019-07-10T08:20:57 | 2019-07-10T08:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,265 | r | chapter5——ColorBrewer.r | ##《R语言商务图表与数据可视化》
##课程讲师——杜雨
##课程机构——天善智能
########第五章——R语言与数据可视化用色规范与标准########
##5.1 R语言基础预设配色系统
#1、R语言基础预定义颜色
library("scales")
library("ggplot2")
colors()
show_col(colors(),labels = FALSE)
show_col(sample(colors(),100),labels = FALSE)
colors()[1:10]
sample(colors(),100)
ggplot(mpg,aes(class,displ))+
geom_bar(stat="identity",fill="steelblue")
ggplot(mpg,aes(class,displ))+
geom_bar(aes(fill = class),stat="identity")
palette <- sample(colors(),7)
ggplot(mpg,aes(class,displ))+
geom_bar(aes(fill = class),stat="identity") +
scale_fill_manual(values = palette)
length(unique(mpg$class))
length(palette)
#2、五大预设配色版
show_col(sample(rainbow(1000),replace = FALSE),labels = FALSE)
show_col(sample(heat.colors(1000),replace = FALSE),labels = FALSE)
show_col(sample(terrain.colors(1000),replace = FALSE),labels = FALSE)
show_col(sample(topo.colors(1000),replace = FALSE),labels = FALSE)
show_col(sample(cm.colors(1000),replace = FALSE),labels = FALSE)
par(mfrow=c(1,5),mar=c(0.5,0.5,2,0.5),xaxs="i",yaxs="i")
n<-1000
barplot(rep(1,times=n),col=rainbow(n),border=rainbow(n),horiz=T,axes=F,main="Rainbow Color")
barplot(rep(1,times=n),col=heat.colors(n),border=heat.colors(n),horiz=T,axes=F,main="Heat.Colors")
barplot(rep(1,times=n),col=terrain.colors(n),border=terrain.colors(n),horiz=T,axes=F,main="Terrain.Colors")
barplot(rep(1,times=n),col=topo.colors(n),border=topo.colors(n),horiz=T,axes=F,main="Topo.Colors")
barplot(rep(1,times=n),col=cm.colors(n),border=cm.colors(n),horiz=T,axes=F,main="Cm.Colors")
dev.off()
ggplot(mpg,aes(class,displ)) + geom_bar(aes(fill=class),stat="identity") + scale_fill_manual(values = rainbow(7))
ggplot(mpg,aes(class,displ)) + geom_bar(aes(fill=class),stat="identity") + scale_fill_manual(values = heat.colors(7))
ggplot(mpg,aes(class,displ)) + geom_bar(aes(fill=class),stat="identity") + scale_fill_manual(values = terrain.colors(7))
ggplot(mpg,aes(class,displ)) + geom_bar(aes(fill=class),stat="identity") + scale_fill_manual(values = topo.colors(7))
ggplot(mpg,aes(class,displ)) + geom_bar(aes(fill=class),stat="identity") + scale_fill_manual(values = cm.colors(7))
#3、colorRampPalette函数自定义色板
patellte <- colorRampPalette(c("red", "green","orange",'blue','yellow'))
show_col(patellte(100000),labels = FALSE,border = NA)
ggplot(mpg,aes(class,displ)) + geom_bar(aes(fill=class),stat="identity") + scale_fill_manual(values = patellte(n = 7))
#4、gray(0:n/n)
show_col(gray(0:10000/10000),labels = FALSE,border = NA)
ggplot(mpg,aes(class,displ)) + geom_bar(aes(fill = class),stat="identity") + scale_fill_manual(values = gray(0:6/6))
#5、hsv函数
x <- seq(1,4)/4
ndx <- expand.grid(x, x, x)
mycolor <- hsv(ndx[,3],ndx[,2],ndx[,1],alph = .5)
show_col(mycolor,labels = FALSE)
ggplot(mpg,aes(class,displ)) +
geom_bar(aes(fill=class),stat="identity") +
scale_fill_manual(values = sample(mycolor,7))
#### Main parameters of the colour scales.
#### Bug fix: these were bare symbol names, which raise
#### "object '...' not found" errors when the script is sourced;
#### turned into comments (translated from the Chinese originals).
# limits    - allowed categories (discrete) / value range (continuous)
# breaks    - positions at which tick marks / legend keys are shown
# labels    - legend text labels matching the break positions
# values    - colours (alpha, size, shape, line type, ...) assigned to each
#             category; only used with manual scales (scale_xxx_manual())
#关于默认情况下显示的颜色与分类变量子类别顺序如何匹配,是否可以自定义?
#1、如果分类变量不是有序因子变量:
#1.1 默认情况下values顺序与类别变量的名称首字母顺序一一对应
mydata1 <- data.frame(
name = LETTERS[1:5],
value = runif(5,1,100)
)
color = colors()[sample(1:100,5)]
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill= name),stat="identity") +
scale_fill_manual(values = color )
unique(mydata1$name)
show_col(color,labels=F)
#1.2 如果values对应的色值向量是一个命名向量,且名称为类别变量的类别名称,则最终颜色会与类别一一对应
color <- c("red","grey","orange","yellow","green")
names(color) <- LETTERS[sample(1:5,5)]
color <- c('B' = 'red' , 'A' = 'grey' , 'D' = 'orange' , 'E' = 'yellow' , 'C' = 'green')
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_manual(values = color )
names(color)
show_col(color,labels=T)
#2、 有序因子变量情况下,图例顺序与因子顺序一致,颜色顺序仍然符合上述规则:
color <- c("red","grey","orange","yellow","green")
show_col(color,labels=T)
mydata1$class <- ordered(mydata1$name,levels = LETTERS[c(3,2,1,5,4)])
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=class),stat="identity") +
scale_fill_manual(values = color)
show_col(color,labels=T)
#####5.2 配色系统及扩展包接口调用#####
#Diverging(div)
#BrBG, PiYG, PRGn, PuOr, RdBu, RdGy, RdYlBu, RdYlGn, Spectral
#Qualitative(qual)
#Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
#Sequential(seq)
#Blues, BuGn, BuPu, GnBu, Greens, Greys, Oranges, OrRd,
#PuBu, PuBuGn, PuRd, Purples, RdPu, Reds, YlGn, YlGnBu, YlOrBr, YlOrRd
mydata1 <- data.frame(
name = LETTERS[1:6],
value = runif(6,1,100)
)
#使用type+index进行指定色盘
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_brewer(type = 'div',palette = 1 , direction = 1)
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_brewer(type = 'qual',palette = 1 , direction = 1)
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_brewer(type = 'seq',palette = 1 , direction = 1)
#使用name指定色板
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_brewer(palette = 'Blues' , direction = 1)
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_brewer(palette = 'BuGn' , direction = 1)
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_brewer(palette = 'Greens' , direction = 1)
#色盘顺序指定——direction=1,默认顺序,-1则相反
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_brewer(palette = 'Greens' , direction = -1)
#离散色盘连续化封装函数——scale_fill_distiller
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill = value),stat="identity") +
scale_fill_distiller(palette = 'Greens' , direction = 1)
###5.2 scales::brewer_pal()
brewer_pal(type = "seq", palette = 1, direction = 1)
#
show_col(brewer_pal()(9))
show_col(brewer_pal("div")(5))
show_col(brewer_pal(palette = "Greens")(5))
# Can use with gradient_n to create a continous gradient
cols <- brewer_pal("div")(5)
show_col(gradient_n_pal(cols)(seq(0, 1, length.out = 1000)), labels = FALSE, borders =NA)
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_manual(values = brewer_pal()(6))
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_manual(values = brewer_pal(direction = -1)(6))
######5.3 R语言RcolorBrewer在线配色网站及其使用详解######
library('RColorBrewer')
#查看色板类型
display.brewer.all(type = "all") #查看所有色板
display.brewer.all(type = "seq") #查看单色渐变色板
display.brewer.all(type = "div") #查看双色渐变色板
display.brewer.all(type = "qual") #查看离散(分类)色板
#查看指定主题色板
display.brewer.pal(9, "BuGn") ###以可视化面板的形式树池色板
brewer.pal(9,"BuGn") ###以色值向量的形式输出文本向量
display.brewer.pal(9,"Blues")
#查看色板在图形中的效果:
par(mfrow=c(1,5),mar=c(1,1,2,1),xaxs="i", yaxs="i")
mycolors<-brewer.pal(9, "BuGn")
barplot(rep(1,times=9),col=mycolors,border=mycolors,axes=FALSE, horiz=T,main="MyColors of BuGn ")
mycolors<-brewer.pal(9, "OrRd")
barplot(rep(1,times=9),col=mycolors,border=mycolors,axes=FALSE, horiz=T,main="MyColors of OrRd")
mycolors<-brewer.pal(9, "YlGn")
barplot(rep(1,times=9),col=mycolors,border=mycolors,axes=FALSE, horiz=T,main="MyColors of YlGn")
mycolors<-brewer.pal(9, "Oranges")
barplot(rep(1,times=9),col=mycolors,border=mycolors,axes=FALSE, horiz=T,main="MyColors of Oranges")
mycolors<-brewer.pal(9, "Blues")
barplot(rep(1,times=9),col=mycolors,border=mycolors,axes=FALSE, horiz=T,main="MyColors of Blues")
dev.off()
#组合色板
b1<-brewer.pal(9, "BuGn");b2<-brewer.pal(9,"Blues")
c<-c(b1[c(1,3,5,7,9)],b2[c(2,4,6,8)])
show_col(c,labels=F)
c<-c(50,30,50,70,90,40)
names(c)<-LETTERS[1:6]
library(plyr)
mydata<-data.frame(c)
ggplot(data=mydata,aes(x=factor(1),y=c,fill=row.names(mydata)))+
geom_bar(stat="identity",width=1,col="white")+
coord_polar(theta = "y",start=0)+
scale_fill_brewer(palette="Greens")+
guides(fill=guide_legend(title=NULL)) +
theme_void()
#####5.4 ggthemes主题包简介#####
library("ggthemes")
m1<-economist_pal()(6)
show_col(m1)
mydata$class <- row.names(mydata)
ggplot(data=mydata,aes(x=factor(1),y=c,fill=class))+
geom_bar(stat="identity",width=1,col="white")+
coord_polar(theta = "y",start=0)+
theme(panel.grid = element_blank(),
panel.background = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank())+
scale_fill_economist()+
guides(fill=guide_legend(reverse=TRUE,title=NULL))
m2<-wsj_pal()(6)
show_col(m2)
ggplot(data=mydata,aes(x=factor(1),y=c,fill=class))+
geom_bar(stat="identity",width=1,col="white")+
coord_polar(theta = "y",start=0)+
theme(panel.grid = element_blank(),
panel.background = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank())+
scale_fill_wsj()+
guides(fill=guide_legend(reverse=TRUE,title=NULL))
#WSJ背景色
ggthemes_data$wsj$bg
gray green blue brown
"#efefef" "#e9f3ea" "#d4dee7" "#f8f2e4"
show_col(ggthemes_data$wsj$bg)
#WSJ主题色
ggthemes_data$wsj$palettes #主题色
$rgby
yellow red blue green
"#d3ba68" "#d5695d" "#5d8ca8" "#65a479"
$red_green
green red
"#088158" "#ba2f2a"
$black_green
black gray ltgreen green
"#000000" "#595959" "#59a77f" "#008856"
$dem_rep
blue red gray
"#006a8e" "#b1283a" "#a8a6a7"
$colors6
red blue gold green orange black
"#c72e29" "#016392" "#be9c2e" "#098154" "#fb832d" "#000000"
show_col(ggthemes_data$wsj$palettes$rgby)
show_col(ggthemes_data$wsj$palettes$red_green)
show_col(ggthemes_data$wsj$palettes$black_green)
show_col(ggthemes_data$wsj$palettes$dem_rep)
show_col(ggthemes_data$wsj$palettes$colors6)
mytheme <- ggthemes_data
#economist背景色:
ggthemes_data$economist$bg
#economist主题色:
ggthemes_data$economist$fg
###
scale_colour/fill_economist(stata = FALSE, ...)
scale_colour/fill_wsj(stata = FALSE, ...)
mydata1 <- data.frame(
name = LETTERS[1:6],
value = runif(6,1,100)
)
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_economist() +
theme_economist()
ggplot(mydata1,aes(name,value)) +
geom_bar(aes(fill=name),stat="identity") +
scale_fill_wsj() +
theme_wsj()
#####5.5 其他配色类扩展包简介——ggtech、ggthemer#####
###5.5.1 ggthch
#devtools::install_github('hadley/ggplot2')
library(ggplot2)
library(ggtech)
data<-diamonds[diamonds$color %in%LETTERS[4:7], ]
#airbnb style
ggplot(data,aes(carat,fill=color))+geom_histogram(bins=30)+
theme_airbnb_fancy() +
scale_fill_tech(theme="airbnb") +
labs(title="Airbnb theme",
subtitle="now with subtitles for ggplot2 >= 2.1.0")
#etsy style
ggplot(data,aes(carat,fill=color))+geom_histogram(bins=30)+
theme_tech(theme="facebook") +
scale_fill_tech(theme="facebook") +
labs(title="Facebook theme",
subtitle="now with subtitles for ggplot2 >= 2.1.0")
#google style
ggplot(data,aes(carat,fill=color))+geom_histogram(bins=30)+
theme_tech(theme="google") +
scale_fill_tech(theme="google") +
labs(title="Google theme",
subtitle="now with subtitles for ggplot2 >= 2.1.0")
#tiwtter style
ggplot(data,aes(carat,fill=color))+geom_histogram(bins=30)+
theme_tech(theme="twitter") +
scale_fill_tech(theme="twitter") +
labs(title="Twitter theme",
subtitle="now with subtitles for ggplot2 >= 2.1.0")
#5.5.2 ggthemr
devtools::install_github('cttobin/ggthemr')
library('ggthemr')
###启动主题
#ggthemr('dust')
ggthemr('flat')
#ggthemr('flat dark')
#ggthemr('camoflauge')
#ggthemr('chalk')
#ggthemr('copper')
#ggthemr('earth')
#ggthemr('fresh')
#ggthemr('grape')
#ggthemr('grass')
#ggthemr('greyscale')
#ggthemr('light')
#ggthemr('lilac')
#ggthemr('pale')
#ggthemr('sea')
#ggthemr('sky')
#ggthemr('solarized')
ggplot(data,aes(carat,fill=color))+geom_histogram(bins=30)
###回复系统默认主题
ggthemr_reset()
ggplot(data,aes(carat,fill=color))+geom_histogram(bins=30)
#5.5.3 ggsci
#install.packages("devtools") #官方镜像
#devtools::install_github("road2stat/ggsci") #github仓库
library('ggsci')
library("scales")
show_col(pal_d3("category10")(10))
show_col(pal_d3("category20")(20))
show_col(pal_d3("category20b")(20))
show_col(pal_d3("category20c")(20))
library("ggsci")
library("ggplot2")
library("gridExtra")
data("diamonds")
p1 = ggplot(subset(diamonds, carat >= 2.2),
aes(x = table, y = price, colour = cut)) +
geom_point(alpha = 0.7) +
geom_smooth(method = "loess", alpha = 0.05, size = 1, span = 1) +
theme_bw()
p2 = ggplot(subset(diamonds, carat > 2.2 & depth > 55 & depth < 70),
aes(x = depth, fill = cut)) +
geom_histogram(colour = "black", binwidth = 1, position = "dodge") +
theme_bw()
###NPG
p1_npg = p1 + scale_color_npg()
p2_npg = p2 + scale_fill_npg()
grid.arrange(p1_npg, p2_npg, ncol = 2)
###AAAS
p1_aaas = p1 + scale_color_aaas()
p2_aaas = p2 + scale_fill_aaas()
grid.arrange(p1_aaas, p2_aaas, ncol = 2)
###NEJM
p1_nejm = p1 + scale_color_nejm()
p2_nejm = p2 + scale_fill_nejm()
grid.arrange(p1_nejm, p2_nejm, ncol = 2)
###Lancet
p1_lancet = p1 + scale_color_lancet()
p2_lancet = p2 + scale_fill_lancet()
grid.arrange(p1_lancet, p2_lancet, ncol = 2)
###JAMA
p1_jama = p1 + scale_color_jama()
p2_jama = p2 + scale_fill_jama()
grid.arrange(p1_jama, p2_jama, ncol = 2)
###JCO
p1_jco = p1 + scale_color_jco()
p2_jco = p2 + scale_fill_jco()
grid.arrange(p1_jco, p2_jco, ncol = 2)
###UCSCGB
p1_ucscgb = p1 + scale_color_ucscgb()
p2_ucscgb = p2 + scale_fill_ucscgb()
grid.arrange(p1_ucscgb, p2_ucscgb, ncol = 2)
###D3
p1_d3 = p1 + scale_color_d3()
p2_d3 = p2 + scale_fill_d3()
###LocusZoom
p1_locuszoom = p1 + scale_color_locuszoom()
p2_locuszoom = p2 + scale_fill_locuszoom()
grid.arrange(p1_locuszoom, p2_locuszoom, ncol = 2)
grid.arrange(p1_d3, p2_d3, ncol = 2)
###IGV
p1_igv_default = p1 + scale_color_igv()
p2_igv_default = p2 + scale_fill_igv()
grid.arrange(p1_igv_default, p2_igv_default, ncol = 2)
###UChicago
p1_uchicago = p1 + scale_color_uchicago()
p2_uchicago = p2 + scale_fill_uchicago()
grid.arrange(p1_uchicago, p2_uchicago, ncol = 2)
###Star Trek
p1_startrek = p1 + scale_color_startrek()
p2_startrek = p2 + scale_fill_startrek()
grid.arrange(p1_startrek, p2_startrek, ncol = 2)
###Tron Legacy
p1_tron = p1 + scale_color_tron() +
theme_dark() +
theme(panel.background = element_rect(fill = "#2D2D2D"),
legend.key = element_rect(fill = "#2D2D2D"))
p2_tron = p2 + theme_dark() + theme(
panel.background = element_rect(fill = "#2D2D2D")) +
scale_fill_tron()
grid.arrange(p1_tron, p2_tron, ncol = 2)
###Futurama
p1_futurama = p1 + scale_color_futurama()
p2_futurama = p2 + scale_fill_futurama()
grid.arrange(p1_futurama, p2_futurama, ncol = 2)
###Rick and Morty
p1_rickandmorty = p1 + scale_color_rickandmorty()
p2_rickandmorty = p2 + scale_fill_rickandmorty()
grid.arrange(p1_rickandmorty, p2_rickandmorty, ncol = 2)
###The Simpsons
p1_simpsons = p1 + scale_color_simpsons()
p2_simpsons = p2 + scale_fill_simpsons()
grid.arrange(p1_simpsons, p2_simpsons, ncol = 2)
###Continuous Color Palettes
library("reshape2")
data("mtcars")
cor = cor(unname(cbind(mtcars, mtcars, mtcars, mtcars)))
cor_melt = melt(cor)
p3 = ggplot(cor_melt,
aes(x = Var1, y = Var2, fill = value)) +
geom_tile(colour = "black", size = 0.3) +
theme_bw() +
theme(axis.title.x = element_blank(),
axis.title.y = element_blank())
###GSEA
p3_gsea = p3 + scale_fill_gsea()
p3_gsea_inv = p3 + scale_fill_gsea(reverse = TRUE)
grid.arrange(p3_gsea, p3_gsea_inv, ncol = 2)
|
8c045ebfaa842f527db247eeca32c098f0352995 | 813c0351a359a10f376b81b1f4ef22c2b047d660 | /man/nmfgpu4R.Rd | ac6b764d82b1f9889cba3a27b58b2d0b9b999028 | [] | no_license | cran/nmfgpu4R | d8db65b9abdaaa82c73cbbbfea663235ddb9c380 | b1bf243def94ab8c03fe8f5ace8c54fe25a4baf6 | refs/heads/master | 2021-01-21T14:44:03.878761 | 2016-10-17T12:41:21 | 2016-10-17T12:41:21 | 57,228,913 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 636 | rd | nmfgpu4R.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nmfgpu4R.R
\docType{package}
\name{nmfgpu4R}
\alias{nmfgpu4R}
\alias{nmfgpu4R-package}
\title{R binding for computing non-negative matrix factorizations using CUDA}
\description{
R binding for the libary \emph{nmfgpu} which can be used to compute Non-negative Matrix Factorizations (NMF) using CUDA hardware
acceleration.
}
\details{
The main function to use is \code{\link{nmf}} which can be configured using various arguments.
In addition to it a few helper functions are provided, but they aren't necessary for using \code{\link{nmf}}.
}
|
7042b998de69184ff9e41cd6bf99857982af067a | e04c0d423fde5be2567111b6983cc91e63c93232 | /R/create_job.R | 53aea25608de2275dd00ce5a50439f69f49ed609 | [] | no_license | RafiKurlansik/bricksteR | b42b3b3556ef3394b7e7801568a8e228083ad336 | 9199ab34dda462601186c25cf8655483f0bbe408 | refs/heads/master | 2022-10-28T14:35:21.875280 | 2022-10-06T15:36:30 | 2022-10-06T15:36:30 | 227,508,502 | 25 | 6 | null | 2021-07-15T11:59:22 | 2019-12-12T03:04:36 | R | UTF-8 | R | false | false | 6,021 | r | create_job.R | #' Create a new Job on Databricks
#'
#' This function will create a new job on Databricks, but will not run it. To
#' run a job, see \code{\link{run_job}} or \code{\link{runs_submit}}.
#'
#' The API endpoint for creating a job is '2.0/jobs/create'. For all details
#' on API calls please see the official documentation at
#' \url{https://docs.databricks.com/dev-tools/api/latest/}.
#'
#' @param name A string representing the name of the job. It is encouraged
#' to choose a unique name for each job.
#' @param notebook_path A string representing the path to a Databricks notebook in the
#' workspace.
#' @param file The path to a local .R or .Rmd file. Will be imported to the
#' workspace at the \emph{notebook_path}.
#' @param job_config A JSON formatted string or file specifying the details of the job, i.e., the
#' name, cluster spec, and so on.
#' @param workspace A string representing the web workspace of your Databricks
#' instance. E.g., "https://eastus2.azuredatabricks.net" or
#' "https://demo.cloud.databricks.com".
#' @param token A valid authentication token generated via User Settings in
#' Databricks or via the Databricks REST API 2.0. If none is provided, netrc will be used.
#' @param verbose If true, will pretty print the success or failure of the
#' request and add a `job_id` variable to the R environment. Defaults to TRUE.
#' @param ... additional arguments to be passed, i.e., overwrite = 'false' when
#' importing a file to run as a job.
#' @return A list with two elements - the complete API response and the job ID.
#' @examples
#' # Default JSON used
#' create_job(path = "/Shared/R/brickster_tutorial", # A notebook in the workspace
#' workspace = "https://dbc-z64b06b4-d212.cloud.databricks.com", # The workspace of your Databricks instance
#' token = "dapi30912309sdfdsa9iu09") # The valid auth token
#'
#' # Passing custom JSON
#' job_config <- '{"name": "New R Job",
#' "new_cluster": {
#' "spark_version": "7.3.x-scala2.12",
#' "node_type_id": "i3.xlarge",
#' "aws_attributes": {
#' "availability": "ON_DEMAND"
#' },
#' "num_workers": 2,
#' "email_notifications": {
#' "on_start": [],
#' "on_success": [],
#' "on_failure": []
#' },
#' "notebook_task": {
#' "notebook_path": "/Shared/R/brickster_tutorial"
#' }
#' }
#' }'
#'
#' # Specifying the path now unnecessary
#' create_job(job_config,
#' workspace = "https://dbc-z64b06b4-d212.cloud.databricks.com",
#' token = "dapi310240980a9dgqwebdsfadsf21")
create_job <- function(name = "R Job",
file = NULL,
notebook_path,
job_config = "default",
workspace,
token = NULL,
verbose = T,
...) {
# Import R file to workspace if needed
if (!is.null(file)) {
import_response <- import_to_workspace(file = file,
notebook_path = notebook_path,
overwrite = ...,
workspace = workspace,
token = token,
verbose = F)
# If import fails, exit
if (import_response$status_code[1] != 200) {
return(message(paste0(
"Unable to import file. Please check the response code:\n\n",
jsonlite::prettify(import_response)
)))
}
}
# Check for job config in JSON file
if (file.exists(job_config)) {
job_config <- toJSON(fromJSON(job_config), auto_unbox = T)
}
# Default small cluster spec
if (job_config == "default") {
job_config <- paste0('{
"name": "', name, '",
"new_cluster": {
"spark_version": "7.3.x-scala2.12",
"node_type_id": "i3.xlarge",
"num_workers": 2
},
"email_notifications": {
"on_start": [],
"on_success": [],
"on_failure": []
},
"notebook_task": {
"notebook_path": "', notebook_path, '"
}
}')
}
# Make request, using netrc by default
if (is.null(token)) {
use_netrc <- httr::config(netrc = 1)
res <- httr::with_config(use_netrc, {
httr::POST(url = paste0(workspace, "/api/2.0/jobs/create"),
httr::content_type_json(),
body = job_config)})
}
else {
# Authenticate with token
headers <- c(
Authorization = paste("Bearer", token)
)
# Using token for authentication instead of netrc
res <- httr::POST(url = paste0(workspace, "/api/2.0/jobs/create"),
httr::add_headers(.headers = headers),
httr::content_type_json(),
body = job_config)
}
# Handling successful API request
if (res$status_code[1] == 200) {
job_id <- jsonlite::fromJSON(rawToChar(res$content))[[1]]
if (verbose == T) {
message(paste0(
"Status: ", res$status_code[1],
"\nJob \"", name, "\" created.",
"\nJob ID: ", job_id
))
}
}
# Handling unsuccesful request
else {
job_id <- NA
if (verbose == T) {
message(paste0(
"Status: ", res$status_code[1],
"\nThe request was not successful:\n\n", jsonlite::prettify(res)
))
}
}
# Return response
reslist <- list(response = res,
job_id = job_id)
}
|
99e0a670e0207251a4b0ed0ed568b527b034bf34 | dfd5e44293393f83019bb905bb96cb1d6353903a | /R/02_combined-driver-effect-plot.R | 1b5c18c1ffb524b32c62e8e56ddd477bbbad00c2 | [] | no_license | jdunic/local-marine-meta | efe61516e0492d0bfa9532e76b256b639aa92689 | b5ba23d901f8d3e785a35b60dbe93f40966d1434 | refs/heads/master | 2021-03-19T09:33:43.926716 | 2019-02-20T22:00:56 | 2019-02-20T22:00:56 | 43,651,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,509 | r | 02_combined-driver-effect-plot.R | #Libraries
library(tidyverse)
library(ggplot2)
library(metafor)
#the data
source('00_functions.R')
fl_combined <- readr::read_csv("../Data_outputs/fl_combined.csv") %>%
mutate(Study.ID = factor(Study.ID)) %>%
# This study was a duplicate
filter(Study.ID != 'Shimanaga') %>%
# Keller study
filter(Study.ID != '172') %>%
# Study 136 - Enfermeria should have been classified as having an event -
# 'shrimp farming' and 'tidal restriction'
filter(Site != 'Enfermeria')
no_event2 <- filter(fl_combined, Event != 'Yes')
#The model
drivers_unscaled <-
rma.mv(yi = yi_SppR_ROM, V = vi_SppR_ROM,
data = no_event2, #%>% mutate(scaled_invs = mean_invs * 10^-3),
random = ~ 1 | Study.ID,
mods = ~ Duration * (mean_invs + sliced_ltc + mean_nuts))
#Now setup conditions for predictions
temp = c(-1, -0.5, -0.01, 0.01, 0.5, 1)
invs = seq(from = 0, to = 160000, by = 1000)
duration = as.vector(c(5, 10,15, 20), mode = 'integer')
nuts = seq(from = 0, to = 200, by = 2)
#make the predictions
prediction_df <-
crossing(duration, invs, temp, nuts) %>%
mutate(invs_dur = invs*duration, temp_dur = temp*duration, nuts_dur = nuts*duration)
prediction_mat <- (as.matrix(prediction_df))
dimnames(prediction_mat) <- NULL
#format for plotting
g_preds_raw <-
bind_cols(prediction_df,
predict.rma(object = drivers_unscaled, newmods = prediction_mat) %>%
as_data_frame())
beepr::beep()
g_preds <- g_preds_raw %>%
mutate(change = case_when(ci.ub < 0 ~ 'Loss', ci.lb > 0 ~ 'Gain', ci.lb < 0 & ci.ub > 0 ~ 'No change')) %>%
mutate(change = factor(change, level = c('Gain', 'No change', 'Loss'))) %>%
mutate(duration = stringr::str_c(duration, "years", sep=" ")) %>%
mutate(duration = factor(duration, levels = c('5 years', '10 years',
'15 years', '20 years')))
# %>%
#
# mutate(nuts_factor = case_when(nuts == nut_quantiles$`0%` ~ 'low (0)',
# nuts == nut_quantiles$`50%` ~ 'median (0.46)',
# nuts == nut_quantiles$`100%` ~ 'max (185)')) %>%
# mutate(temp = case_when(temp <= -0.5 ~ '< -0.5',
# temp > -0.5 & temp < 0 ~ '-0.5 to 0',
# temp > 0 & temp < 0.5 ~ '0 to -0.5',
# temp >= 0.5 ~ '> 0.5')) %>%
# mutate(temp = factor(temp, levels = c('< -0.5',
# '-0.5 to 0',
# '0 to -0.5',
# '> 0.5'))) %>%
# mutate(duration = case_when(duration == 5 ~ '5 years',
# duration == 20 ~ '20 years')) %>%
#Plot!
dev.new(width = 8.5, height = 5.5)
ggplot() +
theme(legend.background = element_blank(),
legend.key = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
plot.background = element_blank(),
strip.text.y = element_text(angle = 0),
axis.text.x = element_text(angle = 45, hjust = 1),
plot.title = element_text(hjust = 0.5, size = 10)) +
geom_raster(data = filter(g_preds), aes(x = invs, y = nuts,
fill = change, alpha=abs(pred)),
interpolate=TRUE) +
scale_fill_manual(values = c('#0571b0', 'grey90', '#ca0020'),
guide = guide_legend("Direction of\nrichness change")) +
scale_alpha(guide = guide_legend("Absolute\nmagnitude\n(LRR)")) +
facet_grid(duration ~ temp) +
xlab('\n Invasion potential') +
ylab('Nutrient use\n') +
labs(colour = 'LRR') +
guides(colour = guide_legend(override.aes = list(size = 3))) +
ggtitle(expression(" Temperature change ("*degree*"C)"))
grid.text('Figure 3', hjust = 6.75, vjust = 22.5)
beepr::beep()
dev.copy2pdf(file = '../figures/Figure_3.pdf', width = 8.5, height = 5.5)
####### marignal effects
library(modelr)
make_marg_data <- function(avar){
avar_x <- enquo(avar)
name_x <- quo_name(avar_x)
print(avar_x)
print(name_x)
# vars <- quos(mean_invs, sliced_ltc, mean_nuts)
dat <- no_event2 %>% data_grid(
Duration = seq_range(Duration, 3),
!!name_x := seq_range(!!avar_x, 200))
vars <- c("mean_invs", "sliced_ltc", "mean_nuts")
vars <- vars[vars!=name_x]
values <- map(vars, ~median(no_event2[[.]], na.rm=T))
names(values) <- vars
cbind(dat, values, list(var = name_x))
}
marg_data_frame <- bind_rows(
make_marg_data(mean_invs),
make_marg_data(sliced_ltc),
make_marg_data(mean_nuts)
)
pred_frame <- as_tibble(predict(drivers_unscaled, newdata = marg_data_frame))
######## A little animation
library(gganimate)
theme_set( theme_bw(base_size=17) +
theme(legend.background = element_blank(),
legend.key = element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
plot.background = element_blank(),
strip.text.y = element_text(angle = 0),
axis.text.x = element_text(angle = 45, hjust = 1)))
#make the predictions
surface_df <- no_event2 %>%
data_grid(duration = 20,
invs = seq_range(invs, 101),
temp = seq(-1,1,length.out = 101),
nuts = seq_range(nuts, 101))
crossing(duration, invs, temp, nuts)
surface_mat <- surface_df %>%
mutate(invs_dur = invs*duration, temp_dur = temp*duration, nuts_dur = nuts*duration) %>%
as.matrix()
dimnames(surface_mat) <- NULL
#format for plotting
g_surface <-
bind_cols(surface_df,
predict.rma(object = drivers_unscaled, newmods = surface_mat) %>%
as_tibble()) %>%
mutate(change = case_when(ci.ub < 0 ~ 'Loss', ci.lb > 0 ~ 'Gain', ci.lb < 0 & ci.ub > 0 ~ 'No change')) %>%
mutate(change = factor(change, level = c('Gain', 'No change', 'Loss')))
beepr::beep()
g_surface <- g_surface %>% group_by(nuts, invs) %>%
slice(1L) %>%
ungroup %>%
select(nuts, invs) %>%
mutate(pointgroup = 1:n()) %>%
right_join(g_surface)
saveRDS(g_surface, file = "../Data_outputs/g_surface.Rds")
#g_surface <- readRDS("../Data_outputs/g_surface.Rds")
anim <- ggplot(g_surface %>% mutate(temp = factor(round(temp,2))),
aes(x = invs, y = nuts, color = change,
fill = change, alpha=abs(pred),
group = pointgroup)) +
geom_raster(interpolate=TRUE) +
scale_fill_manual(values = c('#0571b0', 'grey90', '#ca0020'),
guide = guide_legend("Direction of\nRichness Change")) +
scale_alpha(guide = guide_legend("Absolute\nMagnitude\n(LRR)")) +
xlab('\n Invasion potential\n(Metric tonnes cargo in 2011') +
ylab('Nutrient use\n(Metric tonnes N and P fertilizer from 2007-2011\n') +
labs(colour = 'LRR') +
guides(colour = guide_legend(override.aes = list(size = 3))) +
transition_states(temp)+
enter_fade() +
exit_fade() +
ggtitle("Temperature Change: {closest_state} deg C per decade")
animate(anim, width = 700, height = 500, nframes=250)
anim_save("../figures/surface_anim.gif")
beepr::beep()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.