blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c8b15041ab6c400e4df6ebb30f6a9f7b55534e54
|
2221162d446fe0c9569e1a9c78c2e1e3d60fe98f
|
/Step3_Process_Copernicus_LAI_for_all_sites.R
|
06e5af8542f4dd79333cb1255561a27855b3b543
|
[] |
no_license
|
aukkola/PLUMBER2
|
f55a6e91a54b511c1efe457d4f6067c9b557cd20
|
93885013c48d4ae59f933fb253e9aa9ed8fad1fe
|
refs/heads/master
| 2022-10-26T15:13:27.535291
| 2022-10-18T22:12:47
| 2022-10-18T22:12:47
| 173,685,168
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,487
|
r
|
Step3_Process_Copernicus_LAI_for_all_sites.R
|
#NetCDF access, raster processing, rolling means, and parallel workers
library(ncdf4)
library(raster)
library(zoo)
library(parallel)
#clear R environment
#NOTE(review): rm(list=ls()) in a script is discouraged; prefer running in a
#fresh R session. Left unchanged to preserve the script's behaviour.
rm(list=ls(all=TRUE))
#Set path (where site LAI data lives)
path <- "/srv/ccrc/data04/z3509830/Fluxnet_data/All_flux_sites_processed/"
#Which fluxnet data to use?
flx_path <- "/srv/ccrc/data04/z3509830/Fluxnet_data/All_flux_sites_processed_PLUMBER2/"
#Source helper functions. Their definitions live outside this file;
#presumably sample_raster_NA samples a raster with NA handling and
#get_site_lai_for_year extracts one site's LAI values for a year -- TODO confirm
source(paste0(flx_path, "/scripts/functions/sample_raster_NA.R"))
source(paste0(flx_path, "/scripts/functions/get_site_lai_for_year.R"))
#Create output folder for met files with processed LAI time series
outdir <- paste0(flx_path, "/all_sites_no_duplicates/Nc_files/Met_with_LAI/")
#Get sites (one NetCDF met file per site)
site_files <- list.files(outdir, full.names=TRUE)
#Open file handles in write mode; an LAI variable is added to these files later
site_nc <- lapply(site_files, nc_open, write=TRUE)
#Get site codes from each file's "site_code" global attribute (varid=0)
site_codes <- sapply(site_nc, function(x) ncatt_get(x, varid=0, "site_code")$value)
### Get LAI data ###
#Set LAI path
lai_path <- "/srv/ccrc/data51/z3466821/LAI/M0040186/processed/Monthly"
#Get files (one monthly-maximum LAI file per year)
lai_files <- list.files(lai_path, pattern="LAI_monthlymax_", full.names=TRUE)
#Sanity check: should span 1999-2017, i.e. exactly 19 annual files
if(length(lai_files) != 19) stop("incorrect LAI files found")
#Set copernicus start and end year
copernicus_startyr <- 1999
copernicus_endyr <- 2017
#Cache directory for the per-year focal-mean LAI rasters written below
lai_outdir <- paste0(path, "/Copernicus_LAI_time_series/")
#Get site values for each time slice at a time
#(very slow to read in all the LAI data in one go)
#Initialise: will hold one LAI time series per site
site_tseries <- list()
#Get site coordinates from the NetCDF files
lat <- lapply(site_nc, ncvar_get, varid="latitude")
lon <- lapply(site_nc, ncvar_get, varid="longitude")
#One 1x2 (lon, lat) matrix per site for raster value extraction
coords <- mapply(function(lon, lat) matrix(c(lon, lat), ncol=2),
lon=lon, lat=lat, SIMPLIFY=FALSE)
#"NO-Blv" is so far north, no Copernicus data available
#Loop through LAI time slices (one annual file, 12 monthly layers each)
for (y in 1:length(lai_files)) {
#Print progress
print(paste0("Processing year ", y, "/", length(lai_files)))
#LAI output file (save as slow to process)
outfile_lai <- paste0(lai_outdir, "/Copernicus_monthly_LAI_focal_mean_",
y+copernicus_startyr-1, ".nc")
#Check if file already processed; if so, reuse the cached focal-mean raster
if (file.exists(outfile_lai)) {
lai_averaged <- brick(outfile_lai, varname="LAI")
#Else process and save to file
} else {
#Read LAI time slice for year y
lai <- brick(lai_files[y])
#Average LAI so that each pixel represents the mean of itself
#and the neigbouring pixels (3x3 moving-window focal mean, per layer)
lai_averaged <- brick(lapply(1:nlayers(lai), function(x)
focal(lai[[x]], w=matrix(1,3,3), fun=mean)))
writeRaster(lai_averaged, outfile_lai, varname="LAI", overwrite=TRUE)
}
#Initialise parallel cores (12 workers unless option 'cl.cores' overrides)
#NOTE(review): the cluster is created and destroyed every iteration because
#lai_averaged must be re-exported each year; creating it once outside the loop
#and re-exporting only lai_averaged would be cheaper -- confirm before changing.
cl <- makeCluster(getOption('cl.cores', 12))
clusterExport(cl, 'coords')
clusterExport(cl, 'lai_averaged')
clusterExport(cl, 'sample_raster_NA')
clusterExport(cl, 'get_site_lai_for_year')
clusterEvalQ(cl, library(raster))
#Initialise one empty vector per site on the first iteration only
if (y == 1) site_tseries <- lapply(1:length(site_nc), function(x) vector())
#Get values for the year for each site (quite slow so use parallel)
vals_for_year <- parLapply(cl, 1:length(site_nc), function(x) get_site_lai_for_year(coords[[x]], lai_averaged))
#Append year values to each site's running time series
site_tseries <- lapply(1:length(site_tseries), function(x) append(site_tseries[[x]], vals_for_year[[x]]))
stopCluster(cl)
}
#Check that no missing LAI values remain in any site's extracted time series.
#vapply() + anyNA() replaces sapply(..., function(x) any(is.na(x))):
#vapply guarantees a logical(1) result per element (type-stable even for an
#empty list, where sapply would return list()), and anyNA() short-circuits
#without allocating an intermediate logical vector.
if (any(vapply(site_tseries, anyNA, logical(1)))) {
  stop("Missing LAI values present")
}
##########################
### Loop through sites ###
##########################
#For each site: smooth its extracted LAI series, build a monthly climatology
#and running anomalies, extend the series to cover the site's record period,
#expand it to the site's native time step, and write it into the site's
#NetCDF file.
for (s in 1:length(site_nc)) {
print(paste0("Processing site ", s, "/", length(site_nc)))
#Set time stamps (do inside loop as gets adjusted below)
lai_time <- seq.Date(from=as.Date("1999-01-01"), by="month",
length.out=length(site_tseries[[s]]))
#Smooth LAI time series with spline (and cap negative values);
#on any smoothing error fall back to NA and skip the site below
smooth_lai_ts = tryCatch(smooth.spline(lai_time, site_tseries[[s]])$y,
error= function(e) NA)
if (all(is.na(smooth_lai_ts))) {
#NOTE(review): message lacks a space after "site" ("...process siteXX-Yyy");
#left unchanged here because it is runtime output
warning(paste0("could not process site", site_codes[s], ", missing values in LAI"))
next
}
#LAI cannot be negative; clamp spline undershoot to zero
smooth_lai_ts[smooth_lai_ts < 0] <- 0
##########################
### Create climatology ###
##########################
#Copernicus has 12 time steps per year: counted here as the number of time
#stamps whose text contains the start year (grepl on the date string)
no_tsteps <- length(which(grepl(copernicus_startyr, lai_time)))
#Initialise climatology vector: one mean value per step-of-year (month)
copernicus_clim <- vector(length=no_tsteps)
for (c in 1:no_tsteps) {
#Indices for whole years (the c-th time step of every year)
inds <- seq(c, by=no_tsteps, length.out=length(lai_time)/no_tsteps)
#Calculate average for time step
copernicus_clim[c] <- mean(smooth_lai_ts[inds])
}
###################################
### Calculate running anomalies ###
###################################
#Initialise
lai_clim_anomalies <- rep(NA, length(lai_time))
#Repeat climatology for whole time series
copernicus_clim_all <- rep_len(copernicus_clim, length(lai_time))
#Calculate running mean anomaly (+/- 6 months either side of each time step:
#a centred 12-step rolling mean, NA-padded at both ends by fill=NA)
anomaly <- rollmean(smooth_lai_ts - copernicus_clim_all, k=12, fill=NA)
#Add rolling mean anomaly to climatology
lai_clim_anomalies <- copernicus_clim_all + anomaly
#Check if remaining NA values from missing time steps, gapfill if found
#(the NA padding at the series ends from rollmean always triggers this branch)
if (any(is.na(lai_clim_anomalies))) {
#Find missing values
missing <- which(is.na(lai_clim_anomalies))
#Repeat climatology for all years and gapfill time series
clim_all_yrs <- rep(copernicus_clim, floor(length(lai_time)/no_tsteps))
lai_clim_anomalies[missing] <- clim_all_yrs[missing]
}
###################################
### Match with site time series ###
###################################
#Each year has 12 time steps
#Get timing info for site: the time units attribute string and numeric axis
site_start_time <- ncatt_get(site_nc[[s]], "time")$units
site_time <- ncvar_get(site_nc[[s]], "time")
#Time steps per day -- assumes the time axis is in seconds (TODO confirm)
site_tstep_size <- 86400 / (site_time[2] - site_time[1])
#Extract year from the units string; assumes the year occupies characters
#15-18, i.e. a fixed "seconds since YYYY-..." layout -- TODO confirm
startyr <- as.numeric(substr(site_start_time, start=15, stop=18))
obs_length <- length(site_time)
#Whole years in the site record (365-day years; round() absorbs leap days)
nyr <- round(obs_length/(site_tstep_size*365))
endyr <- startyr + nyr - 1
#Add climatological values to smoothed lai time series
#Overwrite original time series with new extended data
#If start year earlier than Copernicus: prepend one 12-value climatology copy
#per missing year, and extend the time axis to match
if (startyr < copernicus_startyr) {
#LAI data
lai_clim_anomalies <- append(rep(copernicus_clim, copernicus_startyr - startyr),
lai_clim_anomalies)
#Time vector
lai_time <- append(seq.Date(as.Date(paste0(startyr, "-01-01")), by="month",
length.out=no_tsteps * (copernicus_startyr - startyr)),
lai_time)
}
#If end year later than Copernicus
#NOTE(review): this branch is NOT symmetric with the start-year branch above.
#It appends to smooth_lai_ts (discarding the anomaly series built earlier) and
#repeats the ENTIRE lai_clim_anomalies vector once per extra year, rather than
#the 12-value climatology. The symmetric analogue would be
#append(lai_clim_anomalies, rep(copernicus_clim, endyr - copernicus_endyr)).
#Left unchanged pending confirmation of the intended behaviour.
if (endyr > copernicus_endyr) {
#LAI data
lai_clim_anomalies <- append(smooth_lai_ts,
rep(lai_clim_anomalies, endyr - copernicus_endyr))
#Time vector
lai_time <- append(lai_time,
seq.Date(as.Date(paste0(copernicus_endyr+1, "-01-01")), by="month",
length.out=no_tsteps * (endyr - copernicus_endyr)))
}
#Find modis time step corresponding to site start time
start_ind <- which(lai_time == paste0(startyr, "-01-01"))
#NOTE(review): grepl matches endyr as a substring of the date text; safe only
#while years are four digits and never collide with month/day digits
end_ind <- tail(which(grepl(endyr, lai_time)), 1) #Last index of end year
#Extract MODIS time steps matching site
copernicus_ts_for_site <- lai_clim_anomalies[start_ind:end_ind]
copernicus_time_for_site <- lai_time[start_ind:end_ind]
#Repeat Copernicus time series to create a time series matching site time step
#NOTE(review): growing with append() in a loop is O(n^2); acceptable for
#monthly data but preallocation would be cheaper
copernicus_tseries <- vector()
#Loop through time steps
for (t in 1:length(copernicus_time_for_site)) {
#Last time step
if (t == length(copernicus_time_for_site)) {
#Use the number of time steps that ensures final time series matches the length of site data
copernicus_tseries <- append(copernicus_tseries, rep(copernicus_ts_for_site[t],
length(site_time) - length(copernicus_tseries)))
#All other time steps
} else {
#Days until the next monthly time stamp (Date subtraction yields difftime)
time_diff <- copernicus_time_for_site[t+1] - copernicus_time_for_site[t]
#Repeat each days estimate by the number of days and time steps per day
copernicus_tseries <- append(copernicus_tseries, rep(copernicus_ts_for_site[t],
as.numeric(time_diff * site_tstep_size)))
}
}
#Check that the number of time steps match
if (length(copernicus_tseries) != length(site_time)) stop("MODIS and site time steps don't match")
######################################
### Add LAI time series to NC file ###
######################################
#Save to file
# Define variable on the site's existing first three dimensions:
laivar = ncvar_def('LAI_Copernicus', '-', list(site_nc[[s]]$dim[[1]], site_nc[[s]]$dim[[2]], site_nc[[s]]$dim[[3]]),
missval=-9999, longname='Copernicus Global Land Service leaf area index')
# Add variable and then variable data:
site_nc[[s]] = ncvar_add(site_nc[[s]], laivar)
ncvar_put(site_nc[[s]], 'LAI_Copernicus', copernicus_tseries)
#Close file handle (flushes the new variable to disk)
nc_close(site_nc[[s]])
}
|
96c2dce7de3a53f60c489acebc742141b820518a
|
80ee145d21975068bd722749697382db78575471
|
/inst/plot.yrmax.R
|
94d2877e1bb85d2b3d0bdd0949400b52e3dccf6e
|
[] |
no_license
|
jukent/climod
|
9d5f28afd4b7b083792e6bf872c375c5f8d5d2b8
|
203d57875c38a57e8e3b8ad01ce9d692506d3e44
|
refs/heads/master
| 2021-11-23T20:55:40.727516
| 2021-11-16T19:24:23
| 2021-11-16T19:24:23
| 193,576,176
| 0
| 0
| null | 2021-11-16T22:15:20
| 2019-06-24T20:35:09
|
R
|
UTF-8
|
R
| false
| false
| 971
|
r
|
plot.yrmax.R
|
#library(climod)
library(devtools)
#load_all("climod")
#Load the climod package from a local checkout (development mode)
load_all("~/climod")
## Call as: Rscript plot.yrmax.R label save png var
## args: 1 = plot title label, 2 = .Rdata input, 3 = output png, 4 = variable
#Hard-coded example arguments for interactive testing
args <- c("prec rcp85 HadGEM2-ES WRF ftlogan",
"v1.lof.bc/dmaps/prec.rcp85.HadGEM2-ES.WRF.ftlogan.Rdata",
"test.yrmax.png",
"prec")
## Comment out this line for testing
args <- commandArgs(trailingOnly=TRUE)
label <- args[1]
#Loads objects into the workspace; `annmax` and `units` used below are
#assumed to come from this file -- TODO confirm
load(args[2])
outfile <- args[3]
v <- args[4]
png(outfile, units="in", res=120, width=7, height=7)
#Pull the chosen variable out of each element of annmax
data <- lapply(annmax, `[[`, v)
#Shared range so x and y axes are directly comparable
rng <- range(unlist(data), na.rm=TRUE)
xlab <- paste("raw", v, paste0("(",units,")"))
ylab <- paste("BC", v, paste0("(",units,")"))
#Empty square plot plus a 1:1 reference line
plot(0,0, xlim=rng, ylim=rng, type="n", xlab=xlab, ylab=ylab, main=label)
abline(0,1)
#Raw vs bias-corrected values: future (red), current (blue), obs (black)
points(data$rawfut, data$fixfut, col="red")
points(data$rawcur, data$fixcur, col="blue")
points(data$obs, data$obs)
legend("bottomright", c("obs","cur","fut"),
col=c("black","blue","red"), pch=1)
dev.off()
|
2de27b10fb884ea9b01beec2e23fcc37d56236db
|
14f49deb33be70bae1235ddc76d892036d4d7f02
|
/man/calc_genoprob.Rd
|
dbdb8fad58cc200343ac2187cd8dc504df95a73a
|
[] |
no_license
|
lian0090/F2imputation
|
cbdf15103393523f6d34eb2ae9428c83e46252eb
|
0d7f06e14bb1bd1add345bb0ac719e107280510e
|
refs/heads/master
| 2020-04-19T00:05:07.714085
| 2015-05-09T17:37:26
| 2015-05-09T17:37:26
| 35,248,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
rd
|
calc_genoprob.Rd
|
\name{calc_genoprob}
\alias{calc_genoprob}
\title{
calculate genotype probabilities
}
\description{
calculate genotype probabilities
}
\usage{
calc_genoprob(l_geno, r_geno, recombinations = c("l", "r", "total"), genotypes = c(1, 2, 3),...)
}
\arguments{
\item{l_geno}{
left genotype
}
\item{r_geno}{
right genotype
}
\item{recombinations}{
recombination rates
}
\item{genotypes}{
genotype symbols (defaults to 1, 2, 3)
}
\item{...}{unused}
}
\author{
Lian Lian
}
\details{coming}
\examples{print("yes")}
|
c692d3b3d7191f666c52df9f2d44c41212a16978
|
3d055e533cb367cc396d278506cf6517165e2941
|
/global.R
|
79bec4f691d3ff55d13d6f968349d7cc93484671
|
[
"MIT"
] |
permissive
|
dKvale/forecast_submit
|
ed2e758ffa1c2130464a5803b7bb90103932d1f9
|
4a9ba34822d903b7b39d2d743902d8946ab351c2
|
refs/heads/main
| 2021-11-12T21:51:59.643242
| 2017-05-28T20:24:30
| 2017-05-28T20:24:30
| 77,480,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,478
|
r
|
global.R
|
# library(rsconnect)
# deployApp()

# Cities for which AQI forecasts are issued
aqi_cities <- c("Minneapolis - St. Paul",
                "Northern Metro",
                "Southern Metro",
                "Brainerd",
                "Bemidji",
                "Detroit Lakes",
                "Duluth",
                "Ely",
                "Fargo - Moorhead",
                "Fond du Lac - Cloquet",
                "Grand Forks",
                "Grand Portage",
                "Hibbing - Virginia",
                "International Falls",
                "Mankato",
                "Marshall",
                "Red Lake Nation",
                "Rochester",
                "Saint Cloud")

# City coordinates; keep only the forecast cities
cities <- read.csv("data-raw//city_locations.csv", stringsAsFactors = F)
cities <- subset(cities, City %in% aqi_cities)

# BUG FIX: the column is named "City" (capital C, as used in subset() above).
# Data-frame `$` partial matching is case-sensitive, so cities$city returned
# NULL; use the correct column name.
city_list <- cities$City

# AQI category colors
aqi_colors <- c("#FFF", # White
                "#9BF59B",
                "#ffff00",
                "#ff7e00",
                "#ff0000",
                "#99004c")

## prepare the OAuth token and set up the target sheet:
## - do this interactively
## - do this EXACTLY ONCE
#library(googlesheets)
#shiny_token <- gs_auth() # authenticate w/ your desired Google identity here
#saveRDS(shiny_token, "shiny_app_token.rds")
## if you version control your app, don't forget to ignore the token file!
## e.g., put it into .gitignore
#sheets <- gs_ls()
#ss <- gs_title("forecast_table.csv")

# Authenticate against Google Sheets with the saved token
googlesheets::gs_auth(token = "shiny_app_token.rds")
#ss <- googlesheets::gs_title("forecast_table")
#gs_delete(ss)
#gs_ls()

# Default table ----------------------------#
#base_tbl <- tibble(date = "2011-01-01",
#                   city = "Minneapolis - St. Paul",
#                   today = 0,
#                   day1 = 0,
#                   day2 = 0,
#                   day3 = 0,
#                   day4 = 0,
#                   day5 = 0,
#                   description = "Today, there will be no wind. We are terribly sorry.")
#write.csv(base_tbl, "forecast_table.csv", row.names = F)

# Load base table ----------------------------#
#gs_upload("forecast_table.csv")
# Open the forecast sheet and read all 9 columns as character
ss <- googlesheets::gs_title("forecast_table")
#gs_edit_cells(ss, input = "2011-01-01", anchor = "A2")
reset_content <- googlesheets::gs_read(ss, col_types = paste0(rep("c", 9), collapse = ""))
#write.csv(reset_content, "reset_content.csv", row.names = F)

# Assign colors in JS
#rowCallback = JS('function(nRow, aData, iDisplayIndex, iDisplayIndexFull) {
#    if (parseFloat(aData[3]) > 0)
#      $("td:eq(3)", nRow).css("background-color", "#9BF59B");
#    if (parseFloat(aData[3]) > 50)
#      $("td:eq(3)", nRow).css("background-color", "#ffff00");
#    if (parseFloat(aData[3]) > 100)
#      $("td:eq(3)", nRow).css("background-color", "#ff7e00");
#    if (parseFloat(aData[3]) > 150)
#      $("td:eq(3)", nRow).css("background-color", "#ff0000");
#    if (parseFloat(aData[3]) > 200)
#      $("td:eq(3)", nRow).css("background-color", "#99004c");
#    if (parseFloat(aData[3]) > 100)
#      $("td:eq(3)", nRow).css("font-weight", "bold");
#  }'))
|
92e50f352dc5c1e6fc292a6d0a25febbf7b84c58
|
66e157ab0e5a83d28aaeb46cf66faca445c4f304
|
/Exploratory_Data_Analysis/plot3.R
|
9dcec057e067507f8a85744f230f2691d8a3e59c
|
[] |
no_license
|
Jacobo-Arias/DataScienceCoursera
|
518a0d6b4233eae7befadcb88eb75380ce175f26
|
0d4b480d1247f88c81aa180b29d522d29e484440
|
refs/heads/master
| 2023-01-03T23:36:33.038610
| 2020-10-31T15:06:59
| 2020-10-31T15:06:59
| 276,781,524
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 951
|
r
|
plot3.R
|
library(lubridate)
library(dplyr)

# Read the full dataset as character so the "?" missing-value markers do not
# derail numeric conversion; fields are semicolon-separated
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", colClasses = "character")
data$Date <- dmy(data$Date)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)

# Keep only 2007-02-01 and 2007-02-02.
# BUG FIX: the original read c(ymd("2007-02-01", ymd("2007-02-02"))) -- a
# misplaced parenthesis that passed the second date into the first ymd() call
# instead of building a two-element Date vector.
dates <- c(ymd("2007-02-01"), ymd("2007-02-02"))
data <- data %>% filter(Date %in% dates)

# Three sub-metering traces on one plot (black / red / blue)
plot(data$Sub_metering_1, type = 'l', axes = FALSE,
     ylab = "Energy sub metering", xlab = '')
lines(data$Sub_metering_2, type = 'l', col = "red")
lines(data$Sub_metering_3, type = 'l', col = "blue")
# X axis marks the start of each day over the two-day window
axis(1, at = c(0, 1400, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0, 10, 20, 30), labels = c(0, 10, 20, 30))
box()
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lwd = 1, col = c("black", "red", "blue"))

# Copy the screen device to PNG (default 480x480) and close it
dev.copy(png, "plot3.png")
dev.off()
|
332fb3a62a4f42aa76dfa51b58c76273bb839fd8
|
1997cbc1503b3375594d1517b411d35fe12cbc5d
|
/plots.R
|
9fa8283b2417a6d55794064da429a9dea14122de
|
[] |
no_license
|
toyin54/R_Projects
|
a297ac9f64b778c09585ad932118dd311548407e
|
9b157cfcd9a0c8a105e587cc28b12882aa5d5f96
|
refs/heads/main
| 2023-07-15T19:42:42.176790
| 2021-08-26T19:07:10
| 2021-08-26T19:07:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 609
|
r
|
plots.R
|
#Explore fatty-acid content in the dslabs olive-oil dataset
library(dslabs)
data(olive)
head(olive)
olive$palmitic
#Scatter: palmitic (x) vs palmitoleic (y) acid content
plot(olive$palmitic ,olive$palmitoleic)
#Distribution of each acid by production region
boxplot(palmitic ~ region, data = olive)
boxplot(palmitoleic ~ region , data = olive)
library(dplyr)
library(ggplot2)
library(dslabs)
data(gapminder)
#African countries, 1970 vs 2010: income (log2 dollars/day) against infant
#mortality, labelled by country, coloured by region, one panel per year
gapminder %>%
mutate(dollars_per_day = gdp/population/365) %>%
filter(continent == "Africa" & year %in% c(1970, 2010) & !is.na(dollars_per_day) & !is.na(infant_mortality)) %>%
ggplot(aes(dollars_per_day, infant_mortality, color = region, label = country)) +
geom_text() +
scale_x_continuous(trans = "log2") +
facet_grid(year~.)
|
6cc99660b2f7a8ccf9f6afa84f8e18009d02a906
|
8422cc78e7e377f708eda8f1ce1bdbc0be5f0503
|
/man/insertSuddenDeath.Rd
|
5b41d9a63d65ade230ded8500aabd74c7833d128
|
[] |
no_license
|
yutannihilation/TotsuzenOfDeathAddin
|
28f621d947f2793b28df81a004ec4ddd061ea207
|
22d347332f92927aec24714d4f2ab6470164cf11
|
refs/heads/master
| 2021-01-13T15:51:38.057969
| 2016-12-19T15:22:04
| 2016-12-19T15:22:04
| 76,861,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 248
|
rd
|
insertSuddenDeath.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insertSuddenDeath.R
\name{insertSuddenDeath}
\alias{insertSuddenDeath}
\title{Insert Totuzen No Shi}
\usage{
insertSuddenDeath()
}
\description{
Insert Totuzen No Shi
}
|
91e45f5f10962bea3963932a693035964cd45e56
|
bd7fed9e0370e70f4fc625e0fe45578070ec417a
|
/stats/correlacao.R
|
b413906fee80553cec06aec13ca117a7637e2b3e
|
[] |
no_license
|
caiosainvallio/r
|
56773766794b2e5a85f90e383a8b4190529a9999
|
e82a03459cc9a5fe7f503a5c7dc3037e0d650c83
|
refs/heads/master
| 2023-03-15T00:44:43.407493
| 2021-03-25T20:45:39
| 2021-03-25T20:45:39
| 266,214,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,738
|
r
|
correlacao.R
|
# Correlation
library(tidyverse)
#Plot three Normal densities (mean 0; sd = 0.5, 1, 2) to illustrate variation
#measured in units of standard deviation
ggplot(data.frame(x = c(-4, 4)), aes(x)) +
mapply(function(mean, sd, col) {
stat_function(fun = dnorm,
args = list(mean = mean,
sd = sd),
aes(col = col), size = 3)
},
mean = rep(0, 3),
sd = c(1, .5, 2),
col = c("1", "0.5", "2")) +
scale_colour_brewer("Desvio\nPadrão", palette = "Set1",
guide = guide_legend(ncol = 1,
nrow = 3,
byrow = TRUE))
# Correlation measures how much variation, in standard-deviation units, two
# variables share. For example, a correlation of +0.8 means that as X varies by
# 1 standard deviation, a variation of 0.8 standard deviations is seen in Y,
# and vice versa.
correlação <- seq(-1, 1, 0.2)
x <- 1:50
y <- rnorm(50, sd = 10)
#Construct a variable with a prescribed correlation to y: mix y with the
#component of x orthogonal to y (the regression residuals), weighted so the
#resulting correlation equals `correlação`
complemento <- function(y, correlação, x) {
y.perp <- residuals(lm(x ~ y))
correlação * sd(y.perp) * y + y.perp * sd(y) * sqrt(1 - correlação^2)
}
#One simulated series per target correlation, stacked into a long data frame
X <- data.frame(z = as.vector(sapply(correlação,
function(correlação) complemento(y, correlação, x))),
correlação = ordered(rep(signif(correlação, 2),
each = length(y))),
y = rep(y, length(correlação)))
#Scatter plus fitted regression line, one facet per target correlation
ggplot(X, aes(y, z, group = correlação)) +
geom_rug(sides = "b") +
geom_point(alpha = 0.5) +
geom_smooth(method = "lm", color = "Red", se = FALSE) +
facet_wrap(~ correlação, scales = "free", labeller = "label_both", ncol = 4) +
theme(legend.position = "none")
# Assumptions of correlation --------------------------------------------------------------------------------------
# Correlation should only be applied to continuous, interval, or ordinal variables. Correlation cannot be used
# for nominal (also called categorical) variables.
# The logic behind this assumption is that magnitudes of association can only be measured on variables that are
# in some way "measurable and numerically comparable" with each other.
# The variables must satisfy a linearity criterion: the more/less of x, the more/less of y.
# Types of correlation --------------------------------------------------------------------------------------------
## Pearson correlation:
# Pearson is the most widely used correlation in statistical analysis. It is a parametric technique and assumes
# that both variables follow a Normal distribution. If your data violate the normality assumption, Pearson
# correlation is not the type of correlation you should use.
## Spearman correlation:
# Spearman is a non-parametric technique and is the alternative when the data violate the normality assumption,
# since it makes no assumption that the data follow any specific distribution.
## Kendall correlation:
# Like Spearman, Kendall is also a non-parametric technique, and likewise a viable alternative when the data
# violate the normality assumption, as it makes no assumption about the underlying distribution.
# When to use Kendall or Spearman? --------------------------------------------------------------------------------
# Both should be used when the normality assumption is violated for either or both of the variables being
# correlated.
# We suggest the Kendall correlation, especially when dealing with small samples (n < 100).
# However, there are scenarios where Spearman is better indicated: "If the ordinal variable, Y, has a large
# number of levels (say, five or six or more), then one may use Spearman's rank correlation coefficient to
# measure the strength of the association between X and Y".
# How to measure? ------------------------------------------------------------------------------------------------
library(mnormt) # library for generating multivariate Normal and non-Normal distributions
medias <- c(0, 10)
covariancias <- matrix(c(1, 0.6, 0.6, 1), 2, 2)
#Bivariate Normal sample and a heavy-tailed bivariate t sample (df = 1)
mv_normal <- as.data.frame(rmnorm(50, medias, covariancias))
mv_student <- as.data.frame(rmt(50, medias, covariancias, df = 1))
# Test the normality assumption.
# H0 of the Shapiro-Wilk test: "the data are distributed according to a Normal distribution."
shapiro.test(mv_normal$V1)
shapiro.test(mv_normal$V2)
# Both tests for the multivariate Normal samples yield p-values above 0.05, so we fail to reject the null
# hypothesis that "the data are distributed according to a Normal distribution."
shapiro.test(mv_student$V1)
shapiro.test(mv_student$V2)
# Both tests for the multivariate Student-t samples yield p-values below 0.05, so the null hypothesis that
# "the data are distributed according to a Normal distribution" is rejected.
# Correlation test ------------------------------------------------------------------------------------------------
# Besides computing the correlation value between two variables, it is also possible to perform a statistical
# null-hypothesis test on the correlation of two variables.
# The null hypothesis H0 in this case is "the variables have correlation equal to zero".
# method = "pearson" – Pearson correlation.
# method = "spearman" – Spearman correlation.
# method = "kendall" – Kendall correlation.
# Pearson ----------------------------------------------------------------------------------------------------------
# Parametric
cor(mv_normal$V1, mv_normal$V2, method = "pearson")
cor.test(mv_normal$V1, mv_normal$V2, method = "pearson")
# Spearman ---------------------------------------------------------------------------------------------------------
# Non-parametric
cor(mv_student$V1, mv_student$V2, method = "spearman")
cor.test(mv_student$V1, mv_student$V2, method = "spearman")
# Kendall ----------------------------------------------------------------------------------------------------------
# Non-parametric
cor(mv_student$V1, mv_student$V2, method = "kendall")
cor.test(mv_student$V1, mv_student$V2, method = "kendall")
|
9939166b51906a34218cf8b836cb699ccb6cf4df
|
238e8cfab127d1bbfe9d75d976cf4b610d5a6fcb
|
/exampleApps/bp_skeleton/app.R
|
0cd807ea10e7f653478d6608da1f0f1a0c95b0a5
|
[] |
no_license
|
lsw5077/shiny_workshop
|
6eda195b4d6bb9dc1fd2605db5a5d33b2df5e990
|
d0f08dd02133a3a900a216eadad94a650aa731f1
|
refs/heads/main
| 2023-03-10T17:12:01.139122
| 2021-02-22T19:59:05
| 2021-02-22T19:59:05
| 324,008,199
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,103
|
r
|
app.R
|
# This app skeleton shows best practices for shiny.
# The reigning logic is: Do things once.
# Load libraries
library(shiny)
# Load data:
## datasets to display
## supporting datasets like palettes and options
## If you were loading an R data file,
## remembering that we use
## relative paths
## it would look like:
# data <- readRDS("data/data.rds")
# Source functions, remembering that we use
# relative paths
source("bpFunctions.R")
# Define UI (currently an empty page; fill in inputs/outputs here)
ui <- fluidPage(
# Define UI, remembering to use accessible interfaces
# color-blind safe colors
# screen-readable text, equations
# tool tips for data.
# acknowledgements page for citations and funding
)
# Define server logic
server <- function(input, output) {
# compartmentalize server for ready debugging.
# process data based on user inputs
# (empty reactive placeholder: returns NULL until logic is added)
data <- reactive({
})
# Make outputs based on data processed per
# user inputs
# (placeholder: reads the reactive but draws nothing yet)
output$plot <- renderPlot({
data <- data()
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
eb46985df34f203c17c404dd07d2830c2d5628b2
|
74797c25ebc6f06fa01b608cb1ed6ce2574d3083
|
/dataforgraph1.R
|
22746f79a5cbe1c71fc46740fd4fefc3e567274e
|
[] |
no_license
|
anhnguyendepocen/Rgraphs
|
c533196de8e5d34a925bea11bd7c6b57d85d52b9
|
e306d8ccf06a99193b494aa2d55b11c48d36a28a
|
refs/heads/master
| 2022-02-15T17:07:38.394802
| 2019-07-05T14:51:04
| 2019-07-05T14:51:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
r
|
dataforgraph1.R
|
# Data Import for Plots
#Case-1 : bigmart data, read from a public Google Sheet (gid 356784553)
fullurl="https://docs.google.com/spreadsheets/d/13tWJP6fcWNE7FObz4Cn_-_R0us7qIFialUy1m9nwBYY/edit#gid=356784553"
library(gsheet)
data1 = as.data.frame(gsheet2tbl(fullurl))
#Quick structure checks
names(data1)
str(data1)
dim(data1)
#Case-2 : landdata, second tab (gid 971806452) of the same spreadsheet
#(original comment was mislabelled "Case-1"; corrected)
fullurl2="https://docs.google.com/spreadsheets/d/13tWJP6fcWNE7FObz4Cn_-_R0us7qIFialUy1m9nwBYY/edit#gid=971806452"
library(gsheet)
data2 = as.data.frame(gsheet2tbl(fullurl2))
names(data2)
str(data2)
dim(data2)
|
83d11a019ee68d7b940c35885fc54783a12c42c6
|
698d30bec746a9eee62a1718d21959032ad121ae
|
/cachematrix.R
|
66d036fe9b6be54914a66abdcfd32d19049edab7
|
[] |
no_license
|
szekendia/ProgrammingAssignment2
|
4303f6490bd3a4dff657ff631f5984dea66b268a
|
c16b52be0b3a49ad9e9b65bd6a6bbd6c75dfbd32
|
refs/heads/master
| 2021-01-21T20:52:50.804661
| 2015-05-22T16:05:59
| 2015-05-22T16:05:59
| 36,075,880
| 0
| 0
| null | 2015-05-22T14:14:47
| 2015-05-22T14:14:47
| null |
UTF-8
|
R
| false
| false
| 2,355
|
r
|
cachematrix.R
|
## Coursera : R-Programming : Programming assignment 2
## Github: https://github.com/szekendia/ProgrammingAssignment2
## 1. makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## Sample run:
## 0. mx <- matrix(c(1,3,2,4,6,7,4,8,12),3,3)
## makeCacheMatrix
## -------------------------------------
## 1. fmx <- makeCacheMatrix()
## 2. fmx$set(mx) ## cache the mx matrix
## 3. fmx$get() ## retrieve the matrix from cache
makeCacheMatrix <- function(cached_matrix = matrix()) {
## in: cached_matrix. Random, numeric, invertible(!) square matrix. Sample run: matrix(1:9,3,3)
cached_inv_matrix <- NULL
## Cache the mx matrix. Sample run: fmx$set()
## Reset invert matrix variable
f_set <- function(p_matrix) {
cached_matrix <<- p_matrix
cached_inv_matrix <<- NULL
}
## Retrieve cached matrix. Sample run: fmx$get()
f_get <- function() cached_matrix
## Cache the inverted cached matrix into global environment variable. Sample run: fmx$setsolve()
f_setsolve <- function(solve) cached_inv_matrix <<- solve
## in: solve. Cached, numeric, inverse square matrix. Called from function cacheSolve: f$setsolve(inv_matrix)
## Retrieve the cached, inverse matrix. Sample run: fmx$getsolve()
f_getsolve <- function() cached_inv_matrix
list(
set = f_set,
get = f_get,
setsolve = f_setsolve,
getsolve = f_getsolve
)
}
## 2. cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve
## the inverse from the cache.
## cacheSolve
## -------------------------------------
## Sample run:
## 1. cacheSolve(fmx)
cacheSolve <- function(f, ...) {
## in: f (function). The variable the makeCacheMatrix function is assigned into. (Sample run: fmx)
## Set inv_matrix by calling makeCacheMatrix$getsolve
inv_matrix <- f$getsolve()
## If inv_matrix has value, then getting cached data
if(!is.null(inv_matrix)) {
message("getting cached data")
return(inv_matrix)
}
## set inv_matrix by calling makeCacheMatrix$setsolve
data <- f$get()
inv_matrix <- solve(data)
f$setsolve(inv_matrix)
inv_matrix
}
|
24ed84d9143a6c39d3ec5d36bf4b12ce0ee24def
|
8a7d3fcd34fa07e8444ef067b1c1e85b142d42a2
|
/R/SpatialPolygonsToWKBPolygon.R
|
263ffad6783a755fa01242e87ee333ec570eb3fd
|
[] |
no_license
|
ianmcook/wkb
|
7d5b13a06e4bef849357495b4694f4e1fb49b880
|
728675cc849690c451619241c498dd62f2b849cb
|
refs/heads/master
| 2021-01-17T14:02:21.859056
| 2019-12-05T19:03:05
| 2019-12-05T19:03:05
| 31,404,515
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,060
|
r
|
SpatialPolygonsToWKBPolygon.R
|
# Convert a SpatialPolygons or SpatialPolygonsDataFrame object
# to a well-known binary (WKB) geometry representation of polygons
#' Convert SpatialPolygons to \acronym{WKB} MultiPolygon
#'
#' Converts an object of class \code{SpatialPolygons} or
#' \code{SpatialPolygonsDataFrame} to a list of well-known binary
#' (\acronym{WKB}) geometry representations of type MultiPolygon.
#'
#' This function is called by the \code{\link{writeWKB}} function. Call the
#' \code{\link{writeWKB}} function instead of calling this function directly.
#'
#' @param obj an object of class
#' \code{\link[sp:SpatialPolygons-class]{SpatialPolygons}} or
#' \code{\link[sp:SpatialPolygonsDataFrame-class]{SpatialPolygonsDataFrame}}.
#' @param endian The byte order (\code{"big"} or \code{"little"}) for encoding
#' numeric types. The default is \code{"little"}.
#' @return A \code{list} with class \code{AsIs}. The length of the returned list
#' is the same as the length of the argument \code{obj}. Each element of the
#' returned list is a \code{\link[base]{raw}} vector consisting of a
#' well-known binary (\acronym{WKB}) geometry representation of type
#' MultiPolygon.
#'
#' When this function is run in TIBCO Enterprise Runtime for R
#' (\acronym{TERR}), the return value has the SpotfireColumnMetaData attribute
#' set to enable TIBCO Spotfire to recognize it as a \acronym{WKB} geometry
#' representation.
#' @examples
#' # load package sp
#' library(sp)
#'
#' # create an object of class SpatialPolygons
#' triangle <- Polygons(
#' list(
#' Polygon(data.frame(x = c(2, 2.5, 3, 2), y = c(2, 3, 2, 2)))
#' ), "triangle")
#' rectangles <- Polygons(
#' list(
#' Polygon(data.frame(x = c(0, 0, 1, 1, 0), y = c(0, 1, 1, 0, 0))),
#' Polygon(data.frame(x = c(0, 0, 2, 2, 0), y = c(-2, -1, -1, -2, -2)))
#' ), "rectangles")
#' Sp <- SpatialPolygons(list(triangle, rectangles))
#'
#' # convert to WKB MultiPolygon
#' wkb <- wkb:::SpatialPolygonsToWKBMultiPolygon(Sp)
#'
#' # use as a column in a data frame
#' ds <- data.frame(ID = names(Sp), Geometry = wkb)
#'
#' # calculate envelope columns and cbind to the data frame
#' coords <- wkb:::SpatialPolygonsEnvelope(Sp)
#' ds <- cbind(ds, coords)
#' @seealso \code{\link{writeWKB}}, \code{\link{SpatialPolygonsEnvelope}}
#' @noRd
SpatialPolygonsToWKBMultiPolygon <- function(obj, endian) {
wkb <- lapply(X = obj@polygons, FUN = function(mymultipolygon) {
rc <- rawConnection(raw(0), "r+")
on.exit(close(rc))
if(endian == "big") {
writeBin(as.raw(0L), rc)
} else {
writeBin(as.raw(1L), rc)
}
writeBin(6L, rc, size = 4, endian = endian)
mypolygons <- mymultipolygon@Polygons
writeBin(length(mypolygons), rc, size = 4, endian = endian)
lapply(X = mypolygons, FUN = function(mypolygon) {
if(endian == "big") {
writeBin(as.raw(0L), rc)
} else {
writeBin(as.raw(1L), rc)
}
writeBin(3L, rc, size = 4, endian = endian)
writeBin(1L, rc, size = 4, endian = endian)
coords <- mypolygon@coords
writeBin(nrow(coords), rc, size = 4, endian = endian)
apply(X = coords, MARGIN = 1, FUN = function(coord) {
writeBin(coord[1], rc, size = 8, endian = endian)
writeBin(coord[2], rc, size = 8, endian = endian)
NULL
})
})
rawConnectionValue(rc)
})
if(identical(version$language, "TERR")) {
attr(wkb, "SpotfireColumnMetaData") <-
list(ContentType = "application/x-wkb", MapChart.ColumnTypeId = "Geometry")
}
I(wkb)
}
#' Convert SpatialPolygons to \acronym{WKB} Polygon
#'
#' Converts an object of class \code{SpatialPolygons} or
#' \code{SpatialPolygonsDataFrame} to a list of well-known binary
#' (\acronym{WKB}) geometry representations of type Polygon.
#'
#' This function is called by the \code{\link{writeWKB}} function. Call the
#' \code{\link{writeWKB}} function instead of calling this function directly.
#'
#' @param obj an object of class
#' \code{\link[sp:SpatialPolygons-class]{SpatialPolygons}} or
#' \code{\link[sp:SpatialPolygonsDataFrame-class]{SpatialPolygonsDataFrame}}.
#' @param endian The byte order (\code{"big"} or \code{"little"}) for encoding
#' numeric types. The default is \code{"little"}.
#' @return A \code{list} with class \code{AsIs}. The length of the returned list
#' is the same as the length of the argument \code{obj}. Each element of the
#' returned list is a \code{\link[base]{raw}} vector consisting of a
#' well-known binary (\acronym{WKB}) geometry representation of type Polygon.
#'
#' When this function is run in TIBCO Enterprise Runtime for R
#' (\acronym{TERR}), the return value has the SpotfireColumnMetaData attribute
#' set to enable TIBCO Spotfire to recognize it as a \acronym{WKB} geometry
#' representation.
#' @examples
#' # load package sp
#' library(sp)
#'
#' # create an object of class SpatialPolygons
#' triangle <- Polygons(
#' list(
#' Polygon(data.frame(x = c(2, 2.5, 3, 2), y = c(2, 3, 2, 2)))
#' ), "triangle")
#' rectangles <- Polygons(
#' list(
#' Polygon(data.frame(x = c(0, 0, 1, 1, 0), y = c(0, 1, 1, 0, 0))),
#' Polygon(data.frame(x = c(0, 0, 2, 2, 0), y = c(-2, -1, -1, -2, -2)))
#' ), "rectangles")
#' Sp <- SpatialPolygons(list(triangle, rectangles))
#'
#' # convert to WKB Polygon
#' wkb <- wkb:::SpatialPolygonsToWKBPolygon(Sp)
#'
#' # use as a column in a data frame
#' ds <- data.frame(ID = names(Sp), Geometry = wkb)
#'
#' # calculate envelope columns and cbind to the data frame
#' coords <- wkb:::SpatialPolygonsEnvelope(Sp)
#' ds <- cbind(ds, coords)
#' @seealso \code{\link{writeWKB}}, \code{\link{SpatialPolygonsEnvelope}}
#' @noRd
SpatialPolygonsToWKBPolygon <- function(obj, endian) {
wkb <- lapply(X = obj@polygons, FUN = function(mypolygon) {
rc <- rawConnection(raw(0), "r+")
on.exit(close(rc))
if(endian == "big") {
writeBin(as.raw(0L), rc)
} else {
writeBin(as.raw(1L), rc)
}
writeBin(3L, rc, size = 4, endian = endian)
rings <- mypolygon@Polygons
writeBin(length(rings), rc, size = 4, endian = endian)
lapply(X = rings, FUN = function(ring) {
coords <- ring@coords
writeBin(nrow(coords), rc, size = 4, endian = endian)
apply(X = coords, MARGIN = 1, FUN = function(coord) {
writeBin(coord[1], rc, size = 8, endian = endian)
writeBin(coord[2], rc, size = 8, endian = endian)
NULL
})
})
rawConnectionValue(rc)
})
if(identical(version$language, "TERR")) {
attr(wkb, "SpotfireColumnMetaData") <-
list(ContentType = "application/x-wkb", MapChart.ColumnTypeId = "Geometry")
}
I(wkb)
}
#' Envelope of SpatialPolygons
#'
#' Takes an object of class \code{SpatialPolygons} or
#' \code{SpatialPolygonsDataFrame} and returns a data frame with six columns
#' representing the envelope of each object of class \code{Polygons}.
#'
#' This function is called by the \code{\link{writeEnvelope}} function. Call the
#' \code{\link{writeEnvelope}} function instead of calling this function
#' directly.
#'
#' @param obj an object of class
#' \code{\link[sp:SpatialPolygons-class]{SpatialPolygons}} or
#' \code{\link[sp:SpatialPolygonsDataFrame-class]{SpatialPolygonsDataFrame}}.
#' @return A data frame with six columns named XMax, XMin, YMax, YMin, XCenter,
#' and YCenter. The first four columns represent the corners of the bounding
#' box of each object of class \code{Polygons}. The last two columns represent
#' the center of the bounding box of each object of class \code{Polygons}. The
#' number of rows in the returned data frame is the same as the length of the
#' argument \code{obj}.
#'
#' When this function is run in TIBCO Enterprise Runtime for R
#' (\acronym{TERR}), the columns of the returned data frame have the
#' SpotfireColumnMetaData attribute set to enable TIBCO Spotfire to recognize
#' them as containing envelope information.
#' @seealso \code{\link{writeEnvelope}}
#'
#' Example usage at \code{\link{SpatialPolygonsToWKBPolygon}}
#' @noRd
#' @importFrom sp bbox
SpatialPolygonsEnvelope <- function(obj) {
coords <- as.data.frame(t(vapply(X = obj@polygons, FUN = function(mypolygon) {
c(XMax = bbox(mypolygon)["x", "max"],
XMin = bbox(mypolygon)["x", "min"],
YMax = bbox(mypolygon)["y", "max"],
YMin = bbox(mypolygon)["y", "min"],
XCenter = mypolygon@labpt[1],
YCenter = mypolygon@labpt[2])
}, FUN.VALUE = rep(0, 6))))
if(identical(version$language, "TERR")) {
attr(coords$XMax, "SpotfireColumnMetaData") <- list(MapChart.ColumnTypeId = "XMax")
attr(coords$XMin, "SpotfireColumnMetaData") <- list(MapChart.ColumnTypeId = "XMin")
attr(coords$YMax, "SpotfireColumnMetaData") <- list(MapChart.ColumnTypeId = "YMax")
attr(coords$YMin, "SpotfireColumnMetaData") <- list(MapChart.ColumnTypeId = "YMin")
attr(coords$XCenter, "SpotfireColumnMetaData") <- list(MapChart.ColumnTypeId = "XCenter")
attr(coords$YCenter, "SpotfireColumnMetaData") <- list(MapChart.ColumnTypeId = "YCenter")
}
coords
}
|
3b9dac58fe55b8c6f58404eca7c0b8cd8cdcc8eb
|
bcd629b7f5fa3e8d3f1f5aa4a144ac5ae0425ed3
|
/R/pva-class.R
|
6cb8a56d169a51d913ae7383113db9e357b91ec6
|
[] |
no_license
|
psolymos/PVAClone
|
555217dab78000da720ed4f25b58bf0336b46d96
|
6b0d7e7d7e8dd10bebdbe6813bbafa14735cf45d
|
refs/heads/master
| 2016-09-15T05:47:48.368792
| 2016-03-12T04:12:46
| 2016-03-12T04:12:46
| 25,499,294
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
pva-class.R
|
## this declares S4 class pvamodel
setClass("pvamodel",
representation(
growth.model="character",
obs.error="character",
model="dcModel",
genmodel="dcModel",
p="integer",
support="matrix",
params="character",
varnames="character",
fixed="nClones",
fancy="character",
transf="function", # original --> diagn
backtransf="function", # diagn --> original
logdensity="function",
neffective="function"))
## this declares inheritance and extension for 'pva' S4 class
setClass("pva",
representation(
observations="numeric",
model="pvamodel",
summary="matrix",
dcdata="dcFit"),
contains = c("dcmle"))
|
0e363cfb6f4abfc6181e336d04ea89dc51fe4340
|
1fbacbe1c6b5798ba5c68a7c87e3040109b59271
|
/tests/testthat.R
|
a40e7a71f722b6139e252b0f2896aa6ef6aa0c18
|
[
"MIT"
] |
permissive
|
milandv/MapAgora
|
52f8a94988de3e26f6459cf422e3afaa842ee671
|
72ec01474d2ca130a96882ec31862c3929673736
|
refs/heads/main
| 2023-04-17T07:42:07.070381
| 2021-04-19T22:34:31
| 2021-04-19T22:34:31
| 362,169,760
| 0
| 0
|
NOASSERTION
| 2021-04-27T15:54:41
| 2021-04-27T15:54:40
| null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
library(testthat)
library(MapAgora)
test_check("MapAgora")
|
ed6701c6788769192fe5bb436330f23d52f2344d
|
d808b3477960ae45f395d641c9e9aba74a2eff38
|
/R/as.monthly.r
|
0f548d985f2c327c11024f06a06e9fdd74a6e4e5
|
[] |
no_license
|
lucasvenez/precintcon
|
c2fe339498c097c55eba002864d84980d3ff62ec
|
2d7a1f8234e1e3add387eb8aa302290b9e64381b
|
refs/heads/master
| 2020-04-06T07:12:43.075710
| 2016-07-17T01:05:45
| 2016-07-17T01:05:45
| 22,642,150
| 10
| 10
| null | 2020-02-16T13:02:46
| 2014-08-05T12:13:47
|
R
|
UTF-8
|
R
| false
| false
| 1,003
|
r
|
as.monthly.r
|
#' @include as.precintcon.monthly.r
NULL
#' @name as.monthly
#' @author Lucas Venezian Povoa \email{lucasvenez@@gmail.com}
#' @aliases as.precintcon.monthly as.monthly
#' @title Convert a daily precipitation serie to a monthly serie
#' @description Converts a daily precipitation serie to a monthly serie.
#' @usage as.monthly(object)
#' @param object a precintcon.daily object or a data.frame containing
#' 33 or 3 columns
#' @return A data.frame (precintcon.monthly) containing the following variables:
#' \itemize{
#' \item \code{year} is the year.
#' \item \code{month} is the month.
#' \item \code{precipitation} is the precipitation amount in millimeters.
#' }
#' @seealso
#' \code{\link{pplot.lorenz}}
#' \code{\link{read.data}}
#' @examples
#' ## Loading the daily precipitation serie.
#' #
#' data(daily)
#'
#' ## Converting precipitation
#' #
#' as.monthly(daily)
#' @keywords monthly precipitation
#' @export
as.monthly <- function(object) {
return(as.precintcon.monthly(object))
}
|
22ba4dc83eb984da9993c50da0b017f4f2ed4120
|
d8296f1d63f98bd5c1bd71bdd75bc22a2a9eff65
|
/src/transition_page.R
|
64d39352c3f7bb810d5d710f418b7dfb5a20fc8f
|
[] |
no_license
|
lcamus/osstat
|
40727150a7dc41ec4af24bc636411321377fb3df
|
c89870001ca1b9fb6ff35d39c0fb6beeb4f38a97
|
refs/heads/master
| 2021-01-19T00:05:09.673471
| 2018-05-31T07:22:43
| 2018-05-31T07:22:43
| 87,143,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,290
|
r
|
transition_page.R
|
#matrix transition pageIdAction
saveNetwork2html <- function (tl, file, selfcontained = T, libdir = "./lib") {
if (is.null(libdir)) {
libdir <- paste(tools::file_path_sans_ext(basename(file)),
"_files", sep = "")
}
htmltools::save_html(tl, file = file, libdir = libdir)
if (selfcontained) {
if (!htmlwidgets:::pandoc_available()) {
stop("Saving a widget with selfcontained = TRUE requires pandoc. For details see:\n",
"https://github.com/rstudio/rmarkdown/blob/master/PANDOC.md")
}
htmlwidgets:::pandoc_self_contained_html(file, "output.html")
unlink(libdir, recursive = TRUE)
}
return(htmltools::tags$iframe(src= file, height = "400px", width = "100%", style="border:0;"))
}
extendRepositoryPage <- function() {
suppressPackageStartupMessages(require(dplyr))
fSiteHierarchy <- "./data/refSiteHierarchy.RData"
url.root <- "https://www.euro-area-statistics.org/"
#refine repository page:
pr2 <- pr[pr$bad==F,]
pr2$bad <- NULL
pr2$pg <- sub("^.*sdw-wsrest\\.ecb\\.europa\\.eu/service/data/(\\w{2,3})/(.+)$","/bankscorner/\\1/export/\\2",pr2$pg)
pr2$pg <- sub("^.*sdw\\.ecb\\.europa\\.eu/datastructure.do$","/outlink/sdw/datastructure",pr2$pg)
pr2$pg <- sub("^.*sdw\\.ecb\\.(europa\\.eu|int)/(\\w+)?(\\.do)?$","/outlink/sdw/\\2",pr2$pg)
pr2$pg <- sub("^file:.+$","local",pr2$pg)
pr2$pg <- sub("^.*www\\.(\\w+\\.)?(ecb|bankingsupervision)\\.europa\\.eu/.*$","/outlink/ECB",pr2$pg)
pr2$pg <- sub("^.+trans(late)?.*$","/shared/translate",pr2$pg)
pr2$pg <- sub("^.*www\\.imf\\.org.*$","/outlink/IMF",pr2$pg)
pr2$pg <- sub("^.*www\\.ebf-fbe\\.eu.*$","/outlink/EBF",pr2$pg)
pr2$pg <- sub("^.+ec\\.europa\\.eu.*$","/outlink/EC",pr2$pg)
pr2$pg <- sub("^.*www\\.youtube\\.com.*$","/outlink/YouTube",pr2$pg)
pr2$pg <- sub("^.+sdmx\\.org.*$","/outlink/SDMX.org",pr2$pg)
pr2$pg <- sub("^.+/insights-atom\\.xml$","/shared/rss",pr2$pg)
pr2$pg <- sub("^.*/embed.*$","/shared/embed",pr2$pg)
pr2$pg <- sub("^.*/data$","/shared/data",pr2$pg)
pr2$pg <- sub("^.*/www\\.oecd\\.org.*$","/outlink/OECD",pr2$pg)
pr2$pg <- sub("^.*/www\\.compareyourcountry\\.org.*$","/outlink/OECD",pr2$pg)
pr2$pg <- sub("^/classic/banks-corner$","/bankscorner/",pr2$pg)
pr2$pg <- sub("^.+/banks-corner-(\\w{2,3})/\\w{2,3}codelist\\.xlsx$","/bankscorner/\\1",pr2$pg)
pr2$pg <- sub("^(/classic)?/banks-corner-(\\w{2,3})$","/bankscorner/\\2",pr2$pg)
pr2$pg <- sub("^/classic/(.+)$","/insights/\\1",pr2$pg)
pr2$pg <- sub("^/((\\w|-)+)$","/indicators/\\1",pr2$pg)
pr2$pg <- sub("^/$","/homepage",pr2$pg)
ncbs <- c("http://www.nbb.be/","http://www.bundesbank.de/","http://www.eestipank.ee/","http://www.centralbank.ie/","http://www.bankofgreece.gr/","http://www.bde.es/","http://www.banque-france.fr/","http://www.bancaditalia.it/","http://www.centralbank.gov.cy/","http://www.bank.lv/","http://www.lb.lt/","http://www.bcl.lu/","http://www.centralbankmalta.org/","http://www.dnb.nl/","http://www.oenb.at/","http://www.bportugal.pt/","http://www.bsi.si/","http://www.nbs.sk/","http://www.suomenpankki.fi")
invisible(lapply(ncbs,function(x){
pr2$pg <<- sub(paste0(x,".*$"),"/outlink/NCBs",pr2$pg)
}))
f <- grep("^/bankscorner/\\w{2,3}/export/.+$",pr2$pg)
pr2[f,]$args <- sub("^/bankscorner/\\w{2,3}/export/","",pr2[f,]$pg)
pr2[f,]$pg <- strsplit(pr2[f,]$pg,"/(\\w|\\.|\\+)+$")
rm(f)
pr2$pg <- tolower(pr2$pg)
#reflect site hierarchy:
if (file.exists(fSiteHierarchy))
load(fSiteHierarchy) else
{
require("rvest")
h <- read_html(url.root) %>%
html_nodes("body > section:nth-child(3) > div:nth-child(1) > div > ul") %>% html_children()
refSiteHierarchy <- setNames(data.frame(matrix(ncol = 3, nrow = 0),stringsAsFactors=F),
c("parent","child.pg","child.lib"))
invisible(lapply(h,function(x){
parent <- html_children(x)[1] %>% html_text() %>% tolower() %>% gsub(pattern=" ",replacement="-")
children <- html_children(x)[2] %>% html_children()
invisible(lapply(children,function(y){
child.pg <- y %>% html_children() %>% html_attr(name="href") %>% strsplit(split="?",fixed=T) %>% unlist() %>% head(1)
child.lib <-y %>% html_children() %>% html_text() %>% gsub(pattern=" ",replacement="-")
refSiteHierarchy[nrow(refSiteHierarchy)+1,] <<- c(parent,child.pg,child.lib)
}))
}))
refSiteHierarchy$child.pg <- tolower(paste("/indicators",refSiteHierarchy$child.pg,sep="/"))
refSiteHierarchy$child.path <- tolower(paste("/indicators",refSiteHierarchy$parent,refSiteHierarchy$child.lib,sep="/"))
save(refSiteHierarchy,file=fSiteHierarchy)
rm(url.root,fSiteHierarchy)
}
pr2 <- left_join(pr2,refSiteHierarchy[,c("child.pg","child.path")],by=c("pg"="child.pg"))
pr2[!is.na(pr2$child.path),]$pg <- pr2[!is.na(pr2$child.path),]$child.path
pr2$child.path <- NULL
#update sum group:
pr2 <- pr2 %>% group_by(pg) %>% mutate(n.sum=sum(n))
return(pr2)
} #extendRepositoryPage
extendActions <- function(pr2) {
aa <- a[a$type!="search",]
aa <- left_join(aa,pr2[,c("pageIdAction","pg")],by="pageIdAction")
aa[is.na(aa$pg),]$pg<- "ERR"
aa$prev.a <- lag(aa$pg)
aa$next.a <- lead(aa$pg)
aa <- aa %>% group_by(idVisit) %>% mutate(prev.a=ifelse(row_number()==1,"BEGIN",prev.a))
aa <- aa %>% group_by(idVisit) %>% mutate(next.a=ifelse(row_number()==n(),"END",next.a))
return(aa)
} #extendActions
genTransitionMatrix <- function(pr2,aa) {
#create network:
dn <- sort(unique(pr2$pg))
m <- matrix(0,nrow=length(dn)+2,ncol=length(dn)+2,dimnames=list(c(dn,"ERR","BEGIN"),c(dn,"ERR","END")))
rm(dn)
invisible(apply(aa,1,function(x) {
val.prev <- x["prev.a"]
val.next <- x["next.a"]
val.next <- gsub(" ","",val.next)
pia <- gsub(" ","",x["pg"])
m[pia,val.next] <<- m[pia,val.next]+1
if(val.prev=="BEGIN")
m["BEGIN",pia] <<- m["BEGIN",pia]+1
}))
return(m)
} #genTransitionMatrix
setGroup <- function() {
groups <- data.frame(label=c("indicators","insights","bankscorner","shared","outlink","event"),
color=c("#6fb871","#5cbde3","#D9685E","#004996","darkorange","darkmagenta"),
desc=c("indicators","Insights into euro area statistics","Banks' Corner",
"shared pages and features (incl. homepage)",
"outlinks to external pages (institutional websites and web ressources)",
"events related to visit (begin, end, error and save to local)"
),stringsAsFactors=F)
groups$color <- sapply(groups$color,function(x)paste0("#",paste(as.hexmode(col2rgb(x)),collapse="")))
return(groups)
} #setGroup
groups <- setGroup()
getJSEventHandler <- function(e) {
require(htmlwidgets)
f <- paste0("./src/js/",e,".js")
func <- JS(readChar(f, file.info(f)$size))
return(func)
} #getJSEventHandler
genNetwork <- function(m) {
require(visNetwork)
getTitle <- function() {
require(htmltools)
getPageDesc <- function() {
}
getBouncing <- function(node.incoming,node.outcoming) {
if (node.outcoming %in% c("BEGIN","END") | node.incoming=="END")
bouncing <- "-"
else {
incoming <- nrow(aa[aa$pg==node.outcoming & aa$prev.a==node.incoming,])
end.visit <- nrow(aa[aa$pg==node.outcoming & aa$prev.a==node.incoming & aa$next.a=="END",])
bouncing <- round(100*end.visit/incoming,0)
bouncing <- paste0(as.character(round(100*end.visit/incoming,0)),"%")
}
return(bouncing)
}
getNode <- function(node.index) {
# depth <- ncol(m)
depth <- 1
#incoming:
if (node.index==dim(m)[2]+1) { #virtual node BEGIN
incoming <- data.frame(matrix(c("-","",""))[,c(1,1,1)],stringsAsFactors=F)
} else {
incoming.node <- names(head(sort(m[,node.index],decreasing=T),depth))
incoming.traffic <- m[incoming.node,node.index]
incoming.bouncing <- sapply(incoming.node,function(x) getBouncing(x,c(colnames(m),"BEGIN")[node.index]))
incoming <- data.frame(incoming.node,incoming.traffic,incoming.bouncing,stringsAsFactors=F)
}
#outcoming:
if (node.index==dim(m)[2]) { #virtual node END
outcoming <- data.frame(matrix(c("-","",""))[,c(1,1,1)],stringsAsFactors=F)
} else {
if (node.index==dim(m)[2]+1) node.index <- dim(m)[1] #virtual node BEGIN
outcoming.node <- names(head(sort(m[node.index,],decreasing=T),depth))
outcoming.traffic <- m[node.index,outcoming.node]
outcoming.bouncing <- sapply(outcoming.node,function(x) getBouncing(c(colnames(m),"BEGIN")[node.index],x))
outcoming <- data.frame(outcoming.node,outcoming.traffic,outcoming.bouncing,stringsAsFactors=F)
}
ntb <- c("node","freq","bouncing")
require(DT)
res <- list(
datatable(outcoming,colnames=ntb,options=list(pageLength=5))
)
return(res)
} #getNode
incoming <- c(colSums(m),0)
outcoming <- c(rowSums(m[1:nrow(m)-1,1:ncol(m)-1]),0,rowSums(m)[dim(m)[1]])
bouncing <- round((incoming-outcoming)/incoming*100,0)
bouncing <- sapply(seq_along(bouncing),function(x){
if (is.infinite(bouncing[x]) | c(colnames(m),"BEGIN")[x]=="END")
res <- "-"
else
res <- paste0(as.character(bouncing[x]),"%")
return(res)
})
sketch <- lapply(1:(ncol(m)+1),function(x){
getNode(x)
})
res <- lapply(seq_along(sketch),function(x){
lib <- paste0(gsub("/"," > ",sub("/$","",sub("^/","",c(colnames(m),"BEGIN")[x]))))
return(htmltools::withTags(div(
h3(lib),
table(
tr(td("incoming traffic"),td(incoming[x],class="figure")),
tr(td("outcoming traffic"),td(outcoming[x],class="figure")),
tr(td("bouncing rate"),td(bouncing[x],class="figure"))
))
))
})
res <- sapply(res,as.character)
return(res)
} #getTitle
setNodes <- function() {
nodes <- data.frame(id=c(colnames(m),"BEGIN"),
# label=c(sub("^/(\\w{3})\\w+/",paste0("\\1","~"),colnames(m)),"BEGIN"),
label=c(sub("^/\\w+/","",colnames(m)),"BEGIN"),
value=c(colSums(m),rowSums(m)[dim(m)[1]]),
title=getTitle(),
stringsAsFactors=F)
nodes$group <- sub("^/(\\w+)/.*$","\\1",nodes$id)
nodes[nodes$id %in% c("BEGIN","END","ERR","local"),]$group <- "event"
# nodes[nodes$group=="event",]$label <- paste0("eve~",nodes[nodes$group=="event",]$label)
# nodes[nodes$id=="/homepage",c("group","label")] <- c("shared","sha~homepage")
nodes[nodes$id=="/homepage",]$group <- "shared"
# nodes[nodes$id=="/bankscorner/",]$label <- "ban~bankscorner"
nodes <- nodes[unlist(lapply(groups$label,function(x) which(nodes$group %in% x))),]
return(nodes)
} #setNodes
nodes <- setNodes()
edges <- setNames(data.frame(matrix(ncol=3, nrow=0),stringsAsFactors=F),c("from","to","value"))
invisible(mapply(function(r,c){
if (m[r,c]!=0) edges[nrow(edges)+1,] <<- c(rownames(m)[r],colnames(m)[c],m[r,c])
},row(m),col(m)))
#
network <- visNetwork(nodes,edges,
main="Our statistics network",
submain=paste0("(from ",min(a$date)," to ",max(a$date),")")) %>%
visLegend(main="group", useGroups=F,
addNodes=data.frame(label=groups$label,color=groups$color,shape="square",
title=groups$desc)) %>%
visOptions(highlightNearest=list(enabled=T, degree=0),
nodesIdSelection=list(enabled=T,useLabels=F),
# selectedBy=list(variable="group",selected="indicators",values=groups$label)) %>%
selectedBy=list(variable="group",values=groups$label)) %>%
visInteraction(navigationButtons=T,hover=T,
tooltipStyle = '
position: fixed;
visibility: hidden;
white-space: pre-wrap;
background-color: #ffffca;
padding: 15px;
z-index: 1;
border-radius: 30px;') %>%
visPhysics(stabilization=F,solver="forceAtlas2Based") %>%
visNodes(font=list(strokeWidth=1)) %>%
visEvents(hoverNode=getJSEventHandler("hoverNode"),
blurNode=getJSEventHandler("blurNode"),
selectNode=getJSEventHandler("selectNode"),
stabilized=getJSEventHandler("stabilized"),
startStabilizing="function(e){window.network=this;}"
# startStabilizing=getJSEventHandler("startStabilizing")
)
invisible(apply(groups,1,function(x){
assign("network",
network %>% visGroups(groupname=as.character(x[1]),color=as.character(x[2])),
envir=parent.env(environment()))
}))
return(network)
} #genNetwork
# Build per-node "incoming" and "outcoming" traffic tables from the page
# transition matrix `m`; `nodes.ref` maps a node id to its display group.
# Returns a list of two data frames (incoming, outcoming), each with columns
# ref / group / rank / node / freq / bouncing.
# NOTE(review): getBouncing reads a global data frame `aa` (the extended
# actions built earlier in this script) -- confirm `aa` is defined before use.
genSrcDatatables <- function(m,nodes.ref) {
suppressPackageStartupMessages(require(dplyr))
# Bounce rate for the transition node.incoming -> node.outcoming: the share
# of those visits whose next action is END. Virtual BEGIN/END nodes get "-".
getBouncing <- function(node.incoming,node.outcoming) {
if (node.outcoming %in% c("BEGIN","END") | node.incoming=="END")
bouncing <- "-"
else {
incoming <- nrow(aa[aa$pg==node.outcoming & aa$prev.a==node.incoming,])
end.visit <- nrow(aa[aa$pg==node.outcoming & aa$prev.a==node.incoming & aa$next.a=="END",])
bouncing <- round(100*end.visit/incoming,0) # NOTE(review): dead assignment -- overwritten by the next line
bouncing <- paste0(as.character(round(100*end.visit/incoming,0)),"%")
}
return(bouncing)
}
# For the node at `node.index` (index ncol(m)+1 denotes the virtual BEGIN
# node), return a two-element list: top incoming and top outcoming transitions.
getNode <- function(node.index) {
depth <- ncol(m) # number of top transitions kept per table
node.id <- ifelse(node.index==dim(m)[2]+1,"BEGIN",colnames(m)[node.index])
#incoming:
if (node.index==dim(m)[2]+1) { #virtual node BEGIN
incoming <- data.frame(matrix(c(node.id,"event",0,rep("-",3)),nrow=1),stringsAsFactors=F)
} else {
incoming.node <- names(head(sort(m[,node.index],decreasing=T),depth))
incoming.traffic <- m[incoming.node,node.index]
incoming.bouncing <- sapply(incoming.node,function(x) getBouncing(x,c(colnames(m),"BEGIN")[node.index]))
# Columns: ref node id, display group, rank, shortened node name, traffic, bounce
incoming <- data.frame(rep(node.id,depth),
left_join(setNames(data.frame(matrix(incoming.node,ncol=1),stringsAsFactors=F),"id"),nodes.ref,by="id")$group,
1:depth,
sub("^[a-z]+/(.+)$","\\1",sub("^/(.+)","\\1",incoming.node)),
as.character(incoming.traffic),
incoming.bouncing,
stringsAsFactors=F)
}
#outcoming:
if (node.index==dim(m)[2]) { #virtual node END
outcoming <- data.frame(matrix(c(node.id,"event",0,rep("-",3)),nrow=1),stringsAsFactors=F)
} else {
if (node.index==dim(m)[2]+1) node.index <- dim(m)[1] #virtual node BEGIN
outcoming.node <- names(head(sort(m[node.index,],decreasing=T),depth))
outcoming.traffic <- m[node.index,outcoming.node]
outcoming.bouncing <- sapply(outcoming.node,function(x) getBouncing(c(colnames(m),"BEGIN")[node.index],x))
outcoming <- data.frame(rep(node.id,depth),
left_join(setNames(data.frame(matrix(outcoming.node,ncol=1),stringsAsFactors=F),"id"),nodes.ref,by="id")$group,
1:depth,
sub("^[a-z]+/(.+)$","\\1",sub("^/(.+)","\\1",outcoming.node)),
as.character(outcoming.traffic),
outcoming.bouncing,
stringsAsFactors=F)
}
rnfb <- c("ref","group","rank","node","freq","bouncing") # final column names
incoming[,3] <- as.numeric(incoming[,3]) #rank
outcoming[,3] <- as.numeric(outcoming[,3])
# Drop zero-traffic rows (freq column is still character here, hence "0")
res <- list(
setNames(incoming[incoming[,5]!="0",],rnfb),
setNames(outcoming[outcoming[,5]!="0",],rnfb)
)
return(res)
} #getNode
gotnodes <- lapply(1:(ncol(m)+1),function(x){
getNode(x)
})
# Stack every node's tables into one incoming and one outcoming data frame.
res <- list(
bind_rows(lapply(gotnodes,`[[`,1)),
bind_rows(lapply(gotnodes,`[[`,2))
)
return(res)
} #genSrcDatatables
# Render a traffic table `t` as a compact DT::datatable captioned `cap`.
# NOTE(review): reads a global data frame `groups` (label/colour pairs) for
# the group-column styling -- confirm it is defined before calling.
genDatatables <- function(t,cap) {
require(DT)
require(htmltools)
res <- datatable(t,rownames=F,height=250,width=600,fillContainer=F,autoHideNavigation=T,
filter="none",class="compact",
caption=htmltools::tags$caption(
htmltools::em(cap)
),
# Column 0 (ref) is hidden but kept searchable so external code can filter on it.
options=list(pageLength=5,dom = 'tpr',autoWidth=T,
columnDefs = list(list(visible=F, searchable=T, targets=0),
list(searchable=F,targets=c(1:5)),
list(width = '10px', targets=c(1,2)),
list(width = '200px', targets=3),
list(width = '25px', targets=c(4,5)),
# Truncate long node names to 25 chars with a full-name hover tooltip.
list(
targets=3,
render = JS(
"function(data, type, row, meta) {",
"return type === 'display' && data.length > 25 ?",
"'<span title=\"' + data + '\">' + data.substr(0,25) + '...</span>' : data;",
"}")),
list(className = 'dt-center', targets=c(2,4,5))
)
)) %>%
# Paint the group column in its group colour (text colour too, so the cell
# shows as a solid swatch).
formatStyle(
"group",
backgroundColor=styleEqual(groups$label,groups$color),
color=styleEqual(groups$label,groups$color)
)
return(res)
} #genDatatables
# Compose the visNetwork widget `n` and the two datatables in `t`
# (t[[1]] = incoming, t[[2]] = outcoming) into one HTML page layout:
# network on the left, the two tables stacked on the right.
displayNetwork <- function(n,t) {
require(visNetwork)
require(htmltools)
# browsable(
withTags(tagList(list(
tags$html(
tags$head(
tags$style('td.figure {font-weight:bold; text-align: center;}
* {font-family: "Century Gothic", CenturyGothic, AppleGothic, sans-serif !important;}'
),
tags$script(HTML(getJSEventHandler("selectNodeById")))
)
,
tags$body(
tags$table(
tags$tr(
# Network cell spans both rows (70% width).
tags$td(n,rowspan=2,width="70%",valign="top")
,
# Incoming-traffic table, top right.
tags$td(t[[1]],width="30%")
)
,
tags$tr(
# Outcoming-traffic table, bottom right.
tags$td(t[[2]],width="30%")
)
)
)
)
)))
# )
} #displayNetwork
# ---- Driver: build the page-flow network, companion tables and HTML page ----
pr2 <- extendRepositoryPage()
save(pr2,file="./data/pr2.RData")
aa <- extendActions(pr2) # global: also read by genSrcDatatables/getBouncing
m <- genTransitionMatrix(pr2,aa)
n <- genNetwork(m)
sd <- genSrcDatatables(m,n$x$nodes)
tbl <- lapply(seq_along(sd),function(x)genDatatables(sd[[x]],c("Incoming","Outcoming")[x]))
res <- displayNetwork(n,tbl)
browsable(res)
save_html(res,"network.html")
|
30249666c97f6164065de54eda7f88e073d8cad2
|
8ba4e3b939d3d4fe9c9b07d13ea6a05ab5e57d01
|
/man/remove_time_info.Rd
|
575fbd3d3e3908582d8bbfac51669542f695d756
|
[
"BSD-2-Clause"
] |
permissive
|
COMHIS/fennica
|
8ce782cbdff37582c586820b3c26a4f549c2f7d3
|
f7f2e52b6e0b62a44a2c8106bb1ab5dd3edcc743
|
refs/heads/master
| 2023-08-19T03:01:34.138530
| 2022-04-20T17:23:36
| 2022-04-20T17:23:36
| 107,188,766
| 5
| 2
|
NOASSERTION
| 2023-07-04T08:44:37
| 2017-10-16T22:14:44
|
R
|
UTF-8
|
R
| false
| true
| 607
|
rd
|
remove_time_info.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_time_info.R
\name{remove_time_info}
\alias{remove_time_info}
\title{Remove Time Info}
\usage{
remove_time_info(x, verbose = FALSE, months = NULL)
}
\arguments{
\item{x}{Vector (time field)}
\item{verbose}{verbose}
\item{months}{months to remove}
}
\value{
Polished vector
}
\description{
Remove time information.
}
\details{
Remove months, year terms and numerics
}
\examples{
\dontrun{x2 <- remove_time_info(x)}
}
\references{
See citation("fennica")
}
\author{
Leo Lahti \email{leo.lahti@iki.fi}
}
\keyword{utilities}
|
e4a50b95c786e2653dbade258337737b363fca6d
|
b6b746c44ea977f62d8bb7c98137b477b931ca64
|
/R/pMaxCorrNor.R
|
750751bc6404f30e61650b2c31f6e787defa2abb
|
[] |
no_license
|
cran/NSM3
|
77f776ba5f934652800ecb0c3fbc8f87a7428571
|
7318aa2e0e6bf4f2badf8d0ae014f297650347f4
|
refs/heads/master
| 2022-08-30T00:39:28.420513
| 2022-08-16T21:40:02
| 2022-08-16T21:40:02
| 17,681,120
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 174
|
r
|
pMaxCorrNor.R
|
# Upper-tail probability P(max(X1, ..., Xk) > x) for k standard normal
# variables with common pairwise correlation `rho` (0 <= rho < 1).
# Uses the one-factor representation: conditional on a shared factor z,
# the variables are independent, so the CDF of the maximum is the
# expectation over z of pnorm((x + z * sqrt(rho)) / sqrt(1 - rho))^k.
pMaxCorrNor <- function(x, k, rho) {
  integrand <- function(z) {
    dnorm(z) * pnorm((x + z * sqrt(rho)) / sqrt(1 - rho))^k
  }
  1 - integrate(integrand, lower = -Inf, upper = Inf)$value
}
|
f111ff049fc88856ba120994827c63b9d57fb91f
|
2b82f18243c6bea2cfff559f1325c8dba1c08bbd
|
/tests/testthat/test_utils.R
|
b2b0f752511274f92507177cc0370a9d9173274c
|
[
"MIT",
"LicenseRef-scancode-unknown"
] |
permissive
|
hadinh1306/RealEstateR
|
4d684d1f40087b32a3b9b73b6adcf604d2eceb61
|
e9761bf65c9d138e0ab1c2b21dc0e8683e497da3
|
refs/heads/master
| 2020-03-08T02:23:30.960571
| 2018-05-22T03:51:37
| 2018-05-22T03:51:37
| 127,858,603
| 1
| 0
|
MIT
| 2018-05-22T03:51:38
| 2018-04-03T06:01:00
|
R
|
UTF-8
|
R
| false
| false
| 1,050
|
r
|
test_utils.R
|
# Unit tests for the response helpers in utils.R.
# NOTE(review): both tests perform live HTTP requests to zillow.com, so they
# are network-dependent and will fail offline -- consider mocking httr::GET
# or gating them with skip_on_cran()/skip_if_offline().
context("utils.R")
# -----------------------------------------------------------------------------
# check_response_type
# -----------------------------------------------------------------------------
# Requesting output=json should make check_response_type() error.
# NOTE(review): the description says "return xml document properly" but the
# expectation is an error on a JSON response -- confirm intended wording.
test_that("'check_response_type()' return xml document properly", {
uri <- paste0("http://www.zillow.com/webservice/ProReviews.htm?zws-id=", "abcd", "&screenname=", "mwalley0", "&output=json")
response <- httr::GET(uri)
expect_error(check_response_type(response))
})
# -----------------------------------------------------------------------------
# check_status()
# -----------------------------------------------------------------------------
# An invalid zws-id / address should produce an API error payload, which
# check_status() is expected to raise as an R error.
test_that("'check_status()' handle error response properly", {
zwsid <- "missing"
address <- "random"
citystatezip <- "random"
response <- httr::GET(paste0("http://www.zillow.com/webservice/GetSearchResults.htm?zws-id=",
zwsid,"&address=", address,
"&citystatezip=", citystatezip))
expect_error(check_status(response))
})
|
efc03cebb51b663f83d9ee7d7290337d1c357faf
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/CARLIT/R/barplotEQR.R
|
8fd2d2d25d3596673fa195a2dcd0381ab765f877
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,064
|
r
|
barplotEQR.R
|
# Barplot of Ecological Quality Ratio (EQR) values, one bar per site.
# EQR.df: data frame whose column 1 holds site identifiers and column 5 the
# EQR values in [0, 1]. Bars are coloured by quality class:
#   > 0.75 blue (High), > 0.60 green (Good), > 0.40 yellow (Moderate),
#   > 0.25 orange (Poor), otherwise red (Bad).
# NOTE(review): values > 1 leave the corresponding colour as NA (no colour
# is assigned in that branch) -- confirm such values cannot occur upstream.
# Side effects only (draws on the active graphics device).
barplotEQR <-
function(EQR.df){cols <- rep(NA, length(unique(EQR.df[,1])))
# Assign one quality colour per unique site, reading its EQR value.
for(n in 1:length(cols)){
if(as.numeric(paste(unique(EQR.df[n,5])))<=1) {
if(as.numeric(paste(unique(EQR.df[,5])))[n]>0.75){cols[n]=c("blue")}
else {if(as.numeric(paste(unique(EQR.df[,5])))[n]>0.60){cols[n]=c("green")}
else{if(as.numeric(paste(unique(EQR.df[,5])))[n]>0.40){cols[n]=c("yellow")}
else{if(as.numeric(paste(unique(EQR.df[,5])))[n]>0.25){cols[n]=c("orange")}
else{cols[n]=c("red")}}
}
}
}
}
# First pass draws the bars without labels (class boundaries go on top of it).
barplot(unique(EQR.df[,5]), ylim=c(0, 1.1), ylab="", xlim=c(0, (length(unique(EQR.df[,1]))+2)), col=cols)
# Dashed horizontal lines at the EQR class boundaries.
h <- c(0.25, 0.40, 0.60, 0.75, 1)
for (i in 1:length(h)){
abline(h=h[i], lty=2)
}
# Redraw the bars on the same device with axis labels over the boundary lines.
par(new=T)
barplot(unique(EQR.df[,5]), names.arg=unique(EQR.df[,1]), ylim=c(0, 1.1), ylab="EQR", xlim=c(0, (length(unique(EQR.df[,1]))+2)), col=cols)
# Annotate each class band with its ecological-status label on the right.
ES <- c("Bad", "Poor", "Moderate", "Good", "High")
for (i in 1:length(h)){
text(((length(unique(EQR.df[,1])))+(((length(unique(EQR.df[,1]))+2)-(length(unique(EQR.df[,1]))))/1.5)), h[i]-0.1, ES[i], pos=4)
}
}
|
91e81c74fe04fd093c25b8579311fad76b188634
|
8bca5c874501967d245b87f46915d5049eb8e472
|
/helpers.R
|
a01aef0a82fe9d182d1eaa770db3fe9e900d20c9
|
[] |
no_license
|
mlol1/app
|
16e27bfb384d381874ef98dd4364014a669c6eed
|
9b92eb8eacf28e36499c5646035da93e17dc645d
|
refs/heads/master
| 2016-08-05T00:53:09.304724
| 2014-08-26T11:56:37
| 2014-08-26T11:56:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,165
|
r
|
helpers.R
|
# Load Irish administrative boundaries (GADM level 1); defines object `gadm`.
# NOTE(review): downloads from gadm.org at source time -- network-dependent.
load(url('http://gadm.org/data/rda/IRL_adm1.RData'))
# loads an Object "gadm" with shape of Ireland
countiesa <- gadm # NOTE(review): immediately overwritten by the next line (dead assignment)
countiesa<- (gadm$NAME_1)
# Choropleth of Irish counties for a percentage variable.
#   var          numeric vector of percentages (one per region in `gadm`)
#   color        high-end colour of the white -> colour fill gradient
#   legend.title caption shown above the legend
#   min, max     percentage range the gradient spans; values outside are clamped
# Side effects only: draws on the active graphics device.
# NOTE(review): relies on the global SpatialPolygonsDataFrame `gadm`
# loaded at the top of this file, and on the sp package for spplot().
percent_spplot <- function(var, color, legend.title, min = 0, max = 100) {
  # generate vector of fill colors for spplot
  shades <- colorRampPalette(c("white", color))(100)
  # constrain gradient to percents that occur between min and max (clamp)
  var <- pmax(var, min)
  var <- pmin(var, max)
  # Map each (clamped) value to one of the 100 gradient shades.
  percents <- as.integer(cut(var, 100,
                             include.lowest = TRUE, ordered = TRUE))
  fills <- shades[percents]
  plot.new()
  # plot choropleth map
  print(spplot(gadm, "NAME_1", colorkey = TRUE, col.regions = fills, add = TRUE, lty = 0))
  # overlay county borders
  print(spplot(gadm, "NAME_1", col = "black", col.regions = fills, add = TRUE, lty = 1, lwd = 1))
  # add a legend with five evenly spaced tick labels from min to max
  inc <- (max - min) / 4
  legend.text <- c(paste0(min, "%"),
                   paste0(min + inc, "%"),
                   paste0(min + 2 * inc, "%"),
                   paste0(min + 3 * inc, "%"),
                   paste0(max, "%"))
  legend("bottomleft",
         legend = legend.text,
         # BUG FIX: five labels need five fill swatches. The original
         # shades[c(25, 50, 75)] supplied only three colours, leaving two
         # legend keys without a fill; sample five gradient stops instead.
         fill = shades[c(1, 25, 50, 75, 100)],
         title = legend.title)
}
|
5e7ed52199bf64833f6a6de1f00468c37727295d
|
ca10db69dbc6aebd20e6c567bce7c61dbf27d728
|
/Press_model_toget_Theta_out.R
|
9b233ec2ca8f0504b1bfcfe2965122e047f0252b
|
[] |
no_license
|
DeepanJayaraman/R_codes
|
8d68ccb0ed2f68b8c3cdc762f48862a90e95ead9
|
8d1a56342c086ab7c8ff41523e4b36621e99e386
|
refs/heads/main
| 2023-03-23T09:42:14.032503
| 2021-03-25T18:14:20
| 2021-03-25T18:14:20
| 351,532,879
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,733
|
r
|
Press_model_toget_Theta_out.R
|
# SIEMENS DATA OPTIMIZATION
# Crossvalidation
# Fits DACE Kriging surrogates (SPOT package) to the pressed-model responses
# for each of K = 100 repetitions and saves the fitted models.
# NOTE(review): setwd() with an absolute path makes the script machine-specific;
# prefer running from the project directory or using relative paths.
# Set working directory
setwd("D:\\R\\Seimens")
library(SPOT)
N <- 10 # sample size ... N= 10,20,50,100
# Loads the design and response objects (Designvariable, von_out, sd_*, l2_*, ...)
load(file=(paste("SD_L2_investigation_",N,"_samples",".Rdata",sep="")))
# Preallocation
# NOTE(review): the PRESSRMS_* matrices below are never used in this script --
# possibly leftovers from an earlier cross-validation step; confirm before removing.
PRESSRMS_KRG_v_lmom = matrix(nrow = 1,ncol=100);PRESSRMS_KRG_1_lmom = matrix(nrow = 1,ncol=100);PRESSRMS_KRG_2_lmom = matrix(nrow = 1,ncol=100);PRESSRMS_KRG_3_lmom = matrix(nrow = 1,ncol=100);PRESSRMS_KRG_4_lmom = matrix(nrow = 1,ncol=100);
PRESSRMS_KRG_v = matrix(nrow = 1,ncol=100); PRESSRMS_KRG_1 = matrix(nrow = 1,ncol=100);PRESSRMS_KRG_2 = matrix(nrow = 1,ncol=100);PRESSRMS_KRG_3 = matrix(nrow = 1,ncol=100);PRESSRMS_KRG_4 = matrix(nrow = 1,ncol=100);
## Mean and STD of responces
# Mean and standard deviation are calculated at each design point
K = 100 #repetitions
## responce - Standard deviation
# Responses as mean + 3 * standard deviation (objective v and constraints 1-4).
response_v <- von_out + 3*sd_von_out
response_1 <- con1_out + 3*sd_con1_out
response_2 <- con2_out + 3*sd_con2_out
response_3 <- con3_out + 3*sd_con3_out
response_4 <- con4_out + 3*sd_con4_out
## responce - Lmoment
# Same construction using the second L-moment (l2_*) as the spread measure.
response_v_lmom <- von_out + 3*l2_von_out
response_1_lmom <- con1_out + 3*l2_con1_out
response_2_lmom <- con2_out + 3*l2_con2_out
response_3_lmom <- con3_out + 3*l2_con3_out
response_4_lmom <- con4_out + 3*l2_con4_out
# Preallocation
surrkrg_v = matrix(nrow = 1,ncol=100);surrkrg_1 = matrix(nrow = 1,ncol=100);surrkrg_2 = matrix(nrow = 1,ncol=100);surrkrg_3 = matrix(nrow = 1,ncol=100);surrkrg_4 = matrix(nrow = 1,ncol=100);
surrkrg_v_lmom = matrix(nrow = 1,ncol=100);surrkrg_1_lmom = matrix(nrow = 1,ncol=100);surrkrg_2_lmom = matrix(nrow = 1,ncol=100);surrkrg_3_lmom = matrix(nrow = 1,ncol=100);surrkrg_4_lmom = matrix(nrow = 1,ncol=100);
Objective = matrix(nrow=100,ncol=1)
cons1 = matrix(nrow=100,ncol=1)
cons2 = matrix(nrow=100,ncol=1)
cons3 = matrix(nrow=100,ncol=1)
cons4 = matrix(nrow=100,ncol=1)
Objective_lmom = matrix(nrow=100,ncol=1)
cons1_lmom = matrix(nrow=100,ncol=1)
cons2_lmom = matrix(nrow=100,ncol=1)
cons3_lmom = matrix(nrow=100,ncol=1)
cons4_lmom = matrix(nrow=100,ncol=1)
## SURFACE MODELING OF VON MISES STRESS
X <- Designvariable
Y <- response_v
# regpoly1/corrgauss: SPOT's regression and correlation choices for the DACE model.
control=list(regr=regpoly1,corr=corrgauss)
# One Kriging surrogate per repetition column of Y.
# NOTE(review): assigning list(...) into the preallocated numeric matrix
# coerces `Objective` to a list; assign() also creates surrkrg_vK objects in
# the workspace as a side effect -- confirm this duplication is intended.
for (k in 1:K){
Z<-Y[,k]
surrkrg_v <- buildKrigingDACE(X,Z,control)
Objective[k] <- list(assign(paste("surrkrg_v", k, sep = ""), surrkrg_v))
}
Y <- response_v_lmom
control=list(regr=regpoly1,corr=corrgauss)
for (k in 1:K){
Z<-Y[,k]
surrkrg_v_lmom <- buildKrigingDACE(X,Z,control)
Objective_lmom[k] <- list(assign(paste("surrkrg_v_lmom", k, sep = ""), surrkrg_v_lmom))
}
save(Objective,Objective_lmom,file="PressModel_10samples_out.Rdata")
|
8b402367b9d65c8569d45aa2663ccbbde1defa05
|
2142b40687d515d3f333b88523d359c03c6a1172
|
/man/show-RadioSig-method.Rd
|
0fbfae15e5f05cd8ce72797187c903ceabf96a92
|
[] |
no_license
|
cran/RadioGx
|
c862e4da96f78f4f95aa4849d1c743f848d60978
|
1e0fa17a562cfca7ab2bb2b304779b8c2f5299b2
|
refs/heads/master
| 2020-12-22T17:38:58.011543
| 2019-12-19T22:00:02
| 2019-12-19T22:00:02
| 236,877,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 646
|
rd
|
show-RadioSig-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signatureClass.R
\docType{methods}
\name{show,RadioSig-method}
\alias{show,RadioSig-method}
\title{Show RadioGx Signatures}
\usage{
\S4method{show}{RadioSig}(object)
}
\arguments{
\item{object}{\code{RadioSig}}
}
\value{
Prints the RadioGx Signatures object to the output stream, and returns invisible NULL.
}
\description{
Show RadioGx Signatures
}
\examples{
data(Cleveland_small)
rad.sensitivity <- radSensitivitySig(Cleveland_small, mDataType="rna",
nthread=1, features = fNames(Cleveland_small, "rna")[1])
rad.sensitivity
}
|
9df2956404956bc2b631e256a95678a183ae857c
|
2a4ec36153b8071fea2c8cb50224c68f4b320d8e
|
/R/Main.R
|
8e14652d56281686010b3509edaff23d9c43ee10
|
[] |
no_license
|
ohdsi-studies/PCE
|
c3e964bcbed98c8dafb15cd6a0aa22963c51c1c8
|
9414098c13d348b665ecda422f835636425e9e3d
|
refs/heads/master
| 2023-03-21T04:13:47.153177
| 2021-02-11T14:23:06
| 2021-02-11T14:23:06
| 303,699,210
| 3
| 4
| null | 2021-03-09T16:45:16
| 2020-10-13T12:40:51
|
R
|
UTF-8
|
R
| false
| false
| 25,435
|
r
|
Main.R
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of PCE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the Study
#'
#' @details
#' This function executes the PCE Study.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cdmDatabaseName Shareable name of the database
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the target population cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param setting A data.frame with the tId, oId, model triplets to run - if NULL it runs all possible combinations
#' @param sampleSize How many patients to sample from the target population
#' @param recalibrate Recalibrate the model intercept and slop
#' @param recalibrateInterceptOnly Recalibrate the intercept only.
#' @param riskWindowStart The start of the risk window (in days) relative to the startAnchor.
#' @param startAnchor The anchor point for the start of the risk window. Can be "cohort start" or "cohort end".
#' @param riskWindowEnd The end of the risk window (in days) relative to the endAnchor parameter
#' @param endAnchor The anchor point for the end of the risk window. Can be "cohort start" or "cohort end".
#' @param firstExposureOnly Should only the first exposure per subject be included? Note that this is typically done in the createStudyPopulation function,
#' @param removeSubjectsWithPriorOutcome Remove subjects that have the outcome prior to the risk window start?
#' @param priorOutcomeLookback How many days should we look back when identifying prior outcomes?
#' @param requireTimeAtRisk Should subject without time at risk be removed?
#' @param minTimeAtRisk The minimum number of days at risk required to be included
#' @param includeAllOutcomes (binary) indicating whether to include people with outcomes who are not observed for the whole at risk period
#' @param standardCovariates Use this to add standard covariates such as age/gender
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/). Do not use a folder on a network drive since this greatly impacts
#' performance.
#' @param createCohorts Create the cohortTable table with the target population and outcome cohorts?
#' @param createTable1 Create the Table 1 - a characteristic of the target populations
#' @param runAnalyses Run the model development
#' @param aggregateCohorts Run this after runAnalyses to calculate the performance for combination of males and females, black and non-black
#' @param viewShiny View the results as a shiny app
#' @param packageResults Should results be packaged for later sharing?
#' @param minCellCount The minimum number of subjects contributing to a count before it can be included
#' in packaged results.
#' @param verbosity Sets the level of the verbosity. If the log level is at or higher in priority than the logger threshold, a message will print. The levels are:
#' \itemize{
#' \item{DEBUG}{Highest verbosity showing all debug statements}
#' \item{TRACE}{Showing information about start and end of steps}
#' \item{INFO}{Show informative information (Default)}
#' \item{WARN}{Show warning messages}
#' \item{ERROR}{Show error messages}
#' \item{FATAL}{Be silent except for fatal errors}
#' }
#' @param cdmVersion The version of the common data model
#' @param overwrite T overwrite the results, F only runs analyses that are currently empty
#'
#' @examples
#' \dontrun{
#' connectionDetails <- createConnectionDetails(dbms = "postgresql",
#' user = "joe",
#' password = "secret",
#' server = "myserver")
#'
#' execute(connectionDetails,
#' cdmDatabaseSchema = "cdm_data",
#' cdmDatabaseName = 'shareable name of the database'
#' cohortDatabaseSchema = "study_results",
#' cohortTable = "cohort",
#' outcomeId = 1,
#' oracleTempSchema = NULL,
#' riskWindowStart = 1,
#' startAnchor = 'cohort start',
#' riskWindowEnd = 365,
#' endAnchor = 'cohort start',
#' outputFolder = "c:/temp/study_results",
#' createCohorts = T,
#' runAnalyses = T,
#' aggregateCohorts = T,
#' viewShiny = F,
#' packageResults = F,
#' minCellCount = 10,
#' verbosity = "INFO",
#' cdmVersion = 5)
#' }
#'
#' @export
execute <- function(connectionDetails,
cdmDatabaseSchema,
cdmDatabaseName = 'friendly database name',
cohortDatabaseSchema = cdmDatabaseSchema,
cohortTable = "cohort",
oracleTempSchema = cohortDatabaseSchema,
setting = NULL,
sampleSize = NULL,
recalibrate = F,
recalibrateInterceptOnly = F,
riskWindowStart = 1,
startAnchor = 'cohort start',
riskWindowEnd = 365,
endAnchor = 'cohort start',
firstExposureOnly = F,
removeSubjectsWithPriorOutcome = F,
priorOutcomeLookback = 99999,
requireTimeAtRisk = F,
minTimeAtRisk = 1,
includeAllOutcomes = T,
outputFolder,
createCohorts = F,
createTable1 = F,
runAnalyses = F,
aggregateCohorts = T,
viewShiny = F,
packageResults = F,
minCellCount = 10,
verbosity = "INFO",
cdmVersion = 5,
overwrite = T) {
# All artefacts and the run log go under outputFolder/cdmDatabaseName.
if (!file.exists(file.path(outputFolder,cdmDatabaseName)))
dir.create(file.path(outputFolder,cdmDatabaseName), recursive = TRUE)
ParallelLogger::addDefaultFileLogger(file.path(outputFolder,cdmDatabaseName, "log.txt"))
## add existing model protocol code?
if (createCohorts) {
ParallelLogger::logInfo("Creating cohorts")
createCohorts(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
oracleTempSchema = oracleTempSchema,
outputFolder = file.path(outputFolder, cdmDatabaseName))
}
if(runAnalyses){
# add standardCovariates if included
# analysisSettings: one row per (targetId, outcomeId, model) combination.
analysisSettings <- getAnalyses(setting, outputFolder,cdmDatabaseName)
for(i in 1:nrow(analysisSettings)){
ParallelLogger::logInfo(paste0('Running ',analysisSettings$analysisId[i]))
# Skip analyses whose plpResult already exists unless overwrite = TRUE.
if(!overwrite){
plpRsultFolderExists <- dir.exists(file.path(outputFolder,cdmDatabaseName,analysisSettings$analysisId[i], 'plpResult','model'))
if(plpRsultFolderExists){ParallelLogger::logInfo(paste0('Result exists for ',analysisSettings$analysisId[i], ' not overwritting'))}
} else{
plpRsultFolderExists <- F
}
if(!plpRsultFolderExists){
# Optional standard covariate settings (e.g. demographics) shipped with the
# package alongside this model's coefficient file.
pathToStandard <- system.file("settings", gsub('_model.csv','_standard_features.csv',analysisSettings$model[i]), package = "PCE")
if(file.exists(pathToStandard)){
standTemp <- read.csv(pathToStandard)$x
standSet <- list()
length(standSet) <- length(standTemp)
names(standSet) <- standTemp
for(j in 1:length(standSet)){
standSet[[j]] <- T
}
pathToInclude <- system.file("settings", gsub('_model.csv','_standard_features_include.csv',analysisSettings$model[i]), package = "PCE")
incS <- read.csv(pathToInclude)$x
standSet$includedCovariateIds <- incS
standardCovariates <- do.call(FeatureExtraction::createCovariateSettings,standSet)
} else{
standardCovariates <- NULL
}
#getData
ParallelLogger::logInfo("Extracting data")
plpData <- tryCatch({getData(connectionDetails = connectionDetails,
cdmDatabaseSchema = cdmDatabaseSchema,
cdmDatabaseName = cdmDatabaseName,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = analysisSettings$targetId[i],
outcomeId = analysisSettings$outcomeId[i],
oracleTempSchema = oracleTempSchema,
model = analysisSettings$model[i],
standardCovariates = standardCovariates,
firstExposureOnly = firstExposureOnly,
sampleSize = sampleSize,
cdmVersion = cdmVersion)},
error = function(e){ParallelLogger::logError(e); return(NULL)})
if(!is.null(plpData)){
# get table 1
table1 <- tryCatch({getTable1(plpData)}, error = function(e){ParallelLogger::logError(e); return(NULL)})
#create pop
ParallelLogger::logInfo("Creating population")
population <- tryCatch({PatientLevelPrediction::createStudyPopulation(plpData = plpData,
outcomeId = analysisSettings$outcomeId[i],
riskWindowStart = riskWindowStart,
startAnchor = startAnchor,
riskWindowEnd = riskWindowEnd,
endAnchor = endAnchor,
firstExposureOnly = firstExposureOnly,
removeSubjectsWithPriorOutcome = removeSubjectsWithPriorOutcome,
priorOutcomeLookback = priorOutcomeLookback,
requireTimeAtRisk = requireTimeAtRisk,
minTimeAtRisk = minTimeAtRisk,
includeAllOutcomes = includeAllOutcomes)},
error = function(e){ParallelLogger::logError(e); return(NULL)})
# if less than 10 outcomes dont run
if(sum(population$outcomeCount >0)<10){
ParallelLogger::logInfo('Less that 10 outcomes so not running...')
}
if(sum(population$outcomeCount >0)>=10){
if(!is.null(population)){
# apply the model:
# Package the published coefficient table as a plpModel object so
# PatientLevelPrediction::applyModel can score the population with it.
plpModel <- list(model = getModel(analysisSettings$model[i]),
analysisId = analysisSettings$analysisId[i],
hyperParamSearch = NULL,
index = NULL,
trainCVAuc = NULL,
modelSettings = list(model = analysisSettings$model[i],
modelParameters = NULL),
metaData = NULL,
populationSettings = attr(population, "metaData"),
trainingTime = NULL,
varImp = data.frame(covariateId = getModel(analysisSettings$model[i])$covariateId,
covariateValue = getModel(analysisSettings$model[i])$points),
dense = T,
cohortId = analysisSettings$cohortId[i],
outcomeId = analysisSettings$outcomeId[i],
covariateMap = NULL,
predict = predictExisting(model = analysisSettings$model[i])
)
attr(plpModel, "type") <- 'existing'
class(plpModel) <- 'plpModel'
ParallelLogger::logInfo("Applying and evaluating model")
result <- tryCatch({PatientLevelPrediction::applyModel(population = population,
plpData = plpData,
plpModel = plpModel)},
error = function(e){ParallelLogger::logError(e); return(NULL)})
if(!is.null(result)){
result$inputSetting$database <- cdmDatabaseName
result$inputSetting$modelSettings <- list(model = 'existing model', name = analysisSettings$model[i], param = getModel(analysisSettings$model[i]))
result$inputSetting$dataExtrractionSettings$covariateSettings <- plpData$metaData$call$covariateSettings
result$inputSetting$populationSettings <- attr(population, "metaData")
result$executionSummary <- list()
result$model <- plpModel
result$analysisRef <- list()
result$covariateSummary <- tryCatch({PatientLevelPrediction:::covariateSummary(plpData = plpData, population = population, model = plpModel)},
error = function(e){ParallelLogger::logError(e); return(NULL)})
if(recalibrate){
# add code here
# recalibrate each time 2/3/5/10 years and add to prediction plus save values
predictionWeak <- result$prediction
### Extract data
# this has to be modified per model...
#1- 0.9533^exp(x-86.61) = p
#log(log(1-p)/log(0.9533))+86.61 = x
# Invert each model's baseline-survival formula to recover the linear
# predictor (lp) from the predicted risk.
# NOTE(review): the baseline-survival / mean-lp constants are model-specific
# -- confirm against the published pooled cohort equations.
if(analysisSettings$model[i] == "pooled_female_non_black_model.csv"){
lp <- log(log(1-predictionWeak$value)/log(0.9665))- 29.18
}else if(analysisSettings$model[i] == "pooled_male_non_black_model.csv"){
lp <- log(log(1-predictionWeak$value)/log(0.9144)) + 61.18
}else if(analysisSettings$model[i] == "pooled_female_black_model.csv"){
lp <- log(log(1-predictionWeak$value)/log(0.9533))+86.61
} else{
lp <- log(log(1-predictionWeak$value)/log(0.8954)) + 19.54
}
#t <- predictionWeak$survivalTime # observed follow up time
t <- apply(cbind(predictionWeak$daysToCohortEnd, predictionWeak$survivalTime), 1, min)
y <- ifelse(predictionWeak$outcomeCount>0,1,0) # observed outcome
extras <- c()
# Re-estimate intercept + slope (Cox) at 2/3/5/10-year horizons; outcomes
# past each horizon are censored. The last iteration (10-year) remains in
# predictionWeak$value.
for(yrs in c(2,3,5,10)){
t_temp <- t
y_temp <- y
y_temp[t_temp>365*yrs] <- 0
t_temp[t_temp>365*yrs] <- 365*yrs
S<- survival::Surv(t_temp, y_temp)
#### Intercept + Slope recalibration
f.slope <- survival::coxph(S~lp)
h.slope <- max(survival::basehaz(f.slope)$hazard) # maximum OK because of prediction_horizon
lp.slope <- stats::predict(f.slope)
p.slope.recal <- 1-exp(-h.slope*exp(lp.slope))
predictionWeak$value <- p.slope.recal
predictionWeak$new <- p.slope.recal
colnames(predictionWeak)[ncol(predictionWeak)] <- paste0('value',yrs,'year')
# TODO save the recalibration stuff somewhere?
extras <- rbind(extras,
c(analysisSettings$analysisId[i],"validation",paste0("h.slope_",yrs),h.slope),
c(analysisSettings$analysisId[i],"validation",paste0("f.slope_",yrs),f.slope$coefficients['lp']))
}
result$prediction <- predictionWeak # use 10 year prediction value
performance <- PatientLevelPrediction::evaluatePlp(result$prediction, plpData)
# reformatting the performance
performance <- reformatePerformance(performance,analysisSettings$analysisId[i])
result$performanceEvaluation <- performance
result$performanceEvaluation$evaluationStatistics <- rbind(result$performanceEvaluation$evaluationStatistics,extras)
}
if(recalibrateInterceptOnly & !recalibrate){
# recalibrate each time 2/3/5/10 years and add to prediction plus save values
# Same lp recovery as above, but only the intercept (baseline hazard) is
# refit; the slope is kept at 1 via offset(lp).
predictionWeak <- result$prediction
### Extract data
# this has to be modified per model...
#1- 0.9533^exp(x-86.61) = p
#log(log(1-p)/log(0.9533))+86.61 = x
if(analysisSettings$model[i] == "pooled_female_non_black_model.csv"){
lp <- log(log(1-predictionWeak$value)/log(0.9665))- 29.18
}else if(analysisSettings$model[i] == "pooled_male_non_black_model.csv"){
lp <- log(log(1-predictionWeak$value)/log(0.9144)) + 61.18
}else if(analysisSettings$model[i] == "pooled_female_black_model.csv"){
lp <- log(log(1-predictionWeak$value)/log(0.9533))+86.61
} else{
lp <- log(log(1-predictionWeak$value)/log(0.8954)) + 19.54
}
#t <- predictionWeak$survivalTime # observed follow up time
t <- apply(cbind(predictionWeak$daysToCohortEnd, predictionWeak$survivalTime), 1, min)
y <- ifelse(predictionWeak$outcomeCount>0,1,0) # observed outcome
extras <- c()
for(yrs in c(2,3,5,10)){
t_temp <- t
y_temp <- y
y_temp[t>365*yrs] <- 0
t_temp[t>365*yrs] <- 365*yrs
S<- survival::Surv(t_temp, y_temp)
f.intercept <- survival::coxph(S~offset(lp))
h.intercept <- max(survival::basehaz(f.intercept)$hazard) # maximum OK because of prediction_horizon
p.intercept.recal <- 1-exp(-h.intercept*exp(lp-mean(lp)))
predictionWeak$value <- p.intercept.recal
predictionWeak$new <- p.intercept.recal
colnames(predictionWeak)[ncol(predictionWeak)] <- paste0('value',yrs,'year')
# TODO save the recalibration stuff somewhere?
extras <- rbind(extras,
c(analysisSettings$analysisId[i],"validation",paste0("h.intercept_",yrs),h.intercept))
}
result$prediction <- predictionWeak # use 10 year prediction value
performance <- PatientLevelPrediction::evaluatePlp(result$prediction, plpData)
# reformatting the performance
performance <- reformatePerformance(performance,analysisSettings$analysisId[i])
result$performanceEvaluation <- performance
result$performanceEvaluation$evaluationStatistics <- rbind(result$performanceEvaluation$evaluationStatistics,extras)
}
# CUSTOM CODE FOR SURVIVAL METRICS
#=======================================
# here we add the 2/3/5 year surivival metrics to prediction
result <- tryCatch({getSurvivialMetrics(plpResult = result,
recalibrate = recalibrate | recalibrateInterceptOnly,
analysisId = analysisSettings$analysisId[i],
model = analysisSettings$model[i])},
error = function(e){ParallelLogger::logError(e); return(result)})
#=======================================
if(!dir.exists(file.path(outputFolder,cdmDatabaseName))){
dir.create(file.path(outputFolder,cdmDatabaseName))
}
ParallelLogger::logInfo("Saving results")
PatientLevelPrediction::savePlpResult(result, file.path(outputFolder,cdmDatabaseName,analysisSettings$analysisId[i], 'plpResult'))
saveRDS(table1, file.path(outputFolder,cdmDatabaseName,analysisSettings$analysisId[i], 'plpResult','table1.rds'))
ParallelLogger::logInfo(paste0("Results saved to:",file.path(outputFolder,cdmDatabaseName,analysisSettings$analysisId[i])))
} # result not null
} # population not null
} # count >= 10
}# plpData not null
}# overwrite or non exists
}
}
if(aggregateCohorts == T){
# Combine per-subgroup results (e.g. male/female, black/non-black) into
# aggregate performance estimates.
agg <- tryCatch({getAggregatePerm(outputFolder,cdmDatabaseName)},
error = function(e){ParallelLogger::logError(e);
ParallelLogger::logInfo("Aggregate cohorts failed...")})
}
if (packageResults) {
ParallelLogger::logInfo("Packaging results")
packageResults(outputFolder = file.path(outputFolder,cdmDatabaseName),
minCellCount = minCellCount)
}
# [TODO] add create shiny app
viewer <- TRUE
if(viewShiny) {
viewer <- tryCatch({
PatientLevelPrediction::viewMultiplePlp(file.path(outputFolder,cdmDatabaseName))},
error = function(e){ParallelLogger::logError(e);
ParallelLogger::logInfo("No results to view...")})
}
return(viewer)
}
|
7b22d8462b0d2cb5b1ba58c679fe4d2e6ba89e55
|
dfe66a5577ee81999033ab48168354fb87396fb1
|
/Project1/Zapytanie5.R
|
fb340ff02205a2d8db3b7db7e9f3e69fa4e06ac2
|
[] |
no_license
|
Dragemil/learning-r-projects
|
9f0edadca472ade0f1dfc72b2046dcfc36da3e2d
|
75aefe534d0de639a55f3846e0a851c9f57e5ab3
|
refs/heads/master
| 2023-02-01T21:07:21.848719
| 2020-12-17T12:49:08
| 2020-12-17T12:49:08
| 322,305,182
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,041
|
r
|
Zapytanie5.R
|
library(sqldf)
library(dplyr)
library(data.table)
#5
source("WczytanieDanych.R")
##5.1
# Reference solution (SQL via sqldf): for each question, the total score of the
# comments its author made on that post; top 10 questions by that total.
Query5_1 <- function() {
sqldf(
"SELECT
Posts.Title,
CmtTotScr.CommentsTotalScore
FROM (
SELECT
PostID,
UserID,
SUM(Score) AS CommentsTotalScore
FROM Comments
GROUP BY PostID, UserID
) AS CmtTotScr
JOIN Posts ON Posts.ID=CmtTotScr.PostID AND Posts.OwnerUserId=CmtTotScr.UserID
WHERE Posts.PostTypeId=1
ORDER BY CmtTotScr.CommentsTotalScore DESC
LIMIT 10"
)
}
##5.2
# Base-R equivalent of Query5_1 (aggregate + merge).
Query5_2 <- function() {
CmtTotScr <- aggregate(Comments$Score, by = Comments[, c("UserId", "PostId")], sum)
CmtTotScr["CommentsTotalScore"] <- CmtTotScr["x"]
CmtTotScr <- CmtTotScr[,-3]
Query5_2 <- merge(Posts[Posts$PostTypeId == 1,], CmtTotScr, by.x = c("Id", "OwnerUserId"), by.y = c("PostId", "UserId"))
Query5_2 <- Query5_2[order(-Query5_2$CommentsTotalScore),] %>% head(10)
Query5_2 <- Query5_2[,c("Title", "CommentsTotalScore")]
}
# BUG FIX: all_equal() was called on the function objects themselves
# (Query5_1, Query5_2); the functions must be *called* so their result data
# frames are compared.
print(all_equal(Query5_1(), Query5_2(), ignore_row_order = FALSE))
##5.3
# dplyr equivalent of Query5_1.
Query5_3 <- function() {
CmtTotScr <- Comments %>%
group_by(PostId, UserId) %>%
summarise(CommentsTotalScore = sum(Score))
Query5_3 <- Posts %>%
filter(PostTypeId == 1) %>%
inner_join(CmtTotScr, by = c("Id" = "PostId", "OwnerUserId" = "UserId")) %>%
arrange(desc(CommentsTotalScore)) %>%
head(10) %>%
select(Title, CommentsTotalScore)
}
print(all_equal(Query5_1(), Query5_3(), ignore_row_order = FALSE))
##5.4
# data.table equivalent of Query5_1.
Query5_4 <- function() {
CmtTotScr <- as.data.table(Comments)
CmtTotScr <- CmtTotScr[, .(CommentsTotalScore = sum(Score)), keyby = .(UserId, PostId)]
Query5_4 <- merge(as.data.table(Posts)[PostTypeId == 1], CmtTotScr, by.x = c("Id", "OwnerUserId"), by.y = c("PostId", "UserId"))
Query5_4 <- setorder(Query5_4, -CommentsTotalScore)
Query5_4 <- Query5_4[1:10, .(Title, CommentsTotalScore)]
}
print(all_equal(Query5_1(), Query5_4(), ignore_row_order = FALSE))
|
a45202261fb3d46d6197fca4506506559ca61bb5
|
29086ee3be50fa04c8ab55d5d9f55bf9abd213e8
|
/R Code Examples/IntroductionToR/6.DataFrames/ExploreDataFrame.R
|
ccef1f649b2a5fbc0c1ee7c7f8d240c79fba100d
|
[] |
no_license
|
analystfreakabhi/DataScience_guide
|
dc8bc967ffc892e307240633f5128b67e3d92142
|
61960c52e5079feb89505c8c8cd5030b0364a865
|
refs/heads/master
| 2021-01-20T00:21:45.316416
| 2017-06-05T02:58:43
| 2017-06-05T02:58:43
| 89,119,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,165
|
r
|
ExploreDataFrame.R
|
# Explore the Data Frame
# Dataset
# observations
# variables
# Data Frame
# specifically for datasets
# row > observations
# columns = variables (age, name, etc)
# contain elements of different types
# elements in same column: same type
# Create Data Frame
# Import from data source
# CSV file
# relational database (e.g. SQL)
# software packages (Excel, SPSS...)
name <- c("Anne","Pete","Frank","Julia","Cath")
age <- c(28,30,21,39,35)
child <- c(FALSE, TRUE, TRUE, FALSE, TRUE)
df <- data.frame(name,age,child)
df
# Name Data Frame
names(df) <- c("Name","Age","Child")
df
# or
df <- data.frame(Name = name, Age = age, Child = child)
df
# Data Frame Structure
str(df)
df <- data.frame(name, age, child, stringsAsFactors = FALSE)
str(df)
# DataCamp
# Have a look at your data set
# Print the first observations of mtcars
head(mtcars)
# Print the last observations of mtcars
tail(mtcars)
# Print the dimensions of mtcars
dim(mtcars)
# Have a look at the structure
# Investigate the structure of the mtcars data set
str(mtcars)
# Creating a data frame
# Definition of vectors
planets <- c("Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune")
type <- c("Terrestrial planet", "Terrestrial planet", "Terrestrial planet",
"Terrestrial planet", "Gas giant", "Gas giant", "Gas giant", "Gas giant")
diameter <- c(0.382, 0.949, 1, 0.532, 11.209, 9.449, 4.007, 3.883)
rotation <- c(58.64, -243.02, 1, 1.03, 0.41, 0.43, -0.72, 0.67)
rings <- c(FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE)
# Create a data frame: planets_df
planets_df <- data.frame(planets, type, diameter, rotation, rings)
# Display the structure of planets_df
str(planets_df)
# Creating a data frame (2)
# Encode type as a factor: type_factor
type_factor <- factor(type)
# Construct planets_df: strings are not converted to factors!
planets_df <- data.frame(planets, type_factor, diameter, rotation, rings, stringsAsFactors = FALSE)
# Display the structure of planets_df
str(planets_df)
# Rename the data frame columns
# Improve the names of planets_df
names(planets_df) <- c("name","type","diameter","rotation","has_rings")
planets_df
|
f441d5aeaca9f1e5c2d1f65a1572c01d03fdfb05
|
159116807c9df0042f0483cb4906e0a9e231146c
|
/data_cleaning_script.R
|
3c07083cc41cf538057d1c36c371ae275e4e1f1c
|
[] |
no_license
|
nceas-coding-club/sb-restaurants
|
407cab8d6fd1f07c222d49a3d07f94a91654b5ab
|
cc3a453a53c865297b7408ca05ea357fc8d7b8b5
|
refs/heads/master
| 2022-07-18T23:46:03.457944
| 2020-01-23T17:10:53
| 2020-01-23T17:10:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,785
|
r
|
data_cleaning_script.R
|
## Data Cleaning Script ##
# Combines the opened- and closed-restaurant lists, checks for duplicate
# entries, and geocodes every unique address with the Google Maps API (ggmap).
# load libraries
library(tidyverse)
library(ggmap)
# load in and prep data; a "Name_Address" key makes the two lists joinable
opened <- read_csv("data/opened_restaurants.csv") %>%
select(-X5, -X6) %>%
unite("key", c("Name", "Address"), remove = FALSE)
closed <- read_csv("data/closed_restaurants.csv") %>%
unite("key", c("Name", "Address"), remove = FALSE)
# join the open and closed data by the new key column
master_data <- opened %>%
full_join(closed)
# check for repeats in the data (tally rows per restaurant name)
mismatches <- master_data %>%
group_by(Name) %>%
add_tally()
# note budda bowls opened twice at same place so did old town coffee overall pretty clean data though
## geocode section ##
# set api
#myAPI <- read_file("api.txt") # this lives on my computer sorry
# NOTE(review): myAPI is undefined unless the read_file line above is
# uncommented, so register_google() errors as-is - confirm how the key is
# meant to be supplied.
register_google(key = myAPI)
# geocode each distinct address (suffixed with ", California")
geo_code_ref <- master_data %>%
distinct(Address) %>%
mutate(state = "California") %>%
unite("address_state", c("Address", "state"), sep = ", ", remove = FALSE) %>%
mutate_geocode(address_state)
# idk why these didnt all work the first time I think it was some account info updating
# retry addresses whose first geocoding attempt returned NA
check_missings <- geo_code_ref %>%
filter(is.na(lat)) %>%
select(-lat, -lon) %>%
mutate_geocode(address_state)
# google maps didn't like the "#" for whatever reason
check_missings2 <- check_missings %>%
filter(is.na(lat)) %>%
select(-lat, -lon) %>%
mutate(address_state = str_remove(address_state, "#")) %>%
mutate_geocode(address_state)
# attaching the different segments to a master sheet
geocode_ref <- geo_code_ref %>%
filter(!is.na(lat)) %>%
bind_rows(check_missings) %>%
filter(!is.na(lat)) %>%
bind_rows(check_missings2) %>%
select(-address_state, -state)
write_csv(master_data, "data/master_data.csv")
write_csv(geocode_ref, "data/geocode_ref.csv")
|
4c4eec247dd05e478a614eb1fc721748aea51a28
|
82eb805cc710a936340b96aab8ecb87b07b810d3
|
/datautils/io.R
|
6088a513946472b71ca127d729475b8044e63b02
|
[
"BSD-3-Clause"
] |
permissive
|
sebwink/datautils
|
cee8eddaf8e113205281bdbd707a9a1d8bdce4c7
|
ad97145a161348917831e883765ef3803c04e0c8
|
refs/heads/master
| 2020-03-24T20:35:37.049978
| 2018-07-31T08:39:18
| 2018-07-31T08:39:18
| 142,986,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 273
|
r
|
io.R
|
#' Load a gzip-compressed count matrix from CSV.
#'
#' The file must contain an "id" column with gene identifiers; it becomes the
#' row names of the returned data frame and is dropped from the columns.
#' Column names are kept exactly as written (check.names = FALSE).
load_count_matrix <- function(path2countMatrix) {
  counts <- read.csv(gzfile(path2countMatrix), header = TRUE, check.names = FALSE)
  row.names(counts) <- counts$id
  counts[, names(counts) != "id", drop = FALSE]
}
|
dc0fad535c208ad870eef00af7d5b824be68ce3b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/languageR/examples/pvals.fnc.Rd.R
|
cf31349b40333b6104b736972416f622e64e56cb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 884
|
r
|
pvals.fnc.Rd.R
|
library(languageR)
### Name: pvals.fnc
### Title: Compute p-values and MCMC confidence intervals for mixed models
### Aliases: pvals.fnc
### Keywords: regression
### ** Examples
## Not run:
##D data(primingHeid)
##D library(lme4)
##D
##D # remove extreme outliers
##D primingHeid = primingHeid[primingHeid$RT < 7.1,]
##D
##D # fit mixed-effects model
##D
##D # we will stay as close to the older optimizer of lme4 as possible -
##D # this requires the optimx package and using the control option of lmer()
##D
##D require(optimx)
##D require(lmerTest)
##D
##D primingHeid.lmer = lmer(RT ~ RTtoPrime * ResponseToPrime +
##D Condition + (1|Subject) + (1|Word), data = primingHeid,
##D control=lmerControl(optimizer="optimx",optCtrl=list(method="nlminb")))
##D summary(primingHeid.lmer)
##D anova(primingHeid.lmer)
##D
## End(Not run)
|
537bec6283e974041a1304586e493c1e0a0f2aff
|
67a7683b1901db9941b6ed61bcf3f7129f657d11
|
/inst/shinyApps/MethodEvalViewer/server.R
|
3e0d753ecf6c573a93f0db9352e8bfe57af8c9e6
|
[
"Apache-2.0"
] |
permissive
|
odysseusinc/MethodEvaluation
|
5a51796c66622ff7448c2f2fd2cff09052e87dc6
|
2d1d0775744486e10f3f850ed753e155365a94d8
|
refs/heads/master
| 2023-01-24T06:56:42.437017
| 2020-12-03T12:44:37
| 2020-12-03T12:44:37
| 313,928,398
| 0
| 0
| null | 2020-11-18T12:55:18
| 2020-11-18T12:31:17
| null |
UTF-8
|
R
| false
| false
| 11,853
|
r
|
server.R
|
library(shiny)
library(DT)
source("plots.R")
shinyServer(function(input, output, session) {
observe({
if (input$evalType == "Comparative effect estimation") {
choices <- methods$method[methods$comparative == TRUE]
} else {
choices <- methods$method
}
updateCheckboxGroupInput(session, "method", choices = choices, selected = choices)
})
filterEstimates <- reactive({
subset <- estimates[estimates$database == input$database, ]
if (input$mdrr != "All") {
subset <- subset[!is.na(subset$mdrrTarget) & subset$mdrrTarget < as.numeric(input$mdrr), ]
if (input$evalType == "Comparative effect estimation") {
subset <- subset[!is.na(subset$mdrrComparator) & subset$mdrrComparator < as.numeric(input$mdrr), ]
}
}
subset <- subset[subset$method %in% input$method, ]
if (input$stratum != "All") {
subset <- subset[subset$stratum == input$stratum, ]
}
if (input$calibrated == "Calibrated") {
subset$logRr <- subset$calLogRr
subset$seLogRr <- subset$calSeLogRr
subset$ci95Lb <- subset$calCi95Lb
subset$ci95Ub <- subset$calCi95Ub
subset$p <- subset$calP
}
return(subset)
})
selectedEstimates <- reactive({
if (is.null(input$performanceMetrics_rows_selected)) {
return(NULL)
}
subset <- filterEstimates()
if (nrow(subset) == 0) {
return(NULL)
}
subset <- subset[subset$method == performanceMetrics()$Method[input$performanceMetrics_rows_selected] &
subset$analysisId == performanceMetrics()$"<span title=\"Analysis variant ID\">ID</span>"[input$performanceMetrics_rows_selected], ]
if (nrow(subset) == 0) {
return(NULL)
}
return(subset)
})
output$tableCaption <- renderUI({
subset <- filterEstimates()
subset <- unique(subset[, c("targetId", "comparatorId", "oldOutcomeId", "targetEffectSize")])
ncCount <- sum(subset$targetEffectSize == 1)
pcCount <- sum(subset$targetEffectSize != 1)
return(HTML(paste0("<strong>Table S.1</strong> Metrics based on ",
ncCount,
" negative and ",
pcCount,
" positive controls")))
})
performanceMetrics <- reactive({
subset <- filterEstimates()
if (nrow(subset) == 0) {
return(data.frame())
}
combis <- unique(subset[, c("method", "analysisId")])
if (input$trueRr == "Overall") {
computeMetrics <- function(i) {
forEval <- subset[subset$method == combis$method[i] & subset$analysisId == combis$analysisId[i], ]
nonEstimable <- round(mean(forEval$seLogRr >= 99), 2)
# forEval <- forEval[forEval$seLogRr < 99, ]
roc <- pROC::roc(forEval$targetEffectSize > 1, forEval$logRr, algorithm = 3)
auc <- round(pROC::auc(roc), 2)
mse <- round(mean((forEval$logRr - log(forEval$trueEffectSize))^2), 2)
coverage <- round(mean(forEval$ci95Lb < forEval$trueEffectSize & forEval$ci95Ub > forEval$trueEffectSize),
2)
meanP <- round(-1 + exp(mean(log(1 + (1/(forEval$seLogRr^2))))), 2)
type1 <- round(mean(forEval$p[forEval$targetEffectSize == 1] < 0.05), 2)
type2 <- round(mean(forEval$p[forEval$targetEffectSize > 1] >= 0.05), 2)
return(c(auc = auc,
coverage = coverage,
meanP = meanP,
mse = mse,
type1 = type1,
type2 = type2,
nonEstimable = nonEstimable))
}
combis <- cbind(combis, as.data.frame(t(sapply(1:nrow(combis), computeMetrics))))
} else {
# trueRr <- input$trueRr
computeMetrics <- function(i) {
forEval <- subset[subset$method == combis$method[i] & subset$analysisId == combis$analysisId[i] &
subset$targetEffectSize == input$trueRr, ]
nonEstimable <- round(mean(forEval$seLogRr >= 99), 2)
# forEval <- forEval[forEval$seLogRr < 99, ]
mse <- round(mean((forEval$logRr - log(forEval$trueEffectSize))^2), 2)
coverage <- round(mean(forEval$ci95Lb < forEval$trueEffectSize & forEval$ci95Ub > forEval$trueEffectSize),
2)
meanP <- round(-1 + exp(mean(log(1 + (1/(forEval$seLogRr^2))))), 2)
if (input$trueRr == "1") {
auc <- NA
type1 <- round(mean(forEval$p < 0.05), 2)
type2 <- NA
} else {
negAndPos <- subset[subset$method == combis$method[i] & subset$analysisId == combis$analysisId[i] &
(subset$targetEffectSize == input$trueRr | subset$targetEffectSize == 1), ]
roc <- pROC::roc(negAndPos$targetEffectSize > 1, negAndPos$logRr, algorithm = 3)
auc <- round(pROC::auc(roc), 2)
type1 <- NA
type2 <- round(mean(forEval$p[forEval$targetEffectSize > 1] >= 0.05), 2)
}
return(c(auc = auc,
coverage = coverage,
meanP = meanP,
mse = mse,
type1 = type1,
type2 = type2,
nonEstimable = nonEstimable))
}
combis <- cbind(combis, as.data.frame(t(sapply(1:nrow(combis), computeMetrics))))
}
colnames(combis) <- c("Method",
"<span title=\"Analysis variant ID\">ID</span>",
"<span title=\"Area under the receiver operator curve\">AUC</span>",
"<span title=\"Coverage of the 95% confidence interval\">Coverage</span>",
"<span title=\"Geometric mean precision (1/SE^2)\">Mean Precision</span>",
"<span title=\"Mean Squared Error\">MSE</span>",
"<span title=\"Type I Error\">Type I error</span>",
"<span title=\"Type II Error\">Type II error</span>",
"<span title=\"Fraction where estimate could not be computed\">Non-estimable</span>")
return(combis)
})
output$performanceMetrics <- renderDataTable({
selection <- list(mode = "single", target = "row")
options <- list(pageLength = 10, searching = FALSE, lengthChange = TRUE)
isolate(if (!is.null(input$performanceMetrics_rows_selected)) {
selection$selected <- input$performanceMetrics_rows_selected
options$displayStart <- floor(input$performanceMetrics_rows_selected[1]/10) * 10
})
data <- performanceMetrics()
if (nrow(data) == 0) {
return(data)
}
table <- DT::datatable(data,
selection = selection,
options = options,
rownames = FALSE,
escape = FALSE)
colors <- c("lightblue", "lightblue", "lightblue", "pink", "pink", "pink", "pink")
mins <- c(0, 0, 0, 0, 0, 0, 0)
maxs <- c(1, 1, max(data[, 5]), max(data[, 6]), 1, 1, 1)
for (i in 1:length(colors)) {
table <- DT::formatStyle(table = table,
columns = i + 2,
background = styleColorBar(c(mins[i], maxs[i]), colors[i]),
backgroundSize = "98% 88%",
backgroundRepeat = "no-repeat",
backgroundPosition = "center")
}
return(table)
})
output$estimates <- renderPlot({
subset <- selectedEstimates()
if (is.null(subset)) {
return(NULL)
} else {
subset$Group <- as.factor(paste("True hazard ratio =", subset$targetEffectSize))
return(plotScatter(subset))
}
})
output$details <- renderText({
subset <- selectedEstimates()
if (is.null(subset)) {
return(NULL)
} else {
method <- as.character(subset$method[1])
analysisId <- subset$analysisId[1]
description <- analysisRef$description[analysisRef$method == method & analysisRef$analysisId ==
analysisId]
return(paste0(method, " analysis ", analysisId, ": ", description))
}
})
outputOptions(output, "details", suspendWhenHidden = FALSE)
observeEvent(input$showSettings, {
subset <- selectedEstimates()
method <- as.character(subset$method[1])
analysisId <- subset$analysisId[1]
description <- analysisRef$description[analysisRef$method == method & analysisRef$analysisId ==
analysisId]
details <- analysisRef$details[analysisRef$method == method & analysisRef$analysisId == analysisId]
showModal(modalDialog(title = paste0(method, " analysis. ", analysisId, ": ", description),
pre(details),
easyClose = TRUE,
footer = NULL,
size = "l"))
})
output$rocCurves <- renderPlot({
subset <- selectedEstimates()
if (is.null(subset)) {
return(NULL)
} else {
subset$trueLogRr <- log(subset$targetEffectSize)
return(plotRocsInjectedSignals(logRr = subset$logRr,
trueLogRr = subset$trueLogRr,
showAucs = TRUE))
}
})
output$hoverInfoEstimates <- renderUI({
# Hover-over adapted from https://gitlab.com/snippets/16220
subset <- selectedEstimates()
if (is.null(subset)) {
return(NULL)
}
subset$Group <- as.factor(paste("True hazard ratio =", subset$targetEffectSize))
hover <- input$plotHoverInfoEstimates
point <- nearPoints(subset, hover, threshold = 50, maxpoints = 1, addDist = TRUE)
if (nrow(point) == 0)
return(NULL)
# calculate point position INSIDE the image as percent of total dimensions from left (horizontal) and
# from top (vertical)
left_pct <- (hover$x - hover$domain$left)/(hover$domain$right - hover$domain$left)
top_pct <- (hover$domain$top - hover$y)/(hover$domain$top - hover$domain$bottom)
# calculate distance from left and bottom side of the picture in pixels
left_px <- hover$range$left + left_pct * (hover$range$right - hover$range$left)
top_px <- hover$range$top + top_pct * (hover$range$bottom - hover$range$top)
# create style property fot tooltip background color is set so tooltip is a bit transparent z-index
# is set so we are sure are tooltip will be on top
style <- paste0("position:absolute; z-index:100; background-color: rgba(245, 245, 245, 0.85); ",
"left:",
left_px - 125,
"px; top:",
top_px - 150,
"px; width:250px;")
# actual tooltip created as wellPanel
estimate <- paste0(formatC(exp(point$logRr), digits = 2, format = "f"),
" (",
formatC(point$ci95Lb, digits = 2, format = "f"),
"-",
formatC(point$ci95Ub, digits = 2, format = "f"),
")")
if (point$comparative) {
text <- paste0("<b> target: </b>",
point$targetName,
"<br/>",
"<b> comparator: </b>",
point$comparatorName,
"<br/>")
} else {
text <- paste0("<b> exposure: </b>", point$targetName, "<br/>")
}
if (point$nesting) {
text <- paste0(text, "<b> nesting: </b>", point$nestingName, "<br/>")
}
text <- paste0(text,
"<b> outcome: </b>",
point$outcomeName,
"<br/>",
"<b> estimate: </b>",
estimate,
"<br/>")
div(style = "position: relative; width: 0; height: 0", wellPanel(style = style, p(HTML(text))))
})
})
|
0a481a07801e61a66d280d97500829599a0e0058
|
19f7fc6594fcbce0c4e1c76ae455887976eb6859
|
/man/FLSqueezeSpace.Rd
|
d6e6ee59f6966666221321a2b64fbf4389de363d
|
[] |
no_license
|
AnanyaT/AdapteR
|
696d16ebed0df716614c1d90c7f6b1092953cb69
|
3842d3e662f8e4fdf5709df41deada875aff9a2e
|
refs/heads/master
| 2021-01-18T17:10:47.836244
| 2016-06-03T09:57:24
| 2016-06-03T09:57:24
| 59,019,034
| 0
| 0
| null | 2016-05-17T12:08:53
| 2016-05-17T12:08:52
| null |
UTF-8
|
R
| false
| true
| 573
|
rd
|
FLSqueezeSpace.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FLStringFunctions.R
\name{FLSqueezeSpace}
\alias{FLSqueezeSpace}
\title{Remove extra spaces in strings}
\usage{
FLSqueezeSpace(object)
}
\arguments{
\item{object}{FLVector of characters}
}
\value{
FLVector with extra spaces removed
}
\description{
Removes extra spaces from elements
}
\section{Constraints}{
row vectors are not supported currently.
}
\examples{
widetable <- FLTable("FL_DEMO", "tblstringID", "stringID")
flv <- widetable[1:6,"string"]
resultflvector <- FLSqueezeSpace(flv)
}
|
dce4763568b860e697cef20f6c35d00e117a7678
|
dc35bc08b2d9eebf783d0a1445d3120b8d83494d
|
/R/report.r
|
82f01b1202d81ca8e83f0e574bb3cf81f3068728
|
[
"MIT"
] |
permissive
|
hbc/CHBUtils
|
c83272fd84922fc6fa7e5d34e42b2199552afd5c
|
a226ceeced2353ee2c7ba2eb22f52bf90c023898
|
refs/heads/master
| 2021-01-15T15:27:14.308614
| 2017-10-26T18:01:19
| 2017-10-26T18:01:19
| 14,430,820
| 2
| 5
| null | 2017-10-26T18:01:20
| 2013-11-15T17:35:32
|
R
|
UTF-8
|
R
| false
| false
| 9,683
|
r
|
report.r
|
# Build a per-nucleotide read-quality figure from each sample's FastQC output
# and return it as a Nozzle figure element.
# path: root directory holding one subdirectory per sample
# samples: sample directory names (first 21 chars used as display labels)
# out: output directory prefix for the table/figure files
i_create_fastqc_figure<-function(path,samples,out){
require(ggplot2)
require(Nozzle.R1)
############QUALITY PER NT###############
# Collect the per-base quality table from every sample's fastqc_data.txt.
# NOTE(review): growing tfqc with rbind in a loop is O(n^2); acceptable for
# small sample counts.
tfqc<-data.frame()
for (f in samples){
src<-paste(path,f,"/qc/fastqc/fastqc_data.txt",sep="")
# Find the line number of the second "END" marker via shell tools so we know
# where the quality table stops (requires grep/head/tail/sed/cut on PATH).
endnt<-system(paste("grep -n END ",src," | head -2 | tail -1| sed 's/:/\t/' | cut -f 1"),
intern = TRUE)
endnt<-as.integer(endnt)
# skip=12 / nrows=endnt-14 assume FastQC's fixed report layout - TODO confirm
# against the FastQC version in use.
fqc<-suppressMessages(read.table(src,nrows=endnt-14,skip=12,sep="\t"))
names(fqc)<-c("nt","mean","median","lowQ","upQ","10Q","90Q")
fqc$sample<-substr(f,1,21)
tfqc<-rbind(tfqc,fqc)
}
# Persist the combined table next to the figures.
tfqcFile<-paste(out,"tfqc.txt",sep="")
write.table(tfqc,tfqcFile,row.names=F,quote=F,sep="\t")
# Jittered mean quality per position, one colour per sample; the horizontal
# line marks the Q30 threshold referenced in the figure caption.
p<-ggplot(tfqc,aes(factor(nt),mean)) +
#geom_boxplot() +
geom_jitter(position=position_jitter(width=0.3),aes(factor(nt), mean,colour=factor(sample)))+
geom_abline(intercept = 30,slope=0)+
ylim(25,42)+
theme_bw(base_size = 12) +
theme(axis.text.x = element_blank())+
labs(list(x="nucleotide",y="score",colour="Samples"))
# Write a low-res JPEG and a high-res PDF version of the plot.
ffqcFile="ffqc.jpg"
ffqcHFile="ffqcH.pdf"
jpeg(paste(out, ffqcFile, sep="" ) ,width=600,height=400,quality=100 );
print(p)
dev.off();
pdf(paste( out, ffqcHFile, sep="" ) );
print(p)
dev.off();
# create a figure and make it available for exporting
FRfqc <- newFigure( ffqcFile, fileHighRes=ffqcHFile, exportId="FIGURE_FASTQC",
"This figure shows the quality per nucleotide for each sample.
Any value above 30 is quite good quality." );
################################################
return(FRfqc)
}
# Build a normalized gene-body coverage figure (genes binned as low / medium /
# high expression, faceted) from RNA-SeQC output of every sample and return it
# as a Nozzle figure element.
# path: root dir with one subdirectory per sample; out: output dir prefix.
i_create_rnaseqc_figure <- function(path, samples, out) {
  require(ggplot2)
  require(Nozzle.R1)
  ############COVERAGE###############
  # Read one meanCoverageNorm_<bin>.txt table and tag it with its bin, the
  # position index and a (truncated) sample label.
  read_cov <- function(f, bin) {
    src <- paste(path, f, "/qc/rnaseqc/meanCoverageNorm_", bin, ".txt", sep = "")
    temp <- read.table(src, skip = 1)
    temp$type <- bin
    temp$pos <- seq_len(nrow(temp))
    temp$sample <- substr(f, 1, 21)
    temp
  }
  # Bind everything once instead of growing a data.frame inside a loop.
  trqccov <- do.call(rbind, lapply(samples, function(f) {
    print(f)  # progress output, kept from the original
    do.call(rbind, lapply(c("low", "medium", "high"), function(bin) read_cov(f, bin)))
  }))
  trqccovFile <- paste(out, "trqccov.txt", sep = "")
  write.table(trqccov, trqccovFile, row.names = FALSE, quote = FALSE, sep = "\t")
  # Order the facets low -> medium -> high; V1 is the coverage column read
  # from the file and +0.5 avoids log2(0).
  trqccov$type <- factor(trqccov$type, levels = c("low", "medium", "high"))
  p <- ggplot(trqccov, aes(pos, log2(V1 + 0.5), colour = sample)) +
    geom_line() +
    theme_bw(base_size = 12) +
    theme(axis.text.x = element_blank()) +
    labs(list(x = "gene", y = "coverage", colour = "Samples")) +
    facet_wrap(~type, nrow = 3)
  stdFile <- "frqccov.jpg"
  # BUG FIX: the high-res copy is written with pdf(), so name it .pdf
  # (was "frqccovH.jpg"); this also matches i_create_gene_coverage.
  highFile <- "frqccovH.pdf"
  jpeg(paste(out, stdFile, sep = ""), width = 600, height = 400, quality = 100)
  print(p)
  # BUG FIX: was dev.off(frqccovFile) with frqccovFile undefined, which raised
  # an error and left the jpeg device open; dev.off() closes the current device.
  dev.off()
  pdf(paste(out, highFile, sep = ""))
  print(p)
  dev.off()
  # create a figure and make it available for exporting
  FIG <- newFigure(stdFile, fileHighRes = highFile, exportId = "FIGURE_COV",
                   "This shows the gene coverage by reads.
A good signal is an equal coverage along gene.")
  ################################################
  return(FIG)
}
# Build a normalized gene-body coverage figure (genes binned as low / medium /
# high expression, faceted) from RNA-SeQC output and return it as a Nozzle
# figure. Near-duplicate of i_create_rnaseqc_figure.
i_create_gene_coverage<-function(path,samples,out){
require(ggplot2)
require(Nozzle.R1)
############COVERAGE###############
# Stack the three meanCoverageNorm_<bin>.txt tables of every sample.
# NOTE(review): rbind-in-a-loop grows quadratically; fine for few samples.
trqccov<-data.frame()
for (f in samples){
src<-paste(path,f,"/qc/rnaseqc/meanCoverageNorm_low.txt",sep="")
temp<-read.table(src,skip=1)
temp$type<-"low"
temp$pos<-1:nrow(temp)
temp$sample<-substr(f,1,21)
trqccov<-rbind(trqccov,temp)
src<-paste(path,f,"/qc/rnaseqc/meanCoverageNorm_medium.txt",sep="")
temp<-read.table(src,skip=1)
temp$type<-"medium"
temp$pos<-1:nrow(temp)
temp$sample<-substr(f,1,21)
trqccov<-rbind(trqccov,temp)
src<-paste(path,f,"/qc/rnaseqc/meanCoverageNorm_high.txt",sep="")
temp<-read.table(src,skip=1)
temp$type<-"high"
temp$pos<-1:nrow(temp)
temp$sample<-substr(f,1,21)
trqccov<-rbind(trqccov,temp)
}
# Persist the combined table next to the figures.
trqccovFile<-paste(out,"trqccov.txt",sep="")
write.table(trqccov,trqccovFile,row.names=F,quote=F,sep="\t")
# Order the facets low -> medium -> high.
trqccov$type<-factor(trqccov$type,levels=c("low","medium","high"))
# V1 is the unnamed coverage column read from the file; +0.5 avoids log2(0).
p<-ggplot(trqccov,aes(pos,log2(V1+0.5),colour=sample)) +
geom_line()+
theme_bw(base_size = 12) +
theme(axis.text.x = element_blank())+
labs(list(x="gene",y="coverage",colour="Samples"))+
facet_wrap(~type,nrow=3)
# Low-res JPEG plus high-res PDF for export.
stdFile="frqccov.jpg"
highFile="frqccovH.pdf"
jpeg( paste( out ,stdFile , sep="" ) ,width=600,height=400,quality=100 );
print(p)
dev.off();
pdf( paste( out , highFile, sep="" ) );
print(p)
dev.off();
# create a figure and make it available for exporting
FIG <- newFigure( stdFile, fileHighRes=highFile,exportId="FIGURE_COV",
"This shows the gene coverage by reads.
A good signal is an equal coverage along the gene." );
################################################
return(FIG)
}
# Return a Nozzle table of the 30 most highly expressed genes, based on the
# annotated combined count matrix found in the project folder under `path`.
# NOTE(review): the ssamples argument is accepted but never used - confirm
# whether per-sample filtering was intended.
i_create_count_top<-function(path,ssamples,out){
require(Nozzle.R1)
# The project directory is discovered by name pattern.
mainfolder<-dir(path,pattern="project")
############COUNTS###############
counts<-read.table(paste(sep="",path,mainfolder,"/annotated_combined.counts"),header=T,sep="\t")
# Trim long column (sample) names for display.
names(counts)<-substr(names(counts),1,21)
# Mean over columns 2..(ncol-1): presumably the first column is the gene id
# and the last is the annotation column - TODO confirm file layout.
counts$mean<-rowMeans(counts[,2:(ncol(counts)-1)])
# Sort by decreasing mean expression and drop genes with zero mean.
counts<-counts[order(-counts$mean),]
counts<-counts[counts$mean>0,]
# Full sorted table is written to <out>/counts.txt; the Nozzle table shows
# only the top 30 rows.
countsFile<-"counts.txt"
write.table(counts,paste(out,countsFile,sep=""),row.names=F,quote=F,sep="\t")
short<-counts[1:30,]
TAB <- newTable(short , file=countsFile, exportId="TABLE_COUNTS",
"Top genes" );
return(TAB)
}
# Produce four Nozzle figures describing the count matrix: raw-count
# distribution, rlog-normalized distribution, cumulative expression per
# sample, and an MDS plot.
# condition: one experimental-condition label per sample column (passed to
# DESeq2 and to the mds helper).
i_create_distribution_counts<-function(path,samples,out,condition){
require(ggplot2)
require(Nozzle.R1)
require(reshape)
require(DESeq2)
# Locate the project folder and load the combined count matrix.
mainfolder<-dir(path,pattern="project")
counts<-read.table(paste(sep="",path,mainfolder,"/combined.counts"),header=T,row.names=1,sep="\t")
names(counts)<-substr(names(counts),1,21)
# Drop genes with zero counts in every sample.
keep<-rowSums(counts>0)>=1
counts<-counts[keep,]
# Boxplot of raw counts per sample (long format via melt); +0.1 avoids log2(0).
cshape<-suppressMessages(melt(counts,by=1))
p<-ggplot(cshape,aes(factor(variable),log2(value+0.1))) +
geom_boxplot() +
theme_bw(base_size = 12) +
theme(axis.text.x = element_blank()) +
labs(list(x="",y="log2(raw counts)"))
stdFile= "cbox.jpg"
jpeg( paste( out,stdFile, sep="" ),width=600,height=400,quality=100 );
print(p)
dev.off();
# create a figure and make it available for exporting
FIG1 <- newFigure( stdFile,exportId="FIGURE_COUNTS",
"Distribution of raw counts" );
# DESeq2 rlog transformation for the normalized views below.
ma<-counts
design<-data.frame(row.names=names(ma),condition=condition)
dse<-DESeqDataSetFromMatrix(countData = ma,
design=~condition,
colData=design)
rld<-rlogTransformation(dse)
# Boxplot of rlog-normalized values per sample.
p<-ggplot(suppressMessages(melt(assay(rld))),aes(factor(X2),value)) +
geom_boxplot() +
theme_bw(base_size = 12) +
theme(axis.text.x = element_blank())+
labs(list(x="",y="log2(normalized counts)"))
stdFile= "cboxnorm.jpg"
jpeg( paste( out,stdFile, sep="" ),width=600,height=400,quality=100 );
print(p)
dev.off();
# create a figure and make it available for exporting
FIG2 <- newFigure( stdFile,exportId="FIGURE_COUNTSNORM",
"Distribution of normalized counts" );
# Cumulative (column-wise sorted) expression per sample.
cum<-assay(rld)
cum<-as.data.frame(apply(cum,2,sort))
cum$pos<-nrow(cum):1
cum<- suppressMessages(melt(cum,id.vars="pos"))
p<-ggplot(cum,aes(pos,value,colour=variable)) +
geom_point() +
theme_bw(base_size = 12) +
labs(list(x="sorted genes",y="log2(normalized counts)"))
sdtFile="fcum.jpg"
highFile="fcumH.pdf"
jpeg(paste( out,sdtFile , sep="" ) ,width=600,height=400,quality=100 );
print(p)
dev.off()
pdf( paste( out, highFile, sep="" ) )
print(p)
dev.off()
# create a figure and make it available for exporting
FIG3<- newFigure( sdtFile, fileHighRes=highFile,exportId="FIGURE_CUM",
"Cumulative expression by sample." )
# MDS plot; mds() is presumably a helper defined elsewhere in this package
# returning a ggplot with columns one/two/label - TODO confirm.
p<-mds(assay(rld),condition)
p<-p + theme_bw(base_size = 12) +
geom_text(aes(one,two,label=label))+
scale_color_brewer(palette="Set1")
stdFile= "mds.jpg"
jpeg( paste( out,stdFile, sep="" ),width=600,height=400,quality=100 )
print(p)
dev.off()
# create a figure and make it available for exporting
FIG4 <- newFigure( stdFile,exportId="FIGURE_MDS",
"Multidimensional scaling plot" );
return(list(FIG1,FIG2,FIG3,FIG4))
}
# Collect RNA-SeQC alignment metrics for every sample into one table, write it
# to <out>/trqc.txt and return it as a Nozzle table element.
i_create_mapping_stats <- function(path, samples, out) {
  require(Nozzle.R1)
  ############ALIGNED#################
  # Read each sample's metrics.tsv once into a list and bind in one step:
  # avoids the original rbind-inside-a-loop (quadratic copying).
  per_sample <- lapply(samples, function(f) {
    src <- paste(path, f, "/qc/rnaseqc/metrics.tsv", sep = "")
    rqc <- read.table(src, sep = "\t", skip = 1)
    # Truncated label column (kept from the original even though the column
    # selection below does not include it).
    rqc$sample <- substr(f, 1, 21)
    rqc
  })
  trqc <- do.call(rbind, per_sample)
  # Keep the columns of interest.
  # NOTE(review): numeric positions assume a fixed metrics.tsv layout -
  # confirm against the RNA-SeQC version in use.
  trqc <- trqc[, c(1, 19, 17, 8, 20, 23, 7, 38)]
  names(trqc) <- c("Sample", "Total", "Mapped", "Mapping rate", "rRNA",
                   "transcripts", "Exonic rate", "Bases MM rate")
  tFile <- "trqc.txt"
  write.table(trqc, paste(out, tFile, sep = ""), row.names = FALSE, quote = FALSE, sep = "\t")
  # create a table and make it available for exporting
  TAB <- newTable(trqc, file = tFile, exportId = "TABLE_RNASEQC",
                  "Stats of mapping reads ")
  ################################################
  return(TAB)
}
|
0aa9019455aa8cafd55cbfb6f876ac111cd21d24
|
007a16939d87119e0a9bb0412b79a9ff9d3d014b
|
/man/read_bat_6800.Rd
|
b4f047a96ebbcfff3758bb5e76b476fcf3664579
|
[
"MIT"
] |
permissive
|
zhujiedong/readphoto
|
186069c5073e2b5c67292ec4364a7ead31503093
|
e65aaca72e6aede8214878a7243f2be1b5a14bd9
|
refs/heads/master
| 2021-06-15T03:01:31.940049
| 2021-04-14T04:54:33
| 2021-04-14T04:54:33
| 159,325,617
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 534
|
rd
|
read_bat_6800.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_bat_6800.R
\name{read_bat_6800}
\alias{read_bat_6800}
\title{read all raw data in one file measured by LI-6800}
\usage{
read_bat_6800(file_dir, data_start = 56)
}
\arguments{
\item{file_dir}{is the file directory only contains all the measured
raw data.}
\item{data_start}{the start of your data(without headline)}
}
\description{
help to read all raw data files with a command.
}
\examples{
\dontrun{
library(readphoto)
read_bat_6800('./6800')
}
}
|
0a7f9b0977cc4555aee945b1ef2614f319b3a7f7
|
bb62e7f31de13f31325da05ad42dc50ca7afc65a
|
/Cubic Splines Interpolation/TrazadorCubicoSujetoVersionsimple.R
|
d5f4b8eac7ae1a2e389f929f58ac3d45a9fdd50b
|
[] |
no_license
|
BowixKamaze/MetodosNumericos
|
1c897e2ddf4716199c668c9e21d1197e46038155
|
a6fa7f4af5759480b9c57c9bc749e464caa0df3b
|
refs/heads/master
| 2023-01-22T03:20:14.841763
| 2020-12-09T19:23:31
| 2020-12-09T19:23:31
| 297,142,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,609
|
r
|
TrazadorCubicoSujetoVersionsimple.R
|
# Clean the workspace and close any open graphics devices.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern; kept as-is.
rm(list= ls())
graphics.off()
# Test data: nodes x and values a = f(x) for f(x) = e^x, with clamped end
# derivatives FPO = f'(0) and FPN = f'(3).
x<-c(0,1,2,3)
a<-c(1,exp(1),exp(2),exp(3))
FPO<- 1
FPN<- exp(3)
# -------------------------------------------------
# Clamped (complete) cubic spline interpolation, Burden & Faires Algorithm 3.5.
# n: number of intervals; x: n+1 increasing nodes; a: f(x) at the nodes;
# FPO, FPN: f'(x[1]) and f'(x[n+1]) (the clamped end conditions).
# Returns an (n+1) x 4 matrix with columns a, b, c, d; piece j of the spline is
#   S_j(t) = a[j] + b[j](t-x[j]) + c[j](t-x[j])^2 + d[j](t-x[j])^3,  x[j]<=t<=x[j+1].
TrazadorCubicoSujeto <- function(n, x, a, FPO, FPN) {
  # Step 1: interval widths (vectorized; originals grew h from an empty c()).
  h <- x[2:(n + 1)] - x[1:n]
  # Preallocate working vectors instead of growing them element by element.
  alfa <- numeric(n + 1)
  l <- numeric(n + 1)
  u <- numeric(n + 1)
  z <- numeric(n + 1)
  b <- numeric(n + 1)
  c <- numeric(n + 1)   # kept named "c" so cbind() yields column name "c"
  d <- numeric(n + 1)
  # Steps 2-3: right-hand side of the tridiagonal system.
  alfa[1] <- 3 * (a[2] - a[1]) / h[1] - 3 * FPO
  alfa[n + 1] <- 3 * FPN - 3 * (a[n + 1] - a[n]) / h[n]
  if (n >= 2) {   # guard fixes the reversed 2:1 range when n == 1
    for (i in 2:n) {
      alfa[i] <- (3 / h[i]) * (a[i + 1] - a[i]) - (3 / h[i - 1]) * (a[i] - a[i - 1])
    }
  }
  # Steps 4-6: forward elimination (Crout factorization).
  l[1] <- 2 * h[1]
  u[1] <- 0.5
  z[1] <- alfa[1] / l[1]
  if (n >= 2) {
    for (i in 2:n) {
      l[i] <- 2 * (x[i + 1] - x[i - 1]) - h[i - 1] * u[i - 1]
      u[i] <- h[i] / l[i]
      z[i] <- (alfa[i] - h[i - 1] * z[i - 1]) / l[i]
    }
  }
  l[n + 1] <- h[n] * (2 - u[n])
  z[n + 1] <- (alfa[n + 1] - h[n] * z[n]) / l[n + 1]
  c[n + 1] <- z[n + 1]
  # Step 7: back substitution for c, then b and d per interval.
  for (j in n:1) {
    c[j] <- z[j] - u[j] * c[j + 1]
    b[j] <- (a[j + 1] - a[j]) / h[j] - h[j] * (c[j + 1] + 2 * c[j]) / 3
    d[j] <- (c[j + 1] - c[j]) / (3 * h[j])
  }
  # Step 8: the original zeroes the trailing (unused) coefficients, so the
  # returned c[n+1] is 0 even though z[n+1] was used above; preserved here.
  b[n + 1] <- 0
  c[n + 1] <- 0
  d[n + 1] <- 0
  cbind(a, b, c, d)
}
# Build human-readable strings of the cubic pieces
# S_i(x) = a_i + b_i (x - x_i) + c_i (x - x_i)^2 + d_i (x - x_i)^3.
# Returns a list of n strings, one per interval.
# NOTE(review): relies on the globals FPO and FPN when calling
# TrazadorCubicoSujeto - confirm; passing them as arguments would be safer.
polinomios<-function(n,x,a){
ecuacion<-list()
matriz=TrazadorCubicoSujeto(n,x,a,FPO,FPN)
for(i in 1:n){
ecuacion[[i]] = paste(matriz[i,1],"+",matriz[i,2],"*(x-",x[i],")","+",matriz[i,3],"*(x-",x[i],")^2","+",matriz[i,4],"*(x-",x[i],")^3")
}
return(ecuacion)
}
# Run the clamped spline on the e^x test data and build the piecewise
# polynomial strings.
resultado_trazador=TrazadorCubicoSujeto(3,x,a,FPO,FPN)
resultado_trazador
prueba=polinomios(3,x,a)
# NOTE(review): resultado_trazador is printed twice; the second occurrence was
# probably meant to be something else - confirm intent.
resultado_trazador
prueba
|
dedb70653b2dba3fce59df689f4924782eabc56e
|
197bf433e3b4dd450b017fea950d9f6002b505e8
|
/challenge.R
|
f7e2aab9a24e92c39612bbe3f04b9df022052d7b
|
[] |
no_license
|
nupippo/playlab_data_analysis_challenge
|
f0316ea3706b9a3a9f5c92b80bd6c90138f74e46
|
4397aabdcca3cb656e386a3c6ffbad49423722e8
|
refs/heads/master
| 2020-05-20T04:45:51.465510
| 2015-08-16T14:15:39
| 2015-08-16T14:15:39
| 40,818,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,631
|
r
|
challenge.R
|
#### playlab data-analysis challenge ----

#### Import data
dataset <- read.csv("D:/playlab/cleaned_data.csv")

#### Convert millisecond timestamps to POSIX date-times (GMT)
dataset$date <- as.POSIXlt(as.POSIXct(dataset$Timestamp/1000, origin="1970-01-01", tz = "GMT"))
summary(dataset$date)
#### Observed range: 2014-08-17 00:00:13 to 2014-08-18 01:26:09 (GMT)

library("ggplot2")
library("reshape2")

#### Overall DAU (unique sessions) on 2014-08-17
day17 <- dataset[(dataset$date$mday == "17") == TRUE, ]
dau.day <- unique(day17$Session)
length(dau.day)

#### Overall DAU per game version
version.list <- sort(as.character(unique(day17$Version)))
# FIX: array(length(version.list)) created a length-1 array holding the count;
# allocate one slot per version instead (each slot is filled in the loop below).
dau.version <- numeric(length(version.list))
# hist(dau.version)
# qplot(dau.version, xlab = "Version", binwidth = 15)
# ggplot(data = as.numeric(dau.version))

#### Score row is logged immediately after a "Success" result row
player.won <- dataset[grep("Success", dataset$Result) + 1, ]
#### Rows for successful / failed attempts
player.success <- dataset[grep("Success", dataset$Result), ]
player.failure <- dataset[grep("Failure", dataset$Result), ]

level.list <- sort(unique(dataset$LevelNumber))
range <- length(version.list) * length(level.list)
won.version.summary <- data.frame(version = character(range), level = numeric(range),
                                  max = numeric(range), mean = numeric(range),
                                  min = numeric(range), median = numeric(range),
                                  stringsAsFactors = FALSE)
online.time <- data.frame(max = numeric(length(version.list)),
                          mean = numeric(length(version.list)),
                          min = numeric(length(version.list)),
                          sum = numeric(length(version.list)))
count <- length(version.list) * length(level.list)
win.rate <- data.frame(Version = character(count), Level = integer(count),
                       Success = numeric(count), Failure = numeric(count))

k <- 1
for (i in seq_along(version.list)) {
  #### DAU for this version
  dau.version[i] <- length(unique(day17$Session[day17$Version == version.list[i]]))

  #### How long do players remain in the game, per game version?
  # FIX: remove(time.diff) warned on the first iteration (object not yet
  # created) and was redundant — the assignment below resets it anyway.
  time.diff <- array()
  session <- unique(dataset$Session[dataset$Version == version.list[i]])
  for (x in seq_along(session)) {
    time.max <- max(dataset$Timestamp[dataset$Session == session[x]])
    time.min <- min(dataset$Timestamp[dataset$Session == session[x]])
    time.diff[x] <- time.max - time.min
  }
  #### time in game  (FIX: this line was bare text — a parse error)
  online.time$max[i] <- max(time.diff)
  online.time$mean[i] <- mean(time.diff)
  online.time$min[i] <- min(time.diff)
  online.time$sum[i] <- sum(time.diff)

  won.version <- player.won[player.won$Version == version.list[i], ]
  for (j in seq_along(level.list)) {
    #### Wins for this version/level combination
    won.level <- won.version[won.version$LevelNumber == level.list[j], ]
    won.version.summary$version[k] <- version.list[i]
    won.version.summary$level[k] <- level.list[j]
    if (nrow(won.level) != 0) {
      # NOTE(review): the head() size uses nrow(won.version), not
      # nrow(won.level) — looks like "top ~10% of the version's wins",
      # but confirm against the challenge spec.
      player.won.level <- head(won.level$Score, ceiling(nrow(won.version)/10))
      won.version.summary$max[k] <- max(player.won.level)
      won.version.summary$mean[k] <- mean(player.won.level)
      won.version.summary$min[k] <- min(player.won.level)
      won.version.summary$median[k] <- median(won.level$Score)
    }
    #### Success / failure counts per version/level
    win.rate$Version[k] <- version.list[i]
    win.rate$Level[k] <- level.list[j]
    win.rate$Success[k] <- nrow(player.success[player.success$Version == version.list[i] & player.success$LevelNumber == level.list[j], ])
    win.rate$Failure[k] <- nrow(player.failure[player.failure$Version == version.list[i] & player.failure$LevelNumber == level.list[j], ])
    k <- k + 1
  }
}

#### Long format for plotting
won.version.summary.m <- melt(won.version.summary, id.var = "version")
|
788df2146c52ece7906db6b577607f8625bab776
|
fd7cf10a09951074be1374bf3db180c3452c076c
|
/calDist.R
|
8ba81a60aa308331b2de2c2fdb6bf5a011b4a76c
|
[] |
no_license
|
kirk760099/R_leafletProxy-master
|
6201c84cbd39613f445aa917f3b71d5d35169e3f
|
b64c742560628c8c644b8e641fe2252d3d42cd7b
|
refs/heads/master
| 2020-12-02T16:07:39.403189
| 2016-09-11T16:05:06
| 2016-09-11T16:05:06
| 67,939,636
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 402
|
r
|
calDist.R
|
library(geosphere)
# rentLocate$Response_X = longitude, rentLocate$Response_Y = latitude;
# each retailer row carries lon/lat coordinates
# Minimum Haversine distance (metres) from one rental location to any retailer.
#
# rentLocate: a row/list with Response_X (longitude) and Response_Y (latitude).
# retailer:   data frame with lon / lat columns, one row per retailer.
# Returns the smallest distance; Inf when retailer has no rows.
# NOTE(review): this masks base::dist — consider renaming at the call sites.
#
# FIX: the original ended on the for loop, so the function returned NULL
# (the "#return dist" comment shows the intent); it also re-evaluated
# distHaversine() twice per iteration.
dist <- function(rentLocate, retailer) {
  origin <- c(rentLocate$Response_X, rentLocate$Response_Y)
  best <- Inf
  for (j in seq_len(nrow(retailer))) {
    d <- distHaversine(origin, c(retailer$lon[j], retailer$lat[j]))
    if (d < best) {
      best <- d
    }
  }
  best
}
|
77bced05a52fd15719f26b4cab543ab9b888b826
|
05af48d83669c392b976f946f47dfa9fe38de4f8
|
/demo/tests.R
|
5a7ac5d96e2b18e3c707dda1c38524ac6c66fe55
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
phillc73/pinhooker
|
fcead5c4dc980b05ed334c5e9207adc3975ac00c
|
2ec15d413b67d9d7d2083ad7c5d54bb64a2abb83
|
refs/heads/master
| 2021-07-21T08:14:24.564048
| 2021-02-14T18:55:18
| 2021-02-14T18:55:18
| 49,740,394
| 5
| 1
| null | 2016-02-08T10:55:47
| 2016-01-15T19:30:26
|
Rebol
|
UTF-8
|
R
| false
| false
| 811
|
r
|
tests.R
|
# Load RSQLite to read the SQLite copy of the bloodstock sales data
library(RSQLite)

# Load the Rds data file
testrds <- readRDS("bloodstockSalesData.rds")

# Load the CSV and SQLite copies of the same data
testcsv <- read.csv("bloodstockSalesData.csv", as.is = TRUE)
con <- dbConnect(SQLite(), "bloodstockSalesData.sqlite")
# FIX: paste() around a single literal was a no-op; use the string directly.
sql1 <- "SELECT * FROM bloodstockSalesData"
testsqlite <- dbGetQuery(con, sql1)
dbDisconnect(con)

# Inspect structure of the three copies
str(testrds)
str(testcsv)
str(testsqlite)

# Example: remove records for a specific sale date, then re-save each file.
# NOTE(review): files are read from the working directory but written under
# data/ — confirm that asymmetry is intended.
testrds <- testrds[testrds$saleDate != "2015-10-15", ]
saveRDS(testrds, "data/bloodstockSalesData.rds")
testcsv <- testcsv[testcsv$saleDate != "2015-10-15", ]
write.csv(testcsv, "data/bloodstockSalesData.csv", row.names = FALSE, na = "")
|
9ad0e0ecc89854074841496da9daf1235e1a85e1
|
bd21a32d8baf53669f6e65c388d41aab38569ce5
|
/man/hist_cure_fractions.Rd
|
d5326fecf0dc0e41642cdb6d94b0702847be3ca9
|
[] |
no_license
|
han-tun/rstanbmcm
|
e5b8f9a315ff09e11a11bd9816e555f5ce7aedce
|
76d2c423fdd5d0a63040176df1068867a81da8e8
|
refs/heads/main
| 2023-01-06T09:38:31.432306
| 2020-11-03T15:20:05
| 2020-11-03T15:20:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 313
|
rd
|
hist_cure_fractions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hist_cure_fractions.R
\name{hist_cure_fractions}
\alias{hist_cure_fractions}
\title{hist_cure_fractions}
\usage{
hist_cure_fractions(stan_out)
}
\arguments{
\item{stan_out}{Nested list}
}
\value{
}
\description{
hist_cure_fractions
}
|
949675d397cc75aa3f82d3931ce07e8b1d70db79
|
6a4af3ff24f3e661a597158a2202109f978bc30e
|
/ITT Github MNAR Time 2 Auxiliary.R
|
314cd0e306995511403336073177ef50f1592df6
|
[] |
no_license
|
cdmcdermott221/PhD-Code
|
f25bbf7b19e31390b71c43be4b798a96843b7d83
|
9dbfbcea7c1cc9627b16f88c249e2b61c0f4b91e
|
refs/heads/master
| 2023-05-05T19:04:52.472017
| 2021-05-22T23:39:42
| 2021-05-22T23:39:42
| 183,238,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43,214
|
r
|
ITT Github MNAR Time 2 Auxiliary.R
|
##########################################
### ITT Chapter code/Noncompliance#######
##########################################
# Pre-allocate the result holders filled inside the 1000-replication
# simulation loop below: per method, the treatment*time coefficient,
# its standard error, p value, and a 1000 x 1 coverage indicator matrix.

# Full analysis set (no deletions)
Undeleted_coef <- NULL
Undeleted_SE <- NULL
Undeleted_p <- NULL
UnDel_coverage <- matrix(nrow = 1000, ncol = 1)

# "Worst case" (noncompliers drop back to control trajectories)
# FIX: these were never initialised, but the loop rbind()s onto
# Worst_coef / Worst_se / Worst_p and indexes Worst_coverage, which
# would stop with "object not found" on the first replication.
Worst_coef <- NULL
Worst_se <- NULL
Worst_p <- NULL
Worst_coverage <- matrix(nrow = 1000, ncol = 1)

# Linear mixed model / modified per protocol (PP)
LMM_coef <- NULL
LMM_se <- NULL
LMM_p <- NULL
LMM_coverage <- matrix(nrow = 1000, ncol = 1)

# Linear mixed model / modified per protocol (PP), with auxiliary variable
LMM_coef2 <- NULL
LMM_se2 <- NULL
LMM_p2 <- NULL
LMM_coverage2 <- matrix(nrow = 1000, ncol = 1)

# Complete case analysis / per protocol (PP)
CCA_coef <- NULL
CCA_se <- NULL
CCA_p <- NULL
# FIX: was matrix(nrow = 10000, ...) — the loop runs 1000 replications,
# like every other coverage holder here.
CCA_coverage <- matrix(nrow = 1000, ncol = 1)

# Complete case analysis / per protocol (PP), with auxiliary variable
CCA_coef2 <- NULL
CCA_se2 <- NULL
CCA_p2 <- NULL
CCA_coverage2 <- matrix(nrow = 1000, ncol = 1)

# Last observation carried forward (0% compliant, retain treatment effect)
LOCF_coef <- NULL
LOCF_se <- NULL
LOCF_p <- NULL
LOCF_coverage <- matrix(nrow = 1000, ncol = 1)

# Mean imputation
Mean_coef <- NULL
Mean_se <- NULL
Mean_p <- NULL
Mean_coverage <- matrix(nrow = 1000, ncol = 1)

# Multiple imputation
implist_coef <- NULL
implist_se <- NULL
imp_pvalue <- NULL
imp_coverage <- matrix(nrow = 1000, ncol = 1)

# Multiple imputation, with auxiliary variable
implist_coef2 <- NULL
implist_se2 <- NULL
imp_pvalue <- NULL
imp_coverage2 <- matrix(nrow = 1000, ncol = 1)

# Intention to Treat (ITT), 80% compliant
Switch_coef80 <- NULL
Switch_se80 <- NULL
Switch_pvalue80 <- NULL
Switch_coverage80 <- matrix(nrow = 1000, ncol = 1)

# Intention to Treat (ITT), 80% compliant, with auxiliary variable
Switch_coef802 <- NULL
Switch_se802 <- NULL
Switch_pvalue802 <- NULL
Switch_coverage802 <- matrix(nrow = 1000, ncol = 1)

# Full information maximum likelihood / structural equation model
OpenMx_coef <- NULL
OpenMx_SE <- NULL
# FIX: was matrix(nrow = 1000, ncol = ) — an empty (missing) ncol argument.
OpenMx_coverage <- matrix(nrow = 1000, ncol = 1)
# p-value indicators for the SEM (2 = not yet filled)
mxp <- data.frame(pvalue1 = rep(2, length = 1000), pvalue2 = rep(2, length = 1000))

# Full information maximum likelihood / SEM, with auxiliary variable
OpenMx_coef2 <- NULL
OpenMx_SE2 <- NULL
OpenMx_coverage2 <- matrix(nrow = 1000, ncol = 1)
# p-value indicators for the auxiliary SEM (2 = not yet filled)
mxp2 <- data.frame(pvalue1 = rep(2, length = 1000), pvalue2 = rep(2, length = 1000))
###########################################
###### Simulates 1000 X #######################
###########################################
for (i in 1:1000) {
#creation of dataset with 180 individuals
dtTrial <- genData(120)
#Adding treatment, control or treatment, randomly assignd to half of participants
dtTrial <- trtAssign(dtTrial, n=2)
#Adding Gender, randomly assigned half male half female
dtTrial = trtAssign(dtTrial, n=2, balanced=TRUE, grpName = "Gender")
#creating a correlation matrix among the 5 repeated measurements
C = matrix(c(1, 0.7, 0.7, 0.7, 0.7,
0.7, 1, 0.7, 0.7, 0.7,
0.7, 0.7, 1, 0.7, 0.7,
0.7, 0.7, 0.7, 1, 0.7,
0.7, 0.7, 0.7, 0.7, 1), nrow=5)
#creating dataset of the 5 repeated measures, length is same as dtTrial. Mean is 0 at baselie and SD is 1
dt = genCorData(length(dtTrial$id), mu=c(0, 0, 0, 0, 0), sigma=c(1, 1, 1, 1, 1), corMatrix=C)
#creating the Treatment*Time effects. Four because 4 follow-up times
gen.effect = defDataAdd(varname = "Tr1", dist = "normal", formula = 0.10, variance = 0.10)
gen.effect = defDataAdd(gen.effect, varname = "Tr2", dist = "normal", formula = 0.10, variance = 0.10)
gen.effect = defDataAdd(gen.effect, varname = "Tr3", dist = "normal", formula = 0.10, variance = 0.10)
gen.effect = defDataAdd(gen.effect, varname = "Tr4", dist = "normal", formula = 0.10, variance = 0.10)
#Adding age as covariate
gen.effect = defDataAdd(gen.effect, varname = "Age", dist = "uniform", formula = "18;60")
#combined participant and effects information
dtTrial = addColumns(gen.effect, dtTrial)
dtTrial <- cbind(dtTrial, Outcome_1 = dt$V1)
dtTrial <- cbind(dtTrial, Outcome_2 = dt$V2)
dtTrial <- cbind(dtTrial, Outcome_3 = dt$V3)
dtTrial <- cbind(dtTrial, Outcome_4 = dt$V4)
dtTrial <- cbind(dtTrial, Outcome_5 = dt$V5)
#adding the treatment*time effects to those in the intervention group
dtTrial$Outcome_1=dtTrial$Outcome_1
dtTrial$Outcome_2=dtTrial$Outcome_2 + (dtTrial$Tr1*dtTrial$trtGrp)
dtTrial$Outcome_3=dtTrial$Outcome_3 + (dtTrial$Tr1*dtTrial$trtGrp) + (dtTrial$Tr2*dtTrial$trtGrp)
dtTrial$Outcome_4=dtTrial$Outcome_4 + (dtTrial$Tr1*dtTrial$trtGrp) + (dtTrial$Tr2*dtTrial$trtGrp) + (dtTrial$Tr3*dtTrial$trtGrp)
dtTrial$Outcome_5=dtTrial$Outcome_5 + (dtTrial$Tr1*dtTrial$trtGrp) + (dtTrial$Tr2*dtTrial$trtGrp) + (dtTrial$Tr3*dtTrial$trtGrp) + (dtTrial$Tr4*dtTrial$trtGrp)
#recording the simulated values for the "worst" case of 0% compliant and drop back to control group/baseline
dtTrial$ITT_2 = dt$V2
dtTrial$ITT_3 = dt$V3
dtTrial$ITT_4 = dt$V4
dtTrial$ITT_5 = dt$V5
#getting just the variables necessary
dtTrial = dtTrial[, c("id", "trtGrp", "Outcome_1", "Outcome_2", "Outcome_3", "Outcome_4", "Outcome_5", "Gender", "Age", "ITT_2", "ITT_3", "ITT_4", "ITT_5", "Tr1", "Tr2", "Tr3", "Tr4")]
dtTrial1 = dtTrial[, c("id", "trtGrp", "Outcome_1", "Outcome_2", "Outcome_3", "Outcome_4", "Outcome_5", "Gender", "Age")]
#transforming to long format
dtTrial_long <- melt(dtTrial1,
# ID variables - all the variables to keep but not split apart on
id.vars=c("id", "trtGrp", "Gender", "Age"),
# The source columns
measure.vars=c("Outcome_1", "Outcome_2", "Outcome_3", "Outcome_4", "Outcome_5" ),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Time",
value.name="Outcome")
dtTrial_long$Time = recode(dtTrial_long$Time, " 'Outcome_1'=1")
dtTrial_long$Time = recode(dtTrial_long$Time, " 'Outcome_2'=2")
dtTrial_long$Time = recode(dtTrial_long$Time, " 'Outcome_3'=3")
dtTrial_long$Time = recode(dtTrial_long$Time, " 'Outcome_4'=4")
dtTrial_long$Time = recode(dtTrial_long$Time, " 'Outcome_5'=5")
#ensuring it's one slope over the 5 time-points by keeping it as an integer
dtTrial_long$Time=as.integer(dtTrial_long$Time)
#linear mixed model of full analysis set
#covariance structure as compound symmetry
lmm_fas <- lme(Outcome ~ Time + trtGrp + trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=dtTrial_long, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
#recording results of coefficient, SE, and p value
TestSum = summary(lmm_fas)$tTable[4,1]
TestSum2 = summary(lmm_fas)$tTable[4,2]
TestSum3 = summary(lmm_fas)$tTable[4,5]
#add to objects per loop
Undeleted_coef = rbind(Undeleted_coef, TestSum)
Undeleted_SE = rbind(Undeleted_SE, TestSum2)
Undeleted_p = rbind(Undeleted_p, TestSum3)
## Coverage: should be 0.10 for treatment*time effect
Coverage = c(0.10)
#getting 95% confidence interval
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
UnDel_coverage[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#################################################
######## Introducing noncompliance ###############
#################################################
#sorting it by outcome value for MNAR deletions
dtTrial = dtTrial[order(dtTrial$Outcome_2, decreasing=F), ]
#ordering by trtGrp since only noncompliant in treatment group
dtTrial = dtTrial[order(dtTrial$trtGrp), ]
#create object length of control group, all 1s
Del = rep(1, length(dtTrial$Outcome_3[dtTrial$trtGrp==0]))
#create object length of intervention group, blank for now
Del1 = rep(NA, length(dtTrial$Outcome_3[dtTrial$trtGrp==1]))
#subsetting those in treatment group
x = subset(dtTrial,trtGrp==1)
#creating probabilities ranging from .01 to .40, average is 0.20 or 20%
test2 = defData(varname = "w", dist = "uniform", formula = "1;40")
test1= genData(length(x$Outcome_2), test2)
test1 = test1[, "w"]
#dividing by 100 to get probabilities
test1=test1/100
#sorting small to large to make it that larger outcome values more likely to be noncompliant
test1 = sort(test1$w, decreasing=FALSE)
#v is the opposite probability, for the sample() code
v = 1-test1
#creating the auxiliary variable, which is only correlated with the missingness probability for the treatment group
#control group is just a random variable with mean of 0 and sd of 1
library(dplyr)
library(faux)
Aux = c(rnorm(length(Del), 0, 1), rnorm_pre(test1, mu=0, sd=1, r=0.9))
detach(package:faux)
detach(package:dplyr)
dtTrial$Aux = Aux
#creating loop for each individual in the dataset in treatment group
#each person has their own probability of being noncompliant or not
for (j in 1:length(x$Outcome_2))
{
Del1[j] = sample(c(0,1), size = 1, prob=c(test1[j], v[j]))
}
#combining the Del object (all 1s) with the new Del1 object
dtTrial$Del2 = c(Del, Del1)
#creating blanks if Del2 is a zero. Will fill in with different noncompliant values later.
#if someone is noncompliant at Time 2, will be noncompliant for remainder of trial
dtTrial$Outcome_2_05MNAR = ifelse(dtTrial$Del2==0, NA, dtTrial$Outcome_2)
dtTrial$Outcome_3_05MNAR = ifelse(dtTrial$Del2==0, NA, dtTrial$Outcome_3)
dtTrial$Outcome_4_05MNAR = ifelse(dtTrial$Del2==0, NA, dtTrial$Outcome_4)
dtTrial$Outcome_5_05MNAR = ifelse(dtTrial$Del2==0, NA, dtTrial$Outcome_5)
dtTrial = dtTrial[order(id), ]
#creating new dataset for the "worst case" scenario
dtTrial3 = dtTrial
#filling in the blanks with the "ITT2" etc values. This represents if they switched into the control group.
dtTrial3$Outcome_2_05MNAR = ifelse(is.na(dtTrial3$Outcome_2_05MNAR), dtTrial3$ITT_2, dtTrial3$Outcome_2_05MNAR)
dtTrial3$Outcome_3_05MNAR = ifelse(is.na(dtTrial3$Outcome_3_05MNAR), dtTrial3$ITT_3, dtTrial3$Outcome_3_05MNAR)
dtTrial3$Outcome_4_05MNAR = ifelse(is.na(dtTrial3$Outcome_4_05MNAR), dtTrial3$ITT_4, dtTrial3$Outcome_4_05MNAR)
dtTrial3$Outcome_5_05MNAR = ifelse(is.na(dtTrial3$Outcome_5_05MNAR), dtTrial3$ITT_5, dtTrial3$Outcome_5_05MNAR)
#just getting the variables of interest
dtTrial2 = dtTrial3[, c("id", "trtGrp", "Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR", "Gender", "Age")]
#transforming to long format
dtTrial_long2 <- melt(dtTrial2,
# ID variables - all the variables to keep but not split apart on
id.vars=c("id", "trtGrp", "Gender", "Age"),
# The source columns
measure.vars=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR" ),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Time",
value.name="Outcome")
dtTrial_long2$Time = recode(dtTrial_long2$Time, " 'Outcome_1'=1")
dtTrial_long2$Time = recode(dtTrial_long2$Time, " 'Outcome_2_05MNAR'=2")
dtTrial_long2$Time = recode(dtTrial_long2$Time, " 'Outcome_3_05MNAR'=3")
dtTrial_long2$Time = recode(dtTrial_long2$Time, " 'Outcome_4_05MNAR'=4")
dtTrial_long2$Time = recode(dtTrial_long2$Time, " 'Outcome_5_05MNAR'=5")
dtTrial_long2$Time=as.integer(dtTrial_long2$Time)
#linear mixed model of the "worst case" (switch into control group) data
lmm_worst <- lme(Outcome ~ Time + trtGrp + trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=dtTrial_long2, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
TestSum = summary(lmm_worst)$tTable[4,1]
TestSum2 = summary(lmm_worst)$tTable[4,2]
TestSum3 = summary(lmm_worst)$tTable[4,5]
#recording results
Worst_coef = rbind(Worst_coef, TestSum)
Worst_se = rbind(Worst_se, TestSum2)
Worst_p = rbind(Worst_p, TestSum3)
#getting 95% confidence interval for coverage
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
Worst_coverage[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#########################################
######## Intention to Treat Datasets ######
#########################################
#this time, the blanks are filled in with 80% of the treatment effect for the remainder of the trial
#since 80% compliant
Outcome_2_ITT= ifelse(is.na(dtTrial$Outcome_2_05MNAR), (dtTrial$Outcome_1 + dtTrial$Tr1*.80), dtTrial$Outcome_2)
Outcome_3_ITT= ifelse(is.na(dtTrial$Outcome_3_05MNAR), (dtTrial$Outcome_1 + dtTrial$Tr1*.80 + dtTrial$Tr2*0.80), dtTrial$Outcome_3)
Outcome_4_ITT= ifelse(is.na(dtTrial$Outcome_4_05MNAR), (dtTrial$Outcome_1 + dtTrial$Tr1*.80 + dtTrial$Tr2*0.80 + dtTrial$Tr3*0.80), dtTrial$Outcome_4)
Outcome_5_ITT= ifelse(is.na(dtTrial$Outcome_5_05MNAR), (dtTrial$Outcome_1 + dtTrial$Tr1*.80 + dtTrial$Tr2*0.80 + dtTrial$Tr3*0.80 + dtTrial$Tr4*0.80), dtTrial$Outcome_5)
#creating new datasets
rm(Data1)
Data1 = cbind(dtTrial, Outcome_2_ITT, Outcome_3_ITT, Outcome_4_ITT, Outcome_5_ITT)
#For the Per Protocol dataset--with the blanks for those who are noncompliant
Data1_cca = Data1[, c("id", "trtGrp", "Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR", "Gender", "Age", "Aux")]
#for the ITT80 dataset (80%compliant)
Data1_ITT = Data1[, c("id", "trtGrp", "Outcome_1", "Outcome_2_ITT", "Outcome_3_ITT", "Outcome_4_ITT", "Outcome_5_ITT", "Gender", "Age", "Aux")]
#transforming modified per protocol dataset to long format
Data1_long <- melt(Data1_cca,
# ID variables - all the variables to keep but not split apart on
id.vars=c("id", "trtGrp", "Gender", "Age", "Aux"),
# The source columns
measure.vars=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR" ),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Time",
value.name="Outcome")
Data1_long$Time = recode(Data1_long$Time, " 'Outcome_1'=1")
Data1_long$Time = recode(Data1_long$Time, " 'Outcome_2_05MNAR'=2")
Data1_long$Time = recode(Data1_long$Time, " 'Outcome_3_05MNAR'=3")
Data1_long$Time = recode(Data1_long$Time, " 'Outcome_4_05MNAR'=4")
Data1_long$Time = recode(Data1_long$Time, " 'Outcome_5_05MNAR'=5")
#transforming ITT80 to long format
Data1_long_ITT <- melt(Data1_ITT,
# ID variables - all the variables to keep but not split apart on
id.vars=c("id", "trtGrp", "Gender", "Age", "Aux"),
# The source columns
measure.vars=c("Outcome_1", "Outcome_2_ITT", "Outcome_3_ITT", "Outcome_4_ITT", "Outcome_5_ITT" ),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Time",
value.name="Outcome")
Data1_long_ITT$Time = recode(Data1_long_ITT$Time, " 'Outcome_1'=1")
Data1_long_ITT$Time = recode(Data1_long_ITT$Time, " 'Outcome_2_ITT'=2")
Data1_long_ITT$Time = recode(Data1_long_ITT$Time, " 'Outcome_3_ITT'=3")
Data1_long_ITT$Time = recode(Data1_long_ITT$Time, " 'Outcome_4_ITT'=4")
Data1_long_ITT$Time = recode(Data1_long_ITT$Time, " 'Outcome_5_ITT'=5")
########################################
### Modified Per Protocol ##########
########################################
Data1_long$Time=as.integer(Data1_long$Time)
#liner mixed model, modified per protocol
lmm_modpp <- lme(Outcome ~ Time + trtGrp + trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=Data1_long, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
#recording results
TestSum = summary(lmm_modpp)$tTable[4,1]
TestSum2 = summary(lmm_modpp)$tTable[4,2]
TestSum3 = summary(lmm_modpp)$tTable[4,5]
LMM_coef = rbind(LMM_coef, TestSum)
LMM_se = rbind(LMM_se, TestSum2)
LMM_p = rbind(LMM_p, TestSum3)
#getting 95% confidence interval for coverage
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
LMM_coverage[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#liner mixed model, modified per protocol with the auxiliary interaction variable with time and treatment group
lmm_modpp2 <- lme(Outcome ~ Time + trtGrp + trtGrp*Time + Aux*trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=Data1_long, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
#recording results
TestSum = summary(lmm_modpp2)$tTable[5,1]
TestSum2 = summary(lmm_modpp2)$tTable[5,2]
TestSum3 = summary(lmm_modpp2)$tTable[5,5]
LMM_coef2 = rbind(LMM_coef2, TestSum)
LMM_se2 = rbind(LMM_se2, TestSum2)
LMM_p2 = rbind(LMM_p2, TestSum3)
#getting 95% confidence interval for coverage
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
LMM_coverage2[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
####################################
##### complete case analysis #####
####### Per Protocol ############
####################################
#forcing out participants who were noncompliant
Data2 = na.omit(Data1_cca)
#transforming to long format
Data2_long <- melt(Data2,
# ID variables - all the variables to keep but not split apart on
id.vars=c("id", "trtGrp", "Gender", "Age", "Aux"),
# The source columns
measure.vars=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR" ),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Time",
value.name="Outcome")
Data2_long$Time = recode(Data2_long$Time, " 'Outcome_1'=1")
Data2_long$Time = recode(Data2_long$Time, " 'Outcome_2_05MNAR'=2")
Data2_long$Time = recode(Data2_long$Time, " 'Outcome_3_05MNAR'=3")
Data2_long$Time = recode(Data2_long$Time, " 'Outcome_4_05MNAR'=4")
Data2_long$Time = recode(Data2_long$Time, " 'Outcome_5_05MNAR'=5")
Data2_long$Time=as.integer(Data2_long$Time)
#linear mixed model of per protocol
lmm_pp <- lme(Outcome ~ Time + trtGrp + trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=Data2_long, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
#recording results
TestSum = summary(lmm_pp)$tTable[4,1]
TestSum2 = summary(lmm_pp)$tTable[4,2]
TestSum3 = summary(lmm_pp)$tTable[4,5]
CCA_coef = rbind(CCA_coef, TestSum)
CCA_se = rbind(CCA_se, TestSum2)
CCA_p = rbind(CCA_p, TestSum3)
#getting 95% confidence interval for coverage
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
CCA_coverage[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#linear mixed model of per protocol, but with auxiliary variable included
lmm_pp2 <- lme(Outcome ~ Time + trtGrp + trtGrp*Time + Aux*trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=Data2_long, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
#recording results
TestSum = summary(lmm_pp2)$tTable[5,1]
TestSum2 = summary(lmm_pp2)$tTable[5,2]
TestSum3 = summary(lmm_pp2)$tTable[5,5]
CCA_coef2 = rbind(CCA_coef2, TestSum)
CCA_se2 = rbind(CCA_se2, TestSum2)
CCA_p2 = rbind(CCA_p2, TestSum3)
#getting 95% confidence interval for coverage
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
CCA_coverage2[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
######## ITT, 80% compliant ############
Data1_long_ITT$Time=as.integer(Data1_long_ITT$Time)
#linear mixed model on ITT80 data
lmm_80 <- lme(Outcome ~ Time + trtGrp + trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=Data1_long_ITT, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
#recording results
TestSum = summary(lmm_80)$tTable[4,1]
TestSum2 = summary(lmm_80)$tTable[4,2]
TestSum3 = summary(lmm_80)$tTable[4,5]
Switch_coef80 = rbind(Switch_coef80, TestSum)
Switch_se80 = rbind(Switch_se80, TestSum2)
Switch_pvalue80 = rbind(Switch_pvalue80, TestSum3)
#getting 95% confidence interval for coverage
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
Switch_coverage80[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
######## ITT, 80% compliant ############
Data1_long_ITT$Time=as.integer(Data1_long_ITT$Time)
#linear mixed model on ITT80 data
lmm_80 <- lme(Outcome ~ Time + trtGrp + trtGrp*Time + Aux*trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=Data1_long_ITT, na.action="na.omit",
method = "ML",
control=(msMaxIter=100))
#recording results
TestSum = summary(lmm_80)$tTable[5,1]
TestSum2 = summary(lmm_80)$tTable[5,2]
TestSum3 = summary(lmm_80)$tTable[5,5]
Switch_coef802 = rbind(Switch_coef802, TestSum)
Switch_se802 = rbind(Switch_se802, TestSum2)
Switch_pvalue802 = rbind(Switch_pvalue802, TestSum3)
#getting 95% confidence interval for coverage
Upper = TestSum + (TestSum2*1.96)
Lower = TestSum - (TestSum2 * 1.96)
Switch_coverage802[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#################################################
######## structural equation model #############
#################################################
Data5=Data1_cca
#labelling manifest variables (observed variables) and latent variables (I = intercept, S = slope)
SingleLevelModel1= mxModel(
"One Level Model", type="RAM", manifestVars=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR",
"Outcome_5_05MNAR", "trtGrp"),
latentVars=c("I", "S"),
mxData(observed=Data5, type="raw"),
#compound symmetry covariance structure
mxPath(from=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(1,1,1,1,1),
labels=c("variance1","variance1", "variance1", "variance1", "variance1")),
mxPath(from="Outcome_1", to=c("Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(.8, .8, .8, .8),
labels=c("covar1", "covar1", "covar1", "covar1")),
mxPath(from="Outcome_2_05MNAR", to=c("Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(.8,.8,.8),
labels=c("covar1", "covar1", "covar1")),
mxPath(from="Outcome_3_05MNAR", to=c("Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(.8,.8),
labels=c("covar1", "covar1")),
mxPath(from="Outcome_4_05MNAR", to=c("Outcome_5_05MNAR"), arrows=2, free=T, values=.8,
labels="covar1"),
#one slope (S) from T1 to T5
mxPath(from=c("I", "S"), arrows=2, connect="unique.pairs", free=F, values=0),
#for latent growth curves, the intercept loadings have to be all 1s from each repeated measure
mxPath(from="I", to=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=1, free=F,
values=c(1, 1, 1,1,1)),
#and the slope loadings have to be 0,1,2,3,4 from earliest to latest repeated measure
mxPath(from="S", to=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=1, free=F,
values=c(0,1,2,3,4)),
#manifest means not estimated
mxPath(from="one", to=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=1, free=F,
values=c(0, 0, 0, 0, 0)),
#latent means estimated
mxPath(from="one", to=c("I", "S"), arrows=1, free=T, values=c(1, 1),
labels=c("meanI", "meanS")),
#getting the regression coefficients for treatment onto intercept and slope
mxPath(from="trtGrp", to=c("I", "S"), arrows=1, free=TRUE, values=c(.5, .5),
labels=c("ireg", "Sreg")),
#manifest mean for trtGrp not estimated
mxPath(from="one", to="trtGrp", arrows=1, free=F, values=0),
#variance for trtGrp not estimated
mxPath(from="trtGrp", arrows=2, free=F, values=1))
#running this model using mxRun()
SingleLevelModel1 = mxRun(SingleLevelModel1)
#recording coefficient and SE
OpenMx_stats2 = SingleLevelModel1$output$estimate[2]
OpenMx_coef = rbind(OpenMx_coef, OpenMx_stats2)
OpenMx_stats3 = SingleLevelModel1$output$standardErrors[2]
OpenMx_SE = rbind(OpenMx_SE, OpenMx_stats3)
#getting p value for the coefficient
m2 = mxModel(SingleLevelModel1, mxCI(c("Sreg"))) # list the things you want CIs for.
m2 = mxRun(m2, intervals= T)
x2 = m2$output$confidenceIntervals
#recording if confidence interval does not contain 0 (then significant)
mxp[i,1] = ifelse(x2[1,1] > 0, 1,
ifelse(x2[1,3] < 0, 1, 0))
#getting 95% confidence interval for coverage
Upper = OpenMx_stats2 + (OpenMx_stats3*1.96)
Lower = OpenMx_stats2 - (OpenMx_stats3*1.96)
OpenMx_coverage[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#labelling manifest variables (observed variables) and latent variables (I = intercept, S = slope)
SingleLevelModel2= mxModel(
"One Level Model", type="RAM", manifestVars=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR",
"Outcome_5_05MNAR", "trtGrp", "Aux"),
latentVars=c("I", "S"),
mxData(observed=Data5, type="raw"),
#compound symmetry covariance structure
mxPath(from=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(1,1,1,1,1),
labels=c("variance1","variance1", "variance1", "variance1", "variance1")),
mxPath(from="Outcome_1", to=c("Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(.8, .8, .8, .8),
labels=c("covar1", "covar1", "covar1", "covar1")),
mxPath(from="Outcome_2_05MNAR", to=c("Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(.8,.8,.8),
labels=c("covar1", "covar1", "covar1")),
mxPath(from="Outcome_3_05MNAR", to=c("Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=2, free=T, values=c(.8,.8),
labels=c("covar1", "covar1")),
mxPath(from="Outcome_4_05MNAR", to=c("Outcome_5_05MNAR"), arrows=2, free=T, values=.8,
labels="covar1"),
#one slope (S) from T1 to T5
mxPath(from=c("I", "S"), arrows=2, connect="unique.pairs", free=F, values=0),
#for latent growth curves, the intercept loadings have to be all 1s from each repeated measure
mxPath(from="I", to=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=1, free=F,
values=c(1, 1, 1,1,1)),
#and the slope loadings have to be 0,1,2,3,4 from earliest to latest repeated measure
mxPath(from="S", to=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=1, free=F,
values=c(0,1,2,3,4)),
#manifest means not estimated
mxPath(from="one", to=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR"), arrows=1, free=F,
values=c(0, 0, 0, 0, 0)),
#latent means estimated
mxPath(from="one", to=c("I", "S"), arrows=1, free=T, values=c(1, 1),
labels=c("meanI", "meanS")),
#getting the regression coefficients for treatment onto intercept and slope
mxPath(from="trtGrp", to=c("I", "S"), arrows=1, free=TRUE, values=c(.5, .5),
labels=c("ireg", "Sreg")),
#manifest mean for trtGrp not estimated
mxPath(from="one", to="trtGrp", arrows=1, free=F, values=0),
#variance for trtGrp not estimated
mxPath(from="trtGrp", arrows=2, free=F, values=1),
mxPath(from = "Aux", to = c("trtGrp", "S", "I"), free=T, values = c(0.5, 0.5, 0.5)),
mxPath(from = "Aux", arrows = 2, free=F, values=1),
mxPath(from="one", to="Aux", arrows=1, free=F, values=0)
)
#running this model using mxRun()
SingleLevelModel2 = mxRun(SingleLevelModel2)
#recording coefficient and SE
OpenMx_stats2 = SingleLevelModel2$output$estimate[2]
OpenMx_coef2 = rbind(OpenMx_coef2, OpenMx_stats2)
OpenMx_stats3 = SingleLevelModel2$output$standardErrors[2]
OpenMx_SE2 = rbind(OpenMx_SE2, OpenMx_stats3)
#getting p value for the coefficient
m2 = mxModel(SingleLevelModel2, mxCI(c("Sreg"))) # list the things you want CIs for.
m2 = mxRun(m2, intervals= T)
x2 = m2$output$confidenceIntervals
#recording if confidence interval does not contain 0 (then significant)
mxp2[i,1] = ifelse(x2[1,1] > 0, 1,
ifelse(x2[1,3] < 0, 1, 0))
#getting 95% confidence interval for coverage
Upper = OpenMx_stats2 + (OpenMx_stats3*1.96)
Lower = OpenMx_stats2 - (OpenMx_stats3*1.96)
OpenMx_coverage2[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#################################
##### multiple imputation ######
#################################
#setting up blank imputation model
ini1 = mice(Data1_cca, maxit=0)
#predictor matrix
pred = ini1$pred
#method list for each variable
meth = ini1$meth
#leave blank for fully observed variables
meth[c("id", "trtGrp", "Gender", "Age", "Outcome_1")] = ""
#predictor variables include TrtGrp, Gender, Age, and other outcome variables
pred["Outcome_2_05MNAR", ] = c(0, 1, 1, 0, 1, 1, 1, 1, 1, 0)
pred["Outcome_3_05MNAR", ] = c(0, 1, 1, 1, 0, 1, 1, 1, 1, 0)
pred["Outcome_4_05MNAR", ] = c(0, 1, 1, 1, 1, 0, 1, 1, 1, 0)
pred["Outcome_5_05MNAR", ] = c(0, 1, 1, 1, 1, 1, 0, 1, 1, 0)
#method to impute is "norm", for normally-distributed continous variables
meth[c("Outcome_5_05MNAR", "Outcome_4_05MNAR", "Outcome_3_05MNAR", "Outcome_2_05MNAR")] = "norm"
#run imputation, 40 datasets
imps = mice(Data1_cca, meth=meth, pred=pred, maxit=100, m = 40, pri=F)
#getting just the appropriate imputed datasets from object
com_MNAR05 = complete(imps, "long")
#Next, changing to further long format via "Time"
impMNAR05_ITT <- melt(com_MNAR05,
# ID variables - all the variables to keep but not split apart on
id.vars=c(".imp", ".id", "id", "trtGrp", "Gender", "Age", "Aux"),
# The source columns
measure.vars=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR" ),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Time",
value.name="Outcome")
#Renaming values as 1, 2, and 3 in Time variable
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_1'=1")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_2_05MNAR'=2")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_3_05MNAR'=3")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_4_05MNAR'=4")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_5_05MNAR'=5")
#creating objects to fill
rm(implist_c)
implist_c= NULL
rm(implist_s)
implist_s= NULL
rm(implist_p)
implist_p=NULL
#loop to analyse each imputed dataset, to be pooled
for (j in 1:40){
impMNAR05_ITT$Time=as.integer(impMNAR05_ITT$Time)
lmm_mi <- lme(Outcome ~ Time + trtGrp + trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=impMNAR05_ITT, subset=c(impMNAR05_ITT$.imp==j), na.action="na.omit",
method = "ML",
control=(msMaxIter=1000))
#recording coefficient, SE, and p value results
implist_c = rbind(implist_c, summary(lmm_mi)$tTable[4,1])
implist_s = rbind(implist_s, summary(lmm_mi)$tTable[4,2])
implist_p = rbind(implist_p, summary(lmm_mi)$tTable[4,5])
}
#using rubins rules, pooling results
combined.results <- mi.meld(q = implist_c, se = implist_s)
imp_p = mean(implist_p)
#recording pooled results
implist_coef=rbind(implist_coef, combined.results$q.mi)
implist_se=rbind(implist_se, combined.results$se.mi)
imp_pvalue=rbind(imp_pvalue, imp_p)
#getting 95% confidence interval for coverage
estimates = combined.results$q.mi
ses = combined.results$se.mi
Upper = estimates + (ses * 1.96)
Lower = estimates - (ses * 1.96)
imp_coverage[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
#setting up blank imputation model
ini1 = mice(Data1_cca, maxit=0)
#predictor matrix
pred = ini1$pred
#method list for each variable
meth = ini1$meth
#leave blank for fully observed variables
meth[c("id", "trtGrp", "Gender", "Age", "Outcome_1", "Aux")] = ""
#predictor variables include TrtGrp, Gender, Age, and other outcome variables
pred["Outcome_2_05MNAR", ] = c(0, 1, 1, 0, 1, 1, 1, 1, 1, 1)
pred["Outcome_3_05MNAR", ] = c(0, 1, 1, 1, 0, 1, 1, 1, 1, 1)
pred["Outcome_4_05MNAR", ] = c(0, 1, 1, 1, 1, 0, 1, 1, 1, 1)
pred["Outcome_5_05MNAR", ] = c(0, 1, 1, 1, 1, 1, 0, 1, 1, 1)
#method to impute is "norm", for normally-distributed continous variables
meth[c("Outcome_5_05MNAR", "Outcome_4_05MNAR", "Outcome_3_05MNAR", "Outcome_2_05MNAR")] = "norm"
#run imputation, 40 datasets
imps = mice(Data1_cca, meth=meth, pred=pred, maxit=100, m = 40, pri=F)
#getting just the appropriate imputed datasets from object
com_MNAR05 = complete(imps, "long")
#Next, changing to further long format via "Time"
impMNAR05_ITT <- melt(com_MNAR05,
# ID variables - all the variables to keep but not split apart on
id.vars=c(".imp", ".id", "id", "trtGrp", "Gender", "Age", "Aux"),
# The source columns
measure.vars=c("Outcome_1", "Outcome_2_05MNAR", "Outcome_3_05MNAR", "Outcome_4_05MNAR", "Outcome_5_05MNAR" ),
# Name of the destination column that will identify the original
# column that the measurement came from
variable.name="Time",
value.name="Outcome")
#Renaming values as 1, 2, and 3 in Time variable
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_1'=1")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_2_05MNAR'=2")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_3_05MNAR'=3")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_4_05MNAR'=4")
impMNAR05_ITT$Time = recode(impMNAR05_ITT$Time, " 'Outcome_5_05MNAR'=5")
#creating objects to fill
rm(implist_c)
implist_c= NULL
rm(implist_s)
implist_s= NULL
rm(implist_p)
implist_p=NULL
#loop to analyse each imputed dataset, to be pooled
for (j in 1:40){
impMNAR05_ITT$Time=as.integer(impMNAR05_ITT$Time)
lmm_mi <- lme(Outcome ~ Time + trtGrp + trtGrp*Time + Aux*trtGrp*Time,
random = list(id = ~1),
corr = corCompSymm(form= ~Time),
data=impMNAR05_ITT, subset=c(impMNAR05_ITT$.imp==j), na.action="na.omit",
method = "ML",
control=(msMaxIter=1000))
#recording coefficient, SE, and p value results
implist_c = rbind(implist_c, summary(lmm_mi)$tTable[5,1])
implist_s = rbind(implist_s, summary(lmm_mi)$tTable[5,2])
implist_p = rbind(implist_p, summary(lmm_mi)$tTable[5,5])
}
#using rubins rules, pooling results
combined.results <- mi.meld(q = implist_c, se = implist_s)
imp_p = mean(implist_p)
#recording pooled results
implist_coef2 = rbind(implist_coef2, combined.results$q.mi)
implist_se2 = rbind(implist_se2, combined.results$se.mi)
imp_pvalue2 = rbind(imp_pvalue2, imp_p)
#getting 95% confidence interval for coverage
estimates = combined.results$q.mi
ses = combined.results$se.mi
Upper = estimates + (ses * 1.96)
Lower = estimates - (ses * 1.96)
imp_coverage2[i, 1] = ifelse(Coverage < Upper & Coverage > Lower, T, F)
print(i)
}
########### FINISHED!!! ##############################
## Post-simulation summary.
## Compares each missing-data method's treatment-effect estimate against the
## full (undeleted) analysis set, then computes Monte Carlo standard errors
## (MCSE) of the coefficient and SE estimates across the 1000 simulation runs.
## Each *_bias vector is: c(coefficient bias, SE bias, count of runs with p > .05).
## Full analysis set treatment effect
Trt1_effect=c(mean(Undeleted_coef[,1]), mean(Undeleted_SE[,1]), length(which(Undeleted_p[,1] > 0.05)))
Trt1_effect
## Slope/effect bias of other methods
#multiple imputation (without auxiliary variable)
imp_bias = c((Trt1_effect[1]-mean(implist_coef[,1])), (Trt1_effect[2]-mean(implist_se[,1])), length(which(imp_pvalue[,1] > 0.05)))
## Slope/effect bias of other methods
#multiple imputation (with auxiliary variable)
imp_bias2 = c((Trt1_effect[1]-mean(implist_coef2[,1])), (Trt1_effect[2]-mean(implist_se2[,1])), length(which(imp_pvalue2[,1] > 0.05)))
#Modified Per Protocol
LMM_bias = c((Trt1_effect[1]-mean(LMM_coef[,1])), (Trt1_effect[2]-mean(LMM_se[,1])), length(which(LMM_p[,1] > 0.05)))
#Modified Per Protocol (auxiliary-variable model)
LMM_bias2 = c((Trt1_effect[1]-mean(LMM_coef2[,1])), (Trt1_effect[2]-mean(LMM_se2[,1])), length(which(LMM_p2[,1] > 0.05)))
#Per Protocol
CCA_bias = c((Trt1_effect[1]-mean(CCA_coef[,1])), (Trt1_effect[2]-mean(CCA_se[,1])), length(which(CCA_p[,1] > 0.05)))
#Per Protocol (auxiliary-variable model)
CCA_bias2 = c((Trt1_effect[1]-mean(CCA_coef2[,1])), (Trt1_effect[2]-mean(CCA_se2[,1])), length(which(CCA_p2[,1] > 0.05)))
#80% compliant, ITT
ITT80_Bias = c((Trt1_effect[1]-mean(Switch_coef80[,1])), (Trt1_effect[2]-mean(Switch_se80[,1])), length(which(Switch_pvalue80[,1] > 0.05)))
ITT80_Bias2 = c((Trt1_effect[1]-mean(Switch_coef802[,1])), (Trt1_effect[2]-mean(Switch_se802[,1])), length(which(Switch_pvalue802[,1] > 0.05)))
#structural equation model
#NOTE: for the SEM rows, significance was recorded via CI exclusion of 0
#(mx p matrices hold 1 = significant), so non-significant runs are counted as == 0.
SEM_bias = c((Trt1_effect[1]-mean(OpenMx_coef[,1])), (Trt1_effect[2]-mean(OpenMx_SE[,1])), length(which(mxp[,1] ==0)))
#structural equation model (auxiliary-variable model)
SEM_bias2 = c((Trt1_effect[1]-mean(OpenMx_coef2[,1])), (Trt1_effect[2]-mean(OpenMx_SE2[,1])), length(which(mxp2[,1] ==0)))
#combining results into dataset; one row per method
Slope1Bias = rbind(Trt1_effect, CCA_bias, CCA_bias2, LMM_bias, LMM_bias2, SEM_bias, SEM_bias2, ITT80_Bias, ITT80_Bias2,
                   imp_bias, imp_bias2)
### getting monte carlo standard errors ###
## MCSE = SD of the 1000 replicate estimates / sqrt(1000);
## the divisor 999000 is 999 (variance denominator) * 1000 (replicates).
## modPP Coefficient MCSEs
z = rep(mean(LMM_coef[,1]), times=1000)
MCSE_LMMCoef_1 = sqrt(sum((LMM_coef[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(LMM_se[,1]), times=1000)
MCSE_LMMSE_1 = sqrt(sum((LMM_se[,1] - z)^2)/999000)
## modPP Coefficient MCSEs
z = rep(mean(LMM_coef2[,1]), times=1000)
MCSE_LMMCoef_2 = sqrt(sum((LMM_coef2[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(LMM_se2[,1]), times=1000)
MCSE_LMMSE_2 = sqrt(sum((LMM_se2[,1] - z)^2)/999000)
## SEM Coefficient MCSEs
z = rep(mean(OpenMx_coef[,1]), times=1000)
MCSE_SEMCoef_1 = sqrt(sum((OpenMx_coef[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(OpenMx_SE[,1]), times=1000)
MCSE_SEMSE_1 = sqrt(sum((OpenMx_SE[,1] - z)^2)/999000)
## SEM Coefficient MCSEs
z = rep(mean(OpenMx_coef2[,1]), times=1000)
MCSE_SEMCoef_2 = sqrt(sum((OpenMx_coef2[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(OpenMx_SE2[,1]), times=1000)
MCSE_SEMSE_2 = sqrt(sum((OpenMx_SE2[,1] - z)^2)/999000)
## imputation Coefficient MCSEs
## (implist_coef is an n x 1 matrix, so mean() over the whole object equals
##  mean of column 1 used elsewhere)
z = rep(mean(implist_coef), times=1000)
MCSE_impCoef_1 = sqrt(sum((implist_coef - z)^2)/999000)
## SEs SEs
z = rep(mean(implist_se), times=1000)
MCSE_impSE_1 = sqrt(sum((implist_se - z)^2)/999000)
## imputation Coefficient MCSEs
z = rep(mean(implist_coef2), times=1000)
MCSE_impCoef_2 = sqrt(sum((implist_coef2 - z)^2)/999000)
## SEs SEs
z = rep(mean(implist_se2), times=1000)
MCSE_impSE_2 = sqrt(sum((implist_se2 - z)^2)/999000)
## PP Coefficient MCSEs
z = rep(mean(CCA_coef[,1]), times=1000)
MCSE_CCACoef_1 = sqrt(sum((CCA_coef[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(CCA_se[,1]), times=1000)
MCSE_CCASE_1 = sqrt(sum((CCA_se[,1] - z)^2)/999000)
## PP Coefficient MCSEs
z = rep(mean(CCA_coef2[,1]), times=1000)
MCSE_CCACoef_2 = sqrt(sum((CCA_coef2[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(CCA_se2[,1]), times=1000)
MCSE_CCASE_2 = sqrt(sum((CCA_se2[,1] - z)^2)/999000)
## ITT80 Coefficient MCSEs (first treatment-switch scenario)
z = rep(mean(Switch_coef80[,1]), times=1000)
MCSE_SwitchCoef_1 = sqrt(sum((Switch_coef80[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(Switch_se80[,1]), times=1000)
MCSE_SwitchSE_1 = sqrt(sum((Switch_se80[,1] - z)^2)/999000)
## ITT80 Coefficient MCSEs (second treatment-switch scenario)
## BUG FIX: MCSE_SwitchCoef80_2 / MCSE_SwitchSE80_2 are referenced when
## building `Total` below but were never computed anywhere in the script,
## so the run aborted with "object not found". They are derived here from
## Switch_coef802 / Switch_se802 exactly like their *_1 counterparts.
z = rep(mean(Switch_coef802[,1]), times=1000)
MCSE_SwitchCoef80_2 = sqrt(sum((Switch_coef802[,1] - z)^2)/999000)
## SEs SEs
z = rep(mean(Switch_se802[,1]), times=1000)
MCSE_SwitchSE80_2 = sqrt(sum((Switch_se802[,1] - z)^2)/999000)
#creating new dataset: one row per method, columns = bias summaries
Total = Slope1Bias
Total = as.data.frame(Total)
#adding variables for MCSEs for coefficient and SEs (row order must match Slope1Bias)
Total$MC_SE_Coef = c(NA, MCSE_CCACoef_1, MCSE_CCACoef_2, MCSE_LMMCoef_1, MCSE_LMMCoef_2, MCSE_SEMCoef_1, MCSE_SEMCoef_2, MCSE_SwitchCoef_1, MCSE_SwitchCoef80_2,
                     MCSE_impCoef_1, MCSE_impCoef_2)
Total$MC_SE_SEs = c(NA, MCSE_CCASE_1, MCSE_CCASE_2, MCSE_LMMSE_1, MCSE_LMMSE_2, MCSE_SEMSE_1, MCSE_SEMSE_2, MCSE_SwitchSE_1, MCSE_SwitchSE80_2,
                    MCSE_impSE_1, MCSE_impSE_2)
#adding variable for coverage: proportion of the 1000 runs whose 95% CI covered the truth
Total$Coverage = c(
  prop.table(table(UnDel_coverage[1:1000,1]))[2],
  prop.table(table(CCA_coverage[1:1000,1]))[2],
  prop.table(table(CCA_coverage2[1:1000,1]))[2],
  prop.table(table(LMM_coverage[1:1000,1]))[2],
  prop.table(table(LMM_coverage2[1:1000,1]))[2],
  prop.table(table(OpenMx_coverage[1:1000,1]))[2],
  prop.table(table(OpenMx_coverage2[1:1000,1]))[2],
  prop.table(table(Switch_coverage80[1:1000,1]))[2],
  prop.table(table(Switch_coverage802[1:1000,1]))[2],
  prop.table(table(imp_coverage[1:1000,1]))[2],
  prop.table(table(imp_coverage2[1:1000,1]))[2]
)
#export as excel, with percent missing and time-point introduced
WriteXLS(Total, ExcelFileName = "ITT_MNAR1000_20_trt_TP2_Aux.xlsx", col.names = TRUE, row.names=TRUE)
|
59dd6e3a1a936d6df8d8f3c531c421e63cb78341
|
e5a1e0780d2b93689dbb153e5ab733c4049f8839
|
/man/V4_T1.9.Rd
|
9d71cca9adc0c60220562ed0f1ada2a9f9274447
|
[
"MIT"
] |
permissive
|
LucyNjoki/rKenyaCensus
|
5b86efcdb7604067500087be68c463587daf362d
|
6db00e5b1b71a781e6def15dd98a4828b6d960bc
|
refs/heads/master
| 2022-11-06T17:58:34.520255
| 2020-06-29T11:34:02
| 2020-06-29T11:34:02
| 276,578,774
| 0
| 1
|
NOASSERTION
| 2020-07-02T07:30:50
| 2020-07-02T07:30:49
| null |
UTF-8
|
R
| false
| true
| 465
|
rd
|
V4_T1.9.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/V4_T1.9.R
\docType{data}
\name{V4_T1.9}
\alias{V4_T1.9}
\title{Volume 4: Table 1.9}
\format{A data frame with 4 variables:
\describe{
\item{\code{CountyCode}}{County Code (1 - 47)}
\item{\code{County}}{County}
\item{\code{SubCountyCode}}{Sub County Code}
\item{\code{SubCounty}}{Sub County}
}}
\usage{
data(V4_T1.9)
}
\description{
List of Counties and Sub-Counties
}
\keyword{datasets}
|
516a1087fd50cd848cdf6576508567b1dc4fa78f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SensoMineR/examples/fahst.rd.R
|
115777a32966bb9ccba9d41f57482d9733c12664
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
fahst.rd.R
|
# Auto-extracted example code for SensoMineR::fahst
# (Factorial Approach for Hierarchical Sorting Task data).
# Everything inside the "## Not run:" block is documentation only (##D lines
# are commented out) and does not execute when this file is sourced.
library(SensoMineR)
### Name: fahst
### Title: Factorial Approach for Hierarchical Sorting Task data
### Aliases: fahst
### Keywords: multivariate

### ** Examples

## Not run:
##D data(cards)
##D ## Example of FAHST results
##D group.cards<-c(2,3,3,2,2,4,2,3,2,1,3,2,3,3,3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3)
##D res.fahst<-fahst(cards,group=group.cards)
## End(Not run)
|
15e6ea2207ad57bdb4765e50e02f1e62e4c04504
|
8dcb923dea78fa398f185c06b5975d259a29f7c3
|
/modules/met_data/prepare_soil_moisture_data.R
|
97fca0f8a37fc3e33122af1cf7280f1c4ae35aeb
|
[] |
no_license
|
mingkaijiang/EucFACE_Carbon_Budget
|
ce69f2eb83066e08193bb81d1a0abc437b83dc8d
|
11abb2d6cd5e4121879ddecdf10ee5ba40af32ad
|
refs/heads/master
| 2020-09-03T03:43:00.823064
| 2020-01-15T02:14:51
| 2020-01-15T02:14:51
| 219,377,500
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,905
|
r
|
prepare_soil_moisture_data.R
|
prepare_soil_moisture_data <- function(plot.image, monthly) {
    #### Download EucFACE soil-moisture (theta) probe data and either
    #### plot depth-resolved contour maps per ring (plot.image = TRUE) or
    #### return daily ring means as a named list of data.tables.
    #### `monthly` is only used when plotting: TRUE = monthly means,
    #### FALSE = daily means.

    #### Download the data - takes time to run
    myDF <- download_soil_moisture_data()

    #### Assign ring information: Source strings look like "FACE_R<ring>_B1..."
    myDF$Ring <- sub("FACE_R", "", myDF$Source)
    myDF$Ring <- sub("_B1.*", "", myDF$Ring)
    myDF$Ring <- as.numeric(myDF$Ring)

    myDF <- myDF[order(myDF$Date),]

    ### First day of each calendar month, used for the monthly aggregation
    myDF$Month <- format(as.Date(myDF$Date), "%Y-%m")
    myDF$Month <- as.Date(paste0(myDF$Month,"-1"), format = "%Y-%m-%d")

    ### Color scheme for the contour plots
    cols <- colorRampPalette(c('red','blue', "darkblue"))(24)

    if (plot.image == TRUE) {
        #### Plotting: one depth-by-time contour plot per ring
        for (i in 1:6) {
            ### Probe depths (cm); ring 2's third probe sits at 44 cm, not 35 cm
            if (i == 2) {
                depth <- c(5, 30, 44, 75)
            } else {
                depth <- c(5, 30, 35, 75)
            }

            ### Daily and monthly ring averages of the theta columns (3:46).
            ### BUG FIX: the original passed `na.action = na.rm`, which is not a
            ### valid argument of aggregate()/mean() here; it fell into `...`
            ### and was silently ignored, so a single NA reading made the whole
            ### group mean NA. `na.rm = TRUE` is what was intended.
            dDF <- aggregate(myDF[myDF$Ring == i, 3:46],
                             by = list(myDF[myDF$Ring == i, "Date"]), mean, na.rm = TRUE)
            mDF <- aggregate(myDF[myDF$Ring == i, 3:46],
                             by = list(myDF[myDF$Ring == i, "Month"]), mean, na.rm = TRUE)

            ### Daily and monthly time axes
            d.series <- unique(myDF[myDF$Ring == i, "Date"])
            m.series <- unique(myDF[myDF$Ring == i, "Month"])

            ### Theta (volumetric soil water content) matrix at the daily timestep
            thDF <- dDF[, c("Theta5_1_Avg","Theta30_1_Avg","ThetaHL_1_Avg","Theta75_1_Avg")]
            theta <- as.matrix(thDF)

            if (monthly == FALSE) {
                ### Contour plotting at the daily timestep
                filled.contour(x = d.series, y = depth, z = theta, xlab = "Date", ylab = "Depth (cm)",
                               col = cols, main = paste0("Ring ", i))
            } else if (monthly == TRUE) {
                ### Rebuild theta at the monthly timestep before plotting
                thDF <- mDF[, c("Theta5_1_Avg","Theta30_1_Avg","ThetaHL_1_Avg","Theta75_1_Avg")]
                theta <- as.matrix(thDF)
                filled.contour(x = m.series, y = depth, z = theta, xlab = "Date", ylab = "Depth (cm)",
                               col = cols, main = paste0("Ring ", i))
            }
        }
    } else {
        ### Daily ring means of the first eight data columns (3:10), one
        ### data.table per ring. This replaces six copy-pasted aggregate calls
        ### and applies the same na.rm fix as above.
        daily_ring_mean <- function(ring) {
            d <- aggregate(myDF[myDF$Ring == ring, 3:10],
                           by = list(myDF[myDF$Ring == ring, "Date"]), mean, na.rm = TRUE)
            colnames(d)[1] <- "Date"
            data.table(d)  # data.table pkg assumed attached by the caller, as before
        }
        out.list <- list(R1 = daily_ring_mean(1),
                         R2 = daily_ring_mean(2),
                         R3 = daily_ring_mean(3),
                         R4 = daily_ring_mean(4),
                         R5 = daily_ring_mean(5),
                         R6 = daily_ring_mean(6))
        return(out.list)
    }
}
|
c41b394cbed882576122645e81215d1e3cc04b7c
|
cabd8bb22762efbe99dcd4760ed7722c55d7b624
|
/tests_2_way_contingency_tables.R
|
efba22f380ed37a696f9742a6c12fb8c06e27e57
|
[] |
no_license
|
carinabmorgan/tamu
|
ab87f48d030d27d29f88a66ce58759096d8e0d12
|
dd156222a32680c65103fb118ea1c9c4adffd322
|
refs/heads/main
| 2023-07-14T00:28:24.864358
| 2021-08-13T15:24:10
| 2021-08-13T15:24:10
| 395,701,199
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,236
|
r
|
tests_2_way_contingency_tables.R
|
#TESTS FOR 2 WAY CONTINGENCY TABLES
#NOTE(review): this file is a fill-in-the-blanks template; the empty c() calls
#below must be populated with data before any line will run.
library(DescTools)
library(lawstat)
#Breslow-Day & CMH Tests for common odds ratios (must be 2x2xz , z>1)
#Breslow Day: checks that there is not a significant difference between OR's (H0)
y<-c() #add all data values of matrix
#NOTE(review): dim() on the empty placeholder errors until y is filled in;
#for a 2x2xz array the dims should also be c(2, 2, z), not c(2, 2) — confirm.
dim(y)<-c(2,2) #add dimensions of matrix
BreslowDayTest(y, OR=NA, correct=FALSE)
#CMH test: checks to see of the common odds ratio = 1, only use if you do NOT reject B-D test
cmh.test(y)
#linear trend test for 2x2 tables (would use chi sq for larger contingency table)
linear.trend <- function(freq, NI, NJ, x, y) {
  # Linear-by-linear association (trend) test for an NI x NJ contingency table.
  # (after Kateri, Contingency Table Analysis)
  # PARAMETERS:
  #   freq: vector of cell frequencies, given by rows
  #   NI:   number of rows
  #   NJ:   number of columns
  #   x:    vector of row scores
  #   y:    vector of column scores
  # RETURNS a list with:
  #   r:       Pearson's sample correlation between the row and column scores
  #   M2:      test statistic M^2 = (n - 1) * r^2, ~ chi-square(1) under H0
  #   p.value: two-sided p-value of the asymptotic M2-test
  tab <- matrix(freq, nrow = NI, ncol = NJ, byrow = TRUE)
  # Margins computed once with rowSums/colSums instead of three separate
  # addmargins() calls as in the original.
  rowmarg <- rowSums(tab)
  colmarg <- colSums(tab)
  n <- sum(tab)
  xmean <- sum(rowmarg * x) / n
  ymean <- sum(colmarg * y) / n
  xsq <- sqrt(sum(rowmarg * (x - xmean)^2))
  ysq <- sqrt(sum(colmarg * (y - ymean)^2))
  r <- sum((x - xmean) %*% tab %*% (y - ymean)) / (xsq * ysq)
  M2 <- (n - 1) * r^2
  p.value <- 1 - pchisq(M2, 1)
  list(r = r, M2 = M2, p.value = p.value)
}
#Placeholder inputs for linear.trend(); fill in before running.
freq<-c()
x<-c()
y<-c()
linear.trend(freq=freq,NI=3,NJ=3,x,y)
#H0: x & y independent
#Finding odds ratios (2x2 table)
#create your 2x2 table
#NOTE(review): empty named arguments below (nrow=, ncol=) are parse-legal
#placeholders; supply values before running.
mat<-matrix(c(), nrow=, ncol=, byrow= )
rownames(mat)<-c() #add row names
colnames(mat)<-c() #add column names
#calculate OR with 95% CI
OddsRatio(mat, conf.level=0.95)
#McNemars matched pairs test: test for symmetry in 2x2 table/matrix, aka if the probability of cell [i,j]= probability of cell [j,i] for matched pairs
#NOTE: want cells to sum to 25 ideally
#H0: no difference in proportions between paired data
sum(mat)
mcnemar.test(mat, correct=TRUE) #use correct=TRUE if any cell has a count less than 5
#if we do not have the correct sum, we need an exact test
library(rcompanion)
nominalSymmetryTest(mat)
#sensitivity & specificity
library(caret)
y<-c() #response values
model<-glm(y ~ x, family='binomial') #let x be the predictors
actual_values<-y
pred_values<-predict(model, type='response')
#confusion matrix
#NOTE(review): caret::confusionMatrix takes (data, reference) factors and has
#no `cutoff` argument — the predicted probabilities must be thresholded into
#classes first (e.g. factor(pred_values > 0.5)); verify before use.
conf_mat<-confusionMatrix(actual_values, pred_values, cutoff=0.5) #generally use .5 cutoff
#sensitivity: proportion of people with disease who test positive
sensitivity(conf_mat)
#specificity: proportion of people without disease who test negative
specificity(conf_mat)
#hypothesis testing for a population proportion
#sample size calculation for proportion inference
#test that proportions (probs of success) is the same in multiple groups (H0) versus different between groups (Ha)
probs<-c() #must have a probability for every group in your matrix, so a 2x2 matrix would have 2 values in this vector
prop.test(mat, p=probs, alternative='two.sided', conf.level=0.95)
|
d0c2632fd032f25bfd6d983c8471e7f4221e6d5e
|
fde4f97ceb49af70f44297cb3aa5d8301357f849
|
/peaky.R
|
54e2d63b7dc8bb6edb65bb2eb67b8aa80e6c47a4
|
[] |
no_license
|
zeugirdoR/peaky-stan
|
2fb028df1f1975c6447ee7be4a429ffb53270635
|
80bbe710988a000ddaac4575eca484132e058add
|
refs/heads/master
| 2016-09-11T11:48:09.301779
| 2014-11-20T17:13:37
| 2014-11-20T17:13:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,333
|
r
|
peaky.R
|
## the peaky prior.
## Simulates data from two Gaussians whose means are tied to parameters (a, b)
## through sharply-peaked exponentials, then fits several Stan models that
## differ only in the prior specification, to compare identifiability.
library(rstan)
N <- 100
# NOTE(review): `c <- 0.1` shadows base::c as a value; calls like c(...) still
# resolve to the function, but this is fragile naming.
c <- 0.1
a.true <- 0.025
b.true <- -0.01
# Observations centered on the peaked likelihood surface at (a.true, b.true).
y1 <- rnorm(N,exp(-50*(a.true^2+b.true^2)))
y2 <- rnorm(N,exp(-50*((a.true-c)^2+(b.true-c)^2)))
peaky_dat <- list(N=N,c=c,y=cbind(y1,y2))
# with flat N(0,100)xN(0,100) prior: NO LEARNING.
peaky_fit <- stan(file = 'peaky.stan',data=peaky_dat, verbose = FALSE)
peaky_fit
# with uniform prior on manifold:
peakyOK_fit <- stan(file = 'peakyOK.stan',data=peaky_dat, verbose = FALSE)
## add more samples: Nailed it!
## a = 0 \pm 0.03 and b = -0.01 \pm 0.03
peakyOK <- stan(fit = peakyOK_fit, data=peaky_dat, iter = 20000, chains=4)
peakyOK
## with no specs for prior: Stan claims posterior improper!
peaky0 <- stan(file='peaky0.stan',data=peaky_dat, verbose = FALSE)
## with (a,b) iid U[-1,1]:
peakyU <- stan(file='peakyU.stan',data=peaky_dat,verbose=FALSE)
## give sims to rv
# thetas <- extract(stanfit,pars="m")$m
# th <- rvsims(thetas,n.sims=dim(thetas))
# with transformed parameters block:
peakyT <- stan(file='peakyT.stan',data=peaky_dat,verbose=FALSE)
# more samples
peakyT1 <- stan(fit=peakyT,data=peaky_dat,iter=20000,verbose=FALSE)
# NOTE(review): rvsims()/rvhist() come from the `rv` package, which is never
# attached in this script — confirm library(rv) is loaded elsewhere.
as <- extract(peakyT1,pars="a")$a
a <- rvsims(as,n.sims=dim(as))
rvhist(a)
bs <- extract(peakyT1,pars="b")$b
b <- rvsims(bs,n.sims=dim(bs))
rvhist(b)
|
4417f5834a4fddc0d806e190b7cb8e8ba6fe8fce
|
95ed1ee245f4a786d50afa095ad1996f3845b465
|
/Plot2.R
|
8c1f466bd21e19d3a8e587c4719305985fa52bba
|
[] |
no_license
|
BrandonAson/ExData_Plotting1
|
3cdf3f60724043bdce05294ccc4f91a40f11b2c8
|
3d5684ff18870141c713da56d92713593c501875
|
refs/heads/master
| 2020-12-25T20:31:24.457702
| 2015-01-10T17:42:59
| 2015-01-10T17:42:59
| 29,000,244
| 0
| 0
| null | 2015-01-09T04:14:45
| 2015-01-09T04:14:45
| null |
UTF-8
|
R
| false
| false
| 1,087
|
r
|
Plot2.R
|
Plot2 <- function()
{
  ## Plot 2: Global Active Power (kilowatts) against time for the two days
  ## 2007-02-01 and 2007-02-02, saved to plot2.png (480 x 480 px).
  ############### LOAD PACKAGES ################
  library(data.table)

  ########## LOAD DATASET ##########
  ## "?" and empty strings mark missing values in the source file.
  dataSet <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";", na.strings = c("?", ""))

  ####### RECONFIGURE DATE AS CLASS & SELECT DATE RANGE 2007-02-01 to 2007-02-02#########
  #Combine date & time into a single column named 'DateTime' (POSIXlt)
  dataSet$DateTime <- paste(dataSet$Date, dataSet$Time)
  dataSet$DateTime <- strptime(dataSet$DateTime, format = "%d/%m/%Y %H:%M:%S")

  #Keep 2007-02-01 00:00:00 through 2007-02-02 23:59:00.
  #BUG FIX: the original lower bound used a strict ">", which dropped the
  #midnight observation at 2007-02-01 00:00:00; ">=" includes the full day.
  selectDates <- subset(dataSet, dataSet$DateTime < ("2007-02-03 00:00:00") & dataSet$DateTime >= ("2007-02-01 00:00:00"))

  ########## GENERATE PLOT #############
  selectDates$Global_active_power <- as.numeric(selectDates$Global_active_power)
  plot(selectDates$DateTime, selectDates$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "", bg = "white")
  #Copy the on-screen plot to a PNG file, then close the PNG device.
  dev.copy(png, filename = "plot2.png", width = 480, height = 480, units = "px")
  dev.off()
}
|
3d731cf1754fb0d00dd43301a940eb965651cf56
|
77e2ab3f92aeae91734daeb4bbc8569ef1d40c6a
|
/man/tables.Rd
|
aaaf64dbb1ba3cb5160816419919fe38c7897562
|
[] |
no_license
|
baharak/SparkR
|
6ef2c5e948091c1e5b7a359834d54ec2b5360874
|
4ec87f6f817ab9dff29178b769d22efe5a734ce9
|
refs/heads/master
| 2021-01-21T19:06:50.749325
| 2015-07-27T06:36:19
| 2015-07-27T06:36:19
| 39,803,604
| 1
| 1
| null | 2015-07-27T23:52:13
| 2015-07-27T23:52:12
| null |
UTF-8
|
R
| false
| false
| 501
|
rd
|
tables.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SQLContext.R
\name{tables}
\alias{tables}
\title{Tables}
\usage{
tables(sqlContext, databaseName = NULL)
}
\arguments{
\item{sqlContext}{SQLContext to use}
\item{databaseName}{name of the database}
}
\value{
a DataFrame
}
\description{
Returns a DataFrame containing names of tables in the given database.
}
\examples{
\dontrun{
sc <- sparkR.init()
sqlContext <- sparkRSQL.init(sc)
tables(sqlContext, "hive")
}
}
|
1900d947f397e43b92a58f33b56ab0fd045897fa
|
b2d31f342878148109735c24780ab52c0dfcd975
|
/R/estimate.R
|
54c5b3faea0c7e2c6a4e5a1f59b42e6b15ce3f6b
|
[] |
no_license
|
cran/bioassays
|
b3e45cdeb5dddc22d5065f542c543e64c3e0f117
|
892682e437df89afe80bbee88d348a1c56b1962f
|
refs/heads/master
| 2022-12-25T09:55:21.485996
| 2020-10-09T19:10:02
| 2020-10-09T19:10:02
| 268,085,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,466
|
r
|
estimate.R
|
#'@name estimate
#'@aliases estimate
#'@title Estimate Samples from Standard Curve
#'@description This function estimates an unknown variable (example: concentration) based on a fitted standard curve.
#'@usage estimate (data, colname = "blankminus", fitformula = fiteq, method = "linear/nplr")
#'
#'@param data data in dataframe format
#'@param colname column name whose values have to be estimated
#'@param fitformula formula used for fitting the standard curve
#'@param method method = "linear" if the standard curve is linear. method = "nplr" if the standard curve is a nonparametric logistic curve.
#'
#'@details For a linear standard curve 'fitformula' needs to be generated using \code{\link[stats]{lm}}.
#'For a nonparametric logistic curve 'fitformula' needs to be generated using \code{\link[nplr]{nplr}}.
#'
#'@return A dataframe with estimated values added to the right as a new column "estimated".
#'An informative error is raised when 'method' is neither "linear" nor "nplr".
#'
#'@author A.A Palakkan
#'
#'@examples
#'## loading data
#'data(data_DF1)
#'
#'## Filtering standards
#'std<- dplyr::filter(data_DF1, data_DF1$id=="STD")
#'std <- aggregate(std$blankminus ~ std$concentration, FUN = mean )
#'colnames (std) <-c("con", "OD")
#'
#'## 3-parametric regression curve fitting
#'fit1<-nplr::nplr(std$con,std$OD,npars=3,useLog = FALSE)
#'
#'## Linear regression curve fitting
#'fit2<- stats::lm(formula = con ~ OD,data = std)
#'
#'## Estimating the 'blankminus'
#'## eg:1 Based on nonparametric logistic regression fitting
#'estimated_nplr <- estimate(data_DF1,colname = "blankminus",fitformula = fit1,method = "nplr")
#'
#'## eg:2 Based on linear regression fitting
#'estimated_lr<-estimate(data_DF1,colname="blankminus",fitformula=fit2,method="linear")
#'
#'@keywords math
#'
#'@importFrom nplr getEstimates
#'@importFrom stats coefficients
#'@export
#'
#'
#'
estimate <- function(data, colname="blankminus", fitformula=fiteq, method="linear/nplr"){
  # Sentinel so the lazily-evaluated default `fitformula = fiteq` resolves to
  # NULL (and then fails the method check) instead of "object not found".
  fiteq <- NULL
  if (method == "nplr") {
    # Column 3 of nplr::getEstimates() holds the point estimates.
    projected <- nplr::getEstimates(fitformula, targets = data[, colname], conf.level = .95)[c(3)]
    newlayout <- data.frame(cbind(data, estimated = projected))
    colnames(newlayout)[ncol(newlayout)] <- "estimated"
    return(newlayout)
  }
  if (method == "linear") {
    # Apply intercept + slope from the lm fit to the requested column.
    coe <- stats::coefficients(fitformula)
    names(coe) <- NULL
    xvalue <- data[, colname]
    projected <- coe[1] + (coe[2] * xvalue)
    newlayout <- data.frame(cbind(data, estimated = projected))
    colnames(newlayout)[ncol(newlayout)] <- "estimated"
    return(newlayout)
  }
  # ROBUSTNESS FIX: previously an unrecognized method silently returned NULL.
  stop("'method' must be either \"linear\" or \"nplr\"", call. = FALSE)
}
|
d8c82dcfd3215af7a7d21ab125bafebe8fecee4f
|
71993c9cdadaff86bf072858e580bc6943d5356b
|
/R/app_utils.R
|
d1c04562cdbad01310a6a002b1b9ee2c9246e04a
|
[] |
no_license
|
ca3tech/covid
|
fa83fb991587882a6e8874330382652c52a3aa38
|
9c19f82391b2def84b3fc1103890465f33af6b11
|
refs/heads/master
| 2023-03-07T15:13:44.006001
| 2021-02-20T22:51:00
| 2021-02-20T22:51:00
| 275,665,035
| 0
| 0
| null | 2020-08-09T21:06:25
| 2020-06-28T20:43:26
|
R
|
UTF-8
|
R
| false
| false
| 7,528
|
r
|
app_utils.R
|
# Build the base leaflet map: a wide continental-US view with the county
# GeoJSON layer drawn on top. Returns the leaflet widget, or nothing when
# no geometry is available yet.
getMap <- function(county_geo) {
  # The htmlwidgets::onRender was reverse engineered by looking
  # at the R leaflet GitHub code and the help for htmlwidgets.
  # It is there to facilitate adding markers to the map from
  # feature ids in the URL query parameters: the JS callback flips the
  # Shiny input 'map_rendered' to TRUE once the map exists client-side.
  if(! is.null(county_geo)) {
    leaflet() %>%
      setView(lng = -98.583, lat = 39.833, zoom = 3) %>%
      addTiles() %>%
      addGeoJSON(county_geo) %>%
      htmlwidgets::onRender("function(el, data, map) {
                              Shiny.setInputValue('map_rendered', true);
                            }")
  }
}
# Manage map markers
# Toggle map markers for each feature id in `ftrids`. When `ftrids` is
# empty/NULL, the currently selected ids (`sfids`) are re-toggled instead.
# Returns the updated vector of selected feature ids.
updateCountyMarkers <- function(ftrids, sfids, county_geo, center=FALSE, progress=NULL) {
  if (is.null(ftrids) || length(ftrids) == 0) {
    ftrids <- sfids
  }
  # IDIOM FIX: the original used lapply() purely for its side effect,
  # assigning into `sfids` with `<<-` from an anonymous function. A plain
  # loop threads the accumulator explicitly and avoids non-local assignment.
  for (ftrid in ftrids) {
    sfids <- updateCountyMarker(ftrid, sfids, county_geo, center, progress)
  }
  sfids
}
# Toggle the marker for a single feature id: if it is already in `sfids`,
# remove the marker and drop the id; otherwise add a marker (optionally
# centering the map on it) and record the id. Returns the updated selection.
# `progress` is accepted but unused here — presumably reserved; TODO confirm.
updateCountyMarker <- function(ftrid, sfids, county_geo, center=FALSE, progress=NULL) {
  mlid <- getCountyMarkerId(ftrid)
  if(ftrid %in% sfids) {
    # Deselect: remove from the selection and delete the leaflet marker.
    sfids <- sfids[sfids != ftrid]
    leafletProxy("map") %>%
      removeMarker(mlid)
  } else {
    # Select: record the id and draw its marker.
    sfids <- c(sfids, ftrid)
    addCountyMarker(ftrid, geo=county_geo, center=center)
  }
  sfids
}
# Build the leaflet layer id used for a county's marker ("marker_<featureId>").
getCountyMarkerId <- function(featureId) {
  sprintf("marker_%s", featureId)
}
# Draw a labelled marker at the visual center of the county polygon whose
# GeoJSON feature id matches `featureId`. The hover label shows the county
# name and case/death counts read from the feature's properties;
# "active_rank" is optional and rendered only when present.
# Does nothing when the feature id is not found in `geo`.
addCountyMarker <- function(featureId, geo, center=FALSE) {
  mlid <- getCountyMarkerId(featureId)
  # Logical mask over the feature list; only the first match is used.
  sel <- vapply(geo$features, function(f) f$id == featureId, TRUE)
  if(any(sel)) {
    features <- geo$features[sel]
    feature <- features[[1]]
    mpoint <- get_lon_lat_center(feature)  # c(lon, lat)
    lab <- HTML(paste(
      paste0("<b>County:</b>",feature$properties["county_name"]),
      paste0("<b>Confirmed:</b>",feature$properties["confirmed_cases"]),
      paste0("<b>Active:</b>",feature$properties["active_case_est"]),
      ifelse("active_rank" %in% names(feature$properties), paste0("<b>Active Rank:</b>",feature$properties["active_rank"]), ""),
      paste0("<b>Deaths:</b>",feature$properties["confirmed_deaths"]),
      sep = "</br>"
    ))
    m <- leafletProxy("map") %>%
      addMarkers(lng = mpoint[1], lat = mpoint[2], layerId = mlid, label = lab)
    if(center) {
      # Animate the map to the marker at a county-level zoom.
      m %>% flyTo(lng = mpoint[1], mpoint[2], zoom = 8)
    }
  }
}
# A rare feature stored its geometry$coordinates as a bare list of polygon
# rings where downstream code expects MultiPolygon nesting, which crashed the
# application. Detect that shape (more than one element, each itself a list of
# points) and rewrap each ring, relabelling the geometry type accordingly.
.fix_feature_coords <- function(feature) {
  coords <- feature$geometry$coordinates
  if (length(coords) > 1 && all(lengths(coords) > 1)) {
    feature$geometry$coordinates <- lapply(coords, list)
    feature$geometry$type <- "MultiPolygon"
  }
  feature
}
# Convert a list of c(lon, lat) numeric pairs into a two-column data.frame.
# IDIOM FIX: replaces plyr::ldply with the equivalent base-R row-bind — one
# small rbind does not justify the extra third-party dependency.
.pt_lst_to_df <- function(ptlist) {
  do.call(rbind, lapply(ptlist, function(pt) data.frame(lon = pt[1], lat = pt[2])))
}
# Recursively convert GeoJSON polygon coordinates into lon/lat data frames.
# A single-ring geometry yields one data.frame of points; nested
# (multi-polygon) coordinates yield a correspondingly nested list of results.
.geo_coord_to_points <- function(coords) {
  if(length(coords) == 1) {
    .pt_lst_to_df(coords[[1]])
  } else {
    lapply(coords, .geo_coord_to_points)
  }
}
# Return c(lon, lat) for a GeoJSON feature. Single-polygon features get their
# centroid; multi-polygon features get the centroid of the polygon with the
# largest area, so the marker lands on the county's main land mass rather than
# between disjoint pieces (e.g. islands).
get_lon_lat_center <- function(feature) {
  # Normalize the rare malformed coordinate nesting first.
  feature <- .fix_feature_coords(feature)
  pts <- .geo_coord_to_points(feature$geometry$coordinates)
  if(is.data.frame(pts)) {
    as.numeric(geosphere::centroid(pts))
  } else {
    # There is more than one region
    # Find the region with the greatest area
    areas <- vapply(pts, geosphere::areaPolygon, 1.0)
    armax <- max(areas)
    imax <- which(vapply(areas, function(a) a == armax, TRUE))
    # Return the centroid of the largest region
    as.numeric(geosphere::centroid(pts[[imax[1]]]))
  }
}
##
# Plot generation
# Each builder takes a per-date summary data frame (columns: date,
# confirmed_cases, new_cases, confirmed_deaths, new_deaths) and returns a
# promise (future) that resolves to a plotly widget, so rendering does not
# block the Shiny session. NOTE(review): assumes a future::plan() has been
# set elsewhere in the app — confirm.

# Cumulative confirmed cases over time.
confirmedCasesPlot <- function(date_summary) {
  future({
    plot_ly(date_summary, x = ~date, y = ~confirmed_cases, type = "scatter", mode = "lines", color = I("blue"), hoverinfo = "y") %>%
      layout(plot_bgcolor = "#888",
             xaxis = list(title = "Date", tickangle = -45),
             yaxis = list(title = "Confirmed Cases")
      ) %>%
      config(displayModeBar = FALSE)
  })
}

# Daily new cases with a linear-trend overlay (yellow).
newCasesPlot <- function(date_summary) {
  future({
    fit <- stats::lm(new_cases ~ date, data = date_summary)
    plot_ly(date_summary, x = ~date, y = ~new_cases, type = "scatter", mode = "lines", color = I("blue"), hoverinfo = "y") %>%
      add_lines(x = ~date, y = fitted(fit), color = I("yellow")) %>%
      layout(plot_bgcolor = "#888", showlegend = FALSE,
             xaxis = list(title = "Date", tickangle = -45),
             yaxis = list(title = "New Cases")
      ) %>%
      config(displayModeBar = FALSE)
  })
}

# Cumulative confirmed deaths over time.
confirmedDeathsPlot <- function(date_summary) {
  future({
    plot_ly(date_summary, x = ~date, y = ~confirmed_deaths, type = "scatter", mode = "lines", color = I("blue"), hoverinfo = "y") %>%
      layout(plot_bgcolor = "#888",
             xaxis = list(title = "Date", tickangle = -45),
             yaxis = list(title = "Confirmed Deaths")
      ) %>%
      config(displayModeBar = FALSE)
  })
}

# Daily new deaths with a linear-trend overlay (yellow).
newDeathsPlot <- function(date_summary) {
  future({
    fit <- stats::lm(new_deaths ~ date, data = date_summary)
    plot_ly(date_summary, x = ~date, y = ~new_deaths, type = "scatter", mode = "lines", color = I("blue"), hoverinfo = "y") %>%
      add_lines(x = ~date, y = fitted(fit), color = I("yellow")) %>%
      layout(plot_bgcolor = "#888", showlegend = FALSE,
             xaxis = list(title = "Date", tickangle = -45),
             yaxis = list(title = "New Deaths")
      ) %>%
      config(displayModeBar = FALSE)
  })
}

# Case-fatality ratio (deaths / cases, 4 dp) over time; rate is left at 0
# for dates with zero confirmed cases to avoid division by zero.
deathRatePlot <- function(date_summary) {
  future({
    date_summary$death_rate <- 0
    sel <- date_summary$confirmed_cases > 0
    date_summary$death_rate[sel] <- round(date_summary$confirmed_deaths[sel] / date_summary$confirmed_cases[sel], 4)
    plot_ly(date_summary, x = ~date, y = ~death_rate, type = "scatter", mode = "lines", color = I("blue"), hoverinfo = "y") %>%
      layout(plot_bgcolor = "#888",
             xaxis = list(title = "Date", tickangle = -45),
             yaxis = list(title = "Death Rate")
      ) %>%
      config(displayModeBar = FALSE)
  })
}
# Given one row of county stats (Population, Probability_of_Exposure, N50),
# compute the exposure-probability curve over group sizes 1, 2, 4, ...
# up to the population. Returns a list with:
#   plot  - data frame of group size (x), log2 size (xlog), and the
#           probability at least one group member is exposed (y)
#   label - single-point data frame marking the N50 group size on the curve
computeExposureData <- function(statsdf) {
  miss_prob <- 1 - statsdf$Probability_of_Exposure
  n50 <- statsdf$N50
  exponents <- 0:as.integer(log(statsdf$Population, 2))
  group_sizes <- 2 ^ exponents
  list(
    plot = data.frame(
      x = group_sizes,
      xlog = log(group_sizes, 2),
      y = 1 - miss_prob ^ group_sizes
    ),
    label = data.frame(
      x = log(n50, 2),
      y = 1 - miss_prob ^ n50,
      label = paste0("N50=", n50)
    )
  )
}
# Plot the probability of exposure vs group size (log2 x-axis) for one row
# of county stats, with the N50 point annotated in yellow. Returns a future
# resolving to a plotly widget.
exposureProbPlot <- function(stats) {
  future({
    expdfs <- computeExposureData(stats)
    pldf <- expdfs[["plot"]]
    # Label every third power of two to keep the axis readable; ticks are
    # placed at the log2 positions but display the raw group size.
    xti <- seq.int(from = 3, to = nrow(pldf), by = 3)
    xb <- pldf$xlog[xti]
    xl <- pldf$x[xti]
    plot_ly(pldf, x = ~xlog, y = ~y, type = "scatter", mode = "lines", color = I("blue"), hoverinfo = "y") %>%
      add_text(x = ~x, y = ~y, text = ~label, data = expdfs[["label"]], color = I("yellow")) %>%
      layout(plot_bgcolor = "#888", showlegend = FALSE,
             xaxis = list(
               title = "Number of People",
               tickmode = "array",
               tickvals = xb,
               ticktext = xl,
               tickangle = -45
             ),
             yaxis = list(title = "Probability Exposed")
      ) %>%
      config(displayModeBar = FALSE)
  })
}
##
# General utilities
# Turn a one-row stats data frame into a named list, replacing underscores
# in the column names with spaces so they read well in the UI.
statsToList <- function(statsdf) {
  setNames(as.list(statsdf), gsub("_", " ", names(statsdf)))
}
# Load the `zipcode` lookup table (from the zipcode package) into the session.
data(zipcode)
# Map a vector of ZIP code strings to their coordinates. Returns a data frame
# with columns zipcode/lon/lat; unknown ZIPs are silently dropped.
getZipcodeLonLat <- function(zip_codes) {
  dplyr::rename(zipcode[zipcode$zip %in% zip_codes, c("zip", "longitude", "latitude")],
                zipcode=zip, lon=longitude, lat=latitude)
}
|
3d6476e388c06c1b79c4751638375a0baa5fa691
|
690f368023ed55daac428fed7b3a2125a0556c9c
|
/tests/testthat/test_sqrt_x.R
|
1426dd7da703bf06debe761e201e43eb079eab36
|
[] |
no_license
|
petersonR/bestNormalize
|
6b41727938489bb527ebda9533dae5c439986ed8
|
4d568bc4eb41d7d17a54fcdbb2b20a5c844c0ca6
|
refs/heads/master
| 2023-04-06T08:28:26.633324
| 2023-03-21T23:00:24
| 2023-03-21T23:00:24
| 109,146,034
| 42
| 5
| null | 2023-02-15T23:49:46
| 2017-11-01T15:05:50
|
R
|
UTF-8
|
R
| false
| false
| 2,337
|
r
|
test_sqrt_x.R
|
# testthat suite for bestNormalize's sqrt_x transformation: round-tripping,
# new-data prediction, and NA handling — with and without standardization,
# and with an explicit shift parameter `a`.
context('sqrt_x functionality')

data(iris)
train <- iris$Petal.Width

# Default fit (standardize = TRUE).
sqrt_x_obj <- sqrt_x(train)

test_that('sqrt_x Transforms original data consistently', {
  # predict() with no newdata must reproduce the stored transform, and the
  # inverse must recover the raw training values.
  expect_equal(sqrt_x_obj$x.t, predict(sqrt_x_obj))
  expect_equal(sqrt_x_obj$x, predict(sqrt_x_obj, inverse = TRUE))
})

test_that('sqrt_x Transforms new data consistently', {
  # Forward then inverse over a grid should be the identity.
  nd <- seq(0, 4, length = 100)
  pred <- predict(sqrt_x_obj, newdata = nd)
  expect_true(!any(is.na(pred)))
  nd2 <- predict(sqrt_x_obj, newdata = pred, inverse = TRUE)
  expect_equal(nd, nd2)
})

test_that('sqrt_x correctly handles missing original data', {
  # An NA in the training vector must stay NA in every direction.
  b <- sqrt_x(c(NA, train))
  expect_equal(as.numeric(NA), b$x.t[1])
  expect_equal(as.numeric(NA), predict(b)[1])
  expect_equal(as.numeric(NA), predict(b, inverse = TRUE)[1])
})

test_that('sqrt_x correctly handles missing new data', {
  b <- sqrt_x(train)
  expect_equal(as.numeric(NA), predict(b, newdata = c(1, NA))[2])
  expect_equal(as.numeric(NA), predict(b, newdata = c(1, NA), inverse = TRUE)[2])
})

# Same four checks with standardization disabled.
sqrt_x_obj <- sqrt_x(train, standardize = FALSE)

test_that('sqrt_x Transforms original data consistently', {
  expect_equal(sqrt_x_obj$x.t, predict(sqrt_x_obj))
  expect_equal(sqrt_x_obj$x, predict(sqrt_x_obj, inverse = TRUE))
})

test_that('sqrt_x Transforms new data consistently', {
  nd <- seq(0, 4, length = 100)
  pred <- predict(sqrt_x_obj, newdata = nd)
  expect_true(!any(is.na(pred)))
  nd2 <- predict(sqrt_x_obj, newdata = pred, inverse = TRUE)
  expect_equal(nd, nd2)
})

test_that('sqrt_x correctly handles missing original data', {
  b <- sqrt_x(c(NA, train), standardize = FALSE)
  expect_equal(as.numeric(NA), b$x.t[1])
  expect_equal(as.numeric(NA), predict(b)[1])
  expect_equal(as.numeric(NA), predict(b, inverse = TRUE)[1])
})

test_that('sqrt_x correctly handles missing new data', {
  b <- sqrt_x(train, standardize = FALSE)
  expect_equal(as.numeric(NA), predict(b, newdata = c(1, NA))[2])
  expect_equal(as.numeric(NA), predict(b, newdata = c(1, NA), inverse = TRUE)[2])
})

# Fit with an explicit shift constant `a` supplied by the user.
sqrt_x_obj <- sqrt_x(train, a = 1)

test_that('sqrt_x Transforms new data consistently (given a)', {
  nd <- seq(0, 4, length = 100)
  pred <- predict(sqrt_x_obj, newdata = nd)
  expect_true(!any(is.na(pred)))
  nd2 <- predict(sqrt_x_obj, newdata = pred, inverse = TRUE)
  expect_equal(nd, nd2)
})
|
e6a86369cb3fac2de24e66aac438636b440abf90
|
50cbe33890817fcab78e72d7abf72cf10135198d
|
/scripts/00_solar_angle_function.R
|
ae06a294412ec12690b85580cd820899667ec886
|
[] |
no_license
|
avhesketh/semibalanus
|
84cc0c18d718c32717421f14b23d8c81bfbcb4e4
|
7fa53aeaf583b90e8ae793cbe15393685282abe4
|
refs/heads/main
| 2023-04-13T19:25:36.904974
| 2022-08-15T17:05:52
| 2022-08-15T17:05:52
| 414,035,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,072
|
r
|
00_solar_angle_function.R
|
############# AMELIA HESKETH ##############
############ October 14, 2021 #############
## Calculating solar angle of substratum ##
# You will need four variables to run this formula:
# orientation_rad = compass angle the substratum faces relative to north = 0, in radians
# sigma = angle of substratum relative to horizontal (0 angle), in radians
# solar_azimuth_rad = solar azimuth for relevant site + time, in radians
# beta = solar elevation for relevant site + time, in radians
# Note that these variable names need to be column names (spelled exactly the same) in your dataframe.
# Append a `solar_angle_degrees` column to `df`: the angle (in degrees)
# between the sun and the substratum, via the spherical law of cosines.
#
# Required columns (all in radians):
#   orientation_rad   - compass angle the substratum faces (north = 0)
#   sigma             - substratum tilt relative to horizontal
#   solar_azimuth_rad - solar azimuth for the site/time
#   beta              - solar elevation for the site/time
#
# Fixes: the original ended in `df <- cbind(...)`, so the result was
# returned invisibly; the column is now assigned directly (same name,
# same values) and the data frame is returned visibly.
solar_angle <- function(df){
  df$solar_angle_degrees <-
    (acos(sin(df$beta) * cos(df$sigma) +
            cos(abs(df$orientation_rad - df$solar_azimuth_rad)) *
              cos(df$beta) * sin(df$sigma)) / (2 * pi)) * 360
  df
}
# Example of function use:
# df_with_angles <- solar_angle(df)
# A new column containing the solar angle (in degrees, not radians) should now be
# appended to the end of your original dataframe.
|
f1a9b942564f15124fd7fc9fdb69baf92bea64e1
|
d32f01511d86b09c2d808e67499389b601e5e844
|
/03-main-analysis.R
|
aa79afa1799fd1482ed368dae319c607ea8132a3
|
[] |
no_license
|
bstaton1/cesrf-minijack-rates
|
d27d8541a0ba5e5c3b9718cbb15274b1c5343169
|
397fab9cded8a68669bbd41e06ba0b37f498c911
|
refs/heads/main
| 2023-04-11T21:57:17.881808
| 2021-11-11T01:06:35
| 2021-11-11T01:06:35
| 362,966,941
| 0
| 0
| null | 2021-10-05T22:33:16
| 2021-04-29T22:53:15
|
R
|
UTF-8
|
R
| false
| false
| 4,764
|
r
|
03-main-analysis.R
|
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #
# SCRIPT THAT CONDUCTS THE MINIJACK ANALYSIS FOUND IN THE MAIN TEXT #
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #

# NOTE: THE BOOTSTRAP IN THIS SCRIPT TAKES APPROXIMATELY 2.5 HOURS TO RUN

# load necessary packages
source("00-packages.R")

# read/format data file
source("01-data-prep.R")

# load in the functions
source("02-functions.R")

##### FIT GLMMs #####

# fit GLMMs: fixed effects of sire age and progeny weight, random effects for parent IDs
# separate model for each year
fit_14 = glmmTMB(minijack ~ sire_age + progeny_wt + (1|sire_id) + (1|dam_id),
                 data = subset(dat, year == 2014), family = binomial)
fit_15 = glmmTMB(minijack ~ sire_age + progeny_wt + (1|sire_id) + (1|dam_id),
                 data = subset(dat, year == 2015), family = binomial)
fit_16 = glmmTMB(minijack ~ sire_age + progeny_wt + (1|sire_id) + (1|dam_id),
                 data = subset(dat, year == 2016), family = binomial)

# fit GLMMs: fixed effects of progeny weight, random effects for parent IDs
# separate model for each year
# these are the "null" models, used for testing hypothesis that sire age is important
fit_null_14 = glmmTMB(minijack ~ progeny_wt + (1|sire_id) + (1|dam_id),
                      data = subset(dat, year == 2014), family = binomial)
fit_null_15 = glmmTMB(minijack ~ progeny_wt + (1|sire_id) + (1|dam_id),
                      data = subset(dat, year == 2015), family = binomial)
fit_null_16 = glmmTMB(minijack ~ progeny_wt + (1|sire_id) + (1|dam_id),
                      data = subset(dat, year == 2016), family = binomial)

##### PERFORM PARAMETRIC BOOTSTRAP #####

# start a clock
starttime = Sys.time()

# number of bootstrapped samples per year
nsim = 2000

# number of cores assigned to the cluster
# this assumes your computer has at least 10 cores that can accept jobs
# (ncpus below is 10); fewer cores will make the code run more slowly
# run: parallel::detectCores()
# to figure out how many your computer has. leave at least one free for other tasks
ncpus = 10

# initialize a parallel computing cluster
my_cluster = makeSOCKcluster(ncpus)

# send needed packages to the cluster
clusterEvalQ(my_cluster, {library("lme4"); library("glmmTMB"); library("stringr")})

# send needed environmental variables to the cluster
clusterExport(my_cluster, ls())

# set up the random number generator on the cluster
clusterSetupRNG(my_cluster, type = "RNGstream", seed = rep(1, 6))

# Each bootstrap returns sire-age probabilities and odds ratios (non-null
# models) or a null-model simulation statistic, run in parallel via snow.

# perform the bootstrap for 2014: non-null model
cat("\nRunning Bootstrap: 2014 (non-null model)")
boot_out_14 = bootMer(fit_14, FUN = function(rand_fit) c(get_probs(rand_fit), get_odds_ratios(rand_fit)), nsim = nsim,
                      parallel = "snow", cl = my_cluster, ncpus = ncpus, seed = 1)

# perform the bootstrap for 2014: null model
cat("\nRunning Bootstrap: 2014 (null model)")
boot_out_null_14 = bootMer(fit_null_14, FUN = sim_fit_from_null, nsim = nsim,
                           parallel = "snow", cl = my_cluster, ncpus = ncpus, seed = 1)

# perform the bootstrap for 2015: non-null model
cat("\nRunning Bootstrap: 2015 (non-null model)")
boot_out_15 = bootMer(fit_15, FUN = function(rand_fit) c(get_probs(rand_fit), get_odds_ratios(rand_fit)), nsim = nsim,
                      parallel = "snow", cl = my_cluster, ncpus = ncpus, seed = 1)

# perform the bootstrap for 2015: null model
cat("\nRunning Bootstrap: 2015 (null model)")
boot_out_null_15 = bootMer(fit_null_15, FUN = sim_fit_from_null, nsim = nsim,
                           parallel = "snow", cl = my_cluster, ncpus = ncpus, seed = 1)

# perform the bootstrap for 2016: non-null model
cat("\nRunning Bootstrap: 2016 (non-null model)")
boot_out_16 = bootMer(fit_16, FUN = function(rand_fit) c(get_probs(rand_fit), get_odds_ratios(rand_fit)), nsim = nsim,
                      parallel = "snow", cl = my_cluster, ncpus = ncpus, seed = 1)

# perform the bootstrap for 2016: null model
cat("\nRunning Bootstrap: 2016 (null model)")
boot_out_null_16 = bootMer(fit_null_16, FUN = sim_fit_from_null, nsim = nsim,
                           parallel = "snow", cl = my_cluster, ncpus = ncpus, seed = 1)

# stop the cluster
stopCluster(my_cluster)

# end the clock
stoptime = Sys.time()

# calculate the time difference
format(stoptime - starttime, digits = 2)

# save the output objects for downstream summary/plotting scripts
if (!dir.exists("model-output")) dir.create("model-output")
saveRDS(boot_out_14, "model-output/boot_out_14.rds")
saveRDS(boot_out_15, "model-output/boot_out_15.rds")
saveRDS(boot_out_16, "model-output/boot_out_16.rds")
saveRDS(boot_out_null_14, "model-output/boot_out_null_14.rds")
saveRDS(boot_out_null_15, "model-output/boot_out_null_15.rds")
saveRDS(boot_out_null_16, "model-output/boot_out_null_16.rds")
|
d606bfd5798e399ff019ed86f82b1409db195aa7
|
4457b97c3a5c1037fd6fcd6a9fddb3bab0bc67dd
|
/services/executionenvironments/r/generator/generator.r
|
534cd596d81a0bad6a34d60c43de810398fd8b67
|
[
"MIT"
] |
permissive
|
hpi-epic/mpcsl
|
9887c3d07f0902a3e6239b8b0d5299ca3af59e19
|
4b8e6998b2c569ab37ba39a8e0fec057e775b9cc
|
refs/heads/master
| 2023-05-22T15:51:17.173435
| 2023-02-14T14:28:06
| 2023-02-14T14:28:06
| 154,475,401
| 4
| 1
|
MIT
| 2023-02-14T14:28:08
| 2018-10-24T09:35:21
|
Python
|
UTF-8
|
R
| false
| false
| 2,202
|
r
|
generator.r
|
library("pcalg")
library(optparse)
library(httr)
library(igraph)
tmpDataFile <- 'df.csv'
tmpGraphFile <- 'graph.gml'
option_list_v <- list(
# optparse does not support mandatory arguments so I set a value to NA by default to verify later if it was provided.
make_option("--apiHost", type="character", help="API Host/Port", default=NA),
make_option("--uploadEndpoint", type = "character", help = "Dataset Upload Url", default = NA),
make_option("--nSamples", type = "integer", default = NA, help = "number of samples to be generated"),
make_option("--nNodes", type = "integer", default = NA, help = "number of variables"),
make_option("--edgeProbability", type = "double", default = NA, help = "probability that a given edge is in the graph"),
make_option("--edgeValueLowerBound", type = "double", default = NA, help = "lowest possible edge value"),
make_option("--edgeValueUpperBound", type = "double", default = NA, help = "highest possible edge value")
)
option_parser <- OptionParser(option_list = option_list_v)
opt <- parse_args(option_parser)
for (name in names(opt)) {
if(is.na(opt[[name]])){
stop(paste0("Paramater --", name, " is required"))
}
}
dag <- randomDAG(opt$nNodes, opt$edgeProbability, opt$edgeValueLowerBound, opt$edgeValueUpperBound)
dataset <- rmvDAG(opt$nSamples,dag)
write.csv(dataset, tmpDataFile)
igraphDAG <- igraph.from.graphNEL(dag)
write_graph(igraphDAG, tmpGraphFile, "gml")
# PUT the generated CSV to the upload endpoint, then POST the ground-truth
# graph to /api/dataset/<id>/ground-truth on apiHost. Errors (HTTP failures
# or a response without a dataset id) abort the script via stop_for_status()
# / stop(). Reads the tmpDataFile/tmpGraphFile paths defined above.
upload_dataset <- function(uploadEndpoint, apiHost) {
  url <- paste0(uploadEndpoint)
  # times = 1 means no actual retry; RETRY is used for its status handling.
  response <- RETRY("PUT", url, body = list(file = upload_file(tmpDataFile)), encode = "multipart", times = 1, quiet=FALSE)
  stop_for_status(response)
  responseBody <- content(response)
  if (is.null(responseBody$id)){
    stop(paste0("Response did not contain dataset id", responseBody))
  }
  datasetId <- responseBody$id
  groundTruthEndpoint <- paste0("http://", apiHost, "/api/dataset/", datasetId, "/ground-truth")
  response <- RETRY(
    "POST",
    groundTruthEndpoint,
    body = list(graph_file = upload_file(tmpGraphFile)),
    encode = "multipart",
    times = 1,
    quiet=FALSE
  )
  stop_for_status(response)
}

# Script entry point: upload data + ground truth using the parsed CLI options.
upload_dataset(opt$uploadEndpoint, opt$apiHost)
|
129971d3823bfb2fbf60a3c0253bd14153b22153
|
bad78e83e107f4198ac82d62f241e1c4a7e043b8
|
/R/reexports.R
|
749c8c81ea60113b0e61ad264b812c2008b68919
|
[] |
no_license
|
rpruim/DemoCourse
|
ec209c8d006c468baf5a4d13436ce28c6c8a1318
|
4a4080267d24581961e9a98cb0ee9109097a1f3d
|
refs/heads/master
| 2020-03-20T01:53:47.432776
| 2018-06-12T16:45:18
| 2018-06-12T16:45:18
| 137,092,508
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
reexports.R
|
#' Re-export of \code{mosaic::favstats} under the name \code{favstats2};
#' documentation is inherited from the mosaic package.
#' @inherit mosaic::favstats
#' @export
favstats2 <- mosaic::favstats
|
c2b15092ee8ca126fa4f28d09400ac0ba84987af
|
74a6698f714d6e62060eef47ec875aec9e570086
|
/man/DiffusionRimp-package.Rd
|
b8a19bb5b8a03b1c3c3ade3ec0f7be5bb332dade
|
[] |
no_license
|
eta21/DiffusionRimp
|
2350a48f730309c373e38704cb624716558f8bc1
|
9eaed5ac9f1ed1b279cb61cf744aa57927bf1d80
|
refs/heads/master
| 2020-04-06T06:56:05.471125
| 2016-08-26T07:23:25
| 2016-08-26T07:23:25
| 43,482,032
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,013
|
rd
|
DiffusionRimp-package.Rd
|
\name{DiffusionRimp-package}
\alias{DiffusionRimp-package}
\alias{DiffusionRimp}
\docType{package}
\title{
Data-imputation and density approximations for diffusion processes.
}
\description{
A package for performing data imputation on discretely observed diffusion processes as well as calculating numerical approximations to transition and first passage time densities.
}
\details{
\tabular{ll}{
Package: \tab DiffusionRimp\cr
Type: \tab Package\cr
Version: \tab 0.1.0\cr
Date: \tab 2015-12-01\cr
License: \tab GPL (>= 2)\cr
}
Functions included in the package:
\tabular{lcl}{
\code{\link{RS.impute}} \tab:\tab Perform inference on a diffusion model using the random walk Metropolis-Hastings algorithm using the data-imputation algorithm. \cr
\code{\link{BiRS.impute}} \tab:\tab Perform inference on a bivariate diffusion model using the random walk Metropolis-Hastings algorithm using the data-imputation algorithm. \cr
\code{\link{MOL.density}} \tab:\tab Calculate the transitional density of a diffusion model using the method of lines.\cr
\code{\link{BiMOL.density}}\tab:\tab Calculate the transitional density of a bivariate diffusion model using the method of lines.\cr
\code{\link{MOL.passage}}\tab:\tab Calculate the first passage time density of a time-homogeneous diffusion model with fixed barriers (i.e., a two-barrier first passage time problem). \cr
\code{\link{BiMOL.passage}}\tab:\tab Calculate the first passage time density of a time-homogeneous bivariate diffusion model with fixed barriers (i.e., a four-barrier problem in two dimensions). \cr
\code{\link{MOL.aic}}*\tab:\tab Calculate a pseudo-AIC value for a diffusion model using the method of lines. \cr
\code{\link{BiMOL.aic}}*\tab:\tab Calculate a pseudo-AIC value for a bivariate diffusion model using the method of lines.\cr
}
* Functions use C++.
}
\author{
Etienne A.D. Pienaar \email{etiennead@gmail.com}
}
\keyword{ package }
\keyword{ C++}
\examples{
# example(RS.impute)
# example(MOL.density)
# example(MOL.passage)
}
|
6183880e62e681d72d0cfdc791270714a4190ef8
|
d3fd64b723dd45a32eb8992a450f0f66e3ef6ce5
|
/app.R
|
73e6fc95294cb8ab5ca00419690e38f3a9364075
|
[] |
no_license
|
vishu133/arulesapp
|
648d88d11d3b391deb3bb756ba8f7170bb03a238
|
f5e67589b926b220a1fa06bfc37de200aa7c1cff
|
refs/heads/master
| 2020-04-04T08:07:14.122470
| 2018-11-02T01:05:51
| 2018-11-02T01:05:51
| 155,771,848
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,435
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Attach a package by name, installing it from CRAN first if it is missing.
# x: package name (coerced to character). As before, the package is attached
# via require(); the return value is not meaningful.
# Fixes: replaced the unsafe T/F abbreviations with TRUE/FALSE and cleaned
# up brace placement.
EnsurePackage <- function(x) {
  x <- as.character(x)
  if (!require(x, character.only = TRUE)) {
    install.packages(x, repos = "https://cran.r-project.org/")
    require(x, character.only = TRUE)
  }
}
library(shiny)
library("arules")
library("arulesViz")
library("shiny")
library("caret")
library("colorspace")

data("AdultUCI")

# Data preprocessing for ARules: bin the numeric AdultUCI columns into ordered
# categorical *_grp columns so they can take part in association-rule mining.

# Age: 3 clusters found by k-means binning.
AdultUCI$age_grp <- discretize(AdultUCI$age, method = "cluster",
                               labels = c("Young","Middle Aged","Senior"),
                               order = T,onlycuts = F)
# Weekly hours: fixed breaks at 25/41/60.
AdultUCI$hours_per_week_grp <- discretize(AdultUCI$`hours-per-week`,method = "fixed",
                                          categories = c(-Inf,25,41,60,Inf),
                                          labels = c("Part-time","Full-time",
                                                     "Over-time","Burn-out 60"),
                                          order = T)
# NOTE(review): the two breaks vectors below define 5 intervals but only 4
# labels are supplied — confirm against arules::discretize's expectations.
AdultUCI$capital_gain_grp <- discretize(AdultUCI$`capital-gain`,method = "fixed",
                                        categories = c(-Inf,0,1,10000,99998,Inf),
                                        labels = c("None","Low",
                                                   "High","Super High"),
                                        order = T)
AdultUCI$capital_loss_grp <- discretize(AdultUCI$`capital-loss`,method = "fixed",
                                        categories = c(-Inf,0,1,1000,2000,Inf),
                                        labels = c("None","Low","Med",
                                                   "High"),
                                        order = T)
# Define the UI: a sidebar of apriori tuning parameters (support, confidence,
# rule-length bounds) plus a run button, and a main panel that prints the
# mined association rules.
ui <- fluidPage(

  # Application title
  titlePanel("Arules"),

  # Sidebar collecting the apriori() parameters; "button" triggers mining.
  sidebarLayout(
    sidebarPanel(
      numericInput("supp",
                   "Min Support:",
                   value = 0.1),
      numericInput("conf",
                   "Confidence",
                   value = 0.5),
      numericInput("minlen",
                   "Minimum Rules",
                   value = 3),
      numericInput("maxlen",
                   "Max Rules",
                   value = 10),
      actionButton("button", "Find Rules"))
    ,

    # Main panel: text output of the top rules.
    mainPanel(
      verbatimTextOutput("Arules")
    )
  ))
# Server logic: on each button press, mine association rules from the factor
# columns of AdultUCI with the user-supplied apriori parameters and print the
# top 10 rules sorted by lift.
server <- function(input, output) {
  amodel <- eventReactive(input$button,{
    rules_record1 <- apriori(AdultUCI[,sapply(AdultUCI, is.factor)],
                             parameter = list(support = input$supp,
                                              confidence = input$conf,
                                              minlen = input$minlen,
                                              maxlen = input$maxlen))
    # NOTE(review): [1:10] errors/pads when fewer than 10 rules are found — confirm.
    return(inspect(sort(rules_record1, by = "lift",decreasing = T)[1:10]))})
  output$Arules <- renderPrint(amodel())
}

# Run the application
shinyApp(ui = ui, server = server)
|
89fa61762e310daeeaba9b0c2b0ed81efafdb7d0
|
1f8ba5288dce0249e88b54f152aad8bf46ee8f49
|
/scripts/kmeans_n_grps_sab_sdm.R
|
1023dbcff2f36d5f449d00272d2c5d1bebc52d9b
|
[] |
no_license
|
philiphaupt/sabellaria_goodwin_sands
|
e9f3a84ec78d83a4fc32033ebb268c48ecf037ba
|
dfb9d22f68955a467857aec9f05925b93ba88c0e
|
refs/heads/main
| 2023-07-18T04:52:59.528719
| 2021-09-07T09:50:35
| 2021-09-07T09:50:35
| 366,675,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 600
|
r
|
kmeans_n_grps_sab_sdm.R
|
# Reclassify the Sabellaria SDM probability surface into n_prob_categories
# groups using spatially weighted k-means. Expects `sab_mod_goodwin` (raster)
# and `n_prob_categories` (integer) to already exist in the session.
library(ecbtools)
#-----------
# Convert the sabellaria raster to a raster stack, the input form
# raster.kmeans() expects.
sab_stack <- raster::stack(sab_mod_goodwin)

# Reclassify the probability map into categories based on values; the
# geo.weight term lets spatial proximity influence the clustering.
set.seed(123)  # reproducible k-means initialisation
kmeans_sab <- raster.kmeans(x = sab_stack, k = n_prob_categories, iter.max = 100, nstart = 10, geo = T, geo.weight = 0.2)

# test plot
plot(kmeans_sab,
     col = topo.colors(n_prob_categories),
     main = paste0(n_prob_categories, " probability categories, high and low")
)
|
e9416c2f2058ae255ea4dfaf317bd72d81851e8b
|
85c5490e286d6e966d882526f85dac038e74360f
|
/rSource/evaluate.R
|
e7b59ef6955e5d8119acc9b5e3153fa11df88044
|
[] |
no_license
|
markatango/langModel
|
44bae9fa3dd19610b5ba9cc7de2bc8af51430c8d
|
c21b4eb65c7c95b8ecfbc536bae6538433fdd18f
|
refs/heads/master
| 2021-01-23T19:38:04.366005
| 2019-06-23T17:45:15
| 2019-06-23T17:45:15
| 34,079,417
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,881
|
r
|
evaluate.R
|
# Score one held-out ngram: TRUE when its observed suffix is among the top
# (up to) 3 suffixes predicted for its prefix by the globally defined
# predictFromTextFold() closure.
# Fixes: the original ended with a dangling `lpw <- ...` assignment, so the
# result was returned invisibly; and `1:min(3, length(pWords))` is replaced
# by seq_len(), which is safe when no predictions are returned.
inOut <- function(testNgram) {
  pWords <- predictFromTextFold(testNgram$pref)$uSAD
  testNgram$suff %in% pWords[seq_len(min(3, length(pWords)))]
}
#========================== START ================================
# Cross-validated evaluation of the language model: NFOLD folds x one run
# per training-sample size, storing top-3 prediction accuracy in `perf`.

# remove existing corpus and ngram data and tokens to free up memory
rm(dCorpus,Ngrams,NgramDocStats,tokens)
gc()

NFOLD <- 5
# Fractions of the training set to evaluate (learning-curve style).
samplesSizes <- c(0.5,0.25,0.125,0.0625)
# perf[i, j] = accuracy for fold i at training fraction samplesSizes[j].
perf <- matrix(rep(0,NFOLD*length(samplesSizes)),nrow=NFOLD)

# load the reduced full text sets so things fit into memory
if(!exists("texts")){
  # If already present just read in here to sample
  # NOTE(review): `system(paste("dir ", ...))` is Windows-specific — confirm.
  fileList <- paste(unlist(strsplit(system(paste("dir ", dirSampName),intern=TRUE),"\\s+")),sep=" ")
  fullFileNames <- paste(dirSampName,"/",fileList,sep="")
  texts <- lapply(fullFileNames, readLines)
  nTexts <- sapply(texts,function(t)length(t))
  nDocs <- length(nTexts)
  docNames <- fileList
}
# Outer loop: one random train/test split per fold. Inner loop: retrain the
# ngram model on each training-sample fraction and score it on the fold's
# held-out test ngrams via inOut().
for(i in 1:NFOLD){
  # Randomly assign ~1/NFOLD of each document's lines to the test set.
  testSelect <- lapply(nTexts,function(n) {runif(n)<1/NFOLD } )
  # The anonymous-function `i` below shadows the fold index — intentional,
  # it just indexes documents.
  testTexts <- lapply(1:length(testSelect),function(i) texts[[i]][testSelect[[i]]])
  trainTexts <- lapply(1:length(testSelect),function(i) texts[[i]][!(testSelect[[i]])])

  # sample the training set for this run
  ntTexts <- sapply(trainTexts,function(t)length(t))
  ntDocs <- length(nTexts)

  # use a subsample of the test samples due to speed considerations
  # NOTE(review): sample(1:n, ...) misbehaves if a document has 0 test lines — confirm.
  nttTtexts <- sapply(testTexts,function(t)length(t))
  testTexts <- lapply(1:length(testTexts),function(i){
    testTexts[[i]][sample(1:nttTtexts[i], min(TESTSUBSAMPLESIZE,nttTtexts[i]))]
  })

  # Write the test subsample to the temp dir and tokenise it into ngrams
  # of orders 2..NMAX via the project's getTokens() helper.
  removeFiles(dirTempName)
  writeSamples(testTexts,dirTempName)
  dCorpus <- Corpus(DirSource(dirTempName))
  testTokens <- lapply(1:ntDocs,
                       function(t) lapply(2:NMAX,
                                          function(i) getTokens(i,t)
                       )
  )
  # Flatten the nested per-doc/per-order token tables into one data frame.
  testNgrams <- foreach(b=iter(testTokens),.combine="rbind") %do% {
    foreach(c=iter(b),.combine="rbind") %do% c
  }
  #testNgrams <- testNgrams[-which(testNgrams$doc==0),]
  testNgrams$pref <- exPrefix(testNgrams$tokens)
  testNgrams$suff <- exSuffix(testNgrams$tokens)

  for(j in 1:length(samplesSizes)){
    # Fixed seed so every sample size draws from the same random stream.
    set.seed(1340)
    sampTexts <- lapply(1:length(trainTexts),function(i){
      trainTexts[[i]][sample(1:ntTexts[i], samplesSizes[j] * ntTexts[i])]
    })

    # Rebuild the training corpus and ngram tables for this sample size.
    removeFiles(dirTempName)
    writeSamples(sampTexts,dirTempName)
    dCorpus <- Corpus(DirSource(dirTempName))
    tokens <- lapply(1:ntDocs,
                     function(t) lapply(2:NMAX,
                                        function(i) getTokens(i,t)
                     )
    )
    Ngrams <- foreach(b=iter(tokens),.combine="rbind") %do% {
      foreach(c=iter(b),.combine="rbind") %do% c
    }
    Ngrams$pref <- unlist(exPrefix(Ngrams$tokens))
    Ngrams$suff <- unlist(exSuffix(Ngrams$tokens))
    # Drop the raw tokens column, sort by frequency, and remove empty suffixes.
    Ngrams <- Ngrams[,-which(names(Ngrams)=="tokens")]
    Ngrams <- Ngrams[order(-Ngrams$count, Ngrams$N, Ngrams$doc),]
    Ngrams <- Ngrams[which(Ngrams$suff!=""),]
    #Ngrams <- Ngrams[-which(Ngrams$count<FILTERTHRESHOLD),]
    Ngrams$n <- 1:dim(Ngrams)[1]

    # Per-(pref, suff) counts by document, plus a grand total per pair.
    NgramDocStats <- dcast(Ngrams, pref+suff~doc,value.var="count",sum)
    NgramDocStats$total <- rowSums(NgramDocStats[,c(as.character(1:ntDocs))], na.rm=TRUE)
    NgramDocStats <- NgramDocStats[order(NgramDocStats$pref, -NgramDocStats$total),]

    # Keep the top-10 suffixes per prefix (sub5/sub15 built but unused here).
    sub5 <- subber(5)
    sub10 <- subber(10)
    sub15 <- subber(15)
    sNDS <- ddply(NgramDocStats, .(pref), sub10)

    # for each testNgram, is suff in predict(pref)
    predictFromTextFold <- predictor(sNDS)
    for (k in 1: dim(testNgrams)[1]) {
      perf[i,j] <- perf[i,j] + inOut(testNgrams[k,])*1
    }
    # Convert the hit count into an accuracy for this fold/sample size.
    perf[i,j] <- perf[i,j]/dim(testNgrams)[1]
  }
}
# Persist the fold x sample-size accuracy matrix for later analysis.
# NOTE(review): paste(x, sep = " ") on a single vector is a no-op; the
# intended column names may have wanted a prefix — confirm.
perf.df <-as.data.frame(perf)
names(perf.df) <- paste(samplesSizes,sep=" ")
save(perf.df, file=paste0("perf_",TESTSUBSAMPLESIZE,".RData"))
|
f52a0be6b9d4952261bb37ed684ccd4127b8c9f7
|
798ccd069fc99bb33a0e476d6f8043659c319c3b
|
/R/methods-plotMappingRate.R
|
a901840a8bd699bdb8b80e4aebf66972d11d2cec
|
[
"MIT"
] |
permissive
|
microsud/bcbioRNASeq
|
076beefe9a928f7ac17ca785f23e060a0a181f58
|
ebccb143522938ad87388e1f1d734862b1d81d6d
|
refs/heads/master
| 2021-07-07T06:51:34.259240
| 2017-09-28T14:28:28
| 2017-09-28T14:28:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,499
|
r
|
methods-plotMappingRate.R
|
#' Plot Mapping Rate
#'
#' @rdname plotMappingRate
#' @name plotMappingRate
#'
#' @examples
#' data(bcb)
#'
#' # bcbioRNADataSet
#' plotMappingRate(bcb)
#'
#' # data.frame
#' metrics(bcb) %>% plotMappingRate
NULL
# Constructors ====
# Build the mapping-rate barplot from a metrics data frame.
# object: metrics data frame (needs sampleName, mappedReads, totalReads and
#   the interestingGroup column); returns NULL when object is NULL.
# passLimit/warnLimit: horizontal QC threshold lines (NULL disables each).
# flip: TRUE flips coordinates so sample names run down the y axis.
.plotMappingRate <- function(
    object,
    interestingGroup = "sampleName",
    passLimit = 90L,
    warnLimit = 70L,
    flip = TRUE) {
  if (is.null(object)) return(NULL)
  # Mapping rate as percent of total reads; aes_ quoting keeps the
  # interesting-group column programmable via as.name().
  p <- ggplot(object,
              aes_(x = ~sampleName,
                   y = ~mappedReads / totalReads * 100L,
                   fill = as.name(interestingGroup))) +
    geom_bar(stat = "identity") +
    ylim(0L, 100L) +
    labs(title = "mapping rate",
         x = "sample",
         y = "mapping rate (%)") +
    scale_fill_viridis(discrete = TRUE)
  if (!is.null(passLimit)) {
    p <- p + qcPassLine(passLimit)
  }
  if (!is.null(warnLimit)) {
    p <- p + qcWarnLine(warnLimit)
  }
  if (isTRUE(flip)) {
    p <- p + coord_flip()
  }
  p
}
# Methods ====
# bcbioRNADataSet method: extract the metrics and interesting group from the
# object, then delegate to the .plotMappingRate constructor.
#' @rdname plotMappingRate
#' @export
setMethod("plotMappingRate", "bcbioRNADataSet", function(
    object,
    passLimit = 90L,
    warnLimit = 70L,
    flip = TRUE) {
  .plotMappingRate(
    metrics(object),
    interestingGroup = .interestingGroup(object),
    passLimit = passLimit,
    warnLimit = warnLimit,
    flip = flip)
})

# data.frame method: the constructor already accepts a metrics data frame.
#' @rdname plotMappingRate
#' @export
setMethod("plotMappingRate", "data.frame", .plotMappingRate)
|
7ebd11349f40eba5d7e84d3cc561073a067de750
|
c27f5e62c95bde9bea315ac11d4c943647c97164
|
/R/plot_abundance.R
|
1fce624a7c4c72259e56bc7a827445a989589a5c
|
[] |
no_license
|
cran/imsig
|
65ad818ffef83952fc2c772c49a1ea7118ba3242
|
06be804141040ba3d59d11a7e620794ea9567512
|
refs/heads/master
| 2021-06-18T08:15:03.023680
| 2021-01-10T00:00:02
| 2021-01-10T00:00:02
| 145,896,424
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,737
|
r
|
plot_abundance.R
|
#' @title Plot relative abundance of immune cells
#' @description Barplots of relative abundance of immune cells across samples.The order of the samples are the same as that of \code{\link{imsig}}.
#' @param exp Dataframe of transcriptomic data (natural scale) containing genes as rows and samples as columns. Note: Gene names should be set as row names and duplicates are not allowed. Missing values are not allowed within the expression matrix. Check example- head(example_data): \code{\link{example_data}}.
#' @param r Use a value between 0 and 1. Default is 0.6. This is a user defined correlation cut-off to perform feature selection (\code{\link{feature_select}}). Feature selection aids to enrich the prediction of relative abundance of immune cells by filtering off poorly correlated ImSig genes. To get an idea of what cut-off to use check the results of (\code{\link{gene_stat}}) and choose a cut-off that displays high median correlation and maintains a high proportion of genes after feature selection.
#' @return ggplot
#' @import ggplot2
#' @import gridExtra
#' @examples
#' plot_abundance (exp = example_data, r = 0.7)
#' @seealso \code{\link{feature_select}}, \code{\link{example_data}}
#' @export
# Estimate relative immune-cell abundance with imsig(), then draw one barplot
# per cell type (all cell-type columns), arranged in a grid.
# exp: genes-x-samples expression matrix (natural scale); r: correlation
# cut-off forwarded to imsig(). Returns the gridExtra arrangement.
# Fixes: `=` top-level assignment replaced with `<-`, and the unsafe
# `1:(ncol(cell)-1)` replaced with seq_len().
plot_abundance <- function(exp, r = 0.6){
  cell <- imsig(exp, r)
  cell$samples <- row.names(cell)
  # Freeze sample order so ggplot keeps imsig()'s row ordering on the axis.
  cell$samples <- factor(cell$samples, levels = cell$samples)
  # One barplot per cell-type column; the last column holds the sample
  # names, hence ncol(cell) - 1.
  plots <- lapply(seq_len(ncol(cell) - 1), function(x)
    ggplot(cell, aes(x = cell$samples, y = cell[, x])) +
      geom_bar(stat = "identity") + theme_classic() +
      theme(axis.title.x = element_blank(), axis.text.x = element_blank(),
            axis.title.y = element_blank()) +
      ggtitle(colnames(cell)[x]))
  do.call(grid.arrange, plots)
}
|
e071fb9bdbde52aab6af2f055dc317905464b96d
|
961687ab3cab0f0ab2774eacd0b740c5199df6b5
|
/R/GDALDB.R
|
ac4b44c02f439e58875a84d41ff83f31099f1b3c
|
[] |
no_license
|
mdsumner/RGDALDB
|
a199095774cf84960ba60902404bc8cf5d2d25b3
|
a1a3d04d3ebddb4a9e7d5f40b44a496c4a28502d
|
refs/heads/master
| 2020-03-28T12:52:57.444879
| 2019-10-14T01:30:13
| 2019-10-14T01:30:13
| 148,343,394
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,758
|
r
|
GDALDB.R
|
#' Driver for GDALDB database.
#'
#' DBI driver class backed by GDAL data sources (read via sf).
#'
#' @keywords internal
#' @export
#' @import DBI
#' @import methods
setClass("GDALDBDriver", contains = "DBIDriver")

# Nothing to release; unloading always succeeds.
#' @export
#' @rdname GDALDB-class
setMethod("dbUnloadDriver", "GDALDBDriver", function(drv, ...) {
  TRUE
})

# Compact console representation of the driver.
setMethod("show", "GDALDBDriver", function(object) {
  cat("<GDALDBDriver>\n")
})

# Construct the driver object users pass to dbConnect().
#' @export
GDALDB <- function() {
  new("GDALDBDriver")
}
#' GDALDB connection class.
#'
#' Thin S4 wrapper around a GDAL/OGR data source name (DSN). No handle is
#' kept open; each query re-reads from `dsn` via sf.
#'
#' @export
#' @keywords internal
setClass("GDALDBConnection",
         contains = "DBIConnection",
         slots = list(
           dsn = "character"
           # Possible future slots:
           #username = "character"#,
           # and so on
           #ptr = "externalptr"
         )
)
#' @param drv An object created by \code{GDALDB()}
#' @param dsn Path (or GDAL connection string) of the data source to open.
#' @rdname GDALDB
#' @export
#' @examples
#' \dontrun{
#' db <- dbConnect(RGDALDB::GDALDB(), dsn = system.file("extdata/nc.gpkg", package= "RGDALDB"))
#' #dbWriteTable(db, "mtcars", mtcars)
#' dbGetQuery(db, "SELECT * FROM 'nc.gpkg' WHERE AREA < 0.07")
#' }
setMethod("dbConnect", "GDALDBDriver", function(drv, dsn = "", ...) {
  # No validation or handle opening happens here; the DSN is simply
  # recorded and resolved lazily by each query.
  new("GDALDBConnection", dsn = dsn, ...)
})
#' @rdname GDALDB
#' @export
setMethod("show", "GDALDBConnection", function(object) {
  # Print the DSN plus the GDAL driver(s) reported for its layers.
  # Note this touches the file system via sf::st_layers().
  cat("<GDALDBConnection>\n\n")
  cat(sprintf("DataSource: %s\n", object@dsn))
  cat(sprintf("Driver : %s\n", unique(sf::st_layers(object@dsn)$driver)))
})
#' @rdname GDALDB
#' @export
setMethod("dbDisconnect", "GDALDBConnection", function(conn, ...) {
  # No open handle to close; the connection only stores the DSN.
  TRUE
})
#' GDALDB results class.
#'
#' Holds the fully materialised result of a query as an sf object; there
#' is no incremental cursor.
#'
#' @keywords internal
#' @export
setClass("GDALDBResult",
         contains = "DBIResult",
         slots = list(data = "sf")
)
#' Send a query to GDALDB
#'
#' Executes an SQL statement against the connection's data source via
#' \code{sf::read_sf} and wraps the fully materialised result in a
#' \code{GDALDBResult}.
#'
#' @param conn A \code{GDALDBConnection}.
#' @param statement SQL understood by the underlying OGR driver.
#' @param ... Passed on to \code{sf::read_sf}.
#' @return A \code{GDALDBResult}.
#' @export
setMethod("dbSendQuery", "GDALDBConnection", function(conn, statement, ...) {
  # Debug print(statement) removed: dbSendQuery must not write to the
  # console on every query.
  out <- sf::read_sf(conn@dsn, query = statement, ...)
  # `...` is intentionally NOT forwarded to new(): GDALDBResult has only
  # the `data` slot, so any extra argument would raise an error there.
  new("GDALDBResult", data = out)
})
#' @export
setMethod("dbClearResult", "GDALDBResult", function(res, ...) {
  # The whole result already lives in memory; nothing to free.
  TRUE
})
#' Retrieve records from GDALDB query
#'
#' @param res A \code{GDALDBResult}.
#' @param n Maximum number of records to return. The default \code{-1}
#'   (any negative value) returns all records; \code{n = 0} returns zero
#'   rows, as the DBI contract requires.
#' @export
setMethod("dbFetch", "GDALDBResult", function(res, n = -1, ...) {
  # head() replaces dplyr::slice(): it drops the undeclared dplyr
  # dependency and fixes n = 0, which the old `if (n > 0)` test silently
  # turned into "return everything".
  if (n >= 0) head(res@data, n) else res@data
})
#' @export
setMethod("dbHasCompleted", "GDALDBResult", function(res, ...) {
  # Results are materialised eagerly, so a result is always "complete".
  TRUE
})
#' @export
setMethod("dbListTables", "GDALDBConnection", function (conn, ...) {
  # The data source's layers play the role of tables.
  sf::st_layers(conn@dsn)$name
}
)
#' @export
setMethod("dbExistsTable", "GDALDBConnection", function (conn, name, ...) {
  name %in% sf::st_layers(conn@dsn)$name
}
)
#' @export
setMethod("dbListFields", "GDALDBConnection", function (conn, name, ...)
{
  # Column names are discovered by fetching a single row.
  # NOTE(review): `name` is interpolated unquoted; layer names containing
  # dots or spaces (e.g. 'nc.gpkg', quoted in the dbConnect example)
  # presumably need quoting here too — confirm against the OGR SQL dialect.
  names(dbGetQuery(conn, sprintf("SELECT * FROM %s LIMIT 1", name)))
})
|
8c04ea027042918462c175952d2ef58f68875a43
|
0923c511a5876bcd257f48193d31f0a957598cbc
|
/mejor.R
|
8bcdb9949769d183148b82f4864a06b7b392462b
|
[] |
no_license
|
Earellanom/Programacion_Actuarial_III
|
978b322991bb8293276409dde0504ae7325afc87
|
17c3ff4ef2575384893fdebd252120caad160eda
|
refs/heads/master
| 2021-01-09T05:59:48.418673
| 2017-06-04T19:09:28
| 2017-06-04T19:09:28
| 80,865,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,614
|
r
|
mejor.R
|
# Return the hospital with the lowest 30-day mortality for a given
# outcome ("ataque" = heart attack, "falla" = heart failure,
# "neumonia" = pneumonia) in a given US state. Ties are broken
# alphabetically by hospital name.
#
# Fixes: removed the `break` statements that followed each stop() —
# stop() already aborts, and `break` outside a loop is itself an error;
# removed a dead `data.frame(filtro4)` whose result was discarded;
# replaced T/F with TRUE/FALSE.
mejor <- function(estado, resultado){
  # Locate the data file, moving into the data directory when necessary.
  # NOTE: setwd() here is a side effect inherited from the original
  # script-style design; callers should be aware the working directory
  # may change.
  if (file.exists("outcome-of-care-measures.csv") == FALSE) {
    if (file.exists("Calidad de Hospitales - data") == TRUE) {
      setwd("./Calidad de Hospitales - data")
    } else {
      stop("No existe la base de datos en el directorio.")
    }
  }
  outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Valid state abbreviations present in the data.
  state <- names(split(outcome$State, outcome$State))
  if ((estado %in% state) == FALSE) {
    stop("Estado invalido.")
  }
  if ((resultado != "ataque") && (resultado != "falla") && (resultado
                                                           != "neumonia")) {
    stop("Resultado invalido.")
  }
  # Column 11 = heart attack, 17 = heart failure, 23 = pneumonia 30-day
  # mortality; column 2 = hospital name, column 7 = state.
  if (resultado == "ataque") {
    tabla <- cbind(outcome[, 2], outcome[, 7], outcome[, 11])
  } else {
    if (resultado == "falla") {
      tabla <- cbind(outcome[, 2], outcome[, 7], outcome[, 17])
    } else {
      tabla <- cbind(outcome[, 2], outcome[, 7], outcome[, 23])
    }
  }
  # Keep hospitals in the requested state with an available rate.
  filtro1 <- cbind((tabla[(tabla[, 2] == estado), 1]), (tabla[(tabla[, 2] == estado), 3]))
  filtro2 <- cbind(filtro1[(filtro1[, 2] != "Not Available"), 1],
                   as.numeric(filtro1[(filtro1[, 2] != "Not Available"), 2]))
  filtro3 <- sort(as.numeric(filtro2[, 2]))
  # Hospitals achieving the minimum rate; alphabetical tie-break.
  filtro4 <- cbind(filtro2[(filtro2[, 2] == min(filtro3)), 1])
  sort(filtro4)[1]
}
mejor("MD","neumonia")
|
8f14d8c27aae62ca4d40510b20137645f1c00979
|
7398b8fe21f917e6ec35cf111984e87650f1b199
|
/r4ds_model.r
|
4a1c62ca0cf3fc87bdd5a0417fb42ed75b07aa10
|
[] |
no_license
|
JSA10/r4ds
|
a8eac7d550dfbab5b4de9cddbb8546f12f8e5d99
|
a7580b75e7edabb36bd8bb7bcb44859a60d2cf5b
|
refs/heads/master
| 2021-01-21T17:46:59.742104
| 2017-05-21T21:41:16
| 2017-05-21T21:41:16
| 91,989,043
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,048
|
r
|
r4ds_model.r
|
#R4DS - IV MODEL
#
#23.1 Intro
#modelr package which wraps around base R’s modelling functions to make them work naturally in a pipe.
#
library(tidyverse)
library(modelr)
options(na.action = na.warn)
#simulated dataset sim1 (from modelr) - simple with two variables
g <- ggplot(sim1, aes(x, y))
g + geom_point()
#see a strong pattern -> use a model to make it explicit
#need to supply basic form of the model -> in this case it looks like a linear relationship
#Practise with a few randomly generated models
# NOTE(review): runif() is unseeded, so the 250 candidate models differ
# between runs; add set.seed() before this block for reproducibility.
models <- tibble(
  a1 = runif(250, -20, 40),
  a2 = runif(250, -5, 5)
)
#use geom_abline to add straight lines -> needs intercept and slope as inputs
g + geom_abline(aes(intercept = a1, slope = a2), data = models, alpha = 1/4) +
  geom_point()
#Woah
#this fits 250 random models onto the plot - most are terrible
#
#simple start point
#need to calculate distance between the models 'predictions' and the 'actual' data points to assess
#To compute first turn model family into an R function.
#This takes the model parameters (a[1] = intercept and a[2] = slope)
#and the data as inputs, and gives values predicted by the model as output:
# Linear model family: prediction = intercept + slope * x.
# `a` holds the parameters (a[1] = intercept, a[2] = slope); `data`
# must contain the predictor column `x`.
model1 <- function(a, data) {
  intercept <- a[1]
  slope <- a[2]
  intercept + slope * data$x
}
model1(c(7, 1.5), sim1)
#7 = intercept and 1.5 = slope
#root mean squared deviation - a popular way to compute overall distance btw predicted and actual
#values - gets it down to 1 number
# NOTE: the prose below previously sat inside a bare """ ... """ string
# literal. R has no block comments and no triple-quoted strings, so that
# idiom only parses by accident and breaks the whole script the moment
# the prose contains a double quote. Converted to ordinary comments.
#
# *** Investigate difference between OLS (Ordinary Least Squares)
# From https://stats.stackexchange.com/questions/146092/mean-squared-error-versus-least-squared-error-which-one-to-compare-datasets
# 'To sum up, keep in mind that LSE is a method that builds a model and MSE is a metric that evaluate
# your model's performances.'
#function to calculate RMSE - we take the difference between actual and predicted,
#square them, take the average and then take the square root
# Root-mean-squared deviation between the observed response and the
# predictions of candidate model `mod` (a c(intercept, slope) pair).
# Lower values mean a better fit.
measure_distance <- function(mod, data) {
  prediction_error <- data$y - model1(mod, data)
  squared_error <- prediction_error^2
  sqrt(mean(squared_error))
}
#RMSE = (sq)Root, Mean, Square of the Difference(Error)
measure_distance(c(7, 1.5), sim1)
#> [1] 2.67 = RMSE
#Now we can use purrr to compute the distance for all the models defined above.
#We need a helper function because our distance function expects the model as a
#numeric vector of length 2.
# Adapter so map2_dbl() can feed scalar (a1, a2) pairs into
# measure_distance(); note it closes over the global `sim1`.
sim1_dist <- function(a1, a2) {
  measure_distance(c(a1, a2), sim1)
}
# Score every random candidate model against the data.
models <- models %>%
  mutate(dist = purrr::map2_dbl(a1, a2, sim1_dist))
models
#Next overlay 10 best models on data
# Overlay the 10 best-scoring random models on the raw data.
g + geom_point(size = 2, colour = "grey30") +
  geom_abline(
    aes(intercept = a1, slope = a2, colour = -dist),
    data = filter(models, rank(dist) <= 10)
  )
#-dist arg to colour function in reverse order, with low = good
#rank used as conditional operator to select the models with rank based on dist
#less than or equal to 10
#We can also think about these models as observations, and visualising with a scatterplot
#of a1 vs a2, again coloured by -dist
ggplot(models, aes(a1, a2)) +
  geom_point(data = filter(models, rank(dist) <= 10), size = 4, colour = "red") +
  geom_point(aes(colour = -dist))
#top 10 models coloured red
#Instead of trying lots of random models, we could be more systematic and generate
#an evenly spaced grid of points (this is called a grid search)
# 25 x 25 evenly spaced (a1, a2) candidates, scored the same way.
grid <- expand.grid(
  a1 = seq(-5, 20, length = 25),
  a2 = seq(1, 3, length = 25)
) %>%
  mutate(dist = purrr::map2_dbl(a1, a2, sim1_dist))
grid %>%
  ggplot(aes(a1, a2)) +
  geom_point(data = filter(grid, rank(dist) <= 10), size = 4, colour = "red") +
  geom_point(aes(colour = -dist))
# When you overlay the best 10 models back on the original data, they all look pretty good:
g +
  geom_point(size = 2, colour = "grey30") +
  geom_abline(
    aes(intercept = a1, slope = a2, colour = -dist),
    data = filter(grid, rank(dist) <= 10)
  )
# NOTE: the prose passages below were previously wrapped in bare
# """ ... """ string literals. R has no block comments or triple-quoted
# strings, so those literals parsed only by accident and would break the
# script as soon as the prose contained a double quote. They are now
# ordinary comments; the code between them is unchanged.
#
# You could imagine iteratively making the grid finer and finer until you narrowed
# in on the best model. But there's a better way to tackle that problem: a numerical
# minimisation tool called Newton-Raphson search. The intuition of Newton-Raphson is
# pretty simple: you pick a starting point and look around for the steepest slope.
# You then ski down that slope a little way, and then repeat again and again, until
# you can't go any lower. In R, we can do that with optim():
best <- optim(c(0, 0), measure_distance, data = sim1)
best$par
#> [1] 4.22 2.05
g +
  geom_point(size = 2, colour = "grey30") +
  geom_abline(intercept = best$par[1], slope = best$par[2])
# Don't worry too much about the details of how optim() works. It's the intuition
# that's important here. If you have a function that defines the distance between a
# model and a dataset, an algorithm that can minimise that distance by modifying the
# parameters of the model, you can find the best model. The neat thing about this
# approach is that it will work for any family of models that you can write an equation for.
#
# There's one more approach that we can use for this model, because it's is a special
# case of a broader family: linear models. A linear model has the general form
# y = a_1 + a_2 * x_1 + a_3 * x_2 + ... + a_n * x_(n - 1). So this simple model is
# equivalent to a general linear model where n is 2 and x_1 is x. R has a tool
# specifically designed for fitting linear models called lm(). lm() has a special
# way to specify the model family: formulas. Formulas look like y ~ x, which lm()
# will translate to a function like y = a_1 + a_2 * x. We can fit the model and
# look at the output:
sim1_mod <- lm(y ~ x, data = sim1)
coef(sim1_mod)
#> (Intercept) x
#> 4.22 2.05
#same result as previous steps
# Behind the scenes lm() doesn't use optim() but instead takes advantage of the
# mathematical structure of linear models. Using some connections between geometry,
# calculus, and linear algebra, lm() actually finds the closest model in a single step,
# using a sophisticated algorithm. This approach is both faster, and guarantees that
# there is a global minimum.
|
cc41dc11962a377572f7072d92d9c0673fd752b4
|
cfb0fe2aa2f8885648c74248ce15df72005cee7c
|
/R/Fig5-2_Fragmentierung.R
|
3bbc21cd5433767e3b6a302da000a0f1d0b7241c
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
dirkseidensticker/EggertSeidensticker2016CampoAP31
|
42771996e662fcdf7733709d8bad86d8d5bb6528
|
184bb82a018cfd8e7c9c48660d50f3a79f479396
|
refs/heads/master
| 2022-12-25T12:36:19.533405
| 2022-12-19T13:08:51
| 2022-12-19T13:08:51
| 110,661,004
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,429
|
r
|
Fig5-2_Fragmentierung.R
|
###########################
# Fig. 5.2 Fragmentierung #
###########################
# Attach every package the script uses up front: sqldf and ggplot2 are
# called further down but were never loaded, so a fresh run failed at
# the first sqldf() / ggplot() call.
library(ODB)
library(sqldf)
library(ggplot2)
connection <- odb.open("data/CampoDB.odb")
# Join the sherd-size table to the main finds table on ObjID.
sql <- 'SELECT
"t_Obj"."ObjID",
"t_Obj"."feature",
"t_Obj"."material",
"t_Obj"."GE",
"t_Obj"."n",
"t_Obj"."type",
"t_Obj"."weight",
"t_Obj_pottery"."size"
FROM "t_Obj_pottery", "t_Obj"
WHERE "t_Obj_pottery"."ObjID" = "t_Obj"."ObjID"'
df <- odb.read(connection, sql)
# Burials (Gräber): everything that is neither a pit nor a survey find.
df_a <- sqldf('select * from df where feature NOT LIKE "%07/11%"
AND feature NOT LIKE "%07/13%"
AND feature NOT LIKE "%07/95%"
AND feature NOT LIKE "%05/101%"
AND feature NOT LIKE "%07/106%"
AND feature NOT LIKE "%07/109%"')
# Count sherds per feature and size class, then total per size class.
df_a_pivot <- tapply(df_a$n, list(df_a$feature, df_a$size), length)
a = colSums (df_a_pivot, na.rm = TRUE, dims = 1)
# Pits (Gruben).
# NOTE(review): the first pattern is "%07/11" (no trailing %) while the
# exclusion above used "%07/11%" — confirm this asymmetry is intended.
df_b <- sqldf('select * from df where feature LIKE "%07/11"
OR feature LIKE "%07/13%"')
df_b_pivot <- tapply(df_b$n, list(df_b$feature, df_b$size), length)
b = colSums (df_b_pivot, na.rm = TRUE, dims = 1)
# Survey finds.
df_d <- sqldf('select * from df where feature LIKE "%05/101%"
OR feature LIKE "%07/106%"
OR feature LIKE "%07/109%"')
df_d_pivot <- tapply(df_d$n, list(df_d$feature, df_d$size), length)
d = colSums (df_d_pivot, na.rm = TRUE, dims = 1)
a <- data.frame(a) # convert back to a data frame
names(a)[1] <- "Anzahl"
a$Prozent <- a$Anzahl / sum(a$Anzahl) *100
a
b <- data.frame(b)
names(b)[1] <- "Anzahl"
b$Prozent <- b$Anzahl / sum(b$Anzahl) *100
b
# Label the two groups and keep the size class as a column for plotting.
a$Type <- 'Burials'
a$Size <- row.names(a)
b$Type <- 'Pits'
b$Size <- row.names(b)
c <- rbind(a, b)
c
# Ordered factors keep the size classes in their original order on the x axis.
a$Size <- as.character(a$Size)
a$Size <- factor(a$Size, levels=unique(a$Size), ordered=TRUE)
b$Size <- as.character(b$Size)
b$Size <- factor(b$Size, levels=unique(b$Size), ordered=TRUE)
# http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/
#
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange several ggplot objects on one page using grid viewports
# (the well-known Cookbook for R helper).
# NOTE(review): the `file` argument is accepted but never used.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    # A single plot needs no viewport machinery.
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Plot
# ----
# Side-by-side frequency bar charts of sherd size classes for burials
# and pits, then export the figure as a PDF.
p1 <- ggplot(a, aes(x = Size, y = Anzahl)) +
  geom_bar(stat = "identity", fill = "grey", colour = "black", size = .25) +
  scale_x_discrete(name = "Size Classes") +
  scale_y_continuous(name = 'Frequency') +
  ggtitle("Burials\n") +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p2 <- ggplot(b, aes(x = Size, y = Anzahl)) +
  geom_bar(stat = "identity", fill = "grey", colour = "black", size = .25) +
  scale_x_discrete(name = "Size Classes") +
  scale_y_continuous(name = 'Frequency') +
  ggtitle("Pits\n") +
  theme_bw() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
multiplot(p1, p2, cols=2)
# Write the current device contents to disk.
dev.print(device = pdf, "output/Fig5-2_Fragmentierung.pdf", width = 10, height = 6)
|
a4e86bc39e399dd9a15fc114784a176421149ec6
|
6883bea0a22bd6b586cb01b14276ae59ab807fce
|
/ui.R
|
74b259bbd7f6e526091d84b319942c7cf027b7a2
|
[] |
no_license
|
stefMT2970/DevDataProducts
|
8026482f8e33425b290c1d2b3c896e3fd30b12b5
|
09c9c4a1e381f644c56d2e491a3e0fbef23c5ea7
|
refs/heads/master
| 2020-05-20T02:56:47.334993
| 2015-07-20T12:32:08
| 2015-07-20T12:32:08
| 39,002,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,882
|
r
|
ui.R
|
library(shiny)
library(shinydashboard)
# define tab's upfront
#
# Each *Tab object below is a shinydashboard tabItem, assembled into the
# dashboard body at the bottom of the file. All four distribution tabs
# follow the same layout: explanatory text, one or two sliders, a plot,
# and (normal tab only) a summary table. The input ids (normSize,
# poisLambda, ...) and output ids (normPDF, ...) are read/filled by the
# matching server.
# Normal distribution: sample-size slider only.
normTab = tabItem(
  tabName="normDis",
  tags$p("Select a sample size to generate normal random draws."),
  tags$p( "The vertical line shown is the mean of the random draws,
          A standard normal Gauss curve is superimposed.
          The summary table applies to the random draws."),
  fluidRow(
    column(6,
           h4("Select sample size"),
           sliderInput(inputId= 'normSize',
                       label ='Sample Size',
                       min=50, max=2000,
                       value=500,
                       step=50)
    )
  ),
  fluidRow(
    plotOutput("normPDF")
  ),
  fluidRow(
    verbatimTextOutput("normSummary")
  )
)
# Poisson distribution: sample size plus lambda (mean) slider.
poissonTab = tabItem(
  tabName="poissonDis",
  tags$p("Select a sample size and lambda value to generate Poisson random draws.
         The vertical line shown is the mean of the random draws,
         which should be close to the selected lambda value.
         The blue dots show the theoretical Poisson distribution."),
  fluidRow(
    column(6,
           h4("Select sample size"),
           sliderInput(inputId= 'poisSize',
                       label ='Sample Size',
                       min=50, max=2000,
                       value=500,
                       step=50)
    ),
    column(6,
           h4("Select lambda"),
           sliderInput(inputId= 'poisLambda',
                       label ='Lambda (mean)',
                       min=1, max=20,
                       value=2,
                       step=0.2)
    )
  ),
  fluidRow(
    plotOutput("poissonPDF")
  )
)
# Chi-square distribution: sample size plus degrees-of-freedom slider.
chisqTab = tabItem(
  tabName="chisqDis",
  tags$p("Select a sample size and v (degrees of freedom) value to generate
         chi square random draws.
         The vertical line shown is the mean of the random draws which should
         be close to the degrees of freedom. The blue line shows the
         theoretical chi square distribution
         "),
  fluidRow(
    column(6,
           h4("Select sample size"),
           sliderInput(inputId= 'chisqSize',
                       label ='Sample Size',
                       min=50, max=2000,
                       value=500,
                       step=50)
    ),
    column(6,
           h4("Select degrees of freedom"),
           sliderInput(inputId= 'chisqV',
                       label ='Degrees of freedom',
                       min=2, max=50,
                       value=5,
                       step=1)
    )
  ),
  fluidRow(
    plotOutput("chisqPDF")
  )
)
# Student t distribution: sample size plus degrees-of-freedom slider.
tTab = tabItem(
  tabName="tDis",
  tags$p("Select a sample size and v (degrees of freedom) value to generate
         Students t random draws.
         The vertical line shown is the mean of the random draws which should
         be close to 0. The blue line shows the
         theoretical t distribution.
         "),
  fluidRow(
    column(6,
           h4("Select sample size"),
           sliderInput(inputId= 'tSize',
                       label ='Sample Size',
                       min=50, max=2000,
                       value=500,
                       step=50)
    ),
    column(6,
           h4("Select degrees of freedom"),
           sliderInput(inputId= 'tV',
                       label ='Degrees of freedom',
                       min=2, max=50,
                       value=5,
                       step=1)
    )
  ),
  fluidRow(
    plotOutput("tPDF")
  )
)
# Landing tab with usage instructions; selected by default (see sidebar).
aboutTab = tabItem(
  tabName = "about",
  tags$h3("Examinations of common probability distributions"),
  tags$p("Select one of the distributions from the sidebar to start the visualisation."),
  tags$p("Use the sliders to change the sample size or the parameters.")
)
# UI definition
# Sidebar menu tabName values must match the tabItem tabName values above.
shinyUI(
  dashboardPage(
    dashboardHeader(title="Common Distributions"),
    dashboardSidebar(
      sidebarMenu(
        menuItem("About", tabName="about", selected = TRUE),
        menuItem("Normal Distribution", tabName = "normDis"),
        menuItem("Poisson Distribution", tabName="poissonDis"),
        menuItem("Chi Square Distribution", tabName="chisqDis"),
        menuItem("Student T Distribution", tabName="tDis")
      )
    ),
    dashboardBody(
      tabItems( aboutTab, normTab, poissonTab, chisqTab, tTab )
    )
  )
)
|
2480b0783dc135335439baadea4c51a40b93c7b2
|
bfe4fa7d35d25dbd4749c7db18284630743f943b
|
/Simple plots/simple_plots.R
|
eee008e3fac9338657a76a1aada830654f109143
|
[] |
no_license
|
innertron/REU_Kam
|
0ccfe2d4e178b241cdf836d9c066188dbbd65e82
|
bf4028b193f13cc202f66cd28963290722b312ac
|
refs/heads/master
| 2021-01-17T20:16:07.187792
| 2016-08-02T21:29:38
| 2016-08-02T21:29:38
| 61,396,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
r
|
simple_plots.R
|
# Plot the first second of channels 1 and 2 of the recording held in the
# global data frame `neural_data`.
Ch1 <- subset(neural_data, select = c(Time, X1.Chan..1), Time > 0 & Time < 1, drop = TRUE)
Ch2 <- subset(neural_data, select = c(Time, X2.Chan..2), Time > 0 & Time < 1, drop = TRUE)
plot(Ch1, ylab = "Voltage", col = "green", type = "l",
     main = "Simple plot of the first and second channel \n during first second")
lines(Ch2, type = "l", col = "red")
# Use lty (line type) in the legend: the series are drawn as lines, and
# the original pch = "ll" is not a valid plotting character (it only
# drew the letter "l" as the legend symbol).
legend(0.025, 0.17, c("Channel 1", "Channel 2"), col = c("green", "red"), lty = 1)
# Plot all the data from channels 1 and 2.
Ch1 <- subset(neural_data, select = c(Time, X1.Chan..1), drop = TRUE)
Ch2 <- subset(neural_data, select = c(Time, X2.Chan..2), drop = TRUE)
plot(Ch1, ylab = "Voltage", col = "green", type = "l",
     main = "Simple plot of the first and second channel \n during the entire experiment")
lines(Ch2, type = "l", col = "red")
legend(4, 4, c("Channel 1", "Channel 2"), col = c("green", "red"), lty = 1)
|
53ac4c8cea8fe34539a5c8e9c1788dd0a418d290
|
aebfb7d9c03a2d349f66c1c335287e4e14d58071
|
/man/format_quarter.Rd
|
a093eeada9ee9a67d1e7fdd40241ede898b6cfbf
|
[] |
no_license
|
lixixibj/foss
|
d99cf1e9edc25bdfabf405922557b6c3858782cd
|
5c02c6758a628b08a2549aee4b9c53fe05d714a0
|
refs/heads/master
| 2023-06-14T11:02:08.176633
| 2021-07-09T01:24:36
| 2021-07-09T01:24:36
| 266,596,085
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 417
|
rd
|
format_quarter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.multiple.sub.seasonal.series.R
\name{format_quarter}
\alias{format_quarter}
\title{Get formatted quarters from the original quarterly values}
\usage{
format_quarter(original.quarter)
}
\arguments{
\item{original.quarter}{eg. c(3,4,5,7)}
}
\value{
eg.c(3,4,5,7)---->c(3,4,1,3)
}
\description{
Get formatted quarters from the original quarterly values.
}
|
1962142ad5e63646aa65f9d223c3c44dd28f1e1f
|
32b4ed8cae7e3c3dfb4f1daa54c43a4250db85a7
|
/man/sim.ctsCH.Rd
|
6efcfe1e147df83a3965ebfe4546b69797d27a09
|
[] |
no_license
|
david-borchers/scrmlebook
|
0405c3ee269390424635d1cdf78ddd30bd726db6
|
c83da97b1a3c5cdfa47e1db8d1903185eae2ba76
|
refs/heads/main
| 2023-02-21T10:00:56.392408
| 2021-01-26T08:41:49
| 2021-01-26T08:41:49
| 333,022,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,613
|
rd
|
sim.ctsCH.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CTfns.R
\name{sim.ctsCH}
\alias{sim.ctsCH}
\title{Simulate CT data.}
\usage{
sim.ctsCH(traps.obj_, mask.obj_ = NULL, buffer_ = NULL,
haz.type_ = "IndL", detector.type_ = "Prox", gam.obj_ = NULL,
fit.obj_ = NULL, cycle.hrs_ = c(0, 24), mesh.size_ = 1000,
dens.surf_, g0_ = NULL, lambda0_ = NULL, sigma_ = NULL, endpoint_,
setup.hour_ = 0, occ.interval_ = 24)
}
\arguments{
\item{traps.obj_}{An secr traps object.}
\item{mask.obj_}{An secr mask object. If not supplied make.mask will be used to create the mask.}
\item{buffer_}{Buffer radius added to traps.obj_ to create the mask (if not supplied).}
\item{haz.type_}{The type of detection hazard to use in the simulation. Choices c("IndL", "IndU", "Dep") include the two
independent hazard parameterisations and the dependent hazard parameterisation.}
\item{gam.obj_}{GAM obj to simulate capture times from according to a time varying spline hazard.}
\item{fit.obj_}{A fitted glm object. The spline coefficients are extracted from the object when simulating.}
\item{cycle.hrs_}{The start and end of a single detection hazard cycle, defaults to c(0,24) to represent a daily cycle.}
\item{mesh.size_}{Determines the resolution of the time vector. This argument determines the vector length from the seq() command.}
\item{dens.surf_}{The density to use in the simulation. If a single number then the data will be simulated with a constant density.
The argument can also be a matrix of values of the same dimension as the mask that is supplied.}
\item{g0_}{The intercept parameter of the detection function. Supplied when appropriate (for an independent linked hazard).}
\item{lambda0_}{The intercept parameter of the encounter rate function. Supplied when appropriate (for an independent unlinked
or dependent hazard).}
\item{sigma_}{The range parameter of the detection / encounter rate function. Will be NULL when a dependent hazard is used.}
\item{endpoint_}{Specifies the duration of the simulation survey.}
\item{setup.hour_}{Used when the survey starts at some point within the first cycle.}
\item{occ.interval_}{The duration of an occasion. Used to construct the discrete-time secr capthist object.}
\item{detector.type_}{Specifies the type of detector. Defaults to using a "proximity" or passive detector but can also simulate
from an array of single-catch traps.}
}
\value{
Returns the capture histories with continuous times as a data frame, and with an occasion structure as a secr capthist object.
}
\description{
Simulates continuous-time datasets.
}
|
59256a6d45ae8fd037464dad5d360952dd743ee7
|
c0a08d09bb804cbfd9c186ba4e5c75dfb3e4faee
|
/man/TSGMM_Binom.Rd
|
60d6ee9d39dfcf7daf95bf5b98dc17f5dffe758f
|
[] |
no_license
|
lalondetl/GMM
|
bd1b21f694a35c09484dfb6e99aa974a6f6a48f6
|
628889324529b0b86f55a4d2054bed5442af0b40
|
refs/heads/master
| 2018-07-05T06:26:23.000514
| 2018-05-31T20:57:15
| 2018-05-31T20:57:15
| 119,891,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,906
|
rd
|
TSGMM_Binom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TSGMM_Binom.R
\name{TSGMM_Binom}
\alias{TSGMM_Binom}
\title{Two-Step Generalized Method of Moments, Longitudinal Count (number of events from n trials) Outcome}
\usage{
TSGMM_Binom(ymat, subjectID, Zmat, Xmat, Tvec, N, mc = "EC",
covTypeVec = c(-1))
}
\arguments{
\item{ymat}{The matrix of responses, ordered by subject, time within subject. The first column is the number of successes, the second the number of failures.}
\item{subjectID}{The vector of subject ID values for each response.}
\item{Zmat}{The design matrix for time-independent covariates.}
\item{Xmat}{The design matrix for time-dependent covariates.}
\item{Tvec}{The vector of times for each subject.}
\item{N}{The number of subjects.}
\item{mc}{The method of identifying appropriate moment conditions, either 'EC' for extended classification (default) or 'Types' for user-identified types.}
\item{covTypeVec}{The vector indicating the type of each time-dependent covariate, according to the order of the columns of Xmat.}
}
\description{
This function calculates the Generalized Method of Moments (GMM) parameter estimates and standard errors for longitudinal count (0-n) responses. It is assumed that the count represents the number of events from n identical trials, and that n is equal for all subjects and times. This is modeled similarly to a Logistic Regression for Binomial responses. The function allows for unbalanced data, meaning subjects can have different numbers of times of observation. Both time-independent covariates and time-dependent covariates can be accommodated. Time-dependent covariates can be handled either by specifying the type of each time-dependent covariate, or by allowing the data to determine appropriate moment conditions through the extended classification method.
}
\examples{
TSGMM_Binom()
}
\keyword{GMM}
|
e5a69dae703037a1d9c0f65e329f3941df2b13a4
|
7e1aa6f485ab8b3446d6d653dfe99b0dec2f42c9
|
/Dimensionality-Reduction-PCA.R
|
2fd24498a926a97964777b8fd876443e7bfbfa76
|
[] |
no_license
|
mkaur7/Data-Mining
|
cd650aeec6ea0d42db3f3e6459c4e9e04c76c92b
|
2a7dc67293f76cd6dd6b59a1f60c9da8fb704a43
|
refs/heads/master
| 2021-07-06T09:18:33.570622
| 2017-10-02T23:18:37
| 2017-10-02T23:18:37
| 105,594,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,116
|
r
|
Dimensionality-Reduction-PCA.R
|
library(stats) #to use function prcomp()
library(factoextra) #to use functions get_eig() and fviz_screeplot()
Wine <- read.csv("C:/Users/Me/Desktop/Sem 3/Data Mining/Assignments/Data_Mining/Wine.csv") #reads the csv file
wine_matrix <- as.matrix(Wine) #reads the csv file as a matrix
# Columns 2:14 are the 13 chemical attributes; column 1 is the class label.
# NOTE(review): prcomp() is run on unscaled data, so the large-magnitude
# column (X1065, proline) dominates PC1 (99.8% of variance below) —
# consider prcomp(..., scale. = TRUE) if attributes should be weighted
# equally. The conclusions in the comments below assume the unscaled run.
wine.pca <- prcomp(wine_matrix[,2:14]) #gets the table for PCs vs Attributes for all 13 attributes
wine.pca #displays PCs vs Attributes table
wine.eigen <- get_eig(wine.pca) #gets the eigenvalues for each PC
wine.eigen #displays eigenvalues
wine.plot <- fviz_screeplot(wine.pca, ncp = 13, linecolor = "red", addlabels=TRUE) + theme_gray() #plots the bar graph to show the importance of each PC
wine.plot #displays the barplot
#TABLE FOR PCs vs ATTRIBUTES
# PC1 PC2 PC3 PC4 PC5
#X14.23 -0.0016464031 0.0007166319 -0.018417035 -0.141207907 -0.016866662
#X1.71 0.0006735032 0.0025001779 -0.121896019 -0.160313812 0.616815902
#X2.43 -0.0001948773 0.0046709015 -0.052106139 0.009848420 -0.019752046
#X15.6 0.0046271444 0.0287250643 -0.938281086 0.331947048 -0.062944719
#X127 -0.0174715429 0.9992801054 0.032186047 0.005173260 0.005610088
#X2.8 -0.0009863499 0.0007484732 0.040357265 0.074588860 -0.314757225
#X3.06 -0.0015575348 -0.0004491148 0.084880477 0.169135057 -0.523387659
#X.28 0.0001223031 -0.0013428434 -0.013566798 -0.010786504 0.029717213
#X2.29 -0.0005912858 0.0047111080 0.024002521 0.050216222 -0.249804915
#X5.64 -0.0023300597 0.0154838007 -0.292736147 -0.878512819 -0.331631000
#X1.04 -0.0001708674 -0.0007979233 0.026072208 0.060004447 -0.051812038
#X3.92 -0.0006850453 -0.0043427734 0.068496061 0.178448921 -0.256727053
#X1065 -0.9998302063 -0.0173653604 -0.004503435 0.003115178 0.002292603
# PC6 PC7 PC8 PC9 PC10
#X14.23 0.190857906 -0.923599698 -2.916309e-01 -6.615858e-02 -3.548615e-03
#X1.71 0.739484660 0.148439828 6.491606e-02 -1.841686e-02 -1.814407e-02
#X2.43 0.041668481 -0.045816409 1.484446e-01 -8.130224e-02 -8.445019e-02
#X15.6 -0.024984199 -0.030424721 -1.506867e-02 -2.238019e-03 3.597246e-03
#X127 -0.001518516 -0.002247297 3.436681e-03 2.468916e-03 -1.409604e-04
#X2.8 0.281973257 0.012032021 1.706091e-01 -2.486031e-01 8.498932e-01
#X3.06 0.438073743 0.029518540 2.405978e-01 -3.859621e-01 -5.162056e-01
#X.28 -0.022353206 0.005465204 -6.482046e-03 -4.041961e-02 3.911001e-02
#X2.29 0.243437316 0.314104819 -8.678638e-01 6.486000e-02 8.423359e-03
#X5.64 0.004658713 0.113187266 8.366982e-02 9.620884e-02 -2.399143e-02
#X1.04 -0.023310064 -0.031909969 1.819775e-03 -3.113806e-02 -3.860731e-02
#X3.92 0.287449896 -0.091437125 2.030626e-01 8.728265e-01 7.749797e-03
#X1065 -0.001224489 0.001089646 1.352366e-07 4.516736e-05 5.190444e-05
# PC11 PC12 PC13
#X14.23 -0.0137921427 -1.612594e-02 -9.047753e-03
#X1.71 -0.0233308251 6.740455e-02 1.140845e-02
#X2.43 0.9537701091 -1.334115e-01 1.722987e-01
#X15.6 -0.0529416863 5.517945e-03 -1.739796e-03
#X127 -0.0029473023 5.941899e-04 -2.348778e-03
#X2.8 0.0062519430 4.659242e-03 2.811223e-02
#X3.06 -0.1344622065 -3.679138e-02 -6.866896e-02
#X.28 0.1976484705 1.488168e-01 -9.663687e-01
#X2.29 0.1364547674 -1.361987e-02 1.682009e-02
#X5.64 -0.0093626418 5.023155e-02 4.492290e-03
#X1.04 0.0989492057 9.752111e-01 1.678747e-01
#X3.92 0.0330735957 1.000077e-02 -4.725937e-02
#X1065 -0.0002433867 -9.848302e-05 -3.370282e-05
#EIGENVALUES OF EACH PC
# eigenvalue variance.percent cumulative.variance.percent
#Dim.1 9.918559e+04 9.981074e+01 99.81074
#Dim.2 1.708619e+02 1.719388e-01 99.98268
#Dim.3 9.434550e+00 9.494015e-03 99.99218
#Dim.4 5.019168e+00 5.050804e-03 99.99723
#Dim.5 1.233160e+00 1.240932e-03 99.99847
#Dim.6 8.439492e-01 8.492685e-04 99.99932
#Dim.7 2.783340e-01 2.800883e-04 99.99960
#Dim.8 1.520191e-01 1.529773e-04 99.99975
#Dim.9 1.097830e-01 1.104749e-04 99.99986
#Dim.10 7.205192e-02 7.250605e-05 99.99993
#Dim.11 3.774059e-02 3.797847e-05 99.99997
#Dim.12 2.118293e-02 2.131645e-05 99.99999
#Dim.13 8.210697e-03 8.262448e-06 100.00000
#According to the plot, the first Principal Component explains 99.8% of the
#variance, which makes it clearly the most satisfactory component: it
#accounts for the large proportion of the variability that is required to
#achieve a good dimensionality reduction.
#Therefore, in my opinion, only the first Principal Component is needed
#to represent all 13 attributes of these data in the new data space.
|
b420ab8b99efd4c1fae9a3292b422922cfb0d5dc
|
f29f5a1caa2b3cd95b1aa50247cba1a9eafe4299
|
/Caso 1/mediacontaminante.R
|
196df6a3d881bb07e771d59247cf1c603b442e15
|
[] |
no_license
|
Mishdz/Programacion_Actuarial_III
|
7488576c9fa71f2a2e40aa26fb8afcff7f079d59
|
d551a2499ebec0747ac6cd318e4c7dfe89acc02a
|
refs/heads/master
| 2021-01-13T05:06:23.446060
| 2017-06-09T21:40:00
| 2017-06-09T21:40:00
| 81,249,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 516
|
r
|
mediacontaminante.R
|
# Compute the mean concentration of a pollutant across a set of monitor files.
#
# Arguments:
#   directorio   - directory containing the monitor CSV files (e.g. "001.csv").
#                  NOTE: the original version never used this argument and read
#                  from the working directory instead.
#   contaminante - pollutant column to average: "sulfate" or "nitrate".
#   id           - integer vector of monitor ids (default 1:332).
#
# Returns the mean of the pollutant values, ignoring NAs.
#
# Fixes relative to the original:
#   * the accumulator was declared as `sum` but appended to as `cnt`
#     (undefined), which would error on first use;
#   * formatC was called with the typo `with = 3` instead of `width = 3`,
#     so ids were not zero-padded to 3 digits;
#   * the filename was built as "001csv" (missing the "." before the
#     extension);
#   * an unknown pollutant silently produced a dangling paste(); now stop().
mediacontaminante <- function(directorio = "~/GitHub/Programacion_Actuarial_III/specdata",
                              contaminante, id = 1:332) {
  # Accumulator for pollutant readings across all requested monitors.
  cnt <- numeric()
  for (m in id) {
    # Monitor files are named with a zero-padded 3-digit id, e.g. "001.csv".
    id_1 <- formatC(m, width = 3, flag = "0")
    vri <- read.csv(file.path(directorio, paste0(id_1, ".csv")), header = TRUE)
    if (contaminante == "sulfate") {
      cnt <- c(cnt, vri$sulfate)
    } else if (contaminante == "nitrate") {
      cnt <- c(cnt, vri$nitrate)
    } else {
      stop(paste("Contaminante", contaminante, "inexistente"))
    }
  }
  mean(cnt, na.rm = TRUE)
}
|
5ccd4ff139134785e666bd055ef2a072e93e7be9
|
e368c0a230dc240161c2298a63f5d2db1dcfbeb4
|
/R/batchHansen.R
|
4c53b60250a2722c71a6cac0e13e47fb2ceb4df9
|
[] |
no_license
|
andrew-hipp/maticce
|
22e93468d00889157af6b4266476719ad7b19948
|
585d64aaa0663c2f285730196f6237f5609d2b56
|
refs/heads/master
| 2020-12-22T08:18:04.414379
| 2020-01-28T11:53:06
| 2020-01-28T11:53:06
| 236,723,460
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,215
|
r
|
batchHansen.R
|
# ---------------------------------------------------------------------
# FUNCTIONS FOR PERFORMING A SERIES OF OU ANALYSES ON A BATCH OF TREES
# ---------------------------------------------------------------------
## Changes needed:
## - measurement error portions need to be fixed
## - In a better world, allow graphical selection of subtrees to test on a single tree, then extract defining taxa
## based on those nodes, using locator() or something like it.
runBatchHansen <-
# 11 nov 08: renamed to runBatchHansen
# Runs batchHansen() (and optionally brown()) over a list of ouch-style trees.
# Arguments:
#   "ouchTrees" = list of OUCH-style (S4 'ouchtree') trees; a single tree is
#       wrapped into a one-element list
#   "characterStates" = vector of character states, either named to match the
#       tree node labels, or in the same order as the terminals / nodes
#   "cladeMembersList" = list of vectors containing names of the members of
#       each clade (except for the root of the tree)
#   "filePrefix" / "di" = optional prefix and directory used to save each
#       fitted model to disk as an .Rdata file
#   "maxNodes" = maximum number of nodes changing regime simultaneously
#       (passed to regimeVectors)
#   "brown" = whether to analyse the data under a Brownian motion model
#       (NOTE(review): batchHansen() below stops when brown = TRUE)
#   "..." = additional arguments to pass along to hansen
function(ouchTrees, characterStates, cladeMembersList, filePrefix = NULL, di = NULL, nodeNames = NULL, maxNodes = length(cladeMembersList), regimeTitles = NULL, brown = FALSE, ...) {
  ## Normalise input: accept a single ouchtree and verify every element's class.
  if(is(ouchTrees,'ouchtree')) ouchTrees <- list(ouchTrees)
  treeCheck <- unlist(lapply(ouchTrees, function(x) is(x,'ouchtree')))
  if(FALSE %in% treeCheck)
    stop(paste('This function has been rewritten to use the new S4 ', sQuote('ouchtree'), ' class.',
               '\nYou can generate a tree of this class by calling ', sQuote('ouchtree()'), '.', sep = ""))
  ## Check character states: they must either be named to match each tree's
  ## node labels, or have the same length as the terminals or as all nodes.
  for (i in 1:length(ouchTrees)) {
    dataFlag <- NULL
    stopFlag <- FALSE
    tree <- ouchTrees[[i]]
    terminals <- tree@nodelabels[(tree@nnodes - tree@nterm + 1):tree@nnodes]
    if(any(FALSE %in% (terminals %in% names(characterStates)))) {
      message(paste("Not every terminal branch in tree", i, "has a corresponding name in", sQuote("characterStates")))
      if(length(characterStates) == tree@nterm) {
        message("Data assumed to be in the same order as terminals")
        dataFlag <- 'sameOrderTerminals'
      }
      if(length(characterStates) == tree@nnodes) {
        message("Data assumed to be in the same order as nodes;\nany data not associated with a terminal branch will be ignored")
        dataFlag <- 'sameOrderNodes'
      }
      if(identical(dataFlag, NULL)) stopFlag <- TRUE
      message("-------------------\n")
    }
    else dataFlag <- 'named'
    if(stopFlag) stop("Correct discrepancies between trees and data and try again!")
  }
  # NOTE(review): dataFlag is re-assigned for each tree in the loop above, but
  # only the value from the LAST tree survives into the fitting loop below —
  # confirm this is intended when trees differ in labelling.
  if(!identical(di, NULL)) dir.create(di)
  # NOTE(review): these try() calls look up sqrt.alpha / sigma in the calling
  # environment and default them to 1 when unset — an unusual idiom; verify.
  if(class(try(sqrt.alpha, silent = TRUE)) == 'try-error') sqrt.alpha = 1 # sets sqrt.alpha to 1 if it has not been assigned already
  if(class(try(sigma, silent = TRUE)) == 'try-error') sigma = 1 # sets sigma to 1 if it has not been assigned already
  # Enumerate all candidate regime paintings for every tree.
  ar = regimeVectors(ouchTrees, cladeMembersList, maxNodes)
  hansenBatch <- thetas <- vector('list',length(ouchTrees))
  for (i in 1:length(ouchTrees)) {
    # Build the per-tree file prefix for saving fitted models, if requested.
    fP <- NULL
    if(!identical(filePrefix, NULL)) fP <- paste(filePrefix, ".t", i, ".", sep = "")
    if(!identical(di, NULL)) fP <- paste(di, "/", fP, sep = "")
    tree <- ouchTrees[[i]]
    if(identical(regimeTitles, NULL)) {
      regimeTitles <- as.character(1:length(ar$regList[[i]]))
      if(brown) regimeTitles <- c(regimeTitles, 'brown')
    }
    ## rescale tree if requested
    # if(rescale>0) tree@times <- rescale * tree@times / max(tree@times)
    ## make sure data fits the tree: reorder/pad characterStates into one
    ## value per node, according to the dataFlag determined above
    dataIn <- NULL
    if(dataFlag == 'sameOrderTerminals') dataIn <- c(rep(NA, tree@nnodes - tree@nterm), characterStates)
    if(dataFlag == 'sameOrderNodes') dataIn <- characterStates
    if(dataFlag == 'named') dataIn <- characterStates[match(tree@nodelabels, names(characterStates))]
    if(identical(dataIn, NULL)) stop(paste("There is a problem with your data that I failed to catch at the outset of", sQuote('runBatchHansen()')))
    else names(dataIn) <- tree@nodes
    ## send it off to batchHansen and just stick the results in hansenBatch... this won't work as the number of regimes gets large,
    ## so there should be some option here to just hang onto the coefficients for each run (i.e., hang onto 'coef(hansen(...))' rather than 'hansen(...)')
    ## there could also be an option to save the entire object as a series of files in addition to hanging onto
    hb <- batchHansen(tree, dataIn, ar$regList[[i]], regimeTitles, brown, fP, sqrt.alpha, sigma, ...)
    # return(hb) ### ONLY FOR DEBUGGING
    hansenBatch[[i]] <- hb$treeData
    thetas[[i]] <- hb$thetas
    # thetas[[i]] <- coef(hb)$theta[[1]] ## assumes only a univariate case... maticce is not currently set up for multivariate datasets
    message(paste("Tree",i,"of",length(ouchTrees),"complete", "\n-----------------------------"))
  }
  # Assemble results; N is taken from the last tree processed (i after loop).
  outdata <- list(hansens = hansenBatch, thetas = thetas, regList = ar$regList, regMatrix = ar$regMatrix, nodeMatrix = ar$nodeMatrix, brown = brown, N = ouchTrees[[i]]@nterm, nodeNames = nodeNames, analysisDate = date(), call = match.call())
  class(outdata) <- 'hansenBatch'
  return(outdata)}
batchHansen <-
# Runs hansen on a tree over a batch of selective regimes.
# Arguments:
#   "tree" = the standard OUCH-style (S4) tree
#   "data" = named vector of character states, one per node (NAs for internal
#       nodes as needed)
#   "regimesList" = list of regime-paintings as output from regimeVectors
#   "regimeTitles" = row labels for the output matrices
#   "brown" = must be FALSE; the Brownian-motion branch is discontinued here
#   "filePrefix" = if non-NULL, each fitted model is saved as <prefix>r<i>.Rdata
#   "sqrt.alpha", "sigma" = initial values for the hansen() optimisation
# Value: a list with "treeData" (matrix: one row per regime, columns loglik /
#   dof / sigma.squared / alpha) and "thetas" (regimes-by-nodes matrix of
#   fitted optima)
function(tree, data, regimesList, regimeTitles, brown, filePrefix = NULL, sqrt.alpha, sigma, ...) {
  # NOTE(review): because this stop() fires whenever brown is TRUE, the
  # if(brown) branches further down are unreachable (retained dead code).
  if(brown) stop("Including the Brownian motion model has been discontinued in batchHansen")
  n <- tree@nterm
  ## set up a matrix that returns lnL, K, sigmasq, theta0, and sqrt.alpha for every model
  ## thetas go into a models-by-branch matrix
  hansenOptima <- list(length(regimeTitles))
  variables <- c("loglik", "dof", "sigma.squared", "theta / alpha") # only display variables... set the selecting variables in the next two lines
  brVars <- c("loglik", "dof", "sigma.squared", "theta")
  haVars <- c("loglik", "dof", "sigma.squared", "alpha")
  if(brown) thetaModels <- regimeTitles[1: (length(regimeTitles) - 1)]
  else thetaModels <- regimeTitles
  # One row per model, one column per node: the fitted optimum painted onto
  # each branch under that model.
  thetas <- matrix(NA,
                   nrow = length(thetaModels),
                   ncol = tree@nnodes,
                   dimnames = list(thetaModels, tree@nodes))
  treeData <- matrix(data = NA, nrow = length(regimeTitles), ncol = length(variables), dimnames = list(regimeTitles,variables))
  if(brown) {
    br <- brown(data, tree)
    if(!identical(filePrefix, NULL)) save(br, file = paste(filePrefix, 'b.Rdata', sep = ""))
    treeData["brown", ] <- unlist(summary(br)[brVars])
  }
  for (i in seq(regimesList)) {
    # A regime painting containing NA cannot be fitted; leave its row as NA.
    if(any(is.na(regimesList[[i]]))) {
      message(paste("skipping regime", i))
      treeData[i, ] <- rep(NA, dim(treeData)[[2]])
    }
    else {
      message(paste("Running regime",i))
      ## at this point, the user has to give an initial sqrt.alpha and sigma for hansen to search on... this should be relaxed
      ha = hansen(data = data, tree = tree, regimes = regimesList[[i]], sqrt.alpha = sqrt.alpha, sigma = sigma, ...)
      #return(ha) # ONLY FOR DEBUGGING
      treeData[i, ] <- unlist(summary(ha)[haVars])
      # Expand the per-regime optima to one value per node via the painting.
      thetas[i, ] <- ha@theta$data[ha@regimes[[1]]]
      if(!identical(filePrefix, NULL)) save(ha, file = paste(filePrefix, 'r', i, '.Rdata', sep = ""))
    }
  }
  outdata <- list(treeData = treeData, thetas = thetas)
  return(outdata) }
|
5e19629f3d73d7ed79c6bd8302727e4dde7775a1
|
625f1f1c7129dea80aee80f39c0b13fe0eeef8c0
|
/bayse/rstan_test2.R
|
5d9f6fda33acd5ef0ff6536adfacd220470b5909
|
[] |
no_license
|
CarolShiomiya/sugakubunka-statistics
|
51895e57f8ee06e4c0ec435ec960a36c8e0c6a7d
|
2a92d421569ceaca7a1390700c16a4eb75e62b16
|
refs/heads/master
| 2020-03-19T17:38:55.801066
| 2020-03-04T07:30:05
| 2020-03-04T07:30:05
| 136,770,223
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 602
|
r
|
rstan_test2.R
|
# Simulate noisy observations of a latent measurement and fit a local-level
# model with Stan.
#
# Fixes relative to the original script:
#   * stan(), traceplot() and stan_hist() were called without loading rstan;
#   * set.seed(1) came AFTER the random draws, so the simulated data were
#     not reproducible — the seed is now set before any draw;
#   * the element-by-element loop filling Observed is replaced by a single
#     vectorized rnorm() call (rnorm recycles the mean vector).
library(rstan)

set.seed(1)

# True latent mean and 1000 noisy measurements of it.
RealMu <- 300
measure <- rnorm(1000, RealMu, 30)
# Each observation adds N(0, 3) observation noise to its measurement.
Observed <- rnorm(1000, measure, 3)

# Package the data for Stan (the model expects `Observed` and its length `n`).
Observed <- list(Observed = as.numeric(Observed), n = length(Observed))

# Local-level model: latent theta[i] ~ N(mu, val), observed with sd 3.
localLevelModel_1 <- "
data{
int n;
vector[n] Observed;
}

parameters{
real mu;
real<lower=0> val;
vector[n] theta;
}

model{
for(i in 1:n)
theta[i]~normal(mu,val);
for(i in 1:n)
Observed[i]~normal(theta[i],3);
}
"

# Fit with 3 chains, discarding the first 100 iterations of each as warmup.
model <- stan(
  model_code = localLevelModel_1,
  data = Observed,
  iter = 1100,
  warmup = 100,
  thin = 1,
  chains = 3)

model
traceplot(model)
stan_hist(model, "val", binwidth = 2)
|
a12acae53d653ab898991476643dc1d8d1d5b5c2
|
d1146058bd04b44e0866273dd2a5aab67d43e920
|
/scripts/cummeRbund.R
|
a10c5a0b604e73dd5cf7ec71acd597cc44d27af9
|
[] |
no_license
|
bellerbrock/bioinfocourse
|
234f4d04d1da6ae6e50666e1402a45f06b72c566
|
b9eca463dcba76d40e160f96ce8309cde1b30b22
|
refs/heads/master
| 2021-01-17T05:13:08.493356
| 2017-04-25T20:24:15
| 2017-04-25T20:24:15
| 47,598,053
| 0
| 0
| null | 2015-12-08T04:14:52
| 2015-12-08T04:14:52
| null |
UTF-8
|
R
| false
| false
| 2,335
|
r
|
cummeRbund.R
|
# Cuffdiff differential-expression QC and visualisation with cummeRbund.
# Installs/loads cummeRbund, reads a cuffdiff output directory, and writes a
# series of diagnostic plots to PNG files in the working directory.
source("http://bioconductor.org/biocLite.R")
biocLite("BiocUpgrade")
biocLite("cummeRbund")
library(cummeRbund)
browseVignettes("cummeRbund")
setwd("~/Desktop/ch4_demo_dataset/cuffdiff_out/")
cuff <- readCufflinks()
cuff
##https://pods.iplantcollaborative.org/wiki/display/eot/RNA-Seq_tutorial#RNA-Seq_tutorial-Step5%3AExaminingthegeneexpressiondata
#The squared coefficient of variation is a normalized measure of cross-replicate variability that can be useful for evaluating the quality your RNA-seq data. Run an SCV plot.
png(filename = 'fpkm_scv.png', width = 800, height = 800, units = 'px')
fpkmSCVPlot(genes(cuff))
dev.off()
#plot dispersion
disp<-dispersionPlot(genes(cuff))
disp
# NOTE(review): inside this png device the plot is assigned (invisible), not
# printed — the PNG is likely blank; presumably print(disp) was intended.
png(filename = 'dispersion.png', width = 800, height = 800, units = 'px')
disp<-dispersionPlot(genes(cuff))
dev.off()
#Draw a density plot of genes in the two samples.
dens <- csDensity(genes(cuff))
png(filename = 'density.png', width = 800, height = 800, units = 'px')
csDensity(genes(cuff))
dev.off()
#scatterplot
png(filename = 'scatter.png', width = 800, height = 800, units = 'px')
csScatter(genes(cuff), 'q1', 'q2',smooth=T)
dev.off()
#boxplot
png(filename = 'boxplot.png', width = 800, height = 800, units = 'px')
csBoxplot(genes(cuff))
dev.off()
#boxplot with replicates
png(filename = 'boxplot_rep.png', width = 800, height = 800, units = 'px')
csBoxplot(genes(cuff),replicates=T)
dev.off()
#volcano plot
png(filename = 'volcano.png', width = 2000, height = 2000, units = 'px')
csVolcanoMatrix(genes(cuff))
dev.off()
#sig genes: ids of genes significant at alpha = 0.01
sig <- getSig(cuff, alpha=0.01, level='genes')
length(sig)
#get data from cuff database
sigGenes <- getGenes(cuff,sig)
sigGenes
#get the last 50 significant genes (comment said 100; tail(sig, 50) takes 50)
sigGenes50 <- getGenes(cuff,tail(sig,50))
png(filename = 'thin_heatmap.png', width = 400, height = 1000, units = 'px')
csHeatmap(sigGenes50, cluster='row')
dev.off()
test_gene<-getGene(cuff,'mRNA:Solyc04g005050.1')
png(filename = 'test_plot.png', width = 2000, height = 2000, units = 'px')
expressionPlot(isoforms(test_gene), logmode=T)
dev.off()
#Finally, consider gene FP3, we can find other genes in the database with similar expression patterns
sim = findSimilar(cuff, 'mRNA:Solyc04g005050.1', n=5)
sim
expressionPlot(sim,logMode=T,showErrorbars=F)
b<-expressionBarplot(sim)
#Heatmap with similar genes
h<-csHeatmap(sim,cluster='both')
h
|
e95b9ee9ccccef52fc94474f89c1e7f71707a4fe
|
72ada5d64b3d756b5c50f2a553bf1bdf3736bbb7
|
/Code/rfile.r
|
72f5cb69ba9a4c9f8c1919851794533e5f889864
|
[] |
no_license
|
ljanastas/Student-ML-For-Policy
|
99a1308e6890aa58ab6c8b55fd545899504876fd
|
d9cfe2e4db1ee65e9a908f573f148d99f2061a07
|
refs/heads/master
| 2021-04-30T12:11:22.794276
| 2018-02-12T16:22:56
| 2018-02-12T16:22:56
| 121,269,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36
|
r
|
rfile.r
|
# This is an R file
library(foreign)
|
e98935d38263adfa984b40bae5c8f0d7486c6312
|
c56c0e2ce242ba97e44b007935a5b64869ce5919
|
/man/ant_efficiency.Rd
|
d36ac6a036eef69b8c3d03960b1da4fef4823668
|
[
"MIT"
] |
permissive
|
hugo-marques/ORFID
|
a850f0f5eb45965343bf501e0b821335e48af9c1
|
b06851a3696ac07545b9719f6c35d6c65b528d9d
|
refs/heads/master
| 2022-12-25T12:10:57.745838
| 2022-12-20T19:23:39
| 2022-12-20T19:23:39
| 187,097,236
| 4
| 2
|
MIT
| 2022-02-16T21:13:31
| 2019-05-16T20:30:59
|
R
|
UTF-8
|
R
| false
| true
| 2,368
|
rd
|
ant_efficiency.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ant_efficiency.R
\name{ant_efficiency}
\alias{ant_efficiency}
\title{Detection efficiency for directional Oregon RFID antenna data}
\usage{
ant_efficiency(x, LOC_vec)
}
\arguments{
\item{x}{data frame generated using \code{\link{join_multireader_data}}.}
\item{LOC_vec}{vector of antenna locations from first encountered to last encountered.}
}
\value{
Returns a tibble object.
}
\description{
Determines detection efficiency for each antenna in systems where multiple antennas are used along a linear migration route.
}
\details{
\code{ant_efficiency} determines the detection efficiency of each antenna in a linear migration route. Direction is determined based on the order of locations from first encountered to last encountered, as specified in \emph{LOC_vec}. Use \code{\link{site_summary}} to identify all locations present in the multi reader data, which must be included in \emph{LOC_vec}.
Antenna efficiency is determined by identifying which tags were detected at antenna x and which tags were detected anywhere after/above antenna x. The efficiency of antenna x is then the number of shared tag detections divided by the total number of detections after x. Note that efficiency and shared detections cannot be determined for the final antenna as there are no subsequent detections. Reversing the order of \emph{LOC_vec} can inform efficiency in systems with movement in multiple directions.
}
\examples{
# Create a list containing compiled reader data:
readers <- list(reader_us, reader_ds)
# Join data into a multi-reader array:
PIT_data <- join_multireader_data(readers)
# List readers:
unique(PIT_data$LOC)
# Determine antenna efficiency for animals moving from downstream to upstream:
ant_efficiency(PIT_data, c("downstream_A1", "upstream_A1"))
# Determine antenna efficiency for animals moving from upstream to downstream:
ant_efficiency(PIT_data, c("upstream_A1", "downstream_A1"))
}
\seealso{
\code{\link{import_ORFID}} for importing data files from Oregon RFID ORMR and ORSR antenna readers.
\code{\link{join_multireader_data}} for combining data from Oregon RFID ORMR and ORSR antenna readers into a multi-reader array.
\code{\link{site_summary}} for identifying all locations present in a multi-reader array.
}
\author{
Annika Putt <annika@instream.net>
}
|
38fa259ea1be1a5767d79888538c4994ace46c2c
|
c8f4cb10826757cb5184e6a9c553f3a316e171e0
|
/man/strip_style.Rd
|
5933ce480d113b9f24a92ff9b4664d6361e510c3
|
[] |
no_license
|
arturochian/crayon
|
1763fed8991dd23a6635b8ac8154300d0a5211c0
|
9598b678ecbe2c1a6d1fc67e9f3d55b49b736885
|
refs/heads/master
| 2021-01-15T17:29:37.959919
| 2014-10-15T13:55:06
| 2014-10-15T13:55:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
rd
|
strip_style.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{strip_style}
\alias{strip_style}
\title{Remove ANSI escape sequences from a string}
\usage{
strip_style(string)
}
\arguments{
\item{string}{The input string.}
}
\value{
The cleaned up string.
}
\description{
Remove ANSI escape sequences from a string
}
\examples{
strip_style(red("foobar")) == "foobar"
}
|
c387e707fc80f10c826e296d56e9242a18d63c2f
|
fe68a38a34adaf4c47f154b22dcd82c63353a2b4
|
/R/get_count_matrix_from_FPKMTable_andTotalReads.R
|
60e7c10bbb2435b930232000088319efd2987e8e
|
[
"MIT"
] |
permissive
|
NLM-Reproducibility-Project/May_2019_Epigenome
|
1a8a02a3ff6c0a04d0214ca7691922eda96828ca
|
f0dd3cd55f143eccf4ab091258dd75fd1648d308
|
refs/heads/master
| 2020-05-23T15:36:14.752039
| 2019-05-17T18:37:07
| 2019-05-17T18:37:07
| 186,830,050
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,746
|
r
|
get_count_matrix_from_FPKMTable_andTotalReads.R
|
# Convert a gene-by-sample FPKM table back into (approximate) integer raw read
# counts, using per-sample total mapped reads and per-gene exon lengths.
# Inputs:  RNA_matrix_FPKM.tsv, total_reads_mapped_each_sample.csv (Sup. Table 1)
# Output:  integer_raw_counts_of_genes_among_donor_tissues.tsv
library(EDASeq)
# Read the table containing FPKM data for the donor samples.
tissue_fpkm <- read.table(file = "RNA_matrix_FPKM.tsv",header = TRUE, stringsAsFactors = FALSE)
# Get coordinates of genes (locus format assumed "chr:start-end").
chr_start <- gsub(".*:","",tissue_fpkm$locus)
chr_start <- gsub("-.*","",chr_start)
chr_end <- gsub(".*-","",tissue_fpkm$locus)
# Gene length = absolute span of the locus.
tissue_fpkm$length <- abs(as.numeric(chr_start) - as.numeric(chr_end) )
# Get exon length of the genes, which will be used to transform FPKM back into raw counts.
# Strip the version suffix from the Ensembl gene ids (e.g. "ENSG...#.12" -> "ENSG...#").
ensembl_gene_id <- gsub("\\..*","",tissue_fpkm$gene_id)
# Are there duplicates in the ensembl gene id?
sum(duplicated(ensembl_gene_id)) # [1] 0 # No duplicates
# Get exon length and GC content (column 1 = length, from EDASeq).
gene_length_and_gc_content <- getGeneLengthAndGCContent(ensembl_gene_id, "hsa")
# Exon length of genes
tissue_fpkm$exon_length <- gene_length_and_gc_content[,1]
# For Ensembl gene ids that were discarded, exon_length will be NA: fall back to gene length.
tissue_fpkm$exon_length[is.na(tissue_fpkm$exon_length)] <- tissue_fpkm$length[is.na(tissue_fpkm$exon_length)]
# Discard Placenta, IMR90 and H1 data and keep only data from donor samples;
# replace the 'length' column (8) with 'exon_length' (51).
donor_tissue_fpkm <- tissue_fpkm[,c(1:7,51,9:45)]
# Get rid of "_FPKM" in the colnames
colnames(donor_tissue_fpkm) <- gsub("_FPKM","",colnames(donor_tissue_fpkm))
# Get total reads for each sample so that raw counts can be calculated from FPKM.
# The total_reads_mapped_each_sample.csv is part of Sup. Table 1.
total_reads_per_sample <- read.csv(file = "total_reads_mapped_each_sample.csv", stringsAsFactors = FALSE)
# After manually checking the file:
# one abbreviation appears to be wrong — "BL-3" should be "BL-1".
total_reads_per_sample$Abbreviation[5] # "BL-3"
# Correct the error
total_reads_per_sample$Abbreviation[5] <- "BL-1"
# Check that tissue names (abbreviations) agree between total_reads_per_sample and donor_tissue_fpkm.
colnames(donor_tissue_fpkm)[-(1:9)] %in% total_reads_per_sample$Abbreviation
# NOTE: the sample abbreviation is noted as "EG_3" in tissue_fpkm dataframe
# while the abbreviation in total_reads_per_sample is noted as "EG-3";
# make them the same first: all use '_'!
total_reads_per_sample$Abbreviation <- gsub('-','_',total_reads_per_sample$Abbreviation)
# All sample names in donor_tissue_fpkm also in total_reads_per_sample?
all(colnames(donor_tissue_fpkm)[-(1:9)] %in% total_reads_per_sample$Abbreviation)
# Order donor_tissue_fpkm by tissue abbreviations (first 9 columns are annotation).
ordered_donor_tissue_fpkm <- cbind(donor_tissue_fpkm[,1:9], donor_tissue_fpkm[, sort(colnames(donor_tissue_fpkm)[-(1:9)])] )
# Order total_reads_per_sample by tissue abbreviations so rows align with the columns above.
ordered_total_reads_per_sample <- total_reads_per_sample[order(total_reads_per_sample$Abbreviation),]
# Transform FPKM data into approximate raw counts.
# RPKM = (ExonMappedReads * 10^9 ) / (TotalMappedReads * ExonLength),
# So for paired-end reads here, read_counts = 2 * FPKM * (TotalMappedReads * ExonLength) / 10^9.
# Construct a matrix containing total reads of each sample (replicated by row)
# to vectorise the calculation.
sample_total_read_matrix <- matrix(rep(ordered_total_reads_per_sample$Mapped_RNA_seq.Reads, nrow(ordered_donor_tissue_fpkm) ),
nrow=nrow(ordered_donor_tissue_fpkm), byrow=TRUE)
# Calculate raw counts; use round() to get integers.
donor_raw_counts <- round(2* ordered_donor_tissue_fpkm[,-(1:9)] * (ordered_donor_tissue_fpkm$exon_length) * sample_total_read_matrix/10^9)
# Add back annotation data
donor_raw_counts_with_annotation <- cbind(ordered_donor_tissue_fpkm[,1:9], donor_raw_counts)
# Write the raw counts into a tsv file
write.table(donor_raw_counts_with_annotation, file="integer_raw_counts_of_genes_among_donor_tissues.tsv", sep='\t', row.names = FALSE)
|
f1615aa4c65472b45dea85de56fcb1d57e675ec2
|
eded207ad230f39073fb371b02b2efa7d54b4115
|
/man/f2014.Rd
|
4a622dd62d3ea657266ad9661e79fe3d252e1173
|
[] |
no_license
|
jjchern/meps.hc
|
76be60858509be1077700ddf542dbe9f594b15d5
|
703fb265a60492704fcd859f380aebf0a21772f5
|
refs/heads/master
| 2021-01-01T15:38:34.002273
| 2017-07-19T21:54:13
| 2017-07-19T21:54:13
| 97,664,344
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 493
|
rd
|
f2014.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{f2014}
\alias{f2014}
\title{\code{f2014}}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 34875 rows and 1838 columns.}
\source{
\url{https://meps.ahrq.gov/mepsweb/data_stats/download_data_files_detail.jsp?cboPufNumber=HC-171}
}
\usage{
f2014
}
\description{
MEPS Household Component 2014 Full Year Consolidated Data File
}
\keyword{datasets}
|
685345632f5517710a5f42836b5c86f8ea93c7bf
|
f6c9f760bf10b1f7f8ac7ff3653e4fd180c8720f
|
/R/calc_non_centrality_parameter.R
|
406cfdd802295679b4b2e528e60be5034f8188ee
|
[] |
no_license
|
cran/sprtt
|
05a0f349644683afe6f7c8464945f89938a2d8ef
|
e6bde9996f96d6d559235a4ad99c84fd48b386db
|
refs/heads/master
| 2023-07-20T08:43:45.684070
| 2023-07-06T12:50:02
| 2023-07-06T12:50:02
| 393,568,991
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 716
|
r
|
calc_non_centrality_parameter.R
|
# t-test -----------------------------------------------------------------------
# Non-centrality parameter for a sequential t-test.
#
# For one-sample and paired designs the parameter is d * sqrt(n); for the
# independent two-sample design it is d / sqrt(1/n_x + 1/n_y).
calc_non_centrality_parameter_ttest <- function(seq_ttest_arguments) {
  n_x <- length(seq_ttest_arguments@x)
  n_y <- length(seq_ttest_arguments@y)
  effect <- seq_ttest_arguments@d

  single_group <- seq_ttest_arguments@one_sample == TRUE ||
    seq_ttest_arguments@paired == TRUE

  if (single_group) {
    effect * sqrt(n_x)
  } else {
    effect / sqrt(1 / n_x + 1 / n_y)
  }
}
# ANOVA ------------------------------------------------------------------------
# Non-centrality parameter for a sequential ANOVA: the effect size f squared
# times the total sample size (across all groups, per the slot used below).
calc_non_centrality_parameter_anova <- function(seq_anova_arguments) {
  # Earlier version used per-group n; kept for reference:
  # seq_anova_arguments@f^2 * seq_anova_arguments@n
  seq_anova_arguments@f^2 * seq_anova_arguments@total_sample_size
}
|
b553d8c84b9884b435d74bf13b6674761c65351f
|
e82422412f814eb32d470350e4dfdcd873f77bee
|
/data-raw/dataset.R
|
73b6073293c8b90845b2448dd9da1f7112a9dc3f
|
[
"MIT"
] |
permissive
|
Amalan-ConStat/OlympicRshiny
|
5e3cdceeb80bf5dafbb6ec7fb38c247bcf7e2803
|
de824e5ed42ab3fe7ae753c0a3f86f89cb2b210b
|
refs/heads/main
| 2023-02-08T12:24:22.713681
| 2023-02-01T09:39:15
| 2023-02-01T09:39:15
| 594,175,470
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 513
|
r
|
dataset.R
|
## Code to prepare the `Olympic` dataset shipped with the package.
load("data-raw/Olympic.RDA")

# Where a NOC row has no region name but carries a note, use the note as the
# region (column 2 <- column 3).
missing_region <- is.na(NOC$region) & !is.na(NOC$notes)
NOC[missing_region, 2] <- NOC[missing_region, 3]

# Map the NOC codes to full region names, then store back as character.
Olympic$NOC <- as.character(factor(Olympic$NOC, levels = NOC$NOC, labels = NOC$region))

# Recode sex labels and fix the medal level ordering Gold > Silver > Bronze.
Olympic$Sex <- factor(Olympic$Sex, levels = c("M", "F"), labels = c("Male", "Female"))
Olympic$Medal <- factor(Olympic$Medal, levels = c("Gold", "Silver", "Bronze"),
                        labels = c("Gold", "Silver", "Bronze"))

usethis::use_data(Olympic, overwrite = TRUE)
|
47e04edf257b86c03cd96098dc01ae50389904ac
|
eae08e61892e4125b592ef2063489d35855715d2
|
/scripts/functions.R
|
02d76c02abcd98ee26c399dc308549d4fb0967bc
|
[] |
no_license
|
Aung-Myint-Thein/test-medical-data
|
d183118011995f79516e158d335667825e7f2c64
|
cd1ff4eb5224cb2620981db07f546c5dfd98aa95
|
refs/heads/master
| 2021-01-25T08:43:04.428995
| 2014-05-06T14:14:05
| 2014-05-06T14:14:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,312
|
r
|
functions.R
|
library(Metrics)
library(ggplot2)
library(e1071)
library(reshape)
# Normalize a raw diagnosis code: drop the leading character, then strip all
# punctuation and, finally, any remaining spaces.
clean.diagnosus <- function(diagnosis) {
  trimmed <- substr(diagnosis, 2, nchar(diagnosis))
  no_punct <- gsub("[[:punct:]]", "", trimmed)
  gsub(" ", "", no_punct)
}
# Map a diagnosis code to a diagnosis-group label using ICD-9 or ICD-10
# lookup tables.
#
# Arguments:
#   diagnosis.code - cleaned code string; "" yields "Others"
#   ifICD9/ifICD10 - 1 if the code should be interpreted under that scheme
#   ICD9  - table: col 2 = upper numeric bound of each chapter, col 3 = group
#           name; row 19 holds the E/V-code group (assumed from usage below —
#           TODO confirm against the actual table)
#   ICD10 - table with StartAlpha/EndAlpha letter bounds, EndNum numeric bound
#           and DIAGNOSISGROUP columns (assumed schema; verify with caller)
#
# Returns the matched group name, or "Others" when nothing matches.
get.diagnosis.group <- function(diagnosis.code, ifICD9, ifICD10, ICD9, ICD10){
  diagnosis.group <- "Others"
  if(diagnosis.code != ""){
    ## for ICD 9 codes
    if(ifICD9 == 1){
      # E- and V-prefixed codes both map to the supplementary group (row 19).
      if(substr(diagnosis.code, 1, 1) == "E"){
        diagnosis.group <- ICD9[19, 3]
      } else if(substr(diagnosis.code, 1, 1) == "V"){
        diagnosis.group <- ICD9[19, 3]
      } else{
        # Numeric codes: first 3 digits, matched to the first chapter whose
        # upper bound (col 2) is not exceeded.
        code <- as.numeric(substr(diagnosis.code, 1, 3))
        for(i in 1:18){
          if(code <= ICD9[i, 2]){
            diagnosis.group <- ICD9[i, 3]
            break
          }
        }
      }
    }
    ## for ICD 10 codes
    if(ifICD10 == 1){
      # NOTE(review): this only matches codes whose leading letter equals the
      # range's start OR end letter — letters strictly between the bounds are
      # never matched; confirm whether that is intended.
      for(i in 1:nrow(ICD10)){
        if(substr(diagnosis.code, 1, 1) %in% c(ICD10[i, "StartAlpha"], ICD10[i, "EndAlpha"])){
          if(as.numeric(substr(diagnosis.code, 2, 3)) <= ICD10[i, "EndNum"]){
            diagnosis.group <- ICD10[i, "DIAGNOSISGROUP"]
            break
          }
        }
      }
    }
  }
  return(diagnosis.group)
}
# Bucket an age into 20-year bands (1 = 1-20, 2 = 21-40, ...), capping
# everything above 80 in group 5.
get.age.group <- function(age){
  age.band <- ceiling(age / 20)
  min(5, age.band)
}
|
8186b995d4b4f81683dc0a823ae9569ed30e4594
|
0c1b8779e562b9161f77528bdbabd82f523a84e8
|
/exdata_data_NEI_data/plot1.R
|
7fa3ba1b0ad33c0d2bde4fdd377bbc70e19ee9b2
|
[] |
no_license
|
sanyamsh7/datasciencecoursera
|
004fc1d4faab0ae912c4887ab7a6d8a67119ad05
|
9f0250d564399e6aff0dc9339688a055b2da043f
|
refs/heads/master
| 2023-01-20T13:32:05.886731
| 2020-11-29T06:16:03
| 2020-11-29T06:16:03
| 297,092,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
plot1.R
|
# Plot total PM2.5 emissions for the years 1999, 2002, 2005 and 2008 as a
# bar chart. Both .rds data files must be present in the working directory.

# Load the NEI emissions records and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Sum emissions within each year.
yearly.totals <- aggregate(Emissions ~ year, NEI, sum)

# Render the bar chart to a PNG device, then close it.
png("plot1.png")
barplot(height = yearly.totals$Emissions,
        names.arg = yearly.totals$year,
        col = "blue",
        xlab = "YEARS",
        ylab = expression('total PM'[2.5]*' emission'),
        main = expression('Total PM'[2.5]*'Emissions'))
dev.off()
|
fe7054e646109b0ef1b99729b18eee4789dbdc82
|
516e4fc6ff9b1840be8156d9458e203135fd1d56
|
/R/settler.survival.R
|
8271c42b8b82ab4d2153fabb44f3c8a54e483efb
|
[] |
no_license
|
MarcoAndrello/MetaPopGen_0.0.8
|
dfc30bb1a9ce3ab58beda0d9990d1e4ab62aa54e
|
9d0d38fcbbc8417edf5f7220cdbf55c1f6b2aee7
|
refs/heads/master
| 2021-01-25T12:36:42.201135
| 2018-09-17T18:13:46
| 2018-09-17T18:13:46
| 123,484,225
| 0
| 0
| null | 2018-09-17T18:13:47
| 2018-03-01T19:45:05
|
R
|
UTF-8
|
R
| false
| false
| 91
|
r
|
settler.survival.R
|
# Density-dependent settler survival (Beverton-Holt style): survival starts
# at 1 for S = 0 and declines as the number of settlers S grows, at a rate
# controlled by kappa0.
settler.survival <-
function(S,kappa0) {
  1 / (1 + S / kappa0)
}
|
2cb29cf10a46823a6872a0fa333677c206976576
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ESTER/examples/ictab.Rd.R
|
808bca6678da079651fcdd1b1393bd55692e1e17
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
ictab.Rd.R
|
library(ESTER)
### Name: ictab
### Title: Computes Akaike weights or pseudo-BMA weights for a set of
###   models
### Aliases: ictab

### ** Examples

# Example code extracted from the ictab() help page of the ESTER package:
# fit four nested linear models to mtcars and compare them with AIC- and
# BIC-based information-criterion tables.
library(ESTER)
data(mtcars)
mod1 <- lm(mpg ~ cyl, mtcars)
mod2 <- lm(mpg ~ cyl + vs, mtcars)
mod3 <- lm(mpg ~ cyl + vs + I(vs^2), mtcars)
mod4 <- lm(mpg ~ cyl * vs, mtcars)
mods <- list(mod1 = mod1, mod2 = mod2, mod3 = mod3, mod4 = mod4)
ictab(mods, aic)
ictab(mods, bic)

# The brms-based comparison below is wrapped in \dontrun{} in the Rd page
# because model fitting is slow and requires Stan.
## Not run:
##D library(brms)
##D mod1 <- brm(mpg ~ cyl, mtcars)
##D mod2 <- brm(mpg ~ cyl + vs, mtcars)
##D mods <- list(m1 = mod1, m2 = mod2)
##D ictab(mods, LOO, reloo = TRUE, k_threshold = 0.6, cores = 2)
## End(Not run)
|
dc953b8b5241a5f8ad27b285f49632c9074d65d1
|
5dc217c7d19a8111d1547a6206c829c4d080a8a7
|
/inst/shiny/UI/UI_ref_seq.R
|
2c4d839c87e8c1b7a4278a6667dbb550f0fe03cc
|
[] |
no_license
|
wevanjohnson/animalcules.preprocess
|
f6f9412f4e984699c46052c652c7cad924f5e422
|
95d01432a876867a90012e4a1a1d1227bfea4bfd
|
refs/heads/master
| 2020-05-01T17:30:39.244747
| 2019-05-09T19:18:53
| 2019-05-09T19:18:53
| 177,601,441
| 0
| 2
| null | 2019-05-09T19:18:55
| 2019-03-25T14:21:58
|
R
|
UTF-8
|
R
| false
| false
| 1,302
|
r
|
UI_ref_seq.R
|
# Shiny UI fragment: "Library Generation" tab. Lets the user pick a kingdom
# and library type (representative/reference), then trigger a RefSeq download
# via the "downloadref" action button (handled server-side).
tabPanel(title = "Library Generation",
         mainPanel(# the following lines could be uncommented when the download ref seq can
           # work on the rest of the kingdoms
           # radioButtons("kingdom", "Choose a kingdom:",
           #              c("Archaea" = "archaea",
           #                "Bacteria" = "bacteria",
           #                "Fungi" = "fungi",
           #                "Invertebrate" = "invertebrate",
           #                "Plant" = "plant",
           #                "Protozoa" = "protozoa",
           #                "Vertebrate" = "vertibrate",
           #                "Vertebrate other" = "vertibrate_other",
           #                "Virus" = "viral")
           # ),
           # Currently only bacteria and viruses are supported.
           radioButtons("kingdom", "Choose a kingdom:",
                        c("Bacteria" = "bacteria",
                          "Virus" = "viral")
           ),
           # Checkbox inputs choosing representative and/or reference library.
           checkboxInput("representative", "representative", value = TRUE, width = NULL),
           checkboxInput("reference", "reference", value = FALSE, width = NULL),
           actionButton("downloadref","Download Ref_Seq")
         )
)
|
101932c527cf7dad421bf65e27c65de9d2afd0d5
|
42480ecc4478523696df9809210ae7a7e0bb760b
|
/clase01.R
|
6ae74b026c35bd2d7cf7382d1b2f783319fab199
|
[] |
no_license
|
katymq/CURSO-R-BASICO-2017
|
b109756ebb3c54b6d09a63e1e92286727ad58bb1
|
a3d0ed6564b2dd91d820c664a133a6ada1d5c393
|
refs/heads/master
| 2021-01-23T02:48:12.302532
| 2017-03-24T04:55:14
| 2017-03-24T04:55:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,175
|
r
|
clase01.R
|
########################################################################################
#############################       R BASICS COURSE         #############################
#############################           Lesson 1            #############################
########################################################################################
## **INSTALLING PACKAGES**
# Install from the CRAN repository
install.packages('nombre_paquete', dependencies=TRUE)
install.packages('bootstrap')
# Install several packages at once.
# Fixed: the vector was originally assigned to `paquete`, but the loop below
# iterates over `paquetes`, which raised an "object not found" error.
paquetes <- c("ggplot2","data.table")
lapply(seq_along(paquetes), function(i){
  install.packages(paquetes[[i]])
})
########################################################################################
## **UPDATING PACKAGES**
# Installed packages
installed.packages()
# Platform, version, and package information
sessionInfo()
# Information about a package
library(help='ggplot2')
# The library and require functions
library('ggplot2')
require('ggplot2')
# Both load and attach the requested packages
# Package citations
citation("ggplot2")
# Update installed packages
update.packages()
# Update R itself via the installr package
install.packages('installr', dependencies = TRUE)
library('installr')
updateR()
# Remove packages
remove.packages("nombre_paquete", "directorio")
########################################################################################
## **RSTUDIO PARAMETERS**
# Some session options can be inspected and changed
getOption("width")
getOption("digits")
# Widen the print area
options(width=75)
# Narrow the print area
options(width=45)
runif(10)
# Results with 2 decimal places
options(digits=2)
rexp(10)
# Results with 8 decimal places
options(digits=8)
rnorm(10)
########################################################################################
## **WORKING DIRECTORY**
# Current working directory
getwd()
# Change the working directory
setwd("C:/Users/ZONE TECHNOLOGY/Desktop/R basico")
# List the files in the directory
list.files()
########################################################################################
## **FILE MANAGEMENT**
# Create a new folder in the working directory
dir.create("CARPETA")
setwd("C:/Users/ZONE TECHNOLOGY/Desktop/R basico/CARPETA")
# Create a new R file
file.create("nombre_nuevo_archivo.R")
# Edit the newly created file
file.edit("nombre_nuevo_archivo.R")
# Check whether a file already exists
file.exists("nombre_nuevo_archivo.R")
# File information
file.info("nombre_nuevo_archivo.R")
# Create a copy of a file
file.copy("nombre_nuevo_archivo.R", "nombre_copia_archivo.R")
# Rename a file
file.rename("nombre_nuevo_archivo.R", "nuevo_nombre.R")
# Delete a file
file.remove("nombre_nuevo_archivo.R")
########################################################################################
## Exercise
# Check the current working directory
# Create a new folder in the current working directory called "Clase01"
# Set the "Clase01" folder as the new working directory
# Create a new file called "clase01.R"
# Edit the "clase01.R" file with the following:
#   Install the "knitr" package
#   Generate 20 random numbers from the normal distribution
#     rnorm(20)
#   Increase the print area to 100
#   Decrease the number of decimal places to 1
#   Generate 20 random numbers from the normal distribution
#     rnorm(20)
# Create a copy of "clase01.R" called "clase01copia.R"
# Edit "clase01copia.R", adding the following:
#   Decrease the print area to 40
#   Increase the number of decimal places to 5
#   Generate 30 random numbers from the normal distribution
#     rnorm(30)
# Rename "clase01copia.R" to "ejemplo"
# Delete "clase01.R"
########################################################################################
|
c9f4c96ca33484a4eceb13a839e1d1a23107722a
|
9e8936a8cc7beae524251c8660fa755609de9ce5
|
/R/bart.R
|
63c8d83e51f578cae0d477da10033117da981611
|
[
"MIT"
] |
permissive
|
tidymodels/parsnip
|
bfca10e2b58485e5b21db64517dadd4d3c924648
|
907d2164a093f10cbbc1921e4b73264ca4053f6b
|
refs/heads/main
| 2023-09-05T18:33:59.301116
| 2023-08-17T23:45:42
| 2023-08-17T23:45:42
| 113,789,613
| 451
| 93
|
NOASSERTION
| 2023-08-17T23:43:21
| 2017-12-10T22:48:42
|
R
|
UTF-8
|
R
| false
| false
| 7,773
|
r
|
bart.R
|
#' Bayesian additive regression trees (BART)
#'
#' @description
#'
#' `bart()` defines a tree ensemble model that uses Bayesian analysis to
#' assemble the ensemble. This function can fit classification and regression
#' models.
#'
#' \Sexpr[stage=render,results=rd]{parsnip:::make_engine_list("bart")}
#'
#' More information on how \pkg{parsnip} is used for modeling is at
#' \url{https://www.tidymodels.org/}.
#'
#' @inheritParams boost_tree
#' @param prior_terminal_node_coef A coefficient for the prior probability that
#' a node is a terminal node. Values are usually between 0 and one with
#' a default of 0.95. This affects the baseline probability; smaller numbers
#' make the probabilities larger overall. See Details below.
#' @param prior_terminal_node_expo An exponent in the prior probability that
#' a node is a terminal node. Values are usually non-negative with
#' a default of 2 This affects the rate that the prior probability decreases as
#' the depth of the tree increases. Larger values make deeper trees less likely.
#' @param prior_outcome_range A positive value that defines the width of a prior
#' that the predicted outcome is within a certain range. For regression it is
#' related to the observed range of the data; the prior is the number of standard
#' deviations of a Gaussian distribution defined by the observed range of the
#' data. For classification, it is defined as the range of +/-3 (assumed to be
#' on the logit scale). The default value is 2.
#'
#' @details
#' The prior for the terminal node probability is expressed as
#' `prior = a * (1 + d)^(-b)` where `d` is the depth of the node, `a` is
#' `prior_terminal_node_coef` and `b` is `prior_terminal_node_expo`. See the
#' Examples section below for an example graph of the prior probability of a
#' terminal node for different values of these parameters.
#'
#'
#' @templateVar modeltype bart
#' @template spec-details
#'
#' @template spec-references
#'
#' @seealso \Sexpr[stage=render,results=rd]{parsnip:::make_seealso_list("bart")}
#'
#' @examplesIf !parsnip:::is_cran_check()
#' show_engines("bart")
#'
#' bart(mode = "regression", trees = 5)
#'
#' # ------------------------------------------------------------------------------
#' # Examples for terminal node prior
#'
#' library(ggplot2)
#' library(dplyr)
#'
#' prior_test <- function(coef = 0.95, expo = 2, depths = 1:10) {
#' tidyr::crossing(coef = coef, expo = expo, depth = depths) %>%
#' mutate(
#'       `terminal node prior` = coef * (1 + depth)^(-expo),
#'       coef = format(coef),
#'       expo = format(expo))
#' }
#'
#' prior_test(coef = c(0.05, 0.5, .95), expo = c(1/2, 1, 2)) %>%
#'   ggplot(aes(depth, `terminal node prior`, col = coef)) +
#' geom_line() +
#' geom_point() +
#' facet_wrap(~ expo)
#' @export
bart <-
  function(mode = "unknown", engine = "dbarts",
           trees = NULL, prior_terminal_node_coef = NULL,
           prior_terminal_node_expo = NULL,
           prior_outcome_range = NULL) {
    # Quote the main model arguments so they can be resolved later against
    # the chosen engine (standard parsnip model-specification pattern).
    args <- list(
      trees = enquo(trees),
      prior_terminal_node_coef = enquo(prior_terminal_node_coef),
      prior_terminal_node_expo = enquo(prior_terminal_node_expo),
      prior_outcome_range = enquo(prior_outcome_range)
    )
    # Assemble the (unfitted) specification; `user_specified_*` records
    # whether the caller set mode/engine explicitly rather than by default.
    new_model_spec(
      "bart",
      args = args,
      eng_args = NULL,
      mode = mode,
      user_specified_mode = !missing(mode),
      method = NULL,
      engine = engine,
      user_specified_engine = !missing(engine)
    )
  }
# ------------------------------------------------------------------------------
#' @method update bart
#' @rdname parsnip_update
#' @inheritParams bart
#' @param prior_terminal_node_coef A coefficient for the prior probability that
#' a node is a terminal node.
#' @param prior_terminal_node_expo An exponent in the prior probability that
#' a node is a terminal node.
#' @export
update.bart <-
  function(object,
           parameters = NULL,
           trees = NULL,
           prior_terminal_node_coef = NULL,
           prior_terminal_node_expo = NULL,
           prior_outcome_range = NULL,
           fresh = FALSE, ...) {
    # Re-quote the tunable arguments exactly as `bart()` does.
    args <- list(
      trees = enquo(trees),
      prior_terminal_node_coef = enquo(prior_terminal_node_coef),
      prior_terminal_node_expo = enquo(prior_terminal_node_expo),
      prior_outcome_range = enquo(prior_outcome_range)
    )
    # Delegate to parsnip's shared updater, which merges `args` into
    # `object` (or replaces all existing arguments when `fresh = TRUE`).
    update_spec(
      object = object,
      parameters = parameters,
      args_enquo_list = args,
      fresh = fresh,
      cls = "bart",
      ...
    )
  }
#' Developer functions for predictions via BART models
#' @export
#' @keywords internal
#' @name bart-internal
#' @inheritParams predict.model_fit
#' @param obj A parsnip object.
#' @param ci Confidence (TRUE) or prediction interval (FALSE)
#' @param level Confidence level.
#' @param std_err Attach column for standard error of prediction or not.
bartMachine_interval_calc <- function(new_data, obj, ci = TRUE, level = 0.95) {
  # Compute credible (ci = TRUE) or prediction (ci = FALSE) intervals from a
  # fitted bartMachine regression model wrapped in a parsnip fit `obj`.
  # bartMachine offers no interval support for classification, so bail early.
  if (obj$spec$mode == "classification") {
    rlang::abort("In bartMachine: Prediction intervals are not possible for classification")
  }
  # Whether the prediction-interval method spec asked for a std-error column.
  get_std_err <- obj$spec$method$pred$pred_int$extras$std_error
  # Build the bartMachine call lazily; the function name and the name of the
  # coverage argument (`ci_conf` vs `pi_conf`) differ between interval types.
  if (ci) {
    cl <-
      rlang::call2(
        "calc_credible_intervals",
        .ns = "bartMachine",
        bart_machine = rlang::expr(obj$fit),
        new_data = rlang::expr(new_data),
        ci_conf = level
      )
  } else {
    cl <-
      rlang::call2(
        "calc_prediction_intervals",
        .ns = "bartMachine",
        bart_machine = rlang::expr(obj$fit),
        new_data = rlang::expr(new_data),
        pi_conf = level
      )
  }
  res <- rlang::eval_tidy(cl)
  # Prediction intervals come back as a list holding the interval matrix plus
  # all posterior draws; reduce to the interval (keeping the per-row draw SD
  # first if it was requested).
  if (!ci) {
    if (get_std_err) {
      .std_error <- apply(res$all_prediction_samples, 1, stats::sd, na.rm = TRUE)
    }
    res <- res$interval
  }
  res <- tibble::as_tibble(res)
  names(res) <- c(".pred_lower", ".pred_upper")
  # NOTE(review): scalar condition written with `&`; `&&` would be the
  # conventional choice here.
  if (!ci & get_std_err) {
    res$.std_err <- .std_error
  }
  res
}
#' @export
#' @rdname bart-internal
#' @keywords internal
dbart_predict_calc <- function(obj, new_data, type, level = 0.95, std_err = FALSE) {
  # Compute predictions for a dbarts model from its posterior draws.
  #
  # obj:      parsnip model-fit object (`obj$fit` is the dbarts fit,
  #           `obj$spec$mode` the mode, `obj$lvl` the outcome levels).
  # new_data: data to predict on.
  # type:     one of "numeric", "class", "prob", "conf_int", "pred_int".
  # level:    interval coverage for the interval types.
  # std_err:  attach a `.std_error` column (posterior SD) for interval types.
  #
  # Returns a tibble whose columns follow the tidymodels prediction naming
  # conventions (.pred, .pred_class, .pred_lower, ...).
  mod_mode <- obj$spec$mode
  # Two-sided tail probabilities for the requested coverage.
  lo <- (1 - level) / 2
  hi <- 1 - lo
  # Credible (confidence) intervals use draws of the expected value ("ev");
  # everything else uses posterior-predictive draws ("ppd").
  if (type == "conf_int") {
    post_dist <- predict(obj$fit, new_data, type = "ev")
  } else {
    post_dist <- predict(obj$fit, new_data, type = "ppd")
  }
  if (type == "numeric") {
    res <- tibble::tibble(.pred = apply(post_dist, 2, mean, na.rm = TRUE))
  } else if (type == "class") {
    # Posterior mean event probability; threshold at 0.5 for the hard class.
    mn <- apply(post_dist, 2, mean, na.rm = TRUE)
    lvl <- ifelse(mn > 0.5, obj$lvl[2], obj$lvl[1])
    lvl <- factor(lvl, levels = obj$lvl)
    res <- tibble::tibble(.pred_class = lvl)
  } else if (type == "prob") {
    mn <- apply(post_dist, 2, mean, na.rm = TRUE)
    res <-
      tibble::tibble(a = 1 - mn, b = mn) %>%
      # Fixed: was `obj$lv`, which only worked through `$` partial matching.
      setNames(paste0(".pred_", obj$lvl))
  } else if (type %in% c("conf_int", "pred_int")) {
    if (mod_mode == "regression") {
      res <-
        tibble::tibble(
          .pred_lower = apply(post_dist, 2, quantile, probs = lo, na.rm = TRUE),
          .pred_upper = apply(post_dist, 2, quantile, probs = hi, na.rm = TRUE)
        )
    } else {
      # Classification: quantiles of the event probability per observation;
      # sort each pair so the lower bound precedes the upper, then express
      # bounds for both class probabilities.
      bnds <- apply(post_dist, 2, quantile, probs = c(lo, hi), na.rm = TRUE)
      bnds <- apply(bnds, 1, function(x) sort(x))
      res <-
        tibble::tibble(
          .pred_lower_a = 1 - bnds[, 2],
          .pred_lower_b = bnds[, 1],
          .pred_upper_a = 1 - bnds[, 1],
          .pred_upper_b = bnds[, 2]
        ) %>%
        rlang::set_names(
          c(
            paste0(".pred_lower_", obj$lvl),
            paste0(".pred_upper_", obj$lvl)
          )
        )
    }
    if (std_err) {
      res$.std_error <- apply(post_dist, 2, stats::sd, na.rm = TRUE)
    }
  }
  res
}
|
025141032fcbe72f36773c81ba8d20e49458e70c
|
1162f8a2de179f8b93369f7a76a058180e98aa21
|
/R/rho_test_0.R
|
f60dde624fb789ce55db6a5296c941760f54dba0
|
[] |
no_license
|
elwood-shannon/esfstatistika
|
96c99139f211bb14d1aee65e8f4742a9294e66c6
|
b5cdb4fbaad627c63263bf35d284597c9b90f768
|
refs/heads/master
| 2022-12-19T13:57:27.459372
| 2020-09-24T12:29:24
| 2020-09-24T12:29:24
| 250,825,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,374
|
r
|
rho_test_0.R
|
#' Significance test of a correlation coefficient (against 0)
#'
#' Prints the critical regions (two-sided, left-tailed, right-tailed) and the
#' test statistic for testing whether a correlation coefficient differs from
#' zero.
#'
#' @param r Sample correlation coefficient.
#' @param n Number of observations.
#' @param alfa Significance level (0.05 unless specified otherwise).
#' @param dec Number of decimal places in the printed results (default 10).
#' @return Called for its printed output (critical regions and statistic).
#' @export
rho_test_0 <- function(r, n, alfa = 0.05, dec = 10) {
  df <- n - 2
  t_stat <- r * sqrt(df) / sqrt(1 - r^2)
  crit_two <- qt(p = 1 - alfa / 2, df = df)
  crit_one <- qt(p = 1 - alfa, df = df)
  print('Oboustranna H1 -------------------------------------')
  print(paste('W je: (- nekonecno', ',', round(-crit_two, dec), ']', 'u', '[', round(crit_two, dec), ',', '+ nekonecno)'))
  print('Levostranna H1 -------------------------------------')
  print(paste('W je: (- nekonecno', ',', round(-crit_one, dec), ']'))
  print('Pravostranna H1 ------------------------------------')
  print(paste('W je:', '[', round(crit_one, dec), ',', '+ nekonecno)'))
  print('----------------------------------------------------')
  print(paste('Testovaci kriterium je: ', round(t_stat, dec)))
}
|
67bc3f5806b2e06a2843baed2c86824e5c48dde4
|
af06be512fbe0f94cbc89749d6c1da27e10724b9
|
/source/03_exploratory-analyses.R
|
dbeadfb86f1bc8738b329104fec36fac76ba5a5b
|
[] |
no_license
|
keanarichards/stats-masters
|
f0fd80e2f7eec2fa57722c68aa0972de83360a7c
|
ffd79a42baff8ca46b40dd445a4a05bb2233fb39
|
refs/heads/master
| 2022-12-31T14:46:45.556745
| 2020-10-13T12:36:35
| 2020-10-13T12:36:35
| 262,153,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 837
|
r
|
03_exploratory-analyses.R
|
# Load packages -----------------------------------------------------------
pkg_names <- c("tidyverse", "here", "psych")
# Install any packages not yet available, then attach them all.
to_install <- pkg_names[!pkg_names %in% rownames(installed.packages())]
if (length(to_install) > 0) {
  install.packages(to_install)
}
invisible(lapply(pkg_names, library, character.only = TRUE))

# Load data ---------------------------------------------------------------
wide <- read_csv(here("data", "wide.csv"))

# Find alpha for each condition ------------------------------------------
# Each condition i has four positively keyed item columns:
# intell_i, conf_i, comm_i, prob_i.
scaleKey <- rep(1, 4)
alphas <- vapply(seq_len(4), function(i) {
  condition_items <- wide %>%
    dplyr::select(paste0("intell_", i), paste0("conf_", i),
                  paste0("comm_", i), paste0("prob_", i))
  as.numeric(scoreItems(keys = scaleKey, items = condition_items)$alpha)
}, numeric(1))
|
66603626f392d0496aa6893c362dd9313b300484
|
1443e812411278d1f776f8f7d1196add8e2dcc31
|
/tests/testthat/test_rmarkdown_functions.R
|
2f338f39ee8f7149ef33fe7447b3d3ed8825545d
|
[
"MIT"
] |
permissive
|
WeiSong-bio/roryk-bcbioSinglecell
|
e96f5ab1cb99cf1c59efd728a394aaea104d82b2
|
2b090f2300799d17fafe086bd03a943d612c809f
|
refs/heads/master
| 2020-06-15T23:38:23.802177
| 2018-07-03T21:01:07
| 2018-07-03T21:01:07
| 195,422,697
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
r
|
test_rmarkdown_functions.R
|
context("R Markdown Functions")

# prepareSingleCellTemplate ====================================================
test_that("prepareSingleCellTemplate", {
  # Files the template generator is expected to write into the working
  # directory.
  files <- c(
    "_footer.Rmd",
    "_header.Rmd",
    "_output.yaml",
    "_setup.R",
    "bibliography.bib"
  )
  # The generator should run without any output and create every file.
  expect_silent(prepareSingleCellTemplate())
  expect_true(all(file.exists(files)))
  # Clean up so the test leaves the working directory unchanged.
  unlink(files)
})
|
1bb2f0ab26dbf8c836d7acc0ae62aeea24110c6c
|
3163e89817ded391b753a1932421b96241756633
|
/R/old_reconstructCDS.R
|
f97ce8fa94504c0f0557cf1dc351ff88ce94c275
|
[
"Apache-2.0"
] |
permissive
|
fursham-h/ponder
|
c1612c1e2dfc1dba64ddc75fb368be6e0538bfc2
|
5131a51c73fcf2a28fd43122f97b23932a59c4ca
|
refs/heads/master
| 2022-03-27T11:16:34.321088
| 2019-12-08T23:31:26
| 2019-12-08T23:31:26
| 126,469,267
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,499
|
r
|
old_reconstructCDS.R
|
#' Reconstruct a CDS with alternative internal and downstream segments
#'
#' Adds alternative segments from a query transcript into a reference CDS of
#' the same gene and attempts to rebuild an open reading frame.
#'
#' Note: this function will not insert/remove first exons.
#'
#' @param queryTranscript A GRanges object with the exon structure of a query
#'   transcript from the same gene family.
#' @param refCDS A GRanges object containing CCDS information of a gene family.
#' @param fasta Fasta sequence of the genome.
#' @param txrevise_out
#'   Optional. GRanges object from 'indentifyAddedRemovedRegions()'
#'   or 'testTXforStart()' output. ?indentifyAddedRemovedRegions or
#'   ?testTXforStart for more information.
#'   Arguments refCDS and queryTranscript are not mandatory if this argument
#'   is provided.
#' @param gene_id Gene identifier written into the returned CDS metadata.
#' @param transcript_id Transcript identifier written into the returned CDS
#'   metadata.
#'
#' @return
#' A list containing:
#' (1) ORF_considered: a GRanges object of the new ORF, or NA if no ORF found
#' (2) ORF_found: TRUE/FALSE, whether an in-frame stop codon was located
#' @author Fursham Hamid
#'
#' @examples
#'
#' library("BSgenome.Mmusculus.UCSC.mm10")
#' reconstructCDS(ptbp2Data$transcripts$ENSMUST00000197833, ptbp2Data$refCDS, fasta = BSgenome.Mmusculus.UCSC.mm10)
#'
#'
reconstructCDS <- function(queryTranscript, refCDS, fasta, txrevise_out = NULL, gene_id, transcript_id){
  # Build the added/removed-segment comparison unless supplied by the caller.
  if (is.null(txrevise_out)) {
    if (missing(refCDS) | missing(queryTranscript)) {
      stop('Please provide input GRanges objects')
    }
    combinedList = list(refTx = refCDS, testTx = queryTranscript)
    diffSegments = indentifyAddedRemovedRegions("refTx", "testTx", combinedList[c("refTx", "testTx")])
  } else {
    diffSegments = txrevise_out
  }
  # Default result: no ORF considered, none found.
  output = list(ORF_considered = NA, ORF_found = FALSE)
  # Test whether the query contributes any unique internal ("contained") or
  # downstream alternative segments relative to the reference.
  if (length(diffSegments[[1]]$contained[diffSegments[[1]]$contained == TRUE]) == 0 &
      length(diffSegments[[1]]$downstream[diffSegments[[1]]$downstream == TRUE]) == 0 &
      length(diffSegments[[2]]$contained[diffSegments[[2]]$contained == TRUE]) == 0) {
    # No alternative segments: fall back to the shared exons as the CDS,
    # sorted 5'->3' with respect to strand.
    Alternative_tx = FALSE
    augmentedCDS = sort(GenomicRanges::reduce(
      diffSegments$shared_exons),
      decreasing = as.character(strand(diffSegments$shared_exons))[1] == '-')
    ## POSSIBLE WARNING
  } else {
    # Construct a new exon structure by adding the query's non-upstream
    # alternative segments to the shared exons (first exons are excluded).
    Alternative_tx = TRUE
    augmentedCDS = sort(append(
      diffSegments$shared_exons,
      reduce(diffSegments[[2]][diffSegments[[2]]$upstream != TRUE])),
      decreasing = as.character(strand(diffSegments$shared_exons))[1] == '-')
    # Correct the open reading frame of the augmented structure.
    # NOTE(review): `queryStrand` is computed but never used below.
    queryStrand = as.character(strand(augmentedCDS))[1]
    thisqueryseq = unlist(Biostrings::getSeq(fasta, augmentedCDS))
    # Dictionary of the three stop codons for fast pattern matching.
    list_stopcodons = Biostrings::DNAStringSet(c("TAA", "TAG", "TGA"))
    pdict_stopcodons = Biostrings::PDict(list_stopcodons)
    # Locate stop codons whose end falls on a codon boundary (in frame).
    allmatches = Biostrings::matchPDict(pdict_stopcodons, thisqueryseq)
    combinedmatches = unlist(allmatches)
    inframe_stopcodons = sort(combinedmatches[end(combinedmatches) %% 3 == 0,])
    # Trim the 3' end of the transcript to the first in-frame stop codon.
    if (length(inframe_stopcodons) > 0) {
      downUTRsize = length(thisqueryseq) - end(inframe_stopcodons[1])
      augmentedCDS = resizeTranscripts(augmentedCDS, end = downUTRsize)
      # Annotate as CDS and assign per-exon reading-frame phases
      # (phase of each exon = cumulative width mod 3 of preceding exons).
      augmentedCDS = augmentedCDS %>% as.data.frame() %>%
        dplyr::mutate(type = 'CDS', gene_id = gene_id, transcript_id = transcript_id) %>%
        dplyr::mutate(phase = cumsum(width%%3)%%3)
      augmentedCDS$phase = c(0, head(augmentedCDS$phase, - 1))
      augmentedCDS = makeGRangesFromDataFrame(augmentedCDS, keep.extra.columns = TRUE)
      output$ORF_found = TRUE
    } else {
      # No in-frame stop codon: report that no ORF was reconstructed.
      augmentedCDS = NA
    }
  }
  output = modifyList(output,
                      list(ORF_considered = augmentedCDS))
  return(output)
}
|
5f9b4e709c7b7260ed7a160cc28c7abf9221b940
|
947831e8db6ce82ef431a2dd796d30e47b44c707
|
/man/VarRankMembers.Rd
|
35e7cbd64dcbedd49ec471a8839b6e46c30eaafc
|
[] |
no_license
|
T-Marty/trademartyr
|
a7fcebf13cfa7a2e3cd4ea9b2ddb8f66e3822bd8
|
aa1aa8bff804acf7eae58ef9978af7e421395bbe
|
refs/heads/master
| 2020-03-22T00:11:00.999312
| 2019-05-23T10:18:46
| 2019-05-23T10:18:46
| 139,228,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,001
|
rd
|
VarRankMembers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/position_prep.R
\name{VarRankMembers}
\alias{VarRankMembers}
\title{Function to perform rank by an external (pre-calculated) variable, accounting
for membership.}
\usage{
VarRankMembers(dn, s = 0, hist_members = newHM)
}
\arguments{
\item{dn}{xts object of variables ready to rank. Columns assumed to be
individual assets.}
\item{s}{Skip period (integer). This is the time between the variable
calculation date (end of formation period) and ranking
(and presumably investment) date.}
\item{hist_members}{xts object containing membership data. Column names
are assumed to be asset names corresponding to those in \code{dn}. Note: All entries
not NA are assumed to represent active membership.}
}
\description{
Function to perform rank by an external (pre-calculated)
variable, accounting for membership. Analogous to `momRankMembers` but
assumes a pre-calculated variable, instead of calculating and ranking on
price momentum.
}
|
5782a917b9028ebad1b674b1c087cc5ef836bafa
|
61c07605275b7d4d407a40c3bd754b2e686c4be3
|
/1 clase de R.R
|
fcb0aed474f36510d2d9e0336a8aabea0750e702
|
[] |
no_license
|
sauc117/Programacion_Actuarial_lll
|
419b06660d301b0eadcc6b996425fa6c1cb78e9a
|
d176b02d635505f11a312baab9fa946ea37c185e
|
refs/heads/master
| 2021-09-14T23:12:45.966516
| 2018-05-22T01:37:36
| 2018-05-22T01:37:36
| 119,411,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 325
|
r
|
1 clase de R.R
|
# An assignment is being performed here
x <- 1
x <- 2
# ctrl r --- clear
print(x)
# The assignment did not display the result; we must request a print for that
print(x) # Be careful with upper- vs lower-case names
print(x)
# Assignment can also be done with text
msg <- "Hola"
print(msg)
|
d4dadeac834eccd0cb176fde174b30cb2712337c
|
2a8174d7e6238ca1917b937de0f67d81b971e01c
|
/Well_data_cleaning.R
|
f8a21d3c1700927899dac4ea0487e69f61ac2488
|
[] |
no_license
|
jemsethio/ag_water_delta
|
24e8ef85adb35577780709db692744677e7e0b4b
|
4a2186a5c52603de5432a643c937ff3cc5af1619
|
refs/heads/master
| 2023-03-07T11:56:11.844380
| 2021-02-18T20:33:53
| 2021-02-18T20:33:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,451
|
r
|
Well_data_cleaning.R
|
# Clean alluvial-well water-level readings and compute the spring-minus-fall
# depth-to-water difference per station, year, and location.
#
# NOTE(review): the original script cleared the global environment with
# rm(list = ls()); that side effect has been dropped -- run this in a fresh
# session instead.
library(raster)

# Bootstrap pacman, then load (installing if needed) the packages used here.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(
  stars,        # spatiotemporal data handling
  terra,        # raster data handling
  raster,       # raster data handling
  sf,           # vector data handling
  dplyr,        # data wrangling
  stringr,      # string manipulation
  lubridate,    # dates handling
  data.table,   # data wrangling
  tidyr,        # reshape
  tidyUSDA,     # download USDA NASS data
  keyring,      # API key management
  FedData,      # download Daymet data
  daymetr,      # download Daymet data
  ggplot2,      # make maps
  tmap,         # make maps
  future.apply, # parallel processing
  CropScapeR,   # download CDL data
  prism,        # download PRISM data
  rgdal,        # fixed: was "rgal", which is not a CRAN package
  rspatial
)

# Read the well readings once (the original script read the same sheet twice)
# and keep only the columns of interest.
library("readxl")
well_reading <- read_excel("C:/Users/obemb/OneDrive/Desktop/data/Data/well_data/Water_depth/Alluvial_siteWI.xlsx") %>%
  subset(., select = c(WL_Below_L, Measuremen, Latitude__, Longitude, Station_Na))
colnames(well_reading)[2] <- "date"
colnames(well_reading)[3] <- "Latitude"
colnames(well_reading)[1] <- "well_depth"

# Derive year/month and classify each reading into a season: months after
# July -> "Spring", otherwise "Fall". (TODO confirm this labeling with the
# data owner -- it looks inverted relative to calendar seasons.)
well_reading$Year <- as.numeric(format(as.Date(well_reading$date), format = "%Y"))
well_reading$month <- as.numeric(format(as.Date(well_reading$date), format = "%m"))
well_reading <- well_reading %>%
  mutate(., season = ifelse(month > 7, "Spring", "Fall")) %>%
  subset(., select = -c(date))

# Split the cleaned table into the two seasonal measurements.
Spring <- well_reading[well_reading$season == "Spring", ]
colnames(Spring)[1] <- "W_S"
Fall <- well_reading[well_reading$season == "Fall", ]
colnames(Fall)[1] <- "W_F"

# Join fall and spring depths by station/year/location and compute the
# spring-minus-fall water-level difference.
DTW <- merge(Fall, Spring,
             by = c("Station_Na" = "Station_Na", "Year" = "Year",
                    "Latitude" = "Latitude", "Longitude" = "Longitude"),
             sort = TRUE) %>%
  subset(., select = -c(month.x, month.y, season.y, season.x)) %>%
  mutate(., Diff = W_S - W_F)
|
9da047f8e507055d155ff020c6a186bc7ef4cbf8
|
aeebd1497c7446e8ec967ba774ca5e016ce062a4
|
/The payoff of investing in mutual funds with a high CSR/02-variables alpha.R
|
f2edb611670863887f1d467003ae8d3fa905788b
|
[] |
no_license
|
shenfan2018/shenfan2018
|
212e881877df52b8772905b5a3546739cd4b5921
|
0bb70a7b0cdb0dc4d14a9576b02b6f22c7e9dfdb
|
refs/heads/master
| 2020-04-01T18:05:52.266821
| 2019-11-20T12:45:09
| 2019-11-20T12:45:09
| 153,471,302
| 1
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,300
|
r
|
02-variables alpha.R
|
# alpha R square return, R square choose 4
# Compute semiannual fund performance measures: raw return and factor-model
# alphas (CAPM, 3-, 4-, 5-, and 6-factor); the reported R-squared comes from
# the 4-factor model.
# raw return
load('fund-NAV.RData')
# alpha
# Daily factor returns (market, size, value, momentum, profitability,
# investment) plus the risk-free rate.
factor <- fread("C:/Users/shenfan/Desktop/csr/基金data/更新至2019-07-05-three_four_five_factor_daily/three_four_five_factor_daily/fivefactor_daily.csv")
setnames(factor, "trddy", "date")
factor <- factor[, date := as.Date(date)]
# Join factors onto fund NAVs, compute daily excess returns, split each year
# into halves (Jan-Jun = 1, Jul-Dec = 2), keep fund-halves with more than 30
# daily observations after 2008, then estimate the regressions per
# fund/year/half. [[8]] on summary.lm is r.squared.
NAV <- factor[data.NAV, on = .(date), nomatch = 0
][!is.na(AdjustedNAVGrowth)
][, ret_rf := AdjustedNAVGrowth - rf
][, year := year(date)
][, month := month(date)
][, sem := ifelse(month == 1 | month == 2 | month == 3 | month == 4 | month == 5 | month == 6, 1, 2)
][, n := .N, keyby = .(id, year, sem)
][n > 30
][year > 2008
][, .(raw_return = prod(AdjustedNAVGrowth + 1) - 1, alpha_capm = coef(lm(ret_rf ~ mkt_rf))[1], alpha_3 = coef(lm(ret_rf ~ mkt_rf + smb + hml))[1], alpha_4 = coef(lm(ret_rf ~ mkt_rf + smb + hml + umd))[1], alpha_5 = coef(lm(ret_rf ~ mkt_rf + smb + hml + rmw + cma))[1], alpha_6 = coef(lm(ret_rf ~ mkt_rf + smb + hml + rmw + cma + umd))[1], Rsquare = summary(lm(ret_rf ~ mkt_rf + smb + hml + umd))[[8]]), keyby = .(id, year, sem)]
# Attach the performance measures to the semiannual fund variables.
# Presumably quarter-2 reports map to the first half and others to the
# second -- TODO confirm this mapping.
load("variablesfund.RData")
variables <- variables[, quarter := quarter(date)
][, sem := ifelse(quarter == 2, 1, 2)]
variables <- NAV[variables, on = .(id, year, sem)]
save(variables, file = "variables.RData")
|
88d157c1aaa08ff0cfc2a4adc372d3550bb16c80
|
792ee880bc80a08af80eb87d8d193fd586a1f6de
|
/man/clear.Rd
|
29071838cf7aefedf2223901af388c158b09bdcb
|
[] |
no_license
|
cran/schoRsch
|
9852bf631fa28b90e14a5d939973264ceef8e6a1
|
52957e3499cd0afc2e7a8ffb20602bc4d4bb9467
|
refs/heads/master
| 2022-11-10T11:54:14.422863
| 2022-11-01T20:14:58
| 2022-11-01T20:14:58
| 17,699,487
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 478
|
rd
|
clear.Rd
|
\name{clear}
\alias{clear}
\title{Clear Global Workspace}
\description{The global workspace is cleared; \code{clear} is a shortcut for the usual \code{rm(list=ls())}.}
\usage{
clear()
}
%\arguments{}
%\details{ }
%\value{ }
%\references{}
\author{Roland Pfister, Markus Janczyk}
% \note{}
\seealso{
\code{\link{rm}}; \code{\link{ls}};
}
\examples{
## Declare variables
a <- 1
b <- "abc"
ls()
## Clear workspace
clear()
ls()
}
\keyword{utilities}
|
a735cc18c2353170b22e839d4b3eb8d4f6690dfc
|
b2a2d5465defe353276e27109b7dffc984169df1
|
/deloitte/final.r
|
416dd16add4ca1fa14bd0711575dd4a75a07be25
|
[] |
no_license
|
dmpe/analytics-challenges
|
91db670834f8b97b74514ac6d3dd0aced8ab6c9a
|
80adc8a24772d4bae6ce6e79d0a1bed2117ca250
|
refs/heads/master
| 2020-03-07T18:04:44.662652
| 2018-06-03T15:11:47
| 2018-06-03T15:11:47
| 127,627,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,593
|
r
|
final.r
|
# Exploratory analysis of Google Analytics exports plus a comparison of
# marketing spend on Heureka (CZ/SK) vs. Zbozi.cz.
library(plyr)
library(tidyverse)
library(reshape2)
library(hms)
library(lubridate)

# Load GA transactions and coerce the channel columns to factors.
input_data <- read_csv(file = "in/tables/ga_transactions.csv")
input_data$source <- as.factor(input_data$source)
input_data$medium <- as.factor(input_data$medium)
input_data$campaign <- as.factor(input_data$campaign)
# Keep columns 3-9 only -- presumably date + channel + revenue fields;
# TODO confirm against the CSV schema.
shorted <- input_data[,3:9]
################
# Top-10 traffic sources by transaction count.
source_Table <- data.frame(table(shorted$source))
top10 <- source_Table[order(-source_Table$Freq),]
ggplot(top10[c(1:10),], aes(Var1,Freq)) + geom_point()
###############
shorted$Year <- format(as.Date(shorted$date), "%Y")
shorted$Month <- format(as.Date(shorted$date), "%m")
################
# 2017 revenue summed by month and source; then the monthly maximum, joined
# back to recover which source produced it.
year2017 <- shorted %>%
  filter(Year == 2017) %>%
  group_by(Month, source) %>%
  summarize(sumTransactionRevenue = sum(transactionRevenue)) %>%
  top_n(n = 12, wt = sumTransactionRevenue)
year2017_1 <- shorted %>%
  filter(Year == 2017) %>%
  group_by(Month, source) %>%
  summarize(sumTransactionRevenue = sum(transactionRevenue)) %>%
  top_n(n = 12, wt = sumTransactionRevenue) %>%
  summarise(LargestSumTransactionRevenue = max(sumTransactionRevenue))
year2017_2 <- inner_join(year2017, year2017_1, by = c("sumTransactionRevenue" = "LargestSumTransactionRevenue") )
ggplot(year2017_2, aes(Month.x, sumTransactionRevenue)) + geom_histogram()
####################
# NOTE(review): `result` is not defined anywhere above -- this line fails as
# written; it presumably should write `year2017_2`. Confirm with the author.
write.csv(result, file = "out/tables/output.csv", row.names = FALSE)
##########
ga_profiles <- read_csv("in/tables/ga_profiles.csv")
ga_sessions <- read_csv("in/tables/ga_sessions.csv")
ga_ana_pageviews <- read_csv("in/tables/google_analytics_pageview.csv")
merged_data <- inner_join(ga_profiles, ga_sessions, by=c("id" = "idProfile"))
####################
# NOTE(review): `zbozi` is used here but only loaded further below -- these
# plots only work when the script is run out of order.
ggplot(zbozi, aes(impressions, clicks)) + geom_point() +
  geom_smooth(method = "lm", se = F) +
  ggtitle("Impressions & Clicks", subtitle = "") + xlab("Impressions") + ylab("clicks")
ggplot(zbozi, aes(cpc, position)) + geom_point() +
  geom_smooth(method = "lm", se = F) +
  ggtitle("Cpc & Position", subtitle = "") + xlab("Cpc") + ylab("Position")
ggplot(zbozi, aes(spend, position)) + geom_point() +
  geom_smooth(method = "lm", se = F) +
  ggtitle("Spend & Position", subtitle = "") + xlab("Spend") + ylab("Position")
##################### Load Heureka_cz/sk
heureka_cz <- read_csv("in/tables/heureka_cz.csv",
                       col_types = cols(cpc = col_double()),
                       locale = locale(decimal_mark = ","))
heureka_cz$conversion_rates <- as.double(heureka_cz$conversion_rates) / 100
heureka_sk <- read_csv("in/tables/heureka_sk.csv")
heureka_sk$conversion_rates <- as.double(heureka_sk$conversion_rates) / 10000
heureka_sk_conv <- read_csv("out/tables/heureka_fx_conv.csv",
                            col_types = cols(cpc = col_double(),
                                             rate = col_character()),
                            locale = locale(decimal_mark = ","))
# Daily exchange rates; keep the primary currency only.
exchange <- read_csv("in/tables/exchange_rates.csv",
                     col_types = cols(date = col_date(format = "%Y-%m-%d"))) %>%
  filter(currency_orderby == 1)
heureka_sk <- left_join(heureka_sk, exchange, by = c("date"="date"))
# Convert the SK figures to CZK using the fx rate in the conversion file.
heureka_sk_conv$conversion_rates <- as.double(heureka_sk_conv$conversion_rates) / 10000
heureka_sk_conv$cpc_cz <- heureka_sk_conv$cpc * as.double(heureka_sk_conv$rate)
heureka_sk_conv$spend_cz <- heureka_sk_conv$spend * as.double(heureka_sk_conv$rate)
heureka_sk_sm <- data.frame(heureka_sk_conv$date,
                            heureka_sk_conv$cpc_cz,
                            heureka_sk_conv$spend_cz,
                            heureka_sk_conv$conversion_rates)
heureka_cz_sm <- data.frame(heureka_cz$date,
                            heureka_cz$cpc,
                            heureka_cz$spend,
                            heureka_cz$conversion_rates)
mergedDF_heureka <- inner_join(heureka_sk_sm, heureka_cz_sm,
                               by = c("heureka_sk_conv.date"="heureka_cz.date"))
############ Load & Process Zbozi data
zbozi <- read_csv("in/tables/zbozi_cz.csv",
                  col_types = cols(date = col_date(format = "%d.%m.%Y")))
names(zbozi)[names(zbozi) == 'spend'] <- 'zbozi_spend'
df_zbozi_Spend <- zbozi[,c(2,7)]
df_heureka_Zbozi_joined <- inner_join(mergedDF_heureka, df_zbozi_Spend,
                                      by = c("heureka_sk_conv.date"="date"))
# Combined CZ + SK Heureka spend in CZK.
df_heureka_Zbozi_joined$CZ_SK_Spend <- df_heureka_Zbozi_joined$heureka_cz.spend +
  df_heureka_Zbozi_joined$heureka_sk_conv.spend_cz
############ Melt
#df_heureka_Zbozi_Spend <- melt(df_heureka_Zbozi_joined[,(c(3,6,8))])
df_heureka_Zbozi_Spend <- melt(df_heureka_Zbozi_joined[,(c(8,10))])
############ Plot it
ggplot(df_heureka_Zbozi_Spend) +
  geom_col(aes(variable, value), show.legend=F) +
  ggtitle("Expenditure on Heureka CZ&SK vs. Zbozi", subtitle = "16.Jan 2018 - 3.Mar 2018") +
  xlab("Marketing Channel") + ylab("Costs") +
  theme(axis.line = element_line(colour = "darkblue",
                                 size = 1, linetype = "solid")) +
  scale_x_discrete(labels=c("Zbozi", "Heureka CZ/SK"))
######################
### For Month/week
colnames(df_heureka_Zbozi_joined)
df_heureka_Zbozi_joined$week <- strftime(df_heureka_Zbozi_joined$heureka_sk_conv.date,
                                         format = "%V")
df_heureka_Zbozi_joined$heureka_sk_conv.date <- as.Date(df_heureka_Zbozi_joined$heureka_sk_conv.date)
dfs <- df_heureka_Zbozi_joined[,c(1,8,10)]
df_s2 <- gather(dfs, key = variable,
                value = measurement,
                -heureka_sk_conv.date)
ggplot(df_s2, aes(as.Date(heureka_sk_conv.date, format = "%b-%Y"), measurement, colour = variable)) +
  geom_line() +
  ggtitle("Expenditure on Heureka CZ & SK vs. Zbozi", subtitle = "16.Jan 2018 - 3.Apr 2018") +
  xlab("Month") + ylab("Costs in (CZK)") +
  scale_y_continuous(breaks = c(10000, 20000, 30000, 40000, 50000, 60000)) +
  scale_color_hue(name="Marketing Channel",
                  labels=c("Heureka CZ & SK", "Zbozi.cz"))
##################
# NOTE(review): this block repeats the previous one verbatim (df_s3 == df_s2).
dfs <- df_heureka_Zbozi_joined[,c(1,8,10)]
df_s3 <- gather(dfs, key = variable,
                value = measurement,
                -heureka_sk_conv.date)
ggplot(df_s3, aes(as.Date(heureka_sk_conv.date, format = "%b-%Y"), measurement, colour = variable)) +
  geom_line() +
  ggtitle("Expenditure on Heureka CZ & SK vs. Zbozi", subtitle = "16.Jan 2018 - 3.Apr 2018") +
  xlab("Month") + ylab("Costs in (CZK)") +
  scale_y_continuous(breaks = c(10000, 20000, 30000, 40000, 50000, 60000)) +
  scale_color_hue(name="Marketing Channel",
                  labels=c("Heureka CZ & SK", "Zbozi.cz"))
|
db26eb2fc2e495eb365c8d6d3296a3abe02225f8
|
0fa81f133a994b4e83c2a9799b90900f6193b3cc
|
/man/cas_read_db_download.Rd
|
19f744c7573cd4b5eae1b4c194c90f89beb33d69
|
[
"MIT"
] |
permissive
|
giocomai/castarter
|
8529995b68886e86cd815b5f4d45db7ddacb6ba1
|
da788b11a5eaa0cf24c79b85b1c660b30100428c
|
refs/heads/main
| 2023-08-27T14:28:07.905239
| 2023-08-09T12:10:22
| 2023-08-09T12:10:22
| 36,666,980
| 10
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,603
|
rd
|
cas_read_db_download.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cas_db_index.R
\name{cas_read_db_download}
\alias{cas_read_db_download}
\title{Read index from local database}
\usage{
cas_read_db_download(
index = FALSE,
id = NULL,
batch = "latest",
status = 200L,
db_connection = NULL,
db_folder = NULL,
...
)
}
\arguments{
\item{batch}{Default to "latest": returns only the path to the file with the
highest batch identifier available. Valid values are: "latest", "all", or a
numeric identifier corresponding to desired batch.}
\item{status}{Defaults to 200. Keeps only files downloaded with the given
status (can be more than one, given as a vector). If NULL, no filter based
on status is applied.}
\item{db_connection}{Defaults to NULL. If NULL, uses local SQLite database.
If given, must be a connection object or a list with relevant connection
settings (see example).}
\item{...}{Passed to \code{cas_get_db_file()}.}
}
\value{
A data frame with three columns and data stored in the \code{index_id}
table of the local database. The data frame has zero rows if the database
does not exist or no data was previously stored there.
}
\description{
Read index from local database
}
\examples{
cas_set_options(
base_folder = fs::path(tempdir(), "R", "castarter_data"),
db_folder = fs::path(tempdir(), "R", "castarter_data"),
project = "example_project",
website = "example_website"
)
cas_enable_db()
urls_df <- cas_build_urls(
url = "https://www.example.com/news/",
start_page = 1,
end_page = 10
)
cas_write_db_index(urls = urls_df)
cas_read_db_index()
}
|
30e987fa155f362ec3466417a92f85af39ed52c8
|
57965d63586beb192af1a2f8974fdd5630a3964b
|
/R/npregiv.R
|
e1443750c27341673162438e12f07ed1ae612ed3
|
[] |
no_license
|
JeffreyRacine/R-Package-np
|
6fee493cbd555cabe976d2f9c14cd10aef99c665
|
525db82ebc67423728888daf66ce0d9fdd70bbc7
|
refs/heads/master
| 2023-08-31T13:32:00.925187
| 2023-08-27T13:08:45
| 2023-08-27T13:08:45
| 1,957,067
| 41
| 23
| null | 2022-08-12T15:40:15
| 2011-06-26T20:09:34
|
C
|
UTF-8
|
R
| false
| false
| 93,166
|
r
|
npregiv.R
|
## This functions accepts the following arguments:
## y: univariate outcome
## z: endogenous predictors
## w: instruments
## x: exogenous predictors
## zeval: optional evaluation data for the endogenous predictors
## xeval: optional evaluation data for the exogenous predictors
## alpha.min: minimum value when conducting 1-dimensional search for
## optimal Tikhonov regularization parameter alpha
## alpha.max: maximum value when conducting 1-dimensional search for
## optimal Tikhonov regularization parameter alpha
## p: order of the local polynomial kernel estimator (p=0 is local
## constant, p=1 local linear etc.)
## This function returns a list with at least the following elements:
## phi: the IV estimator of phi(z)
## convergence: a character string indicating whether/why iteration terminated
npregiv <- function(y,
z,
w,
x=NULL,
zeval=NULL,
xeval=NULL,
p=1,
nmulti=1,
random.seed=42,
optim.maxattempts = 10,
optim.method=c("Nelder-Mead", "BFGS", "CG"),
optim.reltol=sqrt(.Machine$double.eps),
optim.abstol=.Machine$double.eps,
optim.maxit=500,
alpha=NULL,
alpha.iter=NULL,
alpha.min=1.0e-10,
alpha.max=1.0e-01,
alpha.tol=.Machine$double.eps^0.25,
iterate.Tikhonov=TRUE,
iterate.Tikhonov.num=1,
iterate.max=1000,
iterate.diff.tol=1.0e-08,
constant=0.5,
method=c("Landweber-Fridman","Tikhonov"),
penalize.iteration=TRUE,
smooth.residuals=TRUE,
start.from=c("Eyz","EEywz"),
starting.values=NULL,
stop.on.increase=TRUE,
return.weights.phi=FALSE,
return.weights.phi.deriv.1=FALSE,
return.weights.phi.deriv.2=FALSE,
bw=NULL,
...) {
## This function was constructed initially by Samuele Centorrino
## <samuele.centorrino@univ-tlse1.fr> to reproduce illustrations in
## the following papers:
## A) Econometrica (2011), Volume 79, pp. 1541-1565
## "Nonparametric Instrumental Regression"
## S. Darolles, Y. Fan, J.P. Florens, E. Renault
## B) Econometrics Journal (2010), volume 13, pp. S1-S27. doi:
## 10.1111/j.1368-423X.2010.00314.x
## "The practice of non-parametric estimation by solving inverse
## problems: the example of transformation models"
## FREDERIQUE FEVE AND JEAN-PIERRE FLORENS
## IDEI and Toulouse School of Economics, Universite de Toulouse
## Capitole 21 alle de de Brienne, 31000 Toulouse, France. E-mails:
## feve@cict.fr, florens@cict.fr
## It was modified by Jeffrey S. Racine <racinej@mcmaster.ca> and all
## errors remain my responsibility. I am indebted to Samuele and the
## Toulouse School of Economics for their generous hospitality.
## First we require two functions, the first that conducts Regularized
## Tikhonov Regression' (aka Ridge Regression)
## This function conducts regularized Tikhonov regression which
## corresponds to (3.9) in Feve & Florens (2010).
## This function accepts as arguments
## alpha: penalty
## CZ: row-normalized kernel weights for the `independent' variable
## CY: row-normalized kernel weights for the `dependent' variable
## Cr: row-normalized kernel weights for the `instrument/endogenous' variable (see NOTE below)
## r: vector of conditional expectations (z can be E(Z|z) - see NOTE below)
## NOTE: for Cr, in the transformation model case treated in Feve &
## Florens (2010) this maps Z onto the Y space. In the IV case
## (Darrolles, Fan, Florens & Renault (2011, forthcoming Econometrica)
## it maps W (the instrument) onto the space of the endogenous
## regressor Z.
## NOTE: for r, in the transformation model it will be equivalent to
## the vector of exogenous covariates, and in the endogenous case r is
## the conditional mean of y given the instrument W.
## This function returns TBA (need better error checking!)
## phi: the vector of estimated values for the unknown function at the evaluation points
tikh <- function(alpha,CZ,CY,Cr.r,cholesky=FALSE){
if(cholesky) {
return(chol2inv(chol(alpha*diag(nrow(CY)) + CY%*%CZ)) %*% Cr.r)
} else {
return(solve(alpha*diag(nrow(CY)) + CY%*%CZ) %*% Cr.r)
}
}
## Samuele indicates alternate form for estimator (visit to SUNY
## Stony Brook Feb 24 2015) that can be used with evaluation
## data. There is no need to carry around two versions of the same
## function, so with some thought we could jettison the above and
## use this throughout.
tikh.eval <- function(alpha,CZ,CY,CY.eval,r,cholesky=FALSE){
if(cholesky) {
return(CY.eval%*%chol2inv(chol(alpha*diag(nrow(CY)) + CZ%*%CY)) %*% r)
} else {
return(CY.eval%*%solve(alpha*diag(nrow(CY)) + CZ%*%CY) %*% r)
}
}
## This function applies the iterated Tikhonov approach which
## corresponds to (3.10) in Feve & Florens (2010).
## This function accepts as arguments
## alpha: penalty
## CZ: row-normalized kernel weights for the `independent' variable
## CY: row-normalized kernel weights for the `dependent' variable
## Cr: row-normalized kernel weights for the `instrument/endogenous' variable (see NOTE below)
## r: vector of conditional expectations (z can be E(Z|z) - see NOTE below)
## NOTE: for Cr, in the transformation model case treated in Feve &
## Florens (2010) this maps Z onto the Y space. In the IV case
## (Darrolles, Fan, Florens & Renault (2011, forthcoming Econometrica)
## it maps W (the instrument) onto the space of the endogenous
## regressor Z.
## NOTE: for r, in the transformation model it will be equivalent to
## the vector of exogenous covariates, and in the endogenous case r is
## the conditional mean of y given the instrument W.
## This function returns TBA (need better error checking!)
## phi: the vector of estimated values for the unknown function at the evaluation points
## SSalpha: (scalar) value of the sum of square residuals criterion
## which is a function of alpha (see (3.10) of Feve & Florens (2010)
ittik <- function(alpha,CZ,CY,Cr.r,r,cholesky=FALSE) {
if(cholesky) {
invmat <- chol2inv(chol(alpha*diag(nrow(CY)) + CY%*%CZ))
} else {
invmat <- solve(alpha*diag(nrow(CY)) + CY%*%CZ)
}
invmat.Cr.r <- invmat %*% Cr.r
phi <- invmat.Cr.r + alpha * invmat %*% invmat.Cr.r
return(sum((CZ%*%phi - r)^2)/NZD(alpha))
}
## This function returns the weight matrix for a local polynomial,
## and was rewritten 14/1/15 in Toulouse while visiting JP. It
## supports mixed data types. Basic error checking is
## undertaken. deriv = 0, strips off weights for mean, = p partials
## up to order 2. No cross-partials in this one. Basically useful
## for univariate case when deriv > 0 though could be refined - the
## old function was slower but had more capability (that basically
## went unused).
## Update - from ?npksum, "The option permutation.operator= can be
## used to `mix and match' operator strings to create a `hybrid'
## kernel, in addition to the kernel sum with no operators applied,
## one for each continuous dimension in the data. For example, for a
## two-dimensional data frame of numeric datatypes,
## permutation.operator=c("derivative") will return the usual kernel
## sum as if operator = c("normal","normal") in the ksum member, and
## in the p.ksum member, it will return kernel sums for operator =
## c("derivative","normal"), and operator =
## c("normal","derivative"). This makes the computation of gradients
## much easier."
## So, the upshot is that I could, for the multivariate case, add
## the derivative stuff.
  ## Build the n.eval x n.train smoother ("hat") matrix for a generalized
  ## local polynomial regression, optionally for first/second derivatives.
  ##
  ## Arguments:
  ##   deriv: 0 = weights for the conditional mean, 1/2 = weights for the
  ##     first/second derivative (deriv > 0 with p == 0 is only handled for
  ##     deriv == 1; see NOTE below)
  ##   mydata.train / mydata.eval: training and evaluation data (eval
  ##     defaults to train)
  ##   bws: bandwidth vector passed through to npksum()
  ##   p: polynomial order (0 = local constant)
  ##   shrink: if TRUE, ridge singular local systems instead of returning NA
  ##   warning.immediate: emit ridging warnings immediately
  ##   ...: forwarded to npksum()
  ## Returns: the weight matrix Kmat (rows = evaluation points).
  Kmat.lp <- function(deriv=0,
                      mydata.train=NULL,
                      mydata.eval=NULL,
                      bws=NULL,
                      p=0,
                      shrink=TRUE,
                      warning.immediate=TRUE,
                      ...) {
    ## 14/1/15, Toulouse - note that the weights herein ** DO NOT **
    ## shrink towards the lc estimator (neither for the function nor
    ## derivatives), unlike the function returned in
    ## glpreg(). However, they all appear to agree with the previous
    ## Kmat.lp with ** also ** did not shrink towards the lc
    ## estimator. This is noticeably faster, which ought to render
    ## Tikhonov faster as well.
    ## Basic error checking...
    if(is.null(mydata.train)) stop("You must provide training data")
    if(is.null(mydata.eval)) mydata.eval <- mydata.train
    if(is.null(bws)) stop("You must provide bandwidths")
    n.train=nrow(mydata.train)
    n.eval=nrow(mydata.eval)
    X.train <- as.data.frame(mydata.train)
    X.eval <- as.data.frame(mydata.eval)
    ## Check whether it appears that training and evaluation data are
    ## conformable...
    if(ncol(X.train)!=ncol(X.eval))
      stop("Error: training and evaluation data have unequal number of columns\n")
    X.col.numeric <- sapply(1:ncol(X.train),function(i){is.numeric(X.train[,i])})
    ## k represents the number of numeric regressors, this will return
    ## zero if there are none
    k <- ncol(as.data.frame(X.train[,X.col.numeric]))
    if(k > 0) {
      X.train.numeric <- as.data.frame(X.train[,X.col.numeric])
      X.eval.numeric <- as.data.frame(X.eval[,X.col.numeric])
    }
    if(deriv<0||deriv>2)
      stop(paste("Error: deriv= (integer) is invalid\n[min = ", 0, ", max = ", p, "]\n",sep=""))
    if(p < 0)
      stop(paste("Error: p (order of polynomial) must be a non-negative integer\np is (", p, ")\n",sep=""))
    ## Raw (unnormalized) kernel weights, one column per evaluation point
    K.x <- npksum(txdat=X.train,
                  exdat=X.eval,
                  bws=bws,
                  return.kernel.weights=TRUE,
                  ...)$kw
    if(p==0) {
      ## No shrinking necessary for local constant estimator
      if(deriv==0) {
        ## Nadaraya-Watson weights: rows normalized to sum to one
        Kmat <- t(K.x)/NZD(rowSums(t(K.x)))
      } else if(deriv==1) {
        ## Note this is not general XXX Feb 25 2015, for
        ## univariate z only
        ## NOTE(review): p==0 with deriv==2 falls through both branches and
        ## leaves Kmat undefined (error at return()) -- confirm intended.
        K.x.deriv <- npksum(txdat=X.train,
                            exdat=X.eval,
                            bws=bws,
                            return.kernel.weights=TRUE,
                            operator="derivative",
                            ...)$kw/NZD(bws)
        rSk <- NZD(rowSums(t(K.x)))
        ## Quotient rule for the derivative of the NW ratio estimator
        Kmat <- t(K.x.deriv)/NZD(rSk)-t(K.x)/NZD(rSk)*(rowSums(t(K.x.deriv))/NZD(rSk))
      }
    }
    if(p > 0) {
      ## Re-use this matrix, shrinking occurs here
      ## W.z: generalized local polynomial design matrix (np-internal W.glp)
      W.z <- W.glp(xdat=X.train.numeric,
                   degree=rep(p,NCOL(X.train.numeric)))
      ## NOTE(review): mydata.eval can no longer be NULL here (it is reset to
      ## mydata.train at the top), so the else branch always runs -- confirm.
      if(is.null(mydata.eval)) {
        ## Guess we could avoid copy with conditional statement below using either W.z or W.z.eval
        W.z.eval <- W.z
      } else {
        W.z.eval <- W.glp(xdat=X.train.numeric,
                          exdat=as.data.frame(X.eval.numeric),
                          degree=rep(p,NCOL(X.train.numeric)))
      }
      nc <- ncol(W.z)
      ## Per-evaluation-point inverses of the kernel-weighted normal equations
      WzkWz.inv <- list()
      for(i in 1:ncol(K.x)) {
        ## Test for singularity of the generalized local polynomial
        ## estimator, shrink the mean towards the local constant mean.
        if(tryCatch(WzkWz.inv[[i]] <- as.matrix(chol2inv(chol(t(W.z)%*%(K.x[,i]*W.z)))),
                    error = function(e){
                      return(matrix(FALSE,nc,nc))
                    })[1,1]!=FALSE) {
        } else {
          if(shrink==FALSE) {
            ## If we do not explicitly engage ridging then we do not fail
            ## and terminate, rather, we return NA when Wmat.sum is
            ## singular
            Kmat <- NA
          } else {
            ## Ridging: increase the diagonal by epsilon until the Cholesky
            ## factorization succeeds
            epsilon <- 1/n.train
            ridge <- 0
            while(tryCatch(as.matrix(chol2inv(chol((chol(t(W.z)%*%(K.x[,i]*W.z)+diag(rep(ridge,nc))))))),
                           error = function(e){
                             return(matrix(FALSE,nc,nc))
                           })[1,1]==FALSE) {
              ridge <- ridge + epsilon
            }
            WzkWz.inv[[i]] <- as.matrix(chol2inv(chol(t(W.z)%*%(K.x[,i]*W.z)+diag(rep(ridge,nc)))))
            warning(paste("Ridging obs. ", i, ", ridge = ", signif(ridge,6),sep=""),
                    immediate.=warning.immediate,
                    call.=!warning.immediate)
          }
        }
      }
    }
    ## Assemble the hat matrix row by row: e(z_i)' (W'KW)^{-1} W'K
    if(p==1) {
      if(deriv==0) Kmat <- t(sapply(1:ncol(K.x),function(i){W.z.eval[i,,drop=FALSE]%*%WzkWz.inv[[i]]%*%t(W.z)*K.x[,i]}))
      if(deriv==1) {
        W.z.deriv.1 <- W.glp(xdat=X.train.numeric,
                             exdat=as.matrix(X.eval.numeric),
                             degree=rep(p,NCOL(X.train.numeric)),
                             gradient.vec = 1)
        Kmat <- t(sapply(1:ncol(K.x),function(i){W.z.deriv.1[i,,drop=FALSE]%*%WzkWz.inv[[i]]%*%t(W.z)*K.x[,i]}))
      }
    }
    if(p >= 2) {
      if(deriv==0) {
        Kmat <- t(sapply(1:ncol(K.x),function(i){W.z.eval[i,,drop=FALSE]%*%WzkWz.inv[[i]]%*%t(W.z)*K.x[,i]}))
      }
      if(deriv==1) {
        W.z.deriv.1 <- W.glp(xdat=X.train.numeric,
                             exdat=as.matrix(X.eval.numeric),
                             degree=rep(p,NCOL(X.train.numeric)),
                             gradient.vec = 1)
        Kmat <- t(sapply(1:ncol(K.x),function(i){W.z.deriv.1[i,,drop=FALSE]%*%WzkWz.inv[[i]]%*%t(W.z)*K.x[,i]}))
      }
      if(deriv==2) {
        W.z.deriv.2 <- W.glp(xdat=X.train.numeric,
                             exdat=as.matrix(X.eval.numeric),
                             degree=rep(p,NCOL(X.train.numeric)),
                             gradient.vec = 2)
        Kmat <- t(sapply(1:ncol(K.x),function(i){W.z.deriv.2[i,,drop=FALSE]%*%WzkWz.inv[[i]]%*%t(W.z)*K.x[,i]}))
      }
    }
    return(Kmat)
  }
  ## Generalized local polynomial regression of tydat on txdat, evaluated at
  ## exdat, returning the fitted mean and gradient of order `deriv`.
  ##
  ## Arguments:
  ##   tydat / txdat: training outcome and predictors
  ##   exdat: evaluation predictors (defaults to txdat)
  ##   bws: bandwidths (liracine kernels for unordered/ordered factors)
  ##   degree: per-predictor polynomial degree (all 0 = local constant)
  ##   leave.one.out: delete-one estimation (training data only; used by CV)
  ##   deriv: derivative order returned in $grad
  ##   ...: forwarded to npksum()/npreg()
  ## Returns: list(mean = fitted values, grad = derivative estimates).
  glpreg <- function(tydat=NULL,
                     txdat=NULL,
                     exdat=NULL,
                     bws=NULL,
                     degree=NULL,
                     leave.one.out=FALSE,
                     deriv=1,
                     ...) {
    ## Don't think this error checking is robust
    if(is.null(tydat)) stop("Error: You must provide y data")
    if(is.null(txdat)) stop("Error: You must provide X data")
    if(is.null(bws)) stop("Error: You must provide a bandwidth object")
    if(is.null(degree) | any(degree < 0)) stop(paste("Error: degree vector must contain non-negative integers\ndegree is (", degree, ")\n",sep=""))
    ## NOTE(review): `p` here is resolved lexically from the enclosing
    ## npregiv() argument, not from `degree` -- confirm this is intended.
    if(p>0 && (deriv<0||deriv>degree)) stop("deriv must lie between 0 and degree")
    miss.ex = missing(exdat)
    if (miss.ex){
      exdat <- txdat
    }
    txdat <- as.data.frame(txdat)
    exdat <- as.data.frame(exdat)
    maxPenalty <- sqrt(.Machine$double.xmax)
    n.train <- nrow(txdat)
    n.eval <- nrow(exdat)
    ## Check whether it appears that training and evaluation data are
    ## conformable
    if(ncol(txdat)!=ncol(exdat))
      stop("Error: training and evaluation data have unequal number of columns\n")
    if(all(degree == 0)) {
      ## Local constant using only one call to npksum
      if(leave.one.out == TRUE) {
        ## exdat not supported with leave.one.out, but this is only used
        ## for cross-validation hence no exdat
        tww <- npksum(txdat = txdat,
                      weights = as.matrix(data.frame(1,tydat)),
                      tydat = rep(1,length(tydat)),
                      bws = bws,
                      bandwidth.divide = TRUE,
                      leave.one.out = leave.one.out,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
      } else {
        tww <- npksum(txdat = txdat,
                      exdat = exdat,
                      weights = as.matrix(data.frame(1,tydat)),
                      tydat = rep(1,length(tydat)),
                      bws = bws,
                      bandwidth.divide = TRUE,
                      leave.one.out = leave.one.out,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
      }
      ## Note that as bandwidth approaches zero the local constant
      ## estimator undersmooths and approaches each sample realization,
      ## so use the convention that when the sum of the kernel weights
      ## equals 0, return y. This is unique to this code.
      ## Row 1 of tww holds the kernel sums, row 2 the kernel-weighted y sums
      mhat <- tww[2,]/NZD(tww[1,])
      ## Gradients come from a separate npreg() fit (local constant only)
      grad <- gradients(npreg(tydat=tydat,
                              txdat=txdat,
                              exdat = exdat,
                              bws = bws,
                              ukertype="liracine",
                              okertype="liracine",
                              gradients=TRUE,
                              ...))
      return(list(mean = mhat,
                  grad = grad))
    } else {
      ## Generalized polynomial design matrices for train, eval, and the
      ## requested derivative at the evaluation points (np-internal W.glp)
      W <- W.glp(xdat=txdat,
                 degree=degree)
      W.eval <- W.glp(xdat=txdat,
                      exdat=exdat,
                      degree=degree)
      W.eval.deriv <- W.glp(xdat=txdat,
                            exdat=exdat,
                            degree=degree,
                            gradient.vec=rep(deriv,NCOL(txdat)))
      ## Local polynomial via smooth coefficient formulation and one
      ## call to npksum
      if(leave.one.out == TRUE) {
        ## exdat not supported with leave.one.out, but this is only used
        ## for cross-validation hence no exdat
        tww <- npksum(txdat = txdat,
                      tydat = as.matrix(cbind(tydat,W)),
                      weights = W,
                      bws = bws,
                      bandwidth.divide = TRUE,
                      leave.one.out = leave.one.out,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
      } else {
        tww <- npksum(txdat = txdat,
                      exdat = exdat,
                      tydat = as.matrix(cbind(tydat,W)),
                      weights = W,
                      bws = bws,
                      bandwidth.divide = TRUE,
                      leave.one.out = leave.one.out,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
      }
      ## Slice the stacked kernel sums: tyw = W'K y, tww = W'K W, per point
      tyw <- array(tww,dim = c(ncol(W)+1,ncol(W),n.eval))[1,,]
      tww <- array(tww,dim = c(ncol(W)+1,ncol(W),n.eval))[-1,,]
      coef.mat <- matrix(maxPenalty,ncol(W),n.eval)
      epsilon <- 1.0/n.eval
      ridge <- double(n.eval)
      ridge.lc <- double(n.eval)
      doridge <- !logical(n.eval)
      nc <- ncol(tww[,,1])
      ## Test for singularity of the generalized local polynomial
      ## estimator, shrink the mean towards the local constant mean.
      ## ridger() increments ridge[i] via <<- on failure and flags the point
      ## for re-processing; the while loop below repeats until all succeed.
      ## NOTE(review): `ridge.lc[i] <-` is a plain local assignment inside
      ## ridger(), so the outer ridge.lc read below stays zero -- confirm
      ## whether <<- was intended (shrinkage toward the LC mean).
      ridger <- function(i) {
        doridge[i] <<- FALSE
        ridge.lc[i] <- ridge[i]*tyw[1,i][1]/NZD(tww[,,i][1,1])
        tryCatch(chol2inv(chol(tww[,,i]+diag(rep(ridge[i],nc))))%*%tyw[,i],
                 error = function(e){
                   ridge[i] <<- ridge[i]+epsilon
                   doridge[i] <<- TRUE
                   return(rep(maxPenalty,nc))
                 })
      }
      while(any(doridge)){
        iloo <- (1:n.eval)[doridge]
        coef.mat[,iloo] <- sapply(iloo, ridger)
      }
      ## Fitted mean: convex combination of the local polynomial fit and the
      ## (ridge-weighted) local constant component
      mhat <- sapply(1:n.eval, function(i) {
        (1-ridge[i])*W.eval[i,, drop = FALSE] %*% coef.mat[,i] + ridge.lc[i]
      })
      ## Gradient drops the intercept column/coefficient
      grad <- sapply(1:n.eval, function(i) {W.eval.deriv[i,-1, drop = FALSE] %*% coef.mat[-1,i]})
      return(list(mean = mhat,
                  grad = grad))
    }
  }
  ## Delete-one least-squares cross-validation objective for bandwidth
  ## selection: mean((y - yhat_loo)^2), or maxPenalty for invalid/degenerate
  ## bandwidths.  Minimized by glpcv() via optim().
  ##
  ## Arguments:
  ##   bws: candidate bandwidth vector (any non-positive entry -> maxPenalty)
  ##   ydat / xdat: outcome and predictors
  ##   degree: per-predictor polynomial degrees (all 0 = local constant)
  ##   W: precomputed generalized polynomial design matrix (from W.glp)
  ##   ...: forwarded to npksum()
  minimand.cv.ls <- function(bws=NULL,
                             ydat=NULL,
                             xdat=NULL,
                             degree=NULL,
                             W=NULL,
                             ...) {
    ## Don't think this error checking is robust
    if(is.null(ydat)) stop("Error: You must provide y data")
    if(is.null(xdat)) stop("Error: You must provide X data")
    if(is.null(W)) stop("Error: You must provide a weighting matrix W")
    if(is.null(bws)) stop("Error: You must provide a bandwidth object")
    if(is.null(degree) | any(degree < 0)) stop(paste("Error: degree vector must contain non-negative integers\ndegree is (", degree, ")\n",sep=""))
    xdat <- as.data.frame(xdat)
    n <- length(ydat)
    maxPenalty <- sqrt(.Machine$double.xmax)
    if(any(bws<=0)) {
      return(maxPenalty)
    } else {
      if(all(degree == 0)) {
        ## Local constant via one call to npksum (leave-one-out sums)
        tww <- npksum(txdat = xdat,
                      weights = as.matrix(data.frame(1,ydat)),
                      tydat = rep(1,n),
                      bws = bws,
                      leave.one.out = TRUE,
                      bandwidth.divide = TRUE,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
        ## Delete-one NW estimate: weighted y sums over kernel sums
        mean.loo <- tww[2,]/NZD(tww[1,])
        if (!any(is.nan(mean.loo)) && !any(mean.loo == maxPenalty)){
          fv <- mean((ydat-mean.loo)^2)
        } else {
          fv <- maxPenalty
        }
        return(ifelse(is.finite(fv),fv,maxPenalty))
      } else {
        ## Generalized local polynomial via smooth coefficient
        ## formulation and one call to npksum
        tww <- npksum(txdat = xdat,
                      tydat = as.matrix(cbind(ydat,W)),
                      weights = W,
                      bws = bws,
                      leave.one.out = TRUE,
                      bandwidth.divide = TRUE,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
        ## Slice the stacked sums: tyw = W'K y, tww = W'K W, per point
        tyw <- array(tww,dim = c(ncol(W)+1,ncol(W),n))[1,,]
        tww <- array(tww,dim = c(ncol(W)+1,ncol(W),n))[-1,,]
        mean.loo <- rep(maxPenalty,n)
        epsilon <- 1.0/n
        ridge <- double(n)
        ridge.lc <- double(n)
        doridge <- !logical(n)
        nc <- ncol(tww[,,1])
        ## Test for singularity of the generalized local polynomial
        ## estimator, shrink the mean towards the local constant mean.
        ## On a failed Cholesky, ridger() bumps ridge[i] via <<- and flags
        ## the point for another pass.
        ## NOTE(review): `ridge.lc[i] <-` is a local assignment, so the
        ## outer ridge.lc used below remains zero -- confirm whether <<-
        ## was intended.
        ridger <- function(i) {
          doridge[i] <<- FALSE
          ridge.lc[i] <- ridge[i]*tyw[1,i][1]/NZD(tww[,,i][1,1])
          W[i,, drop = FALSE] %*% tryCatch(chol2inv(chol(tww[,,i]+diag(rep(ridge[i],nc))))%*%tyw[,i],
                                           error = function(e){
                                             ridge[i] <<- ridge[i]+epsilon
                                             doridge[i] <<- TRUE
                                             return(rep(maxPenalty,nc))
                                           })
        }
        while(any(doridge)){
          iloo <- (1:n)[doridge]
          mean.loo[iloo] <- (1-ridge[iloo])*sapply(iloo, ridger) + ridge.lc[iloo]
        }
        if (!any(is.nan(mean.loo)) && !any(mean.loo == maxPenalty)){
          fv <- mean((ydat-mean.loo)^2)
        } else {
          fv <- maxPenalty
        }
        return(ifelse(is.finite(fv),fv,maxPenalty))
      }
    }
  }
  ## AIC_c cross-validation objective (Hurvich/Simonoff/Tsai-style) for
  ## bandwidth selection: log(mean squared residual) plus a trace-of-hat
  ## penalty; returns maxPenalty for invalid/degenerate bandwidths.
  ##
  ## Arguments:
  ##   bws: candidate bandwidth vector (any non-positive entry -> maxPenalty)
  ##   ydat / xdat: outcome and predictors
  ##   degree: per-predictor polynomial degrees (all 0 = local constant)
  ##   W: precomputed design matrix (required only when any degree > 0)
  ##   ...: forwarded to npksum()
  minimand.cv.aic <- function(bws=NULL,
                              ydat=NULL,
                              xdat=NULL,
                              degree=NULL,
                              W=NULL,
                              ...) {
    ## Don't think this error checking is robust
    if(is.null(ydat)) stop("Error: You must provide y data")
    if(is.null(xdat)) stop("Error: You must provide X data")
    if(!all(degree==0)) if(is.null(W)) stop("Error: You must provide a weighting matrix W")
    if(is.null(bws)) stop("Error: You must provide a bandwidth object")
    if(is.null(degree) | any(degree < 0)) stop(paste("Error: degree vector must contain non-negative integers\ndegree is (", degree, ")\n",sep=""))
    xdat <- as.data.frame(xdat)
    n <- length(ydat)
    maxPenalty <- sqrt(.Machine$double.xmax)
    if(any(bws<=0)) {
      return(maxPenalty)
    } else {
      ## This computes the kernel function when i=j (i.e., K(0)),
      ## needed for the diagonal of the hat matrix in the trace term
      kernel.i.eq.j <- npksum(txdat = xdat[1,],
                              weights = as.matrix(data.frame(1,ydat)[1,]),
                              tydat = 1,
                              bws = bws,
                              bandwidth.divide = TRUE,
                              ukertype="liracine",
                              okertype="liracine",
                              ...)$ksum[1,1]
      if(all(degree == 0)) {
        ## Local constant via one call to npksum
        tww <- npksum(txdat = xdat,
                      weights = as.matrix(data.frame(1,ydat)),
                      tydat = rep(1,n),
                      bws = bws,
                      bandwidth.divide = TRUE,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
        ghat <- tww[2,]/NZD(tww[1,])
        ## tr(H) = sum_i K(0)/sum_j K_ij for the NW hat matrix
        trH <- kernel.i.eq.j*sum(1/NZD(tww[1,]))
        aic.penalty <- (1+trH/n)/(1-(trH+2)/n)
        if (!any(ghat == maxPenalty) & (aic.penalty > 0)){
          fv <- log(mean((ydat-ghat)^2)) + aic.penalty
        } else {
          fv <- maxPenalty
        }
        return(ifelse(is.finite(fv),fv,maxPenalty))
      } else {
        ## Generalized local polynomial via smooth coefficient
        ## formulation and one call to npksum
        tww <- npksum(txdat = xdat,
                      tydat = as.matrix(cbind(ydat,W)),
                      weights = W,
                      bws = bws,
                      bandwidth.divide = TRUE,
                      ukertype="liracine",
                      okertype="liracine",
                      ...)$ksum
        ## Slice the stacked sums: tyw = W'K y, tww = W'K W, per point
        tyw <- array(tww,dim = c(ncol(W)+1,ncol(W),n))[1,,]
        tww <- array(tww,dim = c(ncol(W)+1,ncol(W),n))[-1,,]
        ghat <- rep(maxPenalty,n)
        epsilon <- 1.0/n
        ridge <- double(n)
        ridge.lc <- double(n)
        doridge <- !logical(n)
        nc <- ncol(tww[,,1])
        ## Test for singularity of the generalized local polynomial
        ## estimator, shrink the mean towards the local constant mean.
        ## On a failed Cholesky, ridger() bumps ridge[i] via <<- and flags
        ## the point for another pass.
        ## NOTE(review): `ridge.lc[i] <-` is a local assignment, so the
        ## outer ridge.lc used below remains zero -- confirm whether <<-
        ## was intended.
        ridger <- function(i) {
          doridge[i] <<- FALSE
          ridge.lc[i] <- ridge[i]*tyw[1,i][1]/NZD(tww[,,i][1,1])
          W[i,, drop = FALSE] %*% tryCatch(chol2inv(chol(tww[,,i]+diag(rep(ridge[i],nc))))%*%tyw[,i],
                                           error = function(e){
                                             ridge[i] <<- ridge[i]+epsilon
                                             doridge[i] <<- TRUE
                                             return(rep(maxPenalty,nc))
                                           })
        }
        while(any(doridge)){
          ii <- (1:n)[doridge]
          ghat[ii] <- (1-ridge[ii])*sapply(ii, ridger) + ridge.lc[ii]
        }
        ## tr(H): diagonal hat-matrix entries for the ridged SCM estimator
        trH <- kernel.i.eq.j*sum(sapply(1:n,function(i){
          (1-ridge[i])*W[i,, drop = FALSE] %*% chol2inv(chol(tww[,,i]+diag(rep(ridge[i],nc)))) %*% t(W[i,, drop = FALSE]) + ridge[i]/NZD(tww[,,i][1,1])
        }))
        aic.penalty <- (1+trH/n)/(1-(trH+2)/n)
        if (!any(ghat == maxPenalty) & (aic.penalty > 0)){
          fv <- log(mean((ydat-ghat)^2)) + aic.penalty
        } else {
          fv <- maxPenalty
        }
        return(ifelse(is.finite(fv),fv,maxPenalty))
      }
    }
  }
glpcv <- function(ydat=NULL,
xdat=NULL,
degree=NULL,
bwmethod=c("cv.ls","cv.aic"),
nmulti=nmulti,
random.seed=42,
optim.maxattempts = 10,
optim.method=c("Nelder-Mead", "BFGS", "CG"),
optim.reltol=sqrt(.Machine$double.eps),
optim.abstol=.Machine$double.eps,
optim.maxit=500,
debug=FALSE,
...) {
## Save seed prior to setting
if(exists(".Random.seed", .GlobalEnv)) {
save.seed <- get(".Random.seed", .GlobalEnv)
exists.seed = TRUE
} else {
exists.seed = FALSE
}
set.seed(random.seed)
if(debug) system("rm optim.debug bandwidth.out optim.out")
## Don't think this error checking is robust
if(is.null(ydat)) stop("Error: You must provide y data")
if(is.null(xdat)) stop("Error: You must provide X data")
if(is.null(degree) | any(degree < 0)) stop(paste("Error: degree vector must contain non-negative integers\ndegree is (", degree, ")\n",sep=""))
if(!is.null(nmulti) && nmulti < 1) stop(paste("Error: nmulti must be a positive integer (minimum 1)\nnmulti is (", nmulti, ")\n",sep=""))
bwmethod = match.arg(bwmethod)
optim.method <- match.arg(optim.method)
optim.control <- list(abstol = optim.abstol,
reltol = optim.reltol,
maxit = optim.maxit)
maxPenalty <- sqrt(.Machine$double.xmax)
xdat <- as.data.frame(xdat)
num.bw <- ncol(xdat)
if(is.null(nmulti)) nmulti <- min(5,num.bw)
## Which variables are categorical, which are discrete...
xdat.numeric <- sapply(1:ncol(xdat),function(i){is.numeric(xdat[,i])})
## First initialize initial search values of the vector of
## bandwidths to lie in [0,1]
if(debug) write(c("cv",paste(rep("x",num.bw),seq(1:num.bw),sep="")),file="optim.debug",ncolumns=(num.bw+1))
## Pass in the local polynomial weight matrix rather than
## recomputing with each iteration.
W <- W.glp(xdat=xdat,
degree=degree)
sum.lscv <- function(bw.gamma,...) {
## Note - we set the kernel for unordered and ordered regressors
## to the liracine kernel (0<=lambda<=1) and test for proper
## bounds in sum.lscv.
if(all(bw.gamma>=0)&&all(bw.gamma[!xdat.numeric]<=1)) {
lscv <- minimand.cv.ls(bws=bw.gamma,ydat=ydat,xdat=xdat,...)
} else {
lscv <- maxPenalty
}
if(debug) write(c(lscv,bw.gamma),file="optim.debug",ncolumns=(num.bw+1),append=TRUE)
return(lscv)
}
sum.aicc <- function(bw.gamma,...) {
## Note - we set the kernel for unordered and ordered regressors
## to the liracine kernel (0<=lambda<=1) and test for proper
## bounds in sum.lscv.
if(all(bw.gamma>=0)&&all(bw.gamma[!xdat.numeric]<=1)) {
aicc <- minimand.cv.aic(bws=bw.gamma,ydat=ydat,xdat=xdat,...)
} else {
aicc <- maxPenalty
}
if(debug) write(c(aicc,bw.gamma),file="optim.debug",ncolumns=(num.bw+1),append=TRUE)
return(aicc)
}
## Multistarting
fv.vec <- numeric(nmulti)
## Pass in the W matrix rather than recomputing it each time
for(iMulti in 1:nmulti) {
num.numeric <- ncol(as.data.frame(xdat[,xdat.numeric]))
## First initialize to values for factors (`liracine' kernel)
init.search.vals <- runif(ncol(xdat),0,1)
for(i in 1:ncol(xdat)) {
if(xdat.numeric[i]==TRUE) {
init.search.vals[i] <- runif(1,.5,1.5)*EssDee(xdat[,i])*nrow(xdat)^{-1/(4+num.numeric)}
}
}
## Initialize `best' values prior to search
if(iMulti == 1) {
fv <- maxPenalty
numimp <- 0
bw.opt <- init.search.vals
best <- 1
}
if(bwmethod == "cv.ls" ) {
suppressWarnings(optim.return <- optim(init.search.vals,
fn=sum.lscv,
method=optim.method,
control=optim.control,
degree=degree,
W=W,
...))
attempts <- 0
while((optim.return$convergence != 0) && (attempts <= optim.maxattempts)) {
init.search.vals <- runif(ncol(xdat),0,1)
if(xdat.numeric[i]==TRUE) {
init.search.vals[i] <- runif(1,.5,1.5)*EssDee(xdat[,i])*nrow(xdat)^{-1/(4+num.numeric)}
}
attempts <- attempts + 1
optim.control$abstol <- optim.control$abstol * 10.0
optim.control$reltol <- optim.control$reltol * 10.0
# optim.control <- lapply(optim.control, '*', 10.0) ## Perhaps do not want to keep increasing maxit??? Jan 31 2011
suppressWarnings(optim.return <- optim(init.search.vals,
fn=sum.lscv,
method=optim.method,
control=optim.control,
degree=degree,
W=W,
...))
}
} else {
suppressWarnings(optim.return <- optim(init.search.vals,
fn=sum.aicc,
method=optim.method,
control=optim.control,
degree=degree,
W=W,
...))
attempts <- 0
while((optim.return$convergence != 0) && (attempts <= optim.maxattempts)) {
init.search.vals <- runif(ncol(xdat),0,1)
if(xdat.numeric[i]==TRUE) {
init.search.vals[i] <- runif(1,.5,1.5)*EssDee(xdat[,i])*nrow(xdat)^{-1/(4+num.numeric)}
}
attempts <- attempts + 1
optim.control$abstol <- optim.control$abstol * 10.0
optim.control$reltol <- optim.control$reltol * 10.0
# optim.control <- lapply(optim.control, '*', 10.0) ## Perhaps do not want to keep increasing maxit??? Jan 31 2011
suppressWarnings(optim.return <- optim(init.search.vals,
fn = sum.aicc,
method=optim.method,
control = optim.control,
W=W,
...))
}
}
if(optim.return$convergence != 0) warning(" optim failed to converge")
fv.vec[iMulti] <- optim.return$value
if(optim.return$value < fv) {
bw.opt <- optim.return$par
fv <- optim.return$value
numimp <- numimp + 1
best <- iMulti
if(debug) {
if(iMulti==1) {
write(cbind(iMulti,t(bw.opt)),"bandwidth.out",ncolumns=(1+length(bw.opt)))
write(cbind(iMulti,fv),"optim.out",ncolumns=2)
} else {
write(cbind(iMulti,t(bw.opt)),"bandwidth.out",ncolumns=(1+length(bw.opt)),append=TRUE)
write(cbind(iMulti,fv),"optim.out",ncolumns=2,append=TRUE)
}
}
}
}
## Restore seed
if(exists.seed) assign(".Random.seed", save.seed, .GlobalEnv)
return(list(bw=bw.opt,
fv=fv,
numimp=numimp,
best=best,
fv.vec=fv.vec))
}
## Here is where the function `npregiv' really begins:
## Everything below estimates the nonparametric instrumental-variable
## regression y = phi(z) + u with instruments w, using either Tikhonov
## regularization or Landweber-Fridman iteration (selected via `method').
console <- newLineConsole()
## Basic error checking
## Logical switches controlling iteration/stopping behaviour must be TRUE/FALSE.
if(!is.logical(penalize.iteration)) stop("penalize.iteration must be logical (TRUE/FALSE)")
if(!is.logical(smooth.residuals)) stop("smooth.residuals must be logical (TRUE/FALSE)")
if(!is.logical(stop.on.increase)) stop("stop.on.increase must be logical (TRUE/FALSE)")
if(!is.logical(iterate.Tikhonov)) stop("iterate.Tikhonov must be logical (TRUE/FALSE)")
if(iterate.Tikhonov.num < 1) stop("iterate.Tikhonov.num must be a positive integer")
## y (response), z (endogenous regressors) and w (instruments) are mandatory
## and must be row-conformable; y must be a single column.
if(missing(y)) stop("You must provide y")
if(missing(z)) stop("You must provide z")
if(missing(w)) stop("You must provide w")
if(NCOL(y) > 1) stop("y must be univariate")
if(NROW(y) != NROW(z) || NROW(y) != NROW(w)) stop("y, z, and w have differing numbers of rows")
## Numeric tuning parameters: iteration cap, stopping tolerance, the
## Landweber-Fridman relaxation constant (strictly inside (0,1)),
## local-polynomial order p, and the optional Tikhonov parameters.
if(iterate.max < 2) stop("iterate.max must be at least 2")
if(iterate.diff.tol < 0) stop("iterate.diff.tol must be non-negative")
if(constant <= 0 || constant >=1) stop("constant must lie in (0,1)")
if(p < 0) stop("p must be a non-negative integer")
if(!is.null(alpha) && alpha <= 0) stop("alpha must be positive")
if(!is.null(alpha.iter) && alpha.iter <= 0) stop("alpha.iter must be positive")
## Derivative weight matrices are only available together with the function
## weight matrix, and second derivatives require polynomial order p >= 2.
if(return.weights.phi.deriv.1 && !return.weights.phi) stop("must use return.weights.phi=TRUE when using return.weights.phi.deriv.1=TRUE")
if(return.weights.phi.deriv.2 && !return.weights.phi) stop("must use return.weights.phi=TRUE when using return.weights.phi.deriv.2=TRUE")
if(return.weights.phi.deriv.2 && p<2) stop("must use p >= 2 when using return.weights.phi.deriv.2=TRUE")
## Resolve enumerated-choice arguments against their permitted values.
start.from <- match.arg(start.from)
method <- match.arg(method)
## Need to determine how many x, w, z are numeric
z <- data.frame(z)
w <- data.frame(w)
if(!is.null(x)) {
z <- data.frame(z,x)
## JP points out that, with exogenous predictors, they must be
## part of both z and the instruments. The line below was added
## 20/1/15 in Toulouse.
w <- data.frame(w,x)
## Obviously, if you have exogenous variables that are only in
## the instrument set, you can trivially accommodate this
## (append to w before invoking the function - added to man
## page)
if(!is.null(zeval)&&!is.null(xeval)) zeval <- data.frame(zeval,xeval)
}
## Count the numeric (continuous) columns of z and w; these counts set the
## length of the per-dimension polynomial degree vectors used throughout.
z.numeric <- sapply(1:NCOL(z),function(i){is.numeric(z[,i])})
num.z.numeric <- NCOL(as.data.frame(z[,z.numeric]))
w.numeric <- sapply(1:NCOL(w),function(i){is.numeric(w[,i])})
num.w.numeric <- NCOL(as.data.frame(w[,w.numeric]))
if(method=="Tikhonov") {
## Now y=phi(z) + u, hence E(y|w)=E(phi(z)|w) so we need two
## bandwidths, one for y on w and one for phi(z) on w (in the
## first step we use E(y|w) as a proxy for phi(z) and use
## bandwidths for y on w).
## Convergence value returned for Landweber-Fridman but value
## required
## convergence <- NULL
console <- printClear(console)
console <- printPop(console)
if(is.null(bw)) {
console <- printPush("Computing bandwidths and E(y|w)...",console)
} else {
console <- printPush("Computing E(y|w) using supplied bandwidths...",console)
}
if(is.null(bw)) {
hyw <- glpcv(ydat=y,
xdat=w,
degree=rep(p, num.w.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.E.y.w <- hyw$bw
} else {
bw.E.y.w <- bw$bw.E.y.w
}
console <- printClear(console)
console <- printPop(console)
console <- printPush("Computing weight matrix and E(y|w)...", console)
E.y.w <- glpreg(tydat=y,
txdat=w,
bws=bw.E.y.w,
degree=rep(p, num.w.numeric),
...)$mean
KYW <- Kmat.lp(mydata.train=data.frame(w),
bws=bw.E.y.w,
p=rep(p, num.w.numeric),
...)
## We conduct local polynomial kernel regression of E(y|w) on z
console <- printClear(console)
console <- printPop(console)
if(is.null(bw)) {
console <- printPush("Computing bandwidths for E(E(y|w)|z)...", console)
} else {
console <- printPush("Computing E(E(y|w)|z) using supplied bandwidths...", console)
}
if(is.null(bw)) {
hywz <- glpcv(ydat=E.y.w,
xdat=z,
degree=rep(p, num.z.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.E.E.y.w.z <- hywz$bw
} else {
bw.E.E.y.w.z <- bw$bw.E.E.y.w.z
}
console <- printClear(console)
console <- printPop(console)
console <- printPush("Computing weight matrix and E(E(y|w)|z)...", console)
E.E.y.w.z <- glpreg(tydat=E.y.w,
txdat=z,
bws=bw.E.E.y.w.z,
degree=rep(p, num.z.numeric),
...)$mean
KYWZ <- Kmat.lp(mydata.train=data.frame(z),
bws=bw.E.E.y.w.z,
p=rep(p, num.z.numeric),
...)
## Next, we minimize the function ittik to obtain the optimal value
## of alpha (here we use the iterated Tikhonov function) to
## determine the optimal alpha for the non-iterated scheme. Note
## that the function `optimize' accepts bounds on the search (in
## this case alpha.min to alpha.max))
## E(r|z)=E(E(phi(z)|w)|z)
## \phi^\alpha = (\alpha I+CzCw)^{-1}Cr x r
if(!is.null(bw)) alpha <- bw$alpha
if(is.null(alpha)&&is.null(bw)) {
console <- printClear(console)
console <- printPop(console)
console <- printPush("Numerically solving for alpha...", console)
alpha <- optimize(ittik, c(alpha.min, alpha.max), tol = alpha.tol, CZ = KYW, CY = KYWZ, Cr.r = E.E.y.w.z, r = E.y.w)$minimum
}
## Finally, we conduct regularized Tikhonov regression using this
## optimal alpha.
console <- printClear(console)
console <- printPop(console)
console <- printPush("Computing initial phi(z) estimate...", console)
phi <- as.vector(tikh(alpha, CZ = KYW, CY = KYWZ, Cr.r = E.E.y.w.z))
phi.mat <- phi
phi.eval.mat <- NULL
if(!is.null(zeval)) {
## If there is evaluation data, KPHIWZ and KPHIWZ.eval will
## differ...
KPHIWZ.eval <- Kmat.lp(mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.E.y.w.z,
p=rep(p, num.z.numeric),
...)
phi.eval <- as.vector(tikh.eval(alpha, CZ = KYW, CY = KYWZ, CY.eval = KPHIWZ.eval, r = E.y.w))
phi.eval.mat <- cbind(phi.eval.mat,phi.eval)
}
console <- printClear(console)
console <- printPop(console)
if(is.null(bw)) {
console <- printPush("Computing bandwidths for E(phi(z)|w)...", console)
} else {
console <- printPush("Computing E(phi(z)|w) using supplied bandwidths...", console)
}
for(i in 1:iterate.Tikhonov.num) {
if(iterate.Tikhonov.num > 1 && i < iterate.Tikhonov.num) {
console <- printClear(console)
console <- printPop(console)
console <- printPush(paste("Iteration ",i," of ",iterate.Tikhonov.num,sep=""), console)
}
if(is.null(bw)) {
hphiw <- glpcv(ydat=phi, ## 23/1/15 phi is sample
xdat=w,
degree=rep(p, num.w.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.E.phi.w <- hphiw$bw
} else {
bw.E.phi.w <- bw$bw.E.phi.w
}
if(!(iterate.Tikhonov.num > 1 && i < iterate.Tikhonov.num)) {
console <- printClear(console)
console <- printPop(console)
console <- printPush("Computing weight matrix for E(phi(z)|w)...", console)
}
E.phi.w <- glpreg(tydat=phi,
txdat=w,
bws=bw.E.phi.w,
degree=rep(p, num.w.numeric),
...)$mean
KPHIW <- Kmat.lp(mydata.train=data.frame(w),
bws=bw.E.phi.w,
p=rep(p, num.w.numeric),
...)
if(!(iterate.Tikhonov.num > 1 && i < iterate.Tikhonov.num)) {
console <- printClear(console)
console <- printPop(console)
if(is.null(bw)) {
console <- printPush("Computing bandwidths for E(E(phi(z)|w)|z)...", console)
} else {
console <- printPush("Computing E(E(phi(z)|w)|z) using supplied bandwidths...", console)
}
}
if(is.null(bw)) {
hphiwz <- glpcv(ydat=E.phi.w,
xdat=z,
degree=rep(p, num.z.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.E.E.phi.w.z <- hphiwz$bw
} else {
bw.E.E.phi.w.z <- bw$bw.E.E.phi.w.z
}
E.E.phi.w.z <- glpreg(tydat=E.y.w,
txdat=z,
bws=bw.E.E.phi.w.z,
degree=rep(p, num.z.numeric),
...)$mean
if(!(iterate.Tikhonov.num > 1 && i < iterate.Tikhonov.num)) {
console <- printClear(console)
console <- printPop(console)
console <- printPush("Computing weight matrix for E(E(phi(z)|w)|z)...", console)
}
KPHIW <- Kmat.lp(mydata.train=data.frame(w),
bws=bw.E.phi.w,
p=rep(p, num.w.numeric),
...)
KPHIWZ <- Kmat.lp(mydata.train=data.frame(z),
bws=bw.E.E.phi.w.z,
p=rep(p, num.z.numeric),
...)
## Next, we minimize the function ittik to obtain the optimal value
## of alpha (here we use the iterated Tikhonov approach) to
## determine the optimal alpha for the non-iterated scheme.
if(!is.null(bw)) alpha.iter <- bw$alpha.iter
if(!iterate.Tikhonov) {
alpha.iter <- alpha
} else {
if(is.null(alpha.iter)&&is.null(bw)) {
if(!(iterate.Tikhonov.num > 1 && i < iterate.Tikhonov.num)) {
console <- printClear(console)
console <- printPop(console)
console <- printPush(paste("Iterating and recomputing the numerical solution for alpha (iteration ",i," of ",iterate.Tikhonov.num,")",sep=""), console)
}
alpha.iter <- optimize(ittik, c(alpha.min, alpha.max), tol = alpha.tol, CZ = KPHIW, CY = KPHIWZ, Cr.r = E.E.phi.w.z, r = E.y.w)$minimum
}
}
## Finally, we conduct regularized Tikhonov regression using this
## optimal alpha and the updated bandwidths.
if(!(iterate.Tikhonov.num > 1 && i < iterate.Tikhonov.num)) {
console <- printClear(console)
console <- printPop(console)
console <- printPush("Computing final phi(z) estimate...", console)
}
phi <- as.vector(tikh.eval(alpha.iter, CZ = KPHIW, CY = KPHIWZ, CY.eval = KPHIWZ, r = E.y.w))
phi.mat <- cbind(phi.mat,phi)
H <- NULL
if(return.weights.phi) {
H <- KPHIWZ%*%solve(alpha.iter*diag(nrow(KPHIWZ)) + KPHIW%*%KPHIWZ)%*%KYW
}
## First derivative
KPHIWZ.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
bws=bw.E.E.phi.w.z,
p=rep(p, num.z.numeric),
...)
phi.deriv.1 <- as.vector(tikh.eval(alpha.iter, CZ = KPHIW, CY = KPHIWZ, CY.eval = KPHIWZ.deriv.1, r = E.y.w))
H.deriv.1 <- NULL
if(return.weights.phi.deriv.1) {
H.deriv.1 <- KPHIWZ.deriv.1%*%solve(alpha.iter*diag(nrow(KPHIWZ)) + KPHIW%*%KPHIWZ)%*%KYW
}
## Second derivative
phi.deriv.2 <- NULL
H.deriv.2 <- NULL
if(p >= 2) {
KPHIWZ.deriv.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
bws=bw.E.E.phi.w.z,
p=rep(p, num.z.numeric),
...)
phi.deriv.2 <- as.vector(tikh.eval(alpha.iter, CZ = KPHIW, CY = KPHIWZ, CY.eval = KPHIWZ.deriv.2, r = E.y.w))
if(return.weights.phi.deriv.2) {
H.deriv.2 <- KPHIWZ.deriv.2%*%solve(alpha.iter*diag(nrow(KPHIWZ)) + KPHIW%*%KPHIWZ)%*%KYW
}
}
## If evaluation data are provided...
phi.eval <- NULL
phi.deriv.eval.1 <- NULL
phi.deriv.eval.2 <- NULL
H.eval <- NULL
H.deriv.eval.1 <- NULL
H.deriv.eval.2 <- NULL
if(!is.null(zeval)) {
## If there is evaluation data, KPHIWZ and KPHIWZ.eval will
## differ...
KPHIWZ.eval <- Kmat.lp(mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.E.phi.w.z,
p=rep(p, num.z.numeric),
...)
phi.eval <- as.vector(tikh.eval(alpha.iter, CZ = KPHIW, CY = KPHIWZ, CY.eval = KPHIWZ.eval, r = E.y.w))
phi.eval.mat <- cbind(phi.eval.mat,phi.eval)
if(return.weights.phi) {
H.eval <- KPHIWZ.eval%*%solve(alpha.iter*diag(nrow(KPHIWZ)) + KPHIW%*%KPHIWZ)%*%KYW
}
KPHIWZ.eval.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.E.phi.w.z,
p=rep(p, num.z.numeric),
...)
phi.deriv.eval.1 <- as.vector(tikh.eval(alpha.iter, CZ = KPHIW, CY = KPHIWZ, CY.eval = KPHIWZ.eval.deriv.1, r = E.y.w))
if(return.weights.phi.deriv.1) {
H.deriv.eval.1 <- KPHIWZ.eval.deriv.1%*%solve(alpha.iter*diag(nrow(KPHIWZ)) + KPHIW%*%KPHIWZ)%*%KYW
}
if(p >= 2) {
KPHIWZ.eval.deriv.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.E.phi.w.z,
p=rep(p, num.z.numeric),
...)
phi.deriv.eval.2 <- as.vector(tikh.eval(alpha.iter, CZ = KPHIW, CY = KPHIWZ, CY.eval = KPHIWZ.eval.deriv.2, r = E.y.w))
if(return.weights.phi.deriv.2) {
H.deriv.eval.2 <- KPHIWZ.eval.deriv.2%*%solve(alpha.iter*diag(nrow(KPHIWZ)) + KPHIW%*%KPHIWZ)%*%KYW
}
}
}
}
console <- printClear(console)
console <- printPop(console)
## Warn when the chosen Tikhonov regularization parameter sits within 1% of
## either end of the search interval, since the true optimum may then lie
## outside [alpha.min, alpha.max].
if((alpha.iter-alpha.min)/NZD(alpha.min) < 0.01) warning(paste("Tikhonov parameter alpha (",formatC(alpha.iter,digits=4,format="f"),") is close to the search minimum (",alpha.min,")",sep=""))
if((alpha.max-alpha.iter)/NZD(alpha.max) < 0.01) warning(paste("Tikhonov parameter alpha (",formatC(alpha.iter,digits=4,format="f"),") is close to the search maximum (",alpha.max,")",sep=""))
## Return the Tikhonov-regularized estimate: phi and its first/second
## derivatives (on the training points and, when zeval was supplied, the
## evaluation points), the associated weight matrices (NULL unless
## requested), both regularization parameters, and every bandwidth vector
## so the fit can be reproduced by passing these back via the `bw' argument.
return(list(phi=phi,
phi.eval=phi.eval,
phi.mat=phi.mat,
phi.eval.mat=phi.eval.mat,
phi.deriv.1=as.matrix(phi.deriv.1),
phi.deriv.eval.1=if(!is.null(phi.deriv.eval.1)){as.matrix(phi.deriv.eval.1)}else{NULL},
phi.deriv.2=if(!is.null(phi.deriv.2)){as.matrix(phi.deriv.2)}else{NULL},
phi.deriv.eval.2=if(!is.null(phi.deriv.eval.2)){as.matrix(phi.deriv.eval.2)}else{NULL},
phi.weights=H,
phi.deriv.1.weights=H.deriv.1,
phi.deriv.2.weights=H.deriv.2,
phi.eval.weights=H.eval,
phi.deriv.eval.1.weights=H.deriv.eval.1,
phi.deriv.eval.2.weights=H.deriv.eval.2,
alpha=alpha,
alpha.iter=alpha.iter,
bw.E.y.w=bw.E.y.w,
bw.E.E.y.w.z=bw.E.E.y.w.z,
bw.E.phi.w=bw.E.phi.w,
bw.E.E.phi.w.z=bw.E.E.phi.w.z))
} else {
## Landweber-Fridman
## For the stopping rule
## The L-F iteration is stopped by monitoring the norm of the fitted
## residuals relative to the norm of E(y|w), so E(y|w) is computed once
## up front and reused in every iteration.
console <- printClear(console)
console <- printPop(console)
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E(y|w) for stopping rule...",sep=""),console)
} else {
console <- printPush(paste("Computing E(y|w) for stopping rule using supplied bandwidths...",sep=""),console)
}
## Accumulates the stopping-rule criterion, one entry per L-F iteration.
norm.stop <- numeric()
## Cross-validate bandwidths for the regression of y on w unless the caller
## supplied previously computed bandwidths via `bw'.
if(is.null(bw)) {
h <- glpcv(ydat=y,
           xdat=w,
           degree=rep(p, num.w.numeric),
           nmulti=nmulti,
           random.seed=random.seed,
           optim.maxattempts=optim.maxattempts,
           optim.method=optim.method,
           optim.reltol=optim.reltol,
           optim.abstol=optim.abstol,
           optim.maxit=optim.maxit,
           ...)
bw.E.y.w <- h$bw
} else {
bw.E.y.w <- bw$bw.E.y.w
}
## Fitted values E(y|w), used both in the stopping rule and in the updates.
E.y.w <- glpreg(tydat=y,
                txdat=w,
                bws=bw.E.y.w,
                degree=rep(p, num.w.numeric),
                ...)$mean
if(return.weights.phi) {
## Weight-matrix output is only implemented for a scalar z with a
## nonnegative polynomial order; T.mat.r is the smoother of y on w.
if(p<0) stop("glp return weights not supported")
if(NCOL(z) > 1) stop("dimension of z must be one for currently supported return weights")
T.mat.r <- Kmat.lp(mydata.train=data.frame(w),
                   bws=bw.E.y.w,
                   p=rep(p, num.w.numeric),
                   ...)
}
## We begin the iteration computing phi.0 and phi.1 directly, then
## iterate.
console <- printClear(console)
console <- printPop(console)
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E(y|z) for iteration 0...",sep=""),console)
} else {
console <- printPush(paste("Computing E(y|z) for iteration 0 using supplied bandwidths...",sep=""),console)
}
if(is.null(starting.values)) {
if(is.null(bw)) {
h <- glpcv(ydat=if(start.from=="Eyz") y else E.y.w,
xdat=z,
degree=rep(p, num.z.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.E.y.z <- h$bw
} else {
bw.E.y.z <- bw$bw.E.y.z
}
g <- glpreg(tydat=if(start.from=="Eyz") y else E.y.w,
txdat=z,
bws=bw.E.y.z,
degree=rep(p, num.z.numeric),
...)
phi.0 <- g$mean
phi.0.deriv.1 <- g$grad
if(p >= 2) {
phi.0.deriv.2 <- glpreg(tydat=if(start.from=="Eyz") y else E.y.w,
txdat=z,
bws=bw.E.y.z,
degree=rep(p, num.z.numeric),
deriv=2,
...)$grad
}
if(!is.null(zeval)) {
g <- glpreg(tydat=if(start.from=="Eyz") y else E.y.w,
txdat=z,
exdat=zeval,
bws=bw.E.y.z,
degree=rep(p, num.z.numeric),
...)
phi.eval.0 <- g$mean
phi.eval.0.deriv.1 <- g$grad
if(p >= 2) {
phi.eval.0.deriv.2 <- glpreg(tydat=if(start.from=="Eyz") y else E.y.w,
txdat=z,
exdat=zeval,
bws=bw.E.y.z,
degree=rep(p, num.z.numeric),
deriv=2,
...)$grad
}
} else {
phi.eval.0 <- NULL
phi.eval.0.deriv.1 <- NULL
phi.eval.0.deriv.2 <- NULL
}
if(return.weights.phi) {
H <- Kmat.lp(mydata.train=data.frame(z),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) H.eval <- Kmat.lp(mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
if(p==0 || p==1) {
if(return.weights.phi.deriv.1) {
H.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) H.deriv.eval.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
}
} else {
if(return.weights.phi.deriv.1) {
H.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) {
H.deriv.eval.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
}
}
if(return.weights.phi.deriv.2) {
H.deriv.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) {
H.deriv.eval.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.E.y.z,
p=rep(p, num.z.numeric),
...)
}
}
}
}
} else {
## Starting values input by user
phi.0 <- starting.values
if(return.weights.phi) H <- NULL
bw.E.y.z <- NULL
}
starting.values.phi <- phi.0
console <- printClear(console)
console <- printPop(console)
if(smooth.residuals) {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[y-phi(z)|w] for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing E[y-phi(z)|w] for iteration 1 using supplied bandwidths...",sep=""),console)
}
} else {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[phi(z)|w] for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing E[phi(z)|w] for iteration 1 using supplied bandwidths...",sep=""),console)
}
}
if(smooth.residuals) {
resid <- y - phi.0
if(is.null(bw)) {
h <- glpcv(ydat=resid,
xdat=w,
degree=rep(p, num.w.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.resid.w <- h$bw
} else {
bw.resid.w <- bw$bw.resid.w[1,]
}
resid.fitted <- glpreg(tydat=resid,
txdat=w,
bws=bw.resid.w,
degree=rep(p, num.w.numeric),
...)$mean
} else {
if(is.null(bw)) {
h <- glpcv(ydat=phi.0,
xdat=w,
degree=rep(p, num.w.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.resid.w <- h$bw
} else {
bw.resid.w <- bw$bw.resid.w[1,]
}
resid.fitted <- E.y.w - glpreg(tydat=phi.0,
txdat=w,
bws=bw.resid.w,
degree=rep(p, num.w.numeric),
...)$mean
}
if(return.weights.phi) {
T.mat <- Kmat.lp(mydata.train=data.frame(w),
bws=bw.resid.w,
p=rep(p, num.z.numeric),
...)
}
norm.stop[1] <- sum(resid.fitted^2)/NZD(sum(E.y.w^2))
console <- printClear(console)
console <- printPop(console)
if(smooth.residuals) {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[E(y-phi(z)|w)|z] for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing E[E(y-phi(z)|w)|z] for iteration 1 using supplied bandwidths...",sep=""),console)
}
} else {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[E(y|w) - E(phi(z)|w)|z] for iteration 1...",sep=""),console)
} else {
console <- printPush(paste("Computing E[E(y|w) - E(phi(z)|w)|z] for iteration 1 using supplied bandwidths...",sep=""),console)
}
}
if(is.null(bw)) {
h <- glpcv(ydat=resid.fitted,
xdat=z,
degree=rep(p, num.z.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
bw.resid.fitted.w.z <- h$bw
} else {
bw.resid.fitted.w.z <- bw$bw.resid.fitted.w.z[1,]
}
g <- glpreg(tydat=resid.fitted,
txdat=z,
bws=bw.resid.fitted.w.z,
degree=rep(p, num.z.numeric),
...)
phi.deriv.1.list <- list()
phi.deriv.eval.1.list <- list()
phi <- phi.0 + constant*g$mean
phi.deriv.1 <- phi.0.deriv.1 + constant*g$grad
phi.mat <- phi
phi.deriv.1.list[[1]] <- phi.deriv.1
phi.deriv.2.list <- list()
phi.deriv.eval.2.list <- list()
phi.deriv.2 <- NULL
phi.deriv.eval.2 <- NULL
if(p >= 2) {
phi.deriv.2 <- phi.0.deriv.2 + constant*glpreg(tydat=resid.fitted,
txdat=z,
bws=bw.resid.fitted.w.z,
degree=rep(p, num.z.numeric),
deriv=2,
...)$grad
phi.deriv.2.list[[1]] <- phi.deriv.2
}
if(!is.null(zeval)) {
g <- glpreg(tydat=resid.fitted,
txdat=z,
exdat=zeval,
bws=bw.resid.fitted.w.z,
degree=rep(p, num.z.numeric),
...)
phi.eval <- phi.eval.0 + constant*g$mean
phi.eval.mat <- phi.eval
phi.deriv.eval.1 <- phi.eval.0.deriv.1 + constant*g$grad
phi.deriv.eval.1.list[[1]] <- phi.deriv.eval.1
if(p >= 2) {
phi.deriv.eval.2 <- phi.eval.0.deriv.2 + constant*glpreg(tydat=resid.fitted,
txdat=z,
bws=bw.resid.fitted.w.z,
exdat=zeval,
degree=rep(p, num.z.numeric),
deriv=2,
...)$grad
phi.deriv.eval.2.list[[1]] <- phi.deriv.eval.2
}
} else {
phi.eval <- NULL
phi.eval.mat <- NULL
phi.deriv.eval.1 <- NULL
phi.deriv.eval.1.list <- NULL
}
## Need these list even when no weights for return
phi.weights.list <- list()
phi.deriv.1.weights.list <- list()
phi.deriv.2.weights.list <- list()
phi.eval.weights.list <- list()
phi.deriv.eval.1.weights.list <- list()
phi.deriv.eval.2.weights.list <- list()
if(return.weights.phi) {
T.mat.adjoint <- Kmat.lp(mydata.train=data.frame(z),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) T.mat.adjoint.eval <- Kmat.lp(mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
if(p==0 || p==1) {
if(return.weights.phi.deriv.1) {
T.mat.adjoint.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) T.mat.adjoint.deriv.eval.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
}
} else {
if(return.weights.phi.deriv.1) {
T.mat.adjoint.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) {
T.mat.adjoint.deriv.eval.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
}
}
if(return.weights.phi.deriv.2) {
T.mat.adjoint.deriv.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) {
T.mat.adjoint.deriv.eval.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=bw.resid.fitted.w.z,
p=rep(p, num.z.numeric),
...)
}
}
}
if(smooth.residuals) {
if(!is.null(zeval)) {
H.eval <- H.eval + constant*T.mat.adjoint.eval%*%(T.mat-T.mat%*%H)
if(return.weights.phi.deriv.1) H.deriv.eval.1 <- H.deriv.eval.1 + constant*T.mat.adjoint.deriv.eval.1%*%(T.mat-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.eval.2 <- H.deriv.eval.2 + constant*T.mat.adjoint.deriv.eval.2%*%(T.mat-T.mat%*%H)
}
if(return.weights.phi.deriv.1) H.deriv.1 <- H.deriv.1 + constant*T.mat.adjoint.deriv.1%*%(T.mat-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.2 <- H.deriv.2 + constant*T.mat.adjoint.deriv.2%*%(T.mat-T.mat%*%H)
H <- H + constant*T.mat.adjoint%*%(T.mat-T.mat%*%H)
} else {
if(!is.null(zeval)) {
H.eval <- H.eval + constant*T.mat.adjoint.eval%*%(T.mat.r-T.mat%*%H)
if(return.weights.phi.deriv.1) H.deriv.eval.1 <- H.deriv.eval.1 + constant*T.mat.adjoint.deriv.eval.1%*%(T.mat.r-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.eval.2 <- H.deriv.eval.2 + constant*T.mat.adjoint.deriv.eval.2%*%(T.mat.r-T.mat%*%H)
}
if(return.weights.phi.deriv.1) H.deriv.1 <- H.deriv.1 + constant*T.mat.adjoint.deriv.1%*%(T.mat.r-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.2 <- H.deriv.2 + constant*T.mat.adjoint.deriv.2%*%(T.mat.r-T.mat%*%H)
H <- H + constant*T.mat.adjoint%*%(T.mat.r-T.mat%*%H)
}
phi.weights.list[[1]] <- H
if(return.weights.phi.deriv.1) phi.deriv.1.weights.list[[1]] <- H.deriv.1
if(p>1 && return.weights.phi.deriv.2) phi.deriv.2.weights.list[[1]] <- H.deriv.2
if(!is.null(zeval)) {
phi.eval.weights.list[[1]] <- H.eval
if(return.weights.phi.deriv.1) phi.deriv.eval.1.weights.list[[1]] <- H.deriv.eval.1
if(p>1 && return.weights.phi.deriv.2) phi.deriv.eval.2.weights.list[[1]] <- H.deriv.eval.2
}
}
if(!is.null(bw)) iterate.max <- bw$norm.index
## In what follows we rbind() bandwidths to return and are careful
## about which ones are used when fed in, so we use h$bw below
## (but above all are named).
for(j in 2:iterate.max) {
console <- printClear(console)
console <- printPop(console)
if(smooth.residuals) {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[y-phi(z)|w] for iteration ", j,"...",sep=""),console)
} else {
console <- printPush(paste("Computing E[y-phi(z)|w] for iteration ", j," using supplied bandwidths...",sep=""),console)
}
} else {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[phi(z)|w] for iteration ", j,"...",sep=""),console)
} else {
console <- printPush(paste("Computing E[phi(z)|w] for iteration ", j," using supplied bandwidths...",sep=""),console)
}
}
if(smooth.residuals) {
resid <- y - phi
if(is.null(bw)) {
h <- glpcv(ydat=resid,
xdat=w,
degree=rep(p, num.w.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
} else {
h <- NULL
h$bw <- bw$bw.resid.w[j,]
}
resid.fitted <- glpreg(tydat=resid,
txdat=w,
bws=h$bw,
degree=rep(p, num.w.numeric),
...)$mean
} else {
if(is.null(bw)) {
h <- glpcv(ydat=phi,
xdat=w,
degree=rep(p, num.w.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
} else {
h <- NULL
h$bw <- bw$bw.resid.w[j,]
}
resid.fitted <- E.y.w - glpreg(tydat=phi,
txdat=w,
bws=h$bw,
degree=rep(p, num.w.numeric),
...)$mean
}
bw.resid.w <- rbind(bw.resid.w,h$bw)
if(return.weights.phi) {
T.mat <- Kmat.lp(mydata.train=data.frame(w),
bws=h$bw,
p=rep(p, num.w.numeric),
...)
}
norm.stop[j] <- ifelse(penalize.iteration,j*sum(resid.fitted^2)/NZD(sum(E.y.w^2)),sum(resid.fitted^2)/NZD(sum(E.y.w^2)))
console <- printClear(console)
console <- printPop(console)
if(smooth.residuals) {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[E(y-phi(z)|w)|z] for iteration ", j,"...",sep=""),console)
} else {
console <- printPush(paste("Computing E[E(y-phi(z)|w)|z] for iteration ", j," using supplied bandwidths...",sep=""),console)
}
} else {
if(is.null(bw)) {
console <- printPush(paste("Computing bandwidths and E[E(y|z)-E(phi(z)|w)|z] for iteration ", j,"...",sep=""),console)
} else {
console <- printPush(paste("Computing E[E(y|z)-E(phi(z)|w)|z] for iteration ", j," using supplied bandwidths...",sep=""),console)
}
}
if(is.null(bw)) {
h <- glpcv(ydat=resid.fitted,
xdat=z,
degree=rep(p, num.z.numeric),
nmulti=nmulti,
random.seed=random.seed,
optim.maxattempts=optim.maxattempts,
optim.method=optim.method,
optim.reltol=optim.reltol,
optim.abstol=optim.abstol,
optim.maxit=optim.maxit,
...)
} else {
h$bw <- bw$bw.resid.fitted.w.z[j,]
}
g <- glpreg(tydat=resid.fitted,
txdat=z,
bws=h$bw,
degree=rep(p, num.z.numeric),
...)
phi <- phi + constant*g$mean
phi.mat <- cbind(phi.mat,phi)
phi.deriv.1 <- phi.deriv.1 + constant*g$grad
phi.deriv.1.list[[j]] <- phi.deriv.1
if(p >= 2) {
phi.deriv.2 <- phi.deriv.2 + constant*glpreg(tydat=resid.fitted,
txdat=z,
bws=h$bw,
degree=rep(p, num.z.numeric),
deriv=2,
...)$grad
phi.deriv.2.list[[j]] <- phi.deriv.2
}
if(!is.null(zeval)) {
g <- glpreg(tydat=resid.fitted,
txdat=z,
exdat=zeval,
bws=h$bw,
degree=rep(p, num.z.numeric),
...)
phi.eval <- phi.eval + constant*g$mean
phi.eval.mat <- cbind(phi.eval.mat,phi.eval)
phi.deriv.eval.1 <- phi.deriv.eval.1 + constant*g$grad
phi.deriv.eval.1.list[[j]] <- phi.deriv.eval.1
if(p >= 2) {
phi.deriv.eval.2 <- phi.deriv.eval.2 + constant*glpreg(tydat=resid.fitted,
txdat=z,
exdat=zeval,
bws=h$bw,
degree=rep(p, num.z.numeric),
deriv=2,
...)$grad
phi.deriv.eval.2.list[[j]] <- phi.deriv.eval.2
}
}
bw.resid.fitted.w.z <- rbind(bw.resid.fitted.w.z,h$bw)
if(return.weights.phi) {
T.mat.adjoint <- Kmat.lp(mydata.train=data.frame(z),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) T.mat.adjoint.eval <- Kmat.lp(mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
if(p==0 || p==1) {
if(return.weights.phi.deriv.1) {
T.mat.adjoint.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) T.mat.adjoint.deriv.eval.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
}
} else {
if(return.weights.phi.deriv.1) {
T.mat.adjoint.deriv.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) {
T.mat.adjoint.deriv.eval.1 <- Kmat.lp(deriv=1,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
}
}
if(return.weights.phi.deriv.2) {
T.mat.adjoint.deriv.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
if(!is.null(zeval)) {
T.mat.adjoint.deriv.eval.2 <- Kmat.lp(deriv=2,
mydata.train=data.frame(z),
mydata.eval=data.frame(z=zeval),
bws=h$bw,
p=rep(p, num.z.numeric),
...)
}
}
}
if(smooth.residuals) {
if(!is.null(zeval)) {
H.eval <- H.eval + constant*T.mat.adjoint.eval%*%(T.mat-T.mat%*%H)
if(return.weights.phi.deriv.1) H.deriv.eval.1 <- H.deriv.eval.1 + constant*T.mat.adjoint.deriv.eval.1%*%(T.mat-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.eval.2 <- H.deriv.eval.2 + constant*T.mat.adjoint.deriv.eval.2%*%(T.mat-T.mat%*%H)
}
if(return.weights.phi.deriv.1) H.deriv.1 <- H.deriv.1 + constant*T.mat.adjoint.deriv.1%*%(T.mat-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.2 <- H.deriv.2 + constant*T.mat.adjoint.deriv.2%*%(T.mat-T.mat%*%H)
H <- H + constant*T.mat.adjoint%*%(T.mat-T.mat%*%H)
} else {
if(!is.null(zeval)) {
H.eval <- H.eval + constant*T.mat.adjoint.eval%*%(T.mat.r-T.mat%*%H)
if(return.weights.phi.deriv.1) H.deriv.eval.1 <- H.deriv.eval.1 + constant*T.mat.adjoint.deriv.eval.1%*%(T.mat.r-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.eval.2 <- H.deriv.eval.2 + constant*T.mat.adjoint.deriv.eval.2%*%(T.mat.r-T.mat%*%H)
}
if(return.weights.phi.deriv.1) H.deriv.1 <- H.deriv.1 + constant*T.mat.adjoint.deriv.1%*%(T.mat.r-T.mat%*%H)
if(p>1 && return.weights.phi.deriv.2) H.deriv.2 <- H.deriv.2 + constant*T.mat.adjoint.deriv.2%*%(T.mat.r-T.mat%*%H)
H <- H + constant*T.mat.adjoint%*%(T.mat.r-T.mat%*%H)
}
phi.weights.list[[j]] <- H
if(return.weights.phi.deriv.1) phi.deriv.1.weights.list[[j]] <- H.deriv.1
if(p>1 && return.weights.phi.deriv.2) phi.deriv.2.weights.list[[j]] <- H.deriv.2
if(!is.null(zeval)) {
phi.eval.weights.list[[j]] <- H.eval
if(return.weights.phi.deriv.1) phi.deriv.eval.1.weights.list[[j]] <- H.deriv.eval.1
if(p>1 && return.weights.phi.deriv.2) phi.deriv.eval.2.weights.list[[j]] <- H.deriv.eval.2
}
}
console <- printClear(console)
console <- printPop(console)
if(is.null(bw)) console <- printPush(paste("Computing stopping rule for iteration ", j,"...",sep=""),console)
## The number of iterations in LF is asymptotically equivalent
## to 1/alpha (where alpha is the regularization parameter in
## Tikhonov). Plus the criterion function we use is increasing
## for very small number of iterations. So we need a threshold
## after which we can pretty much confidently say that the
## stopping criterion is decreasing. In Darolles et al. (2011)
## \alpha ~ O(N^(-1/(min(beta,2)+2)), where beta is the so
## called qualification of your regularization method. Take the
## worst case in which beta = 0 and then the number of
## iterations is ~ N^0.5.
if(is.null(bw)) {
if(j > round(sqrt(nrow(z))) && !is.monotone.increasing(norm.stop)) {
## If stopping rule criterion increases or we are below stopping
## tolerance then break
if(stop.on.increase && norm.stop[j] > norm.stop[j-1]) {
convergence <- "STOP_ON_INCREASE"
break()
}
if(abs(norm.stop[j-1]-norm.stop[j]) < iterate.diff.tol) {
convergence <- "ITERATE_DIFF_TOL"
break()
}
}
convergence <- "ITERATE_MAX"
}
}
## Extract minimum, and check for monotone increasing function and
## issue warning in that case. Otherwise allow for an increasing
## then decreasing (and potentially increasing thereafter) portion
## of the stopping function, ignore the initial increasing portion,
## and take the min from where the initial inflection point occurs
## to the length of norm.stop
phi.weights <- NULL
phi.deriv.1.weights <- NULL
phi.deriv.2.weights <- NULL
phi.eval.weights <- NULL
phi.deriv.eval.1.weights <- NULL
phi.deriv.eval.2.weights <- NULL
if(is.null(bw)) {
norm.value <- norm.stop/(1:length(norm.stop))
if(which.min(norm.stop) == 1 && is.monotone.increasing(norm.stop)) {
warning("Stopping rule increases monotonically (consult model$norm.stop):\nThis could be the result of an inspired initial value (unlikely)\nNote: we suggest manually choosing phi.0 and restarting (e.g. instead set `starting.values' to E[E(Y|w)|z])")
convergence <- "FAILURE_MONOTONE_INCREASING"
phi <- starting.values.phi
j <- 1
while(norm.value[j+1] > norm.value[j]) j <- j + 1
j <- j-1 + which.min(norm.value[j:length(norm.value)])
phi <- phi.mat[,j]
if(p>0) phi.deriv.1 <- phi.deriv.1.list[[j]]
if(p>=2) phi.deriv.2 <- phi.deriv.2.list[[j]]
if(return.weights.phi) phi.weights <- phi.weights.list[[j]]
if(return.weights.phi.deriv.1) phi.deriv.1.weights <- phi.deriv.1.weights.list[[j]]
if(p>=2 && return.weights.phi.deriv.2) phi.deriv.2.weights <- phi.deriv.2.weights.list[[j]]
if(!is.null(zeval)) {
phi.eval <- phi.eval.mat[,j]
if(p>0) phi.deriv.eval.1 <- phi.deriv.eval.1.list[[j]]
if(p>=2) phi.deriv.eval.2 <- phi.deriv.eval.2.list[[j]]
if(return.weights.phi) phi.eval.weights <- phi.eval.weights.list[[j]]
if(return.weights.phi.deriv.1) phi.deriv.eval.1.weights <- phi.deriv.eval.1.weights.list[[j]]
if(p>=2 && return.weights.phi.deriv.2) phi.deriv.eval.2.weights <- phi.deriv.eval.2.weights.list[[j]]
}
} else {
## Ignore the initial increasing portion, take the min to the
## right of where the initial inflection point occurs
j <- 1
while(norm.stop[j+1] > norm.stop[j]) j <- j + 1
j <- j-1 + which.min(norm.stop[j:length(norm.stop)])
phi <- phi.mat[,j]
if(p>0) phi.deriv.1 <- phi.deriv.1.list[[j]]
if(p>=2) phi.deriv.2 <- phi.deriv.2.list[[j]]
if(return.weights.phi) phi.weights <- phi.weights.list[[j]]
if(return.weights.phi.deriv.1) phi.deriv.1.weights <- phi.deriv.1.weights.list[[j]]
if(p>=2 && return.weights.phi.deriv.2) phi.deriv.2.weights <- phi.deriv.2.weights.list[[j]]
if(!is.null(zeval)) {
phi.eval <- phi.eval.mat[,j]
if(p>0) phi.deriv.eval.1 <- phi.deriv.eval.1.list[[j]]
if(p>=2) phi.deriv.eval.2 <- phi.deriv.eval.2.list[[j]]
if(return.weights.phi) phi.eval.weights <- phi.eval.weights.list[[j]]
if(return.weights.phi.deriv.1) phi.deriv.eval.1.weights <- phi.deriv.eval.1.weights.list[[j]]
if(p>=2 && return.weights.phi.deriv.2) phi.deriv.eval.2.weights <- phi.deriv.eval.2.weights.list[[j]]
}
}
if(j == iterate.max) warning("iterate.max reached: increase iterate.max or inspect norm.stop vector")
} else {
## bw passed in, set j to norm.index, push out weights etc.
j <- bw$norm.index
phi <- phi.mat[,j]
if(p>0) phi.deriv.1 <- phi.deriv.1.list[[j]]
if(p>=2) phi.deriv.2 <- phi.deriv.2.list[[j]]
if(return.weights.phi) phi.weights <- phi.weights.list[[j]]
if(return.weights.phi.deriv.1) phi.deriv.1.weights <- phi.deriv.1.weights.list[[j]]
if(p>=2 && return.weights.phi.deriv.2) phi.deriv.2.weights <- phi.deriv.2.weights.list[[j]]
if(!is.null(zeval)) {
phi.eval <- phi.eval.mat[,j]
if(p>0) phi.deriv.eval.1 <- phi.deriv.eval.1.list[[j]]
if(p>=2) phi.deriv.eval.2 <- phi.deriv.eval.2.list[[j]]
if(return.weights.phi) phi.eval.weights <- phi.eval.weights.list[[j]]
if(return.weights.phi.deriv.1) phi.deriv.eval.1.weights <- phi.deriv.eval.1.weights.list[[j]]
if(p>=2 && return.weights.phi.deriv.2) phi.deriv.eval.2.weights <- phi.deriv.eval.2.weights.list[[j]]
}
norm.value <- NULL
norm.stop <- NULL
convergence <- NULL
}
console <- printClear(console)
console <- printPop(console)
return(list(phi=phi,
phi.mat=phi.mat,
phi.deriv.1=as.matrix(phi.deriv.1),
phi.deriv.2=if(!is.null(phi.deriv.2)){as.matrix(phi.deriv.2)}else{NULL},
phi.weights=phi.weights,
phi.deriv.1.weights=phi.deriv.1.weights,
phi.deriv.2.weights=phi.deriv.2.weights,
phi.eval=phi.eval,
phi.eval.mat=phi.eval.mat,
phi.deriv.eval.1=if(!is.null(phi.deriv.eval.1)){as.matrix(phi.deriv.eval.1)}else{NULL},
phi.deriv.eval.2=if(!is.null(phi.deriv.eval.2)){as.matrix(phi.deriv.eval.2)}else{NULL},
phi.eval.weights=phi.eval.weights,
phi.deriv.eval.1.weights=phi.deriv.eval.1.weights,
phi.deriv.eval.2.weights=phi.deriv.eval.2.weights,
norm.index=j,
norm.stop=norm.stop,
norm.value=norm.value,
convergence=convergence,
starting.values.phi=starting.values.phi,
return.weights.phi=return.weights.phi,
bw.E.y.w=bw.E.y.w,
bw.E.y.z=bw.E.y.z,
bw.resid.w=as.matrix(bw.resid.w),
bw.resid.fitted.w.z=as.matrix(bw.resid.fitted.w.z)))
}
}
|
015c57255ef967741470bf659b79913f769926cd
|
071fe414fd73885db2ddd77f51ad47ce9acf751f
|
/src/R/nvidia.r
|
da3d418ad77b3d186bfa5ab8b17496618af0885f
|
[] |
no_license
|
perigee/lestrefles
|
9336180e9f5a1cf3ef8f990d2cc8b6f5e19f5b16
|
509b6ba9efd65f690cc3c2f07b546f22a61158a3
|
refs/heads/master
| 2020-04-28T09:30:08.025767
| 2013-06-03T23:26:18
| 2013-06-03T23:26:18
| 2,107,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,870
|
r
|
nvidia.r
|
# Chart daily stock data pulled from Yahoo Finance via quantmod.
#
# The MySQL ticker-lookup code is kept below (commented out) in case the
# symbol list needs to come from the `stockdb` database again.
#require('RMySQL')
# quantmod is a hard dependency: use library() so a missing install fails
# loudly instead of require() silently returning FALSE.
library(quantmod)
# set timezone
#Get db object
#con <- dbConnect(MySQL(), user="stockuser", password="stockpwd", dbname="stockdb", host="localhost")
#sql <- "select symbol from nasdaq where industry='Semiconductors' "
#rs <- dbSendQuery(con, sql)
#tickers <- fetch(rs)
#Close database
#huh <- dbHasCompleted(rs)
#dbClearResult(rs)
#dbDisconnect(con)
#for(i in 1:length(tickers[,1])) {
#  ticker=tickers[i,1]
# Download, split/dividend-adjust, and chart daily OHLC data for one ticker.
#
# Args:
#   name:       Ticker symbol understood by Yahoo Finance (e.g. "NVDA").
#   begin_date: First date of history to download (default "1990-01-01",
#               matching the original hard-coded value).
#   subset:     xts window string passed to chartSeries (default
#               "last 10 months", matching the original).
#
# Returns: the last rows of the adjusted series (value of tail(data)),
#          as in the original where tail(data) was the final expression.
# Side effect: draws a chartSeries plot on the active graphics device.
plotData <- function(name, begin_date = "1990-01-01",
                     subset = "last 10 months") {
  # quantmod's Yahoo download is date-based; force GMT so "today" is not
  # shifted by the local timezone.  Restore the caller's TZ on exit
  # instead of leaking the change into the session (original leaked it).
  # NOTE(review): if TZ was unset, this restores it to "" — verify that is
  # acceptable for downstream code.
  old_tz <- Sys.getenv("TZ")
  on.exit(Sys.setenv(TZ = old_tz), add = TRUE)
  Sys.setenv(TZ = "GMT")

  ticker <- name

  # Historical daily bars from begin_date up to today.
  data <- getSymbols(ticker, from = begin_date, to = Sys.Date(),
                     auto.assign = FALSE)

  # Append today's end-of-day quote so the chart includes the latest close.
  data.quote <- getQuote(ticker, what = yahooQuote.EOD)
  xts.quote <- xts(data.quote[, -1], as.Date(data.quote[, 1]))  # Date index
  xts.quote$Adjusted <- xts.quote[, "Close"]  # quote has no Adjusted column
  data <- rbind(data, xts.quote)

  # Back-adjust OHLC for splits/dividends using the Adjusted column.
  data <- adjustOHLC(data, use.Adjusted = TRUE)

  chartSeries(data, name = ticker, subset = subset,
              TA = "addBBands();addVo();addMACD();addVolatility();addSMA(n=7)")

  # Return the most recent rows so callers can eyeball the latest data.
  # (A second, earlier tail(data) in the original was dead code — its
  # value was discarded — and has been removed.)
  tail(data)
}
# 1 1 2 3 5 8 13 21 34 55 89 144
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.