blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24611169e637ebb0b0847600777ca80e8452b9ab
|
d2dc4a6e3b8ff93e43f267690dcaa425def5fb3f
|
/managers.R
|
382b769ca2736c727e33ccc5fc23647fe89fd059
|
[] |
no_license
|
priyankasahoolyit/DS_PRACTICALS_L00151175
|
f7111ec3fb97d42d1826ffbf6de3e39bd6648810
|
7230b7d81aaf6e933ff2bd62761d0afd15286133
|
refs/heads/main
| 2023-03-21T07:05:14.705596
| 2021-03-16T14:28:39
| 2021-03-16T14:28:39
| 346,652,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,096
|
r
|
managers.R
|
# managers.R -- build a small survey data frame, clean it, and derive an
# ordered age-category factor. Illustrative / teaching script.

# Enter data into vectors before constructing the data frame
date_col <- c("10/15/18","10/11/18","10/21/18","10/28/18","05/01/18")
country_col <- c("US","US","IRL","IRL","IRL")
gender_col <- c("M","F","F","M","F")
# 99 is used as a sentinel for an invalid/unknown age (recoded below)
age_col <- c(32,45,25,39,99)
q1_col <- c(5, 3, 3, 3, 2)
q2_col <- c(4, 5, 5, 3, 2)
q3_col <- c(5, 2, 5, 4, 1)
q4_col <- c(5, 5, 5, NA, 2) # NA is inserted in place of missing data for this
q5_col <- c(5, 5, 2, NA, 1)
# construct a data frame using the data from all the vectors
managers_data <- data.frame(date_col,
country_col,
gender_col,
age_col,
q1_col,
q2_col,
q3_col,
q4_col,
q5_col)
managers_data
columns_names <- c("Date",
"Country",
"Gender",
"Age",
"Q1",
"Q2",
"Q3",
"Q4",
"Q5")
# Add column names to the managers_data dataframes
colnames(managers_data) <- columns_names
managers_data
# Recode incorrect 'age' to NA
managers_data$Age[managers_data$Age == 99] <- NA
managers_data
# 2 options to create a new variable
# 1 - create a new vector and store the logical check in it
# 2 - create the new var when doing the logical check
managers_data$age_cat[managers_data$Age >= 45] <- "Elder"
managers_data$age_cat[managers_data$Age >= 26 & managers_data$Age <= 44 ] <- "Middle Aged"
managers_data$age_cat[managers_data$Age <= 25] <- "Young"
# NOTE(review): missing ages (including the 99 just recoded to NA) are
# labelled "Elder" here, which silently re-interprets unknown data --
# confirm this is the intended treatment.
managers_data$age_cat[is.na(managers_data$Age)] <- "Elder"
managers_data
# Recode age_cat so that it is ordinal and factored
# with the order young , middle aged, elder
Age_cat <- factor(managers_data$age_cat, order = TRUE, levels = c("Young", "Middle Aged", "Elder" ))
Age_cat
# replace managers_data age_cat variable with
# the factored variable
managers_data$age_cat <- Age_cat
managers_data
# Look at the structure of the dataframe
str(managers_data)
|
88ac57fae373a01490f8dbc87cb75acb0685164f
|
6bfb407d6dbc79e672a3886eab30a38012888d71
|
/man/compPosts.Rd
|
dd3b55cc974e5e7e55ac1b74ec4b56bdc88d9c5c
|
[] |
no_license
|
hferg/bayestraitr
|
ca438050b7565e0d6bf752ad8a9a152bd568a0b0
|
308253d82d02ec0c51414b4608c74fbe0acd528d
|
refs/heads/master
| 2021-10-22T14:55:55.160296
| 2019-03-11T15:15:45
| 2019-03-11T15:15:45
| 107,663,993
| 0
| 0
| null | 2019-03-11T15:16:23
| 2017-10-20T10:16:43
|
R
|
UTF-8
|
R
| false
| true
| 1,427
|
rd
|
compPosts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compPosts.R
\name{compPosts}
\alias{compPosts}
\title{compPosts}
\usage{
compPosts(logs, pars, thinning = 1, burnin = 0, alpha = 0.5,
fontsize = 3)
}
\arguments{
\item{logs}{The name of the trait data file on which BayesTraits was run,
or a vector of >1 names if comparing between >1 logs.}
\item{pars}{A vector containing the names of two parameters to be compared.
Must be a single parameter if comparing between two logs. To see which
parameters are present in a posterior use \link[bayestraitr]{getParams}.}
\item{thinning}{Thinning parameter for the posterior - defaults to 1
(all samples). 2 uses every second sample, 3 every third and so on.}
\item{burnin}{The number of generations to remove from the start of the
chain as burnin. Use if the chain has not reached convergence before sampling
began. Useful if the burnin parameter for the analysis itself was not long
enough.}

\item{alpha}{Transparency of the plotted histograms - defaults to 0.5.}

\item{fontsize}{Font size used for plot labels - defaults to 3.}
}
\description{
Compares histograms of one or more parameters from the same output file, or
one parameter from one or more output files.
Generates a plot showing the distributions of the same parameter from two
posterior samples from BayesTraits MCMC OR a plot showing the distribution
of two different parameters from a single BayesTraits posterior.
}
\examples{
compPosts("cool-data.txt", c("Lh", "Alpha 1"))
compPosts("cool-data.txt", params[c(1:2)])
}
|
fdfe38cba3d503a02279bfb0bbb52b09f96f882e
|
f70a41e996e76adbe3bb29f40c47fe7046e9b7d3
|
/Interns/ClaireMarie/model_improvement.R
|
73b23b9fd294db680d5c00999f5653157ae491f8
|
[] |
no_license
|
DrJonYearsley/Phenograss
|
5c541e25fafff1ee6d1f746f5a4e40129b1abd2a
|
d3cce1fa799939f6f84201561a7b08907c56ea7f
|
refs/heads/master
| 2022-08-12T10:50:32.685141
| 2022-07-15T14:37:31
| 2022-07-15T14:37:31
| 221,275,083
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,931
|
r
|
model_improvement.R
|
#
# Creation of the dataset of all covariates for the improvement model
#
# Claire-Marie Alla
# 15/07/2021 - 30/07/2021
# ++++++++++++++++++++++++++++++++++++++++++++++
rm(list=ls())
# Spatial (sf/stars/raster), geostatistics (gstat) and modelling packages
library(sf)
library(stars)
library(segmented)
library(ggplot2)
library(nlme)
library(tidyr)
library(viridisLite)
library(gstat)
library(dplyr)
library(rgdal)
library(emmeans)
library(raster)
# Input/output directories (paths local to the author's machine)
elevation_dir = '~/Stage/Data_created/elevation_SRTM3_square'
aspect_dir = '~/Stage/Data_created/elevation_SRTM3_square/exposition_squares'
modisPath = '~/Stage/Data/MODIS'
soilPath = '~/Stage/Data/soil_data/Soils_IE_WetDry'
climatePath = '~/Stage/Data/Climate'
dataDir = '~/Stage/Data/MODIS/Phenophase_estimates'
outputDir = '~/Stage/Data_created'
# Prefix of the per-square/per-year phenology .RData files read below
input_file_preffix = 'phenology'
# Import data --------
#squaresList = c(1:9, 13:21)
# 2 first months are missing in temperature data
#years = c(2013:2017)
# Test 1
# squaresList = c(20)
# years = c(2013:2017)
# Test 2
squaresList = c(1:9, 13:21)
years = c(2013)
# Filename segmented data
# Load every square/year file and stack the contents. load() is expected
# to create `output_smoothed` and `d_final` in the workspace (assumption
# based on usage below -- confirm against the .RData files).
# NOTE: rbind-in-loop grows the objects on every iteration; acceptable
# here for the small number of squares/years.
for (i in 1:length(squaresList)) {
for (y in 1:length(years)) {
filename = paste0(input_file_preffix,'_square_',squaresList[i],'_',years[y],'.RData')
load(file.path(dataDir,filename))
output_smoothed$square = squaresList[i]
if (y==1 & i==1) {
phenology = output_smoothed
d_pheno = d_final
} else {
phenology = rbind(phenology,
output_smoothed)
d_pheno = rbind(d_pheno,
d_final)
}
}
}
# Keep only phase 1 observations
phenology = subset(phenology, phenology$phase == 1)
# Create a wide version of phenology
phenology_wide = pivot_wider(data=subset(phenology, warning==FALSE),
id_cols = c('pixelID','year', 'x_MODIS','y_MODIS','x_ITM','y_ITM','square'),
names_from = 'phase',
names_prefix = 'phase',
values_from = c(t,slope),
values_fn = mean)
# Centre the x and y coordinates
phenology_wide$x_ITM_centre = phenology_wide$x_ITM - mean(phenology_wide$x_ITM, na.rm=TRUE)
phenology_wide$y_ITM_centre = phenology_wide$y_ITM - mean(phenology_wide$y_ITM, na.rm=TRUE)
# Free the long-format table; only phenology_wide and d_pheno are used below
rm(list='phenology')
# Add environmental covariates
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Read in MODIS grid
modis = read_stars(file.path(modisPath, 'modis_grid_ireland.tif'))
crs_modis = st_crs(modis)
IR = st_read('~/Stage/Data/Quadrats/country.shp')
squares = st_read('~/Stage/Data/Quadrats/agriclimate_quadrats_Ireland.shp')
squares_modis = st_transform(squares, crs_modis) # Make sure CRS is identical (required for gstat interpolation)
crs_squares = st_crs(squares)
# 1. Retrieve slope data
# Save modis square - not find better to use a raster function (not available in stars)
fname = paste0("all_squares_elevation.tif")
# Derive slope (degrees) from the elevation raster with raster::terrain
crop_slope = raster(file.path(outputDir,fname))
crop_slope = terrain(crop_slope, opt='slope', unit='degrees')
# phenology_wide$x_MODIS = sapply(phenology_wide$geometry,"[[",1)
# phenology_wide$y_MODIS = sapply(phenology_wide$geometry,"[[",2)
# Sample the slope raster at each pixel's MODIS coordinates
phenology_wide$slope = as.numeric(extract(crop_slope,
SpatialPoints(data.frame(phenology_wide$x_MODIS,
phenology_wide$y_MODIS))))
# Create a geometry column for phenology wide
phenology_wide = st_as_sf(phenology_wide, coords = c("x_MODIS", "y_MODIS"),
crs = crs_modis)
d_pheno = st_as_sf(d_pheno, coords = c("x_MODIS", "y_MODIS"),
crs = crs_modis)
# Convert dates to a date object for temperature and precipitation
d_pheno$date = as.Date(as.character(d_pheno$date), format="%Y-%m-%d")
# 2. Retrieve elevation data
# ++++++++++++++++++++++++++++++++++++++++++++++++++++
elevation = read_stars(file.path(outputDir, 'all_squares_elevation.tif'))
st_crs(elevation) = crs_modis
phenology_wide$elevation = as.numeric(st_extract(elevation, st_sfc(phenology_wide$geometry, crs=crs_modis))[[1]])
# 3. Retrieve aspect slope data
aspect_slope = read_stars(file.path(outputDir, 'all_squares_aspect_slope.tif'))
st_crs(aspect_slope) = crs_modis
phenology_wide$aspect_slope = as.numeric(st_extract(aspect_slope, st_sfc(phenology_wide$geometry, crs=crs_modis))[[1]])
# Bin aspect (degrees) into 8 compass classes; intervals jointly cover 0-360
phenology_wide$class_aspect[phenology_wide$aspect_slope <= 20] = 'N'
phenology_wide$class_aspect[phenology_wide$aspect_slope >= 340] = 'N'
phenology_wide$class_aspect[phenology_wide$aspect_slope > 20 & phenology_wide$aspect_slope < 70] = 'NE'
phenology_wide$class_aspect[phenology_wide$aspect_slope >= 70 & phenology_wide$aspect_slope <= 110] = 'E'
phenology_wide$class_aspect[phenology_wide$aspect_slope > 110 & phenology_wide$aspect_slope < 160] = 'SE'
phenology_wide$class_aspect[phenology_wide$aspect_slope >= 160 & phenology_wide$aspect_slope <= 200] = 'S'
phenology_wide$class_aspect[phenology_wide$aspect_slope > 200 & phenology_wide$aspect_slope < 250] = 'SW'
phenology_wide$class_aspect[phenology_wide$aspect_slope >= 250 & phenology_wide$aspect_slope <= 290] = 'W'
phenology_wide$class_aspect[phenology_wide$aspect_slope > 290 & phenology_wide$aspect_slope < 340] = 'NW'
# 4. Retrieve soil data IFS
# ++++++++++++++++++++++++++++++++++++++++++++++
soil_data = st_read(file.path(soilPath,"Soils_IE_WetDry.shp"))
# Convert soil data in MODIS crs
soil_data_modis = st_transform(soil_data, crs = crs_modis)
# Spatial join attaches the soil CATEGORY of the polygon containing each pixel
phenology_wide = st_join(st_as_sf(phenology_wide, crs = crs_modis),
soil_data_modis["CATEGORY"])
# Retrieve MERA data (loads soilmoisture_mera / temperature_mera /
# precipitation_mera -- assumption based on usage; confirm file contents)
filename = paste0('soilmoisture_2012_to_2017.RData')
load(file.path(climatePath,filename))
filename2 = paste0('temperature_degrees_2012_to_2017.RData')
load(file.path(climatePath,filename2))
temperature_mera$date = as.Date(as.character(temperature_mera$validityDate),
format="%Y%m%d")
filename3 = paste0('precipitation_2012_to_2017.RData')
load(file.path(climatePath,filename3))
precipitation_mera$date = as.Date(as.character(precipitation_mera$validityDate),
format="%Y%m%d")
# Function : Create variogram
# Ordinary-kriging interpolation of one day's MERA station values onto the
# MODIS grid of the current square.
# NOTE(review): relies on globals from the surrounding script -- `modis`,
# `squares_modis`, `crs_modis` and the loop index `s` (current square).
# Returns an empty stars() object when the empirical variogram contains
# zero-distance pairs; callers treat that as "no interpolation available".
variogramme <- function(mera_square) {
# Create a geometry column
mera_square_sf = st_as_sf(mera_square, coords = c("Longitude", "Latitude"),
crs = st_crs("EPSG:4326"))
# Transform into the modis CRS
mera_square_modis = st_transform(mera_square_sf, crs = crs_modis)
# Collapse duplicated locations: group identical geometries and average them
mera_square_modis$grp = sapply(st_equals(mera_square_modis$geometry), max)
test = mera_square_modis %>% group_by(grp) %>% summarize(Value = mean(Value,na.rm=T))
# Target grid: MODIS cells clipped to the current square `s`
crop_square = st_crop(modis, squares_modis[s,,])
# Method 2:
# Interpolate using a model variogram (try a linear variogram)
# Look at the empirical variogram
v_emp = variogram(Value ~ 1, data = test)
if (length(which(v_emp$dist == 0.0)) != 0) {
return (stars())
}
else {
# Fit variogram model (try linear) use show.vgm() to display all possible models
v_mod = fit.variogram(v_emp, model = vgm(NA,"Lin",0))
# Now do some ordinary krigging to interpolate
vario = gstat::krige(formula = Value ~ 1,
locations=test,
model=v_mod,
newdata=crop_square)
return(vario)
}
}
# For each square: krige daily MERA temperature/precipitation onto each
# pixel, then accumulate them (and soil moisture) into phenology_wide.
for (s in squaresList) {
print(s)
pixel_list = unique(d_pheno$pixelID[d_pheno$square == s])
nPixel = length(pixel_list)
for (y in years) {
print(y)
# Subset mera data in the current year
tmp_temperature_mera = subset(temperature_mera, temperature_mera$square == s
& temperature_mera$date >= paste0(y, '-01-01')
& temperature_mera$date < paste0(y, '-03-05'))
tmp_precipitation_mera = subset(precipitation_mera, precipitation_mera$square == s
& precipitation_mera$date >= paste0(y, '-01-01')
& precipitation_mera$date < paste0(y, '-03-05'))
# Days of year 1..59 that actually occur in the phenology data
days = unique(d_pheno$doy[d_pheno$year == y & d_pheno$square == s
& d_pheno$doy > 0
& d_pheno$doy < 60])
days = sort(days)
for (day in days) {
# Retrieve the calendar date for this day-of-year
d = which(d_pheno$square == s & d_pheno$year == y
& d_pheno$doy == day)[1]
# 6. Retrieve temperature cumulated MERA data
# ++++++++++++++++++++++++++++++++++++++++++++++
mera_square_temp = subset(tmp_temperature_mera,
tmp_temperature_mera$date == d_pheno$date[d])
g_mod2 = variogramme(mera_square_temp)
# 7. Retrieve precipitation cumulated MERA data
# ++++++++++++++++++++++++++++++++++++++++++++++
mera_square_precip = subset(tmp_precipitation_mera,
tmp_precipitation_mera$date == d_pheno$date[d])
g_mod3 = variogramme(mera_square_precip)
for (i in pixel_list) {
# Retrieve the index of one of the pixels
ind = which(d_pheno$square == s & d_pheno$year == y
& d_pheno$pixelID == i & d_pheno$doy == day)[1]
if (length(ind) != 0 & !is.na(ind)) {
if (length(g_mod2) != 0) {
d_pheno$temperature[d_pheno$square == s & d_pheno$year == y & d_pheno$pixelID == i
& d_pheno$doy == day] = as.numeric(st_extract(g_mod2, d_pheno$geometry[ind])[[1]])
}
if (length(g_mod3) != 0) {
d_pheno$precipitation[d_pheno$square == s & d_pheno$year == y & d_pheno$pixelID == i
& d_pheno$doy == day] = as.numeric(st_extract(g_mod3, d_pheno$geometry[ind])[[1]])
}
}
}
}
# 5. Retrieve soil moisture MERA data
# ++++++++++++++++++++++++++++++++++++++++++++++
# NOTE(review): unlike the date subsets above, this compares the raw
# validityDate against paste0(y, '02-21') (e.g. "201302-21"), whose
# separator looks inconsistent with the "%Y%m%d" format used elsewhere.
# Confirm whether '0221'/'0301' (or Date comparison) was intended.
mera_square = subset(soilmoisture_mera, soilmoisture_mera$square == s
& (soilmoisture_mera$validityDate > paste0(y, '02-21')
& soilmoisture_mera$validityDate < paste0(y, '03-01')))
g_mod = variogramme(mera_square)
for (i in pixel_list) {
# Retrieve the index of one of the pixels
ind = which(phenology_wide$square == s & phenology_wide$year == y & phenology_wide$pixelID == i)
if (length(ind) != 0) {
if (length(g_mod) != 0) {
# Soil moisture
phenology_wide$soilmoisture[phenology_wide$square == s
& phenology_wide$year == y
& phenology_wide$pixelID == i] = as.numeric(st_extract(g_mod,phenology_wide$geometry[ind[1]]))
}
# Temperature cumulated over 5.5
# NOTE(review): `d_pheno$pixel` below relies on R's partial name
# matching to resolve to `pixelID` -- confirm and consider
# spelling the column name out in full.
phenology_wide$cumul_temp[phenology_wide$square == s
& phenology_wide$year == y
& phenology_wide$pixelID == i] = sum(d_pheno$temperature[d_pheno$square == s
& d_pheno$year == y
& d_pheno$pixel == i
& d_pheno$temperature >= 5.5], na.rm = T)
# Precipitation cumulated over 0
phenology_wide$cumul_precip0[phenology_wide$square == s
& phenology_wide$year == y
& phenology_wide$pixelID == i] = sum(d_pheno$precipitation[d_pheno$square == s
& d_pheno$year == y
& d_pheno$pixel == i], na.rm = T)
# Precipitation cumulated over 1
phenology_wide$cumul_precip1[phenology_wide$square == s
& phenology_wide$year == y
& phenology_wide$pixelID == i] = sum(d_pheno$precipitation[d_pheno$square == s
& d_pheno$year == y
& d_pheno$pixel == i
& d_pheno$precipitation > 1], na.rm = T)
# Precipitation cumulated over 5
phenology_wide$cumul_precip5[phenology_wide$square == s & phenology_wide$year == y
& phenology_wide$pixelID == i] = sum(d_pheno$precipitation[d_pheno$square == s
& d_pheno$year == y
& d_pheno$pixel == i
& d_pheno$precipitation > 5], na.rm = T)
}
}
}
}
# ++++++++++++++++++++++++++++++++++++++++++++++
#save(phenology_wide, file=paste0(dataDir, '/gls_improve_model_square1_to_21_in_2017.Rdata'))
save(phenology_wide, file=paste0(dataDir, '/gls_improve_model_square1_to_21_in_2013.Rdata'))
#save(phenology_wide, file=paste0(dataDir, '/gls_improve_model_square20_in_2013_to_2017.Rdata'))
|
04b706619be0dc42e46ba6973233fde87dddd90f
|
dbce782290364e848d8f8bc34ef80109faac7690
|
/cachematrix.R
|
ed90b3c9babf4ab433ef2bd5601ff62c21b22ebd
|
[] |
no_license
|
dizien/ProgrammingAssignment2
|
6899771866217e5f2b540c89edd2ca25c00f5fe6
|
637318b4921fdbb9d35957841534a733a03926cd
|
refs/heads/master
| 2021-01-21T08:44:13.986688
| 2014-11-17T08:09:53
| 2014-11-17T08:09:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
cachematrix.R
|
## Example usage:
## > M <- matrix(rnorm(25), nrow = 5) // Create a matrix M
## > cm <- makeCacheMatrix(M) // Create our special matrix
## > cm$get() // Return the matrix
## > cacheSolve(cm) // Return the inverse
## > cacheSolve(cm) // Call the 2nd time and cached
## // inverse is returned
## makeCacheMatrix() returns a list of functions to:
## 1. Set the value of the matrix
## 2. Get the value of the matrix
## 3. Set the value of the inverse
## 4. Get the value of the inverse
## Build a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor closures sharing the matrix `x` and
## its cached inverse:
##   set(y)          replace the stored matrix, dropping any cached inverse
##   get()           return the stored matrix
##   setinv(inverse) store a computed inverse in the cache
##   getinv()        return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # A new matrix invalidates whatever inverse was cached before
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  # Expose the four closures under their conventional names
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## Return the inverse of the special matrix produced by makeCacheMatrix().
## The first call computes the inverse via solve() and caches it; later
## calls return the cached value without recomputation.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Early exit: a cached inverse means no work to do
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute, store, and return the inverse
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
dbfe3ae2c7a55f91ddbe801d4243d26cd2e7c157
|
c223b48eaa8a5ad5394634b0cbc7c959dd7472db
|
/cachematrix.R
|
e23604c8ae4a8e029d0eb7e5c6845693637c5118
|
[] |
no_license
|
dcardonav/ProgrammingAssignment2
|
5e7c8aa589cc2cf8d797c3881069bdd372675d44
|
f2732e48b0dbf661d1baa665137436ffc8e766c3
|
refs/heads/master
| 2020-02-26T14:27:07.787983
| 2015-01-25T21:53:04
| 2015-01-25T21:53:04
| 29,831,831
| 0
| 0
| null | 2015-01-25T21:24:21
| 2015-01-25T21:24:20
| null |
UTF-8
|
R
| false
| false
| 1,134
|
r
|
cachematrix.R
|
## This code follows the skeleton of the example, only that it uses the function
## solve to calculate the inverse of the matrix, also added a message indicating when
## was the matrix initialized
## This functions creates a closure that contains the data about the matrix,
## in its first call, it doesn't store the inverse, note that when a new
## matrix is set, the cached inverse is deleted
## Create a closure-based matrix container that caches its inverse.
## Exposes set/get/setinv/getinv; storing a new matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  ## inv holds the cached inverse (NULL until setinv() is called)
  inv <- NULL
  set <- function(y) {
    # Fixed typo ("Initalizing") and switched print() -> message():
    # status text belongs on the message stream, not mixed into stdout.
    message("Initializing matrix...")
    x <<- y
    inv <<- NULL  # new matrix invalidates the cached inverse
  }
  ## Getter for the matrix
  get <- function() x
  ## Setter/getter for the cached inverse
  setinv <- function(i) inv <<- i
  getinv <- function() inv
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function takes the previous closure as an argument and
## checks if the inverse was already calculated, if so, it returns;
## if the inverse isn't cached it solves the matrix and stores the inverse
## Compute (or fetch from cache) the inverse of a makeCacheMatrix() object.
## `...` is forwarded to solve(). Status text now goes through message()
## instead of print() so it does not pollute stdout/results.
cacheSolve <- function(x, ...) {
  i <- x$getinv()
  # Cache hit: return the previously computed inverse
  if (!is.null(i)) {
    message("Getting cached inverse...")
    return(i)
  }
  # Cache miss: solve, store in the closure's cache, and return
  message("Solving matrix and caching...")
  data <- x$get()
  i <- solve(data, ...)
  x$setinv(i)
  i
}
|
586ff6453b711c102a54cf7805bb91f9093516ac
|
2b66e8403221c00c9280387c0c23b8e0e49ec452
|
/scripts/shannon_calc.R
|
0b57d43027a0ac505e71a2964dfb5da55b9fd0b6
|
[
"MIT"
] |
permissive
|
nick-youngblut/SIPSimR
|
b143787564df4b3ae11d7050a82d0510eeabea66
|
087c769df8ecb970ae052954a404350f5bdb92b4
|
refs/heads/master
| 2021-07-17T15:19:33.367198
| 2018-03-19T19:26:36
| 2018-03-19T19:26:36
| 96,668,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,650
|
r
|
shannon_calc.R
|
#!/usr/bin/env Rscript
# shannon_calc.R -- compute the Shannon diversity index for each gradient
# fraction community; the result table is written to STDOUT.
# init
rm(list=ls())
# opt parsing (the usage string below is parsed by docopt at runtime)
suppressPackageStartupMessages(library(docopt))
'usage: shannon_calc.r [options] <data>
options:
<data> Either a SIPSim OTU table or a list
of phyloseq objects.
-l The `data` & `data_preFrac` object
are lists of phyloseq objects.
-h Help
description:
Calculate the Shannon index for each gradient fraction community.
Input should either be 1) a SIPSim OTU table
from a single SIPSim simulation 2) a list of phyloseq
objects (eg., communities from multiple days).
The output is written to STDOUT.
' -> doc
opts = docopt(doc)
# packages (attached quietly; character.only needed for looped library())
pkgs <- c('dplyr', 'tidyr', 'phyloseq')
for(x in pkgs){
suppressPackageStartupMessages(library(x, character.only=TRUE))
}
## Buoyant-density (BD) window implied by the G+C content cutoffs.
## BD is a linear function of %G+C (BD = GC/100 * 0.098 + 1.66); the
## upper bound is additionally widened by the maximum 13C shift in BD.
## Returns a named numeric vector c(min_BD = ..., max_BD = ...).
min_max_BD <- function() {
  gc_bounds <- c(13.5, 80)    # min / max %G+C cutoffs
  shift_13C <- 0.036          # max 13C shift in BD units
  bd <- gc_bounds / 100 * 0.098 + 1.66
  c('min_BD' = bd[1], 'max_BD' = bd[2] + shift_13C)
}
# Read a SIPSim OTU table (tab-delimited) and standardize the column
# names to OTU / Buoyant_density / abundance / sample (library kept as-is).
load_simulated = function(filename){
sim = read.delim(filename, sep='\t') %>%
select(library, fraction, taxon, BD_mid, rel_abund) %>%
rename('OTU' = taxon,
'Buoyant_density' = BD_mid,
'abundance' = rel_abund,
'sample' = fraction)
return(sim)
}
## Coerce the OTU table of a phyloseq object to a plain data.frame.
otu2df = function(x){
  as.data.frame(otu_table(x))
}
## Coerce the sample metadata of a phyloseq object to a plain data.frame.
sample2df = function(x){
  as.data.frame(sample_data(x))
}
# Merge a list of phyloseq objects into one wide data.frame of OTU
# abundances: the first element seeds the table, the rest are left-joined
# on the OTU id, and missing OTUs are filled with 0.
# NOTE(review): with a single-element list, samps[2:length(samps)] yields
# NA and the loop would fail -- confirm inputs always have >= 2 objects.
emp2df = function(x){
# convert to dataframes
tmp = lapply(x, otu2df)
samps = names(tmp)
emp = tmp[[samps[1]]]
emp$OTU = rownames(emp)
# Join the remaining objects one at a time on the OTU column
for (x in samps[2:length(samps)]){
y = tmp[[x]]
y$OTU = rownames(y)
emp = left_join(emp, y, c('OTU' = 'OTU'))
}
tmp = NULL
# OTUs absent from a sample get zero abundance
emp[is.na(emp)] = 0
return(emp)
}
# Load a saved list of phyloseq objects (.rds), flatten to long format,
# and standardize columns to library/sample/OTU/Buoyant_density/abundance.
# (Function name "emperical" [sic] is kept -- it is the public interface.)
load_emperical = function(filename){
# import object
emp = readRDS(filename)
# getting all sample data (row-bound metadata across all objects)
emp_sample_data = do.call(rbind, lapply(emp, sample2df))
# converting to dataframe (wide OTU-by-sample table)
emp = emp2df(emp)
# format dataframe: long format, one row per OTU x '12C-Con' sample
emp = emp %>%
gather(sample, abundance, starts_with('12C-Con'))
emp = inner_join(emp, emp_sample_data, c('sample' = 'X.Sample')) %>%
mutate(Day = Day %>% as.character) %>%
group_by(sample) %>%
ungroup() %>%
select(Day, sample, OTU, Buoyant_density, abundance) %>%
rename('library' = Day) # library by day
return(emp)
}
#-- main --#
# Buoyant-density window used to trim fractions before reporting
BD = min_max_BD()
# loading data: -l flag selects the phyloseq-list loader
if(opts[['-l']] == TRUE){
df = load_emperical(opts[['<data>']])
} else {
df = load_simulated(opts[['<data>']])
}
# Shannon index (H' = -sum(p_i * ln p_i)) per group from a long-format table.
#   abundance_col: column name (string) holding taxon abundances
#   ...          : grouping column names (strings), e.g. 'library', 'sample'
# NOTE(review): uses the long-deprecated SE verbs group_by_/mutate_/distinct_
# and a string-built expression; works with old dplyr but should eventually
# migrate to across()/.data[[...]] -- left untouched to preserve behavior.
shannon.long = function(df, abundance_col, ...){
# calculating shannon diversity index from a 'long' formated table
## community_col = name of column defining communities
## abundance_col = name of column defining taxon abundances
df = df %>% as.data.frame
# Expression string computing relative abundance within each group
cmd = paste0(abundance_col, '/sum(', abundance_col, ')')
df.s = df %>%
group_by_(...) %>%
mutate_(REL_abundance = cmd) %>%
mutate(pi__ln_pi = REL_abundance * log(REL_abundance),
shannon = -sum(pi__ln_pi, na.rm=TRUE)) %>%
ungroup() %>%
dplyr::select(-REL_abundance, -pi__ln_pi) %>%
distinct_(...)
return(df.s)
}
# calculating shannon index per library x sample, keeping only fractions
# whose buoyant density falls inside the [min_BD, max_BD] window
df.shan = shannon.long(df, 'abundance', 'library', 'sample') %>%
filter(Buoyant_density >= BD[1], Buoyant_density <= BD[2]) %>%
select(-abundance)
# writing the tab-delimited result to STDOUT
write.table(df.shan, stdout(), quote=FALSE, row.names=FALSE, sep='\t')
|
d9a68ef4a64dd7b912a440a31c290ae59d29da06
|
d6bfc60c25935e478c84cf57f3623154e2fb93ae
|
/gvis_for_jake.R
|
00f836196e28ac4359b117d70592194e41899ea4
|
[] |
no_license
|
joebrew/misc
|
64456c6080090f6caeab5f06a841b32e5abbe486
|
8d632607b05b6e5b6dfee7c408776aa6aa368cb0
|
refs/heads/master
| 2016-09-05T10:26:47.411972
| 2015-09-04T11:13:44
| 2015-09-04T11:13:44
| 23,672,319
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,357
|
r
|
gvis_for_jake.R
|
# gvis_for_jake.R -- download a public Google Sheet of school immunization
# data and render it as a googleVis motion chart in the default browser.
#####
# ATTACH (AND INSTALL, IF REQUIRED) PACKAGES
#####
library(RCurl)
library(googleVis)
#####
# READ IN ALACHUA COUNTY'S "CONTROL FLU" DATA, AND NAME DAT
#####
my_link <- "https://docs.google.com/spreadsheets/d/1icEDpqkJVNuvGLV6GcULuvfVK0healPyPep3enHkceE/export?gid=0&format=csv"
# Point RCurl at its bundled CA certificates so the HTTPS download verifies
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
my_csv <- getURL(my_link)
dat <- read.csv(textConnection(my_csv)); rm(my_csv, my_link)
#####
# EXPLORE THE DATA
#####
head(dat)
summary(dat)
plot(dat)
# Note that this is the kind of format you'll need:
# - some sort of id column {in this case "id", which is equivalent to school}
# - some sort of time column (year, day, date, etc.) {in this case, year}
# - some sort of value column {in this case, immunization rate}
#####
# SET UP THE PARAMETERS FOR THE MOTION CHART
# AND NAME THE RESULTING OBJECT "X"
#####
x <- gvisMotionChart(data = dat,
idvar="id",
timevar="year",
xvar = "year", # or frLunch13 - Percent of kids on free/reduced lunch
yvar = "immRate", # Immunization rate
colorvar = "type", # elem / middle / high
sizevar = "totMem") # total number of enrolled students
#####
# PLOT THE MOTION CHART IN YOUR DEFAULT BROWSER
#####
plot(x)
|
8b76953c721c18b3741879b3341daeb7f8ae1bee
|
486deb2a88424a9dd6e4761af999263a6fa316b8
|
/man/filter.dead.ends.Rd
|
b2655dabca599a82fc8552d54344e70a40df0997
|
[] |
no_license
|
cran/eRTG3D
|
4ba5c89aba6d9f2a2500b6514de7a8fd157b7561
|
785c339e125caad743cc40502d58bfe15d53d24c
|
refs/heads/master
| 2022-03-24T18:26:26.296102
| 2022-02-25T11:10:05
| 2022-02-25T11:10:05
| 209,614,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 449
|
rd
|
filter.dead.ends.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrapper3D.R
\name{filter.dead.ends}
\alias{filter.dead.ends}
\title{Remove dead ends}
\usage{
filter.dead.ends(cerwList)
}
\arguments{
\item{cerwList}{list of data.frames and NULL entries}
}
\value{
A list that is only containing valid tracks.
}
\description{
Function to filter out tracks that have found a dead end
}
\examples{
filter.dead.ends(list(niclas, niclas))
}
|
b3a0b9f2dcf10db45dfdfebcd71ee0e2725032a8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/psychmeta/examples/estimate_var_artifacts.Rd.R
|
b545797afb9e2861597ec1964bc3345435fd3666
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
estimate_var_artifacts.Rd.R
|
# Extracted example calls for psychmeta::estimate_var_* (Taylor-series
# approximations of artifact-distribution variances), auto-generated
# from the package's Rd examples.
library(psychmeta)
### Name: estimate_var_artifacts
### Title: Taylor series approximations for the variances of estimates
### artifact distributions.
### Aliases: estimate_var_artifacts estimate_var_qxi estimate_var_qxa
### estimate_var_rxxi estimate_var_rxxa estimate_var_ut estimate_var_ux
### estimate_var_ryya estimate_var_qya estimate_var_qyi estimate_var_ryyi
### ** Examples
estimate_var_qxi(qxa = c(.8, .85, .9, .95), var_qxa = c(.02, .03, .04, .05),
ux = .8, var_ux = 0,
ux_observed = c(TRUE, TRUE, FALSE, FALSE),
indirect_rr = c(TRUE, FALSE, TRUE, FALSE))
estimate_var_qxa(qxi = c(.8, .85, .9, .95), var_qxi = c(.02, .03, .04, .05),
ux = .8, var_ux = 0,
ux_observed = c(TRUE, TRUE, FALSE, FALSE),
indirect_rr = c(TRUE, FALSE, TRUE, FALSE))
estimate_var_rxxi(rxxa = c(.8, .85, .9, .95),
var_rxxa = c(.02, .03, .04, .05), ux = .8, var_ux = 0,
ux_observed = c(TRUE, TRUE, FALSE, FALSE),
indirect_rr = c(TRUE, FALSE, TRUE, FALSE))
estimate_var_rxxa(rxxi = c(.8, .85, .9, .95), var_rxxi = c(.02, .03, .04, .05),
ux = .8, var_ux = 0,
ux_observed = c(TRUE, TRUE, FALSE, FALSE),
indirect_rr = c(TRUE, FALSE, TRUE, FALSE))
estimate_var_ut(rxx = c(.8, .85, .9, .95), var_rxx = 0,
ux = c(.8, .8, .9, .9), var_ux = c(.02, .03, .04, .05),
rxx_restricted = c(TRUE, TRUE, FALSE, FALSE),
rxx_as_qx = c(TRUE, FALSE, TRUE, FALSE))
estimate_var_ux(rxx = c(.8, .85, .9, .95), var_rxx = 0,
ut = c(.8, .8, .9, .9), var_ut = c(.02, .03, .04, .05),
rxx_restricted = c(TRUE, TRUE, FALSE, FALSE),
rxx_as_qx = c(TRUE, FALSE, TRUE, FALSE))
estimate_var_ryya(ryyi = .9, var_ryyi = .04, rxyi = .4, var_rxyi = 0, ux = .8, var_ux = 0)
# NOTE(review): the next line duplicates the estimate_var_ryya call above;
# the aliases list suggests an estimate_var_qya example may have been
# intended here -- confirm against the package's Rd source.
estimate_var_ryya(ryyi = .9, var_ryyi = .04, rxyi = .4, var_rxyi = 0, ux = .8, var_ux = 0)
estimate_var_qyi(qya = .9, var_qya = .04, rxyi = .4, var_rxyi = 0, ux = .8, var_ux = 0)
estimate_var_ryyi(ryya = .9, var_ryya = .04, rxyi = .4, var_rxyi = 0, ux = .8, var_ux = 0)
|
5100e561c4484fe19573946af465b3bea5635f4c
|
41560490ab5b1ed0bbe9add8b7a3f512022eb738
|
/Rpurl/Repeated_RCBD_PiephoEdmondson2018.R
|
4724b7c51d056775bfcc59c1c76880169affc55a
|
[] |
no_license
|
lena-bauer/DSFAIR
|
3fa1473f790fbabd24a33042b3f4e0b46eb92d66
|
6928bd3b0ff9c68dfb17a8ceaa46892c44c5819f
|
refs/heads/master
| 2023-03-14T14:27:39.682718
| 2021-03-12T11:43:53
| 2021-03-12T11:43:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,774
|
r
|
Repeated_RCBD_PiephoEdmondson2018.R
|
# packages
# pacman::p_load() installs (if needed) and attaches all listed packages
pacman::p_load(conflicted, # handle conflicting functions
agriTutorial, tidyverse, # data import and handling
nlme, glmmTMB, # linear mixed modelling
mixedup, AICcmodavg, car, # linear mixed model processing
emmeans, multcomp, # mean comparisons
ggplot2, gganimate, gifski) # (animated) plots
conflict_prefer("select", "dplyr") # set select() from dplyr as default
conflict_prefer("filter", "dplyr") # set filter() from dplyr as default
# data (import via URL)
# Repeated-measures sorghum trial: response y measured weekly on plots
# laid out in randomized complete blocks
dat <- agriTutorial::sorghum %>% # data from agriTutorial package
rename(block = Replicate,
weekF = factweek, # week as factor
weekN = varweek, # week as numeric/integer
plot = factplot) %>%
mutate(variety = paste0("var", variety), # variety id
block = paste0("block", block), # block id
weekF = paste0("week", weekF), # week id
plot = paste0("plot", plot), # plot id
unit = paste0("obs", 1:n() )) %>% # observation id
mutate_at(vars(variety:plot, unit), as.factor) %>%
as_tibble()
dat
# Per-variety mean and standard deviation of the response
dat %>%
group_by(variety) %>%
summarize(mean = mean(y, na.rm=TRUE),
std.dev = sd(y, na.rm=TRUE)) %>%
arrange(desc(mean)) %>% # sort
print(n=Inf) # print full table
# Variety-by-week means, reshaped with one column per week
dat %>%
group_by(weekF, variety) %>%
summarize(mean = mean(y, na.rm=TRUE)) %>%
pivot_wider(names_from = weekF, values_from = mean)
# Fixed colour palette, one colour per variety level
var_colors <- c("#8cb369", "#f4a259", "#5b8e7d", "#bc4b51")
names(var_colors) <- dat$variety %>% levels()
# Animated boxplot of leaf area index over weeks (frames follow weekN)
gganimate_plot <- ggplot(
data = dat, aes(y = y, x = weekF,
group = variety,
color = variety)) +
geom_boxplot(outlier.shape = NA) +
geom_point(alpha = 0.5, size = 3) +
scale_y_continuous(
name = "Leaf area index",
limits = c(0, 6.5),
expand = c(0, 0),
breaks = c(0:6)
) +
scale_color_manual(values = var_colors) +
theme_bw() +
theme(legend.position = "bottom",
axis.title.x = element_blank()) +
transition_time(weekN) +
shadow_mark(exclude_layer = 2)
animate(gganimate_plot, renderer = gifski_renderer()) # render gif
dat.wk1 <- dat %>% filter(weekF == "week1") # subset data from first week only
mod.wk1 <- lm(formula = y ~ variety + block,
data = dat.wk1)
mod.iid <- glmmTMB(formula = y ~ weekF * (variety + block)
+ (1 | unit), # add random unit term to mimic error variance
dispformula = ~ 0, # fix original error variance to 0
REML = TRUE, # needs to be stated since default = ML
data = dat)
# Extract variance component estimates
# alternative: mod.iid %>% broom.mixed::tidy(effects = "ran_pars", scales = "vcov")
mod.iid %>% mixedup::extract_vc(ci_scale = "var")
mod.iid.nlme <- gls(model = y ~ weekF * (block + variety),
correlation = NULL, # default, i.e. homoscedastic, independent errors
data = dat)
# Extract variance component estimates
tibble(varstruct = "iid") %>%
mutate(sigma = mod.iid.nlme$sigma) %>%
mutate(Variance = sigma^2)
# First-order autoregressive (AR1) errors via glmmTMB: serial correlation
# between weekly measurements on the same plot, expressed as an ar1()
# random term per plot (again with the built-in error variance fixed to 0).
mod.AR1 <- glmmTMB(formula = y ~ weekF * (variety + block) +
ar1(weekF + 0 | plot), # ar1 structure as random term to mimic error var
dispformula = ~ 0, # fix original error variance to 0
REML = TRUE, # needs to be stated since default = ML
data = dat)
# Extract variance component estimates
# alternative: mod.ar1 %>% broom.mixed::tidy(effects = "ran_pars", scales = "vcov")
mod.AR1 %>% extract_vc(ci_scale = "var", show_cor = TRUE)
# AR(1) errors with nlme::gls via corAR1 on the numeric week within plot.
mod.AR1.nlme <- gls(model = y ~ weekF * (block + variety),
correlation = corAR1(form = ~ weekN | plot),
data = dat)
# Extract variance component estimates; under AR(1) the correlation at
# lag k is rho^k, which is what the CorrXwks columns spell out.
tibble(varstruct = "ar(1)") %>%
mutate(sigma = mod.AR1.nlme$sigma,
rho = coef(mod.AR1.nlme$modelStruct$corStruct, unconstrained = FALSE)) %>%
mutate(Variance = sigma^2,
Corr1wk = rho,
Corr2wks = rho^2,
Corr3wks = rho^3,
Corr4wks = rho^4)
# Equivalent AR(1) fit using corExp instead of corAR1: the exponential
# correlation uses a "range" parameter, so the 1-lag correlation is
# recovered as rho = exp(-1 / range) before powering up by lag.
mod.AR1.nlme.V2 <- gls(model = y ~ weekF * (variety + block),
correlation = corExp(form = ~ weekN | plot),
data = dat)
tibble(varstruct = "ar(1)") %>%
mutate(sigma = mod.AR1.nlme.V2$sigma,
rho = exp(-1/coef(mod.AR1.nlme.V2$modelStruct$corStruct,
unconstrained = FALSE))) %>%
mutate(Variance = sigma^2,
Corr1wk = rho,
Corr2wks = rho^2,
Corr3wks = rho^3,
Corr4wks = rho^4)
# AR(1) + nugget: like AR(1) but with an extra uncorrelated ("nugget")
# error component on top of the serially correlated one. The glmmTMB
# version below is kept commented out for reference; the active fit uses
# nlme::gls with corExp(nugget = TRUE).
## mod.AR1nugget <- glmmTMB(formula = y ~ weekF * (variety + block) +
## ar1(weekF + 0 | plot), # ar1 structure as random term to mimic error var
## # dispformula = ~ 0, # error variance allowed = nugget!
## REML = TRUE, # needs to be stated since default = ML
## data = dat)
##
## # show variance components
## # alternative: mod.AR1nugget %>% broom.mixed::tidy(effects = "ran_pars", scales = "vcov")
## mod.AR1nugget %>% extract_vc(ci_scale = "var", show_cor = TRUE)
##
## # We can see that we get $\sigma^2_{plot} =$ `0.019`, an additional residual/nugget variance $\sigma^2_{N} =$ `0.004` and a $\rho =$ of `0.908`.
mod.AR1nugget.nlme <- gls(model = y ~ weekF * (block + variety),
correlation = corExp(form = ~ weekN | plot, nugget = TRUE),
data = dat)
# Back out the effective 1-lag correlation: corStruct coef [1] is the
# range, coef [2] the nugget proportion, so
# rho = (1 - nugget) * exp(-1 / range).
tibble(varstruct = "ar(1) + nugget") %>%
mutate(sigma = mod.AR1nugget.nlme$sigma,
nugget = coef(mod.AR1nugget.nlme$modelStruct$corStruct,
unconstrained = FALSE)[2],
rho = (1-coef(mod.AR1nugget.nlme$modelStruct$corStruct,
unconstrained = FALSE)[2])*
exp(-1/coef(mod.AR1nugget.nlme$modelStruct$corStruct,
unconstrained = FALSE)[1])) %>%
mutate(Variance = sigma^2,
Corr1wk = rho,
Corr2wks = rho^2,
Corr3wks = rho^3,
Corr4wks = rho^4)
# Heterogeneous compound symmetry (hCS) via glmmTMB's cs() term: a common
# correlation between any two weeks on the same plot, with week-specific
# variances (built-in error variance again fixed to 0).
mod.hCS <- glmmTMB(formula = y ~ weekF * (variety + block) +
cs(weekF + 0 | plot), # hcs structure as random term to mimic error var
dispformula = ~ 0, # fix original error variance to 0
REML = TRUE, # needs to be stated since default = ML
data = dat)
# show variance components
# alternative: mod.hCS %>% broom.mixed::tidy(effects = "ran_pars", scales = "vcov")
mod.hCS %>% extract_vc(ci_scale = "var", show_cor = TRUE)
# Compound symmetry (CS) with nlme::gls: a single correlation rho between
# any two weeks on the same plot, regardless of lag (hence all CorrXwks
# columns equal rho). Uses the full argument names `model`/`correlation`
# instead of relying on partial matching (`corr`), consistent with the
# other gls() calls in this script.
mod.CS.nlme <- gls(model = y ~ weekF * (block + variety),
correlation = corCompSymm(form = ~ weekN | plot),
data = dat)
# Extract variance component estimates: variance plus the constant
# between-week correlation.
tibble(varstruct = "cs") %>%
mutate(sigma = mod.CS.nlme$sigma,
rho = coef(mod.CS.nlme$modelStruct$corStruct, unconstrained = FALSE)) %>%
mutate(Variance = sigma^2,
Corr1wk = rho,
Corr2wks = rho,
Corr3wks = rho,
Corr4wks = rho)
# Toeplitz structure via glmmTMB's toep() term: one correlation parameter
# per lag (shared across weeks), with the built-in error variance fixed to 0.
mod.Toep <- glmmTMB(formula = y ~ weekF * (variety + block) +
toep(weekF + 0 | plot), # teop structure as random term to mimic err var
dispformula = ~ 0, # fix original error variance to 0
REML = TRUE, # needs to be stated since default = ML
data = dat)
# show variance components
# alternative: mod.Toep %>% broom.mixed::tidy(effects = "ran_pars", scales = "vcov")
mod.Toep %>% extract_vc(ci_scale = "var", show_cor = TRUE)
# Unstructured (UN) covariance via glmmTMB's us() term: a free variance
# per week and a free correlation per week pair — the most flexible
# (and most heavily parameterized) candidate structure.
mod.UN <- glmmTMB(formula = y ~ weekF * (variety + block) +
us(weekF + 0 | plot), # us structure as random term to mimic error var
dispformula = ~ 0, # fix original error variance to 0
REML = TRUE, # needs to be stated since default = ML
data = dat)
# show variance components
# alternative: mod.UN %>% broom.mixed::tidy(effects = "ran_pars", scales = "vcov")
mod.UN %>% extract_vc(ci_scale = "var", show_cor = TRUE)
# Unstructured (UN) covariance with nlme::gls: a general correlation
# matrix via corSymm plus week-specific variances via varIdent. Uses the
# full argument names `model`/`correlation` instead of relying on partial
# matching (`corr`), consistent with the other gls() calls in this script.
mod.UN.nlme <- gls(model = y ~ weekF * (block + variety),
correlation = corSymm(form = ~ 1 | plot),
weights = varIdent(form = ~ 1|weekF),
data = dat)
# Extract variance component estimates: variances — varIdent coefficients
# are multipliers of sigma, so per-week SD = sigma * multiplier.
mod.UN.nlme$modelStruct$varStruct %>%
coef(unconstrained = FALSE, allCoef = TRUE) %>%
enframe(name = "grp", value = "varStruct") %>%
mutate(sigma = mod.UN.nlme$sigma) %>%
mutate(StandardError = sigma * varStruct) %>%
mutate(Variance = StandardError ^ 2)
# Extract variance component estimates: correlations
mod.UN.nlme$modelStruct$corStruct
# Model selection: rank the candidate covariance structures by AIC,
# once for the glmmTMB fits and once for the nlme fits.
AICcmodavg::aictab(
cand.set = list(mod.iid, mod.hCS, mod.AR1, mod.Toep, mod.UN),
modnames = c("iid", "hCS", "AR1", "Toeplitz", "UN"),
second.ord = FALSE) # get AIC instead of AICc
AICcmodavg::aictab(
cand.set = list(mod.iid.nlme, mod.CS.nlme, mod.AR1.nlme, mod.AR1nugget.nlme, mod.UN.nlme),
modnames = c("iid", "CS", "AR1", "AR1 + nugget", "UN"),
second.ord = FALSE) # get AIC instead of AICc
# Static plot of the raw data: points per observation with a line through
# the per-week means of each variety.
ggplot(data = dat,
aes(y = y, x = weekF,
group = variety,
color = variety)) +
geom_point(alpha = 0.75, size = 3) +
stat_summary(fun=mean, geom="line") + # lines between means
scale_y_continuous(
name = "Leaf area index",
limits = c(0, 6.5),
expand = c(0, 0),
breaks = c(0:6)) +
scale_color_manual(values = var_colors) +
theme_bw() +
theme(legend.position = "bottom",
axis.title.x = element_blank())
# Estimated marginal means (with 95% CIs) per week x variety from the
# selected AR(1) glmmTMB model.
mod.AR1 %>%
ggeffects::ggemmeans(terms = c("weekF", "variety"),
ci.lvl = 0.95)
# Regression over time: refit with variety-specific linear trends in the
# numeric week (variety + variety:weekN, no global intercept) while
# keeping the weekF*block terms and the AR(1) error structure, then plot
# the predicted trends with 95% confidence ribbons per variety.
glmmTMB(formula = y ~ 0 +
variety + variety:weekN +
weekF*block +
ar1(weekF + 0 | plot), # ar1 structure as random term to mimic error var
dispformula = ~ 0, # fix original error variance to 0
REML = TRUE, # needs to be stated since default = ML
data = dat) %>%
ggeffects::ggemmeans(terms = c("weekN", "variety"),
ci.lvl = 0.95) %>%
ggplot(., aes(x=x,
y=predicted)) +
scale_color_manual(values = var_colors) +
scale_fill_manual(values = var_colors) +
theme_bw() +
theme(legend.position = "bottom",
axis.title.x = element_blank()) +
scale_y_continuous(
name = "Leaf area index",
limits = c(0, 6.5),
expand = c(0, 0),
breaks = c(0:6)) +
geom_line(aes(colour = group),
size = 1.5) +
geom_ribbon(aes(ymin = conf.low,
ymax = conf.high,
fill = group),
alpha = 0.2)
|
0efe32f120b832a839e5ab2e163418f2aa9f9130
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/rLindo/R/rLindoParam.R
|
b25f01a18ae7d98d688bef8ad2c76b78feb97ac9
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 77,068
|
r
|
rLindoParam.R
|
#rLindoParam.R
#The R interface to LINDO API 8.0.
#This file includes all LINDO API parameter and constant definitions.
#Copyright (C) 2013 LINDO Systems.
# NOTE: all values below mirror the macros of the LINDO C API (lindo.h);
# they must stay numerically identical to the C header for the interface
# to work.
# Objective sense codes.
LS_MIN <- +1L
LS_MAX <- -1L
# Constraint-type codes (single characters, as used by the C API).
LS_CONTYPE_GE <- 'G'
LS_CONTYPE_LE <- 'L'
LS_CONTYPE_EQ <- 'E'
LS_CONTYPE_FR <- 'N'
# Cone-type codes.
LS_CONETYPE_QUAD <- 'Q'
LS_CONETYPE_RQUAD <- 'R'
# Variable-type codes (continuous / binary / integer).
LS_VARTYPE_CONT <- 'C'
LS_VARTYPE_BIN <- 'B'
LS_VARTYPE_INT <- 'I'
LS_INFINITY <- 1.0E+30
# Basis status codes.
LS_BASTYPE_BAS <- 0L
LS_BASTYPE_ATLO <- -1L
LS_BASTYPE_ATUP <- -2L
LS_BASTYPE_FNUL <- -3L
LS_BASTYPE_SBAS <- -4L
# MPS file-format codes.
LS_UNFORMATTED_MPS <- 0L
LS_FORMATTED_MPS <- 1L
LS_UNFORMATTED_MPS_COMP <- 2L
LS_FORMATTED_MPS_COMP <- 3L
# Solution-type codes.
LS_SOLUTION_OPT <- 0L
LS_SOLUTION_MIP <- 1L
LS_SOLUTION_OPT_IPM <- 2L
LS_SOLUTION_OPT_OLD <- 3L
LS_SOLUTION_MIP_OLD <- 4L
# Basis-file format codes.
LS_BASFILE_BIN <- 1L
LS_BASFILE_MPS <- 2L
LS_BASFILE_TXT <- 3L
LS_INT_PARAMETER_TYPE <- 4L
LS_DOUBLE_PARAMETER_TYPE <- 8L
LS_MAX_ERROR_MESSAGE_LENGTH <- 1024L
LS_DEFAULT <- -1L
LS_MAX_JOBJECTS <- 100L
# Model/function property codes.
LS_PROPERTY_UNKNOWN <- 0L
LS_PROPERTY_LINEAR <- 1L
LS_PROPERTY_CONVEX <- 2L
LS_PROPERTY_CONCAVE <- 3L
LS_PROPERTY_QUASI_CONVEX <- 4L
LS_PROPERTY_QUASI_CONCAVE <- 5L
LS_PROPERTY_MAX <- 6L
#--------------bit masks for LScopyModel--------------#
LS_RAW_COPY <- 0L
LS_DEEP_COPY <- 1L
LS_TIME_COPY <- 2L
LS_STOC_COPY <- 4L
LS_SNGSTG_COPY <- 8L
#----------------Time frames in seconds---------------#
# Convenience constants expressing common time frames as second counts.
# Each name encodes its own value (e.g. LSMIN05 = 5 * 60 = 300).
# Months/quarters/years use the 30-day-month convention
# (LSMONTH = 30 * LSDAY, LSYEAR = 12 * LSMONTH = 360 days).
LSSEC01 <- 1L
LSSEC02 <- 2L
LSSEC03 <- 3L
LSSEC04 <- 4L
LSSEC05 <- 5L
LSSEC06 <- 6L
LSSEC10 <- 10L
LSSEC15 <- 15L
LSSEC20 <- 20L
LSSEC30 <- 30L
LSMIN01 <- 60L
LSMIN02 <- 120L
LSMIN03 <- 180L
LSMIN05 <- 300L
LSMIN06 <- 360L # fixed: 6 minutes = 360 s (was 600L, duplicating LSMIN10)
LSMIN10 <- 600L
LSMIN15 <- 900L
LSMIN20 <- 1200L
LSMIN30 <- 1800L
LSHOUR01 <- 3600L
LSHOUR02 <- 7200L
LSHOUR03 <- 10800L
LSHOUR05 <- 18000L
LSHOUR06 <- 21600L
LSHOUR08 <- 28800L
LSHOUR12 <- 43200L
LSDAY <- 86400L
LSWEEK <- 604800L
LSMONTH <- 2592000L
LSQUARTER <- 7776000L
LSYEAR <- 31104000L
#----------------------Days of week-------------------#
LSSUNDAY <- 0L
LSMONDAY <- 1L
LSTUESDAY <- 2L
LSWEDNESDAY <- 3L
LSTHURSDAY <- 4L
LSFRIDAY <- 5L
LSSATURDAY <- 6L
#----------------bit mask for components--------------#
LS_DATA_CORE <- 1L
LS_DATA_TIME <- 2L
LS_DATA_STOC <- 4L
LS_DATA_FILE <- 8L
#----------------Solution or model status--------------#
LS_STATUS_OPTIMAL <- 1L
LS_STATUS_BASIC_OPTIMAL <- 2L
LS_STATUS_INFEASIBLE <- 3L
LS_STATUS_UNBOUNDED <- 4L
LS_STATUS_FEASIBLE <- 5L
LS_STATUS_INFORUNB <- 6L
LS_STATUS_NEAR_OPTIMAL <- 7L
LS_STATUS_LOCAL_OPTIMAL <- 8L
LS_STATUS_LOCAL_INFEASIBLE <- 9L
LS_STATUS_CUTOFF <- 10L
LS_STATUS_NUMERICAL_ERROR <- 11L
LS_STATUS_UNKNOWN <- 12L
LS_STATUS_UNLOADED <- 13L
LS_STATUS_LOADED <- 14L
LS_STATUS_BOUNDED <- 15L
#-----------General parameters (1021 - 1099)----------#
# Parameter id codes passed to the LSset/LSget parameter functions;
# LS_IPARAM_* are integer-valued, LS_DPARAM_* double-valued.
LS_IPARAM_OBJSENSE <- 1022L
LS_DPARAM_CALLBACKFREQ <- 1023L
LS_DPARAM_OBJPRINTMUL <- 1024L
LS_IPARAM_CHECK_FOR_ERRORS <- 1025L
LS_IPARAM_ALLOW_CNTRLBREAK <- 1026L
LS_IPARAM_DECOMPOSITION_TYPE <- 1027L
LS_IPARAM_LP_SCALE <- 1029L
LS_IPARAM_LP_ITRLMT <- 1030L
LS_IPARAM_SPLEX_PPRICING <- 1031L
LS_IPARAM_SPLEX_REFACFRQ <- 1032L
LS_IPARAM_BARRIER_SOLVER <- 1033L
LS_IPARAM_PROB_TO_SOLVE <- 1034L
LS_IPARAM_LP_PRINTLEVEL <- 1035L
LS_IPARAM_MPS_OBJ_WRITESTYLE <- 1036L
LS_IPARAM_SPLEX_DPRICING <- 1037L
LS_IPARAM_SOL_REPORT_STYLE <- 1038L
LS_IPARAM_INSTRUCT_LOADTYPE <- 1039L
LS_IPARAM_SPLEX_DUAL_PHASE <- 1040L
LS_IPARAM_LP_PRELEVEL <- 1041L
LS_IPARAM_STRING_LENLMT <- 1042L
LS_IPARAM_USE_NAMEDATA <- 1043L
LS_IPARAM_SPLEX_USE_EXTERNAL <- 1044L
LS_DPARAM_LP_ITRLMT <- 1045L
LS_IPARAM_COPY_MODE <- 1046L
LS_IPARAM_SBD_NUM_THREADS <- 1047L
LS_IPARAM_NUM_THREADS <- 1048L
LS_IPARAM_MULTITHREAD_MODE <- 1049L
LS_IPARAM_FIND_BLOCK <- 1050L
##Generic solver parameters (1251 - 1500)
LS_IPARAM_SOLVER_IUSOL <- 1251L
LS_IPARAM_SOLVER_TIMLMT <- 1252L
LS_DPARAM_SOLVER_CUTOFFVAL <- 1253L
LS_DPARAM_SOLVER_FEASTOL <- 1254L
LS_IPARAM_SOLVER_RESTART <- 1255L
LS_IPARAM_SOLVER_IPMSOL <- 1256L
LS_DPARAM_SOLVER_OPTTOL <- 1257L
LS_IPARAM_SOLVER_USECUTOFFVAL <- 1258L
LS_IPARAM_SOLVER_PRE_ELIM_FILL <- 1259L
LS_DPARAM_SOLVER_TIMLMT <- 1260L
LS_IPARAM_SOLVER_CONCURRENT_OPTMODE <- 1261L
LS_DPARAM_SOLVER_PERT_FEASTOL <- 1262L
LS_IPARAM_SOLVER_PARTIALSOL_LEVEL <- 1263L
## Advanced parameters for the simplex method (4000 - 41++)
LS_DPARAM_LP_MIN_FEASTOL <- 4060L
LS_DPARAM_LP_MAX_FEASTOL <- 4061L
LS_DPARAM_LP_MIN_OPTTOL <- 4062L
LS_DPARAM_LP_MAX_OPTTOL <- 4063L
LS_DPARAM_LP_MIN_PIVTOL <- 4064L
LS_DPARAM_LP_MAX_PIVTOL <- 4065L
LS_DPARAM_LP_AIJ_ZEROTOL <- 4066L
LS_DPARAM_LP_PIV_ZEROTOL <- 4067L
LS_DPARAM_LP_PIV_BIGTOL <- 4068L
LS_DPARAM_LP_BIGM <- 4069L
LS_DPARAM_LP_BNDINF <- 4070L
LS_DPARAM_LP_INFINITY <- 4071L
LS_IPARAM_LP_PPARTIAL <- 4072L
LS_IPARAM_LP_DPARTIAL <- 4073L
LS_IPARAM_LP_DRATIO <- 4074L
LS_IPARAM_LP_PRATIO <- 4075L
LS_IPARAM_LP_RATRANGE <- 4076L
LS_IPARAM_LP_DPSWITCH <- 4077L
LS_IPARAM_LP_PALLOC <- 4078L
LS_IPARAM_LP_PRTFG <- 4079L
LS_IPARAM_LP_OPRFREE <- 4080L
LS_IPARAM_LP_SPRINT_SUB <- 4081L
## Advanced parameters for LU decomposition (4800 - 4+++)
LS_IPARAM_LU_NUM_CANDITS <- 4800L
LS_IPARAM_LU_MAX_UPDATES <- 4801L
LS_IPARAM_LU_PRINT_LEVEL <- 4802L
LS_IPARAM_LU_UPDATE_TYPE <- 4803L
LS_IPARAM_LU_USE_PIVCOL <- 4804L
LS_IPARAM_LU_PIVMOD <- 4806L
LS_DPARAM_LU_EPS_DIAG <- 4900L
LS_DPARAM_LU_EPS_NONZ <- 4901L
LS_DPARAM_LU_EPS_PIVABS <- 4902L
LS_DPARAM_LU_EPS_PIVREL <- 4903L
LS_DPARAM_LU_INI_RCOND <- 4904L
LS_DPARAM_LU_SPVTOL_UPDATE <- 4905L
LS_DPARAM_LU_SPVTOL_FTRAN <- 4906L
LS_DPARAM_LU_SPVTOL_BTRAN <- 4907L
## Parameters for the IPM method (3000 - 3+++)
LS_DPARAM_IPM_TOL_INFEAS <- 3150L
LS_DPARAM_IPM_TOL_PATH <- 3151L
LS_DPARAM_IPM_TOL_PFEAS <- 3152L
LS_DPARAM_IPM_TOL_REL_STEP <- 3153L
LS_DPARAM_IPM_TOL_PSAFE <- 3154L
LS_DPARAM_IPM_TOL_DFEAS <- 3155L
LS_DPARAM_IPM_TOL_DSAFE <- 3156L
LS_DPARAM_IPM_TOL_MU_RED <- 3157L
LS_DPARAM_IPM_BASIS_REL_TOL_S <- 3158L
LS_DPARAM_IPM_BASIS_TOL_S <- 3159L
LS_DPARAM_IPM_BASIS_TOL_X <- 3160L
LS_DPARAM_IPM_BI_LU_TOL_REL_PIV <- 3161L
LS_DPARAM_IPM_CO_TOL_INFEAS <- 3162L
LS_DPARAM_IPM_CO_TOL_PFEAS <- 3163L
LS_DPARAM_IPM_CO_TOL_DFEAS <- 3164L
LS_DPARAM_IPM_CO_TOL_MU_RED <- 3165L
LS_IPARAM_IPM_MAX_ITERATIONS <- 3166L
LS_IPARAM_IPM_OFF_COL_TRH <- 3167L
LS_IPARAM_IPM_NUM_THREADS <- 3168L
LS_IPARAM_IPM_CHECK_CONVEXITY <- 3169L
## Nonlinear programming (NLP) parameters (2500 - 25++)
LS_IPARAM_NLP_SOLVE_AS_LP <- 2500L
LS_IPARAM_NLP_SOLVER <- 2501L
LS_IPARAM_NLP_SUBSOLVER <- 2502L
LS_IPARAM_NLP_PRINTLEVEL <- 2503L
LS_DPARAM_NLP_PSTEP_FINITEDIFF <- 2504L
LS_IPARAM_NLP_DERIV_DIFFTYPE <- 2505L
LS_DPARAM_NLP_FEASTOL <- 2506L
LS_DPARAM_NLP_REDGTOL <- 2507L
LS_IPARAM_NLP_USE_CRASH <- 2508L
LS_IPARAM_NLP_USE_STEEPEDGE <- 2509L
LS_IPARAM_NLP_USE_SLP <- 2510L
LS_IPARAM_NLP_USE_SELCONEVAL <- 2511L
LS_IPARAM_NLP_PRELEVEL <- 2512L
LS_IPARAM_NLP_ITRLMT <- 2513L
LS_IPARAM_NLP_LINEARZ <- 2514L
LS_IPARAM_NLP_LINEARITY <- 2515L
LS_IPARAM_NLP_STARTPOINT <- 2516L
LS_IPARAM_NLP_CONVEXRELAX <- 2517L
LS_IPARAM_NLP_CR_ALG_REFORM <- 2518L
LS_IPARAM_NLP_QUADCHK <- 2519L
LS_IPARAM_NLP_AUTODERIV <- 2520L
LS_IPARAM_NLP_MAXLOCALSEARCH <- 2521L
LS_IPARAM_NLP_CONVEX <- 2522L
LS_IPARAM_NLP_CONOPT_VER <- 2523L
LS_IPARAM_NLP_USE_LINDO_CRASH <- 2524L
LS_IPARAM_NLP_STALL_ITRLMT <- 2525L
LS_IPARAM_NLP_AUTOHESS <- 2526L
LS_IPARAM_NLP_FEASCHK <- 2527L
LS_DPARAM_NLP_ITRLMT <- 2528L
LS_IPARAM_NLP_MAXSUP <- 2529L
LS_IPARAM_NLP_MSW_SOLIDX <- 2530L
LS_IPARAM_NLP_ITERS_PER_LOGLINE <- 2531L
LS_IPARAM_NLP_MAX_RETRY <- 2532L
LS_IPARAM_NLP_MSW_NORM <- 2533L
LS_IPARAM_NLP_MSW_POPSIZE <- 2534L
LS_IPARAM_NLP_MSW_MAXPOP <- 2535L
LS_IPARAM_NLP_MSW_MAXNOIMP <- 2536L
LS_IPARAM_NLP_MSW_FILTMODE <- 2537L
LS_DPARAM_NLP_MSW_POXDIST_THRES <- 2538L
LS_DPARAM_NLP_MSW_EUCDIST_THRES <- 2539L
LS_DPARAM_NLP_MSW_XNULRAD_FACTOR <- 2540L
LS_DPARAM_NLP_MSW_XKKTRAD_FACTOR <- 2541L
LS_IPARAM_NLP_MAXLOCALSEARCH_TREE <- 2542L
LS_IPARAM_NLP_MSW_NUM_THREADS <- 2543L
LS_IPARAM_NLP_MSW_RG_SEED <- 2544L
LS_IPARAM_NLP_MSW_PREPMODE <- 2545L
LS_IPARAM_NLP_MSW_RMAPMODE <- 2546L
LS_IPARAM_NLP_XSMODE <- 2547L
LS_DPARAM_NLP_MSW_OVERLAP_RATIO <- 2548L
LS_DPARAM_NLP_INF <- 2549L
LS_IPARAM_NLP_IPM2GRG <- 2550L
## Mixed integer programming (MIP) parameters (5000 - 5+++)
# Branch-and-bound / branch-and-cut controls for the MIP solver.
LS_IPARAM_MIP_TIMLIM <- 5300L
LS_IPARAM_MIP_AOPTTIMLIM <- 5301L
LS_IPARAM_MIP_LSOLTIMLIM <- 5302L
LS_IPARAM_MIP_PRELEVEL <- 5303L
LS_IPARAM_MIP_NODESELRULE <- 5304L
LS_DPARAM_MIP_INTTOL <- 5305L
LS_DPARAM_MIP_RELINTTOL <- 5306L
LS_DPARAM_MIP_RELOPTTOL <- 5307L
LS_DPARAM_MIP_PEROPTTOL <- 5308L
LS_IPARAM_MIP_MAXCUTPASS_TOP <- 5309L
LS_IPARAM_MIP_MAXCUTPASS_TREE <- 5310L
LS_DPARAM_MIP_ADDCUTPER <- 5311L
LS_DPARAM_MIP_ADDCUTPER_TREE <- 5312L
LS_IPARAM_MIP_MAXNONIMP_CUTPASS <- 5313L
LS_IPARAM_MIP_CUTLEVEL_TOP <- 5314L
LS_IPARAM_MIP_CUTLEVEL_TREE <- 5315L
LS_IPARAM_MIP_CUTTIMLIM <- 5316L
LS_IPARAM_MIP_CUTDEPTH <- 5317L
LS_IPARAM_MIP_CUTFREQ <- 5318L
LS_IPARAM_MIP_HEULEVEL <- 5319L
LS_IPARAM_MIP_PRINTLEVEL <- 5320L
LS_IPARAM_MIP_PREPRINTLEVEL <- 5321L
LS_DPARAM_MIP_CUTOFFOBJ <- 5322L
LS_IPARAM_MIP_USECUTOFFOBJ <- 5323L
LS_IPARAM_MIP_STRONGBRANCHLEVEL <- 5324L
LS_IPARAM_MIP_TREEREORDERLEVEL <- 5325L
LS_IPARAM_MIP_BRANCHDIR <- 5326L
LS_IPARAM_MIP_TOPOPT <- 5327L
LS_IPARAM_MIP_REOPT <- 5328L
LS_IPARAM_MIP_SOLVERTYPE <- 5329L
LS_IPARAM_MIP_KEEPINMEM <- 5330L
LS_IPARAM_MIP_BRANCHRULE <- 5331L
LS_DPARAM_MIP_REDCOSTFIX_CUTOFF <- 5332L
LS_DPARAM_MIP_ADDCUTOBJTOL <- 5333L
LS_IPARAM_MIP_HEUMINTIMLIM <- 5334L
LS_IPARAM_MIP_BRANCH_PRIO <- 5335L
LS_IPARAM_MIP_SCALING_BOUND <- 5336L
LS_DPARAM_MIP_PSEUDOCOST_WEIGT <- 5337L
LS_DPARAM_MIP_LBIGM <- 5338L
LS_DPARAM_MIP_DELTA <- 5339L
LS_IPARAM_MIP_DUAL_SOLUTION <- 5340L
LS_IPARAM_MIP_BRANCH_LIMIT <- 5341L
LS_DPARAM_MIP_ITRLIM <- 5342L
LS_IPARAM_MIP_AGGCUTLIM_TOP <- 5343L
LS_IPARAM_MIP_AGGCUTLIM_TREE <- 5344L
LS_DPARAM_MIP_SWITCHFAC_SIM_IPM_ITER <- 5345L
LS_IPARAM_MIP_ANODES_SWITCH_DF <- 5346L
LS_DPARAM_MIP_ABSOPTTOL <- 5347L
LS_DPARAM_MIP_MINABSOBJSTEP <- 5348L
LS_IPARAM_MIP_PSEUDOCOST_RULE <- 5349L
LS_IPARAM_MIP_ENUM_HEUMODE <- 5350L
LS_IPARAM_MIP_PRELEVEL_TREE <- 5351L
LS_DPARAM_MIP_REDCOSTFIX_CUTOFF_TREE <- 5352L
LS_IPARAM_MIP_USE_INT_ZERO_TOL <- 5353L
LS_IPARAM_MIP_USE_CUTS_HEU <- 5354L
LS_DPARAM_MIP_BIGM_FOR_INTTOL <- 5355L
LS_IPARAM_MIP_STRONGBRANCHDONUM <- 5366L
LS_IPARAM_MIP_MAKECUT_INACTIVE_COUNT <- 5367L
LS_IPARAM_MIP_PRE_ELIM_FILL <- 5368L
LS_IPARAM_MIP_HEU_MODE <- 5369L
LS_DPARAM_MIP_TIMLIM <- 5370L
LS_DPARAM_MIP_AOPTTIMLIM <- 5371L
LS_DPARAM_MIP_LSOLTIMLIM <- 5372L
LS_DPARAM_MIP_CUTTIMLIM <- 5373L
LS_DPARAM_MIP_HEUMINTIMLIM <- 5374L
LS_IPARAM_MIP_FP_MODE <- 5375L
LS_DPARAM_MIP_FP_WEIGHT <- 5376L
LS_IPARAM_MIP_FP_OPT_METHOD <- 5377L
LS_DPARAM_MIP_FP_TIMLIM <- 5378L
LS_IPARAM_MIP_FP_ITRLIM <- 5379L
LS_IPARAM_MIP_FP_HEU_MODE <- 5380L
LS_DPARAM_MIP_OBJ_THRESHOLD <- 5381L
LS_IPARAM_MIP_LOCALBRANCHNUM <- 5382L
LS_DPARAM_MIP_SWITCHFAC_SIM_IPM_TIME <- 5383L
LS_DPARAM_MIP_ITRLIM_SIM <- 5384L
LS_DPARAM_MIP_ITRLIM_NLP <- 5385L
LS_DPARAM_MIP_ITRLIM_IPM <- 5386L
LS_IPARAM_MIP_MAXNUM_MIP_SOL_STORAGE <- 5387L
LS_IPARAM_MIP_CONCURRENT_TOPOPTMODE <- 5388L
LS_IPARAM_MIP_CONCURRENT_REOPTMODE <- 5389L
LS_IPARAM_MIP_PREHEU_LEVEL <- 5390L
LS_IPARAM_MIP_PREHEU_PRE_LEVEL <- 5391L
LS_IPARAM_MIP_PREHEU_PRINT_LEVEL <- 5392L
LS_IPARAM_MIP_PREHEU_TC_ITERLIM <- 5393L
LS_IPARAM_MIP_PREHEU_DFE_VSTLIM <- 5394L
LS_IPARAM_MIP_PREHEU_VAR_SEQ <- 5395L
LS_IPARAM_MIP_USE_PARTIALSOL_LEVEL <- 5396L
LS_IPARAM_MIP_GENERAL_MODE <- 5397L
LS_IPARAM_MIP_NUM_THREADS <- 5398L
LS_IPARAM_MIP_POLISH_NUM_BRANCH_NEXT <- 5399L
LS_IPARAM_MIP_POLISH_MAX_BRANCH_COUNT <- 5400L
LS_DPARAM_MIP_POLISH_ALPHA_TARGET <- 5401L
LS_IPARAM_MIP_CONCURRENT_STRATEGY <- 5402L
LS_DPARAM_MIP_BRANCH_TOP_VAL_DIFF_WEIGHT <- 5403L
LS_IPARAM_MIP_BASCUTS_DONUM <- 5404L
LS_IPARAM_MIP_PARA_SUB <- 5405L
LS_DPARAM_MIP_PARA_RND_ITRLMT <- 5406L
LS_DPARAM_MIP_PARA_INIT_NODE <- 5407L
LS_IPARAM_MIP_PARA_ITR_MODE <- 5408L
LS_IPARAM_MIP_PARA_FP <- 5409L
LS_IPARAM_MIP_PARA_FP_MODE <- 5410L
## Global optimization (GOP) parameters (6000 - 6+++)
LS_DPARAM_GOP_RELOPTTOL <- 6400L
LS_DPARAM_GOP_FLTTOL <- 6401L
LS_DPARAM_GOP_BOXTOL <- 6402L
LS_DPARAM_GOP_WIDTOL <- 6403L
LS_DPARAM_GOP_DELTATOL <- 6404L
LS_DPARAM_GOP_BNDLIM <- 6405L
LS_IPARAM_GOP_TIMLIM <- 6406L
LS_IPARAM_GOP_OPTCHKMD <- 6407L
LS_IPARAM_GOP_BRANCHMD <- 6408L
LS_IPARAM_GOP_MAXWIDMD <- 6409L
LS_IPARAM_GOP_PRELEVEL <- 6410L
LS_IPARAM_GOP_POSTLEVEL <- 6411L
LS_IPARAM_GOP_BBSRCHMD <- 6412L
LS_IPARAM_GOP_DECOMPPTMD <- 6413L
LS_IPARAM_GOP_ALGREFORMMD <- 6414L
LS_IPARAM_GOP_RELBRNDMD <- 6415L
LS_IPARAM_GOP_PRINTLEVEL <- 6416L
LS_IPARAM_GOP_BNDLIM_MODE <- 6417L
LS_IPARAM_GOP_BRANCH_LIMIT <- 6418L
LS_IPARAM_GOP_CORELEVEL <- 6419L
LS_IPARAM_GOP_OPT_MODE <- 6420L
LS_IPARAM_GOP_HEU_MODE <- 6421L
LS_IPARAM_GOP_SUBOUT_MODE <- 6422L
LS_IPARAM_GOP_USE_NLPSOLVE <- 6423L
LS_IPARAM_GOP_LSOLBRANLIM <- 6424L
LS_IPARAM_GOP_LPSOPT <- 6425L
LS_DPARAM_GOP_TIMLIM <- 6426L
LS_DPARAM_GOP_BRANCH_LIMIT <- 6427L
LS_IPARAM_GOP_QUADMD <- 6428L
LS_IPARAM_GOP_LIM_MODE <- 6429L
LS_DPARAM_GOP_ITRLIM <- 6430L
LS_DPARAM_GOP_ITRLIM_SIM <- 6431L
LS_DPARAM_GOP_ITRLIM_IPM <- 6432L
LS_DPARAM_GOP_ITRLIM_NLP <- 6433L
LS_DPARAM_GOP_ABSOPTTOL <- 6434L
LS_DPARAM_GOP_PEROPTTOL <- 6435L
LS_DPARAM_GOP_AOPTTIMLIM <- 6436L
LS_IPARAM_GOP_LINEARZ <- 6437L
LS_IPARAM_GOP_NUM_THREADS <- 6438L
## License information parameters
# Read-only codes describing the capabilities/limits of the license.
LS_IPARAM_LIC_CONSTRAINTS <- 500L
LS_IPARAM_LIC_VARIABLES <- 501L
LS_IPARAM_LIC_INTEGERS <- 502L
LS_IPARAM_LIC_NONLINEARVARS <- 503L
LS_IPARAM_LIC_GOP_INTEGERS <- 504L
LS_IPARAM_LIC_GOP_NONLINEARVARS <- 505L
LS_IPARAM_LIC_DAYSTOEXP <- 506L
LS_IPARAM_LIC_DAYSTOTRIALEXP <- 507L
LS_IPARAM_LIC_NONLINEAR <- 508L
LS_IPARAM_LIC_EDUCATIONAL <- 509L
LS_IPARAM_LIC_RUNTIME <- 510L
LS_IPARAM_LIC_NUMUSERS <- 511L
LS_IPARAM_LIC_BARRIER <- 512L
LS_IPARAM_LIC_GLOBAL <- 513L
LS_IPARAM_LIC_PLATFORM <- 514L
LS_IPARAM_LIC_MIP <- 515L
LS_IPARAM_LIC_SP <- 516L
LS_IPARAM_LIC_CONIC <- 517L
LS_IPARAM_LIC_RESERVED1 <- 519L
## Model analysis parameters (1500 - 15++)
# Controls for IIS (irreducibly infeasible set) and IUS analysis.
LS_IPARAM_IIS_ANALYZE_LEVEL <- 1550L
LS_IPARAM_IUS_ANALYZE_LEVEL <- 1551L
LS_IPARAM_IIS_TOPOPT <- 1552L
LS_IPARAM_IIS_REOPT <- 1553L
LS_IPARAM_IIS_USE_SFILTER <- 1554L
LS_IPARAM_IIS_PRINT_LEVEL <- 1555L
LS_IPARAM_IIS_INFEAS_NORM <- 1556L
LS_IPARAM_IIS_ITER_LIMIT <- 1557L
LS_DPARAM_IIS_ITER_LIMIT <- 1558L
LS_IPARAM_IIS_TIME_LIMIT <- 1559L
LS_IPARAM_IIS_METHOD <- 1560L
LS_IPARAM_IIS_USE_EFILTER <- 1561L
LS_IPARAM_IIS_USE_GOP <- 1562L
LS_IPARAM_IIS_NUM_THREADS <- 1563L
## Output log format parameter
LS_IPARAM_FMT_ISSQL <- 1590L
## Stochastic Parameters (6000 - 6+++)
# Controls for the stochastic programming (SP) solver.
LS_IPARAM_STOC_NSAMPLE_SPAR <- 6600L
LS_IPARAM_STOC_NSAMPLE_STAGE <- 6601L
LS_IPARAM_STOC_RG_SEED <- 6602L
LS_IPARAM_STOC_METHOD <- 6603L
LS_IPARAM_STOC_REOPT <- 6604L
LS_IPARAM_STOC_TOPOPT <- 6605L
LS_IPARAM_STOC_ITER_LIM <- 6606L
LS_IPARAM_STOC_PRINT_LEVEL <- 6607L
LS_IPARAM_STOC_DETEQ_TYPE <- 6608L
LS_IPARAM_STOC_CALC_EVPI <- 6609L
LS_IPARAM_STOC_SAMP_CONT_ONLY <- 6611L
LS_IPARAM_STOC_BUCKET_SIZE <- 6612L
LS_IPARAM_STOC_MAX_NUMSCENS <- 6613L
LS_IPARAM_STOC_SHARE_BEGSTAGE <- 6614L
LS_IPARAM_STOC_NODELP_PRELEVEL <- 6615L
LS_DPARAM_STOC_TIME_LIM <- 6616L
LS_DPARAM_STOC_RELOPTTOL <- 6617L
LS_DPARAM_STOC_ABSOPTTOL <- 6618L
LS_IPARAM_STOC_DEBUG_MASK <- 6619L
LS_IPARAM_STOC_VARCONTROL_METHOD <- 6620L
LS_IPARAM_STOC_CORRELATION_TYPE <- 6621L
LS_IPARAM_STOC_WSBAS <- 6622L
LS_IPARAM_STOC_ALD_OUTER_ITER_LIM <- 6623L
LS_IPARAM_STOC_ALD_INNER_ITER_LIM <- 6624L
LS_DPARAM_STOC_ALD_DUAL_FEASTOL <- 6625L
LS_DPARAM_STOC_ALD_PRIMAL_FEASTOL <- 6626L
LS_DPARAM_STOC_ALD_DUAL_STEPLEN <- 6627L
LS_DPARAM_STOC_ALD_PRIMAL_STEPLEN <- 6628L
LS_IPARAM_CORE_ORDER_BY_STAGE <- 6629L
LS_SPARAM_STOC_FMT_NODE_NAME <- 6630L
LS_SPARAM_STOC_FMT_SCENARIO_NAME <- 6631L
LS_IPARAM_STOC_MAP_MPI2LP <- 6632L
LS_IPARAM_STOC_AUTOAGGR <- 6633L
LS_IPARAM_STOC_BENCHMARK_SCEN <- 6634L
LS_DPARAM_STOC_INFBND <- 6635L
LS_IPARAM_STOC_ADD_MPI <- 6636L
LS_IPARAM_STOC_ELIM_FXVAR <- 6637L
LS_DPARAM_STOC_SBD_OBJCUTVAL <- 6638L
LS_IPARAM_STOC_SBD_OBJCUTFLAG <- 6639L
LS_IPARAM_STOC_SBD_NUMCANDID <- 6640L
LS_DPARAM_STOC_BIGM <- 6641L
LS_IPARAM_STOC_NAMEDATA_LEVEL <- 6642L
LS_IPARAM_STOC_SBD_MAXCUTS <- 6643L
LS_IPARAM_STOC_DEQOPT <- 6644L
LS_IPARAM_STOC_DS_SUBFORM <- 6645L
LS_DPARAM_STOC_REL_PSTEPTOL <- 6646L
LS_DPARAM_STOC_REL_DSTEPTOL <- 6647L
LS_IPARAM_STOC_NUM_THREADS <- 6648L
LS_IPARAM_STOC_DETEQ_NBLOCKS <- 6649L
## Sampling parameters (7000 - 7+++)
LS_IPARAM_SAMP_NCM_METHOD <- 7701L
LS_DPARAM_SAMP_NCM_CUTOBJ <- 7702L
LS_IPARAM_SAMP_NCM_DSTORAGE <- 7703L
LS_DPARAM_SAMP_CDSINC <- 7704L
LS_IPARAM_SAMP_SCALE <- 7705L
LS_IPARAM_SAMP_NCM_ITERLIM <- 7706L
LS_DPARAM_SAMP_NCM_OPTTOL <- 7707L
LS_IPARAM_SAMP_NUM_THREADS <- 7708L
LS_IPARAM_SAMP_RG_BUFFER_SIZE <- 7709L
##Branch And Price parameters (8000 - 8499)
LS_DPARAM_BNP_INFBND <- 8010L
LS_IPARAM_BNP_LEVEL <- 8011L
LS_IPARAM_BNP_PRINT_LEVEL <- 8012L
LS_DPARAM_BNP_BOX_SIZE <- 8013L
LS_IPARAM_BNP_NUM_THREADS <- 8014L
LS_DPARAM_BNP_SUB_ITRLMT <- 8015L
LS_IPARAM_BNP_FIND_BLK <- 8016L
LS_IPARAM_BNP_PRELEVEL <- 8017L
LS_DPARAM_BNP_COL_LMT <- 8018L
LS_DPARAM_BNP_TIMLIM <- 8019L
LS_DPARAM_BNP_ITRLIM_SIM <- 8020L
LS_DPARAM_BNP_ITRLIM_IPM <- 8021L
LS_IPARAM_BNP_BRANCH_LIMIT <- 8022L
LS_DPARAM_BNP_ITRLIM <- 8023L
## Genetic Algorithm Parameters (8500-8+++)
LS_DPARAM_GA_CXOVER_PROB <- 8501L
LS_DPARAM_GA_XOVER_SPREAD <- 8502L
LS_DPARAM_GA_IXOVER_PROB <- 8503L
LS_DPARAM_GA_CMUTAT_PROB <- 8504L
LS_DPARAM_GA_MUTAT_SPREAD <- 8505L
LS_DPARAM_GA_IMUTAT_PROB <- 8506L
LS_DPARAM_GA_TOL_ZERO <- 8507L
LS_DPARAM_GA_TOL_PFEAS <- 8508L
LS_DPARAM_GA_INF <- 8509L
LS_DPARAM_GA_INFBND <- 8510L
LS_DPARAM_GA_BLXA <- 8511L
LS_DPARAM_GA_BLXB <- 8512L
LS_IPARAM_GA_CXOVER_METHOD <- 8513L
LS_IPARAM_GA_IXOVER_METHOD <- 8514L
LS_IPARAM_GA_CMUTAT_METHOD <- 8515L
LS_IPARAM_GA_IMUTAT_METHOD <- 8516L
LS_IPARAM_GA_SEED <- 8517L
LS_IPARAM_GA_NGEN <- 8518L
LS_IPARAM_GA_POPSIZE <- 8519L
LS_IPARAM_GA_FILEOUT <- 8520L
LS_IPARAM_GA_PRINTLEVEL <- 8521L
LS_IPARAM_GA_INJECT_OPT <- 8522L
LS_IPARAM_GA_NUM_THREADS <- 8523L
LS_IPARAM_GA_OBJDIR <- 8524L
LS_DPARAM_GA_OBJSTOP <- 8525L
LS_DPARAM_GA_MIGRATE_PROB <- 8526L
LS_IPARAM_GA_SSPACE <- 8527L
## Version info
LS_IPARAM_VER_MAJOR <- 990L
LS_IPARAM_VER_MINOR <- 991L
LS_IPARAM_VER_BUILD <- 992L
LS_IPARAM_VER_REVISION <- 993L
## Last card for parameters
LS_IPARAM_VER_NUMBER <- 999L
#-----------Math operator codes (1000-1500)----------#
# EP_* opcodes for LINDO's instruction-list (postfix) model format:
# arithmetic/logical operators, math functions, probability
# distribution functions (densities, CDFs and inverses), and
# stack-manipulation codes (EP_PUSH_*).
EP_NO_OP <- 0000L
EP_PLUS <- 1001L
EP_MINUS <- 1002L
EP_MULTIPLY <- 1003L
EP_DIVIDE <- 1004L
EP_POWER <- 1005L
EP_EQUAL <- 1006L
EP_NOT_EQUAL <- 1007L
EP_LTOREQ <- 1008L
EP_GTOREQ <- 1009L
EP_LTHAN <- 1010L
EP_GTHAN <- 1011L
EP_AND <- 1012L
EP_OR <- 1013L
EP_NOT <- 1014L
EP_PERCENT <- 1015L
EP_POSATE <- 1016L
EP_NEGATE <- 1017L
EP_ABS <- 1018L
EP_SQRT <- 1019L
EP_LOG <- 1020L
EP_LN <- 1021L
EP_PI <- 1022L
EP_SIN <- 1023L
EP_COS <- 1024L
EP_TAN <- 1025L
EP_ATAN2 <- 1026L
EP_ATAN <- 1027L
EP_ASIN <- 1028L
EP_ACOS <- 1029L
EP_EXP <- 1030L
EP_MOD <- 1031L
EP_FALSE <- 1032L
EP_TRUE <- 1033L
EP_IF <- 1034L
EP_PSN <- 1035L
EP_PSL <- 1036L
EP_LGM <- 1037L
EP_SIGN <- 1038L
EP_FLOOR <- 1039L
EP_FPA <- 1040L
EP_FPL <- 1041L
EP_PEL <- 1042L
EP_PEB <- 1043L
EP_PPS <- 1044L
EP_PPL <- 1045L
EP_PTD <- 1046L
EP_PCX <- 1047L
EP_WRAP <- 1048L
EP_PBNO <- 1049L
EP_PFS <- 1050L
EP_PFD <- 1051L
EP_PHG <- 1052L
EP_RAND <- 1053L
EP_USER <- 1054L
EP_SUM <- 1055L
EP_AVG <- 1056L
EP_MIN <- 1057L
EP_MAX <- 1058L
EP_NPV <- 1059L
EP_VAND <- 1060L
EP_VOR <- 1061L
EP_PUSH_NUM <- 1062L
EP_PUSH_VAR <- 1063L
EP_NORMDENS <- 1064L
EP_NORMINV <- 1065L
EP_TRIAINV <- 1066L
EP_EXPOINV <- 1067L
EP_UNIFINV <- 1068L
EP_MULTINV <- 1069L
EP_USRCOD <- 1070L
EP_SUMPROD <- 1071L
EP_SUMIF <- 1072L
EP_VLOOKUP <- 1073L
EP_VPUSH_NUM <- 1074L
EP_VPUSH_VAR <- 1075L
EP_VMULT <- 1076L
EP_SQR <- 1077L
EP_SINH <- 1078L
EP_COSH <- 1079L
EP_TANH <- 1080L
EP_ASINH <- 1081L
EP_ACOSH <- 1082L
EP_ATANH <- 1083L
EP_LOGB <- 1084L
EP_LOGX <- 1085L
EP_LNX <- 1086L
EP_TRUNC <- 1087L
EP_NORMSINV <- 1088L
EP_INT <- 1089L
EP_PUSH_STR <- 1090L
EP_VPUSH_STR <- 1091L
EP_PUSH_SPAR <- 1092L
EP_NORMPDF <- 1093L
EP_NORMCDF <- 1094L
EP_LSQ <- 1095L
EP_LNPSNX <- 1096L
EP_LNCPSN <- 1097L
EP_XEXPNAX <- 1098L
EP_XNEXPMX <- 1099L
EP_PBT <- 1100L
EP_PBTINV <- 1101L
EP_PBNINV <- 1102L
EP_PCC <- 1103L
EP_PCCINV <- 1104L
EP_PCXINV <- 1105L
EP_EXPN <- 1106L
EP_PFDINV <- 1107L
EP_PGA <- 1108L
EP_PGAINV <- 1109L
EP_PGE <- 1110L
EP_PGEINV <- 1111L
EP_PGU <- 1112L
EP_PGUINV <- 1113L
EP_PHGINV <- 1114L
EP_PLA <- 1115L
EP_PLAINV <- 1116L
EP_PLG <- 1117L
EP_PLGINV <- 1118L
EP_LGT <- 1119L
EP_LGTINV <- 1120L
EP_LGNM <- 1121L
EP_LGNMINV <- 1122L
EP_NGBN <- 1123L
EP_NGBNINV <- 1124L
EP_NRM <- 1125L
EP_PPT <- 1126L
EP_PPTINV <- 1127L
EP_PPSINV <- 1128L
EP_PTDINV <- 1129L
EP_TRIAN <- 1130L
EP_UNIFM <- 1131L
EP_PWB <- 1132L
EP_PWBINV <- 1133L
EP_NRMINV <- 1134L
EP_TRIANINV <- 1135L
EP_EXPNINV <- 1136L
EP_UNIFMINV <- 1137L
EP_MLTNMINV <- 1138L
EP_BTDENS <- 1139L
EP_BNDENS <- 1140L
EP_CCDENS <- 1141L
EP_CXDENS <- 1142L
EP_EXPDENS <- 1143L
EP_FDENS <- 1144L
EP_GADENS <- 1145L
EP_GEDENS <- 1146L
EP_GUDENS <- 1147L
EP_HGDENS <- 1148L
EP_LADENS <- 1149L
EP_LGDENS <- 1150L
EP_LGTDENS <- 1151L
EP_LGNMDENS <- 1152L
EP_NGBNDENS <- 1153L
EP_NRMDENS <- 1154L
EP_PTDENS <- 1155L
EP_PSDENS <- 1156L
EP_TDENS <- 1157L
EP_TRIADENS <- 1158L
EP_UNIFDENS <- 1159L
EP_WBDENS <- 1160L
EP_RADIANS <- 1161L
EP_DEGREES <- 1162L
EP_ROUND <- 1163L
EP_ROUNDUP <- 1164L
EP_ROUNDDOWN <- 1165L
EP_ERF <- 1166L
EP_PBN <- 1167L
EP_PBB <- 1168L
EP_PBBINV <- 1169L
EP_BBDENS <- 1170L
EP_PSS <- 1171L
EP_SSDENS <- 1172L
EP_SSINV <- 1173L
#----Model and solution information codes ( 110xx-140xx)----#
# Query codes for LSgetInfo-style functions; LS_IINFO_* return integers,
# LS_DINFO_* doubles, LS_SINFO_* strings.
## Model statistics (11001-11199)
LS_IINFO_NUM_NONZ_OBJ <- 11001L
LS_IINFO_NUM_SEMICONT <- 11002L
LS_IINFO_NUM_SETS <- 11003L
LS_IINFO_NUM_SETS_NNZ <- 11004L
LS_IINFO_NUM_QCP_CONS <- 11005L
LS_IINFO_NUM_CONT_CONS <- 11006L
LS_IINFO_NUM_INT_CONS <- 11007L
LS_IINFO_NUM_BIN_CONS <- 11008L
LS_IINFO_NUM_QCP_VARS <- 11009L
LS_IINFO_NUM_CONS <- 11010L
LS_IINFO_NUM_VARS <- 11011L
LS_IINFO_NUM_NONZ <- 11012L
LS_IINFO_NUM_BIN <- 11013L
LS_IINFO_NUM_INT <- 11014L
LS_IINFO_NUM_CONT <- 11015L
LS_IINFO_NUM_QC_NONZ <- 11016L
LS_IINFO_NUM_NLP_NONZ <- 11017L
LS_IINFO_NUM_NLPOBJ_NONZ <- 11018L
LS_IINFO_NUM_RDCONS <- 11019L
LS_IINFO_NUM_RDVARS <- 11020L
LS_IINFO_NUM_RDNONZ <- 11021L
LS_IINFO_NUM_RDINT <- 11022L
LS_IINFO_LEN_VARNAMES <- 11023L
LS_IINFO_LEN_CONNAMES <- 11024L
LS_IINFO_NUM_NLP_CONS <- 11025L
LS_IINFO_NUM_NLP_VARS <- 11026L
LS_IINFO_NUM_SUF_ROWS <- 11027L
LS_IINFO_NUM_IIS_ROWS <- 11028L
LS_IINFO_NUM_SUF_BNDS <- 11029L
LS_IINFO_NUM_IIS_BNDS <- 11030L
LS_IINFO_NUM_SUF_COLS <- 11031L
LS_IINFO_NUM_IUS_COLS <- 11032L
LS_IINFO_NUM_CONES <- 11033L
LS_IINFO_NUM_CONE_NONZ <- 11034L
LS_IINFO_LEN_CONENAMES <- 11035L
LS_DINFO_INST_VAL_MIN_COEF <- 11036L
LS_IINFO_INST_VARNDX_MIN_COEF <- 11037L
LS_IINFO_INST_CONNDX_MIN_COEF <- 11038L
LS_DINFO_INST_VAL_MAX_COEF <- 11039L
LS_IINFO_INST_VARNDX_MAX_COEF <- 11040L
LS_IINFO_INST_CONNDX_MAX_COEF <- 11041L
LS_IINFO_NUM_VARS_CARD <- 11042L
LS_IINFO_NUM_VARS_SOS1 <- 11043L
LS_IINFO_NUM_VARS_SOS2 <- 11044L
LS_IINFO_NUM_VARS_SOS3 <- 11045L
LS_IINFO_NUM_VARS_SCONT <- 11046L
LS_IINFO_NUM_CONS_L <- 11047L
LS_IINFO_NUM_CONS_E <- 11048L
LS_IINFO_NUM_CONS_G <- 11049L
LS_IINFO_NUM_CONS_R <- 11050L
LS_IINFO_NUM_CONS_N <- 11051L
LS_IINFO_NUM_VARS_LB <- 11052L
LS_IINFO_NUM_VARS_UB <- 11053L
LS_IINFO_NUM_VARS_LUB <- 11054L
LS_IINFO_NUM_VARS_FR <- 11055L
LS_IINFO_NUM_VARS_FX <- 11056L
LS_IINFO_NUM_INST_CODES <- 11057L
LS_IINFO_NUM_INST_REAL_NUM <- 11058L
LS_IINFO_NUM_SPARS <- 11059L
LS_IINFO_NUM_PROCS <- 11060L
## LP and NLP related info (11200-11299)
LS_IINFO_METHOD <- 11200L
LS_DINFO_POBJ <- 11201L
LS_DINFO_DOBJ <- 11202L
LS_DINFO_PINFEAS <- 11203L
LS_DINFO_DINFEAS <- 11204L
LS_IINFO_MODEL_STATUS <- 11205L
LS_IINFO_PRIMAL_STATUS <- 11206L
LS_IINFO_DUAL_STATUS <- 11207L
LS_IINFO_BASIC_STATUS <- 11208L
LS_IINFO_BAR_ITER <- 11209L
LS_IINFO_SIM_ITER <- 11210L
LS_IINFO_NLP_ITER <- 11211L
LS_IINFO_ELAPSED_TIME <- 11212L
LS_DINFO_MSW_POBJ <- 11213L
LS_IINFO_MSW_PASS <- 11214L
LS_IINFO_MSW_NSOL <- 11215L
LS_IINFO_IPM_STATUS <- 11216L
LS_DINFO_IPM_POBJ <- 11217L
LS_DINFO_IPM_DOBJ <- 11218L
LS_DINFO_IPM_PINFEAS <- 11219L
LS_DINFO_IPM_DINFEAS <- 11220L
LS_IINFO_NLP_CALL_FUN <- 11221L
LS_IINFO_NLP_CALL_DEV <- 11222L
LS_IINFO_NLP_CALL_HES <- 11223L
LS_IINFO_CONCURRENT_OPTIMIZER <- 11224L
LS_IINFO_LEN_STAGENAMES <- 11225L
LS_DINFO_BAR_ITER <- 11226L
LS_DINFO_SIM_ITER <- 11227L
LS_DINFO_NLP_ITER <- 11228L
LS_IINFO_BAR_THREADS <- 11229L
LS_IINFO_NLP_THREADS <- 11230L
LS_IINFO_SIM_THREADS <- 11231L
LS_DINFO_NLP_THRIMBL <- 11232L
LS_SINFO_NLP_THREAD_LOAD <- 11233L
LS_SINFO_BAR_THREAD_LOAD <- 11234L
LS_SINFO_SIM_THREAD_LOAD <- 11235L
LS_SINFO_ARCH <- 11236L
LS_IINFO_ARCH_ID <- 11237L
## MIP and MINLP related info (11300-11400)
LS_IINFO_MIP_STATUS <- 11300L
LS_DINFO_MIP_OBJ <- 11301L
LS_DINFO_MIP_BESTBOUND <- 11302L
LS_IINFO_MIP_SIM_ITER <- 11303L
LS_IINFO_MIP_BAR_ITER <- 11304L
LS_IINFO_MIP_NLP_ITER <- 11305L
LS_IINFO_MIP_BRANCHCOUNT <- 11306L
LS_IINFO_MIP_NEWIPSOL <- 11307L
LS_IINFO_MIP_LPCOUNT <- 11308L
LS_IINFO_MIP_ACTIVENODES <- 11309L
LS_IINFO_MIP_LTYPE <- 11310L
LS_IINFO_MIP_AOPTTIMETOSTOP <- 11311L
LS_IINFO_MIP_NUM_TOTAL_CUTS <- 11312L
LS_IINFO_MIP_GUB_COVER_CUTS <- 11313L
LS_IINFO_MIP_FLOW_COVER_CUTS <- 11314L
LS_IINFO_MIP_LIFT_CUTS <- 11315L
LS_IINFO_MIP_PLAN_LOC_CUTS <- 11316L
LS_IINFO_MIP_DISAGG_CUTS <- 11317L
LS_IINFO_MIP_KNAPSUR_COVER_CUTS <- 11318L
LS_IINFO_MIP_LATTICE_CUTS <- 11319L
LS_IINFO_MIP_GOMORY_CUTS <- 11320L
LS_IINFO_MIP_COEF_REDC_CUTS <- 11321L
LS_IINFO_MIP_GCD_CUTS <- 11322L
LS_IINFO_MIP_OBJ_CUT <- 11323L
LS_IINFO_MIP_BASIS_CUTS <- 11324L
LS_IINFO_MIP_CARDGUB_CUTS <- 11325L
LS_IINFO_MIP_CLIQUE_CUTS <- 11326L
LS_IINFO_MIP_CONTRA_CUTS <- 11327L
LS_IINFO_MIP_GUB_CONS <- 11328L
LS_IINFO_MIP_GLB_CONS <- 11329L
LS_IINFO_MIP_PLANTLOC_CONS <- 11330L
LS_IINFO_MIP_DISAGG_CONS <- 11331L
LS_IINFO_MIP_SB_CONS <- 11332L
LS_IINFO_MIP_IKNAP_CONS <- 11333L
LS_IINFO_MIP_KNAP_CONS <- 11334L
LS_IINFO_MIP_NLP_CONS <- 11335L
LS_IINFO_MIP_CONT_CONS <- 11336L
LS_DINFO_MIP_TOT_TIME <- 11347L
LS_DINFO_MIP_OPT_TIME <- 11348L
LS_DINFO_MIP_HEU_TIME <- 11349L
LS_IINFO_MIP_SOLSTATUS_LAST_BRANCH <- 11350L
LS_DINFO_MIP_SOLOBJVAL_LAST_BRANCH <- 11351L
LS_IINFO_MIP_HEU_LEVEL <- 11352L
LS_DINFO_MIP_PFEAS <- 11353L
LS_DINFO_MIP_INTPFEAS <- 11354L
LS_IINFO_MIP_WHERE_IN_CODE <- 11355L
LS_IINFO_MIP_FP_ITER <- 11356L
LS_DINFO_MIP_FP_SUMFEAS <- 11357L
LS_DINFO_MIP_RELMIPGAP <- 11358L
LS_DINFO_MIP_ROOT_OPT_TIME <- 11359L
LS_DINFO_MIP_ROOT_PRE_TIME <- 11360L
LS_IINFO_MIP_ROOT_METHOD <- 11361L
LS_DINFO_MIP_SIM_ITER <- 11362L
LS_DINFO_MIP_BAR_ITER <- 11363L
LS_DINFO_MIP_NLP_ITER <- 11364L
LS_IINFO_MIP_TOP_RELAX_IS_NON_CONVEX <- 11365L
LS_DINFO_MIP_FP_TIME <- 11366L
LS_IINFO_MIP_THREADS <- 11367L
LS_SINFO_MIP_THREAD_LOAD <- 11368L
LS_DINFO_MIP_ABSGAP <- 11369L
LS_DINFO_MIP_RELGAP <- 11370L
## GOP related info (11601-11699)
LS_DINFO_GOP_OBJ <- 11600L
LS_IINFO_GOP_SIM_ITER <- 11601L
LS_IINFO_GOP_BAR_ITER <- 11602L
LS_IINFO_GOP_NLP_ITER <- 11603L
LS_DINFO_GOP_BESTBOUND <- 11604L
LS_IINFO_GOP_STATUS <- 11605L
LS_IINFO_GOP_LPCOUNT <- 11606L
LS_IINFO_GOP_NLPCOUNT <- 11607L
LS_IINFO_GOP_MIPCOUNT <- 11608L
LS_IINFO_GOP_NEWSOL <- 11609L
LS_IINFO_GOP_BOX <- 11610L
LS_IINFO_GOP_BBITER <- 11611L
LS_IINFO_GOP_SUBITER <- 11612L
LS_IINFO_GOP_MIPBRANCH <- 11613L
LS_IINFO_GOP_ACTIVEBOXES <- 11614L
LS_IINFO_GOP_TOT_TIME <- 11615L
LS_IINFO_GOP_MAXDEPTH <- 11616L
LS_DINFO_GOP_PFEAS <- 11617L
LS_DINFO_GOP_INTPFEAS <- 11618L
LS_DINFO_GOP_SIM_ITER <- 11619L
LS_DINFO_GOP_BAR_ITER <- 11620L
LS_DINFO_GOP_NLP_ITER <- 11621L
LS_DINFO_GOP_LPCOUNT <- 11622L
LS_DINFO_GOP_NLPCOUNT <- 11623L
LS_DINFO_GOP_MIPCOUNT <- 11624L
LS_DINFO_GOP_BBITER <- 11625L
LS_DINFO_GOP_SUBITER <- 11626L
LS_DINFO_GOP_MIPBRANCH <- 11627L
LS_DINFO_GOP_FIRST_TIME <- 11628L
LS_DINFO_GOP_BEST_TIME <- 11629L
LS_DINFO_GOP_TOT_TIME <- 11630L
LS_IINFO_GOP_THREADS <- 11631L
LS_SINFO_GOP_THREAD_LOAD <- 11632L
LS_DINFO_GOP_ABSGAP <- 11633L
LS_DINFO_GOP_RELGAP <- 11634L
## Progress info during callbacks
LS_DINFO_SUB_OBJ <- 11700L
LS_DINFO_SUB_PINF <- 11701L
LS_DINFO_CUR_OBJ <- 11702L
LS_IINFO_CUR_ITER <- 11703L
LS_DINFO_CUR_BEST_BOUND <- 11704L
LS_IINFO_CUR_STATUS <- 11705L
LS_IINFO_CUR_LP_COUNT <- 11706L
LS_IINFO_CUR_BRANCH_COUNT <- 11707L
LS_IINFO_CUR_ACTIVE_COUNT <- 11708L
LS_IINFO_CUR_NLP_COUNT <- 11709L
LS_IINFO_CUR_MIP_COUNT <- 11710L
LS_IINFO_CUR_CUT_COUNT <- 11711L
LS_DINFO_CUR_ITER <- 11712L
## Model generation progress info (1800+)
LS_DINFO_GEN_PERCENT <- 11800L
LS_IINFO_GEN_NONZ_TTL <- 11801L
LS_IINFO_GEN_NONZ_NL <- 11802L
LS_IINFO_GEN_ROW_NL <- 11803L
LS_IINFO_GEN_VAR_NL <- 11804L
## IIS-IUS info
LS_IINFO_IIS_BAR_ITER <- 11850L
LS_IINFO_IIS_SIM_ITER <- 11851L
LS_IINFO_IIS_NLP_ITER <- 11852L
LS_DINFO_IIS_BAR_ITER <- 11853L
LS_DINFO_IIS_SIM_ITER <- 11854L
LS_DINFO_IIS_NLP_ITER <- 11855L
LS_IINFO_IIS_TOT_TIME <- 11856L
LS_IINFO_IIS_ACT_NODE <- 11857L
LS_IINFO_IIS_LPCOUNT <- 11858L
LS_IINFO_IIS_NLPCOUNT <- 11859L
LS_IINFO_IIS_MIPCOUNT <- 11860L
LS_IINFO_IIS_THREADS <- 11861L
LS_SINFO_IIS_THREAD_LOAD <- 11862L
LS_IINFO_IUS_BAR_ITER <- 11875L
LS_IINFO_IUS_SIM_ITER <- 11876L
LS_IINFO_IUS_NLP_ITER <- 11877L
LS_DINFO_IUS_BAR_ITER <- 11878L
LS_DINFO_IUS_SIM_ITER <- 11879L
LS_DINFO_IUS_NLP_ITER <- 11880L
LS_IINFO_IUS_TOT_TIME <- 11881L
LS_IINFO_IUS_ACT_NODE <- 11882L
LS_IINFO_IUS_LPCOUNT <- 11883L
LS_IINFO_IUS_NLPCOUNT <- 11884L
LS_IINFO_IUS_MIPCOUNT <- 11885L
LS_IINFO_IUS_THREADS <- 11886L
LS_SINFO_IUS_THREAD_LOAD <- 11887L
## Presolve info
LS_IINFO_PRE_NUM_RED <- 11900L
LS_IINFO_PRE_TYPE_RED <- 11901L
LS_IINFO_PRE_NUM_RDCONS <- 11902L
LS_IINFO_PRE_NUM_RDVARS <- 11903L
LS_IINFO_PRE_NUM_RDNONZ <- 11904L
LS_IINFO_PRE_NUM_RDINT <- 11905L
## Error info
LS_IINFO_ERR_OPTIM <- 11999L
## Misc info
LS_SINFO_MODEL_FILENAME <- 12000L
LS_SINFO_MODEL_SOURCE <- 12001L
LS_IINFO_MODEL_TYPE <- 12002L
LS_SINFO_CORE_FILENAME <- 12003L
LS_SINFO_STOC_FILENAME <- 12004L
LS_SINFO_TIME_FILENAME <- 12005L
LS_IINFO_ASSIGNED_MODEL_TYPE <- 12006L
## Stochastic Information
LS_DINFO_STOC_EVOBJ <- 13201L
LS_DINFO_STOC_EVPI <- 13202L
LS_DINFO_STOC_PINFEAS <- 13203L
LS_DINFO_STOC_DINFEAS <- 13204L
LS_DINFO_STOC_RELOPT_GAP <- 13205L
LS_DINFO_STOC_ABSOPT_GAP <- 13206L
LS_IINFO_STOC_SIM_ITER <- 13207L
LS_IINFO_STOC_BAR_ITER <- 13208L
LS_IINFO_STOC_NLP_ITER <- 13209L
LS_IINFO_NUM_STOCPAR_RHS <- 13210L
LS_IINFO_NUM_STOCPAR_OBJ <- 13211L
LS_IINFO_NUM_STOCPAR_LB <- 13212L
LS_IINFO_NUM_STOCPAR_UB <- 13213L
LS_IINFO_NUM_STOCPAR_INSTR_OBJS <- 13214L
LS_IINFO_NUM_STOCPAR_INSTR_CONS <- 13215L
LS_IINFO_NUM_STOCPAR_AIJ <- 13216L
LS_DINFO_STOC_TOTAL_TIME <- 13217L
LS_IINFO_STOC_STATUS <- 13218L
LS_IINFO_STOC_STAGE_BY_NODE <- 13219L
LS_IINFO_STOC_NUM_SCENARIOS <- 13220L
LS_DINFO_STOC_NUM_SCENARIOS <- 13221L
LS_IINFO_STOC_NUM_STAGES <- 13222L
LS_IINFO_STOC_NUM_NODES <- 13223L
LS_DINFO_STOC_NUM_NODES <- 13224L
LS_IINFO_STOC_NUM_NODES_STAGE <- 13225L
LS_DINFO_STOC_NUM_NODES_STAGE <- 13226L
LS_IINFO_STOC_NUM_NODE_MODELS <- 13227L
LS_IINFO_STOC_NUM_COLS_BEFORE_NODE <- 13228L
LS_IINFO_STOC_NUM_ROWS_BEFORE_NODE <- 13229L
LS_IINFO_STOC_NUM_COLS_DETEQI <- 13230L
LS_DINFO_STOC_NUM_COLS_DETEQI <- 13231L
LS_IINFO_STOC_NUM_ROWS_DETEQI <- 13232L
LS_DINFO_STOC_NUM_ROWS_DETEQI <- 13233L
LS_IINFO_STOC_NUM_COLS_DETEQE <- 13234L
LS_DINFO_STOC_NUM_COLS_DETEQE <- 13235L
LS_IINFO_STOC_NUM_ROWS_DETEQE <- 13236L
LS_DINFO_STOC_NUM_ROWS_DETEQE <- 13237L
LS_IINFO_STOC_NUM_COLS_NAC <- 13238L
LS_IINFO_STOC_NUM_ROWS_NAC <- 13239L
LS_IINFO_STOC_NUM_COLS_CORE <- 13240L
LS_IINFO_STOC_NUM_ROWS_CORE <- 13241L
LS_IINFO_STOC_NUM_COLS_STAGE <- 13242L
LS_IINFO_STOC_NUM_ROWS_STAGE <- 13243L
LS_IINFO_STOC_NUM_NBF_CUTS <- 13244L
LS_IINFO_STOC_NUM_NBO_CUTS <- 13245L
LS_IINFO_DIST_TYPE <- 13246L
LS_IINFO_SAMP_SIZE <- 13247L
LS_DINFO_SAMP_MEAN <- 13248L
LS_DINFO_SAMP_STD <- 13249L
LS_DINFO_SAMP_SKEWNESS <- 13250L
LS_DINFO_SAMP_KURTOSIS <- 13251L
LS_IINFO_STOC_NUM_QCP_CONS_DETEQE <- 13252L
LS_IINFO_STOC_NUM_CONT_CONS_DETEQE <- 13253L
LS_IINFO_STOC_NUM_INT_CONS_DETEQE <- 13254L
LS_IINFO_STOC_NUM_BIN_CONS_DETEQE <- 13255L
LS_IINFO_STOC_NUM_QCP_VARS_DETEQE <- 13256L
LS_IINFO_STOC_NUM_NONZ_DETEQE <- 13259L
LS_IINFO_STOC_NUM_BIN_DETEQE <- 13260L
LS_IINFO_STOC_NUM_INT_DETEQE <- 13261L
LS_IINFO_STOC_NUM_CONT_DETEQE <- 13262L
LS_IINFO_STOC_NUM_QC_NONZ_DETEQE <- 13263L
LS_IINFO_STOC_NUM_NLP_NONZ_DETEQE <- 13264L
LS_IINFO_STOC_NUM_NLPOBJ_NONZ_DETEQE <- 13265L
LS_IINFO_STOC_NUM_QCP_CONS_DETEQI <- 13266L
LS_IINFO_STOC_NUM_CONT_CONS_DETEQI <- 13267L
LS_IINFO_STOC_NUM_INT_CONS_DETEQI <- 13268L
LS_IINFO_STOC_NUM_BIN_CONS_DETEQI <- 13269L
LS_IINFO_STOC_NUM_QCP_VARS_DETEQI <- 13270L
LS_IINFO_STOC_NUM_NONZ_DETEQI <- 13271L
LS_IINFO_STOC_NUM_BIN_DETEQI <- 13272L
LS_IINFO_STOC_NUM_INT_DETEQI <- 13273L
LS_IINFO_STOC_NUM_CONT_DETEQI <- 13274L
LS_IINFO_STOC_NUM_QC_NONZ_DETEQI <- 13275L
LS_IINFO_STOC_NUM_NLP_NONZ_DETEQI <- 13276L
LS_IINFO_STOC_NUM_NLPOBJ_NONZ_DETEQI <- 13277L
LS_IINFO_STOC_NUM_EVENTS_BLOCK <- 13278L
LS_IINFO_STOC_NUM_EVENTS_DISCRETE <- 13279L
LS_IINFO_STOC_NUM_EVENTS_PARAMETRIC <- 13280L
LS_IINFO_STOC_NUM_EXPLICIT_SCENARIOS <- 13281L
LS_IINFO_STOC_PARENT_NODE <- 13282L
LS_IINFO_STOC_ELDEST_CHILD_NODE <- 13283L
LS_IINFO_STOC_NUM_CHILD_NODES <- 13284L
LS_IINFO_NUM_STOCPAR_INSTR <- 13285L
LS_IINFO_INFORUNB_SCEN_IDX <- 13286L
LS_DINFO_STOC_EVMU <- 13287L
LS_DINFO_STOC_EVWS <- 13288L
LS_DINFO_STOC_EVAVR <- 13289L
LS_IINFO_DIST_NARG <- 13290L
LS_IINFO_SAMP_VARCONTROL_METHOD <- 13291L
LS_IINFO_STOC_NUM_NLP_VARS_DETEQE <- 13292L
LS_IINFO_STOC_NUM_NLP_CONS_DETEQE <- 13293L
LS_DINFO_STOC_EVOBJ_LB <- 13294L
LS_DINFO_STOC_EVOBJ_UB <- 13295L
LS_DINFO_STOC_AVROBJ <- 13296L
LS_DINFO_SAMP_MEDIAN <- 13297L
LS_DINFO_DIST_MEDIAN <- 13298L
LS_IINFO_STOC_NUM_CC <- 13299L
LS_IINFO_STOC_NUM_ROWS_CC <- 13300L
LS_IINFO_STOC_ISCBACK <- 13301L
LS_IINFO_STOC_LP_COUNT <- 13302L
LS_IINFO_STOC_NLP_COUNT <- 13303L
LS_IINFO_STOC_MIP_COUNT <- 13304L
LS_DINFO_STOC_OPT_TIME <- 13305L
LS_DINFO_SAMP_CORRDIFF_ST <- 13306L
LS_DINFO_SAMP_CORRDIFF_CT <- 13307L
LS_DINFO_SAMP_CORRDIFF_SC <- 13308L
LS_IINFO_STOC_NUM_EQROWS_CC <- 13309L
LS_IINFO_STOC_NUM_ROWS <- 13310L
LS_IINFO_STOC_NUM_CC_VIOLATED <- 13311L
LS_IINFO_STOC_NUM_COLS_DETEQC <- 13312L
LS_IINFO_STOC_NUM_ROWS_DETEQC <- 13313L
LS_IINFO_STOC_NUM_QCP_CONS_DETEQC <- 13314L
LS_IINFO_STOC_NUM_CONT_CONS_DETEQC <- 13315L
LS_IINFO_STOC_NUM_INT_CONS_DETEQC <- 13316L
LS_IINFO_STOC_NUM_BIN_CONS_DETEQC <- 13317L
LS_IINFO_STOC_NUM_QCP_VARS_DETEQC <- 13318L
LS_IINFO_STOC_NUM_NONZ_DETEQC <- 13319L
LS_IINFO_STOC_NUM_BIN_DETEQC <- 13320L
LS_IINFO_STOC_NUM_INT_DETEQC <- 13321L
LS_IINFO_STOC_NUM_CONT_DETEQC <- 13322L
LS_IINFO_STOC_NUM_QC_NONZ_DETEQC <- 13323L
LS_IINFO_STOC_NUM_NLP_NONZ_DETEQC <- 13324L
LS_IINFO_STOC_NUM_NLPOBJ_NONZ_DETEQC <- 13325L
LS_IINFO_STOC_NUM_NLP_CONS_DETEQC <- 13326L
LS_IINFO_STOC_NUM_NLP_VARS_DETEQC <- 13327L
LS_IINFO_STOC_NUM_NONZ_OBJ_DETEQC <- 13328L
LS_IINFO_STOC_NUM_NONZ_OBJ_DETEQE <- 13329L
LS_DINFO_STOC_CC_PLEVEL <- 13340L
LS_IINFO_STOC_THREADS <- 13341L
LS_DINFO_STOC_THRIMBL <- 13342L
LS_IINFO_STOC_NUM_EQROWS <- 13343L
LS_SINFO_STOC_THREAD_LOAD <- 13344L
LS_IINFO_STOC_NUM_BUCKETS <- 13345L
##BNP information
LS_IINFO_BNP_SIM_ITER <- 14000L
LS_IINFO_BNP_LPCOUNT <- 14001L
LS_IINFO_BNP_NUMCOL <- 14002L
LS_DINFO_BNP_BESTBOUND <- 14003L
LS_DINFO_BNP_BESTOBJ <- 14004L
#-----------------Error codes (2001-2598)---------------#
# Status codes returned by LINDO API calls; zero signals success.
# NOTE(review): the original header said 2001-2299, but the codes
# defined below run up to 2598 (LSERR_LAST_ERROR).
LSERR_NO_ERROR <- 0L  # success: last call completed without error
LSERR_OUT_OF_MEMORY <- 2001L
LSERR_CANNOT_OPEN_FILE <- 2002L
LSERR_BAD_MPS_FILE <- 2003L
LSERR_BAD_CONSTRAINT_TYPE <- 2004L
LSERR_BAD_MODEL <- 2005L
LSERR_BAD_SOLVER_TYPE <- 2006L
LSERR_BAD_OBJECTIVE_SENSE <- 2007L
LSERR_BAD_MPI_FILE <- 2008L
LSERR_INFO_NOT_AVAILABLE <- 2009L
LSERR_ILLEGAL_NULL_POINTER <- 2010L
LSERR_UNABLE_TO_SET_PARAM <- 2011L
LSERR_INDEX_OUT_OF_RANGE <- 2012L
LSERR_ERRMSG_FILE_NOT_FOUND <- 2013L
LSERR_VARIABLE_NOT_FOUND <- 2014L
LSERR_INTERNAL_ERROR <- 2015L
LSERR_ITER_LIMIT <- 2016L
LSERR_TIME_LIMIT <- 2017L
LSERR_NOT_CONVEX <- 2018L
LSERR_NUMERIC_INSTABILITY <- 2019L
LSERR_STEP_TOO_SMALL <- 2021L
LSERR_USER_INTERRUPT <- 2023L
LSERR_PARAMETER_OUT_OF_RANGE <- 2024L
LSERR_ERROR_IN_INPUT <- 2025L
LSERR_TOO_SMALL_LICENSE <- 2026L
LSERR_NO_VALID_LICENSE <- 2027L
LSERR_NO_METHOD_LICENSE <- 2028L
LSERR_NOT_SUPPORTED <- 2029L
LSERR_MODEL_ALREADY_LOADED <- 2030L
LSERR_MODEL_NOT_LOADED <- 2031L
LSERR_INDEX_DUPLICATE <- 2032L
LSERR_INSTRUCT_NOT_LOADED <- 2033L
LSERR_OLD_LICENSE <- 2034L
LSERR_NO_LICENSE_FILE <- 2035L
LSERR_BAD_LICENSE_FILE <- 2036L
LSERR_MIP_BRANCH_LIMIT <- 2037L
LSERR_GOP_FUNC_NOT_SUPPORTED <- 2038L
LSERR_GOP_BRANCH_LIMIT <- 2039L
LSERR_BAD_DECOMPOSITION_TYPE <- 2040L
LSERR_BAD_VARIABLE_TYPE <- 2041L
LSERR_BASIS_BOUND_MISMATCH <- 2042L
LSERR_BASIS_COL_STATUS <- 2043L
LSERR_BASIS_INVALID <- 2044L
LSERR_BASIS_ROW_STATUS <- 2045L
LSERR_BLOCK_OF_BLOCK <- 2046L
LSERR_BOUND_OUT_OF_RANGE <- 2047L
LSERR_COL_BEGIN_INDEX <- 2048L
LSERR_COL_INDEX_OUT_OF_RANGE <- 2049L
LSERR_COL_NONZCOUNT <- 2050L
LSERR_INVALID_ERRORCODE <- 2051L
LSERR_ROW_INDEX_OUT_OF_RANGE <- 2052L
LSERR_TOTAL_NONZCOUNT <- 2053L
LSERR_MODEL_NOT_LINEAR <- 2054L
LSERR_CHECKSUM <- 2055L
LSERR_USER_FUNCTION_NOT_FOUND <- 2056L
LSERR_TRUNCATED_NAME_DATA <- 2057L
LSERR_ILLEGAL_STRING_OPERATION <- 2058L
LSERR_STRING_ALREADY_LOADED <- 2059L
LSERR_STRING_NOT_LOADED <- 2060L
LSERR_STRING_LENGTH_LIMIT <- 2061L
LSERR_DATA_TERM_EXIST <- 2062L
LSERR_NOT_SORTED_ORDER <- 2063L
LSERR_INST_MISS_ELEMENTS <- 2064L
LSERR_INST_TOO_SHORT <- 2065L
LSERR_INST_INVALID_BOUND <- 2066L
LSERR_INST_SYNTAX_ERROR <- 2067L
LSERR_COL_TOKEN_NOT_FOUND <- 2068L
LSERR_ROW_TOKEN_NOT_FOUND <- 2069L
LSERR_NAME_TOKEN_NOT_FOUND <- 2070L
LSERR_NOT_LSQ_MODEL <- 2071L
LSERR_INCOMPATBLE_DECOMPOSITION <- 2072L
LSERR_NO_MULTITHREAD_SUPPORT <- 2073L
LSERR_INVALID_PARAMID <- 2074L
LSERR_INVALID_NTHREADS <- 2075L
LSERR_COL_LIMIT <- 2076L
LSERR_QCDATA_NOT_LOADED <- 2077L
LSERR_NO_QCDATA_IN_ROW <- 2078L
LSERR_BAD_SMPS_CORE_FILE <- 2301L
LSERR_BAD_SMPS_TIME_FILE <- 2302L
LSERR_BAD_SMPS_STOC_FILE <- 2303L
LSERR_BAD_SMPI_CORE_FILE <- 2304L
LSERR_BAD_SMPI_STOC_FILE <- 2305L
LSERR_CANNOT_OPEN_CORE_FILE <- 2306L
LSERR_CANNOT_OPEN_TIME_FILE <- 2307L
LSERR_CANNOT_OPEN_STOC_FILE <- 2308L
LSERR_STOC_MODEL_NOT_LOADED <- 2309L
LSERR_STOC_SPAR_NOT_FOUND <- 2310L
LSERR_TIME_SPAR_NOT_FOUND <- 2311L
LSERR_SCEN_INDEX_OUT_OF_SEQUENCE <- 2312L
LSERR_STOC_MODEL_ALREADY_PARSED <- 2313L
LSERR_STOC_INVALID_SCENARIO_CDF <- 2314L
LSERR_CORE_SPAR_NOT_FOUND <- 2315L
LSERR_CORE_SPAR_COUNT_MISMATCH <- 2316L
LSERR_CORE_INVALID_SPAR_INDEX <- 2317L
LSERR_TIME_SPAR_NOT_EXPECTED <- 2318L
LSERR_TIME_SPAR_COUNT_MISMATCH <- 2319L
LSERR_CORE_SPAR_VALUE_NOT_FOUND <- 2320L
LSERR_INFO_UNAVAILABLE <- 2321L
LSERR_STOC_MISSING_BNDNAME <- 2322L
LSERR_STOC_MISSING_OBJNAME <- 2323L
LSERR_STOC_MISSING_RHSNAME <- 2324L
LSERR_STOC_MISSING_RNGNAME <- 2325L
LSERR_MISSING_TOKEN_NAME <- 2326L
LSERR_MISSING_TOKEN_ROOT <- 2327L
LSERR_STOC_NODE_UNBOUNDED <- 2328L
LSERR_STOC_NODE_INFEASIBLE <- 2329L
LSERR_STOC_TOO_MANY_SCENARIOS <- 2330L
LSERR_STOC_BAD_PRECISION <- 2331L
LSERR_CORE_BAD_AGGREGATION <- 2332L
LSERR_STOC_NULL_EVENT_TREE <- 2333L
LSERR_CORE_BAD_STAGE_INDEX <- 2334L
LSERR_STOC_BAD_ALGORITHM <- 2335L
LSERR_CORE_BAD_NUMSTAGES <- 2336L
LSERR_TIME_BAD_TEMPORAL_ORDER <- 2337L
LSERR_TIME_BAD_NUMSTAGES <- 2338L
LSERR_CORE_TIME_MISMATCH <- 2339L
LSERR_STOC_INVALID_CDF <- 2340L
LSERR_BAD_DISTRIBUTION_TYPE <- 2341L
LSERR_DIST_SCALE_OUT_OF_RANGE <- 2342L
LSERR_DIST_SHAPE_OUT_OF_RANGE <- 2343L
LSERR_DIST_INVALID_PROBABILITY <- 2344L
LSERR_DIST_NO_DERIVATIVE <- 2345L
LSERR_DIST_INVALID_SD <- 2346L
LSERR_DIST_INVALID_X <- 2347L
LSERR_DIST_INVALID_PARAMS <- 2348L
LSERR_DIST_ROOTER_ITERLIM <- 2349L
LSERR_ARRAY_OUT_OF_BOUNDS <- 2350L
LSERR_DIST_NO_PDF_LIMIT <- 2351L
LSERR_RG_NOT_SET <- 2352L
LSERR_DIST_TRUNCATED <- 2353L
LSERR_STOC_MISSING_PARAM_TOKEN <- 2354L
LSERR_DIST_INVALID_NUMPARAM <- 2355L
LSERR_CORE_NOT_IN_TEMPORAL_ORDER <- 2357L
LSERR_STOC_INVALID_SAMPLE_SIZE <- 2358L
LSERR_STOC_NOT_DISCRETE <- 2359L
LSERR_STOC_SCENARIO_LIMIT <- 2360L
LSERR_DIST_BAD_CORRELATION_TYPE <- 2361L
LSERR_TIME_NUMSTAGES_NOT_SET <- 2362L
LSERR_STOC_SAMPLE_ALREADY_LOADED <- 2363L
LSERR_STOC_EVENTS_NOT_LOADED <- 2364L
LSERR_STOC_TREE_ALREADY_INIT <- 2365L
LSERR_RG_SEED_NOT_SET <- 2366L
LSERR_STOC_OUT_OF_SAMPLE_POINTS <- 2367L
LSERR_STOC_SCENARIO_SAMPLING_NOT_SUPPORTED <- 2368L
LSERR_STOC_SAMPLE_NOT_GENERATED <- 2369L
LSERR_STOC_SAMPLE_ALREADY_GENERATED <- 2370L
LSERR_STOC_SAMPLE_SIZE_TOO_SMALL <- 2371L
LSERR_RG_ALREADY_SET <- 2372L
LSERR_STOC_BLOCK_SAMPLING_NOT_SUPPORTED <- 2373L
LSERR_EMPTY_SPAR_STAGE <- 2374L
LSERR_EMPTY_ROW_STAGE <- 2375L
LSERR_EMPTY_COL_STAGE <- 2376L
LSERR_STOC_CONFLICTING_SAMP_SIZES <- 2377L
LSERR_STOC_EMPTY_SCENARIO_DATA <- 2378L
LSERR_STOC_CORRELATION_NOT_INDUCED <- 2379L
LSERR_STOC_PDF_TABLE_NOT_LOADED <- 2380L
LSERR_STOC_NO_CONTINUOUS_SPAR_FOUND <- 2381L
LSERR_STOC_ROW_ALREADY_IN_CC <- 2382L
LSERR_STOC_CC_NOT_LOADED <- 2383L
LSERR_STOC_CUT_LIMIT <- 2384L
LSERR_STOC_GA_NOT_INIT <- 2385L
LSERR_STOC_ROWS_NOT_LOADED_IN_CC <- 2386L
LSERR_SAMP_ALREADY_SOURCE <- 2387L
LSERR_SAMP_USERFUNC_NOT_SET <- 2388L
LSERR_SAMP_INVALID_CALL <- 2389L
LSERR_STOC_MAP_MULTI_SPAR <- 2390L
LSERR_STOC_MAP_SAME_SPAR <- 2391L
LSERR_STOC_SPAR_NOT_EXPECTED_OBJ <- 2392L
LSERR_DIST_PARAM_NOT_SET <- 2393L
LSERR_SPRINT_MISSING_TAG_ROWS <- 2577L
LSERR_SPRINT_MISSING_TAG_COLS <- 2578L
LSERR_SPRINT_MISSING_TAG_RHS <- 2579L
LSERR_SPRINT_MISSING_TAG_ENDATA <- 2580L
LSERR_SPRINT_MISSING_VALUE_ROW <- 2581L
LSERR_SPRINT_EXTRA_VALUE_ROW <- 2582L
LSERR_SPRINT_MISSING_VALUE_COL <- 2583L
LSERR_SPRINT_EXTRA_VALUE_COL <- 2584L
LSERR_SPRINT_MISSING_VALUE_RHS <- 2585L
LSERR_SPRINT_EXTRA_VALUE_RHS <- 2586L
LSERR_SPRINT_MISSING_VALUE_BOUND <- 2587L
LSERR_SPRINT_EXTRA_VALUE_BOUND <- 2588L
LSERR_SPRINT_INTEGER_VARS_IN_MPS <- 2589L
LSERR_SPRINT_BINARY_VARS_IN_MPS <- 2590L
LSERR_SPRINT_SEMI_CONT_VARS_IN_MPS <- 2591L
LSERR_SPRINT_UNKNOWN_TAG_BOUNDS <- 2592L
LSERR_SPRINT_MULTIPLE_OBJ_ROWS <- 2593L
LSERR_SPRINT_COULD_NOT_SOLVE_SUBPROBLEM <- 2594L
LSERR_COULD_NOT_WRITE_TO_FILE <- 2595L
LSERR_COULD_NOT_READ_FROM_FILE <- 2596L
LSERR_READING_PAST_EOF <- 2597L
LSERR_LAST_ERROR <- 2598L
#-------------------Optimization Method------------------#
LS_METHOD_FREE <- 0L
LS_METHOD_PSIMPLEX <- 1L
LS_METHOD_DSIMPLEX <- 2L
LS_METHOD_BARRIER <- 3L
LS_METHOD_NLP <- 4L
LS_METHOD_MIP <- 5L
LS_METHOD_MULTIS <- 6L
LS_METHOD_GOP <- 7L
LS_METHOD_IIS <- 8L
LS_METHOD_IUS <- 9L
LS_METHOD_SBD <- 10L
LS_METHOD_GA <- 12L
#------------------Concurrent Strategy-------------------#
LS_STRATEGY_USER <- 0L
LS_STRATEGY_PRIMIP <- 1L
LS_STRATEGY_NODEMIP <- 2L
LS_STRATEGY_HEUMIP <- 3L
#---------------------NLP Methods------------------------#
LS_NMETHOD_FREE <- 4L
LS_NMETHOD_LSQ <- 5L
LS_NMETHOD_QP <- 6L
LS_NMETHOD_CONOPT <- 7L
LS_NMETHOD_SLP <- 8L
LS_NMETHOD_MSW_GRG <- 9L
#---------------------Solver Options---------------------#
LS_PROB_SOLVE_FREE <- 0L
LS_PROB_SOLVE_PRIMAL <- 1L
LS_PROB_SOLVE_DUAL <- 2L
LS_BAR_METHOD_FREE <- 4L
LS_BAR_METHOD_INTPNT <- 5L
LS_BAR_METHOD_CONIC <- 6L
LS_BAR_METHOD_QCONE <- 7L
LSSOL_BASIC_PRIMAL <- 11L
LSSOL_BASIC_DUAL <- 12L
LSSOL_BASIC_SLACK <- 13L
LSSOL_BASIC_REDCOST <- 14L
LSSOL_INTERIOR_PRIMAL <- 15L
LSSOL_INTERIOR_DUAL <- 16L
LSSOL_INTERIOR_SLACK <- 17L
LSSOL_INTERIOR_REDCOST <- 18L
#-----------------------Model Types------------------------#
LS_LP <- 10L
LS_QP <- 11L
LS_SOCP <- 12L
LS_SDP <- 13L
LS_NLP <- 14L
LS_MILP <- 15L
LS_MIQP <- 16L
LS_MISOCP <- 17L
LS_MISDP <- 18L
LS_MINLP <- 19L
LS_CONVEX_QP <- 20L
LS_CONVEX_NLP <- 21L
LS_CONVEX_MIQP <- 22L
LS_CONVEX_MINLP <- 23L
LS_UNDETERMINED <- -1L
#----------------------Decomposition Options---------------#
LS_LINK_BLOCKS_FREE <- 0L
LS_LINK_BLOCKS_SELF <- 1L
LS_LINK_BLOCKS_NONE <- 2L
LS_LINK_BLOCKS_COLS <- 3L
LS_LINK_BLOCKS_ROWS <- 4L
LS_LINK_BLOCKS_BOTH <- 5L
LS_LINK_BLOCKS_MATRIX <- 6L
#-------------------------Write Options--------------------#
LS_MPS_USE_MAX_NOTE <- 0L
LS_MPS_USE_MAX_CARD <- 1L
LS_MPS_USE_MAX_FLIP <- 2L
#-------------------------Derive Methods-------------------#
LS_DERIV_FREE <- 0L
LS_DERIV_FORWARD_DIFFERENCE <- 1L
LS_DERIV_BACKWARD_DIFFERENCE <- 2L
LS_DERIV_CENTER_DIFFERENCE <- 3L
#--------------------------Set Types-----------------------#
LS_MIP_SET_CARD <- 4L
LS_MIP_SET_SOS1 <- 1L
LS_MIP_SET_SOS2 <- 2L
LS_MIP_SET_SOS3 <- 3L
#------------------------QTerm Types-----------------------#
LS_QTERM_NONE <- 0L
LS_QTERM_INDEF <- 1L
LS_QTERM_POSDEF <- 2L
LS_QTERM_NEGDEF <- 3L
LS_QTERM_POS_SEMIDEF <- 4L
LS_QTERM_NEG_SEMIDEF <- 5L
#--------------------------MIP Mode------------------------#
LS_MIP_MODE_NO_TIME_EVENTS <- 2L
LS_MIP_MODE_FAST_FEASIBILITY <- 4L
LS_MIP_MODE_FAST_OPTIMALITY <- 8L
LS_MIP_MODE_NO_BRANCH_CUTS <- 16L
#--------------------------MIP Cut Level-------------------#
LS_MIP_GUB_COVER_CUTS <- 2L
LS_MIP_FLOW_COVER_CUTS <- 4L
LS_MIP_LIFT_CUTS <- 8L
LS_MIP_PLAN_LOC_CUTS <- 16L
LS_MIP_DISAGG_CUTS <- 32L
LS_MIP_KNAPSUR_COVER_CUTS <- 64L
LS_MIP_LATTICE_CUTS <- 128L
LS_MIP_GOMORY_CUTS <- 256L
LS_MIP_COEF_REDC_CUTS <- 512L
LS_MIP_GCD_CUTS <- 1024L
LS_MIP_OBJ_CUT <- 2048L
LS_MIP_BASIS_CUTS <- 4096L
LS_MIP_CARDGUB_CUTS <- 8192L
LS_MIP_DISJUN_CUTS <- 16384L
#--------------------------MIP Pre Level-------------------#
LS_MIP_PREP_SPRE <- 2L
LS_MIP_PREP_PROB <- 4L
LS_MIP_PREP_COEF <- 8L
LS_MIP_PREP_ELIM <- 16L
LS_MIP_PREP_DUAL <- 32L
LS_MIP_PREP_DBACK <- 64L
LS_MIP_PREP_BINROWS <- 128L
LS_MIP_PREP_AGGROWS <- 256L
LS_MIP_PREP_COEF_LIFTING <- 512L
LS_MIP_PREP_MAXPASS <- 1024L
#------------------------Solver Pre Level-------------------#
LS_SOLVER_PREP_SPRE <- 2L
LS_SOLVER_PREP_PFOR <- 4L
LS_SOLVER_PREP_DFOR <- 8L
LS_SOLVER_PREP_ELIM <- 16L
LS_SOLVER_PREP_DCOL <- 32L
LS_SOLVER_PREP_DROW <- 64L
LS_SOLVER_PREP_MAXPASS <- 1024L
#-------------------IIS & IUS analysis levels---------------#
LS_NECESSARY_ROWS <- 1L
LS_NECESSARY_COLS <- 2L
LS_SUFFICIENT_ROWS <- 4L
LS_SUFFICIENT_COLS <- 8L
LS_IIS_INTS <- 16L
LS_IISRANK_LTF <- 32L
LS_IISRANK_DECOMP <- 64L
LS_IISRANK_NNZ <- 128L
LS_IISLIMIT_MIS <- 256L
#-------------Infeasibility norms for IIS finder------------#
LS_IIS_NORM_FREE <- 0L
LS_IIS_NORM_ONE <- 1L
LS_IIS_NORM_INFINITY <- 2L
#-------------------------IIS Methods-----------------------#
LS_IIS_DEFAULT <- 0L
LS_IIS_DEL_FILTER <- 1L
LS_IIS_ADD_FILTER <- 2L
LS_IIS_GBS_FILTER <- 3L
LS_IIS_DFBS_FILTER <- 4L
LS_IIS_FSC_FILTER <- 5L
LS_IIS_ELS_FILTER <- 6L
#-------------codes for IINFO_MIP_WHERE_IN_CODE-------------#
# Phase indicators reported via the LS_IINFO_MIP_WHERE_IN_CODE
# info query (defined earlier in this file as 11355L).
LS_MIP_IN_PRESOLVE <- 0L
LS_MIP_IN_FP_MODE <- 1L
LS_MIP_IN_HEU_MODE <- 2L
LS_MIP_IN_ENUM <- 3L
LS_MIP_IN_CUT_ADD_TOP <- 4L
LS_MIP_IN_CUT_ADD_TREE <- 5L
LS_MIP_IN_BANDB <- 6L
#----------------------StocOptDataTypes---------------------#
# Location identifiers for stochastic parameters in the model.
# Negative codes flag special row/column roles; 0 presumably refers
# to matrix coefficients A(i,j) (name LS_IMAT_AIJ) -- confirm against
# the LINDO API manual.
# NOTE(review): the originals were written as `<--8L`, which R parses
# as `<- -8L` but visually reads like a distinct `<--` operator; the
# assignments below are spaced out for clarity, values unchanged.
LS_JCOL_INST <- -8L
LS_JCOL_RUB <- -7L
LS_JCOL_RLB <- -6L
LS_JCOL_RHS <- -5L
LS_IROW_OBJ <- -4L
LS_IROW_VUB <- -3L
LS_IROW_VLB <- -2L
LS_IROW_VFX <- -1L
LS_IMAT_AIJ <- 0L
#----------------------StocOptDistribFun--------------------#
LSDIST_TYPE_BINOMIAL <- 701L
LSDIST_TYPE_DISCRETE <- 702L
LSDIST_TYPE_DISCRETE_BLOCK <- 703L
LSDIST_TYPE_NEGATIVE_BINOMIAL<- 704L
LSDIST_TYPE_GEOMETRIC <- 705L
LSDIST_TYPE_POISSON <- 706L
LSDIST_TYPE_LOGARITHMIC <- 707L
LSDIST_TYPE_HYPER_GEOMETRIC <- 708L
LSDIST_TYPE_LINTRAN_BLOCK <- 709L
LSDIST_TYPE_SUB_BLOCK <- 710L
LSDIST_TYPE_SUB <- 711L
LSDIST_TYPE_USER <- 712L
LSDIST_TYPE_BETA <- 801L
LSDIST_TYPE_CAUCHY <- 802L
LSDIST_TYPE_CHI_SQUARE <- 803L
LSDIST_TYPE_EXPONENTIAL <- 804L
LSDIST_TYPE_F_DISTRIBUTION <- 805L
LSDIST_TYPE_GAMMA <- 806L
LSDIST_TYPE_GUMBEL <- 807L
LSDIST_TYPE_LAPLACE <- 808L
LSDIST_TYPE_LOGNORMAL <- 809L
LSDIST_TYPE_LOGISTIC <- 810L
LSDIST_TYPE_NORMAL <- 811L
LSDIST_TYPE_PARETO <- 812L
LSDIST_TYPE_STABLE_PARETIAN <- 813L
LSDIST_TYPE_STUDENTS_T <- 814L
LSDIST_TYPE_TRIANGULAR <- 815L
LSDIST_TYPE_UNIFORM <- 816L
LSDIST_TYPE_WEIBULL <- 817L
LSDIST_TYPE_WILCOXON <- 818L
LSDIST_TYPE_BETABINOMIAL <- 819L
LSDIST_TYPE_SYMMETRICSTABLE <- 820L
#-----------supported operations modifying the core---------#
LS_REPLACE <- 0L
LS_ADD <- 1L
LS_SUB <- 2L
LS_MULTIPLY <- 3L
LS_DIVIDE <- 4L
#-------------scenario indices for special cases------------#
# Negative pseudo-indices usable where an API call expects a scenario
# index but a special aggregate is meant. Semantics inferred from the
# names only (root node, average, median, user-defined, none) --
# confirm against the LINDO API manual.
LS_SCEN_ROOT <- -1L
LS_SCEN_AVRG <- -2L
LS_SCEN_MEDIAN <- -3L
LS_SCEN_USER <- -4L
LS_SCEN_NONE <- -5L
#---------warmstart rule in optimizing wait-see model-------#
# Basis warm-start rule selectors; -1 appears to mean "solver
# chooses" (FREE), consistent with the other *_FREE codes in this
# file -- TODO confirm.
LS_WSBAS_FREE <- -1L
LS_WSBAS_NONE <- 0L
LS_WSBAS_AVRG <- 1L
LS_WSBAS_LAST <- 2L
#------------------------StocOptSolver----------------------#
LS_METHOD_STOC_FREE <- -1L
LS_METHOD_STOC_DETEQ <- 0L
LS_METHOD_STOC_NBD <- 1L
LS_METHOD_STOC_ALD <- 2L
LS_METHOD_STOC_HS <- 4L
#----------------------StocOptDeteqType---------------------#
LS_DETEQ_FREE <- -1L
LS_DETEQ_IMPLICIT <- 0L
LS_DETEQ_EXPLICIT <- 1L
LS_DETEQ_CHANCE <- 2L
#------------------------DistribOptFun----------------------#
LS_USER <- 0L
LS_PDF <- 1L
LS_CDF <- 2L
LS_CDFINV <- 3L
LS_PDFDIFF <- 4L
#------------------------SampleOptCorr----------------------#
LS_CORR_TARGET <- -1L
LS_CORR_LINEAR <- 0L
LS_CORR_PEARSON <- 0L
LS_CORR_KENDALL <- 1L
LS_CORR_SPEARMAN <- 2L
#------------------------SampleOptType----------------------#
LS_MONTECARLO <- 0L
LS_LATINSQUARE <- 1L
LS_ANTITHETIC <- 2L
#------------------------RandOptMethod----------------------#
LS_RANDGEN_FREE <- -1L
LS_RANDGEN_SYSTEM <- 0L
LS_RANDGEN_LINDO1 <- 1L
LS_RANDGEN_LINDO2 <- 2L
LS_RANDGEN_LIN1 <- 3L
LS_RANDGEN_MULT1 <- 4L
LS_RANDGEN_MULT2 <- 5L
LS_RANDGEN_MERSENNE <- 6L
#------------------------SampOptNCMAlg----------------------#
LS_NCM_STD <- 1L
LS_NCM_GA <- 2L
LS_NCM_ALTP <- 4L
LS_NCM_L2NORM_CONE <- 8L
LS_NCM_L2NORM_NLP <- 16L
#--------------------------PtrTypes-------------------------#
LS_PTR_ENV <- 0L
LS_PTR_MODEL <- 1L
LS_PTR_SAMPLE <- 2L
LS_PTR_RG <- 3L
#---------------------------MtMode--------------------------#
LS_MTMODE_FREE <- -1L
LS_MTMODE_EXPLCT <- 0L
LS_MTMODE_PPCC <- 1L
LS_MTMODE_PP <- 2L
LS_MTMODE_CCPP <- 3L
LS_MTMODE_CC <- 4L
#---------------------FileFormatSprint---------------------#
LS_SPRINT_OUTPUT_FILE_FREE <- 0L
LS_SPRINT_OUTPUT_FILE_BIN <- 1L
LS_SPRINT_OUTPUT_FILE_TXT <- 2L
#-----------------------MSW_PREPMODE-----------------------#
LS_MSW_MODE_TRUNCATE_FREE <- 1L
LS_MSW_MODE_SCALE_REFSET <- 2L
LS_MSW_MODE_EXPAND_RADIUS <- 4L
LS_MSW_MODE_SKEWED_SAMPLE <- 8L
LS_MSW_MODE_BEST_LOCAL_BND <- 16L
LS_MSW_MODE_BEST_GLOBAL_BND <- 32L
LS_MSW_MODE_SAMPLE_FREEVARS <- 64L
LS_MSW_MODE_PRECOLLECT <- 128L
LS_MSW_MODE_POWER_SOLVE <- 256L
#-----------------------GA_CROSSOVER-----------------------#
LS_GA_CROSS_SBX <- 101L
LS_GA_CROSS_BLXA <- 102L
LS_GA_CROSS_BLXAB <- 103L
LS_GA_CROSS_HEU <- 104L
LS_GA_CROSS_ONEPOINT <- 201L
LS_GA_CROSS_TWOPOINT <- 202L
|
9d78d26489d6b288a6be77d5c445e47cf6086d34
|
f416f02e2e6eb2ab304966a1feabda65295228b2
|
/R/attack_model_whale.R
|
426b543e65853c8d1b6ea63dfd9514510fd5b038
|
[] |
no_license
|
nicholascarey/attackR
|
5150a55ef9c7176e08178ae8b799ab959b3d770d
|
287544fe96ef9eb58c33e3de1ed1755da97975ab
|
refs/heads/master
| 2020-07-26T20:30:10.820508
| 2020-07-16T15:43:04
| 2020-07-16T15:43:04
| 208,758,145
| 0
| 0
| null | 2020-07-16T09:35:28
| 2019-09-16T09:14:23
|
R
|
UTF-8
|
R
| false
| false
| 33,500
|
r
|
attack_model_whale.R
|
#'@title Attack Model
#'
#'@description *\code{attack_model_whale}* models the visual aspects of an
#' attack by a whale on a prey
#'
#' This function is a customised version of *\code{\link{attack_model}}* which
#' incorporates the unique changes to a rorqual whale's visual profile caused
#' by it opening its huge mouth when attacking a school of prey.
#'
#' It contains several additional inputs relating to the morphology of the
#' mouth and the timings of its opening, which greatly change the whale's
#' visual profile.
#'
#' This help document only contains help on use of the inputs specific to this
#' function. See *\code{\link{attack_model}}* for description of the others.
#'
#'@details These inputs are used to calculate the apparent width to the prey of
#' a whale's opening jaws, and this is subsequently used to calculate the
#' maximum **{α}**.
#'
#'@section *\code{jaw_length}*: The distance of the whale's jaw 'hinge' (i.e.
#' where upper and lower jaws meet) from the rostrum, in the same units as the
#' *\code{body_length}*. Can be an exact value or an allometric formula based
#' on length. For example:\cr
#'
#' \code{## Humpback Whale jaw location in cm (source:)}\cr \code{jaw_length =
#' (10^(1.205*log10(hw_bl/100) - 0.880))*100}
#'
#' \code{## Blue Whale jaw location in cm (source:)}\cr \code{jaw_length =
#' 10^(1.36624*log10(bw_bl/100) - 1.21286)*100}
#'
#' Note the body length values (*\code{hw_bl}*, *\code{bw_bl}*) must exist
#' externally; they cannot reference the entered *\code{body_length}* value
#' internal to the function, unless this also references the same existing
#' value.
#'
#'@section *\code{jaw_angle_upper}*: This is the angle in radians off the
#' longitudinal axis of the whale of the upper jaw at maximum gape. In both
#' humpbacks and blue whales this is 0.5235988 (30°).
#'
#'@section *\code{jaw_angle_lower}*: This is the angle in radians off the
#' longitudinal axis of the whale of the lower jaw at maximum gape. In both
#' humpbacks and blue whales this is 0.8726646 (50°).
#'
#'@section *\code{a_, b_ c_, d_} inputs*: *\code{a_mouth_open}* - when the mouth
#' starts to open \cr *\code{b_max_gape_start}* - when maximum gape is reached
#' \cr *\code{c_max_gape_end}* - when mouth starts to close, or how long it is
#' held at max gape \cr *\code{d_mouth_closed}* - when mouth is completely
#' closed \cr \cr
#'
#' These inputs set the timings (i.e. iteration, row or frame) of these events
#' within the model. If *\code{speed}* is a vector, they set the locations
#' along the speed vector these events occur. Similarly if *\code{speed}* is a
#' single value, they set similarly the timings within the model, but obviously
#' this is related to *\code{model_length}*.
#'
#' The complete mouth opening action does not have to occur during the model.
#' The inputs can be used to set, for example timing of max gape to be at the
#' last value in the speed vector. Also, if these are left *\code{NULL}*, the
#' mouth will not open, and the model is equivalent to one created using
#' *\code{\link{attack_model}}*.
#'
#'@section Application of the mouth opening and morphology inputs: The function
#' programmatically determines the location of the jaw tips at each iteration of
#' the model during the mouth opening event, and their distance from the prey,
#' calculates their visual angle **{α}**, and combines these to give a total
#' jaw **{α}**. This is then compared to the **{α}** of the rest of the body to
#' determine the maximum **{α}**. These calculations are done in the vertical
#' plane only, and occur separately from any **{α}** calculations done using
#' the body profiles; if the total jaw **{α}** is greater than the **{α}**
#' determined from the body widths, it will always be selected as the maximum
#' **{α}** regardless of any filtering between vertical and horizontal planes
#' using *\code{width_filter}*.
#'
#'@usage attack_model_whale(speed, model_length = NULL, frequency = 60,
#' body_length = NULL, body_width_v = NULL, body_width_h = NULL, profile_v =
#' NULL, profile_h = NULL, max_width_loc_v = NULL, max_width_loc_h = NULL,
#' width_filter = "mid", jaw_length = NULL, jaw_angle_upper = 0.5235988,
#' jaw_angle_lower = 0.8726646, a_mouth_open = NULL, b_max_gape_start = NULL,
#' c_max_gape_end = NULL, d_mouth_closed = NULL, simple_output = FALSE, plot =
#' TRUE, plot_from = 0, plot_to = NULL, alpha_range = NULL, dadt_range = NULL)
#'
#'@param speed numeric. Either a single constant speed value or vector of speeds
#' at the same frequency in Hz as *\code{frequency}*. Must be same unit as
#' *\code{body_length}* per second. If a data.frame is entered the first
#' column is used. For a constant speed value the function will repeat this
#' the required number of times at the correct frequency based on
#' *\code{model_length}*.
#'@param model_length integer. Total length of the model in rows. Required if
#' *\code{speed}* is a single value, in which case along with frequency it
#' determines the distance the predator starts at. If *\code{speed}* is a
#' vector *\code{model_length}* can be left NULL, in which case it is assumed
#' the predator reaches the prey on the last value, and the length of the speed
#' vector determines total length of model. Alternatively,
#' *\code{model_length}* can be used to set a different capture point along the
#' speed vector, in which case its value must be less than the total length of
#' *\code{speed}*.
#'@param frequency numeric. Frequency (Hz) of the model, i.e. how many speed and
#' other measurements per second. Must be same frequency in Hz as
#' *\code{speed}*.
#'@param body_length numeric. Length of the attacker. Must be same units as
#' *\code{body_width_v}* and *\code{body_width_h}*, and that used in
#' *\code{speed}*.
#'@param body_width_v numeric. Maximum width of the attacker in the vertical
#' plane.
#'@param body_width_h numeric. Maximum width of the attacker in the horizontal
#' plane.
#'@param profile_v numeric. A vector describing the shape of the attacker in the
#' vertical plane. See details.
#'@param profile_h numeric. A vector describing the shape of the attacker in the
#' horizontal plane. See details.
#'@param max_width_loc_v numeric. Location of the maximum girth in the vertical
#' plane of the predator along the body, if not provided as part of the body
#' profile inputs. See details.
#'@param max_width_loc_h numeric. Location of the maximum girth in the
#' horizontal plane of the predator along the body, if not provided as part of
#' the body profile inputs. See details.
#'@param width_filter string. Filters apparent widths between vertical and
#' horizontal planes for each row of the model in various ways. See details.
#'@param jaw_length numeric. Distance of the whale's jaw 'hinge' (i.e. where
#'  upper and lower jaws meet) from the rostrum, in the same units as the
#'  body_length. See details.
#'@param jaw_angle_upper numeric. Angle in radians off the whale's longitudinal
#'  axis of the upper jaw at maximum gape. See details.
#'@param jaw_angle_lower numeric. Angle in radians off the whale's longitudinal
#'  axis of the lower jaw at maximum gape. See details.
#'@param a_mouth_open integer. Iteration of the model (i.e. row, or placement
#' along the speed profile) where the mouth starts to open. See details.
#'@param b_max_gape_start integer. Iteration of the model (i.e. row, or placement
#' along the speed profile) where the mouth has reached max gape. See details.
#'@param c_max_gape_end integer. Iteration of the model (i.e. row, or placement
#'  along the speed profile) where the mouth starts to close. See details.
#'@param d_mouth_closed integer. Iteration of the model (i.e. row, or placement
#' along the speed profile) where the mouth has fully closed. See details.
#'@param simple_output logical. Choose structure of output. If TRUE, a simple
#' data frame of the model is returned, otherwise output is a *\code{list}*
#' object given an *\code{attack_model_whale}* class, and containing the final
#' model, input parameters, subset regions, and more.
#'@param plot logical. Choose to plot result.
#'@param plot_from numeric. Time on x-axis to plot from.
#'@param plot_to numeric. Time on x-axis to plot to.
#'@param alpha_range numeric. Vector of two values of alpha. Optional. These
#' will appear on any plot as a blue region, and if *\code{simple_output =
#' FALSE}*, this region of the model is subset out to a separate entry in the
#' saved *\code{list}* object. If any are not reached in the scenario there
#' should be a message. If upper range is not reached, it is plotted from lower
#' value to end of model, i.e. *\code{model_length}* location.
#'@param dadt_range numeric. Vector of two values of da/dt. Optional. These will
#' appear on any plot as a green region, and if *\code{simple_output = FALSE}*,
#' this region of the model is subset out to a separate entry in the saved
#' *\code{list}* object. If any are not reached in the scenario there should be
#' a message. If upper range is not reached, it is plotted from lower value to
#' end of model, i.e. *\code{model_length}* location.
#'
#'@author Nicholas Carey - \email{nicholascarey@gmail.com}, Dave Cade
#' \email{davecade@stanford.edu},
#'
#'@export
attack_model_whale <- function(
  speed,
  model_length = NULL,
  frequency = 60,
  body_length = NULL,
  body_width_v = NULL,
  body_width_h = NULL,
  profile_v = NULL,
  profile_h = NULL,
  max_width_loc_v = NULL,
  max_width_loc_h = NULL,
  width_filter = "mid",
  jaw_length = NULL,
  jaw_angle_upper = 0.5235988,
  jaw_angle_lower = 0.8726646,
  a_mouth_open = NULL,
  b_max_gape_start = NULL,
  c_max_gape_end = NULL,
  d_mouth_closed = NULL,
  simple_output = FALSE,
  plot = TRUE,
  plot_from = 0,
  plot_to = NULL,
  alpha_range = NULL,
  dadt_range = NULL){

  ## Models the visual angle (alpha) and its rate of change (da/dt) that a
  ## lunge-feeding whale presents to its prey as it approaches, optionally
  ## including the opening and closing of the jaws. See roxygen header above
  ## for the full parameter contract.
  ##
  ## FIX in this revision: three time vectors were previously built with a
  ## hard-coded divisor of 60 (frames -> seconds). They now divide by
  ## `frequency`, so results are correct when frequency != 60 Hz. At the
  ## default frequency = 60 behaviour is unchanged.

  # Error Checks and Messages -----------------------------------------------
  ## speed: a constant speed needs model_length to define the model duration
  if(length(speed) == 1 && is.null(model_length)) stop("For constant speed values a model_length is required")

  ## model_length cannot be longer than the speed vector
  if(length(speed) > 1 && !is.null(model_length) && model_length > length(speed)) stop("model_length cannot be longer than the speed vector")
  if(length(speed) > 1 && is.null(model_length)) message("model_length set to final value in speed vector")

  ## body_length: widths are interpolated at 1-unit resolution, so very small
  ## numeric lengths lose resolution
  if(body_length < 100) message("body_length is numerically quite low. For best results in interpolation of widths etc., use a unit that has higher numeric value, \nideally 100 or greater (ish). E.g. if using metres, use centimentres instead. ")

  ## Profile value checks - currently disabled; retained for reference
  # if(any(profile_v > 1) || any(profile_h > 1)) stop("Body profiles must only contain values between 0 and 1.")
  # if(any(profile_v < 0) || any(profile_h < 0)) stop("Body profiles must only contain values between 0 and 1.")
  # ## Can't both be NULL
  # if(is.null(profile_v) && is.null(profile_h)) stop("Provide at least one body profile.")
  # ## Must be over 2 long (nose, mid, tail)
  # if((!is.null(profile_v) && length(profile_v) < 3) || (!is.null(profile_h) && length(profile_h) < 3)) stop("Profiles must be at least 3 values long: e.g. nose, midpoint, tail.")
  # ## If a profile is empty, message that associated inputs ignored
  # if(is.null(profile_v)) message("No vertical body profile (profile_v) found. Any inputs for max_width_loc_v and body_width_v ignored.")
  # if(is.null(profile_h)) message("No horizontal body profile (profile_h) found. Any inputs for max_width_loc_h and body_width_h ignored.")
  # ## If a profile doesn't contain 1.0, then max_width_loc should be NULL
  # if(any(profile_v == 1) && !is.null(max_width_loc_v)) stop("profile_v already contains a max girth location (value of 1.0). max_width_loc_v cannot also be specified.")
  # if(any(profile_h == 1) && !is.null(max_width_loc_h)) stop("profile_h already contains a max girth location (value of 1.0). max_width_loc_h cannot also be specified.")
  # ## And vice versa - if no 1.0 in profile, then max_girth_loc required
  # if(!any(profile_v == 1) && is.null(max_width_loc_v)) stop("No max girth location (value of 1.0) found in profile_v. Please specify one with max_width_loc_v.")
  # if(!any(profile_h == 1) && is.null(max_width_loc_h)) stop("No max girth location (value of 1.0) found in profile_h. Please specify one with max_width_loc_h.")

  ## max_width_loc must be a proportion strictly between 0 and 1
  if(!is.null(max_width_loc_v) && (max_width_loc_v >= 1 || max_width_loc_v <= 0)) stop("Max width locations must be between 0 and 1. They represent a proportional distance along the length from the nose.")
  if(!is.null(max_width_loc_h) && (max_width_loc_h >= 1 || max_width_loc_h <= 0)) stop("Max width locations must be between 0 and 1. They represent a proportional distance along the length from the nose.")

  ## width_filter must be one of the supported options
  if(!(width_filter %in% (c("mid", "max", "min", "v", "h", "max_width_v", "max_width_h")))) stop("width_filter input not recognised.")

  ## body_length is required
  if(is.null(body_length)) stop("Please enter a body_length.")

  ## each supplied profile needs its matching maximum body width
  if(!is.null(profile_v) && is.null(body_width_v)) stop("Please enter a body_width_v.")
  if(!is.null(profile_h) && is.null(body_width_h)) stop("Please enter a body_width_h.")

  # Save inputs -------------------------------------------------------------
  ## put all inputs into a list for inclusion in final output
  inputs <- list(
    speed = speed,
    model_length = model_length,
    frequency = frequency,
    body_length = body_length,
    body_width_v = body_width_v,
    body_width_h = body_width_h,
    profile_v = profile_v,
    profile_h = profile_h,
    max_width_loc_v = max_width_loc_v,
    max_width_loc_h = max_width_loc_h,
    width_filter = width_filter,
    jaw_length = jaw_length,
    jaw_angle_upper = jaw_angle_upper,
    jaw_angle_lower = jaw_angle_lower,
    a_mouth_open = a_mouth_open,
    b_max_gape_start = b_max_gape_start,
    c_max_gape_end = c_max_gape_end,
    d_mouth_closed = d_mouth_closed,
    simple_output = simple_output,
    plot = plot,
    plot_from = plot_from,
    plot_to = plot_to,
    alpha_range = alpha_range,
    dadt_range = dadt_range)

  # Fix speed if dataframe ---------------------------------------------------
  ## If speed is a dataframe, use its FIRST column as the speed vector
  if(is.data.frame(speed)){
    speed <- speed[,1]}

  # v and h profile copying --------------------------------------------------
  ## If one of the profiles is empty, copy the other over (same with the
  ## associated _v and _h settings). Duplicates some calcs, but avoids the
  ## downstream code breaking.
  if(is.null(profile_h)){
    profile_h <- profile_v
    body_width_h <- body_width_v
    max_width_loc_h <- max_width_loc_v}

  if(is.null(profile_v)){
    profile_v <- profile_h
    body_width_v <- body_width_h
    max_width_loc_v <- max_width_loc_h}

  # Set prey location along speed profile ------------------------------------
  ## Save the original speed profile with a time column - used only for
  ## plotting later.
  ## FIX: time spacing previously used a hard-coded 60; use `frequency`.
  if(length(speed) == 1){
    speed_orig <- data.frame(time = seq(0, model_length/frequency-1/frequency, 1/frequency),
                             speed = rep(speed, model_length))
  } else {
    speed_orig <- data.frame(time = seq(0, length(speed)/frequency-1/frequency, 1/frequency),
                             speed = speed)
  }

  ## Truncate (or replicate) speed to model_length if model_length not NULL
  if(length(speed) == 1) speed <- rep(speed, model_length)
  if(length(speed) > 1 && !is.null(model_length)) speed <- speed[1:model_length]
  if(length(speed) > 1 && is.null(model_length)) model_length <- length(speed)

  # Mouth opening parameters --------------------------------------------------
  ## Is mouth opening?
  if(!is.null(a_mouth_open)) {mouth_opening <- TRUE
  } else {mouth_opening <- FALSE}

  ## If a-d are NULL, make all jaw X/Z displacements zero.
  ## This keeps the mouth closed for the whole model.
  if(!mouth_opening){
    message("Whale model with mouth NOT opening... ")
    ## set jaw coords all to zero to length of model
    up_X <- rep(0, length(speed))
    up_Z <- up_X
    low_X <- up_X
    low_Z <- up_X
    ## Also make inputs zero - for plotting later
    a_mouth_open <- 0
    b_max_gape_start <- 0
    c_max_gape_end <- 0
    d_mouth_closed <- 0
  } else {
    message("Whale model with mouth opening... ")
    ## rename input variables
    a <- a_mouth_open
    b <- b_max_gape_start
    c <- c_max_gape_end
    d <- d_mouth_closed
    ## extra term - for if mouth closes BEFORE end of speed vector
    e <- length(speed)

    ## Upper jaw tip, X (how far the tip moves backwards as the jaw rotates)
    up_X <- c()
    # mouth closed - fill zeros to a
    up_X[1:a] <- 0
    # mouth opens - fill a to b
    up_X[a:b] <- (jaw_length - cos(((a:b)-a)/(b-a)*jaw_angle_upper)*jaw_length) # % in cm
    # mouth held at max gape - repeat last value to c
    up_X[b:c] <- up_X[b]
    # mouth closes - fill c to d
    up_X[c:d] <- (jaw_length - cos(((d:c)-c)/(d-c)*jaw_angle_upper)*jaw_length) # % in cm
    # if mouth closes before end of vector, fill in zeros
    if(e > d){up_X[d:e] <- 0}
    # truncate to same length as speed/model_length
    up_X <- up_X[1:model_length]

    ## same for upper jaw Z (vertical displacement of jaw tip)
    up_Z <- c()
    up_Z[1:a] <- 0
    up_Z[a:b] <- sin(((a:b)-a)/(b-a)*jaw_angle_upper)*jaw_length
    up_Z[b:c] <- up_Z[b]
    up_Z[c:d] <- sin(((d:c)-c)/(d-c)*jaw_angle_upper)*jaw_length
    if(e > d){up_Z[d:e] <- 0}
    up_Z <- up_Z[1:model_length]

    ## same for lower jaw X
    low_X <- c()
    low_X[1:a] <- 0
    low_X[a:b] <- (jaw_length - cos(((a:b)-a)/(b-a)*jaw_angle_lower)*jaw_length)
    low_X[b:c] <- low_X[b]
    low_X[c:d] <- (jaw_length - cos(((d:c)-c)/(d-c)*jaw_angle_lower)*jaw_length)
    if(e > d){low_X[d:e] <- 0}
    low_X <- low_X[1:model_length]

    ## same for lower jaw Z
    low_Z <- c()
    low_Z[1:a] <- 0
    low_Z[a:b] <- sin(((a:b)-a)/(b-a)*jaw_angle_lower)*jaw_length
    low_Z[b:c] <- low_Z[b]
    low_Z[c:d] <- sin(((d:c)-c)/(d-c)*jaw_angle_lower)*jaw_length
    if(e > d){low_Z[d:e] <- 0}
    low_Z <- low_Z[1:model_length]
  }

  # Calculate start distances -------------------------------------------------
  ## Calculate the start_distance using the speed vector.
  ## Remove last value because it is a derivative.
  ## This is of the jaw. Takes into account jaw moving backwards due to it
  ## opening at the end of the model (start_distance would otherwise be
  ## larger - further back).
  start_distance <- sum((speed[-length(speed)]/frequency)) - low_X[model_length]

  # Create model ---------------------------------------------------------------
  ## Build up model as dataframe by column
  ## frame
  model_data <- data.frame(frame = seq(1, length(speed), 1))
  ## speed profile
  model_data$speed <- speed
  ## Time and time reversed (in seconds)
  ## FIX: was nrow(model_data)/60 - only correct at 60 Hz; use frequency.
  model_data$time <- seq(0, nrow(model_data)/frequency-1/frequency, 1/frequency)
  model_data$time_rev <- rev(model_data$time)

  # Distances ------------------------------------------------------------------
  ## Prey distance from nose tip.
  ## This is assuming mouth stays closed. Therefore towards end of the model
  ## some of these will go past the prey and will need filtered out.
  model_data$distance_nose <-
    c(start_distance,
      start_distance-(cumsum(model_data$speed[-length(model_data$speed)]/frequency)))

  ## Prey distance from low jaw tip (assumed capture point).
  ## Adds low_X - distance the jaw has moved backwards due to opening.
  model_data$distance_low_jaw <-
    c(start_distance,
      start_distance-(cumsum(model_data$speed[-length(model_data$speed)]/frequency))) + low_X

  model_data$distance_up_jaw <-
    c(start_distance,
      start_distance-(cumsum(model_data$speed[-length(model_data$speed)]/frequency))) + up_X

  ## Calc alpha of both jaws plus total jaw alpha.
  ## Replace any values above pi/2 for half jaw.
  alpha_up_jaw <- atan2(up_Z, model_data$distance_up_jaw)
  alpha_up_jaw <- replace(alpha_up_jaw, alpha_up_jaw > pi/2, pi/2)
  alpha_low_jaw <- atan2(low_Z, model_data$distance_low_jaw)
  alpha_low_jaw <- replace(alpha_low_jaw, alpha_low_jaw > pi/2, pi/2)
  alpha_total_jaw <- alpha_up_jaw + alpha_low_jaw

  # Widths ---------------------------------------------------------------------
  ## This section takes the two body profiles, incorporates max_width_loc if
  ## it isn't in either, and interpolates linearly between each segment.
  ## Then it works out a final width - either mean/max/min.
  ## Widths at resolution of body_length unit.
  widths_v <- interpolate_widths(profile_v, max_width_loc_v, body_length, body_width_v)
  widths_h <- interpolate_widths(profile_h, max_width_loc_h, body_length, body_width_h)
  widths_df <- data.frame(widths_v = widths_v,
                          widths_h = widths_h)

  ## Filter widths based on width_filter input, and add distance from nose.
  ## When the mouth is opening the jaw region is NA'd out so only the body
  ## behind the jaw hinge contributes - the jaw alpha is handled separately.
  if(width_filter == "mid") {
    widths <- apply(widths_df, 1, function(x) mean(x))
    widths <- data.frame(dist_from_nose = (1:length(widths))-1,
                         width = widths)
    ## if mouth is opening, only use widths up to jaw
    if(mouth_opening) widths[1:round(jaw_length),2] <- NA}

  if(width_filter == "max") {
    widths <- apply(widths_df, 1, function(x) max(x))
    widths <- data.frame(dist_from_nose = (1:length(widths))-1,
                         width = widths)
    if(mouth_opening) widths[1:round(jaw_length),2] <- NA}

  if(width_filter == "min") {
    widths <- apply(widths_df, 1, function(x) min(x))
    widths <- data.frame(dist_from_nose = (1:length(widths))-1,
                         width = widths)
    if(mouth_opening) widths[1:round(jaw_length),2] <- NA}

  if(width_filter == "v") {
    widths <- widths_df[[1]]
    widths <- data.frame(dist_from_nose = (1:length(widths))-1,
                         width = widths)
    if(mouth_opening) widths[1:round(jaw_length),2] <- NA}

  if(width_filter == "h") {
    widths <- widths_df[[2]]
    widths <- data.frame(dist_from_nose = (1:length(widths))-1,
                         width = widths)
    if(mouth_opening) widths[1:round(jaw_length),2] <- NA}

  if(width_filter == "max_width_v") {
    ## location of max_width - first one in case of multiple matches
    max_width_index_v <- which.max(widths_df[[1]])
    widths <- max(widths_df[[1]])
    widths <- data.frame(dist_from_nose = max_width_index_v-1,
                         width = widths)}

  if(width_filter == "max_width_h") {
    ## location of max_width
    max_width_index_h <- which.max(widths_df[[2]])
    widths <- max(widths_df$widths_h)
    widths <- data.frame(dist_from_nose = max_width_index_h-1,
                         width = widths)}

  ## For every row of model add distance_nose to segment distances.
  ## This gives distance of every segment from prey at every iteration of model.
  distances_all <- lapply(model_data$distance_nose, function(x) widths$dist_from_nose + x)

  ## Convert these to alpha
  alpha_all <- lapply(distances_all, function(x) calc_alpha(widths$width, x))

  ## Index of max alpha,
  ## i.e. what part of body is max alpha at any particular stage
  alpha_max_index <- sapply(alpha_all, function(x) which.max(x))

  ## Max alpha of any body segment at each iteration
  alpha_max <- sapply(alpha_all, function(x) max(x, na.rm = TRUE))

  ## max of all body segments or total jaw
  alpha_max <- pmax(alpha_max, alpha_total_jaw, na.rm = TRUE)

  ## Add to model
  model_data$alpha <- alpha_max

  ## Calc dadt (rate of change of alpha, per second)
  model_data$dadt <- c(
    NA,
    diff(model_data$alpha) * frequency)

  # Find alpha and dadt ranges --------------------------------------------------
  ## dadt region
  if(is.null(dadt_range)){
    dadt_range_region <- NULL
  } else {
    ## find location of closest match to LOWER dadt_range
    dadt_range_low_index <- first_closest(dadt_range[1], model_data$dadt)
    ## if it's never reached, set it to NA
    if(length(dadt_range_low_index)==0){
      dadt_range_low_index <- NA
      message("Lower range of dadt_range never reached in this scenario. No dadt_range range plotted.")}

    ## same for UPPER dadt_range range
    dadt_range_high_index <- first_closest(dadt_range[2], model_data$dadt)
    if(length(dadt_range_high_index)==0){
      dadt_range_high_index <- NA
      message("Upper range of dadt_range never reached in this scenario.")}

    ## Use these to subset model to dadt_range range
    if(is.na(dadt_range_low_index)){
      dadt_range_region <- "No matching dadt_range region in this model"
    } else if (is.na(dadt_range_high_index)) {
      dadt_range_region <- model_data[dadt_range_low_index:model_length,]
    } else {
      dadt_range_region <- model_data[dadt_range_low_index:dadt_range_high_index,]
    }
  }

  ## alpha region
  if(is.null(alpha_range)){
    alpha_range_region <- NULL
  } else {
    ## find location of closest match to LOWER alpha_range
    alpha_range_low_index <- first_closest(alpha_range[1], model_data$alpha)
    ## if it's never reached, set it to NA
    if(length(alpha_range_low_index)==0){
      alpha_range_low_index <- NA
      message("Lower range of alpha_range never reached in this scenario. No alpha_range range plotted.")}

    ## same for UPPER alpha_range range
    alpha_range_high_index <- first_closest(alpha_range[2], model_data$alpha)
    if(length(alpha_range_high_index)==0){
      alpha_range_high_index <- NA
      message("Upper range of alpha_range never reached in this scenario.")}

    ## Use these to subset model to alpha_range range.
    ## NOTE(review): the -1/+1 padding here differs from the dadt subset above
    ## - presumably deliberate (include the bracketing rows); confirm.
    if(is.na(alpha_range_low_index)){
      alpha_range_region <- "No matching alpha_range region in this model"
    } else if (is.na(alpha_range_high_index)) {
      alpha_range_region <- model_data[(alpha_range_low_index-1):model_length,]
    } else {
      alpha_range_region <- model_data[(alpha_range_low_index-1):(alpha_range_high_index+1),]
    }
  }

  # Assemble final output --------------------------------------------------------
  ## if simple_output = TRUE, output model data frame only
  if(simple_output == TRUE){
    output <- model_data
    ## otherwise assemble output list() object
  } else if(simple_output == FALSE){
    output <- list(
      final_model = model_data,
      inputs = inputs,
      dadt_range_region = dadt_range_region,
      alpha_range_region = alpha_range_region,
      all_data = list(
        widths_interpolated = widths_df,
        widths_filtered = widths,
        distances_per_i = distances_all,
        alpha_per_i = alpha_all,
        body_max_alpha_per_i = alpha_max_index,
        alpha_up_jaw = alpha_up_jaw,
        alpha_low_jaw = alpha_low_jaw,
        alpha_total_jaw = alpha_total_jaw))

    ## Give it a class. Only works for lists, not dataframes.
    class(output) <- "attack_model_whale"
  }

  # Plot ------------------------------------------------------------------------
  if(plot == TRUE){

    ## make x limits
    if(is.null(plot_to)){
      plot_to <- max(speed_orig$time)
    }
    ## make x limits
    if(is.null(plot_from)){
      plot_from <- 0
    }

    ## set plot parameters - will apply to all unless changed
    ## mgp controls - (axis.title.position, axis.label.position, axis.line.position)
    ## NOTE(review): this modifies global graphics state without restoring it
    ## via on.exit(par(old)) - confirm callers are OK with that.
    par(mgp = c(3, 0.5, 0), mar = c(3,1.5,1.5,1.5))

    ## plot complete speed profile
    # colour for all speed plotting
    speed_col <- "grey"

    ## as blank points
    plot(speed~time, data = speed_orig,
         ylim = c(0, max(speed_orig$speed)),
         xlim = c(plot_from, plot_to),
         axes = FALSE,
         pch = ".",
         col = "white",
         ylab = "",
         xlab = "")

    ## add line of speed
    with(speed_orig, lines(x = time, y = speed, lwd = 3, col = speed_col))
    ## add points of mouth open
    with(speed_orig, points(x = time[a_mouth_open:b_max_gape_start],
                            y = speed[a_mouth_open:b_max_gape_start],
                            pch = "*",
                            col = "red"))
    ## add points of max gape
    with(speed_orig, points(x = time[b_max_gape_start:c_max_gape_end],
                            y = speed[b_max_gape_start:c_max_gape_end],
                            pch = "*",
                            col = "gold2"))
    ## add points of mouth closing
    with(speed_orig, points(x = time[c_max_gape_end:d_mouth_closed],
                            y = speed[c_max_gape_end:d_mouth_closed],
                            pch = "*",
                            col = "darkgreen"))

    ## add x axis and title
    axis(side = 1, col = "black", lwd = 3, col.axis = "black",
         at = seq(0, max(speed_orig$time)+1, 1))
    mtext("time", side = 1, line = 1.5)
    ## add y axis and title
    axis(side = 2, col = speed_col, lwd = 3, col.axis = speed_col, pos = plot_from, cex.axis = 0.8)

    ## plot alpha
    par(new=T)
    # colour for all alpha plotting
    alpha_col <- "slateblue1"
    plot(alpha~time, data = model_data,
         ylim = c(0, max(model_data$alpha, na.rm = T)),
         xlim = c(plot_from, plot_to),
         pch = ".",
         col = "white",
         axes = FALSE,
         ylab = "",
         xlab = "")
    with(model_data, lines(x = time, y = alpha, col = alpha_col, lwd = 3))
    ## add y axis and title
    axis(side = 2, col = alpha_col, lwd = 3, col.axis = alpha_col,
         pos = plot_from+(0.05*(plot_to-plot_from)), cex.axis = 0.8)

    ## plot dadt
    par(new=T)
    # colour for all dadt plotting
    dadt_col <- "green"
    plot(dadt~time, data = model_data,
         ylim = c(0, max(model_data$dadt, na.rm = T)),
         xlim = c(plot_from, plot_to),
         pch = ".",
         col = "white",
         axes=FALSE,
         ylab = "",
         xlab = "")
    with(model_data, lines(x = time, y = dadt, col = dadt_col, lwd = 3))
    axis(side = 2, col = dadt_col, lwd = 3, col.axis = dadt_col,
         pos = plot_from+(0.1*(plot_to-plot_from)), cex.axis = 0.8)

    ## add dadt_range range.
    ## if upper and lower of dadt_range range locations are equal, just draw
    ## dashed line, but don't draw it if both equal length of x - this means
    ## neither actually occurs.
    if(!is.null(dadt_range)){
      abline(v = model_data$time[dadt_range_low_index],
             col = rgb(15/255,245/255,53/255, alpha = 0.4),
             lty = 1,
             lwd = 3)
      abline(v = model_data$time[dadt_range_high_index],
             col = rgb(15/255,245/255,53/255, alpha = 0.4),
             lty = 1,
             lwd = 3)
      rect(xleft = model_data$time[dadt_range_low_index],
           ybottom = -5,
           xright = model_data$time[dadt_range_high_index],
           ytop = max(model_data$dadt, na.rm = T)+5,
           col = rgb(15/255,245/255,53/255, alpha = 0.2),
           lty = 0)
      ## if dadt_range_high_index is NA, then fill to end
      if(is.na(dadt_range_high_index)){
        rect(xleft = model_data$time[dadt_range_low_index],
             ybottom = -5,
             xright = model_data$time[nrow(model_data)],
             ytop = 100,
             col = rgb(15/255,245/255,53/255, alpha = 0.2),
             lty = 0)}
    }

    ## add alpha_range range - same logic as the dadt_range region above
    if(!is.null(alpha_range)){
      abline(v = model_data$time[alpha_range_low_index],
             col = rgb(77/255,195/255,255/255, alpha = 0.4),
             lty = 1,
             lwd = 3)
      abline(v = model_data$time[alpha_range_high_index],
             col = rgb(77/255,195/255,255/255, alpha = 0.4),
             lty = 1,
             lwd = 3)
      rect(xleft = model_data$time[alpha_range_low_index],
           ybottom = -5,
           xright = model_data$time[alpha_range_high_index],
           ytop = 20,
           col = rgb(77/255,195/255,255/255, alpha = 0.2),
           lty = 0)
      ## if alpha_range_high_index is NA, then fill to end
      if(is.na(alpha_range_high_index)){
        rect(xleft = model_data$time[alpha_range_low_index],
             ybottom = -5,
             xright = model_data$time[nrow(model_data)],
             ytop = max(model_data$alpha, na.rm = T)+5,
             col = rgb(77/255,195/255,255/255, alpha = 0.2),
             lty = 0)}
    }

    ## add prey location line
    abline(v=model_data$time[model_length], lty = 3, lwd = 2)

    ## add legend
    ## NOTE(review): pch is length 3 for 7 legend entries - marker assignment
    ## relies on recycling; confirm the rendered legend is as intended.
    legend("topleft", inset=.15,
           c("Speed", "Alpha", "dadt", "prey", "mouth opening", "max gape", "mouth closing"),
           text.col = c(speed_col, alpha_col, dadt_col, "black", "red", "gold2", "darkgreen"),
           col = c(speed_col, alpha_col, dadt_col, "black", "red", "gold2", "darkgreen"),
           lty=c(1,1,1,3, NA, NA, NA),
           pch = c("*", "*", "*"),
           lwd = 3, cex=0.8)
  }

  # Return results --------------------------------------------------------------
  return(output)}
|
a51690fbe43b817008c4dbfd4411063589f88dad
|
9c083e82eac6eb131c969ddf4e159b6d6f9958f8
|
/MBS Project/modcode/optimization2.R
|
95ce6466c5ec1dc834b06756806c6075bad2bde5
|
[] |
no_license
|
darvilp/MBSteam2
|
e7ea4f82f75e543b40185f4c9f218d89a0e77ee3
|
4fbec6663fdc691197fb31ccc0e0e108914e1a97
|
refs/heads/master
| 2016-09-07T18:35:57.684538
| 2014-09-06T01:29:44
| 2014-09-06T01:29:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,131
|
r
|
optimization2.R
|
#This was the emailed copy
## Load in the DiffEq solver
library(deSolve)
# Clear the memory (NOTE: wipes the user's entire workspace - acceptable only
# because this is a standalone script)
rm(list=ls())
#200 cases of polio in 2011-> taking into account Pakistan's pop and with .3% of cases reported:
#I=~.0004=4E-4 currently.
#So I=~ 1E-6 should be eradication condition

## Fraction of the population in the 'young' age class; the remainder is 'old'.
youngperc <- .37
oldperc <- 1 - youngperc

## Initial conditions (proportions of total population), split by age class.
## Suffix 'y' = young, unsuffixed = old. S/V/I/R = susceptible/vaccinated/
## infected/recovered; C, H, L and M are bookkeeping states (see SVIR) -
## presumably modernization, disease deaths, other deaths and cumulative
## vaccinations respectively; verify against the derivative equations.
init.values <- c(
  Sy = .495*youngperc, Vy = .00*youngperc,
  Iy = .01*youngperc, Ry = .495*youngperc,
  Cy = .0, Hy = 0, Ly = 0,
  S = .495*oldperc, V = .00*oldperc,
  I = .01*oldperc, R = .495*oldperc,
  C = .0, H = 0, L = 0, M = 0)
#Tested with rough equilibria values and it looks weird. Mostly because we start with no vaccinated.

## Simulate 10 years at daily resolution.
times <- seq(0, 10*365, by = 1)

## Best (lowest) cumulative vaccination count found so far by the optimization
## loop below; initialised to a sentinel upper bound.
bestM <- 9000000
# FIX: removed a stray `print(i)` here - `i` is not defined until the for-loop
# below runs, so it raised "object 'i' not found" at this point.
SVIR <- function(time, y.values, parameters) {
  # Derivative function for deSolve::ode(): an age-structured SVIR polio model.
  # States (y.values): Sy,Vy,Iy,Ry,Cy,Hy,Ly (young), S,V,I,R,C,H,L (old) and
  # M = cumulative vaccinations delivered. Returns list(derivatives) in the
  # same order as the state vector, as ode() requires.
  with(as.list(c(y.values, parameters)), {
    # NOTE(review): this bookkeeping block is broken as written - `break` is
    # invalid outside a loop (it errors to top level if the condition is ever
    # TRUE), the statements after it are unreachable, and plain assignments to
    # bestM/bestvac inside with() would not persist between calls (they would
    # need `<<-`). It is inert in practice because the supplied parameter
    # done = 1 makes the condition FALSE. Left as-is pending clarification of
    # the intended stopping/recording rule.
    if((Iy+I)/(S+V+I+R+Sy+Vy+Iy+Ry)>.01 & time>55 & done==0){
      break
      done=0
      if(M<bestM){
        bestM=M
        bestvac=(i)/1000
        print(bestvac)
        print(bestM)
        print(time)
        print('endline')
      }
    }
    else{print('h')}

    # Young compartments. Vaccination (vacy) is delivered only during the first
    # `vaccyclelength` days of each `vaccycle`-day campaign cycle; outside the
    # campaign window dM.dt = 0 and no Vy inflow occurs.
    # NOTE(review): the else branch still subtracts vacy from Sy - presumably
    # unintended outside a campaign; confirm before changing.
    if (time%%vaccycle <= vaccyclelength){
      dSy.dt = birth*(S+V+I+R)+Sy*(birthy-vacy-contacty*(Iy+I)-ldeathy-aging)+dehyd*Vy+(Cy)*Ry
      dVy.dt = vacy*Sy-ldeathy*Vy-aging*Vy-dehyd*Vy
      dM.dt = vacy*(Sy+Iy+Ry)
    }
    else{
      dSy.dt = birth*(S+V+I+R)+Sy*(birthy-vacy-contacty*(Iy+I)-ldeathy-aging)+(Cy)*Ry
      dVy.dt = -ldeathy*Vy-aging*Vy
      dM.dt = 0
    }
    dCy.dt = civy
    dIy.dt = contacty*(I+Iy)*Sy-(hdeathy+hdeathy*dehyd)*Iy-recovy*Iy-aging*Iy
    dRy.dt = recovy*Iy-hdeathy*Ry-aging*Ry-Cy*Ry
    dHy.dt = hdeathy*(Ry+(1+dehyd)*Iy)
    dLy.dt = ldeathy*(Sy+Vy)

    # Old/adult compartments
    dS.dt = -aq*S-(vac)*S-contact*(I+Iy)*S-ldeath*S-C*S+aging*Sy
    dV.dt = (vac)*S-ldeath*V+aging*Vy
    dI.dt = contact*(I+Iy)*S-hdeath*I*(1+dehyd)-recov*I+aging*Iy
    dR.dt = recov*I-hdeath*R+aging*Ry+(aq-C)*S
    dC.dt = civ
    # FIX: was hdeath*(R+I(1+dehyd)), which calls I() as a function (the
    # AsIs constructor) instead of multiplying; cf. dHy.dt and dI.dt, which
    # both multiply the infected class by (1+dehyd).
    dH.dt = hdeath*(R+I*(1+dehyd))
    dL.dt = ldeath*(S+V)

    return(list(c(dSy.dt,dVy.dt,dIy.dt,dRy.dt,dCy.dt,
                  dHy.dt,dLy.dt,dS.dt,dV.dt, dI.dt, dR.dt,
                  dC.dt,dH.dt,dL.dt,dM.dt)))
  })
}
## Grid-search the young-class vaccination rate: run the ODE model for
## vacy = 0.001, 0.002, 0.003 per day and print the full trajectory of each
## run. Rates below are per day unless noted otherwise.
for(i in 1:3){
  pars <- c(
  done=1,
  contacty= 0.190/1,
  recovy = 0.028,
  ldeathy = 6.8/365/1000, #disease independent death
  hdeathy = .004633/100, #disease induced death for young. Approx twice as likely to die
  vacy = i/1000, # vaccination rate being optimized over
  aqy = .0, #Acquired immunity rate
  birthy = .0, #This can be lower if Civ vaccination is Recovered
  civy = .0000002, #Modernization factor
  aging = 1/365/14, # young -> old transition (14-year age class)
  dehyd = .01, #percentage of vaccines that do not work
  contact = .142/1,
  recov = .028,
  ldeath = 6.8/365/1000, #disease independent death
  hdeath = .00567/100, #disease induced death for old approx twice as likely to die
  vac = 0,
  aq = .0,
  birth = 24.3/365/1000, #This can be lower if Civ vaccination is Recovered; dS.dt
  civ = .0,
  vaccycle=floor(365/4),#currently about 4 rounds of vac. per year in pakistan-> 60 total from 93 to 2007
  vaccyclelength=5 )
  # Integrate and print the solution matrix (rows = times, cols = states)
  print(ode(func = SVIR, y = init.values,
        parms = pars, times = times))
}
|
492629906f9e67b14d1a48e43e4fadb9d2b7dccc
|
748cbafcae0eb56ab3a8752265aea50a427f2d5f
|
/graficos.R
|
d6d2681308d5fbbbdea5f64e9dcd5fcb0a5e7797
|
[] |
no_license
|
pho-souza/cultura
|
16ba1e1acfe5a184d25466fb659b545d27da2bf7
|
732667bab89e74842d4060d00ea81b08619c1a85
|
refs/heads/master
| 2021-09-26T17:28:28.297523
| 2018-10-31T19:46:00
| 2018-10-31T19:46:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 46,743
|
r
|
graficos.R
|
library(ggplot2)
library(ggthemes)
library(ggthemr)
library(ggrepel)
library(scales)
# Language ("Linguagem") plots -----------------------------------------------------------
# Label text size used by the (currently commented-out) ggrepel labels below
tamanho=2
# Discourage scientific notation so axis labels print as plain numbers
options(scipen = 2)
# Per-language plots
# Bar chart: amounts paid (BRL) per language in 2015, with SD error bars
grafico_Valores_Linguagem<-linguagem %>%
  ggplot(aes(x=Linguagem,
             y=Valor,
             fill=Linguagem,
             group=Linguagem))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  geom_errorbar(mapping=aes(ymin=Valor-ValorSD,
                            ymax=Valor+ValorSD),
                width=0.2) +
  #### ggrepel labels kept for reference (one label per bar, easier to read);
  #### currently disabled
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("R$ %s",
  #                                             format(Valor,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Valor+ValorSD),
  #                           direction = "y",
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0,
  #                                              units = "pt"))+
  # FIX: ylab() was previously passed as ggtitle()'s second argument
  # (`subtitle`), which expects a string, not a labs object; add the y-axis
  # label as its own layer instead.
  ggtitle("Valores pagos em reais por linguagem em 2015") +
  ylab("Em reais")
# Bar chart: number of paid projects per language in 2015
grafico_Contagem_Linguagem<-linguagem %>%
  ggplot(aes(x=Linguagem,
             y=Num,
             fill=Linguagem))+
  geom_col(position = position_dodge(width = 0)) +
  # disabled per-bar count labels, kept for reference
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("%s",
  #                                             format(Num, big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Num),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0, units = "pt")) +
  # hide x tick labels and axis titles; languages are identified by fill legend
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  ggtitle("Projetos pagos por linguagem em 2015")
# Bar chart: direct jobs generated per language in 2015, with SD error bars
grafico_Empregos_Linguagem<-linguagem %>%
  ggplot(aes(x=Linguagem,
             y=Empregos,
             fill=Linguagem,
             group=Linguagem))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  geom_errorbar(mapping=aes(ymin=Empregos-EmpregosSD,
                            ymax=Empregos+EmpregosSD),
                width=0.2) +
  # disabled per-bar labels, kept for reference
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("%s",
  #                                             format(Empregos,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ","))),
  #                           size=tamanho,
  #                           direction = "y",
  #                           show.legend = F,
  #                           box.padding = unit(x = 0, units = "pt")) +
  ggtitle("Empregos diretos gerados por linguagem em 2015")
# Bar chart: estimated audience per language in 2015, with SD error bars
grafico_Publico_Linguagem<-linguagem %>%
  ggplot(aes(x=Linguagem,
             y=Publico,
             fill=Linguagem,
             group=Linguagem))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  #ggthemes::theme_hc() +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  geom_errorbar(mapping=aes(ymin=Publico-PublicoSD,
                            ymax=Publico+PublicoSD),
                width=0.2) +
  ####
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("%s",
  #                                             format(Publico,big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Publico+PublicoSD),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0, units = "pt")) +
  ggtitle("Público estimado por linguagem em 2015")
# "saves only these plots" - NOTE(review): no ggsave() call is visible in this
# section; confirm where saving actually happens
# Legal-entity ("Natureza") plots -----------------------------------------------------------
# Pie chart: share of paid projects per legal-entity type in 2015
grafico_Contagem_Natureza_Pie<-natureza %>%
  # NOTE(review): `pos` is computed but never referenced below - possibly a
  # leftover label-position calculation; verify before removing
  mutate(pos = cumsum(Num)-Num/(length(Num))) %>%
  ggplot(aes(x="",
             y=Num,
             fill=Natureza_Jurídica,
             group=Natureza_Jurídica))+
  # stacked column mapped to polar coordinates = pie chart
  geom_col(width = 1, position = "stack") +
  coord_polar("y")+
  # percentage labels centred within each slice
  geom_text(aes(label=sprintf("%s",
                              format(percent(Num/sum(Num)),
                                     big.mark = ".",
                                     decimal.mark = ","))),
            position = position_stack(vjust=0.5),
            size=3) +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank(),
        line=element_blank())+
  ggtitle("Projetos pagos por natureza jurídica em 2015")+
  theme(legend.title = element_blank(),
        axis.title=element_blank())
## Bar-chart version of the project count per legal-entity type
grafico_Contagem_Natureza<-natureza %>%
  ggplot(aes(x=Natureza_Jurídica,
             y=Num,
             fill=Natureza_Jurídica,
             group=Natureza_Jurídica))+
  geom_col(position = position_dodge(width = 0)) +
  # disabled per-bar count labels, kept for reference
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("%s",
  #                                             format(Num,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Num),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0, units = "pt")) +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  ggtitle("Projetos pagos por natureza jurídica em 2015")+
  theme(legend.title = element_blank(),
        axis.title=element_blank())
## Bar chart: amounts paid (BRL) per legal-entity type in 2015, with SD error bars
grafico_Valores_Natureza<-natureza %>%
  ggplot(aes(x=Natureza_Jurídica,
             y=Valor,
             fill=Natureza_Jurídica,
             group=Natureza_Jurídica))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  geom_errorbar(mapping=aes(ymin=Valor-ValorSD,
                            ymax=Valor+ValorSD),
                width=0.2) +
  ####
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("R$ %s",
  #                                             format(Valor,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Valor+ValorSD),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0, units = "pt")) +
  # FIX: ylab() was previously passed as ggtitle()'s second argument
  # (`subtitle`), which expects a string, not a labs object; add the y-axis
  # label as its own layer instead.
  ggtitle("Valores pagos em reais por natureza jurídica em 2015") +
  ylab("Em reais")
# Bar chart: direct jobs created per legal nature in 2015 (+/- 1 SD bars)
grafico_Empregos_Natureza<-natureza %>%
ggplot(aes(x=Natureza_Jurídica,
y=Empregos,
fill=Natureza_Jurídica,
group=Natureza_Jurídica))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(), axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Empregos-EmpregosSD,
ymax=Empregos+EmpregosSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Empregos,
# big.mark = ".",
# decimal.mark = ",")),
# y=Empregos+EmpregosSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Empregos diretos gerados por natureza jurídica em 2015")
# Pie chart: direct jobs created per legal nature in 2015
grafico_Empregos_Natureza_Pie<-natureza %>%
ggplot(aes(x="",
y=Empregos,
fill=Natureza_Jurídica,
group=Natureza_Jurídica))+
# Column chart (turned into a pie by coord_polar below)
geom_col(width = 1) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank(),
line= element_blank())+
geom_text(aes(label=sprintf("%s",
format(Empregos,
big.mark = ".",
decimal.mark = ","))),
position = position_stack(vjust=0.5),
size=3)+
coord_polar("y") +
ggtitle("Empregos diretos gerados por natureza jurídica em 2015")
# Bar chart: estimated audience per legal nature in 2015 (+/- 1 SD bars)
grafico_Publico_Natureza<-natureza %>%
ggplot(aes(x=Natureza_Jurídica,
y=Publico,
fill=Natureza_Jurídica,
group=Natureza_Jurídica))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Publico-PublicoSD,
ymax=Publico+PublicoSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Publico,
# big.mark = ".",
# decimal.mark = ",")),
# y=Publico+PublicoSD),
# size=tamanho, show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Público estimado por natureza em 2015")
# Pie chart: estimated audience per legal nature in 2015
grafico_Publico_Natureza_Pie <-natureza %>%
ggplot(aes(x="",
y=Publico,
fill=Natureza_Jurídica,
group=Natureza_Jurídica))+
# Column chart (turned into a pie by coord_polar below)
geom_col(width = 1) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank(),
line= element_blank())+
geom_text(aes(label=sprintf("%s",
format(Publico,
big.mark = ".",
decimal.mark = ","))),
position = position_stack(vjust=0.5),
size=3)+
ggtitle("Público estimado por natureza em 2015")+
coord_polar("y")
# Charts by colour or race ----------------------------------------------------
### Some records had missing colour/race data; those show up as their own
## category in these charts (original author's note).
# NOTE(review): the data frame named `cor` masks base::cor() in this script.
# Bar chart: number of paid projects per colour/race in 2015
grafico_Contagem_Cor<-cor %>%
ggplot(aes(x=Cor,
y=Num,
fill=Cor,
group=Cor))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Num,
# big.mark = ".",
# decimal.mark = ",")),
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank())+
ggtitle("Projetos pagos por cor ou raça em 2015")
# Bar chart: values paid (R$) per colour/race in 2015, with +/- 1 SD bars.
grafico_Valores_Cor<-cor %>%
  ggplot(aes(x=Cor,
             y=Valor,
             fill=Cor,
             group=Cor))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  geom_errorbar(mapping=aes(ymin=Valor-ValorSD,
                            ymax=Valor+ValorSD),
                width=0.2) +
  ####
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("R$ %s",
  #                                             format(Valor,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Valor+ValorSD),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0, units = "pt")) +
  # BUG FIX: ylab("Em reais") was passed as ggtitle()'s subtitle argument;
  # the axis label belongs in its own `+ ylab()` layer (hidden anyway by
  # axis.title = element_blank() in the theme above).
  ggtitle("Valores pagos em reais por cor ou raça em 2015") +
  ylab("Em reais")
# Bar chart: direct jobs created per colour/race in 2015 (+/- 1 SD bars)
grafico_Empregos_Cor<-cor %>%
ggplot(aes(x=Cor,
y=Empregos,
fill=Cor,
group=Cor))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Empregos-EmpregosSD,
ymax=Empregos+EmpregosSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Empregos,
# big.mark = ".",
# decimal.mark = ",")),
# y=Empregos+EmpregosSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Empregos diretos gerados por cor em 2015")
# Bar chart: estimated audience per colour/race in 2015 (+/- 1 SD bars)
grafico_Publico_Cor<-cor %>%
ggplot(aes(x=Cor,
y=Publico,
fill=Cor,
group=Cor))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Publico-PublicoSD,
ymax=Publico+PublicoSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Publico,
# big.mark = ".",
# decimal.mark = ",")),
# y=Publico+PublicoSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Público estimado por cor ou raça em 2015")
# Charts by gender -------------------------------------------------------------
### Some records had missing gender data; those show up as their own category
## in these charts (original author's note).
# Bar chart: number of paid projects per gender in 2015
grafico_Contagem_Sexo<-sexo %>%
ggplot(aes(x=Sexo,
y=Num,
fill=Sexo,
group=Sexo))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Num,
# big.mark = ".",
# decimal.mark = ",")),
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank())+
ggtitle("Projetos pagos por gênero em 2015")
# Pie chart: share of paid projects per gender in 2015
grafico_Contagem_Sexo_Pie <-sexo %>%
ggplot(aes(x="",
y=Num,
fill=Sexo,
group=Sexo))+
# Column chart (turned into a pie by coord_polar below)
geom_col(width = 1) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank(),
line = element_blank())+
# Percentage labels with Brazilian number formatting
geom_text(aes(label=sprintf("%s",
format(percent(Num/sum(Num)),
big.mark = ".",
decimal.mark = ","))),
position = position_stack(vjust=0.5),
size=3)+
ggtitle("Projetos pagos por sexo em 2015")+
coord_polar(theta = "y")
# Bar chart: values paid (R$) per gender in 2015, with +/- 1 SD bars.
grafico_Valores_Sexo<-sexo %>%
  ggplot(aes(x=Sexo,
             y=Valor,
             fill=Sexo,
             group=Sexo))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(axis.text.x = element_blank(),
        legend.title = element_blank(),
        axis.title=element_blank())+
  geom_errorbar(mapping=aes(ymin=Valor-ValorSD,
                            ymax=Valor+ValorSD),
                width=0.2) +
  ####
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("R$ %s",
  #                                             format(Valor,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Valor+ValorSD),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0,
  #                                              units = "pt")) +
  # BUG FIX: ylab("Em reais") was passed as ggtitle()'s subtitle argument;
  # the axis label belongs in its own `+ ylab()` layer (hidden anyway by
  # axis.title = element_blank() in the theme above).
  ggtitle("Valores pagos em reais por gênero em 2015") +
  ylab("Em reais")
# Bar chart: direct jobs created per gender in 2015 (+/- 1 SD bars)
grafico_Empregos_Sexo<-sexo %>%
ggplot(aes(x=Sexo,
y=Empregos,
fill=Sexo,
group=Sexo))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Empregos-EmpregosSD,
ymax=Empregos+EmpregosSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Empregos,
# big.mark = ".",
# decimal.mark = ",")),
# y=Empregos+EmpregosSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Empregos diretos gerados por gênero em 2015")
# Pie chart: direct jobs created per gender in 2015
grafico_Empregos_Sexo_Pie <-sexo %>%
ggplot(aes(x="",
y=Empregos,
fill=Sexo,
group=Sexo))+
# Column chart (turned into a pie by coord_polar below)
geom_col(width = 1) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank(),
line = element_blank())+
geom_text(aes(label=sprintf("%s",
format(Empregos,
big.mark = ".",
decimal.mark = ","))),
position = position_stack(vjust=0.5),
size=3)+
ggtitle("Empregos diretos gerados por gênero em 2015")+
coord_polar(theta = "y")
# Bar chart: estimated audience per gender in 2015 (+/- 1 SD bars)
grafico_Publico_Sexo<-sexo %>%
ggplot(aes(x=Sexo,
y=Publico,
fill=Sexo,
group=Sexo))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Publico-PublicoSD,
ymax=Publico+PublicoSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Publico,
# big.mark = ".",
# decimal.mark = ",")),
# y=Publico+PublicoSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Público estimado por gênero em 2015")
# Pie chart: estimated audience per gender in 2015
grafico_Publico_Sexo_Pie <-sexo %>%
ggplot(aes(x="",
y=Publico,
fill=Sexo,
group=Sexo))+
# Column chart (turned into a pie by coord_polar below)
geom_col(width = 1) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank(),
line = element_blank())+
geom_text(aes(label=sprintf("%s",
format(Publico,
big.mark = ".",
decimal.mark = ","))),
position = position_stack(vjust=0.5),
size=3)+
ggtitle("Público estimado por gênero em 2015")+
coord_polar(theta = "y")
# Charts by administrative region (RA) of the proponent -----------------------
# Horizontal bar chart: number of paid projects per proponent RA in 2015
grafico_Contagem_RA_Proponente<-cidades %>%
# Keep only RAs with more than two executed projects, to reduce clutter
# (original author's note)
filter(Num>2) %>%
ggplot(aes(x=reorder(RA_Proponente,Num),
y=Num,
fill=RA_Proponente,
group=RA_Proponente))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Num,
# big.mark = ".",
# decimal.mark = ",")),
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
theme(
legend.title = element_blank(),
legend.position = "none")+
ggtitle("Projetos pagos por RA em 2015")+
coord_flip()
# Horizontal bar chart: values paid (R$) per proponent RA in 2015
# (only RAs with more than two executed projects), with +/- 1 SD bars.
grafico_Valores_RA_Proponente<-cidades %>%
  filter(Num>2) %>%
  ggplot(aes(x=reorder(RA_Proponente,
                       Valor),
             y=Valor,fill=RA_Proponente,
             group=RA_Proponente))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(
    legend.title = element_blank(),
    legend.position = "none")+
  geom_errorbar(mapping=aes(ymin=Valor-ValorSD,
                            ymax=Valor+ValorSD),
                width=0.2) +
  ####
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("R$ %s",
  #                                             format(Valor,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Valor+ValorSD),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0,
  #                                              units = "pt")) +
  # BUG FIX: ylab("Em reais") was passed as ggtitle()'s subtitle argument;
  # the currency label for the value axis must be added with `+ ylab()`.
  ggtitle("Valores pagos em reais por RA do proponente em 2015") +
  ylab("Em reais") +
  coord_flip()
# Bar chart: direct jobs created per proponent RA in 2015 (+/- 1 SD bars)
grafico_Empregos_RA_Proponente<-cidades %>%
# Keep only RAs with more than two executed projects, to reduce clutter
# (original author's note)
filter(Num>2) %>%
ggplot(aes(x=reorder(RA_Proponente,Empregos),
y=Empregos,
fill=RA_Proponente,
group=RA_Proponente))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(
legend.title = element_blank(),
legend.position = "none")+
geom_errorbar(mapping=aes(ymin=Empregos-EmpregosSD,
ymax=Empregos+EmpregosSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Empregos,
# big.mark = ".",
# decimal.mark = ",")),
# y=Empregos+EmpregosSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Empregos diretos gerados por RA do proponente em 2015")+
coord_flip()
# Bar chart: estimated audience per proponent RA in 2015 (+/- 1 SD bars)
grafico_Publico_RA_Proponente<-cidades %>%
# Keep only RAs with more than two executed projects, to reduce clutter
# (original author's note)
filter(Num>2) %>%
# Bars ordered by audience size
ggplot(aes(x=reorder(RA_Proponente,Publico),
y=Publico,
fill=RA_Proponente,
group=RA_Proponente))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(legend.title = element_blank(),
legend.position = "none")+
geom_errorbar(mapping=aes(ymin=Publico-PublicoSD,
ymax=Publico+PublicoSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Publico,
# big.mark = ".",
# decimal.mark = ",")),
# y=Publico+PublicoSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Público estimado por RA do proponente em 2015")+
coord_flip()
# Charts by affected administrative regions -----------------------------------
# Horizontal bar chart: how many times each RA was targeted by a project
grafico_Contagem_RA_Atingidas<-cidades_atingidas %>%
# Keep only RAs with more than two executed projects, to reduce clutter
# (original author's note)
filter(Num>2) %>%
ggplot(aes(x=reorder(Cidades, Num),
y=Num,
fill=Cidades,
group=Cidades))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Num,
# big.mark = ".",
# decimal.mark = ",")),
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
theme(axis.text.x = element_blank(),
legend.title = element_blank(),
legend.position = "none")+
ggtitle("Número de vezes que uma RA do proponente foi alvo por um projeto")+
coord_flip()
# Charts by education level ----------------------------------------------------
### Some records had missing education data; those show up in these charts
## (original author's note).
# Horizontal bar chart: number of paid projects per education level in 2015
grafico_Contagem_Escolaridade<-escolaridade %>%
filter(Num>2) %>%
ggplot(aes(x=reorder(Escolaridade,Num),
y=Num,
fill=Escolaridade,
group=Escolaridade))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Num,
# big.mark = ".",
# decimal.mark = ",")),
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
theme(
legend.title = element_blank(),
legend.position = "none")+
coord_flip() +
ggtitle("Projetos pagos por escolaridade em 2015")
# Horizontal bar chart: values paid (R$) per education level in 2015
# (only levels with more than two executed projects), with +/- 1 SD bars.
grafico_Valores_Escolaridade<-escolaridade %>%
  filter(Num>2) %>%
  ggplot(aes(x=reorder(Escolaridade,Valor),
             y=Valor,
             fill=Escolaridade,
             group=Escolaridade))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(
    legend.title = element_blank(),
    legend.position = "none")+
  geom_errorbar(mapping=aes(ymin=Valor-ValorSD,
                            ymax=Valor+ValorSD),
                width=0.2) +
  ####
  # ggrepel::geom_label_repel(stat="identity",
  #                           aes(label=sprintf("R$ %s",
  #                                             format(Valor,
  #                                                    big.mark = ".",
  #                                                    decimal.mark = ",")),
  #                               y=Valor+ValorSD),
  #                           size=tamanho,
  #                           show.legend = F,
  #                           box.padding = unit(x = 0,
  #                                              units = "pt")) +
  # BUG FIX: the original passed xlab("Em reais") as ggtitle()'s subtitle
  # argument.  The currency label belongs on the value (y) axis — which
  # becomes the horizontal axis after coord_flip() — added with `+ ylab()`.
  ggtitle("Valores pagos em reais por escolaridade em 2015") +
  ylab("Em reais") +
  coord_flip()
# Bar chart: direct jobs created per education level in 2015 (+/- 1 SD bars)
grafico_Empregos_Escolaridade<-escolaridade %>%
filter(Num>2) %>%
# Keep only levels with more than two executed projects, to reduce clutter
# (original author's note)
ggplot(aes(x=reorder(Escolaridade,Empregos),
y=Empregos,
fill=Escolaridade,
group=Escolaridade))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
theme(legend.position = "none",
legend.title = element_blank(),
axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Empregos-EmpregosSD,
ymax=Empregos+EmpregosSD),
width=0.2) +
####
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s",
# format(Empregos,
# big.mark = ".",
# decimal.mark = ",")),
# y=Empregos+EmpregosSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0,
# units = "pt")) +
ggtitle("Empregos diretos gerados por escolaridade em 2015")+
coord_flip()
# Bar chart: estimated audience per education level in 2015 (+/- 1 SD bars)
grafico_Publico_Escolaridade<-escolaridade %>%
filter(Num>2) %>%
# Keep only levels with more than two executed projects, to reduce clutter
# (original author's note)
ggplot(aes(x=reorder(Escolaridade,Publico),
y=Publico,
fill=Escolaridade,
group=Escolaridade))+
# Column chart
geom_col(position = position_dodge(width = 0)) +
# Legend removed; each education level is labelled on its own axis tick
theme(legend.position = "none",
legend.title = element_blank(),
axis.title=element_blank())+
geom_errorbar(mapping=aes(ymin=Publico-PublicoSD,
ymax=Publico+PublicoSD),
width=0.2) +
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s", format(Publico,
# big.mark = ".",
# decimal.mark = ",")),
# y=Publico+PublicoSD),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0, units = "pt")) +
ggtitle("Público estimado por escolaridade em 2015")+
coord_flip()
#### Horizontal bar chart: number of paid projects per affected RA in 2015
grafico_Contagem_RA_Atingida<-cidades_atingidas %>%
# Keep only RAs with more than two executed projects, to reduce clutter
# (original author's note)
filter(Num>2) %>%
ggplot(aes(x=reorder(Cidades,Num),
y=Num,
fill=Cidades,
group=Cidades))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_label_repel(stat="identity",
# aes(label=sprintf("%s", format(Num,
# big.mark = ".",
# decimal.mark = ",")),
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0, units = "pt")) +
theme(
legend.title = element_blank(),
legend.position = "none",
axis.title = element_blank())+
ggtitle("Projetos pagos por RA atingida em 2015")+
coord_flip()
# Horizontal bar chart: mean values paid (R$) per affected RA in 2015
# (only RAs with more than two executed projects).
grafico_Valores_RA_Atingida<-cidades_atingidas %>%
  filter(Num>2) %>%
  ggplot(aes(x=reorder(Cidades,ValoresMedios),
             y=ValoresMedios,
             fill=Cidades))+
  # Column chart
  geom_col(position = position_dodge(width = 0)) +
  theme(
    legend.title = element_blank(),
    legend.position = "none",
    axis.title = element_blank())+
  # ggrepel::geom_label_repel(#stat="identity",
  #                           aes(label=ifelse(ValoresMedios>100000,
  #                                            yes=prettyNum(x=ValoresMedios,
  #                                                          big.mark = ".",
  #                                                          decimal.mark = ","),
  #                                            no=NA)),
  #                           #nudge_y = cidades_atingidas$ValoresMedios,
  #                           size=tamanho, #y=ValoresMedios),
  #                           show.legend = F,
  #                           direction = "x",
  #                           nudge_y = 25000,
  #                           box.padding = unit(x = 0, units = "pt"),
  #                           label.padding = unit(x = 1,units = "pt"),
  #                           label.size = 0.1,
  #                           na.rm = T) +
  # BUG FIX: ylab("Em reais") was passed as ggtitle()'s subtitle argument;
  # the axis label must be added with `+ ylab()` (it stays hidden because
  # the theme above sets axis.title = element_blank()).
  ggtitle("Valores médios pagos em reais por RA atingida em 2015") +
  ylab("Em reais") +
  coord_flip()
##### Charts by funding modality ----------------------------------------------
# Bar chart: paid projects per modality.  Spaces in the modality names are
# replaced with newlines so x labels would wrap.
# NOTE(review): axis.text.x is blanked in the theme below, so the wrapped
# labels never actually show.
grafico_Contagem_Modalidades <- modalidades %>%
ggplot(aes(x = gsub('[ ]', '\n', Modalidade),
y=Num,
fill=Modalidade,
group=Modalidade))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_text_repel(stat="identity",
# aes(label=Num,
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0, units = "pt"),
# direction = "y",
# nudge_y = 2) +
theme(#axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title = element_blank(),
#legend.position = "buttom",
axis.text.x = element_blank(),
axis.ticks.x = element_blank())+
ggtitle("Projetos pagos por modalidade em 2015")
# Counts of proponents who had competed / been awarded before ------------------
### Previously awarded
# Bar chart: how many proponents had already received FAC funds before 2015
grafico_Contagem_Ja_Contemplado <- ja_contemplado %>%
ggplot(aes(x = Ja_Contemplado,
y = Num,
fill = Ja_Contemplado,
group = Ja_Contemplado))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_text_repel(stat="identity",
# aes(label=Num,
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0, units = "pt"),
# direction = "y",
# nudge_y = 2) +
theme(#axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank(),
legend.position = "none",
axis.text.x = element_text(vjust = 0.5))+
ggtitle("Quantidade de proponentes que já foram contemplados com \n recursos do FAC antes de 2015")+
# NOTE(review): repeats settings already applied in the theme() call above
theme(legend.title = element_blank(),
axis.title=element_blank())
### Proponents who had already applied for funds -------------------------------
### Already competed
grafico_Contagem_Ja_Concorreu <- ja_concorreu %>%
ggplot(aes(x = Ja_Concorreu,
y = Num,
fill = Ja_Concorreu,
group = Ja_Concorreu))+
geom_col(position = position_dodge(width = 0)) +
# ggrepel::geom_text_repel(stat="identity",
# aes(label=Num,
# y=Num),
# size=tamanho,
# show.legend = F,
# box.padding = unit(x = 0, units = "pt"),
# direction = "y",
# nudge_y = 2) +
theme(#axis.text.x = element_blank(),
legend.title = element_blank(),
axis.title=element_blank(),
legend.position = "none",
axis.text.x = element_text(vjust = 0.5))+
ggtitle("Quantidade de proponentes que já concorreram para \n recursos do FAC antes de 2015")+
# NOTE(review): repeats settings already applied in the theme() call above
theme(legend.title = element_blank(),
axis.title=element_blank())
# Cross-tab chart: project counts per art form (linguagem), stacked by
# colour/race of the proponent.
grafico_Contagem_Proj_Cor <- proj.cor %>%
  ggplot(aes(x = Cor_ou_Raça,
             y = Num,
             fill = Linguagem,
             group = Linguagem))+
  geom_col() +
  # ggrepel::geom_label_repel(aes(label=Num, fill=Cor_ou_Raça),
  #                           size=tamanho,
  #                           force = 1,
  #                           show.legend = F,
  #                           #box.padding = unit(x = 0, units = "pt"),
  #                           direction = "y",
  #                           arrow = arrow(length = unit(0.01, 'npc')), box.padding = unit(1.5, 'lines'),color="black" ) +
  # geom_text(aes(label=Num),
  #           position = position_stack(vjust = 0.5),
  #           hjust=+0.5,
  #           size=3)+
  theme(#axis.text.x = element_blank(),
    legend.title = element_blank(),
    axis.title=element_blank(),
    legend.position = "bottom",
    axis.text.x = element_text(vjust = 0.5))+
  ggtitle("Quantidade de linguagens por cor ou raça")+
  theme(legend.title = element_blank(),
        axis.title=element_blank())
# BUG FIX: the original chain ended with `element_blank())#+` followed by a
# free-standing `coord_flip()`.  Because the `+` was commented out, that
# coord_flip() was a dead expression — evaluated and discarded, never
# attached to the plot.  It has been removed; the rendered chart is
# unchanged.
# Cross-tab chart: project counts per art form (linguagem), stacked by
# gender of the proponent.
grafico_Contagem_Proj_Sexo <- proj.sexo %>%
  ggplot(aes(x = Sexo,
             y = Num,
             fill = Linguagem,
             group = Linguagem))+
  geom_col() +
  # ggrepel::geom_label_repel(aes(label=Num, fill=Cor_ou_Raça),
  #                           size=tamanho,
  #                           force = 1,
  #                           show.legend = F,
  #                           #box.padding = unit(x = 0, units = "pt"),
  #                           direction = "y",
  #                           arrow = arrow(length = unit(0.01, 'npc')), box.padding = unit(1.5, 'lines'),color="black" ) +
  # geom_text(aes(label=Num),
  #           position = position_stack(vjust = 0.5),
  #           hjust=+0.5,
  #           size=3)+
  theme(#axis.text.x = element_blank(),
    legend.title = element_blank(),
    axis.title=element_blank(),
    legend.position = "bottom",
    axis.text.x = element_text(vjust = 0.5))+
  ggtitle("Quantidade de linguagens por sexo")+
  theme(legend.title = element_blank(),
        axis.title=element_blank())
# BUG FIX: as in grafico_Contagem_Proj_Cor, the trailing `#+` left a
# free-standing `coord_flip()` that never attached to the plot; the dead
# expression is removed here with no change to the rendered chart.
### Beneficiaries previously awarded, by how many times they applied -----------
# Stacked bar chart: count of beneficiaries already awarded before, broken
# down by the number of times they applied for funds
grafico_Contagem_Contemplados <- contemplados %>%
ggplot(aes(x = Ja_Concorreu,
y = Num,
fill = Ja_Contemplado,
group = Ja_Contemplado))+
geom_col() +
# ggrepel::geom_label_repel(aes(label=Num, fill=Cor_ou_Raça),
# size=tamanho,
# force = 1,
# show.legend = F,
# #box.padding = unit(x = 0, units = "pt"),
# direction = "y",
# arrow = arrow(length = unit(0.01, 'npc')), box.padding = unit(1.5, 'lines'),color="black" ) +
# geom_text(aes(label=Num),
# position = position_stack(vjust = 0.5),
# hjust=+0.5,
# size=3)+
theme(#axis.text.x = element_blank(),
axis.title=element_blank(),
legend.position = "bottom",
axis.text.x = element_text(vjust = 0.5))+
ggtitle("Quantidade de beneficiários que já foram contemplados
dada a quantidade de vezes que pleitaram recursos")+
theme(#legend.title = element_blank(),
axis.title=element_blank())+
labs(fill="Quantidade de vezes que \n já foi contemplados")
### Values received in 2015, same breakdown ------------------------------------
grafico_Valores_Contemplados <- contemplados %>%
ggplot(aes(x = Ja_Concorreu,
y = Valor,
fill = Ja_Contemplado,
group = Ja_Contemplado))+
geom_col() +
# ggrepel::geom_label_repel(aes(label=Num, fill=Cor_ou_Raça),
# size=tamanho,
# force = 1,
# show.legend = F,
# #box.padding = unit(x = 0, units = "pt"),
# direction = "y",
# arrow = arrow(length = unit(0.01, 'npc')), box.padding = unit(1.5, 'lines'),color="black" ) +
# geom_text(aes(label=Num),
# position = position_stack(vjust = 0.5),
# hjust=+0.5,
# size=3)+
theme(#axis.text.x = element_blank(),
axis.title=element_blank(),
legend.position = "bottom",
axis.text.x = element_text(vjust = 0.5))+
ggtitle("Valores recebidos em 2015 por beneficiários que já foram contemplados
dada a quantidade de vezes que pleitaram recursos")+
theme(#legend.title = element_blank(),
axis.title=element_blank())+
labs(fill="Quantidade de vezes que \n já foi contemplados")
# Names of every chart object built above, to be saved and then removed --------
graficos_todos<-c("grafico_Valores_Linguagem","grafico_Contagem_Linguagem","grafico_Empregos_Linguagem",
"grafico_Publico_Linguagem","grafico_Valores_Natureza","grafico_Contagem_Natureza",
"grafico_Empregos_Natureza","grafico_Publico_Natureza","grafico_Valores_Cor","grafico_Contagem_Cor",
"grafico_Empregos_Cor","grafico_Publico_Cor","grafico_Valores_Sexo","grafico_Contagem_Sexo",
"grafico_Empregos_Sexo","grafico_Publico_Sexo","grafico_Valores_Escolaridade",
"grafico_Contagem_Escolaridade","grafico_Empregos_Escolaridade","grafico_Publico_Escolaridade",
"grafico_Valores_RA_Proponente","grafico_Contagem_RA_Proponente","grafico_Empregos_RA_Proponente",
"grafico_Publico_RA_Proponente","grafico_Valores_RA_Atingida","grafico_Contagem_RA_Atingida",
"grafico_Contagem_Natureza_Pie","grafico_Empregos_Natureza_Pie","grafico_Publico_Natureza_Pie",
"grafico_Contagem_Sexo_Pie","grafico_Empregos_Sexo_Pie","grafico_Publico_Sexo_Pie",
"grafico_Contagem_Modalidades","grafico_Contagem_Ja_Concorreu","grafico_Contagem_Ja_Contemplado",
"grafico_Contagem_Proj_Sexo", "grafico_Contagem_Proj_Cor", "grafico_Valores_Contemplados",
"grafico_Contagem_Contemplados")
# View the charts: each bare expression prints to the active graphics device ---
# Language (linguagem) charts
grafico_Valores_Linguagem
grafico_Contagem_Linguagem
grafico_Empregos_Linguagem
grafico_Publico_Linguagem
# Affected-RA charts
grafico_Valores_RA_Atingida
grafico_Contagem_RA_Atingida
# Colour/race charts
grafico_Contagem_Cor
grafico_Valores_Cor
grafico_Publico_Cor
grafico_Empregos_Cor
grafico_Contagem_Escolaridade
grafico_Valores_Escolaridade
grafico_Publico_Escolaridade
grafico_Empregos_Escolaridade
# Proponent-RA charts
grafico_Contagem_RA_Proponente
grafico_Valores_RA_Proponente
grafico_Publico_RA_Proponente
grafico_Empregos_RA_Proponente
# Gender charts
grafico_Contagem_Sexo
grafico_Valores_Sexo
grafico_Publico_Sexo
grafico_Empregos_Sexo
grafico_Publico_Sexo_Pie
# Legal-nature charts
grafico_Contagem_Natureza
grafico_Valores_Natureza
grafico_Empregos_Natureza
grafico_Publico_Natureza
grafico_Publico_Natureza_Pie
# Persist the chart objects, then drop them from the workspace -----------------
# NOTE(review): the original comment said "RDS", but save() writes an .RData
# file (readable with load()), not an RDS file.
save(list = graficos_todos,file = "graficos_dados.RData")
rm(list=graficos_todos)
|
99dfd279661ec8fc5f36cd90f5596b0d0e5d7e00
|
024d2ee48d6eae806e98cba16819eb2b3fd52f1e
|
/man/as.issue.Rd
|
7f540885fa4da09ef380bef8d4340eb112eb55c1
|
[] |
no_license
|
Bioconductor/BiocContributions
|
eb4421f440afeb18f6c926fa0de2b6a4778e1949
|
13595f647f6b95af566583ffed0fe9a465147a69
|
refs/heads/master
| 2020-04-10T00:31:58.441908
| 2017-06-27T16:49:41
| 2017-06-27T16:49:41
| 40,479,641
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 322
|
rd
|
as.issue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracker.R
\name{as.issue}
\alias{as.issue}
\title{Coerce to an issue object}
\usage{
as.issue(x, ...)
}
\arguments{
\item{x}{object to be coerced}
\item{...}{Additional arguments passed to methods}
}
\description{
Coerce to an issue object
}
|
9ce902b4ed768d7fc207d4a10de6450d15e7602f
|
56ae3b4dbc591d653343b762f8f1f3b9a0744e82
|
/man/mode.Rd
|
bcc47227ca6d8f9f1aace426dad0446b9c7b8cc6
|
[
"MIT"
] |
permissive
|
ms32035/R-adasd
|
38014ae244c6591719e62f3efb8011f5817d67e2
|
9d4dabddcf1d6a2f26b6cb2fbb464b0b086bca5f
|
refs/heads/master
| 2016-09-06T19:28:33.627241
| 2015-01-24T17:24:20
| 2015-01-24T17:24:20
| 29,705,071
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
rd
|
mode.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{mode}
\alias{mode}
\title{Mode}
\usage{
mode(x)
}
\arguments{
\item{x}{- An integer vector}
}
\value{
Most frequent value of \code{x}
}
\description{
\code{mode} returns the most frequent value of an integer vector
}
\examples{
mode(c(1,2,2))
}
|
ae56d71bf04e72a0c069ca8cc595b68853a37a63
|
f7cb684b54e1f73dd8483b15ee2e0ae3351abf12
|
/cachematrix.R
|
0d1ecddacd8227b2f35178de6e1f8ef512bf520d
|
[] |
no_license
|
gaoqn1985/ProgrammingAssignment2
|
34c9032ae1054ef2c8cc0d51d508cc6b76fa2000
|
f68031c589404fab4847658c9e15d3fc6cb1c625
|
refs/heads/master
| 2021-01-18T12:36:48.823054
| 2014-04-27T16:26:14
| 2014-04-27T16:26:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,216
|
r
|
cachematrix.R
|
# makeCacheMatrix() builds a cache-aware wrapper around a matrix.
#
# It returns a list of four closures that share one environment:
#   setMatrix(y)      - store a new matrix and invalidate any cached inverse
#   getMatrix()       - return the stored matrix
#   setInverse(solve) - place a computed inverse into the cache
#   getInverse()      - return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    setMatrix = function(y) {
      # replacing the matrix makes any previously cached inverse stale
      x <<- y
      cached_inverse <<- NULL
    },
    getMatrix = function() x,
    setInverse = function(solve) {
      cached_inverse <<- solve
    },
    getInverse = function() cached_inverse
  )
}
# cacheSolve() returns the inverse of the special "matrix" produced by
# makeCacheMatrix().
#
# On the first call the inverse is computed with solve() and written back
# into the cache; subsequent calls reuse the cached value (announced with
# the "getting cached data" message).  Extra arguments in `...` are
# forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # cache miss: compute the inverse and remember it for next time
    cached <- solve(x$getMatrix(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
16ed3b538e9cb11d075fedea0d0788c6b5e8face
|
9cfdd919707a77d4832e7d3aec65a4ae2c56eb47
|
/Okapi.r
|
91bf42afbcbb4df50edf48777ab89a88d480e9f0
|
[] |
no_license
|
nondayo/Okapi
|
d82786e4cd1bcd2dae197620188ae1d49ba25c3f
|
28c9dd3cc28a517cf03e1f7f3a851a8f700a1c5b
|
refs/heads/master
| 2021-01-17T08:41:01.062351
| 2017-03-08T13:07:07
| 2017-03-08T13:07:07
| 83,945,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,510
|
r
|
Okapi.r
|
library(jiebaR)
library(text2vec)
library(xml2)
library(dplyr)
library(httr)
# Crawler: scrape articles 1..9000 from okapi.books.com.tw --------------------
# For each article id, fetch the page and — when it exists (HTTP 200) —
# extract the unit, title, writer(s), body text and the raw date line.
# Non-existent ids make the function return NULL, which rbind() below drops.
#
# FIXES vs. the original:
#  * `<-` instead of `=` for top-level assignment, and the result is no
#    longer named `table`, which masked base::table().
#  * paste0() has no `sep` argument — the original `sep=""` was silently
#    pasted into the `...` (appending an empty string), and the
#    as.character() step was redundant; both are removed with no change to
#    the resulting URL.
pages <- lapply(seq_len(9000), function(num) {
  df <- data.frame(number = num)
  response <- paste0("http://okapi.books.com.tw/article/", num) %>%
    GET()
  page <- content(response)
  if (status_code(response) == 200) {
    df$unit <- page %>%
      xml_find_all(xpath = "//*[@id='article']/h2/em") %>%
      xml_text()
    df$title <- page %>%
      xml_find_all(xpath = "//*[@id='article']/h1") %>%
      xml_text()
    df$writer <- page %>%
      xml_find_all(xpath = "//*[@id='article']/p[1]/a") %>%
      xml_text() %>%
      paste0(collapse = ', ')
    df$article <- page %>%
      xml_find_all(xpath = "//*[@id='article']/article") %>%
      xml_text()
    df$date <- page %>%
      xml_find_all(xpath = "//*[@id='article']/p[1]") %>%
      xml_text()
    Sys.sleep(5)  # be polite to the server between successful fetches
    df
  }
})
df_total <- Reduce(x = pages, f = rbind)
# Split the raw date line into a clean date and a view count:
#   the date sits between "/" and "瀏覽次數" ("view count"),
#   the view count sits between "(" and ")".
StartName1 <- regexpr("/", df_total$date)
EndName1 <- regexpr("瀏覽次數", df_total$date)
df_date <- substr(df_total$date, start = StartName1 + 2, stop = EndName1 - 2)
StartName2 <- regexpr("\\(", df_total$date)
EndName2 <- regexpr("\\)", df_total$date)
df_seen <- substr(df_total$date, start = StartName2 + 1, stop = EndName2 - 1)
# Replace the raw date column (column 6) with the parsed date and view count
df_total <- cbind(df_total[,-6], df_date, df_seen, stringsAsFactors = F)
save(df_total, file = "df_total.RData")
# Tokenise the crawled articles with jiebaR ------------------------------------
cutter <- worker(bylines = T)
# BUG FIX: the original segmented `df_article`, a variable never defined in
# this script.  The article bodies built by the crawler above live in
# df_total$article, so that column is segmented instead.
article_words <- sapply(df_total$article, function(x)
  segment(x, cutter)
)
save(article_words, file = "article_words.RData")
#建立詞庫
library(text2vec)
# an iterator to acess tokens in each article
article_words.token <- itoken(article_words)
# to create vocabulary base on the above tokens
article_words.vocabulary <- create_vocabulary(article_words.token, ngram = c(1, 1))
article_words.vocabulary2 <- create_vocabulary(article_words.token, ngram = c(1, 2))
# 詞(terms), 在所有文章出現的總次數(terms count), 在幾篇文章裡出現過(doc_counts)
#terms: (character) vector of unique terms
#terms_counts: (integer) vector of term counts across all documents
#doc_counts: (integer) vector of document counts that contain corresponding term
head(article_words.vocabulary)
head(article_words.vocabulary2)
# enforce the encoding of terms to be 'UTF-8'
Encoding(article_words.vocabulary$vocab$terms) = 'UTF-8'
# show message
cat("\n",paste0("The vocabulary size, |V| = ",length(article_words.vocabulary$vocab$terms)),"\n")
# show
head(article_words.vocabulary$vocab[order(-article_words.vocabulary$vocab$terms_counts)][120:150],10)
nrow(article_words.vocabulary$vocab)
# vectorization of words
article_words.token <- itoken(article_words)
article_words.vectorizer <- vocab_vectorizer(article_words.vocabulary, grow_dtm = FALSE, skip_grams_window = 5)
# construct term co-occurrence matrix according to a.token and a.vectorizer
# create_tcm(輸入值, 怎麼運算)
article_words.tcm <- create_tcm(article_words.token, article_words.vectorizer)
# show dimenstion of tcm
article_words.tcm@Dim[1]
article_words.tcm@Dim[2]
# glove = GlobalVectors$new(word_vectors_size, vocabulary, x_max, learning_rate = 0.15,
# max_cost = 10, alpha = 0.75, lambda = 0, shuffle = FALSE, initial = NULL)
# glove$fit(x, n_iter, convergence_tol = -1)
# Construct a Global vectors model
# x_max 一篇文章中出現多少次以上的詞就濾掉
glove = GlobalVectors$new(word_vectors_size = 100, vocabulary = article_words.vocabulary,
x_max = 15, learning_rate = 0.2)
# fit Glove model to input matrix x
glove$fit(article_words.tcm, n_iter = 100, closure = T)
word_vectors <- glove$get_word_vectors()
head(word_vectors)
str(word_vectors)
# word vector application
# calculate the unit vector
word.vec.norm <- sqrt(rowSums(word_vectors ^ 2))
word_vectors = word_vectors / word.vec.norm
save(word_vectors, file = "word_vectors.RData")
### write word analogy funciton
# Word-analogy query: solves "a is to b as ? is to c" in the embedding space.
# Builds the query vector v(a) - v(b) + v(c) and returns the 10 vocabulary
# words whose embeddings are most cosine-similar to it.
# Relies on the global `word_vectors` matrix and text2vec::sim2().
get_analogy = function(a, b, c) {
  query_vec <- word_vectors[a, , drop = FALSE]
  query_vec <- query_vec - word_vectors[b, , drop = FALSE]
  query_vec <- query_vec + word_vectors[c, , drop = FALSE]
  # Cosine similarity of every vocabulary word against the query vector.
  similarities <- sim2(x = word_vectors, y = query_vec, method = "cosine", norm = "l2")
  ranked <- sort(similarities[, 1], decreasing = TRUE)
  head(ranked, 10)
}
# try the following analogy task
get_analogy("日本","東京","台灣")
#get_analogy("法國","巴黎","臺灣")
#get_analogy("中國","北京","臺灣")
#get_analogy("泰國","曼谷","臺灣")
# word vectors to article vectors
aw <- article_words
wv <- word_vectors
new_listnames = paste('A', df_total$number, sep = '')
names(aw) = new_listnames
str(aw[1])
#把文章向量接起來
t_article_vectors = sapply(aw, function(words){
colSums(wv[unique(words), ])
})
article_vectors = t(t_article_vectors)
df_clus <- as.data.frame(article_vectors)
df_clus$writer <- df_total$writer
writer_150 <- names(table(df_clus$writer)[table(df_clus$writer)>150])
df_clus <- df_clus[df_clus$writer %in% writer_150,]
#寫超過150篇文章的共有10位作者,1815篇文章
#把篩選出來的1815篇文章整理成df_clus_2
df_clus$writer_factor = as.factor(df_clus$writer)
df_clus_2 = df_clus[,setdiff(names(df_clus), c('writer'))]
#隨機森林
set.seed(5566)
df_clus.rf <- randomForest(writer_factor ~ ., df_clus_2, proximity=TRUE,
keep.forest=TRUE)
save(df_clus.rf, file = "df_clus.rf.RData")
#confusion matrix
(table.rf=df_clus.rf$confusion)
cat("AVERAGE CORRECTION RATIO =", sum(diag(table.rf)/sum(table.rf))*100,"%\n")
df_res = data.frame(writer = df_clus$writer, predicted = df_clus.rf$predicted)
plot(df_clus.rf)
#MDSplot(df_clus.rf, df_clus_2$writer_factor)
#顏色代表群,數字代表作者('DL' '但唐謨' '個人意見' '博客來OKAPI編輯室' '寶妹' '張妙如' '李屏瑤' '米果' '莽斯特' '陳琡分')
## Using different symbols for the classes:
#MDSplot(df_clus.rf, df_clus_2$writer_factor, palette = rainbow(10) , pch=as.character(as.numeric(df_clus.rf$predicted)))
res = MDSplot(df_clus.rf, df_clus_2$writer_factor, palette = rainbow(10) , pch=as.character(as.numeric(df_clus.rf$predicted)), k=3)
#install.packages('plot3D' ,repos='http://cran.csie.ntu.edu.tw/')
library(plot3D)
tobedraw = as.data.frame(res$points)
names(tobedraw) = list('x', 'y', 'z')
tobedraw$writer = df_clus$writer_factor
tobedraw$predicted = df_clus.rf$predicted
head(tobedraw)
scatter3D(x=tobedraw$x, y=tobedraw$y, z=tobedraw$z,
colvar = as.numeric(tobedraw$writer),
pch = as.character(as.numeric(tobedraw$predicted)))
# 輸入文章,讓模型預測作者
migo_1 <- readChar("米果-甘蔗的大人味.txt", nchars = file.info("米果-甘蔗的大人味.txt")$size)
migo_2 <- readChar("米果-東京人教我的雪天生活對策.txt", nchars = file.info("米果-東京人教我的雪天生活對策.txt")$size)
migo_3 <- readChar("米果-時時刻刻謹慎的日本.txt", nchars = file.info("米果-時時刻刻謹慎的日本.txt")$size)
migo_4 <- readChar("米果-突然想去家庭餐廳吃漢堡排.txt", nchars = file.info("米果-突然想去家庭餐廳吃漢堡排.txt")$size)
dan_1 <- readChar("但唐謨-看電影請勿笑得像白癡.txt", nchars = file.info("但唐謨-看電影請勿笑得像白癡.txt")$size)
dan_2 <- readChar("但唐謨-動作電影不熱血不酷.txt", nchars = file.info("但唐謨-動作電影不熱血不酷.txt")$size)
dan_3 <- readChar("但唐謨-荒島上的屍控奇幻旅程.txt", nchars = file.info("但唐謨-荒島上的屍控奇幻旅程.txt")$size)
dan_4 <- readChar("但唐謨-變遷中的美國亞裔同志影像.txt", nchars = file.info("但唐謨-變遷中的美國亞裔同志影像.txt")$size)
# Predict the author of a free-text article.
#
# Tokenizes `x` (a single character string) with jiebaR, sums the embedding
# vectors of every token found in the GloVe vocabulary to form a 100-dim
# article vector, and feeds that vector to the trained random forest
# `df_clus.rf`. Relies on the globals `word_vectors` and `df_clus.rf`.
#
# Returns the factor level predicted by the random forest.
GuessWriter <- function(x){
  # Tokenize the raw text into words.
  tokens <- segment(x, worker(bylines = TRUE))
  vocab <- rownames(word_vectors)
  # intersect() already de-duplicates, so the original extra unique() call
  # was redundant.
  matched <- intersect(vocab, unlist(tokens))
  if (length(matched) == 0) {
    stop("GuessWriter: none of the tokens are in the embedding vocabulary",
         call. = FALSE)
  }
  # drop = FALSE keeps a 1-row matrix when exactly one token matches;
  # without it the subset collapses to a plain vector and colSums() fails.
  article_vec <- colSums(word_vectors[matched, , drop = FALSE])
  features <- as.data.frame(t(article_vec))
  # Column names must match those used to train df_clus.rf (V1..V<dim>);
  # ncol(word_vectors) is 100 here, so this matches the original hard-coded
  # 1:100 while adapting to other embedding sizes.
  names(features) <- paste0("V", seq_len(ncol(word_vectors)))
  predict(df_clus.rf, features)
}
GuessWriter(migo_1)
GuessWriter(migo_2)
GuessWriter(migo_3)
GuessWriter(migo_4)
GuessWriter(dan_1)
GuessWriter(dan_2)
GuessWriter(dan_3)
GuessWriter(dan_4)
|
b357d623f87d90707395647eb4ba9002180e1ec3
|
3b5ab1e206635d838f46376d36f9762a1556ed3d
|
/man/predict.logisticr.Rd
|
94fe20466080d50b20de33d1512b85ea381e32b7
|
[] |
no_license
|
MGallow/logitr
|
d0b355c7f5aa6966ea66793379a7e7fa206d0924
|
76ed1adf1f4408ed1e1c4ecda2b062041ab0de04
|
refs/heads/master
| 2021-01-19T13:45:53.835109
| 2018-06-21T00:11:17
| 2018-06-21T00:11:17
| 88,107,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 891
|
rd
|
predict.logisticr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.logisticr}
\alias{predict.logisticr}
\title{Predict Logistic Regression}
\usage{
\method{predict}{logisticr}(object, X, y = NULL, ...)
}
\arguments{
\item{object}{'logisticr' object or matrix of betas}
\item{X}{matrix or data frame of (new) observations}
\item{y}{optional, matrix or vector of response values 0,1}
\item{...}{additional arguments}
}
\value{
predictions and loss metrics
}
\description{
Generates prediction for logistic regression. Note that one can either input a 'logisticr' object or a matrix of beta coefficients.
}
\examples{
library(dplyr)
X = dplyr::select(iris, -Species)
y = dplyr::select(iris, Species)
y$Species = ifelse(y$Species == 'setosa', 1, 0)
logisticr(X, y)
fitted = logisticr(X, y, lam = 0.1, penalty = 'ridge', method = 'MM')
predict(fitted, X)
}
|
47d2dfbd2415e40c2af62500f1bcf048cc565f5c
|
82e5da2a0d20503903cb9807aa0aa68620273b3b
|
/MNLFA/scripts/find_balanced_sample.R
|
37a38d881f9962daf1a13f82a54f8b5cc3778bf6
|
[] |
no_license
|
rrobinn/invariance-repetitive-bx
|
4d865e6cb0edab34333333623e63f569337f5f53
|
db04a1d3d17b73822f8d8f3b38698a8cc2c4ea15
|
refs/heads/master
| 2023-01-27T18:47:31.405037
| 2020-12-10T16:53:06
| 2020-12-10T16:53:06
| 234,354,868
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,731
|
r
|
find_balanced_sample.R
|
#For longitudinal data, MNLFA requires a cross-sectional
#calibration sample. This code generates multiple calibration samples that have
#a similar age distribution
input.object = ob # MNLFA object
dir = input.object$dir
mrdata = input.object$mrdata
myindicators = input.object$indicators
myMeanImpact = input.object$meanimpact
myVarImpact = input.object$varimpact
myMeasInvar = input.object$measinvar
mytime = input.object$time
myauxiliary = input.object$auxiliary
myID = input.object$ID
varlist<-c(myID,myauxiliary,myindicators,myMeasInvar,myMeanImpact,myVarImpact)
varlist<-unique(varlist)
# Calculate mean and sd of longitudinal sample
# (AGE18 = age centered at 18mo)
my_mean = mean(mrdata$AGE18, na.omit = TRUE)
my_sd = sd(mrdata$AGE18)
# output variables
ran.list = list()
mean.list = list()
sd.list = list()
# Generate 1000 random samples
for (i in c(1:1000)) {
ranuni = stats::runif(dim(mrdata)[1], min = 0, max = 1)
mrdata<-mrdata[order(mrdata[myID], ranuni),] # randomy shuffles based on ID
srdata<-mrdata[!duplicated(mrdata[myID]),]
srdata<-srdata[varlist]
mean.list[i] = mean(srdata$AGE18, na.omit = TRUE)
sd.list[i] = sd(srdata$AGE18)
ran.list[[i]] = ranuni
}
# turns lists into data.frames
m = do.call(rbind, mean.list)
s = do.call(rbind, sd.list)
matches = data.frame(mean_age = m, mean_sd = s, i = c(1:length(m)))
# find closest match
matches$dist_from_mean = abs(matches$mean_age - my_mean)
matches$dist_from_sd = abs(matches$mean_sd - my_sd)
matches = matches %>% arrange(dist_from_mean, dist_from_sd)
matches[1:40,]
# potentially good ones: i = 845, 724, 521
##############################
# Optional code for examining sample age distributions
##############################
# test the samples to make sure they're not too similar
s1 = ran.list[[845]]
mrdata$ranuni = s1
mrdata<-mrdata[order(mrdata[myID],mrdata$ranuni),] # randomy shuffles based on ID
srdata<-mrdata[!duplicated(mrdata[myID]),]
list1 = paste(srdata$ID2, srdata$AGE, sep ='_')
dat1 =srdata %>% dplyr::select(ID2, AGE18_1 = AGE18)
s2 = ran.list[[724]]
mrdata$ranuni = s2
mrdata<-mrdata[order(mrdata[myID],mrdata$ranuni),] # randomy shuffles based on ID
srdata<-mrdata[!duplicated(mrdata[myID]),]
list2 = paste(srdata$ID2, srdata$AGE, sep ='_')
dat2 = srdata %>% dplyr::select(ID2, AGE18_2 = AGE18)
s3 = ran.list[[521]]
mrdata$ranuni = s3
mrdata<-mrdata[order(mrdata[myID],mrdata$ranuni),] # randomy shuffles based on ID
srdata<-mrdata[!duplicated(mrdata[myID]),]
list3 = paste(srdata$ID2, srdata$AGE, sep ='_')
dat3= srdata %>% dplyr::select(ID2, AGE18_3 = AGE18)
# look at overlap between samples
length( setdiff(list1,list2) ) / length(list1)
length( setdiff(list2,list1) ) / length(list1)
length( intersect(list1,list2) ) / nrow(srdata) # 38% overlap
length( intersect(list1,list3) ) / nrow(srdata) # 40% overlap
length( intersect(list2,list3) ) / nrow(srdata) # 37% overlap
dat1 = dat1 %>% arrange(ID2)
dat2 = dat2 %>% arrange(ID2)
dat3 = dat3 %>% arrange(ID2)
# visualize correlation in age for each kid
plot_data = merge(dat1, dat2, by = 'ID2')
ggplot(data = plot_data, aes(x = AGE18_1, y =AGE18_2)) +
geom_point()
# compare age distributions to sample mean
ggplot(data = mrdata, aes(x = AGE18)) +
geom_histogram(color = 'black')
ggplot(data = dat1, aes(x = AGE18_1)) +
geom_histogram(color = 'black')
ggplot(data = dat2, aes(x = AGE18_2)) +
geom_histogram(color = 'black')
ggplot(data = dat3, aes(x = AGE18_3)) +
geom_histogram(color = 'black')
# 1 and 3
ru1 = ran.list[[845]]
ru2 = ran.list[[521]]
ru3 = ran.list[[724]]
ru1 = data.frame(ru1)
colnames(ru1)[1] = 'ru'
ru2 = data.frame(ru2)
colnames(ru2)[1] = 'ru'
ru3 = data.frame(ru3)
colnames(ru3)[1] = 'ru'
|
4e63836158bb5b3dced9fa16973c5f9ac0e1b2b2
|
76e1ad0b4fea46946939c33e795b73d86ba2124b
|
/task4/task4explore2.R
|
fc914d5e847c38f1385d4b7660c71330bacca1b5
|
[] |
no_license
|
guidogallopyn/TextPrediction
|
a559940bd1e05ea9ff1e07a71c87dbc4b51ccba5
|
dfeb4d109c686ea004cd1736a60cf418b7f37970
|
refs/heads/master
| 2021-01-02T09:15:11.169480
| 2015-04-27T18:21:36
| 2015-04-27T18:21:36
| 34,554,810
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,464
|
r
|
task4explore2.R
|
# exploring storing, cleaning and basic tokenization and counting on very lorge corpora
setwd("~/Documents/Courses/DataScience/CapStone")
library(tm)
library(slam)
library(hash)
#library(RWeka) # don't load it, see http://stackoverflow.com/questions/17703553/bigrams-instead-of-single-words-in-termdocument-matrix-using-r-and-rweka
#options(mc.cores=1) # RWeka bug workaround
#source("NGramLM.R")
# loading 100 million word corpus, cleaning and counting unigrams with tdm fails on Mac with 8 Mb (so do not run this code)
if(FALSE) {
start.time <- Sys.time()
corpus <- VCorpus(DirSource("data/final/en_US/"))
corpus <- tm_map(corpus, content_transformer(function(x) iconv(x, from="latin1", to="ASCII", sub="")))
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removePunctuation, preserve_intra_word_dashes = TRUE)
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, stripWhitespace)
tdm <- TermDocumentMatrix(corpus)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
}
#what about PCorpus? works, created a db file on hdisk
#install.packages("filehash")
corpus <- VCorpus(DirSource("data/micro/en_US/"))
inspect(corpus)
meta(corpus)
meta(corpus[[1]])
(tdm <- TermDocumentMatrix(corpus))
nTerms(tdm)
library(filehash)
corpus <- PCorpus(DirSource("data/micro/en_US/"),dbControl = list(dbName = "db/enUSmicro.db"))
inspect(corpus)
meta(corpus)
meta(corpus[[1]])
(tdm <- TermDocumentMatrix(corpus))
nTerms(tdm)
# loading, cleaning and counting small corpus with Pcorpus, compare with
# Normalize a tm corpus: strip non-ASCII characters, lowercase, remove
# punctuation (keeping intra-word dashes), remove digits, and collapse
# repeated whitespace. Each step is a separate tm_map() pass.
cleaning <- function(corpus) {
  to_ascii <- content_transformer(function(x) iconv(x, from = "latin1", to = "ASCII", sub = ""))
  cleaned <- tm_map(corpus, to_ascii)
  cleaned <- tm_map(cleaned, content_transformer(tolower))
  cleaned <- tm_map(cleaned, removePunctuation, preserve_intra_word_dashes = TRUE)
  cleaned <- tm_map(cleaned, removeNumbers)
  tm_map(cleaned, stripWhitespace)
}
corpus <- VCorpus(DirSource("data/micro/en_US/"))
inspect(corpus)
meta(corpus)
meta(corpus[[1]])
clean<-cleaning(corpus)
inspect(corpus)
inspect(clean)
meta(clean)
meta(clean[[1]])
(tdm <- TermDocumentMatrix(clean))
nTerms(tdm)
# caveate is that tm_map chnages originalpCorpus, not VCorpus
corpus <- PCorpus(DirSource("data/micro/en_US/"),dbControl = list(dbName = "db/enUSmicro.db"))
inspect(corpus)
meta(corpus)
meta(corpus[[1]])
clean<-cleaning(corpus)
inspect(corpus)
inspect(clean)
meta(clean)
meta(clean[[1]])
(tdm <- TermDocumentMatrix(clean))
nTerms(tdm)
# maybe better loading, cleaning and counting small corpus with Pcorpus, compare with
# Same normalization as cleaning(), but done in a single tm_map() pass by
# handing the whole transformation pipeline to tm_reduce().
cleaning2 <- function(corpus) {
  ascii_only <- content_transformer(function(x) iconv(x, from = "latin1", to = "ASCII", sub = ""))
  pipeline <- list(
    ascii_only,
    content_transformer(tolower),
    removePunctuation,
    removeNumbers,
    stripWhitespace
  )
  tm_map(corpus, FUN = tm_reduce, tmFuns = pipeline)
}
corpus <- PCorpus(DirSource("data/micro/en_US/"),dbControl = list(dbName = "db/enUSmicro.db"))
#corpus <- PCorpus(DirSource("data/large/en_US/"),dbControl = list(dbName = "db/enUSlarge.db"))
inspect(corpus)
meta(corpus)
meta(corpus[[1]])
clean<-cleaning2(corpus)
inspect(corpus)
inspect(clean)
meta(clean)
meta(clean[[1]])
meta(corpus)
meta(corpus[[1]])
(tdm <- TermDocumentMatrix(clean))
nTerms(tdm)
# Benchmark corpus handling for one named data set under data/<name>/en_US/:
# times (1) loading it as an in-memory VCorpus or a disk-backed PCorpus,
# (2) cleaning it with cleaning2(), and (3) counting total words via a
# term-document matrix. Returns a one-row data.frame with the word count,
# the permanent flag, and the three elapsed timings.
measure <- function (name, permanent=FALSE) {
  t0 <- Sys.time()
  # DirSource construction stays inside the timed region, as in the
  # original measurement.
  if (permanent) {
    corpus <- PCorpus(DirSource(paste0("data/", name, "/en_US/")),
                      dbControl = list(dbName = paste0("db/enUS", name, ".db")))
  } else {
    corpus <- VCorpus(DirSource(paste0("data/", name, "/en_US/")))
  }
  loadtime <- Sys.time() - t0

  t0 <- Sys.time()
  corpus <- cleaning2(corpus)
  cleaningtime <- Sys.time() - t0

  t0 <- Sys.time()
  # Total word count = sum over all term frequencies; wordLengths=c(1, Inf)
  # keeps single-character tokens that tm would otherwise drop.
  tdm <- TermDocumentMatrix(corpus,
                            control = list(tokenize = RWeka::WordTokenizer,
                                           wordLengths = c(1, Inf)))
  n <- sum(slam::row_sums(tdm))
  countingtime <- Sys.time() - t0

  data.frame(corpus = name, nWords = n, perm = permanent,
             LoadTime = loadtime, CleaningTime = cleaningtime,
             CountingTime = countingtime)
}
measure("micro",permanent=FALSE)
measure("micro",permanent=TRUE)
# loading, cleaning and counting small corpus with V corpus
(df<-measure("small", FALSE))
(df<-rbind(df, measure("small",TRUE)))
(df<-rbind(df, measure("medium",FALSE)))
(df<-rbind(df, measure("medium",TRUE)))
(df<-rbind(df, measure("large",FALSE)))
(df<-rbind(df, measure("large",TRUE)))
#(df<-rbind(df, measure("final",TRUE))) takes too long
df
if(FALSE) { # this takes very long
start.time <- Sys.time()
corpus <- VCorpus(DirSource(paste0("data/final/en_US/")))
loadtime <- Sys.time() - start.time
df<-rbind(df,data.frame(corpus="final",nWords=df$nWords[7], perm=FALSE, LoadTime=10*3600, CleaningTime=NA,CountingTime=NA))
df
}
save(df,file="task4/tab2.RData")
summary(lm(log(as.numeric(LoadTime)) ~ log(nWords) * factor(perm), data=df ))
summary(lm(CleaningTime ~ nWords * factor(perm), data=df ))
summary(lm(CountingTime ~ nWords * factor(perm), data=df ))
# new strategy: read, process and count line by line (no reading of entire corpus)
ctrl<- list(tolower=TRUE,
removePunctuation=list(preserve_intra_word_dashes = TRUE),
removeNumbers=TRUE,
tokenize = function(x) RWeka::NGramTokenizer(x, RWeka::Weka_control(min = 1, max = 3)), # count unigrams
wordLengths=c(1, Inf))
mypath <- "data/small/en_US/"
start.time <- Sys.time()
for (doc in list.files(mypath)) {
con <- file(paste0(mypath,doc), "r")
n<-1
freq<-hash()
while(TRUE) {
line <- readLines(con,1) # read 1 line
if(length(line)==0) break # EOF
tf <- termFreq(PlainTextDocument(line), control=ctrl) # clean and count with tm
if(length(tf)==0) next
f <- tf[1:length(tf)]
si <- intersect(names(freq),names(f)) # words alreday seen in prev lines
sd <- setdiff(names(f),names(freq)) # new words
if(length(si)>0) freq[si] <- values(freq[si]) + f[si]
if(length(sd)>0) freq[sd] <- f[sd]
n<-n+1
cat(".")
}
close(con)
cat(paste(doc,"lines processed",n))
save(freq,file=paste0("counts/ngram",doc,".Rdata"))
}
(totalTime <- Sys.time() - start.time)
head(freq)
unigrams<- sapply(strsplit(names(freq)," "),length) ==1
bigrams<- sapply(strsplit(names(freq)," "),length) ==2
head(sort(freq[unigrams],decreasing=TRUE),12)
head(sort(freq[bigrams],decreasing=TRUE),12)
|
5ee244e2bf5efcce2cf3fc6c1f7785e38b13f054
|
375c0d3412e884260d5dd53498b6d0c6857aa3d1
|
/plot4.R
|
a4a9038b794412db1b736b5f86d28028bbca127c
|
[] |
no_license
|
tommwilliamson/ExData_Plotting1
|
9247ce05ca98abc624e28b76fcd0742ddb658905
|
94b3b29794a271e4a23c432acacdd8940b4d97ff
|
refs/heads/master
| 2021-05-07T06:28:32.427353
| 2017-11-23T21:53:21
| 2017-11-23T21:53:21
| 111,750,578
| 0
| 0
| null | 2017-11-23T01:46:48
| 2017-11-23T01:46:47
| null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
plot4.R
|
library(data.table)

# Load the full household power consumption data set.
all_data <- fread('household_power_consumption.txt')

# Keep only the observations from 2007-02-01 and 2007-02-02 (d/m/Y format).
sub_data <- all_data[all_data$Date %in% "1/2/2007" | all_data$Date %in% "2/2/2007"]

# The raw file marks missing values with "?". The original call
# `gsub("?", NA, sub_data, fixed = TRUE)` discarded its return value (and
# gsub does not operate column-wise on a data.table), so the replacement
# never happened. Replace "?" with NA in each column explicitly instead.
for (col in names(sub_data)) {
  set(sub_data, i = which(sub_data[[col]] == "?"), j = col, value = NA_character_)
}

# Build a date/time variable from the Date and Time columns.
dateTime <- paste(sub_data$Date, sub_data$Time)
dateTime <- strptime(dateTime, "%d/%m/%Y %H:%M:%S")

# Create the 2x2 panel of plots (plot 4) on a transparent background.
png(filename="plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
par(bg=NA)
# Top-left: global active power over time.
plot(dateTime, as.numeric(sub_data$Global_active_power), type='l',
     xlab="", ylab="Global Active Power (kilowatts)")
# Top-right: voltage over time.
plot(dateTime, as.numeric(sub_data$Voltage), type='l',
     xlab="datetime", ylab="Voltage")
# Bottom-left: the three sub-metering series overlaid on one set of axes.
plot(dateTime, as.numeric(sub_data$Sub_metering_1), type='n',
     xlab="", ylab="Energy sub metering")
points(dateTime, as.numeric(sub_data$Sub_metering_1), type='l')
points(dateTime, as.numeric(sub_data$Sub_metering_2), col="red", type='l')
points(dateTime, as.numeric(sub_data$Sub_metering_3), col="blue", type='l')
legend("topright", legend = names(sub_data)[grep('Sub_metering', names(sub_data))],
       col=c("Black","Red","Blue"), lty=1, bty='n')
# Bottom-right: global reactive power over time.
plot(dateTime, as.numeric(sub_data$Global_reactive_power), type='l',
     xlab="datetime", ylab="global_reactive_power")
par(mfrow=c(1,1))
dev.off()
|
b156c50b687aa61c40e6b240e374ca22ca0768a8
|
0af8332ed8cb059282d6b870a9eb843c1e6680bc
|
/Rpkg/R/mbl_tidy.R
|
a5233e3ba3b39c73613ab1d7e4cd424bc7c51680
|
[] |
no_license
|
tomsing1/mbl2018
|
44383313845897323be29fc9263e309276d38418
|
6c69e0693abf75955ad22b3e502b33a3c3c0e5d5
|
refs/heads/master
| 2020-03-19T02:22:25.456235
| 2018-06-20T17:12:27
| 2018-06-20T17:12:27
| 135,623,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,938
|
r
|
mbl_tidy.R
|
#' Converts your expression data into a (huge) "tidy" data.frame
#'
#' Use this function to extract all of your expression data into a data.frame
#' that can be used with dplyr and ggplot to make arbitrarily informative
#' plots.
#'
#' @export
#' @seealso [mbl_plot_expression()]
#'
#' @param x The expression object to tidy. Methods are provided for
#'   `DGEList` and `EList` objects (see `mbl_tidy.DGEList` and
#'   `mbl_tidy.EList`).
#' @param ... additional arguments passed on to the class-specific method.
#' @return a (huge) data.frame with your expression data. Each row holds the
#'   expression of one gene in one sample. The columns include all of the
#'   gene- and sample-level metadata for the observation.
#'
#' @examples
#' # Make a boxplot with points of Fxyd6 in the cheek wildtype/knockout
#' y <- mbl_load_rnaseq("mouse", dataset = "mbl")
#' ydat <- mbl_tidy(y) # all of the rnaseq data
#' gdat <- filter(ydat, source == "cheek", symbol == "Fxyd6")
#' ggplot(gdat, aes(x = genotype, y = cpm)) +
#'   geom_boxplot() +
#'   geom_point()
mbl_tidy <- function(x, ...) {
# S3 generic: dispatch on the class of `x`.
UseMethod("mbl_tidy", x)
}
#' @rdname mbl_tidy
#' @export
#' @importFrom edgeR cpm
#' @importFrom reshape2 melt
#' @method mbl_tidy DGEList
mbl_tidy.DGEList <- function(x, normalized.lib.sizes = TRUE, prior.count = 3, ...) {
  # Pair log2-CPM values with the raw counts so both appear as columns in
  # the tidied output; gene and sample metadata come straight off the
  # DGEList slots.
  logcpm <- cpm(x, normalized.lib.sizes = normalized.lib.sizes,
                log = TRUE, prior.count = prior.count)
  mbl_tidy.core(list(cpm = logcpm, count = x$counts),
                genes = x$genes, samples = x$samples)
}
#' @rdname mbl_tidy
#' @export
#' @method mbl_tidy EList
mbl_tidy.EList <- function(x, ...) {
  # Start from the expression matrix; add observation weights when present.
  exprs_mats <- list(cpm = x$E)
  if (is.matrix(x$weights)) {
    # Give the weight matrix the same dimnames as the expression matrix so
    # the melted columns line up in mbl_tidy.core().
    w <- x$weights
    dimnames(w) <- list(rownames(x), colnames(x))
    exprs_mats$weight <- w
  } else {
    # Without weights the single matrix is just called "value".
    names(exprs_mats)[1L] <- "value"
  }
  mbl_tidy.core(exprs_mats, genes = x$genes, samples = x$targets)
}
# Shared worker for the mbl_tidy methods.
#
# `mats` is a named list of matrices (or a single matrix) that all share the
# same rownames (genes) and colnames (samples). Each matrix is melted to
# long form and becomes one value column in the output; the `genes` and
# `samples` metadata frames are then joined on.
mbl_tidy.core <- function(mats, genes, samples, ...) {
# A bare matrix is wrapped as a one-element list named "value".
if (is.matrix(mats)) mats <- list(value = mats)
stopifnot(is.list(mats))
stopifnot(all(sapply(mats, is.matrix)))
assert_named(mats, type = "unique")
# Row/column names of the first matrix define gene and sample identity.
rnames <- rownames(mats[[1]])
snames <- colnames(mats[[1]])
# Find (or create) a gene-ID column in `genes` that matches the rownames.
# The temporary ".gene_id" column guarantees at least one match; it is
# removed again if an existing column already matched.
genes$.gene_id <- rnames
gid.col <- sapply(genes, function(xx) all(xx == rnames))
gid.col <- colnames(genes)[which(gid.col)[1L]]
if (gid.col != ".gene_id") genes$.gene_id <- NULL
# Same trick for the sample-ID column in `samples`.
samples$.sample_id <- snames
sid.col <- sapply(samples, function(xx) all(xx == snames))
sid.col <- colnames(samples)[which(sid.col)[1L]]
if (sid.col != ".sample_id") samples$.sample_id <- NULL
# Melt every matrix to long form: one column each for gene ID, sample ID,
# and the matrix's values (named after the list element).
adat.all <- lapply(names(mats), function(mname) {
m <- mats[[mname]]
stopifnot(all.equal(rownames(m), rnames))
m <- melt(m)
m <- transform(m, Var1 = as.character(Var1), Var2 = as.character(Var2))
colnames(m) <- c(gid.col, sid.col, mname)
m
})
# cbind works because every melted frame has rows in the same order.
adat <- do.call(cbind, adat.all)
# if there were multiple matrices, there will be multiple sample_id columns
# so we remove those
adat <- adat[, !duplicated(colnames(adat))]
# Attach gene- and sample-level metadata.
out <- inner_join(adat, genes, by = gid.col)
out <- inner_join(out, samples, by = sid.col)
out
}
|
b9d2c2d00c3eef76b0854bff9434217cc0fe5c4c
|
6e20f24df0673e606b2a61596a4155a5d16a2058
|
/scripts/production/live_timing/in_race_stats.R
|
b6bc45fefc5e96bf68117d2a48cba1394e12ca79
|
[] |
no_license
|
drewbennison/thesingleseater
|
1992a624b8abc5b6ca4feccee5d08be3d72283fd
|
2723ec23e239bc06631d08943df101cce4d43a7f
|
refs/heads/master
| 2022-03-18T13:05:56.778874
| 2022-03-07T00:12:33
| 2022-03-07T00:12:33
| 180,887,735
| 1
| 0
| null | 2019-04-29T15:36:49
| 2019-04-11T22:30:24
|
R
|
UTF-8
|
R
| false
| false
| 2,253
|
r
|
in_race_stats.R
|
library(data.table)
library(tidyverse)
library(lubridate)
library(png)
library(ggthemes)
dt <- fread("C:/Users/drewb/Desktop/2022_02_27_r.csv")
#lap speed
dt %>% select(-time_stamp) %>%
select(lastName, LastSpeed, diff, gap, overallRank, startPosition, laps, totalTime, lastLapTime) %>%
unique() %>%
filter(laps>79) %>%
mutate(LastSpeedNew = as.numeric(LastSpeed),
LapTimeNew = 1.8/LastSpeedNew*60*60) %>%
filter(lastName %in% c("McLaughlin", "Herta", "Palou", "Power", "VeeKay")) %>%
rename(Lap = laps) %>%
rename(`Lap Time` = LapTimeNew) %>%
rename(speed = LastSpeed) %>%
rename(Driver = lastName) %>%
#filter(lap_time < 120) %>%
ggplot(aes(x=Lap, y=`Lap Time`, color=Driver)) + geom_line() +
labs(title = "Lap time by driver, Firestone GP at St. Petersburg",
y= "Lap time (seconds)") +
theme_bw() +
labs(caption = "@thesingleseater | thesingleseater.com")
ggsave("C:/Users/drewb/Desktop/gap.png", width = 7, height = 4, dpi = 500)
#gap of two drivers - only works if one is the leader
dt %>%
select(lastName, LastSpeed, diff, gap, overallRank, startPosition, laps, totalTime) %>%
unique() %>%
filter(lastName %in% c("McLaughlin", "Herta", "Palou", "Power", "VeeKay")) %>%
mutate(diff = -1 * as.numeric(diff)) %>%
filter(!is.na(diff), laps>79) %>%
rename(Lap = laps) %>%
rename(Driver = lastName) %>%
ggplot(aes(x=Lap, y=diff, color=Driver)) + geom_line() +
labs(title = "Gap to leader, Firestone GP at St. Petersburg",
caption = "@thesingleseater | thesingleseater.com",
y="Gap to leader (seconds)") +
theme_bw() +
ylim(-15, 0)
ggsave("C:/Users/drewb/Desktop/diff.png", width = 7, height = 4, dpi = 500)
#static gap of all drivers
dt %>% select(lastName, diff, time_stamp) %>%
mutate(time_stamp = as_datetime(time_stamp),
diff = as.numeric(diff)) %>%
group_by(lastName) %>%
slice(which.max(time_stamp)) %>%
filter(diff < 10, diff > -10) %>%
ggplot(aes(y=reorder(lastName, -diff), x=diff)) +
geom_col() +
labs(x="Gap to fastest driver (seconds)",
y="",
title = "Single lap time") +
theme_bw()
ggsave("C:/Users/drewb/Desktop/plot.png", dpi = 800, height = 6, width = 8)
#push to pass remaining
|
53902aeab930f07fefede127a69126f0b38af273
|
2c485b1c2f39fc3c269c6f578e21d698dcec63e6
|
/R/accessors.R
|
7dff3fb45ee2f959fee3862bc585db603790725d
|
[] |
no_license
|
aalfons/simFrame
|
002f47cad078c93dec24c4c9fab4893e7bb56922
|
23314f0b1f6632560e0d95dc568f708f3c1286a9
|
refs/heads/master
| 2021-12-23T10:23:44.587577
| 2021-11-23T12:46:58
| 2021-11-23T12:46:58
| 6,717,992
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,769
|
r
|
accessors.R
|
# ----------------------
# Author: Andreas Alfons
# KU Leuven
# ----------------------
# mutator functions, whose generic functions contain the '...' argument:
# the expression needs to be defined in the environment of the generic
# function, but it needs to be evaluated in the environment one level
# further up (i.e., second environment above the current one)
## class "DataControl"
# Getter: returns the 'size' slot of a DataControl object.
setMethod("getSize", "DataControl", function(x) slot(x, "size"))
#setMethod("setSize", "DataControl",
# function(x, size) eval.parent(substitute(slot(x, "size") <- size)))
# Setter: assigns the 'size' slot of the object in the caller's frame.
# Per the file-header comment: because the generic takes '...', the
# assignment expression must be built in the generic's environment
# (parent.frame()) but evaluated two frames up (n=2), where the user's
# object actually lives.
setMethod("setSize", "DataControl",
function(x, size) {
eval.parent(substitute(slot(x, "size") <- size, env=parent.frame()), n=2)
})
# Getter/setter for the 'distribution' slot; this setter's generic has no
# '...', so a single eval.parent() level suffices.
setMethod("getDistribution", "DataControl", function(x) slot(x, "distribution"))
setMethod("setDistribution", "DataControl",
function(x, distribution) {
eval.parent(substitute(slot(x, "distribution") <- distribution))
})
# Getter/setter for the 'dots' slot (generic has '...', hence n=2).
setMethod("getDots", "DataControl", function(x) slot(x, "dots"))
#setMethod("setDots", "DataControl",
# function(x, dots) eval.parent(substitute(slot(x, "dots") <- dots)))
setMethod("setDots", "DataControl",
function(x, dots) {
eval.parent(substitute(slot(x, "dots") <- dots, env=parent.frame()), n=2)
})
# Getter/setter for the 'colnames' slot.
setMethod("getColnames", "DataControl", function(x) slot(x, "colnames"))
setMethod("setColnames", "DataControl",
function(x, colnames) {
eval.parent(substitute(slot(x, "colnames") <- colnames))
})
## class "SampleControl"
setMethod("getK", "VirtualSampleControl", function(x) slot(x, "k"))
setMethod("setK", "VirtualSampleControl",
function(x, k) eval.parent(substitute(slot(x, "k") <- k)))
setMethod("getDesign", "SampleControl", function(x) slot(x, "design"))
setMethod("setDesign", "SampleControl",
function(x, design) eval.parent(substitute(slot(x, "design") <- design)))
setMethod("getGrouping", "SampleControl", function(x) slot(x, "grouping"))
setMethod("setGrouping", "SampleControl",
function(x, grouping) {
eval.parent(substitute(slot(x, "grouping") <- grouping))
})
setMethod("getCollect", "SampleControl", function(x) slot(x, "collect"))
setMethod("setCollect", "SampleControl",
function(x, collect) eval.parent(substitute(slot(x, "collect") <- collect)))
setMethod("getFun", "SampleControl", function(x) slot(x, "fun"))
#setMethod("setFun", "SampleControl",
# function(x, fun) eval.parent(substitute(slot(x, "fun") <- fun)))
setMethod("setFun", "SampleControl",
function(x, fun) {
eval.parent(substitute(slot(x, "fun") <- fun, env=parent.frame()), n=2)
})
setMethod("getSize", "SampleControl", function(x) slot(x, "size"))
#setMethod("setSize", "SampleControl",
# function(x, size) eval.parent(substitute(slot(x, "size") <- size)))
setMethod("setSize", "SampleControl",
function(x, size) {
eval.parent(substitute(slot(x, "size") <- size, env=parent.frame()), n=2)
})
setMethod("getProb", "SampleControl", function(x) slot(x, "prob"))
#setMethod("setProb", "SampleControl",
# function(x, prob) eval.parent(substitute(slot(x, "prob") <- prob)))
setMethod("setProb", "SampleControl",
function(x, prob) {
eval.parent(substitute(slot(x, "prob") <- prob, env=parent.frame()), n=2)
})
setMethod("getDots", "SampleControl", function(x) slot(x, "dots"))
#setMethod("setDots", "SampleControl",
# function(x, dots) eval.parent(substitute(slot(x, "dots") <- dots)))
setMethod("setDots", "SampleControl",
function(x, dots) {
eval.parent(substitute(slot(x, "dots") <- dots, env=parent.frame()), n=2)
})
## class "TwoStageControl"
setMethod("getDesign", "TwoStageControl", function(x) slot(x, "design"))
setMethod("setDesign", "TwoStageControl",
function(x, design) eval.parent(substitute(slot(x, "design") <- design)))
setMethod("getGrouping", "TwoStageControl", function(x) slot(x, "grouping"))
setMethod("setGrouping", "TwoStageControl",
function(x, grouping) {
eval.parent(substitute(slot(x, "grouping") <- grouping))
})
# utility function to check the 'stage' argument of the following methods
# Validate the 'stage' argument used by the TwoStageControl accessors:
# it must be exactly 1 or 2; anything else (including NULL or a
# non-scalar) aborts with an error. Returns NULL invisibly when valid.
checkStage <- function(stage) {
  stage_ok <- isTRUE(stage == 1) || isTRUE(stage == 2)
  if (!stage_ok) {
    stop("'stage' must be either 1 or 2")
  }
}
# in the following mutators: 'stage' is not available in the environment of
# the generic function and needs to be extracted from the additional arguments
setMethod("getFun", "TwoStageControl",
function(x, stage = NULL) {
fun <- slot(x, "fun")
if(is.null(stage)) fun
else {
checkStage(stage)
fun[[stage]]
}
})
setMethod("setFun", "TwoStageControl",
function(x, fun, stage = NULL) {
pf <- parent.frame() # environment of generic function
if(is.null(stage)) expr <- substitute(slot(x, "fun") <- fun, pf)
else {
checkStage(stage)
expr <- substitute(slot(x, "fun")[[list(...)$stage]] <- fun, pf)
}
eval.parent(expr, n=2) # evaluate expression
})
setMethod("getSize", "TwoStageControl",
function(x, stage = NULL) {
size <- slot(x, "size")
if(is.null(stage)) size
else {
checkStage(stage)
size[[stage]]
}
})
setMethod("setSize", "TwoStageControl",
function(x, size, stage = NULL) {
pf <- parent.frame() # environment of generic function
if(is.null(stage)) expr <- substitute(slot(x, "size") <- size, pf)
else {
checkStage(stage)
expr <- substitute(slot(x, "size")[[list(...)$stage]] <- size, pf)
}
eval.parent(expr, n=2) # evaluate expression
})
setMethod("getProb", "TwoStageControl",
function(x, stage = NULL) {
prob <- slot(x, "prob")
if(is.null(stage)) prob
else {
checkStage(stage)
prob[[stage]]
}
})
setMethod("setProb", "TwoStageControl",
function(x, prob, stage = NULL) {
pf <- parent.frame() # environment of generic function
if(is.null(stage)) expr <- substitute(slot(x, "prob") <- prob, pf)
else {
checkStage(stage)
expr <- substitute(slot(x, "prob")[[list(...)$stage]] <- prob, pf)
}
eval.parent(expr, n=2) # evaluate expression
})
setMethod("getDots", "TwoStageControl",
function(x, stage = NULL) {
dots <- slot(x, "dots")
if(is.null(stage)) dots
else {
checkStage(stage)
dots[[stage]]
}
})
setMethod("setDots", "TwoStageControl",
function(x, dots, stage = NULL) {
pf <- parent.frame() # environment of generic function
if(is.null(stage)) expr <- substitute(slot(x, "dots") <- dots, pf)
else {
checkStage(stage)
expr <- substitute(slot(x, "dots")[[list(...)$stage]] <- dots, pf)
}
eval.parent(expr, n=2) # evaluate expression
})
## class "SampleSetup"
# public accessors (getters): each method is a thin wrapper returning the
# corresponding slot unchanged.  The commented-out methods below are kept
# as documentation of accessors that were intentionally disabled.
setMethod("getIndices", "SampleSetup", function(x) x@indices)
setMethod("getProb", "SampleSetup", function(x) x@prob)
#setMethod("getDesign", "SampleSetup", function(x) slot(x, "design"))
#setMethod("getGrouping", "SampleSetup", function(x) slot(x, "grouping"))
#setMethod("getCollect", "SampleSetup", function(x) slot(x, "collect"))
#setMethod("getFun", "SampleSetup", function(x) slot(x, "fun"))
setMethod("getControl", "SampleSetup", function(x) x@control)
setMethod("getSeed", "SampleSetup", function(x) x@seed)
setMethod("getCall", "SampleSetup", function(x) x@call)
# private mutators (setters)
# Each setter builds the slot assignment with substitute() and evaluates it
# in the caller's frame, so the caller's object is updated in place.
setMethod("setIndices", "SampleSetup",
    function(x, indices) eval.parent(substitute(slot(x, "indices") <- indices)))
setMethod("setSeed", "SampleSetup",
    function(x, seed) eval.parent(substitute(slot(x, "seed") <- seed)))
setMethod("setCall", "SampleSetup",
    function(x, call) eval.parent(substitute(slot(x, "call") <- call)))
# summary
# read-only accessor for the summary object's achieved sample sizes
setMethod("getSize", "SummarySampleSetup", function(x) slot(x, "size"))
## class "ContControl"
# Accessors/mutators for the contamination-control class hierarchy.
# Getters return the slot unchanged; setters evaluate the assignment in the
# caller's frame (in-place update).  The setDots/setFun variants below use
# substitute(..., env=parent.frame()) with eval.parent(..., n=2) -- they
# replace the simpler commented-out versions, presumably to resolve the
# value in the generic's frame; confirm before simplifying.
setMethod("getTarget", "VirtualContControl", function(x) slot(x, "target"))
setMethod("setTarget", "VirtualContControl",
    function(x, target) eval.parent(substitute(slot(x, "target") <- target)))
setMethod("getEpsilon", "VirtualContControl", function(x) slot(x, "epsilon"))
setMethod("setEpsilon", "VirtualContControl",
    function(x, epsilon) eval.parent(substitute(slot(x, "epsilon") <- epsilon)))
setMethod("getGrouping", "ContControl", function(x) slot(x, "grouping"))
setMethod("setGrouping", "ContControl",
    function(x, grouping) {
        eval.parent(substitute(slot(x, "grouping") <- grouping))
    })
setMethod("getAux", "ContControl", function(x) slot(x, "aux"))
setMethod("setAux", "ContControl",
    function(x, aux) eval.parent(substitute(slot(x, "aux") <- aux)))
setMethod("getDistribution", "DCARContControl",
    function(x) slot(x, "distribution"))
setMethod("setDistribution", "DCARContControl",
    function(x, distribution) {
        eval.parent(substitute(slot(x, "distribution") <- distribution))
    })
setMethod("getDots", "DCARContControl", function(x) slot(x, "dots"))
#setMethod("setDots", "DCARContControl",
#    function(x, dots) eval.parent(substitute(slot(x, "dots") <- dots)))
setMethod("setDots", "DCARContControl",
    function(x, dots) {
        eval.parent(substitute(slot(x, "dots") <- dots, env=parent.frame()), n=2)
    })
setMethod("getFun", "DARContControl", function(x) slot(x, "fun"))
#setMethod("setFun", "DARContControl",
#    function(x, fun) eval.parent(substitute(slot(x, "fun") <- fun)))
setMethod("setFun", "DARContControl",
    function(x, fun) {
        eval.parent(substitute(slot(x, "fun") <- fun, env=parent.frame()), n=2)
    })
setMethod("getDots", "DARContControl", function(x) slot(x, "dots"))
#setMethod("setDots", "DARContControl",
#    function(x, dots) eval.parent(substitute(slot(x, "dots") <- dots)))
setMethod("setDots", "DARContControl",
    function(x, dots) {
        eval.parent(substitute(slot(x, "dots") <- dots, env=parent.frame()), n=2)
    })
## class "NAControl"
# Accessors/mutators for the missing-value control classes.  Getters return
# the slot unchanged; setters evaluate the assignment in the caller's frame
# so the caller's object is modified in place.
setMethod("getTarget", "VirtualNAControl", function(x) slot(x, "target"))
setMethod("setTarget", "VirtualNAControl",
    function(x, target) eval.parent(substitute(slot(x, "target") <- target)))
setMethod("getNArate", "VirtualNAControl", function(x) slot(x, "NArate"))
setMethod("setNArate", "VirtualNAControl",
    function(x, NArate) eval.parent(substitute(slot(x, "NArate") <- NArate)))
setMethod("getGrouping", "NAControl", function(x) slot(x, "grouping"))
setMethod("setGrouping", "NAControl",
    function(x, grouping) {
        eval.parent(substitute(slot(x, "grouping") <- grouping))
    })
setMethod("getAux", "NAControl", function(x) slot(x, "aux"))
setMethod("setAux", "NAControl",
    function(x, aux) eval.parent(substitute(slot(x, "aux") <- aux)))
setMethod("getIntoContamination", "NAControl",
    function(x) slot(x, "intoContamination"))
setMethod("setIntoContamination", "NAControl",
    function(x, intoContamination) {
        eval.parent(substitute(slot(x, "intoContamination") <- intoContamination))
    })
## class "Strata"
# public accessors (getters): thin wrappers, each exposing one slot of the
# stratification object.
setMethod("getValues", "Strata", function(x) x@values)
setMethod("getSplit", "Strata", function(x) x@split)
setMethod("getDesign", "Strata", function(x) x@design)
setMethod("getNr", "Strata", function(x) x@nr)
setMethod("getLegend", "Strata", function(x) x@legend)
setMethod("getSize", "Strata", function(x) x@size)
setMethod("getCall", "Strata", function(x) x@call)
# private mutators (setters)
# Records the creating call; the assignment is evaluated in the caller's
# frame, so the caller's object is updated in place.
setMethod("setCall", "Strata",
    function(x, call) eval.parent(substitute(slot(x, "call") <- call)))
## class "SimControl"
# Accessors/mutators for the simulation-control class.  Getters return the
# slot unchanged; setters evaluate the assignment in the caller's frame.
# setFun/setDots use substitute(..., env=parent.frame()) plus
# eval.parent(..., n=2), replacing the simpler commented-out versions --
# presumably to resolve the value in the generic's frame; confirm before
# simplifying.
setMethod("getContControl", "SimControl", function(x) slot(x, "contControl"))
setMethod("setContControl", "SimControl",
    function(x, contControl) {
        eval.parent(substitute(slot(x, "contControl") <- contControl))
    })
setMethod("getNAControl", "SimControl", function(x) slot(x, "NAControl"))
setMethod("setNAControl", "SimControl",
    function(x, NAControl) {
        eval.parent(substitute(slot(x, "NAControl") <- NAControl))
    })
setMethod("getDesign", "SimControl", function(x) slot(x, "design"))
setMethod("setDesign", "SimControl",
    function(x, design) eval.parent(substitute(slot(x, "design") <- design)))
setMethod("getFun", "SimControl", function(x) slot(x, "fun"))
#setMethod("setFun", "SimControl",
#    function(x, fun) eval.parent(substitute(slot(x, "fun") <- fun)))
setMethod("setFun", "SimControl",
    function(x, fun) {
        eval.parent(substitute(slot(x, "fun") <- fun, env=parent.frame()), n=2)
    })
setMethod("getDots", "SimControl", function(x) slot(x, "dots"))
#setMethod("setDots", "SimControl",
#    function(x, dots) eval.parent(substitute(slot(x, "dots") <- dots)))
setMethod("setDots", "SimControl",
    function(x, dots) {
        eval.parent(substitute(slot(x, "dots") <- dots, env=parent.frame()), n=2)
    })
setMethod("getSAE", "SimControl", function(x) slot(x, "SAE"))
setMethod("setSAE", "SimControl",
    function(x, SAE) eval.parent(substitute(slot(x, "SAE") <- SAE)))
### class "SimResult"
#
## public accessors (getters)
#setMethod("getValues", "SimResult", function(x) slot(x, "values"))
#setMethod("getAdd", "SimResult", function(x) slot(x, "add"))
## class "SimResults"
# public accessors (getters): each method simply exposes one slot of the
# simulation-results object.
setMethod("getValues", "SimResults", function(x) x@values)
setMethod("getAdd", "SimResults", function(x) x@add)
setMethod("getDesign", "SimResults", function(x) x@design)
setMethod("getColnames", "SimResults", function(x) x@colnames)
setMethod("getEpsilon", "SimResults", function(x) x@epsilon)
setMethod("getNArate", "SimResults", function(x) x@NArate)
setMethod("getDataControl", "SimResults", function(x) x@dataControl)
setMethod("getSampleControl", "SimResults", function(x) x@sampleControl)
setMethod("getNrep", "SimResults", function(x) x@nrep)
setMethod("getControl", "SimResults", function(x) x@control)
setMethod("getSeed", "SimResults", function(x) x@seed)
setMethod("getCall", "SimResults", function(x) x@call)
# private mutators (setters)
# Each setter evaluates the slot assignment in the caller's frame, so the
# caller's object is updated in place.
setMethod("setValues", "SimResults",
    function(x, values) eval.parent(substitute(slot(x, "values") <- values)))
setMethod("setSeed", "SimResults",
    function(x, seed) eval.parent(substitute(slot(x, "seed") <- seed)))
setMethod("setCall", "SimResults",
    function(x, call) eval.parent(substitute(slot(x, "call") <- call)))
|
abd9f5f062a7d256530adf9fe08d63eadd4369f5
|
5e85f011e52d2c5b51d833100e9e38677d047f6e
|
/Functions/gravel_functions.R
|
b6870af79a8e43976e70fe1dcde7b44bfc39c7e2
|
[
"MIT"
] |
permissive
|
Jpomz/honestly-you-eat-this
|
26fce41c4f068ea31a5703999864c05ef878f156
|
fa1fe624d73494769bf402f998a87096fb2e145e
|
refs/heads/master
| 2021-06-16T00:55:20.503870
| 2019-05-21T19:35:03
| 2019-05-21T19:35:03
| 115,657,416
| 1
| 1
| null | 2018-02-07T05:29:39
| 2017-12-28T20:31:48
|
R
|
UTF-8
|
R
| false
| false
| 3,067
|
r
|
gravel_functions.R
|
#####################################################
# this script was copied exactly from the supplementary material of Gravel et al 2013, Methods in Ecology and Evolution. doi: 10.1111/2041-210X.12103
# if using, please cite the original publication
# J Pomeranz 27 Nov 2018
# original script below this line
#####################################################
##############################################
##############################################
#
# R Code supplementing the paper
# Inferring food web structure form predator-prey body size relationship
# by Gravel, Poisot, Albouy, Velez, Mouillot
# Methods in Ecology and Evolution
# PUTS THE VOLUME/ISSUE/PAGES HERE
# February 2013
#
##############################################
##############################################
# 2. Useful functions
# from gravel et al. 2013
##############################################
# Get regression parameters
# Input arguments:
# Bprey = log10 biomass of the prey
# Bpred = log10 biomass of the predator
# Quartil = a vector of the inferior and the superior quartiles, e.g. c(0.03,0.97)
# Returns a list of regression objects
# Requires the quantreg package
# Fit the allometric prey-predator regressions for the niche model:
# an OLS mean regression plus upper/lower quantile regressions.
# NOTE(review): this file states it is a verbatim copy of Gravel et al.
# (2013) supplementary code, so style issues (library() inside a function,
# '=' assignment) are deliberately left untouched.
reg_fn = function(Bprey,Bpred,quartil) {
    library(quantreg)
    mean_reg = lm(Bprey~Bpred) # For the n parameter
    qrsup = rq(Bprey~Bpred,tau = quartil[2]) # For the higher limit of the range
    qrinf = rq(Bprey~Bpred,tau = quartil[1]) # For the lower limit of the range
    return(list(mean_reg$coef,qrsup$coef,qrinf$coef))
}
##############################################
# Estimate the niche parameters for all species of a list
# Input arguments:
# pars = resulting parameters of the function reg_Niche
# Ball = list of body size
# Returns a matrix with four parameters for each species
# Derive the four niche-model parameters (n, c, low, high) for each species
# from the regression coefficients returned by reg_fn().
# 'pars' is the list (mean OLS, upper quantile, lower quantile) coefficients;
# 'Ball' is the vector of log10 body sizes.  Returns one row per species.
# NOTE: the local names 'n' and 'c' shadow base functions; kept verbatim
# from the published supplementary code.
get_pars_Niche = function(pars,Ball) {
    mean_reg = pars[[1]]
    qrsup = pars[[2]]
    qrinf = pars[[3]]
    # Estimate parameters for the allometric relationships
    delta = mean_reg[2]
    b1 = mean_reg[1]
    b2 = delta
    # Estimate the parameters for the niche model
    n = Ball # The niche n
    c = b1 + b2*Ball # The centroid c
    low = qrinf[1] + qrinf[2]*Ball # The lower limit of the range
    high = qrsup[1] + qrsup[2]*Ball # The higher limit of the range
    return(cbind(n,c,low,high))
}
##############################################
# Transform the parameters into an interaction matrix (the metaweb)
# Input:
# n = vector of size S with the parameter n for each of the S species
# c = vector of size S with the parameter c for each of the S species
# low = vector of size S with the parameter low for each of the S species
# high = vector of size S with the parameter high for each of the S species
# Returns a SxS matrix with 0 indicating absence of a link and 1 indicating the presence of a link
# Predators on columns, preys on rows
# Build the SxS metaweb adjacency matrix from the niche parameters:
# L[j, i] = 1 when prey j's niche value lies strictly inside predator i's
# feeding range (low[i], high[i]).  Predators are on columns, prey on rows.
# NOTE: the centroid argument 'c' is accepted for interface symmetry but is
# not used in the body; 'nr'/'nc' partially match matrix()'s nrow/ncol.
# Kept verbatim from the published supplementary code.
L_fn = function(n,c,low,high) {
    S = length(n)
    L = matrix(0,nr=S,nc=S)
    for(i in 1:S)
        for(j in 1:S)
            if(n[j]>low[i] && n[j]<high[i]) L[j,i] = 1
    return(L)
}
##############################################
|
1c3a0ebc96b4ec3651ebc614deb67057dfd4d0c4
|
16399902180809a4648e97beb7df23cf94e38374
|
/man/get_tuik_month.Rd
|
9ef1c9ba237cbd46e52bf5db807ebcfb98661068
|
[] |
no_license
|
AudioElektronik/artuik
|
ec1d2e7aae95832dcf5dfee1d639a4fdfb025546
|
a43bfab683a556878e94765fbc9675901435de54
|
refs/heads/master
| 2020-12-03T20:24:41.293970
| 2016-10-25T12:52:13
| 2016-10-25T12:52:13
| 67,956,760
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 332
|
rd
|
get_tuik_month.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_utils.R
\name{get_tuik_month}
\alias{get_tuik_month}
\title{Getting month as numeric from TUIK data}
\usage{
get_tuik_month(month_vec)
}
\description{
TUIK gives months like this "01-January". This function is just for getting
c(1) out of that.
}
|
92bacfded14b729a158cfae78ea36729b50852be
|
1c6acc9ac8520df58e703e42b41746b499c93cea
|
/Microsat_and_RAD_DAPC_map.R
|
27758f0daee1684a4a7b4f35ba7a992be6e0fff0
|
[] |
no_license
|
DanJeffries/Jeffries-et-al-2016-crucian-phylogeography
|
26bcc366c5809c52d4285c56c015e58155ff3b0b
|
45aacea583029754a94a5a5472e3a4919394ebd8
|
refs/heads/master
| 2020-04-25T18:13:06.026423
| 2016-02-19T10:19:12
| 2016-02-19T10:19:12
| 41,794,954
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,654
|
r
|
Microsat_and_RAD_DAPC_map.R
|
library(maps)
library(mapdata)
library(mapplots)
# Plot-window set-up for the DAPC pie-chart map
par(mar = c(0,0,0,0)) ## set plot window margins
par(pin = c(4,4))
par(mfrow = c(1,1)) ## set plot window size
mycolsolid = c("black", "red", "darkgreen", "blue") # set colours
# NOTE(review): transp() comes from the 'adegenet' package, which is not
# loaded above -- confirm library(adegenet) is attached before running.
mycol <- transp(mycolsolid, alpha = .8)
setwd("~/Dropbox/PhD/Dan's PhD (Shared)/Data/Microsatellites/DAPC/Complete dataset outputs/")
## Draw the base map of Europe, overlay the microsatellite sampling
## locations, and load the DAPC mean cluster memberships used for the pies.
map("worldHires", xlim=c(-10, 55), ylim=c(43,70), col="gray90", fill=TRUE)##plots the area of the map that I am interested in (just Europe, leave out x/ylim arguments for whole world view)
cords <- read.csv("C:/Users/Dan/Dropbox/PhD/Dan's PhD (Shared)/Data/Microsatellites/R stats/Adegenet IBD/Mantelcoordinates.csv", header = TRUE) ## load my microsat coordinates file.
points(cords$lon, cords$lat, pch=19, col="red", cex=0.5) ## Plots my microsat sample locations on the map
pies <- read.csv("C:/Users/Dan/Dropbox/PhD/Dan's PhD (Shared)/Data/Microsatellites/DAPC/Complete dataset outputs/Whole EU 4 clusters/WholeEU_mean_clustermemberships.csv", header=TRUE) ## have read this in again and specified that there are headers as I was having difficulty assigning headers to the object. This allows me to call populattions using the $ operator as below.
names(pies)
## Keep only the populations with microsatellite data
MicrosatPies <- pies[,c(1,3,6,7,9,15,22,23,25,30,33:35,37,41:46,50)]
names(MicrosatPies) # FIX: was 'names(MiscrosatPies)' -- typo caused "object not found"
MicrosatPies
## Plot map ##
map("worldHires", xlim=c(-10, 45), ylim=c(43,70), col="gray90", fill=TRUE)##plots the area of the map that I am interested in
## Pie charts of DAPC cluster membership per population.  Radii encode
## allelic richness except where noted.
## UK Pies ##
add.pie(pies$HOLT,x=0.4,y=54.5,labels="",radius=0.847,edges=200,clockwise=T, col = mycol)
add.pie(pies$CAKE,x= -1.9,y=53.8,labels="",radius=0.628833334,edges=200,clockwise=T, col = mycol)
add.pie(pies$BF,x=-2,y=52.3,labels="",radius=0.715833334,edges=200,clockwise=T, col = mycol)
add.pie(pies$RM,x=2.3,y=51.,labels="",radius=0.776166667,edges=200,clockwise=T, col = mycol)
add.pie(pies$MOAT,x=4.9,y=53.3 ,labels="",radius=0.719875,edges=200,clockwise=T, col = mycol)
## Baltic Pies ##
add.pie(pies $ SK , x = 13.152523 , y = 55.550972 , labels = "" , radius = 0.959416667 , edges = 200, clockwise = T, col = mycol)
add.pie(pies$STYV,x=14.271862,y=57.561081,labels="",radius=0.870625,edges=200,clockwise=T, col = mycol)
add.pie(pies$SD,x=12.5,y=63,labels="",radius=0.903333334,edges=200,clockwise=T, col = mycol) # FIX: was 'aadd.pie' (typo -> undefined function)
add.pie(pies $ CALK , x = 25.758348 , y = 62.262291 , labels = "" , radius = 0.713958334 , edges = 200, clockwise = T, col = mycol)
add.pie(pies $ OU , x = 25.472832 , y = 65.012375 , labels = "" , radius = 1.046208334 , edges = 200, clockwise = T, col = mycol)
## The 3 below do not have allelic richness calculated as they bought the number down too low. Have been given a standard radius, be sure to point out in the figure
add.pie(pies $ KAP , x = 18.785334 , y = 57.849045 , labels = "" , radius = 0.7 , edges = 200, clockwise = T, col = mycol)
## STEC is not included as it was ommitted from DAPC analyses
add.pie(pies $ STEC , x = 17.804031 , y = 59.601791 , labels = "" , radius = 0.676208334 , edges = 200, clockwise = T, col = mycol)
## Polish Pies ##
add.pie(pies $ TU , x = 20.5 , y = 50.5 , labels = "" , radius = 1.477666667 , edges = 200, clockwise = T, col = mycol)
add.pie(pies$POLEN,x=25.022095,y=53,labels="",radius=1.134958334,edges=200,clockwise=T, col = mycol)
## Lower Europe Pies ##
add.pie(pies $ PRO , x = 40.46814 , y = 47.457809 , labels = "" , radius = 1.279916667 , edges = 200, clockwise = T, col = mycol)
## New pies ## NEED TO DO RADIUSES!!
add.pie(pies $ COP , x = 12.55 , y = 55.77 , labels = "" , radius = 1.05 , edges = 200, clockwise = T, col = mycol)
add.pie(pies $ OBY , x = 17.79 , y = 60.21 , labels = "" , radius = 0.76 , edges = 200, clockwise = T, col = mycol)
add.pie(pies $ PED , x = 12.34 , y = 55.73 , labels = "" , radius = 0.911 , edges = 200, clockwise = T, col = mycol)
add.pie(pies $ TROM , x = 18.95 , y = 69.65 , labels = "" , radius = 0.593 , edges = 200, clockwise = T, col = mycol)
add.pie(pies $ WEN , x = 18.95 , y = 59.66 , labels = "" , radius = 1 , edges = 200, clockwise = T, col = mycol)
add.pie(pies $ GAM , x = 12.5 , y = 56 , labels = "" , radius = 1.05 , edges = 200, clockwise = T, col = mycol)
|
721ab75d64663ede7cde71b8c75d2138c905dedc
|
9aaa265c68cf699ae42fc8618c14b459c0c3fc4a
|
/plot4.R
|
d81ef20205c0bf1647319ef666b601167119989f
|
[] |
no_license
|
gchamberlain/ExData_Plotting1
|
dfba143a0514e22cd605ed9ecddb54ad5cacd499
|
6c92eac8341b95bc1b4e6d42445e619f1ba6d58a
|
refs/heads/master
| 2021-01-19T23:33:35.963814
| 2014-09-05T19:09:50
| 2014-09-05T19:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,161
|
r
|
plot4.R
|
## Create plot4.png: a 2x2 panel of household power-consumption plots
## (global active power, voltage, energy sub-metering, global reactive
## power) for 2007-02-01 and 2007-02-02.
##
## Side effects: reads "household_power_consumption.txt" from the working
## directory and writes "plot4.png" (480x480) there.
plot4 <- function (){
  ## Install if not installed and load dplyr - used for filtering
  if (!"dplyr" %in% rownames(installed.packages())) { install.packages("dplyr") }
  library(dplyr)
  ## Read data in ("?" marks missing values in this data set)
  power <- read.csv("household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE)
  ## Fix dates
  power$Date <- as.Date(power$Date, "%d/%m/%Y")
  ## Make dplyr df
  power_df <- tbl_df(power)
  ## Subset by date range (Date comparisons coerce the strings to Date)
  subset <- filter(power_df, Date >= "2007-02-01" , Date <= "2007-02-02")
  ## Combine date and time into a single timestamp
  subset$Time <- paste(subset$Date, subset$Time, sep=" ")
  subset$Time <- strptime(subset$Time, "%Y-%m-%d %H:%M:%S")
  ## Create the plots
  png("plot4.png", width = 480, height = 480)
  ## Create a 2x2 grid for the plots
  par(mfrow=c(2,2))
  ## Create plot1: global active power
  ylimits <- range(subset$Global_active_power)
  plot(subset$Time, subset$Global_active_power, type="l", xlab = "", ylab = "Global Active Power", main = "", ylim=ylimits)
  ## Create plot2: voltage
  ylimits <- range(subset$Voltage)
  plot(subset$Time, subset$Voltage, type="l", ylab = "Voltage", xlab="datetime",main = "" ,ylim=ylimits)
  ## Create plot 3: three sub-metering series on a shared y range
  ## FIX: was 'data$Sub_metering_1' -- 'data' is a base function, not the
  ## subset, so range() errored; all three columns live in 'subset'.
  ylimits <- range(c(subset$Sub_metering_1, subset$Sub_metering_2, subset$Sub_metering_3))
  plot(subset$Time, subset$Sub_metering_1, type="l", ylab = "Energy sub metering", xlab = "", col="black", ylim = ylimits)
  par(new = TRUE)
  plot(subset$Time, subset$Sub_metering_2, type="l", col="red", axes = FALSE, xlab="", ylab="", ylim = ylimits)
  par(new = TRUE)
  plot(subset$Time, subset$Sub_metering_3, type="l", col="blue", axes = FALSE, xlab="", ylab="", ylim = ylimits)
  legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lty = c(1,1,1), col = c("black", "red", "blue"), bty="n")
  ## Create plot4: global reactive power
  ylimits <- range(subset$Global_reactive_power)
  plot(subset$Time, subset$Global_reactive_power, type="l", ylab = "Global_reactive_power", xlab="datetime",main = "" ,ylim=ylimits)
  ## Close the device
  dev.off()
}
|
7be81b7bf9823e55e728213c4ccd772a032c2754
|
77f7e4ef1491dce7647f1e05aad634ad0f77d584
|
/man/validate_genes.Rd
|
3d6dc2d4adaa52d83c912926f5b052ba18ed9c59
|
[
"Apache-2.0"
] |
permissive
|
nceglia/cellassign-1
|
b4989ba398ed8780a2c9504bfdc3faf4b60c6b44
|
3a080938d1480abc0cbfd6320d0cdf284a686e90
|
refs/heads/master
| 2020-04-28T05:23:27.748646
| 2019-03-11T14:33:39
| 2019-03-11T14:33:39
| 175,018,128
| 0
| 0
|
NOASSERTION
| 2019-03-11T14:31:02
| 2019-03-11T14:31:01
| null |
UTF-8
|
R
| false
| true
| 419
|
rd
|
validate_genes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{validate_genes}
\alias{validate_genes}
\title{Extract expression matrix from expression object}
\usage{
validate_genes(Y, rho)
}
\value{
The expression matrix and marker gene matrix, with only expressed genes, for input to \code{cellassign}
}
\description{
Extract expression matrix from expression object
}
\keyword{internal}
|
56b635633fe350f803d3b21bc47ec228ae350020
|
0029e6c7a7d29a2b28ea1a143c81abe98528263f
|
/man/rcpp_fit_fun.Rd
|
e1afdf14169c662ff57dd8e5312af15e8aad7e39
|
[] |
no_license
|
xiaobeili/regsem
|
dc2869acdb2beca03f2adfca18ceaa0a3a8d7466
|
43dfc639127467943047604172679d821b78c8c5
|
refs/heads/master
| 2020-07-14T00:39:27.155602
| 2019-08-29T15:21:00
| 2019-08-29T15:21:00
| 205,191,264
| 0
| 0
| null | 2019-08-29T15:14:31
| 2019-08-29T15:14:31
| null |
UTF-8
|
R
| false
| true
| 741
|
rd
|
rcpp_fit_fun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{rcpp_fit_fun}
\alias{rcpp_fit_fun}
\title{Calculates the objective function values.}
\usage{
rcpp_fit_fun(ImpCov, SampCov, type2, lambda, gamma, pen_vec, pen_diff,
e_alpha, rlasso_pen)
}
\arguments{
\item{ImpCov}{expected covariance matrix.}
\item{SampCov}{Sample covariance matrix.}
\item{type2}{penalty type.}
\item{lambda}{penalty value.}
\item{gamma}{additional penalty for mcp and scad}
\item{pen_vec}{vector of penalized parameters.}
\item{pen_diff}{Vector of values to take deviation from.}
\item{e_alpha}{Alpha for elastic net}
\item{rlasso_pen}{Alpha for rlasso2}
}
\description{
Calculates the objective function values.
}
|
b9ee7c7e62ac5a4589b0c2da9f4799862f619a0e
|
f887889d3b7e27bd406cc53aceca9bbc59bb0789
|
/MAIN_SCRIPT.R
|
86cb3647a08cbd620fc5eaa483c87fa19b428930
|
[] |
no_license
|
Sineond/soviet_sports_magazines
|
eb6c770227a0ce58bdbdb1486cff54b559e02b9a
|
edd1dfa9d3a50efb7694b6e98cd6d746c524fcbb
|
refs/heads/master
| 2022-09-30T23:32:21.806617
| 2020-06-07T17:48:00
| 2020-06-07T17:48:00
| 270,383,362
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,040
|
r
|
MAIN_SCRIPT.R
|
# ---- Packages -------------------------------------------------------------
# One-time set-up (package installation) is separated from per-run loading.
# FIX: the original unconditionally ran install.packages("janitor"),
# update.packages("tidyselect"), remove.packages("rlang") and
# install.packages("rlang") on every execution; removing and re-installing
# rlang mid-session breaks the loaded tidyverse, so those one-off repair
# commands are no longer executed on each run.
if (!requireNamespace("janitor", quietly = TRUE)) install.packages("janitor")
library(dplyr)
library(tidytext)
library(stopwords)
library(stringr)
library(quanteda)
library(textclean)
library(qdapRegex)
library(tm)
library(tidyr)
library(readtext)
library(openxlsx)
library(ggplot2)
library(hrbrthemes)
library(janitor)

# ---- Sports dictionary ----------------------------------------------------
setwd("C:/data")  # NOTE(review): hard-coded working directory
dict <- read.xlsx("dict_analysis.xlsx")
dict$word <- gsub('[[:digit:]]+', '', dict$word)       # strip digits from terms
dict <- na.omit(dict)
dict <- dict[!(is.na(dict$word) | dict$word==""), ]    # drop empty terms
dict$word <- str_to_lower(dict$word)
# ---- Per-sport mention rates ----------------------------------------------
# The six pipelines below were identical except for the sport filtered on,
# so the shared logic is factored into one helper.  For each document in
# the corpus: tokenise 'txt.lem', drop Russian stop words, count the words
# per document, keep the dictionary terms of the given sport, and return
# one row per document with mentions per 1000 words ('proportion') and the
# 4-digit year extracted from doc_id.
# NOTE(review): the corpus object 'a' is created outside this chunk --
# confirm it has columns 'doc_id' and 'txt.lem'.
sport_proportion <- function(corpus, dict, sport_name) {
  corpus %>%
    unnest_tokens(word, txt.lem) %>%
    filter(! word %in% tm::stopwords("ru")) %>%
    group_by(doc_id) %>%
    mutate(num_of_words = n()) %>%
    filter(word %in% dict$word) %>%
    inner_join(dict) %>%
    filter(sports == sport_name) %>%
    mutate(mentions = n()) %>%
    group_by(doc_id) %>%
    summarise(mentions = unique(mentions), num_of_words = unique(num_of_words)) %>%
    mutate(proportion = (mentions/num_of_words)*1000) %>%
    mutate(year = regmatches(doc_id, gregexpr("\\d{4}", doc_id)))
}

test_soccer     <- sport_proportion(a, dict, "soccer")
test_hockey     <- sport_proportion(a, dict, "hockey")
test_basketball <- sport_proportion(a, dict, "basketball")
test_handball   <- sport_proportion(a, dict, "handball")
test_polo       <- sport_proportion(a, dict, "polo")
test_volleyball <- sport_proportion(a, dict, "volleyball")
# 'year' was produced by regmatches()/gregexpr() and is therefore a list
# column; convert it to numeric for use as a continuous plot axis.
# NOTE(review): this assumes each doc_id contains exactly one 4-digit match
# (length-1 elements) -- confirm, otherwise as.numeric() on the list fails.
test_soccer$year <- as.numeric(test_soccer$year)
test_hockey$year <- as.numeric(test_hockey$year)
test_volleyball$year <- as.numeric(test_volleyball$year)
test_handball$year <- as.numeric(test_handball$year)
test_polo$year <- as.numeric(test_polo$year)
test_basketball$year <- as.numeric(test_basketball$year)
# ---- Trend plots -----------------------------------------------------------
# The six plots below shared identical styling and differed only in data
# and title, so the styling is factored into one helper.  Each call returns
# a ggplot object at top level, printing it exactly as the original piped
# expressions did.
plot_sport_trend <- function(df, title) {
  ggplot(df, aes(x = year, y = proportion)) +
    geom_line(color = "black", size = 1.4) +
    geom_point(shape = 21, color = "black", fill = "#69b3a2", size = 7) +
    theme_ipsum() +
    theme(axis.text.x = element_text(size = rel(2))) +
    theme(axis.text.y = element_text(size = rel(2))) +
    ggtitle(title)
}

plot_sport_trend(test_soccer, "Changes in mentions of Soccer-related words in Physical Culture and Sports journal")
plot_sport_trend(test_basketball, "Changes in mentions of Basketball-related words in Physical Culture and Sports journal")
plot_sport_trend(test_handball, "Changes in mentions of Handball-related words in Physical Culture and Sports journal")
plot_sport_trend(test_volleyball, "Changes in mentions of Volleyball-related words in Physical Culture and Sports journal")
plot_sport_trend(test_polo, "Changes in mentions of Water-polo related words in Physical Culture and Sports journal")
plot_sport_trend(test_hockey, "Changes in mentions of Hockey related words in Physical Culture and Sports journal")
# ---- Team vs individual game mentions --------------------------------------
# Same token pipeline as for the sports, but filtering the dictionary on
# game 'type'.  Note 'year' is added before tokenising and then dropped
# again by summarise(); the redundant step is kept for fidelity with the
# original pipelines.
type_proportion <- function(corpus, dict, type_name) {
  corpus %>%
    mutate(year = regmatches(doc_id, gregexpr("\\d{4}", doc_id))) %>%
    unnest_tokens(word, txt.lem) %>%
    filter(! word %in% tm::stopwords("ru")) %>%
    group_by(doc_id) %>%
    mutate(num_of_words = n()) %>%
    filter(word %in% dict$word) %>%
    inner_join(dict) %>%
    filter(type == type_name) %>%
    mutate(mentions = n()) %>%
    group_by(doc_id) %>%
    summarise(mentions = unique(mentions), num_of_words = unique(num_of_words)) %>%
    mutate(proportion = (mentions/num_of_words)*1000)
}

# NOTE(review): the original script re-assigned test_soccer and test_hockey
# here with the *team* pipeline, overwriting the per-sport tables built
# earlier; this looks like a copy-paste leftover but is preserved --
# confirm whether these two assignments are still needed.
test_soccer <- type_proportion(a, dict, "team")
test_hockey <- type_proportion(a, dict, "team")

test_team       <- type_proportion(a, dict, "team")
# FIX: the original 'test_individual' pipeline ended with a dangling '%>%',
# which swallowed the following assignment and broke the script.
test_individual <- type_proportion(a, dict, "individual")

# NOTE(review): summarise() above drops 'year', so these conversions see a
# NULL column; 'year' is re-derived from doc_id further down -- confirm the
# intended order.
test_team$year <- as.numeric(test_team$year)
test_individual$year <- as.numeric(test_individual$year)
# NOTE(review): this plots test_team, but the title says "individual
# games" -- confirm which of the two is intended.
test_team %>%
  ggplot( aes(x=year, y=proportion)) +
  geom_line( color="black", size = 1.4) +
  geom_point(shape=21, color="black", fill="#69b3a2", size=7) +
  theme_ipsum() +
  theme(axis.text.x=element_text(size=rel(2))) +
  theme(axis.text.y=element_text(size=rel(2))) +
  ggtitle("Changes in mentions of individual games in Physical Culture and Sports journal")
# Re-derive 'year' from doc_id (regmatches returns a list column; the
# comparisons below rely on element-wise coercion against numerics --
# assumes one 4-digit match per doc_id, TODO confirm).
test_team$year <- regmatches(test_team$doc_id, gregexpr("\\d{4}", test_team$doc_id))
test_individual$year <- regmatches(test_individual$doc_id, gregexpr("\\d{4}", test_individual$doc_id))
# Flag Olympic years and assign each issue to one of three political periods.
test_team <- test_team %>% mutate(olymp = ifelse(year == 1936 | year == 1960 | year == 1964 | year == 1980 | year == 1984 | year == 1988, "yes", "no"))
test_individual <- test_individual %>% mutate(olymp = ifelse(year == 1936 | year == 1960 | year == 1964 | year == 1980 | year == 1984 | year == 1988, "yes", "no"))
test_team <- test_team %>% mutate(period = ifelse(year >= 1928 & year <= 1953, "stalinism",
                                                  ifelse(year >=1954 & year <=1978, "post-stalin era", "1980s and perestroika")))
test_individual <- test_individual %>% mutate(period = ifelse(year >= 1928 & year <= 1953, "stalinism",
                                                              ifelse(year >=1954 & year <=1978, "post-stalin era", "1980s and perestroika")))
# ---- Period x game-type contingency table ----------------------------------
# FIX: the team table was aggregated by 'olymp' (only 2 levels: yes/no),
# while the V1..V3 renaming below and the c('team','individual') rownames
# require the three 'period' levels -- exactly as done for test_i.
# Aggregate test_team by 'period' to match.
test_t <- select(test_team, period, mentions) %>% group_by(period) %>% summarise(amount = sum(mentions))
test_t <- t(test_t)
test_t <- as.data.frame(test_t)
# After t(), the groups appear as columns V1..V3 in alphabetical order of
# 'period': V1 = "1980s and perestroika", V2 = "post-stalin era",
# V3 = "stalinism".
test_t$stalinism <- test_t$V3
test_t$poststalin_era <- test_t$V2
test_t$perestroika_1980s <- test_t$V1
test_t$V1 <- NULL
test_t$V2 <- NULL
test_t$V3 <- NULL
test_i <- select(test_individual, period, mentions) %>% group_by(period) %>% summarise(amount = sum(mentions))
test_i <- t(test_i)
test_i <- as.data.frame(test_i)
test_i$stalinism <- test_i$V3
test_i$poststalin_era <- test_i$V2
test_i$perestroika_1980s <- test_i$V1
test_i$V1 <- NULL
test_i$V2 <- NULL
test_i$V3 <- NULL
# Drop the first transposed row (the period labels), keep the count row,
# then stack team over individual counts.
test_tt <- test_t[-1, ]
test_ii <- test_i[-1, ]
ccc <- bind_rows(test_tt, test_ii)
ccc$perestroika_1980s <- as.integer(ccc$perestroika_1980s)
ccc$stalinism <- as.integer(ccc$stalinism)
ccc$poststalin_era <- as.integer(ccc$poststalin_era)
# ---- Chi-square test, residual heat map, mosaic plot, ANOVA ----------------
# NOTE(review): this section reads like pasted console history; several
# statements are out of script order or reference undefined objects:
#   * 'cc' (next line) is never defined -- probably meant 'ccc'
#   * 'n <- chisq.test(dd)' runs before 'dd' is created below
#   * 'b' (b$residuals / b$observed) is never defined in this file
#   * 'summary.aov(res.aov)' runs before 'res.aov' is created
# Confirm intended order before sourcing this file non-interactively.
View(cc)
rownames(ccc) = c('team', 'individual')
install.packages("vcd")  # NOTE(review): one-time install left in the script
ccc
library(vcd)
n <- chisq.test(dd)
n
View(dd)
b$residuals
# Convert the count table to long format (Var1 = game type, Var2 = period,
# Freq = mentions) for chisq.test() and plotting.
dd <- as.table(as.matrix(ccc))
dd <- as.data.frame(dd)
dd$Freq <- as.numeric(dd$Freq)
View(dd)
b_resid = as.data.frame(b$residuals)
b_resid
b_count = as.data.frame(b$observed)
b_count
n_resid = as.data.frame(n$residuals)
n_resid
n_count = as.data.frame(n$observed)
n_count
# Heat map of Pearson residuals with observed counts overlaid
ggplot() +
  geom_raster(data = n_resid, aes(x = Var2, y = Var1, fill = Freq), hjust = 0.5, vjust = 0.5) +
  scale_fill_gradient2("Pearson residuals", low = "#2166ac", mid = "#f7f7f7", high = "#b2182b", midpoint = 0) +
  geom_text(data = n_count, aes(x = Var2, y = Var1, label = Freq)) +
  xlab("Period") +
  ylab("Game_type") +
  theme_bw()
print(chisq.test(dd))
print(dd)
summary.aov(res.aov)
# One-way ANOVA of mention proportion across periods, with Tukey post-hoc
res.aov <- aov(proportion ~ period, data = test_team)
summary(res.aov)
tukey <- TukeyHSD(res.aov)
tukey
plot(tukey, asp = 1)
?plot
assoc(dd, shade=TRUE, legends = TRUE, gp_axis = gpar(lty = 1))
print(ccc)
sportdict_olymp <- readLines("C:/data/spotdict_olympic.txt", encoding = "UTF-8")
sportdict_olymp <- gsub('[[:digit:]]+', '', sportdict_olymp)
sportdict_olymp <- as.character(sportdict_olymp)
sportdict_olymp <- lapply(sportdict_olymp, tolower)
test_olymp_fizra <- a %>%
unnest_tokens(word, txt.lem) %>%
filter(! word %in% tm::stopwords("ru")) %>%
group_by(doc_id) %>%
mutate(num_of_words = n()) %>%
filter(word %in% sportdict_olymp) %>%
mutate (mentions = n()) %>%
group_by(doc_id) %>%
summarise(mentions = unique(mentions), num_of_words = unique(num_of_words)) %>%
mutate(proportion = (mentions/num_of_words)*1000) %>%
mutate(year = regmatches(doc_id, gregexpr("\\d{4}", doc_id))) %>%
mutate(period = ifelse(year >= 1928 & year <= 1953, "stalinism",
ifelse(year >=1954 & year <=1978, "post-stalin era", "1980s and perestroika")))
test_olymp_soccer <- b %>%
unnest_tokens(word, txt.lem) %>%
filter(! word %in% tm::stopwords("ru")) %>%
group_by(doc_id) %>%
mutate(num_of_words = n()) %>%
filter(word %in% sportdict_olymp) %>%
mutate (mentions = n()) %>%
group_by(doc_id) %>%
summarise(mentions = unique(mentions), num_of_words = unique(num_of_words)) %>%
mutate(proportion = (mentions/num_of_words)*1000) %>%
mutate(year = regmatches(doc_id, gregexpr("\\d{4}", doc_id))) %>%
mutate(period = ifelse(year >= 1955 & year <= 1970, "Thaw and stagnation",
ifelse(year >=1971 & year <=1984, "Late USSR", "Perestroika")))
test_olymp_LA <- d %>%
unnest_tokens(word, txt.lem) %>%
filter(! word %in% tm::stopwords("ru")) %>%
group_by(doc_id) %>%
mutate(num_of_words = n()) %>%
filter(word %in% sportdict_olymp) %>%
mutate (mentions = n()) %>%
group_by(doc_id) %>%
summarise(mentions = unique(mentions), num_of_words = unique(num_of_words)) %>%
mutate(proportion = (mentions/num_of_words)*1000) %>%
mutate(year = regmatches(doc_id, gregexpr("\\d{4}", doc_id))) %>%
mutate(period = ifelse(year >= 1955 & year <= 1970, "Thaw and stagnation",
ifelse(year >=1971 & year <=1984, "Late USSR", "Perestroika")))
res.aov <- aov(proportion ~ period, data = test_olymp_fizra)
summary(res.aov)
tukey <- TukeyHSD(res.aov)
tukey
plot(tukey, asp = 2)
t_test_soc <- t.test(proportion ~ period, data = test_olymp_soccer )
t_test_LA <- t.test(proportion ~ period, data = test_olymp_LA)
summary(t_test_soc)
res.aov_soc <- aov(proportion ~ period, data = test_olymp_soccer)
summary(res.aov_soc)
tukey <- TukeyHSD(res.aov_soc)
tukey
plot(tukey, asp = 2)
res.aov_LA <- aov(proportion ~ period, data = test_olymp_LA)
summary(res.aov_LA)
tukey <- TukeyHSD(res.aov_LA)
tukey
plot(tukey, asp = 1.5)
?aov
hist(test_olymp_fizra$proportion)
t_test_olymp_ind <- t.test(proportion ~ olymp, data = test_individual )
t_test_olymp_team<- t.test(proportion ~ olymp, data = test_team)
summary(t_test_olymp_ind)
|
5247b9d716de820629b7178bdbaa43a485ee47f8
|
3646e6edc7b38488a562a99d34c4f764c7163b23
|
/WTCTools/inst/tests/test.getStat.R
|
e26c309d0960635a42f2725ae70bc950d344efaa
|
[] |
no_license
|
CSJCampbell/WTCTools
|
2d98f0659ffc79edaa10a0a949799e3a577a0d64
|
e97b1af05ad2003a5a7811f7f7acc72dfddb7773
|
refs/heads/master
| 2021-06-02T17:17:12.413625
| 2017-09-21T01:18:12
| 2017-09-21T01:18:12
| 41,179,428
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,420
|
r
|
test.getStat.R
|
context("check get statistic")

# Shared fixtures for both tests: a 2x2 pairwise lookup matrix for the
# lists "a" and "b", and a small set of twelve game records.
makeLookup <- function() {
  matrix(data = c(0, 30, -30, 0), nrow = 2,
         dimnames = list(c("a", "b"), c("a", "b")))
}

makeGames <- function() {
  data.frame(round = rep(1:6, each = 2),
             player1 = rep(c("A", "B"), times = 2),
             player2 = rep(c("B", "A"), times = 2),
             result = c(0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0),
             list1 = c("a", "b", "b", "a"),
             list2 = c("b", "a", "a", "b"),
             scorefrac = c(0.1, 0.8, 0.7, 0.8, 0.3, 0.9, 0.1, 0.2, 0, 0.9, 0.8, 0.1),
             stringsAsFactors = FALSE)
}

test_that("getStat", {
  # Statistic computed from the binary result column.
  observed <- getStat(data = makeGames(), pairlookup = makeLookup(),
                      result = "result")
  expect_equal(object = observed, expected = 1.3692667430363)
})

test_that("getNewStat", {
  # Statistic recomputed with the ("a", "b") lookup entry replaced by 60.
  observed <- getNewStat(val = 60, data = makeGames(),
                         pairlookup = makeLookup(), pair = c("a", "b"))
  expect_equal(object = observed, expected = 2.14598134587704)
})
|
a35a31ba7b647b1e7d0fe3a84b766bbe48027466
|
ee386dda6eabd44b32a5aa27a7133e99fe9ddbdf
|
/R/growthUI.R
|
57cfa4d19b03fe59e1ec9b67c40796a3511925db
|
[
"MIT"
] |
permissive
|
wStockhausen/shinyTC.CohortProgression
|
e04bfef4b84be8e25bc7e3cafbc802145f0117a3
|
66dfc28485591be428bcdf53425a9371a3f1628c
|
refs/heads/master
| 2021-07-22T11:46:48.164159
| 2021-07-17T15:59:41
| 2021-07-17T15:59:41
| 178,032,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,433
|
r
|
growthUI.R
|
#'
#' @title UI for the Shiny Tanner crab growth module
#'
#' @description Function to create the UI for the Shiny Tanner crab growth module.
#'
#' @param id - a character string that uniquely identifies the growth module in the enclosing Shiny UI.
#'
#' @return A Shiny tabPanel allowing the user to change growth parameters and plot mean growth and size transition probabilities.
#'
#' @details Allows the user to change growth parameters and plot mean growth and size transition probabilities for the Tanner crab model.
#'
#' @import shiny
#'
growthUI<-function(id){
    # NOTE(review): require() inside a function is discouraged (it returns
    # FALSE instead of erroring when shiny is missing); with @import shiny
    # above this call is likely redundant in package use -- confirm.
    require(shiny);
    ns<-NS(id); #namespace function -- prefixes all input/output ids with `id`
    tabPanel(
        "Growth",
        sidebarLayout(
            sidebarPanel(
                # Buttons to re-render (refresh) or restore defaults (reset).
                wellPanel(
                    fluidRow(
                        actionButton(ns("refresh1"),"Refresh"),
                        actionButton(ns("reset1"),"Reset")
                    ) #fluidRow
                ), #wellPanel
                div(
                    id=ns("inputs"),
                    # NOTE(review): useShinyjs() needs the shinyjs package to
                    # be attached by the enclosing app -- not imported here.
                    useShinyjs(),
                    tabsetPanel(
                        # Growth-model parameter inputs: reference pre-molt
                        # sizes (zA, zB), mean post-molt sizes at those
                        # references (pA, pB), and the scale factor pBeta.
                        tabPanel(
                            "parameters",
                            wellPanel(
                                fluidRow(numericInput(ns("zA"),h5("zA: reference pre-molt size (mm CW)"),min=0,value=25)),
                                fluidRow(numericInput(ns("pA"),h5("pA: mean post-molt size (mm CW) at zA"),min=0,value=33.0888265902)),
                                fluidRow(numericInput(ns("zB"),h5("zB: reference pre-molt size (mm CW)"),min=0,value=125)),
                                fluidRow(numericInput(ns("pB"),h5("pB: mean post-molt size (mm CW) at zB"),min=0,value=166.95985413)),
                                fluidRow(numericInput(ns("pBeta"),h5("pBeta: scale factor"),min=0,value=0.811647719391)),
                                sliderInput(ns("maxZBEx"),"max bin range for growth",value=10,min=1,max=50,step=1)
                            ) #wellPanel
                        ), #parameters tabPanel
                        # Plot-control inputs: axis ranges for pre-/post-molt
                        # size, bin skipping, and the probability scale.
                        tabPanel(
                            "plot controls",
                            wellPanel(
                                fluidRow(
                                    column(
                                        12,
                                        h4("pre-molt sizes (mm CW)"),
                                        fluidRow(
                                            column(6,numericInput(ns("minX"),"min",value= 25,min=0)),
                                            column(6,numericInput(ns("maxX"),"max",value=185,min=0))
                                        ), #fluidRow
                                        fluidRow(
                                            sliderInput(ns("skip"),"number of pre-molt size bins to skip",value=0,min=0,max=10,step=1)
                                        )
                                    ) #column
                                ),
                                fluidRow(
                                    column(
                                        12,
                                        h4("post-molt sizes (mm CW)"),
                                        fluidRow(
                                            column(6,numericInput(ns("minY"),"min",value= 25,min=0)),
                                            column(6,numericInput(ns("maxY"),"max",value=185,min=0))
                                        ) #fluidRow
                                    ) #column
                                ), #fluidRow
                                sliderInput(ns("scale"),"probability scale",value=10,min=1,max=50,step=1)
                            ) #wellPanel
                        ) #controls tabPanel
                    ) #tabsetPanel
                ) #div
            ), #sidebarPanel
            # Main panel: size-transition probability plot rendered by the
            # matching server module (output id "pltPrG").
            mainPanel(
                fluidRow(
                    column(
                        12,
                        h3("Size transition probabilities"),
                        plotOutput(ns("pltPrG"))
                    ) #column
                ) #fluidRow
                # Mean-growth plot currently disabled:
                # fluidRow(
                #     column(
                #         12,
                #         h3("Mean growth"),
                #         plotOutput(ns("pltMnG"))
                #     ) #column
                # ) #fluidRow
            )#mainPanel
        ) #sidebarLayout
    ) #tabPanel
}
|
c05a27d4f94720df70b09c57a61d9572176cf572
|
39119a34464e2235b2e94a5e5513d803db480c15
|
/lmDetMCD.R
|
24db941258e995e03fda3086d4f28db01cb4d9bb
|
[] |
no_license
|
kopyakova/tas
|
bf90a5c5e8c9b66a5bcc4092f914bc2bf9b83bda
|
7d329938d60af9deac33a8bd57e1dde70b86f333
|
refs/heads/master
| 2020-12-21T17:21:23.604217
| 2020-02-09T19:59:29
| 2020-02-09T19:59:29
| 236,502,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,372
|
r
|
lmDetMCD.R
|
## Robust simple linear regression based on the deterministic MCD.
# Input:
#  x ........ explanatory variable (single predictor)
#  y ........ response variable
#  alpha .... proportion of observations to be used for the subset size in
#             the MCD estimator (default 0.5)
#  ... ...... reserved for future options (currently unused)
# Output: a *named* list with components
#  coefficients .... estimated regression coefficients (intercept, slope)
#                    derived from the deterministic MCD location/scatter
#  fitted.values ... fitted values for all observations in the data
#  residuals ....... residuals for all observations in the data
#  MCD ............. entire output of robustbase::covMcd()
lmDetMCD <- function(x, y, alpha = 0.5, ...) {
  # Number of observations; taken from y so it is correct even if x is
  # later generalized to a matrix of predictors.
  n <- length(y)
  data <- cbind(x, y)
  # Robust location and scatter via the deterministic MCD.
  MCD <- covMcd(data, alpha, nsamp = "deterministic")
  mu_x <- MCD$center[1]
  mu_y <- MCD$center[2]
  sigma_xx <- MCD$cov[1, 1]
  sigma_xy <- MCD$cov[1, 2]
  # Plug-in estimates: slope = Sxx^{-1} Sxy, intercept from robust centers.
  beta <- solve(sigma_xx) %*% sigma_xy
  intercept <- mu_y - mu_x * beta
  coefficients <- c(intercept, beta)
  # Fitted values from the design matrix [1, x]; residuals on all points.
  fitted.values <- cbind(rep(1, n), x) %*% coefficients
  residuals <- y - fitted.values
  # BUG FIX: the original returned an *unnamed* list, so callers accessing
  # result$coefficients etc. (as documented above) silently got NULL.
  list(coefficients = coefficients,
       fitted.values = fitted.values,
       residuals = residuals,
       MCD = MCD)
}
|
af923fbedb9eaabab5a074d885ec1560497a6708
|
cc30f2417f0bb348a7603406ef993995e9fa8a7d
|
/tests/testthat/test for parent_sel.R
|
4069486071d7b854c555bdd812bc854dea22355e
|
[] |
no_license
|
lalalaeat/Genetic-Algorithm-of-Multivariate-Regression
|
9d674139a8c6340cda0f8ad349d3c1853ca74974
|
7b63f2726fea42bca37a6cb24d19d1701f43228c
|
refs/heads/master
| 2021-01-06T19:57:16.635232
| 2020-02-18T21:27:14
| 2020-02-18T21:27:14
| 241,469,417
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 154
|
r
|
test for parent_sel.R
|
context("Test for parent_sel")

test_that("Scores is an appropriate vector", {
  # `scores` must be created by earlier setup in the test environment.
  expect_true(is.numeric(scores))
  # BUG FIX: is.infinite() is vectorised, and expect_false() requires a
  # single logical value, so the original call errored whenever
  # length(scores) > 1. Collapse with any() to test the whole vector.
  expect_false(any(is.infinite(scores)))
})
|
afb2289476cda37642092a92fff0a5742d0fa8ef
|
477d8d072d7f7e8a58b5b818f300f537243f366d
|
/src/BAMreport.R
|
2510c8ca8e9d06c6a210512a98d934ad279e868e
|
[
"MIT"
] |
permissive
|
shulp2211/BAMreport
|
ddec0b1bbec2d5537aa82be0e304983a0ff5a8be
|
7f492494cc6823e663c33d572815b0f0567dca2e
|
refs/heads/master
| 2020-09-25T12:33:00.129868
| 2017-10-10T18:45:59
| 2017-10-10T18:45:59
| 226,006,020
| 3
| 1
|
MIT
| 2019-12-05T03:14:05
| 2019-12-05T03:14:04
| null |
UTF-8
|
R
| false
| false
| 16,665
|
r
|
BAMreport.R
|
#!/usr/bin/env Rscript
# BAMreport: render a multi-page PDF report of BAM quality metrics from
# a set of precomputed text/statistics files passed as --arg=value flags.
library(MASS)
library(grid)
library(gtable)
library(gridExtra)
## Default when nothing is passed: show the help text.
args = commandArgs(trailingOnly = TRUE)
if(length(args) == 0){
  args = c("--help")
}
## Help section: print usage and exit without saving the workspace.
if("--help" %in% args) {
  cat("
Prepare a report for the input BAM file.
Arguments:
--stats=someValue - char, name of file with the stats
--output=someValue - char, name of the output PDF file
--rlens=someValue - char, name of the file with the read length distr.
--rnucs=someValue - char, name of the file with nucleotide distr.
--rqual=someValue - char, name of the file with quality distr.
--insrt=someValue - char, name of the file with insert-length distr.
--rcovs=someValue - char, name of the file with coverage distr.
--gccov=someValue - char, name of the file with the GC coverage info.
--fclip=someValue - char, name of the file with 5' clipping info.
--mm=someValue - char, name of the file with mismatch info.
--indel=someValue - char, name of the file with indel info.
--verifybamid=someValue - char, name of the file with verifyBamId info.
--help - print this text
Example:
./BAMreport.R --stats=stats.txt --output=report.pdf \n\n")
  q(save="no")
}
## Parse arguments (we expect the form --arg=value).
# For each argument string, drop the leading "--" and split at "=" into a
# (name, value) character pair; returns a list of such pairs.
parseArgs <- function(x) {
  strsplit(sub("^--", "", x), "=")
}
# Assemble parsed (name, value) pairs into a named list: column V1 holds
# the option names, V2 the values.
argsDF = as.data.frame(do.call("rbind", parseArgs(args)))
argsL = as.list(as.character(argsDF$V2))
names(argsL) = argsDF$V1
# Set defaults. Only --output has a real default value; for every other
# option a missing argument stays NULL (assigning NULL to an absent list
# element is a no-op -- the checks are kept to make the supported option
# names explicit).
if (is.null(argsL$stats)) {
  argsL$stats <- NULL
}
if (is.null(argsL$output)) {
  argsL$output <- "report.pdf"
}
if (is.null(argsL$rlens)) {
  argsL$rlens <- NULL
}
if (is.null(argsL$rnucs)) {
  # BUG FIX: the original assigned `argsL$rnucss` (typo with a double s)
  # instead of `argsL$rnucs`.
  argsL$rnucs <- NULL
}
if (is.null(argsL$rqual)) {
  argsL$rqual <- NULL
}
if (is.null(argsL$insrt)) {
  argsL$insrt <- NULL
}
if (is.null(argsL$rcovs)) {
  argsL$rcovs <- NULL
}
if (is.null(argsL$gccov)) {
  argsL$gccov <- NULL
}
if (is.null(argsL$fclip)) {
  argsL$fclip <- NULL
}
if (is.null(argsL$mm)) {
  argsL$mm <- NULL
}
if (is.null(argsL$indel)) {
  argsL$indel <- NULL
}
if (is.null(argsL$verifybamid)) {
  argsL$verifybamid <- NULL
}
# Open the output PDF device; all subsequent plots go into this file.
pdf(argsL$output, width = 14, height = 10)
# create the stats page
# ---------------------
# Render the plain-text stats file line by line onto an empty plot page,
# walking downward one text line per file line.
if (!is.null(argsL$stats)) {
  stats = argsL$stats
  fileConn = file(stats)
  data = readLines(fileConn)
  close(fileConn)
  plot.new()
  indx = 0
  for (l in data) {
    mtext(l, side = 3, line = indx, adj = 0)
    indx = indx - 1
  }
}
# copy the output from verifyBamId
# --------------------------------
# Render the selfSM table from verifyBamId as a gridExtra table with a
# title row added above and an (empty) footnote row below.
if (!is.null(argsL$verifybamid)) {
  verifybamid = argsL$verifybamid
  df = read.table(verifybamid, comment.char = "!", header = F)
  names(df) = c("property", "value")
  table = tableGrob(df)
  title = textGrob("selfSM - verifyBamId",gp=gpar(fontsize=15))
  footnote = textGrob("", x=0, hjust=0, gp=gpar( fontface="italic"))
  padding = unit(0.5,"line")
  table = gtable_add_rows(table,
                          heights = grobHeight(title) + padding,
                          pos = 0)
  table = gtable_add_rows(table,
                          heights = grobHeight(footnote)+ padding)
  table = gtable_add_grob(table, list(title, footnote),
                          t=c(1, nrow(table)), l=c(1,2),
                          r=ncol(table))
  grid.newpage()
  grid.draw(table)
}
# create the read length distribution
# -----------------------------------
# Input columns (by position): V1 = read number (1/2), V2 = length,
# V3 = count. One barplot per read; read 2 is skipped for single-end data
# (all read-2 counts zero).
if (!is.null(argsL$rlens)) {
  data = read.table(argsL$rlens)
  tmp = data[which(data$V1 == "2"),]
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  if (max(tmp$V3) == 0) {
    par(mfrow = c(1,1), oma = c(0, 0, 3, 0))
  } else {
    par(mfrow = c(1,2), oma = c(0, 0, 3, 0))
  }
  # for the first read
  plotdata = data[which(data$V1 == "1"),]
  total = sum(as.numeric(plotdata$V3))
  mp = barplot(plotdata$V3/total,
               xlab = "Length of read",
               ylab = "Fraction of read1's",
               axes = F,
               main = "Read 1")
  index = seq(0, max(plotdata$V2), 10)
  index[1] = 1
  axis(1, at = mp[index], labels = index)
  axis(2)
  if (max(tmp$V3) > 0) {
    # for the second read
    plotdata = data[which(data$V1 == "2"),]
    total = sum(as.numeric(plotdata$V3))
    mp = barplot(plotdata$V3/total,
                 xlab = "Length of read",
                 ylab = "Fraction of read2's",
                 axes = F,
                 main = "Read 2")
    index = seq(0, max(plotdata$V2), 10)
    index[1] = 1
    axis(1, at = mp[index], labels = index)
    axis(2)
  }
  mtext("Read length distribution", outer = TRUE, cex = 1.5)
}
# create the nucleotide distribution
# -----------------------------------
# Input columns: V1 = read number, V3..V7 = per-position counts of
# A, C, G, T, N, V8 = total reads covering the position. One bar panel
# per nucleotide, stacked vertically, one column per read.
if (!is.null(argsL$rnucs)) {
  colors = c("red", "blue", "yellow", "green", "purple")
  data = read.table(argsL$rnucs)
  tmp = data[which(data$V1 == "2"),]
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  if (max(tmp$V8) == 0) {
    par(mfcol = c(5,1), oma = c(0, 0, 3, 0))
  } else {
    par(mfcol = c(5,2), oma = c(0, 0, 3, 0))
  }
  # for the first read
  plotdata = data[which(data$V1 == "1" & data$V8 != 0),]
  rA = plotdata$V3/plotdata$V8
  rC = plotdata$V4/plotdata$V8
  rG = plotdata$V5/plotdata$V8
  rT = plotdata$V6/plotdata$V8
  rN = plotdata$V7/plotdata$V8
  yl = max(c(rA,rC,rG,rT,rN))
  barplot(rA, ylim = c(0,yl),main = "A",ylab = "",xlab = "",col = colors[1])
  barplot(rC, ylim = c(0,yl),main = "C",ylab = "",xlab = "",col = colors[2])
  barplot(rG, ylim = c(0,yl),main = "G",ylab = "",xlab = "",col = colors[3])
  barplot(rT, ylim = c(0,yl),main = "T",ylab = "",xlab = "",col = colors[4])
  barplot(rN, ylim = c(0,yl),main = "N",ylab = "",xlab = "",col = colors[5])
  if (max(tmp$V8) != 0) {
    # for the second read
    plotdata = data[which(data$V1 == "2" & data$V8 != 0),]
    rA = plotdata$V3/plotdata$V8
    rC = plotdata$V4/plotdata$V8
    rG = plotdata$V5/plotdata$V8
    rT = plotdata$V6/plotdata$V8
    rN = plotdata$V7/plotdata$V8
    yl = max(c(rA,rC,rG,rT,rN))
    barplot(rA,ylim = c(0,yl),main= "A",ylab = "",xlab = "",col = colors[1])
    barplot(rC,ylim = c(0,yl),main= "C",ylab = "",xlab = "",col = colors[2])
    barplot(rG,ylim = c(0,yl),main= "G",ylab = "",xlab = "",col = colors[3])
    barplot(rT,ylim = c(0,yl),main= "T",ylab = "",xlab = "",col = colors[4])
    barplot(rN,ylim = c(0,yl),main= "N",ylab = "",xlab = "",col = colors[5])
  }
  mtext("Nucleotide composition variation (Read1, Read2)", outer=TRUE, cex = 1.5)
}
# create the quality distribution
# -------------------------------
# Input columns: V1 = read number, V3..V7 = precomputed quantiles per
# position. Boxplots are drawn with bxp() directly from these quantiles
# (whisker limits forced to 0..60) rather than from raw data.
if (!is.null(argsL$rqual)) {
  data = read.table(argsL$rqual)
  tmp = data[which(data$V1 == "2"),]
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  if (max(tmp$V7) == 0) {
    par(mfrow = c(1,1), oma = c(0, 0, 3, 0))
  } else {
    par(mfrow = c(1,2), oma = c(0, 0, 3, 0))
  }
  # for the first read
  plotdata = data[which(data$V1 == "1"),]
  plotdata = plotdata[,3:7]
  conf = matrix(0, nrow = 2, ncol = nrow(plotdata))
  conf[1,] = 0
  conf[2,] = 60
  z = list(stats = t(as.matrix(plotdata)),
           n = rep(100, nrow(plotdata)),
           conf = conf,
           out = vector(),
           group = vector(),
           names = seq(1, nrow(plotdata)))
  bxp(z,
      outline = F,
      xlab = "Position on the read",
      ylab = "Quality value",
      axes = F,
      boxfill = rgb(255,0,0,150,maxColorValue=255),
      whisklty = 3,
      ylim = c(0, 60),
      main = "Read 1"
  )
  axis(1, at = seq(1, nrow(plotdata), 5), labels = seq(1, nrow(plotdata), 5))
  axis(2, at = seq(0, 60, 10), labels = seq(0, 60, 10))
  if (max(tmp$V7) != 0) {
    # for the second read
    plotdata = data[which(data$V1 == "2"),]
    plotdata = plotdata[,3:7]
    conf = matrix(0, nrow = 2, ncol = nrow(plotdata))
    conf[1,] = 0
    conf[2,] = 60
    z = list(stats = t(as.matrix(plotdata)),
             n = rep(100, nrow(plotdata)),
             conf = conf,
             out = vector(),
             group = vector(),
             names = seq(1, nrow(plotdata)))
    bxp(z,
        outline = F,
        xlab = "Position on the read",
        ylab = "Quality value",
        axes = F,
        boxfill = rgb(255,0,0,150,maxColorValue=255),
        whisklty = 3,
        ylim = c(0, 60),
        main = "Read 2"
    )
    axis(1,at=seq(1, nrow(plotdata), 5), labels = seq(1, nrow(plotdata), 5))
    axis(2, at = seq(0, 60, 10), labels = seq(0, 60, 10))
  }
  mtext("Quality value variation (Read1,Read2)", outer = TRUE, cex = 1.5)
}
# plot the insert length distribution
# -----------------------------------
# Input: V1 = insert length, V2 = count. Observed distribution is drawn
# together with the best-fit normal (fitted to a sample reconstructed by
# repeating each length proportionally to its frequency).
if (!is.null(argsL$insrt)) {
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  par(mfrow = c(1,1), oma = c(0, 0, 3, 0))
  colors = c(rgb(255,0,0,150,maxColorValue=255),
             rgb(0,0,255,100,maxColorValue=255))
  data = read.table(argsL$insrt)
  total = sum(as.numeric(data$V2))
  data$V2 = data$V2 / total
  xmax = quantile(rep(data$V1,data$V2*1000),.99)
  # find the best fit normal distribution
  smpl = rep(data$V1,data$V2*1000)
  fit = fitdistr(smpl, "normal")
  # NOTE(review): scaling the x grid by the fitted sd before evaluating
  # dnorm looks suspicious (the grid already spans 0..xmax) -- verify.
  x = seq(0,xmax,length=10*xmax)*fit$estimate[2]
  hx = dnorm(x, fit$estimate[1], fit$estimate[2])
  ymax = max(max(hx),max(data$V2))
  plot(data,
       type = "l",
       lwd = 3,
       col = colors[1],
       main = "Insert length distribution",
       xlab = "Insert length",
       ylab = "Fraction of properly-paired pairs",
       xlim = c(0, xmax),
       ylim = c(0, ymax))
  lines(x, hx, col = colors[2], lwd = 3)
  legend("topright",
         legend = c("observed",paste("best normal fit (",round(fit$estimate[1],2),",",round(fit$estimate[2],2), ")")),
         fill = colors,
         cex = 1.5)
}
# plot the coverage distribution
# ------------------------------
# Input: V1 = coverage depth, V2 = number of bases at that depth. The
# zero-coverage row (first row) is excluded from the fit; the normal
# density is rescaled by the fraction of covered bases.
if (!is.null(argsL$rcovs)) {
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  par(mfrow = c(1,1), oma = c(0, 0, 3, 0))
  colors = c(rgb(255,0,0,150,maxColorValue=255),
             rgb(0,0,255,100,maxColorValue=255))
  data = read.table(argsL$rcovs)
  datax = data[-1,]
  xlimit = quantile(rep(datax$V1, datax$V2), 0.98)
  bases = sum(as.numeric(data$V2))
  data$V2 = data$V2 / bases
  data = data[-1,]
  repfrac = sum(as.numeric(data$V2))
  mfac = 100 / data$V2[1]
  smpl = rep(data$V1,data$V2 * mfac)
  fit = fitdistr(smpl, "normal")
  # NOTE(review): as above, the sd-scaled x grid looks odd -- verify.
  x = seq(0,xlimit,length=100*xlimit)*fit$estimate[2]
  hx = dnorm(x, fit$estimate[1], fit$estimate[2]) * repfrac
  ymax = max(max(hx),max(data$V2))
  plot(data,
       type = "l",
       lwd = 3,
       col = colors[1],
       main = "Depth of coverage",
       xlab = "Coverage",
       ylab = "Fraction of genome covered",
       xlim = c(0, xlimit),
       ylim = c(0, ymax))
  lines(x, hx, col = colors[2], lwd = 3)
  legend("topright",
         legend = c("observed",paste("best normal fit (",round(fit$estimate[1],2),",",round(fit$estimate[2],2), ")")),
         fill = colors,
         cex = 1.5)
}
# plot the normalized coverage distribution (more informative with low cov)
# -----------------------------------------
# Histogram of per-window coverage divided by the mean coverage, with a
# normal curve (fitted to values <= 2) overlaid.
if (!is.null(argsL$gccov)) {
  data = read.table(argsL$gccov)
  coverage = data$V5
  avgcoverage = mean(coverage)
  relativecov = round(coverage / avgcoverage, 3)
  h = hist(relativecov, xlim = c(0,2), breaks = 1000)
  rel = subset(relativecov,relativecov <= 2)
  xfit = seq(0,2,length=1000)
  yfit = dnorm(xfit, mean = mean(rel), sd = sd(rel))
  yfit = yfit * max(h$counts) / max(yfit)
  lines(xfit, yfit, col="blue", lwd=2)
}
# plot the GC coverage distribution
# -------------------------------
# Scatter of per-window GC content (V4) against coverage (V5); the y axis
# is clipped at the 98th percentile of nonzero coverage.
if (!is.null(argsL$gccov)) {
  data = read.table(argsL$gccov)
  ylimit = quantile(subset(data$V5, data$V5 != 0.00), 0.98)
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  x = data$V4
  y = data$V5
  plot(x, y,
       main = "GC content vs Coverage from aligned reads",
       xlab = "GC content",
       ylab = "Coverage",
       col=rgb(0,100,0,50,maxColorValue=255),
       pch=16,
       ylim = c(0, ylimit))
}
# plot the 5' clipping positions
# ---------------------------------
# Input columns: V1 = read number, V2 = position, V3 = clipped count,
# V4 = total count at that position.
if (!is.null(argsL$fclip)) {
  data = read.table(argsL$fclip)
  tmp = data[which(data$V1 == "2"),]
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  if (max(tmp$V4) == 0) {
    par(mfrow = c(1,1), oma = c(0, 0, 3, 0))
  } else {
    par(mfrow = c(1,2), oma = c(0, 0, 3, 0))
  }
  # for the first read
  plotdata = data[which(data$V1 == "1"),]
  mp = barplot(plotdata$V3/plotdata$V4,
               xlab = "5' Clip position",
               ylab = "Fraction of read1's",
               axes = F,
               main = "Read 1")
  index = seq(0, max(plotdata$V2), 10)
  index[1] = 1
  axis(1, at = mp[index], labels = index)
  axis(2)
  if (max(tmp$V4) != 0) {
    # for the second read
    plotdata = data[which(data$V1 == "2"),]
    mp = barplot(plotdata$V3/plotdata$V4,
                 xlab = "5' Clip position",
                 ylab = "Fraction of read2's",
                 axes = F,
                 main = "Read 2")
    index = seq(0, max(plotdata$V2), 10)
    index[1] = 1
    axis(1, at = mp[index], labels = index)
    axis(2)
  }
  mtext("5' Clipping locations on aligned reads", outer = TRUE, cex = 1.5)
}
# plot the position of mismatches
# -------------------------------
# Same layout as the clipping section: fraction of reads with a mismatch
# at each read position, per read.
if (!is.null(argsL$mm)) {
  data = read.table(argsL$mm)
  tmp = data[which(data$V1 == "2"),]
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  if (max(tmp$V4) == 0) {
    par(mfrow = c(1,1), oma = c(0, 0, 3, 0))
  } else {
    par(mfrow = c(1,2), oma = c(0, 0, 3, 0))
  }
  # for the first read
  plotdata = data[which(data$V1 == "1"),]
  mp = barplot(plotdata$V3 / plotdata$V4,
               xlab = "Position on the read",
               ylab = "Fraction of read1s with mismatches",
               axes = F,
               main = "Read 1")
  index = seq(0, max(plotdata$V2), 10)
  index[1] = 1
  axis(1, at = mp[index], labels = index)
  axis(2)
  if (max(tmp$V4) != 0) {
    # for the second read
    plotdata = data[which(data$V1 == "2"),]
    mp = barplot(plotdata$V3 / plotdata$V4,
                 xlab = "Position on the read",
                 ylab = "Fraction of read2s with mismatches",
                 axes = F,
                 main = "Read 2")
    index = seq(0, max(plotdata$V2), 10)
    index[1] = 1
    axis(1, at = mp[index], labels = index)
    axis(2)
  }
  mtext("Mismatch positions (vs reference) on aligned reads", outer = TRUE, cex = 1.5)
}
# plot the position of indels
# ---------------------------------
# Same layout again: fraction of reads with an indel at each position.
if (!is.null(argsL$indel)) {
  data = read.table(argsL$indel)
  tmp = data[which(data$V1 == "2"),]
  par(cex.axis = 1.5, cex.lab = 1.5, cex.main = 1.5)
  if (max(tmp$V4) == 0) {
    par(mfrow = c(1,1), oma = c(0, 0, 3, 0))
  } else {
    par(mfrow = c(1,2), oma = c(0, 0, 3, 0))
  }
  # for the first read
  plotdata = data[which(data$V1 == "1"),]
  mp = barplot(plotdata$V3 / plotdata$V4,
               xlab = "Position on the read",
               ylab = "Fraction of read1s with indels",
               axes = F,
               main = "Read 1")
  index = seq(0, max(plotdata$V2), 10)
  index[1] = 1
  axis(1, at = mp[index], labels = index)
  axis(2)
  if (max(tmp$V4) != 0) {
    # for the second read
    plotdata = data[which(data$V1 == "2"),]
    mp = barplot(plotdata$V3 / plotdata$V4,
                 xlab = "Position on the read",
                 ylab = "Fraction of read2s with indels",
                 axes = F,
                 main = "Read 2")
    index = seq(0, max(plotdata$V2), 10)
    index[1] = 1
    axis(1, at = mp[index], labels = index)
    axis(2)
  }
  mtext("Indel positions (vs reference) on the read", outer = TRUE, cex = 1.5)
}
# Close the PDF device, flushing all pages to disk.
dev.off()
|
364a0f1f59fa0c2d4b6b6cbaae819f51fbbeeed0
|
6a7966e6d4ba1ed513c9f806572dc95cdaeb4c57
|
/Plot6.R
|
babec61dc0a9db43a9aadf987683f9c51ffc9943
|
[] |
no_license
|
norlindah/Module-4-Course-Project-2
|
8d69622925d3284ca08d8e52a8e2f0a4c1518e06
|
e8749d047f905cd2abd5f2f15d6f3cdae183c97f
|
refs/heads/master
| 2021-01-19T02:24:10.586627
| 2017-04-05T08:10:53
| 2017-04-05T08:10:53
| 87,273,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,489
|
r
|
Plot6.R
|
# Q6: Compare emissions from motor vehicle sources in Baltimore City with emissions
# from motor vehicle sources in Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
## Read files once; skip the (slow) readRDS if already in the workspace.
if(!exists("NEI")){
  NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("Source_Classification_Code.rds")
}
library(dplyr)
library(ggplot2)
# Motor-vehicle emissions: Baltimore (fips == "24510") and Los Angeles
# County (fips == "06037"), restricted to type == "ON-ROAD"; total
# Emissions summed per year.
NEIBaltimore <- summarise(group_by(filter(NEI, fips == "24510" & type =="ON-ROAD"),year), Emissions = sum(Emissions))
NEILosAngelas <- summarise(group_by(filter(NEI, fips == "06037" & type =="ON-ROAD"),year), Emissions = sum(Emissions))
# Tag each summary with its city name and stack them for faceting.
NEIBaltimore$City <- "Baltimore City"
NEILosAngelas$City <- "Los Angeles County"
both_CityVehicle <- rbind(NEIBaltimore, NEILosAngelas)
# Bar chart per year, faceted by city, with rounded totals as labels.
png("plot6.png", width=840, height=480)
g <- ggplot(both_CityVehicle,aes(x=factor(year),y=Emissions, fill=year, label = round(Emissions,2)))+
  geom_bar(aes(fill=year), stat="identity") +
  facet_grid(scales="free", space="free", .~City) +
  guides(fill=FALSE) + theme_bw() +
  labs(x="year", y=expression("Total PM"[2.5]*" Emission in Tons")) +
  labs(title=expression("Motor Vehicle Emissions in Baltimore Vs. Los Angeles City"))+
  geom_label(aes(fill = year), colour = "white", fontface = "bold")
print(g)
dev.off()
|
0c61010c8a25232f699ba993435703c52947025d
|
7069cfc271fac646ea6dcaf78d0bd494d1cb7095
|
/unused/DAscreenplotting.r
|
37cdc54505e3238566b6d21db3fce4de65adc231
|
[] |
no_license
|
chiser/T-maze-drosophila
|
8591e890d863a5f41c7feabf31bd585d07584cbc
|
f4129d5eb6220256e4c40d42035d787a5bdbba41
|
refs/heads/master
| 2021-12-25T03:11:28.559974
| 2021-09-25T19:42:29
| 2021-09-25T19:42:29
| 41,262,113
| 0
| 1
| null | null | null | null |
ISO-8859-10
|
R
| false
| false
| 6,425
|
r
|
DAscreenplotting.r
|
##### Import the T-maze results CSV (semicolon-separated, comma decimals)
# interactively via file.choose().
Tmaze <- read.csv(file.choose(), header = TRUE, sep = ";", quote = "\"",dec = "," )
# Number of experiments (rows) and of distinct fly lines (factor levels
# of the first column).
nExp <- length (Tmaze[[1]])
nGroups <- length(levels(Tmaze[[1]]))
# I get errors every time with apply functions...I forget it for the moment
#Tmaze$PI <- apply(Tmaze,1,function(Tmaze=Tmaze) (Tmaze[2]-Tmaze[4]))
#Tmaze$PI <- (Tmaze$Red-Tmaze$Dark)/(Tmaze$Red+Tmaze$Dark)
###### A less efficient way of calculating PIs: preference index
###### PI = (col3 - col5) / (col3 + col5) per row.
Tmaze$PI <- vector("numeric", length = nExp)
for(i in 1:nExp){
  Tmaze$PI[i] <- (Tmaze[[i,3]]-Tmaze[[i,5]])/(Tmaze[[i,3]]+Tmaze[[i,5]])
}
###### This is in order to make groups according to their names in the case of fly food. So that they can be assigned a different colour in the plot for instance. pmatch should do the same
#### A factor level to sort the ones with ATR and without in the experimental group and the genetic controls
Tmaze$Treatment2 <- ifelse(grepl("ATR", Tmaze[[1]], ignore.case = T), "Experimental ATR", ifelse (grepl("Co", Tmaze[[1]], ignore.case = T), "Experimental Co", "Genetic Control"))
############# Another way of doing it
#dataATR <- grep("ATR",data[[1]])
#dataCo <- grep ("Co", data[[1]])
#dataGenetic <- grep ("AUS", data[[1]])
# Per-group lookup table: treatment category and plotting colour derived
# from substrings of the group name ("TH>"/"Tdc2" = positive control,
# "ATR"/"Co" = experimental, otherwise genetic control).
idGroup <- data.frame ("Group"=levels(Tmaze[[1]]),"Treatment"= ifelse(grepl("TH>", levels(Tmaze[[1]]), ignore.case = T), "Positive Control",ifelse(grepl("Tdc2", levels(Tmaze[[1]]), ignore.case = T), "Positive Control", ifelse(grepl("ATR", levels(Tmaze[[1]]), ignore.case = T), "Experimental ATR", ifelse (grepl("Co", levels(Tmaze[[1]]), ignore.case = T), "Experimental Co", "Genetic Control")))),
                      "Colour"=ifelse(grepl("ATR", levels(Tmaze[[1]]), ignore.case = T), "darkgoldenrod", ifelse (grepl("Co", levels(Tmaze[[1]]), ignore.case = T), "darkgoldenrod1", "darkgreen")))
### Another way of making the treatments... specifying the treatment name by the last letters
#idGroup$Treatment2 <- sub('.*(?=.{4}$)', '', idGroup$Group, perl=T)
#### Genetic-line label: group name with the "ATR"/"Co" suffix stripped.
idGroup$LINE <- gsub("(ATR)", "", idGroup$Group, fixed = TRUE)
idGroup$LINE <- gsub("(Co)", "", idGroup$LINE, fixed = TRUE)
### Make medians and means for the groups in the idGroup table. With lapply I create lists, so I have to be careful; with the for loops below I create numeric vectors.
# NOTE(review): `mean <- NULL` / `median <- NULL` create variables that
# shadow base::mean / base::median and appear unnecessary -- confirm.
idGroup$mean <- NULL
mean <- NULL
idGroup$mean <- sapply(seq_len(nrow(idGroup)), function(i) {
  mean(Tmaze$PI[idGroup$Group[i]==Tmaze$Fly.line])
})
idGroup$median <- NULL
median <- NULL
idGroup$median <- sapply(seq_len(nrow(idGroup)), function(i) {
  median(Tmaze$PI[idGroup$Group[i]==Tmaze$Fly.line])
})
#### Less efficient way of calculating median and mean
#idGroup$median <- NULL
#median <- NULL
#for(i in 1:length(idGroup$Group)){
#  idGroup$median[i] <- median(Tmaze$PI[idGroup$Group[i]==Tmaze$Fly.line])
#}
#idGroup$mean <- NULL
#mean <- NULL
#for(i in 1:length(idGroup$Group)){
#  idGroup$mean[i] <- mean(Tmaze$PI[idGroup$Group[i]==Tmaze$Fly.line])
#}
##### To order the groups for plotting. This will only work nicely if I put a "1" in front of my control line so that it is put first
#idGroup <- idGroup[with(idGroup, order(Group)), ]
#Tmaze <- Tmaze[with(Tmaze, order(Fly.line)), ]
###### Ordering data by putting first the Genetic controls and then the lines with their ATR controls in a descending order by mean
library(dplyr)
idGroup$Treatment <- ordered(idGroup$Treatment, levels = c("Genetic Control", "Positive Control", "Experimental ATR", "Experimental Co"))
idGroup <- idGroup[order(idGroup$Treatment), ]
# Sort key: genetic controls first (0), positive controls (1), then the
# experimental lines (2) ordered by the per-line mean of group means.
idGroup$rank <- ifelse (idGroup$Treatment =="Positive Control", 1, ifelse (idGroup$Treatment =="Genetic Control", 0, 2))
idGroup <- idGroup %>% group_by(LINE) %>% mutate(temp=mean(mean)) %>%
  ungroup %>% arrange(rank, -temp) %>% select(-rank, -temp)
###### The merge statement in base R can perform the equivalent of inner and left joins, as well as right and full outer joins, which are unavailable in sqldf.
######
#library(sqldf)
#
###### Firstly, you can get the mean of column Mean for each group with this statement (similar to aggregate in R)
#sqldf("
#     SELECT
#     `Group` AS `Group`,
#     AVG(`Mean`) AS `GroupMean`
#     FROM idGroup
#     GROUP BY `Group`;")
###### Then it is a case of using the JOIN statement (like merge in R) to join this table to the original one, putting 'Gen' at the top and then sorting by GroupMean. I call these tables t1 and t2, join them together, and then select from them the columns I want, and sort the table.
#sqldf("
#SELECT
# t1.`Group` AS `Group`,
# t1.`Treatment` AS `Treatment`,
# t1.`Mean` AS `Mean`,
# t2.`GroupMean` AS `GroupMean`
#FROM
# (SELECT * FROM idGroup) t1
# JOIN
# (SELECT
# `Group` AS `Group`,
# AVG(`Mean`) AS `GroupMean`
# FROM idGroup
# GROUP BY `Group`) t2
# ON t1.`Group` = t2.`Group`
#ORDER BY CASE `Treatment` WHEN 'Genenetic Control' THEN 1 ELSE 2 END,
# `GroupMean` DESC,
# `Mean` DESC;
#")
#")
###### Order the Tmaze data in the way the idGroup table is ordered. It looks fine in the Global environment and in the plots. However opening the table the order isnīt there
levels <- as.character(idGroup$Group)
Tmaze$Fly.line <- factor(Tmaze$Fly.line, levels = levels)
#boxplot(data$PI ~ data$Fly.line, ylab = "PI", las=2, at =c(1,2, 4,5, 7,8, 10, 12,13, 15,16, 18,19, 21,22, 24),par(mar = c(12, 5, 4, 2) + 0.1),
#col = c(c(rep(c("darkgoldenrod","darkgoldenrod1"),3),"darkgoldenrod",rep(c("darkgoldenrod","darkgoldenrod1"),4),"darkgoldenrod")))
boxplot(Tmaze$PI ~ Tmaze$Fly.line, ylab = "PI", las =2 , ylim = c(-1,1),col= as.character(idGroup$Colour), cex.axis =1.2, cex.lab = 1.2 , par(mar = c(17, 8, 1, 5) + 0.1)) + segments(x0 = 0, y0 = 0, x1 = 30, y1 = 0, col = "blue", lwd = 1)
#boxplot(data$PI ~ data$Fly.line, ylab = "PI", mtext(n, side = 2,line = 8), las=2, at =c(1,2, 4,5, 7,8, 10, 12,13, 15,16, 18,19, 21,22, 24),par(mar = c(12, 5, 4, 2) + 0.1),
# col = c(c(rep(c("darkgoldenrod","darkgoldenrod1"),3),"darkgoldenrod",rep(c("darkgoldenrod","darkgoldenrod1"),4),"darkgoldenrod")))
|
d586b829c1be59cb4791cae0d08a06733dfd2653
|
6a5ccc17ea4cfa439538b8b0292afae3f342af60
|
/010_forecast/20161023_カオス時系列の基礎とニューラルネットワーク.R
|
8ebf2eb17f50dc41d1413aac3341ac2917a2722a
|
[] |
no_license
|
Tonoyama/website
|
a4fb1ea2f773e30a541248d7894244714055b383
|
f78d0cec4ae183cd63891b87f950a4512d481ca2
|
refs/heads/master
| 2023-02-03T08:50:18.658843
| 2020-12-19T04:56:14
| 2020-12-19T04:56:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,658
|
r
|
20161023_カオス時系列の基礎とニューラルネットワーク.R
|
# カオス時系列の基礎とニューラルネットワーク | Logics of Blue
# https://logics-of-blue.com/カオス時系列の基礎とニューラルネットワーク/
# 2016-10-23: first version
# 2018-04-25: minor code fixes, re-checked that everything runs
# 馬場真哉
# Logistic map ---------------------------------------------------------------
# Install the package if necessary
# install.packages("nonlinearTseries")
library(nonlinearTseries)
# Simulate a logistic map with the helper from nonlinearTseries
logMap <- logisticMap(
  r = 4,
  n.sample = 100,
  start = 0.4,
  n.transient = 0,
  do.plot = TRUE
)
# Example of a logistic curve (for comparison with the map)
K <- 1
b <- 3
c <- 1
x <- seq(-5, 10, 0.1)
y <- K / (1 + b * exp(-c * x))
plot(y ~ x, type = "l", main="ロジスティック曲線")
# How embed() works: each row pairs a value with its lagged value
1:5
embed(1:5, 2)
# Shift (lag) the data
lagData <- embed(y, 2)
lagData[1:5,]
# Take differences -- this is the "increment" of y
diffData <- lagData[,1] - lagData[,2]
# Plot the increment of y (vertical axis) against y itself (horizontal axis)
plot(
  diffData ~ lagData[,1],
  ylab = "yの増加値",
  xlab = "yの値",
  main = "yの増加値の変化"
)
# Lag the logistic-map data and plot the current vs the previous value
lagLogMap <- embed(logMap, 2)
plot(
  lagLogMap[,1] ~ lagLogMap[,2],
  ylab = "今期の値",
  xlab = "前期の値",
  main = "今期の値と前期の値の比較"
)
# Value at period 2, computed by hand
4 * 0.4 * (1 - 0.4)
logMap
# Compute the map directly from its definition
x0 <- 0.4 # initial value
x <- numeric(100) # container for the chaotic series
x[1] <- x0
r <- 4 # parameter
for(i in 2:100){
  x[i] <- r * x[i-1] * (1 - x[i-1])
}
# The results are identical
x[-1]
logMap
# Properties of the logistic map ------------------------------------------------------------
# Perturb the initial value very slightly
logMap2 <- logisticMap(
  n.sample = 100,
  start = 0.400000001,
  n.transient = 0,
  do.plot = F
)
# Compare with the logistic map started at 0.4 (sensitive dependence on
# initial conditions)
ts.plot(
  ts(logMap),
  ts(logMap2),
  col = c(1,2),
  lty = c(1,2),
  lwd = c(2,1),
  main = "初期値を変えたときの比較"
)
# Change the parameter r (periodic, non-chaotic regime)
logMap3 <- logisticMap(
  r = 3.5,
  n.sample = 100,
  start = 0.4,
  n.transient = 0,
  do.plot = T
)
# Lyapunov exponent -----------------------------------------------------------------
# Derivative of the logistic map
logMapDifferential <- function(r, x){
  # Derivative of the logistic map f(x) = r * x * (1 - x) with respect to x,
  # i.e. f'(x) = r - 2 * r * x. Vectorized over x (and r).
  r - 2 * r * x
}
# A positive Lyapunov exponent, so the series is chaotic
sum(log(abs(logMapDifferential(4, logMap))))/ 99
# This one is not chaotic (periodic variation), so the exponent is negative
sum(log(abs(logMapDifferential(3.5, logMap3))))/ 99
# Surrogate test ----------------------------------------------------------------
# The chaotic series (positive Lyapunov exponent) is significant
surrogateTest(
  time.series = logMap,
  significance = 0.05,
  K = 1,
  one.sided = FALSE,
  FUN = timeAsymmetry
)
# With Gaussian noise as input, the null hypothesis is not rejected
set.seed(1)
surrogateTest(
  time.series = rnorm(100),
  significance = 0.05,
  K = 1,
  one.sided = FALSE,
  FUN = timeAsymmetry
)
# Forecasting with an ARIMA model -----------------------------------------------------------
# Install the package if necessary
# install.packages("forecast")
library(forecast)
logMapArima <- auto.arima(
  logMap,
  ic = "aic",
  trace = T,
  stepwise = F,
  approximation = F
)
# Selected as arima(0,0,0): the series is treated as plain white noise
logMapArima
# Forecasting period 101 onwards will of course not match the true series
logMapNext <- logisticMap(
  r = 4,
  n.sample = 120,
  start = 0.4,
  n.transient = 0,
  do.plot = FALSE
)
plot(forecast(logMapArima, h=20))
lines(logMapNext)
# Forecast accuracy
f <- forecast(logMapArima, h=20)$mean
sqrt(sum((f - logMapNext[100:119])^2)/20) # RMSE
sum(abs(f - logMapNext[100:119]))/20 # MAE
accuracy(forecast(logMapArima, h=20),logMapNext[100:119])
# Forecasting with a neural network --------------------------------------------------------
set.seed(1)
logMapNnet <- nnetar(
  y = logMap,
  p = 1,
  size = 4
)
plot(forecast(logMapNnet, h=20))
lines(logMapNext)
# Neural-network forecast accuracy
accuracy(forecast(logMapNnet, h=20),logMapNext[100:119])
# Forecast only 5 periods ahead
accuracy(forecast(logMapNnet, h=5),logMapNext[100:104])
|
ba180ab8f810f319029867c06f6f45c919ab3095
|
713b7d218b003042987eaca3329cac66d262250a
|
/man/PosWM.Rd
|
e53ed2b2ad98a4cf27f069cc71fdd815e70ce839
|
[] |
no_license
|
GfellerLab/PLDpred
|
d835da6e8b4ea173e18bc62241dd56f11356ffa7
|
05913e1ada555e84a272de484b99dcbe8a779434
|
refs/heads/master
| 2021-07-05T04:47:27.477600
| 2020-10-01T06:47:48
| 2020-10-01T06:47:48
| 189,026,602
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 373
|
rd
|
PosWM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PWMgenes.R
\docType{data}
\name{PosWM}
\alias{PosWM}
\title{Position Weight Matrix per HLA-gene}
\format{object of class RData}
\usage{
data(PWMgenes)
}
\description{
Position weight matrix calculated for each gene based on HLA sequences in IMGT/HLA (Robinson et al. 2010).
}
\keyword{datasets}
|
91dcda640e8adccbe8767c599751c8e3b166f1e4
|
a7c06b7db20c3b64f97dabdc539246818331ec14
|
/experiment.R
|
b81489d11dfe66bd9be78bd8ba728d1bb1e1f29b
|
[] |
no_license
|
damarals/Trab1
|
a1d2cd30c1f811827e4c2ef0a8e0de176532df2f
|
f15f05f3da29502556c98cce5f956986b2ada371
|
refs/heads/master
| 2023-08-07T04:52:11.904618
| 2021-09-16T14:24:03
| 2021-09-16T14:24:03
| 405,504,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,602
|
r
|
experiment.R
|
# load data --------------------------------------------------------------------
# Number of sensor columns in the UCI "sensor_readings" data file (dataset 00194).
coln <- 24
u <- glue::glue('https://archive.ics.uci.edu/ml/machine-learning-databases/00194/sensor_readings_{coln}.data')
# Column names: us1..us24 sensor readings plus the class label.
da_robot_colnames <- c(paste0('us', 1:coln), 'class')
# col_types: 24 doubles followed by one character column.
da_robot <- readr::read_csv(file = u, col_names = da_robot_colnames,
                            col_types = paste0(c(rep('d', coln), 'c'), collapse = ""))
# preprocessing ----------------------------------------------------------------
# Min-max scale every numeric column to [0, 1] and normalize the class labels
# (lower snake_case) into a factor with a fixed level order.
da_robot <- da_robot |>
  dplyr::mutate_if(rlang::is_double,
                   function(x) (x - min(x))/(max(x) - min(x))) |>
  dplyr::mutate(class = stringr::str_replace_all(class, '-', '_'),
                class = stringr::str_to_lower(class),
                class = factor(class, levels = c('move_forward', 'slight_right_turn',
                                                 'sharp_right_turn', 'slight_left_turn')))
# implementing models ----------------------------------------------------------
## Adaline
### adaline model with sigmoid activation
adaline <- function(formula, data, epochs = 200, lr = 0.1) {
  # Multiclass Adaline trained with normalized LMS updates and a sigmoid output.
  #
  # formula: model formula, response ~ predictors.
  # data:    data frame / tibble holding the response and the predictors.
  # epochs:  number of passes over the (reshuffled) data.
  # lr:      learning rate of the normalized LMS step.
  #
  # Returns an object of class "adaline" holding the weight matrix of the best
  # epoch (lowest within-epoch squared error), the formula and the class labels.

  # one output unit per class; W is (p + 1) x n_class (bias row included)
  response <- all.vars(formula)[1]
  nclass <- length(unique(data[[response]]))
  W <- matrix(rnorm(nclass * ncol(data)), ncol = nclass)

  # Best-epoch tracker across ALL epochs. Bug fix: it used to be reset inside
  # the epoch loop, compared in the wrong direction (EQ > best_EQ) and updated
  # a misspelled variable, so the returned weights were simply the last epoch's.
  best_EQ <- Inf
  best_epoch <- list(W = W)

  for (ep in seq_len(epochs)) {
    ## shuffle the data before each pass
    data <- data[sample(seq_len(nrow(data)), nrow(data), replace = FALSE), ]
    ## response in dummy (one-hot) format; strip the variable-name prefix
    ## that model.matrix() prepends to the factor levels
    truth <- model.matrix(as.formula(paste0("~ -1 + ", response)), data = data)
    colnames(truth) <- sub(response, "", colnames(truth), fixed = TRUE)

    EQ <- 0  # accumulated squared error of this epoch
    for (rw in seq_len(nrow(data))) {
      # data specification and prediction
      X <- model.matrix(formula, data = data[rw, ])  # [1 Var1 Var2 ...] (1 x (p+1))
      Ui <- X %*% W                                  # (1 x (p+1)) * ((p+1) x n_class)
      Yi <- 1 / (1 + exp(-Ui))                       # sigmoid activation
      # error quantification
      Ei <- truth[rw, ] - Yi
      EQ <- EQ + 0.5 * sum(Ei^2)
      # normalized LMS learning step
      W <- W + lr * (t(X) / as.numeric(X %*% t(X))) %*% Ei
    }

    # keep the weights of the epoch with the lowest squared error
    if (EQ < best_EQ) {
      best_epoch <- list(W = W)
      best_EQ <- EQ
    }
  }

  # wrap as an S3 object so predict() dispatches to predict.adaline
  structure(list(W = best_epoch$W, formula = formula,
                 labels = colnames(truth)), class = "adaline")
}
### adaline predict function
predict.adaline <- function(object, newdata) {
  # Predict class labels for 'newdata' from a fitted "adaline" object:
  # per row, pick the output unit with the largest sigmoid activation.
  design <- model.matrix(object$formula, data = newdata)  # adds the bias column
  activation <- 1 / (1 + exp(-(design %*% object$W)))     # (n x n_class) sigmoid scores
  winners <- max.col(activation)                          # index of the strongest unit per row
  factor(object$labels[winners], levels = object$labels)
}
## PL
### logistic perceptron model
perceptron_log <- function(formula, data, epochs = 200, lr = 0.5, mom = 0.3) {
  # Single-layer logistic perceptron trained by gradient descent with momentum.
  #
  # formula/data/epochs: as in adaline().
  # lr:  learning rate; mom: momentum coefficient.
  #
  # Returns an object of class "pl" holding the weight matrix of the best
  # epoch (lowest within-epoch squared error), the formula and the class
  # labels. Bug fix: the best-epoch tracker used to be reset every epoch,
  # compared in the wrong direction, and the tracked weights were never
  # returned (the final-epoch W was), unlike adaline() and mlp().

  response <- all.vars(formula)[1]
  nclass <- length(unique(data[[response]]))
  W <- matrix(rnorm(nclass * ncol(data)), ncol = nclass)
  W_old <- W  # previous weights, for the momentum term

  best_EQ <- Inf
  best_epoch <- list(W = W)

  for (ep in seq_len(epochs)) {
    ## shuffle the data before each pass
    data <- data[sample(seq_len(nrow(data)), nrow(data), replace = FALSE), ]
    ## response in dummy (one-hot) format
    truth <- model.matrix(as.formula(paste0("~ -1 + ", response)), data = data)
    colnames(truth) <- sub(response, "", colnames(truth), fixed = TRUE)

    EQ <- 0  # accumulated squared error of this epoch
    for (rw in seq_len(nrow(data))) {
      # data specification and prediction
      X <- model.matrix(formula, data = data[rw, ])  # [1 Var1 Var2 ...] (1 x (p+1))
      Ui <- X %*% W
      Yi <- 1 / (1 + exp(-Ui))                       # sigmoid activation
      # error quantification
      Ei <- truth[rw, ] - Yi
      EQ <- EQ + 0.5 * sum(Ei^2)
      # local gradients (small additive offset avoids vanishing updates)
      Di <- 0.5 * (1 - Yi^2) + 0.05
      DDi <- Ei * Di
      # gradient step with momentum
      W_aux <- W
      W <- W + lr * t(X) %*% DDi + mom * (W - W_old)
      W_old <- W_aux
    }

    # keep the weights of the epoch with the lowest squared error
    if (EQ < best_EQ) {
      best_epoch <- list(W = W)
      best_EQ <- EQ
    }
  }

  structure(list(W = best_epoch$W, formula = formula,
                 labels = colnames(truth)), class = "pl")
}
### logistic perceptron predict function
predict.pl <- function(object, newdata) {
  # Predict class labels from a fitted "pl" (logistic perceptron) object:
  # the output unit with the largest sigmoid activation wins.
  design <- model.matrix(object$formula, data = newdata)  # bias column included
  scores <- 1 / (1 + exp(-(design %*% object$W)))         # sigmoid activations
  factor(object$labels[max.col(scores)], levels = object$labels)
}
## LMQ
### LMQ model with tikhonov (lambda)
lmq <- function(formula, data, lambda = 1e-3) {
  # Least-mean-squares (ridge) multiclass classifier: solves the regularized
  # normal equations (X'X + lambda I) W = X'Y, where Y is the one-hot response
  # matrix. The Tikhonov term lambda keeps X'X invertible.
  #
  # Returns an object of class "lmq" with the weight matrix, formula and labels.

  # data specification (no intercept / bias column);
  # drop = FALSE keeps X a matrix even with a single predictor
  X <- model.matrix(formula, data = data)[, -1, drop = FALSE]
  # response in dummy (one-hot) format; strip the variable-name prefix
  response <- all.vars(formula)[1]
  y <- model.matrix(as.formula(paste0("~ -1 + ", response)), data = data)
  colnames(y) <- sub(response, "", colnames(y), fixed = TRUE)
  # Solve the linear system directly instead of forming the explicit inverse:
  # solve(A, b) is faster and numerically more stable than solve(A) %*% b.
  W <- solve(crossprod(X) + diag(lambda, ncol(X)), crossprod(X, y))
  structure(list(W = W, formula = formula, labels = colnames(y)), class = "lmq")
}
### LMQ predict function
predict.lmq <- function(object, newdata) {
  # Predict class labels from a fitted "lmq" object: the label with the
  # largest linear discriminant score wins.
  # drop = FALSE keeps X a matrix when there is a single predictor; the old
  # code collapsed it to a vector, breaking the matrix product below.
  X <- model.matrix(object$formula, data = newdata)[, -1, drop = FALSE]
  scores <- X %*% object$W                 # (n x p) %*% (p x n_class)
  estimate <- object$labels[max.col(scores)]
  factor(estimate, levels = object$labels)
}
## MLP
### multilayer perceptron model
mlp <- function(formula, data, size = 64, epochs = 300, lr = 0.2, mom = 0.4) {
  # One-hidden-layer perceptron (sigmoid activations in both layers) trained
  # by backpropagation with momentum.
  #
  # formula: response ~ predictors; data: data frame / tibble.
  # size:    number of hidden units; epochs: passes over the shuffled data.
  # lr:      learning rate; mom: momentum coefficient.
  #
  # Returns an object of class "mlp" with input-layer weights W
  # ((p + 1) x size), output-layer weights H ((size + 1) x n_class), the
  # formula and the class labels, taken from the best epoch.

  response <- all.vars(formula)[1]

  ## input-layer weights (bias row included)
  W <- matrix(rnorm(size * ncol(data)), ncol = size)
  W_old <- W
  ## output-layer weights (bias row included)
  nclass <- length(unique(data[[response]]))
  H <- matrix(rnorm(nclass * (size + 1)), ncol = nclass)
  H_old <- H

  # Best-epoch tracker. Bug fix: previously reset inside the epoch loop,
  # compared in the wrong direction (EQ > best_EQ) and updating a misspelled
  # variable, so the "best" weights were effectively those of the last epoch.
  best_EQ <- Inf
  best_epoch <- list(W = W, H = H)

  for (ep in seq_len(epochs)) {
    ## shuffle the data before each pass
    data <- data[sample(seq_len(nrow(data)), nrow(data), replace = FALSE), ]
    ## response in dummy (one-hot) format
    truth <- model.matrix(as.formula(paste0("~ -1 + ", response)), data = data)
    colnames(truth) <- sub(response, "", colnames(truth), fixed = TRUE)

    EQ <- 0  # accumulated squared error of this epoch
    for (rw in seq_len(nrow(data))) {
      # forward pass --------------------------------------------------------
      X <- model.matrix(formula, data = data[rw, ])  # [1 Var1 Var2 ...] (1 x (p+1))
      ## hidden layer
      Ui <- X %*% W                                   # (1 x (p+1)) * ((p+1) x size)
      Zi <- 1 / (1 + exp(-Ui))                        # sigmoid activation
      ## output layer
      Z <- cbind(1, Zi)                               # [1 Z1 Z2 ...] (1 x (size+1))
      Uk <- Z %*% H                                   # (1 x (size+1)) * ((size+1) x n_class)
      Yk <- 1 / (1 + exp(-Uk))                        # sigmoid activation
      # error quantification
      Ek <- truth[rw, ] - Yk
      EQ <- EQ + 0.5 * sum(Ek^2)
      # backward pass (local gradients; small offsets avoid vanishing updates)
      ## output layer
      Dk <- Yk * (1 - Yk) + 0.01
      DDk <- Ek * Dk
      ## hidden layer (the bias row of H carries no gradient back)
      Di <- Zi * (1 - Zi) + 0.01
      DDi <- Di * DDk %*% t(H[-1, ])
      # weight updates with momentum ----------------------------------------
      ## output layer
      H_aux <- H
      H <- H + lr * t(Z) %*% DDk + mom * (H - H_old)
      H_old <- H_aux
      ## hidden layer (the doubled learning rate is kept from the original)
      W_aux <- W
      W <- W + 2 * lr * t(X) %*% DDi + mom * (W - W_old)
      W_old <- W_aux
    }

    # keep the weights of the epoch with the lowest squared error
    if (EQ < best_EQ) {
      best_epoch <- list(W = W, H = H)
      best_EQ <- EQ
    }
  }

  structure(list(W = best_epoch$W, H = best_epoch$H, formula = formula,
                 labels = colnames(truth)), class = "mlp")
}
### mlp predict function
predict.mlp <- function(object, newdata) {
  # Predict class labels from a fitted "mlp" object by running the forward
  # pass and taking, per row, the output unit with the largest activation.
  design <- model.matrix(object$formula, data = newdata)  # [1 predictors]
  ## hidden layer
  hidden <- 1 / (1 + exp(-(design %*% object$W)))         # sigmoid activations
  ## output layer (prepend the bias column)
  out <- 1 / (1 + exp(-(cbind(1, hidden) %*% object$H)))
  factor(object$labels[max.col(out)], levels = object$labels)
}
# useful functions -------------------------------------------------------------
## get metrics function
# Evaluate a named list of fitted models on a common test set.
#
# models:  named list of model objects, each with a predict() method
#          returning a factor over the same class levels.
# da_test: data frame of test observations passed to predict().
# truth:   factor of true class labels for da_test.
#
# Returns a tibble with one row per model (id in the `model` column),
# the overall accuracy, and one precision column per class.
get_metrics <- function(models, da_test, truth) {
  purrr::map_dfr(models, function(model) {
    estimate <- predict(model, da_test) # prediction
    cm <- table(truth, estimate) # confusion matrix (rows = truth, cols = predicted)
    # get metrics
    accuracy <- sum(diag(cm))/sum(cm)
    # per-class precision: correct predictions over all predictions of that class
    precision_by_class <- diag(cm)/colSums(cm)
    precision_by_class[is.nan(precision_by_class)] <- 0 # fix division by zero (class never predicted)
    # store metrics in a dataframe
    metrics <- tibble::tibble(accuracy) |>
      tibble::add_column(dplyr::bind_rows(precision_by_class))
    return(metrics)
  }, .id = 'model')
}
# run experiment ---------------------------------------------------------------
## settings for parallel processing (multiple iterations at the same time)
# Everything the workers need must be exported explicitly: the data, the four
# training functions, their predict methods, and the metrics helper.
globals <- list('da_robot' = da_robot, 'get_metrics' = get_metrics,
                'adaline' = adaline, 'perceptron_log' = perceptron_log,
                'lmq' = lmq, 'mlp' = mlp, 'predict.adaline' = predict.adaline,
                'predict.pl' = predict.pl, 'predict.lmq' = predict.lmq,
                'predict.mlp' = predict.mlp)
future::plan(future::multisession, workers = 5) # 5 parallel R sessions
## loop 100x and assign results to `da_experiment`
da_experiment <- furrr::future_map_dfr(1:100, function(seed) {
  ## data split 80/20 (train/test), stratified by class; the iteration index
  ## doubles as the RNG seed so every repetition is reproducible
  set.seed(seed)
  da_split <- rsample::initial_split(da_robot, prop = 0.8, strata = "class")
  da_train <- rsample::training(da_split)
  da_test <- rsample::testing(da_split)
  ## apply models in train
  mod_ada <- adaline(formula = class ~ ., data = da_train)
  mod_pl <- perceptron_log(formula = class ~ ., data = da_train)
  mod_lmq <- lmq(formula = class ~ ., data = da_train)
  mod_mlp <- mlp(formula = class ~ ., data = da_train)
  ## collect metrics in test
  metrics <- get_metrics(models = list('ada' = mod_ada, 'pl' = mod_pl,
                                       'lmq' = mod_lmq, 'mlp' = mod_mlp),
                         da_test = da_test, truth = da_test$class)
  return(metrics)
}, .id = 'seed', .progress = TRUE,
.options = furrr::furrr_options(seed = TRUE, globals = globals))
# write da_metrics in a .csv file
fs::dir_create('data')
readr::write_csv(da_experiment, 'data/experiment.csv')
|
683f0942a1e1a8e0d2ddda37411e99b42d0def78
|
02fe930e2f9c76fea9643d013e98ab937724579f
|
/R/humanIDterms.R
|
36375b295190fb0cca7b4ce55794e42df88b8834
|
[] |
no_license
|
sturkarslan/DuffyTools
|
ec751c1b58db5bef30ccc76f6e2afbc26c317d7d
|
10c540eaabdda27c14ddc9e27096e52444a2a67c
|
refs/heads/master
| 2016-08-04T23:58:02.110791
| 2014-11-26T18:09:35
| 2014-11-26T18:09:35
| 27,187,826
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 835
|
r
|
humanIDterms.R
|
# humanIDterms.R - turn human GENE_IDs into two extra columns ENTREZ_ID and common NAME
getHumanIDterms <- function( geneIDs) {
  # Split human GENE_ID strings of the form {commonname:GInumber:chromosome:location}
  # into the common gene name and the numeric GI/Entrez identifier.
  #
  # geneIDs: character vector (coerced) of gene identifiers.
  # Returns a list with character vectors "GENE_NAME" and "ENTREZ_ID";
  # ENTREZ_ID is "" where no valid numeric ID could be extracted.
  geneIDs <- as.character( geneIDs)
  # Pull the digits that follow ":GI"; sub() returns the input unchanged when
  # the pattern does not match, which the validity check below catches.
  ginum <- sub( "(^.+:GI)([0-9]+)(:?.*$)", "\\2", geneIDs)
  # Everything before the first ":" is the common name (non-greedy match).
  nam <- sub( "(^.+?)(:.*$)", "\\1", geneIDs)
  # Verify the extracted ID is all digits. Bug fix: the previous check used
  # as.integer(), which silently blanked valid IDs above 2^31 - 1 (overflow
  # to NA); a regex check has no such limit. NA inputs are blanked as before.
  ginum[ is.na( ginum) | !grepl( "^[0-9]+$", ginum)] <- ""
  out <- list( "GENE_NAME"=nam, "ENTREZ_ID"=ginum)
  return( out)
}
addHumanIDterms <- function( mydf, idColumn="GENE_ID") {
  # Prepend the parsed GENE_NAME / ENTREZ_ID columns (see getHumanIDterms)
  # to 'mydf', reading the raw identifiers from column 'idColumn'.
  # Returns 'mydf' unchanged (after a console notice) when that column is absent.
  hasColumn <- idColumn %in% colnames(mydf)
  if ( ! hasColumn) {
    cat( "\nHuman GeneID column not found: ", idColumn, "\nFound: ", colnames(mydf))
    return( mydf)
  }
  parsed <- getHumanIDterms( mydf[[ idColumn]])
  cbind( as.data.frame( parsed), mydf, stringsAsFactors=FALSE)
}
|
884992f8e0af8360096d08569fbac29dbd75b679
|
775b84bf87a7ea3a4ac509ae6b8d9832b8b0093e
|
/plot1.R
|
1cdcf45329c0fb04b8b19dd3349a21c8df0d71a5
|
[] |
no_license
|
Patrick-J-Close/ExData_Project2
|
7e75d220c6334f53b87b03ac1590e5d86cb40960
|
ccc216dee1c05d674015ab33e0d1e817a4028139
|
refs/heads/master
| 2016-08-12T15:10:14.021836
| 2015-10-25T18:38:44
| 2015-10-25T18:38:44
| 44,480,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
plot1.R
|
##Coursera Exploratory Data Analysis Project 2
#
# NOTE(review): machine-specific working directory; adjust or remove when
# running elsewhere.
setwd("C:/Users/Patrick Close/Documents/Courses/ExploratoryDataAnalysis/Project2_Data")
#
# Load the NEI / SCC data sets only once per session (the RDS files are large).
if(!exists("NEI")){
  NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("Source_Classification_Code.rds")
}
#Plot1
#Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
#Using the base plotting system, make a plot showing the total PM2.5 emission
#from all sources for each of the years 1999, 2002, 2005, and 2008.
#
# Sum emissions over all sources, per year.
TotalByYear <- aggregate(Emissions ~ year, NEI, sum)
#
# Render the bar chart straight to a PNG device.
png("plot1.png")
barplot(height=TotalByYear$Emissions, names.arg = TotalByYear$year, xlab = "Year", ylab = "Total PM2.5 Emissions", main = "Total PM2.5 Emissions by Year")
dev.off()
|
6cb576e5fbddbca2d22a8ef7b7c7b7ff92928ecb
|
8d3d3cb60740ab51a318d130cc8ec21014098a76
|
/plot2.R
|
60d1a54aaa08bc424f8afedc7526d1716f83af91
|
[] |
no_license
|
ritesh256/ExData_Plotting1
|
18ad2876e613864d92674a3892f208b80c3127a6
|
1ffe406a06cecda6696a33072d0c92c9cf2efb2e
|
refs/heads/master
| 2022-05-14T18:40:49.930538
| 2022-05-05T12:32:25
| 2022-05-05T12:32:25
| 22,800,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 563
|
r
|
plot2.R
|
plot2 <- function() {
  # Plot Global Active Power over 2007-02-01/02 and copy the figure to plot2.png.
  # Read the raw power-consumption file; fields are ";"-separated, "?" marks NA,
  # and every column is read as character first.
  raw <- read.table("household_power_consumption.txt", sep=";", header=TRUE,
                    na.strings=c("?"), colClasses="character")
  raw$Date <- as.Date(raw$Date, "%d/%m/%Y")
  # Keep only the two days of interest.
  keep <- raw$Date >= "2007-02-01" & raw$Date <= "2007-02-02"
  twoDays <- raw[which(keep), ]
  # Combine date and time into a single timestamp for the x axis.
  twoDays$Date <- strptime(paste(twoDays$Date, twoDays$Time), format = "%Y-%m-%d %H:%M:%S")
  plot(twoDays$Date, as.numeric(twoDays$Global_active_power), type="l",
       ylab="Global Active Power (kilowatts)", xlab="")
  # Copy the on-screen plot to a PNG file and close that device.
  dev.copy(png, file = "plot2.png")
  dev.off()
}
|
6769a5388acda5ecb90151b6c16e41ca902829e7
|
fabdac62fcb0951d98d71a8ec0604900479bfba8
|
/R/tunable.R
|
574ba9098fb64e217752434d4a306922dd5a7c02
|
[
"MIT"
] |
permissive
|
r-lib/generics
|
bec3bcb8b7f55fbda1e22998b562d067e6cb268a
|
387397154bb8488a0b6dfbcbba91b781d598ee0e
|
refs/heads/main
| 2022-07-11T20:36:04.652473
| 2022-07-05T21:25:22
| 2022-07-05T21:25:22
| 137,095,400
| 53
| 18
|
NOASSERTION
| 2022-07-05T21:23:47
| 2018-06-12T15:55:26
|
R
|
UTF-8
|
R
| false
| false
| 1,515
|
r
|
tunable.R
|
#' Declare tunable parameters
#'
#' Returns information on potential hyper-parameters that can be optimized.
#'
#' @param x An object, such as a recipe, recipe step, workflow, or model
#' specification.
#' @param ... Other arguments passed to methods
#'
#'@return A tibble with a column for the parameter `name`, information on the
#' _default_ method for generating a corresponding parameter object, the
#' `source` of the parameter (e.g. "recipe", etc.), and the `component` within
#' the source. For the `component` column, a little more specificity is given
#' about the location of the parameter (e.g. "step_normalize" for recipes or
#' "boost_tree" for models). The `component_id` column contains the unique step
#' `id` field or, for models, a logical for whether the model specification
#' argument was a main parameter or one associated with the engine.
#' @details
#' For a model specification, an engine must be chosen.
#'
#' If the object has no tunable parameters, a tibble with no rows is returned.
#'
#' The information about the default parameter object takes the form of a
#' named list with an element for the function call and an optional element for
#' the source of the function (e.g. the `dials` package). For model
#' specifications, If the parameter is unknown to the underlying `tunable`
#' method, a `NULL` is returned.
#'
#' @section Methods:
#' \Sexpr[stage=render,results=rd]{generics:::methods_rd("tunable")}
#'
#' @export
tunable <- function(x, ...) {
  # S3 generic: the actual parameter information is supplied by tunable()
  # methods that extension packages register for their own classes
  # (see the roxygen Methods section above).
  UseMethod("tunable")
}
|
51640ba58577a4db8fbb28d33e3ecbdf4fdd9e33
|
9f4c069e7a174b333bf7b315853e0040eedc1eb5
|
/man/Split.Seasons.Rd
|
62aadd2e23e1f3589fe9d932d0c9f3de1b250974
|
[] |
no_license
|
amitghosh-ag/CropWatR
|
6fe4ecbf7212773243e9f7413503407acc5724d7
|
90ce6155a54d0434b0e13064f194b71e962539de
|
refs/heads/master
| 2020-05-07T08:35:53.860173
| 2019-04-09T09:58:09
| 2019-04-09T09:58:09
| 180,337,562
| 0
| 1
| null | 2019-04-09T09:56:53
| 2019-04-09T09:56:53
| null |
UTF-8
|
R
| false
| false
| 5,802
|
rd
|
Split.Seasons.Rd
|
\name{Split.Seasons}
\alias{Split.Seasons}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
Split.Seasons(Crop, Variable, Lat.long, TopSoil, Crop.Layers, PH)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Crop}{
%% ~~Describe \code{Crop} here~~
}
\item{Variable}{
%% ~~Describe \code{Variable} here~~
}
\item{Lat.long}{
%% ~~Describe \code{Lat.long} here~~
}
\item{TopSoil}{
%% ~~Describe \code{TopSoil} here~~
}
\item{Crop.Layers}{
%% ~~Describe \code{Crop.Layers} here~~
}
\item{PH}{
%% ~~Describe \code{PH} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (Crop, Variable, Lat.long, TopSoil, Crop.Layers, PH)
{
if (Variable != "Precip_")
RasterBrick <- brick(paste0(Variable, "2008.grd"))
aea.Loc.IDs <- read.csv("aea.Loc.IDs.csv")
if (Variable == "Precip_")
RasterBrick <- brick("Prism.ppt.10km.aea.grd")
DF <- as.data.frame(getValues(RasterBrick))
DF <- cbind(DF, Lat.long)
DF <- na.omit(DF)
print("BEPAM growing pixels in aea.Loc.IDs:")
print(table(c(DF$x, DF$y) \%in\% c(aea.Loc.IDs$x, aea.Loc.IDs$y)))
DF <- merge(DF, aea.Loc.IDs, by.x = c("x", "y"), by.y = c("x",
"y"), all = TRUE)
print("BEPAM growing pixels in TopSoil:")
print(table(c(DF$x, DF$y) \%in\% c(TopSoil$x, TopSoil$y)))
DF <- merge(DF, TopSoil, by.x = c("x", "y"), by.y = c("x",
"y"), all = TRUE)
print(table(DF$STATE_FIPS \%in\% PH$State_Fips))
print(unique(DF$State_name[which(!(DF$STATE_FIPS \%in\% PH$State_Fips))]))
DF <- merge(DF, PH, by.x = "STATE_FIPS", by.y = "State_Fips",
all.x = TRUE)
print(unique(DF$State_name[which(!(DF$STATE_FIPS \%in\% Crop.Layers$STATE_FIPS))]))
Droppers <- c("CountyFIPS", "HUC2", "Abbreviation", "State_name",
"Ers.region", "CRD")
Crop.Layers <- Crop.Layers[, -which(names(Crop.Layers) \%in\%
Droppers)]
DF <- merge(DF, Crop.Layers, by.x = c("x", "y", "STATE_FIPS"),
by.y = c("x", "y", "STATE_FIPS"), all.x = TRUE)
DF <- cbind(DF[4:ncol(DF)], DF[, 1:3])
DF <- DF[!is.na(DF$Planting.Main), ]
DF <- DF[!is.na(DF$Harvesting.Main), ]
DF <- DF[!is.na(DF$STATE_FIPS), ]
DF <- DF[!is.na(DF$layer.1), ]
DF$STATE_FIPS <- as.factor(DF$STATE_FIPS)
if (Variable == "MNRH_") {
DF2 <- DF
save(DF2, file = paste0(Intermediates, paste("BASE",
Crop, Variable, "MasterDF2", sep = ".")))
}
OverWinter <- max(DF$Harvesting.Main)
if (OverWinter > 365) {
DF <- as.data.frame(cbind(DF[, 1:365], DF[, 1:length(DF)]))
names(DF)[366:730] <- paste0(rep("layer."), 366:730)
}
Split.DF <- split(DF, DF$STATE_FIPS, drop = FALSE)
print("number of states growing crop:")
print(length(Split.DF))
if (Crop != "sugarcane" & Crop != "switchgrass" & Crop !=
"miscanthus" & Crop != "idle_cropland" & Crop != "pasture_grass" &
Crop != "rep_cropland") {
Split.DF <- lapply(Split.DF, drop.levels)
}
Growing.Season <- lapply(Split.DF, function(x) x[, c(x$Planting.Main[1]:x$Harvesting.Main[1],
(which(names(x) == "CountyFIPS")):(which(names(x) ==
"STATE_FIPS")))])
Fallow.Season <- lapply(Split.DF, function(x) x[, c(1:(x$Planting.Main[1] -
1), (x$Harvesting.Main[1] + 1):ncol(x))])
if (OverWinter > 365) {
GS.dates <- lapply(Growing.Season, function(x) names(x[grep("layer",
names(x))]))
GS.dates <- lapply(GS.dates, function(x) as.numeric(substr(x,
7, 9)))
GS.dates.1 <- lapply(GS.dates, function(x) paste0("layer.",
x - 365))
GS.dates.2 <- lapply(GS.dates, function(x) paste0("layer.",
x + 365))
Dups <- c(paste0("layer.", 365:730))
for (i in 1:length(Fallow.Season)) {
Fallow.Season[[i]] <- Fallow.Season[[i]][, -(which(names(Fallow.Season[[i]]) \%in\%
Dups))]
FS.check <- ncol(Fallow.Season[[i]][, grep("layer",
names(Fallow.Season[[i]]))]) + ncol(Growing.Season[[i]][,
grep("layer", names(Growing.Season[[i]]))])
if (FS.check > 365) {
Fallow.Season[[i]] <- Fallow.Season[[i]][, -(which(names(Fallow.Season[[i]]) \%in\%
GS.dates.1[[i]]))]
}
}
}
GS.length <- unlist(lapply(Growing.Season, function(x) length(x[grep("layer",
names(x))])))
FS.length <- unlist(lapply(Fallow.Season, function(x) length(x[grep("layer",
names(x))])))
print(GS.length + FS.length)
DF <- list(Variable = Variable, Growing.Season = Growing.Season,
Fallow.Season = Fallow.Season)
save(DF, file = paste0(Intermediates, paste("Base", Crop,
Variable, "MasterDF", sep = ".")))
return(DF)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
2584fa2ce295d17738bcc22042aa71a2968f79c5
|
d42d8794ec113887571abaecb926f60e9be6cf3e
|
/man/hydroplot.Rd
|
6b93e418a9883c722b6545300e738c42ee33261d
|
[] |
no_license
|
cran/hydroTSM
|
f5adb9f9541fd9e89675283a9bfc9c115eb1dad2
|
d6e5846e1604fca0b835352d6595eaf121656a99
|
refs/heads/master
| 2020-06-07T18:36:12.569941
| 2020-03-11T15:50:03
| 2020-03-11T15:50:03
| 17,696,710
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,053
|
rd
|
hydroplot.Rd
|
% File hydroplot.Rd
% Part of the hydroTSM R package, http://www.rforge.net/hydroTSM/ ;
% http://cran.r-project.org/web/packages/hydroTSM/
% Copyright 2008-2013 Mauricio Zambrano-Bigiarini
% Distributed under GPL 2 or later
\name{hydroplot}
\Rdversion{1.1}
\alias{sname2plot}
\alias{hydroplot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Hydrological time series plotting and extraction.
}
\description{
\code{hydroplot}: When \code{x} is a zoo object it plots (a maximum of) 9 graphs (lines plot, boxplots and/or histograms) of the daily, monthly, annual and/or seasonal time series. \cr
\code{sname2plot}: When \code{x} is a data frame whose columns contain the time series of several gauging stations, it takes the name of one gauging station and plots the graphs described above.
}
\usage{
hydroplot(x, FUN, na.rm=TRUE, ptype="ts+boxplot+hist", pfreq="dma",
var.type, var.unit="units", main=NULL, xlab="Time", ylab,
win.len1=0, win.len2=0, tick.tstep="auto", lab.tstep="auto",
lab.fmt=NULL, cex=0.3, cex.main=1.3, cex.lab=1.3, cex.axis=1.3,
col=c("blue", "lightblue", "lightblue"),
from, to, date.fmt= "\%Y-\%m-\%d",
stype="default", season.names=c("Winter", "Spring", "Summer", "Autumn"),
h=NULL, ...)
sname2plot(x, sname, FUN, na.rm=TRUE, ptype="ts+boxplot+hist",
pfreq="dma", var.type, var.unit="units", main=NULL,
xlab="Time", ylab=NULL, win.len1=0, win.len2=0,
tick.tstep="auto", lab.tstep="auto", lab.fmt=NULL,
cex=0.3, cex.main=1.3, cex.lab=1.3, cex.axis=1.3,
col=c("blue", "lightblue", "lightblue"),
dates=1, date.fmt = "\%Y-\%m-\%d", from, to, stype="default",
season.names=c("Winter", "Spring", "Summer", "Autumn"),
h=NULL )
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
zoo, xts or data.frame object, with columns storing the time series of one or more gauging stations.
}
\item{sname}{
ONLY required when \code{x} is a data frame. \cr
Character representing the name of a station, which have to correspond to one column name in \code{x}
}
\item{FUN}{
ONLY required when \code{var.type} is missing AND \code{pfreq != "o"}. \cr
Function that have to be applied for transforming from daily to monthly or annual time step (e.g., For precipitation \code{FUN=sum} and for temperature and flow ts, \code{FUN=mean})
}
\item{na.rm}{
Logical. Should missing values be removed before the computations?
}
\item{ptype}{
Character indicating the type of plot that will be plotted. Valid values are: \cr
-) \kbd{ts} => only time series \cr
-) \kbd{ts+boxplot} => only time series + boxplot \cr
-) \kbd{ts+hist} => only time series + histogram \cr
-) \kbd{ts+boxplot+hist} => time series + boxplot + histogram
}
\item{pfreq}{
Character indicating how many plots are desired by the user. Valid values are: \cr
-) \kbd{dma} : Daily, Monthly and Annual values are plotted \cr
-) \kbd{dm} : Daily and Monthly values are plotted \cr
-) \kbd{ma} : Monthly and Annual values are plotted \cr
-) \kbd{o} : Only the original zoo object is plotted, and \code{ptype} is changed to \kbd{ts} \cr
-) \kbd{seasonal}: Line and bloxplots of seasonal time series (see \code{stype} and \code{season.names}). When \code{pfreq} is \kbd{seasonal}, \code{ptype} is set to \kbd{ts+boxplot}
}
\item{var.type}{
ONLY required when \code{FUN} is missing. \cr
character representing the type of variable being plotted. Used for determining the function used for computing the monthly and annual values when \code{FUN} is missing. Valid values are: \cr
-) \kbd{Precipitation} => \code{FUN=sum} \cr
-) \kbd{Temperature} => \code{FUN=mean} \cr
-) \kbd{Flow} => \code{FUN=mean} \cr
}
\item{var.unit}{
Character representing the measurement unit of the variable being plotted. ONLY used for labelling the axes (e.g., "mm" for precipitation, "C" for temperature, and "m3/s" for flow.)
}
\item{main}{
Character representing the main title of the plot. If the user do not provide a title, this is created automatically as: \code{main= paste(var.type, "at", sname, sep=" ")},
}
\item{xlab}{
A title for the x axis. See \code{\link[graphics]{plot}}.
}
\item{ylab}{
A title for the y axis. See \code{\link[graphics]{plot}}.
}
\item{win.len1}{
number of days for being used in the computation of the first moving average. A value equal to zero indicates that this moving average is not going to be computed.
}
\item{win.len2}{
number of days for being used in the computation of the second moving average. A value equal to zero indicates that this moving average is not going to be computed.
}
\item{tick.tstep}{
Character indicating the time step that have to be used for putting the ticks on the time axis. Valid values are: \cr
-) \kbd{days}, \cr
-) \kbd{months}, \cr
-) \kbd{years}
}
\item{lab.tstep}{
Character indicating the time step that have to be used for putting the labels on the time axis. Valid values are: \cr
-) \kbd{days}, \cr
-) \kbd{months}, \cr
-) \kbd{years}
}
\item{lab.fmt}{
Character indicating with the format to be used for the label of the axis. See \code{format} in \code{\link[base]{as.Date}}. If not specified, it will try \kbd{"\%Y-\%m-\%d"} when \code{lab.tstep=="days"}, \kbd{"\%b"} when \code{lab.tstep=="month"}, and \kbd{"\%Y"} when \code{lab.tstep=="year"}.
}
\item{cex}{
A numerical value giving the amount by which plotting text and symbols should be magnified relative to the default. (See \code{\link[graphics]{par}}).
}
\item{cex.main}{
The magnification to be used for main titles relative to the current setting of \code{cex} (See \code{\link[graphics]{par}}).
}
\item{cex.lab}{
The magnification to be used for x and y labels relative to the current setting of \code{cex} (See \code{\link[graphics]{par}}).
}
\item{cex.axis}{
The magnification to be used for axis annotation relative to the current setting of \code{cex} (See \code{\link[graphics]{par}}).
}
\item{col}{
A character vector with 3 elements, representing the colors to be used for plotting the lines of the ts, the boxplots, and the histograms, respectively. \cr
When \code{pfreq="o"}, only one character element is needed.
See \code{\link[graphics]{plot.default}}).
}
\item{dates}{
ONLY required when \code{x} is a data frame. It is a numeric, factor or Date object indicating how to obtain the dates corresponding to the \code{sname} station. \cr
If \code{dates} is a number (default), it indicates the index of the column in \code{x} that stores the dates \cr
If \code{dates} is a factor, it is converted into Date class, using the date format specified by \code{date.fmt} \cr
If \code{dates} is already of Date class, the code verifies that the number of days in \code{dates} be equal to the number of element in \code{x}
}
\item{date.fmt}{
Character indicating the format in which the dates are stored in \var{dates}, \var{from} and \var{to}. See \code{format} in \code{\link[base]{as.Date}}. \cr
ONLY required when \code{class(dates)=="factor"} or \code{class(dates)=="numeric"}.
}
\item{from}{
OPTIONAL, used for extracting a subset of values. \cr
Character indicating the starting date for the values to be extracted. It must be provided in the format specified by \code{date.fmt}.
}
\item{to}{
OPTIONAL, used for extracting a subset of values. \cr
Character indicating the ending date for the values to be extracted. It must be provided in the format specified by \code{date.fmt}.
}
\item{stype}{
OPTIONAL, only used when \code{pfreq=seasonal}. \cr
character, indicating which weather seasons will be used for computing the output. Possible values are: \cr
-) \kbd{default} => "winter"= DJF = Dec, Jan, Feb; "spring"= MAM = Mar, Apr, May; "summer"= JJA = Jun, Jul, Aug; "autumn"= SON = Sep, Oct, Nov \cr
-) \kbd{FrenchPolynesia} => "winter"= DJFM = Dec, Jan, Feb, Mar; "spring"= AM = Apr, May; "summer"= JJAS = Jun, Jul, Aug, Sep; "autumn"= ON = Oct, Nov
}
\item{season.names}{
OPTIONAL, only used when \code{pfreq=seasonal}. \cr
character of length 4 indicating the names of each one of the weather seasons defined by \code{stype}.These names are only used for plotting purposes
}
\item{h}{
OPTIONAL, only used when \code{pfreq=seasonal}, for plotting horizontal lines in each seasonal plot. \cr
numeric, with 1 or 4 elements, with the value used for plotting an horizontal line in each seasonal plot, in the following order: winter (DJF), spring (MAM), summer (JJA), autumn (SON).
}
\item{\dots}{
further arguments passed to the \code{plot.zoo} and \code{axis} functions or from other methods.
}
}
\details{
Plots of the daily/monthly/annual/seasonal values of the time series given as input. \cr
Depending on the value of \code{pfreq}, daily, monthly, annual and/or seasonal time series plots, boxplots and histograms are produced. \cr
Depending on the value of \code{ptype}, time series plots, boxplots and/or histograms are produced.
}
%%\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
%%}
%%\references{
%% ~put references to the literature/web site here ~
%%}
\author{
Mauricio Zambrano-Bigiarini, \email{mzb.devel@gmail.com}
}
%%\note{
%% ~~further notes~~
%%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{sname2ts}}
}
\examples{
#############
## Loading daily streamflows at the station Oca en Ona (Ebro River basin, Spain) ##
data(OcaEnOnaQts)
## 3 ts, 3 boxplots and 3 histograms
hydroplot(OcaEnOnaQts, FUN=mean, ylab= "Q", var.unit = "m3/s")
## only the original time series
hydroplot(OcaEnOnaQts, pfreq="o")
## only the year 1962 of the original time series
hydroplot(OcaEnOnaQts, pfreq="o", from="1962-01-01", to="1962-12-31")
## seasonal plots
\dontrun{
hydroplot(OcaEnOnaQts, pfreq="seasonal", FUN=mean, stype="default")
## custom season names (let's assume to be in the Southern Hemisphere)
hydroplot(OcaEnOnaQts, pfreq="seasonal", FUN=mean,
stype="default", season.names=c("Summer","Autumn", "Winter","Spring"))
}
#############
## Loading the monthly time series of precipitation within the Ebro River basin.
data(EbroPPtsMonthly)
## Plotting the monthly and annual values of precipitation at station "P9001",
## stored in 'EbroPPtsMonthly'.
sname2plot(EbroPPtsMonthly, sname="P9001", var.type="Precipitation", dates=1,
pfreq="ma")
## Plotting seasonal precipitation at station "P9001"
par(mar=c(5.1, 4.1, 4.1, 2.1))
sname2plot(EbroPPtsMonthly, sname="P9001", FUN=sum, dates=1, pfreq="seasonal",
stype="default")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{graphs}
\keyword{manip}
|
9d9942f9d97b46cdb20f5e032d48f2e2101921f2
|
ee284711423ebb0e157b460247177f4b7afbceeb
|
/scripts_git_done/Deseq2_adundanse.R
|
3f4ce6542b078982ab85dd3d865ebe446e0b47d9
|
[] |
no_license
|
crabron/temp
|
645b690c8fcb4cc8152b7c8fada3cd85a8bf4ea9
|
1dba39476db86c219191ea68052b3b34b98566a8
|
refs/heads/master
| 2021-06-25T11:35:29.299661
| 2020-09-09T01:49:19
| 2020-09-09T01:49:19
| 157,358,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,961
|
r
|
Deseq2_adundanse.R
|
# Identify taxa significantly MORE abundant across 'Repeats' groups.
#
# Fits a DESeq2 Wald test (parametric dispersion fit) on the phyloseq
# object, keeps results with adjusted p-value < 0.1 and log2 fold change
# greater than 1, and annotates the survivors with their taxonomy table.
# Returns a data frame of DESeq2 statistics plus taxonomy columns.
diagdds_more <- function(ps) {
  dds <- phyloseq_to_deseq2(ps, ~ Repeats)
  dds <- DESeq(dds, test = "Wald", fitType = "parametric")
  wald <- results(dds)
  # order() with na.last = NA drops rows whose padj is NA.
  wald <- wald[order(wald$padj, na.last = NA), ]
  hits <- wald[wald$padj < 0.1, ]
  hits <- hits[hits$log2FoldChange > 1, ]
  cbind(as(hits, "data.frame"),
        as(tax_table(ps)[rownames(hits), ], "matrix"))
}
# Companion to diagdds_more(): keeps DESeq2 Wald-test results with adjusted
# p-value < 0.1 whose log2 fold change is below 1, annotated with taxonomy.
#
# NOTE(review): the cutoff is `< 1`, which also retains mildly *increased*
# taxa (0 < log2FC < 1); if "less abundant" was intended, `< -1` would be
# the symmetric threshold -- confirm before changing.
diagdds_less <- function(ps) {
  dds <- phyloseq_to_deseq2(ps, ~ Repeats)
  dds <- DESeq(dds, test = "Wald", fitType = "parametric")
  wald <- results(dds)
  # order() with na.last = NA drops rows whose padj is NA.
  wald <- wald[order(wald$padj, na.last = NA), ]
  hits <- wald[wald$padj < 0.1, ]
  hits <- hits[hits$log2FoldChange < 1, ]
  cbind(as(hits, "data.frame"),
        as(tax_table(ps)[rownames(hits), ], "matrix"))
}
# Post-hoc exploration of a DESeq2 run.
# NOTE(review): 'sigtab', 'diagdds' and 'dds' are not defined at this point
# in the file; these lines assume objects left over from an interactive
# session -- confirm before sourcing this script top-to-bottom.

# Proportion of significant taxa per Phylum.
change.prop <-prop.table(table(sigtab$Phylum))
# Per-sample distribution of Cook's distances (outlier diagnostic).
boxplot(log10(assays(diagdds)[["cooks"]]), range=0, las=2)
cts <- counts(dds)
# Geometric means tolerant of zeros: all-zero rows get 0, otherwise the
# geometric mean of the non-zero counts.
geoMeans <- apply(cts, 1, function(row) if (all(row == 0)) 0 else exp(mean(log(row[row != 0]))))
dds <- estimateSizeFactors(dds, geoMeans=geoMeans)
cook <- dds@assays[["cooks"]]
# BUG FIX: the next line was the bare prose "For Lise:" pasted into the
# script, which is a syntax error in R; kept as a comment.
# For Lise:
# Per-group median counts joined to DESeq2 results and taxonomy.
#
# 'name' is accepted (apparently an intended output file name) but never
# used; it is kept for backward compatibility with the existing calls below.
diagdds_Lise <- function(ps, name){
  diagdds <- phyloseq_to_deseq2(ps, ~ Repeats)
  # BUG FIX: the original called results() on an unfitted DESeqDataSet,
  # which errors ("couldn't find results. you should first run DESeq()").
  # Fit the model first, matching diagdds_more()/diagdds_less().
  diagdds <- DESeq(diagdds, test = "Wald", fitType = "parametric")
  samp <- sample_data(ps)
  dds.counts <- diagdds@assays@.xData$data$counts
  dds.counts.df <- as.data.frame(dds.counts)
  # Median count of every taxon within each 'Repeats' level; the first row
  # of the transposed aggregate holds the group names, so promote it to
  # column names and drop it.
  aggdata <- t(aggregate.data.frame(t(dds.counts.df), by = list(samp$Repeats), median))
  colnames(aggdata) <- aggdata[1, ]
  aggdata <- aggdata[-1, ]
  res <- results(diagdds)
  res.df <- as.data.frame(res)
  # Statistics + taxonomy + per-group medians, aligned by taxon rowname.
  nice <- cbind(res.df,
                as.data.frame(tax_table(ps)[rownames(res.df), ]),
                as.data.frame(aggdata)[rownames(res.df), ])
  return(nice)
}
# Run the per-site differential-abundance summaries.
diagg.var <- diagdds_Lise(ps.var, "site1_diagdds.csv")
diagg.art <- diagdds_Lise(ps.art, "site2_diagdds.csv")
diagg.pse <- diagdds_Lise(ps.pse, "site3_diagdds.csv")
diagg.sph <- diagdds_Lise(ps.sph, "site4_diagdds.csv")
# BUG FIX: a missing comma between the two arguments made this line a
# syntax error.
diagg.bac <- diagdds_Lise(ps.bac, "site5_diagdds.csv")
# The following were pasted from an interactive session; the leading ">"
# prompts made them syntax errors. Kept as plain calls.
View(diagg.var)
View(diagg.art)
View(diagg.pse)
View(diagg.sph)
View(diagg.bac)
# NOTE(review): 'sigtab', 'ps.1' and 'aggdata' are not defined at this point
# in the script; this line presumably belongs inside/after one of the
# helpers above -- confirm before running.
nice <- cbind(as.data.frame(sigtab), as.data.frame(tax_table(ps.1)[rownames(sigtab),]), as.data.frame(aggdata[rownames(sigtab),]))
# Stray sequence identifier pasted into the script (was a syntax error);
# kept as a comment.
# EF517956.1.1666
# Scatter of concentration vs ratio, labelling low-concentration samples.
ggplot(data=depth.mut, aes(log(Conc_by_RealTime), ratio)) + geom_point() + ggrepel::geom_text_repel(data=subset(depth.mut, log(depth.mut$Conc_by_RealTime) < 16 ), aes(label=ID), size = 3)
# Mean log10 Cook's distance per sample.
cooks.clean <- t(log10(assays(diagdds.ps.all.clean)[["cooks"]]))
cooks.clean <- rowMeans(cooks.clean, na.rm = TRUE)
# DESeq2 Wald test after collapsing the phyloseq object to a given
# taxonomic rank; returns the raw results table as a data frame.
#
# NOTE: the 'taxa_level' argument shadows the taxa_level() helper it is
# passed to; R resolves the call position to the function, so this works.
diagdds_taxas <- function(ps, taxa_level) {
  collapsed <- taxa_level(ps, taxa_level)
  dds <- phyloseq_to_deseq2(collapsed, ~ Repeats)
  dds <- DESeq(dds, test = "Wald", fitType = "parametric")
  as.data.frame(results(dds))
}
# DESeq2 results joined with a vegan SIMPER similarity column, taxonomy and
# per-group median counts, aligned by taxon rowname.
#
# NOTE(review): the SIMPER contrast name "s1903_In_s1903_In_Al" is
# hard-coded, so this only works for data sets containing exactly that pair
# of Description levels -- confirm before reusing.
Des.Lise <- function(ps){
# vegan-style OTU matrix and sample metadata.
otus.ps.vegan <- veganifyOTU(ps)
metadata <- as(sample_data(ps), "data.frame")
sim <- with(metadata, simper(otus.ps.vegan, Description))
# Species names and their average contribution for the hard-coded contrast.
simper <- cbind(sim$s1903_In_s1903_In_Al$species,sim$s1903_In_s1903_In_Al$average)
colnames(simper) <- c("ID","ave_sim")
simper <- as.data.frame(simper, row.names = "ID")
simper <- column_to_rownames(simper, var = "ID")
diagdds <- phyloseq_to_deseq2(ps, ~ Description)
# Zero-tolerant geometric mean used as DESeq2 size-factor reference.
gm_mean = function(x, na.rm=TRUE){
exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
}
geoMeans = apply(counts(diagdds), 1, gm_mean)
diagdds = estimateSizeFactors(diagdds, geoMeans = geoMeans)
diagdds = DESeq(diagdds, fitType="local")
samp <-sample_data(ps)
dds.counts <- diagdds@assays@.xData$data$counts
dds.counts.df <- as.data.frame(dds.counts)
# Median count of every taxon within each Description level; first row of
# the transposed aggregate holds the group names.
aggdata <- t(aggregate.data.frame(t(dds.counts.df), by=list(samp$Description), median))
colnames(aggdata) <- aggdata[1,]
aggdata <- aggdata[-1,]
res = results(diagdds)
res.df <- as.data.frame(res)
# Statistics + SIMPER column + taxonomy + per-group medians.
nice <- cbind(res.df, simper[rownames(res.df),], as.data.frame(tax_table(ps)[rownames(res.df),]), as.data.frame(aggdata)[rownames(res.df),])
return(nice)
}
# Return a new phyloseq object whose OTU table holds the DESeq2-processed
# counts (size factors from a zero-tolerant geometric mean, local fit),
# keeping the original sample data, taxonomy and tree.
Des.Norm <- function(ps) {
  dds <- phyloseq_to_deseq2(ps, ~ Repeats)
  # Geometric mean over the positive entries only; length(x) in the
  # denominator keeps zeros "counted" without producing -Inf.
  geo_mean <- function(x, na.rm = TRUE) {
    exp(sum(log(x[x > 0]), na.rm = na.rm) / length(x))
  }
  dds <- estimateSizeFactors(dds, geoMeans = apply(counts(dds), 1, geo_mean))
  dds <- DESeq(dds, fitType = "local")
  norm_counts <- as.matrix(dds@assays@.xData$data$counts)
  phyloseq(otu_table(t(norm_counts), taxa_are_rows = FALSE),
           sample_data(ps@sam_data),
           tax_table(ps@tax_table@.Data),
           phy_tree(ps@phy_tree))
}
# DESeq2 analysis at a chosen taxonomic rank: collapse the phyloseq object
# with taxa_level(), fit DESeq2 (zero-tolerant geometric-mean size factors,
# local fit), and return the results table joined with taxonomy and
# per-Description median counts.
Des.Tax <- function(ps, Taxa) {
  ps <- taxa_level(ps, Taxa)
  dds <- phyloseq_to_deseq2(ps, ~ Description)
  geo_mean <- function(x, na.rm = TRUE) {
    exp(sum(log(x[x > 0]), na.rm = na.rm) / length(x))
  }
  dds <- estimateSizeFactors(dds, geoMeans = apply(counts(dds), 1, geo_mean))
  dds <- DESeq(dds, fitType = "local")
  meta <- sample_data(ps)
  raw_counts <- as.data.frame(dds@assays@.xData$data$counts)
  # Median count per taxon within each Description level; the first row of
  # the transposed aggregate carries the group names.
  medians <- t(aggregate.data.frame(t(raw_counts), by = list(meta$Description), median))
  colnames(medians) <- medians[1, ]
  medians <- medians[-1, ]
  res_df <- as.data.frame(results(dds))
  cbind(res_df,
        as.data.frame(tax_table(ps)[rownames(res_df), ]),
        as.data.frame(medians)[rownames(res_df), ])
}
# DESeq2 analysis at a chosen taxonomic rank.
#
# The original body was a byte-for-byte duplicate of Des.Tax(); delegate to
# it so there is a single implementation to maintain. Interface and
# behavior are unchanged.
Des.Phylo <- function(ps, Taxa){
  Des.Tax(ps, Taxa)
}
# Variance-stabilising transformation and PCoA ordination of the ps.1903
# data set. NOTE(review): this section assumes 'ps.1903' and
# 'ps.1903.varstab' already exist in the workspace.
ps.1903 <- prune_taxa(taxa_sums(ps.1903) > 0, ps.1903)
diagddsraw = phyloseq_to_deseq2(ps.1903, ~ Description)
# NOTE(review): 'iagdds' looks like a typo for 'diagdds', but it is used
# consistently on the next line, so the code still runs.
iagdds = estimateSizeFactors(diagddsraw, type="poscounts")
GPdds = estimateDispersions(iagdds, fitType = "local")
otu_table(ps.1903.varstab) <- otu_table(t(getVarianceStabilizedData(GPdds)), taxa_are_rows = FALSE)
# Stricter pruning (> 10 total counts) before the VST used for ordination.
ps.1903.mod <- prune_taxa(taxa_sums(ps.1903) > 10, ps.1903)
diagdds <- phyloseq_to_deseq2(ps.1903.mod, ~ Description)
# Zero-tolerant geometric mean for size-factor estimation.
gm_mean = function(x, na.rm=TRUE){
exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
}
geoMeans = apply(counts(diagdds), 1, gm_mean)
diagdds = estimateSizeFactors(diagdds, geoMeans = geoMeans)
pst <- varianceStabilizingTransformation(diagdds, fitType="mean")
# Clip negative VST values to zero before rebuilding the OTU table.
pst.dimmed <- t(as.matrix(assay(pst)))
pst.dimmed[pst.dimmed < 0.0] <- 0.0
ps.varstab.mod <- ps.1903.mod
otu_table(ps.varstab.mod) <- otu_table(pst.dimmed, taxa_are_rows = FALSE)
# Bray-Curtis PCoA with normal-ellipse overlay.
ordination.b <- ordinate(ps.varstab.mod, "PCoA", "bray")
p <- plot_ordination(ps.varstab.mod, ordination.b, type="sample", color="Al", shape="Inoculation", title="PCoA - Bray", axes = c(1,2) ) + theme_bw() + theme(text = element_text(size = 14)) + geom_point(size = 3)
p + stat_ellipse( type="norm", alpha=0.7)
# Repeat of the VST on the UNpruned ps.1903.
# NOTE(review): 'geoMeans' below was computed on the pruned ps.1903.mod, so
# its length may not match this unpruned object -- verify before running.
diagdds <- phyloseq_to_deseq2(ps.1903, ~ Description)
diagdds = estimateSizeFactors(diagdds, geoMeans = geoMeans)
pst <- varianceStabilizingTransformation(diagdds, fitType="mean")
pst.dimmed <- t(as.matrix(assay(pst)))
pst.dimmed[pst.dimmed < 0.0] <- 0.0
ps.varstab.mod <- ps.1903.mod
otu_table(ps.varstab.mod) <- otu_table(pst.dimmed, taxa_are_rows = FALSE)
# DESeq2 analysis by 'Description' using poscounts size factors and a local
# dispersion fit; returns results joined with taxonomy and per-group
# median counts.
Des.Al <- function(ps) {
  dds <- phyloseq_to_deseq2(ps, ~ Description)
  dds <- estimateSizeFactors(dds, type = "poscounts")
  dds <- estimateDispersions(dds, fitType = "local")
  dds <- DESeq(dds)
  meta <- sample_data(ps)
  raw_counts <- as.data.frame(dds@assays@.xData$data$counts)
  # Median count per taxon within each Description level; first row of the
  # transposed aggregate carries the group names.
  medians <- t(aggregate.data.frame(t(raw_counts), by = list(meta$Description), median))
  colnames(medians) <- medians[1, ]
  medians <- medians[-1, ]
  res_df <- as.data.frame(results(dds))
  cbind(res_df,
        as.data.frame(tax_table(ps)[rownames(res_df), ]),
        as.data.frame(medians)[rownames(res_df), ])
}
# Final variance-stabilising transformation of 'ps': poscounts size
# factors, local dispersion fit, negative VST values clipped to zero, and
# the transformed matrix written back into a copy of the phyloseq object.
diagdds = phyloseq_to_deseq2(ps, ~ Description)
diagdds = estimateSizeFactors(diagdds, type="poscounts")
diagdds = estimateDispersions(diagdds, fitType = "local")
pst <- varianceStabilizingTransformation(diagdds)
# Samples as rows after transposing the assay matrix.
pst.dimmed <- t(as.matrix(assay(pst)))
pst.dimmed[pst.dimmed < 0.0] <- 0.0
ps.varstab <- ps
otu_table(ps.varstab) <- otu_table(pst.dimmed, taxa_are_rows = FALSE)
|
0568877202a475cf15aeccfe9f8f05cab6a6dbcf
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/GLMcat/R/TravelChoiceDoc.R
|
5fd5a61569e9617ddc804b181652f381b0b7a17f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,322
|
r
|
TravelChoiceDoc.R
|
#' Travel Mode Choice
#'
#' The data set contains 210 observations on mode choice for travel between
#' Sydney and Melbourne, Australia. Each individual appears once per
#' available transportation mode, with the chosen mode flagged in 'choice'.
#'
#' @docType data
#' @usage data(TravelChoice)
#'
#' @format{A dataframe containing :
#' \describe{
#' \item{indv}{Id of the individual}
#' \item{mode}{Available options: air, train, bus or car}
#' \item{choice}{A logical vector; \code{TRUE} marks the transportation mode chosen by the traveler}
#' As category-specific variables:
#' \item{invt}{Travel time in vehicle}
#' \item{gc}{Generalized cost measure}
#' \item{ttme}{Terminal waiting time for plane, train and bus; 0 for car}
#' \item{invc}{In-vehicle cost}
#' As case-specific variables:
#' \item{hinc}{Household income}
#' \item{psize}{Traveling group size in the mode chosen}
#' }
#' }
#'
#' @keywords datasets
#'
#' @references
#' Greene, W.H. and D. Hensher (1997) \emph{Multinomial logit and discrete choice models} \emph{in}
#' Greene, W. H. (1997) \emph{LIMDEP version 7.0 user's manual revised}, Plainview, New York econometric software, Inc .
#' @source{
#' Download from on-line (18/09/2020) complements to Greene, W.H. (2011) Econometric Analysis, Prentice Hall, 7th Edition \url{http://people.stern.nyu.edu/wgreene/Text/Edition7/TableF18-2.csv}, Table F18-2.
#' }
#'
#' @examples
#' data(TravelChoice)
"TravelChoice"
|
0ec6dac42292d6ad2e8c9e5846c03bd521387d1f
|
c6b6c3c5188499033ffe39c737fe0e7c5e390a83
|
/plot.r
|
6b25da5811d1e55afe7bf2469caa226dcb0fdd23
|
[] |
no_license
|
beckymak/quandl
|
1bef499e5602843da65d24ce0580a1ea58932fd8
|
3702df4aeb8e0ec5d80e51c7ac95b178fb1eb635
|
refs/heads/master
| 2020-05-18T13:11:13.382773
| 2015-04-16T09:31:35
| 2015-04-16T09:31:35
| 33,979,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,249
|
r
|
plot.r
|
## Base plot: HSI vs S&P closes since 1990, stacked in one device.
library(Quandl)

first_date <- "1990-01-01"
last_date <- "2015-03-31"
hsi <- Quandl("YAHOO/INDEX_HSI", type = "zoo", start_date = first_date, end_date = last_date)
sp <- Quandl("YAHOO/INDEX_GSPC", type = "zoo", start_date = first_date, end_date = last_date)
# merge.zoo derives column suffixes from the argument names (Close.hsi etc.).
combined <- merge(hsi, sp)
head(combined)
par(mfrow = c(2, 1))
plot(combined$Close.hsi, type = "l", main = "HSI")
plot(combined$Close.sp, type = "l", main = "S&P")
#---
## quantmod chartSeries: one OHLC chart per index.
library(Quandl)
library(quantmod)

first_date <- "2014-01-01"
last_date <- "2015-03-31"
hsi <- Quandl("YAHOO/INDEX_HSI", type = "xts", start_date = first_date, end_date = last_date)
sp <- Quandl("YAHOO/INDEX_GSPC", type = "xts", start_date = first_date, end_date = last_date)
sci <- Quandl("YAHOO/INDEX_SSEC", type = "xts", start_date = first_date, end_date = last_date)
chartSeries(hsi)
chartSeries(sp)
chartSeries(sci)
#---
## quantmod loop assignment: download several indices programmatically.
library(Quandl)
library(quantmod)

first_date <- "2014-01-01"
last_date <- "2015-03-31"
index_names <- c("HSI", "GSPC", "SSEC")
quandl_codes <- paste0("YAHOO/INDEX_", index_names)
# lapply keeps the same request order as an explicit for loop.
quotes <- lapply(quandl_codes, function(code) {
  Quandl(code, type = "xts", start_date = first_date, end_date = last_date)
})
lapply(quotes, chartSeries) # all volume on one
|
17337aa94a237f464412ef280dda63894d15f12e
|
130b5eee29ef3b2007226ff02e1dbc0f415b989b
|
/bin/pid_pergene_ncRNA_mRNA_combo.R
|
ce3f34586de66588e272ae9fcc6407a73186f552
|
[] |
no_license
|
srm146/twilight_zone
|
7031842f5a9ddf626eda972735f50b8e16181275
|
09fa49ec11efb60920f678b0cd74127a08888d22
|
refs/heads/master
| 2022-01-19T05:13:48.067384
| 2019-05-08T01:57:06
| 2019-05-08T01:57:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,408
|
r
|
pid_pergene_ncRNA_mRNA_combo.R
|
# Compare percent-identity (PID) distributions of mRNA vs ncRNA alignment
# pairs: scatter plots with mean/mode lines, then integer-binned barplots
# and a stacked combination plot, all written to a single PDF.
# NOTE(review): the script relies on setwd() to absolute local paths and on
# precomputed *.counts files -- it is not portable as written.
#opening and storing all the required files
setwd("/media/stephmcgimpsey/GardnerLab-backup1/Refseq/Sequences/hmmalign_output")
#do the protein stuff - single & combined + totals
#.1alipidnuc.counts
#all_mcRNA_pid.counts
# Column V1 = number of pairs, V2 = PID value.
mRNAtot<-read.csv("all_mRNA_pid.counts",header=FALSE, sep='')
#####ALL THE SINGLES NEXT
setwd("/media/stephmcgimpsey/GardnerLab-backup1/Refseq/Sequences/cmalign_output")
#do the ncRNA stuff - single and combined + totals
#.1alipid.counts
#all_ncRNA_pid.counts
ncRNAtot<-read.csv("all_ncRNA_pid.counts",header=FALSE, sep='')
###ALL THE SINGLES NEXT
setwd("/media/stephmcgimpsey/GardnerLab-backup1/Refseq/Sequences")
#do the overall combo stuff - stacked mRNA/ncRNA, per gene and totals
RNAtot<-read.csv("all_mRNA_ncRNA_pid.counts",header=FALSE, sep='')
pdf(file="~/Documents/pid_breakdown_ncRNA_mRNA_combo_september.pdf")
# Three stacked panels: mRNA, ncRNA, combined.
par(mfrow=c(3,1))
#-----------------------------------------------__________------------------------------_____________________-------__________________-----------------#
##MRNA total STUFF
plot(mRNAtot$V2,mRNAtot$V1, main = "mRNA", xlab="PID %", ylab="Number of pairs per PID",pch=20,cex=0.5)
# Weighted mean PID (blue line) and the PID with the largest
# count-weighted contribution (green line).
totalmRNApairs=sum(mRNAtot$V1)
totalcomboPIDmRNA=sum(mRNAtot$V1*mRNAtot$V2)
meanPIDmRNA=totalcomboPIDmRNA/totalmRNApairs
comboPIDmRNA<-mRNAtot$V1*mRNAtot$V2
modeIndexmRNA<-which.max(comboPIDmRNA)
modePIDmRNA<-mRNAtot$V2[modeIndexmRNA]
abline(v=meanPIDmRNA, col="blue")
abline(v=modePIDmRNA, col="green")
#-----------------------------------------------__________------------------------------_____________________-------__________________-----------------#
#ncRNA total stuff (same statistics as the mRNA panel)
plot(ncRNAtot$V2,ncRNAtot$V1, main = "ncRNA", xlab="PID %", ylab="Number of pairs per PID",pch=20,cex=0.5)
totalncRNApairs=sum(ncRNAtot$V1)
totalcomboPIDncRNA=sum(ncRNAtot$V1*ncRNAtot$V2)
meanPIDncRNA=totalcomboPIDncRNA/totalncRNApairs
comboPIDncRNA<-ncRNAtot$V1*ncRNAtot$V2
modeIndexncRNA<-which.max(comboPIDncRNA)
modePIDncRNA<-ncRNAtot$V2[modeIndexncRNA]
abline(v=meanPIDncRNA, col="blue")
abline(v=modePIDncRNA, col="green")
#-----------------------------------------------__________------------------------------_____________________-------__________________-----------------#
#Combo stuff (mRNA + ncRNA together)
plot(RNAtot$V2,RNAtot$V1, main = "RNA", xlab="PID %", ylab="Number of pairs per PID",pch=20,cex=0.5)
totalRNApairs=sum(RNAtot$V1)
totalcomboPIDRNA=sum(RNAtot$V1*RNAtot$V2)
meanPIDRNA=totalcomboPIDRNA/totalRNApairs
comboPIDRNA<-RNAtot$V1*RNAtot$V2
modeIndexRNA<-which.max(comboPIDRNA)
modePIDRNA<-RNAtot$V2[modeIndexRNA]
abline(v=meanPIDRNA, col="blue")
abline(v=modePIDRNA, col="green")
#dev.off()
# Switch to single-panel layout for the barplots below.
par(mfrow=c(1,1))
# Bin combined PIDs to whole percent and sum counts per bin.
RNAtotrounded<-data.frame(round(RNAtot$V2,digits =0),RNAtot$V1)
colnames(RNAtotrounded)<-c("PID","Freq")
RNApidnames<-unique(RNAtotrounded$PID)
RNAaggregate<-aggregate(RNAtotrounded, by=list(RNAtotrounded$PID),FUN=sum)
RNAfinalbins<-data.frame(RNApidnames, RNAaggregate$Freq)
colnames(RNAfinalbins)<-c("PID","Freq")
barplot(RNAfinalbins$Freq, names.arg=RNAfinalbins$PID, main = "Summed mRNA & ncRNA", col="purple")
RNAmin<-min(RNAfinalbins$Freq)
##ncRNA (same binning for ncRNA only)
ncRNAtotrounded<-data.frame(round(ncRNAtot$V2,digits =0),ncRNAtot$V1)
colnames(ncRNAtotrounded)<-c("PID","Freq")
ncRNApidnames<-unique(ncRNAtotrounded$PID)
ncRNAaggregate<-aggregate(ncRNAtotrounded, by=list(ncRNAtotrounded$PID),FUN=sum)
ncRNAfinalbins<-data.frame(ncRNApidnames, ncRNAaggregate$Freq)
colnames(ncRNAfinalbins)<-c("PID","Freq")
barplot(ncRNAfinalbins$Freq, names.arg=ncRNAfinalbins$PID,main = "ncRNA", col="red")
ncRNAmin<-min(ncRNAfinalbins$Freq)
#mRNA (same binning for mRNA only)
mRNAtotrounded<-data.frame(round(mRNAtot$V2,digits =0),mRNAtot$V1)
colnames(mRNAtotrounded)<-c("PID","Freq")
mRNApidnames<-unique(mRNAtotrounded$PID)
mRNAaggregate<-aggregate(mRNAtotrounded, by=list(mRNAtotrounded$PID),FUN=sum)
mRNAfinalbins<-data.frame(mRNApidnames, mRNAaggregate$Freq)
colnames(mRNAfinalbins)<-c("PID","Freq")
barplot(mRNAfinalbins$Freq, names.arg=mRNAfinalbins$PID, main ="mRNA", col="blue")
mRNAmin<-min(mRNAfinalbins$Freq)
#stacked ncRNA & mRNA
# Outer-merge the two binned tables so every PID bin appears, fill missing
# counts with 0, and stack the two series per bin.
mergedncRNAmRNA<-merge(mRNAfinalbins,ncRNAfinalbins,by="PID",all=TRUE)
mergedncRNAmRNA[is.na(mergedncRNAmRNA)] <- 0
merg1<-data.frame(mergedncRNAmRNA$Freq.x,mergedncRNAmRNA$Freq.y)
merg2<-t(merg1)
par(las=1)
barplot(as.matrix(merg2), main="Combo mRNA & ncRNA",names.arg=mergedncRNAmRNA$PID, cex.names=0.5,xlab="PID",col=c("blue","red"),legend=c("mRNA","ncRNA"))
dev.off()
|
23a7fb6d587ab102a3d3d726eabb0a7af7e79879
|
31d0a769a6c7c205eabb299e1cc67caa7e863bfd
|
/R/NCRNWater_Park_Class_def.R
|
13b1a1c472814a4cd2c71cc0ffcb9259740422ac
|
[] |
no_license
|
NCRN/NCRNWater
|
832ec864dd244d351a61fba71bbd51dfb7125ef2
|
87a16069713e2ea188d8bb8a2ae0cab97a43af4f
|
refs/heads/master
| 2023-05-24T20:46:33.926355
| 2023-05-02T15:26:13
| 2023-05-02T15:26:13
| 53,692,951
| 5
| 8
| null | 2023-05-02T15:26:15
| 2016-03-11T19:53:41
|
R
|
UTF-8
|
R
| false
| false
| 1,233
|
r
|
NCRNWater_Park_Class_def.R
|
#' @title S4 Class Definition for Park object in NCRNWater
#'
#' @description An S4 class holding the water-monitoring data for a single
#' park. Data for individual monitoring sites are stored as \code{Site}
#' objects inside the \code{Sites} slot.
#' @slot ParkCode A short code to designate the park, typically an NPS 4 letter code. Stored as a length 1 character vector.
#' @slot ShortName A short name for the park. Stored as a length 1 character vector.
#' @slot LongName A long, formal name for the park. Stored as a length 1 character vector.
#' @slot Network The code for the Inventory & Monitoring network the park belongs to. Stored as a length 1 character vector.
#' @slot Sites A list of \code{Site} objects associated with the park.
#'
#' @exportClass Park
setClass("Park",
         slots = c(ParkCode  = "character",
                   ShortName = "character",
                   LongName  = "character",
                   Network   = "character",
                   Sites     = "list"),
         # Empty-but-typed defaults so new("Park") is valid.
         prototype = list(ParkCode  = character(),
                          ShortName = character(),
                          LongName  = character(),
                          Network   = character(),
                          Sites     = list()))
|
4d154d263095a076cf2a3c4a62750bc6bff8ccc9
|
bbc835ef62629e1e0df3c4df1d4984bddc622b6b
|
/R/mj_mode.R
|
7aa2b271f23d07fb63590777a30376043e800ba4
|
[] |
no_license
|
mjmarin/mjtools
|
643eb9a8a3d9b273a9f8b98e461404116082f94e
|
69a7a1855b68bf84d70d2bb761fcd5973893ad48
|
refs/heads/master
| 2021-07-23T03:52:23.119629
| 2017-11-04T10:09:56
| 2017-11-04T10:09:56
| 109,036,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
r
|
mj_mode.R
|
#' @title Mode of a numeric set
#' @description
#' \code{mj_mode} computes the statistical mode (most frequent value) of a
#' set of numbers. Ties are broken in favour of the smallest value, because
#' the candidates are scanned in sorted order.
#' @param V Vector of numbers
#' @return Most repeated sample
#'
#' @author Manuel J. Marin-Jimenez
#'
#' @examples
#' mj_mode(c(1,2,2,3,1,2))
#' @export
mj_mode <- function(V) {
  values <- sort(unique(V))
  # Frequency of each unique value, aligned with 'values'.
  counts <- tabulate(match(V, values))
  values[which.max(counts)]
}
|
2ec6dbb96819b2b61cfe730df7a799d085ec10e8
|
7dfc42a2633f4734244b025546bd0df1207d005f
|
/R/qck_raw_data.R
|
b2e90f23b742c0d2e89c35a024c5850f7689619e
|
[] |
no_license
|
rmylonas/Prots4Prots
|
1ccf29431badeffb74bfb74d5b9eee57cf34f211
|
88f144908b2229fd455ca8c797943fe20c163d9d
|
refs/heads/master
| 2021-01-21T08:50:33.181495
| 2014-12-10T17:06:27
| 2014-12-10T17:06:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,593
|
r
|
qck_raw_data.R
|
#' @name QualityCheck
#' @title Quality check
#' @description
#' Documentation page for the quality-check routines applied to raw data.
#'
#' @keywords Quality_check
#'
#' @section Introduction:
#' Data to be treated need to be checked for integrity before any further
#' analysis step is run.
#'
NULL
#' @title Report QC on raw data
#'
#' @description Report QC on raw data
#'
#' @details
#' Writes the dataset to a uniquely-named temp file, then appends an Rmd
#' section to \code{outputFile} that (when knitted) reads the temp file back
#' and draws a hierarchical-clustering heatmap of inter-sample distances.
#' TODO Use ExpressionSet object as input
#'
#' @param dataset Dataset to check. Returned unchanged (pipeline-friendly).
#' @param outputFolderTemp Temporary folder to store data generated at this
#' step of the analysis.
#' @param outputFile Report of current analysis step will be appended to
#' this Rmd file.
#' @param distMethod Distance method, for the heatmap.
#' @export
reportQualityCheckRawData <- function(dataset, outputFolderTemp, outputFile,
distMethod="euclidean") {
# Unique chunk label: timestamp plus a random 0-9999 suffix, so repeated
# calls do not produce duplicate Rmd chunk names.
execLabel <- paste(
c(format(Sys.time(), "%Y%m%d%H%M%S"), trunc(
runif(1) * 10000)),
collapse='')
# Persist the dataset so the knitted report can re-read it independently.
tempOutput <- paste(c(outputFolderTemp, '/report_heatmap_Rmd_',
execLabel,'.txt'), collapse='')
write.table(dataset, tempOutput, sep="\t")
# Generate report
cat('',
'Quality check',
'---------------------------------------------------------------------',
'',
'',
paste(
c('Checking quality of the samples by computing distances (',
distMethod,
') between them, and applying a hierarchical clustering.'),
collapse=''),
'',
paste(
c('```{r reportQualityCheckRawData', execLabel,
', echo=FALSE, fig.width=8, fig.height=8}'),
collapse=''),
'library(latticeExtra)',
'library(ggplot2)',
'library(vsn)',
'library(knitr)',
'',
'',
sep = "\n", file=outputFile, append=TRUE)
# Embed the source of displayHeatmap() into the Rmd chunk so the report is
# self-contained. NOTE(review): displayHeatmap is not defined in this file;
# it must exist in the package/environment at call time -- confirm.
cat('displayHeatmap <- ', file=outputFile, append=TRUE)
# print(displayHeatmap)
cat(paste(deparse(displayHeatmap), collapse="\n"),
file=outputFile, append=TRUE)
# Chunk body: re-read the temp file and draw the heatmap, then close the
# chunk and add a section separator.
cat(' ',
paste(
c('matrixdata <- as.matrix(read.table("',
tempOutput,
'", stringsAsFactors=FALSE))'),
collapse=''),
paste(
c('displayHeatmap(matrixdata, distMethod="',
distMethod, '")'),
collapse=''),
'```',
'',
'',
'---------------------------------------------------------------------',
'',
sep = "\n", file=outputFile, append=TRUE)
# Return the input unchanged so this step can sit in a pipeline.
return(dataset)
}
|
84b18a3a0b322efe859214969b025669bd91b7ef
|
911d4a1591a8b90514ccbe8fba37b3717761d38a
|
/R/clustering_quality.R
|
c9555e13952649d20e02cd2899e320ec6602aec4
|
[
"MIT"
] |
permissive
|
Nowosad/motifplus
|
f461dc72fee0f2d06ec5a48b3ec41ed6611e0bc8
|
ad68f47266dd067aad12c09618f4638ce6fad41e
|
refs/heads/master
| 2023-01-14T06:44:06.788452
| 2020-11-21T10:38:07
| 2020-11-21T10:38:07
| 314,783,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,861
|
r
|
clustering_quality.R
|
#' Measures a degree of mutual dissimilarity between all objects in a cluster
#'
#' Averages the Jensen-Shannon distance over all ordered pairs of a random
#' sample of rows from cluster \code{my_k}. Self-pairs contribute zero to
#' the sum and are excluded from the denominator.
#'
#' @param my_k - a cluster number (from the k column)
#' @param df - a tibble with the k column and the signature column
#' @param sample_size - size of the sample (~maxhist)
#'
#' @export
k_homogeneity = function(my_k, df, sample_size){
  cluster_rows <- df[df$k == my_k, ]
  sampled <- sample(1:nrow(cluster_rows), sample_size)
  total_dist <- 0
  n_pairs <- 0
  for (i in sampled) {
    for (j in sampled) {
      total_dist <- total_dist +
        philentropy::jensen_shannon(cluster_rows$signature[[i]],
                                    cluster_rows$signature[[j]],
                                    testNA = FALSE,
                                    unit = "log2")
      n_pairs <- n_pairs + 1
    }
  }
  # Drop the sample_size zero-distance self-pairs from the denominator.
  total_dist / (n_pairs - length(sampled))
}
#' It is an average distance between the focus cluster and all of the rest of the clusters
#'
#' For every other cluster, draws a fresh random sample from both the focus
#' cluster and that cluster, and averages the Jensen-Shannon distance over
#' all cross-cluster pairs.
#'
#' @param my_k - a cluster number (from the k column)
#' @param df - a tibble with the k column and the signature column
#' @param sample_size - size of the sample (~maxhist)
#'
#' @export
k_interdistance = function(my_k, df, sample_size){
  focus <- df[df$k == my_k, ]
  total_dist <- 0
  n_pairs <- 0
  for (other_k in setdiff(unique(df$k), my_k)) {
    other <- df[df$k == other_k, ]
    # Resample both sides for every comparison cluster, as the original did.
    sampled_focus <- sample(1:nrow(focus), sample_size)
    sampled_other <- sample(1:nrow(other), sample_size)
    for (i in sampled_focus) {
      for (j in sampled_other) {
        total_dist <- total_dist +
          philentropy::jensen_shannon(focus$signature[[i]],
                                      other$signature[[j]],
                                      testNA = FALSE,
                                      unit = "log2")
        n_pairs <- n_pairs + 1
      }
    }
  }
  total_dist / n_pairs
}
|
0a425ed069bbdafb07f8f321b897274cef6d54d2
|
6572dad13e8786a255496e0567a9cfd74715deed
|
/R/DONKI_Notifications.R
|
a266fc16d1bb222222070cf2576e2bf754d6ca84
|
[] |
no_license
|
Liu-Zhichao/nasaR
|
f6e21d75feb91ed6a4c994d3d47cb3cf8abb5494
|
3a992b4a097e0fddea616cb45b40f39741a8c531
|
refs/heads/master
| 2020-09-29T06:27:41.054893
| 2019-12-12T20:51:48
| 2019-12-12T20:51:48
| 226,975,444
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,655
|
r
|
DONKI_Notifications.R
|
#' DONKI_Notifications: Space Weather Database Of Notifications, Knowledge, Information - Notifications
#'
#' Get access to the data of Notifications.
#'
#' @param key String. Your NASA API key, you can enter your key in the function parameter, but it's not recommended. Instead, you'd better save your key in R environment and call it "NASA_TOKEN". Then the function would automatically acquire your key info.
#' @param start_date Date. Starting UTC date for Notifications search. 7 days prior to current UTC date as default. The request date range is limit to 30 days. If the request range is greater than 30 days, it would limit your request range to (end_date-30) to end_date.
#' @param end_date Date. Ending UTC date for Notifications search. Current UTC date as default.
#' @param type String. "all" as default (choices: "all", "FLR", "SEP", "CME", "IPS", "MPC", "GST", "RBE", "report")
#' @return Data of Notifications.
#' @examples
#' DONKI_Notifications(start_date = as.Date("2019-01-01"), end_date = as.Date("2019-03-01"))
#' @export
DONKI_Notifications <- function(key = Sys.getenv("NASA_TOKEN"), start_date = end_date - 7, end_date = lubridate::today(tzone = "UTC"), type = "all"){
  # The DONKI API limits one request to a 30-day window; clamp the start
  # date as documented above.
  if (as.numeric(end_date - start_date) > 30) {
    start_date <- end_date - 30
  }
  # Build the query URL with paste0 instead of a pipe chain, so the
  # function no longer calls library(tidyr)/library(httr) -- attaching
  # packages inside a function mutates the caller's search path (and tidyr
  # was only pulled in for the re-exported pipe). httr is namespaced.
  url <- paste0("https://api.nasa.gov/DONKI/notifications?",
                "startDate=", start_date,
                "&endDate=", end_date,
                "&type=", type,
                "&api_key=", key)
  response <- httr::GET(url)
  if (httr::status_code(response) != 200) {
    message("Unsuccessful status of response!")
  }
  # Parsed response body, as before.
  httr::content(response)
}
|
842c1d7eecc548d783426b84121831387b17e0d2
|
87a751ca2b329ce662781107720f519f4a9dd714
|
/man/mixture_pls.Rd
|
dd188a21b2405a4851ed3a101f4581871da32ac7
|
[
"MIT"
] |
permissive
|
ck37/tlmixture
|
74171f01ba49806a5509606241e3f45662079c56
|
3ebf16cec099018daec4ac7dce7860c998e6a7d4
|
refs/heads/master
| 2023-04-11T03:03:07.311529
| 2021-04-22T20:54:58
| 2021-04-22T20:54:58
| 162,472,693
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 530
|
rd
|
mixture_pls.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixture-pls.R
\name{mixture_pls}
\alias{mixture_pls}
\title{Create exposure weights using partial least squares}
\usage{
mixture_pls(
data,
outcome,
exposures,
exposure_groups,
quantiles,
verbose = FALSE
)
}
\arguments{
\item{data}{tbd}
\item{outcome}{outcome column name}
\item{exposures}{tbd}
\item{exposure_groups}{tbd}
\item{quantiles}{tbd}
\item{verbose}{tbd}
}
\description{
Create exposure weights using partial least squares
}
|
48e996fbe5919468670f5bd6beb96895d60b779e
|
81def1155e21fd385b896ba4625a3a0f7ab4f37b
|
/R/tip_eg.R
|
1b7c3d91f78c4f5495845619ad7853de382c3951
|
[
"MIT"
] |
permissive
|
caboulton/asdetect
|
5b7ff8dccea7881ec1908133878fa76ab6610ba3
|
ac808828b5e38b7be98106ace26fa724b2dec522
|
refs/heads/master
| 2020-05-07T08:51:42.018565
| 2019-04-09T12:01:23
| 2019-04-09T12:01:23
| 180,349,926
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
tip_eg.R
|
# Simulate a stochastic system ramped toward a fold bifurcation ("tipping
# point"): Euler-Maruyama integration of dx = (-x^3 + x + m(t)) dt + s dW,
# where the forcing m(t) increases linearly and reaches the critical value
# mu = 2*sqrt(3)/9 at t = tiploc.
#
# Args:
#   tend:   total simulated time.
#   tiploc: time at which the forcing reaches the bifurcation value.
#   s:      noise standard deviation per sqrt(dt) increment.
#   dt:     integration time step; tend/dt should be a whole number >= 2.
#
# Returns: data.frame with columns t (time grid) and x (state trajectory).
tip_eg <- function(tend=1000, tiploc=900, s=0.1, dt=0.5) {
  len <- tend/dt
  # Vectorized time grid (replaces the original element-by-element loop).
  t <- (seq_len(len) - 1) * dt
  mu <- 2*sqrt(3)/9
  # Linear ramp of the control parameter, hitting mu at t = tiploc.
  m <- t*mu/tiploc
  x <- numeric(len)
  x[1] <- -1
  # The state update depends on the previous step, so this loop cannot be
  # vectorized without changing the model.
  for (i in 2:len) {
    x[i] <- x[i-1] + dt*(-x[i-1]^3+x[i-1]+m[i]) + sqrt(dt)*rnorm(1, sd=s)
  }
  data.frame(t=t, x=x)
}
|
53662f685b19ecdd7977b4b927e7566e6dc74a52
|
a7f773270dfeb0e566e103d75c22556ddba88b1c
|
/CS112 Draft.R
|
c8d366aa58e2c2eb8638a71ffa1d8877670d3719
|
[] |
no_license
|
anggunberlian/cs112
|
2230595e05d61617263f06ab928d60fd156d4365
|
1b6cf93c544540be3298527549a62ddb4c3ac1fa
|
refs/heads/master
| 2020-07-29T16:44:02.826725
| 2019-11-24T21:18:05
| 2019-11-24T21:18:05
| 209,887,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,221
|
r
|
CS112 Draft.R
|
# Exploratory script: download the AidData project dataset, clean its date
# columns, and practice basic subsetting/indexing.
setwd("/Users/anggunberlian/Desktop/CS112")  # NOTE(review): machine-specific path; setwd() in scripts is discouraged
### Multilateral Development Institution Data
foo <- read.csv("https://tinyurl.com/yb4phxx8") # read in the data
# column names
names(foo)
# dimensions of the data set
dim(foo)
# quick look at the data structure
head(foo)
# take note of the columns representing calendar dates
date.columns <- c(11, 12, 14, 15, 16, 17, 18, 25)
# Turn blank strings in each date column into NA, then parse the column to Date.
for (date in date.columns) {
  indices_where_empty <- which(foo[date] == "")
  foo[indices_where_empty, date] <- NA
  foo[[date]] <- as.Date(foo[[date]], format="%Y-%m-%d")
}
# Manual re-run of the loop body for column 12.
date <- 12
indices_where_empty <- which(foo[date] == "")
# NOTE(review): this assigns the literal string "NA", not the missing value NA
# (unlike the loop above) -- confirm which was intended.
foo[indices_where_empty, date] <- "NA"
foo[date] == ""
which(foo[date] == "")
foo[date]
# Vector-subsetting warm-up, unrelated to the dataset above.
number_of_calls_per_day <- c(5, 2, 90, 45, 67, 90, 69, 2, 4, 5, 30)
larger_twenty <- number_of_calls_per_day[which(number_of_calls_per_day > 20)]
larger_twenty
less_twenty <- number_of_calls_per_day[-which(number_of_calls_per_day > 20)]
less_twenty
less_twenty2 <- number_of_calls_per_day[which(number_of_calls_per_day <= 20)]
less_twenty2
number_of_calls_per_day
head(foo)
foo[1, 12]
class(foo[1, 12])
class(foo[1, 22])
class(foo$AgreementDate)
date.columns
head(foo, 10)
# alright, hb this
# NOTE(review): the next two lines pass more than two indices to a data frame,
# which errors with "incorrect number of dimensions".
foo[11, 12, 14, 15, 16, 17, 18, 25]
foo[11:12, 14:18, 25]
foo[date.columns]
# NOTE(review): the original two lines here were broken:
#   na_assigned <- foo[date.columns], na.strings=c("", "", "NA")    (syntax error)
#   na_assigned <- foo[date.columns, na.strings=date.columns(...)]  (calls date.columns as a function)
# `na.strings` is an argument of read.csv(), not of `[`; blanks were already
# converted to NA in the loop above, so a plain column subset suffices.
na_assigned <- foo[date.columns]
# NOTE(review): `indices_2009`, `new_foo`, `foo.train_set` and `foo.test_set`
# are never defined in this file -- presumably created in an earlier session.
sum(is.na(foo$CirculationDate[indices_2009]))
# boolean -- integer TRUE == 1, FALSE == 0
# NOTE(review): 2009-01-01 is evaluated as arithmetic (2009 - 1 - 1 = 2007),
# not as a date; compare against as.Date("2009-01-01") instead.
CD_correct_dates <- indices_2009[-which(foo$CirculationDate < 2009-01-01)]
CD_correct_dates
foo[4043, ]
y <- as.numeric((new_foo$RevisedCompletionDate - new_foo$ApprovalDate) / 30) # the delays in month
x <- new_foo$CirculationDate
plot(x, y, xlab = "Circulation date (year)", ylab = "Average delay (months)")
# Question 5 with all the training set
# Five candidate logistic-regression specifications for `treat`.
model_1_all <- glm(treat ~ . - re78, data = foo.train_set, family = binomial)
model_2_all <- glm(treat ~ age + education + hispanic + re75 - re78, data = foo.train_set, family = binomial)
model_3_all <- glm(treat ~ age + education + hispanic + married - re78, data = foo.train_set, family = binomial)
model_4_all <- glm(treat ~ age + education + black + re74 + re75 - re78, data = foo.train_set, family = binomial)
model_5_all <- glm(treat ~ age + education + black + married - re78, data = foo.train_set, family = binomial)
# Cross-validation for each model (presumably boot::cv.glm -- its default is
# leave-one-out; confirm the boot package is attached when this runs).
cv.err_1_all <- cv.glm(foo.train_set, model_1_all)
cv.err_2_all <- cv.glm(foo.train_set, model_2_all)
cv.err_3_all <- cv.glm(foo.train_set, model_3_all)
cv.err_4_all <- cv.glm(foo.train_set, model_4_all)
cv.err_5_all <- cv.glm(foo.train_set, model_5_all)
# Test set error for the train set
cv.err_1_all$delta
cv.err_2_all$delta
cv.err_3_all$delta
cv.err_4_all$delta
cv.err_5_all$delta
# Test set error for the test set
mean((foo.test_set$treat - predict(model_1_all, foo.test_set, type = "response"))^2)
mean((foo.test_set$treat - predict(model_2_all, foo.test_set, type = "response"))^2)
mean((foo.test_set$treat - predict(model_3_all, foo.test_set, type = "response"))^2)
mean((foo.test_set$treat - predict(model_4_all, foo.test_set, type = "response"))^2)
mean((foo.test_set$treat - predict(model_5_all, foo.test_set, type = "response"))^2)
|
313bf6efc16d234ca19a368f88ccfa3c6edabda3
|
13457e168e5628a931e3dd3ab696a865e05327e5
|
/R/RandomSkewers.R
|
6bebe04eaf1ef910fd64727f26a12b0853d495cc
|
[
"MIT"
] |
permissive
|
aivuk/Morphometrics
|
3c74f652295796384b08becdca82452d074013b1
|
4371a964cf3dd52573560abded1e0f0861c2bf30
|
refs/heads/master
| 2021-04-12T08:59:55.965063
| 2015-02-15T03:06:57
| 2015-02-15T03:06:57
| 30,614,868
| 0
| 0
| null | 2015-02-10T21:14:12
| 2015-02-10T21:14:12
| null |
UTF-8
|
R
| false
| false
| 3,824
|
r
|
RandomSkewers.R
|
#' Compare matrices via RandomSkewers
#'
#' Calculates covariance matrix correlation via random skewers
#'
#' @param cov.x Single covariance matrix or list of covariance matrices.
#' If single matrix is suplied, it is compared to cov.y.
#' If list is suplied and no cov.y is suplied, all matrices
#' are compared.
#' If cov.y is suplied, all matrices in list are compared to it.
#' @param cov.y First argument is compared to cov.y.
#' Optional if cov.x is a list.
#' @param num.vectors Number of random vectors used in comparison.
#' @param repeat.vector Vector of repeatabilities for correlation correction.
#' @param num.cores If list is passed, number of threads to use in computation. The doMC library must be loaded.
#' @param ... aditional arguments passed to other methods.
#' @return
#' If cov.x and cov.y are passed, returns average value
#' of response vectors correlation ('correlation'), significance ('probability') and standard deviation
#' of response vectors correlation ('correlation_sd')
#'
#' If cov.x is a list and cov.y is passed, same as above, but for all matrices in cov.x.
#'
#' If only a list is passed to cov.x, a matrix of RandomSkewers average
#' values and probabilities of all comparisons.
#' If repeat.vector is passed, comparison matrix is corrected above
#' diagonal and repeatabilities returned in diagonal.
#' @export
#' @rdname RandomSkewers
#' @references Cheverud, J. M., and Marroig, G. (2007). Comparing covariance matrices:
#' Random skewers method compared to the common principal components model.
#' Genetics and Molecular Biology, 30, 461-469.
#' @author Diogo Melo, Guilherme Garcia
#' @seealso \code{\link{KrzCor}},\code{\link{MantelCor}}
#' @examples
#' c1 <- RandomMatrix(10)
#' c2 <- RandomMatrix(10)
#' c3 <- RandomMatrix(10)
#' RandomSkewers(c1, c2)
#'
#' RandomSkewers(list(c1, c2, c3))
#'
#' reps <- unlist(lapply(list(c1, c2, c3), MonteCarloRep, sample.size = 10,
#' RandomSkewers, num.vectors = 100,
#' iterations = 10))
#' RandomSkewers(list(c1, c2, c3), repeat.vector = reps)
#'
#' c4 <- RandomMatrix(10)
#' RandomSkewers(list(c1, c2, c3), c4)
#'
#' #Multiple threads can be used with doMC library
#' library(doMC)
#' RandomSkewers(list(c1, c2, c3), num.cores = 2)
#'
#' @keywords matrixcomparison
#' @keywords matrixcorrelation
#' @keywords randomskewers
# S3 generic: dispatches on the class of cov.x (matrix -> .default, list -> .list).
RandomSkewers <- function(cov.x, cov.y, ...) UseMethod("RandomSkewers")
#' @rdname RandomSkewers
#' @export
RandomSkewers.default <- function (cov.x, cov.y, num.vectors = 1000, ...) {
  # Matrix dimension (number of traits).
  n.traits <- dim(cov.x)[1]
  # Reference direction: its correlations with the random skewers provide the
  # null distribution used for the significance test below.
  base.dir <- Normalize(rnorm(n.traits))
  # One random selection skewer per column, each rescaled to unit length.
  skewers <- array(rnorm(num.vectors * n.traits, mean = 0, sd = 1),
                   c(n.traits, num.vectors))
  skewers <- apply(skewers, 2, Normalize)
  null.corr <- base.dir %*% skewers
  # Normalized response vectors of each matrix to every skewer.
  resp.x <- apply(cov.x %*% skewers, 2, Normalize)
  resp.y <- apply(cov.y %*% skewers, 2, Normalize)
  # Vector correlation between paired responses (dot product of unit vectors).
  obs.corr <- colSums(resp.x * resp.y)
  avg.corr <- mean(obs.corr)
  sd.corr <- sd(obs.corr)
  # Fraction of null correlations exceeding the observed average correlation.
  p.value <- sum(avg.corr < null.corr) / num.vectors
  setNames(c(avg.corr, p.value, sd.corr),
           c("correlation", "probability", "correlation_sd"))
}
#' @rdname RandomSkewers
#' @method RandomSkewers list
#' @export
RandomSkewers.list <- function (cov.x, cov.y = NULL, num.vectors = 1000, repeat.vector = NULL, num.cores = 1, ...)
{
  # Pairwise comparison closure; fixes num.vectors for the mapping helpers.
  pair_rs <- function(x, y) RandomSkewers(x, y, num.vectors)
  if (is.null(cov.y)) {
    # No reference matrix: compare all matrices in the list against each other.
    return(ComparisonMap(cov.x, pair_rs,
                         repeat.vector = repeat.vector,
                         num.cores = num.cores))
  }
  # Reference matrix given: compare every list element against cov.y.
  SingleComparisonMap(cov.x, cov.y, pair_rs, num.cores = num.cores)
}
|
05d53783a8dba52785a03490716704b9b38e739f
|
3eefcbaa7faaff48f1335a3a3e4dc56e114c1ab0
|
/TFs_by_TADs_signifTADs_v2_permutG2t.R
|
2e8cea2d9c1f5f273f0160b51156cbf48bc56325
|
[] |
no_license
|
marzuf/v2_Yuanlong_Cancer_HiC_data_TAD_DA
|
9a435c08a9064d127a86d9909042bb4ff59ad82d
|
e33a0683ac7a9afe21cfec06320c82251d3ba0d5
|
refs/heads/master
| 2021-06-16T15:57:30.182879
| 2021-05-18T08:36:44
| 2021-05-18T08:36:44
| 202,159,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,357
|
r
|
TFs_by_TADs_signifTADs_v2_permutG2t.R
|
# For each dataset, compute the average number of regulatory features (TFs,
# regulated genes) per TAD under permuted gene-to-TAD (g2t) assignments, then
# plot summary boxplots. Usage: Rscript <script> <dsIn> [<hicds> <exprds>]
startTime <- Sys.time()
cat(paste0("... start - ", startTime, "\n"))

require(foreach)
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
require(doMC)
# Worker count for the %dopar% permutation loop below.
registerDoMC(40)

# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R crisp
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R c3.mir
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R c3.tft
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R c3.all
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R trrust
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R tftg
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R motifmap
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R kegg
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R chea3_all
# Rscript TFs_by_TADs_signifTADs_v2_permutG2t.R chea3_lung
#

# Plot settings.
# NOTE(review): plotCex is assigned twice with the same value (here and below).
plotCex <- 1.4
plotType <- "svg"
myHeight <- ifelse(plotType == "png", 400, 7)
myWidth <- ifelse(plotType == "png", 500, 8)
plotCex <- 1.4
nTop <- 10
fontFamily <- "Hershey"
require(ggsci)
top_col <- pal_d3()(2)[1]
last_col <- pal_d3()(2)[2]
# yarrr::transparent("grey", trans.val = .6)
mid_col <- "#BEBEBE66"
x_qt_val <- 0.2
y_qt_val <- 0.95

# Command-line handling: dsIn (annotation source) is required; hicds/exprds
# may optionally restrict the run to a single dataset pair. The "crisp"
# default below is dead code -- args[1] always overwrites it.
dsIn <- "crisp"
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 1 | length(args) == 3)
dsIn <- args[1]
if(length(args) == 3) {
  all_hicds <- args[2]
  all_exprds <- args[3]
} else {
  all_hicds <- list.files("PIPELINE/OUTPUT_FOLDER")
  all_hicds <- all_hicds[!grepl("_RANDOM", all_hicds)]
  all_hicds <- all_hicds[!grepl("_PERMUT", all_hicds)]
  all_exprds <- sapply(all_hicds, function(x) list.files(file.path("PIPELINE/OUTPUT_FOLDER", x)))
}
stopifnot(dsIn %in% c("crisp", "c3.mir", "c3.all", "c3.tft", "trrust", "tftg", "motifmap", "kegg", "chea3_all", "chea3_lung"))

nPermut <- 1000
# NOTE(review): the literal "...PERMUTG2T1000" already ends in 1000 and
# nPermut is appended right after, yielding "...PERMUTG2T10001000" in the
# folder name -- confirm this is intended.
outFolder <- file.path(paste0("TFS_BY_TADS_SIGNIFTADS_V2_PERMUTG2T1000", nPermut, "_", toupper(dsIn)))
dir.create(outFolder, recursive = TRUE)

buildData <- TRUE

# Toggle between cluster-mounted and local paths (the second assignment wins).
setDir <- "/media/electron"
setDir <- ""
entrezDT_file <- paste0(setDir, "/mnt/ed4/marie/entrez2synonym/entrez/ENTREZ_POS/gff_entrez_position_GRCh37p13_nodup.txt")
gff_dt <- read.delim(entrezDT_file, header = TRUE, stringsAsFactors = FALSE)
gff_dt$entrezID <- as.character(gff_dt$entrezID)
stopifnot(!duplicated(gff_dt$entrezID))
stopifnot(!duplicated(gff_dt$symbol))
# Two-way symbol <-> Entrez-ID lookup vectors.
entrez2symb <- setNames(gff_dt$symbol, gff_dt$entrezID)
symb2entrez <- setNames(gff_dt$entrezID, gff_dt$symbol)
if(buildData){
  # Outer loop: one block of summary rows per Hi-C dataset (hicds).
  permutG2t_nRegFeat_dt <- foreach(hicds = all_hicds, .combine='rbind') %do%{
    cat(paste0("> START - ", hicds,"\n"))
    # Load the regulator->target table for the chosen annotation source and
    # normalize it to columns regSymbol / targetSymbol / targetEntrezID.
    if(dsIn == "crisp") {
      reg_file <- file.path("gene_set_library_crisp_processed.txt")
      reg_dt <- read.delim(reg_file, sep="\t", header=TRUE, stringsAsFactors = FALSE)
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt <- reg_dt[reg_dt$targetSymbol %in% names(symb2entrez),]
      cat(paste0("with Entrez: nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt$targetEntrezID <- symb2entrez[reg_dt$targetSymbol]
      reg_dt$targetEntrezID <- as.character(reg_dt$targetEntrezID)
    } else if(dsIn == "chea3_all") {
      reg_file <- file.path("chea3_all_tissues_TFs_processed.txt")
      reg_dt <- read.delim(reg_file, sep="\t", header=TRUE, stringsAsFactors = FALSE)
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt <- reg_dt[reg_dt$targetSymbol %in% names(symb2entrez),]
      cat(paste0("with Entrez: nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt$targetEntrezID <- symb2entrez[reg_dt$targetSymbol]
      reg_dt$targetEntrezID <- as.character(reg_dt$targetEntrezID)
    } else if(dsIn == "chea3_lung") {
      reg_file <- file.path("chea3_lung_TFs_processed.txt")
      reg_dt <- read.delim(reg_file, sep="\t", header=TRUE, stringsAsFactors = FALSE)
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt <- reg_dt[reg_dt$targetSymbol %in% names(symb2entrez),]
      cat(paste0("with Entrez: nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt$targetEntrezID <- symb2entrez[reg_dt$targetSymbol]
      reg_dt$targetEntrezID <- as.character(reg_dt$targetEntrezID)
    } else if(dsIn == "trrust"){
      reg_file <- file.path("trrust_rawdata.human.tsv")
      reg_dt <- read.delim(reg_file, sep="\t", header=FALSE, stringsAsFactors = FALSE,
                           col.names = c("regSymbol", "targetSymbol", "direction", "ID"))
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt <- reg_dt[reg_dt$targetSymbol %in% names(symb2entrez),]
      cat(paste0("with Entrez: nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
      reg_dt$targetEntrezID <- symb2entrez[reg_dt$targetSymbol]
      reg_dt$targetEntrezID <- as.character(reg_dt$targetEntrezID)
    } else if(dsIn == "tftg") {
      reg_file <- file.path("tftg_db_all_processed.txt")
      reg_dt <- read.delim(reg_file, sep="\t", header=TRUE, stringsAsFactors = FALSE,
                           col.names=c("regSymbol", "targetEntrezID"))
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
    } else if(dsIn == "motifmap"){
      reg_file <- file.path("MOTIFMAP_ALLGENES/overlapDT_bp.Rdata")
      reg_dt <- get(load(reg_file))
      colnames(reg_dt)[colnames(reg_dt)=="entrezID"] <- "targetEntrezID"
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
    } else if(dsIn == "kegg"){
      reg_file <- file.path("hsa_kegg_entrez.txt")
      reg_dt <- read.delim(reg_file, sep="\t", header=FALSE, stringsAsFactors = FALSE,
                           col.names = c("targetEntrezID", "regSymbol"))
      reg_dt$targetEntrezID <- gsub("hsa:", "",reg_dt$targetEntrezID )
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
    }else {
      reg_file <- file.path(paste0(dsIn, ".v7.0.entrez_processed.txt"))
      reg_dt <- read.delim(reg_file, sep="\t", header=TRUE, stringsAsFactors = FALSE,
                           col.names=c("regSymbol", "targetEntrezID"))
      cat(paste0("init nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
    }
    hicds_reg_dt <- reg_dt
    rm("reg_dt")
    # NOTE(review): leftover debug assignment; immediately shadowed by the
    # foreach iterator on the next line.
    exprds = all_exprds[[paste0(hicds)]][1]
    exprds_dt <- foreach(exprds = all_exprds[[paste0(hicds)]], .combine='rbind') %do% {
      # The lung-specific TF set is only meaningful for lung cohorts.
      if(dsIn == "chea3_lung") {
        if(! (grepl("lusc", exprds) | grepl("luad", exprds))) return(NULL)
      }
      cat(paste0("... load permut data ...\n"))
      permut_dt <- get(load(file.path("PIPELINE", "OUTPUT_FOLDER", hicds, exprds, "5_runPermutationsMedian", "permutationsDT.Rdata") ))
      cat(paste0("... loaded ...\n"))
      stopifnot(ncol(permut_dt) >= nPermut)
      permut_dt <- permut_dt[,1:nPermut]
      # Parallel loop over permutations: for each permuted gene-to-TAD
      # assignment, count regulators / regulated genes per TAD.
      permut_data <- foreach(i_permut = 1:ncol(permut_dt)) %dopar% {
        g2t_dt <- data.frame(
          entrezID = as.character(rownames(permut_dt)),
          region = as.character(permut_dt[, i_permut]),
          stringsAsFactors = FALSE
        )
        g2t_vect <- setNames(g2t_dt$region, g2t_dt$entrezID)
        reg_dt <- hicds_reg_dt[hicds_reg_dt$targetEntrezID %in% g2t_dt$entrezID,]
        cat(paste0("with g2t assignment: nrow(reg_dt)", "\t=\t", nrow(reg_dt), "\n"))
        reg_dt$targetRegion <- g2t_vect[paste0(reg_dt$targetEntrezID)]
        stopifnot(!is.na(reg_dt))
        # NOTE(review): nbrReg_TADs_dt and plotTit are computed but never used.
        nbrReg_TADs_dt <- aggregate(regSymbol~targetRegion, data=reg_dt, function(x) length(unique(x)))
        plotTit <- paste0(hicds, "\n", exprds)
        geneList <- get(load(file.path("PIPELINE", "OUTPUT_FOLDER", hicds, exprds, "0_prepGeneData", "pipeline_geneList.Rdata") ))
        # stopifnot(geneList %in% g2t_dt$entrezID) # not for permut
        gByTAD <- g2t_dt[g2t_dt$entrezID %in% geneList,]
        nGbyT <- setNames(as.numeric(table(g2t_dt$region)), names(table(g2t_dt$region)))
        reg_dt <- reg_dt[reg_dt$targetEntrezID %in% geneList,] # update 08.01.20 -> NEED ALSO TO SUBSET THE REGULATED FEATURES !
        # 1) # of genes in TAD
        tad_nGenes_dt <- aggregate(entrezID ~ region, data=gByTAD, FUN=function(x) length(x))
        colnames(tad_nGenes_dt)[colnames(tad_nGenes_dt) == "entrezID"] <- "nGenes"
        stopifnot(tad_nGenes_dt$nGenes >= 3)
        # 2) # of genes regulated within TAD
        tad_nRegGenes_dt <- aggregate(targetEntrezID~targetRegion, data=reg_dt, FUN=function(x)length(unique(x)) )
        colnames(tad_nRegGenes_dt)[colnames(tad_nRegGenes_dt) == "targetRegion"] <- "region"
        colnames(tad_nRegGenes_dt)[colnames(tad_nRegGenes_dt) == "targetEntrezID"] <- "nRegGenes"
        # 3) # of TFs within TAD
        tad_nTFs_dt <- aggregate(regSymbol~targetRegion, data=reg_dt, FUN=function(x)length(unique(x)) )
        colnames(tad_nTFs_dt)[colnames(tad_nTFs_dt) == "targetRegion"] <- "region"
        colnames(tad_nTFs_dt)[colnames(tad_nTFs_dt) == "regSymbol"] <- "nTFs"
        plot_dt <- merge(tad_nTFs_dt, merge(tad_nGenes_dt, tad_nRegGenes_dt,by="region"), by="region")
        stopifnot(plot_dt$nRegGenes <= plot_dt$nGenes)
        plot_dt$nTFs_byGenes <- plot_dt$nTFs/plot_dt$nGenes
        plot_dt$nRegGenes_byGenes <- plot_dt$nRegGenes/plot_dt$nGenes
        stopifnot(!duplicated(plot_dt$region))
        plot_dt$hicds <- hicds
        plot_dt$exprds <- exprds
        permutG2t_plot_dt <- plot_dt
        stopifnot(permutG2t_plot_dt$region %in% names(nGbyT))
        # NOTE(review): nGenes is overwritten here with the count of ALL genes
        # assigned to the TAD (nGbyT), while the *_byGenes ratios above used
        # the pipeline-gene count -- confirm which denominator is intended.
        permutG2t_plot_dt$nGenes <- nGbyT[paste0(permutG2t_plot_dt$region)]
        stopifnot(!is.na(permutG2t_plot_dt$nGenes))
        # NOTE(review): the two *OVERnGenes* fields below store the raw counts
        # (nTFs, nRegGenes), not the nTFs_byGenes / nRegGenes_byGenes ratios
        # computed above, so the downstream mean_*OVERnGenes_* columns equal
        # the plain count means -- confirm intent.
        list(nGenes_permutG2t = permutG2t_plot_dt$nGenes,
             nTFs_permutG2t = permutG2t_plot_dt$nTFs,
             nRegGenes_permutG2t= permutG2t_plot_dt$nRegGenes,
             nTFsOVERnGenes_permutG2t = permutG2t_plot_dt$nTFs,
             nRegGenesOVERnGenes_permutG2t = permutG2t_plot_dt$nRegGenes)
      } #end-foreach iterating over permut
      # One summary row per (hicds, exprds): means across all permutations.
      data.frame(
        hicds = hicds,
        exprds = exprds,
        mean_nTFs_permutG2t = mean(unlist(lapply(permut_data, function(x)x[["nTFs_permutG2t"]]))),
        mean_nRegGenes_permutG2t = mean(unlist(lapply(permut_data, function(x)x[["nRegGenes_permutG2t"]]))),
        mean_nTFsOVERnGenes_permutG2t = mean(unlist(lapply(permut_data, function(x)x[["nTFsOVERnGenes_permutG2t"]]))),
        mean_nRegGenesOVERnGenes_permutG2t = mean(unlist(lapply(permut_data, function(x)x[["nRegGenesOVERnGenes_permutG2t"]]))),
        mean_nGenes_permutG2t = mean(unlist(lapply(permut_data, function(x)x[["nGenes_permutG2t"]]))),
        stringsAsFactors = FALSE
      )
    }# end-for iterating over exprds
    exprds_dt
  } # end-for iterating over hicds
  outFile <- file.path(outFolder, "permutG2t_nRegFeat_dt.Rdata")
  save(permutG2t_nRegFeat_dt, file = outFile, version=2)
  cat(paste0("... written: ", outFile, "\n"))
} else {
  # Reuse a previously built table instead of recomputing.
  outFile <- file.path(outFolder, "permutG2t_nRegFeat_dt.Rdata")
  permutG2t_nRegFeat_dt <- get(load(outFile))
}
# load("TFS_BY_TADS_SIGNIFTADS_C3.TFT/permutG2t_nRegFeat_dt.Rdata")

# Boxplot of all summary columns across datasets.
outFile <- file.path(outFolder, paste0("permutG2t_nRegFeat_boxplot_allDS.", plotType))
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
par(mar=par()$mar+c(9,0,0,0))
boxplot(permutG2t_nRegFeat_dt[,!colnames(permutG2t_nRegFeat_dt) %in% c("hicds", "exprds")],
        las=2,
        main=paste0("all ds (n=", length(unique(file.path(permutG2t_nRegFeat_dt$hicds, permutG2t_nRegFeat_dt$exprds))),")"),
        cex.main = plotCex, cex.lab = plotCex,
        cex.axis=0.8)
mtext(side=3, text = paste0("permutG2t - ", dsIn))
# Close the file device so the plot is flushed to disk (the original never
# called dev.off(), leaving both devices open and the svg files incomplete).
dev.off()
cat(paste0("... written: ", outFile, "\n"))

# Same plot restricted to the main columns of interest.
keepCols <- c("mean_nTFs_permutG2t", "mean_nGenes_permutG2t", "mean_nTFsOVERnGenes_permutG2t")
outFile <- file.path(outFolder, paste0("permutG2t_nRegFeat_boxplot_allDS_keepCols.", plotType))
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
par(mar=par()$mar+c(9,0,0,0))
boxplot(permutG2t_nRegFeat_dt[, keepCols], las=2,
        main=paste0("all ds (n=", length(unique(file.path(permutG2t_nRegFeat_dt$hicds, permutG2t_nRegFeat_dt$exprds))),")"),
        cex.main = plotCex, cex.lab = plotCex,
        cex.axis=0.8)
mtext(side=3, text = paste0("permutG2t - ", dsIn))
dev.off()
cat(paste0("... written: ", outFile, "\n"))

#####################################################################
cat("*** DONE\n")
cat(paste0("... end - ", Sys.time(), "\n"))
|
3a4e6d901463336a2b3f81daaa18e3470a4e124d
|
9e98d6c50c13b4f8baa09de949cf6092bb27ec9f
|
/钢铁/钢铁报表数据调整.R
|
d5567699409c33d348baa93f36b9dc4beaf8d26a
|
[] |
no_license
|
DanieljcFan/genial-flow-model
|
620e8d048a12db3ed6a6c5a1bbbe22a34efbbbf3
|
6d464c6c5ed50fb44cbeb101e4598b040d85dc69
|
refs/heads/master
| 2021-01-19T23:20:54.933071
| 2017-08-31T09:27:50
| 2017-08-31T09:27:50
| 101,262,811
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 636
|
r
|
钢铁报表数据调整.R
|
setwd("I:/work/genial-flow/钢铁/")
load("gt.Rdata")
steel <- gt
rm(gt)
# 鞍钢时间格式调整
index <- which(!is.na(as.numeric(steel$report_period)))
tmp <- steel$report_period[index]
tmp <- as.POSIXct(as.numeric(tmp)*86400, origin = "1970-01-01")
steel$report_period <- as.POSIXlt(steel$report_period, format = '%Y-%m-%d')
steel$report_period[index] <- tmp
#调整列名
en <- which(1:length(names(steel)) %in% grep('[A-Za-z]',names(steel)))
name <- names(steel)[-en]
index <- regexpr('[\u4e00-\u9fa5]+$' , name)
names(steel)[-en] <- substring(name, index, index + attr(index, 'match.length'))
save(steel, file = 'steel.rda')
|
936f7d61818c790781b52efdaee017f445b54462
|
351e15860fd8e96fbaaeb8f0d2f89b824aa4e92f
|
/man/print.summary.CausalMBSTS.Rd
|
f871f7bc61b5a6f3b7a340e70ce635814e62c41c
|
[] |
no_license
|
cran/CausalMBSTS
|
d4e31399764d2a9558e41618f278d1cae9bf07a1
|
6a5dc21f046cac4cad4668fd4cca7a1415d141a5
|
refs/heads/master
| 2023-08-11T07:38:24.172797
| 2021-10-05T22:10:09
| 2021-10-05T22:10:09
| 310,509,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 796
|
rd
|
print.summary.CausalMBSTS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_CausalMBSTS.R
\name{print.summary.CausalMBSTS}
\alias{print.summary.CausalMBSTS}
\title{Format and print the estimated causal effect(s), credible interval(s), and Bayesian p-value(s) into a clear output.}
\usage{
\method{print}{summary.CausalMBSTS}(x, digits = max(3, getOption("digits") - 3), ...)
}
\arguments{
\item{x}{An object of class 'summary.CausalMBSTS', a result of a call to \code{\link{summary.CausalMBSTS}}.}
\item{digits}{Number of digits to display.}
\item{...}{Additional arguments passed to other methods.}
}
\value{
Invisibly, \code{x}.
}
\description{
Format and print the estimated causal effect(s), credible interval(s), and Bayesian p-value(s) into a clear output.
}
|
7a9d9dc1a19229eb4b2960a224cb3cd44e043b29
|
24c11d5a15303b7698383b7127522b716e2df13d
|
/lib/Clustering.R
|
63ce9d58f52ec96e0ad2ed3f3c050905cd1401d7
|
[] |
no_license
|
TZstatsADS/Fall2016-proj4-Stunnaisland
|
5ca86834c5a978b36755949aad9489b40373566d
|
def2778cf13b1f5735e9d6635d115faf3908d92d
|
refs/heads/master
| 2020-12-24T11:37:17.843874
| 2016-11-18T19:16:33
| 2016-11-18T19:16:33
| 73,025,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
Clustering.R
|
# Cluster the song-lyrics bag-of-words table with k-means and hierarchical
# clustering.
# NOTE: install.packages() ran on every execution in the original script;
# installation is a one-time setup step, so it is left commented out.
# install.packages("cluster")
library(cluster)
setwd("C:/Users/Zachary/Desktop/ADS - Proj.4")
load("C:/Users/Zachary/Desktop/ADS - Proj.4/lyr.RData")
common_id = read.table("common_id.txt")
View(common_id)
msm_train = read.table("mxm_dataset_train.txt",header=F, sep=",")
#reading the bag of words into R
head(lyr)
View(lyr)
lyr_2 = na.omit(lyr)
# NOTE(review): scale() returns the scaled matrix but the result is discarded
# here, so k-means below runs on UNSCALED data. Assign it
# (lyr_2 <- scale(lyr_2)) if scaling was intended -- that changes the clusters.
scale(lyr_2)
k_means = kmeans(lyr_2,3)
summary(k_means)
#get cluster means
# (fixed: the original referenced fit$cluster, but `fit` is only created
# further down by hclust(); the k-means assignments live in k_means$cluster)
aggregate(lyr_2,by=list(k_means$cluster),FUN=mean)
lyr_3 = data.frame(lyr_2, k_means$cluster)
########
##Hierarchical Algothrims
distance = dist(lyr_2, method = "euclidean") # distance matrix
# "ward" was renamed "ward.D" in R >= 3.1.0 (see also "ward.D2").
fit = hclust(distance, method="ward.D")
plot(fit) # display dendogram
groups <- cutree(fit, k=5) # cut tree into 5 clusters
# draw dendogram with red borders around the 5 clusters
rect.hclust(fit, k=5, border="red")
|
1bdfdab61e28dd9b074cd92ba4536ad86d5f020c
|
88b3ebd348a9361660b16a55ab2dfb260597523b
|
/2018-19_CCSB_LoadsData_WY2010-2018/Sites/Inlet_11452600/1_wy2016_2017/1_rloadest/1_wwMeHg/wwMeHg_Inlet_RScript.R
|
691eb681dd56680c0b56e776879ad1ec26218337
|
[] |
no_license
|
joed7751/CCSB_Loads
|
623a8cea5a79cb9aa06f5d7a7d73c342915d48cf
|
553a33f59d98a25bd4276c9bcd49b427829d966c
|
refs/heads/master
| 2021-07-01T02:14:32.939595
| 2020-09-22T15:44:26
| 2020-09-22T15:44:26
| 154,237,927
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,610
|
r
|
wwMeHg_Inlet_RScript.R
|
#wwMeHg Inlet script for retrieving loads models stats (the stats are used to select the best model for this constituent at this site) and loads predictions.
library(akima)
library(dataRetrieval)
library(digest)
library(leaps)
library(lubridate)
library(memoise)
library(rloadest)
library(smwrBase)
library(smwrData)
library(smwrGraphs)
library(smwrQW)
library(smwrStats)
library(boot)
library(KernSmooth)
library(lattice)
# Load the methylmercury samples and the daily-flow record for the CCSB
# inlet (station 11452600).
wwMeHg_Inlet<-importRDB("wwMeHg_InletR.txt")
InletQ<-importRDB("InletQR.txt")
#These data frames are created by the function importRDB.
#The calls above bring the constituent data and the daily flow data into the script.
# Fit all nine predefined rloadest rating-curve models; auto-printing each
# fitted object shows the diagnostics used below to pick the best model (m7).
wwMeHg_Inletm1 <- loadReg(wwMeHg ~model(1), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm1
wwMeHg_Inletm2 <- loadReg(wwMeHg ~model(2), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm2
wwMeHg_Inletm3 <- loadReg(wwMeHg ~model(3), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm3
wwMeHg_Inletm4 <- loadReg(wwMeHg ~model(4), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm4
wwMeHg_Inletm5 <- loadReg(wwMeHg ~model(5), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm5
wwMeHg_Inletm6 <- loadReg(wwMeHg ~model(6), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm6
wwMeHg_Inletm7 <- loadReg(wwMeHg ~model(7), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm7
wwMeHg_Inletm8 <- loadReg(wwMeHg ~model(8), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm8
wwMeHg_Inletm9 <- loadReg(wwMeHg ~model(9), data = wwMeHg_Inlet, flow="Flow", dates = "Dates" ,conc.units="ng/L" , station = "CCSB-Yolo")
wwMeHg_Inletm9
#These objects of class "loadReg" are created.
#A list in R allows you to gather a variety of objects under one name (that is, the name of the list) in an ordered way.
#These objects can be matrices, vectors, data frames, even other lists, etc. It is not even required that these objects are related to each other in any way.
#When the models are run (m1-m9), the output will be in the console. These are the stats used to select the best model.
#print(wwMeHg_Inletm1,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm2,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm3,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm4,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm5,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm6,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm7,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm8,brief = FALSE, load.only = FALSE)
#print(wwMeHg_Inletm9,brief = FALSE, load.only = FALSE)
#Commenting these out. These provide some explanations of the data in a longer form. Brief results are printed to console (wwMeHg_Inletm1-9)
# Diagnostic plots for the selected model (m7).
# NOTE(review): set.up = F uses the reassignable shortcut F; prefer FALSE.
plot(wwMeHg_Inletm7,ann=FALSE)
title(main = "11452600_wwMeHg Response vs Fitted Values",xlab = "Fitted Values",ylab = "Response Values")
plot(wwMeHg_Inletm7,which = 2,set.up = F)
title(main = "11452600_wwMeHg Residuals vs Fitted Values")
plot(wwMeHg_Inletm7,which = 3,set.up = F)
title(main = "11452600_wwMeHg Assessing Heteroscedasticity") #Add "of Residuals"?
plot(wwMeHg_Inletm7,which = 4,set.up = F)
title(main = "11452600_wwMeHg Correlogram of Samples")
plot(wwMeHg_Inletm7,which = 5,set.up = F)
title(main="11452600_wwMeHg Normal Discharge")
plot(wwMeHg_Inletm7,which = 6,set.up = F)
title(main="11452600_wwMeHg Box Plot of Loads")
#These functions plot the data using the chosen best model and add a title and labels to the plot.
# Annual (95% CI) and daily (90% CI) load predictions from model 7, saved to CSV.
wwMeHg_Inlet_load<-predLoad(wwMeHg_Inletm7,InletQ,load.units="kg",by="water year",allow.incomplete = TRUE,conf.int = 0.95,print = TRUE)
write.csv(wwMeHg_Inlet_load,"1_Inlet_wwMeHg_m7_Flux_Annual.csv")
wwMeHg_Inlet_load_day<-predLoad(wwMeHg_Inletm7, InletQ,load.units = "kg",by="day",allow.incomplete = TRUE,conf.int = 0.90,print = TRUE)
write.csv(wwMeHg_Inlet_load_day,"1_Inlet_wwMeHg_m7_Flux_Daily.csv")
#Lines 75 and 77 create data frames that use the function predLoad.
#Description of predLoad: Estimate loads from a rating-curve model from loadReg for a new data frame, aggregating the loads by specified time periods.
#Lines 76 and 78 write the data frames to a .csv file.
#file.choose() lets the user select the location for the .csv files.
b79b2a1e6f6622bdcfb2912d03929f246c37be31
|
094f4ae4791b0a655733b816febc4acdb766b423
|
/airjobs/server.R
|
6f350fc0f1bc1408af2002ab0f31859eb748d216
|
[] |
no_license
|
bayeshack2016/airjobs
|
ff55a5c2acabc626362fe9c7e5549ee97bf230b4
|
0a89bcdefb03ef9c9aed10fa438c93c446deeeb3
|
refs/heads/master
| 2020-12-31T04:56:29.686335
| 2016-04-24T17:36:53
| 2016-04-24T17:36:53
| 56,982,206
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,351
|
r
|
server.R
|
require(shiny)
require(dplyr)
require(scales)
all_df <- read.csv("../reshaped_data/database.csv")
# Map a human-readable importance label to its numeric weight.
# The sentinel "Choose Importance" passes through unchanged; any
# unrecognised label yields NULL (mirroring switch() semantics).
switch_importance <- function(importance){
  weight_lookup <- list(
    "Choose Importance"  = "Choose Importance",
    "A Little Important" = 1,
    "Somewhat Important" = 2,
    "Very Important"     = 3
  )
  weight_lookup[[importance]]
}
# Map an education-level label to its ordinal code (1 = lowest attainment,
# 12 = highest). The unselected sentinel maps to 13 so that the later
# `education_level_required <= education_level` filter keeps every row.
switch_education <- function(education_string){
  level_lookup <- list(
    "Choose Highest Education Level" = 13,
    "Less than a High School Diploma" = 1,
    "High School Diploma (or GED or High School Equivalence Certificate)" = 2,
    "Post-Secondary Certificate" = 3,
    "Some College Courses" = 4,
    "Associate's Degree (or other 2-year degree)" = 5,
    "Bachelor's Degree" = 6,
    "Post-Baccalaureate Certificate" = 7,
    "Master's Degree" = 8,
    "Post-Master's Certificate" = 9,
    "First Professional Degree" = 10,
    "Doctoral Degree" = 11,
    "Post-Doctoral Training" = 12
  )
  level_lookup[[education_string]]
}
# Inverse of switch_education for display: map an ordinal code (as a
# character string, "1".."12") back to its human-readable label. Note the
# "2" label is deliberately shorter than the forward mapping's label.
# Unrecognised codes yield NULL, as switch() did.
switch_education_back <- function(education_num){
  label_lookup <- list(
    "1"  = "Less than a High School Diploma",
    "2"  = "High School Diploma",
    "3"  = "Post-Secondary Certificate",
    "4"  = "Some College Courses",
    "5"  = "Associate's Degree (or other 2-year degree)",
    "6"  = "Bachelor's Degree",
    "7"  = "Post-Baccalaureate Certificate",
    "8"  = "Master's Degree",
    "9"  = "Post-Master's Certificate",
    "10" = "First Professional Degree",
    "11" = "Doctoral Degree",
    "12" = "Post-Doctoral Training"
  )
  label_lookup[[education_num]]
}
# Define a server for the Shiny app.
# Wires the questionnaire inputs (skills, interests, knowledge, states,
# education, wage range) through get_jobs() to score occupations, then
# through filter_jobs() to trim the result, and renders it as a table.
shinyServer(function(input, output) {
  # Placeholder table with paging/search disabled (currently renders nothing).
  output$var_table <- renderDataTable({
  },
  options = list(pagingType = "simple", searching = FALSE, paging = FALSE, searchable = FALSE)
  )
  # Main results table: score + filter, sorted by score (col 2) descending.
  output$table <- renderDataTable({
    # Score every occupation against the user's selections; importance and
    # education labels are converted to numeric codes first.
    job_output <- get_jobs(
      database_df = all_df
      , skill_col_1 = input$skill_col_1
      , skill_col_2 = input$skill_col_2
      , skill_col_3 = input$skill_col_3
      , skill_col_4 = input$skill_col_4
      , skill_col_5 = input$skill_col_5
      , skill_weight_1 = switch_importance(input$skill_weight_1)
      , skill_weight_2 = switch_importance(input$skill_weight_2)
      , skill_weight_3 = switch_importance(input$skill_weight_3)
      , skill_weight_4 = switch_importance(input$skill_weight_4)
      , skill_weight_5 = switch_importance(input$skill_weight_5)
      , interest_1 = input$interest_col_1
      , interest_2 = input$interest_col_2
      , interest_3 = input$interest_col_3
      , interest_weight_1 = switch_importance(input$interest_weight_1)
      , interest_weight_2 = switch_importance(input$interest_weight_2)
      , interest_weight_3 = switch_importance(input$interest_weight_3)
      , knowledge_1 = input$knowledge_col_1
      , knowledge_2 = input$knowledge_col_2
      , knowledge_3 = input$knowledge_col_3
      , knowledge_4 = input$knowledge_col_4
      , knowledge_5 = input$knowledge_col_5
      , knowledge_weight_1 = switch_importance(input$knowledge_weight_1)
      , knowledge_weight_2 = switch_importance(input$knowledge_weight_2)
      , knowledge_weight_3 = switch_importance(input$knowledge_weight_3)
      , knowledge_weight_4 = switch_importance(input$knowledge_weight_4)
      , knowledge_weight_5 = switch_importance(input$knowledge_weight_5)
      , state_1 = input$state_1
      , state_2 = input$state_2
      , state_3 = input$state_3
      , education_level = switch_education(input$education_level)
      , max_wage = input$max_wage
      , min_wage = input$min_wage
    )
    # Apply the state / salary / education filters; this is the value the
    # renderDataTable expression returns.
    filter_output <- filter_jobs(job_output,
                                 state_1 = input$state_1,
                                 state_2 = input$state_2,
                                 state_3 = input$state_3,
                                 min_salary_input = as.numeric(input$min_wage),
                                 max_salary_input = as.numeric(input$max_wage),
                                 education_level = switch_education(input$education_level)
    )
  },
  options = list(pagingType = "simple",
                 searching = FALSE, paging = FALSE, searchable = FALSE,
                 order = list(list(2, 'desc'), list(4, 'asc'))
  )
  )
})
# Score every occupation in database_df against the user's selected skills,
# interests, knowledge areas and states. "Choose" is the sentinel for an
# unanswered input and is dropped before matching. Column positions are
# fixed by the database layout: interests 4:9, skills 10:45, knowledge
# 46:78, per-state job ranks 82:132, geography 133:183, salaries 184:235;
# the substring(first = ...) calls strip each group's column-name prefix.
# Returns a data.frame (one row per occupation) with an overall `score`
# plus education/salary columns and the matched attribute columns;
# state/salary/education filtering happens later in filter_jobs().
get_jobs <- function(
  database_df = database_df
  , skill_col_1 = "Choose"
  , skill_col_2 = "Choose"
  , skill_col_3 = "Choose"
  , skill_col_4 = "Choose"
  , skill_col_5 = "Choose"
  , skill_weight_1 = "Choose"
  , skill_weight_2 = "Choose"
  , skill_weight_3 = "Choose"
  , skill_weight_4 = "Choose"
  , skill_weight_5 = "Choose"
  , interest_1 = "Choose"
  , interest_2 = "Choose"
  , interest_3 = "Choose"
  , interest_weight_1 = "Choose"
  , interest_weight_2 = "Choose"
  , interest_weight_3 = "Choose"
  , knowledge_1 = "Choose"
  , knowledge_2 = "Choose"
  , knowledge_3 = "Choose"
  , knowledge_4 = "Choose"
  , knowledge_5 = "Choose"
  , knowledge_weight_1 = "Choose"
  , knowledge_weight_2 = "Choose"
  , knowledge_weight_3 = "Choose"
  , knowledge_weight_4 = "Choose"
  , knowledge_weight_5 = "Choose"
  , state_1 = "Choose"
  , state_2 = "Choose"
  , state_3 = "Choose"
  , education_level = "Choose"
  , max_wage = "Choose"
  , min_wage = "Choose"
){
  num_jobs <- dim(database_df)[1]

  # --- Skills ---
  col_names <- c(skill_col_1, skill_col_2, skill_col_3, skill_col_4, skill_col_5)
  # BUG FIX: a logical mask replaces x[-grep("Choose", x)]. When nothing
  # matched "Choose", grep() returned integer(0) and x[-integer(0)] silently
  # dropped EVERY element, breaking the all-questions-answered case.
  keep <- !grepl("Choose", col_names)
  col_names_nonna <- col_names[keep]
  skills_col_index <- c(10:45)[na.omit(match(col_names_nonna, substring(names(database_df)[10:45], first = 8)))]
  # BUG FIX: the fifth entry was skill_col_5 (a column name) instead of
  # skill_weight_5, corrupting the skill weight vector.
  skills_weights <- as.numeric(c(skill_weight_1, skill_weight_2, skill_weight_3,
                                 skill_weight_4, skill_weight_5)[keep])

  # --- Interests ---
  col_names <- c(interest_1, interest_2, interest_3)
  keep <- !grepl("Choose", col_names)
  col_names_nonna <- col_names[keep]
  interest_col_index <- c(4:9)[na.omit(match(col_names_nonna, substring(names(database_df)[4:9], first = 11)))]
  interest_weights <- as.numeric(c(interest_weight_1, interest_weight_2, interest_weight_3)[keep])

  # --- Knowledge ---
  # BUG FIX: knowledge_4 was listed twice (6 names against 5 weights),
  # misaligning the weight/name pairing.
  col_names <- c(knowledge_1, knowledge_2, knowledge_3, knowledge_4, knowledge_5)
  keep <- !grepl("Choose", col_names)
  col_names_nonna <- col_names[keep]
  knowledge_col_index <- c(46:78)[na.omit(match(col_names_nonna, substring(names(database_df)[46:78], first = 11)))]
  knowledge_weights <- as.numeric(c(knowledge_weight_1, knowledge_weight_2, knowledge_weight_3,
                                    knowledge_weight_4, knowledge_weight_5)[keep])

  # --- States ---
  col_names <- c(state_1, state_2, state_3)
  keep <- !grepl("Choose", col_names)
  col_names_nonna <- col_names[keep]
  jobrank_col_index <- c(82:132)[na.omit(match(col_names_nonna, substring(names(database_df)[82:132], first = 14)))]
  geo_col_index <- c(133:183)[na.omit(match(col_names_nonna, substring(names(database_df)[133:183], first = 8)))]
  salary_col_index <- c(184:235)[na.omit(match(col_names_nonna, substring(names(database_df)[184:235], first = 8)))]

  # Weighted preference score from the selected attribute columns.
  # (Original conditions read length(x > 0), which only worked by accident;
  # made the length test explicit.)
  score1 <- rep(0, num_jobs)
  if (length(skills_col_index) > 0) {
    score1 <- score1 + as.matrix(database_df[, skills_col_index]) %*% skills_weights
  }
  if (length(interest_col_index) > 0) {
    score1 <- score1 + as.matrix(database_df[, interest_col_index]) %*% interest_weights
  }
  if (length(knowledge_col_index) > 0) {
    score1 <- score1 + as.matrix(database_df[, knowledge_col_index]) %*% knowledge_weights
  }
  # Normalise by the score range.
  # NOTE(review): divides by zero when all scores are equal -- confirm
  # upstream inputs make that impossible.
  score1 <- score1 / (max(score1) - min(score1))

  # Mean per-state job ranking across the chosen states.
  score2 <- rep(0, num_jobs)
  if (length(jobrank_col_index) >= 2) {
    score2 <- score2 + rowMeans(database_df[, jobrank_col_index])
  } else if (length(jobrank_col_index) == 1) {
    score2 <- score2 + database_df[, jobrank_col_index]
  }

  output_df <- data.frame(database_df[, c("o_net_soc_code", "title")]
                          , score = score1 + score2
                          , database_df[, c("education_level_required", "salary_us")]
                          , database_df[, c(salary_col_index
                                            , geo_col_index
                                            , skills_col_index
                                            , interest_col_index
                                            , knowledge_col_index
                                            )])
  return(output_df)
  # returns df before filtering
}
# Filter the scored job table produced by get_jobs():
#   - drop rows whose required education exceeds the user's level,
#   - keep rows whose salary range in the chosen states lies strictly
#     inside (min_salary_input, max_salary_input),
#   - round display columns and convert the education code back to a label,
#   - rescale scores to a 0-100 display value.
filter_jobs <- function (df, state_1 = "NA",
  state_2 = "NA", state_3 = "NA", min_salary_input = 0, max_salary_input = 1000000000,
  education_level = 12){
  # BUG FIX: this used magrittr's compound pipe %<>%, but only dplyr is
  # attached at the top of the file and dplyr does not export %<>%, so the
  # call errored at runtime with "could not find function".
  df <- df %>%
    filter(education_level_required <= education_level)
  # Per-row min/max salary across the chosen states. matches('salary_XX')
  # selects nothing for the sentinel "Choose"/"NA" states; if no state
  # column matched at all, fall back to the national figure.
  df_t <-
    df %>%
    select(matches('salary_')) %>%
    select(matches(paste0('salary_', state_1)),
           matches(paste0('salary_', state_2)),
           matches(paste0('salary_', state_3)))
  if (dim(df_t)[2] == 0) {
    df_t$salary_min <- df$salary_us
    df_t$salary_max <- df$salary_us
  } else {
    df_t$salary_min <- apply(df_t, 1, min)
    df_t$salary_max <- apply(df_t, 1, max)
  }
  df_all <- cbind(df, salary_min = df_t$salary_min, salary_max = df_t$salary_max)
  df_res <-
    df_all %>%
    filter(salary_min > min_salary_input & salary_max < max_salary_input) %>%
    select(., -c(salary_min, salary_max))
  save_colnames <- colnames(df_res)
  # Round every column after the first four (id/title/score/education stay).
  df_res <- cbind(df_res[, c(1, 2, 3, 4)], round(df_res[, c(-1, -2, -3, -4)], 2))
  colnames(df_res) <- save_colnames
  # Turn the numeric education level back into its display label.
  df_res$education_level_required <- sapply(as.character(df_res$education_level_required), switch_education_back)
  # Fix currency column display output, pull salary fields together
  # for (i in 1:length(colnames(df_res))){
  #   if(grepl("salary_",colnames(df_res)[i])){
  #     df_res[,colnames(df_res)[i]] <- dollar(df_res[,colnames(df_res)[i]])
  #   }
  # }
  # Rescale scores to 0-100 for display; skip when the score column is NA
  # (no attributes were selected upstream).
  if (!is.na(df_res$score[1])) {
    df_res$score = round((df_res$score / max(df_res$score)) * 100, 2)
  }
  return(df_res)
}
|
3875fcc51dc986f9ea1036938b9ada5808d87626
|
b84aaa22d48a8e42455817f9fd3becc6b8864b04
|
/ch_02/ex_01_sept_11.R
|
182e865260eb1e80981f8c71beb8a1c0b29f90f2
|
[
"MIT"
] |
permissive
|
bmoretz/Time-Series-Forecasting
|
87b545a6bf8d920d4d14868c2dc104a815a0490a
|
92ed29e42e9cb9ee6d6a5340412456b38ea6969c
|
refs/heads/master
| 2020-05-24T23:01:40.561097
| 2019-06-06T21:03:35
| 2019-06-06T21:03:35
| 187,505,956
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,740
|
r
|
ex_01_sept_11.R
|
library(data.table)
library(XLConnect)
library(forecast)
library(xts)
# Apply FUN to the listed columns of a data.table, converting them in place
# (by reference), and return the table; the trailing [] makes the result
# print visibly when called at top level.
convert_col_types <- function(dt, cols, FUN) {
  # NOTE(review): the result of this check is discarded and is_data.table()
  # does not stop on failure -- assert_is_data.table() may have been intended.
  assertive::is_data.table(dt)
  dt[, (cols) := lapply(.SD, FUN), .SDcols = cols][]
}
# Coerce x to character, mapping any value listed in na.strings ("NA" and
# "NULL" by default) to NA_character_. Matching positions are temporarily
# set to 0 so the coercion itself cannot introduce spurious values.
as.char <- function(x, na.strings = c("NA", "NULL")) {
  is_missing <- x %in% na.strings
  out <- as.character(replace(x, is_missing, 0))
  out[is_missing] <- NA_character_
  out
}
# Coerce x to numeric, mapping any value listed in na.strings ("NA" and
# "NULL" by default) to NA_real_. Matching positions are temporarily set to
# 0 so as.numeric() never warns about them.
as.num <- function(x, na.strings = c("NA", "NULL")) {
  is_missing <- x %in% na.strings
  out <- as.numeric(replace(x, is_missing, 0))
  out[is_missing] <- NA_real_
  out
}
# --- Load & tidy the Sept-11 travel workbook --------------------------------
source.path <- "datasets/Sept11Travel.xls"
travel.wb <- loadWorkbook(source.path)
travel.data <- as.data.table(readWorksheet(travel.wb, "Sheet1"))
colnames(travel.data) <- c("Date", "Air", "Rail", "Auto")
travel.data$Date <- as.Date(travel.data$Date)
col.numeric <- c("Air", "Rail", "Auto")
travel.data <- convert_col_types(travel.data, col.numeric, as.numeric)
# Index the table by its Date column as an xts object.
travel.data <- xts(travel.data, travel.data$Date)
rm(source.path)
rm(travel.wb)
head(travel.data)
min(travel.data$Date)
max(travel.data$Date)
# Travel Data
# Monthly series Jan 1990 - Jan 2004 for each travel mode, each with a
# quadratic trend line fitted via forecast::tslm and overlaid on the plot.
par(mfrow = c(3, 1))
airline.ts <- ts(travel.data$Air, start = c(1990, 1), end = c(2004, 1), frequency = 12)
airline.lm <- tslm(airline.ts ~ trend + I(trend ^ 2))
plot(airline.ts, xlab = "Time", ylab = "Airline Travel", bty = "l")
lines(airline.lm$fitted, lwd = 2)
rail.ts <- ts(travel.data$Rail, start = c(1990, 1), end = c(2004, 1), frequency = 12)
rail.lm <- tslm(rail.ts ~ trend + I(trend ^ 2))
plot(rail.ts, xlab = "Time", ylab = "Rail Travel", bty = "l")
lines(rail.lm$fitted, lwd = 2)
auto.ts <- ts(travel.data$Auto, start = c(1990, 1), end = c(2004, 1), frequency = 12)
auto.lm <- tslm(auto.ts ~ trend + I(trend ^ 2))
plot(auto.ts, xlab = "Time", ylab = "Auto Travel", bty = "l")
lines(auto.lm$fitted, lwd = 2)
# Yearly Avg. (ignore seasonality)
# Aggregate each series to yearly means using the xts year endpoints.
ep <- endpoints(travel.data, on = "years")
travel.yearly <- period.apply(travel.data, ep, mean)
par(mfrow = c(3, 1))
plot(travel.yearly$Air)
plot(travel.yearly$Rail)
plot(travel.yearly$Auto)
# Log scale
# Same three trend plots on a log y-axis.
airline.ts <- ts(travel.data$Air, start = c(1990, 1), end = c(2004, 1), frequency = 12)
airline.lm <- tslm(airline.ts ~ trend + I(trend ^ 2))
plot(airline.ts, xlab = "Time", ylab = "Airline Travel", bty = "l", log = "y")
lines(airline.lm$fitted, lwd = 2)
rail.ts <- ts(travel.data$Rail, start = c(1990, 1), end = c(2004, 1), frequency = 12)
rail.lm <- tslm(rail.ts ~ trend + I(trend ^ 2))
plot(rail.ts, xlab = "Time", ylab = "Rail Travel", bty = "l", log = "y")
lines(rail.lm$fitted, lwd = 2)
auto.ts <- ts(travel.data$Auto, start = c(1990, 1), end = c(2004, 1), frequency = 12)
auto.lm <- tslm(auto.ts ~ trend + I(trend ^ 2))
plot(auto.ts, xlab = "Time", ylab = "Auto Travel", bty = "l", log = "y")
lines(auto.lm$fitted, lwd = 2)
|
333957d68b58647d96b2041d7f2a93a76d71f289
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615848614-test.R
|
cfd91854dbddc5555ea40069fc6196315c49abe6
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
1615848614-test.R
|
# Auto-generated fuzz/valgrind regression test: call meteor:::ET0_Makkink
# with extreme / degenerate numeric inputs (empty vectors, NaN, subnormals)
# and print the structure of whatever comes back. The values are fuzzer
# output, not meaningful meteorology.
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(4.46390936931362e+256, -6.99993544070352e-281, -3.63875683405274e+101, 5.6464292943395e-141, NaN, 7.27044868124648e-308, 0, 0, 0, 0, 0, 0), temp = c(1.81037701089217e+87, 2.35219322332418e-312, 1.34680195206491e-20, 2.16562581831091e+161, -9.78089879828831e+20, -1.30547847812586e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 8.18547651796993e+51, 2.65180635871983e+59, 5.62050698887452e-104, 7.11278005790805e-305, 8.84662638409251e-160, 1.34680202022251e-20, 2.16562581831091e+161, -1.51345790188863e+21, 8.64563305661499e-217, 8.64562743173829e-217, 8.34238407686285e+270))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
e76931a11111a621147e3084a4d665da2810856e
|
6b35d0783e6d5dd59e89216218e8d1feb25d281b
|
/tools.R
|
55ae7b7d1d64a7270edcee876b459090db267daa
|
[] |
no_license
|
commfish/uw-fish559
|
c11a56e6085e0263b541b96620e5f169a4420e03
|
3fccd3547f97a59d5241a234aa1bc2869f8cd5eb
|
refs/heads/master
| 2020-04-02T07:56:43.880613
| 2018-11-13T22:53:43
| 2018-11-13T22:53:43
| 154,221,956
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,011
|
r
|
tools.R
|
# library(devtools)
# https://github.com/kaskr/TMB_contrib_R
# devtools::install_github("kaskr/TMB_contrib_R/TMBhelper")
library("TMBhelper")
# devtools::install_github("kaskr/TMB_contrib_R/TMBdebug")
library("TMBdebug")
# devtools::install_github("kaskr/TMB_contrib_R/TMBphase")
# BUG FIX: "libraru" was a typo and made the whole script error at source time.
library("TMBphase")
### Find the line that exactly matches 'string' in 'file' and read a numeric
### vector from the line immediately after it. Returns what scan() returns
### (a double vector).
readVec <- function(string, file){
  header_line <- match(string, readLines(file))
  scan(file, quiet = TRUE, skip = header_line, nlines = 1)
}
### Find the line that exactly matches 'string' in 'file' and read a numeric
### matrix with 'nrow' rows starting on the next line. Dimnames are stripped
### so the result is a plain matrix.
readMat <- function(string, file, nrow){
  header_line <- match(string, readLines(file))
  values <- as.matrix(read.table(file, skip = header_line, nrows = nrow))
  dimnames(values) <- NULL
  values
}
## Function to read a basic AD Model Builder fit.
## Use for instance by:
##   simple.fit <- readFit('c:/admb/examples/simple')
## Then the object 'simple.fit' is a list containing sub-objects
## 'names', 'est', 'std', 'cor', and 'cov' for all model
## parameters and sdreport quantities.
## 'file' is the path prefix of the ADMB output (no extension); the
## matching .par and .cor files are read.
readFit <- function(file){
  ret <- list()
  # Tokens 6, 11 and 16 of the .par header carry the parameter count,
  # objective-function value and maximum gradient component.
  parfile <- as.numeric(scan(paste(file,'.par', sep=''), what='', n=16, quiet=TRUE)[c(6,11,16)])
  ret$nopar <- as.integer(parfile[1])
  ret$nlogl <- parfile[2]
  ret$maxgrad <- parfile[3]
  file <- paste(file,'.cor', sep='')
  lin <- readLines(file)
  # First two lines of the .cor file are header; the rest are parameter rows.
  ret$npar <- length(lin)-2
  ret$logDetHess <- as.numeric(strsplit(lin[1], '=')[[1]][2])
  # Operator precedence: lin[1:ret$npar+2] is lin[(1:npar)+2], i.e. rows 3
  # onward; split each on whitespace and drop empty tokens.
  sublin <- lapply(strsplit(lin[1:ret$npar+2], ' '),function(x)x[x!=''])
  ret$names <- unlist(lapply(sublin, function(x) x[2]))
  ret$est <- as.numeric(unlist(lapply(sublin, function(x) x[3])))
  ret$std <- as.numeric(unlist(lapply(sublin, function(x) x[4])))
  # Rebuild the symmetric correlation matrix from the triangular rows, then
  # scale by the outer product of standard deviations to get the covariance.
  ret$cor <- matrix(NA, ret$npar, ret$npar)
  corvec <- unlist(sapply(1:length(sublin), function(i)sublin[[i]][5:(4+i)]))
  ret$cor[upper.tri(ret$cor, diag=TRUE)] <- as.numeric(corvec)
  ret$cor[lower.tri(ret$cor)] <- t(ret$cor)[lower.tri(ret$cor)]
  ret$cov <- ret$cor*(ret$std%o%ret$std)
  return(ret)
}
|
661ec23ea8f22eb842929d0448aa52a218d4a129
|
c74f2fc1a2dfd7b62ba1a8608c35411845aa1a07
|
/dlbcl_multiomics_code/trim_out_unlabelled_data.R
|
359f3fa201573a5c711d7177325a690f2756edd1
|
[] |
no_license
|
pattwm16/dlbcl-multiomics
|
6fb0b18d70d7f76429591f7f0092831ba0437ce0
|
f61e934cb931b1198452c9a2e84c4386913dca3f
|
refs/heads/master
| 2023-04-13T06:16:08.693854
| 2021-04-20T18:39:40
| 2021-04-20T18:39:40
| 281,142,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,136
|
r
|
trim_out_unlabelled_data.R
|
# Drop unannotated rows from the gene-expression and methylation tables.
# NOTE(review): reads the globals wk.gene and wk.methy from the calling
# environment rather than taking them as arguments -- they must exist first.
# Returns a list with elements "Genes" (labelled gene rows) and
# "MethylSites" (methylation rows whose probe has a RefGene annotation).
trim_out_unlabelled_data <- function(){
  # Gene trimming ----
  # collect the unlabelled genes (empty string in column 2)
  unlabelled_genes <- wk.gene[which(wk.gene[,2] == ""),]
  # subset labelled genes
  labelled_genes <- subset(wk.gene, !wk.gene$id %in% unlabelled_genes$id)
  # Methylation Trimming ----
  # load in annotations from package (Bioconductor EPIC array annotation)
  library(IlluminaHumanMethylationEPICanno.ilm10b4.hg19)
  # get the annotations and retrieve gene names
  FullAnnot = getAnnotation(IlluminaHumanMethylationEPICanno.ilm10b4.hg19)
  FullAnnot = FullAnnot[,c("Name","UCSC_RefGene_Name")]
  # collect the unlabelled methyl sites (no RefGene name)
  unlabelled_methylsites <- FullAnnot[which(FullAnnot[,2] == ""),]$Name
  # Correction for 219 missing values from wk.methy: probes present in the
  # data but absent from the annotation are also treated as unlabelled.
  unlabelled_methylsites <- append(unlabelled_methylsites,
                                   setdiff(row.names(wk.methy), FullAnnot$Name))
  # subset labelled methylation sites
  labelled_methylsites <- subset(wk.methy, !rownames(wk.methy) %in%
                                   unlabelled_methylsites)
  return(list("Genes" = labelled_genes, "MethylSites" = labelled_methylsites))
}
|
d5b2d80639a346d82a71f82a99d99f8a66022afe
|
ec9b1ef2fe0fc9401411ecb93c8afb649a00a896
|
/man/get_history.Rd
|
f47041fff2c4d3394cb20c77b79e750df0a6c4f2
|
[] |
no_license
|
drew-walkerr/musichistoRy
|
6e0ce215e91f33d61335dd70a475da606e38d26e
|
155b3f50e9fae6a5648a031a922fb0114ab13937
|
refs/heads/main
| 2023-05-01T00:44:33.062564
| 2021-05-25T21:12:01
| 2021-05-25T21:12:01
| 319,133,453
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 366
|
rd
|
get_history.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_history.R
\name{get_history}
\alias{get_history}
\title{get_history}
\usage{
get_history(user)
}
\arguments{
\item{user}{Last.fm username}
}
\value{
A lastfm scrobble, or music listening history dataframe
}
\description{
This function pulls a last.fm user's music listening history
}
|
22d1de2c7ab0fffb0e584c71e5d5925b42c5a626
|
9d3a6ce50b4f5331c292a2b9fd8e54e3cf37e12a
|
/docs/docs/Programowanie/shiny/shiny3/server.R
|
5f4f7d650afbd67a419709c9f7634d149f9124ea
|
[] |
no_license
|
lukaszS21/Przewodnik
|
90a3c069b782c5ec0b8626ed3c1b7eb937db6b48
|
e10d978a1bee046c93af8b78e0b2593b06b11669
|
refs/heads/master
| 2022-03-24T19:06:15.092447
| 2020-01-12T21:06:04
| 2020-01-12T21:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
server.R
|
library(PogromcyDanych)
library(ggplot2)
# Shiny server for the serialeIMDB explorer: reactively subsets the chosen
# series, builds the episode drop-down, plots the rating trend and prints a
# linear model of rating vs episode number.
shinyServer(function(input, output, session) {
  # Reactive subset: rows of serialeIMDB for the series chosen in the UI.
  tylkoWybranySerial <- reactive({
    serialeIMDB[serialeIMDB$serial == input$wybranySerial, ]
  })
  # Episode drop-down, rebuilt whenever the chosen series changes.
  output$listaOdcinkow <- renderUI({
    serial <- tylkoWybranySerial()
    selectInput("odcinki", "Odcinki w serialu", as.character(serial$nazwa) )
  })
  # Scatter of rating by episode id (point size = vote count, colour =
  # season), optionally with a per-season linear trend line.
  output$trend = renderPlot({
    serial <- tylkoWybranySerial()
    pl <- ggplot(serial, aes(id, ocena, size=glosow, color=sezon)) +
      geom_point() + xlab("Numer odcinka")
    if (input$liniaTrendu) {
      pl <- pl + geom_smooth(se=FALSE, method="lm", size=3)
    }
    pl
  })
  # Summary of the rating ~ episode-id linear model for the chosen series.
  output$model = renderPrint({
    serial <- tylkoWybranySerial()
    summary(lm(ocena~id, serial))
  })
})
|
3a18cb251ceffb58503123b4b858316e4bae4892
|
be2e92a82d826011d74e3864c85a5b7ed295ac36
|
/plot4.R
|
3ffb591199073d3f6ae8bb9726f3f43cba4fe5bf
|
[] |
no_license
|
derwinmcgeary/Exploratory-Data-Analysis-Project-2
|
2550a87afefb9deb7eb1e5e0004c120cb86bca9c
|
be526dc2c90c939576d7fdcfefedb5da85bcb7ce
|
refs/heads/master
| 2016-09-06T13:26:49.026555
| 2015-07-26T01:55:21
| 2015-07-26T01:55:21
| 38,698,117
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
plot4.R
|
### Setup: dependencies and data download; the analysis proper starts below.
# I'm developing on Ubuntu, and I don't know what you're running, dear reader, so for compatibility...
if('downloader'%in%installed.packages()[,1]){
  library("downloader")
} else {
  install.packages("downloader")
  library("downloader")
}
library(ggplot2)
library(stringi)
# We don't want to download 29MB every time! If you already have the file, you can rename it to
# "NEI_data.zip" and put it in the working directory
dataurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
zipfile <- "NEI_data.zip"
if(file.exists(zipfile)) { print("We already have the file") } else {
  download(dataurl, zipfile ,mode="wb")
}
# Unzip yields the two RDS files: the source classification table (SCC) and
# the emissions table (NEI).
datafiles <- unzip(zipfile)
print(datafiles)
SCC <- readRDS(datafiles[1])
NEI <- readRDS(datafiles[2])
############ Analysis starts here ###################
## find all SCC values in SCC where Short.Name includes "Coal" (or "coal")
coalscc <- subset(SCC, stri_detect_regex(Short.Name,"Coal", case_insensitive=TRUE), select = c(SCC))
coalsccs <- coalscc$SCC
## subset NEI to include only the above SCC values, plus Emissions and Year
coalnei <- subset(NEI, SCC%in%coalsccs, select = c(Emissions,year))
## aggregate by year (total coal-related emissions per year)
coalnei_agg <- aggregate(coalnei$Emissions, by = list(Year = coalnei$year), FUN = sum)
colnames(coalnei_agg) <- c("Year", "Emissions")
## and plot: yearly totals with a fitted linear trend, saved to plot4.png
png(filename="plot4.png")
g <- qplot(Year, Emissions, data = coalnei_agg) + geom_smooth(method = "lm") + labs(title = "Coal Emissions in the USA")
print(g)
dev.off()
|
7bfd629751352aff14a5465d1dae89133a3b31ef
|
85f13d7969dfd03641a200efd19257947d5fe2a5
|
/man/tween_appear.Rd
|
a9d2a43b16cbcb04f70db985338da350562ba502
|
[] |
no_license
|
arturocm/tweenr
|
ae03a615d797f365cccb814f7e80909d80496e98
|
62ae8d9437c91c02a24e442ffc0705e9addf7473
|
refs/heads/master
| 2021-01-16T21:05:49.673910
| 2016-02-10T10:37:08
| 2016-02-10T10:37:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,345
|
rd
|
tween_appear.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tween_appear.R
\name{tween_appear}
\alias{tween_appear}
\title{Tween a data.frame of appearances}
\usage{
tween_appear(data, time, timerange, nframes)
}
\arguments{
\item{data}{A data.frame to tween}
\item{time}{The name of the column that holds the time dimension. This does
not need to hold time data in the strictest sense - any numerical type will
do}
\item{timerange}{The range of time to create the tween for. If missing it
will default to the range of the time column}
\item{nframes}{The number of frames to create for the tween. If missing it
will create a frame for each full unit in \code{timerange} (e.g.
\code{timerange = c(1, 10)} will give \code{nframes = 10})}
}
\value{
A data.frame as \code{data} but repeated \code{nframes} times and
with the additional columns \code{.age} and \code{.frame}
}
\description{
This function is intended for use when you have a data.frame of events at
different time points. This could be the appearance of an observation for
example. This function replicates your data \code{nframes} times and
calculates the duration of each frame. At each frame each row is
assigned an age based on the progression of frames and the entry point in
time for that row. A negative age means that the row has not appeared yet.
}
|
0c23dfb331ab8435e6ce42c2380888a03e2c61bb
|
b77c8a88b4f3172268531f5425374ae40db3fd6e
|
/docs/krigagem-universal.R
|
160ce1d238efaa518e1cb8de3f9cdfe4576131c7
|
[] |
no_license
|
AndrMenezes/si2016
|
a155ad7fbbf26bef73f9d57645a451a235db0ef6
|
7f3903cba0e2e89d73893f6055d8e3ee786d449b
|
refs/heads/master
| 2020-04-16T17:02:31.487751
| 2019-01-15T01:25:02
| 2019-01-15T01:25:02
| 165,756,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
r
|
krigagem-universal.R
|
# Universal kriging of the meuse lead data with gstat: fit a variogram with
# a first-order spatial trend (x + y) and predict onto the meuse grid.
library(gstat)
data("meuse.all");data("meuse.grid")
coordinates(meuse.all) <- ~ x+y
coordinates(meuse.grid) <- ~ x+y
# Empirical variogram of interpolated lead with coordinate trend removed.
vi <- variogram(lead.i ~ x + y, location = meuse.all, cutoff = 1300)
# Fit the best of exponential / spherical / Gaussian models.
vimf <- fit.variogram(vi, model = vgm(c("Exp", "Sph", "Gau")))
# Universal kriging predictions (trend in x and y) on the grid.
preditos_kgu <- krige(lead ~ x + y, loc=meuse.all, newdata=meuse.grid, model=vimf)
head(preditos_kgu)
|
9d6aedac8cf13832d3795a3020faeeaaa18f88b5
|
c746b5f40c118fb4f41a2d7cb88024738476d40f
|
/Model_Application/Single_Dataset/Huber_Lasso.R
|
6dc202a0e0df39b73861e93d242b0dc31fa60d34
|
[] |
no_license
|
multach87/Dissertation
|
5548375dac9059d5d582a3775adf83b5bc6c0be7
|
d20b4c6d3087fd878a1af9bc6e8543d2b94925df
|
refs/heads/master
| 2023-06-25T20:09:25.902225
| 2021-07-23T18:51:07
| 2021-07-23T18:51:07
| 281,465,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,753
|
r
|
Huber_Lasso.R
|
# libraries
library(glmnet)
# load a single simulated dataset for debugging
#data.full <- readRDS()
#full.data <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/")
debug.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/debug_data_091720.RData")
# use the 10th simulated dataset; each element holds a design matrix X and response Y
single.data <- debug.data[[10]]
X <- single.data[["X"]]
Y <- single.data[["Y"]]
# Winsorize: clamp every element of x to the interval [-a*sigma, a*sigma].
# Values inside the interval pass through unchanged.
winsorized <- function(x, a = 1.5, sigma = 1) {
  bound <- a * sigma
  clipped <- x
  clipped[x > bound] <- bound
  clipped[x < -bound] <- -bound
  clipped
}
# Huber-type robust lasso via iterative pseudo-data refitting with glmnet.
#   X: predictor matrix; Y: numeric response.
#   lambda.lasso.try: candidate lambda grid passed to cv.glmnet.
#   k: Huber tuning constant used when winsorizing residuals.
# Returns a list: coefficient (lasso betas), fit (X %*% beta), iter,
# sigma.est (mean squared residual on the pseudo-data), mpe / mpe.sd
# (prediction-error summaries against the original Y) and lambda.opt.
H.lasso <- function(X, Y, lambda.lasso.try, k = 1.5){
  n <- length(Y)
  Y.orgn <- Y
  # Initial lasso fit on the raw response to get starting residuals/scale.
  model.for.cv <- cv.glmnet(X, Y, family = "gaussian", lambda = lambda.lasso.try)
  lambda.lasso.opt <- model.for.cv$lambda.min
  model.est <- glmnet(X, Y, family = "gaussian", lambda = lambda.lasso.opt)
  fit.lasso <- predict(model.est, X, s = lambda.lasso.opt)
  res.lasso <- Y - fit.lasso
  sigma.init <- mad(Y - fit.lasso)   # robust scale estimate of the residuals
  beta.pre <- as.numeric(model.est$beta)
  Y.old <- Y
  tol <- 10
  n.iter <- 0
  # Iterate: winsorize residuals (Huber pseudo-data), refit the lasso, and
  # stop once the coefficient vector stabilises or after 100 iterations.
  while (tol > 1e-4 && n.iter < 100)
  {
    Y.new <- fit.lasso + winsorized(res.lasso, a = k, sigma = sigma.init)
    model.for.cv <- cv.glmnet(X, Y.new, family = "gaussian", lambda = lambda.lasso.try)
    model.est <- glmnet(X, Y.new, family = "gaussian", lambda = model.for.cv$lambda.min)
    fit.lasso <- predict(model.est, X, s = model.for.cv$lambda.min)
    res.lasso <- Y.new - fit.lasso
    beta.post <- as.numeric(model.est$beta)
    tol <- sum((beta.pre - beta.post)^2)
    n.iter <- n.iter + 1
    beta.pre <- beta.post
  }
  # BUG FIX: was mean(Y.new - (X %*% beta.post)^2), which squared the fitted
  # values instead of the residuals.
  # NOTE(review): X %*% beta.post omits the glmnet intercept (a0) -- confirm
  # that intercept-free fitted values are intended here and below.
  sigma.est <- mean((Y.new - X %*% beta.post)^2)
  Y.fit <- X %*% beta.post
  Y.res <- Y.new - Y.fit
  # BUG FIX: count of nonzero coefficients (per the original comment), not
  # the sum of the coefficient values.
  st.lad <- sum(beta.post != 0)
  # generate MSE and sd(MSE) for the model, with df adjusted for model size
  mse.lad <- sum((Y - Y.fit)^2) / (n - st.lad - 1)
  sd.mse.lad <- sd((Y - Y.fit)^2 / (n - st.lad - 1))
  # store the selected lambda
  lambda.lasso.opt <- model.est$lambda
  list(coefficient = beta.post,
       fit = Y.fit,
       iter = n.iter,
       sigma.est = sigma.est,
       mpe = mse.lad,
       mpe.sd = sd.mse.lad,
       lambda.opt = lambda.lasso.opt)
}
# run the Huber lasso on the debug dataset
set.seed(501)
# log-spaced lambda grid for cross-validation, from 0.01 to 1400
lambda.lasso.try <- exp(seq(log(0.01) , log(1400) , length.out = 100))
Huber.model <- H.lasso(X = X , Y = Y , lambda.lasso.try = lambda.lasso.try)
Huber.model
|
90ad134961817b60fec5b9c72fa7b2f1b463f001
|
5afcc3b02b7f4fe14e90f33b0a42bfc51b278e19
|
/ml-cv/data_mining/R/clast_selfdata.R
|
22c4e3f672e1a41e1b956da2124701c8cbfd4c79
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
zaqwes8811/micro-apps
|
c9e51fa7931c9d5625e1517bad7b1593104a50c0
|
bb1643562751dda70ae4f8bd632a171f1de05df5
|
refs/heads/master
| 2023-04-27T21:04:09.457192
| 2023-04-25T08:41:23
| 2023-04-25T08:41:23
| 11,820,285
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 210
|
r
|
clast_selfdata.R
|
# Read the semicolon-separated lab dataset (expects columns v1 and v2).
shop<- read.table("lab3_selfdata.csv", header=T, sep=";")
shop
plot(shop)
# Pairwise Manhattan (city-block) distances between the 5 observations,
# computed over the v1 and v2 coordinates into a 5x5 matrix.
distance=array(0,c(5,5))
for(i in 1:5) { for (j in 1:5){distance[i,j]=abs(shop$v1[i]-shop$v1[j])+abs(shop$v2[i]-shop$v2[j])}}
distance
|
2c54846c6c7ae8709a53f63309d4c4ebf3a6c001
|
b13c7ee60f5d7b78d182e1f7b54a06f9c8dd15e5
|
/man/height.Rd
|
f560fa087d9cd6bf02aa17ff70814419119f58a0
|
[
"MIT"
] |
permissive
|
tarakc02/rbst
|
214f5f00c543a3759547a1aaa0aade3171847056
|
09a364957b3e7ccf97f6202ba18d1d91cc402f32
|
refs/heads/master
| 2021-01-10T02:48:42.363196
| 2016-03-08T18:33:11
| 2016-03-08T18:33:11
| 44,547,420
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 273
|
rd
|
height.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/height.R
\name{height}
\alias{height}
\title{The height of a tree}
\usage{
height(tree)
}
\arguments{
\item{tree}{A \code{bst}}
}
\description{
Used for confirming balance and testing
}
|
32c489c3661f13562965d9c726bae0a1f224190d
|
3e6bea01f47934e55bda80a6e796da59eacb8ba7
|
/R/distance_gen.R
|
29e04926d52b2b1e813ee01fe33b0ada3943bf28
|
[] |
no_license
|
nathansam/MSc-diss-code
|
b581c411b773d435d7f00bc65624712fb77d7c4d
|
1f3f4af207bc840e0056fa3027567874621b7aed
|
refs/heads/master
| 2021-05-21T01:01:15.706062
| 2020-04-02T14:27:34
| 2020-04-02T14:27:34
| 252,478,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
distance_gen.R
|
#' DistanceGen
#' @description Generates a distance matrix from a a transcriptomics dataset.
#' @param dataset A transcriptomics dataset. Preferably filtered first. First
#' columns should be gene names. All other columns should be expression levels.
#' @param metric The distance metric to be used to calculate the distances
#' between genes. See parallelDist::parDist for all accepted arguments. Also
#' allows the option of 'abs.correlation'. Not used if a distance matrix is
#' provided.
#' @param nthreads The number of threads to be used for parallel computations.
#' If NULL then the maximum number of threads available will be used.
#' @examples
#' a.filter <- AnovaFilter(Laurasmappings)
#' distance <- DistanceGen(a.filter, metric='abs.correlation')
#'
#' @export
DistanceGen <- function(dataset, metric = "euclidean", nthreads = NULL) {
  # Reduce replicates to the median expression at each timepoint first.
  dataset <- CircadianTools::MedList(dataset, nthreads = nthreads)
  # Default to every available core when no thread count was given.
  if (is.null(nthreads) == TRUE) {
    nthreads <- parallel::detectCores()
  }
  if (metric == "abs.correlation") {
    # Absolute-correlation distance is handled by a dedicated helper.
    distance <- AbsCorDist(dataset)
  } else{
    # Calculate the distance matrix in parallel for any parDist metric.
    distance <- parallelDist::parDist(dataset, method = metric,
                                      threads = nthreads)
  }
  return(distance)
}
|
78fbc75f5be24d4201538e00fbca15398307b655
|
1dc0ab4e2b05001a5c9b81efde2487f161f800b0
|
/methods/dknn/dknn.R
|
92c3c165d227eaa1273981c115fc7099356ce6df
|
[] |
no_license
|
noeliarico/knnrr
|
efd09c779a53e72fc87dc8c0f222c0679b028964
|
9f6592d1bbc1626b2ea152fbd539acfe9f9a5ab3
|
refs/heads/master
| 2020-06-01T02:44:34.201881
| 2020-03-13T13:30:52
| 2020-03-13T13:30:52
| 190,601,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,607
|
r
|
dknn.R
|
# Custom caret model specification for a distance-based k-NN classifier.
# The list follows caret's custom-model contract: `loop`/`grid` control the
# tuning grid, `fit` trains, `predict` scores (with submodel reuse so each
# distance metric is fit once and reused for all smaller k), and the
# remaining entries are caret metadata hooks.
# NOTE(review): dknnf, dknnfTrain and predict_for_k are defined elsewhere in
# the package -- their contracts are assumed here.
dknn <- list(
  label = "k-Nearest Neighbors",
  library = NULL,
  # Collapse the tuning grid: fit only at the largest k per distance metric
  # and recover smaller k values as submodels at prediction time.
  loop = function(grid) {
    cat("---------------------------------------------------\n")
    cat("---------------------------------------------------\n\n")
    cat("-- Grid -- \n")
    print(grid)
    # Only one model for each ranking rule, so we keep only the max value of k
    loop <- grid[grid$k == max(grid$k), , drop = FALSE]
    cat("\n-- Loop -- \n")
    print(loop)
    # NOTE(review): the list is sized by unique(loop$distance) but indexed by
    # 1:nrow(loop) below -- confirm those always coincide.
    submodels <- vector(mode = "list", length = length(unique(loop$distance)))
    for (i in 1:nrow(loop)) { # for each of the main models
      submodels_i <- grid[grid$k != max(grid$k), , drop = FALSE]
      model_distance <- loop[i,]$distance
      submodels_i <- submodels_i %>% filter(distance == model_distance)
      submodels[[i]] <- submodels_i
    }
    cat("\n-- Submodels -- \n")
    print(submodels)
    cat("---------------------------------------------------\n")
    cat("---------------------------------------------------\n\n")
    list(loop = loop, submodels = submodels)
  },
  type = c("Classification"),
  # Tuning parameters caret will search over.
  parameters = data.frame(
    parameter = c("k",
                  "distance",
                  "ties",
                  "verbose",
                  "developer"),
    class = c("numeric",
              "numeric",
              "character",
              "logical",
              "logical"),
    label = "#Neighbors"
  ),
  # Default tuning grid: k in 1..3 crossed with the candidate distance
  # metrics; ties broken randomly, debug flags off.
  grid = function(x, y, len = NULL, search = "grid") {
    ks <- c(1, 2, 3)
    #distances <- c("manhattan", "euclidean", "maximum", "0.5", "0.25")
    #distances <- c("nominal_add", "nominal_avg")
    distances <- c("jaccard", "jaccard_add", "jaccard_avg", "nominal_add", "nominal_avg", "gower")
    # ties <- c("best",
    #           "prob_all",
    #           "prob_ties",
    #           "randomly",
    #           "tthreshold")
    ties <- "randomly"
    if (search == "grid") {
      out <- expand.grid(
        k = ks,
        distance = distances,
        ties = ties,
        developer = FALSE,
        verbose = FALSE
      )
    } else {
      # Random search: sample k values, stepped by the class count for factors.
      by_val <- if (is.factor(y))
        length(levels(y))
      else
        1
      out <-
        data.frame(k = sample(
          seq(1, floor(nrow(x) / 3), by = by_val),
          size = len,
          replace = TRUE
        ))
    }
    out
  },
  # Train on one row of the tuning grid. Regression is not supported.
  fit = function(x, y, wts, param, lev, last, classProbs, ...) {
    if (is.factor(y))
    {
      dknnf(
        #as.matrix(x),
        x,
        y,
        k = param$k,
        distance = param$distance,
        ties = param$ties,
        developer = param$developer,
        verbose = param$verbose,
        ...
      )
    } else {
      # NOTE(review): stop() after knnreg() is unreachable-in-effect; the
      # regression branch appears intentionally disabled.
      knnreg(as.matrix(x), y, k = param$k, ...)
      stop("error, reg not supported")
    }
  },
  # Predict with the fitted model; the ranking profile (distances) is built
  # once and reused for every submodel k.
  predict = function(modelFit, newdata, submodels = NULL) {
    if (modelFit$problemType == "Classification")
    {
      cat("\n --> Create profile of rankings... distance = ", as.character(modelFit$distance), "\n")
      argList <- list(
        train = modelFit$learn$X,
        test = newdata,
        cl = modelFit$learn$y,
        k = modelFit$k,
        distance = modelFit$distance,
        ties = modelFit$ties,
        developer = modelFit$developer,
        verbose = modelFit$verbose
      )
      output <- do.call("dknnfTrain",
                        argList)
      por <- output$distances
      cl <- output$cl
      # cat("--> Predict --> The rankings obtained for each instance are: \n")
      #print(por)
      # out <- predict.knn4(
      #   modelFit,
      #   newdata,
      #   type = "class",
      #   k = model_k,
      #   rr = model_r,
      #   atttype = modelFit$atttype,
      #   developer = modelFit$developer
      # )
      out <- predict_for_k(por, cl, modelFit$ties, modelFit$k)
      if (!is.null(submodels)) {
        # caret expects a list: main-model predictions first, then one entry
        # per submodel k, all computed from the same ranking profile.
        tmp <- out
        out <-
          vector(mode = "list", length = nrow(submodels) + 1)
        out[[1]] <- tmp
        for (j in seq(along = submodels$k)) {
          out[[j + 1]] <- predict_for_k(por, cl, submodels$ties[j], submodels$k[j])
        }
      } else {
        cat("Is null submodels\n")
      }
    } else {
      out <- predict(modelFit, newdata)
    }
    out
  },
  predictors = function(x, ...)
    colnames(x$learn$X),
  tags = "Prototype Models",
  prob = function(modelFit, newdata, submodels = NULL)
    predict(modelFit, newdata, type = "prob"),
  levels = function(x)
    levels(x$learn$y),
  # Sort tuning results by the first column, descending.
  sort = function(x)
    x[order(-x[, 1]), ]
)
|
bf8b47a6053277ee07a942369bc45a46c15d6180
|
ea178de2d1926451fd0549b3d7c95c17804f8230
|
/cs573/code/logistic(1).R
|
197a59dcc3e7602876ee6f5584f2fc0fbde07596
|
[] |
no_license
|
jdavis/college-spring-2014
|
fe40aebef73b832edf49d426d1938673a170112d
|
20d8234b160e0640aadb299dd30ffe3db85fcac8
|
refs/heads/master
| 2016-09-05T23:10:44.832167
| 2014-05-11T03:22:35
| 2014-05-11T03:22:35
| 15,485,826
| 8
| 13
| null | null | null | null |
UTF-8
|
R
| false
| false
| 993
|
r
|
logistic(1).R
|
household <- read.table("household.dat",header=TRUE,sep=" ")
m<-glm(hownership~income,data=household,family=binomial)
m
# Plot the logistic regression result
library(ggplot2)
c <- ggplot(household, aes(y=hownership, x=income))
c + stat_smooth(method="glm", family="binomial",se=FALSE,lwd=1) +
geom_point(cex=5)
# var-covar matrix of the estimated parameters
vcov(m)
# LACK OF FIT
# Now fit the NULL model m0
m0<-glm(hownership~1,data=household,family=binomial())
anova(m0,m,test="LRT")
#predicting an observation
c<-predict(m,data.frame(income=50000),type=c("response"))
# confusion matrix
pred<-predict(m,data.frame(income=household$income),type=c("response"))
# Make decision
predclass <- rep(0,dim(household)[1])
predclass[pred>=.5] = 1
# Make matrix
table(household$hownership,predclass)
# Make a plot of the classifier
plot(household$income,household$hownership,xlab="Income", ylab="Class labels")
cut<- -m$coeff[[1]]/m$coeff[[2]]
abline(v=cut)
|
4c6d3acd5765ccc74d93de8ae32bd2a38db4f1e2
|
a22942f00085e88243f016c1c7b7abfefdeba66a
|
/rrProject1.R
|
e73df9e12f64565d26bd0c05613089ad28712170
|
[] |
no_license
|
ajc2357/rrProject1
|
7ca4a769ffa8446a301c6b027579aa6211dd2134
|
7d0b9c965390f6a0dac7f7762a5bd54e636d0f68
|
refs/heads/master
| 2020-06-03T04:03:56.135865
| 2014-07-20T03:10:24
| 2014-07-20T03:10:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 804
|
r
|
rrProject1.R
|
library(stats)
data = read.csv("./rrProject1/activity.csv", header = TRUE) ## loads data --> takes a while
data$date = as.Date(data$date, "%Y-%m-%d")
stepsbyday = aggregate(steps ~ date, data = data, sum)
###Make a histogram of the total number of steps taken each day
hist(stepsbyday$steps)
### Calculate and report the mean and median total number of steps taken per day
mean(stepsbyday$steps, na.rm = TRUE) ## 10766.19
median(stepsbyday$steps, na.rm = TRUE) ## 10765
### Make a time series plot (i.e. type = "l") of the 5-minute interval (x-axis)
### and the average number of steps taken, averaged across all days (y-axis)
plot(stepsbyday$interval, data$steps, type = "l")
### Which 5-minute interval, on average across all the days in the dataset,
### contains the maximum number of steps?
|
bd8bc269d2dc12c350e0769c17a390cb32e8c685
|
e723931c888199b817a1efa2700b07977926cb4d
|
/SocialMediaLab/R/CreateBimodalNetwork.facebook.R
|
f07637fab693046df38c57539718835ee93d8439
|
[] |
no_license
|
manlius/SocialMediaLab
|
bb85559266b559d56d547b11855da79ddfc37acf
|
24d506afcbc8fd28a2393949461e039f5aad5e24
|
refs/heads/master
| 2021-01-12T22:34:14.204377
| 2016-03-09T00:54:22
| 2016-03-09T00:54:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,371
|
r
|
CreateBimodalNetwork.facebook.R
|
#' @export
CreateBimodalNetwork.facebook <-
function(x,writeToFile,removeTermsOrHashtags, ...)
{
if (missing(writeToFile)) {
writeToFile <- FALSE # default = not write to file
}
if (!missing(removeTermsOrHashtags)) {
removeTermsOrHashtags <- as.vector(removeTermsOrHashtags) #coerce to vector... to be sure
}
if (missing(removeTermsOrHashtags)) {
removeTermsOrHashtags <- "foobar"
}
dataCombinedUNIQUE <- x # match the variable names (this must be used to avoid warnings in package compilation)
# Warn the user if they are trying to create a bimodal network
# using TEMPORAL data (i.e. it might work, but could be compatibility issues)
if (inherits(dataCombinedUNIQUE,"temporal")) {
cat("\nERROR. Attempting to use dynamic data to create bimodal network. Please use the 'dynamic=FALSE' argument when collecting data.\n")
return()
}
#EnsurePackage("igraph")
cat("\nCreating Facebook bimodal network...\n")
# make a vector of all the unique actors in the network1
usersVec <- rep(c("User"),length(unique(dataCombinedUNIQUE$from)))
postsVec <- rep(c("Post"),length(unique(dataCombinedUNIQUE$to)))
usersAndPostsVec <- c(usersVec,postsVec)
actors <- data.frame(name=unique(factor(c(as.character(unique(dataCombinedUNIQUE$from)),as.character(unique(dataCombinedUNIQUE$to))))),type=usersAndPostsVec)
# make a dataframe of the relations between actors
# we need a dataframe here because igraph needs it AFAIK
relations <- data.frame(from=dataCombinedUNIQUE$from,to=dataCombinedUNIQUE$to,relationship=dataCombinedUNIQUE$relationship,weight=dataCombinedUNIQUE$edgeWeight)
# construct a graph
g <- graph.data.frame(relations, directed=TRUE, vertices=actors)
# Make the node labels play nice with Gephi
V(g)$label <- V(g)$name
if (writeToFile=="TRUE" | writeToFile=="true" | writeToFile=="T" | writeToFile==TRUE) {
# Output the final network to a graphml file, to import directly into Gephi
currTime <- format(Sys.time(), "%b_%d_%X_%Y_%Z")
currTime <- gsub(":","_",currTime)
write.graph(g,paste0(currTime,"_FacebookBimodalNetwork.graphml"),format="graphml")
cat("Facebook bimodal network was written to current working directory, with filename:\n")
cat(paste0(currTime,"_FacebookBimodalNetwork.graphml"))
}
cat("\nDone!\n") ### DEBUG
flush.console()
return(g)
}
|
bf3f8f5791867f03ea3a2ff5878f48c9978ba5f1
|
c2361d8de7354f75fccdd5f2b6099d42ffb4fd41
|
/man/tidy.felm.Rd
|
abd1493a5d264fed022416d70bc62306572d4fb1
|
[
"MIT"
] |
permissive
|
tidymodels/broom
|
6080795cc9d8c0f304998224c87983dc6bc7a251
|
a579b0dcfc9f8feedb4e937bf336478c288852cc
|
refs/heads/main
| 2023-08-22T09:35:23.855400
| 2023-07-27T21:23:32
| 2023-07-27T21:23:32
| 23,932,217
| 707
| 210
|
NOASSERTION
| 2023-09-06T20:10:29
| 2014-09-11T19:17:04
|
R
|
UTF-8
|
R
| false
| true
| 4,282
|
rd
|
tidy.felm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lfe-tidiers.R
\name{tidy.felm}
\alias{tidy.felm}
\alias{felm_tidiers}
\alias{lfe_tidiers}
\title{Tidy a(n) felm object}
\usage{
\method{tidy}{felm}(
x,
conf.int = FALSE,
conf.level = 0.95,
fe = FALSE,
se.type = c("default", "iid", "robust", "cluster"),
...
)
}
\arguments{
\item{x}{A \code{felm} object returned from \code{\link[lfe:felm]{lfe::felm()}}.}
\item{conf.int}{Logical indicating whether or not to include a confidence
interval in the tidied output. Defaults to \code{FALSE}.}
\item{conf.level}{The confidence level to use for the confidence interval
if \code{conf.int = TRUE}. Must be strictly greater than 0 and less than 1.
Defaults to 0.95, which corresponds to a 95 percent confidence interval.}
\item{fe}{Logical indicating whether or not to include estimates of
fixed effects. Defaults to \code{FALSE}.}
\item{se.type}{Character indicating the type of standard errors. Defaults to
using those of the underlying felm() model object, e.g. clustered errors
for models that were provided a cluster specification. Users can override
these defaults by specifying an appropriate alternative: "iid" (for
homoskedastic errors), "robust" (for Eicker-Huber-White robust errors), or
"cluster" (for clustered standard errors; if the model object supports it).}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Two exceptions here are:
\itemize{
\item \code{tidy()} methods will warn when supplied an \code{exponentiate} argument if
it will be ignored.
\item \code{augment()} methods will warn when supplied a \code{newdata} argument if it
will be ignored.
}}
}
\description{
Tidy summarizes information about the components of a model.
A model component might be a single term in a regression, a single
hypothesis, a cluster, or a class. Exactly what tidy considers to be a
model component varies across models but is usually self-evident.
If a model has several distinct types of components, you will need to
specify which components to return.
}
\examples{
\dontshow{if (rlang::is_installed("lfe")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# load libraries for models and data
library(lfe)
# use built-in `airquality` dataset
head(airquality)
# no FEs; same as lm()
est0 <- felm(Ozone ~ Temp + Wind + Solar.R, airquality)
# summarize model fit with tidiers
tidy(est0)
augment(est0)
# add month fixed effects
est1 <- felm(Ozone ~ Temp + Wind + Solar.R | Month, airquality)
# summarize model fit with tidiers
tidy(est1)
tidy(est1, fe = TRUE)
augment(est1)
glance(est1)
# the "se.type" argument can be used to switch out different standard errors
# types on the fly. In turn, this can be useful exploring the effect of
# different error structures on model inference.
tidy(est1, se.type = "iid")
tidy(est1, se.type = "robust")
# add clustered SEs (also by month)
est2 <- felm(Ozone ~ Temp + Wind + Solar.R | Month | 0 | Month, airquality)
# summarize model fit with tidiers
tidy(est2, conf.int = TRUE)
tidy(est2, conf.int = TRUE, se.type = "cluster")
tidy(est2, conf.int = TRUE, se.type = "robust")
tidy(est2, conf.int = TRUE, se.type = "iid")
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[=tidy]{tidy()}}, \code{\link[lfe:felm]{lfe::felm()}}
Other felm tidiers:
\code{\link{augment.felm}()}
}
\concept{felm tidiers}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} with columns:
\item{conf.high}{Upper bound on the confidence interval for the estimate.}
\item{conf.low}{Lower bound on the confidence interval for the estimate.}
\item{estimate}{The estimated value of the regression term.}
\item{p.value}{The two-sided p-value associated with the observed statistic.}
\item{statistic}{The value of a T-statistic to use in a hypothesis that the regression term is non-zero.}
\item{std.error}{The standard error of the regression term.}
\item{term}{The name of the regression term.}
}
|
029a41935ae9f656f748c2f794ac8b628b0827f7
|
0ea2ec1bf5cb593e64d04223852322c8bf716a58
|
/clients/r/generated/R/install_status.R
|
ae2e5218b73335e189d54b1881a716ed0637fcb5
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem
|
ffbe967858cb5fe909d126872210fffc78893e31
|
49c1d653763b272b06823cdd1572d08efb29173f
|
refs/heads/main
| 2022-12-11T11:57:35.977016
| 2022-11-28T09:21:03
| 2022-11-28T09:21:03
| 61,857,466
| 40
| 23
|
Apache-2.0
| 2022-09-05T10:05:35
| 2016-06-24T04:45:15
|
Java
|
UTF-8
|
R
| false
| false
| 2,077
|
r
|
install_status.R
|
# Adobe Experience Manager (AEM) API
#
# Swagger AEM is an OpenAPI specification for Adobe Experience Manager (AEM) API
#
# The version of the OpenAPI document: 3.5.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title InstallStatus
#'
#' @description InstallStatus Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field status \link{InstallStatusStatus} [optional]
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
InstallStatus <- R6::R6Class(
'InstallStatus',
public = list(
`status` = NULL,
initialize = function(
`status`=NULL, ...
) {
local.optional.var <- list(...)
if (!is.null(`status`)) {
stopifnot(R6::is.R6(`status`))
self$`status` <- `status`
}
},
toJSON = function() {
InstallStatusObject <- list()
if (!is.null(self$`status`)) {
InstallStatusObject[['status']] <-
self$`status`$toJSON()
}
InstallStatusObject
},
fromJSON = function(InstallStatusJson) {
InstallStatusObject <- jsonlite::fromJSON(InstallStatusJson)
if (!is.null(InstallStatusObject$`status`)) {
statusObject <- InstallStatusStatus$new()
statusObject$fromJSON(jsonlite::toJSON(InstallStatusObject$status, auto_unbox = TRUE, digits = NA))
self$`status` <- statusObject
}
self
},
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`status`)) {
sprintf(
'"status":
%s
',
jsonlite::toJSON(self$`status`$toJSON(), auto_unbox=TRUE, digits = NA)
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
fromJSONString = function(InstallStatusJson) {
InstallStatusObject <- jsonlite::fromJSON(InstallStatusJson)
self$`status` <- InstallStatusStatus$new()$fromJSON(jsonlite::toJSON(InstallStatusObject$status, auto_unbox = TRUE, digits = NA))
self
}
)
)
|
383a32249007db170e25a384d4b76e8f5e9afbd9
|
4582eb19bfc245bbe20ffa305279fbb545e54e3b
|
/man/download_data-deprecated.Rd
|
3e3c26c15d51a4f1d58c69bf0d7faae5133288dc
|
[] |
no_license
|
edwindj/cbsodataR
|
a9045d130d9138fc40000b284b7a64e39bdd5af4
|
e66ceeccca5d62c03f54b44ed3b69d0feaacf7ef
|
refs/heads/master
| 2021-07-07T18:42:04.153201
| 2021-05-31T22:00:39
| 2021-05-31T22:00:39
| 34,798,294
| 31
| 13
| null | 2022-09-23T14:49:12
| 2015-04-29T14:24:37
|
R
|
UTF-8
|
R
| false
| true
| 1,318
|
rd
|
download_data-deprecated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download-data.R
\name{download_data-deprecated}
\alias{download_data-deprecated}
\alias{download_data}
\title{Gets all data from a cbs table.}
\usage{
download_data(
id,
path = file.path(id, "data.csv"),
...,
select = NULL,
typed = FALSE,
verbose = TRUE,
base_url = getOption("cbsodataR.base_url", BASE_URL)
)
}
\arguments{
\item{id}{of cbs open data table}
\item{path}{of data file, defaults to "id/data.csv"}
\item{...}{optional filter statements to select rows of the data,}
\item{select}{optional names of columns to be returned.}
\item{typed}{Should the data automatically be converted into integer and numeric?}
\item{verbose}{show the underlying downloading of the data}
\item{base_url}{optionally specify a different server. Useful for
third party data services implementing the same protocol.}
}
\description{
This method is deprecated in favor of \code{\link[=cbs_download_data]{cbs_download_data()}}.
}
\seealso{
Other download:
\code{\link{cbs_download_meta}()},
\code{\link{cbs_download_table}()}
Other data retrieval:
\code{\link{cbs_add_date_column}()},
\code{\link{cbs_add_label_columns}()},
\code{\link{cbs_extract_table_id}()},
\code{\link{cbs_get_data_from_link}()},
\code{\link{cbs_get_data}()}
}
|
8bbf21e26627cd445d85ef37c7c41566cf6068e1
|
2cc72a678154c4150e498d945f4fdbfd1008f832
|
/wifi_location.R
|
8d5fd2149d4dffc222a33f0e7cc1b515e3a0f958
|
[] |
no_license
|
mianhuang/DataAnalytics-R-Code
|
cc156c9e3a6a3af7d048d281f9338019a0b1b1b5
|
3a8033843f4b24f5c95cf66e75c632a9cdaa628d
|
refs/heads/master
| 2020-06-27T22:46:17.909511
| 2019-08-26T19:32:55
| 2019-08-26T19:32:55
| 200,073,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,737
|
r
|
wifi_location.R
|
# clear workspace
rm(list = ls())
# load Libraries: p_load can install,load, and update packages
if(require("pacman")=="FALSE"){
install.packages("pacman")
}
pacman::p_load(readr, caret, ggplot2, tidyverse, tidyr, dplyr, lubridate, plotly, C50, tibbletime, doParallel)
# find how many cores are on your machine
detectCores() # Result = Typically 4 to 6
# create Cluster with desired number of cores. Don't use them all! Your computer is running other processes.
cl <- makeCluster(4)
# register Cluster
registerDoParallel(cl)
# confirm how many cores are now "assigned" to R and RStudio
getDoParWorkers() # Result 2
# import training & validation datasets
trainingData_orig <- read_csv("trainingData.csv")
validData_orig <- read_csv("validationData.csv")
# make working copy of datasets
trainingData <- trainingData_orig
validData <- validData_orig
# remove repeated rows in datasets
trainingData <-distinct(trainingData)
validData <- distinct(validData)
# removing variables that are not needed: USERID, PHONEID, TIMESTAMP
trainingData[527:529] <- NULL
validData[527:529]<-NULL
# transform some variables to factor/numeric/datetime
trainingData[,523:526] <- lapply(trainingData[,523:526], as.factor)
trainingData[,521:522] <- lapply(trainingData[,521:522], as.numeric)
#trainingData$TIMESTAMP <- as_datetime(trainingData$TIMESTAMP, origin = "1970-01-01", tz="UTC")
# change value of WAPS= 100 (out of range value) to WAPS=-110
trainingData[,1:520] <- sapply(trainingData[,1:520],function(x) ifelse(x==100,-110,x))
summary(trainingData[1:10])
# identify and removing WAPS with zero variance (remove WAPS that has no detection)
nzv_train<-nearZeroVar(trainingData[1:520], saveMetrics=TRUE)
trainingData<-trainingData[-which(nzv_train$zeroVar==TRUE )]
# remove rows with all out of range WAP value
trainingData <- trainingData %>%
filter(apply(trainingData[1:312], 1, function(x)length(unique(x)))>1)
trainingData$LATITUDE<-NULL
trainingData$LONGITUDE<-NULL
# initial examination of the data.
barplot(table(trainingData$FLOOR[trainingData$BUILDINGID==2]))
barplot(table(trainingData$BUILDINGID))
hist(trainingData$LONGITUDE)
hist(trainingData$LATITUDE)
# subsetting data by building
bldg0 <- subset(trainingData,trainingData$BUILDINGID==0)
bldg1 <- subset(trainingData,trainingData$BUILDINGID==1)
bldg2 <- subset(trainingData,trainingData$BUILDINGID==2)
# add LOCATION column by merging FLOOR, BUILDINGID, SPACEID, & RELATIVEPOSITION
bldg0_loc<- unite(bldg0, "LOCATION", c(FLOOR, BUILDINGID, SPACEID, RELATIVEPOSITION))
bldg0_loc$LOCATION <- as.factor(bldg0_loc$LOCATION)
bldg1_loc<- unite(bldg1, "LOCATION", c(FLOOR, BUILDINGID, SPACEID, RELATIVEPOSITION))
bldg1_loc$LOCATION <- as.factor(bldg1_loc$LOCATION)
bldg2_loc<- unite(bldg2, "LOCATION", c(FLOOR, BUILDINGID, SPACEID, RELATIVEPOSITION))
bldg2_loc$LOCATION <- as.factor(bldg2_loc$LOCATION)
# set seed
set.seed(1)
# set up 10 fold cross validation
fitControl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
# create training & test set for each building
inTraining0 <- createDataPartition(bldg0_loc$LOCATION, p = .75, list = FALSE)
training0 <- bldg0_loc[inTraining0,]
testing0 <- bldg0_loc[-inTraining0,]
inTraining1 <- createDataPartition(bldg1_loc$LOCATION, p = .75, list = FALSE)
training1 <- bldg1_loc[inTraining1,]
testing1 <- bldg1_loc[-inTraining1,]
inTraining2 <- createDataPartition(bldg2_loc$LOCATION, p = .75, list = FALSE)
training2 <- bldg2_loc[inTraining2,]
testing2 <- bldg2_loc[-inTraining2,]
# training models
# KNN
system.time(knnFit0 <- train(LOCATION~., data = training0, method = "knn", trControl=fitControl))
system.time(knnFit1 <- train(LOCATION~., data = training1, method = "knn", trControl=fitControl))
system.time(knnFit2 <- train(LOCATION~., data = training2, method = "knn", trControl=fitControl))
b0predict_knn<-predict(knnFit0, testing0)
b0_ConfusionMatrix_knn<-confusionMatrix(b0predict_knn, testing0$LOCATION)
b0_ConfusionMatrix_knn
b1predict_knn<-predict(knnFit1, testing1)
b1_ConfusionMatrix_knn<-confusionMatrix(b1predict_knn, testing1$LOCATION)
b1_ConfusionMatrix_knn
b2predict_knn<-predict(knnFit2, testing2)
b2_ConfusionMatrix_knn<-confusionMatrix(b2predict_knn, testing2$LOCATION)
b2_ConfusionMatrix_knn
# C5.0
system.time(C50Fit0 <- train(LOCATION~., data = training0, method = "C5.0", trControl=fitControl))
system.time(C50Fit1 <- train(LOCATION~., data = training1, method = "C5.0", trControl=fitControl))
system.time(C50Fit2 <- train(LOCATION~., data = training2, method = "C5.0", trControl=fitControl))
b0predict_C50<-predict(C50Fit0, testing0)
b0_ConfusionMatrix_C50<-confusionMatrix(b0predict_C50, testing0$LOCATION)
b0_ConfusionMatrix_C50
b1predict_C50<-predict(C50Fit1, testing1)
b1_ConfusionMatrix_C50<-confusionMatrix(b1predict_C50, testing1$LOCATION)
b1_ConfusionMatrix_C50
b2predict_C50<-predict(C50Fit2, testing2)
b2_ConfusionMatrix_C50<-confusionMatrix(b2predict_C50, testing2$LOCATION)
b2_ConfusionMatrix_C50
# Decision Tree
system.time(rpartFit0 <- train(LOCATION~., data = training0, method = "rpart", tuneLength = 200, trControl=fitControl))
system.time(rpartFit1 <- train(LOCATION~., data = training1, method = "rpart", tuneLength = 200, trControl=fitControl))
system.time(rpartFit2 <- train(LOCATION~., data = training2, method = "rpart", tuneLength = 200, trControl=fitControl))
b0predict_rpart<-predict(rpartFit0, testing0)
b0_ConfusionMatrix_rpart<-confusionMatrix(b0predict_rpart, testing0$LOCATION)
b0_ConfusionMatrix_rpart
b1predict_rpart<-predict(rpartFit1, testing1)
b1_ConfusionMatrix_rpart<-confusionMatrix(b1predict_rpart, testing1$LOCATION)
b1_ConfusionMatrix_rpart
b2predict_rpart<-predict(rpartFit2, testing2)
b2_ConfusionMatrix_rpart<-confusionMatrix(b2predict_rpart, testing2$LOCATION)
b2_ConfusionMatrix_rpart
# Random Forest
system.time(RFfit0 <- train(LOCATION~., data=training0, method="rf", trcontrol=fitControl, tuneLength=5))
system.time(RFfit1 <- train(LOCATION~., data=training1, method="rf", trcontrol=fitControl, tuneLength=5))
system.time(RFfit2 <- train(LOCATION~., data=training2, method="rf", trcontrol=fitControl, tuneLength=5))
b0predict_RF<-predict(RFfit0, testing0)
b0_ConfusionMatrix_RF<-confusionMatrix(b0predict_RF, testing0$LOCATION)
b0_ConfusionMatrix_RF
b1predict_RF<-predict(RFfit1, testing1)
b1_ConfusionMatrix_RF<-confusionMatrix(b1predict_RF, testing1$LOCATION)
b1_ConfusionMatrix_RF
b2predict_RF<-predict(RFfit2, testing2)
b2_ConfusionMatrix_RF<-confusionMatrix(b2predict_RF, testing2$LOCATION)
b2_ConfusionMatrix_RF
# SVM
system.time(SVMfit0 <- train(LOCATION~., data = training0, method = "svmLinear", trControl=fitControl))
system.time(SVMfit1 <- train(LOCATION~., data = training1, method = "svmLinear", trControl=fitControl))
system.time(SVMfit2 <- train(LOCATION~., data = training2, method = "svmLinear", trControl=fitControl))
b0predict_SVM<-predict(SVMfit0, testing0)
b0_ConfusionMatrix_SVM<-confusionMatrix(b0predict_SVM, testing0$LOCATION)
b0_ConfusionMatrix_SVM
b1predict_SVM<-predict(SVMfit1, testing1)
b1_ConfusionMatrix_SVM<-confusionMatrix(b1predict_SVM, testing1$LOCATION)
b1_ConfusionMatrix_SVM
b2predict_SVM<-predict(SVMfit2, testing2)
b2_ConfusionMatrix_SVM<-confusionMatrix(b2predict_SVM, testing2$LOCATION)
b2_ConfusionMatrix_SVM
# compare models
b0Data <- resamples(list(C50=C50Fit0, KNN = knnFit0, rpart = rpartFit0))
summary(b0Data)
bwplot(b0Data)
dotplot(b0Data)
b1Data <- resamples(list(C50=C50Fit1, KNN = knnFit1, rpart = rpartFit1))
summary(b1Data)
bwplot(b1Data)
dotplot(b1Data)
b2Data <- resamples(list(C50=C50Fit2, KNN = knnFit2, rpart = rpartFit2))
summary(b2Data)
bwplot(b2Data)
dotplot(b2Data)
# Stop Cluster. After performing your tasks, stop your cluster.
stopCluster(cl)
|
f0bbdcccc6ad45bc4b1ec4fa798f0164ce793e4e
|
3819c5c65f13b185b8fb714d7349abfecb793a72
|
/R/F_PropensityFit.R
|
a697107b4c531731f46f7c4e1ab7d68ab1681330
|
[] |
no_license
|
cran/DynTxRegime
|
ed877579c6ffc6156fb6c84298a58d1db5940dff
|
9ecb35dfd9abf9617e0179d3d4d552dce22314e5
|
refs/heads/master
| 2023-06-25T03:37:01.776586
| 2023-04-25T13:50:11
| 2023-04-25T13:50:11
| 37,244,072
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,046
|
r
|
F_PropensityFit.R
|
# October 26, 2018
#' Class \code{PropensityFit}
#'
#' Class \code{PropensityFit} is a \code{TypedFit} identified as being
#' for a propensity regression step.
#'
#' @name PropensityFit-class
#'
#' @slot small A logical TRUE indicates that the smallest valued tx is
#' missing; FALSE indicates that the largest valued tx is missing
#' @slot levs A vector; the set of treatment options included in fit.
#'
#' @keywords internal
setClass("PropensityFit",
slots = c(small = "logical",
levs = "vector"),
contains = c("TypedFit", "TxInfoNoSubsets"))
##########
## GENERICS
##########
#' Complete a Propensity Regression Step
#'
#' Dispatches appropriate method for completing propensity regressions.
#'
#' @name newPropensityFit
#'
#' @param moPropen A modeling object
#' @param txObj A TxObj object
#' @param ... Any optional additional input.
#'
#' @keywords internal
setGeneric(name = ".newPropensityFit",
def = function(moPropen, txObj, ...) {
standardGeneric(f = ".newPropensityFit")
} )
#' Retrieve Propensity Regression Analysis
#'
#' For statistical methods that require a propensity regression analysis,
#' the value object returned by the modeling function(s) is retrieved.
#'
#' Methods are defined for all statistical methods implemented in DynTxRegime
#' that use propensity regression.
#'
#' @name propen
#'
#' @param object A value object returned by a statistical method of DynTxRegime.
#' @param ... Ignored.
#'
#' @usage
#' propen(object, ...)
#'
#' @exportMethod propen
setGeneric(name = "propen",
def = function(object, ...) { standardGeneric(f = "propen") })
##########
## METHODS
##########
#' Methods Available for Objects of Class \code{PropensityFit}
#'
#' Methods call equivalently named methods defined for \code{TypedFit}
#'
#' @name PropensityFit-methods
#'
#' @keywords internal
NULL
#' @rdname newPropensityFit
setMethod(f = ".newPropensityFit",
signature = c(moPropen = "modelObj",
txObj = "TxInfoNoSubsets"),
definition = function(moPropen, txObj, data, suppress) {
txName <- .getTxName(object = txObj)
fitResult <- try(expr = .newTypedFit(modelObj = moPropen,
data = data,
response = data[,txName],
type = "moPropen",
txObj = txObj,
suppress = suppress),
silent = TRUE)
if (is(object = fitResult, class2 = "try-error")) {
cat("converting response to factor and trying again\n")
fitResult <- tryCatch(expr = .newTypedFit(modelObj = moPropen,
data = data,
response = factor(x = data[,txName]),
type = "moPropen",
txObj = txObj,
suppress = suppress),
error = function(x){
print(x = x$message)
stop('unable to obtain propensity fit')
})
}
res <- new(Class = "PropensityFit",
"small" = moPropen@predictor@propenMissing == "smallest",
"levs" = as.character(x = .getSuperset(txObj)),
txObj,
fitResult)
return( res )
})
#' @rdname PropensityFit-methods
setMethod(f = "coef",
signature = c(object = "PropensityFit"),
definition = function(object, ...) {
return( coef(object = as(object = object,
Class = "TypedFit"), ...)$moPropen )
})
#' @rdname PropensityFit-methods
setMethod(f = "fitObject",
signature = c(object = "PropensityFit"),
definition = function(object, ...) {
return( fitObject(object = as(object = object,
Class = "TypedFit"), ...)$moPropen )
})
#' @rdname PropensityFit-methods
setMethod(f = "plot",
signature = c(x = "PropensityFit"),
definition = function(x, suppress=FALSE, ...) {
plot(x = as(object = x, Class = "TypedFit"),
suppress = suppress, ...)
})
#' @rdname PropensityFit-methods
setMethod(f = "predict",
signature = c(object = "PropensityFit"),
definition = function(object, ...) {
return( predict(object = as(object = object,
Class = "TypedFit"), ...))
})
#' Make Predictions for All Tx
#'
#' \code{.predictAll(object, newdata)}
#' predicts propensity for all tx options.
#' Returns a matrix of propensities predicted for all tx.
#'
#' @rdname PropensityFit-methods
setMethod(f = ".predictAll",
signature = c(object = "PropensityFit",
newdata = "data.frame"),
definition = function(object,
newdata,
suppress = TRUE) {
mm <- predict(object = as(object = object, Class = "TypedFit"),
newdata = newdata)
if (is.character(x = mm[1L])) {
stop("propensities returned as characters")
}
if (any(mm < -1.5e-8)) {
stop("cannot have negative probabilities")
}
if (any(mm > {1.0 + 1.5e-8})) {
stop("cannot have probabilities > 1")
}
if (!is.matrix(x = mm)) mm <- matrix(data = mm, ncol = 1L)
levs <- object@levs
if (ncol(x = mm) != length(x = levs)) {
correction <- 1.0 - rowSums(x = mm)
if (object@small) {
if (!suppress ) {
cat("assumed missing prediction for", levs[1L],"\n")
}
mm <- cbind(correction, mm)
} else {
if (!suppress ) {
cat("assumed missing prediction for",
levs[length(x = levs)],"\n")
}
mm <- cbind(mm, correction)
}
}
colnames(x = mm) <- levs
return( mm )
})
#' @rdname PropensityFit-methods
setMethod(f = "print",
signature = c(x = "PropensityFit"),
definition = function(x, ...) {
print(x = as(object = x, Class = "TypedFit"))
})
#' @rdname PropensityFit-methods
setMethod(f = "propen",
signature = c(object = "PropensityFit"),
definition = function(object, ...) {
return( fitObject(object = object) )
})
#' @rdname PropensityFit-methods
setMethod(f = "show",
signature = c(object = "PropensityFit"),
definition = function(object) {
show(object = as(object = object, Class = "TypedFit"))
})
#' @rdname PropensityFit-methods
setMethod(f = "summary",
signature = c(object = "PropensityFit"),
definition = function(object, ...) {
return( summary(object = as(object = object,
Class = "TypedFit"), ...)$moPropen )
})
|
18c78f98312a8a3c33fdc7e023f04c0c442c2f06
|
d87571c2e3f5c18b1cafee880ec17acc9c7a405f
|
/man/load.corpus.Rd
|
109a5513499d2f95eb4741825a24904213c3a3ca
|
[] |
no_license
|
mikekestemont/stylo
|
eb69a2b996c35ef69581df526ecb3f806be5fdd5
|
2092f4c2c12605cf1621a5367faf5a9aba8b921d
|
refs/heads/master
| 2021-01-18T00:53:46.496891
| 2014-02-25T08:16:30
| 2014-02-25T08:16:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 953
|
rd
|
load.corpus.Rd
|
\name{load.corpus}
\alias{load.corpus}
\title{Load text files}
\description{
Function for loading text files from a specified directory.
}
\usage{
load.corpus(files, corpus.dir = "")
}
\arguments{
\item{files}{a vector of file names.}
\item{corpus.dir}{a directory containing the text files to be loaded; if
not specified, the current working directory will be used.}
}
\value{
The function returns a variable (list), containing as elements the texts loaded.
}
\author{Maciej Eder}
\seealso{
\code{\link{stylo}}, \code{\link{classify}}, \code{\link{rolling.delta}},
\code{\link{oppose}}
}
\examples{
\dontrun{
# to load file1.txt and file2.txt, stored in the subdirectory my.files:
my.corpus = load.corpus(corpus.dir = "my.files",
files = c("file1.txt", "file2.txt") )
# to load all XML files from the current directory:
my.corpus = load.corpus(files = list.files(pattern="[.]xml$") )
}
}
%\keyword{text processing}
|
9b5e3352e92abbf7364a7154443cbabce3ae94e2
|
1a439dc569ec3025b76cb76ff9dd71fe6614ef88
|
/PaleoAnalyze/phasegram_rev.R
|
b1c67a44fcea22e9a07afe02e18df4b31573a323
|
[] |
no_license
|
seanyx/TimeSeries
|
f2ca38e1fcea182b3f43404e2b151df5d1c6127b
|
c671823c74fe14fd632b67890c13be39c54ba975
|
refs/heads/master
| 2016-09-05T13:59:12.509273
| 2014-08-12T17:55:55
| 2014-08-12T17:55:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,780
|
r
|
phasegram_rev.R
|
# Revised phasegram between two time series.
#
# Args:
#   data:     list of two aligned time series, each a data.frame with columns
#             $t (time) and $y (signal); list names are used in the plot title.
#   frange:   length-2 numeric, frequency range to scan.
#   nf:       number of frequency grid points (nf - 1 bands are analysed).
#   tw:       sliding time-window duration, in the units of $t.
#   toverlap: fractional overlap between consecutive windows (default 0.5).
#   M:        passed as N to fitPhase() — presumably a fit resolution; TODO confirm.
#   taper:    if TRUE, apply a 10% cosine taper (spec.taper) to both series first.
#
# Side effects: opens a new graphics device and draws a filled contour of the
# phase-shift measure over (time, frequency). Returns nothing useful.
#
# NOTE(review): bwfilter() and fitPhase() are external helpers not visible in
# this file — their exact contracts (filter order, fit output layout) are
# assumed from usage below and should be confirmed against their definitions.
phasegram_rev<-function(data,frange,nf,tw,toverlap=.5,M=50,taper=T) {
# revised phasegram of between two time series
# Optionally taper both series to reduce spectral leakage at the edges.
if (taper) {
data[[1]]$y=spec.taper(data[[1]]$y,p=.1)
data[[2]]$y=spec.taper(data[[2]]$y,p=.1)
}
na=names(data)
trange=range(data[[1]]$t)
N=length(data[[1]]$t)
# Window width in samples: number of points within the first tw time units.
twd=length(which(data[[1]]$t-data[[1]]$t[1]<=tw))
# Overlap in samples; windows start every (twd - t1) points.
t1=floor(twd*toverlap)
tind=seq(1,N-twd,by=(twd-t1))
# Frequency grid; band i spans [f[i], f[i+1]].
f=seq(frange[1],frange[2],length=nf)
# ef[i, j]: energy of band i within window j.
ef=matrix(nrow=(nf-1),ncol=length(tind))
mean1=mean(data[[1]]$y)
mean2=mean(data[[2]]$y)
# Total (mean-removed) energy of both series combined.
etotal=sum((data[[1]]$y-mean1)^2+(data[[2]]$y-mean2)^2)
# ef2[i]: whole-record energy of band i after filtering.
ef2=vector(length=nf-1)
# fp[i, j]: phase-shift measure for band i, window j (same shape as ef).
fp=ef
for (i in 1:(nf-1)) {
f2=c(f[i],f[i+1])
# Band-pass both series to the current frequency band.
datafil=bwfilter(data,cut=f2,type='pass',PLOT=F)
data1=datafil[[1]]
ef2[i]=sum((data1[[1]]$y-mean1)^2+(data1[[2]]$y-mean2)^2)
for (j in 1:length(tind)) {
data2=data1
# Extract the j-th sliding window from both filtered series.
data2[[1]]=data2[[1]][(tind[j]:(tind[j]+twd-1)),]
data2[[2]]=data2[[2]][(tind[j]:(tind[j]+twd-1)),]
ef[i,j]=sum((data2[[1]]$y-mean(data2[[1]]$y))^2+(data2[[2]]$y-mean(data2[[2]]$y))^2)
# Only fit a phase when both windows carry non-trivial variance;
# otherwise fp[i, j] stays NA (its initial value from the matrix).
if(sd(data2[[1]]$y)>1e-9 & sd(data2[[2]]$y)>1e-9) {
y=fitPhase(data2[[1]]$y,data2[[2]]$y,N=M,PLOT=F)
# y[[1]][1] looks like the fitted phase (in half-cycles) and y[[2]] a
# fit amplitude/quality weight — TODO confirm against fitPhase().
fp[i,j]=y[[2]]*(sin(y[[1]][1]*pi))^4
}
}
}
# Per-band energy summed over all windows.
ef1=apply(ef,MARGIN=1,sum)
# Energy normalised within each band, re-weighted by band power share
# (computed but currently unused — only plotted in the commented-out code).
u=ef*1/ef1*(ef2*etotal/sum(ef2))
#test weighed by power
# for (i in 1:(length(f)-1)) {
# fp[i,]=fp[i,]*sqrt(ef1[i])
# }
dev.new()
# Contour of the phase measure over window-centre time vs band frequency.
filled.contour(data[[1]]$t[tind+floor(twd/2)],f[-nf],t(fp),color=rainbow,ann=T,axes=T)
title(main=paste('Phase shift between',na[1],'and',na[2],'\n with time window',twd,'pt and',nf,'frequency intervals and time window overlap',toverlap,'via M2'))
#dev.new()
#filled.contour(data[[1]]$t[tind+floor(twd/2)],f[-nf],t(u),color=heat.colors,ann=T,axes=T)
#dev.new()
#plot(f[-nf],ef1,type='h',col='black',ann=F,axes=F)
}
|
14527d36ab8158d9bb118b07fb35fe563dc4c52b
|
fe3050e6aa6f14b5cb99d69709967bbbb29fcadf
|
/bigData.R
|
a66cd337f3c2520b4d8c918e8c0ce2bff70117fa
|
[] |
no_license
|
royemanuel/Metrics
|
1856a71dfb2ed04b7cf50f7b779bd86ebd41d5a9
|
2fa6d2f527b8eefff9f2d474d82eb3cf998a8106
|
refs/heads/master
| 2021-01-19T05:31:45.672280
| 2018-06-05T18:56:24
| 2018-06-05T18:56:24
| 57,000,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,680
|
r
|
bigData.R
|
## Data pulls from the big Austin model runs.
## Replicates the creation of bigInfrastructureResilience.csv.

# Scenario output files produced by the big model runs.
nameList <- c("bigAsIs.csv", "big16kRec.csv", "big100percentRec.csv",
              "bigRob.csv", "bigStep.csv")

# Need level for each stakeholder.
nl <- c(0.5, 0.9, 0.75, 0.95, 0.8)

# Sigma for each stakeholder. The decay parameter is not varied here; it may
# be worth varying once the SpeedFactor is fixed — that can be the initial
# simplifying assumption.
sl <- c(0.1, 0.2, 0.4, 0.7, 0, 0.5)

# Need specification consumed by metricRollup(): constant need at each level.
nMat <- data.frame(
  func      = "constantNeed",
  cLevel    = nl,
  startTime = NA,
  slope     = NA
)

# Resilience-factor specification consumed by metricRollup().
rMat <- data.frame(
  tDelta = 30,
  decay  = 0,
  sigma  = sl
)

# Resilience for each electric failure scenario (particular to the SD model).
bigInfRFR <- metricRollup(nameList, need = nMat, resFactors = rMat, 39000)

# Persist the full run-for-record output only when there is new data; rewrite
# this line each time new data comes in rather than leaving it live:
# write.csv(bigInfRFR, "bigInfrastructureRunsForRecord2.csv")

# Keep only the resilience metrics plus the identifying columns.
bigInfResilience <- select(bigInfRFR, QR, EQR, Rho, extRho,
                           statQuoResilience, extResilience, fileName,
                           Infrastructure, Scenario)
# write.csv(bigInfResilience, "bigInfrastructureResilience2.csv")
|
b843a8d07f6d620966e933bb3f0d9f475239e60d
|
6b1fbbbc7c7922914c96829f24564f403a7fee09
|
/Raw/0.저장 및 불러오기.R
|
4d6ddf1f0b86ba35bb3c3a3368364e216d064361
|
[] |
no_license
|
hana-dool/R
|
3ef1fb11175137cefaf0532ec902db0bfefa6a2a
|
7652d11c1f405b3060b42ec99ebc7cccdfdbb69b
|
refs/heads/master
| 2023-04-11T23:01:35.309056
| 2021-05-18T14:31:38
| 2021-05-18T14:31:38
| 296,353,680
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 1,268
|
r
|
0.저장 및 불러오기.R
|
# Tutorial: saving and loading data files in R.

# Set the working directory to this script's location (RStudio only).
# NOTE(review): setwd() in scripts is fragile outside RStudio; kept because
# every path below is relative to the script's folder.
setwd(dirname(rstudioapi::getSourceEditorContext()$path))

# --- Load a txt file with read.table() ---
txt <- read.table('./dataset_1.txt',
                  header = TRUE, # the first line often holds the column names
                  sep = ','      # txt files default to whitespace separators,
                                 # but this file is comma-separated
                  )

# --- Load a csv file with read.csv() ---
txt <- read.csv('./example.csv')
txt

# --- Save a data file ---
write.csv(txt,
          file = './example.csv',
          row.names = FALSE # by default an extra index column is written as
                            # the first column; this suppresses it
          )
write.table(txt,
            file= './example.txt') # write.table adds no such index column

# --- Save analysis results (using cat) ---
x <- 1:20
cat('mean :' , mean(x),'\n',
    'var :' , var(x),
    file = './analysis.txt')

# --- Save analysis results (using capture.output) ---
# cat() cannot render list-shaped results such as a model summary;
# capture.output() records them exactly as printed.
data(trees)
fit <- lm(Volume~Height,data=trees) # renamed from `lm` to avoid shadowing stats::lm
capture.output(summary(fit),
               file = './analysis2.txt')
|
359db21fc9d17d883a908df9deedff58536e758b
|
c3f52e1fd0c9e2abe121805dfd47f1daf43701ad
|
/data-raw/edit_data_unfiltered.R
|
49ec72940d8f0e655aa5e8506f323ab007150514
|
[] |
no_license
|
lvaudor/braidHymo
|
97928c191de87c1d1539d6638203ea6864c2e2cf
|
2a705692fe7152c89d39a150c7498ef9f32cec6b
|
refs/heads/master
| 2023-04-09T10:51:08.487967
| 2022-02-07T10:24:40
| 2022-02-07T10:24:40
| 440,566,310
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
r
|
edit_data_unfiltered.R
|
# Strip mature-vegetation and riparian rows from the unfiltered Drac and
# Durance survey exports, then write the cleaned files back to data-raw/.
#
# Fixed: the script used `%>%` and `filter()` without loading any package;
# dplyr (which provides both) is now loaded explicitly so the script runs
# standalone.
library(dplyr)

dat1 <- read.csv("data-raw/Drac_Chabottes_2018_unfiltered.txt", row.names = 1, sep = ";")
dat2 <- read.csv("data-raw/Durance_Brillanne_2017_unfiltered.txt", row.names = 1, sep = ";")

# Keep only rows that are neither mature vegetation nor the "Riparian" unit.
dat1 <- dat1 %>%
  filter(TYPO_VEGE != "mature" & NAME != "Riparian")
dat2 <- dat2 %>%
  filter(TYPO_VEGE != "mature" & NAME != "Riparian")

write.table(dat1, "data-raw/Drac_Chabottes_2018.txt", row.names = TRUE, sep = ";", dec = ".")
write.table(dat2, "data-raw/Durance_Brillanne_2017.txt", row.names = TRUE, sep = ";", dec = ".")
|
bbbba7c6a6da7241cd4f380862d94c22ac30a8cc
|
66e04f24259a07363ad8da7cd47872f75abbaea0
|
/Joining Data in SQL/Chapter 3-Set theory clauses/3.R
|
bc3e6a6241f63732c823148f5920b6c895d93ff7
|
[
"MIT"
] |
permissive
|
artileda/Datacamp-Data-Scientist-with-R-2019
|
19d64729a691880228f5a18994ad7b58d3e7b40e
|
a8b3f8f64cc5756add7ec5cae0e332101cb00bd9
|
refs/heads/master
| 2022-02-24T04:18:28.860980
| 2019-08-28T04:35:32
| 2019-08-28T04:35:32
| 325,043,594
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 689
|
r
|
3.R
|
# Union all
# As you saw, duplicates were removed from the previous two exercises by using UNION.
#
# To include duplicates, you can use UNION ALL.
#
# Instructions
# 100 XP
# Determine all combinations (include duplicates) of country code and year that exist in either the economies or the populations tables. Order by code then year.
# The result of the query should only have two columns/fields. Think about how many records this query should result in.
# You'll use code very similar to this in your next exercise after the video. Make note of this code after completing it.
#
# UNION ALL keeps every (code, year) pair from both tables, so a pair present
# in both appears twice; the column names of the first SELECT label the result.
SELECT code, year
FROM economies
UNION ALL
SELECT country_code, year
FROM populations
ORDER BY code, year;
|
92dd6bbe97c898ce189ee1f575079367c73efec0
|
0a8924a03c6bfc019a5359f3bc4221526ecdbb3d
|
/tests/testthat.R
|
1ab7caae9493a7bff3209746edeabc8ea705ddae
|
[] |
no_license
|
Ironholds/ores
|
3a51d7aa8d856cff814b2225f66e3322cee2f13c
|
e2b08d7c2211d28d359773aae938ef32c47c9201
|
refs/heads/master
| 2021-01-10T17:54:58.388837
| 2020-08-26T01:18:06
| 2020-08-26T01:18:06
| 49,224,731
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52
|
r
|
testthat.R
|
# Standard testthat entry point: running this file (e.g. during R CMD check)
# executes every test file under tests/testthat/ for the ores package.
library(testthat)
library(ores)

test_check("ores")
|
d7bff4eb4891a83bee673f9148fd340c8c035172
|
4dc8fca8a72ac40993b72a6b733aae248296b87c
|
/R/save_as_csv.R
|
a4bb8b415cc8d636af1a24ab1ee658bec0a751d6
|
[] |
no_license
|
alexchouraki/assignment
|
6152b373d7b82824416ebb7f6fe79a6de6dfc112
|
864cb8ccd3e309b08303efec137b37f1eae65d6e
|
refs/heads/master
| 2021-07-12T10:07:15.158136
| 2017-10-16T13:51:27
| 2017-10-16T13:51:27
| 107,115,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
r
|
save_as_csv.R
|
#' Save a data frame to disk as a CSV file
#'
#' Validates the input and the target path with assertthat, then writes
#' \code{dframe} using \code{write.csv2()}.
#'
#' NOTE(review): \code{write.csv2()} writes the European CSV convention —
#' ';' as the field separator and ',' as the decimal mark. Confirm that is
#' intended, since the function name suggests a plain comma-separated file.
#'
#' @param dframe A non-empty data frame to write.
#' @param filename Output path; must end in ".csv" and sit in an existing,
#'   writeable directory.
#' @param row.names Logical, passed through to \code{write.csv2()};
#'   FALSE (the default) suppresses the row-name column.
#' @param ... Further arguments forwarded to \code{write.csv2()}.
#' @return Invisibly, the normalized path of the written file.
#' @import dplyr
#' @import assertthat
#' @import readxl
#' @import utils
#' @examples
#' \dontrun{
#' save_as_csv(titanic, "titanic.csv", row.names = FALSE)
#' }
save_as_csv <- function(dframe, filename, row.names = FALSE, ...){
  # Fail fast with informative assertthat errors before touching the disk.
  assert_that(is.data.frame(dframe))
  assert_that(not_empty(dframe))
  assert_that(is.dir(dirname(filename)))
  assert_that(is.writeable(dirname(filename)))
  assert_that(has_extension(filename,"csv"))
  write.csv2(x = dframe, file = filename, row.names = row.names, ...)
  invisible(normalizePath(filename))
}
|
703d7d23a8abe399bcc2548d98679b7b249595c2
|
8ef72707375ccc9b64bf0bf29f94823eed96dd59
|
/routeMapper.R
|
69d7cf97e96c1b9189ce7061f49f8f7051fd6ed7
|
[] |
no_license
|
cferenci/route-mapper
|
76700f0e5ae021bc92606c85118a5328a49b7ab8
|
2630def6f8510573aa5fe3f7214201692ed699bc
|
refs/heads/main
| 2023-02-18T16:58:54.822121
| 2020-07-22T02:53:08
| 2020-07-22T02:53:08
| 329,835,374
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,645
|
r
|
routeMapper.R
|
library(leaflet)
library(dplyr)
library(shiny)
library(shinydashboard)
library(googleway)
#library(fontawesome)
#### server
# Shiny server: renders the Google map, records up to two map clicks as the
# origin/destination pair, and supports clearing all markers.
server <- function(input, output, session) {
  ## Google Maps API key.
  ## NOTE(review): this key is committed in plain text — it should be moved
  ## to an environment variable (e.g. Sys.getenv) and the exposed key revoked.
  api_key <- "AIzaSyBo4AHjlO0qlcbDMX0i_WyAgxzQlAWlmDM"
  ## Render the base Google map.
  output$map <- renderGoogle_map({
    # Hard-coded centre on Santa Monica; eventually want geolocation.
    latlongSM <- c(34.0195, -118.4912)
    google_map(key = api_key, event_return_type = "list", location = latlongSM, zoom = 15)
  })
  ## Reactive store of clicked points.
  ## NOTE(review): initialised with a column named `long`, but clicks below
  ## are appended with `lon` — confirm which name downstream code expects.
  lat_long <- reactiveValues(originLocationDF = data.frame(lat = c(), long = c()))
  ## On every map click: append the point, mirror the first two clicks into
  ## the origin/destination text inputs, and drop a marker for at most the
  ## first two clicks.
  observeEvent(
    input$map_map_click, {
      # Coordinates of the click just received.
      originLat <- input$map_map_click$lat
      originLon <- input$map_map_click$lon
      #print(input$map_map_click)
      # Append the click to the running history and keep the first two rows
      # as the (origin, destination) pair.
      lat_long$originLocationDFnew <- data.frame(lat = originLat, lon = originLon)
      lat_long$originLocationDF <- bind_rows(lat_long$originLocationDF,
                                             lat_long$originLocationDFnew)
      lat_long$originLocationDFhead <- head(lat_long$originLocationDF, 2)
      # First click becomes the origin field (rounded to 2 decimals)...
      updateTextInput(session, "startingAddress", value = paste(round(lat_long$originLocationDFhead[1, 1], 2),
                                                                round(lat_long$originLocationDFhead[1,2], 2), sep = ", "))
      # ...and the second click (once it exists) the destination field.
      if(nrow(lat_long$originLocationDF) != 1){
        updateTextInput(session, "endingAddress", value = paste(round(lat_long$originLocationDFhead[2, 1], 2),
                                                                round(lat_long$originLocationDFhead[2,2], 2), sep = ", "))
      }
      # Add a marker for the new point, but only for the first two clicks.
      if(nrow(lat_long$originLocationDF) <= 2 ){
        google_map_update(map_id="map", data = lat_long$originLocationDFnew) %>%
          add_markers(update_map_view = FALSE)
      }
    }
    #google_directions()
  )
  ## Clear markers, reset both text inputs, and reload the session so the
  ## stored click history is discarded.
  observeEvent(input$clearMarkers,{
    google_map_update(map_id="map") %>%
      clear_markers()
    updateTextInput(session, "startingAddress",
                    value = paste("Origin Location..."))
    updateTextInput(session, "endingAddress",
                    value = paste("Destination..."))
    session$reload()
  }
  )
  ## Debug table showing the first two stored click locations.
  output$example <- renderTable(lat_long$originLocationDFhead)
}
#### user interface
# User interface: raw-HTML page (Bootstrap) with a sticky navbar, a
# route-creation form on the left and the Google map on the right.
ui <- tags$html(
  # <head>: charset, responsive viewport, local Bootstrap stylesheet.
  tags$head(
    tags$meta(charset="utf-8"),
    # Fixed: the viewport content previously read "width-device-width" (typo),
    # which browsers silently ignore; it must be "width=device-width".
    tags$meta(name="viewport", content="width=device-width, initial-scale=1, shrink-to-fit=no"),
    tags$link(rel = "stylesheet", type = "text/css", href = "bootstrap.css")
  ),#end head
  tags$body(
    # Sticky top navigation bar.
    tags$nav(class = "navbar navbar-expand-lg sticky-top navbar-dark bg-dark",
             tags$a(class = "navbar-brand", href="#", "RouteR"),
             tags$div(class="collapse navbar-collapse justify-content-end",
                      tags$ul(class="navbar-nav",
                              tags$li(class="nav-item",
                                      tags$a(class="nav-link", href="#", "Trends")),
                              tags$li(class="nav-item",
                                      tags$a(class="nav-link", href="#", "My Profile")),
                              tags$li(class="nav-item",
                                      tags$a(class="btn btn-success", href="#", "Create New Route"))
                      )#end ul
             )#end div
    ),#end Nav
    # Main content: form column (col-4) and map column (col-8).
    tags$div(class="container-fluid",
             tags$div(class = "row",
                      tags$div(class = "col-4 pt-3",
                               h3("Create Route"),
                               textInput(inputId = "startingAddress", label = "Origin", value = "Origin Location..."),
                               textInput(inputId = "endingAddress", label = "Destination", "Destination..."),
                               #radioButtons(inputId = "routeType", label = "Select Route Type", choices = list("Most greenspace" = 1, "Least Polluted Route" = 2, "Most Efficient Route" = 3), selected = 1),
                               actionButton("centerMaponAddress", "Create Route", class = "btn-primary"),
                               actionLink("clearMarkers", "Clear Markers")
                      ), #endcolumn
                      tags$div(class="col-8",
                               google_mapOutput(outputId = "map")
                      )#endcolumn
             )#endRow
    )#end container-fluid
    # Fixed: removed a stray trailing comma here that left an empty argument
    # in tags$body().
  )#end body
)#end html
shinyApp(ui = ui, server = server)
|
2b497c81f3c8a0760d967ded3dda26f686f76cd0
|
77a6af0d227e7493ab737d64023b4e4dd546de1c
|
/man/is_weekend.Rd
|
e4b50986dda865735008dd57bde29ab6188b60ff
|
[
"MIT"
] |
permissive
|
ellisvalentiner/lubridateExtras
|
a1ced92301dbf7e9fa5f56c3f3322945f8eb7bb0
|
fad48677b3d839b044173ac3a1a555555d08dde0
|
refs/heads/main
| 2022-05-15T23:10:10.989249
| 2022-03-23T15:52:27
| 2022-03-23T15:52:27
| 101,084,937
| 19
| 6
|
MIT
| 2022-01-24T00:27:03
| 2017-08-22T16:37:17
|
R
|
UTF-8
|
R
| false
| true
| 480
|
rd
|
is_weekend.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/instants.R
\name{is_weekend}
\alias{is_weekend}
\title{Is x a weekend?}
\usage{
is_weekend(x)
}
\arguments{
\item{x}{a POSIXct, POSIXlt, Date, chron, yearmon, yearqtr, zoo, zooreg,
timeDate, xts, its, ti, jul, timeSeries, or fts object.}
}
\value{
boolean indicating whether x is a weekend
}
\description{
Is x a weekend?
}
\examples{
is_weekend("2017-08-29") # FALSE
is_weekend("2017-09-02") # TRUE
}
|
08e348441d7c48053ac5a09594f20a6f4737b80d
|
c97122b68b6dc719b7a0be0ef3546230b2aa1d04
|
/Codigo/ShinyDashboardPF.R
|
aa628d313ff291754a9b9517aae6103556a5f8eb
|
[] |
no_license
|
LeonardoNeyra/Mainframes1_FinalWork
|
e02a6e5e19dd4243178a92e98a50cba3bada823f
|
0aa676a9aead14718b7943edb3b2b171ed490967
|
refs/heads/master
| 2020-04-03T12:07:11.893470
| 2018-10-29T16:24:51
| 2018-10-29T16:24:51
| 155,241,750
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,239
|
r
|
ShinyDashboardPF.R
|
##-libraries-##
library(shinydashboard)
library("plotly")
library("shiny")
library("dplyr")
library("ggplot2")
library("readxl")
library("sqldf")
library("data.table")
# Dropdown choice lists for the exploration/graphics panels. Each backend has
# twenty numbered entries ("DPLYR1".."DPLYR20", etc.), so they are generated
# instead of spelled out by hand — the lists cannot drift out of sync.
choiceDPLYR <- paste0("DPLYR", 1:20)
choiceSQLDF <- paste0("SQLDF", 1:20)
choiceDT    <- paste0("DT", 1:20)
grafGG      <- paste0("GG", 1:20)
grafPL      <- paste0("plotly", 1:20)
# Course names (in curriculum order) offered in the regression-model panel.
cur <- c("Introduccion a la Ingenieria de Sistemas", "Introduccion a la Programacion",
         "Lenguaje", "Matematica Basica",
         "Programacion Orientada a Objetos", "Automatas y Compiladores",
         "Calculo", "Matematica Discreta",
         "Fisica 1", "Matematica III",
         "Fundamentos de Sistemas de Informacion", "Algebra Lineal",
         "Fisica aplicada a la computacion", "Organizacion y gestion de empresas",
         "Estructura de datos", "Arquitectura de computadoras",
         "Estadistica y Probabilidades", "Base de datos",
         "Interaccion Hombre Computador", "Ingenieria de Software 1",
         "Desarrollo de Aplicaciones", "Ingenieria de Software 2",
         "Modelado de Proceso de Negocios 1", "Sistema Gestor de Base de Datos",
         "Sistemas Operativos", "Planeamiento Estrategico de TI",
         "Investigacion de Operaciones", "Sistemas de Informacion",
         "Modelado de Proceso de Negocios 2", "Arquitectura de Redes de Computadoras",
         "Administracion de Redes y Seguridad de la Informacion",
         "Sistema de Soporte de Decision", "Proyecto de Investigacion",
         "Administacion de Proyectos de Sistemas de Informacion", "Gestion de TI")
##-ui-##
# shinydashboard UI: five sidebar tabs covering the data-mining workflow
# (collection, pre-processing, exploration, charts, regression model).
ui <- dashboardPage(skin = "blue",
  dashboardHeader(title="Proyecto Dashboard"),
  # Sidebar: one menu entry per workflow stage; tabName links to a tabItem.
  dashboardSidebar(
    sidebarMenu(
      menuItem("Recoleccion", tabName = "dashboard", icon = icon("fas fa-upload",lib="font-awesome")),
      menuItem("Pre-Procesamiento", tabName = "preprocesamiento", icon = icon("cog",lib="glyphicon")),
      menuItem("Exploracion", tabName = "exploracion", icon = icon("fas fa-search",lib="font-awesome")),
      menuItem("Graficos", tabName = "graficos", icon = icon("stats",lib="glyphicon")),
      menuItem("Modelo", tabName = "modelo", icon = icon("fas fa-trophy",lib="font-awesome"))
    )
  ),
  dashboardBody(
    tabItems(
      # Tab 1: upload an XLSX file and preview the selected sheet.
      tabItem(tabName = "dashboard",
        fluidRow(
          box(title="Carga de Datos",status="success",solidHeader=TRUE,
              fileInput("idArchivo","Seleccione un archivo XLSX",accept=c(".xlsx")),
              numericInput("idSheet",label="Ingrese el número de hoja",value=1,min=1,max=4)
          ),
          box(title="Descripcion del Proyecto",status="info",solidHeader=FALSE,
              "Nuestro proyecto analiza el Rendimiento Academico de los alumnos de la escuela de ICSI
              de la universidad UPAO utilizando Data Mining. El objetivo principal es predecir notas mediante
              un modelo de regresion lineal",br(),"Integrantes:",br(),"Neyra Ocana, Leonardo",
              br(),"Ramos Saravia, Sandro",tags$hr(),"Universidad Privada Antenor Orrego - UPAO"
          )
        ),
        fluidRow(
          box(title = "Dataset Seleccionado",status="warning",width=12,solidHeader=TRUE,
              tableOutput(outputId="plotRecoleccion")
          )
        )
      ),
      # Tab 2: one sub-tab per pre-processing stage (raw -> final dataset).
      tabItem(tabName = "preprocesamiento",
        fluidRow(
          tabBox(title="Etapas",id="tabBox1",width=12,
            tabPanel("RawData","Un dataset de un curso en crudo",br(),"Usamos el paquete readxl",tags$hr(),tableOutput("view")),
            tabPanel("AddCol",
                     "Transformacion",br(),"Agragamos columnas indicando el nombre del curso y el ciclo",
                     br(),"Usamos DPLYR",
                     tags$hr(),tableOutput("addCol")),
            tabPanel("CleanHead",
                     "Imputacion",br(),"Eliminamos obs. innecesarias",br(),"Usamos DPLYR",tags$hr(),
                     "Transformacion",br(),"Agregamos nombre a las cabeceras",br(),"Usamos el paquete base",
                     tags$hr(),tableOutput("cleanHead")),
            tabPanel("Normalization",
                     "Se normalizo las columnas por medio de la siguiente forma",br(),
                     verbatimTextOutput(outputId="TextNorm"),br(),
                     tableOutput("NormView")),
            tabPanel("FinalDataset",
                     "Transformacion",br(),"Concatenamos todos los datasets de cursos en un solo dataset llamado 'ICSI'",
                     br(),"Convertimos algunas columnas de Character a Numeric",br(),"Usamos Janitor",tags$hr(),
                     "Imputacion",br(),"Eliminamos a los alumnos inhabilitados, o sea las obs. que incluyen 'IN'",
                     br(),"Usamos DPLYR",
                     tags$hr(),tableOutput("finalDataset")))
        )
      ),
      # Tab 3: canned exploration queries, one sub-tab per query backend.
      tabItem(tabName = "exploracion",
        fluidRow(
          tabBox(title="Consultas de Exploracion",id="tabBox2",width=12,
            tabPanel("Consultas con DPLYR",
                     selectInput("consultasDPLYR","Elige una consulta DPLYR",choices=choiceDPLYR),
                     textOutput("textoConsultasDPLYR"),
                     tags$hr(),
                     tableOutput("consultasViewDPLYR")),
            tabPanel("Consultas con SQLDF",
                     selectInput("consultasSQLDF","Elige una consulta SQLDF",choices=choiceSQLDF),
                     textOutput("textoConsultasSQLDF"),
                     tags$hr(),
                     tableOutput("consultasViewSQLDF")),
            tabPanel("Consultas con data.table",
                     # Fixed: this label said "Elige una consulta SQLDF"
                     # (copy-paste error) on the data.table tab.
                     selectInput("consultasDT","Elige una consulta data.table",choices=choiceDT),
                     textOutput("textoConsultasDT"),
                     tags$hr(),
                     tableOutput("consultasViewDT"))
          )
        )
      ),
      # Tab 4: exploration charts rendered with ggplot2 or plotly.
      tabItem(tabName = "graficos",
        fluidRow(
          tabBox(title="Graficos de Exploracion",width=12,
            tabPanel("Graficos con GGPLOT",
                     selectInput("graficosGG","Elige un grafico ggplot",choices=grafGG),
                     textOutput("textoGraficosGG"),
                     tags$hr(),
                     plotOutput("graficosViewGG")),
            tabPanel("Graficos con PLOTLY",
                     selectInput("graficosPL","Elige un grafico plotly",choices=grafPL),
                     textOutput("textoGraficosPL"),
                     tags$hr(),
                     plotlyOutput("graficosViewPL"))
          )
        )
      ),
      # Tab 5: linear-regression model between two chosen courses.
      tabItem(tabName = "modelo",
        fluidRow(
          box(title="Modelo de Regresion Lineal",status="info",solidHeader=TRUE,
              selectInput("curso1","Elige el primer curso",choices=cur),
              selectInput("curso2","Elige el segundo curso",choices=cur),
              numericInput("notaX",label="Ingrese nota a predecir (X value)",value=14,min=1,max=20)
          ),
          box(title="Datos del Modelo",status="info",solidHeader=TRUE,
              h5("Coeficiente de Pearson"),
              textOutput("textoPearson"),
              tags$hr(),
              h5("Nuevo valor de nota (Y value)"),
              textOutput("textoNewY"))
        ),
        fluidRow(
          box(title="Grafico del Modelo de Regresion Lineal",status="warning",width=12,solidHeader=TRUE,
              plotOutput("modeloView")
          )
        )
      )
    )
  )
)
##-server-##
# Shiny server: wires each panel's outputs to precomputed global objects
# (DPLYR1..20, SQL1..20, DT1..20, GG1..20, plotly1..20, ICSI, dtNormal,
# raw1IntroIngSist, ProcesarCurso, MAlum) that must exist before launch.
#
# Fixes in this revision:
#  - graficosViewGG had the "GG1" branch duplicated and no "GG18" branch,
#    so selecting GG18 rendered nothing; the duplicate is removed and GG18
#    is wired up like its siblings.
#  - textoGraficosGG started with a bogus branch that rendered the GG1 plot
#    object through renderText and made GG1's real description unreachable;
#    the bogus branch is removed.
server <- function(input, output) {
  ## ---- "Recoleccion" panel: preview the uploaded XLSX sheet ----
  output$plotRecoleccion<-renderTable({
    req(input$idArchivo)
    tryCatch({
      inFile<-input$idArchivo
      dat<-read_xlsx(inFile$datapath,sheet=input$idSheet)
    }, error=function(e){stop(safeError(e))})
    return(dat)
  })
  ## ---- "Pre-Procesamiento" panel: one output per cleaning stage ----
  # Raw course dataset exactly as loaded.
  output$view<-renderTable({raw1IntroIngSist})
  # Stage 1: add course-name and cycle columns.
  raw1IntroIngSistM<-mutate(raw1IntroIngSist,Curso="Introduccion a la Ingenieria de Sistemas",Ciclo=1)
  output$addCol<-renderTable({raw1IntroIngSistM})
  # Stage 2: drop unneeded observations / name the headers (external helper).
  raw1IntroIngSistMP<-ProcesarCurso(raw1IntroIngSistM)
  output$cleanHead<-renderTable({raw1IntroIngSistMP})
  # Min-max normalisation snippet shown verbatim in the UI.
  output$TextNorm<-renderText({"normal<-function(x){(x-min(x))/(max(x)-min(x))}
                               dtNormal<-data.frame(ICSI%>%select(Final))
                               dtNormal<-normal(dtNormal$Final)
                               View(dtNormal)"})
  output$NormView<-renderTable({head(dtNormal)})
  output$finalDataset<-renderTable({ICSI})
  ## ---- "Exploracion" panel ----
  # Each branch returns the precomputed global named after the chosen entry.
  output$consultasViewDPLYR<-renderTable({
    if(input$consultasDPLYR=="DPLYR1"){DPLYR1}
    else if(input$consultasDPLYR=="DPLYR2"){DPLYR2}
    else if(input$consultasDPLYR=="DPLYR3"){DPLYR3}
    else if(input$consultasDPLYR=="DPLYR4"){DPLYR4}
    else if(input$consultasDPLYR=="DPLYR5"){DPLYR5}
    else if(input$consultasDPLYR=="DPLYR6"){DPLYR6}
    else if(input$consultasDPLYR=="DPLYR7"){DPLYR7}
    else if(input$consultasDPLYR=="DPLYR8"){DPLYR8}
    else if(input$consultasDPLYR=="DPLYR9"){DPLYR9}
    else if(input$consultasDPLYR=="DPLYR10"){DPLYR10}
    else if(input$consultasDPLYR=="DPLYR11"){DPLYR11}
    else if(input$consultasDPLYR=="DPLYR12"){DPLYR12}
    else if(input$consultasDPLYR=="DPLYR13"){DPLYR13}
    else if(input$consultasDPLYR=="DPLYR14"){DPLYR14}
    else if(input$consultasDPLYR=="DPLYR15"){DPLYR15}
    else if(input$consultasDPLYR=="DPLYR16"){DPLYR16}
    else if(input$consultasDPLYR=="DPLYR17"){DPLYR17}
    else if(input$consultasDPLYR=="DPLYR18"){DPLYR18}
    else if(input$consultasDPLYR=="DPLYR19"){DPLYR19}
    else if(input$consultasDPLYR=="DPLYR20"){DPLYR20}
  })
  # "SQLDFn" choices map to globals named "SQLn".
  output$consultasViewSQLDF<-renderTable({
    if(input$consultasSQLDF=="SQLDF1"){SQL1}
    else if(input$consultasSQLDF=="SQLDF2"){SQL2}
    else if(input$consultasSQLDF=="SQLDF3"){SQL3}
    else if(input$consultasSQLDF=="SQLDF4"){SQL4}
    else if(input$consultasSQLDF=="SQLDF5"){SQL5}
    else if(input$consultasSQLDF=="SQLDF6"){SQL6}
    else if(input$consultasSQLDF=="SQLDF7"){SQL7}
    else if(input$consultasSQLDF=="SQLDF8"){SQL8}
    else if(input$consultasSQLDF=="SQLDF9"){SQL9}
    else if(input$consultasSQLDF=="SQLDF10"){SQL10}
    else if(input$consultasSQLDF=="SQLDF11"){SQL11}
    else if(input$consultasSQLDF=="SQLDF12"){SQL12}
    else if(input$consultasSQLDF=="SQLDF13"){SQL13}
    else if(input$consultasSQLDF=="SQLDF14"){SQL14}
    else if(input$consultasSQLDF=="SQLDF15"){SQL15}
    else if(input$consultasSQLDF=="SQLDF16"){SQL16}
    else if(input$consultasSQLDF=="SQLDF17"){SQL17}
    else if(input$consultasSQLDF=="SQLDF18"){SQL18}
    else if(input$consultasSQLDF=="SQLDF19"){SQL19}
    else if(input$consultasSQLDF=="SQLDF20"){SQL20}
  })
  output$consultasViewDT<-renderTable({
    if(input$consultasDT=="DT1"){DT1}
    else if(input$consultasDT=="DT2"){DT2}
    else if(input$consultasDT=="DT3"){DT3}
    else if(input$consultasDT=="DT4"){DT4}
    else if(input$consultasDT=="DT5"){DT5}
    else if(input$consultasDT=="DT6"){DT6}
    else if(input$consultasDT=="DT7"){DT7}
    else if(input$consultasDT=="DT8"){DT8}
    else if(input$consultasDT=="DT9"){DT9}
    else if(input$consultasDT=="DT10"){DT10}
    else if(input$consultasDT=="DT11"){DT11}
    else if(input$consultasDT=="DT12"){DT12}
    else if(input$consultasDT=="DT13"){DT13}
    else if(input$consultasDT=="DT14"){DT14}
    else if(input$consultasDT=="DT15"){DT15}
    else if(input$consultasDT=="DT16"){DT16}
    else if(input$consultasDT=="DT17"){DT17}
    else if(input$consultasDT=="DT18"){DT18}
    else if(input$consultasDT=="DT19"){DT19}
    else if(input$consultasDT=="DT20"){DT20}
  })
  # Human-readable description of the selected query.
  output$textoConsultasDPLYR<-renderText({
    if(input$consultasDPLYR=="DPLYR1"){"inhabilitados y habilitados en mateBasica"}
    else if(input$consultasDPLYR=="DPLYR2"){"aprobados y desaprobados en 1°ciclo"}
    else if(input$consultasDPLYR=="DPLYR3"){"aprobados de IntroProg y POO"}
    else if(input$consultasDPLYR=="DPLYR4"){"Nota media del curso de calculo"}
    else if(input$consultasDPLYR=="DPLYR5"){"Varianza en el parcial de Lenguaje"}
    else if(input$consultasDPLYR=="DPLYR6"){"Promedio de notas hasta el parcial de mate3"}
    else if(input$consultasDPLYR=="DPLYR7"){"Promedio de nota Final curso de mate3"}
    else if(input$consultasDPLYR=="DPLYR8"){"Comparación de notas de la 1 y 2 mitad de mate3"}
    else if(input$consultasDPLYR=="DPLYR9"){"Notas de alumnos del curso de fisica1"}
    else if(input$consultasDPLYR=="DPLYR10"){"alumnos que están por encima de la media del curso de EstruDatos"}
    else if(input$consultasDPLYR=="DPLYR11"){"Top 3 de alumnos del 3ciclo"}
    else if(input$consultasDPLYR=="DPLYR12"){"Desviacion Estandar del examen final de FisCom"}
    else if(input$consultasDPLYR=="DPLYR13"){"Alumno con mayor promedio de 1ciclo"}
    else if(input$consultasDPLYR=="DPLYR14"){"top10 de alumnos IntroProg"}
    else if(input$consultasDPLYR=="DPLYR15"){"top10 de alumnos POO"}
    else if(input$consultasDPLYR=="DPLYR16"){"Join entre IntroProg y POO"}
    else if(input$consultasDPLYR=="DPLYR17"){"Alumnos top5 alumnos Sisope"}
    else if(input$consultasDPLYR=="DPLYR18"){"Alumnos top5 alumnos ArquiComp"}
    else if(input$consultasDPLYR=="DPLYR19"){"Join entre Sisope y ArquiComp"}
    else if(input$consultasDPLYR=="DPLYR20"){"Cantidad entre aprobados de MPN1 y MPN2"}
  })
  output$textoConsultasSQLDF<-renderText({
    if(input$consultasSQLDF=="SQLDF1"){"aprobados y desaprobados en IntroIngSI"}
    else if(input$consultasSQLDF=="SQLDF2"){"aprobados en el curso de IntroProg"}
    else if(input$consultasSQLDF=="SQLDF3"){"Alumnos que no están invictos hasta 3 ciclo"}
    else if(input$consultasSQLDF=="SQLDF4"){"Alumnos desaprobados en IntroProg"}
    else if(input$consultasSQLDF=="SQLDF5"){"Alumnos desaprobados en MateBasica"}
    else if(input$consultasSQLDF=="SQLDF6"){"Join alumnos desaprobados en POO e IntroProg"}
    else if(input$consultasSQLDF=="SQLDF7"){"Relacion de alumnos de 1ciclo y 7ciclo"}
    else if(input$consultasSQLDF=="SQLDF8"){"Cantidad de registros de alumnos desaprobados por ciclo"}
    else if(input$consultasSQLDF=="SQLDF9"){"Cantidad de registros de alumnos por ciclo"}
    else if(input$consultasSQLDF=="SQLDF10"){"Tasa de desaprobació de ICSI según ciclo en el semestre 201510"}
    else if(input$consultasSQLDF=="SQLDF11"){"Promedio de Componente del curso mateBasica"}
    else if(input$consultasSQLDF=="SQLDF12"){"Promedio de Componente de 1ciclo"}
    else if(input$consultasSQLDF=="SQLDF13"){"Promedio de Final del curso de IHM"}
    else if(input$consultasSQLDF=="SQLDF14"){"Top5 de Alumnos de 2ciclo con menos promedio promociona"}
    else if(input$consultasSQLDF=="SQLDF15"){"Top5 de alumnos de 3ciclo con más promedio promocional"}
    else if(input$consultasSQLDF=="SQLDF16"){"Desviacion estandar del 1componente del 4ciclo"}
    else if(input$consultasSQLDF=="SQLDF17"){"Cantidad de alumnos que dieron susti en 1 ciclo"}
    else if(input$consultasSQLDF=="SQLDF18"){"Cantidad de alumnos que dieron susti entre 1 ciclo y 8 ciclo"}
    else if(input$consultasSQLDF=="SQLDF19"){"Promedio de nota promocional de alumnos que estan por segunda en MPN"}
    else if(input$consultasSQLDF=="SQLDF20"){"Promedio de nota promocional de alumnos que estan por primera en MPN2"}
  })
  output$textoConsultasDT<-renderText({
    if(input$consultasDT=="DT1"){"Cantidad desaprobados en 4ciclo"}
    else if(input$consultasDT=="DT2"){"Cantidad inhabilitado en 1ciclo"}
    else if(input$consultasDT=="DT3"){"Cantidad aprobados en 6ciclo"}
    else if(input$consultasDT=="DT4"){"Promedio de notas en 5ciclo"}
    else if(input$consultasDT=="DT5"){"Cantidad de alumnos por segunda en el curso de MPN2"}
    else if(input$consultasDT=="DT6"){"Cantidad de alumnos por segunda en el 1ciclo"}
    else if(input$consultasDT=="DT7"){"Numero de alumnos con <=10 del curso ProyectoInvestigacion"}
    else if(input$consultasDT=="DT8"){"Numero de alumnos con 10<x=<15 del curso ProyectoInvestigacion"}
    else if(input$consultasDT=="DT9"){"Numero de alumnos con >15 del curso ProyectoInvestigacion"}
    else if(input$consultasDT=="DT10"){"Relacion entre consultas 7,8 y 9"}
    else if(input$consultasDT=="DT11"){"Numero de inhabilitados de cada curso en 2 ciclo"}
    else if(input$consultasDT=="DT12"){"Promedio de componentes del 1ciclo"}
    else if(input$consultasDT=="DT13"){"Promedio del Parcial del curso de ProyectoInvestigacion"}
    else if(input$consultasDT=="DT14"){"Cantidad de alumnos que no rindieron el examen final de ProyectInvestigacion"}
    else if(input$consultasDT=="DT15"){"¿Que promedio (final y parcial) del 8ciclo es mayor?"}
    else if(input$consultasDT=="DT16"){"Varianza de la nota promocional de 5ciclo"}
    else if(input$consultasDT=="DT17"){"Alumnos que desaprobaron el parcial pero aprobaron el final en IHM"}
    else if(input$consultasDT=="DT18"){"Alumnos que aprobaron el parcial pero desaprobaron el final en IHM"}
    else if(input$consultasDT=="DT19"){"Promedio de nota final de los cursos de la linea de programacion"}
    else if(input$consultasDT=="DT20"){"Varianza de la nota promocional de 5ciclo"}
  })
  ## ---- "Graficos" panel ----
  output$graficosViewGG<-renderPlot({
    # Fixed: the GG1 branch was duplicated and GG18 was missing entirely.
    if(input$graficosGG=="GG1"){GG1}
    else if(input$graficosGG=="GG2"){GG2}
    else if(input$graficosGG=="GG3"){GG3}
    else if(input$graficosGG=="GG4"){GG4}
    else if(input$graficosGG=="GG5"){GG5}
    else if(input$graficosGG=="GG6"){GG6}
    else if(input$graficosGG=="GG7"){GG7}
    else if(input$graficosGG=="GG8"){GG8}
    else if(input$graficosGG=="GG9"){GG9}
    else if(input$graficosGG=="GG10"){GG10}
    else if(input$graficosGG=="GG11"){GG11}
    else if(input$graficosGG=="GG12"){GG12}
    else if(input$graficosGG=="GG13"){GG13}
    else if(input$graficosGG=="GG14"){GG14}
    else if(input$graficosGG=="GG15"){GG15}
    else if(input$graficosGG=="GG16"){GG16}
    else if(input$graficosGG=="GG17"){GG17}
    else if(input$graficosGG=="GG18"){GG18}
    else if(input$graficosGG=="GG19"){GG19}
    else if(input$graficosGG=="GG20"){GG20}
  })
  output$graficosViewPL<-renderPlotly({
    if(input$graficosPL=="plotly1"){plotly1}
    else if(input$graficosPL=="plotly2"){plotly2}
    else if(input$graficosPL=="plotly3"){plotly3}
    else if(input$graficosPL=="plotly4"){plotly4}
    else if(input$graficosPL=="plotly5"){plotly5}
    else if(input$graficosPL=="plotly6"){plotly6}
    else if(input$graficosPL=="plotly7"){plotly7}
    else if(input$graficosPL=="plotly8"){plotly8}
    else if(input$graficosPL=="plotly9"){plotly9}
    else if(input$graficosPL=="plotly10"){plotly10}
    else if(input$graficosPL=="plotly11"){plotly11}
    else if(input$graficosPL=="plotly12"){plotly12}
    else if(input$graficosPL=="plotly13"){plotly13}
    else if(input$graficosPL=="plotly14"){plotly14}
    else if(input$graficosPL=="plotly15"){plotly15}
    else if(input$graficosPL=="plotly16"){plotly16}
    else if(input$graficosPL=="plotly17"){plotly17}
    else if(input$graficosPL=="plotly18"){plotly18}
    else if(input$graficosPL=="plotly19"){plotly19}
    else if(input$graficosPL=="plotly20"){plotly20}
  })
  # Fixed: a bogus leading branch returned the GG1 plot object from
  # renderText and shadowed GG1's real description (both conditions tested
  # "GG1"). NOTE(review): GG11-GG18 still have no descriptions — the source
  # gives no text for them, so selecting those shows an empty caption.
  output$textoGraficosGG<-renderText({
    if(input$graficosGG=="GG1"){"Join alumnos desaprobados en POO e IntroProg"}
    else if(input$graficosGG=="GG2"){"Top5 de Alumnos de 2ciclo con menos promedio promocional"}
    else if(input$graficosGG=="GG3"){"Cantidad de inhabilitados y habilitados del curso de mateBasica"}
    else if(input$graficosGG=="GG4"){"Cantidad de aprobados y desaprobados del 1°ciclo"}
    else if(input$graficosGG=="GG5"){"Cantidad de aprobados de IntroProg y POO"}
    else if(input$graficosGG=="GG6"){"Comparación de notas de la 1 mitad y 2 mitad del curso de mate3"}
    else if(input$graficosGG=="GG7"){"Top 3 de alumnos del 3ciclo"}
    else if(input$graficosGG=="GG8"){"top10 de alumnos IntroProg"}
    else if(input$graficosGG=="GG9"){"top10 de alumnos POO"}
    else if(input$graficosGG=="GG10"){"Alumnos top5 alumnos Sisope"}
    else if(input$graficosGG=="GG19"){"Desaprobados en AdmiRedes y ArquiRedes"}
    else if(input$graficosGG=="GG20"){"Aprobados en MateDiscreta y Mate3"}
  })
  # NOTE(review): plotly11-plotly18 likewise have no descriptions in the
  # original source.
  output$textoGraficosPL<-renderText({
    if(input$graficosPL=="plotly1"){"Notas de EP y Final de alumnos de Fisica1"}
    else if(input$graficosPL=="plotly2"){"Notas del Final de alumnos del curso de fisica1"}
    else if(input$graficosPL=="plotly3"){"Notas del EP de alumnos del curso de fisica1"}
    else if(input$graficosPL=="plotly4"){"Join entre IntroProg y POO"}
    else if(input$graficosPL=="plotly5"){"Join entre Sisope y ArquiComp"}
    else if(input$graficosPL=="plotly6"){"Notas de alumnos de IntroIngSI"}
    else if(input$graficosPL=="plotly7"){"Notas de alumnos de MPN2"}
    else if(input$graficosPL=="plotly8"){"Notas de Peti y geti"}
    else if(input$graficosPL=="plotly9"){"Notas de los componente 1 y 4 de Sistema de Soporte de Decision"}
    else if(input$graficosPL=="plotly10"){"Notas de los componente 2 y 3 de Sistema de Soporte de Decision"}
    else if(input$graficosPL=="plotly19"){"Notas del C3 de Proyecto de Investigación"}
    else if(input$graficosPL=="plotly20"){"Notas del C4 de Proyecto de Investigación"}
  })
  ## ---- "Modelo" panel: linear regression between two courses ----
  # MAlum() (external) fits the model; by usage, element 3 is the Pearson
  # coefficient, 4 the predicted grade, 5 the scatter data and 6 the fitted
  # line — TODO confirm against MAlum's definition.
  output$textoPearson<-renderText({
    M0mf<-MAlum(ICSI,input$curso1,input$curso2,input$notaX)
    M0mf[[3]]
  })
  output$textoNewY<-renderText({
    M0mf<-MAlum(ICSI,input$curso1,input$curso2,input$notaX)
    M0mf[[4]]
  })
  output$modeloView<-renderPlot({
    M0mf<-MAlum(ICSI,input$curso1,input$curso2,input$notaX)
    ggplot()+
      geom_point(data=M0mf[[5]],aes(x=x,y=y),color="blue")+
      geom_line(data=M0mf[[6]],aes(x=x,y=y),color="red")
  })
}
##-App-##
# Launch the dashboard: pairs the UI definition with the server logic.
shinyApp(ui, server)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.