content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2
values | repo_name large_stringlengths 5 125 | language large_stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.46M | extension large_stringclasses 75
values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cert.R
\name{certinfo}
\alias{certinfo}
\alias{certs}
\alias{verify_cert}
\title{Certificates}
\usage{
certinfo(cert)
verify_cert(cert, root = system.file("cacert.pem", package = "openssl"))
}
\arguments{
\item{cert}{a certificate}
... | /man/certs.Rd | no_license | rOpenSec/openssl | R | false | false | 415 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cert.R
\name{certinfo}
\alias{certinfo}
\alias{certs}
\alias{verify_cert}
\title{Certificates}
\usage{
certinfo(cert)
verify_cert(cert, root = system.file("cacert.pem", package = "openssl"))
}
\arguments{
\item{cert}{a certificate}
... |
# Enumerate the (i, j) grid points assigned to cross-validation fold k.
#
# A point (i, j) belongs to fold k when (k + 2 + i + 2*j) is divisible
# by 5, which partitions the grid diagonally into 5 disjoint folds.
#
# Args:
#   k:    fold index (any integer; only its value modulo 5 matters).
#   mini: first row index of the grid.
#   maxi: last row index of the grid.
#   maxj: last column index (columns start at 1).
#
# Returns:
#   A list of length-2 vectors c(i, j), ordered with i varying slowest
#   and j fastest (same order the original nested loops produced).
#   Returns an empty list when no point matches or when maxj < 1
#   (the previous `1:maxj` loop wrongly iterated over j = 1, 0 for
#   maxj == 0; `seq_len` fixes that edge case).
cvPoints.nc <- function(k, mini, maxi, maxj)
{
  k <- (k + 2) %% 5
  # Full (i, j) grid with j varying fastest, mirroring the original
  # inner loop over j.
  grid <- expand.grid(j = seq_len(maxj), i = mini:maxi)
  keep <- (k + grid$i + 2 * grid$j) %% 5 == 0
  # Pair up the surviving coordinates; vectorised replacement for the
  # incremental list-growing loop.
  Map(c, grid$i[keep], grid$j[keep])
}
# Byte-compiled version used by the rest of the package.
cvPoints <- compiler::cmpfun(cvPoints.nc)
partialSSE.n... | /R/cv.R | no_license | cran/smoothAPC | R | false | false | 1,786 | r | cvPoints.nc = function(k, mini, maxi, maxj)
{
k = (k+2) %% 5
result = list()
n = 1
for(i in mini:maxi)
for(j in 1:maxj)
if((k + i + 2*j) %% 5 == 0)
{
result[[n]] = c(i,j)
n = n + 1
}
return(result)
}
cvPoints = compiler::cmpfun(cvPoints.nc)
partialSSE.n... |
install.packages('spatstat')
install.packages('spatstat.local')
install.packages('rgdal')
install.packages('sp')
library(spatstat)
library(spatstat.local)
# LongLatToUTM
source('functions/LongLatToUTM.R')
library(dplyr)
publico<-points_in_recife %>%
filter (grupo_nat_juridica == 'PUBLICO')
privado<-points_in_re... | /spatial_analysis.R | no_license | higuchip/workshop_UFPE | R | false | false | 2,194 | r | install.packages('spatstat')
install.packages('spatstat.local')
install.packages('rgdal')
install.packages('sp')
library(spatstat)
library(spatstat.local)
# LongLatToUTM
source('functions/LongLatToUTM.R')
library(dplyr)
publico<-points_in_recife %>%
filter (grupo_nat_juridica == 'PUBLICO')
privado<-points_in_re... |
#' RDN: Reliability Density Neighborhood for Applicability Domain characterization.
#'
#' The RDN package provides a straightforward way of computing a QSAR model's applicability domain (AD),
#' being currently only applicable for classification models.
#' This method scans the chemical space, starting from the loc... | /R/RDN.R | no_license | machLearnNA/RDN | R | false | false | 2,411 | r | #' RDN: Reliability Density Neighborhood for Applicability Domain characterization.
#'
#' The RDN package provides a straightforward way of computing a QSAR model's applicability domain (AD),
#' being currently only applicable for classification models.
#' This method scans the chemical space, starting from the loc... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paths.R
\name{path.find}
\alias{path.find}
\title{Find all path graphs originated from a given root.}
\usage{
path.find(index, map)
}
\arguments{
\item{index}{Index of a root node (a node whose index never appears in
\code{map[, 2]}).}
\item... | /man/path.find.Rd | no_license | cran/hsm | R | false | true | 885 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paths.R
\name{path.find}
\alias{path.find}
\title{Find all path graphs originated from a given root.}
\usage{
path.find(index, map)
}
\arguments{
\item{index}{Index of a root node (a node whose index never appears in
\code{map[, 2]}).}
\item... |
#' Tidying methods for ARIMA modeling of time series
#'
#' These methods tidy the coefficients of ARIMA models of univariate time
#' series.
#'
#' @param x An object of class "Arima"
#'
#' @details `augment` is not currently implemented, as it is not clear
#' whether ARIMA predictions can or should be merged with the o... | /R/arima_tidiers.R | no_license | talgalili/broom | R | false | false | 2,068 | r | #' Tidying methods for ARIMA modeling of time series
#'
#' These methods tidy the coefficients of ARIMA models of univariate time
#' series.
#'
#' @param x An object of class "Arima"
#'
#' @details `augment` is not currently implemented, as it is not clear
#' whether ARIMA predictions can or should be merged with the o... |
# Organization of the data ------------------------------------------------
require(tseries, quietly = TRUE)
ConsDiscr <- c("AAP", "AMZN", "DRI", "BBY", "CMCSA")
Energy <- c("APC", "ANDV", "APA", "BHGE", "COG")
Financial <- c("AMG", "AFL", "ALL", "AXP", "AIG")
ConsStaples <- c("MO", "ADM", "CPB", "CHD", "CLX")
Telec... | /HW3/DynamicPlots.R | no_license | eugeniobonifazi/Statistical-Methods-for-Data-Science-I | R | false | false | 5,515 | r |
# Organization of the data ------------------------------------------------
require(tseries, quietly = TRUE)
ConsDiscr <- c("AAP", "AMZN", "DRI", "BBY", "CMCSA")
Energy <- c("APC", "ANDV", "APA", "BHGE", "COG")
Financial <- c("AMG", "AFL", "ALL", "AXP", "AIG")
ConsStaples <- c("MO", "ADM", "CPB", "CHD", "CLX")
Telec... |
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.7,family="gaussian",standardize=TRUE)
sink('./breast_075.txt',append=TRUE)
print(... | /Model/EN/Lasso/breast/breast_075.R | no_license | esbgkannan/QSMART | R | false | false | 343 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.7,family="gaussian",standardize=TRUE)
sink('./breast_075.txt',append=TRUE)
print(... |
library(tidyverse)
library(foreach)
library(brms)
library(glue)
library(fs)
source("00_functions.R")
# data ---
cd_strat_raw <- read_rds("data/output/by-cd_ACS_gender-age-education.Rds") %>%
transform_vars() %>%
filter(year == 2017)
# model ---
outcomes <- c("ahca", "budg", "immr", "visa", "tcja", "sanc", "turn... | /11_predict-regs.R | no_license | kuriwaki/MRP-target | R | false | false | 1,757 | r | library(tidyverse)
library(foreach)
library(brms)
library(glue)
library(fs)
source("00_functions.R")
# data ---
cd_strat_raw <- read_rds("data/output/by-cd_ACS_gender-age-education.Rds") %>%
transform_vars() %>%
filter(year == 2017)
# model ---
outcomes <- c("ahca", "budg", "immr", "visa", "tcja", "sanc", "turn... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.shapes.R
\name{shapes.coords2points}
\alias{shapes.coords2points}
\title{shapes.coords2points}
\usage{
shapes.coords2points(DT, proj.env.name = NULL)
}
\arguments{
\item{DT}{data.table$long, data.table$lat}
\item{proj.env.name}{Proje... | /man/shapes.coords2points.Rd | no_license | erikbjohn/methods.shapes | R | false | true | 477 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.shapes.R
\name{shapes.coords2points}
\alias{shapes.coords2points}
\title{shapes.coords2points}
\usage{
shapes.coords2points(DT, proj.env.name = NULL)
}
\arguments{
\item{DT}{data.table$long, data.table$lat}
\item{proj.env.name}{Proje... |
adjust_for_dividend <-function(proc, D, dt){
n <-length(proc)
dat_gbm <- proc
counter<-dt
while (counter < n){
for (i in counter:n){
dat_gbm[i] <- dat_gbm[i] - D*dat_gbm[counter]
}
counter <- counter + 60
}
dat_gbm
} | /adjust_for_dividend.R | no_license | KeimaCheck/dividend_simulation | R | false | false | 228 | r | adjust_for_dividend <-function(proc, D, dt){
n <-length(proc)
dat_gbm <- proc
counter<-dt
while (counter < n){
for (i in counter:n){
dat_gbm[i] <- dat_gbm[i] - D*dat_gbm[counter]
}
counter <- counter + 60
}
dat_gbm
} |
#' ---
#' title: "Prior probabilities in the interpretation of 'some': analysis of model predictions and empirical data"
#' author: "Judith Degen"
#' date: "November 28, 2014"
#' ---
library(ggplot2)
theme_set(theme_bw(18))
setwd("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/models/comple... | /models/complex_prior/smoothed_unbinned15/results/rscripts/model-predictions-priorsliders.r | permissive | thegricean/sinking-marbles | R | false | false | 8,645 | r | #' ---
#' title: "Prior probabilities in the interpretation of 'some': analysis of model predictions and empirical data"
#' author: "Judith Degen"
#' date: "November 28, 2014"
#' ---
library(ggplot2)
theme_set(theme_bw(18))
setwd("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/models/comple... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGAPI.R
\name{IG_create_open_pos}
\alias{IG_create_open_pos}
\title{IG API Create one or more OTC positions}
\usage{
IG_create_open_pos(headers,
url = "https://demo-api.ig.com/gateway/deal/positions/otc",
dealReference = "", currency_code... | /man/IG_create_open_pos.Rd | permissive | ivanliu1989/RQuantAPI | R | false | true | 2,306 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGAPI.R
\name{IG_create_open_pos}
\alias{IG_create_open_pos}
\title{IG API Create one or more OTC positions}
\usage{
IG_create_open_pos(headers,
url = "https://demo-api.ig.com/gateway/deal/positions/otc",
dealReference = "", currency_code... |
#!/usr/bin/env Rscript
#
# plot-roc.R <stats TSV> <destination image file> [<comma-separated "aligner" names to include> [title]]
#
# plots a pseudo-ROC that allows the comparison of different alignment methods and their mapping quality calculations
# the format is clarified in the map-sim script, and should be a table... | /scripts/plotting/plot-roc-gbwts.R | no_license | clairemerot/giraffe-sv-paper | R | false | false | 6,070 | r | #!/usr/bin/env Rscript
#
# plot-roc.R <stats TSV> <destination image file> [<comma-separated "aligner" names to include> [title]]
#
# plots a pseudo-ROC that allows the comparison of different alignment methods and their mapping quality calculations
# the format is clarified in the map-sim script, and should be a table... |
context("Get clinical data as a table")
# 'tableClinData' is also tested through the other plot functionalities,
# and via the tests for getClinDT in clinUtils
# so other tests are skipped
test_that("A table is successfully created for clinical data", {
data <- data.frame(USUBJID = c("ID1", "ID2", "ID3", "ID4... | /package/clinDataReview/tests/testthat/test_tableClinData.R | no_license | Lion666/clinDataReview | R | false | false | 1,568 | r | context("Get clinical data as a table")
# 'tableClinData' is also tested through the other plot functionalities,
# and via the tests for getClinDT in clinUtils
# so other tests are skipped
test_that("A table is successfully created for clinical data", {
data <- data.frame(USUBJID = c("ID1", "ID2", "ID3", "ID4... |
################## DataObserver : SERVER ################
library(shiny)
library(ggplot2)
library(ggthemes)
library(doBy)
library(dplyr)
library(plyr)
#
# shinyServer(func=function(input, output) {
# load(paste("Risk.all",input$Date,".RData", sep=""))
# # There may be some variables to rename
# try(risk.al... | /Shiny/DataObserver/server.R | no_license | XtopheB/ProgsOptilait | R | false | false | 10,417 | r | ################## DataObserver : SERVER ################
library(shiny)
library(ggplot2)
library(ggthemes)
library(doBy)
library(dplyr)
library(plyr)
#
# shinyServer(func=function(input, output) {
# load(paste("Risk.all",input$Date,".RData", sep=""))
# # There may be some variables to rename
# try(risk.al... |
library(pdftools)
library(rvest)
library(stringr)
url = "https://www.tccs.act.gov.au/city-living/trees/design-standards-23-draft-tree-species-list/"
url_list = c("native-15m", "native-10-15m", "native-less-than-10m", "introduced-15m", "introduced-10-15m", "introduced-less-than-10m", "conifers")
list_of_search_page... | /Scraped data/TCCS/TCSSpdfs.R | no_license | dcol2804/Traits-Database | R | false | false | 3,228 | r | library(pdftools)
library(rvest)
library(stringr)
url = "https://www.tccs.act.gov.au/city-living/trees/design-standards-23-draft-tree-species-list/"
url_list = c("native-15m", "native-10-15m", "native-less-than-10m", "introduced-15m", "introduced-10-15m", "introduced-less-than-10m", "conifers")
list_of_search_page... |
dt_env = new.env()
developer_ownership = function(database_host, database_name, working_dir, web_working_dir = working_dir) {
library(reshape2)
library(ggplot2)
library(gplots)
library(RColorBrewer)
library(gdata)
library(grid)
library(gridExtra)
library(htmlTable)
dt_env$database_host = database_ho... | /R/developer_ownership.R | no_license | matthieu-foucault/RdeveloperTurnover | R | false | false | 2,715 | r |
dt_env = new.env()
developer_ownership = function(database_host, database_name, working_dir, web_working_dir = working_dir) {
library(reshape2)
library(ggplot2)
library(gplots)
library(RColorBrewer)
library(gdata)
library(grid)
library(gridExtra)
library(htmlTable)
dt_env$database_host = database_ho... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/termkey.R
\name{validate_termkey}
\alias{validate_termkey}
\title{Determine if a termkey is valid}
\usage{
validate_termkey(termkey, allow_seasonkeys = FALSE)
}
\arguments{
\item{termkey}{TermKey for record pulled from SQL database}
}
\value{... | /man/validate_termkey.Rd | no_license | IndianaCHE/IndianaCHEmisc | R | false | true | 966 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/termkey.R
\name{validate_termkey}
\alias{validate_termkey}
\title{Determine if a termkey is valid}
\usage{
validate_termkey(termkey, allow_seasonkeys = FALSE)
}
\arguments{
\item{termkey}{TermKey for record pulled from SQL database}
}
\value{... |
\name{pseudoR2}
\alias{pseudoR2}
\alias{pseudoR2.ppm}
\alias{pseudoR2.slrm}
\title{
Calculate Pseudo-R-Squared for Point Process Model
}
\description{
Given a fitted point process model, calculate
the pseudo-R-squared value, which measures the
fraction of variation in the data that is explained
by the model... | /man/pseudoR2.Rd | no_license | spatstat/spatstat.core | R | false | false | 2,052 | rd | \name{pseudoR2}
\alias{pseudoR2}
\alias{pseudoR2.ppm}
\alias{pseudoR2.slrm}
\title{
Calculate Pseudo-R-Squared for Point Process Model
}
\description{
Given a fitted point process model, calculate
the pseudo-R-squared value, which measures the
fraction of variation in the data that is explained
by the model... |
data <- read_csv("data.csv")
data$type_employer = as.character(data$type_employer)
data$occupation = as.character(data$occupation)
data$country = as.character(data$country)
data$race = as.character(data$race)
data$marital = as.character(data$marital)
data$marital[data$marital=="Never-married"] = "Never-Married" ... | /Temp/preprop.R | no_license | ksrikanthcnc/Data-Mining | R | false | false | 9,159 | r | data <- read_csv("data.csv")
data$type_employer = as.character(data$type_employer)
data$occupation = as.character(data$occupation)
data$country = as.character(data$country)
data$race = as.character(data$race)
data$marital = as.character(data$marital)
data$marital[data$marital=="Never-married"] = "Never-Married" ... |
\name{virtualArrayComBat}
\alias{virtualArrayComBat}
\alias{virtualArrayComBat,ExpressionSet-method}
\alias{virtualArrayComBat,data.frame-method}
\alias{virtualArrayComBat,character-method}
\title{
Removes batch effects from microarray derived expression matrices. Modified version.
}
\description{
This is a modified ve... | /man/virtualArrayComBat.Rd | no_license | scfurl/virtualArray | R | false | false | 2,970 | rd | \name{virtualArrayComBat}
\alias{virtualArrayComBat}
\alias{virtualArrayComBat,ExpressionSet-method}
\alias{virtualArrayComBat,data.frame-method}
\alias{virtualArrayComBat,character-method}
\title{
Removes batch effects from microarray derived expression matrices. Modified version.
}
\description{
This is a modified ve... |
#- New ExpandYear function ----
expandYear <- function (data, areaVar = "geographicAreaM49", elementVar = "measuredElement",
itemVar = "measuredItemCPC", yearVar = "timePointYears",
valueVar = "Value", obsflagVar = "flagObservationStatus",
methF... | /shinyProducerPrices3/modified_functions.R | no_license | SWS-Methodology/faoswsProducerPrices | R | false | false | 4,155 | r | #- New ExpandYear function ----
expandYear <- function (data, areaVar = "geographicAreaM49", elementVar = "measuredElement",
itemVar = "measuredItemCPC", yearVar = "timePointYears",
valueVar = "Value", obsflagVar = "flagObservationStatus",
methF... |
library(forecast)
library(quantmod)
library(timeSeries)
library(tseries)
library(xts)
library(lmtest)
library(rugarch)
source('funcs.R')
# 1. Prepare overall data
df=read.csv('datasets_created_python/merged_all.csv')
df$date=as.POSIXct(as.Date(df$date))
df=df[seq(51,dim(df)[1],1),]
summary(df)
crypto_abr=c('BTC','ETH'... | /masters_work.R | no_license | ssh352/Speculation-and-volatility-of-cryptocurrencies | R | false | false | 4,459 | r | library(forecast)
library(quantmod)
library(timeSeries)
library(tseries)
library(xts)
library(lmtest)
library(rugarch)
source('funcs.R')
# 1. Prepare overall data
df=read.csv('datasets_created_python/merged_all.csv')
df$date=as.POSIXct(as.Date(df$date))
df=df[seq(51,dim(df)[1],1),]
summary(df)
crypto_abr=c('BTC','ETH'... |
## This script creates a "legoplot" similar to those produced by the Broad Institute
## The plot shows the relative abundance of each of the 6 possible mutations in the
## 16 sequence contexts
## Load packages
library(rgl)
#### START OF FUNCTIONS
## Functions modified from the "demo(hist3d)" examples in the rgl pac... | /Pro3/R_p3/barplot3d.R | no_license | xl0418/Code | R | false | false | 7,498 | r | ## This script creates a "legoplot" similar to those produced by the Broad Institute
## The plot shows the relative abundance of each of the 6 possible mutations in the
## 16 sequence contexts
## Load packages
library(rgl)
#### START OF FUNCTIONS
## Functions modified from the "demo(hist3d)" examples in the rgl pac... |
\name{SNPsm} % DESCRIPTION OF FUNCTION SNPsm, 23.10.2012
\alias{SNPsm}
\alias{SNPsm.default}
\alias{plot.SNPsm}
\alias{SNPsm2}
\title{
The spatial and temporal model of succession in the Swiss National Park
}
\description{
A dynamic model of succession on alp Stabelchod in the Swiss Nationl Park using different... | /man/SNPsm.Rd | no_license | cran/dave | R | false | false | 2,349 | rd | \name{SNPsm} % DESCRIPTION OF FUNCTION SNPsm, 23.10.2012
\alias{SNPsm}
\alias{SNPsm.default}
\alias{plot.SNPsm}
\alias{SNPsm2}
\title{
The spatial and temporal model of succession in the Swiss National Park
}
\description{
A dynamic model of succession on alp Stabelchod in the Swiss Nationl Park using different... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_class.R
\docType{class}
\name{NIMLSurfaceDataMetaInfo-class}
\alias{NIMLSurfaceDataMetaInfo-class}
\title{NIMLSurfaceDataMetaInfo}
\description{
This class contains meta information for surface-based data for the NIML data format
}
\secti... | /man/NIMLSurfaceDataMetaInfo-class.Rd | no_license | bbuchsbaum/neurosurf | R | false | true | 547 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_class.R
\docType{class}
\name{NIMLSurfaceDataMetaInfo-class}
\alias{NIMLSurfaceDataMetaInfo-class}
\title{NIMLSurfaceDataMetaInfo}
\description{
This class contains meta information for surface-based data for the NIML data format
}
\secti... |
library(BioPhysConnectoR)
library(ggplot2)
library(viridis)
library(parallel)
library(DECIPHER)
if(!exists("primerF")){
source("R/1_generalAA.R")
}
aln <- read.fasta("/SAN/db/RDP/Silva_123/silva.nr_v123_EUK.align")
keep <- !apply(aln$ali, 2, function (x) all(x %in% c("-", ".")) )
aln <- aln$ali[,keep]
ent <- ge... | /R/4_entropy.R | no_license | derele/AA_Metabarcoding | R | false | false | 3,764 | r | library(BioPhysConnectoR)
library(ggplot2)
library(viridis)
library(parallel)
library(DECIPHER)
if(!exists("primerF")){
source("R/1_generalAA.R")
}
aln <- read.fasta("/SAN/db/RDP/Silva_123/silva.nr_v123_EUK.align")
keep <- !apply(aln$ali, 2, function (x) all(x %in% c("-", ".")) )
aln <- aln$ali[,keep]
ent <- ge... |
#Data Table - Learnign how to work with it
#DT[i, j, by]
## R: i j by
## SQL: where | order by select | update group by
#Take DT, subset/reorder rows using i, then calculate j, grouped by by.
#Source=https://cran.r-project.org/web/packages/data.table/vignettes/datatable-int... | /Introduction_data_table.R | no_license | secun/Learning_R_Examples | R | false | false | 4,439 | r | #Data Table - Learnign how to work with it
#DT[i, j, by]
## R: i j by
## SQL: where | order by select | update group by
#Take DT, subset/reorder rows using i, then calculate j, grouped by by.
#Source=https://cran.r-project.org/web/packages/data.table/vignettes/datatable-int... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsetbydaterange.R
\name{subset_by_date_range}
\alias{subset_by_date_range}
\title{subset_by_date_range}
\usage{
subset_by_date_range(data_set, date_col = "detected_at", start_date,
end_date, na.rm = FALSE)
}
\arguments{
\item{da... | /man/subset_by_date_range.Rd | no_license | Keegan-Evans/pitDataR | R | false | true | 732 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsetbydaterange.R
\name{subset_by_date_range}
\alias{subset_by_date_range}
\title{subset_by_date_range}
\usage{
subset_by_date_range(data_set, date_col = "detected_at", start_date,
end_date, na.rm = FALSE)
}
\arguments{
\item{da... |
library(readxl)
library(dplyr)
setwd("C:/Users/lenovo/Documents/analisis/postales")
## Leyendo EEFF 2020 SUPERCIAS
postales<-read.csv("bal2020.txt", sep="\t", header=TRUE, dec=",", colClasses = c("RUC"="character"), fileEncoding="UTF-16", skipNul = TRUE, fill=TRUE)
## EEFF de empresas con TH postal
postalTH... | /script.R | no_license | mminita8/postales | R | false | false | 1,006 | r | library(readxl)
library(dplyr)
setwd("C:/Users/lenovo/Documents/analisis/postales")
## Leyendo EEFF 2020 SUPERCIAS
postales<-read.csv("bal2020.txt", sep="\t", header=TRUE, dec=",", colClasses = c("RUC"="character"), fileEncoding="UTF-16", skipNul = TRUE, fill=TRUE)
## EEFF de empresas con TH postal
postalTH... |
require(quantmod)
require(PerformanceAnalytics)
require(DEoptim)
require(parallel)
set.seed(1)
# Step 1: Get the data
getSymbols("PH")
# Step 2: Create your indicator
dvi <- DVI(Cl(PH))
func <- function(x) {
# Step 3: Construct your trading rule
sig <- Lag(ifelse(dvi$dvi < x[1], 1, -1))
... | /simple_backtest_opt.R | no_license | githubfun/omitt | R | false | false | 811 | r | require(quantmod)
require(PerformanceAnalytics)
require(DEoptim)
require(parallel)
set.seed(1)
# Step 1: Get the data
getSymbols("PH")
# Step 2: Create your indicator
dvi <- DVI(Cl(PH))
func <- function(x) {
# Step 3: Construct your trading rule
sig <- Lag(ifelse(dvi$dvi < x[1], 1, -1))
... |
\name{CCcheck}
\alias{CCcheck}
\title{Counter Clockwise check}
\description{Check for counter-clockwise orientation
for polygons. Positive is counterclockwise.
}
\usage{
CCcheck(Z)
}
\arguments{
\item{Z}{list(x,y) }
}
\details{ Uses sign of the area of the polygon to determine
polarity.
}
\value{
\item{j}{sign... | /man/CCcheck.Rd | no_license | cran/GEOmap | R | false | false | 691 | rd | \name{CCcheck}
\alias{CCcheck}
\title{Counter Clockwise check}
\description{Check for counter-clockwise orientation
for polygons. Positive is counterclockwise.
}
\usage{
CCcheck(Z)
}
\arguments{
\item{Z}{list(x,y) }
}
\details{ Uses sign of the area of the polygon to determine
polarity.
}
\value{
\item{j}{sign... |
rm(list=ls())
yes=read.csv('YES.csv')
jjj12=read.csv('joined12.csv')
jjj13=read.csv('joined13.csv')
jjj14=read.csv('joined.csv')
jjj15=read.csv('joined15 new.csv')
ALLIDS=rbind(jjj12,jjj13,jjj14,jjj15)
myvars=c('game_id','home_team_pts','away_team_pts')
ALLIDS2=ALLIDS[myvars]
new=merge(yes,AL... | /nba RF.R | no_license | garretthill/NBA | R | false | false | 4,518 | r |
rm(list=ls())
yes=read.csv('YES.csv')
jjj12=read.csv('joined12.csv')
jjj13=read.csv('joined13.csv')
jjj14=read.csv('joined.csv')
jjj15=read.csv('joined15 new.csv')
ALLIDS=rbind(jjj12,jjj13,jjj14,jjj15)
myvars=c('game_id','home_team_pts','away_team_pts')
ALLIDS2=ALLIDS[myvars]
new=merge(yes,AL... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscaling_operations.R
\name{autoscaling_create_launch_configuration}
\alias{autoscaling_create_launch_configuration}
\title{Creates a launch configuration}
\usage{
autoscaling_create_launch_configuration(LaunchConfigurationName,
ImageId,... | /paws/man/autoscaling_create_launch_configuration.Rd | permissive | peoplecure/paws | R | false | true | 9,962 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscaling_operations.R
\name{autoscaling_create_launch_configuration}
\alias{autoscaling_create_launch_configuration}
\title{Creates a launch configuration}
\usage{
autoscaling_create_launch_configuration(LaunchConfigurationName,
ImageId,... |
extractplate = function(datbefore, datafter, plate, replicate){
datbefore = datbefore[[replicate]]
datafter = datafter[[replicate]]
if (plate == 1){
datbefore = datbefore[seq(1,nrow(datbefore),2),]
datbefore = datbefore[,seq(1,24,2)]
datafter = datafter[seq(1,nrow(datafter),2),]
datafter = dataf... | /highSCREEN/R/extractplate.R | no_license | ingted/R-Examples | R | false | false | 1,084 | r | extractplate = function(datbefore, datafter, plate, replicate){
datbefore = datbefore[[replicate]]
datafter = datafter[[replicate]]
if (plate == 1){
datbefore = datbefore[seq(1,nrow(datbefore),2),]
datbefore = datbefore[,seq(1,24,2)]
datafter = datafter[seq(1,nrow(datafter),2),]
datafter = dataf... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildRecord.R
\name{getAnalyticalInfo}
\alias{getAnalyticalInfo}
\alias{gatherCompound}
\alias{gatherSpectrum}
\title{Compose data block of MassBank record}
\usage{
gatherCompound(spec, aggregated, additionalPeaks = NULL, retrieval="standard"... | /man/getAnalyticalInfo.Rd | no_license | sneumann/RMassBank | R | false | true | 2,732 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildRecord.R
\name{getAnalyticalInfo}
\alias{getAnalyticalInfo}
\alias{gatherCompound}
\alias{gatherSpectrum}
\title{Compose data block of MassBank record}
\usage{
gatherCompound(spec, aggregated, additionalPeaks = NULL, retrieval="standard"... |
simulate.rtgs.records <- function(table,date_column='date',time_column='time',sender_column='sender',receiver_column='receiver',value_column='value',priority_column='priority',date_format='As defined in default format settings.',time_format='As defined in default format settings.',decimal_separator='As defined in defau... | /R/simulate.rtgs.records.R | no_license | lubospernis/FNA_package | R | false | false | 578 | r | simulate.rtgs.records <- function(table,date_column='date',time_column='time',sender_column='sender',receiver_column='receiver',value_column='value',priority_column='priority',date_format='As defined in default format settings.',time_format='As defined in default format settings.',decimal_separator='As defined in defau... |
# This script will create a RGSet for the discovery cohort and a RGSet for the validation cohort
funnormDir <- "/amber1/archive/sgseq/workspace/hansen_lab1/funnorm_repro"
rawDir <- paste0(funnormDir,"/raw_datasets")
disValDir <- paste0(funnormDir,"/dis_val_datasets")
designDir <- paste0(funnormDir,"/designs")
normDir ... | /ruv_funnorm_results/create.ruv.funnorm.dmps.R | no_license | Jfortin1/funnorm_repro | R | false | false | 1,167 | r | # This script will create a RGSet for the discovery cohort and a RGSet for the validation cohort
funnormDir <- "/amber1/archive/sgseq/workspace/hansen_lab1/funnorm_repro"
rawDir <- paste0(funnormDir,"/raw_datasets")
disValDir <- paste0(funnormDir,"/dis_val_datasets")
designDir <- paste0(funnormDir,"/designs")
normDir ... |
library(ggplot2)
extract_cod <- function (trnas, anticod){
output = data.frame(row.names = anticod)
trnas_acod = sapply(rownames(trnas), function(x) substr(x,nchar(x)-2,nchar(x)))
for (s in colnames(trnas)){
output[,s] = sapply(anticod, function(x) if(any(trnas_acod==x)){mean(trnas[trnas_acod==x,s])}el... | /5-2_subsets_CU.R | no_license | hexavier/tRNA_viruses | R | false | false | 2,473 | r | library(ggplot2)
extract_cod <- function (trnas, anticod){
output = data.frame(row.names = anticod)
trnas_acod = sapply(rownames(trnas), function(x) substr(x,nchar(x)-2,nchar(x)))
for (s in colnames(trnas)){
output[,s] = sapply(anticod, function(x) if(any(trnas_acod==x)){mean(trnas[trnas_acod==x,s])}el... |
snap.read.2 = function(file, what, ndim, type, debug, gas, thin=1){
if(missing(what)) what="HEAD"
what=gsub("^\\s+|\\s+$", "", what)
if(missing(debug)) debug=0
if(missing(ndim) && missing(type)){
tmp=snap.select.type.2(what)
ndim=tmp$ndim
type=tmp$type
}else{
if(missing(ndim)) ... | /R/snap.read.2.R | no_license | asgr/snapshot | R | false | false | 3,271 | r | snap.read.2 = function(file, what, ndim, type, debug, gas, thin=1){
if(missing(what)) what="HEAD"
what=gsub("^\\s+|\\s+$", "", what)
if(missing(debug)) debug=0
if(missing(ndim) && missing(type)){
tmp=snap.select.type.2(what)
ndim=tmp$ndim
type=tmp$type
}else{
if(missing(ndim)) ... |
folder_out <-paste0(output_path, "/presentation_plots")
dir.create(folder_out)
folder.out2 <-paste0(output_path, "/cum_fluxes_14")
cum.flux.1st.14 <- paste0(folder.out2, "/1st_event_cum14_fluxes.dat")
cum.flux.2nd.14 <- paste0(folder.out2, "/2nd_event_cum14_fluxes.dat")
cum.flux.3rd.14 <- paste0(folder.out2, "/3... | /boxplots_N2O.R | no_license | pz10/all_incubations | R | false | false | 12,160 | r | folder_out <-paste0(output_path, "/presentation_plots")
dir.create(folder_out)
folder.out2 <-paste0(output_path, "/cum_fluxes_14")
cum.flux.1st.14 <- paste0(folder.out2, "/1st_event_cum14_fluxes.dat")
cum.flux.2nd.14 <- paste0(folder.out2, "/2nd_event_cum14_fluxes.dat")
cum.flux.3rd.14 <- paste0(folder.out2, "/3... |
6db405d7de3ff92c7279f85a2946e070 ttt_5x5-shape-4-GTTT-2-1-torus-1.qdimacs 2154 9289 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_5x5-shape-4-GTTT-2-1-torus-1/ttt_5x5-shape-4-GTTT-2-1-torus-1.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 83 | r | 6db405d7de3ff92c7279f85a2946e070 ttt_5x5-shape-4-GTTT-2-1-torus-1.qdimacs 2154 9289 |
#' Process sample contamination checks
#'
#' @description
#' Takes *selfSM reports generated by VerifyBamID during alignment, and returns a vector of freemix scores.
#' The freemix score is a sequence only estimate of sample contamination that ranges from 0 to 1.
#'
#' Note: Targeted panels are often too small for th... | /R/process.sample.contamination.checks.R | no_license | cran/varitas | R | false | false | 1,821 | r | #' Process sample contamination checks
#'
#' @description
#' Takes *selfSM reports generated by VerifyBamID during alignment, and returns a vector of freemix scores.
#' The freemix score is a sequence only estimate of sample contamination that ranges from 0 to 1.
#'
#' Note: Targeted panels are often too small for th... |
test_that("Checking anlz_tbnimet, tbni metrics only", {
# raw metric data
dat <- anlz_tbnimet(fimdata)
# get last row of data
result <- dat[nrow(dat), ]
expect_equal(result, structure(list(Reference = "TBM2019121309", Year = 2019, Month = 12,
Season = "Winter", bay_segm... | /tests/testthat/test-anlz_tbnimet.R | permissive | mikewessel/tbeptools | R | false | false | 2,011 | r | test_that("Checking anlz_tbnimet, tbni metrics only", {
# raw metric data
dat <- anlz_tbnimet(fimdata)
# get last row of data
result <- dat[nrow(dat), ]
expect_equal(result, structure(list(Reference = "TBM2019121309", Year = 2019, Month = 12,
Season = "Winter", bay_segm... |
load("data/diagnozaOsoby2011.RData")
variablesOriginalNamesYear2000 <- c("ap83_1", "ap83_2", "ap83_3", "ap84", "ap85", "ap86", "ap100", "ac8",
"wiek2000", "wiek6_2000", "status9_2000", "eduk4_2000", "PLEC", "bp107") # We cannot take bp107 - these results are not from year 2000.
variable... | /scripts/Cleanup.R | no_license | MatteoLacki/projectFive | R | false | false | 7,205 | r | load("data/diagnozaOsoby2011.RData")
variablesOriginalNamesYear2000 <- c("ap83_1", "ap83_2", "ap83_3", "ap84", "ap85", "ap86", "ap100", "ac8",
"wiek2000", "wiek6_2000", "status9_2000", "eduk4_2000", "PLEC", "bp107") # We cannot take bp107 - these results are not from year 2000.
variable... |
# Make an example table
a <- matrix(rnorm(n=100), nrow=100, ncol=100)
b <- matrix(rnorm(n=100), nrow=100, ncol=100)
c <- matrix(rnorm(n=100), nrow=100, ncol=100)
# Get values in upper triangle
values <- getUpperTriangle(a)
output <- getUpperTriangleOfMatrices(a, b, c)
#############
# FUNCTIONS #
#############
getUpp... | /FlattenMatrix_Adrian_13-03-18.R | no_license | AdrianAllen1977/R-code | R | false | false | 1,830 | r | # Make an example table
a <- matrix(rnorm(n=100), nrow=100, ncol=100)
b <- matrix(rnorm(n=100), nrow=100, ncol=100)
c <- matrix(rnorm(n=100), nrow=100, ncol=100)
# Get values in upper triangle
values <- getUpperTriangle(a)
output <- getUpperTriangleOfMatrices(a, b, c)
#############
# FUNCTIONS #
#############
getUpp... |
#####
## FOR (EVENTUALLY) RUNNING IN BATCH MODE ON AWS WITH ARGUMENTS DESCRIBED BELOW
#####
## SOURCE IN SHARED .Rprofile WHICH CONTAINS SYNAPSE LOGIN HOOK,
## SETS COMMON SYNAPSE CACHE FOR ALL WORKERS, AND SETS COMMON LIBPATH
source("/shared/code/R/.Rprofile")
#####
## TAKES FOR ARGUMENTS (PASSED FROM sgeKickoff.R)
##... | /evals/evalGenesetsConsensus.R | no_license | laderast/crcsc | R | false | false | 7,234 | r | #####
## FOR (EVENTUALLY) RUNNING IN BATCH MODE ON AWS WITH ARGUMENTS DESCRIBED BELOW
#####
## SOURCE IN SHARED .Rprofile WHICH CONTAINS SYNAPSE LOGIN HOOK,
## SETS COMMON SYNAPSE CACHE FOR ALL WORKERS, AND SETS COMMON LIBPATH
source("/shared/code/R/.Rprofile")
#####
## TAKES FOR ARGUMENTS (PASSED FROM sgeKickoff.R)
##... |
b47addcb02c5a49eb36fee58c0f7a436 ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp.qdimacs 5459 15838 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp/ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 91 | r | b47addcb02c5a49eb36fee58c0f7a436 ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp.qdimacs 5459 15838 |
library(testthat)
test_check("newsfreq")
| /tests/test-all.R | no_license | hrbrmstr/newsfreq | R | false | false | 41 | r | library(testthat)
test_check("newsfreq")
|
##' Function to add leading zeroes to maintain fixed width.
##' @description This function ensures that fixed width data is the right
##' length by padding zeroes to the front of values. This is a common problem
##' with fixed width data after importing into R as non-character type.
##' @param x a vector of nume... | /R/leading_zero.R | no_license | cran/eeptools | R | false | false | 1,290 | r | ##' Function to add leading zeroes to maintain fixed width.
##' @description This function ensures that fixed width data is the right
##' length by padding zeroes to the front of values. This is a common problem
##' with fixed width data after importing into R as non-character type.
##' @param x a vector of nume... |
# Reading, naming and subsetting power consumption data
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
subpower <- subset(power,power$Date... | /Plot4.R | no_license | sbaga90/Exploratory-Data-Analysis-project1 | R | false | false | 1,779 | r | # Reading, naming and subsetting power consumption data
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
subpower <- subset(power,power$Date... |
# Parameters
waypoints = seq (2, 10)
runs = seq (1, 100)
#### Doorways. Experiment ?: (93, 56) --> (62, 148)
# Load data
df <- read.csv ("/home/krell/Downloads/experimentTable_Doorways.csv", header = FALSE)
# Separate out the obstacle table
obstacles <- df[df$V5 == -1,]
df <- df[df$V5 != -1, ]
getByWaypoint <- f... | /navigation/pso/evaluation.R | no_license | vanshgoyal/rotf-software | R | false | false | 7,704 | r | # Parameters
waypoints = seq (2, 10)
runs = seq (1, 100)
#### Doorways. Experiment ?: (93, 56) --> (62, 148)
# Load data
df <- read.csv ("/home/krell/Downloads/experimentTable_Doorways.csv", header = FALSE)
# Separate out the obstacle table
obstacles <- df[df$V5 == -1,]
df <- df[df$V5 != -1, ]
getByWaypoint <- f... |
###################################
### Create Node and Edge Frames ###
###################################
Derive_Edge_Weights <- function(Node_Frame, Edge_Frame){
check_list <- Node_Frame[,1]
rownames(Node_Frame) <- check_list
final_edge_frame_2 <- Edge_Frame[((is.element(Edge_Frame[,'from'],check_list)==TRUE)... | /execution/JUMPn_Helpers/JUMPn_functions/Network_Analysis.R | no_license | VanderwallDavid/JUMPn_1.0.0 | R | false | false | 749 | r | ###################################
### Create Node and Edge Frames ###
###################################
Derive_Edge_Weights <- function(Node_Frame, Edge_Frame){
check_list <- Node_Frame[,1]
rownames(Node_Frame) <- check_list
final_edge_frame_2 <- Edge_Frame[((is.element(Edge_Frame[,'from'],check_list)==TRUE)... |
library(symbolicDA)
### Name: generate.SO
### Title: generation of artifficial symbolic data table with given cluster
### structure
### Aliases: generate.SO
### Keywords: symbolic,SDA
### ** Examples
# Example will be available in next version of package, thank You for your patience :-)
| /data/genthat_extracted_code/symbolicDA/examples/generate.SO.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 297 | r | library(symbolicDA)
### Name: generate.SO
### Title: generation of artifficial symbolic data table with given cluster
### structure
### Aliases: generate.SO
### Keywords: symbolic,SDA
### ** Examples
# Example will be available in next version of package, thank You for your patience :-)
|
#' Plot the ROC Curves
#'
#'
#' @author Elías Alegría <elias.alegria@ug.uchile.cl>
#' @param models list of h2o models class H2OBinomialModel
#' @param newdata dataframe class H2OFrame
#' @param xval if TRUE plot the ROC Curves on cross validation
#'
#' @return ggplot graph
#' @export
#'
#' @seealso h2o.plotLift(), h2o... | /R/h2o.plotROC.R | no_license | huasin/h2plots | R | false | false | 3,164 | r | #' Plot the ROC Curves
#'
#'
#' @author Elías Alegría <elias.alegria@ug.uchile.cl>
#' @param models list of h2o models class H2OBinomialModel
#' @param newdata dataframe class H2OFrame
#' @param xval if TRUE plot the ROC Curves on cross validation
#'
#' @return ggplot graph
#' @export
#'
#' @seealso h2o.plotLift(), h2o... |
require("mboost")
if (require("partykit")) {
set.seed(290875)
tst <- try(data("BostonHousing", package = "mlbench"))
if (!inherits(tst, "try-error")) {
system.time(a <- blackboost(medv ~ ., data = BostonHousing,
tree_controls = ctree_control(teststat = "max",
testtype = "... | /tests/regtest-blackboost.R | no_license | boost-R/mboost | R | false | false | 4,494 | r |
require("mboost")
if (require("partykit")) {
set.seed(290875)
tst <- try(data("BostonHousing", package = "mlbench"))
if (!inherits(tst, "try-error")) {
system.time(a <- blackboost(medv ~ ., data = BostonHousing,
tree_controls = ctree_control(teststat = "max",
testtype = "... |
# Shared code to download and read in the neccessary data
source("readdata.R")
# Set locale to English, so that the labels on the x Axis are in english
loc <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")
png(filename="plot4.png")
# Make a plot with four graphs
par(mfrow=c(2,2))
# Same as in plot2.R
... | /plot4.R | no_license | alexkops/ExData_Plotting1 | R | false | false | 968 | r | # Shared code to download and read in the neccessary data
source("readdata.R")
# Set locale to English, so that the labels on the x Axis are in english
loc <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")
png(filename="plot4.png")
# Make a plot with four graphs
par(mfrow=c(2,2))
# Same as in plot2.R
... |
report_mode <- 1
# If 1, we are generating a report!
petoc <- function() {
if (report_mode == 0) {
message("Press [Enter] to continue")
r <- readline()
if (r == "q") {
terminate_session()
stop("User asked for termination.\n")
}
}
}
#' Basic tests of model functionalty. Serious issues ... | /R/validation.R | no_license | tyhlee/epicR | R | false | false | 72,108 | r | report_mode <- 1
# If 1, we are generating a report!
petoc <- function() {
if (report_mode == 0) {
message("Press [Enter] to continue")
r <- readline()
if (r == "q") {
terminate_session()
stop("User asked for termination.\n")
}
}
}
#' Basic tests of model functionalty. Serious issues ... |
#' Check result of exercise code
#'
#' \code{check_result()} compares the final result of the student code to known
#' \code{\link{pass_if}} and \code{\link{fail_if}} \code{\link{condition}}s.
#' If the student result exactly matches a known case, \code{check_result}
#' returns the matching message value.
#'
#' @param... | /R/check_result.R | no_license | garrettgman/gradethis | R | false | false | 2,077 | r |
#' Check result of exercise code
#'
#' \code{check_result()} compares the final result of the student code to known
#' \code{\link{pass_if}} and \code{\link{fail_if}} \code{\link{condition}}s.
#' If the student result exactly matches a known case, \code{check_result}
#' returns the matching message value.
#'
#' @param... |
# K-Means Clustering
# Importing the mall dataset
dataset <- read.csv('Mall_Customers.csv')
X <- dataset[4:5]
# Using the elbow method to find the optimal number of clusters
set.seed(6)
wcss <- vector()
for (i in 1:10) wcss[i] <- sum(kmeans(X, i)$withinss)
plot(1:10, wcss, type = 'b', main = paste('Clusters... | /Working Data K-Means.R | no_license | taksug229/R-K-means-clustering | R | false | false | 862 | r | # K-Means Clustering
# Importing the mall dataset
dataset <- read.csv('Mall_Customers.csv')
X <- dataset[4:5]
# Using the elbow method to find the optimal number of clusters
set.seed(6)
wcss <- vector()
for (i in 1:10) wcss[i] <- sum(kmeans(X, i)$withinss)
plot(1:10, wcss, type = 'b', main = paste('Clusters... |
# Description ---------------
#### Mal checken: https://www.datascience.com/blog/introduction-to-forecasting-with-arima-in-r-learn-data-science-tutorials #####
# Todo: Train Data f?r TBATS kann beide male 3 Wochen gross sein (gleiches modell), f?r das Train Data Set f?r RF muss dann letzte woche abgeschnitten werden ... | /03_DataAnalysis_05.R | no_license | CorneliusSchramm/01_Scripts_BusinessAnalytics_ParkMe | R | false | false | 10,931 | r | # Description ---------------
#### Mal checken: https://www.datascience.com/blog/introduction-to-forecasting-with-arima-in-r-learn-data-science-tutorials #####
# Todo: Train Data f?r TBATS kann beide male 3 Wochen gross sein (gleiches modell), f?r das Train Data Set f?r RF muss dann letzte woche abgeschnitten werden ... |
deck <- read.csv('deck.csv')
deal <- function(cards) {
cards[1,]
} | /playing_cards.R | no_license | umairrafique85/Playing_cards | R | false | false | 69 | r | deck <- read.csv('deck.csv')
deal <- function(cards) {
cards[1,]
} |
testlist <- list(x = structure(c(2.2202775176633e-271, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(9L, 6L)))
result <- do.call(bravo:::colSumSq_matrix,testlist)
str(result) | /bravo/inst/testfiles/colSumSq_matrix/libFuzzer_colSumSq_matrix/colSumSq_matrix_valgrind_files/1609958974-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 300 | r | testlist <- list(x = structure(c(2.2202775176633e-271, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(9L, 6L)))
result <- do.call(bravo:::colSumSq_matrix,testlist)
str(result) |
setwd("E:\\Harddrive\\OneDrive - Lund University\\Mastern\\Spring 2020\\NEKN34 Time Series Analysis\\Assignments\\Ass 3\\Assignment-3-TS")
getwd()
install.packages("")
library(vars)
library(urca)
library(tseries)
library(tsDyn)
library(lmtest)
library(car)
library(data.table) #used for shifting, aka lagging
library(dy... | /MainCodingFile.R | no_license | Supersoppan/Assignment-3-TS | R | false | false | 4,993 | r | setwd("E:\\Harddrive\\OneDrive - Lund University\\Mastern\\Spring 2020\\NEKN34 Time Series Analysis\\Assignments\\Ass 3\\Assignment-3-TS")
getwd()
install.packages("")
library(vars)
library(urca)
library(tseries)
library(tsDyn)
library(lmtest)
library(car)
library(data.table) #used for shifting, aka lagging
library(dy... |
# Define server logic required to draw a histogram
server <- function(input, output) {
################### INPUT ####################
select_state <- eventReactive(input$go, {
state_name <- input$state
twin <- input$true_date
df_state <- master_df %>% filter(state_name == stat... | /server.R | no_license | pccql/shiny-project | R | false | false | 8,038 | r |
# Define server logic required to draw a histogram
server <- function(input, output) {
################### INPUT ####################
select_state <- eventReactive(input$go, {
state_name <- input$state
twin <- input$true_date
df_state <- master_df %>% filter(state_name == stat... |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
warning = FALSE,
message = FALSE,
fig.align = "center",
out.width = "90%",
fig.width = 7,
fig.height = 5
)
## ---- echo=F--------------------------------------------... | /inst/doc/getting-started.R | no_license | cran/modeltime.gluonts | R | false | false | 2,897 | r | ## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
warning = FALSE,
message = FALSE,
fig.align = "center",
out.width = "90%",
fig.width = 7,
fig.height = 5
)
## ---- echo=F--------------------------------------------... |
# some basic useful functions
# function: is not in
'%!in%' <- function(x,y)!('%in%'(x,y))
# function: remove '\xa0' chars
phrase_clean <- function(x) gsub("[\xA0]", "", x)
# function: replace double spaces with single spaces
space_clean <- function(x) gsub(" ", " ", x)
# function: apply a function to ALL characte... | /useful_basic.r | permissive | Jegelewicz/r-codesnippets | R | false | false | 1,336 | r | # some basic useful functions
# function: is not in
'%!in%' <- function(x,y)!('%in%'(x,y))
# function: remove '\xa0' chars
phrase_clean <- function(x) gsub("[\xA0]", "", x)
# function: replace double spaces with single spaces
space_clean <- function(x) gsub(" ", " ", x)
# function: apply a function to ALL characte... |
install.packages("reshape2")
install.packages("dplyr")
install.packages("ggplot2")
library(reshape2)
library(dplyr)
library(ggplot2)
acc <- read.csv("요일별_시간대별_교통사고.csv", header=T)
acc
# 목표 : 요일별로 교통사고 사망자의 시간별 분포를 살펴보자!
### step 1. 필요없는 행을 지우고, 필요한 행만 추출하자.
# tip1) filter(데이터, 행조건, ...)
# t... | /part3/B_Network/dplyr보강/data_handling_practice.R | no_license | anhnguyendepocen/visual | R | false | false | 846 | r | install.packages("reshape2")
install.packages("dplyr")
install.packages("ggplot2")
library(reshape2)
library(dplyr)
library(ggplot2)
acc <- read.csv("요일별_시간대별_교통사고.csv", header=T)
acc
# 목표 : 요일별로 교통사고 사망자의 시간별 분포를 살펴보자!
### step 1. 필요없는 행을 지우고, 필요한 행만 추출하자.
# tip1) filter(데이터, 행조건, ...)
# t... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{ggplot}
\alias{ggplot}
\title{Create a new ggplot}
\usage{
ggplot(data = NULL, mapping = aes(), ..., environment = parent.frame())
}
\arguments{
\item{data}{Default dataset to use for plot. If not already a data.frame,
will be co... | /man/ggplot.Rd | permissive | tidyverse/ggplot2 | R | false | true | 4,576 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{ggplot}
\alias{ggplot}
\title{Create a new ggplot}
\usage{
ggplot(data = NULL, mapping = aes(), ..., environment = parent.frame())
}
\arguments{
\item{data}{Default dataset to use for plot. If not already a data.frame,
will be co... |
expit <-
function (x)
{
exp(x)/(1 + exp(x))
}
| /R/expit.R | no_license | miemiemiem/Aclust | R | false | false | 51 | r | expit <-
function (x)
{
exp(x)/(1 + exp(x))
}
|
hypotenuse <- function(x, y)
{
sqrt(x ^ 2 + y ^ 2)
}
ythagorean_triples <- data.frame(
x = c(3, 5, 8, 7, 9, 11, 12, 13, 15, 16, 17, 19),
y = c(4, 12, 15, 24, 40, 60, 35, 84, 112, 63, 144, 180),
z = c(5, 13, 17, 25, 41, 61, 37, 85, 113, 65, 145, 181)
) | /pkg/hypotenuse.R | no_license | RinLinux/RNotes | R | false | false | 262 | r |
hypotenuse <- function(x, y)
{
sqrt(x ^ 2 + y ^ 2)
}
ythagorean_triples <- data.frame(
x = c(3, 5, 8, 7, 9, 11, 12, 13, 15, 16, 17, 19),
y = c(4, 12, 15, 24, 40, 60, 35, 84, 112, 63, 144, 180),
z = c(5, 13, 17, 25, 41, 61, 37, 85, 113, 65, 145, 181)
) |
## The goal of my functions is to compute the inverse of a matrix and cache its inverse
## to avoid repeated computation and decrease the computation time. One function creates
## a special matrix that cache its inverse while the other function computes the inverse
## of the special matrix returned by the first fucntio... | /cachematrix.R | no_license | shabnamh/ProgrammingAssignment2 | R | false | false | 1,793 | r | ## The goal of my functions is to compute the inverse of a matrix and cache its inverse
## to avoid repeated computation and decrease the computation time. One function creates
## a special matrix that cache its inverse while the other function computes the inverse
## of the special matrix returned by the first fucntio... |
dataset <- read.table("household_power_consumption.txt",
header = TRUE, sep = ";", na.strings = "?")
subdata <- dataset[dataset$Date %in% c("1/2/2007", "2/2/2007"),]
# Subset the data data from the dates 2007-02-01 and 2007-02-02
datetime <- strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y ... | /plot2.R | no_license | Kinundu/ExData_Plotting1 | R | false | false | 814 | r | dataset <- read.table("household_power_consumption.txt",
header = TRUE, sep = ";", na.strings = "?")
subdata <- dataset[dataset$Date %in% c("1/2/2007", "2/2/2007"),]
# Subset the data data from the dates 2007-02-01 and 2007-02-02
datetime <- strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y ... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funding_metadata.R
\name{.replace_other_attribute_fundagency}
\alias{.replace_other_attribute_fundagency}
\title{Replace attributes that has others values in funding agency}
\usage{
.replace_other_attribute_fundagency(.data, attribute, other_... | /man/dot-replace_other_attribute_fundagency.Rd | permissive | AGROFIMS/ragrofims | R | false | true | 1,223 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funding_metadata.R
\name{.replace_other_attribute_fundagency}
\alias{.replace_other_attribute_fundagency}
\title{Replace attributes that has others values in funding agency}
\usage{
.replace_other_attribute_fundagency(.data, attribute, other_... |
\name{accessD.wp}
\alias{accessD.wp}
\title{Obtain whole resolution level of wavelet packet coefficients from a wavelet packet object (wp).}
\description{
Get a whole resolution level's worth of coefficients from a \code{\link{wp}} wavelet packet object. To obtain packets of coefficients from a wavelet packet object yo... | /man/accessD.wp.rd | no_license | cran/wavethresh | R | false | false | 1,077 | rd | \name{accessD.wp}
\alias{accessD.wp}
\title{Obtain whole resolution level of wavelet packet coefficients from a wavelet packet object (wp).}
\description{
Get a whole resolution level's worth of coefficients from a \code{\link{wp}} wavelet packet object. To obtain packets of coefficients from a wavelet packet object yo... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementTemplateApi.r
\name{elementTemplate$get}
\alias{elementTemplate$get}
\title{Retrieve an element template.}
\arguments{
\item{webId}{The ID of the element template.}
\item{selectedFields}{List of fields to be returned in the response, ... | /man/elementTemplate-cash-get.Rd | permissive | frbl/PI-Web-API-Client-R | R | false | true | 697 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementTemplateApi.r
\name{elementTemplate$get}
\alias{elementTemplate$get}
\title{Retrieve an element template.}
\arguments{
\item{webId}{The ID of the element template.}
\item{selectedFields}{List of fields to be returned in the response, ... |
ggplot(BOD, aes(x=Time, y=demand))+geom_line()
ggplot(BOD, aes(x=factor(Time), y=demand, group=1))+geom_line()
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+ylim(0, max(BOD$demand))
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+expand_limits(y=0)
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+geom_point()
head(wor... | /linegraph.R | no_license | yonghuat/rgraphics | R | false | false | 3,912 | r | ggplot(BOD, aes(x=Time, y=demand))+geom_line()
ggplot(BOD, aes(x=factor(Time), y=demand, group=1))+geom_line()
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+ylim(0, max(BOD$demand))
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+expand_limits(y=0)
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+geom_point()
head(wor... |
\name{fortify.mg_ensemble}
\alias{fortify.mg_ensemble}
\title{S3method fortify mg_ensemble}
\usage{
fortify.mg_ensemble(model, data = NULL, ...)
}
\description{
S3method fortify mg_ensemble
}
| /man/fortify.mg_ensemble.Rd | no_license | garrettgman/modelglyphs | R | false | false | 197 | rd | \name{fortify.mg_ensemble}
\alias{fortify.mg_ensemble}
\title{S3method fortify mg_ensemble}
\usage{
fortify.mg_ensemble(model, data = NULL, ...)
}
\description{
S3method fortify mg_ensemble
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/battingStats.R
\name{HRpct}
\alias{HRpct}
\title{Batting: Calculate home run percentage}
\usage{
HRpct(dat = NULL)
}
\arguments{
\item{dat}{A data frame you would wish to calculate. The data frame must have the same column names found in
The ... | /man/HRpct.Rd | no_license | cran/baseballDBR | R | false | true | 1,123 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/battingStats.R
\name{HRpct}
\alias{HRpct}
\title{Batting: Calculate home run percentage}
\usage{
HRpct(dat = NULL)
}
\arguments{
\item{dat}{A data frame you would wish to calculate. The data frame must have the same column names found in
The ... |
# Automate message creation for Carpentries teaching demos
# Jeff Oliver
# jcoliver@arizona.edu
# 2021-08-09
library(rmarkdown)
library(lubridate)
# The only two lines you will likely need to change are these two:
# + trainees: update with the location of your trainees file (example format
# is avail... | /auto-messages.R | permissive | klbarnes20/auto-demo-email | R | false | false | 3,853 | r | # Automate message creation for Carpentries teaching demos
# Jeff Oliver
# jcoliver@arizona.edu
# 2021-08-09
library(rmarkdown)
library(lubridate)
# The only two lines you will likely need to change are these two:
# + trainees: update with the location of your trainees file (example format
# is avail... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odbc.R
\name{odbc_ini}
\alias{odbc_ini}
\title{Manage Database Connection}
\usage{
odbc_ini()
}
\description{
This RStudio Addin opens up the .odbc.ini file to manage local
SQL server login credential
}
| /man/odbc_ini.Rd | no_license | shafayetShafee/addin_demo | R | false | true | 281 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odbc.R
\name{odbc_ini}
\alias{odbc_ini}
\title{Manage Database Connection}
\usage{
odbc_ini()
}
\description{
This RStudio Addin opens up the .odbc.ini file to manage local
SQL server login credential
}
|
\name{locq.growth}
\alias{locq.growth}
\title{
Portfolio matrix for specialization and growth
}
\description{
Portfolio matrix plot comparing two numeric vectors (here: specialization and growth)
}
\usage{
locq.growth(e_ij1, e_ij2, e_i1, e_i2, industry.names = NULL,
y.axis = "r",
psize, psize.factor ... | /man/locq.growth.Rd | no_license | cran/REAT | R | false | false | 5,736 | rd | \name{locq.growth}
\alias{locq.growth}
\title{
Portfolio matrix for specialization and growth
}
\description{
Portfolio matrix plot comparing two numeric vectors (here: specialization and growth)
}
\usage{
locq.growth(e_ij1, e_ij2, e_i1, e_i2, industry.names = NULL,
y.axis = "r",
psize, psize.factor ... |
data <- read.table(file="~/Downloads/household_power_consumption.txt", header = TRUE, sep=";", na.strings="?")
Febdates <- subset(data, Date%in%c("1/2/2007","2/2/2007"))
Febdates$Date <- as.Date(Febdates$Date, format = "%d/%m/%Y")
png(file="~/Desktop/Coursera/ExData_Plotting1/plot1.png", width= 480, height= 480, unit... | /plot1.R | no_license | shannonbrady/ExData_Plotting1 | R | false | false | 475 | r | data <- read.table(file="~/Downloads/household_power_consumption.txt", header = TRUE, sep=";", na.strings="?")
Febdates <- subset(data, Date%in%c("1/2/2007","2/2/2007"))
Febdates$Date <- as.Date(Febdates$Date, format = "%d/%m/%Y")
png(file="~/Desktop/Coursera/ExData_Plotting1/plot1.png", width= 480, height= 480, unit... |
library(readxl)
Dataset_Thesis_Raw <- read_excel("C:/Users/Utente/Downloads/Dataset Thesis Raw.xlsx")
View(Dataset_Thesis_Raw)
library(ggplot2)
ggplot(Dataset_Thesis_Raw, aes(y=age_beginning))+
geom_boxplot() +
labs(title="Histogram for Age at the beginning of the investment", y="Age of the investors")
... | /THESIS CODE.R | no_license | girolamovurro/Thesis | R | false | false | 11,147 | r | library(readxl)
Dataset_Thesis_Raw <- read_excel("C:/Users/Utente/Downloads/Dataset Thesis Raw.xlsx")
View(Dataset_Thesis_Raw)
library(ggplot2)
ggplot(Dataset_Thesis_Raw, aes(y=age_beginning))+
geom_boxplot() +
labs(title="Histogram for Age at the beginning of the investment", y="Age of the investors")
... |
<html>
<head>
<meta name="TextLength" content="SENT_NUM:5, WORD_NUM:108">
</head>
<body bgcolor="white">
<a href="#0" id="0">It is the valley's heavily Moslem population that tilts the sectarian scale in Jammu-Kashmir state.</a>
<a href="#1" id="1">Exotic Kashmir, a tourist paradise of houseboat hotels and Mogul garden... | /DUC-Dataset/Summary_p100_R/D114.AP900130-0010.html.R | no_license | Angela7126/SLNSumEval | R | false | false | 920 | r | <html>
<head>
<meta name="TextLength" content="SENT_NUM:5, WORD_NUM:108">
</head>
<body bgcolor="white">
<a href="#0" id="0">It is the valley's heavily Moslem population that tilts the sectarian scale in Jammu-Kashmir state.</a>
<a href="#1" id="1">Exotic Kashmir, a tourist paradise of houseboat hotels and Mogul garden... |
orth_Gram_Schmidt_metrique_diag <- function (M,Y)
{ nb_fact <- length(Y)
X <- vector("list",nb_fact)
if (nb_fact==1) { X<-Y} else{
normX <- vector(length=nb_fact)
X[[1]] <- Y[[1]]
normX[1] <- sum ( X[[1]]^2 * M )
for (i in 2:nb_fact)
{ X[[i]] <- Y[[i]]
for (j in... | /R/orth_gram_schmidt_metrique_diag.R | no_license | cran/factas | R | false | false | 518 | r |
orth_Gram_Schmidt_metrique_diag <- function (M,Y)
{ nb_fact <- length(Y)
X <- vector("list",nb_fact)
if (nb_fact==1) { X<-Y} else{
normX <- vector(length=nb_fact)
X[[1]] <- Y[[1]]
normX[1] <- sum ( X[[1]]^2 * M )
for (i in 2:nb_fact)
{ X[[i]] <- Y[[i]]
for (j in... |
#' db as a list of data.frame-s
#' @param con sqlite connection.
#' @export
db2list <- function(con) {
tnams = dbGetQuery(con, "SELECT name FROM sqlite_master WHERE type='table'")
sapply(tnams$name, function(x) dbGetQuery(con, paste("SELECT * FROM", x) ) )
}
#' Show db status
#' Returns a data.frame containin... | /R/5_get_data.R | no_license | mpio-be/colorZapper | R | false | false | 1,933 | r | #' db as a list of data.frame-s
#' @param con sqlite connection.
#' @export
db2list <- function(con) {
tnams = dbGetQuery(con, "SELECT name FROM sqlite_master WHERE type='table'")
sapply(tnams$name, function(x) dbGetQuery(con, paste("SELECT * FROM", x) ) )
}
#' Show db status
#' Returns a data.frame containin... |
inbox_data <- read.table("inbox_data_enron.csv", header=TRUE, sep=",", quote='')
sent_data <- read.table("sent_data_enron.csv", header=TRUE, sep=",", quote='')
from <- inbox_data['from']
colnames(from)[1] <- 'mail'
to <- sent_data['to']
colnames(to)[1] <- 'mail'
all <- rbind(from,to)
counted <- data.frame(table(all))
... | /R_rb/Chapter6/mails_interact.r | no_license | takagotch/R | R | false | false | 400 | r | inbox_data <- read.table("inbox_data_enron.csv", header=TRUE, sep=",", quote='')
sent_data <- read.table("sent_data_enron.csv", header=TRUE, sep=",", quote='')
from <- inbox_data['from']
colnames(from)[1] <- 'mail'
to <- sent_data['to']
colnames(to)[1] <- 'mail'
all <- rbind(from,to)
counted <- data.frame(table(all))
... |
library(mefa4)
set.seed(1234)
y <- Matrix(rpois(50, 0.5), 20, 10)
dimnames(y) <- list(letters[1:20], LETTERS[1:10])
x <- Melt(y)
x <- x[sample.int(nrow(x)),]
x <- data.frame(id=1:nrow(x), x)
file <- "trydata.csv"
write.csv(x, file, row.names=FALSE)
FUN <- function(x) return(x)
REDUCE <- rbind
nrows <- 20
nlines <- fu... | /R/mapreduce.R | no_license | psolymos/bamanalytics | R | false | false | 1,392 | r | library(mefa4)
set.seed(1234)
y <- Matrix(rpois(50, 0.5), 20, 10)
dimnames(y) <- list(letters[1:20], LETTERS[1:10])
x <- Melt(y)
x <- x[sample.int(nrow(x)),]
x <- data.frame(id=1:nrow(x), x)
file <- "trydata.csv"
write.csv(x, file, row.names=FALSE)
FUN <- function(x) return(x)
REDUCE <- rbind
nrows <- 20
nlines <- fu... |
#GO Enrichment script
#By Cassie Ettinger
library(tidyverse)
library(ggplot2)
library(vroom)
library(AnnotationDbi)
library(GSEABase)
library(GOstats)
## Bash commands:
# grep "gene" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/ID=//' | sed 's/[;].*//' > all_genes.txt
# sort all_genes.txt | uniq... | /scripts/GOEnrichment.R | permissive | 90talieh/Chytrid_Coelomomyces_RNASeq | R | false | false | 11,845 | r | #GO Enrichment script
#By Cassie Ettinger
library(tidyverse)
library(ggplot2)
library(vroom)
library(AnnotationDbi)
library(GSEABase)
library(GOstats)
## Bash commands:
# grep "gene" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/ID=//' | sed 's/[;].*//' > all_genes.txt
# sort all_genes.txt | uniq... |
grafik_un_smp <- function(.data, matpel, tahun_awal, tahun_akhir, judul = "Perubahan Rerata Nilai Ujian Nasional", subjudul = "Nilai Ujian Nasional Tingkat SMP Kota Bandung") {
matpel <-
matpel %>%
str_replace_all(pattern = "[:punct:]|[:space:]", replacement = "_") %>%
str_to_lower()
.data %>%
se... | /R/grafik_un_smp.R | no_license | muftiivan/dataviz | R | false | false | 3,305 | r | grafik_un_smp <- function(.data, matpel, tahun_awal, tahun_akhir, judul = "Perubahan Rerata Nilai Ujian Nasional", subjudul = "Nilai Ujian Nasional Tingkat SMP Kota Bandung") {
matpel <-
matpel %>%
str_replace_all(pattern = "[:punct:]|[:space:]", replacement = "_") %>%
str_to_lower()
.data %>%
se... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/V4_T2.6b.R
\docType{data}
\name{V4_T2.6b}
\alias{V4_T2.6b}
\title{Volume 4: Table 2.6b}
\format{A data frame with 23 variables
\describe{
\item{\code{County}}{County}
\item{\code{Age}}{Age}
\item{\code{Total}}{Total}
\item{\code{Male}}{Number... | /man/V4_T2.6b.Rd | permissive | LucyNjoki/rKenyaCensus | R | false | true | 2,141 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/V4_T2.6b.R
\docType{data}
\name{V4_T2.6b}
\alias{V4_T2.6b}
\title{Volume 4: Table 2.6b}
\format{A data frame with 23 variables
\describe{
\item{\code{County}}{County}
\item{\code{Age}}{Age}
\item{\code{Total}}{Total}
\item{\code{Male}}{Number... |
#' Dung-derived SOC in year t (DDSOCt)
#'
#' When it comes to soil organic carbon (SOC), there is plant-derived SOC (PDSOC) and dung-derived SOC (DDSOC). Not generally called directly but incorporated in wrapper function.
#'
#' Both of these equations are fairly straight-forward. PDSOCt just takes the estimated ANPPt ... | /R/calc_DDSOCt.R | permissive | ruan-de-wet/snapr | R | false | false | 898 | r |
#' Dung-derived SOC in year t (DDSOCt)
#'
#' When it comes to soil organic carbon (SOC), there is plant-derived SOC (PDSOC) and dung-derived SOC (DDSOC). Not generally called directly but incorporated in wrapper function.
#'
#' Both of these equations are fairly straight-forward. PDSOCt just takes the estimated ANPPt ... |
# This is the code I wrote for Assignment 2 for the coursera Data Specialization R call
# It's purpose is to cache an inverse matrix adn retrieve it
# This first function creates the set, get, setinvmat, and getinvmat functions
makeCacheMatrix <- function(x = matrix()) { # makeCacheMatrix is is a function that takes... | /cachematrix.R | no_license | deneara/ProgrammingAssignment2 | R | false | false | 2,150 | r | # This is the code I wrote for Assignment 2 for the coursera Data Specialization R call
# It's purpose is to cache an inverse matrix adn retrieve it
# This first function creates the set, get, setinvmat, and getinvmat functions
makeCacheMatrix <- function(x = matrix()) { # makeCacheMatrix is is a function that takes... |
#------------------------------------------------------
# Program name: brahman_angus_CNVR_liftover_v1.R
# Objective: analyse Derek CNVR liftover that will give
# common arsucd coor
# Author: Lloyd Low
# Email add: lloydlow@hotmail.com
#------------------------------------------------------
library(UpSetR)
l... | /scripts/brahman_angus_CNVR_liftover_v1.R | no_license | lloydlow/BrahmanAngusAssemblyScripts | R | false | false | 12,373 | r | #------------------------------------------------------
# Program name: brahman_angus_CNVR_liftover_v1.R
# Objective: analyse Derek CNVR liftover that will give
# common arsucd coor
# Author: Lloyd Low
# Email add: lloydlow@hotmail.com
#------------------------------------------------------
library(UpSetR)
l... |
# ---------------------------------------------------------------------------- #
parse_BCFstats = function(path){
library(stringr)
sname = str_replace(basename(path),'.filt.snps.stats.txt','')
s = scan(path, what='character', sep='\n', quiet=TRUE)
ind = unique(str_replace(s, '\\t.+', ''))
ind = i... | /R/Parse_BCFStats.R | no_license | frenkiboy/MyLib | R | false | false | 1,678 | r | # ---------------------------------------------------------------------------- #
parse_BCFstats = function(path){
library(stringr)
sname = str_replace(basename(path),'.filt.snps.stats.txt','')
s = scan(path, what='character', sep='\n', quiet=TRUE)
ind = unique(str_replace(s, '\\t.+', ''))
ind = i... |
d<-read.table("/Volumes/Volume_4/analysis/DsimTE/refgen/TEannotation/stat/lengthdistri.R")
ids=c("ssr","g2","g1","g05")
par(mfrow=c(2,2))
# Average element length across a length-distribution table.
#
# df: data frame where column V3 holds an element length and column V4
#     holds the number of elements of that length (a frequency).
# Returns the count-weighted mean length: sum(V4 * V3) / sum(V4).
#
# Rewritten to use stats::weighted.mean instead of hand-rolled sums;
# the original also shadowed base functions `c` and `ls` with locals.
avleng<-function(df){
  weighted.mean(df$V3, df$V4)
}
histable<-function(df){
v<-c()
for(i in 1:nrow(df))
{
cur<-df[i,]
ele<-rep(cur... | /TE/melsim/lengthDistri.R | no_license | capoony/popgentools | R | false | false | 1,022 | r | d<-read.table("/Volumes/Volume_4/analysis/DsimTE/refgen/TEannotation/stat/lengthdistri.R")
ids=c("ssr","g2","g1","g05")
par(mfrow=c(2,2))
# Average element length across a length-distribution table.
#
# df: data frame where column V3 holds an element length and column V4
#     holds the number of elements of that length (a frequency).
# Returns the count-weighted mean length: sum(V4 * V3) / sum(V4).
#
# Rewritten to use stats::weighted.mean instead of hand-rolled sums;
# the original also shadowed base functions `c` and `ls` with locals.
avleng<-function(df){
  weighted.mean(df$V3, df$V4)
}
histable<-function(df){
v<-c()
for(i in 1:nrow(df))
{
cur<-df[i,]
ele<-rep(cur... |
# Sum of the proper divisors of x (all divisors of x excluding x itself).
#
# Scans candidate divisors d up to floor(sqrt(x)); each hit contributes
# the pair d and x/d. The total then subtracts x itself (counted via the
# pair (1, x)) and, when x is a perfect square, one copy of sqrt(x) so it
# is not double-counted.
#
# x: a single positive integer. Returns an integer-valued numeric.
propSum <- function(x){
  lim <- floor(sqrt(x))
  div.vec <- seq_len(lim)            # candidates 1..floor(sqrt(x)); was c(1:lim)
  div <- div.vec[x %% div.vec == 0]  # the "small" divisors of x
  ans <- sum(div + x/div) - x - lim * (x == lim^2)
  return(ans)
}
lim <- 10000
amicable <- rep(NA, (lim-1))
for(i in 2:lim){
if(is.na(amicable[i])){
a <- propSum(i)
b <- propSum(a)
... | /problems/problem021.R | no_license | parksw3/projectEuler | R | false | false | 500 | r | propSum <- function(x){
lim <- floor(sqrt(x))
div.vec <- c(1:lim)
div <- div.vec[x%%div.vec == 0]
ans <- sum(div + x/div) - x - lim * (x == lim^2)
return(ans)
}
lim <- 10000
amicable <- rep(NA, (lim-1))
for(i in 2:lim){
if(is.na(amicable[i])){
a <- propSum(i)
b <- propSum(a)
... |
select <- function(x, criterion=c("BIC","AIC","CAIC","EBIC"), gamma, scores=FALSE, df.method="active"){
if(class(x)!="fanc") stop('the class of object "x" must be "fanc"')
if(!missing(gamma)){
if(gamma<=1) stop("gamma must be greater than 1")
}
if(scores==TRUE && is.null(x$x)==TRUE) stop("Data matrix is needed f... | /R/select.fanc.R | no_license | keihirose/fanc | R | false | false | 3,763 | r | select <- function(x, criterion=c("BIC","AIC","CAIC","EBIC"), gamma, scores=FALSE, df.method="active"){
if(class(x)!="fanc") stop('the class of object "x" must be "fanc"')
if(!missing(gamma)){
if(gamma<=1) stop("gamma must be greater than 1")
}
if(scores==TRUE && is.null(x$x)==TRUE) stop("Data matrix is needed f... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.