content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2
values | repo_name large_stringlengths 5 125 | language large_stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.46M | extension large_stringclasses 75
values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# CA5: R Calculator - 10357791 Alex Brown
# 1.Addition
# 1. Addition
# Returns the (vectorised) sum of its two numeric arguments.
addition <- function(numb1, numb2) {
  numb1 + numb2
}
#Test Addition
addition(4,5)
addition(45,2)
addition(45,-6)
#2.Subtraction
# 2. Subtraction
# Returns the (vectorised) difference numb1 - numb2.
subtraction <- function(numb1, numb2) {
  numb1 - numb2
}
#Test Subtraction
subtraction(6,7)... | /CA5/CA5_R.r | no_license | Ystwryth/DBS | R | false | false | 1,538 | r |
# CA5: R Calculator - 10357791 Alex Brown
# 1.Addition
# 1. Addition
# Adds two numbers (works elementwise on numeric vectors too).
addition <- function(numb1, numb2) {
  numb1 + numb2
}
#Test Addition
addition(4,5)
addition(45,2)
addition(45,-6)
#2.Subtraction
# 2. Subtraction
# Subtracts numb2 from numb1 (elementwise on numeric vectors).
subtraction <- function(numb1, numb2) {
  numb1 - numb2
}
#Test Subtraction
subtraction(6,7)... |
library(randomForest)
library(SPECIES)
traindata = read.csv("FinalTrain.csv")
test = read.csv("TestInternshipStudent.csv")
#tr = read.csv("DemoTrain.csv")
#te = read.csv("DemoTest.csv")
col1<-ncol(traindata)
traindata<-traindata[,c(3:col1)]
col<-ncol(traindata)
trainlabel <- traindata[,col]
traindata<- traindata[,c... | /randomForest.R | no_license | charusharma1991/RandomForest | R | false | false | 775 | r | library(randomForest)
library(SPECIES)
traindata = read.csv("FinalTrain.csv")
test = read.csv("TestInternshipStudent.csv")
#tr = read.csv("DemoTrain.csv")
#te = read.csv("DemoTest.csv")
col1<-ncol(traindata)
traindata<-traindata[,c(3:col1)]
col<-ncol(traindata)
trainlabel <- traindata[,col]
traindata<- traindata[,c... |
library("twitteR")
library("ROAuth")
library(wordcloud)
library(RColorBrewer)
library(tm)
library(plyr)
library(ggplot2)
library(sentiment)
library(data.table)
library(topicmodels)
#authentication
load("twitter authentication.Rdata")
registerTwitterOAuth(cred)
#data collection
m8 = searchTwitter("#prostatecancer", n... | /cancer.R | no_license | divyachandraprakash/Exploratory-Analysis-on-Cancer-using-Twitter-and-R | R | false | false | 11,164 | r | library("twitteR")
library("ROAuth")
library(wordcloud)
library(RColorBrewer)
library(tm)
library(plyr)
library(ggplot2)
library(sentiment)
library(data.table)
library(topicmodels)
#authentication
load("twitter authentication.Rdata")
registerTwitterOAuth(cred)
#data collection
m8 = searchTwitter("#prostatecancer", n... |
# RScript that aggregates reports from MiXCR's alignment tool
#
# At this point the script simply accumulates results, but it'd be easy to add
# some visualization, analysis, etc. once the data is aggregated
## Get command-line arguments
### Load dependencies
#.libPaths("/home/exacloud/gscratch/CoussensLab/... | /50_QC/mixcr.rnaseq.QC.R | no_license | CoussensLabOHSU/tcr-seq_pipeline | R | false | false | 2,818 | r | # RScript that aggregates reports from MiXCR's alignment tool
#
# At this point the script simply accumulates results, but it'd be easy to add
# some visualization, analysis, etc. once the data is aggregated
## Get command-line arguments
### Load dependencies
#.libPaths("/home/exacloud/gscratch/CoussensLab/... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pedotransfer.R
\name{ksat}
\alias{ksat}
\title{Saturated hydraulic conductivity, including gravel effects.}
\usage{
ksat(sand, clay, soc, DF = 1, gravel = 0)
}
\arguments{
\item{sand}{Fraction of sand}
\item{clay}{Fraction of clay}
\item{so... | /man/ksat.Rd | no_license | grahamjeffries/rcropmod | R | false | true | 576 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pedotransfer.R
\name{ksat}
\alias{ksat}
\title{Saturated hydraulic conductivity, including gravel effects.}
\usage{
ksat(sand, clay, soc, DF = 1, gravel = 0)
}
\arguments{
\item{sand}{Fraction of sand}
\item{clay}{Fraction of clay}
\item{so... |
# Misc methods
# Generate n evenly spaced hues from the HCL colour wheel (the scheme
# ggplot2 uses for its default discrete palette). Fixed luminance and
# chroma keep the colours perceptually comparable.
#
# n: number of colours to return (non-negative integer).
# Returns a character vector of n hex colour strings.
get_color_hexa <- function(n) {
  # n + 1 points are generated because hue 15 and hue 375 coincide on the
  # colour wheel (375 mod 360 == 15); the duplicate endpoint is dropped.
  # `length.out` spelled out in full (the original relied on partial
  # matching of `length`), and seq_len() so n == 0 yields zero colours
  # instead of the bogus result 1:0 would produce.
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues, l = 65, c = 100)[seq_len(n)]
}
| /R/Misc_methods.R | no_license | DeprezM/SCsim | R | false | false | 133 | r | # Misc methods
# Return n evenly spaced HCL hues (ggplot2-style discrete palette) as
# hex colour strings. Luminance 65 / chroma 100 are fixed so all
# colours have comparable perceptual weight.
#
# n: number of colours wanted.
get_color_hexa <- function(n) {
  # Spell out `length.out` (original used partial matching via `length`);
  # n + 1 points because the first and last hue coincide (375 == 15 mod
  # 360), and seq_len(n) drops that duplicate while behaving correctly
  # for n == 0 (unlike 1:n).
  hues <- seq(15, 375, length.out = n + 1)
  hcl(h = hues, l = 65, c = 100)[seq_len(n)]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WebApiTools.R
\name{getPriorityVocabKey}
\alias{getPriorityVocabKey}
\title{Get Priority Vocab Source Key}
\usage{
getPriorityVocabKey(baseUrl)
}
\arguments{
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://api.ohdsi.... | /man/getPriorityVocabKey.Rd | permissive | dikshya5119/OhdsiRTools | R | false | true | 531 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WebApiTools.R
\name{getPriorityVocabKey}
\alias{getPriorityVocabKey}
\title{Get Priority Vocab Source Key}
\usage{
getPriorityVocabKey(baseUrl)
}
\arguments{
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://api.ohdsi.... |
#' Splits a composite figure that contains multiple plots.
#'
#' Automatically detects divisions among multiple plots found within a single
#' figure image file. It then uses these divisions to split the image into
#' multiple image files; each containing only a single X-Y plot. Currently only
#' works on compo... | /R/figure_split.R | no_license | Anj-prog/metagear | R | false | false | 4,599 | r | #' Splits a composite figure that contains multiple plots.
#'
#' Automatically detects divisions among multiple plots found within a single
#' figure image file. It then uses these divisions to split the image into
#' multiple image files; each containing only a single X-Y plot. Currently only
#' works on compo... |
Sys.setlocale(category = "LC_ALL", locale = "Polish")
setwd('C:\\Users\\anna.ojdowska\\Google Drive\\Praca magisterska\\JMeter\\Results\\Processed\\SpringCloud');
route = read.csv('route.csv');
library(ggplot2)
library(reshape)
library(grid)
route$Spec = factor(route$Spec,levels=unique(route$Spec))
p = ggplot(ro... | /AzureSpringCloud/route_plot.R | no_license | annaojdowska/mono-vs-ms-results | R | false | false | 799 | r | Sys.setlocale(category = "LC_ALL", locale = "Polish")
setwd('C:\\Users\\anna.ojdowska\\Google Drive\\Praca magisterska\\JMeter\\Results\\Processed\\SpringCloud');
route = read.csv('route.csv');
library(ggplot2)
library(reshape)
library(grid)
route$Spec = factor(route$Spec,levels=unique(route$Spec))
p = ggplot(ro... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeltime-calibrate.R
\name{modeltime_calibrate}
\alias{modeltime_calibrate}
\title{Preparation for forecasting}
\usage{
modeltime_calibrate(object, new_data, id = NULL, quiet = TRUE, ...)
}
\arguments{
\item{object}{A fitted model object tha... | /man/modeltime_calibrate.Rd | permissive | ggardiakos/modeltime | R | false | true | 3,137 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeltime-calibrate.R
\name{modeltime_calibrate}
\alias{modeltime_calibrate}
\title{Preparation for forecasting}
\usage{
modeltime_calibrate(object, new_data, id = NULL, quiet = TRUE, ...)
}
\arguments{
\item{object}{A fitted model object tha... |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cluster.R
\docType{methods}
\name{id,Cluster-method}
\alias{id,Cluster-method}
\title{Returns a cluster id}
\usage{
\S4method{id}{Cluster}(object)
}
\arguments{
\item{object}{a Cluster}
}
\value{
the id
}
\description{
Returns a clust... | /vignettes/man/id-Cluster-method.Rd | no_license | gccong/ddiR-sirius | R | false | false | 355 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cluster.R
\docType{methods}
\name{id,Cluster-method}
\alias{id,Cluster-method}
\title{Returns a cluster id}
\usage{
\S4method{id}{Cluster}(object)
}
\arguments{
\item{object}{a Cluster}
}
\value{
the id
}
\description{
Returns a clust... |
\name{fscaret}
\alias{fscaret}
\title{
feature selection caret
}
\description{
Main function for fast feature selection. It utilizes other functions as regPredImp or impCalc to obtain results in a list of data frames.
}
\usage{
fscaret(trainDF, testDF, installReqPckg = FALSE, preprocessData = FALSE,
with.labels = TRUE... | /man/fscaret.Rd | no_license | cran/fscaret | R | false | false | 5,022 | rd | \name{fscaret}
\alias{fscaret}
\title{
feature selection caret
}
\description{
Main function for fast feature selection. It utilizes other functions as regPredImp or impCalc to obtain results in a list of data frames.
}
\usage{
fscaret(trainDF, testDF, installReqPckg = FALSE, preprocessData = FALSE,
with.labels = TRUE... |
library(testthat)
library(splines)
test_that("Check regular glm", {
n <- 100
set.seed(123)
nl_ds <- data.frame(
x = sample(seq(
from = 0,
to = pi * 3,
length.out = n
),
size = n,
replace = TRUE
),
sex = factor(sample(c("Male", "Female"),
size = n,
replace = T... | /tests/testthat/test-addNonlinearity.R | no_license | gforge/Greg | R | false | false | 3,479 | r | library(testthat)
library(splines)
test_that("Check regular glm", {
n <- 100
set.seed(123)
nl_ds <- data.frame(
x = sample(seq(
from = 0,
to = pi * 3,
length.out = n
),
size = n,
replace = TRUE
),
sex = factor(sample(c("Male", "Female"),
size = n,
replace = T... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{rarefy_obs}
\alias{rarefy_obs}
\title{Calculate rarefied observation counts}
\usage{
rarefy_obs(obj, dataset, sample_size = NULL, cols = NULL,
other_cols = FALSE, out_names = NULL)
}
\arguments{
\item{obj}{A \code{\link... | /man/rarefy_obs.Rd | permissive | agronomist/metacoder | R | false | true | 3,398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculations.R
\name{rarefy_obs}
\alias{rarefy_obs}
\title{Calculate rarefied observation counts}
\usage{
rarefy_obs(obj, dataset, sample_size = NULL, cols = NULL,
other_cols = FALSE, out_names = NULL)
}
\arguments{
\item{obj}{A \code{\link... |
###DEMO for text data analysis with R###
# lessons curated by Noushin Nabavi, PhD (adapted from Datacamp lessons for text analysis by Julia Silge)
# Load dplyr and tidytext
library(dplyr)
library(tidytext)
library(tidyr)
# can use 4 lexicons according to need for data analysis
# search using ??get_sentiments(): "afi... | /Lessons/10. TEXT ANALYSIS/text_analysis_examples.R | no_license | NoushinN/stem-ed | R | false | false | 5,407 | r | ###DEMO for text data analysis with R###
# lessons curated by Noushin Nabavi, PhD (adapted from Datacamp lessons for text analysis by Julia Silge)
# Load dplyr and tidytext
library(dplyr)
library(tidytext)
library(tidyr)
# can use 4 lexicons according to need for data analysis
# search using ??get_sentiments(): "afi... |
#define parameters
N=100
betalist=c(0.5,1,2) # try different beta values and simulate the growth curve
T=5 #simulate until time T
outcome<-matrix(NA,ncol=length(betalist),nrow=T)
out<-NA
for (j in 1:length(betalist))
{
beta=betalist[j]
t=0
count=1
sites=rep(0,N)
for ( m in 1:10)
{
sites[... | /APM541/R_v2.R | no_license | wduncan21/Classes | R | false | false | 1,157 | r | #define parameters
N=100
betalist=c(0.5,1,2) # try different beta values and simulate the growth curve
T=5 #simulate until time T
outcome<-matrix(NA,ncol=length(betalist),nrow=T)
out<-NA
for (j in 1:length(betalist))
{
beta=betalist[j]
t=0
count=1
sites=rep(0,N)
for ( m in 1:10)
{
sites[... |
library(dplyr)
library(vcd)
library(vcdExtra)
# library(gam)
library(car)
library(effects)
expit <- function(x) exp(x)/ (1 + exp(x))
expit_prob <- function(x) c(expit(x), 1 - expit(x))
random_binary_from_logits <- function(lgt) {
factor(sapply(lgt, function(x) {
sample(c(TRUE, FALSE)
, size = 1
... | /acad_data.R | no_license | julianhatwell/DDAR | R | false | false | 20,398 | r | library(dplyr)
library(vcd)
library(vcdExtra)
# library(gam)
library(car)
library(effects)
expit <- function(x) exp(x)/ (1 + exp(x))
expit_prob <- function(x) c(expit(x), 1 - expit(x))
random_binary_from_logits <- function(lgt) {
factor(sapply(lgt, function(x) {
sample(c(TRUE, FALSE)
, size = 1
... |
insertCohortDefinitionInPackage(definitionId = 5021,
name = "Test",
baseUrl = Sys.getenv("baseUrl"))
# WebAPI functions -----------------------------------------------------------
getCohortDefinitionName(baseUrl = Sys.getenv("baseUrl"), definitio... | /extras/TestCode.R | permissive | anthonysena/OhdsiRTools | R | false | false | 899 | r |
insertCohortDefinitionInPackage(definitionId = 5021,
name = "Test",
baseUrl = Sys.getenv("baseUrl"))
# WebAPI functions -----------------------------------------------------------
getCohortDefinitionName(baseUrl = Sys.getenv("baseUrl"), definitio... |
PATH <- "D:/09_analytics_new_start/06_time_series_problem/"
setwd(PATH)
data_path <- paste(PATH,"data/Train_SU63ISt.csv",sep = "")
data <- read.csv(data_path,stringsAsFactors = FALSE)
head(data$Datetime)
library(lubridate)
data$Datetime <- dmy_hm(data$Datetime)
head(data)
class(data$Datetime)
library(xts)
data.xts ... | /03_time_series_problem/code.R | no_license | shubamsharma/Data-Analytics | R | false | false | 2,523 | r | PATH <- "D:/09_analytics_new_start/06_time_series_problem/"
setwd(PATH)
data_path <- paste(PATH,"data/Train_SU63ISt.csv",sep = "")
data <- read.csv(data_path,stringsAsFactors = FALSE)
head(data$Datetime)
library(lubridate)
data$Datetime <- dmy_hm(data$Datetime)
head(data)
class(data$Datetime)
library(xts)
data.xts ... |
setwd("C:/MyGitRepos/cherry-blossom-run/Data")
# els <- readLines("MenTxt/2012.txt")
# eqIndex <- grep("^===", els)
# spacerRow <- els[eqIndex]
# headerRow <- els[eqIndex - 1]
# body <- els[-(1:eqIndex)]
#
# headerRow <- tolower(headerRow)
# ageStart <- regexpr("ag", headerRow)
# age <- substr(body, start = ageStar... | /readTxt.R | no_license | Tubbz-alt/cherry-blossom-run | R | false | false | 8,460 | r |
setwd("C:/MyGitRepos/cherry-blossom-run/Data")
# els <- readLines("MenTxt/2012.txt")
# eqIndex <- grep("^===", els)
# spacerRow <- els[eqIndex]
# headerRow <- els[eqIndex - 1]
# body <- els[-(1:eqIndex)]
#
# headerRow <- tolower(headerRow)
# ageStart <- regexpr("ag", headerRow)
# age <- substr(body, start = ageStar... |
install.packages("Metrics")
library(Metrics)
Amtrak<-read.csv('D:\\Data science classes\\Assignment R\\Amtrak.csv') # read the Amtrack data
View(Amtrak) # Seasonality 12 months
plot(Amtrak$Ridership,type="l")
# So creating 11 dummy variables
X<- data.frame(outer(rep(month.abb,length = 120), month.abb,"=... | /Forecasting.R | no_license | Amit1608/Datascience-Rcodes | R | false | false | 4,606 | r | install.packages("Metrics")
library(Metrics)
Amtrak<-read.csv('D:\\Data science classes\\Assignment R\\Amtrak.csv') # read the Amtrack data
View(Amtrak) # Seasonality 12 months
plot(Amtrak$Ridership,type="l")
# So creating 11 dummy variables
X<- data.frame(outer(rep(month.abb,length = 120), month.abb,"=... |
#############################################
## ConvertSupport
ConvertSupport <- function (fromGrid, toGrid, mu = NULL, Cov = NULL, phi = NULL)
{
# Input:
# - fromGrid: which grid should be started at?
# - toGrid:
buff <- .Machine$double.eps * max(abs(fromGrid)) * 3
if (abs(toGrid[1] - fromGrid[1]... | /R_Functions/convertSupport.r | no_license | stefanrameseder/BiddingCurves | R | false | false | 924 | r | #############################################
## ConvertSupport
ConvertSupport <- function (fromGrid, toGrid, mu = NULL, Cov = NULL, phi = NULL)
{
# Input:
# - fromGrid: which grid should be started at?
# - toGrid:
buff <- .Machine$double.eps * max(abs(fromGrid)) * 3
if (abs(toGrid[1] - fromGrid[1]... |
##' Adaptive permutation test one-sample problems
##'
##' @title One-sample adaptive permutation test
##' @template onesample_sims
##' @param combination_function Function to combine stage-wise (permutation) p-values
##' @param perms Maximum number of permutations to use when computing permutation p-values and conditio... | /R/simulation.R | no_license | livioivil/resamplingMCP | R | false | false | 5,069 | r | ##' Adaptive permutation test one-sample problems
##'
##' @title One-sample adaptive permutation test
##' @template onesample_sims
##' @param combination_function Function to combine stage-wise (permutation) p-values
##' @param perms Maximum number of permutations to use when computing permutation p-values and conditio... |
library(coala)
### Name: sumstat_four_gamete
### Title: Summary Statistic: Four-Gamete-Condition
### Aliases: sumstat_four_gamete
### ** Examples
model <- coal_model(5, 2) +
feat_mutation(50) +
feat_recombination(10) +
sumstat_four_gamete()
stats <- simulate(model)
print(stats$four_gamete)
| /data/genthat_extracted_code/coala/examples/sumstat_four_gamete.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 301 | r | library(coala)
### Name: sumstat_four_gamete
### Title: Summary Statistic: Four-Gamete-Condition
### Aliases: sumstat_four_gamete
### ** Examples
model <- coal_model(5, 2) +
feat_mutation(50) +
feat_recombination(10) +
sumstat_four_gamete()
stats <- simulate(model)
print(stats$four_gamete)
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Get input
input <- file("stdin")
data <- strsplit(readLines(input, warn = FALSE), split = " ")
# Specify the input data for math aptitude scores(X)
X <- rep(0, length(data))
for (i in 1:length(data)){
X[i] <- as.numeric(data[[i]][1])
}
# Speci... | /Day 8 - Least Square Regression Line.R | no_license | EirikEspe/10-Days-of-Statistics | R | false | false | 637 | r | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Get input
input <- file("stdin")
data <- strsplit(readLines(input, warn = FALSE), split = " ")
# Specify the input data for math aptitude scores(X)
X <- rep(0, length(data))
for (i in 1:length(data)){
X[i] <- as.numeric(data[[i]][1])
}
# Speci... |
library(lattice)
library(datasets)
## xyplot(y ~ x | f * g, data)
xyplot(Ozone ~ Wind, data = airquality)
## First convert Month to a factor variable:
airquality <- transform(airquality, Month=factor(Month))
xyplot(Ozone ~ Wind | Month, data = airquality, layout = c(5,1))
## store a trellis object
p <- xyplot(Ozone... | /Lattice.R | no_license | aperelson/EDA_Week2 | R | false | false | 772 | r | library(lattice)
library(datasets)
## xyplot(y ~ x | f * g, data)
xyplot(Ozone ~ Wind, data = airquality)
## First convert Month to a factor variable:
airquality <- transform(airquality, Month=factor(Month))
xyplot(Ozone ~ Wind | Month, data = airquality, layout = c(5,1))
## store a trellis object
p <- xyplot(Ozone... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exptab.R
\name{exptab}
\alias{exptab}
\title{Esporta una o piu' tabelle in un unico file csv (standard italiano)}
\usage{
exptab(tab, file, dids = names(tab), aggiungi = FALSE, ...)
}
\arguments{
\item{tab}{lista degli oggetti (table... | /man/exptab.Rd | no_license | cran/LabRS | R | false | true | 1,459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exptab.R
\name{exptab}
\alias{exptab}
\title{Esporta una o piu' tabelle in un unico file csv (standard italiano)}
\usage{
exptab(tab, file, dids = names(tab), aggiungi = FALSE, ...)
}
\arguments{
\item{tab}{lista degli oggetti (table... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alluvial.b.R
\name{alluvialClass}
\alias{alluvialClass}
\title{Alluvial Plot}
\value{
Alluvial Plot
}
\description{
Alluvial Plot
Alluvial Plot
}
\section{Super classes}{
\code{\link[jmvcore:Analysis]{jmvcore::Analysis}} -> \code{\link[Clini... | /man/alluvialClass.Rd | no_license | sbalci/ClinicoPath | R | false | true | 6,806 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alluvial.b.R
\name{alluvialClass}
\alias{alluvialClass}
\title{Alluvial Plot}
\value{
Alluvial Plot
}
\description{
Alluvial Plot
Alluvial Plot
}
\section{Super classes}{
\code{\link[jmvcore:Analysis]{jmvcore::Analysis}} -> \code{\link[Clini... |
#Summarize_off-target_editing_functs.R
# conda_environment: crispresso_downstream_env
# last modified: 2020_08_17 Anne Shen
# For use with CRISPResso version 2.0.40
#
### Dependencies:
# library(tidyverse)
# library(tidyselect)
# library(gtable)
# library(scales)
# library(gridExtra)
# library(grid)
# library(effsize)
... | /crispresso_downstream/Summarize_off-target_editing_functs.R | no_license | ashen931/crispresso_downstream | R | false | false | 59,805 | r | #Summarize_off-target_editing_functs.R
# conda_environment: crispresso_downstream_env
# last modified: 2020_08_17 Anne Shen
# For use with CRISPResso version 2.0.40
#
### Dependencies:
# library(tidyverse)
# library(tidyselect)
# library(gtable)
# library(scales)
# library(gridExtra)
# library(grid)
# library(effsize)
... |
# -*- R -*-
bibentry(bibtype = "Article",
header = "To cite dtw in publications use:",
title = "Computing and Visualizing Dynamic Time Warping Alignments in {R}: The {dtw} Package",
author = as.person("Toni Giorgino"),
journal = "Journal of ... | /inst/CITATION | no_license | cran/dtw | R | false | false | 2,012 | # -*- R -*-
bibentry(bibtype = "Article",
header = "To cite dtw in publications use:",
title = "Computing and Visualizing Dynamic Time Warping Alignments in {R}: The {dtw} Package",
author = as.person("Toni Giorgino"),
journal = "Journal of ... | |
## Put comments here that give an overall description of what your
## functions do
#Programming Assignment 2 to understand lexigraphical scoping and get more practice with
#defining functions. The code below is based on the sample code given for the makeVector
#and cachemean functions.
#the curly braces in makeCa... | /cachematrix.R | no_license | PKMarcom/ProgrammingAssignment2 | R | false | false | 2,955 | r | ## Put comments here that give an overall description of what your
## functions do
#Programming Assignment 2 to understand lexigraphical scoping and get more practice with
#defining functions. The code below is based on the sample code given for the makeVector
#and cachemean functions.
#the curly braces in makeCa... |
# Read a non-negative integer from the console and print its factorial.
num <- as.integer(readline(prompt = "Enter the number"))
fact <- 1
# seq_len() instead of 1:num so that num == 0 correctly leaves fact == 1
# (1:0 would iterate over c(1, 0) and force the result to 0).
for (i in seq_len(num)) {
  fact <- fact * i
}
print(fact)
#-------------------------------------------
# Read a number and print its multiplication table from 1 to 10.
mult <- as.integer(readline("Enter a number"))
for (i in 1:10) {
  # The original called the undefined function `pri()` (typo for print)
  # and ignored the loop index; print each multiple of the input.
  print(mult * i)
}
#-----------------------------------------
#Example 1
var1=c("a","b","c"... | /Basic_Function.R | permissive | ninadsumant/R-Programming | R | false | false | 2,295 | r |
num=as.integer(readline(prompt = "Enter the number"))
fact=1
for(i in 1:num)
fact=fact*i
print(fact)
#-------------------------------------------
mult=as.integer(readline("Enter a number"))
for(i in 1:10)
pri(mult)
#-----------------------------------------
#Example 1
var1=c("a","b","c"... |
library(tidyverse)
library(lubridate)
library(feather)
library(prophet)
all_daily_digits <- read_feather("all_daily_digits.feather")
# When you get the full dataset, use different values for testing and training
dd_train <- all_daily_digits
dd_test <- all_daily_digits
# Plot all the values
ggplot(all_daily_digits, ... | /2_eda.R | permissive | Breza/DailyDigit | R | false | false | 1,307 | r | library(tidyverse)
library(lubridate)
library(feather)
library(prophet)
all_daily_digits <- read_feather("all_daily_digits.feather")
# When you get the full dataset, use different values for testing and training
dd_train <- all_daily_digits
dd_test <- all_daily_digits
# Plot all the values
ggplot(all_daily_digits, ... |
#' Isomap Embedding
#'
#' `step_isomap` creates a *specification* of a recipe
#' step that will convert numeric data into one or more new
#' dimensions.
#'
#' @inheritParams step_center
#' @inherit step_center return
#' @param ... One or more selector functions to choose which
#' variables will be used to compute th... | /R/isomap.R | no_license | kevinwkc/recipes | R | false | false | 6,327 | r | #' Isomap Embedding
#'
#' `step_isomap` creates a *specification* of a recipe
#' step that will convert numeric data into one or more new
#' dimensions.
#'
#' @inheritParams step_center
#' @inherit step_center return
#' @param ... One or more selector functions to choose which
#' variables will be used to compute th... |
# R code to automatically calculate degree days for the JRC-MARS gridded climate data
# ================================
# Anastasia Korycinska, Defra Risk and Horizon Scanning Team
# Animal and Plant Health Directorate, Defra, UK
# ================================
# SET THE THRESHOLD TEMPERATURE FOR DEVELOPMENT (oC)... | /Defra_JRC-MARS-accumulatedDD.R | no_license | openefsa/DefraJrcAccumulatedDD | R | false | false | 8,225 | r | # R code to automatically calculate degree days for the JRC-MARS gridded climate data
# ================================
# Anastasia Korycinska, Defra Risk and Horizon Scanning Team
# Animal and Plant Health Directorate, Defra, UK
# ================================
# SET THE THRESHOLD TEMPERATURE FOR DEVELOPMENT (oC)... |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/topology.R
\name{permute}
\alias{permute}
\alias{permute.vertices}
\title{Permute the vertices of a graph}
\usage{
permute(graph, permutation)
}
\arguments{
\item{graph}{The input graph, it can directed or undirected.}
\item{permutat... | /man/permute.Rd | no_license | davidmaciel/rigraph | R | false | false | 1,467 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/topology.R
\name{permute}
\alias{permute}
\alias{permute.vertices}
\title{Permute the vertices of a graph}
\usage{
permute(graph, permutation)
}
\arguments{
\item{graph}{The input graph, it can directed or undirected.}
\item{permutat... |
normalize_Prots_AALength <- function(data,genes,proteins,organism){
#normalize_Prots_AALength
#
#Function that gets a spectral counts proteomics dataset, divides the counts values
#for each protein by its AA chain length (queried from Uniprot). Then the transformed
#dataset is normalized by the sum of all values multip... | /ComplementaryScripts/normalize_Prots_AALength.R | permissive | SysBioChalmers/OrthOmics | R | false | false | 3,246 | r | normalize_Prots_AALength <- function(data,genes,proteins,organism){
#normalize_Prots_AALength
#
#Function that gets a spectral counts proteomics dataset, divides the counts values
#for each protein by its AA chain length (queried from Uniprot). Then the transformed
#dataset is normalized by the sum of all values multip... |
## Time Series Modeling
# Install required Libraries if necessary
list.of.packages <- c("caret", "dplyr","Boruta","mlbench",
"tidyr","fUnitRoots","FitAR","forecast",
"stringr","Metrics","tictoc","MLmetrics","h2o","opera","urca")
new.packages <- list.of.packages[!(list.of.pac... | /Time_SeriesModeling_Final_H2o.R | no_license | ahmabboud/COVID-19_Cyberthreats | R | false | false | 33,698 | r | ## Time Series Modeling
# Install required Libraries if necessary
list.of.packages <- c("caret", "dplyr","Boruta","mlbench",
"tidyr","fUnitRoots","FitAR","forecast",
"stringr","Metrics","tictoc","MLmetrics","h2o","opera","urca")
new.packages <- list.of.packages[!(list.of.pac... |
#----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
rm(list=ls(all=TRUE))
gc()
options(scipen=999)
library(xgboost)
setwd("/home/rstudio/Dropbox/Public/Springleaf")
subversion <- 1
version <- 7
#----------------------... | /Benchmark Scripts/WO Seed/test_submission_7.R | no_license | vikasnitk85/SpringleafMarketingesponse | R | false | false | 3,033 | r | #----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
rm(list=ls(all=TRUE))
gc()
options(scipen=999)
library(xgboost)
setwd("/home/rstudio/Dropbox/Public/Springleaf")
subversion <- 1
version <- 7
#----------------------... |
\name{getSens}
\alias{getSens}
\title{Estimate $Q_{10}$ value and time varying $R_b$ from temperature and efflux time series including uncertainty.}
\description{Function to determine the temperature sensitivity ($Q_{10}$ value) and time varying
basal efflux (R$_b(i)$) from a given temperature and efflux (usually ... | /man/getSens.Rd | no_license | zhuj27/RSCAPE | R | false | false | 3,640 | rd | \name{getSens}
\alias{getSens}
\title{Estimate $Q_{10}$ value and time varying $R_b$ from temperature and efflux time series including uncertainty.}
\description{Function to determine the temperature sensitivity ($Q_{10}$ value) and time varying
basal efflux (R$_b(i)$) from a given temperature and efflux (usually ... |
source("common.R")
library(ggplot2)
library(reshape2)
cordat <- as.matrix(dat[,value.cols, with=F])
cormat <- cor(cordat, use="pairwise.complete.obs")
cordt <- melt(cormat)
plt <- ggplot(cordt) + aes(x=Var2, y=Var1, fill=value) + geom_tile() +
scale_fill_gradient2(high="green", low="red") +
scale_y_discrete(... | /colorpairs.R | no_license | ashiklom/trait-manuscript | R | false | false | 403 | r | source("common.R")
library(ggplot2)
library(reshape2)
cordat <- as.matrix(dat[,value.cols, with=F])
cormat <- cor(cordat, use="pairwise.complete.obs")
cordt <- melt(cormat)
plt <- ggplot(cordt) + aes(x=Var2, y=Var1, fill=value) + geom_tile() +
scale_fill_gradient2(high="green", low="red") +
scale_y_discrete(... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{beta_lpdf}
\alias{beta_lpdf}
\title{Log probability density function for the beta distribution}
\usage{
beta_lpdf(x, shape1, shape2)
}
\description{
Log probability density function for the beta distribution
}
\details{
... | /man/beta_lpdf.Rd | permissive | jeff324/derp | R | false | true | 427 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{beta_lpdf}
\alias{beta_lpdf}
\title{Log probability density function for the beta distribution}
\usage{
beta_lpdf(x, shape1, shape2)
}
\description{
Log probability density function for the beta distribution
}
\details{
... |
#Script Name: colab_ca
#Author: coLAB
#Author URL: http://www.colab.uff.br
#License: GNU General Public License v2 or later
#License URL: http://www.gnu.org/licenses/gpl-2.0.html
#Reference: Script desenvolvido com a supervisão de Emerson Cervi
#Description: Plotar gráfico de análise de correspondência canônica
##Brev... | /oficina R/5 - Plotando gráficos de análise de correspondência/colab_ca.R | no_license | tsaiyijing/oficinaR | R | false | false | 2,043 | r | #Script Name: colab_ca
#Author: coLAB
#Author URL: http://www.colab.uff.br
#License: GNU General Public License v2 or later
#License URL: http://www.gnu.org/licenses/gpl-2.0.html
#Reference: Script desenvolvido com a supervisão de Emerson Cervi
#Description: Plotar gráfico de análise de correspondência canônica
##Brev... |
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_energy_L2391.gas_trade_flows
#'
#' Model input for natural gas trade by LNG and regional pipeline networks.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code... | /input/gcamdata/R/zenergy_L2391.gas_trade_flows.R | permissive | JGCRI/gcam-core | R | false | false | 26,493 | r | # Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_energy_L2391.gas_trade_flows
#'
#' Model input for natural gas trade by LNG and regional pipeline networks.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code... |
#' @title Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @description \code{convert_cluster_to_settings} Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @usage
#' convert_cluster_to_settings(i, cluster_vector, setting_name, setting_from,... | /R/application_prediction.R | no_license | saeyslab/nichenetr | R | false | false | 130,153 | r | #' @title Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @description \code{convert_cluster_to_settings} Convert cluster assignment to settings format suitable for target gene prediction.
#'
#' @usage
#' convert_cluster_to_settings(i, cluster_vector, setting_name, setting_from,... |
#on charge le package Mass
library(MASS);
# z contient les données extremums de loi géomètrique
z<-dataZ041018[1:10000,1];
# on utilise la fonction fitdistr pour une loi Weibull et une loi lognormal
paraw <- fitdistr(z,densfun="weibull");
logLik(paraw) # on peut avoir le loglikelihood
paral <- fitdistr(z, densfun=... | /src/geomExtrem.R | no_license | perfectstrong/OS13-Devoir2 | R | false | false | 983 | r | #on charge le package Mass
library(MASS);
# z contient les données extremums de loi géomètrique
z<-dataZ041018[1:10000,1];
# on utilise la fonction fitdistr pour une loi Weibull et une loi lognormal
paraw <- fitdistr(z,densfun="weibull");
logLik(paraw) # on peut avoir le loglikelihood
paral <- fitdistr(z, densfun=... |
#######################################
# МЕХАНИКА КОРРЕСПОНДЕНТНОГО АНАЛИЗА #
#######################################
#Вадим Хайтов, Марина Варфоломеева
# Проблемы PCA
library(readxl)
birds <- read_excel(path = "data/macnally.xlsx")
# имена переводим в нижний регистр
colnames(birds) <- tolower(colnames(birds))
... | /09_CA_calculation.R | no_license | varmara/multivar | R | false | false | 8,167 | r |
#######################################
# МЕХАНИКА КОРРЕСПОНДЕНТНОГО АНАЛИЗА #
#######################################
#Вадим Хайтов, Марина Варфоломеева
# Проблемы PCA
library(readxl)
birds <- read_excel(path = "data/macnally.xlsx")
# имена переводим в нижний регистр
colnames(birds) <- tolower(colnames(birds))
... |
\name{rating.scale.name<-}
\docType{methods}
\alias{rating.scale.name<-}
\alias{set.RISK.NAME<-}
\alias{set.RISK.NAME<-,crp.CSFP,character-method}
\alias{rating.scale.name<--methods}
\alias{rating.scale.name<-,crp.CSFP,character-method}
\title{Set the name for the file containing the rating scale}
\description{... | /man/rating.scale.name_--methods.Rd | no_license | cran/crp.CSFP | R | false | false | 407 | rd | \name{rating.scale.name<-}
\docType{methods}
\alias{rating.scale.name<-}
\alias{set.RISK.NAME<-}
\alias{set.RISK.NAME<-,crp.CSFP,character-method}
\alias{rating.scale.name<--methods}
\alias{rating.scale.name<-,crp.CSFP,character-method}
\title{Set the name for the file containing the rating scale}
\description{... |
##' @title Calculate the MS1 and MS2 level QC metrics
##' @description Calculate the MS1 level QC metrics
##' @param spectraList An experiment design input file
##' @param outdir Output directory
##' @param cpu The number of cpu used
##' @return A data frame
##' @author Bo Wen \email{wenbo@@genomics.cn}
calcMSQCMetri... | /R/ms12QC.R | no_license | wenbostar/proteoQC | R | false | false | 10,049 | r |
##' @title Calculate the MS1 and MS2 level QC metrics
##' @description Calculate the MS1 level QC metrics
##' @param spectraList An experiment design input file
##' @param outdir Output directory
##' @param cpu The number of cpu used
##' @return A data frame
##' @author Bo Wen \email{wenbo@@genomics.cn}
calcMSQCMetri... |
#' Aggregate dataset by state
#'
#' @param dt data.table
#' @param year_min integer
#' @param year_max integer
#' @param evtypes character vector
#' @return data.table
#'
aggregate_by_state <- function(dt, year_min, year_max, evtypes) {
replace_na <- function(x) ifelse(is.na(x), 0, x)
round_2 <- function(x) ro... | /Processing.R | no_license | ThotaSravani/Developing-Data-Products-Course-Project | R | false | false | 4,618 | r | #' Aggregate dataset by state
#'
#' @param dt data.table
#' @param year_min integer
#' @param year_max integer
#' @param evtypes character vector
#' @return data.table
#'
aggregate_by_state <- function(dt, year_min, year_max, evtypes) {
replace_na <- function(x) ifelse(is.na(x), 0, x)
round_2 <- function(x) ro... |
library(shiny)
library(shinydashboard)
library(rlang)
library(ggplot2)
rate <- read.csv("Unemployment_Rate_Clean.csv")
colnames(rate) <- c("No","States","Year2017","Year2018","Year2019")
rate$No <- as.numeric(rate$No)
rate$States<- as.character(rate$States)
rate$Year2017 <- as.numeric(rate$Year2017)
rate$Yea... | /Data Product/app.R | no_license | amniwahit/GroupProject | R | false | false | 9,964 | r | library(shiny)
library(shinydashboard)
library(rlang)
library(ggplot2)
rate <- read.csv("Unemployment_Rate_Clean.csv")
colnames(rate) <- c("No","States","Year2017","Year2018","Year2019")
rate$No <- as.numeric(rate$No)
rate$States<- as.character(rate$States)
rate$Year2017 <- as.numeric(rate$Year2017)
rate$Yea... |
\name{confus}
\alias{confus}
\alias{fuzconfus}
\title{(Fuzzy) Confusion Matrix}
\description{A confusion matrix is a cross-tabulation of actual class
membership with memberships predicted by a discriminant function,
classification tree, or other predictive model.
A fuzzy confusion
matrix is a confusion matrix that corr... | /man/confus.Rd | no_license | cran/optpart | R | false | false | 2,335 | rd | \name{confus}
\alias{confus}
\alias{fuzconfus}
\title{(Fuzzy) Confusion Matrix}
\description{A confusion matrix is a cross-tabulation of actual class
membership with memberships predicted by a discriminant function,
classification tree, or other predictive model.
A fuzzy confusion
matrix is a confusion matrix that corr... |
# nocov start
# tested in tidymodels/extratests#67
new_reverse_km_fit <-
function(formula,
object,
pkgs = character(0),
label = character(0),
extra_cls = character(0)) {
res <- list(formula = formula, fit = object, label = label, required_pkgs = pkgs)
class(res) <-... | /R/survival-censoring-model.R | permissive | tidymodels/parsnip | R | false | false | 3,007 | r | # nocov start
# tested in tidymodels/extratests#67
new_reverse_km_fit <-
function(formula,
object,
pkgs = character(0),
label = character(0),
extra_cls = character(0)) {
res <- list(formula = formula, fit = object, label = label, required_pkgs = pkgs)
class(res) <-... |
###########################################################
# Big Data Analytics #
# Session 8 - Text mining #
# #
# Student survey analysis #
# Urba... | /BDA_8_RSCRIPT.R | no_license | kkc-krish/BDA-1 | R | false | false | 12,661 | r | ###########################################################
# Big Data Analytics #
# Session 8 - Text mining #
# #
# Student survey analysis #
# Urba... |
#' @export
coxKernelnet <- function(x, y, t, nfolds, stdbeta, alpha)
{
# Correlation matrix
rbf <- as.matrix(getLaplacian(x, "RBF"))
# cross validation
foldid<-coxsplit(y, nfolds)
fit <- Coxnet(x, y, Omega = rbf, penalty="Net", alpha=alpha, foldid=foldid, isd=stdbeta)
beta <- fit$Beta
dt <- data.frame(cbind(y, x))... | /RegCox/R/cox-KernelNet.R | no_license | aastha3/RegCox | R | false | false | 861 | r | #' @export
coxKernelnet <- function(x, y, t, nfolds, stdbeta, alpha)
{
# Correlation matrix
rbf <- as.matrix(getLaplacian(x, "RBF"))
# cross validation
foldid<-coxsplit(y, nfolds)
fit <- Coxnet(x, y, Omega = rbf, penalty="Net", alpha=alpha, foldid=foldid, isd=stdbeta)
beta <- fit$Beta
dt <- data.frame(cbind(y, x))... |
# Clip rasters by polygon
clip.by.polygon <- function(raster, # Raster object
shape # Polygon object
) {
a1_crop<-crop(raster, shape)
step1<-rasterize(shape, a1_crop)
a1_crop*step1
} | /F_clip_raster_by_polygon.R | no_license | tufui57/SAI | R | false | false | 263 | r | # Clip rasters by polygon
clip.by.polygon <- function(raster, # Raster object
shape # Polygon object
) {
a1_crop<-crop(raster, shape)
step1<-rasterize(shape, a1_crop)
a1_crop*step1
} |
# simple case density
# q_density = function(x_vec){
# if (x_vec[1] <= 10 & x_vec[1] >= -10 & x_vec[2]>= -10 & x_vec[2]<= 10) {
# tmpphix = (1/3)*exp((-1/2)*sum((x_vec+5)^2)) + (2/3)*exp((-1/2)*sum((x_vec-5)^2))
# return(tmpphix)
# }else if(x_vec[1] <= 11 & x_vec[1] >= 10 & x_vec[2]>= -10 & x_vec[2]<= 10){
... | /functions_used/simple_case_density.R | no_license | feiding333/Bayesian-codes-of-the-exoplanet. | R | false | false | 3,295 | r | # simple case density
# q_density = function(x_vec){
# if (x_vec[1] <= 10 & x_vec[1] >= -10 & x_vec[2]>= -10 & x_vec[2]<= 10) {
# tmpphix = (1/3)*exp((-1/2)*sum((x_vec+5)^2)) + (2/3)*exp((-1/2)*sum((x_vec-5)^2))
# return(tmpphix)
# }else if(x_vec[1] <= 11 & x_vec[1] >= 10 & x_vec[2]>= -10 & x_vec[2]<= 10){
... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{storage_hosts_post}
\alias{storage_hosts_post}
\title{Create a new storage host}
\usage{
storage_hosts_post(provider, bucket, name, s3_options = NULL)
}
\arguments{
\item{provider}{string required. The storage provide... | /man/storage_hosts_post.Rd | no_license | elsander/civis-r | R | false | true | 1,118 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{storage_hosts_post}
\alias{storage_hosts_post}
\title{Create a new storage host}
\usage{
storage_hosts_post(provider, bucket, name, s3_options = NULL)
}
\arguments{
\item{provider}{string required. The storage provide... |
best <- function(state, outcome) {
## Read outcome data
outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state and outcome are valid
list_state <- unique(outcome_data$State)
if (!state %in% list_state) stop("invalid state")
list_outcome <- c('heart attack'... | /best.R | no_license | rnugraha/rprog-data-ProgAssignment3-data | R | false | false | 723 | r | best <- function(state, outcome) {
## Read outcome data
outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state and outcome are valid
list_state <- unique(outcome_data$State)
if (!state %in% list_state) stop("invalid state")
list_outcome <- c('heart attack'... |
#!/usr/bin/Rscript
library('ProjectTemplate')
try(load.project())
logit.fit <- glm(Installed ~ LogDependencyCount +
LogSuggestionCount +
LogImportCount +
LogViewsIncluding +
LogPackagesMaintaining +
... | /example_model.R | no_license | m4xl1n/r_recommendation_system | R | false | false | 520 | r | #!/usr/bin/Rscript
library('ProjectTemplate')
try(load.project())
logit.fit <- glm(Installed ~ LogDependencyCount +
LogSuggestionCount +
LogImportCount +
LogViewsIncluding +
LogPackagesMaintaining +
... |
# Developing Data Products, by Coursera
# Minna Asplund, 2018
#
library(shiny)
shinyServer(function(input, output) {
answer <- reactive({
a <- input$frstNmbr
b <- input$scndNmbr
while (a != b)
{
if (a > b)
{
a <- a - b
}
else
{
... | /server.R | no_license | Tiitseri/DDP | R | false | false | 477 | r | # Developing Data Products, by Coursera
# Minna Asplund, 2018
#
library(shiny)
shinyServer(function(input, output) {
answer <- reactive({
a <- input$frstNmbr
b <- input$scndNmbr
while (a != b)
{
if (a > b)
{
a <- a - b
}
else
{
... |
# Software Carpentry Workshop
# University of Chicago
# 2016-09-16
# First version of function to calculate a summary statistic, which is the mean
# of the columns (cols) in df.
calc_sum_stat <- function(df, cols) {
df_sub <- df[, cols]
sum_stat <- apply(df_sub, 1, mean)
return(sum_stat)
}
| /code/calc_sum_stat_v01.R | permissive | jdblischak/2016-09-15-chicago | R | false | false | 298 | r | # Software Carpentry Workshop
# University of Chicago
# 2016-09-16
# First version of function to calculate a summary statistic, which is the mean
# of the columns (cols) in df.
calc_sum_stat <- function(df, cols) {
df_sub <- df[, cols]
sum_stat <- apply(df_sub, 1, mean)
return(sum_stat)
}
|
outcomeData <- NULL
states <- NULL
outcomes <- c("heart attack", "heart failure", "pneumonia")
outcomeNumber <- c(11, 17, 23)
names(outcomeNumber) <- outcomes
best <- function(state, outcome) {
## Read outcome data
if (is.null(outcomeData)){
outcomeData <<- read.csv("outcome-of-care-measures.csv", colClasses =... | /best.R | no_license | Goatflakes/r-prog-ass3 | R | false | false | 1,251 | r | outcomeData <- NULL
states <- NULL
outcomes <- c("heart attack", "heart failure", "pneumonia")
outcomeNumber <- c(11, 17, 23)
names(outcomeNumber) <- outcomes
best <- function(state, outcome) {
## Read outcome data
if (is.null(outcomeData)){
outcomeData <<- read.csv("outcome-of-care-measures.csv", colClasses =... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additive.R
\name{additive}
\alias{additive}
\title{adiitive}
\usage{
additive(geno, n, samp, p, pi)
}
\description{
additive
}
| /man/additive.Rd | no_license | jyc7385/infolab7 | R | false | true | 205 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additive.R
\name{additive}
\alias{additive}
\title{adiitive}
\usage{
additive(geno, n, samp, p, pi)
}
\description{
additive
}
|
library(Devore7)
### Name: ex13.02
### Title: R Data set: ex13.02
### Aliases: ex13.02
### Keywords: datasets
### ** Examples
data(ex13.02)
str(ex13.02)
| /data/genthat_extracted_code/Devore7/examples/ex13.02.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 160 | r | library(Devore7)
### Name: ex13.02
### Title: R Data set: ex13.02
### Aliases: ex13.02
### Keywords: datasets
### ** Examples
data(ex13.02)
str(ex13.02)
|
#' Conveniently message dataframe
#'
#' Conveniently message dataframe using sprintf syntax.
#' Use place holder '%s' for data.frame.
#'
#' @param format_string sprintf style format string
#' @param x data.frame
#' @return NULL
#' @examples
#' x <- data.frame(feature_id = c('F001', 'F002'), symbol = c('FEAT1', 'FEAT2... | /autonomics.support/R/message.R | no_license | bhagwataditya/autonomics0 | R | false | false | 1,042 | r | #' Conveniently message dataframe
#'
#' Conveniently message dataframe using sprintf syntax.
#' Use place holder '%s' for data.frame.
#'
#' @param format_string sprintf style format string
#' @param x data.frame
#' @return NULL
#' @examples
#' x <- data.frame(feature_id = c('F001', 'F002'), symbol = c('FEAT1', 'FEAT2... |
##Reading data
test_activity <- read.table("./UCI HAR Dataset/test/Y_test.txt",header = FALSE)
train_activity <- read.table("./UCI HAR Dataset/train/Y_train.txt",header = FALSE)
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt",header = FALSE)
train_subject <- read.table("./UCI HAR Dataset/train/... | /run_analysis.R | no_license | nazymkm/tidydata | R | false | false | 2,174 | r | ##Reading data
test_activity <- read.table("./UCI HAR Dataset/test/Y_test.txt",header = FALSE)
train_activity <- read.table("./UCI HAR Dataset/train/Y_train.txt",header = FALSE)
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt",header = FALSE)
train_subject <- read.table("./UCI HAR Dataset/train/... |
# Decision Tree Classification
Titanic_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Titanic train.csv")
install.packages("rpart")
install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
dTree<-rpart(Survived ~Pclass+Sex+Age+SibSp+Parch+Fare+Embarked, data=Titanic_data, method = "class"... | /DecisionTree.R | no_license | yuanyuanzho/R_DecisionTree | R | false | false | 853 | r | # Decision Tree Classification
Titanic_data <- read.csv("/Users/eavy/Downloads/7390/Assignment/Assignment3/Titanic train.csv")
install.packages("rpart")
install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
dTree<-rpart(Survived ~Pclass+Sex+Age+SibSp+Parch+Fare+Embarked, data=Titanic_data, method = "class"... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alg_classes.R
\docType{class}
\name{alg-class}
\alias{alg-class}
\title{Abstract optimization algorithm class}
\description{
An S4 class to represent an abstract optimization algorithm.
}
\section{Slots}{
\describe{
\item{\code{name}}{algori... | /man/alg-class.Rd | no_license | minghao2016/greed | R | false | true | 334 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alg_classes.R
\docType{class}
\name{alg-class}
\alias{alg-class}
\title{Abstract optimization algorithm class}
\description{
An S4 class to represent an abstract optimization algorithm.
}
\section{Slots}{
\describe{
\item{\code{name}}{algori... |
# internal function of the ms.commander
sample.pars<-function(x){
k<-sample(nrow(x),nrow(x))
for(i in k){
if(c(as.numeric(x[i,4])+as.numeric(x[i,5]))==0){
next
} else {
samp<-do.call(x[i,6],args=list(1,as.numeric(x[i,4]),as.numeric(x[i,5])),quote=F)
while(samp<=0){
samp<-do.call(x[i,6],a... | /R/parameter_samplers.R | no_license | gehara/PipeMaster | R | false | false | 2,217 | r | # internal function of the ms.commander
sample.pars<-function(x){
k<-sample(nrow(x),nrow(x))
for(i in k){
if(c(as.numeric(x[i,4])+as.numeric(x[i,5]))==0){
next
} else {
samp<-do.call(x[i,6],args=list(1,as.numeric(x[i,4]),as.numeric(x[i,5])),quote=F)
while(samp<=0){
samp<-do.call(x[i,6],a... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lagspec.R
\name{nbetaMT}
\alias{nbetaMT}
\title{Normalized beta probability density function MIDAS weights specification (MATLAB toolbox compatible)
Calculate MIDAS weights according to normalized beta probability density function specificati... | /man/nbetaMT.Rd | no_license | englianhu/midasr | R | false | true | 921 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lagspec.R
\name{nbetaMT}
\alias{nbetaMT}
\title{Normalized beta probability density function MIDAS weights specification (MATLAB toolbox compatible)
Calculate MIDAS weights according to normalized beta probability density function specificati... |
# -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or... | /R/treeInfo.R | no_license | APN-Pucky/ranger | R | false | false | 7,115 | r | # -------------------------------------------------------------------------------
# This file is part of Ranger.
#
# Ranger is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or... |
################################
#Importing data
###############################
#Option 1: make a vector
#entering one column of data directly into R (as a vector)
#use c(number, number, number)
#c means combine or concatenate
#10 pigs on diet1
diet1 <- c(60.8, 67, 65, 68.6, 61.7, 69.6, 77.1, 75.2, 71.5, 60.3)
#and... | /02_ImportDataR.R | no_license | dnm5ca/tutorials | R | false | false | 1,545 | r | ################################
#Importing data
###############################
#Option 1: make a vector
#entering one column of data directly into R (as a vector)
#use c(number, number, number)
#c means combine or concatenate
#10 pigs on diet1
diet1 <- c(60.8, 67, 65, 68.6, 61.7, 69.6, 77.1, 75.2, 71.5, 60.3)
#and... |
library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'fm'
scenario <- 20
param <- 1
anal_type <- "mice"
ss <- ss.bounds%>%
dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.2
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCore... | /sim_pgms/fm/do20/2xcontH0_sc20_do20_mice.R | no_license | yuliasidi/nibinom_apply | R | false | false | 3,317 | r | library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'fm'
scenario <- 20
param <- 1
anal_type <- "mice"
ss <- ss.bounds%>%
dplyr::filter(method == "fm", scenario.id == scenario)
do_val <- 0.2
x1 <- parallel::mclapply(X = 1:10000,
mc.cores = parallel::detectCore... |
"
@Project: Early Warning System
@author: ATPL 1049
@date: May 06 2021
"
# load libaries
library(dplyr)
library(dbplyr)
library(DBI)
library(RMySQL)
library(data.table)
library(reshape2)
library(ggplot2)
library(lubridate)
library(survival)
library(reshape2)
library(Information)
# disable scientific notation
options(... | /earlyWarningSystem.R | no_license | arjunprasanna-azuga/Early-Warning-System | R | false | false | 17,312 | r | "
@Project: Early Warning System
@author: ATPL 1049
@date: May 06 2021
"
# load libaries
library(dplyr)
library(dbplyr)
library(DBI)
library(RMySQL)
library(data.table)
library(reshape2)
library(ggplot2)
library(lubridate)
library(survival)
library(reshape2)
library(Information)
# disable scientific notation
options(... |
# You might need to run this:
# install.packages("DBI","RSQLite","reshape2","ggplot2","scales")
library(dplyr)
library(DBI)
library(reshape2)
library(ggplot2)
library(scales)
# These next lines will need to be modified for your DB
# and system.
path.to.db <- "C:\\Users\\jchan\\Dropbox\\Teaching\\2018_Fall\\AppliedDa... | /dplyr/dplyr_for_sql.R | no_license | a25murray/ada-master | R | false | false | 3,458 | r | # You might need to run this:
# install.packages("DBI","RSQLite","reshape2","ggplot2","scales")
library(dplyr)
library(DBI)
library(reshape2)
library(ggplot2)
library(scales)
# These next lines will need to be modified for your DB
# and system.
path.to.db <- "C:\\Users\\jchan\\Dropbox\\Teaching\\2018_Fall\\AppliedDa... |
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0... | /IntervalSurgeon/inst/testfiles/rcpp_depth/AFL_rcpp_depth/rcpp_depth_valgrind_files/1609857243-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 713 | r | testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0... |
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{rmapaus}
\alias{rmapaus}
\alias{rmapaus-package}
\title{rmapaus: mapping Australia}
\description{
The rmapaus package provides spatial boundaries for various Australian regions,
including postcodes and Australian Bureau of Statistics statistic... | /man/rmapaus.Rd | no_license | TuanAnh207/rmapaus | R | false | false | 335 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\docType{package}
\name{rmapaus}
\alias{rmapaus}
\alias{rmapaus-package}
\title{rmapaus: mapping Australia}
\description{
The rmapaus package provides spatial boundaries for various Australian regions,
including postcodes and Australian Bureau of Statistics statistic... |
hb = 0.9
hn = 0.5
hw = 0.1
hb_pr = .05
hn_pr = .9
hw_pr = 0.05
eb = hb*hb_pr + hn*hn_pr + hw*hw_pr
ew = 1 - eb
getE = function(hbp, hnp, hwp){
return (hb*hbp+hn*hnp+hw*hwp)
}
update = function(h,e,chance){
top = h*chance
return (top/e)
}
hb_pr2 = update(hb_pr,eb,hb)
hw_pr2 = update(hw_pr,eb,hw)
hn_pr2 = up... | /src/expectedlossexample.R | no_license | Lokgic/diss-repo | R | false | false | 5,515 | r | hb = 0.9
hn = 0.5
hw = 0.1
hb_pr = .05
hn_pr = .9
hw_pr = 0.05
eb = hb*hb_pr + hn*hn_pr + hw*hw_pr
ew = 1 - eb
getE = function(hbp, hnp, hwp){
return (hb*hbp+hn*hnp+hw*hwp)
}
update = function(h,e,chance){
top = h*chance
return (top/e)
}
hb_pr2 = update(hb_pr,eb,hb)
hw_pr2 = update(hw_pr,eb,hw)
hn_pr2 = up... |
require(caret) #select tuning parameters
require(e1071) #SVM
require(MASS)
train <- read.csv('train.csv', as.is = T);
test <- read.csv('test.csv', as.is = T)
train <- train[,c(1,2,5,10,14,18,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41)]
test <- test[,c(1,2,5,10,14,18,26,27,28,29,30,31,32,33,34,35,36,37,38,40)]
t... | /knn-pumpit.R | no_license | knpraveen/PumpItUp | R | false | false | 2,892 | r | require(caret) #select tuning parameters
require(e1071) #SVM
require(MASS)
train <- read.csv('train.csv', as.is = T);
test <- read.csv('test.csv', as.is = T)
train <- train[,c(1,2,5,10,14,18,26,27,28,29,30,31,32,33,34,35,36,37,38,40,41)]
test <- test[,c(1,2,5,10,14,18,26,27,28,29,30,31,32,33,34,35,36,37,38,40)]
t... |
library(shiny)
# Define UI for app that draws a histogram ----
ui <- fluidPage(
title = "Cool Maps",
# Sidebar layout with input and output definitions ----
dropdownLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Slider for the number of bins ----
... | /app.r | no_license | landon-thompson/spatial-project-college | R | false | false | 1,511 | r | library(shiny)
# Define UI for app that draws a histogram ----
ui <- fluidPage(
title = "Cool Maps",
# Sidebar layout with input and output definitions ----
dropdownLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Slider for the number of bins ----
... |
context("Report Integration")
library(ORFik)
# Make test data
template <- create.experiment(dir = system.file("extdata", "", package = "ORFik"),
exper = "ORFik", txdb = system.file("extdata",
"annotations.gtf",
... | /tests/testthat/test_report.R | permissive | lukun06/ORFik | R | false | false | 653 | r | context("Report Integration")
library(ORFik)
# Make test data
template <- create.experiment(dir = system.file("extdata", "", package = "ORFik"),
exper = "ORFik", txdb = system.file("extdata",
"annotations.gtf",
... |
##Load all packages ----
library(fossil)
library(adehabitatHR)
library(maps)
library(grid)
library(gridExtra)
library(lubridate)
library(tidyverse)
library(rgdal)
library(rworldmap)
library(rgeos)
library(RODBC) ##connect to data base
library(sp)
select <- dplyr::select
##other packages used in this script: cowplot,... | /LBBG_plasticity_pub.R | no_license | jbrow247/LBBG_MigrationPlasticity_Pub | R | false | false | 97,831 | r | ##Load all packages ----
library(fossil)
library(adehabitatHR)
library(maps)
library(grid)
library(gridExtra)
library(lubridate)
library(tidyverse)
library(rgdal)
library(rworldmap)
library(rgeos)
library(RODBC) ##connect to data base
library(sp)
select <- dplyr::select
##other packages used in this script: cowplot,... |
# options(error=recover)
# options(show.error.locations=TRUE)
library(shiny)
library(ggplot2)
library(reshape2)
library(scales)
library(grid)
library(gridExtra)
library(plyr)
library(Cairo)
#upload size for file set to 40mb
options(shiny.maxRequestSize = 40*1024^2)
Sys.setenv(TZ='EST')
DEFAULT.INTEREVENT <- 12
DEFAULT... | /global.r | no_license | codingbanana/CDFplot | R | false | false | 2,919 | r | # options(error=recover)
# options(show.error.locations=TRUE)
library(shiny)
library(ggplot2)
library(reshape2)
library(scales)
library(grid)
library(gridExtra)
library(plyr)
library(Cairo)
#upload size for file set to 40mb
options(shiny.maxRequestSize = 40*1024^2)
Sys.setenv(TZ='EST')
DEFAULT.INTEREVENT <- 12
DEFAULT... |
# jdk 받고
#콘솔창에
#install.packages("remotes")
#설치되면
#remotes::install_github('haven-jeon/KoNLP', upgrade = "never", INSTALL_opts=c("--no-multiarch"))
#문서폴더에 잠이 오질 않네요.txt 파일 받아서 넣으세요
install.packages("stringi")
getwd() #현재 저장된 작업경로
install.packages("wordcloud2") #패키지 설치
Sys.setenv(JAVA_HOME='C:\\Program ... | /SourceCode/1일차/20922 천정윤.R | no_license | cksldfj/SD_R | R | false | false | 1,455 | r | # jdk 받고
#콘솔창에
#install.packages("remotes")
#설치되면
#remotes::install_github('haven-jeon/KoNLP', upgrade = "never", INSTALL_opts=c("--no-multiarch"))
#문서폴더에 잠이 오질 않네요.txt 파일 받아서 넣으세요
install.packages("stringi")
getwd() #현재 저장된 작업경로
install.packages("wordcloud2") #패키지 설치
Sys.setenv(JAVA_HOME='C:\\Program ... |
#Get BestGrad5 from smoothing results
getBestGrad5 <- function(AgeRatioScore_orig, AgeRatioScore_mav2, EduYrs, subgroup = c("adult","child")) {
if (subgroup == "adult") {
# select whether to use the straight 5-year data
# or use Mav2 or mav4 of the 5-year data
BestGrad5 <- NA
if (AgeRatioScore_orig < 4 ... | /census_workflow_getBestGrad5.R | no_license | Shelmith-Kariuki/ddharmony | R | false | false | 797 | r |
#Get BestGrad5 from smoothing results
getBestGrad5 <- function(AgeRatioScore_orig, AgeRatioScore_mav2, EduYrs, subgroup = c("adult","child")) {
if (subgroup == "adult") {
# select whether to use the straight 5-year data
# or use Mav2 or mav4 of the 5-year data
BestGrad5 <- NA
if (AgeRatioScore_orig < 4 ... |
##' @title Creates grid over the study area.
##'
##' @description If the argument thegrid of DetectClustersModel() is null, this function is
##' used to create a rectangular grid with a given step.
##' If step is NULL the step used is equal to 0.2*radius.
##' The grid contains the coordinates of the centers of th... | /R/Functions2.R | no_license | cran/DClusterm | R | false | false | 8,606 | r | ##' @title Creates grid over the study area.
##'
##' @description If the argument thegrid of DetectClustersModel() is null, this function is
##' used to create a rectangular grid with a given step.
##' If step is NULL the step used is equal to 0.2*radius.
##' The grid contains the coordinates of the centers of th... |
## Complicated gastritis/duodenitis, re-preparing GBD 2017 data with GBD 2019 data-preparation methods
rm(list=ls())
## Set up working environment
if (Sys.info()["sysname"] == "Linux") {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
} else {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
}... | /gbd_2019/nonfatal_code/digest_gastritis/prepolddata_3200_complicated_gastritis.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 8,621 | r | ## Complicated gastritis/duodenitis, re-preparing GBD 2017 data with GBD 2019 data-preparation methods
rm(list=ls())
## Set up working environment
if (Sys.info()["sysname"] == "Linux") {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
} else {
j <- "FILEPATH_J"
h <- "FILEPATH_H"
l <- "FILEPATH_L"
}... |
#uri-r, count
#tailData <- as.matrix(read.csv("../deferredTLDtail.csv", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEvent.txt.bkp", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEventSort.txt.bkp", sep=",", header=TRUE))
#mimeSize <- as.matrix(read.csv("../massagedMimeSiz... | /clientSideState_TechReport/imgs/processStats (1).R | no_license | jbrunelle/papers | R | false | false | 16,207 | r |
#uri-r, count
#tailData <- as.matrix(read.csv("../deferredTLDtail.csv", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEvent.txt.bkp", sep=",", header=TRUE))
#eventNew <- as.matrix(read.csv("../newURIsByEventSort.txt.bkp", sep=",", header=TRUE))
#mimeSize <- as.matrix(read.csv("../massagedMimeSiz... |
#---------------------------------------------------------------------------
#
# Hidden global environment for class 'Stem' stuff.
#
# Note that this now holds constants/parameters, etc. for other classes
# within sampSurf as well. JHG 16-Dec-2010.
#
# Note that this environment and its bindings are locked so t... | /R/defStemEnv.R | no_license | cran/sampSurf | R | false | false | 26,043 | r | #---------------------------------------------------------------------------
#
# Hidden global environment for class 'Stem' stuff.
#
# Note that this now holds constants/parameters, etc. for other classes
# within sampSurf as well. JHG 16-Dec-2010.
#
# Note that this environment and its bindings are locked so t... |
#############################################
### esta funcion calcula rasters de media ###
### y desvio estandar para una coleccion ###
### de n rasters ndvi ###
### input: carpeta con rasters ndvi ###
### output: raster media y raster sd ###
############################################... | /GO_E05_NDVI_MEAN_SD.r | no_license | mlcastellan/GO | R | false | false | 1,487 | r | #############################################
### esta funcion calcula rasters de media ###
### y desvio estandar para una coleccion ###
### de n rasters ndvi ###
### input: carpeta con rasters ndvi ###
### output: raster media y raster sd ###
############################################... |
#' Adds Controls for Each Pesticide You are Using
#'
#' @param raw.data Raw toxicology in standard Batterham Lab Format
#' @param key Do you want to specify your own insecticide:solvent key? If so put it in here. Default is NULL
#' @param new.key Do you want to specify your own insecticide solvent key? Defaults to FAL... | /R/dmc.control.add.R | no_license | shanedenecke/insect.toxicology | R | false | false | 3,362 | r | #' Adds Controls for Each Pesticide You are Using
#'
#' @param raw.data Raw toxicology in standard Batterham Lab Format
#' @param key Do you want to specify your own insecticide:solvent key? If so put it in here. Default is NULL
#' @param new.key Do you want to specify your own insecticide solvent key? Defaults to FAL... |
\name{rland.graph}
\alias{rland.graph}
\title{
Creates random landscape graph
}
\description{
One of the key functions of the package, which allows the creation of random landscapes (represented as graphs) with two categories: habitat patch and non-habitat matrix. The landscapes can be different depending on the parame... | /man/rland.graph.Rd | no_license | cran/MetaLandSim | R | false | false | 2,881 | rd | \name{rland.graph}
\alias{rland.graph}
\title{
Creates random landscape graph
}
\description{
One of the key functions of the package, which allows the creation of random landscapes (represented as graphs) with two categories: habitat patch and non-habitat matrix. The landscapes can be different depending on the parame... |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gapfill.R
\name{Array2Matrix}
\alias{Array2Matrix}
\title{Convert an Array with 4 Dimensions into a Matrix}
\usage{
Array2Matrix(a)
}
\arguments{
\item{a}{Array with 4 dimensions.}
}
\value{
A matrix. If \code{a} has the attribute \code{mp}, ... | /gapfill/man/Array2Matrix.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 941 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gapfill.R
\name{Array2Matrix}
\alias{Array2Matrix}
\title{Convert an Array with 4 Dimensions into a Matrix}
\usage{
Array2Matrix(a)
}
\arguments{
\item{a}{Array with 4 dimensions.}
}
\value{
A matrix. If \code{a} has the attribute \code{mp}, ... |
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(formattable)
#Define Simple interest
calcSimpleInt <- function(p,r,t... | /server.R | no_license | sanjaynvs/datasciencecoursera | R | false | false | 2,508 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(formattable)
#Define Simple interest
calcSimpleInt <- function(p,r,t... |
source("lib/load_exec_align.R")
source("lib/load_data_processing.R")
source("lib/load_verif_lib.R")
source("lib/load_phylo.R")
source("parallel_config.R")
method <- commandArgs(trailingOnly = TRUE)[1]
input_dir <- commandArgs(trailingOnly = TRUE)[2]
output_dir <- commandArgs(trailingOnly = TRUE)[3]
cv_sep <-... | /R/verification/verification_psa.R | no_license | e155721/src | R | false | false | 2,097 | r | source("lib/load_exec_align.R")
source("lib/load_data_processing.R")
source("lib/load_verif_lib.R")
source("lib/load_phylo.R")
source("parallel_config.R")
method <- commandArgs(trailingOnly = TRUE)[1]
input_dir <- commandArgs(trailingOnly = TRUE)[2]
output_dir <- commandArgs(trailingOnly = TRUE)[3]
cv_sep <-... |
#' Any Transaction From One Committee To Another
#'
#' \code{read_all_transactions} returns a dataframe about transaction data
#'
#' @param n_max Integer specifying the max amount of entries in the dataset. Defaults to the possible maximum.
#' @param verbose A progress bar is shown if R is running interactively. Defaul... | /R/fn_transactions.R | no_license | baumer-lab/fec16 | R | false | false | 1,758 | r | #' Any Transaction From One Committee To Another
#'
#' \code{read_all_transactions} returns a dataframe about transaction data
#'
#' @param n_max Integer specifying the max amount of entries in the dataset. Defaults to the possible maximum.
#' @param verbose A progress bar is shown if R is running interactively. Defaul... |
\name{I2edge}
\alias{I2edge}
\title{Create edge data frame from gene - geneset indicator matrix}
\description{Used for input to \code{bp} function, \code{edge} argument}
\usage{
I2edge(I)
}
\arguments{
\item{I}{indicator matrix, rows are genes and columns are genesets}
}
\value{
data frame of edges:
\item{column 1... | /man/I2edge.Rd | no_license | tienv/Rolemodel | R | false | false | 462 | rd | \name{I2edge}
\alias{I2edge}
\title{Create edge data frame from gene - geneset indicator matrix}
\description{Used for input to \code{bp} function, \code{edge} argument}
\usage{
I2edge(I)
}
\arguments{
\item{I}{indicator matrix, rows are genes and columns are genesets}
}
\value{
data frame of edges:
\item{column 1... |
#
# glob-def-usage.R, 7 Feb 20
# Data from:
# Understanding Source Code Evolution Using Abstract Syntax Tree Matching
# Iulian Neamtiu and Jeffrey S. Foster and Michael Hicks
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG variables_C types_C funct... | /sourcecode/glob-def-usage.R | no_license | shanechin/ESEUR-code-data | R | false | false | 970 | r | #
# glob-def-usage.R, 7 Feb 20
# Data from:
# Understanding Source Code Evolution Using Abstract Syntax Tree Matching
# Iulian Neamtiu and Jeffrey S. Foster and Michael Hicks
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG variables_C types_C funct... |
\name{loadNetwork}
\Rdversion{1.1}
\alias{loadNetwork}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Load a Boolean network from a file
}
\description{
Loads a Boolean network or probabilistic Boolean network from a file and converts it to an internal transition table representation.
}
\usage{
... | /man/loadNetwork.Rd | no_license | JacobVisscher/BoolNet | R | false | false | 14,188 | rd | \name{loadNetwork}
\Rdversion{1.1}
\alias{loadNetwork}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Load a Boolean network from a file
}
\description{
Loads a Boolean network or probabilistic Boolean network from a file and converts it to an internal transition table representation.
}
\usage{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.