blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea997373e1c22dc1f7c93acc4c7e3e532b5d46bf
|
c3a10ba8fb8bcd3769728612ff737babd683670a
|
/R/Code/bbg_datamgr.r
|
2a541d1b77348a3da9923e1f1311202fcabb08c6
|
[] |
no_license
|
apexdevelop/working_dir
|
3f707bf81bda1e16d58ea358a0dcd71ce3993722
|
e514c41e99699aa52d3b4a553c99b31b72d45202
|
refs/heads/master
| 2020-03-11T16:48:47.009053
| 2019-05-31T19:01:20
| 2019-05-31T19:01:20
| 130,128,853
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,541
|
r
|
bbg_datamgr.r
|
# bbg_datamgr.r: pull daily percent-change history for a set of aluminium-related
# tickers from Bloomberg via Rbbg and write the combined series out as a CSV.
#
# You can specify that a row be returned for all dates in the requested period,
# even when markets are closed or otherwise no data is available, by specifying
# include.non.trading.days=TRUE. If you request multiple securities, this is
# automatically set to TRUE. You can use na.omit or na.exclude to remove
# these rows
library(Rbbg)
# Open a connection to the local Bloomberg terminal.
conn=blpConnect()
# Field to request: 1-day percent change.
fields=c("CHG_PCT_1D")
securities=c("2600 HK Equity","AA Equity","AWC AU Equity","486 HK Equity", "HNDL IN Equity")
start_date="2010-01-29"
end_date="2017-10-04"
#end_date=Sys.Date()
start.date = as.POSIXct(start_date)
end.date = as.POSIXct(end_date)
# Historical data request; non-trading days are included and filled with the
# previous value, so every security gets a row for every calendar day.
x_h=bdh(conn,securities,fields,start.date,end.date,
option_names = c("nonTradingDayFillOption", "nonTradingDayFillMethod"),
option_values = c("ALL_CALENDAR_DAYS", "PREVIOUS_VALUE"),always.display.tickers = TRUE)
#the following need to be explored more, because diff col have diff #of NA values
#could assign this to siyuan
#nona_xh=na.omit(x_h)
#list_ret=unstack(nona_xh,CHG_PCT_1D~ticker)
#list_date=unstack(nona_xh,date~ticker)
# Reshape the long (ticker, date, value) result into one column per ticker.
list_ret=unstack(x_h,CHG_PCT_1D~ticker)
list_date=unstack(x_h,date~ticker)
df.ret=as.data.frame(list_ret)
# All tickers share the same calendar after the fill above, so take the date
# column of the first ticker only.
df.date=as.data.frame(list_date[1])
mtx.date=as.matrix(df.date)
date.date=as.Date(mtx.date)
library(zoo) # Load the zoo package
library(xts) # Load the xts package
# Index the return matrix by date as an xts time series.
rtn.xts=xts(df.ret,date.date)
# Output location; NOTE(review): hard-coded user-specific Windows path.
pathname="C:/users/ychen/documents/git/working_dir/R/Data/"
outfile="alum_factor_rtn.csv"
file_dir_out=paste(pathname,outfile,sep="")
write.csv(rtn.xts,file_dir_out)
|
3bee3b03f6ebe254911e97071cd6b57ffdedec4e
|
b28ca303e610d6ce0a158c44181a103beea50ac8
|
/man/EllipAxes.Rd
|
d15824a81b99e0019fb16cd5db913ac72d7a20bf
|
[] |
no_license
|
cran/RockFab
|
ba61ed4ea01c84f7efa7ff12e354326fbe7e362c
|
87537ff68f2cbc4e680d11a995e0ac61d8db7db8
|
refs/heads/master
| 2022-07-09T16:31:16.194443
| 2022-06-23T06:18:46
| 2022-06-23T06:18:46
| 17,693,392
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 941
|
rd
|
EllipAxes.Rd
|
\name{EllipAxes}
\alias{EllipAxes}
\title{
Calculate ellipsoid axial lengths based on octahedral shear strain and Lode parameter.
}
\description{
Function uses the octahedral shear strain and Lode parameter of a desired strain ellipsoid and returns the normalized axial lengths X Y and Z.
}
\usage{
EllipAxes(es, nu)
}
\arguments{
\item{es}{
Octahedral shear strain. Values must be positive.
}
\item{nu}{
Lode parameter. Values must be between -1 and 1.
}
}
\value{
A numeric vector of length three with values returned in descending order (i.e. X, Y, and Z)
}
\references{
See for example: Ramsay, J. and M. Huber (1993). The techniques of modern structural geology.
}
\author{
Jeffrey R. Webber
}
\note{
Not used in RockFab scripts but can be useful for other endeavors.
}
\examples{
es <- runif(min = 0, max = 3, n = 1)
nu <- runif(min = -1, max = 1, n = 1)
EllipAxes(es = es, nu = nu)
}
|
d2df3a5ce57dee3ea3ad51860de0d8a1d7f0d8b5
|
6e5f307e8ad2619605b6a43d8b4965d9e3c7abaf
|
/plot1.R
|
54e07cc4781936c31b84b517b1382fb0bf49413d
|
[] |
no_license
|
brettryder/ExData_Plotting1
|
13bc668ebd4e6e91531e180355a191d74dad0737
|
b9cc8ad1402c2f9fbf0a7ea82ffb4b012b7afdfc
|
refs/heads/master
| 2020-12-03T02:26:07.895387
| 2014-09-07T14:40:28
| 2014-09-07T14:40:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
plot1.R
|
## plot1.R: histogram of Global Active Power for 1-2 Feb 2007 from the UCI
## household power consumption data set.
## NOTE(review): skip=66637/nrows=2880 hard-codes the file offset of those two
## days (2 days x 1440 minutes); confirm if the data set version changes.
cnames<-c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
## Read only the rows for 1-2 Feb 2007.
y<-read.table("household_power_consumption.txt",skip=66637,nrows=2880,sep=";",stringsAsFactors=FALSE,col.names=cnames)
## Combined date-time stamp (not used by this histogram; kept for parity with
## the other plot scripts in this assignment).
d<-paste(y$Date,y$Time)
datetime<-strptime(d,"%d/%m/%Y %H:%M:%S")
png(file="plot1.png",height=480,width=480,bg="transparent")
## FIX: corrected the axis-label typo "kilowats" -> "kilowatts".
hist(y$Global_active_power,xlab="Global Active Power (kilowatts)",ylab="Frequency",main="Global Active Power",col=2)
dev.off()
|
f19e3e79b40279c9da7dbaf23474172b685c9fbe
|
ab78c055a5e0a60cb91733bb55442cbc98891255
|
/R/GEOquery_patch.R
|
1be33f1d2c85492150a619423a3d327eb8792cfd
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rmartin84/rbbt-marq
|
efd5cc0646d5eca199798c4ee2201fb97ab57f37
|
b7127afef5fc91256b5af33f06ae1e73a1761ae1
|
refs/heads/master
| 2021-01-21T18:33:47.745915
| 2010-04-11T16:07:07
| 2010-04-11T16:07:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,817
|
r
|
GEOquery_patch.R
|
library(GEOquery);
# The original version of the function failed if the dataset had extra probe ids not in the platform.
# This version fixes that
#
# GDS2eSet: convert a GEOquery GDS (GEO DataSet) object into a Biobase
# ExpressionSet.
#   GDS      - the GDS object to convert.
#   do.log2  - if TRUE, log2-transform the expression matrix.
#   GPL      - optional pre-fetched platform object; when NULL it is fetched
#              with getGEO() using the GDS's platform accession.
#   AnnotGPL - passed through to getGEO() when fetching the platform.
# Returns an ExpressionSet whose feature rows are restricted to probe ids
# present in the platform table (probes with no platform match are dropped).
"GDS2eSet" <-
  function(GDS,do.log2=FALSE,GPL=NULL,AnnotGPL=TRUE) {
    require(Biobase)
    # exclude non-numeric columns
    if(is.null(GPL)) {
      GPL <- getGEO(Meta(GDS)$platform,AnnotGPL=AnnotGPL)
    }
    # Position of each GDS probe id in the platform table; NA when the probe
    # id does not exist on the platform (the case this patch handles).
    ord.table <- match(Table(GDS)[,1],Table(GPL)[,1])
    # Sample columns are those whose names contain 'GSM'.
    inc.columns <- grep('GSM',colnames(Table(GDS)))
    # Coerce sample columns to a numeric matrix; warnings from non-numeric
    # values are suppressed (such values become NA).
    mat <- suppressWarnings(as.matrix(apply(Table(GDS)[,inc.columns],2,
                                            function(x) {as.numeric(as.character(x))})))
    if(do.log2) {
      expr <- log2(mat)
    } else {
      expr <- mat
    }
    rownames(expr) <- as.character(Table(GDS)$ID_REF)
    # Phenotype data: one row per sample, keyed by sample name.
    tmp <- Columns(GDS)
    rownames(tmp) <- as.character(tmp$sample)
    pheno <- new("AnnotatedDataFrame",data=tmp)
    # Experiment metadata, defaulting to "" when a field is absent.
    mabstract=ifelse(is.null(Meta(GDS)$description),"",Meta(GDS)$description)
    mpubmedids=ifelse(is.null(Meta(GDS)$pubmed_id),"",Meta(GDS)$pubmed_id)
    mtitle=ifelse(is.null(Meta(GDS)$title),"",Meta(GDS)$title)
    # Feature (probe) annotation from the platform, reordered to match the
    # GDS row order via ord.table (NA rows correspond to unmatched probes).
    dt <- Table(GPL)
    rownames(dt) <- as.character(dt$ID)
    featuredata <- new('AnnotatedDataFrame',data=dt[ord.table,],
                       varMetadata=data.frame(Column=Columns(GPL)[,1],
                                              labelDescription=Columns(GPL)[,2]))
    # use !is.na(ord.table) to remove extra probe ids in GDS and not in GPL
    eset <- new('ExpressionSet',exprs=expr[!is.na(ord.table),],phenoData=pheno,
                featureData=featuredata[!is.na(ord.table),],
                experimentData=new("MIAME",
                                   abstract=mabstract,
                                   title=mtitle,
                                   pubMedIds=mpubmedids,
                                   other=Meta(GDS)))
    return(eset)
  }
|
877d29b9eade00ab5ef166227f9976f4c4dea051
|
2152276a40e8004ea7c9494fd0094629681ad43b
|
/man/pb_add_tag_column.Rd
|
d45d03a0b6bc9b4b1c5877f531ed762e97384c74
|
[
"MIT"
] |
permissive
|
RMHogervorst/pinboardr
|
10f5b084e0dbead500b569d5589d761063eb74ba
|
d4d145f97c054a09e8923c972acb924a084d4b70
|
refs/heads/master
| 2022-11-30T06:19:09.718937
| 2020-08-08T10:03:10
| 2020-08-08T10:07:16
| 262,958,200
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 623
|
rd
|
pb_add_tag_column.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{pb_add_tag_column}
\alias{pb_add_tag_column}
\title{Extract one tag from tag columns into separate column.}
\usage{
pb_add_tag_column(dataframe, tag)
}
\arguments{
\item{dataframe}{the dataframe of bookmarks}
\item{tag}{tag you would like to extract into a new column}
}
\value{
a dataframe with one extra column, containing TRUE or FALSE for presence of the tag
}
\description{
Convenience function to make working with the dataframe of bookmarks
a bit easier.
}
\examples{
\dontrun{
pb_add_tag_column(all_bookmarks, "europe")
}
}
|
eb0fd843a964d3c5cddf94067538eccfc6781479
|
9047685851ec9bebdd8ea43f51eae52a071d7b6a
|
/R/plot.pointestimate.R
|
52d2a2711499af9995d42c9837136b1d4cb2e2db
|
[] |
no_license
|
sarawade/mcclust.ext
|
4f87bb7372efde1d26f3a851ffa4052413acda96
|
4cd79fa68a5807f188285073bf61dd3ae7748c94
|
refs/heads/master
| 2020-06-04T06:30:16.288712
| 2019-06-14T09:43:41
| 2019-06-14T09:43:41
| 191,905,481
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,588
|
r
|
plot.pointestimate.R
|
# plot.c.estimate: plot method for a clustering point estimate.
#   x      - object with $cl (matrix of cluster labels, one row per method)
#            and $method (method name(s), or "all", in which case the names
#            of x$value are used).
#   data   - data.frame of the clustered observations (required).
#   dx     - optional density estimate evaluated at the data points (p == 1).
#   xgrid  - optional grid on which to evaluate/plot the density (p == 1).
#   dxgrid - optional density values on xgrid (p == 1).
#   ...    - further arguments passed on to plot()/points().
# For p == 1 points are drawn on a density curve; for p == 2 as a
# scatterplot; for p > 2 on the first two principal components.  One plot is
# produced per clustering method, points coloured by cluster membership.
plot.c.estimate=function(x,data=NULL,dx=NULL,xgrid=NULL,dxgrid=NULL,...){
  if(is.null(data)){
    stop("data must be supplied")}
  if(!is.data.frame(data)){
    stop("data must be a data.frame")}
  p=ncol(data)
  n=nrow(data)
  # Ensure x$cl is a matrix even when a single clustering vector was supplied.
  if(!is.matrix(x$cl)) x$cl=matrix(x$cl,nrow=1)
  k=apply(x$cl,1,max)  # number of clusters per method
  n.c=nrow(x$cl)       # number of methods/clusterings
  # Prompt before each new plot; FIX: restore the user's par() setting on exit
  # instead of leaving ask=TRUE set globally.
  oask=par(ask = TRUE)
  on.exit(par(oask))
  method.names=x$method
  if(method.names=="all"){method.names=names(x$value)}
  if(p==1){
    x1=data[,1]
    if(is.null(dxgrid)){
      if(is.null(xgrid)){
        dout=density(x1)
      }
      else{
        # FIX: estimate the density of the numeric column x1, not of the
        # data.frame 'data' (density() requires a numeric vector and would
        # error on a data.frame).
        dout=density(x1,n=length(xgrid),from=xgrid[1],to=xgrid[length(xgrid)])
      }
      xgrid=dout$x
      dxgrid=dout$y
    }
    #Compute density estimate at x1
    x2=dx
    if(is.null(x2)){
      aout=approx(xgrid,dxgrid,xout=x1)
      x2=aout$y}
    for(i in 1:n.c){
      cl=rainbow(k[i])
      plot(xgrid,dxgrid,type="l",xlab=names(data),ylab="density",main=paste("Method:",method.names[i]),...)
      for(j in 1:k[i]){
        points(x1[x$cl[i,]==j], x2[x$cl[i,]==j],col=cl[j],...)
      }}
  }
  if(p==2){
    x1=data[,1]
    x2=data[,2]
    for(i in 1:n.c){
      cl=rainbow(k[i])
      plot(x1,x2,xlab=names(data)[1],ylab=names(data)[2],main=paste("Method:",method.names[i]),...)
      for(j in 1:k[i]){
        points(x1[x$cl[i,]==j], x2[x$cl[i,]==j],col=cl[j],...)
      }}
  }
  if(p>2){
    # More than two dimensions: project onto the first two principal components.
    x.pca=princomp(data,scores=TRUE)
    x1=x.pca$scores[,1]
    x2=x.pca$scores[,2]
    for(i in 1:n.c){
      cl=rainbow(k[i])
      plot(x1,x2,xlab="PC 1",ylab="PC 2",main=paste("Method:",method.names[i]),...)
      for(j in 1:k[i]){
        points(x1[x$cl[i,]==j], x2[x$cl[i,]==j],col=cl[j],...)
      }}
  }
}
|
eab4b01f8aa71539df553e8be71c487bd15f6b4e
|
4a9bf7ca2c4973b53940c4cc26d58e6f18522d0f
|
/R/updateObject.R
|
07a2d5d6f95d4e6e19dc0393e95dcf76cf5a8938
|
[] |
no_license
|
AswinSSoman/clusterExperiment
|
d3aa604a060ad21fa7b5ab62eeb8bafedd7e105c
|
2328bd62f1b7f2c1c84fd1b25e1944946a63d7f9
|
refs/heads/master
| 2020-04-01T21:00:42.473582
| 2018-09-17T15:16:44
| 2018-09-17T15:16:44
| 153,632,551
| 1
| 1
| null | 2018-10-18T13:57:01
| 2018-10-18T13:57:01
| null |
UTF-8
|
R
| false
| false
| 5,651
|
r
|
updateObject.R
|
#' @title Update old ClusterExperiment object to current class definition
#' @name updateObject
#' @description This function updates ClusterExperiment objects from previous
#' versions of package into the current definition
#' @param object a \code{ClusterExperiment} (or \code{clusterExperiment} from
#' older versions). Must have at a minimum a slot \code{clusterMatrix}.
#' @inheritParams BiocGenerics::updateObject
#' @inheritParams ClusterExperiment-class
#' @details The function creates a valid \code{ClusterExperiment} object by
#' adding the default values of missing slots. It does so by calling the
#' \code{\link{ClusterExperiment}} function, which imputs default (empty)
#' values for missing slots.
#' @details The object is required to have minimal components to be updated.
#' Specifically, it must have all the required elements of a Summarized
#' Experiment as well as the basic slots of a ClusterExperiment object which
#' have not changed over time. These are: \code{clusterMatrix},
#' \code{primaryIndex}, \code{clusterInfo}, \code{transformation},
#' \code{clusterTypes}, \code{clusterLegend}, \code{orderSamples}.
#' @details If \emph{any} of the dendrogram-related slots are missing, ALL of
#' the dendrogram \emph{and} merge related slots will be cleared to default
#' values. Similarly, if \emph{any} of the merge-related slots are missing,
#' ALL of the merge-related slots will be cleared to the default values.
#' @details The function currently only works for object of
#' \code{ClusterExperiment}, not the older name \code{clusterExperiment}.
#' @return A valid \code{ClusterExperiment} object based on the current
#' definition of ClusterExperiment.
#' @seealso \code{\link{ClusterExperiment}}
#' @aliases updateObject,ClusterExperiment-method
#' @rdname updateObject
#' @export
#' @importFrom BiocGenerics updateObject
setMethod(
  f = "updateObject",
  signature = signature(object = "ClusterExperiment"),
  definition = function(object, checkTransformAndAssay=FALSE,...,verbose=FALSE){
    #create snames, which is the slots the object actually has
    #and will eventually be narrowed down to only those slots will pass to `ClusterExperiment`
    #list names of all current required slots
    ceSlots<-slotNames(object)
    # Which of the current class's slots does this (possibly old) object
    # actually carry?  .hasSlot checks each candidate slot by name.
    testSnames<-sapply(ceSlots,.hasSlot,object=object)
    snames<-ceSlots[testSnames]
    #--------
    #check has at least the required slots of SummarizedExperiment class
    #--------
    if(!all(slotNames("SummarizedExperiment") %in% snames)){
      missSE<-which(!slotNames("SummarizedExperiment") %in% snames)
      stop("given object does not have the basic slots of SummarizedExperiment, cannot be updated (missing:",paste(slotNames("SummarizedExperiment")[missSE],collapse=","),"). To construct a ClusterExperiment object from its original components, use the function 'ClusterExperiment'")
    }
    #--------
    #check has minimal required slots of a clusterExperiment object of any version
    #--------
    requiredSlots<-c("clusterMatrix","primaryIndex", "clusterInfo","transformation","clusterTypes","clusterLegend","orderSamples")
    if(!all(requiredSlots %in% snames)){
      missCE<-which(!requiredSlots %in% snames)
      stop("given object does not have the basic slots of ClusterExperiment, cannot be updated (missing:",paste(requiredSlots[missCE],collapse=","),"). To construct a ClusterExperiment object from its original components, use the function 'ClusterExperiment'")
    }
    #--------
    #extract either SE or SCE object
    #--------
    # Prefer SingleCellExperiment (required since version 2.0.0); fall back to
    # SummarizedExperiment for objects predating that requirement.
    # if(canCoerce(object,"SummarizedExperiment")) se<-updateObject(as(object,"SummarizedExperiment"))
    if(canCoerce(object,"SingleCellExperiment")){
      #if object was from before SCE requirement (2.0.0)
      se<-updateObject(as(object,"SingleCellExperiment"))
    }
    else{
      if(canCoerce(object,"SummarizedExperiment")) se<-updateObject(as(object,"SummarizedExperiment"))
      else stop("cannot coerce object to SummarizedExperiment")
    }
    #--------
    # Ignore slots that have to come together, with warnings
    #--------
    dendroSlots<-c("dendro_samples", "dendro_clusters",
                   "dendro_index", "dendro_outbranch")
    mergeSlots<-c("merge_index",
                  "merge_dendrocluster_index",
                  "merge_method", "merge_demethod", "merge_cutoff",
                  "merge_nodeProp", "merge_nodeMerge")
    # If only SOME dendro slots are present, drop both dendro and merge slots
    # (merge information depends on the dendrogram).
    if(any(!dendroSlots %in% snames)& any(dendroSlots %in% snames)){
      warning("'object' does not contain ALL required slots saving the dendro-related information. Updated object will remove all dendro AND merge related slots")
      snames<-snames[-which(snames %in% dendroSlots)]
      snames<-snames[-which(snames %in% mergeSlots)]
    }
    # If only SOME merge slots are present, drop all merge slots.
    if(any(!mergeSlots %in% snames) & any(mergeSlots %in% snames)){
      warning("'object' does not contain ALL required slots saving the merge-related information. Updated object will remove all merge related slots")
      snames<-snames[-which(snames %in% mergeSlots)]
    }
    #--------
    # Get included slots of ones I create
    #--------
    myslots<- c("transformation",
                "primaryIndex", "clusterInfo",
                "clusterTypes", "dendro_samples", "dendro_clusters",
                "dendro_index", "dendro_outbranch", "coClustering",
                "clusterLegend", "orderSamples", "merge_index",
                "merge_dendrocluster_index",
                "merge_method", "merge_demethod", "merge_cutoff",
                "merge_nodeProp", "merge_nodeMerge")
    snames<-snames[snames %in% myslots]
    # Rebuild via the ClusterExperiment() constructor, passing the surviving
    # slots as named arguments pulled from the old object's attributes.
    object<-try(do.call("ClusterExperiment",c(list(object=se,clusters=object@clusterMatrix,checkTransformAndAssay=checkTransformAndAssay),attributes(object)[snames])),silent=TRUE)
    if(!inherits(object,"try-error")){
      return(object)
    }
    else stop("Attempt to convert did not result in valid object. Here is the error from 'ClusterExperiment':\n",object)
  }
)
|
5863f3806950c22c44124d1a578b071c057bbfed
|
91470d02494415246cae0f0754c175d11d21e6de
|
/Analisa_banco_unico.R
|
f265ab2cee75613629425ff2bfbe4e41c127183e
|
[] |
no_license
|
nemesis-lab/pesquisa-suicidio
|
30f28afaf9a7b33800c7ae4d7c593417fe941dd1
|
64b5efe285aaf9f7b73ef40656dbb0dcd6d48d77
|
refs/heads/main
| 2023-05-08T01:57:11.947588
| 2021-05-25T23:08:12
| 2021-05-25T23:08:12
| 346,498,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,739
|
r
|
Analisa_banco_unico.R
|
# Analisa_banco_unico.R: descriptive tables and sex odds ratios by cause
# category for the combined Brazil/Argentina/Colombia suicide data set.
library(dplyr)
library(tidyverse)
library(readr)
library(pastecs)
library(fmsb)
library(writexl)
# LOAD THE DATA SET
# NOTE(review): hard-coded user-specific Windows path.
banco <- read_csv("C:/Users/Carlos Garcia Filho/Desktop/Comparativo - Brasil, Argentina e Colômbia/data_final.csv")
# INSPECT THE VARIABLES: for each categorical variable, build a frequency
# table with percentages and write it to a semicolon-separated text file.
df <- as.data.frame(table(banco$SEXO))
df <- rename(df, SEXO = Var1 )
df$perc <- round((df$Freq / sum(df$Freq))*100,2)
write.table(df, file = "SEXO.txt", sep = ";", quote = FALSE)
df
df <- as.data.frame(table(banco$CAUSA_CAT))
df <- rename(df, CAUSA_CAT = Var1 )
df$perc <- round((df$Freq / sum(df$Freq))*100,2)
write.table(df, file = "CAUSA_CAT.txt", sep = ";", quote = FALSE)
df
df <- as.data.frame(table(banco$ANO))
df <- rename(df, ANO = Var1 )
df$perc <- round((df$Freq / sum(df$Freq))*100,2)
write.table(df, file = "ANO.txt", sep = ";", quote = FALSE)
df
df <- as.data.frame(table(banco$PAIS))
df <- rename(df, PAIS = Var1 )
df$perc <- round((df$Freq / sum(df$Freq))*100,2)
write.table(df, file = "PAIS.txt", sep = ";", quote = FALSE)
df
# Descriptive statistics for the (single) quantitative variable in column 3.
res <- stat.desc(banco[, 3])
write.table(res, file = "QUANTITATIVA.txt", sep = ";", quote = FALSE)
res
# DROP RECORDS WITH SEX CODED AS "9" - ARGENTINA
# (excluded records are saved to a file before removal)
df_excluido <- subset(banco, SEXO=="9")
write.table(df_excluido, file = "EXCLUIDO.txt", sep = ";", quote = FALSE)
banco <- subset(banco, !SEXO=="9")
# ANALYSIS
# SET COUNTRY AND YEAR, IN THE FORMAT "Country" AND "09".
escolha_pais <- "Brasil"
escolha_ano <- "09"
# RUN THE ANALYSIS FROM HERE ON
# BUILD THE FIRST PART OF THE OUTPUT TABLE (CAUSE CATEGORY BY SEX, FOR THE
# CHOSEN COUNTRY/YEAR), with counts, per-sex percentages and a TOTAL row.
pais_instrumento <- banco %>%
  filter(PAIS == escolha_pais & ANO == escolha_ano) %>%
  select(CAUSA_CAT, SEXO, QT) %>%
  pivot_wider(names_from = SEXO, values_from = QT) %>%
  rename(Masculino = "1", Feminino = "2") %>%
  mutate(Porc_Masculino = 100*Masculino/sum(Masculino)) %>%
  mutate(Porc_Feminino = 100*Feminino/sum(Feminino)) %>%
  bind_rows(summarise_all(., ~if(is.numeric(.)) sum(.) else "TOTAL")) %>%
  mutate(obs = 1:n())
# FUNCTION TO COMPUTE THE ODDS RATIO (fmsb::oddsratio) for row x against the
# remaining causes, by sex.
# NOTE(review): row 8 is assumed to be the TOTAL row, i.e. exactly seven
# CAUSA_CAT categories -- confirm this holds for every country/year.
chama <- function(x){
  df <- oddsratio(
    as.numeric(pais_instrumento[x,2]),
    (as.numeric(pais_instrumento[8,2])-as.numeric(pais_instrumento[x,2])),
    as.numeric(pais_instrumento[x,3]),
    (as.numeric(pais_instrumento[8,3])-as.numeric(pais_instrumento[x,3]))
  )
  # Return: row index, p-value, confidence interval bounds, point estimate.
  z <- c(x,
         as.numeric(df[["p.value"]]),
         as.numeric(df[["conf.int"]]),
         as.numeric(df[["estimate"]])
  )
}
# RUN THE ODDS-RATIO FUNCTION FOR EACH ROW OF THE OUTPUT TABLE
linha <- c(1,2,3,4,5,6,7,8)
odds_trans <- sapply(linha, chama)
# TRANSPOSE ROWS AND COLUMNS OF THE TABLE CREATED BY THE ODDS FUNCTION
odds_trans <- data.frame(odds_trans)
odds <- data.frame(t(odds_trans))
# RENAME THE VARIABLES OF THE TABLE CREATED BY THE ODDS FUNCTION
odds <- rename(odds, obs = X1, valor_p = X2, limite_inferior = X3, limite_superior = X4, estimativa = X5)
# JOIN THE CAUSE-BY-SEX TABLE WITH THE ODDS-RATIO TABLE AND REORDER COLUMNS
pais_instrumento_odds <- merge(pais_instrumento, odds, by="obs")
pais_instrumento_odds$obs <- NULL
nova_ordem_colunas <- c("CAUSA_CAT",
                        "Masculino", "Porc_Masculino",
                        "Feminino", "Porc_Feminino",
                        "estimativa", "limite_inferior", "limite_superior", "valor_p")
pais_instrumento_odds <- pais_instrumento_odds[, nova_ordem_colunas]
# Write the final table to an Excel file named <country>_<year>.xlsx.
file_name_final <- paste("C:/Users/Carlos Garcia Filho/Desktop/Comparativo - Brasil, Argentina e Colômbia/",as.character(escolha_pais),"_",as.character(escolha_ano),".xlsx", sep = "")
write_xlsx(pais_instrumento_odds, file_name_final)
|
422134a64bf10e0e5475677871f7011a778c3600
|
b3b1d17bccd5e33d92acf5ffba72ee2a36c9abda
|
/R/temp_registry.R
|
dfd7cd2f576734ddb60878302f8c90fd1b29398a
|
[] |
no_license
|
HenrikBengtsson/future.batchtools
|
59a0ebac832e890da5a88b029af8b4a6bf9031cd
|
10b8d482625eaf4e802b93088feddda3b775651b
|
refs/heads/develop
| 2023-08-05T06:45:02.100130
| 2023-02-24T10:42:02
| 2023-02-24T10:42:02
| 56,456,829
| 87
| 11
| null | 2023-04-30T10:11:52
| 2016-04-17T20:27:22
|
R
|
UTF-8
|
R
| false
| false
| 4,359
|
r
|
temp_registry.R
|
#' @importFrom batchtools makeRegistry saveRegistry
# temp_registry(): create a fresh, uniquely-named batchtools registry for one
# future.  Implemented with local() so that `regs` -- the environment tracking
# the registry keys handed out in this session -- is private state shared by
# the returned function and its helper.
temp_registry <- local({
  ## All known batchtools registries
  regs <- new.env()

  # make_registry(): create (and optionally post-tweak) a batchtools registry.
  #   cluster.functions - optional batchtools ClusterFunctions to install on
  #                       the new registry.
  #   config            - named list of registry fields to assign after
  #                       creation; the special entry `work.dir` is consumed
  #                       here and passed to makeRegistry() instead.
  #   ...               - passed on to batchtools::makeRegistry().
  make_registry <- function(cluster.functions = NULL, config = list(), ...) {
    ## Temporarily disable batchtools output?
    ## (i.e. messages and progress bars)
    debug <- getOption("future.debug", FALSE)
    batchtools_output <- getOption("future.batchtools.output", debug)
    work.dir <- config$work.dir
    if (is.null(work.dir)) work.dir <- getwd()
    config$work.dir <- NULL
    if (!batchtools_output) {
      oopts <- options(batchtools.verbose = FALSE, batchtools.progress = FALSE)
      on.exit(options(oopts))
    }
    ## WORKAROUND: batchtools::makeRegistry() updates the RNG state,
    ## which we must make sure to undo.
    with_stealth_rng({
      reg <- makeRegistry(work.dir = work.dir, ...)
    })
    if (!is.null(cluster.functions)) { ### FIXME
      reg$cluster.functions <- cluster.functions
    }
    ## Post-tweak the batchtools registry?
    ## This avoids having to set up a custom batchtools 'conf.file' etc.
    if (length(config) > 0L) {
      names <- names(config)
      for (name in names) reg[[name]] <- config[[name]]
      with_stealth_rng({
        saveRegistry(reg)
      })
    }
    reg
  } ## make_registry()

  # The function returned by local(): pick a unique registry directory for the
  # given label, then delegate creation to make_registry().
  #   label  - job label (may repeat across futures); defaults to "batchtools".
  #   path   - parent directory for this session's registries; defaults to
  #            future_cache_path().
  #   config - named list of registry overrides (see make_registry()).
  #   ...    - passed on to make_registry()/makeRegistry().
  function(label = "batchtools", path = NULL, config = list(), ...) {
    if (is.null(label)) label <- "batchtools"
    ## The job label (the name on the job queue) - may be duplicated
    label <- as.character(label)
    stop_if_not(length(label) == 1L, nchar(label) > 0L)
    ## This session's path holding all of its future batchtools directories
    ## e.g. .future/<datetimestamp>-<unique_id>/
    if (is.null(path)) path <- future_cache_path()
    if (length(config) > 0L) {
      stop_if_not(is.list(config))
      names <- names(config)
      stop_if_not(!is.null(names), all(nzchar(names)))
    }
    ## The batchtools subfolder for a specific future - must be unique
    prefix <- sprintf("%s_", label)
    ## FIXME: We need to make sure 'prefix' consists of only valid
    ## filename characters. /HB 2016-10-19
    prefix <- as_valid_directory_prefix(prefix)
    ## WORKAROUND: Avoid updating the RNG state
    with_stealth_rng({
      unique <- FALSE
      while (!unique) {
        ## The FutureRegistry key for this batchtools future - must be unique
        key <- tempvar(prefix = prefix, value = NA, envir = regs)
        ## The directory for this batchtools future
        ## e.g. .future/<datetimestamp>-<unique_id>/<key>/
        path_registry <- file.path(path, key)
        ## Should not happen, but just in case.
        unique <- !file.exists(path_registry)
      }
    })
    ## FIXME: We need to make sure 'label' consists of only valid
    ## batchtools ID characters, i.e. it must match regular
    ## expression "^[a-zA-Z]+[0-9a-zA-Z_]*$".
    ## /HB 2016-10-19
    ## NOTE(review): `reg_id` is computed but never used below -- presumably it
    ## was meant to be passed into the registry creation; confirm intent.
    reg_id <- as_valid_registry_id(label)
    make_registry(file.dir = path_registry, config = config, ...)
  }
})
# Blank out characters of `name` that do not match `pattern`.
# Accepts either a single string or a character vector of single characters;
# a single string is split, filtered, and collapsed back into one string.
# Invalid characters are replaced by "" (length is preserved for vector
# input).  If splitting yields nothing at all, `default` is returned.
drop_non_valid_characters <- function(name, pattern, default = "batchtools") {
  ## Remember whether the caller passed one string so we can collapse back.
  was_single <- length(name) == 1L
  chars <- unlist(strsplit(name, split = "", fixed = TRUE), use.names = FALSE)
  if (length(chars) == 0L) {
    return(default)
  }
  ## Blank out (rather than drop) non-matching characters.
  keep <- grepl(pattern, chars)
  chars[!keep] <- ""
  if (was_single) {
    chars <- paste(chars, collapse = "")
  }
  chars
}
# Sanitize `name` into a valid directory-name prefix: only letters, digits,
# underscores, dashes, and periods survive.  Errors (via stop_if_not) if
# nothing valid remains after cleaning.
as_valid_directory_prefix <- function(name) {
  valid_re <- "^[-._a-zA-Z0-9]+$"
  ## Fast path: already entirely valid.
  if (grepl(valid_re, name)) {
    return(name)
  }
  chars <- unlist(strsplit(name, split = "", fixed = TRUE), use.names = FALSE)
  chars <- drop_non_valid_characters(chars, pattern = valid_re)
  cleaned <- paste(chars, collapse = "")
  stop_if_not(grepl(valid_re, cleaned))
  cleaned
}
# Sanitize `name` into a valid batchtools registry id: must match
# "^[a-zA-Z]+[0-9a-zA-Z_]*$" (letters, digits, underscores; starts with a
# letter).  Invalid characters are dropped; a non-letter first character is
# replaced by "z".  Errors (via stop_if_not) if the result is still invalid.
as_valid_registry_id <- function(name) {
  pattern <- "^[a-zA-Z]+[0-9a-zA-Z_]*$"
  ## Nothing to do?
  if (grepl(pattern, name)) return(name)
  name <- unlist(strsplit(name, split = "", fixed = TRUE), use.names = FALSE)
  ## All characters must be letters, digits, or underscores
  name <- drop_non_valid_characters(name, pattern = "[0-9a-zA-Z_]")
  name <- name[nzchar(name)]
  ## FIX: if no valid character survives, name[1] below would be NA and the
  ## grepl() call would fail; fall back to a single letter.
  if (length(name) == 0L) name <- "z"
  ## First character must be a letter :/
  if (!grepl("^[a-zA-Z]+", name[1])) name[1] <- "z"
  name <- paste(name, collapse = "")
  stop_if_not(grepl(pattern, name))
  name
}
|
4d99b9bff78cea5905f57e7c2e4351ccb1296c15
|
f353cccd6aa37ff29641ffd9543fa88dbc03f3d3
|
/stetige/Exponential.R
|
d8b78691db216129012247bec594f4d22656000d
|
[] |
no_license
|
oduerr/stat
|
e41afb6a094237c312c06171d26de1c5d9e99077
|
f35276735941859ba18fcfe513b47673ff4c4582
|
refs/heads/master
| 2023-06-27T10:31:55.971635
| 2023-06-12T07:05:48
| 2023-06-12T07:05:48
| 152,250,292
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47
|
r
|
Exponential.R
|
# Exponential distribution with rate 1/5000 (mean 5000).
# P(X <= 2500): value of the CDF at 2500.
pexp(2500, rate=1/5000)
# Median of the distribution (the 0.5 quantile).
qexp(0.5, rate=1/5000)
|
783e86411a55e0db7ff242aca88b787cac2e0879
|
4dc3bead007549c03ad14e589a5ebe7157ac4da0
|
/man/logLik.regional_mix.Rd
|
fa5db7c9fff06b1a4754847d6c48f6f03d14c576
|
[] |
no_license
|
skiptoniam/ecomix
|
428615e5a7fc4af8f834f129b79b3279bb9d132d
|
73f167135380f73b886ab8ce1ead2b0a4fe29e74
|
refs/heads/master
| 2023-04-08T18:52:49.673475
| 2022-05-27T01:49:47
| 2022-05-27T01:49:47
| 80,573,432
| 5
| 2
| null | 2021-01-09T05:29:32
| 2017-01-31T23:36:08
|
R
|
UTF-8
|
R
| false
| true
| 415
|
rd
|
logLik.regional_mix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regional_mix_s3-class.R
\name{logLik.regional_mix}
\alias{logLik.regional_mix}
\title{Extract log-likelihood from a regional_mix model.}
\usage{
\method{logLik}{regional_mix}(object, ...)
}
\arguments{
\item{object}{A fitted regional_mix model}
\item{\dots}{Ignored}
}
\description{
Extract log-likelihood from a regional_mix model.
}
|
c9c4116c13db90cceca29929b29de4978c018ad4
|
958b5f9aa004a0c7f70330d4cf82fd3d503bc8d4
|
/plot1.R
|
39c782c534e45bd2d26d8c0760df8fde23e00611
|
[] |
no_license
|
AthenaMarine/CreatingPlotsWithR
|
7e7dbf6b8d2df6b1a2911130ee91aec667f09e10
|
5032777c2f5db3733c21d90fc2d17072e4fd1104
|
refs/heads/master
| 2021-01-17T05:38:32.311155
| 2015-09-13T01:20:19
| 2015-09-13T01:20:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,112
|
r
|
plot1.R
|
## plot1.R - This script creates a histogram using test data
## from the UCI Machine Learning Repository on household power consumption data
## for the days February 1-2, 2007
## raw data should be downloaded and unzipped into your working directory from here:
## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
library(sqldf)
## load in the data for the first two days in February 2007
connection <- read.csv.sql("household_power_consumption.txt", sql="select * from file where Date in ('1/2/2007','2/2/2007')", header=TRUE, sep=';')
## FIX: removed `on.exit(close(connection))`.  on.exit() only takes effect
## inside a function (it is ineffective at the top level of a script), and
## `connection` is the data.frame returned by read.csv.sql(), not a file
## connection, so close() on it would error anyway.  read.csv.sql() manages
## its own temporary database/connection internally.
## transform the column datatypes from character to what is needed.
house_power <- transform(connection,
                         Date = as.Date(Date, format="%m/%d/%Y"),
                         Time = as.POSIXct(strptime(Time, format="%H:%M:%S")),
                         Global_active_power = as.numeric(Global_active_power),
                         Global_reactive_power = as.numeric(Global_reactive_power),
                         Voltage = as.numeric(Voltage),
                         Global_intensity = as.numeric(Global_intensity),
                         Sub_metering_1 = as.numeric(Sub_metering_1),
                         Sub_metering_2 = as.numeric(Sub_metering_2),
                         Sub_metering_3 = as.numeric(Sub_metering_3))
## this plot is going directly into a png file
png('plot1.png', width=480, height=480, units="px", pointsize=12, bg="white")
## create the histogram
with(house_power, hist(Global_active_power, col="red",
                       main="Global Active Power",
                       xlab="Global Active Power (kilowatts)",
                       axes=FALSE))
# adjust the axes labels
axis(side=1, at=c(0,2,4,6), labels=c("0","2","4","6"))
axis(side=2, at=c(0,200,400,600,800,1000, 1200), labels=c("0","200","400","600","800","1000","1200"))
# close the png file so you can view it
dev.off()
# clean up temporary data
rm(house_power)
rm(connection)
|
a59db5d9599a9ab5780561063f0d60ff7915a015
|
2cd2c95d90ef8eef58f415112a80899bd2d7dca9
|
/tests/TWQ-savedvalues.R
|
f225bfc958effa86970e4d2a3e0c0194b41546c3
|
[] |
no_license
|
cran/robustloggamma
|
c945aed6f108395fd347e805fc6137170652ceb7
|
888332487b843ca3dc3c73b82c6e748ec7ac490d
|
refs/heads/master
| 2021-05-16T02:27:17.930624
| 2019-04-22T05:59:44
| 2019-04-22T05:59:44
| 17,699,306
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253
|
r
|
TWQ-savedvalues.R
|
# Saved reference values for regression testing: estimated (mu, sigma, lambda)
# parameters for the TQtau and TWQtau estimators.
# NOTE(review): this file is a bare R expression; comments are harmless if it
# is read with source()/dget(), but confirm it is not parsed line-by-line.
structure(list(TQtau = structure(c(2.08212123663978, 1.76545564459274,
1.19), .Names = c("mu", "sigma", "lambda")), TWQtau = structure(c(2.05574667510975,
1.82264903228846, 1.12), .Names = c("mu", "sigma", "lambda"))), .Names = c("TQtau",
"TWQtau"))
|
b15bd49cdce712e4ce4578b0dc98a3fc901132f3
|
618679439003f1173700c9cabf62988090f04eb2
|
/man/detection_rate.Rd
|
787ab0355171079976e719a3706a8622be357d8f
|
[] |
no_license
|
Stefanos-Apostle/SeSAMeStr
|
7ecd44ec12215c8d2247183ac1d2e675ff641f48
|
e14b581e05fe9adec2da44df1372f23e97de7245
|
refs/heads/main
| 2023-04-19T05:14:35.239874
| 2023-03-24T17:25:51
| 2023-03-24T17:25:51
| 529,333,654
| 1
| 1
| null | 2023-01-19T03:52:48
| 2022-08-26T16:39:15
|
R
|
UTF-8
|
R
| false
| true
| 443
|
rd
|
detection_rate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SeSAMeStr_utils.R
\name{detection_rate}
\alias{detection_rate}
\title{Plot Detection Rate}
\usage{
detection_rate(sdfs, out_dir)
}
\arguments{
\item{sdfs}{list of data frames of intensity values for each replicate; output of get_sdfs}
\item{out_dir}{Path to output directory}
}
\value{
Saves plot to /QC subdir
}
\description{
Function to plot detection rate QC
}
|
0269876b3a231d080b36c22fb555a685bfca90e4
|
4e5934e0c2703813c85befb735b3d91ebd33730d
|
/R/1_GetHMDData_Italy.R
|
897d631e0fac064bdbf0e82c4d6b92908f1b7131
|
[] |
no_license
|
jmaburto/N-IUSSP-Lifespan-Inequality
|
0c51248307192f497b006ba9590ce43ba2a13d86
|
819313e22690d7f1d77de43c9551dd628234ea9f
|
refs/heads/master
| 2021-05-22T13:35:05.620727
| 2020-07-02T12:47:47
| 2020-07-02T12:47:47
| 252,948,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
1_GetHMDData_Italy.R
|
####### Program for getting most recent data from HMD
############# Written by JMA, thanks to Tim Riffe
############# 20/07/2019
library(HMDHFDplus)
library(data.table)
# Set working directory
# get all countries in HMD
XYZ <- getHMDcountries()
# set your username for HMD
# SECURITY NOTE(review): HMD credentials are hard-coded in plain text below
# and committed to version control.  They should be read from environment
# variables (e.g. Sys.getenv("HMD_USER") / Sys.getenv("HMD_PASS")) and the
# committed password rotated.
us <- "jmaburto@colmex.mx"
# set your password
pw <- "kolmogorov"
# get all the lifetables available from HMD: for each country, download the
# male and female 1x1 period lifetables, tag sex and country, and stack them.
HMDL <- do.call(rbind,lapply(XYZ, function(x, us, pw){
  cat(x,"\n")
  Males <- readHMDweb(x,"mltper_1x1",username=us,password=pw)
  Females <- readHMDweb(x,"fltper_1x1",username=us,password=pw)
  Males$Sex <- "m"
  Females$Sex <- "f"
  CTRY <- rbind(Females, Males)
  CTRY$PopName <- x
  CTRY
}, us = us, pw = pw))
# convert to data.table
HMDL <- data.table(HMDL)
# save the data
save(HMDL,file="R/Data/HMD_Data.RData")
# get 1-1 lifetables for Italian females
# NOTE(review): XYZ[26] selects Italy by position in the country list, which
# is fragile if HMD adds countries -- confirm, or select by code "ITA".
LT.ITA.1 <- do.call(rbind,lapply(XYZ[26], function(x, us, pw){
  cat(x,"\n")
  Females <- readHMDweb(x,"fltper_1x1",username=us,password=pw)
  Females$Sex <- "f"
  Females$PopName <- x
  Females
}, us = us, pw = pw))
LT.ITA.1 <- data.table(LT.ITA.1)
# get 1-10 lifetables for Italian females (original comment said "Swedish",
# but the code uses the same XYZ[26] index and ITA naming)
LT.ITA.10 <- do.call(rbind,lapply(XYZ[26], function(x, us, pw){
  cat(x,"\n")
  Females <- readHMDweb(x,"fltper_1x10",username=us,password=pw)
  Females$Sex <- "f"
  Females$PopName <- x
  Females
}, us = us, pw = pw))
LT.ITA.10 <- data.table(LT.ITA.10)
# Rescale columns 6-9 from the lifetable radix of 100000 to proportions.
LT.ITA.1[,6:9] <- LT.ITA.1[,6:9]/100000
LT.ITA.10[,6:9] <- LT.ITA.10[,6:9]/100000
save(LT.ITA.1,LT.ITA.10,file="R/Data/Italy_HMD.RData")
|
bd1d8a26c7d8a717dcc4e87b58c06295d6807a66
|
57222f96e553dd2802316928f2f2c7825ef05197
|
/115-bookmarking-updatequerystring/app.R
|
e86fef0a6ec20af4906099db3bf1a36ef4fea0f4
|
[
"MIT"
] |
permissive
|
rstudio/shiny-examples
|
6815bb4d8198e4b90765926a4865fdef1d1dc935
|
c7bf00db4a8a68e579e39ed07d516a33661a853e
|
refs/heads/main
| 2023-08-17T16:35:03.411795
| 2023-08-03T19:51:30
| 2023-08-03T19:51:30
| 13,722,949
| 2,046
| 4,576
|
NOASSERTION
| 2023-08-03T19:51:31
| 2013-10-20T17:05:23
|
JavaScript
|
UTF-8
|
R
| false
| false
| 403
|
r
|
app.R
|
# Shiny app demonstrating URL bookmarking: every input change triggers
# session$doBookmark(), and the bookmarked state is written back into the
# browser's query string so the URL always reflects the current inputs.
# The UI must be a function of `req` for bookmark restoration to work.
ui <- function(req) {
  fluidPage(
    textInput("txt", "Text"),
    checkboxInput("chk", "Checkbox")
  )
}
server <- function(input, output, session) {
  observe({
    # Trigger this observer every time an input changes
    reactiveValuesToList(input)
    session$doBookmark()
  })
  # After each bookmark completes, push the bookmark URL into the query string.
  onBookmarked(function(url) {
    updateQueryString(url)
  })
}
# Store bookmark state in the URL itself (no server-side persistence).
enableBookmarking(store = "url")
shinyApp(ui, server)
|
45683a54d101f14f9fe4683928c6d0e456a124d6
|
50a705f2063721b64eeac8b1906e16bfe5deb26a
|
/clustering/GiniClust2/Rfunction/PreProcess_and_filter_for_10X_full.R
|
6e4630d0cfb41b6ddd41c786197c3b30e9ef3d85
|
[
"MIT"
] |
permissive
|
xianingz/scRNAseqBenchmark_Clustering
|
5a7ee9d66bd18d87087c3d6df8ca7d85660191b5
|
845451743dc76ab0cc9106ad98551b80a0c70012
|
refs/heads/master
| 2020-04-29T18:09:46.732009
| 2019-08-09T16:08:24
| 2019-08-09T16:08:24
| 176,316,263
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 759
|
r
|
PreProcess_and_filter_for_10X_full.R
|
#Preprocess and filter 10X data using 10X code: https://github.com/10XGenomics/single-cell-3prime-paper
wd<-"single-cell-3prime-paper-master/pbmc68k_analysis/"
source(paste(wd,file.path('util.R'),sep=""))
pbmc_68k <- readRDS(paste("data/",file.path('pbmc68k_data.rds'),sep=""))
all_data <- pbmc_68k$all_data
m<-all_data[[1]]$hg19$mat
genes<-all_data[[1]]$hg19$genes
ExprM.RawCounts<-as.matrix(t(m))
rownames(ExprM.RawCounts)<-genes
l<-.normalize_by_umi(m)
genes_used<-genes[l$use_genes]
ExprM.RawCounts.filter<-as.matrix(t(l$m))
rownames(ExprM.RawCounts.filter)<-genes_used
save(ExprM.RawCounts, file=paste("results/",exprimentID, "_ExprM.RData",sep=""))
save(ExprM.RawCounts.filter, file=paste("results/", exprimentID, "_ExprM.filter.RData", sep=""))
|
f76f8a9c10a654f67180fdf01802401491e544fd
|
ba2845eadc8880147e906ab727d322d875226efa
|
/Analyses/source/stanprep_gddmods.R
|
65c867f9c2cad1962f76e36666f069ec2104e53f
|
[] |
no_license
|
AileneKane/radcliffe
|
80e52e7260195a237646e499bf4e3dad4af55330
|
182cd194814e46785d38230027610ea9a499b7e8
|
refs/heads/master
| 2023-04-27T19:55:13.285880
| 2023-04-19T15:15:02
| 2023-04-19T15:15:02
| 49,010,639
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,409
|
r
|
stanprep_gddmods.R
|
expgdd_bb<-expgdd_gdd[expgdd_gdd$event=="bbd",]
expgdd_lo<-expgdd_gdd[expgdd_gdd$event=="lod",]
expgdd_fl<-expgdd_gdd[expgdd_gdd$event=="ffd",]
datalist.gddbb <- with(expgdd_bb,
list(y = cumgdd_air,
mois = soilmois_janmar, #soil moisture
sp = genus.species,
site = site,
year = styear,
N = nrow(expgdd_bbd),
n_sp = length(unique(expgdd_bbd$genus.species))
)
)
datalist.gddlo<- with(expgdd_lo,
list(y = cumgdd_air,
mois = soilmois_aprjun, #soil moisture
sp = genus.species,
site = site,
year = year,
N = nrow(expgdd_lo),
n_sp = length(unique(expgdd_lo$genus.species))
)
)
datalist.gddfl <- with(expgdd_fl,
list(y = cumgdd_air,
mois = sm, #soil moisture
sp = genus.species,
site = site,
year = year,
N = nrow(expgdd_fL),
n_sp = length(unique(expgdd_fL$genus.species))
)
)
|
c4814a905c999b97c575ebfe4d4196e3eb58b800
|
334145f4753d39c1024d6e4f256d30ee50fe657e
|
/man/LPDistance.Rd
|
ad8c21a701936fe53da814d682f0b0b3cfb692f2
|
[] |
no_license
|
cran/TSdist
|
6aaefaefd78c37fbfb07efe164cdb44c19fc2f53
|
d28f6f0c3aa4c5004caf33724b5c7fc064846553
|
refs/heads/master
| 2022-09-15T21:13:30.275246
| 2022-08-31T08:40:02
| 2022-08-31T08:40:02
| 19,747,325
| 5
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,665
|
rd
|
LPDistance.Rd
|
\name{LPDistance}
\alias{LPDistance}
\title{
Lp distances.
}
\description{
Computes the distance based on the chosen Lp norm between a pair of numeric vectors.
}
\usage{
LPDistance(x, y, method="euclidean", ...)
}
\arguments{
\item{x}{
Numeric vector containing the first time series.
}
\item{y}{
Numeric vector containing the second time series.
}
\item{method}{
A value in "euclidean", "manhattan", "infnorm", "minkowski".
}
\item{...}{
If method="minkowski" a positive integer value must be specified for \code{p}.
}
}
\details{
The distances based on Lp norms are computed between
two numeric vectors using the following formulas:
Euclidean distance: \eqn{\sqrt{(x_i-y_i)^2)}}
Manhattan distance: \eqn{\sum{|x_i-y_i|}}
Infinite norm distance: \eqn{\max{|x_i-y_i|}}
Minkowski distance: \eqn{\sqrt[p]{(x_i-y_i)^p)}}
The two series must have the same length. Furthermore, in the case of the Minkowski distance, \code{p} must be specified as a positive integer value.
%\bold{Note:} These distance measures are all metrics. This means that they can be used directly within kernel machines, for example by inserting them in the Gaussian RBF kernel (Lei and Sun, 2007), because they will provide positive definite Gram matrices.
}
\value{
\item{d}{
The computed distance between the pair of series.
}
}
\author{
Usue Mori, Alexander Mendiburu, Jose A. Lozano.
}
\seealso{
These distances are also implemeted in separate functions. For more information see \code{\link{EuclideanDistance}}, \code{\link{ManhattanDistance}}, \code{\link{MinkowskiDistance}}
and \code{\link{InfNormDistance}}
To calculate this distance measure using \code{ts}, \code{zoo} or \code{xts} objects see \code{\link{TSDistances}}. To calculate distance matrices of time series databases using this measure see \code{\link{TSDatabaseDistances}}.
}
\examples{
# The objects example.series1 and example.series2 are two
# numeric series of length 100 contained in the TSdist package.
data(example.series1)
data(example.series2)
# For information on their generation and shape see help
# page of example.series.
help(example.series)
# Compute the different Lp distances
# Euclidean distance
LPDistance(example.series1, example.series2, method="euclidean")
# Manhattan distance
LPDistance(example.series1, example.series2, method="manhattan")
# Infinite norm distance
LPDistance(example.series1, example.series2, method="infnorm")
# Minkowski distance with p=3.
LPDistance(example.series1, example.series2, method="minkowski", p=3)
}
|
e5ac278c4ab4482240a3b5cc9071bd2282dd74ed
|
994b7b7d412e48848ebf4494de810745595caab2
|
/R/incrementor.R
|
002d4ecf1fe725ab6acecb3f0adf5022e68ac4f3
|
[
"MIT"
] |
permissive
|
cderv/sortable
|
b24f1e8b798f74322cc6fb194411982923178efb
|
621ac4151c7e8fd122439ac2c076049a71c7af5b
|
refs/heads/master
| 2020-09-20T09:09:58.313527
| 2019-11-20T23:44:19
| 2019-11-20T23:44:19
| 224,432,931
| 0
| 0
|
NOASSERTION
| 2019-11-27T13:05:02
| 2019-11-27T13:05:01
| null |
UTF-8
|
R
| false
| false
| 347
|
r
|
incrementor.R
|
incrementor <- function(prefix = "increment_"){
i <- 0
function(){
i <<- i + 1
paste0(prefix, i)
}
}
increment_rank_list <- incrementor("rank_list_id_")
increment_bucket_list <- incrementor("bucket_list_id_")
increment_bucket_group <- incrementor("bucket_group_")
increment_rank_list_input_id <- incrementor("rank_list_shiny_")
|
ba09fa6e5610787c367c6a7aad6014558fa309d7
|
fe8830fa8d36b7484da141ed224160b843f5026f
|
/man/ggbootMV.Rd
|
d4d2042a966d6963666350f15c5c2f45014fc4e8
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MiguelRodo/ggboot
|
6d70ddf743337ac19ab890e0579b59011ccafc1b
|
fa721e57e18e9da9c95219782fb9e96ca84e0d14
|
refs/heads/master
| 2020-03-29T19:57:55.298443
| 2019-02-22T07:07:21
| 2019-02-22T14:05:25
| 131,585,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,556
|
rd
|
ggbootMV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggbootMV.R
\name{ggbootMV}
\alias{ggbootMV}
\title{mv med plot info func}
\usage{
ggbootMV(data, group, B, seed = NULL, comp = 1:2, locType = "mean",
scale = FALSE, checkPlot = FALSE, dispPlot = FALSE, labelVec = NULL,
legendTitle = "Group", colVec = c("darkorchid1", "springgreen2",
"maroon1", "dodgerblue", "red", "yellow3", "cyan2", "orange2"),
addOrigPos = TRUE, pcaAxesLabSize = 2.5, origPosLabSize = 2,
axes = TRUE, points = TRUE, pointAlpha = 1, pointSize = 1,
ellipse = TRUE, ellAlpha = 1, ellSize = 1, quant = FALSE,
quantAlpha = 1, quantSize = 1, density = FALSE, densAlpha = 1,
densSize = 1, boot = TRUE, save = FALSE, name = NULL, env = NULL,
path = NULL, textAdjVal = 0.2, axesLenRatio = NULL, qualCutoff = 0,
selAxisLab = NULL, arrow = FALSE, arrowSize = 2, fontScaleFactor = 1,
trim = 0.2)
}
\arguments{
\item{data}{dataframe. In wide format. Only numeric columns will be used.}
\item{group}{vector. Vector indicating group membership for each observation in \code{data}.
Will be coerced to a character vector.}
\item{B}{integer. Number of bootstrap samples per subgroup.}
\item{comp}{integer vector. Specifies the principal components
whose proportion of variation must be returned. Defaults to 1:2.}
\item{locType}{character. Specifies measure of location to use. Options are
mean, cmwed (column-wise median), oja, weisz (geometric median) and
gmed (a fast version for the geometric median).}
\item{pointSize}{numeric. Size of plot points}
\item{quant}{logical. If \code{TRUE}, then univariate 95 percent percentile bootstrap confidence
intervals are plotted.}
\item{boot}{logical. If TRUE, then bootstrap is re-performed. If FALSE, then
bootstrap values are taken from object with name \code{name} in environment \code{bootList}.}
\item{save}{logical. If \code{TRUE} and \code{boot=TRUE}, then bootList is}
\item{name}{character. Name of object in \code{bootList} to add to or take to. Must not be
NULL if \code{save=TRUE}.}
\item{env}{environment. Environment to save bootstrapped values to.}
\item{path}{character. Path of to save bootstrapped values to, or load bootstrapped values from.}
\item{textAdjVal}{numeric. Value that controls the degree to which the axis labels are shifted from
the endpoints of the axes. Higher values move the axis further right and up.}
\item{axesLenRatio}{numeric. If not NULL, then the ratio of the length of the y-axis divided by the
x-axis is made equal to \code{axesLenRatio}, where length is defined as the difference between the maximum and the
minimum value plotted along that axis. This is done by stretching the shorter axis around its mean. Useful
for making the axes of equal length.}
\item{qualCutoff}{numberic. The minimum sum of the axis predictivity across the principal components selected (by \code{comp}) required for the axis of a variable to be plotted. Only works if \code{arrow=FALSE} (this will change in future).}
\item{selAxisLab}{character vector. Names of columns in \code{data} to print.}
\item{arrow}{logical. If TRUE, then arrows instead of lines are plotted for the axes. The arrows point from
the origin to the largest observed value for its variable on the biplot axis.}
\item{arrowSize}{numeric. Size of the arrows for \code{ggplot2} to use.}
\item{fontScaleFactor}{numeric. Ratio by which to multiply the default font size.}
}
\description{
x
PCA biplot with bootstrap confidence areas.
}
\details{
Plot a PCA biplot with bootstrap confidence ares.
}
|
bd0a92e1d63f3fa444700d165223d123a77aaadd
|
8ff9d6461858f60280e4cc9814cb4febf6fdd608
|
/Plot1.R
|
b9089c8c0b03959bf7ffb686e716001a6b2bb1a8
|
[] |
no_license
|
aditi2345/ExData_Plotting1
|
a38061fcb617de8500363966dcaa281ca33d2735
|
923f8bcb509413ce6f746e72074adf9371504219
|
refs/heads/master
| 2022-07-18T21:01:48.092293
| 2020-05-17T15:49:36
| 2020-05-17T15:49:36
| 264,693,536
| 0
| 0
| null | 2020-05-17T15:07:19
| 2020-05-17T15:07:19
| null |
UTF-8
|
R
| false
| false
| 102
|
r
|
Plot1.R
|
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
|
800fc8d8c0c92fcbc41f8046bc837f7647f60ba2
|
549d00ef71da6563a9493dbc9dfb5c1b0332948d
|
/man/anysearchTwitter.Rd
|
6ace0140a205de0f61330b614d438072f5a60c0d
|
[] |
no_license
|
dusty-turner/puertoRicoR
|
9e419577976fb3f1e4163056f17015225f1fd9f7
|
ee082b777e878059eac0a53e16e5301c67a16e9f
|
refs/heads/master
| 2021-09-15T09:28:22.241889
| 2018-05-29T21:30:25
| 2018-05-29T21:30:25
| 108,535,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 858
|
rd
|
anysearchTwitter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anyseaRchtwitter.R
\name{anysearchTwitter}
\alias{anysearchTwitter}
\title{Any Search Function}
\usage{
anysearchTwitter(searchterm = "Anywhere", n = 1e+05, since = "2017-10-25",
until = "2017-10-26",
olddataname = "Puerto Rico 23 Sept - 25 OCT best.csv",
newdataname = "Puerto Rico 23 Sept - 26 OCT best.csv")
}
\arguments{
\item{searchterm}{"Anywhere"}
\item{n}{"100000"}
\item{since}{'2017-10-25'}
\item{until}{'2017-10-26'}
\item{olddataname}{"Puerto Rico 23 Sept - 25 OCT best.csv"}
\item{newdataname}{"Puerto Rico 23 Sept - 26 OCT best.csv"}
}
\description{
anysearchTwitter: This function scrapes twitter for the specified keyword, appends it to your dataset, and saves an updated CSV to the working directory
}
\examples{
anysearchTwitter()
}
\keyword{Twitter}
|
abc11410a8b326c1f2e71c6060fe333f3ff8125e
|
96b809c3a2961bb76e0c9291b7b18e7b3a5f42c3
|
/hw4/read_graph.R
|
4b792d0132d939664100f953aa1d7686f7bc0675
|
[] |
no_license
|
ra125/STA-523
|
140c4cc0fb3a3785d8402ff051d3ecca9c5b76a7
|
15b1a2fb963797999f9a7fc839bff86f5cdc8be4
|
refs/heads/master
| 2021-01-10T04:54:26.213037
| 2014-11-24T22:15:22
| 2014-11-24T22:15:22
| 44,842,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,498
|
r
|
read_graph.R
|
#1. Read a DOT language of graph data from a txt file
#2. Once the format is not ritht, report error
#3. If not weight for every line, asigned default weight to every edge
#4. Return an object of the graph
library(stringr)
read_graph=function(filename)
{
gdata<-read.table(filename, sep="\n", quote="")
gdata<-unlist(gdata)
#given a vector of characters, return NA if all NA; return the str
strv_na<-function(strv)
{
n=length(strv)
for (i in 1:n)
{
if (!is.na(strv[i])) return(strv[i])
}
return(NA)
}
#
fromnode=rep(NA,length(gdata))
tonode=rep(NA,length(gdata))
weight=rep(NA,length(gdata))
for (i in 1:length(gdata))
{
#style of edge
#style of from node
styfnode1<-str_match(gdata[i],"^([[:alnum:]]+) -> ")[2]
styfnode2<-str_match(gdata[i],"^\"([[:alnum:] ]+)\" -> ")[2]
#style of to node without weight
stytnode1<-str_match(gdata[i]," -> ([[:alnum:]]+) [[]+weight=[[:digit:]e+]+[]]+;$")[2]
stytnode2<-str_match(gdata[i]," -> \"([[:alnum:] ]+)\" [[]weight=[[:digit:]e+]+[]];$")[2]
#style of to node with weight
stytnode3<-str_match(gdata[i]," -> ([[:alnum:]]+);$")[2]
stytnode4<-str_match(gdata[i]," -> \"([[:alnum:] ]+)\";$")[2]
#style of single node
stysnode1<-str_match(gdata[i],"^([[:alnum:]]+);$")[2]
stysnode2<-str_match(gdata[i],"^\"([[:alnum:] ]+)\";$")[2]
fnode<-strv_na(c(styfnode1,styfnode2))
tnode<-strv_na(c(stytnode1,stytnode2,stytnode3,stytnode4))
snode<-strv_na(c(stysnode1,stysnode2))
if (!is.na(fnode) & !is.na(tnode))
{
fromnode[i]=fnode
tonode[i]=tnode
if (is.na(str_match(gdata[i]," [[]weight=([[:digit:]e+]+)[]];$")[2]))
{weight[i]=1} else
{weight[i]=str_match(gdata[i]," [[]weight=([[:digit:]e+]+)[]];$")[2]}
} else
{
if (!is.na(snode))
{
fromnode[i]=snode
# tonode[i]=integer(0)
# weight[i]=numeric(0)
} else
{
stop("The graph file is not valid!")
}
}
}
allnode<-sort(unique(c(fromnode,tonode)))
nnode<-length(allnode)
gobj<-rep(list(list("edges"=integer(0),"weights"=numeric(0))),nnode)
for (i in 1:nnode)
{
names(gobj)[i]=allnode[i]
}
for (i in 1:length(gdata))
{
if (!is.na(tonode[i]))
{
gobj[[fromnode[i]]]$edges=c(gobj[[fromnode[i]]]$edges,(1:nnode)[allnode[]==tonode[i]])
gobj[[fromnode[i]]]$weights=c(gobj[[fromnode[i]]]$weights,as.numeric(weight[i]))
}
}
return(gobj)
}
|
546ca7cd22d01335bd6811891c462a77bba606e0
|
6f0c07f057490adf1cd74c7048f9709996f3cf76
|
/man/plotStats.Rd
|
9947425ce8cba72ebd3d317969a5e70c85e14eea
|
[] |
no_license
|
Al-Stu/seaStackR
|
2da48afa65e25193c23abae56fcdfc9238f3a1be
|
69899c41d048b50bf08869fd9c88078c8e7920c0
|
refs/heads/master
| 2023-01-13T14:27:27.988011
| 2020-11-20T14:17:48
| 2020-11-20T14:17:48
| 302,414,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,070
|
rd
|
plotStats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seaStackR_stats_functions.R
\name{plotStats}
\alias{plotStats}
\title{Add summary statistics to a plot}
\usage{
plotStats(
plot,
SD_fill = "grey30",
SD_colour = NA,
SD_size = NULL,
CI_colour = "red",
CI_size = 2,
CI_width = 1,
show_CI = T,
show_SD = T,
averages_point_size = 3,
mean_shape = 23,
mean_fill = "white",
mean_colour = "black",
mean_stroke = 0.8,
median_shape = 21,
median_fill = "black",
median_colour = "black",
show_mean = T,
show_median = T,
averages_opacity = 0.8,
df_stats = NULL,
confidence_interval = 0.95
)
}
\arguments{
\item{plot}{a histogram or ridge plot ggplot item}
\item{SD_fill}{character string specifying the fill for the standard deviation rectangle}
\item{SD_colour}{character string specifying the colour for the standard deviation rectangle}
\item{SD_size}{height of standard deviation rectangle, gets set to a tenth of the
height of the tallest bin unless specified. NOTE: this does not work on data that
has been processed before being plotted}
\item{CI_colour}{character string specifying the colour for the confidence interval lines}
\item{show_CI}{logical, if false will not plot confidence interval marks, defaults to TRUE}
\item{show_SD}{logical, if false will not plot standard deviation rectangle, defaults to TRUE}
\item{averages_point_size}{point size for the mean, median will be 20 percent smaller, defaults to 3}
\item{mean_shape}{point shape for the mean, defaults to 23 (a diamond)}
\item{mean_fill}{the fill colour for the mean, defaults to 'white'}
\item{mean_colour}{outline colour for the mean, defaults to 'black'}
\item{mean_stroke}{size of the outer line of the symbol for the mean, defaults to 0.8}
\item{median_shape}{point shape for the median, defaults to 21 (a circle)}
\item{median_fill}{the fill colour for the median, defaults to 'black'}
\item{median_colour}{outline colour for the median, defaults to 'black'}
\item{show_mean}{logical, false if the mean is not to be added to the plot, defaults to TRUE}
\item{show_median}{logical, false if the median is not to be added to the plot, defaults to TRUE}
\item{averages_opacity}{alpha value for the mean and median points, numeric between 0 and 1, defaults to 0.8}
\item{df_stats}{summary statistics for plot, if NULL (default) will calculate from data behind
plot. NOTE: this does not work on data that has been processed before being plotted}
\item{confidence_interval}{desired size of the confidence interval, value between 0 and 1 e.g. 0.95 for 95\% (default)}
}
\value{
a ggplot list with original plot and chosen stats added
}
\description{
Add mean, median, standard deviation and/or confidence intervals to a histogram,
ridge plot, density plot, boxplot, violin plot or sea stack plot. NOTE: main plot
must be first layer of \code{plot} for this to work unless summary statistics from
\code{\link{summaryStats}} are added as parameter \code{df_stats}
}
\examples{
plotStats(insect_sprays_formatted_plot)
}
|
48dbecc603861e7861fd3dc9ad92914de73c94c0
|
bffd95e4ee6d169caa6687e18b22611550c8df93
|
/man/cbind.tis.Rd
|
d8fec36cb93ae5c96b68fee0f119565c42f283e0
|
[] |
no_license
|
cran/tis
|
286eb68a7aeb0be636a07babcb16a362d62aa5f2
|
f254c391711e5cbdc4deba8ea68c2a234317d4bd
|
refs/heads/master
| 2021-10-13T14:19:03.631652
| 2021-09-28T18:50:02
| 2021-09-28T18:50:02
| 17,700,509
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,881
|
rd
|
cbind.tis.Rd
|
\name{cbind.tis}
\alias{cbind.tis}
\title{ Combine Series Into a Multivariate (Matrix) Time Indexed Series }
\description{
This is \code{cbind} for \code{tis} objects. It binds several
\code{ts} and \code{tis} objects together into a single matrix time
indexed series.
}
\usage{
\method{cbind}{tis}(\dots, union = F)
}
\arguments{
\item{\dots}{
any number of univariate or multivariate \code{ts} or \code{tis}
objects. All will be converted to \code{tis} objects by
\code{as.tis}, and the result series all must have the same
\code{tif} (time index frequency).
}
\item{union}{
a logical. If \code{union} = F, a matrix created by the
intersection of the time windows for the arguments will be created.
If \code{union} = T, the union of the time windows will be used to
create the matrix.
}
}
\details{
If \code{union} is \code{TRUE} and the series in \dots do not all
start and end on the same time index, the missing observations are
filled with \code{NA}.
The column names of the returned series are determined as follows:
If an argument was given a name in the call, the corresponding column
has that name. If the argument was itself a matrix with column names,
those will be used, otherwise the argument's name is expanded with
digits denoting its respective columns.
}
\value{
a multivariate \code{tis} object.
}
\note{
Class \code{"ts"} has it's own \code{cbind} method which knows nothing
about \code{tis} objects. R generic functions like \code{cbind}
dispatch on the class of their first argument, so if you want to
combine \code{tis} and \code{ts} objects by calling the generic
\code{cbind}, be sure that the first argument is a \code{tis}, not a
\code{ts}. You can always ensure this is the case by wrapping the
first argument in \dots in \code{as.tis()}.
}
\seealso{ \code{\link{cbind}}}
\keyword{ ts }
|
ba65dddd3ce0d9c173cc3615c81cf1265da60999
|
c1a2b48b6e9b7ed98750646b9de2d28ff68651c0
|
/man/ApiCall.Rd
|
54466fdc6681896547bd2caadfff7c0a18502091
|
[] |
no_license
|
shekaralle/SampleAPIPackageR
|
446ca1092b6e15794e4d26bccb5a03c047e163db
|
635d81e1f31a59bbd6fb9c8de3d5074495f9c717
|
refs/heads/master
| 2022-12-13T10:40:21.627531
| 2020-09-11T07:13:52
| 2020-09-11T07:13:52
| 294,166,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
rd
|
ApiCall.Rd
|
\name{ApiCall}
\alias{ApiCall}
\title{Api Call}
\usage{
ApiCall
}
\description{
This function will call an API using GET and will display data if the request was successful with status code, and if the request was not successful it will show the error status code 404
}
\examples{
ApiCall("http://api.open-notify.org/astros.json")
}
|
68babfc1b77222159494c5313df19d5a870ec6c9
|
607b31d18cd361c331135771e4ce2d796dfc16c4
|
/man/processVEP.Rd
|
49b0368be241f4a9df852e6980022bd02daee289
|
[] |
no_license
|
mmm84766/slimR
|
66f44045ab4ac1ebeaa13a8644ca379e6ca172f9
|
6c9d268576c11af27c5b98a5cad5a08ed6cd7253
|
refs/heads/master
| 2020-03-22T15:44:52.494556
| 2017-08-29T13:56:12
| 2017-08-30T09:17:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
processVEP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getVariationData.R
\name{processVEP}
\alias{processVEP}
\title{processVEP}
\usage{
processVEP(vepFilePath, overwriteRDS = FALSE, nodeN = 4)
}
\arguments{
\item{vepFilePath}{path to the VEP results obtained from running
variant_effect_predictor on the given vcfFilePath}
\item{nodeN}{(default: 4) Number of cores to use for parallel processing}
}
\value{
A data.table object
}
\description{
This function processes the output of Variant Effect Predictor to select
missense variants and create some columns that are useful to assess the
pathogenicity of variants
}
|
63919128f45a7e3ff15aa959d03151c085f578df
|
a25dd3919b75d287c0c7573c3f1ad734fba137ae
|
/man/markdown_notes.Rd
|
bb169c1769c87868f727f89c409a4ff5985a8e0a
|
[
"MIT"
] |
permissive
|
danielkovtun/shinyNotes
|
f9beda359987fd2582103b01bcddc695861d92e7
|
18007c347029563408f6f01b675de12f4edea580
|
refs/heads/master
| 2023-02-20T07:56:29.108599
| 2023-02-19T17:35:43
| 2023-02-19T17:35:43
| 229,818,392
| 6
| 0
|
NOASSERTION
| 2023-02-19T17:35:44
| 2019-12-23T20:27:32
|
R
|
UTF-8
|
R
| false
| true
| 718
|
rd
|
markdown_notes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{markdown_notes}
\alias{markdown_notes}
\title{Demo notes formatted with markdown for testing \code{shinynote} module.}
\format{
A \code{tibble} with 3 rows and 3 variables:
\describe{
\item{formatting}{text format type, character class}
\item{category}{type of markdown formatter, character class}
\item{update}{text with markdown syntax, character class}
...
}
}
\usage{
markdown_notes
}
\description{
A dataset containing examples of markdown syntax for including emojis, headers, and code blocks.
Formatted in a structure compatible with the \code{shinyNotes::shinynotes} module.
}
\keyword{datasets}
|
122f1b8722871e0b68d2cdcfb9cc6b8677fd031e
|
da572721f12154f19c7adc42da4ef768da411a8e
|
/tarea2.R
|
43a76afc11cc0e449d1e138e2f1147445039305b
|
[] |
no_license
|
MichaelMobius/tarea_R
|
fd9a5f5006ea66cd62f769a09d4211f23287f30a
|
81d42d58ac02c275779eb75f747346848781e54b
|
refs/heads/master
| 2020-06-25T20:55:47.962534
| 2017-07-13T00:00:00
| 2017-07-13T00:00:00
| 96,988,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,286
|
r
|
tarea2.R
|
tsunami_data <- read.csv("tsevent.csv",stringsAsFactors=FALSE)
eruption_data <- read.csv("volerup.csv", stringsAsFactors=FALSE)
str(tsunami_data)
str(eruption_data)
tsunami_filter<-tsunami_data[!(is.na(tsunami_filter$LATITUDE)|tsunami_filter$LATITUDE==""), ]
tsunami_filter<-tsunami_filter[!(is.na(tsunami_filter$LONGITUDE)|tsunami_filter$LONGITUDE==""), ]
eruption_filter<-eruption_data[!(is.na(eruption_filter$Latitude)|eruption_filter$Latitude==""), ]
eruption_filter<-eruption_filter[!(is.na(eruption_filter$Longitude)|eruption_filter$Longitude==""), ]
library(leaflet)
library(shiny)
shinyApp(
ui = fluidPage(
tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
leafletOutput("MapPlot1",height=700)
),
server = function(input, output){
output$MapPlot1 <- renderLeaflet({
leaflet() %>%
addProviderTiles("OpenTopoMap", group="background 1") %>%
addProviderTiles("Esri.WorldStreetMap", group="background 2") %>%
setView(lng = -100, lat = 50, zoom = 2) %>%
addLegend("bottomright",
colors =c("red", "blue"),
labels= c("erupciones", "tsunamis"),
title= "Desastres naturales",
opacity = 1)
})
observe({
#age <- input$time
sites <- tsunami_filter
#filter(findInterval(tsunami_filter$YEAR, c(age - 0.5, age + 0.5)) == 1)
sites2 <- eruption_filter
#filter(findInterval(eruption_filter$Year, c(age - 0.5, age + 0.5)) == 1)
proxy <- leafletProxy("MapPlot1")
proxy %>% clearMarkers() %>%
addCircleMarkers(lng = sites$LONGITUDE,
lat = sites$LATITUDE,
opacity = 0.1,radius=8,color="black",fillColor="blue",stroke = FALSE, fillOpacity = 0.8,group="Tsunamis") %>%
addCircleMarkers(lng = sites2$Longitude,
lat = sites2$Latitude,
opacity = 0.1,radius=8,color="black",fillColor="red",stroke = FALSE, fillOpacity = 0.8,group="Erupciones") %>%
addLayersControl(overlayGroups = c("Tsunamis","Erupciones") , baseGroups = c("background 1","background 2"), options = layersControlOptions(collapsed = FALSE))
})
}
)
|
c0259d3935113a03f949b02000b6b7fa5a92cad6
|
aac79aca9c8d7d5cfc09986673f3212bea1a0721
|
/cachematrix.R
|
edfee1db16bb54ebe186c1e9ff3e838f770962c4
|
[] |
no_license
|
JonathanYde/ProgrammingAssignment2
|
b37fbc9e035549dad68ff47daec2977906860fb9
|
58af35577b1cb91cc84838789058d9262f79fb54
|
refs/heads/master
| 2021-05-18T20:40:02.350924
| 2020-03-30T19:57:20
| 2020-03-30T19:57:20
| 251,409,973
| 0
| 0
| null | 2020-03-30T19:38:04
| 2020-03-30T19:38:03
| null |
UTF-8
|
R
| false
| false
| 2,546
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## I am to write two functions.
#The first function stores a matrix and calculates its inverse matrix.
#The second function either
# 1) calls the solve function from the first function and then stores the inverse matrix in the cache.
# This is performed the first time the function is called.
# or 2) retrieves the inverse matrix from the cache.
# This is performed any subsequent times the function is called.
## Write a short comment describing the first function
##### FIRST EXERCISE #####
# I create the makeCacheMatrix function, which can store a matrix and calculate the the inverse matrix (solve the matrix)
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
} # the 'set' function is used to load a matrix into 'makeCacheMatrix'
get <- function() x # the 'get' function is used to return a matrix loaded into 'makeCacheMatrix'
setsolve <- function(solve) m <<- solve # the 'setsolve' function calculates the inverse matrix
getsolve <- function() m # the 'getsolve' function returns the inverse matrix
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## Write a short comment describing the second function
##### SECOND EXERCISE #####
# I create the cacheSolve function, which can return the inverse matrix from the cache (if it has been stored in cache) or solve a matrix and then store the inverse matrix in the cache.
cacheSolve <- function(x, ...) {
m <- x$getsolve()# this line tells the function to call the 'getsolve' function from a previous set of matrix functions generated with makeCacheMatrix
if(!is.null(m)) {
message("getting cached data")
return(m)
}# this line tells the function that if the inverse matrix is in the cache (the cache is not NULL), then return the inverse matrix from the cache
data <- x$get() #this line tells the function to 'get' the matrix from a set of matrix functions and store it temporatily in "data"
m <- solve(data, ...) #this line tells the function to solve the matrix (calculate the inverse matrix) stored in "data"
x$setsolve(m) # this line tells the function to call the 'setsolve' function from a previous set of matrix functions generated with the makeCacheMatrix
m #this line tells the function to return the inverse matrix
}
|
0df712c74aaba5e722a32dd7101deed227588fce
|
e19229006dcd95a02e060877f049f5b30a742781
|
/alc4.R
|
54f6f23784d0ed21ecd1fb3757f9c589a2abf6da
|
[] |
no_license
|
Greg131/RBasics
|
7be20c9ba8a076f2189808674fa32a927424ac49
|
c3e515fbc58824cc895d995fe3a8d76e0a0feb4c
|
refs/heads/master
| 2020-06-04T14:40:30.697545
| 2015-08-11T16:45:08
| 2015-08-11T16:45:08
| 40,327,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,921
|
r
|
alc4.R
|
structure(list(t = c(121, 121, 40, 39, 66, 64, 5, 30, 34, 5,
15, 5, 27, 91, 71, 107, 98, 120, 165, 155, 98, 160, 95, 15, 170,
97, 64, 125, 91, 39, 15, 21, 135, 22, 78, 113, 8, 5, 127, 95,
104, 94, 207, 38, 34, 44, 98, 79, 35, 45, 93, 38, 29, 33, 84,
136, 35, 36, 12, 84, 30, 15, 23, 22, 99, 135, 14, 259, 131, 123,
13, 53, 34, 60, 36, 15, 32, 27, 27, 56, 137, 132, 366, 90, 57,
50, 65, 50, 9, 167, 492, 42, 4, 11, 95, 38, 35, 91, 74, 2, 241,
72, 8, 77, 27, 52, 67, 204, 42, 58, 17, 197, 63, 260, 59, 11,
36, 29, 51, 38, 30, 35, 83, 64, 43), SEVRE = structure(c(1L,
1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L,
2L, 1L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L,
1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 2L), .Label = c("0",
"1"), class = "factor"), AGE = c(53, 52, 45, 48, 45, 42, 35,
35, 41, 37, 35, 43, 51, 58, 53, 56, 51, 40, 45, 54, 34, 41, 48,
33, 55, 44, 36, 51, 46, 45, 46, 46, 42, 32, 53, 43, 47, 39, 64,
33, 49, 54, 50, 59, 44, 33, 33, 51, 59, 44, 46, 39, 52, 56, 57,
55, 63, 53, 40, 54, 35, 39, 51, 48, 43, 45, 56, 52, 51, 60, 42,
59, 54, 63, 56, 48, 63, 57, 46, 34, 38, 65, 39, 49, 55, 36, 51,
57, 48, 41, 40, 55, 58, 46, 46, 46, 36, 51, 40, 43, 35, 39, 48,
44, 37, 54, 50, 49, 38, 48, 53, 53, 36, 63, 56, 64, 62, 60, 49,
42, 57, 62, 63, 45, 31), SEXE = structure(c(1L, 2L, 2L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L,
2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L,
2L, 2L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L), .Label = c("1", "2"), class = "factor"),
EDVNEG = structure(c(1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L), .Label = c("0",
"1"), class = "factor")), .Names = c("t", "SEVRE", "AGE",
"SEXE", "EDVNEG"), class = "data.frame", row.names = c(NA, -125L
))
|
4e6ec7a9d816c5fc0b929e8b0fed07b0fd0918fa
|
aad6041bed87bc02c69519e64c3def3fef4177a3
|
/man/loadLm-class.Rd
|
ac24e8a1e6598f7d5d5a930f12ba1d530f6c770a
|
[] |
no_license
|
jordansread/loadflex
|
ac0b5c55ddce683801fce7d2070c205e9b3e30e1
|
9d57dd491449cda5477f36fe7b94b009616e035c
|
refs/heads/master
| 2022-08-07T07:28:49.916697
| 2016-12-12T21:36:08
| 2016-12-12T21:36:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 871
|
rd
|
loadLm-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadLm.R
\docType{class}
\name{loadLm-class}
\alias{loadLm-class}
\title{A load model class specific to simple linear models (\code{\link{lm}}s) for
flux estimation.}
\description{
loadLms can take any lm formula.
}
\section{Slots}{
\describe{
\item{\code{fit}}{the interpolation model to be used.}
\item{\code{ylog}}{logical. If TRUE, this constitutes affirmation that the values
passed to the left-hand side of the model formula will be in log space. If
missing, the value of \code{ylog} will be inferred from the values of
\code{formula} and \code{y.trans.function}, but be warned that this
inference is fallible.}
}}
\seealso{
Other load.model.classes: \code{\link{loadComp-class}},
\code{\link{loadInterp-class}},
\code{\link{loadModel-class}},
\code{\link{loadReg2-class}}
}
|
3ccba09cf6a546c6aac361fc6b28aaee6514e651
|
f84403fdad56b286e3dc5752e0128e7574315dba
|
/W3.R
|
c884402fb32d5a3dd1d28ccec6daa8bac7a11c57
|
[] |
no_license
|
therealowen/ECON-390A
|
4f6ee47c354f312abf5d040eb522033f33266879
|
5b68b5400cc7869622bdbd8b4016c2ba54654da5
|
refs/heads/master
| 2022-02-11T01:25:35.220464
| 2019-04-22T01:50:47
| 2019-04-22T01:50:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 649
|
r
|
W3.R
|
## ECON 390A W3 class notes (basic R: types, vectors, lists)
#Jan 28, 2019.
#Instructor: Masud
getwd()
setwd("~/Desktop/ECON390A")  # NOTE(review): machine-specific path
x <- 2 ;y <- 6;z <- x+y
x_1 <- 6; x_2 <- 18
x^4; x/y
y1 <- "Drak"; y2 <- "M"; y3 <- "S"; y4 <- "B" ## string variables
class(x); class(y1)
## data types: integer, Character (string), Factor, Boolean, logical (T/F)
rm(y1); rm(x); rm(y)
## Four types of data: cross-section data, time-series data, panel data, longitudinal data
x <- c(8,12,15,18,19) ## concatenation
y <- c(2,10,16,12,20);length(x);z <- x+y
head(x);tail(x) ## head()/tail() show the first/last elements (six by default)
typeof(x) ## show the storage type of the data
z <- c(6,12,"Name","12",0);typeof(z) ## mixing types coerces all elements to character
v <- list(12,"trump","20","NIU","20.6") ## lists keep each element's own type
|
de4c3c50cff27fb3a8b8eac7143f3a1deef0cb8b
|
d47833e60e3b9760619cf9c348d97b188f342db3
|
/MobileNetworkDataSimulationTemplate/code/src/deduplication/man/buildCentroids.Rd
|
17c2f8040ae586065ba0a3097f7dcd69150ce120
|
[] |
no_license
|
Lorencrack3/TFG-Lorenzo
|
1a8ef9dedee45edda19ec93146e9f7701d261fbc
|
e781b139e59a338d78bdaf4d5b73605de222cd1c
|
refs/heads/main
| 2023-06-04T00:23:16.141485
| 2021-06-29T21:31:25
| 2021-06-29T21:31:25
| 364,060,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 644
|
rd
|
buildCentroids.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildCentroids.R
\name{buildCentroids}
\alias{buildCentroids}
\title{Builds the centroid of the rectangular tiles.}
\usage{
buildCentroids(ntiles_x, ntiles_y, tile_sizeX, tile_sizeY)
}
\arguments{
\item{ntiles_x}{The number of tiles on OX.}
\item{ntiles_y}{The number of tiles on OY.}
\item{tile_sizeX}{The size (length) of a tile along OX.}
\item{tile_sizeY}{The size (length) of a tile along OY.}
}
\value{
A data.table object with three columns: tile ID, centroidCoord_x and centroidCoord_y.
}
\description{
Builds the centroid of each (rectangular) tile in the grid. The centroid is simply the center of the
tile, i.e. the intersection point of the diagonals of the tile.
}
|
4b13255927ae2d9a5fb370bd3d099fae358a7247
|
d2cae0fc228057f023001a90daf6983792dc1797
|
/robustness_script.R
|
d065961f608a1d9bca541538a77006a34ad6aaae
|
[] |
no_license
|
losapio/Trait-based-Network-Robustness
|
8ab8f7e57077575d63ec1085caaaeaac4aac822c
|
895dd65232101762838a41647a9a46ad8e6d5485
|
refs/heads/master
| 2020-08-28T00:06:11.982204
| 2019-10-25T12:39:29
| 2019-10-25T12:39:29
| 217,529,044
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,819
|
r
|
robustness_script.R
|
# Resistance of plant–plant networks to biodiversity loss and secondary extinctions following simulated environmental changes
# Gianalberto Losapio* & Christian Schöb
# Department of Evolutionary Biology and Environmental Studies, University of Zurich, Winterthurerstrasse 190, CH-8057 Zurich, Switzerland
# Correspondence author: E-mail: gianalberto.losapio@ieu.uzh.ch
# Functional Ecology, doi: 10.1111/1365-2435.12839
library(FactoMineR) # PCA analysis
library(igraph) # network analysis
library(rnetcarto) # network modularity
library(asreml) # mixed effects models
library(pascal) # Wald test
library(nnet) # multinomial logit model
library(car) # ANOVA tests
# data frame
cushion species LDMC SLA leafarea area
2 Arenaria_tetraquetra_F Agrostis_nevadensis 321.39 8.90 9.90 18
3 Arenaria_tetraquetra_F Lotus_corniculatus 250.00 14.89 17.57 18
6 Arenaria_tetraquetra_F Agrostis_nevadensis 303.49 8.13 8.49 19
7 Arenaria_tetraquetra_F Agrostis_nevadensis 267.15 10.75 11.81 19
8 Arenaria_tetraquetra_F Lotus_corniculatus 226.42 16.29 7.82 19
9 Arenaria_tetraquetra_F Lotus_corniculatus 214.81 18.01 9.40 19
individual sample LA
2 1 1 9.90
3 1 1 17.57
6 1 1 8.49
7 2 1 11.81
8 1 1 7.82
9 2 1 9.40
# build unipartite graph
g <- graph.data.frame(data11, directed=FALSE)
# compute guimera modularity
guimod <- netcarto(g.bip1)
# trait distribution parameters (mean and sd) for each species
ldmc.mean <- tapply(data112$ldmc, data112$species, mean, na.rm = TRUE)
sla.mean <- tapply(data112$sla, data112$species, mean, na.rm = TRUE)
la.mean <- tapply(data112$la, data112$species, mean, na.rm = TRUE)
ldmc.sd <- tapply(data112$ldmc, data112$species, sd, na.rm = TRUE)
sla.sd <- tapply(data112$sla, data112$species, sd, na.rm = TRUE)
la.sd <- tapply(data112$la, data112$species, sd, na.rm = TRUE)
# Create, for each trait, a removal-threshold vector of 25 equally spaced
# values spanning [min(trait mean), max(trait mean)].
# BUG FIX (idiom): the original loops iterated over 0:length(q.*); the i = 0
# pass silently assigned to index 0 (a no-op in R) and wasted one iteration.
# The thresholds are now built with a vectorised expression that produces
# the identical values (same names q, delta.*, q.* are defined).
q <- 1:25
pasos <- seq_along(q) - 1  # 0, 1, ..., 24 steps up from the minimum
# ldmc
delta.ldmc <- (max(ldmc.mean) - min(ldmc.mean)) / (length(q) - 1)
q.ldmc <- round(min(ldmc.mean) + pasos * delta.ldmc, 3)
# sla
delta.sla <- (max(sla.mean) - min(sla.mean)) / (length(q) - 1)
q.sla <- round(min(sla.mean) + pasos * delta.sla, 3)
# la
delta.la <- (max(la.mean) - min(la.mean)) / (length(q) - 1)
q.la <- round(min(la.mean) + pasos * delta.la, 3)
# create matrix of trait values randomly sampled [function: rnorm]
# n. replicates (random samples) = 200
repl = 200
ldmc <- matrix(NA, repl, nrow(ft))
colnames(ldmc) <- rownames(ft)
for (i in 1:nrow(ft)){
ldmc[,i] <- rnorm(repl, ft$ldmc.mean[i], ft$ldmc.sd[i])}
sla <- matrix(NA, repl, nrow(ft))
colnames(sla) <- rownames(ft)
for (i in 1:nrow(ft)){
sla[,i] <- rnorm(repl, ft$sla.mean[i], ft$sla.sd[i])}
laf <- matrix(NA, repl, nrow(ft))
colnames(laf) <- rownames(ft)
for (i in 1:nrow(ft)){
laf[,i] <- rnorm(repl, ft$la.mean[i], ft$la.sd[i])}
### pca for ldmc & sla
# create vector of removal
pca02 <- PCA(ft[,c("ldmc.mean","sla.mean")],graph = F) #axis 1 = high SLA e low LDMC
pca02$var$cor
# scores per specie
pca2 <- -round(pca02$ind$coord[,1],3)
delta.pca2 = (max(pca2)-(min(pca2)))/(length(q)-1)
q.pca2 = q
for (i in 0:length(q.pca2)){q.pca2[i] <- min(pca2)+((i-1)*delta.pca2)}
q.pca2 <- round(q.pca2,3)
# create matrix for pca
pca.02 <- matrix(NA, repl, nrow(ft))
colnames(pca.02) <- rownames(ft)
# order:
for(i in 1:repl){
pca02 <- PCA(cbind(ldmc[i,],sla[i,]), graph = F)
ax <- ifelse(sign(pca02$var$cor[1,1])==sign(pca02$var$cor[2,1]), 2, 1) # select right component (i.e. axis)
pca.02[i,] <- pca02$ind$coord[,ax]
if(pca02$var$cor[1,ax]<0){pca.02[i,] <- -pca.02[i,]} # higher ldmc e lower sla
pve.2[i] <- pca02$eig[ax,2]
}
### function for random extinction model
## Remove a random fraction of the vertices of an igraph network.
##
## Args:
##   network: an igraph object.
##   frac:    fraction of the ORIGINAL vertex count to delete.
##   no.rm:   index of a vertex that must never be removed (here the "Open"
##            node).
##
## Returns: the reduced igraph object. The original version ended with an
## assignment, so the result was only returned invisibly; it is now
## returned explicitly.
rm_random_nodes <- function(network, frac, no.rm) {
  if (!is.igraph(network)) {
    stop("Not a graph object")
  }
  candidatos <- network - V(network)[no.rm]  # protected vertex excluded
  rem <- frac * vcount(network)
  if (rem >= vcount(candidatos)) {
    rem <- vcount(candidatos)  # never request more removals than candidates
  }
  # NOTE(review): rem may be fractional; sample() truncates the size, which
  # matches the original behaviour — confirm whether rounding was intended.
  return(delete.vertices(network, sample(V(candidatos)$name, rem)))
}
### extinction sequences
for(i in 1:length(q)){ # number of steps
for(z in 1:repl){ # number of replicates
# e.g. scenario 1: extinction of species from low LA
rem <- names(which(laf[z,]<=q.la[i])) # species to remove
if (length(rem)==0){g1<-g} # nothing to remove
else{g1 <- delete.vertices(g,rem)} # new network
# for now open is assumed always in the network # later we check if not
# survival
task$network.size[z+(i-1)*repl] <- max(clusters(g1)$csize)-1 # -1 bc "open"
# absolute primary ext
task$prim.ext[z+(i-1)*repl] <- vcount(g)-vcount(g1)
# absolute secondary ext
task$sec.ext[z+(i-1)*repl] <- sum(clusters(g1)$csize==1)
# eigenvector centrality
task$centr[z+(i-1)*repl] <- centralization.evcent(g1)$centralization
# check if open node remain isolated in the network
if (length(neighbors(g1, "Open"))==0){
# if other sp present and networked
if(clusters(g1)$no!=1){
task$prim.ext[z+(i-1)*repl] <- task$prim.ext[z+(i-1)*repl]+1
task$sec.ext[z+(i-1)*repl] = task$sec.ext[z+(i-1)*repl] -1} #
# or if the last one ("desert")
else{
task$network.size[z+(i-1)*repl] = 0
task$prim.ext[z+(i-1)*repl] = vcount(g)-1
task$sec.ext[z+(i-1)*repl] = 0}}
# presence of species
for(k in 1:length(spft)){
if(!is.na(match(spft[k], V(g1)$name))){
task[z+(i-1)*repl,nc+k] <- length(neighbors(g1, spft[k])) # total abundance of sp k
task[z+(i-1)*repl,nc+k] <- ifelse(task[z+(i-1)*repl,nc+k]==0,-1,1)} # presence or sec ext
else{task[z+(i-1)*repl,nc+k] = 0}}
# check if foundation species remain isolated in the network
if (task[z+(i-1)*repl, "A.tetraquetra_F"]==-1){
task$network.size[z+(i-1)*repl] <- task$network.size[z+(i-1)*repl]+1
task$sec.ext[z+(i-1)*repl] <- task$sec.ext[z+(i-1)*repl] -1}
if (task[z+(i-1)*repl, "F.indigesta_F"]==-1){
task$network.size[z+(i-1)*repl] <- task$network.size[z+(i-1)*repl]+1
task$sec.ext[z+(i-1)*repl] <- task$sec.ext[z+(i-1)*repl] -1}
if (task[z+(i-1)*repl, "P.holosteum_F"]==-1){
task$network.size[z+(i-1)*repl] <- task$network.size[z+(i-1)*repl]+1
task$sec.ext[z+(i-1)*repl] <- task$sec.ext[z+(i-1)*repl] -1}
#scenario 5: RANDOM
g1 <- rm_random_nodes(g, q1[i], 3) # new network
# for now open is assumed always in the network # later we check if not
# survival
task$network.size[z+(i-1)*repl+4*m] <- max(clusters(g1)$csize)-1 # -1 bc "open"
# absolute primary ext
task$prim.ext[z+(i-1)*repl+4*m] <- vcount(g)-vcount(g1)
# absolute secondary ext
task$sec.ext[z+(i-1)*repl+4*m] <- sum(clusters(g1)$csize==1)
# eigenvector centrality
task$centr[z+(i-1)*repl+4*m] <- centralization.evcent(g1)$centralization
# check if open node remain isolated in the network
if (length(neighbors(g1, "Open"))==0){
# if other sp present and networked
if(clusters(g1)$no!=1){
task$prim.ext[z+(i-1)*repl+4*m] <- task$prim.ext[z+(i-1)*repl+4*m]+1
task$sec.ext[z+(i-1)*repl+4*m] = task$sec.ext[z+(i-1)*repl+4*m] -1} #
# or if the last one ("desert")
else{
task$network.size[z+(i-1)*repl+4*m] = 0
task$prim.ext[z+(i-1)*repl+4*m] = vcount(g)-1
task$sec.ext[z+(i-1)*repl+4*m] = 0}}
# presence of species
for(k in 1:length(spft)){
if(!is.na(match(spft[k], V(g1)$name))){
task[z+(i-1)*repl+4*m,nc+k] <- length(neighbors(g1, spft[k])) # total abundance of sp k
task[z+(i-1)*repl+4*m,nc+k] <- ifelse(task[z+(i-1)*repl+4*m,nc+k]==0,-1,1)} # presence or sec ext
else{task[z+(i-1)*repl+4*m,nc+k] = 0}}
# check if foundation species remain isolated in the network
if (task[z+(i-1)*repl+4*m, "A.tetraquetra_F"]==-1){
task$network.size[z+(i-1)*repl+4*m] <- task$network.size[z+(i-1)*repl+4*m]+1
task$sec.ext[z+(i-1)*repl+4*m] <- task$sec.ext[z+(i-1)*repl+4*m] -1}
if (task[z+(i-1)*repl+4*m, "F.indigesta_F"]==-1){
task$network.size[z+(i-1)*repl+4*m] <- task$network.size[z+(i-1)*repl+4*m]+1
task$sec.ext[z+(i-1)*repl+4*m] <- task$sec.ext[z+(i-1)*repl+4*m] -1}
if (task[z+(i-1)*repl+4*m, "P.holosteum_F"]==-1){
task$network.size[z+(i-1)*repl+4*m] <- task$network.size[z+(i-1)*repl+4*m]+1
task$sec.ext[z+(i-1)*repl+4*m] <- task$sec.ext[z+(i-1)*repl+4*m] -1}
# control for species associations, i.e. to which module subordinate species belong to
task$A.nevadensis[which(task$A.tetraquetra_F==0 & task$A.nevadensis==1)] <- -1
task$A.vulneraria[which(task$P.holosteum_F==0 & task$A.vulneraria==1)] <- -1
task$A.tetraquetra[which(task$A.tetraquetra_F==0 & task$A.tetraquetra==1)] <- -1
task$C.oporinoides[which(task$P.holosteum_F==0 & task$C.oporinoides==1)] <- -1
task$D.brachyanthus[which(task$P.holosteum_F==0 & task$D.brachyanthus==1)] <- -1
task$E.glaciale[which(task$A.tetraquetra_F==0 & task$E.glaciale==1)] <- -1
task$E.nevadense[which(task$A.tetraquetra_F==0 & task$E.nevadense==1)] <- -1
task$E.willkommii[which(task$A.tetraquetra_F==0 & task$E.willkommii==1)] <- -1
task$F.indigesta[which(task$A.tetraquetra_F==0 & task$F.indigesta==1)] <- -1
task$G.pyrenaicum[which(task$P.holosteum_F==0 & task$G.pyrenaicum==1)] <- -1
task$J.amethystina[which(task$F.indigesta_F==0 & task$J.amethystina==1)] <- -1
task$J.humilis[which(task$P.holosteum_F==0 & task$J.humilis==1)] <- -1
task$K.vallesiana[which(task$A.tetraquetra_F==0 & task$K.vallesiana==1)] <- -1
task$L.boryi[which(task$A.tetraquetra_F==0 & task$L.boryi==1)] <- -1
task$L.pectinata[which(task$A.tetraquetra_F==0 & task$L.pectinata==1)] <- -1
task$L.corniculatus[which(task$P.holosteum_F==0 & task$L.corniculatus==1)] <- -1
task$N.purpurea[which(task$P.holosteum_F==0 & task$N.purpurea==1)] <- -1
task$P.holosteum[which(task$A.tetraquetra_F==0 & task$P.holosteum==1)] <- -1
task$R.angiocarpus[which(task$P.holosteum_F==0 & task$R.angiocarpus==1)] <- -1
task$S.boissieri[which(task$P.holosteum_F==0 & task$S.boissieri==1)] <- -1
task$S.boryi[which(task$F.indigesta_F==0 & task$S.boryi==1)] <- -1
task$T.serpylloides[which(task$A.tetraquetra_F==0 & task$T.serpylloides)] <- -1
### correct for sec.ext and network.size
task$network.size[i] <- sum(task[i,c((nc+1):(nc+1+length(spft)-1))]==1)
task$sec.ext[i] <- sum(task[i,c((nc+1):(nc+1+length(spft)-1))]==-1)
### calculating:
# relative netework size = relative survival
task$rel.net.size = task$network.size/(vcount(g)-1)
# relative secondary extinctions to total extinctions
task$rel.sec.ext2 = task$sec.ext/(task$prim.ext+task$sec.ext)
### statystical analysis
# mixed effects
SURV <- task$rel.net.size
SECEXT <- task$rel.sec.ext2
EXTYPE <- factor(task$scenario, labels=c("Scenario 1","Scenario 2","Scenario 3","Scenario 4","Random"))
NCUSH <- factor(task$cush, ordered=TRUE)
Q <- factor(task$q, ordered=TRUE)
REPL <- factor(task$repl)
reml=asreml(fixed=SURV~EXTYPE*NCUSH, #treatment model
random=~Q/REPL,
rcov=~id(EXTYPE):ar1(Q):REPL,keep.order=TRUE, #error model
data=task,
control=asreml.control(maxiter=50))
test.asreml(reml)
reml=asreml(fixed=SECEXT~EXTYPE*NCUSH, #treatment model
random=~Q/REPL,
rcov=~id(EXTYPE):ar1(Q):REPL,keep.order=TRUE, #error model
data=task,
control=asreml.control(maxiter=50))
test.asreml(reml)
### for sec ext species, differences among species
# multivariate logit model
sp2.nul <- multinom(pers~1, data=task2)
sp2.1 <- multinom(pers~species-1, data=task2)
anova(sp2.nul,sp2.1)
Anova(sp2.1)
coef <- coefficients(sp2.1);coef
ci <- confint(sp2.1)
# testing for species abudance and community membership
task.sp2$spab <- c(98,13,28,4,2,3,34,14,33,18,260,2,48,62,8,249,16,26,249,23,106,3,7,27,7)
task.sp2$secext <- tab.coef2[,4]
task.sp2$surv <- tab.coef2[,1]
spmod2 <- glm(secext~comm*spab, data=task.sp2)
summary(spmod2)
Anova(spmod2)
|
229e21bd3adf7353946802a7366a51eee9455393
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/jmzeng1314/bioweb/ui.R
|
28e6ac1b64648e485573ea54043e3477bf729bb2
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,366
|
r
|
ui.R
|
library(shiny)
library(shinydashboard)
library(ape)
library(RCurl)
library(RMySQL)
options(stringsAsFactors = F)
#http://www.r-bloggers.com/mysql-and-r/
#Connect to the MySQL server using the command:
#mysql --user=genome --host=genome-mysql.cse.ucsc.edu -A
#The -A flag is optional but is recommended for speed
#Don't forget EnsEMBL's new US East MySQL mirror too:
#mysql -h useastdb.ensembl.org -u anonymous -P 5306
UCSC_all_tables=read.table("txt_files/UCSC_all_dbs.txt",header = T)
page_UCSC <- fluidPage(
p("这其实是一个UCSC的公共数据库的接口可视化!"),
tags$ul(
tags$li("数据库连接的网址是:genome-mysql.cse.ucsc.edu"),
tags$li("数据库连接用户名是:genome"),
tags$li("数据库连接的密码是:''(一个空字符串,匿名登录)")
),
flowLayout(
selectInput("ChooseUCSCdb","选择数据库",
width='80%',
choices = UCSC_all_tables,
selected = 'hg19'
),## end for selectInput
uiOutput('chooseUCSCtable'),
selectInput("ChooseUCSCrow","选择查看多少行",
width='80%',
choices = c(10,100,1000,'ALL'),
selected=10
)## end for selectInput
),
actionButton('UCSC_search',"运行",width='50%'),
DT::dataTableOutput('UCSC_results')
)
page_NCBI <- fluidPage(
)
page_Ensembl <- fluidPage(
)
page_About <- fluidPage(
)
header=dashboardHeader(
title =p("生信数据库!"
,style="font-size:90%;font-style:oblique"
)
)
sidebar = dashboardSidebar(
conditionalPanel(
condition = "1",
sidebarMenu(
id = "tabs",
hr(),
menuItem("UCSC数据库", tabName = "UCSC", icon = icon("home")),
menuItem("NCBI数据库", tabName = "NCBI", icon = icon("flask")),
menuItem("ENSEMBL数据库", tabName = "Ensembl", icon = icon("life-ring")),
menuItem("About", tabName = "About", icon = icon("info-circle"))
) ## end for sidebarMenu
) ## end for conditionalPanel
) ## end for dashboardSidebar
body=dashboardBody(
tabItems(
tabItem(tabName = "UCSC",page_UCSC),
tabItem(tabName = "NCBI",page_NCBI),
tabItem(tabName = "Ensembl",page_Ensembl),
tabItem(tabName = "About",page_About)
)
)
shinyUI(
dashboardPage(
header,
sidebar,
body,
title = 'bio-tools'
)
)
|
d95cf82cd487d040e873488c9787ce0ce922611c
|
35d2664803bb6878acce80589c7cebc0ce44ee33
|
/code/decommissioned/helper/general/rm_colnms_by_regex_mtch.R
|
fdb4dc582e7a12b57608aadc0802ce4baff77a49
|
[] |
no_license
|
Basketball4Nerds/nba_basketball_research
|
dd6e4e22f6f38438b3cb88346484e32320d9f7df
|
4771940c1f57a5d054cc833eab86ef789a6e5d7e
|
refs/heads/master
| 2020-03-17T01:30:40.239997
| 2018-05-26T18:49:33
| 2018-05-26T18:49:33
| 133,156,005
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
rm_colnms_by_regex_mtch.R
|
## Remove from a data frame every column whose name matches a regex.
##
## Args:
##   df:         a data.frame.
##   regex_expr: regular expression matched against names(df) via grepl().
##
## Returns: a data.frame containing only the non-matching columns.
## BUG FIX: drop = FALSE guarantees a data.frame is returned even when only
## one column survives; the previous bare df[, ...] collapsed a single
## surviving column to a plain vector.
rm_colnms_by_regex_mtch <- function(df, regex_expr) {
  keep <- !grepl(regex_expr, names(df))
  df[, keep, drop = FALSE]
}
|
11303c03985ce325a8ab8164ed4afa0beb39542d
|
2660d7ae26e5570d7cff646c3830b2ae0f10d9b0
|
/cachematrix.R
|
0cf05cbbf92f76d4a8f150d5f1ecbae937503f5e
|
[] |
no_license
|
khemkaiitr/ProgrammingAssignment2
|
dc420d8eab83a80fbab9767256b15ace8a609ba9
|
ef9c9073ea103d52defdb50aa68a6fe543d443b9
|
refs/heads/master
| 2021-01-21T18:25:07.828138
| 2014-04-26T12:10:57
| 2014-04-26T12:10:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,586
|
r
|
cachematrix.R
|
## makeCacheMatrix is a function that returns a list of different functions,
## when an input matrix is provided. Inside these list the four functions are:
# 1. set() sets the value of the matrix using input matrix.
# 2. get() gets the matrix
# 3. setInverse() Sets the value of inverse if calculated
# 4. getInverse() Gets the value of inverse matrix if exist. In the begining it is assigned as NULL value
## makeCacheMatrix builds a special "matrix" object that can cache its
## inverse. Returns a list of four accessor functions:
##   set(y)        : replace the stored matrix and invalidate the cache
##   get()         : return the stored matrix
##   setInverse(i) : store a computed inverse in the cache
##   getInverse()  : return the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y){
    x <<- y
    # BUG FIX: must use <<- so the cache in the enclosing environment is
    # cleared; the original plain `m <- NULL` only created a local binding
    # and left a stale inverse behind after the matrix was replaced.
    m <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse){
    m <<- inverse
  }
  getInverse <- function() m
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## cacheSolve is a function that uses "makeCacheMatrix" function in its #
#implementation. In the begining it checks if the inverse of the input matrix #
#exist in the cache. If exist it prints it otherwise computes the matrix.
#Furhter detailed information is given below:
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it at most once: a previously cached inverse
## is reused (with a message), otherwise solve() is called and the result
## stored for the next call. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("Getting cache data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
|
6d805dc87b6e9a92206c25e1a71a74b574714e7d
|
ab1730e13eb49478aa0cc443d40c68921cbe1662
|
/man/GolfRound.Rd
|
9bf47fc5ba8612df2e4f5f8eea8f0e93555f713e
|
[] |
no_license
|
cran/Lock5Data
|
688fa73667eacea4f4a325763907bba30f55c030
|
770f5fd2b8956dbb0c822133223f9c811561f130
|
refs/heads/master
| 2021-08-02T06:23:39.102769
| 2021-07-22T21:40:10
| 2021-07-22T21:40:10
| 17,680,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
rd
|
GolfRound.Rd
|
\name{GolfRound}
\alias{GolfRound}
\docType{data}
\title{Golf Round}
\description{
Scorecard for 18 holes of golf
}
\format{
A data frame with 18 observations on the following 4 variables.
\describe{
\item{\code{Hole}}{Hole number (1 to 18)}
\item{\code{Distance}}{Length of the hole (in yards)}
\item{\code{Par}}{Par for the hole}
\item{\code{Score}}{Actual number of strokes needed in this round}
}
}
\details{
Data come from a scorecard for one round of golf at the Potsdam Country Club. Par is the expected number of strokes a good golfer should need to complete the hole.
}
\source{
Personal file
}
\keyword{datasets}
|
e1a94104488901af86721988dcc60651947c390b
|
25bdc974db618dbbff5dfdc2e7335e46499d1e7f
|
/r.core/src/test/resources/com/blogspot/miguelinlas3/r/eclipse/core/compiler/bison/sort.R
|
7fba39bcd12156a0df92701bcc65061e46246333
|
[] |
no_license
|
migue/r-ide
|
a67b1953dbac98d5b8845255ac53bf6bd4fd07e9
|
d51e27dd732d25012228943975e41a9b9d85d7de
|
refs/heads/master
| 2016-09-05T09:05:45.818767
| 2011-10-29T21:50:41
| 2011-10-29T21:50:41
| 2,564,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,632
|
r
|
sort.R
|
### Name: sort
### Title: Sorting or Ordering Vectors
### Aliases: sort sort.default sort.POSIXlt sort.int
### Keywords: univar manip arith
### ** Examples
require(stats)
x <- swiss$Education[1:25]
x; sort(x); sort(x, partial = c(10, 15))
median.default # shows you another example for 'partial'
## illustrate 'stable' sorting (of ties):
sort(c(10:3,2:12), method = "sh", index.return=TRUE) # is stable
## $x : 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 11 12
## $ix: 9 8 10 7 11 6 12 5 13 4 14 3 15 2 16 1 17 18 19
sort(c(10:3,2:12), method = "qu", index.return=TRUE) # is not
## $x : 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 11 12
## $ix: 9 10 8 7 11 6 12 5 13 4 14 3 15 16 2 17 1 18 19
## ^^^^^
x <- c(1:3, 3:5, 10)
is.unsorted(x) #-> FALSE: is sorted
is.unsorted(x, strictly=TRUE) #-> TRUE : is not (and cannot be) sorted strictly
## Not run: ## Small speed comparison simulation:
##D N <- 2000
##D Sim <- 20
##D rep <- 1000 # << adjust to your CPU
##D c1 <- c2 <- numeric(Sim)
##D for(is in 1:Sim){
##D x <- rnorm(N)
##D c1[is] <- system.time(for(i in 1:rep) sort(x, method = "shell"))[1]
##D c2[is] <- system.time(for(i in 1:rep) sort(x, method = "quick"))[1]
##D stopifnot(sort(x, method = "s") == sort(x, method = "q"))
##D }
##D rbind(ShellSort = c1, QuickSort = c2)
##D cat("Speedup factor of quick sort():\n")
##D summary({qq <- c1 / c2; qq[is.finite(qq)]})
##D
##D ## A larger test
##D x <- rnorm(1e7)
##D system.time(x1 <- sort(x, method = "shell"))
##D system.time(x2 <- sort(x, method = "quick"))
##D stopifnot(identical(x1, x2))
## End(Not run)
|
a043e906d94a85df332687b75fb69b737c1e97fa
|
84c937f0c53026854bc9f7133d63abdbaa5b461f
|
/tests/test_packs.R
|
d8804a6ba7fa477782c9cb643621e516778c7906
|
[] |
no_license
|
MilesMcBain/deplearning
|
9a11efe728604f841c6c293fd4ff7145bc431b91
|
1cec42a230c8c4198cedf9433f1044a9d66619da
|
refs/heads/master
| 2021-01-21T14:47:58.736411
| 2018-06-20T21:47:32
| 2018-06-20T21:47:32
| 95,333,085
| 57
| 3
| null | 2018-06-20T21:47:33
| 2017-06-25T02:09:16
|
R
|
UTF-8
|
R
| false
| false
| 463
|
r
|
test_packs.R
|
## Test fixture for the deplearning dependency scanner: a grab-bag of
## library()/require() calls in different styles (quoted, unquoted,
## commented-out, pacman-style p_load) that the scanner must recognise.
## NOTE(review): library(notARealPackage) appears to be a deliberately
## nonexistent package used to exercise the "not installed" path — confirm.
library(datapasta)
require("mgcv.helper")
library("tidyr")
library(rstudioapi)
library(A3)
library("abc")
library(naniar)
library(visdat)
library(dplyr)
library(switchr)
library("rtimicropem")
library(notARealPackage)
require("packagemetrics")
#library("dejaVu")
p_load(xml2, jsonlite)
p_load("devtools")
#library(Rborist)
#library(rrtools)
#library(kpmt)
#library(fasjem)
require(packup)
# Grab the active editor contents and run the dependency check on them.
doc <- rstudioapi::getActiveDocumentContext()$contents
depl_check_run()
|
7fba290bffce36018f2dd1106fcfa66fcc7a8308
|
aa2afda5c0cc54c9a80635840cfe6f6e999a4905
|
/carros/app.R
|
290c9bbdf3b815899ae7339e2b7c1cce1e662485
|
[] |
no_license
|
rodrigorocha1/app_rshiny_prever_qualidade_carro
|
70e10835d40908f19475bcccb79f93e31d6ef832
|
23931bb888d39de77b54c4880feed43f362413ac
|
refs/heads/master
| 2022-04-10T12:17:33.217211
| 2020-03-28T21:25:48
| 2020-03-28T21:25:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,772
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(e1071)
carros = read.csv("../car.data", sep = ",")
modelo = naiveBayes(class ~., data = carros)
buying = unique(carros$buying)
maint = unique(carros$maint)
doors = unique(carros$doors)
persons = unique(carros$persons)
lug_boot = unique(carros$lug_boot)
safety = unique(carros$safety)
ui <- fluidPage(
titlePanel("Previsão de Qualidade de veiculos"),
fluidRow(
column(4, selectInput("buying", "Preço: ", choices = buying)),
column(4, selectInput("maint", "Manutenção:", choices = maint)),
column(4, selectInput("doors", "Portas: ", choices = doors))
),
fluidRow(
column(4, selectInput("persons", "Capacidade de Passageiros", choices = persons)),
column(4, selectInput("lug_boot", "Porta Malas", choices = lug_boot)),
column(4, selectInput("safety", "Segurança", choices = safety))
),
fluidRow(
column(12, actionButton("Processar", "Processar"), h1(textOutput("Resultado")) )
)
)
# Define server logic required to draw a histogram
# Server: on "Processar", assemble the selected attributes into a one-row
# data frame and run the Naive Bayes model, rendering the predicted class.
server <- function(input, output) {
  observeEvent(input$Processar, {
    # BUG FIX: the column must be named "doors", matching the training data
    # `carros` (the model above is fit with `class ~ .`). The previous name
    # "door" did not match any model variable, so the door count was
    # silently ignored by predict().
    novocarro = data.frame("buying" = input$buying, "maint" = input$maint,
                           "doors" = input$doors, "persons" = input$persons,
                           "lug_boot" = input$lug_boot, "safety" = input$safety)
    predicao = predict(modelo, novocarro)
    output$Resultado = renderText({as.character(predicao)})
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
9a7572aa04af49a35fe2f561624d626e471fbbeb
|
9e788d01b18c923961d1fe958c7477c04abbc5cc
|
/plot1.R
|
6686458ac42604dfe3d94f06868d21dd81f74162
|
[] |
no_license
|
amturner5/ExData_Plotting1
|
53a8c12340c1bdfe676551c21df11e860b23592a
|
2266e3f2fe0c7d0e81af07a04c6fee8727f49d78
|
refs/heads/master
| 2020-03-20T21:32:19.587625
| 2018-06-20T04:57:57
| 2018-06-20T04:57:57
| 137,746,094
| 0
| 0
| null | 2018-06-18T11:56:58
| 2018-06-18T11:56:58
| null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
plot1.R
|
## Exploratory Data Analysis, Course Project 1 — line plot of Global Active
## Power over the two-day window 2007-02-01/2007-02-02.
## NOTE(review): this file is named plot1.R but writes "plot2.R.png" —
## confirm the intended output name.
## NOTE(review): the absolute C:/Users/Dell paths make the script
## machine-specific, and unzip() extracts into the current working
## directory unless exdir is supplied — confirm both.

## Download the archive and unzip it into the local repository.
url <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
download.file(url, "C:/Users/Dell/test-repo/EDAProgrammingAssignment1/ExData_Plotting1/household_power_consumption.zip")
unzip("C:/Users/Dell/test-repo/EDAProgrammingAssignment1/ExData_Plotting1/household_power_consumption.zip")
file <- "C:/Users/Dell/test-repo/EDAProgrammingAssignment1/ExData_Plotting1/household_power_consumption.txt"

## Read only the two requested dates straight from disk via sqldf.
library(sqldf)
df <- read.csv.sql(file, "select * from file where Date = '2/2/2007' or Date = '1/2/2007'",
                   header = TRUE, sep = ";")

## Concatenate date and time and convert to POSIXlt.
df$Date_Time <- paste(df$Date, df$Time)
df$Date_Time <- strptime(df$Date_Time, "%d/%m/%Y %H:%M:%S")

## Open the PNG device and draw the time series.
png("C:/Users/Dell/test-repo/EDAProgrammingAssignment1/ExData_Plotting1/plot2.R.png", height = 480, width = 480)
with(df, plot(Date_Time, Global_active_power, type = "n", ylab = "Global Active Power (kilowatts)", xlab = ""))
# BUG FIX: type was "l'" (stray apostrophe); the documented plot types are
# single characters, so use "l" for a line.
lines(df$Date_Time, df$Global_active_power, type = "l")
dev.off()
|
749cf1bc056bee13ace5fde897a24ef2e5acc0dd
|
ef49d1238c49c0b8429c5cf00ac86eba407abbe7
|
/man/chapter_11_exercise_24.Rd
|
e54a08935e5639474dc3376d00fb622bde22ea95
|
[] |
no_license
|
yelleKneK/AMCP
|
be46c4969bf4e4bb7849a904664d9b3c17e494ef
|
72e0e0ff5053d42da9a1c0e2e1ec063586634e8a
|
refs/heads/master
| 2022-11-23T06:39:24.885288
| 2020-07-24T19:57:16
| 2020-07-24T19:57:16
| 282,302,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,367
|
rd
|
chapter_11_exercise_24.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{chapter_11_exercise_24}
\alias{chapter_11_exercise_24}
\alias{Chapter_11_Exercise_24}
\alias{C11E24}
\alias{c11e24}
\title{The data used in Chapter 11, Exercise 24}
\format{
An object of class \code{data.frame} with 90 rows and 3 columns.
}
\source{
\url{https://designingexperiments.com/data/}
Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and
analyzing data: {A} model comparison perspective}. (3rd ed.). New York, NY: Routledge.
}
\usage{
data(chapter_11_exercise_24)
}
\description{
Data from Chapter 11 Exercise 24 of \emph{Designing Experiments and Analyzing Data: A Model Comparison Perspective} (3rd edition; Maxwell, Delaney, & Kelley).
}
\details{
\itemize{
\item id.
\item judgement.
\item activity.}
}
\section{Synonym}{
C11E24
}
\examples{
# Load the data
data(chapter_11_exercise_24)
# Or, alternatively load the data as
data(C11E24)
# View the structure
str(chapter_11_exercise_24)
# Brief summary of the data.
summary(chapter_11_exercise_24)
}
\references{
Maxwell, S. E., Delaney, H. D., & Kelley, K. (2018). \emph{Designing experiments and analyzing data:
{A} model comparison perspective} (3rd ed.). New York, NY: Routledge.
}
\author{
Ken Kelley \email{kkelley@nd.edu}
}
\keyword{datasets}
|
0dca40a0298128f3c20d2102f4ba8138b43242f8
|
88eb8150423c78d613ff9c0f54b01fb19dc235f2
|
/Practica10/pr10sim.R
|
7faecf0aab4b1b079c35ba60ee37a8fc01d321a7
|
[] |
no_license
|
claratepa/Simulacion
|
f3975321a8df4c0c9988390a066fc11e475f46de
|
4c37582eaed0030949ca9177ec4068200dedb865
|
refs/heads/master
| 2022-11-05T13:39:49.226957
| 2020-06-17T22:48:32
| 2020-06-17T22:48:32
| 235,903,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,014
|
r
|
pr10sim.R
|
library(testit)
suppressMessages(library(doParallel))
clust <- makeCluster(detectCores() - 1)
registerDoParallel(clust)
## Solve the 0/1 knapsack problem by dynamic programming.
##
## Args:
##   cap:   knapsack capacity (integer).
##   peso:  vector of item weights.
##   valor: vector of item values (same length as peso).
##
## Returns: the maximum total value achievable without exceeding `cap`.
## If all items together fit, their total value is returned immediately.
##
## DP table: cap+1 rows (accumulated weight 0..cap) and n+1 columns
## (column 1 = "no items considered"). Cell [w, j+1] holds the best value
## reachable with accumulated weight <= w-1 using the first j items;
## unknown states start at -Inf, and column 1 is all zeros.
knapsack <- function(cap, peso, valor) {
  n <- length(peso)
  pt <- sum(peso)             # total weight of all items
  assert(n == length(valor))  # weights and values must align
  vt <- sum(valor)            # total value of all items
  if (pt < cap) {
    # Everything fits: take all items.
    return(vt)
  } else {
    filas <- cap + 1
    cols <- n + 1
    tabla <- matrix(rep(-Inf, filas * cols),
                    nrow = filas, ncol = cols)
    for (fila in 1:filas) {
      tabla[fila, 1] <- 0     # zero items -> zero value at any weight
    }
    rownames(tabla) <- 0:cap
    colnames(tabla) <- c(0, valor)
    for (objeto in 1:n) {
      for (acum in 1:(cap+1)) { # consider each row (accumulated weight) of the table
        anterior <- acum - peso[objeto]  # row we would come from if taking this item
        tabla[acum, objeto + 1] <- tabla[acum, objeto]  # default: skip the item
        if (anterior > 0) { # the item fits: compare skipping vs. taking it
          tabla[acum, objeto + 1] <- max(tabla[acum, objeto], tabla[anterior, objeto] + valor[objeto])
        }
      }
    }
    return(max(tabla))
  }
}
# A selection is feasible when the total weight of the chosen items does not
# exceed the knapsack capacity.
factible <- function(seleccion, pesos, capacidad) {
  peso.total <- sum(seleccion * pesos)
  peso.total <= capacidad
}
# Objective function: total value of the selected items.
objetivo <- function(seleccion, valores) {
  sum(seleccion * valores)
}
# Linearly rescale a numeric vector onto [0, 1]: the minimum maps to 0 and
# the maximum to 1.
normalizar <- function(data) {
  limites <- range(data)
  (data - limites[1]) / (limites[2] - limites[1])
}
# Draw `cuantos` random weights: standard-normal deviates rescaled onto
# [min, max] (the normalisation is inlined here), rounded to whole numbers
# and returned in increasing order.
generador.pesos <- function(cuantos, min, max) {
  brutos <- rnorm(cuantos)
  limites <- range(brutos)
  escalados <- (brutos - limites[1]) / (limites[2] - limites[1])
  sort(round(escalados * (max - min) + min))
}
## Generate one random "value" per item, centred on that item's weight with
## a random spread, then rescaled onto [min, max].
##
## BUG FIX: the mean was taken as pesos[n] (the LAST weight) for every
## item, so the generated values were unrelated to the individual weights.
## The per-item mean pesos[i] matches the intent shown by the sibling
## generador.valores.correlacionados(). The result vector is also
## preallocated instead of grown with c().
generador.valores <- function(pesos, min, max) {
  n <- length(pesos)
  valores <- double(n)
  for (i in seq_len(n)) {
    media <- pesos[i]
    desv <- runif(1)  # random spread for this item
    valores[i] <- rnorm(1, media, desv)
  }
  valores <- normalizar(valores) * (max - min) + min
  return(valores)
}
# Build an initial population of `tam` random solutions for an n-item
# knapsack: each row of the returned data frame is a 0/1 vector drawn
# uniformly at random.
poblacion.inicial <- function(n, tam) {
  pobl <- matrix(FALSE, nrow = tam, ncol = n)
  for (fila in seq_len(tam)) {
    pobl[fila, ] <- round(runif(n))
  }
  as.data.frame(pobl)
}
# Flip one randomly chosen bit of the 0/1 solution vector `sol` (length n)
# and return the mutated copy; `sol` itself is not modified.
mutacion <- function(sol, n) {
  bit <- sample(1:n, 1)
  copia <- sol
  copia[bit] <- (!sol[bit]) * 1
  copia
}
# Single-point crossover: cut both parents at a random interior position
# and exchange tails. Returns the two children concatenated in one vector
# of length 2n (first child in positions 1:n, second in (n+1):(2n)).
reproduccion <- function(x, y, n) {
  corte <- sample(2:(n-1), 1)
  cabeza <- 1:corte
  cola <- (corte + 1):n
  hijo.uno <- c(x[cabeza], y[cola])
  hijo.dos <- c(y[cabeza], x[cola])
  c(hijo.uno, hijo.dos)
}
## Cluster-worker wrappers for the genetic-algorithm steps. They take no
## arguments: each reads the foreach loop index `i` and the globals
## (p, pm, n, tam, valores, pesos, capacidad) exported to the workers via
## clusterExport() in the main loop below.

# Mutate the i-th individual with probability pm; returns NULL when no
# mutation occurs (NULL rows are dropped by rbind in the foreach combine).
mutacion2<- function() {
  if (runif(1) < pm) {
    return(mutacion(p[i,], n))
  }
}
# Pick two distinct random parents and return their two crossover children
# concatenated in a single vector of length 2n.
reproduccion2<- function() {
  padres <- sample(1:tam, 2, replace=FALSE)
  hijos_t <- reproduccion(p[padres[1],], p[padres[2],], n)
  return(hijos_t)
}
# Objective value (total selected value) of the i-th individual.
objetivo2<- function() {
  obj_t <- double()
  obj_t <- c(obj_t, objetivo(p[i,], valores))
  return(obj_t)
}
# Feasibility (total weight within capacity) of the i-th individual.
factible2 <- function() {
  fact_f <- integer()
  fact_f <- c(fact_f, factible(p[i,], pesos, capacidad))
  return(fact_f)
}
## Generate values positively correlated with the weights: each value is
## its weight plus standard-normal noise, rescaled onto [min, max].
## Passing rev(pesos) yields inversely correlated values (as done below).
##
## Cleanup: the original loop computed unused locals (`media`, and a
## `desv <- runif(1)` draw that was never used) and grew the vector with
## c(); the computation is now a single vectorised expression.
## NOTE(review): dropping the unused runif(1) draws changes the RNG stream
## under a fixed seed, but not the distribution of the results — no seed is
## set in this script.
generador.valores.correlacionados <- function(pesos,min,max) {
  valores <- pesos + rnorm(length(pesos))
  valores <- normalizar(valores) * (max - min) + min
  return(valores)
}
# Experiment for n = 25 items: run the parallel genetic algorithm on three
# knapsack instance types (independent, weight-correlated and inversely
# correlated values) and record per-generation wall-clock times.
n <- 25
pesos <- generador.pesos(n, 15, 80)
# Knapsack capacity: 65% of the total weight.
capacidad <- round(sum(pesos) * 0.65)
valores <- generador.valores(pesos, 10, 500)
valores_correlacionados <- generador.valores.correlacionados(pesos,10,500)
valores_inversos <- generador.valores.correlacionados(rev(pesos),10,500)
# Accumulates one timing per generation across all three instances (150 total).
tiempos <- c()
for(ins in 1:3) {
# Pick the value vector for this instance type (fresh random draws; the
# three vectors generated above are not reused here).
if(ins == 1) {
valores <- generador.valores(pesos, 10, 500)
} else if(ins == 2) {
valores <- generador.valores.correlacionados(pesos,10,500)
} else if(ins == 3) {
valores <- generador.valores.correlacionados(rev(pesos),10,500)
}
# Timed exact reference solution; knapsack() is defined elsewhere in this file.
optimo <- system.time(knapsack(capacidad, pesos, valores))
inic <- 200 # population size
p <- poblacion.inicial(n, inic)
tam <- dim(p)[1]
# assert() comes from a helper package (e.g. testit) loaded elsewhere.
assert(tam == inic)
pm <- 0.05 # mutation probability
rep <- 50 # crossovers per generation; NOTE(review): shadows base::rep
tmax <- 50 # number of generations
mejores <- double()
# Ship shared read-only state to the workers; `clust` is created elsewhere.
clusterExport(clust, c("n", "pm", "valores", "pesos", "capacidad", "objetivo"))
for (iter in 1:tmax) {
tmp1 <- Sys.time()
# Drop the score columns appended at the end of the previous generation.
p$obj <- NULL
p$fact <- NULL
clusterExport(clust, "p")
mut <- foreach(i=1:tam, .combine = rbind) %dopar% mutacion2() # parallel mutation
p <- rbind(p, mut)
clusterExport(clust, c("tam", "p"))
hijos <- foreach(i=1:rep, .combine = rbind) %dopar% reproduccion2() # parallel crossover
# NOTE(review): linear indexing below takes only the first 2n elements of
# the combined crossover results -- confirm rows were not intended instead.
p <- rbind(p, hijos[1:n]) # first child
p <- rbind(p, hijos[(n+1):(2*n)]) # second child
tam <- dim(p)[1]
obj <- double()
fact <- integer()
obj <- foreach(i=1:tam, .combine = rbind) %dopar% objetivo2() # parallel objective evaluation
fact <- foreach(i=1:tam, .combine = rbind) %dopar% factible2() # parallel feasibility evaluation
p <- cbind(p, obj)
p <- cbind(p, fact)
# Survivor selection: feasible first, then by objective; keep `inic` rows.
mantener <- order(-p[, (n + 2)], -p[, (n + 1)])[1:inic]
p <- p[mantener,]
tam <- dim(p)[1]
assert(tam == inic)
factibles <- p[p$fact == TRUE,]
mejor <- max(factibles$obj)
mejores <- c(mejores, mejor)
tmp2 <- Sys.time()
# Record this generation's wall-clock duration (a difftime).
tiempos <- c(tiempos,(tmp2-tmp1))
}
}
# Collect the 3 x 50 per-generation timings for n = 25 into one data frame
# (one column per instance type) and plot them together.
datos25 <- data.frame(pasos = seq_len(50), Independiente = tiempos[1:50], Correlacionado = tiempos[51:100], ICorrelacionado = tiempos[101:150])
library(ggplot2)
png("pr10sim25.png", height = 5, width = 10, units = "cm", res = 600)
# Fix: the original mapped aes(color = d) but no object `d` exists; each
# layer sets its own fixed colour anyway, so the mapping is dropped.
gg <- ggplot(datos25, aes(x = pasos))
# Fix: the plot must be print()ed explicitly, otherwise the PNG device
# stays blank when this script is run via source().
print(
  gg + geom_line(aes(y = Independiente), color = "blue", size = 0.3) +
    geom_line(aes(y = Correlacionado), color = "red", size = 0.3) +
    geom_line(aes(y = ICorrelacionado), color = "green", size = 0.3) +
    ylab("Tiempo (s)") +
    xlab("Pasos") +
    theme_light()
)
graphics.off()
#########
# Experiment for n = 50 items: same parallel genetic algorithm as the
# n = 25 run, on the three knapsack instance types (independent,
# weight-correlated, inversely correlated values).
n <- 50
pesos <- generador.pesos(n, 15, 80)
# Knapsack capacity: 65% of the total weight.
capacidad <- round(sum(pesos) * 0.65)
valores <- generador.valores(pesos, 10, 500)
valores_correlacionados <- generador.valores.correlacionados(pesos,10,500)
valores_inversos <- generador.valores.correlacionados(rev(pesos),10,500)
# Accumulates one timing per generation across all three instances (150 total).
tiempos <- c()
for(ins in 1:3) {
# Pick the value vector for this instance type (fresh random draws).
if(ins == 1) {
valores <- generador.valores(pesos, 10, 500)
} else if(ins == 2) {
valores <- generador.valores.correlacionados(pesos,10,500)
} else if(ins == 3) {
valores <- generador.valores.correlacionados(rev(pesos),10,500)
}
# Timed exact reference solution; knapsack() is defined elsewhere in this file.
optimo <- system.time(knapsack(capacidad, pesos, valores))
inic <- 200 # population size
p <- poblacion.inicial(n, inic)
tam <- dim(p)[1]
# assert() comes from a helper package (e.g. testit) loaded elsewhere.
assert(tam == inic)
pm <- 0.05 # mutation probability
rep <- 50 # crossovers per generation; NOTE(review): shadows base::rep
tmax <- 50 # number of generations
mejores <- double()
# Ship shared read-only state to the workers; `clust` is created elsewhere.
clusterExport(clust, c("n", "pm", "valores", "pesos", "capacidad", "objetivo"))
for (iter in 1:tmax) {
tmp1 <- Sys.time()
# Drop the score columns appended at the end of the previous generation.
p$obj <- NULL
p$fact <- NULL
clusterExport(clust, "p")
mut <- foreach(i=1:tam, .combine = rbind) %dopar% mutacion2() # parallel mutation
p <- rbind(p, mut)
clusterExport(clust, c("tam", "p"))
hijos <- foreach(i=1:rep, .combine = rbind) %dopar% reproduccion2() # parallel crossover
# NOTE(review): linear indexing below takes only the first 2n elements of
# the combined crossover results -- confirm rows were not intended instead.
p <- rbind(p, hijos[1:n]) # first child
p <- rbind(p, hijos[(n+1):(2*n)]) # second child
tam <- dim(p)[1]
obj <- double()
fact <- integer()
obj <- foreach(i=1:tam, .combine = rbind) %dopar% objetivo2() # parallel objective evaluation
fact <- foreach(i=1:tam, .combine = rbind) %dopar% factible2() # parallel feasibility evaluation
p <- cbind(p, obj)
p <- cbind(p, fact)
# Survivor selection: feasible first, then by objective; keep `inic` rows.
mantener <- order(-p[, (n + 2)], -p[, (n + 1)])[1:inic]
p <- p[mantener,]
tam <- dim(p)[1]
assert(tam == inic)
factibles <- p[p$fact == TRUE,]
mejor <- max(factibles$obj)
mejores <- c(mejores, mejor)
tmp2 <- Sys.time()
# Record this generation's wall-clock duration (a difftime).
tiempos <- c(tiempos,(tmp2-tmp1))
}
}
# Collect the 3 x 50 per-generation timings for n = 50 into one data frame
# (one column per instance type) and plot them together.
datos50 <- data.frame(pasos = seq_len(50), Independiente = tiempos[1:50], Correlacionado = tiempos[51:100], ICorrelacionado = tiempos[101:150])
library(ggplot2)
png("pr10sim50.png", height = 5, width = 10, units = "cm", res = 600)
# Fix: the original mapped aes(color = d) but no object `d` exists; each
# layer sets its own fixed colour anyway, so the mapping is dropped.
gg <- ggplot(datos50, aes(x = pasos))
# Fix: the plot must be print()ed explicitly, otherwise the PNG device
# stays blank when this script is run via source().
print(
  gg + geom_line(aes(y = Independiente), color = "blue", size = 0.3) +
    geom_line(aes(y = Correlacionado), color = "red", size = 0.3) +
    geom_line(aes(y = ICorrelacionado), color = "green", size = 0.3) +
    ylab("Tiempo (s)") +
    xlab("Pasos") +
    theme_light()
)
graphics.off()
###########
# Experiment for n = 75 items: same parallel genetic algorithm as the
# n = 25 run, on the three knapsack instance types (independent,
# weight-correlated, inversely correlated values).
n <- 75
pesos <- generador.pesos(n, 15, 80)
# Knapsack capacity: 65% of the total weight.
capacidad <- round(sum(pesos) * 0.65)
valores <- generador.valores(pesos, 10, 500)
valores_correlacionados <- generador.valores.correlacionados(pesos,10,500)
valores_inversos <- generador.valores.correlacionados(rev(pesos),10,500)
# Accumulates one timing per generation across all three instances (150 total).
tiempos <- c()
for(ins in 1:3) {
# Pick the value vector for this instance type (fresh random draws).
if(ins == 1) {
valores <- generador.valores(pesos, 10, 500)
} else if(ins == 2) {
valores <- generador.valores.correlacionados(pesos,10,500)
} else if(ins == 3) {
valores <- generador.valores.correlacionados(rev(pesos),10,500)
}
# Timed exact reference solution; knapsack() is defined elsewhere in this file.
optimo <- system.time(knapsack(capacidad, pesos, valores))
inic <- 200 # population size
p <- poblacion.inicial(n, inic)
tam <- dim(p)[1]
# assert() comes from a helper package (e.g. testit) loaded elsewhere.
assert(tam == inic)
pm <- 0.05 # mutation probability
rep <- 50 # crossovers per generation; NOTE(review): shadows base::rep
tmax <- 50 # number of generations
mejores <- double()
# Ship shared read-only state to the workers; `clust` is created elsewhere.
clusterExport(clust, c("n", "pm", "valores", "pesos", "capacidad", "objetivo"))
for (iter in 1:tmax) {
tmp1 <- Sys.time()
# Drop the score columns appended at the end of the previous generation.
p$obj <- NULL
p$fact <- NULL
clusterExport(clust, "p")
mut <- foreach(i=1:tam, .combine = rbind) %dopar% mutacion2() # parallel mutation
p <- rbind(p, mut)
clusterExport(clust, c("tam", "p"))
hijos <- foreach(i=1:rep, .combine = rbind) %dopar% reproduccion2() # parallel crossover
# NOTE(review): linear indexing below takes only the first 2n elements of
# the combined crossover results -- confirm rows were not intended instead.
p <- rbind(p, hijos[1:n]) # first child
p <- rbind(p, hijos[(n+1):(2*n)]) # second child
tam <- dim(p)[1]
obj <- double()
fact <- integer()
obj <- foreach(i=1:tam, .combine = rbind) %dopar% objetivo2() # parallel objective evaluation
fact <- foreach(i=1:tam, .combine = rbind) %dopar% factible2() # parallel feasibility evaluation
p <- cbind(p, obj)
p <- cbind(p, fact)
# Survivor selection: feasible first, then by objective; keep `inic` rows.
mantener <- order(-p[, (n + 2)], -p[, (n + 1)])[1:inic]
p <- p[mantener,]
tam <- dim(p)[1]
assert(tam == inic)
factibles <- p[p$fact == TRUE,]
mejor <- max(factibles$obj)
mejores <- c(mejores, mejor)
tmp2 <- Sys.time()
# Record this generation's wall-clock duration (a difftime).
tiempos <- c(tiempos,(tmp2-tmp1))
}
}
# Collect the 3 x 50 per-generation timings for n = 75 into one data frame
# (one column per instance type) and plot them together.
datos75 <- data.frame(pasos = seq_len(50), Independiente = tiempos[1:50], Correlacionado = tiempos[51:100], ICorrelacionado = tiempos[101:150])
library(ggplot2)
png("pr10sim75.png", height = 5, width = 10, units = "cm", res = 600)
# Fix: the original mapped aes(color = d) but no object `d` exists; each
# layer sets its own fixed colour anyway, so the mapping is dropped.
gg <- ggplot(datos75, aes(x = pasos))
# Fix: the plot must be print()ed explicitly, otherwise the PNG device
# stays blank when this script is run via source().
print(
  gg + geom_line(aes(y = Independiente), color = "blue", size = 0.3) +
    geom_line(aes(y = Correlacionado), color = "red", size = 0.3) +
    geom_line(aes(y = ICorrelacionado), color = "green", size = 0.3) +
    ylab("Tiempo (s)") +
    xlab("Pasos") +
    theme_light()
)
graphics.off()
# Pairwise comparisons of per-generation run times across problem sizes
# (25 vs 50 vs 75 items), one set per instance type.
# NOTE(review): aov() with a numeric regressor fits a simple linear model,
# i.e. this is a regression F-test between two timing series rather than a
# grouped ANOVA -- confirm this matches the intended analysis.
anova <- aov(datos25$Independiente ~ datos50$Independiente)
summary(anova)
anova <- aov(datos25$Independiente ~ datos75$Independiente)
summary(anova)
anova <- aov(datos50$Independiente ~ datos75$Independiente)
summary(anova)
# Weight-correlated instances.
anova <- aov(datos25$Correlacionado ~ datos50$Correlacionado)
summary(anova)
anova <- aov(datos25$Correlacionado ~ datos75$Correlacionado)
summary(anova)
anova <- aov(datos50$Correlacionado ~ datos75$Correlacionado)
summary(anova)
# Inversely correlated instances.
anova <- aov(datos25$ICorrelacionado ~ datos50$ICorrelacionado)
summary(anova)
anova <- aov(datos25$ICorrelacionado ~ datos75$ICorrelacionado)
summary(anova)
anova <- aov(datos50$ICorrelacionado ~ datos75$ICorrelacionado)
summary(anova)
|
95dc808eaae818c2d43c4d416b95f35225ab769b
|
c08542d73325c80137da6dd857bcef07cae664ed
|
/cachematrix.R
|
c971c18fab2691682fc49b391a5895e18984a314
|
[] |
no_license
|
moolins12/ProgrammingAssignment2
|
6f28fc71aab0a92ca147b361a1d4fa4b36f306f5
|
0c1290fbd2c33fcaba4a223efdcaccaf8c088288
|
refs/heads/master
| 2020-03-18T07:41:53.832119
| 2018-05-23T18:35:42
| 2018-05-23T18:35:42
| 134,468,209
| 1
| 0
| null | 2018-05-23T18:35:43
| 2018-05-22T19:51:20
|
R
|
UTF-8
|
R
| false
| false
| 1,333
|
r
|
cachematrix.R
|
## The two functions below compute the inverse of a matrix, but store the result in a cache
## to improve performance should the operation need to be executed many times.
## The first function creates cached variables for computing the inverse within the makeCacheMatrix function. In this way
## the data can be pulled from the function's environment rather than the global environment.
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cached inverse. Returns a list of four
  # accessor closures; `set` replaces the matrix and invalidates the cache.
  inv_mat <- NULL
  set <- function(y) {
    # New matrix: store it and drop any previously cached inverse.
    x <<- y
    inv_mat <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv_mat <<- inverse
  getinverse <- function() inv_mat
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The second function uses the accessors defined by makeCacheMatrix to retrieve the data,
## compute the inverse (or reuse the cached one), and return the result.
cacheSolve <- function(x = matrix(), ...) {
  # Return the inverse of the matrix held in `x` (a list produced by
  # makeCacheMatrix), reusing the cached inverse when one is available.
  # Extra arguments are forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("Getting cached data...")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
|
662a9f01149f614b1d7c8aa773311172def69523
|
70f1a9a178a4204f675893bf7809131b3a8a9f3b
|
/project5.R
|
dd30db59eef48f931625a586938dda4ba29b453e
|
[] |
no_license
|
yashCS101/modeoftransport
|
4a1488e6e01facbe06c386864e9116f21c18f98e
|
eaffd740d4c9419cb0c7aa209cd1906c670886ed
|
refs/heads/master
| 2022-08-23T17:58:59.768544
| 2020-05-22T07:13:55
| 2020-05-22T07:13:55
| 260,863,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,204
|
r
|
project5.R
|
# Exploratory analysis / classification of commute mode ("Cars" data set).
# NOTE(review): setwd() with an absolute path makes the script machine-specific.
setwd('C:/Users/USER/Desktop/R Data Sets')
### Importing data
data = read.csv("Cars_edited.csv")
### Libraries
### Data understanding
dim(data)
str(data)
summary(data)
# Checking for NA
sapply(data,function(x) sum(is.na(x)))
#Removing NA
data = na.omit(data)
# Checking # of unique values in each column
sapply(data,function(x) length(unique(x)))
# Converting Transport values to 0 or 1
data$Transport = ifelse(data$Transport == 'Car', 1,0)
# Male = 1 , Female = 0
data$Gender = ifelse(data$Gender == 'Male', 1,0)
table(data$Transport)
# Share of car commuters.
sum(data$Transport == 1)/nrow(data)
# Binary variables needs to be converted into factor variables
data$Gender = as.factor(data$Gender)
data$Engineer = as.factor(data$Engineer)
data$MBA = as.factor(data$MBA)
data$license = as.factor(data$license)
data$Transport = as.factor(data$Transport)
#Co-relation check
# NOTE(review): as.numeric() on a factor returns the level codes (1/2),
# not the original 0/1 values -- any later comparison against 0/1 is off.
data$Gender = as.numeric(data$Gender)
data$Engineer = as.numeric(data$Engineer)
data$MBA = as.numeric(data$MBA)
data$Transport = as.numeric(data$Transport)
library(corrplot)
# Column 9 (Transport) excluded from the correlation matrix.
correlations = cor(data[,-9])
summary(correlations)
corrplot(correlations, type="lower", method = 'number', diag = FALSE)
#PCA
eigendata = eigen(correlations)
eigendata
eigenvalues = eigendata$values
eigenvalues
Factor = c(1,2,3,4,5,6,7,8)
Scree = data.frame(Factor,eigenvalues)
plot(Scree, main = "Scree Plot", col = "Blue", ylim = c(0,4))
lines(Scree,col="Red")
library(psych)
# Varimax-rotated principal components, 3 retained.
pcadatar = principal(data[,-9], nfactors = 3, rotate = "varimax")
pcadatar
pcadata = data.frame(cbind(pcadatar$scores,data$Transport))
# NOTE(review): `split` is not defined until later in this script
# (sample.split, see the "Defining Split" section); running top-to-bottom
# these subset() calls fail or pick up an unrelated object -- confirm
# the intended execution order.
pca_train = subset(pcadata, split == TRUE)
pca_test = subset(pcadata, split == FALSE)
str(pca_train)
pca_train$V4 = as.factor(pca_train$V4)
pca_test$V4 = as.factor(pca_test$V4)
# Logistic regression on the 3 PCA scores (V4 is the Transport label).
pca_logistic = glm(V4~., data=pca_train, family=binomial(link="logit"))
summary(pca_logistic)
pca_test$log.pred = predict(pca_logistic, pca_test[1:3], type="response")
table(pca_test$V4,pca_test$log.pred>0.5)
# NOTE(review): there is no CRAN package "confusionMatrix";
# confusionMatrix() is a function from caret, loaded on the next line.
install.packages("confusionMatrix")
library(caret)
pca_fac = ifelse(pca_test$log.pred>0.5,1,0)
pca_fac=as.factor(pca_fac)
pca_test$V4=as.factor(pca_test$V4)
confusionMatrix(pca_fac,pca_test$V4)
# Univariate Analysis
boxplot(data)
# Bivariate Analysis Continious variables
# NOTE(review): attach() is discouraged; it masks columns in the search path.
attach(data)
library(ggplot2)
boxplot(Age~Transport)
ggplot(data, aes(x = Age, y = Transport)) +
geom_density(aes(fill = Transport), alpha = 0.3) +
scale_color_manual(values = c("#868686FF", "#EFC000FF")) +
scale_fill_manual(values = c("darkturquoise", "lightcoral")) + xlim(-30,250)
ggplot(data, aes(x = Transport, y = Gender, fill = Transport)) + geom_boxplot(alpha=0.7)
#Gender vs Transport ???
ggplot(data, aes(x= Gender)) + theme_bw()+
facet_wrap(~Transport ) + geom_bar()+
labs(y= "No. Of People", title = "Gender vs Transport")
# Cars vs gender:
prop.table(table(data$Gender))
ggplot(data, aes(x=Work.Exp)) + geom_histogram(binwidth = 1)
ggplot(data, aes(x=Transport)) + geom_bar() + theme_bw()
ggplot(data, aes(x=Gender, fill= Transport)) + geom_bar()
#Age & Transport as car:
ggplot(data, aes(x=Transport))
ggplot(data, aes(x=Age)) + geom_histogram(binwidth = 5)
ggplot(data, aes(x=Age)) + geom_histogram(binwidth = 1) +
facet_wrap(~Transport)
#license & Transport as car:
ggplot(data, aes(x=license)) + geom_bar()
nrow(data)
ggplot(data, aes(x=license)) + geom_bar() + facet_wrap(~Transport)
table(data$Transport)
#Bi variate Analysis Factor Variables
barplot(table(Transport, Transport), col = c("green","red"), main = 'Transport')
barplot(table(Transport, Gender) , col = c("green","red"), main = 'Gender')
barplot(table(Transport, Engineer), col = c("green","red"), main = 'Engineer')
barplot(table(Transport, MBA), col = c("green","red"), main = 'MBA')
barplot(table(Transport, license), col = c("green","red"), main = 'License')
#Defining Split
set.seed(001)
library(caTools) #for sample.split
# 75/25 train/test split, stratified on Transport.
split = sample.split(data$Transport, SplitRatio = 0.75)
#Logisticregression(without SMOTE)
# Column 5 is dropped here -- presumably a redundant predictor; TODO confirm which.
logis_train = subset(data[,-5], split == TRUE)
logis_test = subset(data[,-5], split == FALSE)
table(logis_train$Transport)
table(logis_test$Transport)
nosmote_logistic = glm(Transport~., data=logis_train, family=binomial(link="logit"))
summary(nosmote_logistic)
# Check for multicollinearity
library(car)
vif(nosmote_logistic)
logis_test$log.pred = predict(nosmote_logistic, logis_test[1:8], type="response")
table(logis_test$Transport,logis_test$log.pred>0.5)
## Without smote we correctly predicted 9 but missed on 6
#SMOTE
library(DMwR)
smote.train = subset(data[,-5], split == TRUE)
smote.test = subset(data[,-5], split == FALSE)
str(smote.train$Transport)
smote.train$Transport = as.factor(smote.train$Transport)
# NOTE(review): the line below duplicates the conversion immediately above.
smote.train$Transport = as.factor(smote.train$Transport)
balanced.gd = SMOTE(Transport ~., smote.train, perc.over = 4800, k = 5, perc.under = 200)
table(balanced.gd$Transport)
#Logistic Regression(with SMOTE)
smote_logistic = glm(Transport~., data=balanced.gd, family=binomial(link="logit"))
summary(smote_logistic)
smote.test$log.pred = predict(smote_logistic, smote.test[1:8], type="response")
table(smote.test$Transport,smote.test$log.pred>0.5)
### Smote with PCA Data
smote.train.pca = subset(pcadata, split == TRUE)
smote.test.pca = subset(pcadata, split == FALSE)
str(data$Transport)
smote.train.pca$V4 = as.factor(smote.train.pca$V4)
balanced.gd.pca = SMOTE(V4 ~., smote.train.pca, perc.over = 4800, k = 5, perc.under = 200)
table(balanced.gd.pca$V4)
# Minority-class share after SMOTE balancing.
sum(balanced.gd.pca==1)/nrow(balanced.gd.pca)
#Logistic Regression(with SMOTE PCA Data)
smote_logistic_pca = glm(V4~., data=balanced.gd.pca, family=binomial(link="logit"))
summary(smote_logistic_pca)
smote.test.pca$log.pred = predict(smote_logistic_pca, smote.test.pca[1:3], type="response")
table(smote.test.pca$V4 ,smote.test.pca$log.pred>0.5)
smote_pca_fac = ifelse(smote.test.pca$log.pred>0.5,1,0)
smote_pca_fac=as.factor(smote_pca_fac)
smote.test.pca$V4=as.factor(smote.test.pca$V4)
confusionMatrix(smote_pca_fac,smote.test.pca$V4)
|
aedd25f258be85c76123ca28276a05e8f26377ed
|
270090ceb851cf0c89f16ec2ce9a76df64b11818
|
/R_code_snow.r
|
f99d40155e103cb489f50c18b50c48b10ba12f60
|
[] |
no_license
|
Mattiacroari/ecologia-del-paesaggio
|
c6009e9f9320e148bcf314c6698529df03f23d70
|
f427e08356c18d23c6958b19681efaec60c8f273
|
refs/heads/master
| 2021-04-23T17:23:19.753710
| 2020-07-21T10:07:35
| 2020-07-21T10:07:35
| 249,945,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,477
|
r
|
R_code_snow.r
|
# Snow-cover analysis from Copernicus MODIS data.
# First step: set the working directory.
setwd("C:/lab/")
install.packages("ncdf4") # needed to load Copernicus NetCDF images; NOTE(review): installing inside a script reinstalls on every run
library(ncdf4)
library(raster)
# To visualise the "NC" file it must be imported first.
# raster() imports a single layer, e.g. snow on 18 May;
# brick() loads several layers, e.g. snow from 18 May onwards.
raster("c_gls_SCE500_202005180000_CEURO_MODIS_V1.0.1.nc")
snowmay <- raster("c_gls_SCE500_202005180000_CEURO_MODIS_V1.0.1.nc")
# A warning message here means not the whole extent was imported:
# of the whole Earth only the European part was taken (the image is cropped).
# Change the colour palette.
cl <- colorRampPalette(c('darkblue','blue','light blue'))(100)
# Plot the data.
plot(snowmay,col=cl)
# Snow cover is shown in white; we expect snow cover to have decreased.
# Download files from IOL, create a new folder under lab and put the
# images from the ZIP file there, then change the working directory.
setwd("C:/lab/snow")
# Exercise from the IOL code: import the whole set of files.
# put all files into the folder
# rlist=list.files(pattern=".png", full.names=T)
# save raster into list
# with lapply:
# list_rast=lapply(rlist, raster)
# EN <- stack(list_rast)
# plot(EN)
# Check the file names.
rlist <- list.files(pattern=".tif")
rlist
# lapply applies a function to every element of a list.
list_rast <- lapply(rlist, raster)
snow.multitemp <- stack(list_rast) # give a name to the stack of all files
plot(snow.multitemp, col=cl)
# Compare the first and the last image side by side to see the differences.
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r, col=cl)
plot(snow.multitemp$snow2020r, col=cl)
# The legends differ between the plots; use zlim=c(0,250) to force a common range.
par(mfrow=c(1,2))
plot(snow.multitemp$snow2000r, col=cl, zlim=c(0,250))
plot(snow.multitemp$snow2020r, col=cl, zlim=c(0,250))
# Difference between the two images.
difsnow = snow.multitemp$snow2020r - snow.multitemp$snow2000r
# New colour ramp for the difference map.
cldiff <- colorRampPalette(c('blue','white','red'))(100) # red = largest difference, blue = smallest, white = little change
plot(difsnow, col=cldiff)
# Download prediction.r and put it into the snow folder.
source("prediction.r")
# Image downloaded from IOL.
predicted.snow.2025.norm <- raster("predicted.snow.2025.norm.tif")
# Linear prediction: a reasonable, quite likely "scenario" of snow cover.
plot(predicted.snow.2025.norm, col=cl)
|
8781748a35b584c92e85c42cb46c9e2aa6445259
|
6cb50a95d62f1318f867d6b695bf3c31bb894d97
|
/inst/doc/dataset_generation.R
|
0da2fc28988bda099872818c1732d5140f8691e2
|
[] |
no_license
|
TankMermaid/seqtime
|
c15364705f265538e826004f8100189a6885ea4b
|
960903d37e2ba95763ef58a1eaf78d94375fe829
|
refs/heads/master
| 2020-03-29T21:34:12.267387
| 2018-06-13T06:46:21
| 2018-06-13T06:46:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,517
|
r
|
dataset_generation.R
|
# Script purled from a knitr vignette; the "## ----" lines below are the
# original chunk separators.
## ----setup, include=FALSE------------------------------------------------
library(knitr)
## ----message=FALSE, warning=FALSE----------------------------------------
library(seqtime)
library(ggplot2)
library(reshape2)
## ------------------------------------------------------------------------
# N species, S samples; interaction matrix A from a Klemm-Eguiluz network.
N = 50
S = 40
A = generateA(N, "klemm", pep=10, c =0.05)
plotA(A, header="Klemm-Eguiluz interaction matrix")
## ------------------------------------------------------------------------
# Simulate a community data set, normalise to relative abundances,
# reshape to long format and draw a stacked composition bar chart.
dataset = generateDataSet(S, A)
dataset = seqtime::normalize(dataset)
dataset = melt(dataset)
colnames(dataset) = c("Species", "Sample", "Abundance")
ggplot(data=dataset, aes(x=dataset$Sample, y=dataset$Abundance, width=1)) + geom_bar(aes(y = dataset$Abundance, x= dataset$Sample, fill=dataset$Species), data=dataset, stat="identity", show.legend=F) + theme(aspect.ratio=.4) + theme_classic()+ ylab("Relative abundance") + xlab("Sample")
## ------------------------------------------------------------------------
# Same simulation, but with environmental growth-rate perturbations
# applied for two windows of 20 time points each.
env = envGrowthChanges(N, strength=0.8)
dataset = generateDataSet(S, A, env.matrix=env, perturb.count=c(20,20))
dataset = seqtime::normalize(dataset)
dataset = melt(dataset)
colnames(dataset) = c("Species", "Sample", "Abundance")
ggplot(data=dataset, aes(x=dataset$Sample, y=dataset$Abundance, width=1)) + geom_bar(aes(y = dataset$Abundance, x= dataset$Sample, fill=dataset$Species), data=dataset, stat="identity", show.legend=F) + theme(aspect.ratio=.4) + theme_classic()+ ylab("Relative abundance") + xlab("Sample")
|
f54487563f13cd9e7a3bce5e79be4e19a866d7d9
|
dac60727c63329ea417e4a943f52f040b1010ac1
|
/RCheckCode/checkFields.R
|
fc8244cad1cabe1db1502f61c4496e1e5bf21260
|
[] |
no_license
|
duncantl/SOXMLDB
|
fd8ca872f062e0d047b4e0154a6cd4504d53e8b6
|
913c04514fb9bf53ffb9f024a04b698735803f29
|
refs/heads/master
| 2023-05-14T06:39:56.130966
| 2023-05-08T14:16:12
| 2023-05-08T14:16:12
| 172,417,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 528
|
r
|
checkFields.R
|
library(DBI)
library(RSQLite)
checkDBFields <- function(db, tables = dbListTables(db)) {
  # Run checkTableFields() over every table of `db` and return the results
  # as a list named by table.
  setNames(lapply(tables, function(tbl) checkTableFields(tbl, db)), tables)
}
checkTableFields <- function(tblName, db) {
  # Compare the column names recorded in "<tblName>.vars" (one name per
  # line, looked up in the current directory) with the actual columns of
  # the table. Returns NA when no .vars file exists, TRUE when the names
  # match in order, a list on a length mismatch, and a two-column matrix
  # (vars, dbvars) on a name/order mismatch.
  vars_file <- paste0(tblName, ".vars")
  if (!file.exists(vars_file)) {
    return(NA)
  }
  expected <- readLines(vars_file)
  actual <- dbListFields(db, tblName)
  if (length(expected) != length(actual)) {
    return(list(vars = expected, dbvars = actual))
  }
  if (!all(expected == actual)) {
    return(cbind(vars = expected, dbvars = actual))
  }
  TRUE
}
|
141540da31edc8219628901ec8710f09f948271a
|
4e759f241817ae276d80124e3cf8a7984c587c6d
|
/stockdata_timer.R
|
65d401bd2ce12f63e601ba9198318192981d1e33
|
[] |
no_license
|
jimmyhenriksson/Public-Sentiment-and-Stock-Performance
|
f5da4b98110c62274adcd8023d52fd538281e3f0
|
9982cb87a8fd4639e34ced49286d1ff7b70edb5b
|
refs/heads/master
| 2023-04-11T10:11:30.011446
| 2021-04-28T20:56:47
| 2021-04-28T20:56:47
| 189,740,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,736
|
r
|
stockdata_timer.R
|
# A script that collects and stores tweets for all
# Dow Jones stocks (and more) by using Rtweet.
# Takes the time and amount constraints into account.
# NOTE(review): install.packages() inside a script reinstalls on every
# run; "ggplot2" is installed twice and "syuzhet"/"ggplot2" loaded twice.
install.packages("rtweet")
install.packages("tidytext")
install.packages("data.table")
install.packages("ggplot2")
install.packages("ROAuth")
install.packages("NLP")
install.packages("ggplot2")
install.packages("syuzhet")
install.packages("tm")
library("ggplot2")
library("NLP")
library("syuzhet")
library("tm")
library("SnowballC")
library("stringi")
library("topicmodels")
library("syuzhet")
library("ROAuth")
library(rtweet)
library(tidytext)
library(data.table)
library(ggplot2)
############################################################
# Fetch up to 4500 non-retweet tweets mentioning "aapl" posted on
# 2019-04-16, then persist the result as an RDS file.
aapl_tweets <- search_tweets(
"aapl", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(aapl_tweets, file = "aapl_tweets_new_190416.rds")
print("Done with AAPL! 1/87 completed.")
# Pause ~4 minutes between queries to respect Twitter's rate limits;
# the elapsed time is echoed for monitoring.
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
aapl2_tweets <- search_tweets(
"$aapl", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(aapl2_tweets, file = "aapl2_tweets_new_190416.rds")
print("Done with $AAPL! 2/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
apple_tweets <- search_tweets(
"apple", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(apple_tweets, file = "apple_tweets_new_190416.rds")
print("Done with Apple! 3/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
tesla_tweets <- search_tweets(
"tesla", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(tesla_tweets, file = "tesla_tweets_new_190416.rds")
print("Done with Tesla! 4/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
tsla2_tweets <- search_tweets(
"$tsla", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(tsla2_tweets, file = "tsla2_tweets_new_190416.rds")
print("Done with TSLA2! 5/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
tsla_tweets <- search_tweets(
"$tsla", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(tsla_tweets, file = "tsla_tweets_new_190416.rds")
print("Done with TSLA! 6/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
netflix_tweets <- search_tweets(
"netflix", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(netflix_tweets, file = "netflix_tweets_new_190416.rds")
print("Done with Netflix! 7/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
nflx_tweets <- search_tweets(
"nflx", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(nflx_tweets, file = "nflx_tweets_new_190416.rds")
print("Done with NFLX! 8/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
nflx2_tweets <- search_tweets(
"$nflx", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(nflx2_tweets, file = "nflx2_tweets_new_190416.rds")
print("Done with NFLX2! 9/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
ford_tweets <- search_tweets(
"ford", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(ford_tweets, file = "ford_tweets_new_190416.rds")
print("Done with Ford! 10/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
f_tweets <- search_tweets(
"$f", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(f_tweets, file = "f_tweets_new_190416.rds")
print("Done with $F! 11/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
verizon_tweets <- search_tweets(
"verizon", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(verizon_tweets, file = "verizon_tweets_new_190416.rds")
print("Done with Verizon! 12/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
gap_tweets <- search_tweets(
"GAP", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(gap_tweets, file = "gap_tweets_new_190416.rds")
print("Done with GAP! 13/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
gap2_tweets <- search_tweets(
"$GAP", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(gap2_tweets, file = "gap2_tweets_new_190416.rds")
print("Done with GAP2! 14/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
astrazeneca_tweets <- search_tweets(
"astrazeneca", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(astrazeneca_tweets, file = "astrazeneca_tweets_new_190416.rds")
print("Done with AstraZeneca! 15/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
astrazenecaticker_tweets <- search_tweets(
"$AZN", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(astrazenecaticker_tweets, file = "astrazenecaticker_tweets_new_190416.rds")
print("Done with AZN! 16/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
facebook_tweets <- search_tweets(
"facebook", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(facebook_tweets, file = "facebook_tweets_new_190416.rds")
print("Done with Facebook! 17/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
fb_tweets <- search_tweets(
"$fb", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(fb_tweets, file = "fb_tweets_new_190416.rds")
print("Done with fb! 18/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
starbucks_tweets <- search_tweets(
"starbucks", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(starbucks_tweets, file = "starbucks_tweets_new_190416.rds")
print("Done with Starbucks! 19/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
starbucksticker_tweets <- search_tweets(
"$SBUX", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(starbucksticker_tweets, file = "starbucksticker_tweets_new_190416.rds")
print("Done with #SBUX! 20/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
cisco_tweets <- search_tweets(
"cisco", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(cisco_tweets, file = "cisco_tweets_new_190416.rds")
print("Done with Cisco! 21/87 completed.")
start_time = Sys.time()
Sys.sleep(250)
end_time = Sys.time()
end_time - start_time
############################################################
csco_tweets <- search_tweets(
"$csco", n = 4500, include_rts = FALSE,
since = "2019-04-16",
until = "2019-04-17"
)
saveRDS(csco_tweets, file = "csco_tweets_new_190416.rds")
print("Done with Cisco! 22/87 completed.")
############################################################
print("Finished.")
############################################################
############################################################
############################################################
######################## DOW JONES #########################
############################################################
############################################################
############################################################
# Fetch one day's tweets (since 2019-04-16, until 2019-04-17) for each Dow
# Jones company, querying both company names and cashtags. For every query
# the script:
#   1. keeps the result in the global environment as "<name>_tweets"
#      (via assign(), so downstream code that references e.g. `alcoa_tweets`
#      keeps working),
#   2. saves it to "<name>_tweets_new_190416.rds" (one irregular filename is
#      carried over via an explicit `file` override),
#   3. prints the original progress message verbatim, and
#   4. sleeps 250 s before the next query to respect Twitter rate limits
#      (no sleep after the last one, matching the original script).
#
# BUGFIX(review): the original unrolled script saved the "amex" query from a
# stale object `x_tweets` instead of `amex_tweets`; the loop below always
# saves the object that was just fetched.
#
# One entry per query: q = search term, name = variable/file prefix,
# label = exact progress message (omitted for the final query, which the
# original acknowledged only with banner comments), file = optional
# filename override.
dow_queries <- list(
  list(q = "alcoa",               name = "alcoa",               label = "Done with Alcoa! 23/87 completed."),
  list(q = "$aa",                 name = "aa",                  label = "Done with $AA! 24/87 completed."),
  list(q = "altria",              name = "altria",              label = "Done with Altria! 25/87 completed"),
  list(q = "$mo",                 name = "mo",                  label = "Done with $MO! 26/87 completed."),
  list(q = "$aig",                name = "aig",                 label = "Done with $AIG! 27/87 completed."),
  list(q = "aig",                 name = "aig2",                label = "Done with AIG! 28/87 completed."),
  list(q = "americanexpress",     name = "americanexpress",     label = "Done with americanexpress! 29/87 completed."),
  list(q = "amex",                name = "amex",                label = "Done with Amex! 30/87 completed."),
  list(q = "$AXP",                name = "axp",                 label = "Done with $AXP! 31/87 completed."),
  list(q = "boeing",              name = "boeing",              label = "Done with Boeing! 32/87 completed."),
  list(q = "$ba",                 name = "ba",                  label = "Done with $BA! 33/87 completed."),
  list(q = "caterpillar",         name = "caterpillar",         label = "Done with Caterpillar! 34/87 completed."),
  list(q = "$cat",                name = "cat",                 label = "Done with $CAT! 35/87 completed."),
  list(q = "citigroup",           name = "citigroup",           label = "Done with Citigroup! 36/87 completed."),
  list(q = "$c",                  name = "c",                   label = "Done with $C! 37/87 completed."),
  list(q = "citi",                name = "citi",                label = "Done with Citi! 38/87 completed."),
  list(q = "cocacola",            name = "cocacola",            label = "Done with Cocacola! 39/87 completed."),
  list(q = "$ko",                 name = "ko",                  label = "Done with $KO! 40/87 completed."),
  list(q = "coca-cola",           name = "coca_cola",           label = "Done with Coca-Cola! 41/87 completed."),
  list(q = "dupont",              name = "dupont",              label = "Done with Dupont! 42/87 completed."),
  list(q = "$dd",                 name = "dd",                  label = "Done with $DD! 43/87 completed."),
  list(q = "exxon",               name = "exxon",               label = "Done with Exxon! 44/87 completed."),
  list(q = "$xom",                name = "xom",                 label = "Done with $XOM! 45/87 completed."),
  list(q = "general,electric",    name = "general_electric",    label = "Done with General Electric! 46/87 completed."),
  list(q = "generalelectric",     name = "generalelectric",     label = "Done with Generalelectric! 47/87 completed."),
  list(q = "$GE",                 name = "ge",                  label = "Done with $GE! 48/87 completed."),
  list(q = "$GM",                 name = "gm",                  label = "Done with $GM! 49/87 completed."),
  list(q = "general,motors",      name = "general_motors",      label = "Done with general_motors! 50/87 completed."),
  list(q = "generalmotors",       name = "generalmotors",       label = "Done with generalmotors! 51/87 completed."),
  list(q = "$HPQ",                name = "hpq",                 label = "Done with #hpq! 52/87 completed."),
  list(q = "hewlett-packard",     name = "hewlett_Packard",     label = "Done with Hewlett-Packard! 53/87 completed."),
  list(q = "hewlett,packard",     name = "hewlettpackard",      label = "Done with hewlettpackard! 54/87 completed."),
  list(q = "homedepot",           name = "homedepot",           label = "Done with Homedepot! 55/87 completed."),
  list(q = "home,depot",          name = "home_depot",          label = "Done with Home,depot! 56/87 completed"),
  list(q = "$HD",                 name = "hd",                  label = "Done with $HD! 57/87 completed."),
  list(q = "honeywell",           name = "honeywell",           label = "Done with Honeywell! 58/87 completed."),
  list(q = "$HON",                name = "hon",                 label = "Done with $HON! 59/87 completed."),
  list(q = "$intc",               name = "intc",                label = "Done with $INTC! 60/87 completed."),
  list(q = "intel",               name = "intel",               label = "Done with Intel! 61/87 completed."),
  list(q = "$ibm",                name = "ibm",                 label = "Done with $IBM! 62/87 completed."),
  list(q = "$jpm",                name = "jpm",                 label = "Done with $JPM! 63/87 completed."),
  list(q = "jpmorgan",            name = "jpmorgan",            label = "Done with JPMorgan! 64/87 completed."),
  list(q = "$jnj",                name = "jnj",                 label = "Done with $JNJ! 65/87 completed."),
  list(q = "johnsonandjohnson",   name = "johnsonandjohnson",   label = "Done with JohnsonandJohnson! 66/87 completed."),
  list(q = "johnson,&,johnson",   name = "johnsonandjohnson2",  label = "Done with johnsonandjohnson2! 67/87 completed."),
  list(q = "$mcd",                name = "mcd",                 label = "Done with mcd! 68/87 completed."),
  list(q = "mcdonalds",           name = "mcdonalds",           label = "Done with McDonalds! 69/87 completed."),
  list(q = "$mrk",                name = "mrk",                 label = "Done with $MRK! 70/87 completed."),
  list(q = "merck,&,co",          name = "merck",               label = "Done with Merck! 71/87 completed."),
  list(q = "$msft",               name = "msft",                label = "Done with $MSFT! 72/87 completed."),
  list(q = "microsoft",           name = "microsoft",           label = "Done with Microsoft! 73/87 completed."),
  list(q = "pfizer",              name = "pfizer",              label = "Done with Pfizer! 74/87 completed."),
  list(q = "$pfe",                name = "pfe",                 label = "Done with $PFE! 75/87 completed."),
  list(q = "Procter,&,Gamble",    name = "procter_and_Gamble",
       label = "Done with Procter & Gamble! 76/87 completed.",
       file  = "Procter_and_Gamble_tweets_new_190416.rds"),
  list(q = "Procter,Gamble",      name = "procter_Gamble",      label = "Done with Procter Gamble! 77/87 completed."),
  list(q = "$pg",                 name = "pg",                  label = "Done with $PG! 78/87 completed."),
  list(q = "united,technologies", name = "united_technologies", label = "Done with United_Technologies! 79/87 completed."),
  list(q = "unitedtechnologies",  name = "unitedtechnologies",  label = "Done with unitedtechnologies! 80/87 completed."),
  list(q = "$utx",                name = "utx",                 label = "Done with $UTX! 81/87 completed."),
  list(q = "$VZ",                 name = "vz",                  label = "Done with $VZ! 82/87 completed."),
  list(q = "walmart",             name = "walmart",             label = "Done with Walmart! 83/87 completed."),
  list(q = "$wmt",                name = "wmt",                 label = "Done with $WMT! 84/87 completed."),
  list(q = "$dis",                name = "dis",                 label = "Done with $DIS! 85/87 completed."),
  list(q = "disney",              name = "disney",              label = "Done with Disney! 86/87 completed."),
  list(q = "walt,disney,company", name = "waltdisneycompany")
)

for (i in seq_along(dow_queries)) {
  spec <- dow_queries[[i]]
  tweets <- search_tweets(
    spec$q, n = 4500, include_rts = FALSE,
    since = "2019-04-16",
    until = "2019-04-17"
  )
  # Keep the per-company global variable the original script created.
  assign(paste0(spec$name, "_tweets"), tweets)
  out_file <- if (is.null(spec$file)) {
    paste0(spec$name, "_tweets_new_190416.rds")
  } else {
    spec$file
  }
  saveRDS(tweets, file = out_file)
  if (!is.null(spec$label)) {
    print(spec$label)
  }
  # Rate-limit cool-down between queries; the original ends right after the
  # final save, so skip the sleep on the last iteration.
  if (i < length(dow_queries)) {
    start_time <- Sys.time()
    Sys.sleep(250)
    end_time <- Sys.time()
    print(end_time - start_time)
  }
}
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
### Done with Walt Disney Company! 87/87 completed. ###
#######################################################
#######################################################
############ Dow Jones Index finished. ################
#######################################################
#######################################################
#######################################################
|
8e9eba4c5a8d203ad215e2bc60e2e459fa6f4c80
|
361954cc1036c8e77f6410e5c63955260375f071
|
/man/auto_layout.Rd
|
082ddb4fb58e5f604624b095ffdd2d204259bd1a
|
[] |
no_license
|
HemingNM/ENMwizard
|
a4d8f883560e0a5d34c12507489d51e057640f30
|
b8f30a1e7c255ce43c2f45541e418f06879dbc74
|
refs/heads/master
| 2023-06-21T20:25:35.622227
| 2023-06-12T12:36:24
| 2023-06-12T12:36:24
| 104,896,526
| 17
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,430
|
rd
|
auto_layout.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/7.f.plot.mdl.sel.crit.diff.R
\name{auto_layout}
\alias{auto_layout}
\title{Automatically select the layout.}
\usage{
auto_layout(n, layout = T)
}
\arguments{
\item{n}{the number of plots}
\item{layout}{should the function return a preallocated layout object? If \code{FALSE}, it returns a matrix}
}
\value{
either a matrix or a layout object
}
\description{
Determine the arrangement of multiple plots in a single panel
Given a particular number of plots, \code{auto_layout} will automatically determine the arrangement of each
plot using the \code{layout} function or par(mfrow=c(nrow, ncol)). See examples.
modified from: https://github.com/cran/fifer/blob/master/R/auto.layout.R
}
\examples{
\dontrun{
## plot six plots
auto_layout(6)
for (i in 1:6){
plot(rnorm(100), rnorm(100))
}
## same as par(mfrow=c(3,2))
par(mfrow=c(3,2))
for (i in 1:6){
plot(rnorm(100), rnorm(100))
}
## default for odd number of plots using mfrow looks terrible
par(mfrow=c(3,2))
for (i in 1:5){
plot(rnorm(100), rnorm(100))
}
## much better with auto_layout
auto_layout(5)
for (i in 1:5){
plot(rnorm(100), rnorm(100))
}
## see matrices of layouts for multiple plots
##
for(i in 2:6){
m <- auto_layout(i, layout=F)
par(mfrow=c(nrow(m), ncol(m)))
for (j in 1:i){
plot(rnorm(100), rnorm(100))
}
Sys.sleep(1)
}
}
}
\author{
Dustin Fife
}
\keyword{internal}
|
4a9eeb0caf5be5f8b19aaeff68e10d78e0fccd9d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bmrm/examples/hclust.fca.Rd.R
|
3a23da0f0b6f4848f0c3c3aa31e6679e284430f1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 371
|
r
|
hclust.fca.Rd.R
|
# Example for bmrm::hclust.fca -- first common ancestor of every pair of
# leaves in an hclust tree.
library(bmrm)
# Complete-linkage hierarchical clustering of the USArrests data set.
hc <- hclust(dist(USArrests), "complete")
plot(hc)
# Merge-step index of the first common ancestor for each leaf pair, and the
# dendrogram height at which each pair is joined.
A <- outer(seq_along(hc$order), seq_along(hc$order), hclust.fca, hc = hc)
H <- array(hc$height[A], dim(A))
# Heatmaps in dendrogram leaf order: merge heights, then ancestor indices.
image(H[hc$order, hc$order])
image(A[hc$order, hc$order])
|
784750e637740b68172512d6137f54227edecbb3
|
defc646bb56990743469ae0bffa9cc99252c4b8f
|
/man/chooseColors.Rd
|
962a5fd949a8a8d91927c0f9e2555bca767b02da
|
[] |
no_license
|
feiyoung/PRECAST
|
b1abe112a9d9df173f1e7e9259e92c19a33ea05b
|
b866f4b940e5815b2f85e8a8f8b2d0824809db39
|
refs/heads/main
| 2023-06-08T01:01:41.235579
| 2023-06-05T02:10:15
| 2023-06-05T02:10:15
| 500,674,213
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 769
|
rd
|
chooseColors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{chooseColors}
\alias{chooseColors}
\title{Choose color schema from a palette}
\usage{
chooseColors(
palettes_name = c("Nature 10", "Light 13", "Classic 20", "Blink 23", "Hue n"),
n_colors = 7,
alpha = 1,
plot_colors = FALSE
)
}
\arguments{
\item{palettes_name}{a string, the palette name, one of "Nature 10", "Light 13", "Classic 20", "Blink 23" and "Hue n", default as 'Nature 10'.}
\item{n_colors}{a positive integer, the number of colors.}
\item{alpha}{a positive real, the transparency of the color.}
\item{plot_colors}{a logical value, whether plot the selected colors.}
}
\description{
Choose color schema from a palette
}
\examples{
chooseColors()
}
|
228a493b50b668729b051e1ab88c735d11bac042
|
37750963164b3f1cc706e14dda03a6e28e2d7cf3
|
/eggAnalysis.R
|
8954a8b2d46e081c0f85c7ac23395de95ba8e9f6
|
[] |
no_license
|
zhuzhi/r_idea
|
b4ac53517c6f6f1a835def89b0a6ba9b491fc2e9
|
9aed42f5277101af34c754648823a609b5280c06
|
refs/heads/master
| 2020-04-15T12:47:50.024633
| 2016-12-18T15:09:52
| 2016-12-18T15:09:52
| 64,978,940
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 106
|
r
|
eggAnalysis.R
|
install.packages("eegAnalysis")
library("eegAnalysis")
sim$data
sim$classes.Id
sim$rec.Id
sim$n.signals
|
1c3351b5a393fa4abe709d4802e6bd43e9d391d3
|
5db2dac679963587ac50ad850ea3a2ccb508465a
|
/data-raw/lotr_study.R
|
aa094544e799013ca8798b095d1d6d1a70e36a8b
|
[
"MIT"
] |
permissive
|
softloud/simeta
|
be88fe336eeee9610086823adce839493781c0ef
|
2a7e979077c57812a7d29c3e23e8c00080e1cb03
|
refs/heads/master
| 2023-04-16T23:27:16.936986
| 2023-03-25T11:49:23
| 2023-03-25T11:49:23
| 200,359,586
| 2
| 2
|
NOASSERTION
| 2020-01-28T09:55:16
| 2019-08-03T09:56:12
|
HTML
|
UTF-8
|
R
| false
| false
| 461
|
r
|
lotr_study.R
|
# Build the `lotr_study` package dataset: fake study identifiers of the form
# "<character>_<year>" from Lord of the Rings character names.
# Data source: https://www.kaggle.com/paultimothymooney/lord-of-the-rings-data/data
library(tidyverse)
# FIX(review): the original sampled years without a seed, so every run of
# this data-raw script regenerated different packaged data. Seed the RNG so
# the dataset build is reproducible.
set.seed(1)
lotr_names <-
  read_csv("data-raw/lotr_characters.csv") %>%
  pluck("name")
lotr_study <-
  tibble(
    study = lotr_names,
    # One pseudo publication year per character, drawn with replacement.
    year = seq(1950, 2020) %>% sample(size = length(lotr_names), replace = TRUE)
  ) %>%
  # Keep only short names so the combined identifier stays compact.
  dplyr::filter(str_length(study) < 8) %>%
  mutate(study_year = paste(study, year, sep = "_")) %>%
  pluck("study_year")
usethis::use_data(lotr_study)
|
89876322f32d7adb6ba7eabe31547dab1920901a
|
92a3de2eaa4a99aa389221059afd037d98a36f7e
|
/covid_interactome/covid_interactome/server.R
|
dde0ba92952315d8988c0a41e76c63594010769d
|
[] |
no_license
|
ictr/shiny_docker
|
8f0f3e5b352c822f196968d1c002478811d35ec2
|
15cdf325472bbda77836c3f3544d5791af0684fb
|
refs/heads/master
| 2022-04-17T13:29:33.648458
| 2020-04-16T07:45:24
| 2020-04-16T07:45:24
| 256,038,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 590
|
r
|
server.R
|
# Shiny server for the COVID interactome viewer.
library(shiny)
library(igraph)
# Loads precomputed objects into the session; `subs` is read below and is
# presumably a named list of igraph subgraphs keyed by virus -- TODO confirm
# against the contents of data.RData.
load('/srv/shiny-server/covid_interactome/data/data.RData')
# We tweak the "am" field to have nicer factor labels. Since this doesn't
# rely on any user inputs we can do this once at startup and then use the
# value throughout the lifetime of the application
# Define server logic required to plot various variables against mpg
shinyServer(function(input, output) {
    # Echo the selected virus name back to the UI.
    output$virus<-renderText({input$virus})
    #plot(gr,vertex.frame.color=NA,edge.color="black")
    # Render the interactome subgraph for the currently selected virus.
    output$aPlot <- renderPlot({
      #x<-virusToPlot()
      plot(subs[[input$virus]])
    })
})
|
46f7861fb072ce4c71a34326602e343e32f0087b
|
ed633d145dfa8b32511b3cb13ba95b822e2559c8
|
/man/comp.cyl.Rd
|
fc9b36f5237c0ffba9a0d1a52a0f0f205d991793
|
[] |
no_license
|
wendellopes/rvswf
|
51a09f034e330fbb7fd58816c3de2b7f7fdba9dc
|
ee243c3e57c711c3259a76051a88cc670dfe9c4b
|
refs/heads/master
| 2020-05-19T19:38:18.987560
| 2016-09-11T22:57:37
| 2016-09-11T22:57:37
| 19,242,694
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
rd
|
comp.cyl.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/comp.cyl.r
\name{comp.cyl}
\alias{comp.cyl}
\title{Compare results for Cylindrical Bessel Functions.}
\usage{
comp.cyl(nmax, x)
}
\arguments{
\item{nmax}{The order of the Cylindrical Bessel function.}

\item{x}{The argument of \eqn{J_n(x)}.}
}
\value{
Table comparing built-in \code{R} functions, \code{gsl} and \code{rvswf}.
}
\description{
Compare results for Cylindrical Bessel Functions.
}
\details{
Compare results using vswf, built in \code{R} and \code{gsl} algorithms.
}
\examples{
x<-5
nmax<-10
print(comp.cyl(5,3))
}
|
b1abd7fa144abb3bb7618c3a495b53b98d0c2776
|
40c8a75763ed26621ad8c61436c07bc1f4f93da8
|
/man/minBBoxSpatialPolygons.Rd
|
3dec86049228277fcad442d463b85dab5a59971a
|
[] |
no_license
|
RobinKohrs/runout.opt
|
af7fe30b48359bcdb67a7d4d5dee0b04ac74315b
|
4758ea2a23cf085847b3e55ee6ae4a83c0d7171a
|
refs/heads/main
| 2022-12-30T16:33:44.583191
| 2020-10-22T16:47:08
| 2020-10-22T16:47:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 469
|
rd
|
minBBoxSpatialPolygons.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runout_geometry.R
\name{minBBoxSpatialPolygons}
\alias{minBBoxSpatialPolygons}
\title{Minimum area bounding box for spatial polgyons}
\usage{
minBBoxSpatialPolygons(spPolygons)
}
\arguments{
\item{spPolygons}{A SpatialPolygonsDataFrame}
}
\value{
A SpatialPolygonsDataFrame with corresponding bounding boxes
}
\description{
Determines min. area bounding box for a single or set of spatial polygons
}
|
9f9918a18ae982e24e1a92f975474a6788a2a8a4
|
c39217c8139268442ce0711a2e538ab4fc724cb9
|
/man/outliers_mahalanobis.Rd
|
e5b2ea0361893a1d096b3d0b0ca0db1c09b71a71
|
[
"MIT"
] |
permissive
|
mdelacre/Routliers
|
008638b50de30732fd2ae6d9fa639e09d658665b
|
e4dac8814ea53db87083ef59d468c05dc6cedeea
|
refs/heads/master
| 2021-06-05T07:18:01.907962
| 2020-10-09T10:12:18
| 2020-10-09T10:12:18
| 145,856,180
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,162
|
rd
|
outliers_mahalanobis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outliers_mahalanobis.R
\name{outliers_mahalanobis}
\alias{outliers_mahalanobis}
\title{mahalanobis function to detect outliers}
\usage{
outliers_mahalanobis(x, alpha, na.rm)
}
\arguments{
\item{x}{matrix of bivariate values from which we want to compute outliers}
\item{alpha}{nominal type I error probability (by default .01)}
\item{na.rm}{set whether Missing Values should be excluded (na.rm = TRUE) or not (na.rm = FALSE) - defaults to TRUE}
}
\value{
Returns Call, Max distance, number of outliers
}
\description{
Detecting multivariate outliers using the Mahalanobis distance
}
\examples{
#### Run outliers_mahalanobis
data(Attacks)
SOC <- rowMeans(Attacks[,c("soc1r","soc2r","soc3r","soc4","soc5","soc6","soc7r",
"soc8","soc9","soc10r","soc11","soc12","soc13")])
HSC <- rowMeans(Attacks[,22:46])
res <- outliers_mahalanobis(x = cbind(SOC,HSC), na.rm = TRUE)
# A list of elements can be extracted from the function,
# such as the position of outliers in the dataset
# and the coordinates of outliers
res$outliers_pos
res$outliers_val
}
\keyword{mahalanobis}
\keyword{outliers}
|
c9c9c8f1ef1a0c327f71030533af69d208153462
|
0add2348eeb978bbe1b910d391627ec37b39b4f8
|
/R/utilities.R
|
5f5af2e8324e951a104fb96b730328e05830f47a
|
[] |
no_license
|
mayerantoine/Rdhis2tracker
|
7df0ada90e2c481e26595cfd05a4de7538726819
|
7c788a8070a3fa403967611892217d0da7c47394
|
refs/heads/master
| 2020-03-19T02:38:06.377666
| 2018-10-25T00:12:01
| 2018-10-25T00:12:01
| 135,646,146
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,407
|
r
|
utilities.R
|
#' @title Execute a Call to the DHIS2 Web Api
#' @description Performs a GET request against either an absolute URL or a
#'   path relative to the configured "baseurl" option, and parses the JSON
#'   response body.
#' @param url_params An absolute URL (any string containing "http") or a
#'   path appended to \code{getOption("baseurl")}.
#' @return The parsed JSON content of the response.
query <- function(url_params) {
  # Absolute URLs pass through untouched; anything else is treated as a
  # path relative to the configured base URL.
  full_url <- if (grepl("http", url_params)) {
    url_params
  } else {
    paste0(getOption("baseurl"), url_params)
  }
  # TODO(review): consider try()/timeout handling for HTTP errors
  # (carried over from the original author's note).
  response <- GET(full_url)
  stop_for_status(response)
  if (http_type(response) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  content(response, type = "application/json")
}
#' @title login into the DHIS2 instances
#' @description Authenticates against a DHIS2 server, reading credentials
#'   either from a JSON secrets file or interactively from the console, and
#'   stores session settings ("baseurl", "secrets", "organisationUnit",
#'   "maxCacheAge") in \code{options()} for later use by other calls.
#' @param secrets Path to a JSON secrets file containing a \code{dhis}
#'   section (username, password, baseurl), or \code{NA} to prompt
#'   interactively.
#' @export
loadSecret <- function(secrets=NA) {
#Load from a file
#
#secrets <- read_json("secret2.json")
if (!is.na(secrets)) {
s <- read_json(secrets)
} else {
# Interactive fallback: prompt for credentials on the console.
s <- list(dhis = list())
s$dhis$username <- readline("Username: ")
s$dhis$password <- getPass::getPass()
s$dhis$baseurl <- readline("Server URL (ends with /): ")
}
# Persist connection settings as global options (side effect).
options("baseurl" = s$dhis$baseurl)
options("secrets" = secrets)
url <- URLencode(URL = paste0(getOption("baseurl"), "api/me"))
#Logging in here will give us a cookie to reuse
# we need to handle connection error from the server side
r <- GET(url , authenticate(s$dhis$username, s$dhis$password),
timeout(60))
r <- content(r, mime = "application/json")
#me <- fromJSON(r)
# Cache the logged-in user's organisation units from the /api/me response.
options("organisationUnit" = r$organisationUnits)
#Set the cache time out in days
options("maxCacheAge" = 7)
}
|
326dcca8c2aaabae67d00ba73f1eefbad7c4a7a5
|
78ca13976f1211a92fc9239eb5d060e8362520f9
|
/CAP.r
|
9b5b56a46127f50840c3a8fb6f23cce070f9b411
|
[] |
no_license
|
seandrummond/ml-football-betting
|
4b8cfc66dcf940d1fed77cc62471f3b2764566e6
|
b9e93f1b799d8fe0649a99a361095baa28f344c9
|
refs/heads/master
| 2020-03-31T00:40:19.141903
| 2018-10-08T20:50:57
| 2018-10-08T20:50:57
| 151,737,952
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,838
|
r
|
CAP.r
|
####################
# CAP LUT Generation
#
# Description:
# This file takes in the prepared data and built models and calculates the accuracy of each
# model when they model is above a certain level of confidence.
#
####################
###### Import Libraries #####
library(randomForest)
library(caret)
library(readr)
library("e1071")
##### Define Functions ######
main_football_file_reader <- function(x, path){
  # Read one league's match CSV from `path`.
  # The dataset label `x` has its "data" prefix stripped to recover the
  # CSV file name. An explicit column spec forces the numeric match
  # statistics to doubles.
  y <- sub("data", "", x)
  z <- read_csv(paste(path, "/", y, ".csv", sep = ""), col_types =
                  cols(
                    FTHG = col_double(),
                    FTAG = col_double(),  # fixed: was a duplicated FTHG entry, leaving FTAG unspecified
                    HTHG = col_double(),
                    HTAG = col_double(),
                    HS = col_double(),
                    AS = col_double(),
                    HST = col_double(),
                    AST = col_double(),
                    HC = col_double(),
                    AC = col_double(),
                    HF = col_double(),
                    AF = col_double(),
                    HY= col_double(),
                    AY= col_double(),
                    HR= col_double(),
                    AR= col_double()
                  )
  )
  return(z)
}
bet_split <- function(full_data){
#Function to split data set into different bets.
#Abbreviations:
#OU_X = Over/Under X Goals market
#BTS = Both Teams to Score
#FH/SH/FT = First Half/Second Half/Full Time
#FTR/HTR = Full Time Result/Half Time Result
#Inputs:
#full_data = DataFrame
#Outputs:
#data_list = List of DataFrames (one per betting market; outcome label in
#            column 1, shared feature columns after it)
#Derived Under/Over Columns
# Binary label: 1 if total goals exceed the threshold, else 0.
full_data$OU_05 <- ifelse(full_data$FTHG + full_data$FTAG > 0.5, 1, 0)
full_data$OU_15 <- ifelse(full_data$FTHG + full_data$FTAG > 1.5, 1, 0)
full_data$OU_25 <- ifelse(full_data$FTHG + full_data$FTAG > 2.5, 1, 0)
full_data$OU_35 <- ifelse(full_data$FTHG + full_data$FTAG > 3.5, 1, 0)
full_data$OU_45 <- ifelse(full_data$FTHG + full_data$FTAG > 4.5, 1, 0)
full_data$OU_55 <- ifelse(full_data$FTHG + full_data$FTAG > 5.5, 1, 0)
#BTS Columns
# NOTE(review): "a & b > 0" parses as (a != 0) & (b > 0) due to operator
# precedence; this matches the intended "both sides scored" only because
# goal counts are non-negative — confirm intent.
full_data$BTS_FT <- ifelse(full_data$FTHG & full_data$FTAG > 0, 1, 0)
full_data$BTS_FH <- ifelse(full_data$HTHG & full_data$HTAG > 0, 1, 0)
full_data$BTS_SH <- ifelse((full_data$FTHG - full_data$HTHG) & (full_data$FTAG - full_data$HTAG) > 0, 1, 0)
#Included Results
# Label column first, then feature columns selected by fixed position
# (25:117 and 120:129) — assumes a fixed column layout of the input.
FTR_data <- full_data[,c(7, 25:117, 120:129)]
HTR_data <- full_data[,c(10, 25:117,120:129)]
OU_05_data <- full_data[c(130, 25:117,120:129)]
OU_15_data <- full_data[c(131, 25:117,120:129)]
OU_25_data <- full_data[c(132, 25:117,120:129)]
OU_35_data <- full_data[c(133, 25:117,120:129)]
OU_45_data <- full_data[c(134, 25:117,120:129)]
OU_55_data <- full_data[c(135, 25:117,120:129)]
BTS_FT_data <- full_data[c(136, 25:117,120:129)]
BTS_FH_data <- full_data[c(137, 25:117,120:129)]
BTS_SH_data <- full_data[c(138, 25:117,120:129)]
list_names <- c("FTR", "HTR", "OU_05", "OU_15", "OU_25", "OU_35", "OU_45", "OU_55", "BTS_FT", "BTS_FH", "BTS_SH")
data_list = list(FTR_data, HTR_data, OU_05_data, OU_15_data, OU_25_data, OU_35_data, OU_45_data, OU_55_data, BTS_FT_data, BTS_FH_data, BTS_SH_data)
names(data_list) <- list_names
#Factorisation
# Convert each market's label column to a factor and coerce back to a
# plain data.frame (pull() suggests the inputs may be tibbles).
for (i in 1:length(data_list)){
data_list[[i]][,1] <- as.factor(pull(data_list[[i]][,1]))
data_list[[i]] <- data.frame(data_list[[i]])
}
return(data_list)
}
division_split <- function(data_set){
  # Divide the data set into one data frame per league, keyed by the
  # league's indicator column (value 1 marks membership).
  # Order: E0, E1, E2, E3, SP1, D1, I1, SC0, FRA
  #
  # Inputs:
  #  data_set = DataFrame with one 0/1 indicator column per league
  # Outputs:
  #  named list of DataFrames, each restricted to columns 1:95
  league_codes <- c("E0", "E1", "E2", "E3", "SP1", "D1", "I1", "SC0", "FRA")
  league_data_list <- lapply(league_codes, function(code) {
    member_rows <- which(data_set[[code]] == 1)
    data_set[member_rows, c(1:95)]
  })
  names(league_data_list) <- league_codes
  return(league_data_list)
}
gen_str <- function(x,y){
  # Build an x-element list whose elements are each a list of y empty
  # data frames (a [bet][league] results skeleton).
  inner <- replicate(y, data.frame(), simplify = FALSE)
  replicate(x, inner, simplify = FALSE)
}
three_way_split <- function(x){
  # Randomly partition the rows of x into three folds and return the
  # three (train, test) pairs for 3-fold cross-validation. Each pair is
  # list(train_set, test_set); one fold is held out as the test set in turn.
  total <- nrow(x)
  shuffled <- sample(total)
  cut1 <- round(total / 3)
  cut2 <- round(2 * total / 3)
  fold1 <- shuffled[1:cut1]
  fold2 <- shuffled[(cut1 + 1):cut2]
  fold3 <- shuffled[(cut2 + 1):total]
  splits <- list(
    list(x[c(fold1, fold2), ], x[fold3, ]),
    list(x[c(fold1, fold3), ], x[fold2, ]),
    list(x[c(fold2, fold3), ], x[fold1, ])
  )
  return(splits)
}
#Create Result Architecture
gen_str_results <- function(x,y,z){
  # Build an x-element list of y-element lists, each holding z empty
  # data frames (a [bet][league][cross-validation-fold] skeleton).
  cv_level <- replicate(z, data.frame(), simplify = FALSE)
  league_level <- replicate(y, cv_level, simplify = FALSE)
  replicate(x, league_level, simplify = FALSE)
}
#Create Result Architecture
gen_str_results_2 <- function(x,y,z){
  # Build the nested results skeleton [x][y][13 bets][k outcomes][z dfs]:
  # the innermost level holds z empty data frames (3-way cross-validation),
  # wrapped in k outcome slots per bet (k = 3 for result markets, 2 for
  # binary markets, 20 for the last two markets), replicated over y
  # leagues and x bets.
  cv_slots <- replicate(z, data.frame(), simplify = FALSE)
  outcomes <- function(k) replicate(k, cv_slots, simplify = FALSE)
  # 13 markets: 2 three-outcome, 9 two-outcome, 2 twenty-outcome.
  per_bet <- list(outcomes(3), outcomes(3),
                  outcomes(2), outcomes(2), outcomes(2), outcomes(2),
                  outcomes(2), outcomes(2), outcomes(2), outcomes(2),
                  outcomes(2),
                  outcomes(20), outcomes(20))
  per_league <- replicate(y, per_bet, simplify = FALSE)
  replicate(x, per_league, simplify = FALSE)
}
gen_str_results_3 <- function(x,y,z){
  # Build the nested results skeleton [13 bets][y leagues][z folds][k dfs]:
  # k empty data frames per cross-validation fold, where k = 3 for the
  # two result markets, 2 for the nine binary markets, and 20 for the
  # final two markets. (`x` is accepted for interface compatibility but,
  # as in the original, is not used.)
  dfs <- function(k) replicate(k, data.frame(), simplify = FALSE)
  nested <- function(block) {
    replicate(y, replicate(z, block, simplify = FALSE), simplify = FALSE)
  }
  three_way <- nested(dfs(3))
  two_way <- nested(dfs(2))
  twenty_way <- nested(dfs(20))
  list(three_way, three_way,
       two_way, two_way, two_way, two_way, two_way,
       two_way, two_way, two_way, two_way,
       twenty_way, twenty_way)
}
gen_str_results_4 <- function(x,y,z){
  # Build the nested results skeleton [13 bets][y leagues][k dfs]:
  # k empty data frames per league, where k = 3 for the two result
  # markets, 2 for the nine binary markets, and 20 for the final two
  # markets. (`x` and `z` are accepted for interface compatibility but,
  # as in the original, are not used.)
  dfs <- function(k) replicate(k, data.frame(), simplify = FALSE)
  per_league <- function(block) replicate(y, block, simplify = FALSE)
  three_way <- per_league(dfs(3))
  two_way <- per_league(dfs(2))
  twenty_way <- per_league(dfs(20))
  list(three_way, three_way,
       two_way, two_way, two_way, two_way, two_way,
       two_way, two_way, two_way, two_way,
       twenty_way, twenty_way)
}
##### Definitions #####
#Confidence Intervals
three_results_conf <- c(0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9)
two_results_conf <- c(0.533, 0.566, 0.6, 0.633, 0.666, 0.7, 0.733, 0.766, 0.8, 0.833, 0.866, 0.9)
league_names <- c("E0", "E1", "E2", "E3", "SP1", "D1", "I1", "SC0", "FRA")
bet_names <- c("FTR", "HTR", "OU_05", "OU_15", "OU_25", "OU_35", "OU_45", "OU_55", "BTS_FT", "BTS_FH", "BTS_SH")
confidence_intervals <- data.frame(three_results_conf,three_results_conf,two_results_conf,two_results_conf,two_results_conf,two_results_conf,two_results_conf,two_results_conf,two_results_conf,two_results_conf,two_results_conf)
#Results List
result_results <- c("A", "D", "H")
BTS_OU_result <- c(0,1)
result_outcomes <- list(result_results, result_results, BTS_OU_result, BTS_OU_result, BTS_OU_result, BTS_OU_result, BTS_OU_result, BTS_OU_result, BTS_OU_result,BTS_OU_result,BTS_OU_result)
##### Import Data ######
#Set Working Directory
setwd("C:/Users/Sean Drummond/Data/Clean")
#League Names
test <- c("EL1","EL2","EL3_n","EL4","ESP1","GER1","ITA1","SCT1","FRA1")
#Folder Code
folder_code <- "Main_Data"
#Import Data
data_list <- test %>% lapply(main_football_file_reader, folder_code)
### Rowbind Data and Introduce League Code ###
data_set <- data_list[[1]]
for (i in 2:length(data_list)){
data_set <- rbind(data_set, data_list[[i]])
}
n_t <- names(data_list[[1]])
for (i in 2:length(data_list)){
n_t <- append(n_t, names(data_list[[i]]))
}
data_set$E0 <- ifelse(data_set$Div == "E0", 1,0)
data_set$E1 <- ifelse(data_set$Div == "E1", 1,0)
data_set$E2 <- ifelse(data_set$Div == "E2", 1,0)
data_set$E3 <- ifelse(data_set$Div == "E3", 1,0)
data_set$SP1 <- ifelse(data_set$Div == "SP1", 1,0)
data_set$D1 <- ifelse(data_set$Div == "D1", 1,0)
data_set$I1 <- ifelse(data_set$Div == "I1", 1,0)
data_set$SC0 <- ifelse(data_set$Div == "SC0", 1,0)
data_set$FRA <- ifelse(data_set$Div == "F1", 1, 0)
### Generate Labels for each Bet and Separate Datasets ###
#Bet Split
bs_data_set <- bet_split(data_set)
#Division Split
for (i in 1:length(bs_data_set)){
bs_data_set[[i]] <- division_split(bs_data_set[[i]])
}
### Import Optimum Feature Vectors ###
n_bets <- 11
n_leagues <- 9
chosen_vars <- gen_str(n_bets, n_leagues)
models <- gen_str(n_bets, n_leagues)
for (i in 1:n_bets){
folder_name <- names(bs_data_set)[i]
for (j in 1:n_leagues){
league_name <- names(bs_data_set[[i]])[j]
chosen_vars[[i]][[j]] <- read.csv(paste("Latest_Models\\",folder_name,"\\",league_name,"_vars.csv",sep=""))
}
}
### Subset Data ###
fs_data_set <- gen_str(n_bets, n_leagues)
#Subset Dataset
for (i in 1:n_bets){
for (j in 1:n_leagues){
fs_data_set[[i]][[j]] <- bs_data_set[[i]][[j]][,1]
fs_data_set[[i]][[j]] <- cbind(fs_data_set[[i]][[j]],bs_data_set[[i]][[j]][,as.character(pull(chosen_vars[[i]][[j]]))])
colnames(fs_data_set[[i]][[j]])[1] <- names(bs_data_set[[i]][[j]][1])
}
}
#Set seed
set.seed(10)
##### CAP Generation #####
#Split each Dataset into 3 Train and Test Sets
for (i in 1:n_bets){
for (j in 1:n_leagues){
fs_data_set[[i]][[j]] <- three_way_split(fs_data_set[[i]][[j]])
}
}
#Generate Results Structures
n_cross <- 3
CAP_results <- gen_str_results_3(n_bets, n_leagues, n_cross)
CAP_mean_results <- gen_str_results_4(n_bets, n_leagues, n_cross)
#Predictions
#Each Type of Bet
#n_bets
for (i in 1:n_bets){
#Each League
#n_leagues
folder_name <- bet_names[i]
for (j in 1:2){
league_name <- league_names[j]
#Each Train/Test Set
for (r in 1:n_cross){
#Build model per set
model <- train(fs_data_set[[i]][[j]][[r]][[1]][,-1], fs_data_set[[i]][[j]][[r]][[1]][,1], method = "rf")
#Make predictions on set
prob_prediction <- predict(model, fs_data_set[[i]][[j]][[r]][[2]], type = "prob")
class_prediction <- predict(model, fs_data_set[[i]][[j]][[r]][[2]], type = "raw")
#For each result within the type of Bet
for (k in 1:length(result_outcomes[[i]])){
#For each confidence interval
n_conf <- c()
model_accuracy <- c()
for (m in 1:nrow(confidence_intervals)){
#Index of confidence resets
conf_index <- c()
#For each row in prob_prediction data frame
for (n in 1:nrow(prob_prediction)){
#For each column in prob_prediction data frame
for (p in 1:ncol(prob_prediction)){
#If prob prediction is greater than the confidence interval, record index
if (prob_prediction[n,p] > confidence_intervals[m,i]){
conf_index <- append(conf_index, n)
}
}
}
#Result index where prediction equals type of result
result_index <- which(class_prediction == result_outcomes[[i]][k])
#Unique values of conf_index
conf_index <- unique(conf_index)
#Intersection of Indexes
intersect_index <- intersect(result_index,conf_index)
#Number of values
n_g = length(unique(intersect_index))
n_conf[m] = n_g * 100/length(fs_data_set[[i]][[j]][[r]][[2]][,1] == result_outcomes[[i]][k])
model_accuracy[m] <- mean(fs_data_set[[i]][[j]][[r]][[2]][intersect_index,1] == class_prediction[intersect_index])
if (is.nan(model_accuracy[m])){
model_accuracy[m] <- 0
}
}
CAP_results[[i]][[j]][[r]][[k]] <- data.frame(confidence_intervals[,i], model_accuracy, n_conf)
}
}
CAP_mean_results[[i]][[j]] <- get_mean(CAP_results[[i]][[j]])
#Working Average Insertion
models_folder <- c("Latest_CAP_LUT")
if (j == 1){
dir.create(file.path(models_folder, folder_name))
}
for (k in 1:length(result_outcomes[[i]])){
write.csv(CAP_mean_results[[i]][[j]][[k]], file = paste("Latest_CAP_LUT\\",folder_name,"\\",folder_name,"_",league_name,"_",result_outcomes[[i]][k],"_CAP.csv",sep=""), row.names = FALSE)
}
}
}
|
4ab047e5c6fbef557bb298f58fc121f54a37736c
|
af84dbfbdca0ee1a354924881b6578c37a66efcf
|
/R-Program/Boxplot.R
|
2a8e25c0485d69680638546ae40e67465047fbb4
|
[
"Apache-2.0"
] |
permissive
|
Johnwei386/Warehouse
|
96db3b3b7c258b41688395942f766c2f4299aa56
|
77da078a176930c0107431b7a0ff7b01d6634ba7
|
refs/heads/master
| 2023-07-19T22:12:33.331111
| 2023-04-13T10:15:41
| 2023-04-13T10:15:41
| 95,307,122
| 3
| 0
|
Apache-2.0
| 2023-07-06T21:16:41
| 2017-06-24T15:48:20
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 839
|
r
|
Boxplot.R
|
# Create a boxplot in R.
# boxplot(x, data, notch, varwidth, names, main)
#   x        - a vector or a formula; used to split the data frame into groups
#   data     - the data frame
#   notch    - logical; set to TRUE to draw a notch
#   varwidth - logical; set to TRUE to draw box widths proportional to sample size
#   names    - group labels printed under each boxplot
#   main     - the chart title
#input <- mtcars[,c('mpg', 'cyl')] # extract selected columns from the built-in mtcars dataset
#print(head(input))
# Create the boxplot, writing it to a PNG file.
png(file = "boxplot.png")
# In the formula, the variable left of ~ is the dependent variable and the
# one on the right is the independent variable.
# Here mpg values are grouped by cyl: the mpg values sharing a cyl value
# form one box.
boxplot(mpg ~ cyl, data = mtcars, xlab = "Number of Cylinders",
ylab = "Miles Per Gallon", main = "Mileage Data")
dev.off()
|
5a578c93a92085cc9b91d16508841114587dba4c
|
f4105cb1aad7f9110478aa4253a748ee6b585c38
|
/R/PlotActivityByDemogGrp.R
|
5315e872ef59d4bb37891b4daedd00350aabc4e2
|
[] |
no_license
|
kmanlove/SheepBehavior
|
da8611fa81e2a5abfffca7bcf9ec5db696a7bcf2
|
bc54d918212393a9e5d6b0e27364381eef8d3d2e
|
refs/heads/master
| 2021-01-01T20:40:49.984218
| 2015-05-13T16:16:45
| 2015-05-13T16:16:45
| 31,034,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,157
|
r
|
PlotActivityByDemogGrp.R
|
PlotActivityByDemogGrp <- function(data.in) {
  # function builds boxplots of activity patterns by demographic group
  #
  # Args:
  #
  #  data.in = cleaned scans data; must contain Pop, IndividID, DemogGrp,
  #            JulianDate and ActivityScore columns
  #
  # Return:
  #
  #  boxplots (a 4 x 3 grid drawn on the active graphics device:
  #  rows = demographic groups, columns = May / June / July)
  #
  # Month windows by Julian date: May <= 152, June 153-182, July >= 183.
  # (The original computed unused lambs.bb*/lambs.aso* subsets and
  # assigned the may/june/july lamb subsets twice; both redundancies
  # are removed here — the plots are unchanged.)
  # Lambs (IndividID == "l") by month.
  may.lambs <- subset(data.in, IndividID == "l" & JulianDate <= 152)
  june.lambs <- subset(data.in, IndividID == "l" & JulianDate >= 153 & JulianDate <= 182)
  july.lambs <- subset(data.in, IndividID == "l" & JulianDate >= 183)
  # Yearlings by month.
  may.yr <- subset(data.in, DemogGrp == "Y" & JulianDate <= 152)
  june.yr <- subset(data.in, DemogGrp == "Y" & JulianDate >= 153 & JulianDate <= 182)
  july.yr <- subset(data.in, DemogGrp == "Y" & JulianDate >= 183)
  # Ewes with a lamb, by month.
  may.ewewithlamb <- subset(data.in, DemogGrp == "EweWithLamb" & JulianDate <= 152)
  june.ewewithlamb <- subset(data.in, DemogGrp == "EweWithLamb" & JulianDate >= 153 & JulianDate <= 182)
  july.ewewithlamb <- subset(data.in, DemogGrp == "EweWithLamb" & JulianDate >= 183)
  # Ewes without a lamb, by month.
  may.nolamb <- subset(data.in, DemogGrp == "NoLamb" & JulianDate <= 152)
  june.nolamb <- subset(data.in, DemogGrp == "NoLamb" & JulianDate >= 153 & JulianDate <= 182)
  july.nolamb <- subset(data.in, DemogGrp == "NoLamb" & JulianDate >= 183)
  # 4 x 3 panel grid; each panel compares populations within one
  # demographic group and month.
  par(mfrow = c(4, 3), oma = c(3, 2, 0, 0), mar = c(2, 5, 2, 2))
  boxplot(may.lambs$ActivityScore ~ may.lambs$Pop, col = c("grey70", "red"), main = "May", ylab = "Lamb Activity Scores")
  boxplot(june.lambs$ActivityScore ~ june.lambs$Pop, col = c("grey70", "red"), main = "June")
  boxplot(july.lambs$ActivityScore ~ july.lambs$Pop, col = c("grey70", "red"), main = "July")
  boxplot(may.yr$ActivityScore ~ may.yr$Pop, col = c("grey70", "red"), ylab = "Yearling Activity Scores")
  boxplot(june.yr$ActivityScore ~ june.yr$Pop, col = c("grey70", "red"))
  boxplot(july.yr$ActivityScore ~ july.yr$Pop, col = c("grey70", "red"))
  boxplot(may.nolamb$ActivityScore ~ may.nolamb$Pop, col = c("grey70", "red"), ylab = "Ewe-No-Lamb \n Activity Scores")
  boxplot(june.nolamb$ActivityScore ~ june.nolamb$Pop, col = c("grey70", "red"))
  boxplot(july.nolamb$ActivityScore ~ july.nolamb$Pop, col = c("grey70", "red"))
  boxplot(may.ewewithlamb$ActivityScore ~ may.ewewithlamb$Pop, col = c("grey70", "red"), ylab = "Ewe-With-Lamb \n Activity Scores")
  boxplot(june.ewewithlamb$ActivityScore ~ june.ewewithlamb$Pop, col = c("grey70", "red"))
  boxplot(july.ewewithlamb$ActivityScore ~ july.ewewithlamb$Pop, col = c("grey70", "red"))
}
|
c5590193a7b66357e1a8e94f8f34aa2d936bc39e
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/dexterMST/inst/testfiles/mutate_booklet_score/libFuzzer_mutate_booklet_score/mutate_booklet_score_valgrind_files/1612727160-test.R
|
58f5223ddeee08317a96eff40c72bba4cc0453a3
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,680
|
r
|
1612727160-test.R
|
testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = c(-1L, NA, -1L, -1L, -1L, -1L, -1L, 1258233921L, 16744494L, 0L, 255L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -246L, 11157162L, -2147287296L, 15461355L, -336860181L, -350214640L, -1L, -14595414L, 906002649L, -640034343L, -642205512L, 666417152L, 0L, 3328L, -643154262L, -1073561601L, -13947925L, 536870912L, 16773119L, 3145727L, -336887762L, 0L, 255L, -1L, -1L, -1L, -1L, -16711680L, 0L, 16777215L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -246L, 11157162L, -2147287296L, 15461355L, -336860181L, -352313306L, 285212671L, -57014L, -1439236352L, -2133206567L, -640034560L, 0L, 16L, -14286848L, 0L, 218159615L), item_score = NA_integer_, person_id = c(353703189L, 353703189L, 353703189L, 353703189L, 353703189L, 353703189L, 353703189L, 353703189L, 353703189L, 353763072L, 9744L, 791358251L, 1179010630L, 1179010630L, 1179003691L, 724249387L, 724249387L, 724249387L, 724249387L, 736829478L, 285212671L, -11862017L, -151587082L, -151584769L, -1L, -254L, 16777215L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, 805306114L, 2092559136L, -1L, -1L, -1L, -65536L, 2494511L, 721824767L, -1L, -1L, NA, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -16121686L, 1051364010L, 1051377666L, -15663175L, 167815742L, -1430256897L, 285194681L, 2092499200L, 0L, NA, 0L, 0L, 255L, 16776973L, -4193537L, 285194681L, -1179010561L, -15859713L, -62976L, -1438776577L, 60395L, -336909525L, 724256326L, 1179010630L, 1179010630L, 724249387L))
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result)
|
a1137f61f71dd0c8429c07337647261c46124c2c
|
847a367fb971e99f05334bc556d447b5d3634a68
|
/wikipedia.R
|
a8d0c81a8338981291735976258f378421dccedc
|
[] |
no_license
|
rodrigoestrellac/Web-Scraping-With-R
|
bc93f5364d2cc695dc9eb29b3a93aa57daed6521
|
dd48e315cd896841fc6357c75dff9a97c44f8c2d
|
refs/heads/master
| 2022-11-21T22:46:22.114006
| 2020-07-20T13:52:24
| 2020-07-20T13:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
wikipedia.R
|
library(tidyverse)
library(rvest)
# Get the data
# Scrape the per-country COVID-19 summary table (HTML element id
# "thetable") from Wikipedia and parse it into a tibble.
url <- "https://en.wikipedia.org/wiki/COVID-19_pandemic_by_country_and_territory"
html_data <- read_html(url)
df_raw <- html_data %>%
html_node("#thetable") %>%
html_table() %>%
as_tibble(.name_repair = "unique")
# Clean the data
df_clean <- df_raw %>%
# Keep location plus the three count columns, selected by position —
# fragile if Wikipedia changes the table layout.
select(location = 2, cases = 3, deaths = 4, recovered = 5) %>%
# Drop the last two rows — presumably footer/summary rows; verify
# against the live page layout.
slice(-((n()-1):n())) %>%
# Strip bracketed footnote markers (e.g. "[a]") from location names.
mutate(location = location %>% str_remove_all("\\[.+")) %>%
# Remove thousands separators and coerce the counts to integers.
mutate_at(c("cases", "deaths", "recovered"), function(x){
x %>% str_remove_all(",") %>% as.integer()
})
|
639d2472b55bbc4964ba0553cae7125bc9d9e5a5
|
44039e6fa92f04c587839174ce79797cf4ca575d
|
/R/gage_labels.R
|
5f58809e3fec166cf6b660df28aa19589365820f
|
[
"CC0-1.0"
] |
permissive
|
mpdougherty/razviz
|
58ee97162d9c0360a9b50060f83131a7508cce3d
|
a4edd0b9fe89707ba29c1bb4d767d8011bc5c846
|
refs/heads/master
| 2023-04-04T20:11:30.106910
| 2021-03-26T19:22:08
| 2021-03-26T19:22:08
| 273,563,278
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,857
|
r
|
gage_labels.R
|
#' @title Create Gage Labels
#'
#' @description Create a data frame of gage labels used by the longitudinal
#' profile plot to symbolize stage elevations.
#'
#' @export
#' @param gage_locations data frame; A data frame of gage locations.
#' @param stage_interval_label numeric; The number of feet between gage stage
#' labels. Used to identify the location of gage
#' labels.
#'
#' @return A data frame of gage labels used by the `razviz` longitudinal profile
#' plot to label gage boxes.
#'
gage_labels <- function(gage_locations, stage_interval_label) {
  # Build the data frame of gage stage labels used by the longitudinal
  # profile plot: one row per (gage, stage level), with the label's
  # elevation = gage elevation + stage.
  #
  # Args:
  #   gage_locations       - data frame with name, river_mile, elevation,
  #                          min_stage and max_stage columns
  #   stage_interval_label - feet between successive stage labels
  #
  # Returns: data frame with columns name, river_mile, elevation, stage.
  #
  # A zero-row template keeps the empty-input contract of the original
  # (no gages -> empty data frame with the expected columns).
  rows <- list(data.frame(name = character(),
                          river_mile = double(),
                          elevation = double(),
                          stage = double()))
  for (i in gage_locations$name) {
    gage <- gage_locations[gage_locations$name == i, ]
    # Round the gage's stage range inward to the nearest label interval.
    min_stage <- ceiling(gage$min_stage / stage_interval_label) * stage_interval_label
    max_stage <- floor(gage$max_stage / stage_interval_label) * stage_interval_label
    # NOTE(review): as in the original, seq() errors if min_stage ends up
    # above max_stage after rounding — confirm inputs guarantee this.
    for (j in seq(min_stage, max_stage, by = stage_interval_label)) {
      # Collect rows in a list and bind once at the end instead of
      # growing the data frame with rbind() inside the loop (O(n^2)).
      rows[[length(rows) + 1]] <- data.frame(name = gage$name,
                                             river_mile = gage$river_mile,
                                             elevation = gage$elevation + j,
                                             stage = j)
    }
  }
  return(do.call(rbind, rows))
}
|
d872d749d3c17a2ca9874fc53f152bc80957c87d
|
925ac2cdee50f0a8858c1a818e0d6a983d7e63f8
|
/cachematrix.R
|
d52fd2c3e7cf79865e2ed2acba2445d9af80d982
|
[] |
no_license
|
DataScientist2016/ProgrammingAssignment2
|
b91ce35076d77bacf71b815f2a4d7bdb534bb228
|
b0325210e729623021eafc0c2f325f443da66294
|
refs/heads/master
| 2021-01-18T01:52:16.201506
| 2016-04-14T14:09:47
| 2016-04-14T14:09:47
| 56,234,573
| 0
| 0
| null | 2016-04-14T12:13:23
| 2016-04-14T12:13:22
| null |
UTF-8
|
R
| false
| false
| 1,225
|
r
|
cachematrix.R
|
##makeCacheMatrix: This function creates a special
##"matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Cache container for a matrix and its (lazily supplied) inverse.
  ## Returns a list of accessors: set/get for the matrix itself and
  ## setinverse/getinverse for the cached inverse. The cached inverse
  ## starts NULL and is invalidated whenever the matrix is replaced.
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # matrix changed: drop the stale inverse
    },
    get = function() x,
    setinverse = function(solve) cached_inverse <<- solve,
    getinverse = function() cached_inverse
  )
}
##cacheSolve: This function computes the inverse
##of the special "matrix" returned by makeCacheMatrix
##above. If the inverse has already been calculated
##(and the matrix has not changed), then the cachesolve
##should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" created by
  ## makeCacheMatrix: serve the cached inverse when available (announced
  ## via a message), otherwise compute it with solve(), store it in the
  ## cache, and return it. Extra arguments are forwarded to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
##Example:
##g <- matrix(1:4,2,2)
##g
##[,1] [,2]
##[1,] 1 3
##[2,] 2 4
##CachedMarix <- makeCacheMatrix(g)
##cacheSolve(CachedMarix)
##[,1] [,2]
##[1,] -2 1.5
##[2,] 1 -0.5
|
e859b733be82b374689e7cee5f8359db2af9351b
|
fae88e7ffde9e6915b8dc3693761d077d7a84545
|
/R/bmi3.R
|
610d06071bd9e71883a1b31fc795108444ce60be
|
[] |
no_license
|
William2025/MATH4753lu0010
|
436c085ecd758e695e28c582c414286e1b518534
|
32078ada724cb1dac8a2bdbd5630d2e849e248d4
|
refs/heads/master
| 2023-02-28T20:55:46.058145
| 2021-01-29T17:22:18
| 2021-01-29T17:22:18
| 334,214,221
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
bmi3.R
|
#' Create BMI Groups
#'
#' More detailed description.
#'
#' @param x Numeric vector.
#'
#' @return Factor variable
#'
#' @examples
#' bmi.vals <- rnorm(n = 20, mean = 25, sd = 3)
#' bmi3(bmi.vals)
#'
#' @export
bmi3 <- function(x) {
  # Bin BMI values into three left-closed groups:
  # [0, 25), [25, 30) and [30, Inf). Returns a factor with those levels;
  # values outside [0, Inf) become NA, as with cut().
  cut_points <- c(0, 25, 30, Inf)
  cut(x, breaks = cut_points, right = FALSE)
}
|
75ad8e345f08fbda613b5dc810351d1572f74d59
|
3162c50b248d9dbb3c210db68d209a916cdc5a56
|
/scripts/processing/gyrb/merge_amplicon_metagenomic_datasets.R
|
dd1b632ccba53e48eb16fea752e077a84a9ff67a
|
[
"MIT"
] |
permissive
|
ahnishida/captive_ape_microbiome
|
b68f818c20df396180e960b1a23e6b0c3daef0ab
|
afa2310dfd53aa94526aedf3d3bc00f9852e7921
|
refs/heads/master
| 2023-04-17T17:20:47.683032
| 2021-06-08T21:21:50
| 2021-06-08T21:21:50
| 287,132,087
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,976
|
r
|
merge_amplicon_metagenomic_datasets.R
|
library(dada2); packageVersion("dada2")
library(phyloseq)
library(DECIPHER)
library(tidyverse)
library(phytools)
library(seqinr)
library(stringr)
library("genefilter")
library(zoo)
# NOTE(review): setwd() with a machine-specific absolute path makes this
# script non-portable; prefer running from the project root.
setwd('/Volumes/AHN/captive_ape_microbiome')
# This script merges the gyrB amplicon datasets, filters out reads that are
# not assigned, merges the filtered ASVs with the gtdbtk reference fasta and
# uses them to blastn-query the metagenomic datasets.
#load assign taxonomy script
source('scripts/processing/gyrb/idTaxa.R')
#define inputs/outputs
gyrb_ref_faa = 'ref_seqs/gtdbtk_gyrb.faa'
processing_folder='results/gyrb/processing/gyrb_amp_meta_datasets'
dir.create(processing_folder, recursive=TRUE)
#import data from separate gyrb amplicon runs and human metagenomic data
print('import gyrb_moeller_wild dataset')
gyrb_moeller_wild_path <- "results/gyrb/processing/gyrb_moeller_wild/DADA2"
gyrb_moeller_wild.seqtab.nochim <- readRDS(file.path(gyrb_moeller_wild_path,"seqtab.nochim.rds")) #Opens ASV table
# Printing the rownames is an interactive sanity check of the sample IDs.
rownames(gyrb_moeller_wild.seqtab.nochim )
print('import gyrb_nishida_captive_wild dataset')
gyrb_nishida_captive_wild_path <- "results/gyrb/processing/gyrb_nishida_captive_wild/DADA2"
gyrb_nishida_captive_wild.seqtab.nochim <- readRDS(file.path(gyrb_nishida_captive_wild_path,"seqtab.nochim.rds")) #Opens ASV table
rownames(gyrb_nishida_captive_wild.seqtab.nochim)
print('import gyrb_nishida_projectchimps dataset')
gyrb_nishida_projectchimps_path <- "results/gyrb/processing/gyrb_nishida_projectchimps/DADA2"
gyrb_nishida_projectchimps.seqtab.nochim <- readRDS(file.path(gyrb_nishida_projectchimps_path,"seqtab.nochim.rds")) #Opens ASV table
rownames(gyrb_nishida_projectchimps.seqtab.nochim )
print("import human_metagenomic_data")
metagenomic_path <- "results/gyrb/processing/metagenomic_samples/final_outputs/gyrb_metagenomic_seqtab.txt"
# NOTE(review): header=T / check.names=F -- prefer spelling out TRUE/FALSE.
metagenomic_human_reads <- as.matrix(read.table(metagenomic_path,header=T,
row.names=1, check.names=F, sep="\t")) #Opens ASV table
dim(metagenomic_human_reads)
#merge ASV tables into one samples-by-sequences table
seqtab <- mergeSequenceTables(gyrb_moeller_wild.seqtab.nochim,
gyrb_nishida_captive_wild.seqtab.nochim,
gyrb_nishida_projectchimps.seqtab.nochim,
metagenomic_human_reads)
# Persist the merged table so later runs can resume from this point.
saveRDS(seqtab,file.path(processing_folder,'seqtab.rds'))
seqtab = readRDS(file.path(processing_folder,'seqtab.rds'))
# giving our seq headers more manageable names (ASV_1, ASV_2...)
asv_seqs <- colnames(seqtab)
asv_headers <- vector(dim(seqtab)[2], mode="character")
# (seq_len(ncol(seqtab)) would be safer than 1:dim(seqtab)[2] if 0 columns)
for (i in 1:dim(seqtab)[2]) {
asv_headers[i] <- paste(">ASV", i, sep="_")
}
# making and writing out a fasta of ASV seqs: interleave ">ASV_i" headers
# with their sequences.
asv_fasta <- c(rbind(asv_headers, asv_seqs))
write(asv_fasta, file.path(processing_folder,"ASVs_all.fasta"))
rownames(seqtab)
# count table: transpose so ASVs are rows and samples are columns
asv_tab <- t(seqtab)
row.names(asv_tab) <- sub(">", "", asv_headers)
# metadata
all_samples_metadata_file = "metadata/metadata_gyrb_amp_meta.txt"
metadata <- read.csv(all_samples_metadata_file,sep='\t')
metadata = metadata %>% #filter metadata based on whether samples are in OTU table
filter(X.SampleID %in% colnames(asv_tab)) %>%
column_to_rownames('X.SampleID')
setdiff(colnames(asv_tab),row.names(metadata)) #samples in otu table that aren't in metadata
#translate ASVs to prot seq and blast
asv_fasta <- readDNAStringSet(file.path(processing_folder,"ASVs_all.fasta"),format = 'fasta')
# start=2 drops the first base (frame shift of one) before translation.
asv_faa <- Biostrings::translate(DNAStringSet(asv_fasta,start =2)) #translate ASVs
Biostrings::writeXStringSet(asv_faa,file=file.path(processing_folder,"ASVs_all.faa"))
system(paste('./scripts/processing/gyrb/blastp_filter_ASVs.sh', #run blastp of ASVs
file.path(processing_folder,"ASVs_all.faa"),
gyrb_ref_faa,
file.path(processing_folder,"ASVs_all_blastp.txt"),sep=' '))
ASV_blastp_res <- read.table(file.path(processing_folder,"ASVs_all_blastp.txt"))
colnames(ASV_blastp_res) <- c('ASV','sacc','pident','qlen','length','evalue','bitscore','genome',
'salltitles','species','sseq','qseq','sstart','send')
#filter ASV based on alignment length and percent identity
# ASVs whose translation contains '*' hit a stop codon and are dropped below.
ASVs_w_stop_codons <- names(asv_faa[str_detect(asv_faa,'\\*')]) #identify asvs that when translated have stop codons
# Keep hits within +/-10% of 83 aa alignment length and with >80% identity.
ASVs_filtered_blast <- ASV_blastp_res %>%
filter(length>(83*.90)&length<(83*1.10)) %>%
filter(pident > 80) %>%
filter(!ASV %in% ASVs_w_stop_codons)
#there are a few outliers not aligning to the same region as the majority of ASVs
hist(ASVs_filtered_blast$sstart)
hist(ASVs_filtered_blast$send)
#filter ASVs based on where they hit to the alignment
ASVs_filtered_blast <-ASVs_filtered_blast %>%
filter(sstart<150) %>%
filter(send<220)
hist(ASVs_filtered_blast$sstart)
hist(ASVs_filtered_blast$send)
#assign taxonomy
TAX_table <- assign_taxonomy_w_idTAXA(file.path(processing_folder,"ASVs_all.faa"),gyrb_ref_faa)
data.frame(TAX_table) %>% group_by(Family) %>% tally()
# Family f__F082 is used as the outgroup when rooting the phylogeny later.
outgroup <- data.frame(TAX_table) %>% filter(Family == 'f__F082')
F082_ASVs <- rownames(outgroup)
# import into phyloseq (outer parentheses also print the object)
(ps <- phyloseq(otu_table(asv_tab, taxa_are_rows=TRUE),
sample_data(metadata),
tax_table(as.matrix(TAX_table))))
saveRDS(ps,file.path(processing_folder,'ps.rds'))
ps = readRDS(file.path(processing_folder,'ps.rds'))
ps_filtered <- prune_taxa(ASVs_filtered_blast$ASV,ps) #filter ASVs based on blastp results
ps_Bacteroidales <- subset_taxa(ps_filtered, Order == 'o__Bacteroidales') #filter ASVs based on taxonomy
# Drop samples left with zero Bacteroidales reads after the taxon filter.
samples_with_Bacteroidales <- sample_names(ps_Bacteroidales)[sample_sums(ps_Bacteroidales)>0]
ps_Bacteroidales <- prune_samples(samples_with_Bacteroidales, ps_Bacteroidales)
#write fasta of the retained ASVs (nucleotide and protein versions)
asv_fasta_filt <- asv_fasta[names(asv_fasta) %in% taxa_names(ps_Bacteroidales)]
Biostrings::writeXStringSet(asv_fasta_filt,file=file.path(processing_folder,"ASVs_filtered.fasta"))
asv_faa_filt <- asv_faa[names(asv_faa) %in% taxa_names(ps_Bacteroidales)]
Biostrings::writeXStringSet(asv_faa_filt,file=file.path(processing_folder,"ASVs_filtered.faa"))
#select ref seqs to include with ASVs
gyrb_ref_fasta = 'ref_seqs/gtdbtk_gyrb.fasta'
gyrb_ref <- readDNAStringSet(gyrb_ref_fasta,format = 'fasta')
bacteroidales_ref <- gyrb_ref[str_detect(names(gyrb_ref),'o__Bacteroidales')]
Biostrings::writeXStringSet(bacteroidales_ref ,file=file.path(processing_folder,"bacteroidales_ref.fasta"))
#phylogeny
dir.create(file.path(processing_folder,'phylogeny'), recursive=TRUE)
# Trim a multiple alignment to the region spanned by sequence "ASV_1" and
# drop sequences that cover too little of that region.
#
#   aln_in : path to a FASTA alignment containing a sequence named "ASV_1"
#   aln_out: path the trimmed/filtered alignment is written to
trimAlign <- function(aln_in,aln_out){
#trim alignment to start and end of ASVs
aln <- Biostrings::readDNAStringSet(aln_in)
# Aligned (gapped) ASV_1 and its ungapped version.
ASV1 <- DNAString(as.character(aln['ASV_1']))
ASV1_seq_no_gaps <- as.character(RemoveGaps(aln['ASV_1'],removeGaps = "all"))
# Locate the first/last 10 nt of ASV_1 inside the gapped sequence; those
# alignment columns define the trimming window applied to every sequence.
first10nt <- stringr::str_sub(ASV1_seq_no_gaps,1,10)
last10nt <- stringr::str_sub(ASV1_seq_no_gaps,-10,-1)
# NOTE(review): matchPattern() may return zero or several hits; start()/
# end() are assumed here to yield exactly one position each -- TODO confirm
# (a gap within the first or last 10 aligned bases would break the match).
s <- start(matchPattern(first10nt, ASV1))
e <-end(matchPattern(last10nt, ASV1))
alnTrim <- DNAStringSet(aln,start=s,end=e)
# Ungapped lengths of the trimmed sequences.
seqlengths = width(RemoveGaps(alnTrim,
removeGaps = "all",
processors = 1))
# Keep sequences longer than 95% of 250 nt, then drop alignment columns
# that are gaps in all remaining sequences.
alnTrimFilt <- alnTrim[seqlengths > 250*.95]
alnTrimFilt <- RemoveGaps(alnTrimFilt,
removeGaps = "common")
print(c(length(aln),'seqs in alignment',length(alnTrimFilt),'seqs in trimmed alignment'))
Biostrings::writeXStringSet(alnTrimFilt,file=aln_out)
}
#ref taxa phylogeny: filtered ASVs plus the Bacteroidales reference seqs
# start=2 applies the same one-base frame shift used when translating the
# ASVs earlier in the script.
asv_ref <- c(bacteroidales_ref,DNAStringSet(asv_fasta_filt,start=2)) #combine gtdbtk ref gyrb seqs with the filtered ASVs
Biostrings::writeXStringSet(asv_ref,file=file.path(processing_folder,'phylogeny',"ASVs_filtered_ref.fasta")) #write ASVs with ref fasta
asv_ref_aln <- AlignTranslation(asv_ref) #align based on AA sequences
Biostrings::writeXStringSet(asv_ref_aln,file=file.path(processing_folder,'phylogeny',"ASVs_filtered_ref_aln.fasta")) #write alignment
trimAlign(file.path(processing_folder,'phylogeny','ASVs_filtered_ref_aln.fasta'), #trim alignment to where ASV1 starts and ends
file.path(processing_folder,'phylogeny',"ASVs_filtered_ref_aln_trim.fasta"))
# Tree inference shells out to a FastTree binary expected in the cwd.
system(paste0('./FastTree-2.1.9 -nt -gtr < ',
file.path(processing_folder,'phylogeny',"ASVs_filtered_ref_aln_trim.fasta"), #generate phylogeny using fasttree
' > ',
file.path(processing_folder,'phylogeny',"ASVs_filtered_ref_aln_trim.tre")))
asv_ref_tree <- ape::read.tree(file.path(processing_folder,'phylogeny',"ASVs_filtered_ref_aln_trim.tre")) #read in phylogeny
asv_tips <- asv_ref_tree$tip.label[str_detect(asv_ref_tree$tip.label,'ASV')] #find ASVs seqs
asv_ref_tree <- keep.tip(asv_ref_tree,asv_tips) #remove ref seqs
outgroup_mrca <- findMRCA(asv_ref_tree,F082_ASVs) #find outgroup
asv_ref_tree_rooted <- reroot(asv_ref_tree,outgroup_mrca) #reroot tree based on outgroup
# Round-trip through a temp file so sed can strip the trailing "Root" label
# from the Newick string before the final tree is saved.
ape::write.tree(asv_ref_tree_rooted,"tmp.tre")
system(paste0("sed 's/Root;/;/g' tmp.tre > ",
file.path(processing_folder,'phylogeny',"ASVs_filtered_ref_aln_trim_rooted.tre")))
system("rm tmp.tre")
#any differences in tree tips?
# NOTE(review): `asv_tree` is never defined anywhere in this script -- the
# setdiff() checks and the prune_taxa() call below will fail with
# "object 'asv_tree' not found". An ASV-only phylogeny section (the one
# producing "ASVs_filtered_aln_trim_rooted.tre", copied further down)
# appears to be missing from this file -- TODO restore it, or substitute
# asv_ref_tree here.
setdiff(asv_ref_tree$tip.label,asv_tree$tip.label)
setdiff(asv_tree$tip.label,asv_ref_tree$tip.label)
setdiff(taxa_names(ps_Bacteroidales),asv_ref_tree$tip.label) #ASV removed by generating phylogeny
setdiff(taxa_names(ps_Bacteroidales),asv_tree$tip.label)
#prune physeq to taxa in phylogeny
ps_Bacteroidales_final <- prune_taxa(asv_tree$tip.label,ps_Bacteroidales)
#output physeq to inputs_folder
inputs_folder <- 'results/gyrb/inputs'
dir.create(inputs_folder, recursive=TRUE)
#write taxonomy table
tax_table_tab <- as.data.frame(tax_table(ps_Bacteroidales_final)) %>%
rownames_to_column(var='ASV')
write.table(tax_table_tab,file.path(inputs_folder,'physeq_Bacteroidales_taxonomy.txt'),quote=F,row.names=F,sep='\t')
#write metadata
metadata_tab <- as.data.frame(sample_data(ps_Bacteroidales_final))
metadata_tab$X.SampleID <- rownames(metadata_tab)
write.table(metadata_tab,file.path(inputs_folder,'physeq_metadata_passing_samples.txt'),quote=F,row.names=F,sep='\t')
#write otu/asv table
otu_table <- as.data.frame(otu_table(ps_Bacteroidales_final))
write.table(otu_table,file.path(inputs_folder,'physeq_Bacteroidales_asv_tab.txt'),quote=F,row.names=T,col.names=NA,sep='\t')
#write fasta
asv_fasta_final <- asv_fasta[names(asv_fasta) %in% taxa_names(ps_Bacteroidales_final)]
Biostrings::writeXStringSet(asv_fasta_final,file=file.path(inputs_folder,"physeq_Bacteroidales_asv.fasta"))
#copy over phylogenies
# NOTE(review): "ASVs_filtered_aln_trim_rooted.tre" is never created in this
# script (only the *_ref_* tree above), so this cp will fail; see the note
# on the missing ASV-only phylogeny section.
system(paste('cp',file.path(processing_folder,'phylogeny',"ASVs_filtered_aln_trim_rooted.tre"),
file.path(inputs_folder,'physeq_Bacteroidales_ASVs.tree'),sep=' '))
system(paste('cp',file.path(processing_folder,'phylogeny',"ASVs_filtered_ref_aln_trim_rooted.tre"),
file.path(inputs_folder,'physeq_Bacteroidales_ASVs_ref.tree'),sep=' '))
#write moeller codiv seq files
system(paste0('cp results/gyrb/processing/moeller_sup/moeller_codiv* ',inputs_folder))
|
978a6aecc0f8d2ccb8173758635f156c20357a34
|
85b589129d60b08f61a591d8d336d0b8700b61bc
|
/plot2.R
|
d168dc47776064eebc472e2e321dada1d6e24d83
|
[] |
no_license
|
adrianthong/ExData_Plotting1
|
b74b8a5001b867286200e5e1998ca998eb357785
|
6f04a1254c2807d1804cb0c5f0e07dbaf2bca950
|
refs/heads/master
| 2021-01-18T12:33:54.290274
| 2015-08-04T15:31:46
| 2015-08-04T15:31:46
| 40,188,573
| 0
| 0
| null | 2015-08-04T14:09:41
| 2015-08-04T14:09:41
| null |
UTF-8
|
R
| false
| false
| 1,397
|
r
|
plot2.R
|
### Coursera Course 4: Exploratory Data Analysis
### Date: 4-Aug-2015
### Author: Adrian Thong
### Assignment 1: Plot 2
### Input: household_power_consumption.txt
### Output: plot2.png
### Description:
###   Read only lines for the dates 1/2/2007 and 2/2/2007
###   Convert to PNG file as required by Assignment 1 plot
# read through file for lines with dates 1/2/2007 and 2/2/2007
# critical assumption: data is sorted by date
# NOTE(review): the pattern handed to glob2rx() is already a regular
# expression; glob2rx() is meant for wildcard patterns. This appears to work
# only because glob2rx leaves '^' and '|' unescaped -- a plain
# grep("^1/2/2007|^2/2/2007", ...) would be clearer and safer.
selectedrows <- grep(glob2rx("^1/2/2007*|^2/2/2007*"),
readLines(".\\household_power_consumption.txt"))
# load only the required lines to memory
# NOTE(review): readLines() indices are 1-based and include the file's
# header line, so skip = selectedrows[1] skips past the FIRST matching row;
# reading length(selectedrows) rows then overshoots one row past 2/2/2007.
# skip = selectedrows[1] - 1 looks intended -- verify against the data.
pdata <- read.table(".\\household_power_consumption.txt",
sep = ";", na.strings = "?",
dec = ".", stringsAsFactors = FALSE,
skip = selectedrows[1], nrows = length(selectedrows))
# populate the column names by re-reading the file's header line
colnames(pdata) <- read.table(".\\household_power_consumption.txt",
sep = ";", stringsAsFactors = FALSE,
nrows = 1)
# combine Date and Time columns and parse them into POSIXlt timestamps
datetime <- paste(pdata$Date, pdata$Time, sep = " ")
datetime <- strptime(datetime,"%d/%m/%Y %H:%M:%S", tz = "GMT")
# open png file
png("plot2.png", width = 480, height = 480)
# plot line chart of global active power across the two days
plot(datetime, pdata$Global_active_power, type = "l",
ylab = "Global Active Power (kilowatts)", xlab = "")
# close file
dev.off()
|
baa1dbb2d3efb9f1515c14ef0e9089d0cbd448f2
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/GUILDS/R/evaluateLogLik2.R
|
764e03158caf2c45a1f657d30cab42f7f642b049
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 776
|
r
|
evaluateLogLik2.R
|
# Log-likelihood of the two-guild sampling model (GUILDS package) evaluated
# at candidate relative guild sizes `v`.
#
#   v         : vector of relative sizes of guild X (guild Y gets 1 - v)
#   theta_x/y : biodiversity parameters for guilds X and Y
#   alpha_x/y : dispersal parameters for guilds X and Y
#   J         : total community size
#   Sx/Sy     : species counts; Nx/Ny: abundances (forwarded to calc_sum_kda)
#   KDA_X/Y   : precomputed K(D,A) tables consumed by calc_sum_kda()
#
# Returns one log-likelihood per element of `v`; -Inf is the placeholder
# default when no value is produced.
evaluateLogLik <- function(v, theta_x, theta_y, alpha_x, alpha_y,
                           J, Sx, Sy, Nx, Ny, KDA_X, KDA_Y) {
  nx <- v
  ny <- 1 - v
  # Immigration-style rates derived from the dispersal parameters.
  I_X <- alpha_x * nx * (J - 1) / (1 - alpha_x * nx - alpha_y * ny)
  I_Y <- alpha_y * ny * (J - 1) / (1 - alpha_x * nx - alpha_y * ny)
  # lgamma() is vectorised, so the normalising term can be computed for all
  # elements at once (the original used an element-wise R loop here).
  norm_term <- lgamma(J + 1) - (lgamma(I_X + I_Y + J) - lgamma(I_X + I_Y))
  # Contribution of the guild sizes themselves.
  size_term <- ((theta_x / 2) - 1) * log(nx) + ((theta_y / 2) - 1) * log(ny)
  # Guild-specific sums over the precomputed KDA tables.
  sum_x <- calc_sum_kda(Sx, Nx, I_X, (theta_x / 2), KDA_X)
  sum_y <- calc_sum_kda(Sy, Ny, I_Y, (theta_y / 2), KDA_Y)
  result <- norm_term + size_term + sum_x + sum_y
  # Preserve the original contract: exactly one entry per element of v with
  # -Inf as the default (including NA padding if result is shorter).
  output <- rep(-Inf, length(v))
  output[seq_along(v)] <- result[seq_along(v)]
  return(output)
}
|
451959195e3dc2e73fb02a66fbcbde771b40d5ba
|
d31a37cdd54374410825ceeed36b9370564dcb43
|
/run_analysis.R
|
1ef175dbd9b3d2f068da2eb437906cab5159221b
|
[] |
no_license
|
aiedward/Getting-and-Cleaning-Data-Project
|
308eb6640adfde24403d8f312fe10ac697c17ed5
|
8e706d3bd30540ec693ad752ad8fcf8488b80f8f
|
refs/heads/master
| 2020-07-24T00:44:05.929598
| 2016-11-15T13:09:30
| 2016-11-15T13:09:30
| 73,798,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,191
|
r
|
run_analysis.R
|
## run_analysis.R -- build a tidy dataset from the UCI HAR data:
## select mean/std features, label activities, and merge train + test sets.
## The data is in the working directory under a sub-directory named "UCI HAR Dataset"
## setwd('C:/Users/Edward')
## indices and tidy names for "mean" or "std" features only
## read names of all features
feature_names_original<-read.table("UCI HAR Dataset/features.txt")
# dim(feature_names_original) # 561 2
# head(feature_names_original)
# feature_names_original[2] ==> look in second column for feature names that include "mean()" or "std()"
## indices for "mean" features only
# fixed=TRUE matches the literal substring "mean()", which deliberately
# excludes features such as "meanFreq()".
mean_indices<-grep("mean()", feature_names_original[,2], value=FALSE, fixed=TRUE)
# length(mean_indices) # 33
## indices for "std" features only
std_indices<-grep("std()", feature_names_original[,2], value=FALSE, fixed=TRUE)
# length(std_indices) # 33
## sorted indices for "mean" or "std" features only
indices<-sort(c(mean_indices, std_indices))
# length(indices) # 66
## indices of columns of interest
feature_indices <- (feature_names_original[indices,1])
# length(indices) # 66
## names of columns of interest
feature_names <- (feature_names_original[indices,2]) # get the names
# length(feature_names) # 66
# head(feature_names)
# tail(feature_names)
# ==> feature names are not tidy enough
# ==> need to change "-" to "_" and deal with duplicate "Body" in "BodyBody" feature names
feature_names <- gsub(pattern='-', replacement='_', x=feature_names) # tidy
feature_names <- gsub(pattern='BodyBody', replacement='Body', x=feature_names) # tidy
# head(feature_names)
# tail(feature_names)
# ==> O.K.
## prepare activity labels
# read table
# and assign column names
# in a single line of code
activity_labels<-read.table("UCI HAR Dataset/activity_labels.txt", col.names=c("activity_index","activity_description"))
# str(activity_labels) # O.K.
## pre-process the training data
## read training data
## take only selected columns (mean / std)
X_train<-read.table("UCI HAR Dataset/train/X_train.txt")[,feature_indices]
# head(X_train) # column names are v1, v2, ..., v543
# str(X_train) # all column are numeric
# dim(X_train) # 7352 66 ==> O.K.
## assign feature names to training data
colnames(X_train)<-feature_names
# head(X_train) # column names are now more comprehendible
# str(X_train)
## read training subject IDs
X_tr_subject<-read.table("UCI HAR Dataset/train/subject_train.txt")
# head(X_tr_subject) # a single column
## assign a column name instead of "V1"
colnames(X_tr_subject)<-"subject"
# str(X_tr_subject) # 7352 obs. ==> O.K.
## read training activity index
y_train_ind<-read.table("UCI HAR Dataset/train/y_train.txt")
# str(y_train_ind) # a single column
## assign a column name instead of "V1"
colnames(y_train_ind)<-"activity_index"
# str(y_train_ind) # 7352 obs. ==> O.K.
## merge (by binding columns) the subject IDs + activity ind + measurements
X_tr_temp <- cbind(X_tr_subject, y_train_ind, X_train)
# head(X_tr_temp)
# str(X_tr_temp)
## merge (by joining) in order to get activity descriptions (in addition to IDs)
X_tr_description<-merge(X_tr_temp,activity_labels,by="activity_index", all.x = TRUE ,sort=FALSE)
# head(X_tr_description)
# str(X_tr_description)
# with(X_tr_description, table(subject,activity_description)) # ==> O.K.
## tidy training set:
# subject ID first (column 2), activity description second (last column),
# then all the mean / std measurements; activity_index (column 1) is dropped
X_tr <- X_tr_description[c(2,ncol(X_tr_description),3:(ncol(X_tr_description)-1))]
# head(X_tr)
# str(X_tr) # 7352 obs. of 68 variables ==> O.K.
## pre-process the test data
## read test data
## take only selected columns (mean / std)
X_test<-read.table("UCI HAR Dataset/test/X_test.txt")[,feature_indices]
# head(X_test) # column names are v1, v2, ..., v543
# str(X_test) # all column are numeric
# dim(X_test) # 2947 66 ==> O.K.
## assign feature names to test data
colnames(X_test)<-feature_names
# head(X_test) # column names are now more comprehendible
# str(X_test)
## read test subject IDs
X_ts_subject<-read.table("UCI HAR Dataset/test/subject_test.txt")
# head(X_ts_subject) # a single column
## assign a column name instead of "V1"
colnames(X_ts_subject)<-"subject"
# str(X_ts_subject) # 2947 obs. ==> O.K.
## read test activity index
y_test_ind<-read.table("UCI HAR Dataset/test/y_test.txt")
# str(y_test_ind) # a single column
## assign a column name instead of "V1"
colnames(y_test_ind)<-"activity_index"
# str(y_test_ind) # 2947 obs. ==> O.K.
## merge (by binding columns) the subject IDs + activity ind + measurements
X_ts_temp <- cbind(X_ts_subject, y_test_ind, X_test)
# head(X_ts_temp)
# str(X_ts_temp)
## merge (by joining) in order to get activity descriptions (in addition to IDs)
X_ts_description<-merge(X_ts_temp,activity_labels,by="activity_index", all.x = TRUE ,sort=FALSE)
# head(X_ts_description)
# str(X_ts_description)
# with(X_ts_description, table(subject,activity_description)) # ==> O.K.
## tidy test set: same column layout as the training set above
X_ts <- X_ts_description[c(2,ncol(X_ts_description),3:(ncol(X_ts_description)-1))]
# head(X_ts)
# str(X_ts) # 2947 obs. of 68 variables ==> O.K.
## merge (union) training and test datasets
X <- rbind(X_tr,X_ts)
# dim(X)
# str(X) # 10299 obs. of 68 variables ==> O.K.
## Aggregate into wide form: the average of every measurement column for
## each (activity, subject) pair.
agg_wide <-aggregate(X[,3:ncol(X)], by=list(X$activity_description, X$subject), FUN=mean, na.rm=TRUE)
## aggregate() names the grouping columns Group.1 / Group.2 -- rename them.
colnames(agg_wide)[1:2]<-c("Activity_Description","Subject")
## Prefix every computed average with "Mean_of_" (vectorised; replaces the
## original element-by-element loop over 3:ncol).
colnames(agg_wide)[-(1:2)] <- paste0("Mean_of_", colnames(agg_wide)[-(1:2)])
## Order the data by activity and subject.
## BUG FIX: the original indexed agg_wide$activity_description and
## agg_wide$subject, but those columns were just renamed to
## Activity_Description / Subject, so both lookups returned NULL and
## order(NULL, NULL) silently dropped every row of the result.
agg_wide <- agg_wide[order(agg_wide$Activity_Description, agg_wide$Subject), ]
## create tidy text file for submission ("row.names" spelled out in full;
## the original relied on partial argument matching via "row.name")
write.table(x=agg_wide, file="tidy.txt", row.names=FALSE )
## leave the aggregated table as the script's final value
agg_wide
|
474505ad62d04569c34485b0d159c2e307d6b9e7
|
ef567cee0d5cdce6b6e6a0b4d517acb97c1974af
|
/man/circle.Rd
|
f09caef27ae41ff9719af1df17b0065fd49b4967
|
[] |
no_license
|
znerp/labBook
|
4086b5779374d2d9726466f1665e87d96f0a89e6
|
455c89d025bb51e59b5efddf222bec83ae5a5b29
|
refs/heads/master
| 2021-05-11T06:37:59.619526
| 2018-01-18T14:56:07
| 2018-01-18T14:56:07
| 117,993,672
| 0
| 0
| null | 2018-01-18T14:30:40
| 2018-01-18T14:30:40
| null |
UTF-8
|
R
| false
| true
| 227
|
rd
|
circle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting_functions.R
\name{circle}
\alias{circle}
\title{Draw a circle}
\usage{
circle(x, y, r, num_segs = 100, ...)
}
\description{
Draw a circle
}
|
eb2a4f757063b822b3830990f6f64bff53ce90d9
|
b9192fd071beac2e0b21f3f534b81f7ad9df0231
|
/R/batch_strata_ads.R
|
6de8e05413400af228888b68ba0775ebb5daa9dd
|
[] |
no_license
|
danrthomas/fbsample
|
895424fb8f1a86dd8c929fb8e9b14c89d47bd5aa
|
4a44c5fd2e13e90ba3c072f19703fd16c5fc9d79
|
refs/heads/master
| 2023-03-21T02:26:13.462698
| 2021-03-08T20:19:05
| 2021-03-08T20:19:05
| 345,750,907
| 0
| 0
| null | 2021-03-08T18:44:39
| 2021-03-08T18:11:43
|
R
|
UTF-8
|
R
| false
| false
| 6,622
|
r
|
batch_strata_ads.R
|
#' Create Facebook ads for a batch of strata
#'
#' Iterates over a set of targeting strata and creates one ad set plus one
#' ad per stratum via \code{strata_ad()}, sleeping between uploads so the
#' Marketing API rate limit is not exceeded.
#'
#' @param fbacc \code{FB_Ad_account} object that identifies your Facebook Ad account. Note this is not your Ad account number.
#' @param strata_targeting list. Information about each strata that you plan to target.
#' @param optimization_goal character. Optimization goal.
#' @param billing_event character. The billing event.
#' @param promoted_object list. See at \url{https://developers.facebook.com/docs/marketing-api/reference/ad-campaign/promoted-object}.
#' @param campaign_id character. Parent Ad Campaign ID.
#' @param wait_interval integer. Number of seconds to wait between creation of each ad to avoid exceeding the API rate limit.
#' @param show_wait logical. If \code{TRUE} (the default), the console will display a wait time progress bar while sleeping.
#' @param ... further arguments passed to the API endpoint
#' @return A data.frame with Strata IDs, Ad Set IDs, Ad IDs for ads that have been successfully created.
#' @import fbRads
#' @export
#' @references \url{https://developers.facebook.com/docs/marketing-api/reference/ad-campaign#Creating}
batch_strata_ads <- function(fbacc,
                             strata_targeting,
                             optimization_goal = c(
                               'NONE',
                               'APP_INSTALLS',
                               'CLICKS',
                               'ENGAGED_USERS',
                               'EXTERNAL',
                               'EVENT_RESPONSES',
                               'IMPRESSIONS',
                               'LINK_CLICKS',
                               'OFFER_CLAIMS',
                               'OFFSITE_CONVERSIONS',
                               'PAGE_ENGAGEMENT',
                               'PAGE_LIKES',
                               'POST_ENGAGEMENT',
                               'REACH',
                               'SOCIAL_IMPRESSIONS',
                               'VIDEO_VIEWS'
                             ),
                             billing_event = c(
                               'APP_INSTALLS',
                               'CLICKS',
                               'IMPRESSIONS',
                               'LINK_CLICKS',
                               'OFFER_CLAIMS',
                               'PAGE_LIKES',
                               'POST_ENGAGEMENT',
                               'VIDEO_VIEWS'
                             ),
                             promoted_object,
                             campaign_id,
                             wait_interval = 100,
                             show_wait = TRUE,
                             ...) {
  ## Resolve the enumerated choices to a single value. The original
  ## forwarded the whole default vector to strata_ad() when the caller did
  ## not pick one explicitly.
  optimization_goal <- match.arg(optimization_goal)
  billing_event <- match.arg(billing_event)
  ## Number of segments drawn by the wait-time progress bar.
  n_segments <- 20
  ## Sleep `wait_interval` seconds between uploads. The sleep is now
  ## unconditional (it exists to respect the API rate limit); show_wait only
  ## controls whether a progress bar is drawn while sleeping. The original
  ## skipped the sleep entirely when show_wait was FALSE, created the bar
  ## with max = 10 while stepping it to 20, and reused a single bar that was
  ## closed after the first wait.
  wait_between_uploads <- function() {
    if (isTRUE(show_wait)) {
      pb <- txtProgressBar(min = 0, max = n_segments, style = 3)
      on.exit(close(pb), add = TRUE)
      for (seg in seq_len(n_segments)) {
        Sys.sleep(wait_interval / n_segments)
        setTxtProgressBar(pb, seg)
      }
    } else {
      Sys.sleep(wait_interval)
    }
  }
  ## Create one ad set + ad for a single stratum specification and return
  ## strata_ad()'s one-row result. `...` is forwarded as documented (the
  ## original accepted but silently dropped it).
  create_one_ad <- function(stratum) {
    strata_ad(
      fbacc = fbacc,
      adset_name = paste0("Ad set: ", stratum$strata_id),
      optimization_goal = optimization_goal,
      billing_event = billing_event,
      campaign_id = campaign_id,
      promoted_object = promoted_object,
      bid_amount = stratum$bid_amount,
      adset_status = stratum$adset_status,
      lifetime_budget = stratum$lifetime_budget,
      ad_status = stratum$ad_status,
      targeting = stratum$targets,
      creative_id = stratum$creative_id,
      start_time = stratum$start_time,
      end_time = stratum$end_time,
      ad_name = paste0("Ad: ", stratum$strata_id),
      strata_id = stratum$strata_id,
      ...
    )
  }
  ## A single stratum arrives as a flat list; multiple strata arrive as a
  ## list of such lists. Count strata by their strata_id entries.
  num_strata <- sum(names(unlist(strata_targeting)) == "strata_id")
  if (num_strata > 1) {
    n <- length(strata_targeting)
    strata_output <- data.frame(strata_id = rep(NA, n),
                                adset_id = rep(NA, n),
                                ad_id = rep(NA, n))
    ## Upload ads one by one, with a wait between each upload to avoid
    ## exceeding the API rate limit.
    for (i in seq_len(n)) {
      tryCatch(
        strata_output[i, ] <- create_one_ad(strata_targeting[[i]]),
        error = function(e) {
          message(e)
          message(paste0(strata_targeting[[i]]$strata_id, ": ad could not be created."))
        },
        finally = {
          if (i != n) {
            message(
              paste0(
                "Wait ",
                wait_interval,
                " seconds for next ad to be created. ",
                i,
                " out of ",
                n,
                " ads created."
              )
            )
          } else {
            message(paste0("\n", i, " out of ", n, " ads created."))
          }
        }
      )
      if (i != n) {
        wait_between_uploads()
      }
    }
  } else if (num_strata == 1) {
    strata_output <- data.frame(strata_id = NA,
                                adset_id = NA,
                                ad_id = NA)
    tryCatch(
      strata_output[1, ] <- create_one_ad(strata_targeting),
      error = function(e) {
        message(e)
        message(paste0(strata_targeting$strata_id, ": ad could not be created."))
      },
      finally = {
        message(paste0("\n", 1, " out of ", 1, " ads created."))
      }
    )
  } else {
    ## BUG FIX: the original reached return(strata_output) with the variable
    ## never assigned on this path, raising "object 'strata_output' not
    ## found". Return an empty data.frame so callers always get one back.
    message("Error: the number of strata is undefined.")
    strata_output <- data.frame(strata_id = character(0),
                                adset_id = character(0),
                                ad_id = character(0))
  }
  ## Data.frame of Strata IDs, Ad Set IDs and Ad IDs of successful creations.
  return(strata_output)
}
|
0d104c9361b8e6beeb2f1543eac2c3db82355ae9
|
ca53a2af3684e94d613c72915d8283b86fd424c7
|
/plot3.R
|
b49b2db2b0faeb5a874e58d539b293e59fe12413
|
[] |
no_license
|
SLaw7/ExData_Plotting1
|
96951aa09fee92be9323009b55b009cbca25390f
|
063b1a42323fad50e901baad0f8fe7d7a200da5f
|
refs/heads/master
| 2020-04-08T10:18:55.327302
| 2015-02-06T17:10:02
| 2015-02-06T17:10:02
| 30,375,585
| 0
| 0
| null | 2015-02-05T20:04:24
| 2015-02-05T20:04:23
| null |
UTF-8
|
R
| false
| false
| 882
|
r
|
plot3.R
|
### plot3.R -- line chart of the three energy sub-metering series for the
### dates 1-2 Feb 2007, written to plot3.png.
## Data rows for the two target dates. NOTE(review): header = TRUE consumes
## the first line after `skip` as throw-away column names (replaced below);
## verify the skip/nrows window really covers only 1-2 Feb 2007.
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", nrows = 2880, skip = 66636, stringsAsFactors = FALSE)
## Re-read just the top of the file to recover the real column names.
header_row <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", nrows = 5, stringsAsFactors = FALSE)
names(power) <- names(header_row)
## Combine the Date and Time columns into a single timestamp column.
power <- cbind("Date_Time" = strptime(paste(power[, 1], power[, 2]), "%d/%m/%Y %H:%M:%S"), power)
## BUG FIX: this script draws plot 3 of the assignment, but the original
## wrote to "plot1.png", clobbering the output of plot1.R.
png(filename = "plot3.png", width = 480, height = 480)
## Empty frame first, then one line per sub-metering channel.
plot(power$Sub_metering_1 ~ power$Date_Time, type = "n", xlab = "", ylab = "Energy Sub metering")
lines(power$Date_Time, power$Sub_metering_1, col = "black")
lines(power$Date_Time, power$Sub_metering_2, col = "red")
lines(power$Date_Time, power$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = 1)
## Close the PNG device.
dev.off()
|
943069740ba17a5f5e227db873dc2b161dba84ca
|
3678bd577662e320c9bba8bdb6ce8ae554872559
|
/R/fdr_gam.R
|
bf7d96116c6627484edd45ec9b610d8c280b1c26
|
[] |
no_license
|
cran/mSTEM
|
94efece0611b855303c4264f24763430430de4bc
|
a4d566139cce621b9fbdf8719250c830eb408179
|
refs/heads/master
| 2020-08-05T00:00:19.490406
| 2019-10-02T09:00:05
| 2019-10-02T09:00:05
| 212,323,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,552
|
r
|
fdr_gam.R
|
#' Parallel computing fdr and power of change points estimation for different \code{gamma} and \code{nu}
#'
#' @param c number of cpu cores used for parallel computing
#' @inheritParams GenDY
#' @param Nu a vector of different \code{nu}s
#' @param Gamma a vector of different \code{gamma}s
#' @inheritParams Fdr
#' @inheritParams Fdr
#' @inheritParams ch.est
#' @inheritParams which.cp
#' @param iter iteration times for each combination of \code{gamma} and \code{nu}
#' @return a list of matrix with the same length as \code{Nu}, FDR and Power for different \code{Gamma} are displayed within each matrix
#' @export
#' @import foreach
#' @import doParallel
#' @import parallel
#' @examples \donttest{
#' size=12000
#' a = 1
#' A = a*(1:119)
#' H = seq(100,11900,100)
#' mu = GenMu(A,H,size=size)
#' z = GenZ(nu=2,size=size)
#' Gamma = seq(1,5,1)
#' Nu = seq(0,2,0.5)
#' model = fdr.gam(2,mu,Gamma,Nu,8,H,iter=100)
#'}
fdr.gam = function(c,mu,Gamma,Nu,b,th,B=100,level=0.1,iter=100){
  # Dummy binding so the foreach iteration variable `nu` is not flagged as
  # an undefined global by R CMD check.
  nu = NULL
  size = length(mu)
  cl = parallel::makeCluster(c)
  doParallel::registerDoParallel(cl)
  # BUG FIX (robustness): shut the cluster down even when an error is thrown
  # inside the foreach loops; the original only stopped it on the success
  # path, leaking worker processes on failure.
  on.exit(parallel::stopCluster(cl), add = TRUE)
  # Outer (sequential) loop over noise levels Nu; inner (parallel) loop over
  # the Gamma values. Each inner iteration yields one "FDR"/"Power" row,
  # rbind-ed into one matrix per nu.
  stem = foreach::foreach(nu = Nu) %:%
    foreach::foreach(gamma=Gamma,.combine='rbind',.packages="mSTEM") %dopar% {
      # Critical-height estimate for this (nu, gamma) combination.
      chest = ch.est(nu,gamma,size,B)
      temp = matrix(NA,iter,2);colnames(temp)=c("FDR","Power")
      for (i in seq_len(iter)){
        # Simulate a signal, detect change points, and record the realised
        # FDR and power against the true change points `th`.
        y1 = GenDY(mu,GenZ(nu,size),gamma)
        cp = which.cp(y1,chest,level)
        temp[i,]=unlist(Fdr(uh=c(cp$peak,cp$vall),b=b,th=th))
      }
      # Average FDR and power over the iterations.
      colMeans(temp)
    }
  return(stem)
}
|
cf2901d17093c77a5338c68ca1746229986b6c6d
|
09ee223baa1c6338fd695b415fc7f88a6dcb1720
|
/Sarek/manta-PF.R
|
4f09dd41013c6e221abfb9da78769444d599a982
|
[] |
no_license
|
szilvajuhos/pathfindr
|
47efea3848435c83e881d57eec19271fa07452be
|
b74c8be56fb50cef4a098152079667e2bab48c17
|
refs/heads/master
| 2020-03-29T19:30:15.733699
| 2020-01-30T13:41:15
| 2020-01-30T13:41:15
| 150,266,650
| 0
| 0
| null | 2018-09-25T13:06:36
| 2018-09-25T13:06:35
| null |
UTF-8
|
R
| false
| false
| 19,858
|
r
|
manta-PF.R
|
#!/usr/bin/env Rscript
# call like:
# Rscript ~/dev/pathfindr/Sarek/manta-PF.R -r ~/dev/pathfindr/Sarek/reference_data/ -o bugger -s /data0/btb/P2233/test/
options(warn=0)
suppressWarnings(suppressMessages(library("optparse")))
write("\n *** Pathfindr: filtering and prioritizing somatic mutations \n",stdout())
# Command-line arguments to read
option_list = list(
make_option(c("-r", "--reference"), type="character", default=NULL,
help="reference directory name", metavar="character"),
make_option(c("-o", "--out"), type="character", default="out.txt",
help="output file name [default= %default]", metavar="character"),
make_option(c("-s", "--sample"), type="character", default=getwd(),
help="Sample directory [default= %default]", metavar="character")
);
opt_parser = OptionParser(option_list=option_list);
opt = parse_args(opt_parser);
# NOTE(review): the three hard-coded assignments below overwrite whatever
# was parsed from the command line, so -r/-s/-o are silently ignored and the
# is.null() guard that follows can never trigger. They look like debugging
# leftovers -- remove them to restore normal CLI behaviour.
opt$reference <- '~/dev/pathfindr/Sarek/reference_data/'
opt$sample <- '../TEST'
opt$out <- 'bugger'
if (is.null(opt$reference)) {
print_help(opt_parser)
stop("Reference dir must be supplied.\n", call.=FALSE)
}
# we are using tictoc to benchmark runtimes all over the code
library(tictoc)
tic("Pathfindr")
# Utility function: print the head() of every element of a list to stdout.
# Used only for logging; invisibly returns NULL (the value of `for`).
printList <- function(aList) {
  # seq_along() instead of 1:length(): the original crashed on an empty
  # list because 1:0 iterates over c(1, 0) and aList[[1]] is out of bounds.
  for (idx in seq_along(aList)) {
    write(head(aList[[idx]]), stdout())
  }
}
# Print out sample name and other handy stuff
printList(list('Sample:',basename(getwd()),'Directory:',getwd(),'Date:',date()))
# reference is a command-line parameter
ref_data <- opt$reference
#ref_data <- "reference_data/"
write(paste("Reference directory: ",ref_data),stdout())
sample_dir <- opt$sample
#sample_dir <- "../TEST"
write(paste("Sample dir: ",sample_dir),stdout())
# we do want to print out tables
# NOTE(review): write_tables is set here but not used anywhere in the
# visible part of this script -- confirm it is consumed further down.
write_tables=TRUE
# by default and right now we are using only GRCh38
reference_genome='GRCh38'
write(paste("Reference genome: ",reference_genome),stdout())
# read all filenames we have in the current directory
files <- dir(path=sample_dir,recursive = T,full.names = T)
# get rid of filenames that we are not interested in
# TODO actually would be better to list only the files we are interested in
# NOTE(review): these patterns are regexes (fixed=TRUE is not set), so '.'
# matches any character -- '.png' also matches e.g. "Xpng"; escape the dots
# or pass fixed=TRUE if literal matching is intended.
remove=unique(c(grep(pattern = '.png',x = files),
grep(pattern = 'Annotation.old',x = files),
grep(pattern = 'Annotation/Manta/VEP.CADD',x = files),
grep(pattern = '/work/',x = files)))
if (length(remove)>0) files <- files[-remove]
##################################### Sample data: we are storing results here #####################################
library(data.table)
# One-row bookkeeping table for this sample; the commented-out fields hint
# at columns intended to be filled in later (FREEC/ASCAT file paths etc.).
sampleData <- data.table(
date=date(),
directory=sample_dir,
name=basename(sample_dir )#,
# patient='',
# sample='',
# freec_cnv_file,
# freec_Tratio_file,
# freec_Nratio_file,
# freec_Tbaf_file,
# ascat_Tratio_file
)
##################################### make chromosomes #####################################
# TODO: read it from the FASTA index file
if (reference_genome=='GRCh38') chrsz <- data.table(
chr = paste0('chr',c("1", "2", "3", "4", "5", "6", "7", "8",
"9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
"20", "21", "22", "X", "Y")),
label = c("1", "2", "3", "4", "5",
"6", "7", "8", "9", "10", "11", "12", "13",
"14", "15", "16", "17", "18", "19", "20",
"21", "22", "X", "Y"),
starts = c(0, 253902921, 500669562,
703689723, 898025604, 1084280925, 1259870526, 1424144247, 1574191848,
1717288929, 1855896810, 1995944331, 2134165212, 2253485013, 2363996694,
2470839615, 2565902256, 2654091777, 2739309138, 2802869979, 2871941700,
2923598421, 2979326382, 3140008743),
length = c(248902921, 241766641,
198020161, 189335881, 181255321, 170589601, 159273721, 145047601,
138097081, 133607881, 135047521, 133220881, 114319801, 105511681,
101842921, 90062641, 83189521, 80217361, 58560841, 64071721,
46656721, 50727961, 155682361, 56827081)
)
##################################### Tumor Genes - General Ones #####################################
library(stringr)
tumorgenes_data = paste(ref_data,'cancer_gene_census.csv',sep='')
write(paste("Tumor genes data ",tumorgenes_data),stderr());
tumorgenes = fread(tumorgenes_data,key='Gene Symbol')
tumorgenes$chr=paste0('chr',str_replace(string=tumorgenes$`Genome Location`,pattern = ':.*',replacement = ''))
temp=str_replace(string=tumorgenes$`Genome Location`,pattern = '.*:',replacement = '')
tumorgenes$start=as.numeric(str_replace(string=temp,pattern = '.*-',replacement = ''))
tumorgenes$end=as.numeric(str_replace(string=temp,pattern = '-.*',replacement = ''))
tumorgenes$cumstart=NA; tumorgenes$cumend=NA
for (i in 1:nrow(chrsz)) {
ix <- tumorgenes$chr==chrsz$chr[i]
tumorgenes$cumstart[ix] <- tumorgenes$start[ix]+chrsz$starts[i]
tumorgenes$cumend[ix] <- tumorgenes$end[ix]+chrsz$starts[i]
}
##################################### Tumor Genes - Locals: these are usually cancer-specific extensions #####################################
local_tumorgenes_data = paste(ref_data,'2018_gene_list_tere_ref.csv',sep='')
write(paste("Local tumor genes data ",local_tumorgenes_data),stderr());
local_tumorgenes=fread(local_tumorgenes_data,key='Gene')[Gene!='',1:2]
##################################### Create Tiers #####################################
alltumorgenes=unique(c(tumorgenes$`Gene Symbol`,local_tumorgenes$Gene))
alltier1=union(tumorgenes[Tier==1,`Gene Symbol`],local_tumorgenes[`Tier 1 and 2 for pediatric cancers final`==1,Gene])
alltier2=union(tumorgenes[Tier==2,`Gene Symbol`],local_tumorgenes[`Tier 1 and 2 for pediatric cancers final`==2,Gene])
##################################### ControlFREEC #####################################
loadMantaTumor <- function(manta_tumor_file) {
suppressWarnings(suppressMessages(library(VariantAnnotation)))
# get SweGen AFs: TODO make it optional
swegen_manta_all=fread(paste0(ref_data,'swegen_sv_counts.csv'),key='name')
write("Number of entries in SweGen SV swegen_manta_all",stderr())
write(str(dim(swegen_manta_all)),stderr())
# first collect PASS ids from all samples
allpass=NULL
if (length(manta_tumor_file)>0) for (s in 1:length(manta_tumor_file)) {
vcf=readVcf(file = manta_tumor_file[s],genome = reference_genome) # @TODO save the VCF object, we want to read only once
write(paste("Number of VCF entries:",dim(vcf)),stdout())
# for entries with PASS flag it as TRUE
pass=rowRanges(vcf)$FILTER=='PASS'
write(paste("Number of PASS-ed calls:",sum(pass)),stdout())
allpass=c(allpass,names(vcf)[pass])
}
write(paste("Number of ALL PASS-ed Manta calls to process:",summary(allpass)),stdout())
# then collect variants...
manta_tumor_table=NULL
if (length(manta_tumor_file)>0) for (s in 1:length(manta_tumor_file)) {
sample=strsplit(basename(manta_tumor_file[s]),'[.]')[[1]][1]
cat('Parsing tumor file\n')
cat(manta_tumor_file[s],'\n')
vcf=readVcf(file = manta_tumor_file[s],genome = reference_genome)
vcf=vcf[names(vcf) %in% allpass]
if (length(vcf)>0) {
g=geno(vcf)
inf=info(vcf)
rr=rowRanges(vcf)
# Read counts
srtable=as.data.table(g$SR)
colnames(srtable)=paste0('SplitReadSupport_',c('N','T')) # Verified on a test sample
prtable=as.data.table(g$PR)
colnames(prtable)=paste0('PairedReadSupport_',c('N','T'))
# Calculate allele ratio
temp=data.table(
pr.ref=sapply(prtable$PairedReadSupport_T,"[",1),
sr.ref=sapply(srtable$SplitReadSupport_T,"[",1),
pr.alt=sapply(prtable$PairedReadSupport_T,"[",2),
sr.alt=sapply(srtable$SplitReadSupport_T,"[",2)
)
AFreq=apply(temp[,3:4],1,sum,na.rm=T)/
(apply(temp[,3:4],1,sum,na.rm=T)+apply(temp[,1:2],1,sum,na.rm=T))
# make table of variants
sv_table=cbind(
data.table(ID=as.character(names(vcf)),
sample,
chr=as.character(seqnames(rr))),
as.data.table(rr)[,-1],
AFreq,prtable,srtable,
data.table(Swegen_count=rep(NA,length(vcf)),
rank_score=0,
rank_terms='',
LOH=''
),
as.data.table(inf)
)
sv_table$cumstart=sv_table$start
sv_table$cumend=sv_table$end
sv_table$altchr=sv_table$chr
sv_table$altpos=sv_table$END
sv_table$plot=F
sv_table$arc= -1
# now we have a table of SVs with all the data
# Filter out variants seen 2+ (?) times in reference data
cat("Now get rid of duplicates ...\n")
## Key has only chr,start,end
key=sv_table[,c('chr','start','end')]
key$chr=substr(key$chr,4,6)
key$imprecise='(pr)'
## If imprecise, round the pos to 10
ix=sv_table$IMPRECISE==T
key$imprecise[ix]='(impr)'
key$start[ix]=round(key$start[ix]/10)*10
key$end[ix]=round(key$end[ix]/10)*10
key=paste(key$chr,key$start,key$end,key$imprecise)
# put in Swegen count
sv_table$Swegen_count=swegen_manta_all[key,value]
sv_table$Swegen_count[is.na(sv_table$Swegen_count)]=0
## do the filter
sv_table <- sv_table[Swegen_count<2]
}
cat("Collected SVs so far:\n ")
cat(paste(sum(sv_table$SVTYPE=='INV'),"Inversions\n"))
cat(paste(sum(sv_table$SVTYPE=='DEL'),"Deletions\n"))
cat(paste(sum(sv_table$SVTYPE=='INS'),"Inserts\n"))
cat(paste(sum(sv_table$SVTYPE=='BND'),"Translocations\n"))
cat(paste(sum(sv_table$SVTYPE=='DUP'),"Duplications\n"))
if (exists('sv_table')) if (nrow(sv_table)>0) {
# loop through all and extract endpoint chr and pos <------ TODO: This one must be remade.. (refactor)
for (i in 1:nrow(sv_table)) try( { # <---- sometimes error here, so try..
t=strsplit(x = sv_table$ALT[[i]],split = ':')[[1]]
if (length(t)>1 & t[1]!="<DUP") {
tchr=str_extract(t[1],'[0-9,X,Y]*$')
sv_table$altchr[i] <- paste0('chr',tchr) #else sv_table$altchr[i] <- tchr for non-BTB thingy
tt=str_extract(t[2],'^[0-9]*')
sv_table$altpos[i]=as.numeric(tt)
}
}, silent=T)
# for each chromosome get cumulative pos
sv_table$altcumpos=sv_table$altpos
for (i in 1:nrow(chrsz)) {
ix=sv_table$chr==chrsz$chr[i]
if (sum(ix)>0) {
sv_table$cumstart[ix]=sv_table$start[ix]+chrsz$starts[i]
sv_table$cumend[ix]=sv_table$end[ix]+chrsz$starts[i]
}
ix=sv_table$altchr==chrsz$chr[i]
if (sum(ix)>0) {
sv_table$altcumpos[ix]=sv_table$altpos[ix]+chrsz$starts[i]
}
}
# decide how it is to be plotted (not represented elsewhere, up or down arc)
for (i in 1:nrow(sv_table)) {
if (sv_table$chr[i]==sv_table$altchr[i]) {
# intrachromosomal: plot always, with "positive" arc
sv_table$plot[i]=T
sv_table$arc[i]=1
} else if (sv_table$altcumpos[i] > sv_table$cumstart[i]) {
# interchromosomal: plot if mate is to right (else mate will be plotted) with negative arc
sv_table$plot[i]=T
sv_table$arc[i]=-1
}
}
# snpEff annotation is in the ANN column.
h=strsplit(info(header(vcf))['ANN',][,3],'annotations: \'')[[1]][2]
snpEff_header=trimws(strsplit(h,'\\|')[[1]])
# snpEff annotation is put in snpEff_table
snpEff_table=matrix(data = NA,nrow = length(unlist(sv_table$ANN)),ncol = length(snpEff_header)+1)
colnames(snpEff_table)=c('ID',snpEff_header)
row=1
for (i in 1:nrow(sv_table)) { # for each variant
# for each VEP annotation:
for (j in 1:length(sv_table$ANN[[i]])) if (length(sv_table$ANN[[i]])>0) {
line=strsplit(sv_table$ANN[[i]][j],'\\|')[[1]]
snpEff_table[row,1]=sv_table$ID[i]
snpEff_table[row,1+(1:length(line))]=line
row=row+1
}
}
snpEff_table=unique(as.data.table(snpEff_table))
# Filter out annotations of certain types where the ID has >N annotations of that type
ids=unique(snpEff_table$ID)
for (id in ids) {
common=c('protein_protein_contact', 'duplication', 'structural_interaction_variant', 'inversion',
'transcript_ablation', 'feature_ablation', 'sequence_feature',
'intergenic_region', 'downstream_gene_variant', 'upstream_gene_variant')
annotations=snpEff_table[ID==id,Annotation] # the annotations of this variant
table=table(annotations[annotations %in% common])
table=table[table>20] # the common annotations that appear >N times for this variant
if (length(table)>0) {
remove=which(snpEff_table$ID==id & snpEff_table$Annotation %in% names(table))
snpEff_table=snpEff_table[-remove[-1],] # saves one to make sure the variant has some annotation
}
}
# Add to data (all samples, one table)
manta_tumor_table=rbind(manta_tumor_table,merge(sv_table,snpEff_table,by='ID',all=F))
setkey(manta_tumor_table,'sample')
}
}# done parsing each sample
cat(paste("Parsing done, collected ",length(manta_tumor_table),"entries for further processing\n"))
# Prepare ranking and (some more) filtering
if (!is.null(manta_tumor_table)) if (nrow(manta_tumor_table)>0) {
# ## Make table with most important info for ranking, and report
# selected <- unique(manta_tumor_table[,.(ID,sample,SVTYPE,chr,start,REF,ALT,AFreq,PairedReadSupport_T,SplitReadSupport_T,
# Swegen_count,Rank_score='',Rank_terms='',Gene_Name,Annotation,Annotation_Impact)])
selection=manta_tumor_table[!is.na(Annotation)]
if (nrow(selection)>0) {
# Known cancer genes affect ranking by +1
cat("Increase rank by being a known cancer gene\n")
for (gene in unique(selection$Gene_Name)) if (!is.na(gene)) if (gene!='') {
ix=selection$Gene_Name==gene
gene=sort(strsplit(gene,'&')[[1]])
# cosmic/local Tier2:
if (any(gene %in% alltier2)) {
selection$rank_score[ix]=2
selection$rank_terms[ix]='T2_gene'
}
# tier 1 top priority:
if (any(gene %in% alltier1)) {
selection$rank_score[ix]=2
selection$rank_terms[ix]='T1_gene'
}
}
cat("Add high impact\n")
ix=selection$Annotation_Impact=='HIGH'
if (any(ix)) {
selection$rank_score[ix]=selection$rank_score[ix]+2
selection$rank_terms[ix]=paste(selection$rank_terms[ix],'high_impact')
}
cat("Add moderate impact\n")
ix=selection$Annotation_Impact=='MODERATE'
if (any(ix)) {
selection$rank_score[ix]=selection$rank_score[ix]+1
selection$rank_terms[ix]=paste(selection$rank_terms[ix],'moderate_impact')
}
cat(" +1 for focal\n")
ix=selection$chr==selection$altchr & selection$end-selection$start < 3e6 & selection$altpos-selection$start < 3e6
if (any(ix)) {
selection$rank_score[ix]=selection$rank_score[ix]+1
selection$rank_terms[ix]=paste(selection$rank_terms[ix],'focal')
}
cat("cosmic_>xx, fusion_gene or just fusion\n")
ix=grep('fusion',selection$Annotation)
if (length(ix)>0) for (i in ix) if (selection$Gene_Name[i]!='') {
gene=sort(strsplit(selection$Gene_Name[i],'&')[[1]])
if (paste(gene,collapse = ' ') %in% cosmic_fusions[value>5,name]) {
selection$rank_score[i]=selection$rank_score[i]+3
selection$rank_terms[i]=paste(selection$rank_terms[i],'cosmic_>5')
} else if (paste(gene,collapse = ' ') %in% cosmic_fusions$name) {
selection$rank_score[i]=selection$rank_score[i]+2
selection$rank_terms[i]=paste(selection$rank_terms[i],'cosmic_>1')
}
if (paste(gene,collapse = ' ') %in% allfusionpairs) {
selection$rank_score[i]=selection$rank_score[i]+2
selection$rank_terms[i]=paste(selection$rank_terms[i],'CGC_fusion')
} else if (any(gene %in% allfusion)) {
selection$rank_score[i]=selection$rank_score[i]+1
selection$rank_terms[i]=paste(selection$rank_terms[i],'partial_CGC_fusion')
} else {
selection$rank_score[i]=selection$rank_score[i]+0
selection$rank_terms[i]=paste(selection$rank_terms[i],'fusion')
}
}
cat("-1 for ablation if long del\n")
ix=intersect(
which(selection$SVTYPE=='DEL' & selection$chr==selection$altchr & abs(selection$altpos-selection$start) > 3e6),
grep('ablation',selection$Annotation)
)
if (length(ix)>0) {
selection$rank_score[ix]=selection$rank_score[ix]-1
selection$rank_terms[ix]=paste(selection$rank_terms[ix],'long_del')
}
cat("-1 for duplication if long dup\n")
ix=intersect(
which(selection$SVTYPE=='DUP' & selection$chr==selection$altchr & abs(selection$altpos-selection$start) > 10e6),
grep('duplication',selection$Annotation)
)
if (length(ix)>0) {
selection$rank_score[ix]=selection$rank_score[ix]-1
selection$rank_terms[ix]=paste(selection$rank_terms[ix],'long_dup')
}
firstcols=c('ID','sample','Gene_Name','rank_score','rank_terms','LOH','AFreq','Annotation','Annotation_Impact','Swegen_count')
cols=colnames(selection)
setcolorder(x = selection,neworder = c(firstcols,cols[!cols %in% firstcols]))
manta_tumor_selected <- selection[order(Feature_ID)][order(rank_score,decreasing = T)]
cat("Selected Manta SVs:")
cat(length(manta_tumor_selected))
#write(str(head(manta_tumor_selected)),stderr())
outfile <- paste0(sampleData$name,'_manta_tumor.csv')
# exclude ANN and Gene_ID
manta_tumor_selected_ranked <- subset(manta_tumor_selected[,-c('ANN','Gene_ID')], rank_score > 1)
#manta_tumor_selected_ranked <- subset(manta_tumor_selected, rank_score > 1)
#View(head(manta_tumor_selected_ranked))
#a <- readLines("stdin",n=1);
browser()
write(paste0(" Writing to ",outfile),stdout())
#write.table(manta_tumor_selected_ranked,file=outfile,quote=TRUE,sep=",",na="NA")
#write.csv(manta_tumor_selected_ranked,file=outfile,na="")
fwrite(manta_tumor_selected[,-c('ANN','Gene_ID')][rank_score>1],file=outfile)
write(paste0(" *** Manta results written to ",outfile),stdout())
#tableWrapper(manta_tumor_selected[,-c('ANN','Gene_ID')][rank_score>1])
}
}
}
tic("Manta")
write("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",stderr());
write("x Manta x",stderr());
write("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",stderr());
manta_tumor_file <- grep(pattern = ".*VEP\\.CADD.*Manta_.*vs.*somaticSV.*ann.vcf$",files,value = T)
manta_normal_file <- grep(pattern = ".*VEP\\.CADD.*Manta_.*vs.*diploidSV.*ann.vcf$",files,value = T)[1]
cosmic_fusions = fread(paste0(ref_data,'cosmic_fusions_table.csv'),key = 'name')
swegen_manta_all=fread(paste0(ref_data,'swegen_sv_counts.csv'),key='name')
write("Files for Manta structural variants: ", stdout())
write(paste("manta_tumor_file: ",manta_tumor_file), stdout())
write(paste("manta_normal_file: ",manta_normal_file), stdout())
#printList( list(manta_tumor_file,manta_normal_file) )
# TODO: sort it out to be local or a general on-demand object
allfusionpairs=NULL
allfusion=tumorgenes[grep('fusion',`Role in Cancer`),`Gene Symbol`]
for (i in 1:length(allfusion)) {
t=trimws(strsplit(tumorgenes[allfusion[i],`Translocation Partner`],', ')[[1]])
if (length(t)>0)
for (j in 1:length(t))
allfusionpairs=c(allfusionpairs,paste(sort(c(allfusion[i],t[j])),collapse = ' '))
}
toc()
tic("Database read")
write("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",stderr());
write("x Reading databases for SNV and small indel prioritisation x",stderr());
write("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",stderr());
# These are also needed for Strelka
write("Reading COSMIC tables ...",stdout())
cosmic_fusions = fread(paste0(ref_data,'cosmic_fusions_table.csv'),key = 'name')
toc()
write("Starting Manta tumor processing",stdout())
loadMantaTumor(manta_tumor_file)
#loadMantaNormal(manta_normal_file)
write("\nReady.",stdout())
|
71a224fe0fd9f315755176700e9b8f2ea8b886ed
|
0058c9840491f1f07e7288254529bc4112c6c311
|
/R/interactive_hourly.R
|
54b43a92dbfad4af1aadeaceb5fadc4e7ad3edcf
|
[] |
no_license
|
adamdsmith/MERL
|
d219579bcc750e1f86e62cdb8588e54bf857a776
|
2a2ddd260b189a352c0c00deaa7033a4929bb4ae
|
refs/heads/master
| 2021-07-01T18:36:53.764891
| 2017-09-22T19:58:10
| 2017-09-22T19:58:10
| 104,397,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,668
|
r
|
interactive_hourly.R
|
pacman::p_load(dplyr, dbplyr, leaflet, leaflet.extras,
htmlwidgets, htmltools, viridis, lubridate)
### Extract relevant detection data and join with active receivers
db <- list.files("./Data/", pattern = "*.motus$", full.names = TRUE)
merl <- src_sqlite(db) %>% tbl("alltags") %>%
select(tagProj, tagProjID, id, motusTagID, ts, sig, runLen,
freqsd, tagModel, spEN, depLat, depLon, recv, site,
lat, lon, ant, antType, antBearing) %>%
collect() %>%
filter(runLen > 2, freqsd < 0.1) %>%
mutate(ts = as.POSIXct(ts, origin = "1970-01-01", tz = "GMT"),
lat = case_when(
site == "PR3" ~ 30.4545,
site == "PR2" ~ 30.4298,
site == "CHDE" ~ 38.7703,
site == "SACH" ~ 41.4787,
site == "BISE" ~ 41.1532,
TRUE ~ lat),
lon = case_when(
site == "PR3" ~ -88.5857,
site == "PR2" ~ -88.5944,
site == "CHDE" ~ -75.0852,
site == "SACH" ~ -71.2438,
site == "BISE" ~ -71.5527,
TRUE ~ lon)) %>%
filter(!is.na(lat),
!is.na(site))
attr(merl$ts, "tzone") <- "America/New_York"
### Get deployment data
merl_dep <- read.csv("./Data/deployments.csv",
stringsAsFactors = FALSE) %>%
mutate(id = as.character(id),
depDate = ymd_hm(paste(depDate, depTime)),
xmtr_mb = round(xmtr_wt/wt * 100, 1)) %>%
select(id, sex, age, depDate, att_type, xmtr_mb)
merl <- left_join(merl, merl_dep, by = "id")
merl_det_hist <- merl %>%
mutate(ts_h = round_date(ts, "hours")) %>%
group_by(id, recv, site, ts_h) %>%
arrange(-sig) %>%
filter(row_number() == 1) %>%
mutate(since_rel = as.numeric(difftime(min(ts_h), min(depDate), units = "days"))) %>%
arrange(lat) %>%
ungroup() %>%
mutate(site = factor(site, unique(site)),
label = paste(paste("MERL", id), sex, age, tagModel, sep = ", ")) %>%
arrange(id, ts_h, lat)
elapsedPal <- colorNumeric(palette = viridis(24), domain = merl_det_hist$since_rel)
# Set up separate overlays/colors by tag id
tags <- unique(merl_det_hist$id)
p <- leaflet() %>%
# Base map group
addTiles("http://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}",
group = "Terrain") %>%
addTiles("http://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
group = "Aerial")
for (tag in tags) {
d <- merl_det_hist[merl_det_hist$id == tag, ]
p <- p %>% addCircleMarkers(data = d, lng = ~lon, lat = ~lat,
fillColor = ~elapsedPal(since_rel),
fillOpacity = 1, radius = 10,
color = "black", weight = 1, opacity = 1,
popup = ~paste(paste("MERL:", id),
paste(age, sex),
paste("Tag model:", tagModel),
paste("Site:", site),
paste("First detection:", ts_h),
sep = "<br/>"),
group = as.character(tag)) %>%
hideGroup(tag)
}
p <- p %>% addLayersControl(baseGroups = c("Terrain", "Aerial"),
overlayGroups = tags,
options = layersControlOptions(collapsed = FALSE)) %>%
addLegend("topleft", pal = elapsedPal, values = merl_det_hist$since_rel,
title = "Elapsed days",
opacity = 1) %>%
moveZoomControl()
htmlwidgets::saveWidget(p, file = "MERL_detection_summary.html")
|
f91675c1b43fe1694ce9125b0f165a007563933a
|
32b0b6fbfcd12b206d1433effa8c1db75f4e2235
|
/main_code/delay_report.R
|
ab6e6b91d4107f064c7818ee7e6713ef100103f7
|
[
"Apache-2.0"
] |
permissive
|
wxl1379457192/NPIs_code
|
5854db921d29e336e6eef50ebab26b388c9bed94
|
175efdd814298809b0d37b002d257f677f0a8fc5
|
refs/heads/main
| 2023-06-27T17:59:08.224720
| 2021-08-02T14:28:34
| 2021-08-02T14:28:34
| 353,044,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,593
|
r
|
delay_report.R
|
# Redistribute daily reported COVID-19 case counts backwards in time to
# approximate onset dates, using a stochastic reporting-delay model.
#
# Args:
#   COVID_D1:  path to a CSV of daily case data containing at least the
#              columns iso_code, date and new_cases.
#   Policy_P1: path to a CSV of policy data containing CountryCode
#              (used only to restrict to countries present in both files).
# Returns:
#   A data frame (rows bound over all shared countries) equal to the case
#   data plus a New_cases_delay column with the back-shifted counts.
# NOTE(review): delays are drawn via rnorm()/rnbinom() without a fixed
# seed, so output differs between runs.
Reported_delay <-function(COVID_D1,Policy_P1)
{
############# read epidemic and policy data ################
D1 <- read.csv(COVID_D1, stringsAsFactors = FALSE)
P1 <- read.csv(Policy_P1, stringsAsFactors = FALSE)
#D1<-read.csv(url(COVID_D1))
# Countries appearing in both the epidemic and the policy tables.
Con_list <-intersect(D1$iso_code, P1$CountryCode)
D1[is.na(D1)]<-0
D1$date <- as.Date(D1$date)
# Split the case table into one sub-data-frame per country.
dd<-split(D1,D1$iso_code)
DD_list<-do.call(rbind,lapply(Con_list,function(x){
v<-dd[[x]]
v$New_cases_delay<-0
# For every day, scatter that day's cases backwards by random delays
# drawn from a negative binomial whose mu/size are themselves sampled
# (mean delay around 10.9 days).
for(i in 1:nrow(v)){
if(v$new_cases[i]>=0){
mean<-rnorm(1,10.92,0.94)
sd<-rnorm(1,5.41,0.27)
# One sampled delay per reported case; table() counts cases per delay.
x1 <- round(rnbinom(v$new_cases[i],mu=mean,size=sd))
t<-as.data.frame(table(x1))
# NOTE(review): nrow(t>0) equals nrow(t); the guard was presumably
# meant to be nrow(t) > 0. It works by accident (0 is falsy in if()).
if(nrow(t>0)){
for (n in 1:nrow(t)){
date <- v$date[i]-as.numeric(paste(t$x1))[n]
# NOTE(review): inside subset() both sides of date==date resolve to
# the data-frame column, so the condition is always all-TRUE; it was
# presumably meant to compare the column to the local `date` above.
# Harmless here, because the assignment below matches correctly.
if (nrow(subset(v,date==date))>0){
v$New_cases_delay[v$date==date]<-
v$New_cases_delay[v$date==date]+t$Freq[n]}
}
}
}else{
# Negative corrections are redistributed the same way, but subtracted.
mean<-rnorm(1,10.92,0.94)
sd<-rnorm(1,5.41,0.27)
x1 <- round(rnbinom(-v$new_cases[i],mu=mean,size=sd))
t<-as.data.frame(table(x1))
if(nrow(t>0)){
for (n in 1:nrow(t)){
date <- v$date[i]-as.numeric(paste(t$x1))[n]
if (nrow(subset(v,date==date))>0){
v$New_cases_delay[v$date==date]<-
v$New_cases_delay[v$date==date]-t$Freq[n]}
}
}
}
}
print(paste(v$iso_code[1],"has been finished"))
return(v)
}))
return(DD_list)
}
|
62ee74d1f40d15bd556cf05a4f4efc9bae5b98b3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dataone/examples/listObjects.Rd.R
|
9287e44140a56d11f89042b556295bcfa86f5ffc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 557
|
r
|
listObjects.Rd.R
|
# Example usage of dataone::listObjects() (extracted from the package's
# Rd examples): page through metadata objects on a Coordinating Node.
# NOTE(review): performs live network calls; not runnable offline.
library(dataone)
### Name: listObjects
### Title: Retrieve the list of objects that match the search parameters
### Aliases: listObjects listObjects,D1Node-method
### ** Examples
library(dataone)
# Use the staging (test) coordinating node rather than production.
cn <- CNode("STAGING")
# ISO 8601 timestamps bounding the search window.
fromDate <- "2013-01-01T01:01:01.000+00:00"
toDate <- "2015-12-31T01:01:01.000+00:00"
# Restrict results to EML 2.1.0 metadata documents.
formatId <- "eml://ecoinformatics.org/eml-2.1.0"
# Paging: start at the first record, fetch at most five.
start <- 0
count <- 5
objects <- listObjects(cn, fromDate=fromDate, toDate=toDate,
formatId=formatId, start=start, count=count)
# Inspect id of first object
objects[1]$objectInfo$identifier
|
225e4e667367c4e82cc24f76e91fbf88760c23e0
|
e05524a39391ee68ac529178e1111e4092e5dcd0
|
/ITWILL_Semiproject/multi_lm.R
|
f9fb62d8ce5af1b02f36f540705fe0002d1682c1
|
[] |
no_license
|
DominKim/R_Data_Anaylsis
|
28d9b7c38777bcb06826d3e34b1e0bff2bc45954
|
6fd207015fe6e3c814e48ca857ac721fad8f9ab3
|
refs/heads/master
| 2022-04-25T02:30:20.818057
| 2020-04-28T11:34:09
| 2020-04-28T11:34:09
| 245,743,926
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,032
|
r
|
multi_lm.R
|
######################################################################
if(!require("cvTools")) install.packages("cvTools"); library(cvTools)
if(!require("car")) install.packages("car"); library(car)
if(!require("corrplot")) install.packages("corrplot"); library(corrplot)
if(!require("dplyr")) install.packages("dplyr"); library(dplyr)
#######################################################################
# 주가 데이터 Input
stock <- read.csv("nor_stock.csv", stringsAsFactors = F, encoding = "euc-kr")
str(stock)
dim(data)
data <- stock[-c(1:4)]
# train / test set 설정
idx <- sample(nrow(data), nrow(data)*0.7)
train <- data[idx,]
test <- data[-idx,]
# 다중회귀분석 + 교차검정 필요
multi_model <- lm(주가 ~ ., train)
summary(multi_model)
# Multiple R-squared: 0.2196, Adjusted R-squared: 0.2149
# 모델평가
multi_pred <- predict(multi_model, test)
cor(test$주가, multi_pred) # 0.5578614
mse <- test$주가 - multi_pred
mean(mse^2) # 0.002975705
# 유의도 > 0.05 변수 추출
novalue <- character()
novalue_num <- numeric()
cnt <- 1
for (i in 1:length(summary(multi_model)$coefficients[,4])) {
if(summary(multi_model)$coefficients[,4][i] > 0.05){
novalue[cnt] <- names(summary(multi_model)$coefficients[,4][i])
novalue_num[cnt] <- summary(multi_model)$coefficients[,4][i]
cnt <- cnt + 1
}
}
paste(novalue, collapse = ",-")
# "(Intercept) 당기순익 영업흐름 BPS 매출증가 지배ROE" 변수들은 주가를 예측하는데 유의하지 못한 변수들이다.
# 그러므로 이러한 변수들을 제거하고 다중회귀분석을 다시 돌려볼 필요가 있다.
# 변수 제거
names(data)
re_data <- data %>% select(-당기순익,-영업흐름,-BPS,-매출증가,-지배ROE)
str(re_data)
# train /test set 재설정
re_train <- re_data[idx,]
re_test <- re_data[-idx,]
# 다중회귀분석
re_multi_model <- lm(주가 ~ ., re_train)
summary(re_multi_model)
# Multiple R-squared: 0.2169, Adjusted R-squared: 0.2141
# 모델평가
re_multi_pred <- predict(re_multi_model, re_test)
cor(re_test$주가, re_multi_pred) # 0.551524
mse <- re_test$주가 - re_multi_pred
mean(mse^2) # 0.003018186
# 오히려 설명력은 떨어지고 예측력은 거의 비슷하다. > 공선성을 확인해서 변수제거를 시도해 볼 필요가 있다.
# 다중공선성(Multicolinearity)확인
novar <- character()
cnt <- 1
for (i in 1:length(names(vif(multi_model)))) {
if (sqrt(vif(multi_model))[i] > 2){
novar[cnt] <- names(vif(multi_model))[i]
cnt <- cnt + 1
}
}
paste(novar, collapse = " ,-")
# "자본총계 매출액 영업이익 당기순익 영익률 매출이익" 변수들이 실제로 높은 공선성을 보인다.
# 상관분석을 통해 상관성이 높은 변수 둘 중 중요도가 낮은 변수 제거
corrplot(cor(data[-1]), method = "number", type = "upper", diag = F)
# 상관분석을 통해 서로 높은 상관도를 보이는 변수들은 영업이익 당기순익(0.98) 자본총계 매출액(0.92)
# 이다. 그리고, 자본총계 같은 경우에는 공선성이 높은 변수들 중 4개랑 높은 양의 상관성을 보이므로
# 필히 제거가 필요하다.
# 공선성 높은 변수제거 자본총계
re_data <- data %>% select(-자본총계)
# train /test set 재설정
re_train <- re_data[idx,]
re_test <- re_data[-idx,]
# 다중회귀분석
re_multi_model <- lm(주가 ~ ., re_train)
summary(re_multi_model)
# Multiple R-squared: 0.2117, Adjusted R-squared: 0.2073
# 모델평가
re_multi_pred <- predict(re_multi_model, re_test)
cor(re_test$주가, re_multi_pred) # 0.5406467
mse <- re_test$주가 - re_multi_pred
mean(mse^2) # 0.003044309
# 자본총계만 제거시에는 오히려 낮은 예측력과 설명력을 보임
# 공선성 높은 변수제거 자본총계, 매출액
re_data <- data %>% select(-자본총계, -매출액)
# train /test set 재설정
re_train <- re_data[idx,]
re_test <- re_data[-idx,]
# 다중회귀분석
re_multi_model <- lm(주가 ~ ., re_train)
summary(re_multi_model)
# Multiple R-squared: 0.2046, Adjusted R-squared: 0.2006
# 모델평가
re_multi_pred <- predict(re_multi_model, re_test)
cor(re_test$주가, re_multi_pred) # 0.5557501
mse <- re_test$주가 - re_multi_pred
mean(mse^2) # 0.002937004
# 위와 마찬가지로 오히려 예측력과 설명력이 떨어짐
# 공선성 높은 변수제거 자본총계, 매출액, 영업이익
re_data <- data %>% select(-자본총계, -매출액, -영업이익)
# train /test set 재설정
re_train <- re_data[idx,]
re_test <- re_data[-idx,]
# 다중회귀분석
re_multi_model <- lm(주가 ~ ., re_train)
summary(re_multi_model)
# Multiple R-squared: 0.2006, Adjusted R-squared: 0.197
# 모델평가
re_multi_pred <- predict(re_multi_model, re_test)
cor(re_test$주가, re_multi_pred) # 0.5525958
mse <- re_test$주가 - re_multi_pred
mean(mse^2) # 0.002928888
# 공선성 높은 변수제거 자본총계, 매출액, 영업이익, 당기순익
re_data <- data %>% select(-자본총계, -매출액, -영업이익, -당기순익)
# train /test set 재설정
re_train <- re_data[idx,]
re_test <- re_data[-idx,]
# 다중회귀분석
re_multi_model <- lm(주가 ~ ., re_train)
summary(re_multi_model)
# Multiple R-squared: 0.1636, Adjusted R-squared: 0.1603
# 모델평가
re_multi_pred <- predict(re_multi_model, re_test)
cor(re_test$주가, re_multi_pred) # 0.4672997
mse <- re_test$주가 - re_multi_pred
mean(mse^2) # 0.00338315
# 공선성이 높은 변수들을 제거 했음에도 불구하고 오히려 설명력과 예측력이 떨어지므로
# 다른 알고리즘을 사용하는게 주가 예측모델에 더 적합하다.
# 다중회귀분석 교차검정
# k = 3 : d1 =50, d2 = 50, d3 = 50
cross <- cvFolds(n = nrow(data), K = 3, R = 2, type = "random")
cross # Fold : dataset Index : rownum
str(cross)
K <- 1:3 # k겹
R <- 1:2 # set
ACC <- numeric(); ACC2 <- numeric(); ACC3 <- numeric()
cnt <- 1
for (r in R) { # set = 열 index(2회)
cat("R = ", r, "\n")
for (k in K) { # k겹 = 행 index(3회)
idx <- cross$subsets[cross$which == k, r]
# cat("K = ", k, "\n")
# print(idx)
kmulti_test <- data[idx, ] # 검정용(50)
kmulti_train <- data[-idx, ] # 훈련용(100)
kmulti_model <- lm(주가 ~ ., kmulti_train)
kmulti_pred <- predict(kmulti_model, kmulti_test)
ACC[cnt] <- cor(kmulti_test$주가, kmulti_pred)
ACC2[cnt] <- mean((kmulti_test$주가 - kmulti_pred)^2)
ACC3[cnt] <- summary(kmulti_model)$adj.r.squared
cnt <- cnt + 1 # 카운터
}
}
mean(ACC) # 0.385987 상관성
mean(ACC2) # 0.007918064 mse
mean(ACC3) # 0.2883298 설명력
# 다중회귀 모델을 교차검정한 결과도 상관성과 설명력은 0.4 미만이므로 다른 알고리즘을 채택해서 예측을
# 해볼필요가 있다.
|
49850f12f69431bafc76d7f8f0d070592947d23d
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/prcbench/tests/testthat/test_tool_pm.R
|
ff1372994fcc4f3649e179e47f7821a86eeba246
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,448
|
r
|
test_tool_pm.R
|
# testthat unit tests for the ToolPerfMeas R6 wrapper and for
# create_toolset() when asked for the "PerfMeas" tool.
context("Tool: PerfMeas")
# Test ToolPerfMeas
#      create_toolset
#
# The generator object: class, its name attribute, and that the private
# f_wrapper method mentions the PerfMeas package on line 2 of its body.
test_that("ToolPerfMeas - R6ClassGenerator", {
expect_true(is(ToolPerfMeas, "R6ClassGenerator"))
expect_equal(attr(ToolPerfMeas, "name"), "ToolPerfMeas_generator")
expect_equal(grep("PerfMeas",
body(ToolPerfMeas$private_methods$f_wrapper))[[1]], 2)
})
# An instantiated toolset inherits from ToolIFBase and R6.
test_that("ToolPerfMeas - R6", {
toolset <- ToolPerfMeas$new()
expect_true(is(toolset, "ToolPerfMeas"))
expect_true(is(toolset, "ToolIFBase"))
expect_true(is(toolset, "R6"))
})
# Tool-name lookup is case-insensitive ("PERF" and "perf" both work).
test_that("create_toolset", {
toolset1 <- create_toolset("PERF")[[1]]
expect_true(is(toolset1, "ToolPerfMeas"))
expect_equal(toolset1$get_toolname(), "PerfMeas")
toolset2 <- create_toolset("perf")[[1]]
expect_true(is(toolset2, "ToolPerfMeas"))
expect_equal(toolset2$get_toolname(), "PerfMeas")
})
# calc_auc defaults to TRUE and is stored in the instance's private env.
test_that("create_toolset: calc_auc", {
toolset1 <- create_toolset("PerfMeas")[[1]]
expect_equal(environment(toolset1$clone)$private$def_calc_auc, TRUE)
toolset2 <- create_toolset("PerfMeas", calc_auc = FALSE)[[1]]
expect_equal(environment(toolset2$clone)$private$def_calc_auc, FALSE)
})
# store_res defaults to TRUE and is stored the same way.
test_that("create_toolset: store_res", {
toolset1 <- create_toolset("PerfMeas")[[1]]
expect_equal(environment(toolset1$clone)$private$def_store_res, TRUE)
toolset2 <- create_toolset("PerfMeas", store_res = FALSE)[[1]]
expect_equal(environment(toolset2$clone)$private$def_store_res, FALSE)
})
|
e8852acaf4f6942a9f40713a47612fd87e93ed21
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleanalyticsv3.auto/man/McfData.query.Rd
|
3589eff951f631fe7c65518c0f88e7af53feebb3
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,271
|
rd
|
McfData.query.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics_objects.R
\name{McfData.query}
\alias{McfData.query}
\title{McfData.query Object}
\usage{
McfData.query(dimensions = NULL, end.date = NULL, filters = NULL,
ids = NULL, max.results = NULL, metrics = NULL, samplingLevel = NULL,
segment = NULL, sort = NULL, start.date = NULL, start.index = NULL)
}
\arguments{
\item{dimensions}{List of analytics dimensions}
\item{end.date}{End date}
\item{filters}{Comma-separated list of dimension or metric filters}
\item{ids}{Unique table ID}
\item{max.results}{Maximum results per page}
\item{metrics}{List of analytics metrics}
\item{samplingLevel}{Desired sampling level}
\item{segment}{Analytics advanced segment}
\item{sort}{List of dimensions or metrics based on which Analytics data is sorted}
\item{start.date}{Start date}
\item{start.index}{Start index}
}
\value{
McfData.query object
}
\description{
McfData.query Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Analytics data request query parameters.
}
\seealso{
Other McfData functions: \code{\link{McfData.columnHeaders}},
\code{\link{McfData.profileInfo}},
\code{\link{McfData.totalsForAllResults}},
\code{\link{McfData}}
}
|
176697a3a089d64ba9e71f3c38dbf1b652ef91ab
|
97c60175123108718c5cc1a767b7eb0d9df714db
|
/05-Gap-Filling/03_plot_model_Rsq_with_diff_3_day_ave_weights.R
|
1621deb77a89707afd465116e98d0da6c02940dd
|
[
"Apache-2.0"
] |
permissive
|
gioscript/Missing-Drainage-Data
|
12b170311b957a9280890ce8041274f5edbbfb3d
|
25330b00161aaf0c7b1ac195bc4e92f9e4c13307
|
refs/heads/master
| 2020-04-04T12:30:16.493702
| 2020-02-28T22:49:26
| 2020-02-28T22:49:26
| 155,928,632
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,799
|
r
|
03_plot_model_Rsq_with_diff_3_day_ave_weights.R
|
# this peice of code is used to find the best weights for calculating 3-day moving average precipitation
# based on visual results it looks that:
# a. non-weighted (i.e. equally weighted) average performs better than 10-30-60
# b. improvments over non-weighted average was achieved by giving more weight to the seond day precip (e.g. 25-45-30)
# function to fit linear model
# Fit a through-the-origin linear model: daily tile flow explained by the
# plain (equally weighted) 3-day average rainfall column `rain_3`.
reg_model <- function(df) {
  fitted_model <- lm(flow ~ rain_3 - 1, data = df)
  fitted_model
}
# Same through-the-origin fit as reg_model(), but against the weighted
# 3-day rainfall average column `rain_3_weighted`.
reg_model_weighted <- function(df) {
  fitted_model <- lm(flow ~ rain_3_weighted - 1, data = df)
  fitted_model
}
# Daily rain totals at or below this threshold are treated as "no rain" when
# deciding whether to keep a 3-day average (see the pipeline below).
rain_limit = 0.45
# function to calculate 3-day weighted rolling/moving average
# x is the 3-element window supplied by rollapplyr(); W1/W2/W3 weight the
# oldest, middle, and newest day.  Missing days count as zero rain unless the
# whole window is missing, in which case NA is returned.
rollmean.weighted = function(x, W1 = .25, W2 = .45, W3 = .30) {
  if (sum(is.na(x)) == 3) {
    NA
  } else {
    # Bug fix: the original computed an NA-safe weighted sum here but then fell
    # through to an unconditional sum(x * c(W1, W2, W3)), which returned NA for
    # any window containing a missing day and made this branch dead code.
    sum(replace(x, is.na(x), 0) * c(W1, W2, W3))
  }
}
# add 3-day moving ave precipitation
# NOTE(review): this pipeline assumes `tile_flow` (daily flow/precip per site
# and plot) plus the tidyverse/zoo/lubridate functions used below are already
# loaded by an upstream script -- confirm before running standalone.
data <-
  tile_flow %>%
  # select only Free and Controlled drainage plots
  filter(dwm %in% c("FD", "CD")) %>%
  group_by(siteid, plotid, dwm) %>%
  nest() %>%
  # count number of plots per site
  group_by(siteid) %>%
  mutate(plot_count = n_distinct(plotid)) %>%
  # find sites where dwm treatment was swapped
  group_by(siteid, dwm) %>%
  mutate(plot_count = n_distinct(plotid)) %>%
  group_by(siteid) %>%
  mutate(dwm_count = n_distinct(plotid)) %>%
  # assign reps by taking into account plots with swapped treatment
  mutate(rep = ifelse(plot_count == dwm_count, 1, NA),
         rep = ifelse(dwm_count == 1, dwm_count, rep),
         rep = ifelse(is.na(rep), 1:plot_count, rep)) %>%
  select(-ends_with("count")) %>%
  unnest() %>%
  # add season (calendar quarter mapped to winter/spring/summer/fall)
  mutate(season = factor(quarter(date), labels = c("winter", "spring", "summer", "fall")),
         rep = as.factor(rep)) %>%
  select(siteid, plotid, dwm, rep, year, season , date, flow, rain = precip_on_site) %>%
  # calculate limiting tile flow as mean daily summer flow
  group_by(siteid, plotid, dwm, rep) %>%
  mutate(min_flow = mean(flow[season == "summer"], na.rm = TRUE),
         # correct limiting flow so it is not > 0.3 mm/day
         min_flow = ifelse(min_flow > 0.3, 0.3, min_flow)) %>%
  # calculate 3-day average precip
  group_by(siteid, plotid) %>%
  mutate(rain_3 = rollapplyr(rain, 3, mean, na.rm = TRUE, partial = TRUE),
         # calculate weighted moving average precip
         rain_3_weighted = rollapplyr(rain, width = 3, FUN = rollmean.weighted,
                                      partial = FALSE, fill = NA),
         # handle days which are out of the rolling range = first two records in this case
         rain_3_weighted = ifelse(is.na(rain_3_weighted), rain_3, rain_3_weighted),
         # leave calculated 3-day average only for those days when it rained
         # OR rain was negligible (see the plot above)
         rain_3 = ifelse(rain > rain_limit, rain_3, 0),
         rain_3_weighted = ifelse(rain > rain_limit, rain_3_weighted, 0)) %>%
  group_by(siteid, plotid, dwm, rep) %>%
  nest()
# Develop "Seasonal" Regression model -------------------------------------
# predicting peak flows caused by precipitation event
reg_models <-
  data %>%
  # filter data to be used for regression
  mutate(reg_data = map(.x = data,
                        .f = ~ .x %>%
                          # remove days when there was no rain OR tile flow was below minimum limit
                          filter(flow > min_flow, rain_3 > 0))) %>%
  # add season to nesting
  unnest(reg_data) %>%
  group_by(siteid, plotid, dwm, rep, season) %>%
  nest(.key = "reg_data") %>%
  # fit the model (plain vs weighted 3-day rain, both through the origin)
  mutate(reg_model = map(reg_data, reg_model),
         reg_model_weighted = map(reg_data, reg_model_weighted))
# plot R2 values of two models against each other, faceted by season;
# points above the 1:1 line favour the weighted average
reg_models %>%
  mutate(R_reg_model = reg_model %>% map(glance) %>% map_dbl("r.squared"),
         R_wei_model = reg_model_weighted %>% map(glance) %>% map_dbl("r.squared"),
         n_of_points = reg_data %>% map_dbl(nrow)) %>%
  ggplot(aes(R_reg_model, R_wei_model)) +
  geom_point(aes(col=siteid)) +
  geom_smooth(method = "lm", se = FALSE) +
  geom_abline(slope = 1, intercept = 0) +
  facet_wrap(~ season)
# plot regression models for DPAC: candidate functional forms
# (quadratic, sqrt, ^1.5, linear) fitted through the origin
reg_models %>%
  mutate(R_reg_model = reg_model %>% map(glance) %>% map_dbl("r.squared"),
         R_wei_model = reg_model_weighted %>% map(glance) %>% map_dbl("r.squared"),
         n_of_points = reg_data %>% map_dbl(nrow),
         Predictions = reg_model_weighted %>% map(augment)) %>%
  unnest(Predictions) %>%
  filter(siteid %in% c("DPAC")) %>%
  # filter(flow > 1 & rain_3_weighted > 1) %>%
  ggplot(aes(x=rain_3_weighted, y=flow)) +
  geom_point() +
  geom_smooth(method = "lm", formula = y ~ I(x^2) - 1, se = FALSE, col = "green")+
  geom_smooth(method = "lm", formula = y ~ sqrt(x) - 1, se = FALSE, col = "red")+
  geom_smooth(method = "lm", formula = y ~ I(x^1.5) - 1, se = FALSE, col = "black")+
  geom_smooth(method = "lm", formula = y ~ x - 1, se = FALSE) +
  facet_grid(plotid ~ season, scales = "free_y") +
  theme_bw()
# density of daily flow by plot and season for the SERF sites
reg_models %>%
  mutate(R_reg_model = reg_model %>% map(glance) %>% map_dbl("r.squared"),
         R_wei_model = reg_model_weighted %>% map(glance) %>% map_dbl("r.squared"),
         n_of_points = reg_data %>% map_dbl(nrow),
         Predictions = reg_model_weighted %>% map(augment)) %>%
  unnest(Predictions) %>%
  filter(siteid %in% c("SERF_IA", "SERF_SD")) %>%
  # filter(flow > 1 & rain_3_weighted > 1) %>%
  ggplot(aes(x=flow)) +
  geom_density(adjust = 5, fill = "orange") +
  facet_grid(plotid ~ season, scales = "free_y") +
  theme_bw()
|
7e05633cc57376e98d3d03650d738e1f64aca328
|
0ea96e7b4d16c6595415d657103f7a2391421706
|
/R/plot_functions.R
|
0e480a6e71d9019b228427b678e704634d985521
|
[
"MIT"
] |
permissive
|
dozmorovlab/multiHiCcompare
|
c913334612e9b6fc8807341d370f11715ed51da0
|
dcfe4aaa8eaef45e203f3d7f806232bb613d2c9b
|
refs/heads/master
| 2022-04-29T02:56:04.486050
| 2022-04-22T01:43:22
| 2022-04-22T01:43:22
| 144,622,761
| 4
| 7
|
NOASSERTION
| 2020-03-30T13:53:16
| 2018-08-13T19:10:22
|
R
|
UTF-8
|
R
| false
| false
| 9,059
|
r
|
plot_functions.R
|
#' Make MD plots for all combinations of a condition
#'
#' Produces one grid of mean-difference (MD) plots per chromosome, with a
#' panel for every pairwise combination of samples in the hicexp object.
#'
#' @importFrom graphics abline legend lines par persp
#'     points smoothScatter axis
#' @param hicexp A hicexp object.
#' @param prow The number of rows to use for the grid of MD plots.
#'     Defaults to 3.
#' @param pcol The number of columns to use for the grid of MD plots.
#'     Defaults to 3.
#' @param plot.chr A specific chromosome or set of chromosomes to plot,
#'     given as numeric values, e.g. plot.chr = c(1, 5). The default NA
#'     plots every chromosome present in the hicexp.
#' @param plot.loess Logical, should a loess curve be plotted over the MD
#'     plots. Setting this to TRUE increases plotting time.
#'
#' @return A set of MD plots.
#'
#'
#' @export
#' @examples
#' data("hicexp2")
#' MD_hicexp(hicexp2)
MD_hicexp <- function(hicexp, prow = 3, pcol = 3, plot.chr = NA, plot.loess = FALSE) {
  full_table <- hic_table(hicexp)
  if (length(unique(full_table$chr)) > 1) {
    # Multiple chromosomes: draw one grid of MD plots per chromosome.
    by_chr <- split(full_table, full_table$chr)
    if (!is.na(plot.chr[1])) {
      # Restrict to the chromosomes the caller asked for.
      wanted <- which(as.numeric(names(by_chr)) %in% plot.chr)
      if (length(wanted) < 1) {
        stop("Chr selected in plot.chr does not exist in the data")
      }
      tmp <- lapply(by_chr[wanted], .MD.hicexp.chr, prow = prow,
                    pcol = pcol, plot.loess = plot.loess)
    } else {
      tmp <- lapply(by_chr, .MD.hicexp.chr, prow = prow, pcol = pcol,
                    plot.loess = plot.loess)
    }
  } else {
    # Single chromosome: plot it directly.
    .MD.hicexp.chr(full_table, prow = prow, pcol = pcol, plot.loess = plot.loess)
  }
}
# Internal helper: draw a grid of MD plots for every pairwise sample
# combination on a single chromosome.
#
# chr_table: hic_table rows for one chromosome; columns 5+ hold the sample
#   interaction frequencies (data.table-style indexing is used below).
# prow/pcol: grid dimensions passed to par(mfrow).
# plot.loess: overlay a loess curve on each panel.
.MD.hicexp.chr <- function(chr_table, prow, pcol, plot.loess) {
  # save par so the graphics device is restored when we are done
  old_par <- par()
  # get all unique pairs of sample columns
  samples <- 5:ncol(chr_table)
  combinations <- combn(samples, 2)
  # make M matrix: one column of log fold changes per sample pair
  M_matrix <- matrix(nrow = nrow(chr_table), ncol = ncol(combinations))
  par(mfrow = c(prow, pcol), mai = c(0.3, 0.3, 0.2, 0.1))
  for (j in seq_len(ncol(combinations))) {
    # M = log2 ratio of the two samples' IFs; +1 pseudo-count avoids log(0)
    M_matrix[,j] <- log2( (chr_table[, combinations[1,j], with = FALSE] + 1)[[1]] /
                            (chr_table[, combinations[2,j], with = FALSE] + 1)[[1]] )
    # make MD plot (sample numbers are column index minus the 4 id columns)
    .MD.smooth(M_matrix[,j], chr_table$D, title = paste0('chr', chr_table$chr[1],
                                                         ' ',
                                                         'Sample ',
                                                         combinations[1,j] - 4,
                                                         ' vs. ', combinations[2,j] - 4),
               ylab = '', xlab = '',
               plot.loess = plot.loess)
  }
  # par() returns some read-only entries; restoring them warns, so suppress.
  # (Removed an unused `plot_list <- list()` local from the original.)
  suppressWarnings(par(old_par))
}
#' Plot a composite MD plot with the results of a comparison
#' @param hicexp A hicexp object which has
#'     had a multiHiCcompare comparison step performed on it.
#' @param plot.chr A specific chromosome or
#'     set of chromosome which you want to plot.
#'     This should be a numeric value, i.e. to
#'     plot chromosome 1 set plot.chr = 1, to
#'     plot chromosomes 1 and 5 set plot.chr
#'     = c(1, 5). Defaults to NA indicating that
#'     all chromosomes present in the hicexp
#'     will be plotted.
#' @param D.range Allows for subsetting of the plot by
#'     Distance. Set to proportion of total distance
#'     that you want to be displayed. Value of 1
#'     indicates that the entire distance range
#'     will be displayed. Defaults to 1.
#' @return An MD plot
#' @examples
#' data("hicexp_diff")
#' MD_composite(hicexp_diff)
#' @export
MD_composite <- function(hicexp, plot.chr = NA, D.range = 1) {
  # check to make sure data has been compared
  if (nrow(results(hicexp)) < 1) {
    stop("You must compare the Hi-C data first before using
this plot function")
  }
  # check D.range is a proportion in (0, 1]
  if (D.range > 1 | D.range <= 0) {
    stop('D.range must be less than or equal to 1 and greater than 0.')
  }
  # check if more than one chromosome then split
  if (length(unique(results(hicexp)$chr)) > 1) {
    chr_table <- split(results(hicexp), results(hicexp)$chr)
    # check if chr to plot is specified
    if (!is.na(plot.chr[1])) {
      chrs.to.plot <- which(as.numeric(names(chr_table)) %in% plot.chr)
      if (length(chrs.to.plot) < 1) {
        stop("Chr selected in plot.chr does not exist in the data")
      }
      # one MD plot per requested chromosome; significant points are
      # highlighted by .MD.smooth via the adjusted p-values
      tmp <- lapply(chr_table[chrs.to.plot], function(x) {
        .MD.smooth(x$logFC, x$D, x$p.adj, title = paste0('chr', x$chr[1]),
                   plot.loess = FALSE, D.range = D.range)
      })
    } else {
      # otherwise plot every chr
      tmp <- lapply(chr_table, function(x) {
        .MD.smooth(x$logFC, x$D, x$p.adj, title = paste0('chr', x$chr[1]),
                   plot.loess = FALSE, D.range = D.range)
      })
    }
  } else {
    # if only a single chr just plot
    .MD.smooth(M = results(hicexp)$logFC, D = results(hicexp)$D,
               p.val = results(hicexp)$p.adj,
               title = "Composite MD Plot",
               plot.loess = FALSE, D.range = D.range)
  }
}
# Internal helper: draw one smoothed MD scatter (M vs genomic distance D).
#   M: log fold changes; D: distances; p.val: optional adjusted p-values used
#   to highlight significant points; D.range: proportion (0, 1] of the
#   distance range to display; plot.loess: overlay a loess trend line.
.MD.smooth <- function(M, D, p.val = NA, title = 'MD Plot', ylab = 'M',
                       xlab = 'Distance', plot.loess = FALSE, D.range = 1) {
  # subset plot by D when only part of the distance range is wanted
  if (D.range < 1) {
    max.D <- max(D)
    cut.point <- ceiling(D.range * max.D)
    # subset M, D, p.val vectors based on cut.point
    keep <- D <= cut.point
    D <- D[keep]
    M <- M[keep]
    p.val <- p.val[keep]
  }
  # smooth scatter version of the MD cloud, with a reference line at M = 0
  smoothScatter(D, M, xlab = xlab, ylab = ylab, main = title, cex.main = 0.85)
  abline(h = 0)
  if (!is.na(p.val[1])) {
    # overlay significant interactions at two thresholds
    p0.001 <- which(p.val < 0.001)
    p0.05 <- which(p.val >= 0.001 & p.val < 0.05)
    points(D[p0.001], M[p0.001], col = "red", pch = 20)
    points(D[p0.05], M[p0.05], col = 'yellow', pch = 20)
    legend('bottomright', legend = c('P < 0.001', 'P < 0.05'),
           fill = c('red', 'yellow'), bty = 'n', horiz = TRUE)
  }
  # add loess fit to plot
  if (plot.loess) {
    lines(loess.smooth(D, M, span = 1/2, degree = 1), col = 'red')
  }
}
#' Function to visualize p-values from multiHiCcompare results
#'
#' @param hicexp A hicexp object that has been
#'     normalized and has had differences detected.
#' @param alpha The alpha level at which you will
#'     call a p-value significant. If this is set to
#'     a numeric value then any p-values >= alpha will
#'     be set to 1 for the visualization in the heatmap.
#'     Defaults to NA for visualization of all p-values.
#' @param chr The numeric value for the chromosome that
#'     you want to plot. Set to 0 to plot all chromosomes
#'     in the dataset.
#' @details The goal of this function is to visualize
#'     where in the Hi-C matrix the differences are
#'     occurring between two experimental conditions.
#'     The function will produce a heatmap of the
#'     -log10(p-values) * sign(logFC)
#'     to visualize where the
#'     significant differences between the datasets
#'     are occurring on the genome.
#' @return A heatmap
#' @examples
#' data("hicexp_diff")
#' pval_heatmap(hicexp_diff, chr = 22)
#' @importFrom pheatmap pheatmap
#' @export
pval_heatmap <- function(hicexp, alpha = NA, chr = 0) {
  # check input: the comparison step must have been run
  if (nrow(results(hicexp)) < 1) {
    stop("You must compare the hicexp first.")
  }
  if (!is.numeric(chr)) {
    stop("chr should be a numeric value")
  }
  res <- results(hicexp)
  if (chr != 0 && sum(chr == as.numeric(unique(res$chr))) < 1) {
    stop("The value of chr selected does not appear in the hicexp")
  }
  # if chr = 0 split data up by chr (one heatmap per chromosome)
  if (chr == 0) {
    chr.list <- split(res, res$chr)
  } else {
    # Bug fix: the original used `results(hicexp)[chr == chr, ]`, where the
    # `chr` column shadows the function argument, so the condition was always
    # TRUE and no subsetting happened. Build the mask outside the table so the
    # argument is compared against the column explicitly.
    keep_rows <- as.numeric(res$chr) == chr
    chr.list <- list(res[keep_rows, ])
  }
  # convert each result table to sparse (region1, region2, p.adj) form
  m <- lapply(chr.list, function(x) {
    new.table <- cbind(x$region1, x$region2, x$p.adj)
    return(new.table)
  })
  # convert to full matrix
  m <- lapply(m, HiCcompare::sparse2full)
  # matching fold-change matrices provide the sign of each difference
  fc <- lapply(chr.list, function(x) {
    new.table <- cbind(x$region1, x$region2, x$logFC)
    return(new.table)
  })
  fc <- lapply(fc, HiCcompare::sparse2full)
  # remove non significant values from matrix if alpha is set to a value
  if (!is.na(alpha)) {
    for (i in seq_len(length(m))) {
      m[[i]][m[[i]] >= alpha] <- 1
    }
  }
  # plot heatmap of signed -log10 p-values, one per chromosome
  mapply(function(m, fc) {
    pheatmap::pheatmap(-log10(m) * sign(fc), cluster_rows = FALSE,
                       cluster_cols = FALSE, show_rownames = FALSE,
                       show_colnames = FALSE)
  },
  m, fc, SIMPLIFY = FALSE)
}
|
8446003031e58ecec1650e1a807e363126fa05d5
|
17bce903f04119fd1e888216675eea2834191315
|
/man/dplookup.Rd
|
f021a81b0a241631ac9a878417f36aabf77e45f1
|
[] |
no_license
|
cran/UScancer
|
a81d91adbf571edd9d575c6887efb3886d3f4346
|
c8b0bde7b24f5ec32957e5b32b072a27737d32b0
|
refs/heads/master
| 2020-12-24T16:49:53.609812
| 2014-08-08T00:00:00
| 2014-08-08T00:00:00
| 17,693,982
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 949
|
rd
|
dplookup.Rd
|
\name{dplookup}
\alias{dplookup}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Census demographics profile lookup table
}
\description{
A lookup table for US census demographic profile data element to its full name. Only contains the codes for sex and age groups.
}
\usage{data(dplookup)}
\format{
A \code{\link{data.frame}} object with 39 rows with the following 2 variables.
\describe{
\item{\code{code}}{Demographic profile data element code}
\item{\code{desc}}{A simplified description of the code. i.e. Male ages 5 to 9 is M5_9}
}
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
Original data from \url{http://gis.drcog.org/datacatalog/sites/default/.../Census_Table_Descriptions.xls}.
}
%\references{
%% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(dplookup)
}
\keyword{datasets}
|
83a77a9027ee5f7684ba47eeeaabd5e58ac19aad
|
d690af5c19bb0d6b723e1b8f1687794b4e0f8830
|
/tests/testthat/test-keras-mlp-classification.R
|
235862b18103773c55faddfec7f8dd68cf4dd0c5
|
[
"MIT"
] |
permissive
|
roldanalex/safepredict
|
03113c5095518fef7c007c7e98342ecf15c0f9dc
|
05c3b9c8770583221a73b7b68f88805402630f5f
|
refs/heads/master
| 2021-10-09T11:32:24.866936
| 2018-12-27T06:11:45
| 2018-12-27T06:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,300
|
r
|
test-keras-mlp-classification.R
|
# context("test-keras-mlp-classification")
#
# library(testthat)
# context("simple neural network execution with keras")
# library(parsnip)
# library(tibble)
#
# ###################################################################
#
# num_pred <- names(iris)[1:4]
#
# iris_keras <- mlp(mode = "classification", hidden_units = 2)
#
#
#
# test_that('keras classification prediction', {
#
# skip_if_not_installed("keras")
# library(keras)
#
# xy_fit <- parsnip::fit_xy(
# iris_keras,
# x = iris[, num_pred],
# y = iris$Species,
# engine = "keras",
# control = ctrl
# )
#
# xy_pred <- predict_classes(xy_fit$fit, x = as.matrix(iris[1:8, num_pred]))
# xy_pred <- factor(levels(iris$Species)[xy_pred + 1], levels = levels(iris$Species))
# expect_equal(xy_pred, predict_class(xy_fit, new_data = iris[1:8, num_pred]))
#
# keras::backend()$clear_session()
#
# form_fit <- parsnip::fit(
# iris_keras,
# Species ~ .,
# data = iris,
# engine = "keras",
# control = ctrl
# )
#
# form_pred <- predict_classes(form_fit$fit, x = as.matrix(iris[1:8, num_pred]))
# form_pred <- factor(levels(iris$Species)[form_pred + 1], levels = levels(iris$Species))
# expect_equal(form_pred, predict_class(form_fit, new_data = iris[1:8, num_pred]))
#
# keras::backend()$clear_session()
# })
#
#
# test_that('keras classification probabilities', {
#
# skip_if_not_installed("keras")
#
# xy_fit <- parsnip::fit_xy(
# iris_keras,
# x = iris[, num_pred],
# y = iris$Species,
# engine = "keras",
# control = ctrl
# )
#
# xy_pred <- predict_proba(xy_fit$fit, x = as.matrix(iris[1:8, num_pred]))
# xy_pred <- as_tibble(xy_pred)
# colnames(xy_pred) <- levels(iris$Species)
# expect_equal(xy_pred, predict_classprob(xy_fit, new_data = iris[1:8, num_pred]))
#
# keras::backend()$clear_session()
#
# form_fit <- parsnip::fit(
# iris_keras,
# Species ~ .,
# data = iris,
# engine = "keras",
# control = ctrl
# )
#
# form_pred <- predict_proba(form_fit$fit, x = as.matrix(iris[1:8, num_pred]))
# form_pred <- as_tibble(form_pred)
# colnames(form_pred) <- levels(iris$Species)
# expect_equal(form_pred, predict_classprob(form_fit, new_data = iris[1:8, num_pred]))
#
# keras::backend()$clear_session()
# })
#
|
03bd564d1bb78675aa7dfa71fe987d461cf2f9c9
|
92348974c2b669227c168e5264affe0922c153dc
|
/integrateIt/R/integrateIt-internal.R
|
ae2d395817af78f5982ba354e7f4c47bc243fc51
|
[] |
no_license
|
jmetz2131/Metz-Midterm
|
a33a363345f75cfeef1aecc9eff96e1ea5f06260
|
247e7be34c283a249ed4c30ad6a4725397ff74fa
|
refs/heads/master
| 2021-01-10T09:53:53.619416
| 2016-03-24T06:14:42
| 2016-03-24T06:14:42
| 54,599,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,963
|
r
|
integrateIt-internal.R
|
.__C__Candidate <-
<S4 object of class structure("classRepresentation", package = "methods")>
.__C__Simp <-
<S4 object of class structure("classRepresentation", package = "methods")>
.__C__Squares <-
<S4 object of class structure("classRepresentation", package = "methods")>
.__C__Trap <-
<S4 object of class structure("classRepresentation", package = "methods")>
`.__T__chooseCandidate:.GlobalEnv` <-
<environment>
`.__T__createCandidate:.GlobalEnv` <-
<environment>
`.__T__initialize:methods` <-
<environment>
`.__T__integrateIt:.GlobalEnv` <-
<environment>
`.__T__multiplySquares:.GlobalEnv` <-
<environment>
`.__T__print:base` <-
<environment>
`.__T__propNeeded:.GlobalEnv` <-
<environment>
`.__T__show:methods` <-
<environment>
.Random.seed <-
c(403L, 14L, -1536710369L, 1620556149L, 745158668L, 858628578L,
1100724925L, -227338953L, -1442014754L, -1474199784L, 712201851L,
-1266772967L, -1756789592L, -1805713834L, -320349487L, -1690281341L,
-558321390L, -1071844188L, 142615655L, 174927293L, 1218916148L,
1159662746L, 1275081445L, 1873349759L, 1894319622L, -2081205872L,
579750963L, 2086405073L, 1604635008L, 1534370910L, -1947831383L,
-1220629925L, -1379103126L, 369668876L, 768005455L, 1523338757L,
1952232636L, 288613778L, 1034534541L, 1431655623L, -1149636178L,
1904679944L, 1846267915L, 28615913L, -212728904L, -841200538L,
-1954353919L, 712354707L, -45470654L, 709678068L, -852501001L,
-1396702739L, -838602108L, -2009209686L, -2120117739L, 775040687L,
611781814L, -118506784L, 1151428131L, -602347199L, -1825606608L,
-1970147762L, 1251441337L, -936599861L, 1058146170L, -551473540L,
249899711L, 542116757L, -1141003348L, 614941058L, -1202682019L,
1995090519L, -1848925890L, -478535304L, -1630856613L, -1201852999L,
1929498632L, 164508342L, 1205930481L, 63324195L, -80117646L,
-1974459708L, 775050375L, -1963186083L, -1581994412L, 1819626618L,
-427714619L, -259679969L, 1565776038L, 1079392816L, 1270284499L,
-510521615L, 533818912L, -159754050L, -300860215L, -619727237L,
-1431882678L, 58662060L, -1847014481L, -1246810267L, 890045724L,
-39449230L, 2074756589L, 846711335L, -847205810L, 975728296L,
-1338563413L, -1704426039L, -837252648L, -1568087930L, -1765315231L,
629072115L, 1096453986L, 706341332L, 1744542935L, -127443251L,
1029576292L, -1715432374L, -2048187979L, -951571185L, 1854933526L,
1953236160L, 1076314755L, 1997049505L, -498368368L, -1170199698L,
-688212711L, -891877973L, 589366170L, -1023763620L, 84916831L,
-71704011L, 1085958604L, 1677779234L, 1335518077L, -1880211337L,
2011497886L, -308134824L, -640423493L, -771051687L, 1115131112L,
289264918L, 1686554257L, -119960765L, -201173422L, -702400284L,
1861233447L, 547872637L, -948604300L, 739241050L, 720991397L,
-2087973185L, -1572768442L, 876067792L, 1903793651L, 875929873L,
-927847104L, 1225878814L, 713365993L, -1559782629L, -1877572310L,
1790565964L, 1638656527L, 1849896261L, 1663872764L, 1215874386L,
-276960563L, 1805943431L, 727369070L, 1298923336L, -516273333L,
746042025L, 975615992L, -899410138L, -207173567L, 1112009171L,
-1619426430L, -49223756L, 718110519L, -193014227L, -1869453500L,
144823402L, 1284771541L, -784054161L, -1424258698L, -1782140256L,
-528900253L, -134482943L, 1830420336L, -2013350130L, 539472249L,
-505985781L, -1073291718L, -680782532L, 370154111L, -445087275L,
-100769812L, 336445762L, 928939677L, -1505961193L, -1532966274L,
-1043654088L, 1972395291L, -2085483911L, 699533000L, 2125995766L,
-551160015L, -1928695965L, 123634994L, 245814916L, -943078713L,
369205661L, 1247274260L, -594334790L, -1629118971L, -1495965729L,
858314342L, -63831568L, -1879339245L, -2091642703L, 649900896L,
280781054L, 1361891465L, 2111111099L, 2015529354L, 1325152364L,
-1150166545L, 1873089061L, -1067039012L, 59587912L, 1542264506L,
-23477488L, 1442167020L, -1244869548L, -1970541806L, 829705632L,
-442550324L, -1063455904L, -1785365310L, 1138139688L, -12894804L,
444910396L, -892174478L, 609656624L, -1801370588L, -2866632L,
668067546L, -800750704L, 1299814652L, -786660972L, 1682719554L,
-1828570624L, -2134178756L, -39540144L, 1124041890L, -1192544440L,
1465687564L, 1401954012L, -155385774L, 1692311680L, 1267096404L,
-1587878936L, 66061050L, -10117296L, 553484332L, -1483665036L,
185676434L, 2117861984L, 685704332L, -364038816L, 1182656738L,
1799602344L, -1378972980L, 2068047356L, 124854194L, 440956912L,
40650180L, 1271912792L, -30231174L, 588645808L, -935794244L,
1247984212L, 307621314L, -636568800L, -490427460L, -1475132336L,
-1902223390L, 1777222504L, -1587869876L, -1681097988L, -742761742L,
-1510887168L, -971959180L, -1888126456L, 1986628026L, 455981520L,
298273772L, 1233504148L, 434785490L, 489769056L, -664477364L,
542451488L, -1640469886L, -2131072536L, 1285034988L, -638714308L,
1591548530L, 788488048L, -427069148L, -704745928L, -1774838374L,
1892890064L, 1038193084L, 1052213908L, -1549478270L, -703606336L,
894869308L, -1941014896L, -1260621022L, -1933007352L, -630999284L,
649820060L, -1159970798L, 587670400L, 1306558228L, 2039171368L,
-1022271430L, -473311280L, -736163220L, -1224225740L, 5092498L,
997440864L, 1641435468L, 981740000L, -1981587550L, 1284534184L,
-2108476276L, 1108906940L, 924645490L, -1809902608L, 457179204L,
-2120256424L, -622325702L, -1494392720L, -645754308L, 1229634324L,
-1655316798L, -1800266016L, 1137251452L, -1729363760L, 953045922L,
1664619048L, -2021563124L, 484096956L, -1391442638L, -1018770240L,
-1514996172L, -612337976L, -1632676166L, -681967472L, 2030418668L,
-934073772L, -82612590L, -672621792L, -1220498484L, 543341792L,
-1727812926L, -1973264344L, 527567788L, -546260292L, -1145215502L,
1806288432L, -1406505052L, -1952152136L, 1093263194L, 178444432L,
1228368764L, -944315628L, -488267454L, 1959199488L, 137621436L,
-1269025968L, 1150186786L, 1977097800L, 779905036L, -752005028L,
-436520494L, 105232896L, 1747902804L, -120920856L, 1607310586L,
-1708255536L, 990498092L, 17131764L, 1643148690L, -485024L, 31209484L,
868220640L, 48200290L, -620224856L, -1254468020L, 1670953852L,
786969906L, 362643056L, -62751932L, -305523752L, -915628166L,
389018160L, 654674876L, -1966595500L, -296432574L, 1762531488L,
-2016623940L, 324541904L, -83716254L, -986524952L, -1036540724L,
-1830244868L, -1093182990L, 1753659008L, -1566841356L, -1332391288L,
-150288582L, 2063408976L, 1931857004L, -1631769196L, -311358382L,
1696635360L, -39660340L, -1549143904L, -981128830L, -1706280728L,
84599532L, 1729525052L, -574321550L, -2079161232L, 2030201892L,
969072696L, 1751747354L, -45695792L, -1205683396L, 1893771156L,
-1446301822L, -44968512L, -2087624132L, 1934064016L, -601908702L,
-810652408L, 456989324L, -1937983716L, 1753829522L, -1405525632L,
36323860L, 67715880L, 1269327418L, 1487174096L, 198923414L, -1198216541L,
-395703195L, -557442686L, -1624743184L, 924939929L, 250894987L,
1300052860L, -1603863126L, 1700124127L, 999859049L, 737919870L,
1508801388L, 74026941L, 814562439L, 1933611120L, -531080434L,
-2022393797L, -1545930979L, -262580790L, -1851281688L, -1148280367L,
-1686342717L, 425807044L, -51997838L, -2081993145L, -313889263L,
-1824309354L, 1527694068L, 2067391621L, 208837071L, -659616568L,
-1670185626L, -1144840301L, -315145995L, -123189038L, -1467648224L,
-792348919L, 276013115L, -225897076L, -929936358L, -521396081L,
1526396889L, 72685678L, 978557948L, -253539987L, 1341178647L,
-509910304L, 1519395006L, 608947851L, 931763469L, 1072552250L,
224356728L, -2009461983L, -1789715501L, 1272259316L, -1381758078L,
624720407L, 2025412833L, 1390655014L, -1150193884L, 1844223957L,
-311744641L, 1291080216L, -826519242L, -1075380285L, 861584901L,
-230429278L, 1549873552L, -1640380103L, 289443307L, 1120038620L,
727325450L, 442209791L, -781783991L, -950956578L, -621814388L,
-270325411L, 313786727L, 96158352L, -727229266L, 28020059L, 284815677L,
-1748657238L, 94424136L, 1965765745L, -155565597L, -1786307356L,
-1044199406L, 1345842151L, -859679823L, 1334914550L, 606490644L,
1505132325L, 1898162223L, -85823000L, 300861126L, 193496947L,
-1448763435L, 1836406706L, -1790602880L, -220321431L, 1119960539L,
-34900692L, 1135705914L, 185758191L, 1041466489L, 1228273422L,
330511068L, -1860808371L, -37712713L, -1070033344L, -851658978L,
1890799083L, -1398771347L, 976752602L, 863574424L, 1501243393L,
-1679816269L, -570474284L, -1149073950L, -525283209L, 1014035905L,
932732742L, 225553028L, 1393316917L, -86977505L, 1777234808L,
-1029096874L, 204703459L, -127623003L, -1016008382L, -984893648L,
939770713L, -332052277L, 639154492L, 44004714L, 1913356191L,
-23524823L, -1647553346L, -200782420L, 1238135549L, 1315487303L,
849370416L, -1378931122L, -3449221L, 1202761181L, -1925837814L,
-540529368L, 1739400081L, 657995139L, -23260540L, -598437710L,
-23498745L, 1195699281L, -983106602L, -192970060L, 2013471429L,
528385039L, -2055199608L, -360020570L, -989213357L, -908851531L,
-1602733038L, 1683414496L, -508116919L, -2126043525L, -477106356L,
1739287258L, -907796942L)
|
dd4705d3066d4e43a8ee85cc5ba694332af6df08
|
df7c8f0ffc8ea6314f36f0cff60522ecabeb50a7
|
/R/iron_condour.R
|
65b36e950af015a14cb8196ae084c53aa5c288af
|
[] |
no_license
|
cran/roptions
|
60ac5e47b9da1668fee96061ad0a678a3503fef1
|
eae716417df9453ea387a2bd289dffdd26562726
|
refs/heads/master
| 2022-06-24T16:41:53.934062
| 2020-05-11T10:10:06
| 2020-05-11T10:10:06
| 263,169,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,842
|
r
|
iron_condour.R
|
#' @title Iron Condour Strategy Function
#' @description This function can be used to develop an Iron Condor Strategy.
#' @param k_long_call Exercise Price of Long Call Option
#' @param k_short_call Exercise Price of Short Call Option
#' @param k_long_put Exercise Price of Long Put Option
#' @param k_short_put Exercise Price of Short Put Option
#' @param c1 Premium of Long Call Option
#' @param c2 Premium of Short Call Option
#' @param p1 Premium of Long Put Option
#' @param p2 Premium of Short Put Option
#' @param llimit Lower limit of stock price at expiration. Default: 20
#' @param ulimit Upper limit of stock price at expiration. Default: 20
#' @return Prints a data frame of the per-leg and total profit/loss across
#'     terminal stock prices and displays an interactive plot of the same.
#' @details An iron condor is an options strategy created with four options
#'     consisting of two puts (one long and one short) and two calls (one long
#'     and one short), and four strike prices, all with the same expiration date.
#' @examples
#' iron.condour(100, 95, 105, 102, 2.3, 1.25, 3.2, 2.3)
#' @rdname iron.condour
#' @export
#' @importFrom purrr map_dbl
#' @import ggplot2
#' @importFrom plotly ggplotly
#' @importFrom stats pnorm
iron.condour = function(k_long_call, k_short_call, k_long_put, k_short_put, c1, c2, p1, p2, llimit = 20, ulimit = 20){
  # Grid of terminal stock prices examined, centred on the long-call strike.
  stock_price_at_expiration = round((k_long_call - llimit)):round((ulimit + k_long_call))
  # Payoff of each leg at expiration, net of premium paid (long) or received (short).
  long_call = (map_dbl(stock_price_at_expiration , .f = ~max(.x - k_long_call,0))) - c1
  short_call = (-1* map_dbl(stock_price_at_expiration, .f = ~max(.x - k_short_call,0))) + c2
  long_put = (map_dbl(stock_price_at_expiration, .f = ~max(k_long_put - .x,0))) - p1
  # (Removed an unused chained `put_option =` alias present in the original.)
  short_put = ( -1 * map_dbl(stock_price_at_expiration, .f = ~max(k_short_put - .x,0))) + p2
  profit_loss = long_call + short_call + long_put + short_put
  df = data.frame(stock_price_at_expiration, long_call, short_call, long_put, short_put, profit_loss)
  # Use a distinct name for the plot object: the original reassigned the `p1`
  # premium argument here, which was confusing (though harmless after its last read).
  payoff_plot = ggplot(data = df) +
    geom_line(aes(x = stock_price_at_expiration, y = long_call, colour = 'long_call')) +
    geom_line(aes(x = stock_price_at_expiration, y = short_call, colour = 'short_call')) +
    geom_line(aes(x = stock_price_at_expiration, y = long_put, colour = 'long_put')) +
    geom_line(aes(x = stock_price_at_expiration, y = short_put, colour = 'short_put')) +
    geom_line(aes(x = stock_price_at_expiration, y = profit_loss, colour = 'profit_loss')) +
    labs(x = 'stock price at expiration', y = 'profit/loss', title = 'Iron Condour Plot', color = 'Option contract') +
    scale_colour_manual('', breaks = c('long_call', 'short_call', 'profit_loss', 'long_put', 'short_put'), values = c('blue', 'red', 'black', 'green', 'yellow'))
  print(df)
  print(ggplotly(payoff_plot))
}
|
e379475cf0f0e868da551a822fd08f152e9e29a3
|
8f2f7844354a8b68ad1afe5e5fd322b6bd1b1b8e
|
/lab4/5.R
|
894a642354e12546ad8bdd76d2bc7f0f46454186
|
[] |
no_license
|
KUGDev/EMMvSA
|
0f6f97df07ce5e4c0e50272863b5b8481a730853
|
08bfdeab9f84c333774aefb0ed341a444dce2656
|
refs/heads/master
| 2023-04-13T22:53:15.367460
| 2021-05-02T23:10:44
| 2021-05-02T23:10:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,409
|
r
|
5.R
|
library(e1071)
# Colour palette for SVM decision-region backgrounds: the first three colours
# are fixed pastels; any additional colours fall back to the rainbow() values.
area.pallete = function (n = 3)
{
  palette_cols = rainbow(n)
  palette_cols[1 : 3] = c("PaleGreen", "PaleTurquoise", "Pink")
  palette_cols
}
# Colours for the class symbols in the SVM plots (one per iris species).
symbols.pallete = c("SeaGreen", "Blue", "Red")
# Fix the RNG so the train/test split below is reproducible.
set.seed(0)
C = 1
kernel = "polynomial"
degree = 1
# Two petal measurements plus the class label are kept from iris.
data = iris[c("Petal.Width", "Petal.Length", "Species")]
# Random 50/50 split into training and test sets.
trainIdx = sample(nrow(data), nrow(data) / 2, replace = FALSE)
train = data[trainIdx,]
dataTest = data[-trainIdx,]
# Predictor-only views of the two splits, used for predict().
objects = data[trainIdx, c("Petal.Width", "Petal.Length")]
testObjects = data[-trainIdx, c("Petal.Width", "Petal.Length")]
# Fit an SVM with the given hyper-parameters on the training split, draw the
# decision regions for both splits, and return c(train errors, test errors).
# NOTE(review): relies on the globals `train`, `objects`, `dataTest`,
# `testObjects`, `symbols.pallete`, `area.pallete` defined above.
test_model = function(C, kernel, degree, gamma)
{
linearModel = svm(Species ~ ., data = train, type = "C-classification", cost = C, kernel = kernel, degree = degree, gamma = gamma)
# Sum the off-diagonal cells of a 3x3 confusion table (column-major indices
# 2,3,4,6,7,8), i.e. the number of misclassified observations. Assumes all
# three species appear in both the truth and prediction factors.
error_count = function(t)
{
return (sum(t[c(2, 3, 4, 6, 7, 8)]))
}
forecastsTrain = predict(linearModel, objects)
train_table = table(train$"Species", forecastsTrain)
plot(linearModel, train, grid = 250, symbolPalette = symbols.pallete, color.palette = area.pallete)
# Open a second plotting window so the test-set plot does not overwrite it.
x11()
forecastsTest = predict(linearModel, testObjects)
test_table = table(dataTest$"Species", forecastsTest)
plot(linearModel, dataTest, grid = 250, symbolPalette = symbols.pallete, color.palette = area.pallete)
return (c(error_count(train_table), error_count(test_table)))
}
# Final run: radial (RBF) kernel. `degree` is ignored by this kernel;
# gamma = 500 produces a very tight, wiggly decision boundary.
kernel = "radial"
C = 1
errors = test_model(C, kernel, 1, 500)
# errors = c(training misclassifications, test misclassifications)
print(errors)
|
4436ca7ee2896ce1f9a73ae1d13a831a920ed4bc
|
8f599a3252717ce917ea6a408db24fbc17de8428
|
/hope mci sandbox/Judgements.R
|
3907003437eeb338e8eb974759cd32fc567a7d8a
|
[] |
no_license
|
cmishra/dsicapstone-predicting_extremism
|
423ef9f4c55192f8d79bdef328e3cee5e0725296
|
35fe30e0254e83f5f2ec0f5800106f85549e2bac
|
refs/heads/master
| 2021-01-21T04:27:45.746344
| 2016-04-07T18:28:47
| 2016-04-07T18:28:47
| 42,951,048
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,303
|
r
|
Judgements.R
|
# Exploratory script: flag "judgement" sentences (a to-be verb + an adjective
# + a noun) in sermon texts using openNLP's Maxent annotators.
# NOTE(review): remove(list = ls()) and an absolute setwd() make this script
# machine-specific and destructive to the session -- consider removing both.
remove(list=ls())
library(tm)
library(topicmodels)
library(openNLP)
library(NLP)
library(stringr)
setwd('/Users/hopeemac/Documents/Education/Classes/UVA MSDS/15F/Capstone/westboro_sermons.pdf')
# Import all WBC Sermons
corpus <- VCorpus(DirSource('.'))
# Prototype on the first document only; v0.4 below loops over the corpus.
doc1 <- as.String(corpus[[1]])
##### Sentence segmentation -- assumes you have a document (our "text" variable)
#####
sentence.annotator = Maxent_Sent_Token_Annotator()
sentence.annotations = annotate(doc1, sentence.annotator)
#####
##### Tokenization -- builds on sentence segmentation
#####
token.annotator = Maxent_Word_Token_Annotator()
token.annotations = annotate(doc1, token.annotator, sentence.annotations)
# Prints all tokens (sentences and words)
doc1[token.annotations]
##### Sentence segmentation and tokenization are basic steps required for many other analyses: POS tagging, NEI, and syntactic parsing.
#####
##### Part-of-speech (POS) tagging -- builds on tokenization
#####
pos.annotator = Maxent_POS_Tag_Annotator()
pos.annotations = annotate(doc1, pos.annotator, token.annotations)
# One row per sentence; feature columns are filled in by the loops below.
master <- as.data.frame(doc1[sentence.annotations])
View(master)
# Create word-only POS subset
# POSw <- subset(pos.annotations, type == "word")
POSs <- as.data.frame(subset(pos.annotations, type == "sentence"))
# substr(doc1,1,127)
# View(n[which(n$start >= 1 & n$end < 127),])
# List of row numbers for sentence words, for the first sentence only.
# NOTE(review): the name `c` shadows base::c inside this script -- rename.
c <- as.numeric(unlist(((POSs[1,"features"][[1]]))))
View(c)
# pos.annotations[c(148,149)]]
master$words <- NA
### KEY! WORKING 1/12
# v0.1: for every sentence, collect its word tokens into a list column.
# NOTE(review): seq(nrow(POSs)) would misbehave for 0 rows; seq_len is safer.
wordsAll <- list()
for (i in seq(nrow(POSs))) {
  # `features` holds the annotation ids of this sentence's word tokens.
  x <- as.numeric(unlist(((POSs[i,"features"][[1]]))))
  y <- as.data.frame(pos.annotations[x])
  words <- c()
  for (j in seq(1,nrow(y))) {
    # Recover each token's text from its start/end offsets in doc1.
    words <- append(words,substr(doc1,y[j,"start"],y[j,"end"]))
    # print(paste("added ",substr(doc1,y[i,"start"],y[i,"end"])))
  }
  wordsAll[[i]] <- list(words)
}
master$words <- wordsAll
View(master)
### KEY! v0.2 Adding in POS tags
# Same as v0.1, but also collect each token's POS tag (Penn Treebank codes).
wordsAll <- list()
partsAll <- list()
for (i in seq(nrow(POSs))) {
  x <- as.numeric(unlist(((POSs[i,"features"][[1]]))))
  y <- as.data.frame(pos.annotations[x])
  words <- c()
  parts <- c()
  for (j in seq(1,nrow(y))) {
    words <- append(words,substr(doc1,y[j,"start"],y[j,"end"]))
    parts <- append(parts,unlist(y[j,"features"]))
    # print(paste("added ",substr(doc1,y[i,"start"],y[i,"end"])))
  }
  wordsAll[[i]] <- list(words)
  partsAll[[i]] <- list(parts)
}
master$words <- wordsAll
master$parts <- partsAll
View(master)
### KEY! v0.3 Checking for all 3 parts
# v0.3: per sentence, also flag whether it contains a to-be verb, an
# adjective (tag JJ only -- JJR/JJS comparatives are NOT matched here,
# unlike the scratch checks further below), and a singular noun (NN only).
toBe <- c("is","was","am","are","were","been","be","being")
wordsAll <- list()
partsAll <- list()
adjAll <- list()
toBeAll <- list()
nounAll <- list()
for (i in seq(nrow(POSs))) {
  x <- as.numeric(unlist(((POSs[i,"features"][[1]]))))
  y <- as.data.frame(pos.annotations[x])
  words <- c()
  parts <- c()
  adj_flag <- 0
  toBe_flag <- 0
  noun_flag <- 0
  for (j in seq(1,nrow(y))) {
    word <- substr(doc1,y[j,"start"],y[j,"end"])
    if (toBe_flag == 0) {
      if (word %in% toBe) {
        toBe_flag <- 1
      }
    }
    words <- append(words,word)
    part <- unlist(y[j,"features"])
    if (adj_flag == 0) {
      # print(adj_flag)
      if (part == 'JJ') {
        adj_flag <- 1
        # print(adj_flag)
      }
      # print(adj_flag)
    }
    # print(adj_flag)
    # Check for noun
    if (noun_flag == 0) {
      # print(adj_flag)
      if (part == 'NN') {
        noun_flag <- 1
        # print(adj_flag)
      }
      # print(adj_flag)
    }
    # print(adj_flag)
    parts <- append(parts, part)
    # print(paste("added ",substr(doc1,y[i,"start"],y[i,"end"])))
  }
  wordsAll[[i]] <- list(words)
  partsAll[[i]] <- list(parts)
  adjAll[[i]] <- adj_flag
  toBeAll[[i]] <- toBe_flag
  nounAll[[i]] <- noun_flag
}
master$words <- wordsAll
master$parts <- partsAll
master$adj <- adjAll
master$toBe <- toBeAll
master$noun <- nounAll
View(master)
# A "judgement" sentence requires all three flags simultaneously.
master$judgement <- ifelse(master$adj == 1 & master$toBe == 1 & master$noun == 1, 1, 0)
View(master)
### KEY! v0.4 All documents in Corpus
# v0.4: run the v0.3 pipeline over (the first four documents of) the corpus
# and stack the per-document sentence tables into one data frame.
toBe <- c("is","was","am","are","were","been","be","being")
# Iterate through all documents
bigMaster <- c()
# NOTE(review): hard-coded to the first 4 documents for testing; the
# commented line below is the intended full-corpus version.
for (d in 1:4){
# for (d in 1:length(corpus)){
doc <- as.String(corpus[[d]])
print(meta(corpus[[d]], "id"))
##### Sentence segmentation -- assumes you have a document (our "text" variable)
#####
sentence.annotator = Maxent_Sent_Token_Annotator()
sentence.annotations = annotate(doc, sentence.annotator)
#####
##### Tokenization -- builds on sentence segmentation
#####
token.annotator = Maxent_Word_Token_Annotator()
token.annotations = annotate(doc, token.annotator, sentence.annotations)
# Prints all tokens (sentences and words)
##### Sentence segmentation and tokenization are basic steps required for many other analyses: POS tagging, NEI, and syntactic parsing.
#####
##### Part-of-speech (POS) tagging -- builds on tokenization
#####
pos.annotator = Maxent_POS_Tag_Annotator()
pos.annotations = annotate(doc, pos.annotator, token.annotations)
### PROBLEM
master <- as.data.frame(doc[sentence.annotations])
# View(master)
# Create word-only POS subset
# POSw <- subset(pos.annotations, type == "word")
POSs <- as.data.frame(subset(pos.annotations, type == "sentence"))
# View(POSs)
wordsAll <- list()
partsAll <- list()
adjAll <- list()
toBeAll <- list()
nounAll <- list()
# Same sentence loop as v0.3, operating on `doc` instead of `doc1`.
for (i in seq(nrow(POSs))) {
x <- as.numeric(unlist(((POSs[i,"features"][[1]]))))
y <- as.data.frame(pos.annotations[x])
words <- c()
parts <- c()
adj_flag <- 0
toBe_flag <- 0
noun_flag <- 0
for (j in seq(1,nrow(y))) {
word <- substr(doc,y[j,"start"],y[j,"end"])
if (toBe_flag == 0) {
if (word %in% toBe) {
toBe_flag <- 1
}
}
words <- append(words,word)
part <- unlist(y[j,"features"])
if (adj_flag == 0) {
# print(adj_flag)
if (part == 'JJ') {
adj_flag <- 1
# print(adj_flag)
}
# print(adj_flag)
}
# print(adj_flag)
# Check for noun
if (noun_flag == 0) {
# print(adj_flag)
if (part == 'NN') {
noun_flag <- 1
# print(adj_flag)
}
# print(adj_flag)
}
# print(adj_flag)
parts <- append(parts, part)
# print(paste("added ",substr(doc1,y[i,"start"],y[i,"end"])))
}
wordsAll[[i]] <- list(words)
partsAll[[i]] <- list(parts)
adjAll[[i]] <- adj_flag
toBeAll[[i]] <- toBe_flag
nounAll[[i]] <- noun_flag
}
master$words <- wordsAll
master$parts <- partsAll
master$adj <- adjAll
master$toBe <- toBeAll
master$noun <- nounAll
# View(master)
master$judgement <- ifelse(master$adj == 1 & master$toBe == 1 & master$noun == 1, 1, 0)
# View(master)
# Tag rows with the source document so the stacked table stays traceable.
master$docName <- meta(corpus[[d]], "id")
print(head(master))
# NOTE(review): rbind in a loop is O(n^2); collect into a list and
# bind once if the full corpus is processed.
bigMaster <- rbind(bigMaster, master)
}
# NOTE(review): no file argument -- this prints CSV to stdout rather than
# saving it; presumably a file path was intended.
write.csv(bigMaster)
####
# Scratch / exploration area below: earlier experiments kept for reference.
# Several snippets reference objects (`n`, `sent`, `sent_token_annotator`,
# `pos_tag_annotator`, `Corpus.tagged`) that are never defined in this file
# and will error if run top-to-bottom -- TODO confirm before reuse.
# Gets all the info for the words in the first sentence
d <- as.data.frame(pos.annotations[c])
View(d)
# n$features[1]
# unlist
# Get sentences
# doc1[POSs]
# Needs to be turned into an apply function
words <- c()
for (i in seq(1,nrow(d))) {
  words <- append(words,substr(doc1,d[i,"start"],d[i,"end"]))
  print(paste("added ",substr(doc1,d[i,"start"],d[i,"end"])))
}
View(d)
d$words <- words
n$words <- c()
View(n)
n[1,"words"] <- list(words)
# Needs to be turned into an apply function
parts <- c()
for (i in seq(1,nrow(d))) {
  parts <- append(parts,unlist(d[i,"features"]))
  #print(paste("added ",substr(doc1,d[i,"start"],d[i,"end"])))
}
d$parts <- parts
View(d)
# Find sentences with a form of to-be (maybe add 'm and 've later)
toBe <- c("is","was","am","are","were","been","be","being")
for (word in d$words) {
  if (word %in% toBe) {
    print("Contains to Be!")
  }
  else {
    print("not")
  }
}
master$words %in% toBe
# To check if tobe in words list for a specific row.
master$words[[20]][[1]] %in% toBe
# Check for JJ, JJR, JJS (note: the main loops above match only JJ)
for (pos in d$parts) {
  if (pos %in% c('JJ','JJR','JJS')) {
    print("Contains ADJ!")
  }
  else {
    print("not")
  }
}
# Check for NN, NNP, NNS, NNPS (note: the main loops above match only NN)
for (pos in d$parts) {
  if (pos %in% c('NN','NNP', 'NNS', 'NNPS')) {
    print("Contains Noun")
  }
  else {
    print("not")
  }
}
nounCheck <- function(X) {
  if (X %in% c('NN','NNP', 'NNS', 'NNPS')) {
    print("Contains Noun")
  }
}
lapply(d$parts, nounCheck)
# NOTE(review): `sent` is used here before it is assigned two lines below.
for (sentence in sent) {
  print(sentence)
}
#######################################
sent <- annotate(doc1, sent_token_annotator)
head(sent)
# Turn sentences into a data frame
sentences <- as.data.frame(doc1[sent], stringsAsFactors = F)
head(sentences, 10)
View(sentences)
# Get T/F for each sentence for each word
# Need to get into workable data structure
b <- sapply(toBe, grepl, apply(sentences, 1, print), ignore.case=TRUE)
View(b)
# Get words per sentence as tokens into list
sentences[13,1]
toBeResults <- apply(sentences, 1, print)
apply(sentences, 1, print)
# NOTE(review): this rebinds `toBe` from the verb vector to a data frame,
# which changes the meaning of every later use of `toBe`.
toBe <-data.frame(is = toBeResults)
View(toBe)
View(toBeResults)
s <- sentences[toBeResults,]
View(s)
paste0(" ","is"," ")
keywords <- c("dog", "cat", "bird")
strings <- c("Do you have a dog?", "My cat ate by bird.", "Let's get icecream!")
lapply(sentences, print)
b <- sapply(toBe, grepl, apply(sentences, 1, print), ignore.case=TRUE)
b$total <- sum(b[,1:7])
b.DF <- as.data.frame(b)
View(sentences)
View(b)
?sapply
# Extract which form of to-be was found
# Identify if there is an adjective in the sentence
posResults <- lapply(s, annotate, f = pos_tag_annotator, a = sent)
doc1a <- annotate(doc1, list(word_token_annotator, sent_token_annotator))
sentPOS <- str_split(Corpus.tagged[[1]],"./.")
sentPOS
Corpus.tagged[[1]]
r <- data.frame(x = c(1,2,4))
View(r)
r$list <- list(3:4, 7:9, 7:10)
r$list[1]
# Only works with corpus
doc1[sentence.annotations] # display original document, viewed by way of sentence annotations
View(doc1[sentence.annotations])
sent <- doc1[sentence.annotations]
View(sent)
# Scratch check of the adjective-flag logic used in the main loops above.
adj_flag <- 0
part <- 'JJ'
if (adj_flag == 0) {
  print(adj_flag)
  if (part == 'JJ') {
    adj_flag <- 1
    print(adj_flag)
  }
  print(adj_flag)
}
print(adj_flag)
# Fixed: the original read `(part == 'JJ') {` -- the `if` keyword was
# missing, which is a syntax error that prevented the file from parsing.
if (part == 'JJ') {
  adj_flag <- 1
}
|
ecbc1269d75e6b41982a1a7e7f53f4f0be8bf2fd
|
1547182269fd52c15f1c554227264d730f57ebb2
|
/R/mgMEM.R
|
3a88c0d54b5fc6de2b6123b3a25f18cec05df77e
|
[] |
no_license
|
cran/memgene
|
084fbf5a3bf28229a198f265bca94a436f592ea4
|
6672a2f264d0a75120527ea31d1b82c28b5870a8
|
refs/heads/master
| 2022-03-16T22:52:16.117662
| 2022-02-21T18:00:02
| 2022-02-21T18:00:02
| 20,113,465
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,905
|
r
|
mgMEM.R
|
## Construct Moran's eigenvector maps (MEMs; cf. Dray et al. 2006) from a
## pairwise distance matrix.
##
## Arguments:
##   locD           -- square matrix (or coercible) of pairwise distances.
##   truncation     -- NULL (default): truncate at 4x the longest minimum
##                     spanning tree (MST) link; FALSE: no truncation;
##                     numeric in [0, 1]: quantile-proportion truncation
##                     followed by the MST step.
##   transformation -- NULL, "exponential", or "gaussian" distance weighting.
##
## Returns a list with the transformation/truncation labels used, the MEM
## eigenvalues, and the eigenvectors scaled by sqrt(|eigenvalue|).
mgMEM <-
function(locD, truncation = NULL, transformation = NULL) {
  locD <- as.matrix(locD)
  ## Internal function to perform truncation and transformation.
  ## truncProp of NULL or FALSE is treated as 1 (i.e. no quantile cut).
  .distTransform <- function(locD, transformation, truncProp) {
    locDTr <- locD
    if (is.null(truncProp) || !(truncProp)) {
      truncProp <- 1
    }
    if ((truncProp < 0) || (truncProp > 1)) {
      stop("memgene: distance truncation proportion must be between 0 and 1", call. = FALSE)
    }
    ## Distance value at the requested quantile; used both as the cut-off
    ## and as the scale of the weighting kernels below.
    truncRange <- quantile(locD, truncProp)
    if (!is.null(transformation)) {
      transformation <- tolower(transformation)
      if (transformation == "exponential") {
        ## Weight by exp(-d/scale), then rescale to [0, 1]; the as.dist
        ## round-trip zeroes the diagonal, and remaining zeros (beyond the
        ## cut-off, mapped to exp(0)=1=max) are set to the maximum distance 1.
        locDTr[locDTr > truncRange] <- 0
        locDTr <- exp(-locDTr / truncRange)
        locDTr <- 1 - locDTr / max(locDTr)
        locDTr <- as.matrix(as.dist(locDTr))
        locDTr[locDTr == 0] <- 1
      } else if (transformation == "gaussian") {
        ## Same scheme with a Gaussian kernel exp(-(d/scale)^2).
        locDTr[locDTr > truncRange] <- 0
        locDTr <- exp(-(locDTr / truncRange)^2)
        locDTr <- 1 - locDTr / max(locDTr)
        locDTr <- as.matrix(as.dist(locDTr))
        locDTr[locDTr == 0] <- 1
      }
    } else {
      ## CHECK **
      ## Is this correct way to truncate when there is no transformation?
      locDTr[locDTr > truncRange] <- 1
    }
    return(locDTr)
  }
  ## Truncation to a specified distance and transformation if required
  # if (is.numeric(truncation) || !is.null(transformation)) {
  locD <- .distTransform(locD, transformation, truncation)
  # }
  ## Truncate (or additionally truncate) the input distance matrix
  ## by determining the longest link on a minimum spanning tree
  ## of the points, and then setting the threshold as 4x this distance
  if (is.null(truncation) || is.numeric(truncation)) {
    ## Produce a minimum spanning tree using vegan
    spanning <- vegan::spantree(locD)
    threshh <- max(spanning$dist)
    locD_truncated <- locD
    locD_truncated[locD_truncated > threshh] <- 4 * threshh
    # note that locD_truncated has a main diagonal of zero so
    # that is congruent with the MEM framework as in Dray et al. (2006)
    # double-centred truncated matrix
    diag(locD_truncated) <- 4 * threshh
  } else {
    ## Truncation was FALSE or some other value
    locD_truncated <- locD
  }
  ## Double-centre truncated matrix (uniform row/column weights), i.e.
  ## Gower centering of -0.5 * D^2 as in principal coordinates analysis.
  row.wt <- rep(1, nrow(locD_truncated))
  col.wt <- rep(1, ncol(locD_truncated))
  st <- sum(col.wt)
  sr <- sum(row.wt)
  row.wt <- row.wt / sr
  col.wt <- col.wt / st
  Centered_Matrix <- -0.5 * (locD_truncated * locD_truncated)
  row.mean <- apply(row.wt * Centered_Matrix, 2, sum)
  col.mean <- apply(col.wt * t(Centered_Matrix), 2, sum)
  col.mean <- col.mean - sum(row.mean * col.wt)
  Centered_Matrix <- sweep(Centered_Matrix, 2, row.mean)
  Centered_Matrix <- t(sweep(t(Centered_Matrix), 2, col.mean))
  ## Extract MEMs
  D.eig <- eigen(Centered_Matrix, symmetric = TRUE)
  ## Remove eigenvector with positive, near-zero variance due to centering
  Zero_eigenvalue <- which(D.eig$values == min(D.eig$values[D.eig$values > 0]))
  valuesMEM <- D.eig$values[-Zero_eigenvalue]
  vectorsMEM <- subset(D.eig$vectors, select = -Zero_eigenvalue)
  weight <- sqrt(abs(valuesMEM))
  ## Standardize: scale each eigenvector by sqrt(|eigenvalue|).
  vectorsMEM <- vectorsMEM %*% diag(weight)
  ## Human-readable labels describing what was actually applied.
  if (is.null(truncation)) {
    truncationStr <- "MST"
  } else if (!truncation) {
    truncationStr <- "None"
  } else if (is.numeric(truncation)) {
    truncationStr <- paste(round(truncation * 100), "% + MST", sep = "")
  }
  if (is.null(transformation)) {
    transformationStr <- "None"
  } else {
    transformationStr <- transformation
  }
  return(list(
    transformation = transformationStr, truncation = truncationStr,
    valuesMEM = valuesMEM, vectorsMEM = vectorsMEM
  ))
}
|
0ac7b885386e6cb372faa7f0a53803e6dd859ce7
|
06f61362cc94229cc4438ee8770f2bc6007efeae
|
/R/Temp2018/Optimize.ibd.R
|
6f408747dd4e9698023d896974e2e5162c522698
|
[] |
no_license
|
bellerbrock/OptimalDesignMM
|
39ce6fa32212d4756d8866a329c79bf6cb6c6a27
|
8217e9475913511dede791b282df50e1c528f1bc
|
refs/heads/master
| 2021-09-11T18:15:44.140016
| 2018-04-10T21:05:52
| 2018-04-10T21:05:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,492
|
r
|
Optimize.ibd.R
|
#' Optimize a RCB Design using a pairwise swap algorithm
#'
#' \code{Optimize.ibd} optimizes an experimental design using a simple pairwise swap algorithm.
#'
#' @import Matrix
#'
#' @param matdf an experimental layout (from function rcbd)
#' @param n the number of iterations desired
#' @param OptimScore the initial optimality score (trace or determinant value) calculated from matdf
#' @param criteria either ``A" or ``D"
#' @param Rinv a matrix calculated for matdf (from function Rmatrix)
#' @param Ginv a matrix calculated for matdf (from function Gmatrix)
#' @param K a matrix calculated for matdf (from function VarCov.rcbd)
#' @param plot a logical argument for obtaining plots
#'
#' @return a vector of all traces from the n iterations,
#' a matrix with two rows: one with the accepted (successful) swaps
#' and the other column with the iteration index, and an optimal
#' ``improved" design
#'
#' @examples
#' ## Example 1: optimize a single generated design
#' VarG = 0.3; rhox = 0.6; rhoy = 0.6; nugget = 0; criteria="A"
#' blocks = 3; rb = 3; cb = 5; Tr = 9; Tc = 5; trt = length(c(1:15))
#' matdf<- rcbd(blocks, trt,rb,cb, Tr, Tc,plot=TRUE)
#' Rinv = Rmatrix(matdf,VarG,rhox=0.6,rhoy=0.6,nugget=0)
#' m = length(unique(matdf[,"Treatments"]))
#' Ginv <- round((1/VarG) * Matrix::Diagonal(m),7)
#'
#' res <- VarCov.rcbd(matdf,Ginv,Rinv,rhox=0.6,rhoy=0.6,VarG=0.3,criteria="A")
#' attributes(res)
#' traceI=res$OptimScore
#' K = res$K
#'
#' ans <- Optimize.rcbd(matdf=matdf,n=10,traceI=res$OptimScore,
#' criteria="A",Rinv=Rinv, Ginv=Ginv,K=K,plot=TRUE)
#' attributes(ans)
#'
#'
#' ## TEST SAG EXAMPLE
#'
#'matdf <- rcbd(nblock=4, ntrt=30, rb=5, cb=6)
#' head(matdf)
#' desplot(Rep~Col+Row, matdf, text=Treatment, cex=1, show.key=FALSE, main=NULL)
#'
#'
#'
#' ntrt <- 30
#' nblock <- 2; rb <- 5; cb <- 6
#' VarG <- 0.3; VarE <- 0.7; rhox <- 0.6; rhoy <- 0.6;
#' matdf <- rcbd(nblock=nblock,ntrt=ntrt,rb=rb,cb=cb)
#' head(matdf)
#' desplot(Rep~Col+Row, matdf, text=Treatment, cex=1, show.key=FALSE, main=NULL)
#' Rinv <- Rmatrix(matdf,VarE,rhox,rhoy)
#' Ginv <- Gmatrix(trt,VarG)
#' resD <- VarCov.rcbd(matdf,criteria="A",Ginv,Rinv) # K is not provided but calculated
#' OptimScore <- resD$OptimScore
#' K <- resD$K
#'
#' ans <- Optimize.rcbd(matdf=matdf,n=500,OptimScore=OptimScore,
#' criteria="A",Ginv=Ginv,Rinv=Rinv,K=K,plot=TRUE)
#'
#'
#' resD <- VarCov.rcbd(matdf,criteria="A",Ginv,Rinv,K) # K is provided
#'
#' (InitScore<-resD$OptimScore)
#' (OldScore<-resD$OptimScore)
#' Oldmatdf<-matdf
#' print(OldScore)
#' iter<-800
#' for (i in 1:iter) {
#' #newmatdf <- SwapMethods(matdf,pairs=1,swapmethod="within")
#' #newmatdf <- SwapMethods(matdf,pairs=4,swapmethod="across")
#' newmatdf <- SwapMethods(Oldmatdf,pairs=2,swapmethod="any")
#' NewScore <- VarCov.rcbd(newmatdf,criteria="A",Ginv,Rinv,K)$OptimScore # K is provided
#' if(NewScore < OldScore){
#' OldScore <- NewScore
#' Oldmatdf <- newmatdf
#' print(OldScore)
#' }
#' }
#'
#'
#' @export
#'
#' @seealso \code{\link{rcbd}} for design of initial experiments
#' @seealso \code{\link{rcbd}} for design of initial experiments
# Pairwise-swap search: repeatedly swap treatment pairs (SwapPair) and keep
# the candidate whenever its optimality score (VarCov.rcbd) improves.
#
# Args mirror the roxygen block above: matdf = current layout, n = number of
# iterations, OptimScore = score of matdf, criteria = "A" or "D",
# Ginv/Rinv/K = precomputed matrices, plot = draw the improved design.
# Returns: list(TRACE = all candidate scores, mat = accepted (score, iter)
# pairs, Design_best = best layout found).
Optimize.rcbd <- function(matdf, n = 100, OptimScore, criteria = "A", Ginv, Rinv, K, plot = FALSE) {
  newmatdf <- matdf
  Score <- OptimScore
  mat <- NULL
  mat <- rbind(mat, c(value = Score, iterations = 0))
  Design_best <- newmatdf
  Des <- list()
  TRACE <- c()
  newmatdf <- SwapPair(matdf = matdf)
  for (i in 2:n) {
    newmatdf <- SwapPair(matdf = newmatdf)
    # Evaluate the candidate ONCE per iteration; the original recomputed
    # VarCov.rcbd up to four times per pass with identical arguments.
    cand_score <- VarCov.rcbd(newmatdf, criteria, Ginv, Rinv, K)[[1]]
    TRACE[i] <- cand_score
    Des[[i]] <- newmatdf
    if (cand_score < Score) {
      # Improvement: accept the swap and record it.
      # (Fixed sprintf misuse: extra unused arguments warn in R >= 4.1.)
      print(sprintf("Swapping within blocks: %d", i))
      Design_best <- Des[[i]] <- newmatdf
      Design_best <- newmatdf
      Score <- cand_score
      mat <- rbind(mat, c(value = Score, iterations = i))
    }
    if (cand_score > Score && nrow(mat) <= 1) {
      # Worse and no improvement accepted yet: restart from the initial design.
      newmatdf <- matdf
      Des[[i]] <- matdf
      Design_best <- matdf
    }
    if (cand_score > Score && nrow(mat) > 1) {
      # Worse after at least one acceptance: back up to the previous candidate.
      newmatdf <- Des[[length(Des) - 1]]
      Des[[i]] <- newmatdf
      Design_best <- newmatdf
    }
  }
  if (plot == TRUE) {
    # Visualize the best layout: treatment labels positioned by row/column,
    # coloured by block.
    Design_best <- as.data.frame(Design_best)
    P <- ggplot2::ggplot(data = Design_best) +
      geom_text(aes(x = Design_best$Col,
                    y = as.factor(Design_best$Row),
                    label = Design_best$Treatments,
                    col = as.factor(Design_best$Reps)), size = 6) +
      scale_y_discrete("Row coordinates") +
      scale_x_discrete("Column coordinates") +
      scale_color_discrete("Block") +
      ggtitle(substitute(paste("Improved design after n = ", n, ~ " iterations", sep = ""))) +
      theme(text = element_text(size = 15, face = "bold"),
            plot.title = element_text(size = 20, face = "bold", vjust = 2),
            axis.text = element_text(size = 17, face = "bold"),
            axis.title = element_text(face = "bold"),
            legend.title = element_text(face = "bold")) +
      coord_fixed()
    print(P)
  }
  # Optimality design efficiency: percentage improvement over the start.
  ODE <- (((mat[1, "value"]) - (mat[nrow(mat), "value"])) / (mat[1, "value"])) * 100
  print(sprintf("ODE due to swapping pairs of treatments within blocks is: %f", ODE))
  list(TRACE = c(as.vector(mat[1, "value"]), TRACE[!is.na(TRACE)]), mat = mat,
       Design_best = Design_best)
}
|
32935514b8ad044adfaad5bff1a50914b7ace0fd
|
ae4102a9effc90e20c79098109a3b1f6cbfa3b9c
|
/man/sofa-package.Rd
|
9349ca49c963417e0697a3f6ac152908907c5de2
|
[
"MIT"
] |
permissive
|
lmilev/sofa
|
c539764de516ff7c1338d93ef80d62287cb3c6d2
|
549d333e41a333a59aea25db12bc568b47be22c4
|
refs/heads/master
| 2021-01-24T09:57:10.138729
| 2018-01-28T17:56:52
| 2018-01-28T17:56:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,290
|
rd
|
sofa-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sofa-package.r
\docType{package}
\name{sofa-package}
\alias{sofa-package}
\alias{sofa}
\title{R client for CouchDB.}
\description{
Relax.
}
\section{About sofa}{
\pkg{sofa} provides an interface to the NoSQL database CouchDB
(\url{http://couchdb.apache.org}). Methods are provided for managing
databases within CouchDB, including creating/deleting/updating/transferring,
and managing documents within databases. One can connect with a local
CouchDB instance, or a remote CouchDB databases such as Cloudant
(\url{https://cloudant.com}). Documents can be inserted directly from
vectors, lists, data.frames, and JSON.
}
\section{Client connections}{
All functions take as their first parameter a client connection object,
or a \strong{cushion}. Create the object with \link{Cushion}. You
can have multiple connection objects in an R session.
}
\section{CouchDB versions}{
\pkg{sofa} was built assuming CouchDB version 2 or greater. Some
functionality of this package will work with versions < 2, while
some may not (mango queries, see \code{\link[=db_query]{db_query()}}). I don't
plan to support older CouchDB versions per se.
}
\author{
Scott Chamberlain \email{myrmecocystus@gmail.com}
}
\keyword{package}
|
2482560a2b9f5dd83fe74044c1e5e75d90c65f82
|
67b12601515fbfa1c555d14310a111b62250d5a1
|
/bugs/fallarmyworm_nonlinear.R
|
57da3e582f354c604b4aa0bccfb18350b8faa738
|
[] |
no_license
|
cesaraustralia/darabug2
|
11e2faeb38783e40ba22b7165976fec5b6cc530d
|
2e5b0466dac19c69980c74b7eb55d4ddac14aed8
|
refs/heads/master
| 2023-07-17T14:19:50.898006
| 2021-08-25T06:44:36
| 2021-08-25T06:44:36
| 85,540,254
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,505
|
r
|
fallarmyworm_nonlinear.R
|
fallarmyworm_nonlinear <- function() {
  # Life-history descriptor for the fall armyworm with non-linear
  # (Schoolfield-type) temperature-dependent development rates.
  # Returns a list: species name, one development-rate function per stage
  # (input: temperature in deg C), the life-stage category per stage,
  # scientific name, and the literature source.
  name <- 'Fall armyworm (non-linear)'
  sciname <- 'Spodoptera frugiperda'
  source <- 'Ali, A., R. G. Luttrell, and J. C. Schneider. 1990. Effects of Temperature and Larval Diet on Development of the Fall Armyworm (Lepidoptera: Noctuidae). Annals of the Entomological Society of America 83:725–733.'
  # Schoolfield rate model on absolute temperature (Kelvin). Defaults for
  # HL/HH/TL/TH make the low/high inactivation terms vanish when unused.
  schoolfield <- function(tempK, RHO25, HA, HL = -1e9, HH = -1e9, TL = 1, TH = 1) {
    R <- 1.987 # cal/degree/mol (Boltzmanns constant)
    RHO25 * tempK / 298.15 * exp((HA / R) * (1 / 298.15 - 1 / tempK)) /
      (1 + exp((HL / R) * (1 / TL - 1 / tempK)) +
         exp((HH / R) * (1 / TH - 1 / tempK)))
  }
  # Factory for larval-instar rate functions: captures the stage's
  # Schoolfield parameters and converts deg C input to Kelvin.
  larval_rate <- function(RHO25, HA, TH = 1, HH = -1e9) {
    force(RHO25); force(HA); force(TH); force(HH)
    function(temp) {
      schoolfield(tempK = temp + 273.15, RHO25 = RHO25, HA = HA, TH = TH, HH = HH)
    }
  }
  # Egg and pupa stages use their own fitted rate equations from the source.
  egg <- function(temp) {
    tempK <- temp + 273.15
    0.0011 * tempK * exp(21.7 - 6480.9 / tempK) /
      (1 + exp(-132.45 + 38356 / tempK) + exp(86.96 - 27119.3 / tempK))
  }
  pupa <- function(temp) {
    tempK <- temp + 273.15
    0.00051 * tempK * exp(5.35 - 1593.69 / tempK) /
      (1 + exp(-70.79 + 20894.94 / tempK))
  }
  # Per-stage development-rate functions (parameters from Ali et al. 1990).
  dev.funs <- list(
    egg  = egg,
    L1   = larval_rate(0.3542, 11429),
    L2   = larval_rate(0.6774, 21377, TH = 306.7, HH = 44561),
    L3   = larval_rate(0.5463, 15457, TH = 310.9, HH = 71604),
    L4   = larval_rate(0.8922, 28132, TH = 300.4, HH = 30324),
    L5   = larval_rate(0.4130, 10631),
    L6   = larval_rate(0.2038, 15601, TH = 309.1, HH = 83892),
    pupa = pupa
  )
  life <- c('egg', 'immature', 'immature', 'immature', 'immature', 'immature', 'immature', 'pupa') # possibly write script to search for adult and egg/pupa and assume all else immature.
  list(name = name, dev.funs = dev.funs, life = life, sciname = sciname,
       source = source)
}
|
03508544414917040abb883f590545b4eb143f91
|
3fb5533d68b4b8b7949e99442f6ba7ff6ef61735
|
/man/agTrend.gam-package.Rd
|
6ae265f656dd1a139b16f87bcc7591ee8545d049
|
[] |
no_license
|
dsjohnson/agTrend.gam
|
7555015c8c81eb3748b7c877a50766a727987393
|
b3deac6e1937711c84751febe6c87387afb58439
|
refs/heads/master
| 2020-04-05T01:26:42.017711
| 2018-11-22T00:49:56
| 2018-11-22T00:49:56
| 156,436,565
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,381
|
rd
|
agTrend.gam-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/agTrend.gam-package.R
\docType{package}
\name{agTrend.gam-package}
\alias{agTrend.gam-package}
\alias{agTrend.gam}
\title{R package for fitting temporal trends to abundance data aggregated over large regions when subregions have missing data}
\description{
This package fits log-linear trend models to regions aggregated over sites. The sites may contain missing surveys that
are not temporally aligned with the missing data at other sites, making direct aggregation impossible. The functions within the package
model the individual sites with a semi-parametric (possibly zero-inflated) model to interpolate missing data, from which regional aggregations
can be made. By using a Monte Carlo approach, one can sample from the posterior predictive distribution of the regional aggregations,
then calculate the log-linear trend over the time period of interest as a derived parameter. Using the posterior predictive distribution
allows incorporation of both parameter uncertainty as well as uncertainty due to sampling the local abundance processes.
}
\details{
\tabular{ll}{ Package: \tab agTrend.gam\cr
Type: \tab Package\cr
Version: \tab 0.01.9000\cr
Date: \tab 2018-10-24\cr
License: \tab CC0\cr
LazyLoad: \tab
yes\cr }
}
\author{
Devin S. Johnson
Maintainer: Devin S. Johnson <devin.johnson@noaa.gov>
}
|
5830a16ae89564126305e9567f0cd471cbcb1333
|
8652da947f69556909d32037b5a05801571231c5
|
/tests/testthat.R
|
d108d32033dd073435f746788e3177eb805d974c
|
[
"MIT"
] |
permissive
|
AndreSjuve/texaid
|
54b5dcb412fe6aba567eb0d71df7447a92b9e53d
|
3d175d9e03c8714d01b9191d07f59bd83ab09021
|
refs/heads/master
| 2023-08-18T09:14:18.482368
| 2021-10-01T15:29:01
| 2021-10-01T15:29:01
| 412,502,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs every test under
# tests/testthat for the texaid package (invoked by R CMD check).
library(testthat)
library(texaid)
test_check("texaid")
|
18590ff94e4ba4939bf574578dfbb290cebae833
|
687a85612a09c61a77853b70acffbc383de6e5aa
|
/man/brca.Rd
|
632716aa4afd0398a4255124b564f7346fd37c53
|
[] |
no_license
|
dinukap/AdaSampling
|
5b319993ee93707fb0f27b1c34bb1ecc4743248f
|
81978628d1f5dec40599de3a07b052c24d9569e0
|
refs/heads/master
| 2021-05-10T07:57:37.390294
| 2018-03-02T03:56:12
| 2018-03-02T03:56:12
| 118,868,301
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,256
|
rd
|
brca.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{brca}
\alias{brca}
\title{Wisconsin Breast Cancer Database (1991)}
\format{A data frame with 683 rows and 10 variables:
\describe{
\item{clt}{Clump thickness, 1 - 10}
\item{ucs}{Uniformity of cell size, 1 - 10}
\item{uch}{Uniformity of cell shape, 1 - 10}
\item{mad}{Marginal adhesion, 1 - 10}
\item{ecs}{Single epithelial cell size, 1 - 10}
\item{nuc}{Bare nuclei, 1 - 10}
\item{chr}{Bland chromatin, 1 - 10}
\item{ncl}{Normal nucleoli, 1 - 10}
\item{mit}{Mitoses, 1 - 10}
\item{cla}{Class, benign or malignant}
}}
\source{
\url{https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data}
}
\usage{
brca
}
\description{
A cleaned version of the original Wisconsin Breast Cancer
dataset containing histological information about 683
breast cancer samples collected from patients at the
University of Wisconsin Hospitals, Madison by
Dr. William H. Wolberg between January
1989 and November 1991.
}
\references{
O. L. Mangasarian and W. H. Wolberg: "Cancer diagnosis via linear
programming", \emph{SIAM News}, Volume 23, Number 5, September 1990, pp 1 & 18.
}
\keyword{datasets}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.