blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83c23e5879d5584b514d4db478e9100055b08db8
|
13c3cdcac474dcf24885fec9c4bb5baa060ba6c7
|
/LINESprocFromNetwork2.r
|
14f673f604c57633b8b4a5421ff8bff4d77a3f9d
|
[] |
no_license
|
kpierce8/hydroNetworks
|
98661dd422729c8a5bfa23c57322ee7daf8244c3
|
e3c7bb870243d711c169e0c7c4ade2d191591928
|
refs/heads/master
| 2020-06-03T15:08:27.359905
| 2011-04-20T21:23:05
| 2011-04-20T21:23:05
| 1,642,614
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,964
|
r
|
LINESprocFromNetwork2.r
|
# --- Setup: libraries, project helpers, and field-name constants ------------
library(igraph)
library(foreign)
memory.limit(1024*3.75)
source( "E:\\CODE_local\\HydroNetworks_local\\Trunk\\NetFunctions.r")
#source( "g:\\str24project\\Trunk\\NetFunctions.r")
# Region code; used as a suffix in every output filename below.
wria <- c("WA")
# Names of the id and length fields in the edge table.
LINEID <- c("OBJECTID") #wchydro is LLID
LENGTH <- c("Shape_Length") #wchydro is LLID
# --- Load edge and junction tables from the Access database -----------------
library(RODBC)
cnn1 <- odbcConnectAccess("c:/data/projectslocal/pointsholder.mdb")
#cnn1 <- odbcConnectAccess("g:\\str24project\\StrNetOut.mdb")
edges.dbf <- sqlFetch(cnn1,"edgesNOCOL")
nodes.dbf<- sqlFetch(cnn1,"junctionsNOCOL")
# Drop the geometry columns; only attribute fields are used from here on.
edges.dbf <- edges.dbf[,-c(match("Shape",names(edges.dbf)))]
nodes.dbf <- nodes.dbf[,-c(match("SHAPE",names(nodes.dbf)))]
# Elevation samples joined onto the junctions as WADEM below.
demValues <- read.csv("C:\\Documents and Settings\\pierckbp\\Local Settings\\Temp\\newsamp2")
# Alignment check between node table and DEM samples: should print ~0 if the
# row orders agree. NOTE(review): printed only, not enforced -- consider
# stopifnot(all.equal(...)) here.
sum(nodes.dbf$POINT_X-demValues$x)
nodes.dbf <- data.frame(nodes.dbf, WADEM=demValues$wadem10)
nodes.dbf<- nodes.dbf[order(nodes.dbf$OBJECTID),]
# Column positions of the id/length/topology fields in the edge table.
idcol<- match(LINEID,names(edges.dbf))
lencol<- match(LENGTH,names(edges.dbf))
tocol<- match("ToNode",names(edges.dbf))
fromcol<- match("FromNode",names(edges.dbf))
#
##################################################################################
# Below: ReachIdentifier, Length, ToNode and FromNode
edgesnet <- data.frame(edges.dbf[,c(idcol,lencol, tocol, fromcol)])
options("scipen"=6)
# Flag edges whose endpoint nodes are missing; such edges cannot be placed in
# the network graph. rowSums() replaces the row-wise apply(..., 1, sum) --
# same NA propagation, vectorized.
nona <- is.na(rowSums(edgesnet[,3:4]))
# Keep only fully connected edges. Use the logical vector directly rather
# than comparing it against the string 'FALSE' (same selection, idiomatic R).
edgesnet.filt <- edgesnet[!nona,]
edgesnet.attr <- edges.dbf[!nona,]
rm(nona)
rm(edgesnet)
##Write Graph Connectivity File##
dim(edgesnet.filt)
# table.norow() is a project helper (NetFunctions.r) that writes the to/from
# pairs as a headerless edge list for read.graph() below.
table.norow(edgesnet.filt[,3:4],paste("c:/data/projectslocal/segments/SEGnet.GR",wria,".txt",sep=""),col.names=FALSE)
#table.norow(edgesnet.filt[,3:4],paste("g:/str24project/SEGnet.GR",wria,".txt",sep=""),col.names=F)
##################################################################################
##Import Graph and add identifier attributes
#graphset<- read.graph(paste("g:/str24project/SEGnet.GR",wria,".txt",sep=""))
# Re-read the connectivity file written above; edge order is assumed to match
# the row order of edgesnet.filt / edgesnet.attr -- TODO confirm.
graphset<- read.graph(paste("c:/data/projectslocal/segments/SEGnet.GR",wria,".txt",sep=""))
# Edge attributes: physical length (duplicated as the graph weight), the
# source-table id, and the two endpoint node ids.
graphset <- set.edge.attribute(graphset,"length",E(graphset),edgesnet.attr[,lencol])
graphset <- set.edge.attribute(graphset,"weight",E(graphset),edgesnet.attr[,lencol])
graphset <- set.edge.attribute(graphset,"OBJECTID",E(graphset),edgesnet.attr[,idcol])
graphset <- set.edge.attribute(graphset,"ToNode",E(graphset),edgesnet.filt$ToNode)
graphset <- set.edge.attribute(graphset,"FromNode",E(graphset),edgesnet.filt$FromNode)
lastv <- length(V(graphset))
# Vertex attributes: the leading 0 accounts for the extra vertex that
# read.graph appears to prepend relative to the exported node table
# (see the CLUSTER VECTOR note further down).
graphset <- set.vertex.attribute(graphset,"nodes",V(graphset),c(0,nodes.dbf$OBJECTID))
graphset <- set.vertex.attribute(graphset,"WADEM",V(graphset),c(0,nodes.dbf$WADEM))
# Quick visual check of the first few vertex ids.
V(graphset)$nodes[1:10]
##END LINEWORK PREP##
##########################################################
##CLUSTER VECTOR
# Clusters are based on vertices but vertices have no attributes;
# the "temp" vectors below add the cluster attributes to the vertices.
# Reading the graph appears to prepend one vertex which is not
# present in the nodes prior to export; need to check this.
################ BELOW NOT CAPTURED IN CLUST ASSIGN function
# Connected components of the stream network.
bob.clust <- clusters(graphset)
temp1 <- bob.clust$membership #get cluster number
temp2 <- data.frame(clustid=temp1,clustsize=bob.clust$csize[temp1+1]) #match to cluster size, cluster 1 is position 2
# Node-id lookup aligned to vertex order; row 1 is the phantom prepended
# vertex, zeroed out below.
temp3 <- data.frame(NODEID=c(0,nodes.dbf$OBJECTID),temp2)
temp3[1,] <- 0
graphset <- set.vertex.attribute(graphset,"members",V(graphset),c(bob.clust$csize[temp1+1]))
graphset <- set.vertex.attribute(graphset,"membership",V(graphset),bob.clust$membership)
# NOTE(review): temp3 has no UNINDEX column (only NODEID/clustid/clustsize),
# so temp3$UNINDEX is NULL and this call sets nothing -- confirm intent.
graphset <- set.vertex.attribute(graphset,"UNINDEX",V(graphset),temp3$UNINDEX)
graphset <- set.vertex.attribute(graphset,"NODEID",V(graphset),temp3$NODEID)
graphset <- set.vertex.attribute(graphset,"COAST",V(graphset),c(0,nodes.dbf$CoastCol))
graphset <- set.vertex.attribute(graphset,"DEGREE",V(graphset),degree(graphset,v=V(graphset),mode=c("total")))
# Spot checks of the assigned vertex attributes (printed, not asserted).
V(graphset)$members[1:100]
V(graphset)$NODEID[1:100]
V(graphset)$membership[1:100]
V(graphset)$DEGREE[1:100]
V(graphset)$WADEM[1:100]
cfrom.nodes <- E(graphset)$FromNode
cto.nodes <- E(graphset)$ToNode
length(cfrom.nodes)
length(cto.nodes)
allnodes <- V(graphset)$NODEID
# Per-node summary table; ISSINK and TIES are written as constants here.
setElev <- data.frame(NODEID=V(graphset)$NODEID, ClustID=V(graphset)$membership, DEGREE=V(graphset)$DEGREE, DEM=V(graphset)$WADEM, ClustSize = V(graphset)$members, ISSINK = 0, TIES = -1,COAST=V(graphset)$COAST)
list.vertex.attributes(graphset)
list.edge.attributes(graphset)
# Vertex index of each edge endpoint within the node-id lookup.
cmfrom <- match(cfrom.nodes, allnodes) #added zero to account for shift in temp3
cmto <- match(cto.nodes, allnodes)
tindexes<- data.frame(ClusterSize=temp3[cmfrom,dim(temp3)[2]],ClusterID=temp3[cmfrom,dim(temp3)[2]-1])
edges.dbf.clust <- data.frame(edgesnet.attr,tonode=temp3[cmto,1],fromnode=temp3[cmfrom,1],tindexes,toElev=setElev[cmto,"DEM"],fromElev=setElev[cmfrom,"DEM"])
# Gradient per edge: rise/run, with atan converted from radians to degrees.
tanGradient <- (edges.dbf.clust$fromElev- edges.dbf.clust$toElev)/edges.dbf.clust$Shape_Length
Gradient <- atan(tanGradient)/2/pi*360
# Negative gradients mark segments whose "from" end is lower than the "to"
# end -- presumably DEM noise or digitization direction; inspected only.
negGrad<-Gradient[Gradient<0]
length(negGrad[!is.na(negGrad)])
hist(negGrad[!is.na(negGrad)])
edges.dbf.clust <- data.frame(edges.dbf.clust, Gradient)
#WRITE OUT CLUSTER vector
#write.dbf(edges.dbf.clust,paste("e:/str24project/edgetest2",wria,".dbf",sep=""))
#write.dbf(setElev,paste("e:/str24project/nodestest",wria,".dbf",sep=""))
write.dbf(edges.dbf.clust,paste("c:/data/projectslocal/segments/edgetest2",wria,".dbf",sep=""))
write.dbf(setElev,paste("c:/data/projectslocal/segments/nodestest",wria,".dbf",sep=""))
# Order-preservation check: both correlations print 1 if row order survived
# (printed, not enforced).
cor(edges.dbf$OBJECTID, edges.dbf.clust$OBJECTID)
cor(nodes.dbf$OBJECTID, temp3$NODEID[-1])
rm(temp3)
########################################
#
#gr1<- subgraph(graphset,V(graphset)[V(graphset)$members > 100])
#summary(gr1)
#uni.clus<-unique(get.vertex.attribute(gr1,"membership",V(gr1)))
#
#clusters(gr1)
#uni.clus
#
#
#gr2 <- as.undirected(gr1,mode=c("each"))
#summary(gr1)
#summary(gr2)
#bob <- get.shortest.paths(gr2,1,200)
#bob2 <- pathlengths(bob,gr2)
#bob3 <- pathsums(bob2)
|
c6db436ff8100849a4599e19afac8e28e2f77dbd
|
fdb4b33f73ec0f7a04f738fa3edc1e65d60f22fa
|
/final_name_table.R
|
237b13102783d503950d5fcf35c15541b48b1937
|
[] |
no_license
|
Mira0507/popular_baby_names
|
066033a8a1cb9991e1e9c7377cfd951a1894e380
|
df4d39d57c5bfbeebc4e44788f6554f407bae9a0
|
refs/heads/master
| 2022-04-12T06:52:41.848497
| 2020-04-11T02:45:26
| 2020-04-11T02:45:26
| 253,302,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,168
|
r
|
final_name_table.R
|
# Build side-by-side top-20 baby-name tables for England/Wales vs the US,
# then derive the shared and country-specific name sets.
library(tidyverse)
library(formattable)
# Input CSVs are assumed sorted most-popular-first (rows 1:20 are taken as
# the top 20) -- TODO confirm against the data source.
eboy <- read_csv('e_boys.csv')
egirl <- read_csv('e_girls.csv')
uboy <- read_csv('us_boys.csv')
ugirl <- read_csv('us_girls.csv')
# NOTE(review): each data.frame() call repeats an argument name; data.frame
# appears to uniquify them, and it is harmless only because names() is
# overwritten just below -- confirm.
boy_names <- data.frame(England_Wales_Boy = eboy[1:20, 'Name'],
England_Wales_Boy = eboy[1:20, 'Count'],
US_Boy = uboy[1:20, 'Name'],
US_Boy = uboy[1:20, 'Counts'])
girl_names <- data.frame(England_Wales_Girl = egirl[1:20, 'Name'],
England_Wales_Girl = egirl[1:20, 'Count'],
US_Girl = ugirl[1:20, 'Name'],
US_Girl = ugirl[1:20, 'Counts'])
# Human-readable headers shared by both tables (order: E/W name, E/W count,
# US name, US count).
name <- c('Names in England/Wales',
'Numbers in England/Wales',
'Names in the United States',
'Numbers in the United States')
names(boy_names) <- name
names(girl_names) <- name
# tables for boys and girls: formattable draws in-cell bars on the count
# columns (bar length normalized, 0.2 minimum width).
table_boys <- formattable(boy_names,
list(area(col = c('Numbers in England/Wales','Numbers in the United States')) ~ normalize_bar("gold", 0.2)))
table_girls <- formattable(girl_names,
list(area(col = c('Numbers in England/Wales','Numbers in the United States')) ~ normalize_bar("greenyellow", 0.2)))
# ebn20 = 20 popular england boy names
# ubn20 = 20 popular us boy names
# (extraction is by column POSITION: 1 = E/W names, 3 = US names)
ebn20 <- boy_names[[1]]
ubn20 <- boy_names[[3]]
# egn20 = 20 popular england girl names
# ugn20 = 20 popular us girl names
egn20 <- girl_names[[1]]
ugn20 <- girl_names[[3]]
# unique_ebn20 = england boy names not popular in the us
# unique_ubn20 = us boy names not popular in england
# common_bn20 = common boy names
unique_ebn20 <- ebn20[!ebn20 %in% ubn20]
unique_ubn20 <- ubn20[!ubn20 %in% ebn20]
common_bn20 <- ebn20[ebn20 %in% ubn20]
# unique_egn20 = england girl names not popular in the us
# unique_ugn20 = us girl names not popular in england
# common_gn20 = common girl names
unique_egn20 <- egn20[!egn20 %in% ugn20]
unique_ugn20 <- ugn20[!ugn20 %in% egn20]
common_gn20 <- egn20[egn20 %in% ugn20]
# Top-10 slices, again by column position (1 = E/W names, 3 = US names).
u_boys_top10 <- boy_names[1:10, 3]
u_girls_top10 <- girl_names[1:10, 3]
e_boys_top10 <- boy_names[1:10, 1]
e_girls_top10 <- girl_names[1:10, 1]
|
e2a34dd71d71f3d0f59fb160fb2647f8a404f1cc
|
cda850c653292f955410e904cc81af7f646e040f
|
/cachematrix.R
|
b3cb9d52efde4094670be52c17dca9b84c37768a
|
[] |
no_license
|
ReProgrammatic/ProgrammingAssignment2
|
1c92edfe81c56d2d19604d8343ac365be690f9a8
|
6cf9cde5cd4d6601e20f69477868fe882a37fba4
|
refs/heads/master
| 2020-12-25T03:20:27.138802
| 2016-02-13T16:50:55
| 2016-02-13T16:50:55
| 51,652,153
| 0
| 0
| null | 2016-02-13T15:43:36
| 2016-02-13T15:43:35
| null |
UTF-8
|
R
| false
| false
| 1,395
|
r
|
cachematrix.R
|
# cachematrix.R: A set of functions that caches the inverse when working with
# matrices. Assignment for the third week of the Coursera/JHU R Programming class.
## A function that creates a list that caches the inverse of a matrix for use
## with the cacheSolve function.
## Construct a cache-aware wrapper around a matrix.
##
## Returns a list of four closures that share state in this function's
## environment:
##   get()           -- the current matrix
##   set(m)          -- replace the matrix and drop any cached inverse
##   getinverse()    -- the cached inverse, or NULL if none stored yet
##   setinverse(inv) -- store a computed inverse in the cache
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  get <- function() {
    x
  }
  set <- function(new_value) {
    # Replacing the matrix invalidates whatever inverse was cached.
    x <<- new_value
    cached_inverse <<- NULL
  }
  getinverse <- function() {
    cached_inverse
  }
  setinverse <- function(new_inverse) {
    cached_inverse <<- new_inverse
  }
  list(get = get, set = set, getinverse = getinverse, setinverse = setinverse)
}
## gets the inverse from a CacheMatrix list, using cache if available.
## No invalidation is enacted when using different arguments for the solve
## function, use with care.
## Return the inverse of a makeCacheMatrix() wrapper `x`, computing it with
## solve() and caching it on first use. Extra arguments are forwarded to
## solve(); note the cache is NOT invalidated when `...` differs between
## calls, so use with care.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    # Cache hit: skip the computation entirely.
    message("using cached inverse")
    return(cached)
  }
  message("inverse was not cached, computing and caching it")
  result <- solve(x$get(), ...)
  x$setinverse(result)
  result
}
|
1575b1e2943897c070e8465427d6e564331dcc4e
|
07bd88725a753783fe892df3bee11c511be6b7f3
|
/man/bst.sel.Rd
|
0da816e3996c41c30b226a1ed0499beb96e054f5
|
[] |
no_license
|
cran/bst
|
aaf796323637dea5bdf4efa35cf3b88c2be4dbc4
|
53dbd049b094eee6de2c02c00969900f01da88d2
|
refs/heads/master
| 2023-01-12T09:01:02.160478
| 2023-01-06T17:50:56
| 2023-01-06T17:50:56
| 17,694,897
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,609
|
rd
|
bst.sel.Rd
|
\name{bst.sel}
\alias{bst.sel}
\title{Function to select number of predictors}
\description{Function to determine the first q predictors in the boosting path, or perform (10-fold) cross-validation and determine the optimal set of parameters}
\usage{
bst.sel(x, y, q, type=c("firstq", "cv"), ...)
}
\arguments{
\item{x}{Design matrix (without intercept).}
\item{y}{Continuous response vector for linear regression}
\item{q}{Maximum number of predictors that should be selected if \code{type="firstq"}.}
\item{type}{if \code{type="firstq"}, return the first \code{q} predictors in the boosting path. if \code{type="cv"}, perform (10-fold) cross-validation and determine the optimal set of parameters}
\item{...}{Further arguments to be passed to \code{\link{bst}} or \code{\link{cv.bst}}.}
}
\details{Function to determine the first q predictors in the boosting path, or perform (10-fold) cross-validation and determine the optimal set of parameters. This may be used for p-value calculation. See below.}
\value{Vector of selected predictors.}
\author{Zhu Wang}
\examples{
\dontrun{
x <- matrix(rnorm(100*100), nrow = 100, ncol = 100)
y <- x[,1] * 2 + x[,2] * 2.5 + rnorm(100)
sel <- bst.sel(x, y, q=10)
library("hdi")
fit.multi <- hdi(x, y, method = "multi.split",
model.selector =bst.sel,
args.model.selector=list(type="firstq", q=10))
fit.multi
fit.multi$pval[1:10] ## the first 10 p-values
fit.multi <- hdi(x, y, method = "multi.split",
model.selector =bst.sel,
args.model.selector=list(type="cv"))
fit.multi
fit.multi$pval[1:10] ## the first 10 p-values
}
}
\keyword{models}
\keyword{regression}
|
d9c9ee13eacd28c8f9e0f41c4e70b8674fafa18f
|
7e195a7670b2fffae511a16e85cd5e430d111e92
|
/Simulations/No Early Harm Scenario/low_eefun_NEH.R
|
fc256a49ae892ad9c96c25de72d2084480139a1d
|
[] |
no_license
|
bblette1/SACE_for_PSEM
|
5c07b4ea7d9f64be2b7d365ad452abcdbb6f5773
|
2e13ef5bd7362776a9d1d859a58ac2f06c3b718a
|
refs/heads/master
| 2020-04-20T22:57:35.534561
| 2019-02-22T17:05:28
| 2019-02-22T17:05:28
| 169,156,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,110
|
r
|
low_eefun_NEH.R
|
# Estimating equation vector for the ignorance-interval LOWER bound under the
# No Early Harm (NEH) scenario; used for M-estimation (sandwich) variance
# calculation.
#
# Full-cohort estimating equations.
#
# Args:
#   data -- data frame evaluated via with(); must provide Y, Y_tau, Z and
#           S_star (S_star may be NA for unmeasured subjects)
#   beta0range, beta1range, beta2range, beta3range -- numeric ranges for the
#           sensitivity parameters; only one endpoint of each is used here
#           (min for beta0/beta2/beta3, max for beta1), presumably the
#           combination yielding the lower bound -- TODO confirm vs. paper.
#
# Returns: a function of theta (length 19) giving the estimating-function
# vector; each component is annotated inline with the parameter it
# identifies. Do not reorder terms -- the theta indices are positional.
low_eefun <- function(data, beta0range, beta1range, beta2range, beta3range) {
  function(theta) {
    with(data, c( (1-Y_tau)*(1-Z)*(Y-theta[1]), # risk_0 = theta[1]
                  theta[2]*(1-Y_tau)*Z - (1-Y_tau)*(1-Z), # p_ytau0eq0_giv_ytau1eq0 = v = theta[2]
                  (1-Y_tau)*Z*((!is.na(S_star) & S_star==1)-theta[3]), # p_1 = theta[3]
                  theta[4]*theta[2] + theta[5]*(1-theta[2]) - theta[3], # p_10_first = theta[4]
                  exp(min(beta3range))*theta[5]/(1-theta[5]) - theta[4]/(1-theta[4]), # P(S_star(1) = 1 | Y_tau(1) = 0, Y_tau(0) = 1)_first = theta[5]
                  theta[1] - theta[6]*(1-theta[4]) - theta[7]*theta[4], # risk_0_00 = theta[6]
                  exp(min(beta0range))*theta[7]/(1-theta[7]) - theta[6]/(1-theta[6]), # risk_0_10 = theta[7]
                  Z*(1-Y_tau)*(!is.na(S_star) & S_star==0)*(Y-theta[8]), # row1 = theta[8]
                  (1-Z)*(1-Y_tau-theta[9]), # P(Y_tau(0) = 0) = theta[9]
                  (1-Y_tau)*(!is.na(S_star) & S_star==0)*Z - Z*theta[10], # P(Y_tau(1) = 0, S_star(1) = 0) = theta[10]
                  (1-theta[4])*theta[9] - theta[11]*theta[10], # P(Y_tau(0) = 0, S_star(0) = 0 | Y_tau(1) = 0, S*(1) = 0) = theta[11]
                  theta[8] - theta[12]*theta[11] - theta[13]*(1-theta[11]), # risk_1_00 = theta[12]
                  exp(max(beta1range))*theta[13]/(1-theta[13]) - theta[12]/(1-theta[12]), # risk_1_0star = theta[13]
                  Z*(1-Y_tau)*(!is.na(S_star) & S_star==1)*(Y-theta[14]), # row2 = theta[14]
                  (1-Y_tau)*(!is.na(S_star) & S_star==1)*Z - Z*theta[15], # P(Y_tau(1) = 0, S_star(1) = 1) = theta[15]
                  theta[4]*theta[11] - theta[16]*theta[15], # P(Y_tau(0) = 0, S_star(0) = 0 | Y_tau(1) = 0, S*(1) = 1) = theta[16]
                  theta[14] - theta[17]*theta[16] - theta[18]*(1-theta[16]), # risk_1_10 = theta[17]
                  exp(min(beta2range))*theta[18]/(1-theta[18]) - theta[17]/(1-theta[17]), # risk_1_1star = theta[18]
                  theta[19] - theta[17] + theta[7] + (theta[12] - theta[6]) # CEP(1,0) - CEP(0,0) = theta[19]
    ))
  }
}
# Case-cohort sampling estimating equations.
#
# Same parameterization as the full-cohort lower-bound version (theta[1..19])
# with one addition: terms involving S_star are weighted by
# (1/theta[20]*(1-Y)*R + Y), an inverse-probability-of-sampling weight where
# R indicates inclusion in the subcohort and theta[20] is the sampling
# probability among non-cases, itself estimated by the final equation
# (1-Y)*(R-theta[20]). Do not reorder terms -- theta indices are positional.
low_eefun_cc <- function(data, beta0range, beta1range, beta2range, beta3range) {
  function(theta) {
    with(data, c( (1-Y_tau)*(1-Z)*(Y-theta[1]), # risk_0 = theta[1]
                  theta[2]*(1-Y_tau)*Z - (1-Y_tau)*(1-Z), # p_ytau0eq0_giv_ytau1eq0 = v = theta[2]
                  (1-Y_tau)*Z*(S_star-theta[3])*(1/theta[20]*(1-Y)*R+Y), # p_1 = theta[3]
                  theta[4]*theta[2] + theta[5]*(1-theta[2]) - theta[3], # p_10_first = theta[4]
                  exp(min(beta3range))*theta[5]/(1-theta[5]) - theta[4]/(1-theta[4]), # P(S_star(1) = 1 | Y_tau(1) = 0, Y_tau(0) = 1)_first = theta[5]
                  theta[1] - theta[6]*(1-theta[4]) - theta[7]*theta[4], # risk_0_00 = theta[6]
                  exp(min(beta0range))*theta[7]/(1-theta[7]) - theta[6]/(1-theta[6]), # risk_0_10 = theta[7]
                  Z*(1-Y_tau)*(1-S_star)*(1/theta[20]*(1-Y)*R+Y)*(Y-theta[8]), # row1 = theta[8]
                  (1-Z)*(1-Y_tau-theta[9]), # P(Y_tau(0) = 0) = theta[9]
                  Z*((1-S_star)*(1-Y_tau)*(1/theta[20]*(1-Y)*R+Y) - theta[10]), # P(Y_tau(1) = 0, S_star(1) = 0) = theta[10]
                  (1-theta[4])*theta[9] - theta[11]*theta[10], # P(Y_tau(0) = 0, S_star(0) = 0 | Y_tau(1) = 0, S*(1) = 0) = theta[11]
                  theta[8] - theta[12]*theta[11] - theta[13]*(1-theta[11]), # risk_1_00 = theta[12]
                  exp(max(beta1range))*theta[13]/(1-theta[13]) - theta[12]/(1-theta[12]), # risk_1_0star = theta[13]
                  Z*(1-Y_tau)*S_star*(1/theta[20]*(1-Y)*R+Y)*(Y-theta[14]), # row2 = theta[14]
                  Z*(S_star*(1-Y_tau)*(1/theta[20]*(1-Y)*R+Y) - theta[15]), # P(Y_tau(1) = 0, S_star(1) = 1) = theta[15]
                  theta[4]*theta[11] - theta[16]*theta[15], # P(Y_tau(0) = 0, S_star(0) = 0 | Y_tau(1) = 0, S*(1) = 1) = theta[16]
                  theta[14] - theta[17]*theta[16] - theta[18]*(1-theta[16]), # risk_1_10 = theta[17]
                  exp(min(beta2range))*theta[18]/(1-theta[18]) - theta[17]/(1-theta[17]), # risk_1_1star = theta[18]
                  theta[19] - theta[17] + theta[7] + (theta[12] - theta[6]), # CEP(1,0) - CEP(0,0) = theta[19]
                  (1-Y)*(R-theta[20]) # pi_hat = theta[20]
    ))
  }
}
|
b44ed5460a6616c92123f10374035470481b03db
|
1d8f5112758327e42ca8507f851326133cb10055
|
/src/CleanMatches.R
|
851d712530ef95ad902307ca27f2c974bd3a7067
|
[] |
no_license
|
EduardoClark/Football-Homicides
|
0a7234dfffabd9b229712be43bbfc770808becec
|
47c036d94805346a6fe80714370e6579fed2e26b
|
refs/heads/master
| 2021-01-01T05:48:16.845297
| 2013-12-09T19:53:41
| 2013-12-09T19:53:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,521
|
r
|
CleanMatches.R
|
###################################
### Author:Eduardo Clark
### Project: Homicides and Fútbol
### Date: September 2013
### For mediotiempo.com
###################################
#### Clean Match Data and create final dataframe with all matches
### Merge those with long month name
# F2007..F2011 are per-season match frames assumed to exist in the workspace.
Partial <- rbind(F2009, F2010, F2011)
# Fecha sometimes carries a trailing "[...]" annotation; cut at the first "[".
# NOTE(review): substr keeps the "[" itself (end = Splitter, not Splitter-1);
# as.Date below appears to tolerate the trailing character -- confirm.
Splitter <- regexpr("\\[", Partial$Fecha)
Splitter <- ifelse(Splitter==-1, 1000, Splitter)
Partial$Fecha <- substr(Partial$Fecha, 0, Splitter)
remove(Splitter)
Partial$Fecha <- tolower(Partial$Fecha)
# Spanish month names -> month numbers. 2009-2011 use the long form;
# 2007-2008 use the 3-letter abbreviation (Short).
Months <- data.frame(Long=c("enero", "febrero", "marzo", "abril", "mayo", "junio",
"julio", "agosto", "septiembre", "octubre",
"noviembre", "diciembre"), Number=c(1:12))
Months$Short <- substr(Months$Long, 0,3)
Months$Long <- as.character(Months$Long)
for (i in 1:12){
tmp <- Months$Long[i]
tmp1 <- Months$Number[i]
Partial$Fecha <- gsub(tmp, tmp1, Partial$Fecha)
remove(tmp, tmp1)
}
remove(i)
Partial1 <- rbind(F2007, F2008)
Partial1$Fecha <- tolower(Partial1$Fecha)
for (i in 1:12){
tmp <- Months$Short[i]
tmp1 <- Months$Number[i]
Partial1$Fecha <- gsub(tmp, tmp1, Partial1$Fecha)
remove(tmp, tmp1)
}
remove(i)
###Remove Redundants
Complete <- rbind(Partial1, Partial)
# NOTE(review): ls(pattern="F") removes EVERY workspace object whose name
# contains "F" (intended: the F20xx frames) -- broader than it looks.
remove(list=ls(pattern="F"), Months, Partial, Partial1)
###As.Date
Complete$Fecha <- as.Date(x=Complete$Fecha, format="%d %m %Y")
# Score string "H - V": home goals at position 1, away goals at 4-5.
Complete$Marcador <- as.character(Complete$Marcador)
Complete$GolesCasa <- as.numeric(substr(Complete$Marcador, 0,1))
Complete$GolesVisita <- as.numeric(substr(Complete$Marcador, 4,5))
# Winner flag: "Casa" (home), "Visita" (away) or "Empate" (draw).
Complete$Gana <- ifelse(Complete$GolesCasa > Complete$GolesVisita, "Casa", "Empate")
Complete$Gana <- ifelse(Complete$GolesCasa < Complete$GolesVisita, "Visita", Complete$Gana)
##Correct some team names
# Normalize team-name variants to a single short form.
# NOTE(review): the first two lines interact -- "Estudiantes Tecos" becomes
# "Tecos", and then "Tecos" (original or just-produced) becomes
# "Estudiantes". Confirm that mapping ALL Tecos variants to "Estudiantes"
# is intended.
Complete$Local <- gsub("Estudiantes Tecos", "Tecos", Complete$Local)
Complete$Local <- gsub("Tecos", "Estudiantes", Complete$Local)
Complete$Local <- gsub("Querétaro FC", "Querétaro" , Complete$Local)
Complete$Local <- gsub("Monarcas Morelia", "Morelia" , Complete$Local)
Complete$Local <- gsub("Jaguares de Chiapas", "Jaguares" , Complete$Local)
Complete$Local <- gsub("Puebla FC", "Puebla" , Complete$Local)
Complete$Local <- gsub("Santos Laguna","Santos" , Complete$Local)
Complete$Local <- gsub("Tigres de la UANL","Tigres" , Complete$Local)
Complete$Local <- gsub("Tigres UANL","Tigres" , Complete$Local)
Complete$Local <- gsub("UNAM","Pumas" , Complete$Local)
Complete$Local <- gsub("Toluca FC","Toluca" , Complete$Local)
# Same normalization for the away side.
# NOTE(review): asymmetries vs. Local -- here "Deportivo Toluca" (not
# "Toluca FC") is replaced, and "San luis" is fixed only for Visitante;
# variants may survive in the other column -- confirm.
Complete$Visitante <- gsub("Estudiantes Tecos", "Tecos", Complete$Visitante)
Complete$Visitante <- gsub("Tecos", "Estudiantes", Complete$Visitante)
Complete$Visitante <- gsub("Querétaro FC", "Querétaro" , Complete$Visitante)
Complete$Visitante <- gsub("Monarcas Morelia", "Morelia" , Complete$Visitante)
Complete$Visitante <- gsub("Jaguares de Chiapas", "Jaguares" , Complete$Visitante)
Complete$Visitante <- gsub("Puebla FC", "Puebla" , Complete$Visitante)
Complete$Visitante <- gsub("Santos Laguna","Santos" , Complete$Visitante)
Complete$Visitante <- gsub("Tigres de la UANL","Tigres" , Complete$Visitante)
Complete$Visitante <- gsub("Tigres UANL","Tigres" , Complete$Visitante)
Complete$Visitante <- gsub("UNAM","Pumas" , Complete$Visitante)
Complete$Visitante <- gsub("Deportivo Toluca","Toluca" , Complete$Visitante)
Complete$Visitante <- gsub("San luis","San Luis" , Complete$Visitante)
### Write Final Dataframe to csv
write.csv(Complete, "out-data/Matches2007_2011.csv", row.names=FALSE)
|
da620fe9c96b1ac4f3419a1218cb1a4115de71e6
|
138849c08c1f6fa0fc9f6fdd23d54caf2e184ce3
|
/R/schedule.R
|
392fec2eecb8cbca77a56461c30b036f46fecf85
|
[
"MIT"
] |
permissive
|
jameslairdsmith/gs
|
01c0e8951e5ab1e3c5f54681eef9a9d029fd314c
|
146ec9e72a632bc13c70c6abe1e329bfe2d14b34
|
refs/heads/master
| 2023-07-20T10:25:07.519470
| 2023-07-09T08:34:16
| 2023-07-09T08:34:16
| 156,515,663
| 10
| 0
|
NOASSERTION
| 2019-09-30T17:32:45
| 2018-11-07T08:39:52
|
R
|
UTF-8
|
R
| false
| false
| 4,268
|
r
|
schedule.R
|
#' Get the events of a schedule
#'
#' Get the event dates or datetimes from a schedule.
#'
#' @param x A schedule object.
#' @param from,to,during The limits to place on the output (see *Details*).
#' @param period_type The period type of the output. Eg. "day" (the default),
#' "hour" etc. Can be any unit accepted by `lubridate::period()`.
#' @param n The increment of the period type. Eg. for events occurring every
#' half-hour the `period_type` should "minute" and `n` should be set to
#' `30`.
#' @keywords schedule
#' @return A date or datetime object.
#' @details
#' `schedule_days()` is a convenience function for `schedule()` where
#' `period_type` is pre-filled as "day". Likewise for `schedule_hours()`,
#' where `period_type` is pre-filled as "hour". These functions are
#' recommended as they cover the most common use cases.
#'
#' The `from` and `to` arguments set limits on the output.
#' They are only required when the schedule `x` doesn't
#' have implicit limits (and therefore has an infinite number of possible
#' events).
#'
#' * `from` and `to` can each be either:
#' + a single date or datetime value or,
#' + A numeric year.
#' - In the case of `from`, a numeric year translates to the
#' start date of the year eg. `from = 2000` translates to
#' `as.Date("2000-01-01")`.
#' - In the case of `to` it translates to the end of the year eg.
#' `to = 2001` translates to `as.Date("2001-12-31")`.
#' * `during` is a shortcut for when setting a single year limit. Eg.
#' `during = 2000` is the equivalent of setting `from = as.Date("2000-01-01")`
#' and `to = as.Date("2000-12-31")`.
#'
#' @examples
#' library(magrittr)
#' on_paydays <- on_mday(25)
#'
#' schedule_days(on_paydays,
#' from = as.Date("2000-06-01"),
#' to = as.Date("2000-12-01"))
#'
#' schedule_days(on_paydays, from = 2000, to = 2001)
#'
#' schedule_days(on_paydays, during = 2000)
#'
#' on_jan_paydays <- only_occur(on_paydays, in_month("Jan"))
#'
#' schedule_hours(on_jan_paydays, during = 2000)
#'
#' on_jan_payday_2002 <-
#' on_paydays %>%
#' only_occur(in_month("Jan")) %>%
#' only_occur(in_year(2002))
#'
#' ## No limits required
#'
#' schedule_days(on_jan_payday_2002)
#' schedule_hours(on_jan_payday_2002)
#'
#' schedule(on_jan_payday_2002, period_type = "minute", n = 30)
#' @export
schedule <- function(x,
                     from = NULL,
                     to = NULL,
                     during = NULL,
                     period_type = "day",
                     n = 1){
  # Resolve the explicit/implicit limits into concrete start and end points.
  start_limit <- get_from(x = x, from = from, during = during)
  end_limit <- get_to(x = x, to = to, during = during)
  # Candidate instants at the requested resolution across the limits.
  candidates <- make_period_seq(start = start_limit,
                                end = end_limit,
                                period_unit = period_type,
                                period_n = n)
  # Keep only those candidates on which the schedule has an event.
  candidates[happen(x, candidates)]
}
#' @rdname schedule
#' @export
schedule_days <- function(x, from = NULL, to = NULL, during = NULL){
  # Day-resolution convenience wrapper around schedule().
  schedule(x, from = from, to = to, during = during, period_type = "day")
}
#' @rdname schedule
#' @export
schedule_hours <- function(x, from = NULL, to = NULL, during = NULL){
  # Hour-resolution convenience wrapper around schedule().
  schedule(x, from = from, to = to, during = during, period_type = "hour")
}
#' @rdname schedule
#' @export
schedule_next_days <- function(x, n, from, limit = lubridate::years(1)){
  # Collect the next `n` dates on which schedule `x` has an event, starting
  # at `from` (inclusive) and giving up once `limit` has elapsed.
  on_my_schedule <- x
  start_date <- from
  changing_date <- start_date
  # Empty Date vector to accumulate results (integer(0) reclassed to Date).
  date_vector <- integer(0)
  class(date_vector) <- "Date"
  period_length <- limit
  end_limit <- start_date + period_length
  # `n` is reused as a 1-based counter of events found so far; n_limit keeps
  # the requested count.
  n_limit <- n
  n <- 1
  if(happen(on_my_schedule, start_date)){
    date_vector <- c(date_vector, start_date)
    n <- n + 1
  }
  # Walk forward one day at a time until enough events are found or the
  # time limit is reached. Scalar conditions, so the elementwise `&` works,
  # though `&&` would be conventional here.
  # NOTE(review): bare days(1) assumes lubridate is attached -- elsewhere in
  # this file lubridate:: is used explicitly; confirm.
  while(n <= n_limit & changing_date < end_limit){
    changing_date <- changing_date + days(1)
    if(happen(on_my_schedule, changing_date)){
      n <- n + 1
      date_vector <- c(changing_date, date_vector)
    }
  }
  # Events were prepended inside the loop; sort restores chronological order.
  sort(date_vector)
}
# Build a regular sequence of instants covering [start, end] at a step of
# `period_n` units of `period_unit` (any unit lubridate::period accepts).
#
# The count of steps comes from dividing a padded interval (one period before
# `start`, one day past `end`) by the period, minus 2 to undo the padding --
# so the last element should not run past `end`. Note `:` binds tighter than
# `*`, so the final line is (0:num_periods) * one_period added to `start`.
make_period_seq <- function(start, end, period_unit = "days", period_n = 1){
  one_period <- lubridate::period(num = period_n, units = period_unit)
  my_interval <- lubridate::interval(start - one_period , end + lubridate::days(1))
  num_periods <- (my_interval / one_period) - 2
  start + 0:num_periods * one_period
}
|
599afe262f89721138c5425a9f2f29df5bb16b35
|
3caefccdc44a00da4eac6ec4ec4c39fa26bc2280
|
/plot1.R
|
4bf2d7c794659ec25b73f2a7f3cbbd42c5f0fe9d
|
[] |
no_license
|
hawk-eye/ExData_Plotting1
|
5550ebd1b29ae9cc553e786d294c8b1f7eb84d27
|
5c14aaffe87c453aba42e8ded7c562f82b202fbe
|
refs/heads/master
| 2021-01-16T21:15:42.993613
| 2015-09-13T23:30:04
| 2015-09-13T23:30:04
| 42,415,513
| 0
| 0
| null | 2015-09-13T22:34:00
| 2015-09-13T22:34:00
| null |
UTF-8
|
R
| false
| false
| 725
|
r
|
plot1.R
|
# plot1.R: histogram of Global_active_power for 2007-02-01/02 from the
# household power consumption text file (semicolon-separated, header row),
# written to plot1.png.
# Assumes household_power_consumption.txt is in the working directory.
setwd(".")  # no-op placeholder; point this at the repo folder if needed
colnames <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# Read every field as character; conversions happen explicitly below.
colclasses <- rep("character", 9)
data <- read.table("./household_power_consumption.txt", sep=";", col.names = colnames, colClasses = colclasses, header=TRUE)
data$Date <- strptime(data$Date, "%d/%m/%Y")
# Keep only the two target days (the strings are coerced for the comparison).
sub_data <- subset(data, Date>="2007-02-01" & Date<="2007-02-02")
png(file="plot1.png")
# Fix: the x-axis label was missing its closing parenthesis.
hist(as.numeric(sub_data$Global_active_power), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
e8d8d175a31425968b855eebda7e586200a4c509
|
4da1b4f9a30d813fdfc68cc6a1a3387da187b432
|
/function/dof/htmlpdfr/fs_funceval_groupwider.R
|
58fc2ac759e29557d2f275fcd0d2d2239d5dd75b
|
[
"MIT"
] |
permissive
|
FanWangEcon/R4Econ
|
50b21f0f53fd73722ab49f3383102df001415fda
|
7c7c4af32721817b2bc18944cfa17ecd8fb1f848
|
refs/heads/master
| 2023-04-03T18:41:31.038769
| 2023-03-23T05:06:53
| 2023-03-23T05:06:53
| 173,583,807
| 19
| 14
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,782
|
r
|
fs_funceval_groupwider.R
|
## ----global_options, include = FALSE------------------------------------------------------------------------------------------------
# (This file is purl'd from an Rmd; the chunk-separator comments are kept.)
try(source("../../.Rprofile"))
## -----------------------------------------------------------------------------------------------------------------------------------
# Define panel dimensions: it_M groups, it_P members per group.
it_M <- 3
it_P <- 5
# Column names for the group id and the within-group index.
svr_m <- 'group_m'
svr_mp <- 'info_mp'
# dataframe
# Build the (group, member) panel skeleton: uncount(V1) expands each of the
# it_M rows into it_P member rows; rowwise() + rnorm(1) draws independent
# wage/savings values per row.
set.seed(123)
df_panel_skeleton <- as_tibble(matrix(it_P, nrow=it_M, ncol=1)) %>%
  rowid_to_column(var = svr_m) %>%
  uncount(V1) %>%
  group_by(!!sym(svr_m)) %>% mutate(!!sym(svr_mp) := row_number()) %>%
  ungroup() %>%
  rowwise() %>% mutate(wage = rnorm(1, 100, 10),
                       savings = rnorm(1, 200, 30)) %>%
  ungroup() %>%
  rowid_to_column(var = "id_ji")
# Print
# (kable_styling_fc is a project helper sourced from .Rprofile)
kable(df_panel_skeleton) %>% kable_styling_fc()
## -----------------------------------------------------------------------------------------------------------------------------------
# define function
ffi_subset_mean_sd <- function(df_sub, it_round=1) {
  #' A function that generates mean and sd for several variables
  #'
  #' @description
  #' Assumes there are two variables in df_sub: wage and savings
  #'
  #' @param df_sub dataframe where each individual row is a different
  #' data point, over which we compute mean and sd. Assumes there are two
  #' variables, savings and wage
  #' @param it_round integer rounding for the resulting dataframe
  #' @return a dataframe where each row is an aggregate for a different
  #' variable and each column is a different statistic
  # Statistics for wage, savings, and their sum.
  fl_wage_mn = mean(df_sub$wage)
  fl_wage_sd = sd(df_sub$wage)
  fl_save_mn = mean(df_sub$savings)
  fl_save_sd = sd(df_sub$savings)
  fl_wgsv_mn = mean(df_sub$wage + df_sub$savings)
  fl_wgsv_sd = sd(df_sub$wage + df_sub$savings)
  # Assemble a 3-row matrix (one row per variable), round, then attach the
  # row labels and a row id.
  ar_mn <- c(fl_wage_mn, fl_save_mn, fl_wgsv_mn)
  ar_sd <- c(fl_wage_sd, fl_save_sd, fl_wgsv_sd)
  ar_st_row_lab <- c('wage', 'savings', 'wage_and_savings')
  mt_stats <- cbind(ar_mn, ar_sd)
  mt_stats <- round(mt_stats, it_round)
  ar_st_varnames <- c('mean', 'sd', 'variables')
  df_combine <- as_tibble(mt_stats) %>%
    add_column(ar_st_row_lab) %>%
    rename_all(~c(ar_st_varnames)) %>%
    select(variables, 'mean', 'sd') %>%
    rowid_to_column(var = "id_q")
  return(df_combine)
}
# testing function
ffi_subset_mean_sd(df_panel_skeleton %>% filter(!!sym(svr_m)==1))
## -----------------------------------------------------------------------------------------------------------------------------------
# run group stats and stack dataframes
# do() stores one stats tibble per group; unnest() stacks them.
# NOTE(review): do() and bare unnest() (no column spec) are superseded/
# deprecated in current dplyr/tidyr -- works, but may warn on recent versions.
df_outputs <- df_panel_skeleton %>% group_by(!!sym(svr_m)) %>%
  do(df_stats = ffi_subset_mean_sd(., it_round=2)) %>%
  unnest() %>%
  rowid_to_column(var = "id_mq")
# print
kable(df_outputs) %>% kable_styling_fc()
|
16ca6d6bb940398a821de414e8f40cc768836459
|
b0eee00386b0b20ddac18152e8a6d91d08f69cdf
|
/margins.R
|
27ad85914324bd5acb1b113c3f1d5f975e578837
|
[] |
no_license
|
btskinner/rmargins
|
04f4ccec30a0b73d366e2666696185a34338b4eb
|
912b4242f76e62acbdc213ebaa06f24811f395ee
|
refs/heads/master
| 2020-08-30T08:12:19.456101
| 2019-10-29T15:40:59
| 2019-10-29T15:40:59
| 218,315,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,193
|
r
|
margins.R
|
## Gist to do margins in R -----------------------------------------------------
##
## Run your regression model and then use the predict() function with new data,
## which is just a synthetic data set that you make from the model.matrix()
## of the regression (the X matrix in matrix notation).
##
## You create a new data frame of N x K size where,
##
## N := # rows that equal number of margins to predict across
## K := # columns that match covariates used in regression model
##
## For the variable that you want margins, make each row a unique marginal
## value. For a binary, you will have 2 rows and the column will have two
## values, 0 and 1. If it's continuous and you want marginal predictions every
## 10 steps, then you'll have 9 rows, for 10 through 90 (by 10 each step).
##
## All other column variables should take on some consistent value. The
## easiest is at their means, but if you have dummies that represent a
## particular group, you could just set the value at that.
## -----------------------------------------------------------------------------
## --------------------------------------
## logistic regression
## --------------------------------------
## read in fake data (assumes fake_data.csv sits in the working directory)
df <- read.csv('./fake_data.csv')
## run logit
mod <- glm(y ~ x1 + x2 + x3 + x4, data = df, family = binomial(link = 'logit'))
summary(mod)
## ---------------------------------------------------------------------
## Margins for unit change in binary variable (x3)
## ---------------------------------------------------------------------
## (1) get model matrix from glm() object
mm <- model.matrix(mod)
head(mm)
## (2) drop intercept column of ones b/c we don't need it
mm <- mm[,-1]
head(mm)
## (3) convert to data.frame to make life easier
df_mm <- as.data.frame(mm)
## -----------------------------------------
## VERSION 1: all other variables atmeans
## -----------------------------------------
## NB: this should be equivalent to Stata -margins x3, atmeans-
## (4) make "new data" where # rows == # margins for key var, averages elsewhere
new_df <- data.frame(x1 = mean(df_mm$x1),
                     x2 = mean(df_mm$x2),
                     x3 = c(0,1), # two margins, 0/1, for x3
                     x4 = mean(df_mm$x4))
new_df
## (5) use predict() with new data, setting type to get probs
## (se.fit = TRUE makes predict() return a list with $fit and $se.fit)
pp <- predict(mod, newdata = new_df, se.fit = TRUE, type = 'response')
pp
## check difference (Stata: -margins, dydx(x3) atmeans-)
## ($fit[2] is the x3 == 1 row, $fit[1] the x3 == 0 row of new_df above)
pp$fit[2] - pp$fit[1]
## NB: getting the SE on this difference involves some gnarly second partial
## derivatives that I'm not sure how to compute offhand in base R
## -----------------------------------------
## VERSION 2: x4 == 1, others atmeans
## -----------------------------------------
## NB: this should be equivalent to Stata -margins x3, at(x4 = 1) atmeans-
## (4) make "new data" where # rows == # margins for key var, averages elsewhere
new_df <- data.frame(x1 = mean(df_mm$x1),
                     x2 = mean(df_mm$x2),
                     x3 = c(0,1), # two margins, 0/1, for x3
                     x4 = 1)      # x4 == 1
new_df
## (5) use predict() with new data, setting type to get probs
pp <- predict(mod, newdata = new_df, se.fit = TRUE, type = 'response')
pp
## ---------------------------------------------------------------------
## Margins for unit change in continuous variable (x1)
## ---------------------------------------------------------------------
## NB: this should be equivalent to Stata -margins, at(x1 = (-4(1)4)) atmeans-
## get idea of range
summary(df$x1)
## (4) make "new data" where # rows == # margins for key var, averages elsewhere
new_df <- data.frame(x1 = seq(from = -4, to = 4, by = 1),
                     x2 = mean(df_mm$x2),
                     x3 = mean(df_mm$x3),
                     x4 = mean(df_mm$x4))
new_df
## (5) use predict() with new data, setting type to get probs
pp <- predict(mod, newdata = new_df, se.fit = TRUE, type = 'response')
## print predicted probabilities without scientific notation
format(pp, scientific = FALSE)
## -----------------------------------------------------------------------------
## END SCRIPT
################################################################################
|
1723d65172fadf31794d6f6fc405381b2aa5f56f
|
68f3a836df8c2af90f8e9870b9ce236df4dc8b88
|
/DAL1/Maylath - DAL1.R
|
e604e461fb59677daee81d0cda33f26ad43f2c44
|
[] |
no_license
|
mmaylath/Maylath-POSC-3410
|
ec15825c2b62feea1b3093db6b241e77bf5360e4
|
1e8de09806feb6af49798b157ae740e689281493
|
refs/heads/main
| 2023-04-13T15:50:59.880506
| 2021-04-21T03:55:34
| 2021-04-21T03:55:34
| 329,317,453
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,248
|
r
|
Maylath - DAL1.R
|
# Title: DAL 1 ####
# Author: Madeleine Maylath
# Date: 1/13/2021
# Lesson 1 ####
# My favorite movie is Interstellar.
# Lesson 2: R Basics ####
# Let's Try Some Basic Arithmetic ####
# Sum of 1 and 1
1+1
# Divide 365 by 12
365/12
# Your turn, multiply 10 by 12
10*12
# Your turn, add 28 + 38
28+38
# Order of Operations in R
1+1*(365/12)
# Multiply 6 by 6
6*6
# Divide 9 by 3.142
9/3.142
# Learning to assign variables ####
# Run this line of code to assign your first variable.
myFirstVar<- "Hello world!"
# Run myFirstVar in the console.
myFirstVar
# Code a second variable with a number.
mySecondVar<-16
# Run mySecondVar in the console.
mySecondVar
# Now let's practice assigning variables.
# Assign "Clemson" to `home`
home <- "Clemson"
# Assign "Tigers" to `h_mascot`
h_mascot <- "Tigers"
# Assign "Ohio State" to `away`
away<-"Ohio State"
# Assign "Buckeyes" to `a_mascot`
a_mascot <- "Buckeyes"
# Assign 31 to `homeScore`
homeScore <- 31
# Assign 0 to `awayScore`
awayScore <- 0
# Assign TRUE to `homeWin`
homeWin <- TRUE
# Assign FALSE to `awayWin`
awayWin <- FALSE
# Use class to identify the data type for the following variables.
class(h_mascot)
class(homeScore)
class(homeWin)
# Use str to identify the data types for the following variables.
str(h_mascot)
str(homeScore)
str(homeWin)
# str defines the class and value of variables.
# Can sports scores ever be represented as decimals? No. We need to
# convert 'homeScore' and 'awayScore' to integers.
# Convert 'homeScore' to integer and assign to the same var name.
homeScore <- as.integer(homeScore)
homeScore
# Now it is your turn.
# Convert 'awayScore' to integer and assign to same var name.
awayScore <- as.integer(awayScore)
# Make a numeric vector by using the syntax 'c(#,#,#,#)'; check its structure.
vector_numeric <- c(12, 8, 16, 4, 15)
str(vector_numeric)
# Make a numeric vector, `myNumericVector`, on your own; check its structure.
myNumericVector <- c(11, 12, 15, 17)
str(myNumericVector)
# Make a logical vector; check its structure.
# (NOTE: prefer TRUE/FALSE over T/F -- T and F can be reassigned.)
vector_logical <-c(TRUE, TRUE, FALSE, T, F)
str(vector_logical)
# Make a logical vector, `myLogicalVector`, on your own; check its structure.
myLogicalVector <- c(TRUE, FALSE, FALSE, T)
str(myLogicalVector)
# Make a character vector; check its structure.
vector_character <-c("Montana", "Aikman", "Manning", "Favre", "Mahomes")
str(vector_character)
# Make a character vector, `myCharVector`, on your own; check its structure.
myCharVector <- c("John","Paul","George","Ringo")
str(myCharVector)
# Make a list of the vectors that I created: drAllardList; check its structure.
drAllardList <-list(vector_numeric, vector_logical, vector_character)
str(drAllardList)
# Notice a few things about the list: (1) A list is made up of vectors with
# different types, (2) the list is made up of vectors of different lengths,
# (3) running str() on list results in console output telling us "List of 3".
# Make a list of the vectors YOU created: myList; check its structure.
myList <- list(myNumericVector, myLogicalVector, myCharVector)
str(myList)
# Create data frame: QB_df; print in console; check structure
QB_df <-data.frame(vector_character, vector_numeric, vector_logical)
QB_df
str(QB_df)
# Note: It is a good practice to add "_df" to the names of objects that are data frames.
# print the numeric column to the console; use syntax: dataframe$columnName
QB_df$vector_numeric
# print the character column to the console; use syntax: dataframe$columnName
QB_df$vector_character
# Rename QB_df$vector_character as QB_df$qbName
names(QB_df)[names(QB_df)=="vector_character"] <- "qbName"
str(QB_df)
# What is going on here?
# it is going into the names of the objects in the data frame, looking for the
# one named vector_character, and changing this to qbName. This renames the
# column in the data frame, but not the original vector that was placed in the
# data frame. This is called indexing, which is used to change specific values.
# Select the first row of QB_df
QB_df[1,]
# Select the first column of QB_df
QB_df[,1]
# Select the `qbName` column using ""
QB_df[,"qbName"]
# If you want to keep the complexity of the data frame, you can use the following formats.
QB_df[1]
QB_df["qbName"]
# It is important to know the difference between these approaches because we will use each for different reasons.
# Select the cell that is at the intersection of the 3rd row and 2nd column
QB_df[3,2]
# Now it is your turn.
My_df <- data.frame(myCharVector,myNumericVector,myLogicalVector)
str(My_df)
# Select the first row of your dataframe
My_df[1,]
# Select the 3rd column of your dataframe
My_df[,3]
# Select the cell that is at the intersection of the 1st row and 2nd column of your data frame
My_df[1,2]
# What type of data structure is returned by calling names(QB_df)?
names(QB_df)
# Answer: a vector.
# If we want to rename a specific element of the vector, then we need to use indexing to select the element of the vector.
names(QB_df)[names(QB_df)=="vector_numeric"]
# Now we need to assign a new value to it.
names(QB_df)[names(QB_df)=="vector_numeric"] <- "jerseyNum"
# Repeat this process for the 3rd column: HoFer (Hall of Famer)
names(QB_df)[names(QB_df)=="vector_logical"] <- "HoFer"
|
0871163516035f89b0bf4ce65b37cade9dd94b79
|
c03754bd79a6f9baf6bd55e74483bed1a2dbe8a7
|
/R/LAMMPSWriter.R
|
981c6796fcf1cbe462c12e0727f2c2459f35831f
|
[] |
no_license
|
julienide/Atoms
|
675ee51c5803bef9868838b1a229dc9d037bce9b
|
d805bd893a152bd4edc350a2fb18de0ed2d19a3c
|
refs/heads/master
| 2021-04-27T06:57:27.989087
| 2018-06-19T09:09:53
| 2018-06-19T09:09:53
| 122,623,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,007
|
r
|
LAMMPSWriter.R
|
#' writeLAMMPS
#'
#'
#' @name writeLAMMPS
#' @export
# S3 generic: dispatches to a writeLAMMPS.<class> method based on class(x).
writeLAMMPS <- function(x, file, type) {
  UseMethod("writeLAMMPS")
}
#' @rdname writeLAMMPS
#' @export
writeLAMMPS.Atoms <- function(x, file, type = "full"){
  ## Write an Atoms object as a LAMMPS data file readable by 'read_data'.
  ##
  ## x:    an Atoms object (project S4 class) with $atmtype, coordinates and
  ##       optional $charge/$resnumb/$mass columns plus topology slots.
  ## file: path of the data file to write (length-1 character).
  ## type: LAMMPS atom style; only "atomic", "charge" and "full" are
  ##       implemented -- the remaining styles stop with "Not implemented yet".
  ##
  ## Fixes vs. previous revision: section headers "Dihedral Coeffs" /
  ## "Improper Coeffs" (LAMMPS requires the singular form; the old plural
  ## headers made read_data reject the file), "cahracter" typos in error
  ## messages, and seq_len() instead of 1:n sequences.
  ## Atom styles recognized by the LAMMPS 'read_data' command.
  LAMMPSDataTypes <- c(
    "angle", "atomic", "body", "bond",
    "charge", "dipole", "dpd", "electron",
    "ellipsoid", "full", "line", "meso",
    "molecular", "peri", "smd", "sphere",
    "template", "tri", "wavepacket", "hybrid")
  ## Validate arguments early.
  if(!is.character(file) || length(file) != 1L)
    stop("'file' must be a character vector of length 1")
  if(!is.character(type) || length(type) != 1L)
    stop("'type' must be a character vector of length 1")
  if(!type %in% LAMMPSDataTypes)
    stop("'type' must be one of: ", paste(LAMMPSDataTypes, collapse = ", "))
  if(is.null(x$atmtype))
    stop("Missing atom-type ('x$atmtype' is null)")
  ## Consecutive integer type IDs derived from the atom-type labels.
  atmtype <- as.integer(as.factor(x$atmtype))
  ## Format the body of the "Atoms" section according to the atom style.
  atoms <- switch(
    type,
    angle = {stop("Not implemented yet")},
    atomic = {
      # atom-ID atom-type x y z
      sprintf(
        fmt = "%6i %6i %12.6f %12.6f %12.6f # %s",
        seq_len(natom(x)), atmtype, x$x, x$y, x$z, x$atmtype)
    },
    body = {stop("Not implemented yet")},
    bond = {stop("Not implemented yet")},
    charge = {
      if(is.null(x$charge))
        stop("Missing charges ('x$charge' is null)")
      # atom-ID atom-type q x y z
      sprintf(
        fmt = "%6i %6i %12.6f %12.6f %12.6f %12.6f # %s",
        seq_len(natom(x)), atmtype, x$charge, x$x, x$y, x$z, x$atmtype)
    },
    dipole = {stop("Not implemented yet")},
    dpd = {stop("Not implemented yet")},
    electron = {stop("Not implemented yet")},
    ellipsoid = {stop("Not implemented yet")},
    full = {
      if(is.null(x$resnumb))
        stop("Missing molecule-ID ('x$resnumb' is null)")
      if(is.null(x$charge))
        stop("Missing charges ('x$charge' is null)")
      # atom-ID molecule-ID atom-type q x y z
      sprintf(
        fmt = "%6i %6i %6i %12.6f %12.6f %12.6f %12.6f # %s",
        seq_len(natom(x)), x$resnumb, atmtype, x$charge, x$x, x$y, x$z, x$atmtype)
    },
    line = {stop("Not implemented yet")},
    meso = {stop("Not implemented yet")},
    molecular = {stop("Not implemented yet")},
    peri = {stop("Not implemented yet")},
    smd = {stop("Not implemented yet")},
    sphere = {stop("Not implemented yet")},
    template = {stop("Not implemented yet")},
    tri = {stop("Not implemented yet")},
    wavepacket = {stop("Not implemented yet")},
    hybrid = {stop("Not implemented yet")}
  )
  ## NOTE(review): unreachable -- a NULL x$atmtype already errored above.
  ## Kept in case the guard is ever relaxed.
  if(is.null(x$atmtype))
    x$atmtype <- x$atmname
  ## Fixed-width format spec for atom-type labels in topology comments.
  fmt <- max(nchar(x$atmtype))
  fmt <- paste0("%-", fmt, "s")
  ## One mass per integer type ID; taken from x$mass when present,
  ## otherwise looked up from the type name via the project mass() helper.
  masses <- data.frame(
    atmtype = as.integer(as.factor(x$atmtype)),
    atmname = x$atmtype,
    stringsAsFactors = FALSE)
  if(!is.null(x$mass)){
    masses$mass <- x$mass
  } else {
    masses$mass <- mass(masses$atmname)
  }
  masses <- unique(masses)
  if(anyDuplicated(masses$atmtype))
    stop("Atom with same type have different masses")
  ## Canonicalize bond/angle/dihedral/improper atom-name tuples so that a
  ## topology term and its reverse map to the same type label.
  types <- topoDict(x, unique = FALSE)
  types$bonds[types$bonds$atm1 > types$bonds$atm2, ] <-
    types$bonds[types$bonds$atm1 > types$bonds$atm2, 2:1]
  types$angles[types$angles$atm1 > types$angles$atm3, ] <-
    types$angles[types$angles$atm1 > types$angles$atm3, 3:1]
  types$dihedrals[types$dihedrals$atm2 > types$dihedrals$atm3, ] <-
    types$dihedrals[types$dihedrals$atm2 > types$dihedrals$atm3, 4:1]
  types$dihedrals[types$dihedrals$atm2 == types$dihedrals$atm3 &
                    types$dihedrals$atm1 > types$dihedrals$atm4, ] <-
    types$dihedrals[types$dihedrals$atm2 == types$dihedrals$atm3 &
                      types$dihedrals$atm1 > types$dihedrals$atm4, 4:1]
  types$impropers[types$impropers$atm2 > types$impropers$atm3, 2:3] <-
    types$impropers[types$impropers$atm2 > types$impropers$atm3, 3:2]
  types$impropers[types$impropers$atm3 > types$impropers$atm4, 3:4] <-
    types$impropers[types$impropers$atm3 > types$impropers$atm4, 4:3]
  types$impropers[types$impropers$atm2 > types$impropers$atm3, 2:3] <-
    types$impropers[types$impropers$atm2 > types$impropers$atm3, 3:2]
  ## Collapse each tuple into a single padded label and factor-encode it.
  types$bonds <- as.factor(
    sprintf(
      fmt = paste(rep(fmt, 2L), collapse = " "),
      types$bonds$atm1,
      types$bonds$atm2))
  types$angles <- as.factor(
    sprintf(
      fmt = paste(rep(fmt, 3L), collapse = " "),
      types$angles$atm1,
      types$angles$atm2,
      types$angles$atm3))
  types$dihedrals <- as.factor(
    sprintf(
      fmt = paste(rep(fmt, 4L), collapse = " "),
      types$dihedrals$atm1,
      types$dihedrals$atm2,
      types$dihedrals$atm3,
      types$dihedrals$atm4))
  types$impropers <- as.factor(
    sprintf(
      fmt = paste(rep(fmt, 4L), collapse = " "),
      types$impropers$atm1,
      types$impropers$atm2,
      types$impropers$atm3,
      types$impropers$atm4))
  ## Sort topology terms by type label and reorder the topology slots to
  ## match, so the per-term type IDs below line up with the reordered rows.
  BOrder <- order(types$bonds)
  AOrder <- order(types$angles)
  DOrder <- order(types$dihedrals)
  IOrder <- order(types$impropers)
  types$bonds <- types$bonds[BOrder]
  types$angles <- types$angles[AOrder]
  types$dihedrals <- types$dihedrals[DOrder]
  types$impropers <- types$impropers[IOrder]
  x@bonds <- bonds(x)[BOrder, ]
  x@angles <- angles(x)[AOrder, ]
  x@dihedrals <- dihedrals(x)[DOrder, ]
  x@impropers <- impropers(x)[IOrder, ]
  ## Per-term integer type IDs (aligned with the reordered rows).
  btypes <- as.integer(types$bonds)
  atypes <- as.integer(types$angles)
  dtypes <- as.integer(types$dihedrals)
  itypes <- as.integer(types$impropers)
  ## Unique type labels: integer ID values named by their label strings.
  types$bonds <- unique(types$bonds)
  types$angles <- unique(types$angles)
  types$dihedrals <- unique(types$dihedrals)
  types$impropers <- unique(types$impropers)
  types$bonds <- structure(as.integer(types$bonds), names = as.character(types$bonds))
  types$angles <- structure(as.integer(types$angles), names = as.character(types$angles))
  types$dihedrals <- structure(as.integer(types$dihedrals), names = as.character(types$dihedrals))
  types$impropers <- structure(as.integer(types$impropers), names = as.character(types$impropers))
  cell <- round(cell(x), digits = 5L)
  ## --- Header: title and per-section counts ---
  lines <- c("LAMMPS data file", "")
  lines <- c(lines, sprintf(fmt = "%6i atoms", natom(x)))
  if(nrow(bonds(x)))
    lines <- c(lines, sprintf(fmt = "%6i bonds", nrow(bonds(x))))
  if(nrow(angles(x)))
    lines <- c(lines, sprintf(fmt = "%6i angles", nrow(angles(x))))
  if(nrow(dihedrals(x)))
    lines <- c(lines, sprintf(fmt = "%6i dihedrals", nrow(dihedrals(x))))
  if(nrow(impropers(x)))
    lines <- c(lines, sprintf(fmt = "%6i impropers", nrow(impropers(x))))
  lines <- c(lines, "")
  cat(lines, file = file, sep = "\n")
  ## --- Header: type counts ---
  lines <- c(sprintf(fmt = "%6i atom types", length(unique(x$atmtype))))
  if(length(types$bonds))
    lines <- c(lines, sprintf(fmt = "%6i bond types", length(types$bonds)))
  if(length(types$angles))
    lines <- c(lines, sprintf(fmt = "%6i angle types", length(types$angles)))
  if(length(types$dihedrals))
    lines <- c(lines, sprintf(fmt = "%6i dihedral types", length(types$dihedrals)))
  if(length(types$impropers))
    lines <- c(lines, sprintf(fmt = "%6i improper types", length(types$impropers)))
  lines <- c(lines, "")
  cat(lines, file = file, sep = "\n", append = TRUE)
  ## --- Header: simulation box (tilt factors only for triclinic cells) ---
  lines <- c(
    sprintf(fmt = "%12.6f %12.6f xlo xhi", 0.0, cell[1L, 1L]),
    sprintf(fmt = "%12.6f %12.6f ylo yhi", 0.0, cell[2L, 2L]),
    sprintf(fmt = "%12.6f %12.6f zlo zhi", 0.0, cell[3L, 3L]))
  if(any(cell[upper.tri(cell)] != 0.0)){
    lines <- c(
      lines,
      sprintf(
        fmt = "%12.6f %12.6f %12.6f xy xz yz",
        cell[1L, 2L], cell[1L, 3L], cell[2L, 3L]))
  }
  cat(lines, "", file = file, sep = "\n", append = TRUE)
  ## --- Masses and zero-filled coefficient stubs (to be edited by hand) ---
  cat(
    "Masses\n",
    sprintf(fmt = "%6i %12.6f # %s",
            masses$atmtype, masses$mass, masses$atmname),
    "",
    file = file, sep = "\n", append = TRUE)
  cat(
    "Pair Coeffs\n",
    sprintf(fmt = "%6i 0.0000 0.0000 # %s",
            masses$atmtype, masses$atmname),
    "",
    file = file, sep = "\n", append = TRUE)
  if(length(types$bonds)){
    cat(
      "Bond Coeffs\n",
      sprintf(fmt = "%6i 0.0000 0.0000 # %s",
              types$bonds, names(types$bonds)),
      "",
      file = file, sep = "\n", append = TRUE)
  }
  if(length(types$angles)){
    cat(
      "Angle Coeffs\n",
      sprintf(fmt = "%6i 0.0000 0.0000 # %s",
              types$angles, names(types$angles)),
      "",
      file = file, sep = "\n", append = TRUE)
  }
  if(length(types$dihedrals)){
    ## Fixed: LAMMPS expects "Dihedral Coeffs", not "Dihedrals Coeffs".
    cat(
      "Dihedral Coeffs\n",
      sprintf(fmt = "%6i 0.0000 0.0000 # %s",
              types$dihedrals, names(types$dihedrals)),
      "",
      file = file, sep = "\n", append = TRUE)
  }
  if(length(types$impropers)){
    ## Fixed: LAMMPS expects "Improper Coeffs", not "Impropers Coeffs".
    cat(
      "Improper Coeffs\n",
      sprintf(fmt = "%6i 0.0000 0.0000 # %s",
              types$impropers, names(types$impropers)),
      "",
      file = file, sep = "\n", append = TRUE)
  }
  ## --- Atoms and topology sections ---
  cat("Atoms\n",
      atoms,
      "",
      file = file, sep = "\n", append = TRUE)
  if(nrow(bonds(x))){
    cat(
      "Bonds\n",
      sprintf(fmt = "%6i %6i %6i %6i",
              seq_len(nrow(bonds(x))), btypes, bonds(x)$atm1, bonds(x)$atm2),
      "",
      file = file, sep = "\n", append = TRUE)
  }
  if(nrow(angles(x))){
    cat(
      "Angles\n",
      sprintf(fmt = "%6i %6i %6i %6i %6i",
              seq_len(nrow(angles(x))), atypes,
              angles(x)$atm1, angles(x)$atm2, angles(x)$atm3),
      "",
      file = file, sep = "\n", append = TRUE)
  }
  if(nrow(dihedrals(x))){
    cat(
      "Dihedrals\n",
      sprintf(fmt = "%6i %6i %6i %6i %6i %6i",
              seq_len(nrow(dihedrals(x))), dtypes,
              dihedrals(x)$atm1, dihedrals(x)$atm2,
              dihedrals(x)$atm3, dihedrals(x)$atm4),
      "",
      file = file, sep = "\n", append = TRUE)
  }
  if(nrow(impropers(x))){
    cat(
      "Impropers\n",
      sprintf(fmt = "%6i %6i %6i %6i %6i %6i",
              seq_len(nrow(impropers(x))), itypes,
              impropers(x)$atm1, impropers(x)$atm2,
              impropers(x)$atm3, impropers(x)$atm4),
      "",
      file = file, sep = "\n", append = TRUE)
  }
}
|
1ef1c512ef55b376b8a97712b7360b0aa74259f0
|
abdcca10c0dbc45eeb312abe7d8de03792ebb96d
|
/ggplot2/ggplot2.R
|
944354c53ece1d03eb07236db8c1dd043829b322
|
[] |
no_license
|
alescion/02-DataVis-5Ways
|
ad4b9109388e935b1f9dddfe21a3fc5fc3f97dc8
|
d2b773e21d11cf745aa5fcbd09c33f56beadd090
|
refs/heads/main
| 2023-03-01T23:07:59.673770
| 2021-02-13T02:29:59
| 2021-02-13T02:29:59
| 338,480,827
| 0
| 0
| null | 2021-02-13T02:28:31
| 2021-02-13T02:28:31
| null |
UTF-8
|
R
| false
| false
| 249
|
r
|
ggplot2.R
|
library(ggplot2)

# Load the sample data; run this script from its own directory.
cars_df <- read.csv(file = "../cars-sample.csv", head = TRUE, sep = ",")

# Scatter of MPG vs. weight, colored by manufacturer and sized by weight.
mpg_plot <- ggplot(data = cars_df,
                   mapping = aes(x = Weight, y = MPG, color = Manufacturer)) +
  geom_point(aes(size = Weight), alpha = 0.5)
print(mpg_plot)
|
82c5ca552e574c5760c8bab00f8cdc832c070510
|
c03d282847ddbcd8da22158e2b3fe8e85ca6f5fc
|
/cm_analysis/criminal_analysis.R
|
5191c24b8e3b7194e44026d457892950679e0dc9
|
[] |
no_license
|
morganbooker/criminal_minds_analysis
|
bc7a3d6a920463bb19b121806176580d8e682d5a
|
985bfea359829f0686262750331586987b9cea4f
|
refs/heads/master
| 2020-09-14T23:26:45.755222
| 2019-12-13T22:37:14
| 2019-12-13T22:37:14
| 223,291,782
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,451
|
r
|
criminal_analysis.R
|
#### LIBRARIES ####
# Load in necessary libraries
library(fs)
library(tidyverse)
#### DATA FRAMES #####
# Restrict the tokenized script data to BAU team-member name tokens and
# collapse first-name/last-name/nickname variants into one canonical
# character name per row.
# NOTE(review): assumes `cm_words` (tokenized transcripts with season,
# episode, caught and word columns) and `bau` (vector of name tokens) are
# loaded earlier in the project -- neither is defined in this file; confirm
# against the data-loading script.
cm_words_bau <- cm_words %>%
  filter(word %in% bau) %>%
  mutate(word = case_when(
    word == "spencer" | word == "spence" | word == "reid" ~ "Spencer Reid",
    word == "derek" | word == "morgan" ~ "Derek Morgan",
    word == "aaron" | word == "hotch" | word == "hotchner" ~ "Aaron Hotchner",
    word == "david" | word == "dave" | word == "rossi" ~ "David Rossi",
    word == "jason" | word == "gideon" ~ "Jason Gideon",
    word == "emily" | word == "prentiss" ~ "Emily Prentiss",
    word == "jennifer" | word == "jj" | word == "jareau" ~ "Jennifer Jareau",
    word == "penelope" | word == "garcia" ~ "Penelope Garcia",
    word == "elle" | word == "greenaway" ~ "Elle Greenaway"
  ))
##### CHARACTERS #####
# Each character plot counts how many times that character's name is said per
# episode, split by whether the criminal was caught, faceted by season.
# The original file repeated the identical pipeline once per character; the
# logic is factored into one parameterized helper so all nine plots stay
# consistent and a change only has to be made once.  The resulting plots
# (variables `reid` through `greenaway`) are identical to the originals,
# including titles and labels.

# Build one caught-vs-mentions bar chart for a single BAU character.
#   data:           word counts with season / episode / caught / word columns
#   character_name: canonical value of `word` to keep, e.g. "Spencer Reid"
# Returns a ggplot object; each bar is one episode.
plot_character_by_caught <- function(data, character_name) {
  data %>%
    group_by(season) %>%
    filter(word == character_name) %>%
    group_by(season, episode, caught) %>%
    count() %>%
    drop_na(caught) %>%
    ggplot(aes(x = caught, y = n, group = episode, fill = caught)) +
    geom_col(position = "dodge", color = "white", show.legend = FALSE) +
    facet_wrap(~season) +
    scale_fill_manual(values = c("dodgerblue", "firebrick2")) +
    theme_light() +
    labs(x = "Criminal Caught?",
         y = "Number of Times Said in Episode",
         title = paste0("Is there a relationship between the criminal being caught and saying ",
                        character_name, "?"),
         subtitle = "Based on the first five seasons of Criminal Minds",
         caption = "Note: each bar represents a different episode")
}

reid      <- plot_character_by_caught(cm_words_bau, "Spencer Reid")
hotch     <- plot_character_by_caught(cm_words_bau, "Aaron Hotchner")
rossi     <- plot_character_by_caught(cm_words_bau, "David Rossi")
morgan    <- plot_character_by_caught(cm_words_bau, "Derek Morgan")
prentiss  <- plot_character_by_caught(cm_words_bau, "Emily Prentiss")
gideon    <- plot_character_by_caught(cm_words_bau, "Jason Gideon")
jj        <- plot_character_by_caught(cm_words_bau, "Jennifer Jareau")
garcia    <- plot_character_by_caught(cm_words_bau, "Penelope Garcia")
greenaway <- plot_character_by_caught(cm_words_bau, "Elle Greenaway")
# Write out the objects: persist each character plot as an .rds file under
# cm_analysis/objects/ (file name matches the variable name).
character_plot_names <- c("reid", "hotch", "rossi", "morgan", "prentiss",
                          "gideon", "jj", "garcia", "greenaway")
for (plot_name in character_plot_names) {
  write_rds(get(plot_name),
            paste0("./cm_analysis/objects/", plot_name, ".rds"))
}
##### BUZZWORDS #####
# Same design as the character plots: count per-episode mentions of a
# buzzword, split by whether the criminal was caught, faceted by season.
# Only the top buzzwords from the general overview are used, for simplicity.
# The original file repeated the identical pipeline once per word; it is
# factored into one parameterized helper here so every plot stays consistent.
# The resulting plots (`unsub` through `blood`) are identical to the
# originals, including titles and labels.

# Build one caught-vs-mentions bar chart for a single buzzword.
#   data:     word counts with season / episode / caught / word columns
#   buzzword: value of `word` to keep, e.g. "unsub"
# Returns a ggplot object; each bar is one episode.
plot_buzzword_by_caught <- function(data, buzzword) {
  data %>%
    group_by(season) %>%
    filter(word == buzzword) %>%
    group_by(season, episode, caught) %>%
    count() %>%
    drop_na(caught) %>%
    ggplot(aes(x = caught, y = n, group = episode, fill = caught)) +
    geom_col(position = "dodge", color = "white", show.legend = FALSE) +
    facet_wrap(~season) +
    scale_fill_manual(values = c("dodgerblue", "firebrick2")) +
    theme_light() +
    labs(x = "Criminal Caught?",
         y = "Number of Times Said in Episode",
         title = paste0("Is there a relationship between the criminal being caught and saying ",
                        buzzword, "?"),
         subtitle = "Based on the first five seasons of Criminal Minds",
         caption = "Note: each bar represents a different episode")
}

unsub   <- plot_buzzword_by_caught(cm_words_buzz, "unsub")
kill    <- plot_buzzword_by_caught(cm_words_buzz, "kill")
victim  <- plot_buzzword_by_caught(cm_words_buzz, "victim")
killer  <- plot_buzzword_by_caught(cm_words_buzz, "killer")
profile <- plot_buzzword_by_caught(cm_words_buzz, "profile")
murder  <- plot_buzzword_by_caught(cm_words_buzz, "murder")
serial  <- plot_buzzword_by_caught(cm_words_buzz, "serial")
blood   <- plot_buzzword_by_caught(cm_words_buzz, "blood")
# Suspect
suspect <- cm_words_buzz %>%
group_by(season) %>%
filter(word == "suspect") %>%
group_by(season, episode, caught) %>%
count() %>%
drop_na(caught) %>%
ggplot(aes(x = caught, y = n, group = episode, fill = caught)) +
geom_col(position = "dodge", color = "white", show.legend = FALSE) +
facet_wrap(~season) +
scale_fill_manual(values = c("dodgerblue", "firebrick2")) +
theme_light() +
labs(x = "Criminal Caught?",
y = "Number of Times Said in Episode",
title = "Is there a relationship between the criminal being caught and saying suspect?",
subtitle = "Based on the first five seasons of Criminal Minds",
caption = "Note: each bar represents a different episode")
# Criminal
criminal <- cm_words_buzz %>%
group_by(season) %>%
filter(word == "criminal") %>%
group_by(season, episode, caught) %>%
count() %>%
drop_na(caught) %>%
ggplot(aes(x = caught, y = n, group = episode, fill = caught)) +
geom_col(position = "dodge", color = "white", show.legend = FALSE) +
facet_wrap(~season) +
scale_fill_manual(values = c("dodgerblue", "firebrick2")) +
theme_light() +
labs(x = "Criminal Caught?",
y = "Number of Times Said in Episode",
title = "Is there a relationship between the criminal being caught and saying criminal?",
subtitle = "Based on the first five seasons of Criminal Minds",
caption = "Note: each bar represents a different episode")
# Write out objects
write_rds(unsub, "./cm_analysis/objects/unsub.rds")
write_rds(kill, "./cm_analysis/objects/kill.rds")
write_rds(victim, "./cm_analysis/objects/victim.rds")
write_rds(killer, "./cm_analysis/objects/killer.rds")
write_rds(profile, "./cm_analysis/objects/profile.rds")
write_rds(murder, "./cm_analysis/objects/murder.rds")
write_rds(serial, "./cm_analysis/objects/serial.rds")
write_rds(blood, "./cm_analysis/objects/blood.rds")
write_rds(suspect, "./cm_analysis/objects/suspect.rds")
write_rds(criminal, "./cm_analysis/objects/criminal.rds")
##### CRIMINAL TYPE #####
# For each of the 10 most common criminal types (from the general overview
# section), count how many criminals of that type were caught vs. not, per
# season. There is (except on rare occasions) only one criminal per episode,
# so no per-episode grouping or dodging is needed. The shared pipeline lives
# in plot_type() so each type is a one-line call.

# plot_type(): caught-vs-count plot for one criminal type.
#   type_value - the type to filter on, as stored in cm_type$value.
#   y_label    - y-axis label, e.g. "Number of Serial Killers".
#   phrase     - noun phrase for the title, e.g. "a serial killer".
# Returns a ggplot object faceted by season.
plot_type <- function(type_value, y_label, phrase) {
  cm_type %>%
    group_by(season) %>%
    filter(value == type_value) %>%
    count(value, caught) %>%
    ggplot(aes(x = caught, y = n, fill = caught)) +
    geom_col(show.legend = FALSE) +
    facet_wrap(~season) +
    scale_fill_manual(values = c("dodgerblue", "firebrick2")) +
    theme_light() +
    labs(x = "Criminal Caught?",
         y = y_label,
         title = paste0("Is there a relationship between being ", phrase,
                        " and being caught?"),
         subtitle = "Based on the first five seasons of Criminal Minds")
}

serial_killer <- plot_type("serial killer", "Number of Serial Killers", "a serial killer")
kidnapper     <- plot_type("kidnapper", "Number of Kidnappers", "a kidnapper")
spree_killer  <- plot_type("spree killer", "Number of Spree Killers", "a spree killer")
stalker       <- plot_type("stalker", "Number of Stalkers", "a stalker")
cop_killer    <- plot_type("cop killer", "Number of Cop Killers", "a cop killer")
serial_rapist <- plot_type("serial rapist", "Number of Serial Rapists", "a serial rapist")
copycat       <- plot_type("copycat", "Number of Copycats", "a copycat killer")
robber        <- plot_type("robber", "Number of Robbers", "a robber")
# Bug fix: the family-annihilator plot's y label previously read
# "Number of Serial Killers" (copy-paste error from the serial-killer plot).
family_a      <- plot_type("family annihilator", "Number of Family Annihilators", "a family annihilator")
proxy_killer  <- plot_type("proxy killer", "Number of Proxy Killers", "a proxy killer")

# Write out objects
write_rds(serial_killer, "./cm_analysis/objects/serial_killer.rds")
write_rds(kidnapper, "./cm_analysis/objects/kidnapper.rds")
write_rds(spree_killer, "./cm_analysis/objects/spree_killer.rds")
write_rds(stalker, "./cm_analysis/objects/stalker.rds")
write_rds(cop_killer, "./cm_analysis/objects/cop_killer.rds")
write_rds(serial_rapist, "./cm_analysis/objects/serial_rapist.rds")
write_rds(copycat, "./cm_analysis/objects/copycat.rds")
write_rds(robber, "./cm_analysis/objects/robber.rds")
write_rds(family_a, "./cm_analysis/objects/family_a.rds")
write_rds(proxy_killer, "./cm_analysis/objects/proxy_killer.rds")
##### ALIVE #####
# One plot per season: for criminals that were caught vs. not, how many were
# taken dead vs. alive? The criminal can only be caught or escape once per
# episode, so no per-episode grouping is needed. The shared pipeline lives
# in plot_alive() so each season is a one-line call.

# plot_alive(): caught-vs-alive count plot for one season.
#   season_id   - value to filter on in cm_season$season, e.g. "Season 1".
#   season_word - the season spelled out for the subtitle, e.g. "one".
# Returns a ggplot object with counts dodged by alive status.
plot_alive <- function(season_id, season_word) {
  cm_season %>%
    group_by(season) %>%
    filter(season == season_id) %>%
    count(caught, alive) %>%
    # Rows with unknown "caught" status cannot be classified; drop them.
    drop_na(caught) %>%
    ggplot(aes(x = caught, y = n, fill = alive)) +
    geom_col(position = "dodge", color = "white") +
    scale_fill_manual(values = c("darkslateblue", "dodgerblue",
                                 "darkorchid3", "firebrick2")) +
    theme_light() +
    labs(x = "Criminal Caught?",
         y = NULL,
         title = "Are criminals caught dead or alive?",
         subtitle = paste0("Based on the season ", season_word,
                           " of Criminal Minds"),
         fill = "Criminal Alive?")
}

alive_1 <- plot_alive("Season 1", "one")
alive_2 <- plot_alive("Season 2", "two")
alive_3 <- plot_alive("Season 3", "three")
alive_4 <- plot_alive("Season 4", "four")
alive_5 <- plot_alive("Season 5", "five")

# Write out objects
write_rds(alive_1, "./cm_analysis/objects/alive_1.rds")
write_rds(alive_2, "./cm_analysis/objects/alive_2.rds")
write_rds(alive_3, "./cm_analysis/objects/alive_3.rds")
write_rds(alive_4, "./cm_analysis/objects/alive_4.rds")
write_rds(alive_5, "./cm_analysis/objects/alive_5.rds")
|
a3ecdabfa266b09e067acfe12275543257f48b6a
|
a845452e3eb0a62a930b472bba9784e3ab8c34f8
|
/man/chancer.twitter.Rd
|
62c773cba0224be9a873aa53a8fd741f348c0856
|
[] |
no_license
|
martineastwood/chancer
|
79206f2c7cc7f5c32fb50aac0559c478ec67840d
|
67ec091b7e9586caa95cc0adc16acf6bdcded82d
|
refs/heads/master
| 2016-09-05T12:51:49.915751
| 2014-12-11T20:22:23
| 2014-12-11T20:22:23
| 27,889,911
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 240
|
rd
|
chancer.twitter.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{chancer.twitter}
\alias{chancer.twitter}
\title{Get a random twitter handle}
\usage{
chancer.twitter()
}
\description{
Get a random twitter handle
}
\examples{
chancer.twitter()
}
|
8b65cbc1700f100f934b672761a97ae6f15c69f7
|
3e049d608e79fe049d24a20a00da8d09d214bae8
|
/plot6.R
|
ea61b7451b07f9497e90174feaf53800ee82ce2c
|
[] |
no_license
|
awgroeneveld/ExplDataAnalysisProject2
|
ea4b7785cb177b388701cd2a6389bf4617b11074
|
4d66057d3a8a565985f6f9434a7dffb3c765b1d2
|
refs/heads/master
| 2021-01-20T04:37:05.546105
| 2017-04-28T13:08:43
| 2017-04-28T13:08:43
| 89,707,028
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,922
|
r
|
plot6.R
|
# plot6: compare motor-vehicle PM2.5 emission trends, normalised to each
# region's own 1999 total, for Baltimore City vs. Los Angeles County.
# Assumes the NEI data files are in the working directory.
library(ggplot2)

# Loading is slow, so reuse the objects if they are already in the
# environment.
if (!exists("NEI")) {
  NEI <- readRDS("summarySCC_PM25.rds")
}
if (!exists("SCC")) {
  SCC <- readRDS("Source_Classification_Code.rds")
}

# Source codes whose EI.Sector mentions "vehicle", then the matching
# observations for each region (Baltimore fips 24510, LA County fips 06037).
vehicleCodes <- subset(SCC, grepl("vehicle", EI.Sector, ignore.case = TRUE))
baltimoreObs <- subset(NEI, fips == "24510" & SCC %in% vehicleCodes$SCC)
laCountyObs <- subset(NEI, fips == "06037" & SCC %in% vehicleCodes$SCC)

# Yearly totals per region.
baltimoreByYear <- aggregate(Emissions ~ year, baltimoreObs, sum)
laCountyByYear <- aggregate(Emissions ~ year, laCountyObs, sum)

# Normalise each series against its own 1999 total.
baseBaltimore <- baltimoreByYear[baltimoreByYear$year == 1999, 2]
baseLaCounty <- laCountyByYear[laCountyByYear$year == 1999, 2]

# Combine both scaled series into one data frame (4 survey years each).
allScaled <- rbind(
  data.frame(year = baltimoreByYear$year,
             region = rep("Baltimore", 4),
             Emissions = baltimoreByYear$Emissions / baseBaltimore),
  data.frame(year = laCountyByYear$year,
             region = rep("LA County", 4),
             Emissions = laCountyByYear$Emissions / baseLaCounty)
)

png("plot6.png", width = 640, height = 480)
g <- ggplot(allScaled, aes(x = factor(year), y = Emissions, fill = region)) +
  geom_bar(stat = "identity", position = "dodge") +
  xlab("Years") +
  ylab("Total PM2.5 Emissions Relative to 1999") +
  ggtitle("Total Vehicle Emissions Relative to 1999 in Baltimore and Los Angeles County from 1999 to 2008")
print(g)
dev.off()
|
3a813757d8609a91c9753e7cd543684ee6359aa8
|
367d1e3344a5e792b3c10c29276dd3aed58e92b1
|
/Labs/Lab04B/app_scatterplot.R
|
86e8732eccac0a749b019df90815b7acc6998194
|
[] |
no_license
|
Bkwon23/First-Repository
|
015b8ab90613d410b2e25ba3fa2d8bd7a84f8d92
|
710b801c734e10608e69587f3eb41424bfab1206
|
refs/heads/master
| 2023-04-19T12:25:25.173537
| 2021-05-14T04:48:29
| 2021-05-14T04:48:29
| 339,508,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,684
|
r
|
app_scatterplot.R
|
# Shiny app: scatterplot of NFL player suspensions per year, using the
# nfl_suspensions data from the fivethirtyeight package.
library(shiny)
library(shinythemes)
library(tidyverse)
library(DT)
library(ggrepel)
library(fivethirtyeight)
###############
# import data #
###############
# Count suspensions per year; this summary feeds the scatterplot below.
# NOTE(review): group_by(year) followed by count(year) leaves the result
# grouped by year - harmless for plotting, but an ungroup() would be cleaner.
nflsuspensions1 <- fivethirtyeight::nfl_suspensions %>%
  group_by(year) %>%
  count(year)
###################################################
# define choice values and labels for user inputs #
###################################################
# define vectors for choice values and labels
# can then refer to them in server as well (not just in defining widgets)
# for radio button, can be separate
# (have choiceValues and choiceNames options, rather than just choices)
size_choice_values <- c("year")   # internal values used by the server
size_choice_names <- c("Year")    # labels shown to the user
names(size_choice_values) <- size_choice_names
############
#    ui    #
############
# One navbar page with a single tab: sidebar holds the variable picker,
# main panel holds the plot.
ui <- navbarPage(
  title="NFL Suspensions",
  tabPanel(
    title = "Scatterplot",
    sidebarLayout(
      sidebarPanel(
        # Only one choice ("year") is offered right now; the widget exists
        # so more variables can be added to size_choice_values later.
        selectInput(inputId = "id_name"
                    , label = "Choose variable of interest:"
                    , choices = size_choice_values
                    , selected = "year")
      ),
      mainPanel(
        plotOutput(outputId = "scatter")
      )
    )
  )
)
############
#  server  #
############
server <- function(input,output){
  # TAB 2: INTERACTIVE SCATTERPLOT
  # NOTE(review): input$id_name is never read in this block, so the plot
  # does not react to the selector yet - presumably a placeholder for when
  # more variables are offered; confirm intended behavior.
  output$scatter <- renderPlot({
    nflsuspensions1 %>%
      # NOTE(review): aes_string() is soft-deprecated in modern ggplot2;
      # aes(.data[["year"]], .data[["n"]]) is the current equivalent.
      ggplot(aes_string(x="year", y= "n")) +
      geom_point(color = "#2c7fb8") +
      labs(x = "Year", y = "Number of NFL Players Suspended"
           , title = "NFL Players' Suspensions by Year")
  })
}
####################
# call to shinyApp #
####################
shinyApp(ui = ui, server = server)
|
57064d48705eb2ce3d2a37dbfc370da49d4b306c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OIdata/examples/sp500.Rd.R
|
e2b1434d87a04ee150110aea4194e83c4a8e3e79
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 386
|
r
|
sp500.Rd.R
|
# Example plots for the OIdata `sp500` dataset (financial information for
# 50 S&P 500 companies).
library(OIdata)
### Name: sp500
### Title: Financial information for 50 S&P 500 companies
### Aliases: sp500
### Keywords: datasets SP 500 stocks financial money
### ** Examples
data(sp500)
# Linear-scale scatterplots: enterprise value vs. pre-tax earnings, and
# EV/revenue vs. forward P/E.
plot(sp500$ent_value, sp500$earn_before)
plot(sp500$ev_over_rev, sp500$forward_pe)
# The same two pairs plotted on log-log axes (log="xy").
plot(sp500$ent_value, sp500$earn_before, log="xy")
plot(sp500$ev_over_rev, sp500$forward_pe, log="xy")
|
fe2581f1315a2b03f7e6db7f489cf62e58f3f580
|
1aa413301db92dd918278f7d309260b0130f8cd8
|
/R/get_text.R
|
c85c188ba2a309fef8ec47e1f9c1ed27ce51c738
|
[] |
no_license
|
philgee1981/btcecho
|
19467886cb335ddd3a6f28e67ac3edf883d979ab
|
37b01871ecb72da58e11643394c89428b9c8adf9
|
refs/heads/master
| 2022-01-10T01:23:24.811566
| 2019-05-27T15:50:27
| 2019-05-27T15:50:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 318
|
r
|
get_text.R
|
# get_text(): build a message by filling a randomly chosen template.
#
# Picks one template at random from the texte$description rows whose
# sentiment column matches `sentiment`, then substitutes the placeholders:
#   XX -> data_line$links
#   YY -> absolute 24h percent change (abs of percent_change_24h)
#   ZZ -> signed 24h percent change
# Numeric replacements are coerced to character by gsub(), as before.
#
#   data_line - one row of market data with columns percent_change_24h
#               and links.
#   sentiment - sentiment class used to select candidate templates.
#   texte     - template table with columns description and sentiment.
# Returns a single character string.
get_text <- function(data_line, sentiment, texte) {
  template <- sample(as.character(texte$description[texte$sentiment == sentiment]), 1, replace = TRUE)
  change <- as.numeric(data_line$percent_change_24h)
  out <- gsub("XX", data_line$links, template)
  out <- gsub("YY", abs(change), out)
  gsub("ZZ", change, out)
}
|
2b9e674d7c9dc928801717bd54fc6fdc033409da
|
5143de6915d642fe4bde933fc821b6f60d401df3
|
/app.r
|
b19a8494edf9371af7da0ac1ce50931430018def
|
[] |
no_license
|
evilla-19/shiny_annotation_times
|
8e762d14b828d09bc075258ff1fff8b62cd8fc88
|
ebe5bc68d2131165a012d841435d8845ab3e7529
|
refs/heads/master
| 2020-03-22T21:59:06.534281
| 2018-11-01T07:33:56
| 2018-11-01T07:33:56
| 140,726,336
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,783
|
r
|
app.r
|
########################################
########## libraries ###################
########################################
library(shiny)
library(ggplot2)
library(dplyr)
library(DT)
library(chron)
library(plotly)
########################################
########## read in data ################
########################################
# setwd('/Users/benito/Documents/SourceData/Shiny')
# Load the annotation log once at app start-up.
annotation_data <- read.delim("Live_Manuscripts.txt", stringsAsFactors = FALSE)
# Keep the first 186 rows and drop columns 19:28 (trailing/unused columns).
# NOTE(review): the hard-coded 186 assumes a fixed-length export - confirm
# against the current Live_Manuscripts.txt.
annotation_data <- annotation_data[1:186, -c(19:28)]
annotation_data$Journal <- as.factor(annotation_data$Journal)

# convert_times(): convert "HH:MM:SS" timestamps to minutes.
#   input_time - character vector parseable by chron::times().
# Returns a numeric vector of minutes; times() yields fractions of a day,
# hence the 60 * 24 scaling.
convert_times <- function(input_time) {
  60 * 24 * as.numeric(times(input_time))
}

annotation_data$timeInMins <- convert_times(annotation_data$Time.required)
########################################
########## UI layout ###################
########################################
# UI: a fluid page with an absolutely-positioned panel containing two
# stacked sidebar wells (manuscript stats and journal stats) on the left
# and two plot wells on the right.
ui = fluidPage(
  theme = 'styles.css',
  absolutePanel(left = '25%', right = '0%',width = 'auto',
                titlePanel('Visualization of Manu\'s annotation times'),
                sidebarLayout(
                  sidebarPanel(
                    # Manuscript-level controls: y-axis variable plus two
                    # toggles consumed by output$ms_vs_variable.
                    wellPanel(style = 'height:400px',
                              h3('Manuscript statistics'),
                              p('Choose which variables to plot on the y axis for each manuscript. Some variables are not available for all manuscripts.'),
                              # NOTE(review): `label` is given the same vector as
                              # `choices`; selectInput expects a single label
                              # string - confirm this renders as intended.
                              selectInput(
                                inputId = 'y_axis',
                                choices = colnames(annotation_data)[c(14:19)],
                                label = colnames(annotation_data)[c(14:19)],
                                selected = colnames(annotation_data)[19]
                              ),
                              checkboxInput(
                                inputId = 'colorByJournal',
                                label = 'Check to color by Journal',
                                value = FALSE
                              ),
                              checkboxInput(
                                inputId = 'displayMean',
                                label = 'Check to display mean of y axis',
                                value = FALSE
                              )
                    ),
                    # Journal-level control: y-axis variable consumed by
                    # output$journal_vs_variable.
                    wellPanel(style = 'height:400px',
                              h3('Journal statistics'),
                              p('Choose which average variable to plot on the y axis for each journal. Except for time, all the other variables are based only on the subset of manuscripts with underlying source data.'),
                              selectInput(
                                inputId = 'journal_y_axis',
                                choices = colnames(annotation_data)[14:19],
                                label = colnames(annotation_data)[14:19],
                                selected = colnames(annotation_data)[19]
                              )
                    )
                  ),
                  # NOTE(review): sidebarLayout's second argument is a second
                  # sidebarPanel rather than a mainPanel - it works visually
                  # here but deviates from the usual sidebarLayout contract.
                  sidebarPanel(width = 4,
                               wellPanel(
                                 plotOutput(height = '360px',
                                            outputId = 'ms_vs_variable')
                               ),
                               wellPanel(
                                 plotOutput(height = '360px',
                                            outputId = 'journal_vs_variable')
                               )
                  )
                ))
)
#####################################
########## Server ###################
#####################################
# Server: builds the two plots from the y-axis variables selected in the UI.
server = function(input, output)
{
  # Reactive: one-column data frame holding the manuscript-level variable
  # currently selected in the 'y_axis' dropdown.
  # NOTE(review): matches(input$y_axis) can select more than one column if
  # names overlap (regex match) - confirm column names are unambiguous.
  selected_y_var = reactive(
    {
      req(input$y_axis)
      dplyr::select(annotation_data, matches(input$y_axis))
    }
  )
  # First plot, of manuscript stats, conditional to whether or not the
  # checkbox for 'color per journal' has been checked.
  output$ms_vs_variable = renderPlot(
    {
      # NOTE(review): timeInMins is assigned but never used below - looks
      # like leftover debugging.
      timeInMins = annotation_data$timeInMins
      if (input$colorByJournal == FALSE)
      {
        # Un-colored bars: one bar per manuscript, indexed by row position.
        # NOTE(review): aes() receives a one-column data frame from
        # selected_y_var(); ggplot appears to tolerate this, but a vector
        # (e.g. [[1]]) would be the conventional form - verify.
        mainplot =
          ggplot(annotation_data,
                 aes(x = 1:length(annotation_data$Tracking.number),y = selected_y_var())) +
          geom_bar(stat = 'identity', fill = '#1a3f7a', width = 0.6) +
          xlab('Manuscript') +
          ylab(names(selected_y_var())) +
          ggtitle(paste(names(selected_y_var()), 'per manuscript')) +
          theme_bw() +
          theme(panel.border = element_blank(),
                panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                axis.line = element_line(colour = "black")
          )
        if (input$displayMean == FALSE)
        {
          mainplot
        }
        else
        {
          # Overlay a horizontal line at the mean of the selected variable.
          y_axis_mean = mean(na.omit(selected_y_var()[,1]))
          # observe({print(selected_y_var())})
          # print(y_axis_mean)
          mainplot + geom_hline(yintercept = y_axis_mean, col = 'darkred', size = 0.5)
        }
      }
      else
      {
        # Same plot but with bars filled by Journal and a legend on top.
        # NOTE(review): this branch duplicates the one above except for
        # fill/legend - a candidate for refactoring.
        mainplot =
          ggplot(annotation_data,
                 aes(x = 1:length(annotation_data$Tracking.number),y = selected_y_var(), fill = Journal)) +
          geom_bar(stat = 'identity', width = 0.6) +
          xlab('Manuscript') +
          ylab(names(selected_y_var())) +
          ggtitle(paste(names(selected_y_var()), 'per manuscript')) +
          theme_bw() +
          theme(panel.border = element_blank(),
                panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                axis.line = element_line(colour = "black"),
                legend.position = 'top'
          )
        if (input$displayMean == FALSE)
        {
          mainplot
        }
        else
        {
          y_axis_mean = mean(na.omit(selected_y_var()[,1]))
          # observe({print(selected_y_var())})
          # print(y_axis_mean)
          mainplot + geom_hline(yintercept = y_axis_mean, col = 'darkred', size = 0.5)
        }
      }
    }
  )
  # Reactive: one-column data frame holding the journal-level variable
  # selected in the 'journal_y_axis' dropdown.
  selected_y_var_journal = reactive(
    {
      req(input$journal_y_axis)
      dplyr::select(annotation_data, matches(input$journal_y_axis))
    }
  )
  # Second plot: per-journal boxplots (with overlaid points) of the
  # selected variable.
  output$journal_vs_variable = renderPlot(
    {
      mainplot =
        ggplot(annotation_data,
               aes(x = annotation_data$Journal, y = selected_y_var_journal(), fill = Journal)) +
        geom_boxplot(alpha = 0.5) +
        geom_point(aes(col = Journal)) +
        xlab('Journal') +
        ylab(names(selected_y_var_journal())) +
        ggtitle(paste(names(selected_y_var_journal()), 'per manuscript')) +
        theme_bw() +
        theme(panel.border = element_blank(),
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              axis.line = element_line(colour = "black"),
              legend.position = 'top')
      mainplot
    }
  )
}
shinyApp(ui = ui, server = server)
# shiny::runApp(display.mode="showcase")
|
7226cd7fade03afd3df9b1c7d0a290b369252d31
|
08bd5a7e355670ae7677e7676dc216e0a14c2a5a
|
/PASCAL/write_GWAS_metadata_non_neurological.R
|
0a285e8b93285ad34b253570d931acbf5f4135db
|
[] |
no_license
|
zmx21/GSK_NI
|
1b79f1933e245a1deab902eebf83370f8c6af4ea
|
3e89f15e4d906d704193346fa0ea6b914e89a881
|
refs/heads/master
| 2020-03-18T08:09:21.607329
| 2018-09-02T03:15:19
| 2018-09-02T03:15:19
| 134,493,420
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,588
|
r
|
write_GWAS_metadata_non_neurological.R
|
library(data.table)
library(dplyr)
# source('annotate_clusters.R')
# GetGWASMetdataNonNeuro(): summarise non-neurological GWAS studies and
# render a metadata table with grid.arrange().
#
#   sigClusters  - data frame of significant clusters (needs columns
#                  StudyName, Biotype, Genes); only used when
#                  clusterLevel=TRUE.
#   clusterLevel - FALSE: count genome-wide significant genes (p < 5e-8)
#                  per study from the raw PASCAL gene scores.
#                  TRUE: additionally intersect significant genes with the
#                  genes of significant co-expression clusters.
# Side effects: reads files from hard-coded /local/... paths and draws a
# table; no value is returned for callers to use.
# NOTE(review): function name has a typo ("Metdata") and uses T/F instead
# of TRUE/FALSE - left unchanged to preserve the interface.
GetGWASMetdataNonNeuro <- function(sigClusters=NULL,clusterLevel=T){
  GWASMetadata <- read.csv('/local/data/public/zmx21/zmx21_private/GSK/GWAS/GWASMetadata_NonNeuro.csv',stringsAsFactors = F)
  if(!clusterLevel){
    # Get significant genes included in GWAS studies: crawl the PASCAL
    # gene-score files (one per study) and count genes with p < 5e-8,
    # first for the coding network, then for coding + lncRNA.
    PASCAL_raw_Path <- '/local/data/public/zmx21/zmx21_private/GSK/GWAS/PASCAL_results/KEGG_Ensembl_Coding/'
    PASCAL_results_raw <- dir(PASCAL_raw_Path)
    PASCAL_results_raw <- PASCAL_results_raw[grep('sum.genescores',PASCAL_results_raw)]
    numSigGenesCoding <- sapply(PASCAL_results_raw,function(x) data.table::fread(paste0(PASCAL_raw_Path,x)) %>%
                                  dplyr::filter(pvalue < 5e-8) %>% nrow)
    PASCAL_raw_Path <- '/local/data/public/zmx21/zmx21_private/GSK/GWAS/PASCAL_results/KEGG_Ensembl_All/'
    PASCAL_results_raw <- dir(PASCAL_raw_Path)
    PASCAL_results_raw <- PASCAL_results_raw[grep('sum.genescores',PASCAL_results_raw)]
    numSigGenesAll <- sapply(PASCAL_results_raw,function(x) data.table::fread(paste0(PASCAL_raw_Path,x)) %>%
                               dplyr::filter(pvalue < 5e-8) %>% nrow)
    # Derive the study name from the file name: strip the ".sum..." suffix
    # and normalise dots to underscores to match GWASMetadata$StudyName.
    StudyName <- sapply(names(numSigGenesCoding),function(x) unlist(strsplit(x,'.sum'))[1])
    StudyName <- sapply(StudyName,function(x) gsub('[.]','_',x))
    df <- data.frame(StudyName,numSigGenesCoding,numSigGenesAll)
    colnames(df) <- c('Study Name','Sig Coding Genes','Sig Coding + lncRNA Genes')
    rownames(df) <- c()
    df <- inner_join(df,GWASMetadata,by=c('Study Name'='StudyName')) %>% dplyr::rename('Num Subjects'=N)
    grid.arrange(tableGrob(df))
  }else{
    # Genes which are included in the coding co-expression network:
    # per-study counts and the significant gene IDs themselves.
    PASCAL_cluster_coding_path <- '/local/data/public/zmx21/zmx21_private/GSK/GWAS/PASCAL_results_non_neurological/Pearson_Cor0p2//CodingGenesMicroglia_Pearson_cor0p2_abs//'
    PASCAL_results_coding <- dir(PASCAL_cluster_coding_path)
    PASCAL_results_coding <- PASCAL_results_coding[grep('sum.genescores',PASCAL_results_coding)]
    numSigGenesIncludedCoding <- sapply(PASCAL_results_coding,function(x) data.table::fread(paste0(PASCAL_cluster_coding_path,x)) %>%
                                          dplyr::filter(pvalue < 5e-8) %>% nrow)
    sigGenesCoding <- lapply(PASCAL_results_coding,function(x) data.table::fread(paste0(PASCAL_cluster_coding_path,x)) %>%
                               dplyr::filter(pvalue < 5e-8) %>% select(gene_id) %>% t() %>% as.vector)
    # Genes which are included in the coding + lncRNA co-expression network.
    PASCAL_cluster_all_path <- '/local/data/public/zmx21/zmx21_private/GSK/GWAS/PASCAL_results_non_neurological/Pearson_Cor0p2/AllGenesMicroglia_Pearson_cor0p2_abs/'
    PASCAL_results_all <- dir(PASCAL_cluster_all_path)
    PASCAL_results_all <- PASCAL_results_all[grep('sum.genescores',PASCAL_results_all)]
    numSigGenesIncludedAll <- sapply(PASCAL_results_all,function(x) data.table::fread(paste0(PASCAL_cluster_all_path,x)) %>%
                                       dplyr::filter(pvalue < 5e-8) %>% nrow)
    sigGenesAll <- lapply(PASCAL_results_all,function(x) data.table::fread(paste0(PASCAL_cluster_all_path,x)) %>%
                            dplyr::filter(pvalue < 5e-8) %>% select(gene_id) %>% t() %>% as.vector)
    # Store per-study counts in a data frame, keyed by StudyName derived
    # from the file names (same normalisation as the other branch).
    SigStudiesStats <- data.frame(N_Sig_Coding_Genes_In_Network=numSigGenesIncludedCoding,
                                  N_Sig_Coding_and_LncRNA_Genes_In_Network=numSigGenesIncludedAll)
    SigStudiesStats$StudyName <- sapply(rownames(SigStudiesStats),function(x) unlist(strsplit(x,'.sum'))[1])
    SigStudiesStats$StudyName <- sapply(SigStudiesStats$StudyName,function(x) gsub('[.]','_',x))
    numSigClusters <- sigClusters %>% dplyr::count(StudyName)
    numSigClusters$n[is.na(numSigClusters$n)] <- 0 #Replace NA with 0
    # Find number of significant genes in significant clusters, for the
    # coding and coding+lncRNA co-expression networks. The loop assumes
    # sigGenesCoding and sigGenesAll are index-aligned with
    # SigStudiesStats rows (both derive from the sorted dir() listings) -
    # NOTE(review): confirm both directories contain the same studies in
    # the same order.
    numSigGenesInSigClustersCoding <- rep(NA,length(sigGenesCoding))
    numSigGenesInSigClustersAll <- rep(NA,length(sigGenesCoding))
    for(i in 1:length(sigGenesCoding)){
      currentStudy <- SigStudiesStats$StudyName[i]
      # consider clusters from coding network
      currentStudySigClusters <- dplyr::filter(sigClusters,StudyName==currentStudy & Biotype=='coding')
      currentStudyIncludedGenes <- unique(unlist(currentStudySigClusters$Genes))
      numSigGenesInSigClustersCoding[i] <- length(intersect(sigGenesCoding[[i]],currentStudyIncludedGenes))
      # consider clusters from coding + lncRNA network
      currentStudySigClusters <- dplyr::filter(sigClusters,StudyName==currentStudy & Biotype=='all')
      currentStudyIncludedGenes <- unique(unlist(currentStudySigClusters$Genes))
      numSigGenesInSigClustersAll[i] <- length(intersect(sigGenesAll[[i]],currentStudyIncludedGenes))
    }
    SigStudiesStats$N_SigCodingGenesInSigClusters <- numSigGenesInSigClustersCoding
    SigStudiesStats$N_SigAllGenesInSigClusters <- numSigGenesInSigClustersAll
    # Join the counts with the study metadata and the per-study cluster
    # counts, then keep/rename the display columns.
    GWASFullMetadata <- dplyr::left_join(SigStudiesStats,GWASMetadata,by=c('StudyName'='StudyName')) %>%
      dplyr::left_join(numSigClusters,by=c('StudyName'='StudyName')) %>% rename_('N_Subjects'='N','N_Sig_Clusters'='n')
    GWASFullMetadata <- GWASFullMetadata[,c('StudyName','N_Sig_Coding_Genes_In_Network',
                                            'N_SigCodingGenesInSigClusters','N_Sig_Coding_and_LncRNA_Genes_In_Network',
                                            'N_SigAllGenesInSigClusters')]
    colnames(GWASFullMetadata) <- c('Study Name','Sig Coding Genes \n In Network',
                                    'Sig Coding Genes \n In Sig Cluster',
                                    'Sig Coding + lncRNA Genes \n In Network',
                                    'Sig Coding + lncRNA Genes \n In Sig Cluster')
    # Append a TOTAL row summing each count column.
    totalRow <- data.frame('TOTAL',sum(GWASFullMetadata[,2]),
                           sum(GWASFullMetadata[,3]),
                           sum(GWASFullMetadata[,4]),
                           sum(GWASFullMetadata[,5]))
    colnames(totalRow) <- colnames(GWASFullMetadata)
    GWASFullMetadata <- rbind(GWASFullMetadata,totalRow)
    library(gridExtra)
    # Table theme: plain rows with alternating grey fill, the TOTAL row in
    # bold italic on yellow.
    t1 <- ttheme_default(core=list(
      fg_params=list(fontface=c(rep("plain", nrow(GWASFullMetadata)-1), "bold.italic")),
      bg_params = list(fill=c(rep(c("grey95", "grey90"),
                                  length.out=nrow(GWASFullMetadata)-1), "yellow"))
    ))
    grid.arrange(tableGrob(GWASFullMetadata,theme=t1))
  }
}
|
2ee831f0d6efd5db0e89a0ab161781637f49c297
|
d30d0e8bd889a4c24e81af6c1a974d046857a2e5
|
/Final Project/model_code.R
|
a6c07fbaabd3b5edc44be39f1ae9cc51a2ac284d
|
[] |
no_license
|
jmerranko/infsci_2725
|
a9f75354d7bf5f7ad6cc97e11900ac9f1a2513db
|
7f29c901fa4cec9b97e3c4aef5795986e1b528ba
|
refs/heads/master
| 2021-01-10T09:54:00.006511
| 2016-04-18T19:32:55
| 2016-04-18T19:32:55
| 50,963,572
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,329
|
r
|
model_code.R
|
# Final-project modelling script: fits random forest, SVM and GBM regressors
# that predict search-result relevance from three text-match features
# (details/brand/title match ratios), then scores the hold-out set and writes
# one (id, relevance) prediction CSV per model.
library(foreign)
library(ROCR)
library(e1071)
library(randomForest)
library(party)
library(FSelector)
library(rpart)
library(gbm)

set.seed(1)

# Engineered feature sets (paths are machine-specific).
model <- read.csv("C:/Users/John/Desktop/Project/final_predictors_no_numbers.csv")
final_test <- read.csv("C:/Users/John/Desktop/Project/final_test_no_numbers.csv")

# Impute missing brand-match ratios with the median of the observed values.
model$brand_match_ratio <- ifelse(is.na(model$brand_match_ratio),
                                  median(na.omit(model$brand_match_ratio)),
                                  model$brand_match_ratio)
final_test$brand_match_ratio <- ifelse(is.na(final_test$brand_match_ratio),
                                       median(na.omit(final_test$brand_match_ratio)),
                                       final_test$brand_match_ratio)

# Binary "brand matched at all" flag.  NAs were imputed just above, so a
# direct comparison is safe.  (The original script had an extra is.na() pass
# here whose result was immediately overwritten -- dead code, removed.)
model$brand_match_yn <- ifelse(model$brand_match_ratio > 0, 1, 0)
final_test$brand_match_yn <- ifelse(final_test$brand_match_ratio > 0, 1, 0)

# Univariate and combined linear-model sanity checks of the predictors.
summary(lm(data = model, relevance ~ brand_match_ratio))
summary(lm(data = model, relevance ~ title_match_ratio))
summary(lm(data = model, relevance ~ details_match_ratio))
summary(lm(data = model, relevance ~ details_match_ratio + brand_match_ratio + title_match_ratio))

# All models share the same formula; define it once instead of repeating it.
rel_formula <- relevance ~ details_match_ratio + brand_match_ratio + title_match_ratio

# Single regression tree, mostly for visual inspection of the splits.
tree <- rpart(data = model, rel_formula)
plot(tree, uniform = TRUE)
text(tree, all = TRUE, cex = .75)

# Random forests (mtry = 1: one candidate feature per split).
rf1 <- randomForest(data = model, rel_formula, ntree = 500, mtry = 1)
rf2 <- randomForest(data = model, rel_formula, ntree = 1000, mtry = 1)

# 2-fold cross-validated SVMs over several kernels; print total CV MSE.
svm1cv <- svm(seed = 1, data = model, rel_formula, kernel = "linear", cross = 2)
svm1cv$tot.MSE
svm2cv <- svm(seed = 1, data = model, rel_formula, kernel = "polynomial", order = 2, cross = 2)
svm2cv$tot.MSE
svm3cv <- svm(seed = 1, data = model, rel_formula, kernel = "polynomial", order = 3, cross = 2)
svm3cv$tot.MSE
svm4cv <- svm(seed = 1, data = model, rel_formula, kernel = "radial", cross = 2)
svm4cv$tot.MSE

# 2-fold cross-validated GBMs; print mean training error.
gbm1cv <- gbm(data = model, rel_formula, distribution = "gaussian", n.trees = 500, cv.folds = 2)
mean(gbm1cv$train.error)
gbm2cv <- gbm(data = model, rel_formula, distribution = "gaussian", n.trees = 1000, cv.folds = 2)
mean(gbm2cv$train.error)

# Final models refit on the full training data (no cross-validation).
svm1 <- svm(seed = 1, data = model, rel_formula, kernel = "linear")
svm2 <- svm(seed = 1, data = model, rel_formula, kernel = "polynomial", order = 2)
svm3 <- svm(seed = 1, data = model, rel_formula, kernel = "polynomial", order = 3)
svm4 <- svm(seed = 1, data = model, rel_formula, kernel = "radial")
gbm1 <- gbm(data = model, rel_formula, distribution = "gaussian", n.trees = 500)
gbm2 <- gbm(data = model, rel_formula, distribution = "gaussian", n.trees = 1000)

# Score the hold-out set with every model.  predict.gbm requires an explicit
# n.trees; the other predict methods use their defaults.
predictions <- list(
  rf1 = predict(rf1, newdata = final_test),
  rf2 = predict(rf2, newdata = final_test),
  svm1 = predict(svm1, newdata = final_test),
  svm2 = predict(svm2, newdata = final_test),
  svm3 = predict(svm3, newdata = final_test),
  svm4 = predict(svm4, newdata = final_test),
  gbm1 = predict(gbm1, newdata = final_test, n.trees = 500),
  gbm2 = predict(gbm2, newdata = final_test, n.trees = 1000)
)

# One CSV per model, with the same filenames the original script produced.
for (model_name in names(predictions)) {
  out <- data.frame(id = final_test$id, relevance = predictions[[model_name]])
  write.csv(out, paste0("C:/Users/John/Desktop/Project/Predictions/",
                        model_name, "_predict_out_nn.csv"))
}
|
d90ec32e2f57670cd83fd3f15e3c1036fa7f6945
|
49c918bc3e90a2eb8ec00d687d723cd0a8c9d008
|
/man/fit_dynamic_model.Rd
|
4dd5a1c72043c190b706997cf5b752cb7d99cc74
|
[
"MIT"
] |
permissive
|
sbfnk/measles.katanga
|
06feeb9d47e6ac829ff5c2f7b90560342fd21aea
|
5b1a1eee5c1217edb15f071647e90bb0a4680674
|
refs/heads/master
| 2020-06-25T19:44:45.559197
| 2019-11-08T15:21:39
| 2019-11-08T15:21:39
| 199,405,257
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,207
|
rd
|
fit_dynamic_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_dynamic_model.r
\name{fit_dynamic_model}
\alias{fit_dynamic_model}
\title{Fit a dynamic model to the measles outbreak in Katanga}
\usage{
fit_dynamic_model(model, year = 2015, end = 5, start_threshold = 10,
vaccine_delay = 0, nbdata, particles, under_5_multiplier = 0.169)
}
\arguments{
\item{model}{a 'bi_model'}
\item{year}{the year to which to fit}
\item{end}{how many weeks to retain after the last data point}
\item{start_threshold}{when this number is exceeded over a two-week period,
start fitting}
\item{vaccine_delay}{delay in vaccine protection; default: 0 (instantaneous protection)}
\item{nbdata}{number of data points to require; if fewer data points are
available in a health zone, the health zone will be discarded}
\item{particles}{number of particles, if given; otherwise will be determined
from the variance of the log-likelihood estimate}
\item{under_5_multiplier}{multiplier for population size to get under-5
population size; default: 0.169}
}
\value{
libbi object containing the posterior samples
}
\description{
Uses the LibBi interface provided by the rbi package
}
\author{
Sebastian Funk
}
|
1f544eddd2a1760483755fe41090d0e6de42976f
|
ef3c1289fd1bc261481967bfc6600dc60f47d1c0
|
/Code.R
|
fca0df513ee008628cc892b73192febfc7e69399
|
[] |
no_license
|
lpabbisetty/Thesis-Project
|
1d56c3c656534789179ee8a6bd6d6ae5883d5fc8
|
7a4dffeff2e7bd597839fdf28e0460a58dfc4725
|
refs/heads/master
| 2021-01-25T04:22:43.694832
| 2017-06-14T01:08:03
| 2017-06-14T01:08:03
| 93,436,491
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 477
|
r
|
Code.R
|
# ---
# title: "Thesis Project Code"
# output:
#   html_document:
#     keep_md: yes
# ---
#trimming and mapping
#```{r}
# fastqc
#trimmomatic
#fastqc
#kallisto
#mv BrapaV2.1PacBio.cds.fa.ft.removed.updated BrapaV2.1PacBio.cds.fa
#kallisto index -i BrapaV2.1PacBio.cds.kai BrapaV2.1PacBio.cds.fa
#kallisto quant --single --plaintext -l 250 -s 50 -t 12 -i ~/reference/brapa/BrapaV2.1PacBio.cds.kai -o kallisto_output br_rnaseq_bt_sush_4_7_16e_S8_L004_R1_001.fastq.gz_trimmed2.fastq
#```
|
3c59ae699590e2baed9d9d1ee5b188814b114b26
|
0b13ffc2398a6c3359f071ce086e2cb893113d27
|
/man/align_taxa.Rd
|
c44404b18253d07c8baba7a13ff0633be2e5c641
|
[] |
no_license
|
snubian/apcnames
|
b5beca7f24bf8409dd73bac70f622afff5d4dd4a
|
b169266255397530821bbece9c63d36837eb3cc6
|
refs/heads/master
| 2022-12-07T00:25:30.481445
| 2020-09-01T23:58:39
| 2020-09-01T23:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 584
|
rd
|
align_taxa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_names.R
\name{align_taxa}
\alias{align_taxa}
\title{Checks all taxa within against our list of known species
If not found, and update=TRUE, checks the unknown species against}
\usage{
align_taxa(
original_name,
output = "taxonomic_updates.csv",
max_distance_abs = 3,
max_distance_rel = 0.2,
ver = default_version()
)
}
\arguments{
\item{diffchar}{}
}
\description{
Checks all taxa within against our list of known species
If not found, and update=TRUE, checks the unknown species against
}
|
df4735fefac9bd6aa585488d326afaefb00b7b5b
|
f8f9f7f90086c118810020513ded3153388627c4
|
/EstudoCaso1.R
|
21bd7447371f32c800ef1a38aefa7759cb921f93
|
[] |
no_license
|
reginaborges/Analise_risco_cartao
|
e56d37787b0f94b217842c2895e15ab65ac8c8b4
|
d3a9eef2221f3d6b7f7231b5fec4d2f638cdc090
|
refs/heads/main
| 2023-08-17T02:06:12.278549
| 2021-09-21T12:29:08
| 2021-09-21T12:29:08
| 408,810,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,479
|
r
|
EstudoCaso1.R
|
# Case Study 1 - Data analysis for credit-card operators.
# Pipeline: load the dataset, Box-Cox pre-process it, convert categorical
# columns to factors, select predictors with a random forest, then fit and
# evaluate a KNN model that predicts payment default.
# Set the working directory
setwd("~/Dropbox/DSA/AnaliseEstatisticaI/Modulo02/EstudoCaso")
# Install and load the packages
install.packages("mlbench")
install.packages("caret")
install.packages("e1071")
library(mlbench)
library(caret)
library(e1071)
# Load the dataset
mydata <- read.csv("database.csv")
View(mydata)
# Check how skewed the credit-limit variable is
skewness(mydata$LIMIT_BAL)
histogram(mydata$LIMIT_BAL)
# Summarize the data
summary(mydata)
str(mydata)
# Compute the pre-processing parameters (Box-Cox transform)
preprocessParams <- preProcess(mydata, method=c("BoxCox"))
print(preprocessParams)
# Transform the dataset using those parameters
transformed <- predict(preprocessParams, mydata)
mydata <- transformed
# Summarize the transformed data (skewness should be reduced)
str(mydata)
skewness(mydata$LIMIT_BAL)
histogram(mydata$LIMIT_BAL)
# Convert the categorical variables to factors
mydata$default.payment.next.month <- factor(mydata$default.payment.next.month)
mydata$SEX <- as.factor(mydata$SEX)
mydata$EDUCATION <- as.factor(mydata$EDUCATION)
mydata$MARRIAGE <- as.factor(mydata$MARRIAGE)
mydata = na.omit(mydata)
summary(mydata)
str(mydata)
# Split the data into training and test sets (70/30)
row <- nrow(mydata)
row
set.seed(12345)
trainindex <- sample(row, 0.7*row, replace=FALSE)
training <- mydata[trainindex,]
validation <- mydata[-trainindex,]
# Column 24 is the target (default.payment.next.month)
trainingx<- training
trainingy <- training[,24]
validationx <- validation[,-24]
validationy <- validation[,24]
# Use a random forest to rank the most relevant variables
install.packages("randomForest")
library(randomForest)
rfModel = randomForest( training$default.payment.next.month ~ ., data=training, ntree=500 )
varImpPlot(rfModel)
# Build the KNN model
# Prepare the datasets keeping only the best predictor variables
# (drop columns 2-5 -- SEX, EDUCATION, MARRIAGE, etc. -- and the target)
trainingx<- training[,-c(2,3,4,5,24)]
trainingy <- training[,24]
validationx <- validation[,-c(2,3,4,5,24)]
validationy <- validation[,24]
# KNN model (centered/scaled inputs, 10 candidate values of k)
knnModel = train(x=trainingx, y=trainingy, method="knn",
preProc=c("center","scale"),
tuneLength=10)
knnModel
# Plot accuracy versus k (axis labels kept in Portuguese)
plot(knnModel$results$k,
knnModel$results$Accuracy,
type="o",
xlab="Número de Vizinhos Mais Próximos (K)",
ylab="Acurácia",
main="Modelo KNN Para Previsão de Concessão de Cartão de Crédito")
# Make predictions on the validation set
knnPred = predict(knnModel, newdata=validationx)
knnPred
|
164e638c34634f9caa42b68f4c69dee6c45367cf
|
6ce02ecff727693de3b59a73fdf5b21e84d1af56
|
/190911/day5.R
|
8cc11cde371e9134fdb5b1e96ea74f3bc40f8ac4
|
[] |
no_license
|
jinpy7016/TIL
|
a487a513d1c5e4a146a1dc85bd20216696e3221b
|
576b64b344e0d572dbb5cf4d0648027c8563308d
|
refs/heads/master
| 2022-12-11T13:42:21.217212
| 2019-12-05T08:19:41
| 2019-12-05T08:19:41
| 188,196,885
| 0
| 0
| null | 2022-12-08T07:26:58
| 2019-05-23T08:52:15
|
Python
|
UTF-8
|
R
| false
| false
| 13,376
|
r
|
day5.R
|
# --- reshape2 demo: melt/cast on the airquality dataset ---------------------
install.packages("reshape2")
library(reshape2)
#melt: wide -> long format
head(airquality)
names(airquality) <- tolower(names(airquality))
head(airquality)
melt_test <- melt(airquality)
head(melt_test)
tail(melt_test)
# keep month/wind as id variables and measure only ozone
melt_test2 <- melt(airquality, id.vars = c("month","wind"),measure.vars = "ozone")
head(melt_test2)
#cast: long -> wide format
names(airquality) <- tolower(names(airquality))
head(airquality)
aq_melt <- melt(airquality,id=c("month","day"),na.rm = T)
head(aq_melt)
# dcast returns a data frame keyed by month+day
aq_dcast <- dcast(aq_melt, month+day~variable)
head(aq_dcast)
View(airquality)
View(aq_melt)
View(aq_dcast)
#acast returns an array (here 3-dimensional: day x month x variable)
acast(aq_melt, day~month~variable)
# aggregate with mean while casting (array vs data-frame output)
acast(aq_melt,month~variable,mean)
dcast(aq_melt,month~variable,mean)
#KoNLP: Korean noun extraction + word cloud (Korean national-anthem lyrics)
install.packages("KoNLP")
install.packages("wordcloud2")
library(KoNLP)
# Attach the morphological dictionaries (system, Sejong, NIA)
useSystemDic()
useSejongDic()
useNIADic()
# Read the lyrics file (filename is Korean: "national anthem (lyrics)")
word_data <- readLines("./data/애국가(가사).txt")
word_data
# Extract nouns from every line (drop the line names)
word_data2<- sapply(word_data,extractNoun,USE.NAMES=F)
word_data2
# Register extra nouns in the user dictionary (tagged "ncn"),
# then re-run the extraction so they are recognized
add_words<-c("백두산","남산","철갑","가을","하늘","달")
buildDictionary(user_dic = data.frame(add_words,rep("ncn",length(add_words))),replace_usr_dic = T)
word_data2 <-sapply(word_data,extractNoun,USE.NAMES = F)
word_data2
# Flatten the per-line results into one vector and tabulate frequencies
undata<-unlist(word_data2)
undata
word_table <- table(undata)
word_table
# Keep only words of length >= 2 and re-tabulate
undata2<-Filter(function(x){nchar(x) >=2},undata)
word_table2<-table(undata2)
word_table2
sort(word_table2,decreasing = T)
install.packages("wordcloud2")
library(wordcloud2)
# Default cloud, then dark-background and star-shaped variants
wordcloud2(word_table2)
wordcloud2(word_table2,color = "random-light",backgroundColor = "black")
wordcloud2(word_table2,fontFamily = "맑은 고딕",size=1.2,color="random-light",backgroundColor="black",shape="star")
# Naver blog-search API example: query, clean the XML response, word cloud
urlStr <- "https://openapi.naver.com/v1/search/blog.xml?"
searchString<-"query=코타키나발루"
searchString <- iconv(searchString, to="UTF-8")
searchString <- URLencode(searchString)
searchString
etcString<-"&display=100&start=1&sort=sim"
reqUrl<-paste(urlStr,searchString,etcString,sep="")
reqUrl
library(httr)
# Placeholder credentials -- fill in your own Naver API client id/secret
clientID<-'myclientID'
clientSecret<-'myclientSecret'
apiResult <- GET(reqUrl, add_headers("X-Naver-Client-Id"=clientID,"X-Naver-Client-Secret"=clientSecret))
apiResult
str(apiResult)
apiResult$content
str(apiResult$content)
# The response body arrives as raw bytes; convert and mark as UTF-8
result<-rawToChar(apiResult$content)
result
Encoding(result)<-"UTF-8"
result
refinedStr <- result
# Replace XML tags with spaces
refinedStr <- gsub("<(\\/?)(\\w ?+)*([^<>]*)>", " ", refinedStr)
refinedStr
# Replace punctuation (paragraph markers etc.) with spaces
refinedStr <- gsub("[[:punct:]]", " ", refinedStr)
refinedStr
# Replace English letters with spaces (regex covers both cases)
refinedStr <- gsub("[a-zA-Z]", " ", refinedStr)
refinedStr
# Replace digits with spaces
refinedStr <- gsub("[0-9]", " ", refinedStr)
refinedStr
# Collapse runs of spaces into a single space
refinedStr <- gsub(" +", " ", refinedStr)
refinedStr
library(KoNLP)
library(rJava)
nouns<- extractNoun( refinedStr )
str(nouns)
nouns[1:40]
# Drop one-character tokens
nouns <-nouns[nchar(nouns) > 1]
# Define specific stopwords to exclude (includes the query term itself)
excluNouns <- c("코타키나발루", "얼마" , "오늘", "으로", "해서", "API", "저희", "정도")
nouns <- nouns [!nouns %in% excluNouns ]
nouns [1:40]
# Keep the 50 most frequent words
wordT <- sort(table(nouns), decreasing=T)[1:50]
wordT
# wordcloud2 package
# wordcloud2 (data, size, shape)
# data: a data frame or table of words and frequencies; size: font scale;
# shape: overall cloud shape (circle [default], cardioid, diamond, triangle, star, ...)
#install.packages("wordcloud2")
library(wordcloud2)
wordcloud2(wordT, size=3, shape="diamond")
#########################################################
# English-document text analysis and word cloud (tm + wordcloud)
#install
install.packages("tm")
install.packages("SnowballC")
install.packages("RColorBrewer")
install.packages("wordcloud")
#load
library("tm")
library("SnowballC")
library("RColorBrewer")
library("wordcloud")
filePath <- "http://www.sthda.com/sthda/RDoc/example-files/martin-luther-king-i-have-a-dream-speech.txt"
text<-readLines(filePath)
str(text)
# VectorSource() builds a corpus source from a character vector
docs<-Corpus(VectorSource(text))
head(docs)
# Use tm_map() transformations to substitute special characters in the text:
# replace "/", "@" and "|" with spaces.
toSpace <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, toSpace, "/")
docs <- tm_map(docs, toSpace, "@")
docs <- tm_map(docs, toSpace, "\\|")
head(docs)
# Convert to lower case
docs <- tm_map(docs, content_transformer(tolower))
# Remove numbers
docs <- tm_map(docs, removeNumbers)
# Remove English stopwords
docs <- tm_map(docs, removeWords, stopwords("english"))
# Remove user-defined stopwords given as a character vector
docs <- tm_map(docs, removeWords, c("blabla1", "blabla2"))
# Remove punctuation
docs <- tm_map(docs, removePunctuation)
# Strip extra whitespace
docs <- tm_map(docs, stripWhitespace)
# Stem the documents
docs <- tm_map(docs, stemDocument)
docs
# The term-document matrix tabulates word frequencies; rowSums() below
# aggregates per-term counts across documents.
dtm <- TermDocumentMatrix(docs)
m <- as.matrix(dtm)
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
head(d, 10)
set.seed(1234)
wordcloud(words = d$word, freq = d$freq, min.freq = 1,
max.words=200, random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "Dark2"))
###########################################################
#ggplot2 demos: points/lines, bars, polar, annotations, labels
install.packages("ggplot2")
library(ggplot2)
str(airquality)
ggplot(airquality,aes(x=day,y=temp))+geom_point()
ggplot(airquality,aes(x=day,y=temp))+geom_point(size=2,color="red")
ggplot(airquality,aes(x=day,y=temp))+geom_line()
ggplot(airquality,aes(x=day,y=temp))+geom_line()+geom_point()
# Bar charts (factor(cyl) gives discrete bars; fill splits by gear)
ggplot(mtcars,aes(x=cyl))+geom_bar(width = 0.5)
ggplot(mtcars,aes(x=factor(cyl)))+geom_bar(width = 0.5)
ggplot(mtcars,aes(x=factor(cyl)))+geom_bar(aes(fill = factor(gear)))
# Sunburst / pie-style charts (bar chart in polar coordinates)
ggplot(mtcars,aes(x=factor(cyl)))+geom_bar(aes(fill = factor(gear)))+coord_polar()
ggplot(mtcars,aes(x=factor(cyl)))+geom_bar(aes(fill = factor(gear)))+coord_polar(theta="y")
# Box plot / histogram
ggplot(airquality,aes(x=day,y=temp,group=day))+geom_boxplot()
ggplot(airquality,aes(temp))+geom_histogram(binwidth=1)
# Adding reference objects that aid interpretation of the chart
str(economics)
ggplot(economics,aes(x=date,y=psavert))+geom_line()+
geom_abline(intercept = 12.18671,slope = -0.0005444)
ggplot(economics,aes(x=date,y=psavert))+geom_line()+
geom_hline(yintercept = mean(economics$psavert))
#install.packages("dplyr")
library(dplyr)
# Date of the minimum savings rate, used as a vertical reference line
x_inter<-filter(economics,psavert==min(economics$psavert))$date
ggplot(economics,aes(x=date,y=psavert))+geom_line()+
geom_vline(xintercept = x_inter)
# Adding text labels and annotation shapes
ggplot(airquality,aes(x=day,y=temp))+geom_point()+
geom_text(aes(label=temp,vjust=0,hjust=0))
ggplot(mtcars,aes(x=wt,y=mpg))+geom_point()+
annotate("rect",xmin=3,xmax=4,ymin=12,ymax=21,alpha=0.5,fill="skyblue")
ggplot(mtcars,aes(x=wt,y=mpg))+geom_point()+
annotate("rect",xmin=3,xmax=4,ymin=12,ymax=21,alpha=0.5,fill="skyblue")+
annotate("segment",x=2.5,xend=3.7,y=10,yend=17,color="red",arrow=arrow())
ggplot(mtcars,aes(x=wt,y=mpg))+geom_point()+
annotate("rect",xmin=3,xmax=4,ymin=12,ymax=21,alpha=0.5,fill="skyblue")+
annotate("segment",x=2.5,xend=3.7,y=10,yend=17,color="red",arrow=arrow())+
annotate("text",x=2.5,y=10,label="point")
# Adding a chart title and axis titles (label strings kept in Korean)
ggplot(mtcars,aes(x=gear))+geom_bar()+
labs(x="기어수",y="자동차수",title="변속기 기어별 자동차수")
##############################################################
# Web scraping: IMDb 2016 feature-film list (rvest)
#############################################################
install.packages('rvest')
library(rvest)
# Store the URL of the site to scrape
url <- 'http://www.imdb.com/search/title?count=100&release_date=2016,2016&title_type=feature'
# Read the HTML code from the website
webpage <- read_html(url)
webpage
# Fields to scrape - rank, title, description, runtime, genre, rating, metascore, votes, gross_earning_in_Mil, director, actor
# Find the CSS selector containing the ranking and pull those nodes
rank_data_html <- html_nodes(webpage,'.text-primary')
# Extract the ranking data as text
rank_data <- html_text(rank_data_html)
head(rank_data)
# Convert the ranking data to numeric
rank_data<-as.numeric(rank_data)
head(rank_data)
#str(rank_data)
#length(rank_data)
title_data_html<-html_nodes(webpage,'.lister-item-header a')
title_data<-html_text(title_data_html)
length(title_data)
title_data
desc_data_html<-html_nodes(webpage,'.ratings-bar+ .text-muted')
desc_data <- html_text(desc_data_html)
length(desc_data)
library(stringr)
# Trim and strip embedded newlines from the descriptions
desc_data<-gsub("\n","",str_trim(desc_data))
head(desc_data)
runtime_data_html<-html_nodes(webpage,'.lister-item-header+ .text-muted .runtime')
runtime_data <- html_text(runtime_data_html)
# Drop the " min" suffix and convert runtimes to numeric
runtime_data<-gsub(" min","",runtime_data)
runtime_data<-as.numeric(runtime_data)
genre_data_html<-html_nodes(webpage,'.lister-item-header+ .text-muted .genre')
genre_data <- html_text(genre_data_html)
genre_data<-gsub("\n","",genre_data)
# Strip all whitespace
genre_data<-gsub(" ","",genre_data)
# Keep only the first listed genre (drop the comma and everything after it)
genre_data<-gsub(",.*","",genre_data)
# Convert the strings to a categorical (factor) variable
genre_data<-as.factor(genre_data)
genre_data
# Scrape the votes field via its CSS selector
votes_data_html<-html_nodes(webpage,'.sort-num_votes-visible span:nth-child(2)')
# Extract the votes data as text
votes_data<-html_text(votes_data_html)
# Remove thousands-separator commas
votes_data<-gsub(",","",votes_data)
# Convert votes to numeric
# NOTE(review): the result is not assigned, so votes_data stays character
# when movies_df is built below -- confirm whether assignment was intended.
as.numeric(votes_data)
# Scrape the IMDB rating field via its CSS selector
rating_data_html<-html_nodes(webpage,'.ratings-bar strong')
# Extract the IMDB rating data as text
rating_data<-html_text(rating_data_html)
## Convert ratings to numeric
## NOTE(review): also unassigned -- rating_data remains character below.
as.numeric(rating_data)
# Director
director_data_html<-html_nodes(webpage,'.text-muted+ p a:nth-child(1)')
director_data<-html_text(director_data_html)
director_data<-as.factor(director_data)
director_data
# Actor
actor_data_html<-html_nodes(webpage,'.lister-item-content .ghost+ a')
actor_data<-html_text(actor_data_html)
actor_data<-as.factor(actor_data)
actor_data
# Scrape the metascore field via its CSS selector
metascore_data_html <- html_nodes(webpage,'.metascore')
# Extract the metascore data as text
metascore_data <- html_text(metascore_data_html)
head(metascore_data)
# Remove stray whitespace
metascore_data<-gsub(" ","",metascore_data)
length(metascore_data)
metascore_data
# Insert "NA" placeholders for the movies missing a metascore - 29,58, 73, 96
# NOTE(review): append(..., list("NA")) coerces the character vector into a
# list; verify that the later as.numeric() call handles that as intended
# (appending NA_character_ directly would keep it a plain vector).
for (i in c(29,58, 73, 96)){
a<-metascore_data[1:(i-1)] # head part (becomes a list after append below)
b<-metascore_data[i:length(metascore_data)]
metascore_data<-append(a,list("NA"))
metascore_data<-append(metascore_data,b)
}
metascore_data
# Convert metascore to numeric ("NA" strings become NA with a warning)
metascore_data<-as.numeric(metascore_data)
# Check the number of metascore entries
length(metascore_data)
# Summary statistics for metascore
summary(metascore_data)
# Scrape the gross-revenue field via its CSS selector
revenue_data_html <-html_nodes(webpage,'.sort-num_votes-visible span:nth-child(5)')
# Extract the gross-revenue data as text
revenue_data<-html_text(revenue_data_html)
revenue_data
# Remove the '$' and 'M' symbols
revenue_data<-gsub("[$M]","",revenue_data)
# Check the number of gross-revenue entries
length(revenue_data)
# Fill in "NA" placeholders for the missing entries 29,45,57,62,73,93,98
for (i in c(29,45,57,62,73,93,98)){
a<-revenue_data[1:(i-1)] # head part (becomes a list after append below)
b<-revenue_data[i:length(revenue_data)]
revenue_data<-append(a,list("NA"))
revenue_data<-append(revenue_data,b)
}
# Convert gross revenue to numeric
revenue_data<-as.numeric(revenue_data)
# Check the number of gross-revenue entries
length(revenue_data)
revenue_data
# Summary statistics for gross revenue
summary(revenue_data)
# Assemble everything into a data frame
movies_df<-data.frame(Rank = rank_data, Title = title_data,
Description = desc_data, Runtime = runtime_data,
Genre = genre_data, Rating = rating_data,
Metascore = metascore_data, Votes = votes_data,
Director = director_data, Actor = actor_data)
# Inspect the structure of the assembled data frame
str(movies_df)
library('ggplot2')
# Which genre has the longest-running films?
# x axis: runtime, y axis: number of films per genre
qplot(data = movies_df,Runtime,fill = Genre,bins = 30)
# Among films with a 130-160 minute runtime, which genre has the most votes?
ggplot(movies_df,aes(x=Runtime,y=Rating))+
geom_point(aes(size=Votes,col=Genre))
|
3ed75b15dd3e942fcbee03c1dcc303e28bec9775
|
87d46af5a5ffb1129f093e54950f01122abcdfe9
|
/R/r-zlinear.R
|
af771aec84c0b18dc44f05f87afdf972901cc5f9
|
[
"Apache-2.0"
] |
permissive
|
rtlemos/rcrandom
|
532ed70b6e92e4bc603ca65bdbfdd95c42bb0ee6
|
d4c0319d5fe46791f22f26b24ab29c28865ca274
|
refs/heads/master
| 2021-01-21T14:07:46.584400
| 2016-07-05T03:03:57
| 2016-07-05T03:03:57
| 56,735,954
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,043
|
r
|
r-zlinear.R
|
#' ZLinear
#'
#' Build piecewise linear log-probability density functions.
#'
#' This Reference Class is analogous to ZQuadratic.
#' Objects of this class provide approximations to the input
#' log-p.d.f. that are slightly more numerically stable,
#' but probably worse, than those of ZQuadratic.
#' To mitigate the worse fit, more evaluations should be
#' provided. For details about this classe's fields and
#' methods, see those in ZQuadratic.
#' Although this class is not virtual, it is not exported.
#'
#'
#' @field n numeric.
#' @field x numeric.
#' @field y numeric.
#' @field beta0 numeric.
#' @field beta1 numeric.
#' @field dx numeric.
#' @field normct numeric.
#' @field pcdf numeric.
#' @field fns list.
#'
#' @importFrom methods new
#' @export ZLinear
#' @exportClass ZLinear
#'
#' @examples
#' x <- seq(-4, 4, 0.3)
#' y <- dnorm(x, log = TRUE)
#' zq <- ZLinear(x = x, y = y)
#' do.log <- TRUE #switch to FALSE to see p.d.f.
#' zq$plot.fit(do.log)
#' lines(seq(-4, 4, 0.1), dnorm(seq(-4, 4, 0.1),
#' log = do.log), ty = "l", lwd = 1, lty = 2)
#'
ZLinear <- setRefClass(
  Class = "ZLinear",
  fields = list(
    n = "numeric",       # number of evaluation points (must be odd)
    x = "numeric",       # x-coordinates of the log-pdf evaluations
    y = "numeric",       # log-pdf values at x
    beta0 = "numeric",   # per-segment intercepts of the piecewise-linear fit
    beta1 = "numeric",   # per-segment slopes (in normalized [0,1] coordinates)
    dx = "numeric",      # widths of the n-1 segments
    normct = "numeric",  # normalizing constant of the approximate pdf
    pcdf = "numeric",    # cumulative probability at each segment's right edge
    fns = "list"),       # backend helper functions (R or Fortran variants)
  methods = list(
    # Build the piecewise-linear approximation from evaluations (x, y) of a
    # log-density.  An odd number of evaluations is required, mirroring
    # ZQuadratic.  A no-argument call returns an empty prototype object.
    initialize = function(x = NULL, y = NULL,
                          useFortran = FALSE){
      if (is.null(x) | is.null(y)) return()
      nx <- length(x)
      if (nx %% 2 != 1) {
        stop("Must use an odd number of evaluations.")
      }
      # Select the Fortran-backed or pure-R helper implementations.
      myf <- if (useFortran) get.ffns() else get.rfns()
      coeffs <- myf$betas.linear(nx, x, y)
      .self$x <- x # x-coordinates of evaluations
      .self$y <- y # these are the log-density evaluations
      .self$n <- nx
      .self$beta0 <- coeffs$b0
      .self$beta1 <- coeffs$b1
      .self$dx <- x[2:nx] - x[1:(nx - 1)]
      # Per-segment integrals on the normalized interval [0,1]; scaling by
      # dx gives each segment's probability mass, which is then normalized.
      integrals <- myf$integrals.linear(coeffs$b0, coeffs$b1, 0, 1)
      .self$normct <- sum(integrals * .self$dx)
      .self$pcdf <- cumsum(integrals * .self$dx) / .self$normct
      .self$fns <- myf
    },
    # Evaluate the approximate (log-)density at z.  The backend also
    # receives pdf.quadratic as a helper, as in the original implementation.
    pdf = function(z, do.log = FALSE){
      return(.self$fns$pdf.linear(
        z, .self$x, .self$dx, .self$beta0, .self$beta1,
        .self$normct, do.log, .self$fns$pdf.quadratic))
    },
    # Evaluate the approximate cumulative distribution function at z.
    cdf = function(z){
      return(.self$fns$cdf.linear(
        z, .self$x, .self$dx, .self$beta0, .self$beta1,
        .self$pcdf, .self$normct))
    },
    # Evaluate the approximate quantile function at probability p.
    invcdf = function(p){
      return(.self$fns$invcdf.linear(
        p, .self$x, .self$dx, .self$beta0, .self$beta1,
        .self$pcdf, .self$normct))
    },
    # Plot the stored evaluations together with the fitted piecewise-linear
    # segments, on the log-pdf scale (do.log = TRUE) or the pdf scale.
    plot.fit = function(do.log = TRUE){
      if (do.log) {
        py <- .self$y
        mlab <- "piecewise linear approx. to log-pdf"
        yl <- "log-pdf"
      } else {
        py <- exp(.self$y)
        mlab <- "approximation to pdf"
        yl <- "pdf"
      }
      plot(.self$x, py, xlab = "z-coord", ylab = yl,
           main = mlab)
      for (i in 1:(.self$n - 1)) {
        # Sample each segment on its normalized coordinate and overlay it.
        fx <- seq(.self$x[i], .self$x[i + 1], length = 10)
        dd <- (fx - .self$x[i]) / .self$dx[i]
        fy <- .self$beta0[i] + .self$beta1[i] * dd
        ly <- if (do.log) fy else exp(fy)
        lines(fx, ly, col = i, lwd = 4)
      }
    },
    #-------------------------------------------------------
    # Private functions ------------------------------------
    #-------------------------------------------------------
    # Accessor: the x-coordinates of the evaluations.
    iget.coord = function(){
      return(.self$x)
    },
    # Accessor: the log-density evaluations.
    iget.logd = function(){
      return(.self$y)
    },
    # Coordinates of the distribution of k*Z.  A negative k reverses the
    # grid so that the returned x stays in increasing order.
    iget.multiply.constant = function(k){
      if (k > 0) {
        coords <- list(x = k * .self$x, y = .self$y)
      } else {
        coords <- list(x = k * rev(.self$x),
                       y = rev(.self$y))
      }
      return(coords)
    },
    # Whether `operation` is supported between a ZLinear and an operand of
    # the given class.  Division is never supported.
    is.operation.allowed = function(operation, argclass){
      switch( operation,
              "/" = FALSE,
              "*" = {any(argclass == c("numeric", "matrix",
                                       "Constant",
                                       "Uniform"))},
              "-" = ,
              "+" = {any(argclass == c("numeric",
                                       "matrix",
                                       "Constant",
                                       "Uniform",
                                       "Normal",
                                       "ZQuadratic",
                                       "ZLinear"))},
              FALSE #default
      )
    },
    # Perform `operation` between this object and `operand`, dispatching on
    # the operand's class, and return the resulting (x, y) coordinates.
    iset.operate = function(operation, operand,
                            operand.name, operand.side,
                            my.name){
      if (!.self$is.operation.allowed(operation,
                                      class(operand))) {
        stop("ZLinear: Invalid operation.")
      }
      # This object's spec, expressed as a degenerate quadratic (b2 = 0) so
      # the shared ZQuadratic convolution routines can be reused.
      my.specs <- list(n = length(.self$x), x = .self$x,
                       dx = .self$dx,
                       b0 = .self$beta0, b1 = .self$beta1,
                       b2 = rep(0,length(.self$beta0)),
                       normct = .self$normct)
      switch(
        class(operand),
        "ZLinear" = {
          op.specs <- list(
            n = length(operand$x),
            x = operand$x,
            dx = operand$dx,
            b0 = operand$beta0,
            b1 = operand$beta1,
            b2 = rep(0, length(operand$beta0)),
            normct = operand$normct)
          # NOTE(review): the "left" and "right" branches below pass
          # identical (l, r) argument orders; confirm whether they should be
          # swapped for the non-commutative "-" operation.
          if (operand.side == "left") {
            coords <-
              .self$fns$convolution.zquadratic.zquadratic(
                l = op.specs,
                r = my.specs,
                operation = operation,
                # BUGFIX: was `.self$myf$find.cutpoints`; `myf` is only a
                # local variable in initialize() -- the helpers live in the
                # `fns` field (cf. find.cutpoints.zu in the Uniform branch).
                find.cutpoints = .self$fns$find.cutpoints,
                integrals = .self$fns$integrals.linear,
                erf = .self$fns$erf,
                erfi = .self$fns$erfi)
          } else {
            coords <-
              .self$fns$convolution.zquadratic.zquadratic(
                l = op.specs,
                r = my.specs, operation = operation,
                find.cutpoints = .self$fns$find.cutpoints,
                integrals = .self$fns$integrals.linear,
                erf = .self$fns$erf,
                erfi = .self$fns$erfi)
          }
        },
        "ZQuadratic" = {
          op.specs <- list(n = length(operand$x),
                           x = operand$x,
                           dx = operand$dx,
                           b0 = operand$beta0,
                           b1 = operand$beta1,
                           b2 = operand$beta2,
                           normct = operand$normct)
          # NOTE(review): same duplicated left/right branches as above.
          if (operand.side == "left") {
            coords <-
              .self$fns$convolution.zquadratic.zquadratic(
                l = op.specs, r = my.specs,
                operation = operation,
                find.cutpoints = .self$fns$find.cutpoints,
                integrals = .self$fns$integrals.linear,
                erf = .self$fns$erf,
                erfi = .self$fns$erfi)
          } else {
            coords <-
              .self$fns$convolution.zquadratic.zquadratic(
                l = op.specs,
                r = my.specs, operation = operation,
                find.cutpoints = .self$fns$find.cutpoints,
                integrals = .self$fns$integrals.linear,
                erf = .self$fns$erf,
                erfi = .self$fns$erfi)
          }
        },
        "Uniform" = {
          switch(
            operation,
            "-" = ,
            "+" = ,
            "*" = {
              # Pull the Uniform operand's bounds and replicate count.
              unif.lb <- operand$param$lb$parameter(
                1, eval = TRUE)
              unif.ub <- operand$param$ub$parameter(
                1, eval = TRUE)
              unif.n <- operand$nr
              op.specs <- list(lb = unif.lb, ub = unif.ub,
                               n = unif.n )
              coords <-
                .self$fns$convolution.zquadratic.uniform(
                  zq = my.specs, unif = op.specs,
                  operation = operation,
                  find.cutpoints.zu =
                    .self$fns$find.cutpoints.zu,
                  integralsMult = .self$fns$integralsMult,
                  ExpIntegral = .self$fns$expint)
            },
            stop("ZLinear: Invalid operation, stage 2.")
          )
        },
        "Normal" = {
          # Only Normal operands with constant mean/variance are supported.
          mu <- operand$parameter(id = 1, eval = TRUE)
          tau <- operand$parameter(id = 2, eval = TRUE)
          nr <- operand$nr
          if (is(mu,"Constant")) {
            mu.val <- mu$parameter(id = 1, eval = TRUE)
          } else {
            stop("mean?")
          }
          if (is(tau,"Constant")) {
            tau.val <- tau$parameter(id = 1, eval = TRUE)
          } else {
            stop("var?")
          }
          op.specs <- list(m = mu.val, v = tau.val)
          if (operand.side == "left" & operation == "-") {
            # Normal - ZLinear: rewrite as Normal + (-1 * ZLinear).
            tmp.coord <- .self$iget.multiply.constant(-1.0)
            tmp <- ZLinear(x = tmp.coord$x, y = tmp.coord$y)
            tmp.specs <- list(
              n = length(tmp$x),
              x = tmp$x,
              dx = tmp$dx,
              b0 = tmp$beta0,
              b1 = tmp$beta1,
              b2 = rep(0, length(tmp$beta0)),
              normct = tmp$normct)
            coords <-
              .self$fns$convolution.zquadratic.normal(
                zq = tmp.specs, normal = op.specs,
                operation = "+",
                integrals = .self$fns$integrals.linear,
                erf = .self$fns$erf,
                erfi = .self$fns$erfi)
          } else {
            coords <-
              .self$fns$convolution.zquadratic.normal(
                zq = my.specs, normal = op.specs,
                operation = operation,
                integrals = .self$fns$integrals.linear,
                erf = .self$fns$erf,
                erfi = .self$fns$erfi )
          }
        },
        stop("ZLinear: Invalid operation, stage 2") #default
      )
      return(coords)
    }
  )
)
|
880a832ced2ad634930f96355034f5e209778fe4
|
ca85aca58c75b824f5f7668673a4e9958e3be049
|
/man/digform.Rd
|
f67b7d63957acc48138f3439363d495ac31d239d
|
[] |
no_license
|
jmjablons/icager
|
1a2fd0586f73682b7f4dd3578e5c2a50af46638e
|
7dddccef96b892ccf3750a8af348f2f547d46937
|
refs/heads/master
| 2022-12-03T14:54:28.876604
| 2020-08-17T13:13:52
| 2020-08-17T13:13:52
| 228,870,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 631
|
rd
|
digform.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{digform}
\alias{digform}
\title{Provide Formatting Standards On Call}
\usage{
digform(version = NULL)
}
\arguments{
\item{version}{Character or integer. Version of software:
\describe{
\item{#1}{"new", "star", "newer", 1}
\item{#2}{"old", "plus", "older, "proper", 2}
}}
}
\value{
a named list
}
\description{
\code{digform()} returns a list of variables specifying
experimental file formats,
which vary depending on the software version
}
\details{
It is also called internally by other functions in this package.
}
\examples{
digform(version = 1)
}
|
4a215cf0aa1e001b3d15ce178a47e6178052ef84
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/photobiology/examples/getTfrType.Rd.R
|
c7ca1afb3930e3899ce53bd02aa999ff8ac46fc6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 158
|
r
|
getTfrType.Rd.R
|
# Extracted roxygen example: run photobiology::getTfrType() on the package's
# bundled 'polyester.spct' spectrum object to read its "Tfr.type" attribute.
library(photobiology)
### Name: getTfrType
### Title: Get the "Tfr.type" attribute
### Aliases: getTfrType
### ** Examples
getTfrType(polyester.spct)
|
1512f1083d0545753a3ef7281e111b2f69158d2a
|
3b661bdf871a79038cbd2eda3842a19aada9d266
|
/homework/Lab3.R
|
07aa10bd52716e365ea0441d9bba0d22994efe9d
|
[] |
no_license
|
HaoliangZheng/Statistical-Computing
|
3fd3460c74311ac2828a9aec10fab4ac7dc0b76a
|
1d8634d5b02f1e69a2a0b89d098ca19f86d9b49d
|
refs/heads/master
| 2020-04-27T10:58:27.570861
| 2019-03-13T12:08:46
| 2019-03-13T12:08:46
| 174,277,942
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,701
|
r
|
Lab3.R
|
# Tutorial: the apply family of functions (apply/tapply/mapply/lapply/sapply/
# vapply/rapply), plus simplify2array, sweep and aggregate.
##apply(X, MARGIN, FUN, ...)
ary=array(1:27,c(3,3,3));ary
apply(ary,1,sum)
apply(ary,c(1,2),sum)
dat=data.frame(a=1:10,b=1:10);dat
apply(dat,1,sum)
# Loops a function over the rows or columns of arrays and data frames
##tapply(X, INDEX, FUN = NULL, ..., simplify = TRUE)
##(table-style grouped computation)
x=1:10;x
set.seed(2)
t=round(runif(10,0,2));t
tapply(x,t,sum)
# Groups the data X by the index vector t, then applies the function per group
##mapply(FUN, ..., MoreArgs = NULL, SIMPLIFY = TRUE,USE.NAMES = TRUE)
##(multivariate variant of sapply; accepts several parallel inputs)
n=rep(4,4);mean=rep(0,4);var=rep(1,4)
mapply(rnorm,n,mean,var)
# Generates 4 normally-distributed datasets with mean 0 and variance 1,
# where n, mean and var are all passed as arguments to rnorm
### Operating on lists
##lapply(X, FUN, ...)
##(list input, list output)
set.seed(2)
x=list(a=1:10,b=rnorm(50,0,1),c=c(TRUE,FALSE,FALSE,TRUE));x
lapply(x,mean)
##sapply(X, FUN, ..., simplify = TRUE, USE.NAMES = TRUE)
##(simplified lapply; adds the two parameters simplify and USE.NAMES)
sapply(x,mean)
class(lapply(x,mean));class(sapply(x,mean))
# Friendlier output: the return value is a vector rather than a list
lapply(x,mean)
sapply(x,mean,simplify=FALSE,USE.NAMES=FALSE)
class(lapply(x,mean));class(sapply(x,mean,simplify=FALSE,USE.NAMES=FALSE))
# With simplify=FALSE and USE.NAMES=FALSE, sapply is equivalent to lapply
##vapply(X, FUN, FUN.VALUE, ..., USE.NAMES = TRUE)
##(controls the return value's type; otherwise similar to sapply)
vapply(x,mean,FUN.VALUE = 0)
vapply(x,mean,FUN.VALUE = as.integer(0))
# Errors when the return type is declared integer, because the results
# do not satisfy that requirement
##rapply(object, f, classes = "ANY", deflt = NULL,
##       how = c("unlist", "replace", "list"), ...)
##(recursive: traverses every element of the list, descending into sublists)
lst=list(x=list(x1=1:3,x2=4:6));lst
lapply(lst,sum)
# lapply cannot be used here (it does not recurse into the nested list)
rapply(lst,sum,how="unlist")
rapply(lst,sum,how="replace")
### Miscellaneous
##simplify2array(x, higher = TRUE)
set.seed(2)
x=list(a=1:10,b=rnorm(50,0,1),c=c(TRUE,FALSE,FALSE,TRUE));
simplify2array(lapply(x,mean))
sapply(x,mean)
class(simplify2array(lapply(x,mean)));class(sapply(x,mean))
# simplify=TRUE in the apply family is implemented via simplify2array
##sweep(x, MARGIN, STATS, FUN = "-", check.margin = TRUE, ...)
##STATS is the main thing that distinguishes sweep from apply
M=matrix(1:12,ncol=3);M
sweep (M, 2, c (1: 3), "+")
# Adds to every entry in each column that column's index (1, 2, 3)
##aggregate(x, by, FUN, ..., simplify = TRUE, drop = TRUE)
##(groups the data, computes the function per group, and returns the
## combined results as a table)
state.x77
state.region
aggregate(state.x77, list(Region = state.region), mean)
# Means of the 8 indicators for the 50 US states, grouped by region
|
c769360da2efa02cab5976ebdeb56c54d042fef5
|
8ef27de17d0110828d77ca91b4f4e71af73fc12f
|
/R/plotDistributionAssay.R
|
dfaa6bcba52fac9e831c38d5ead4aa4dea8eb8df
|
[] |
no_license
|
marcpaga/pulsedSilac
|
95537ce75dc65a9573186708b2917ac700c7cbe6
|
23e5e48083b5edfc99c5dbc42bef487610bec5af
|
refs/heads/master
| 2020-05-17T09:29:16.236700
| 2020-03-07T12:58:50
| 2020-03-07T12:58:50
| 183,634,007
| 2
| 0
| null | 2019-12-05T09:54:02
| 2019-04-26T13:31:31
|
R
|
UTF-8
|
R
| false
| false
| 5,275
|
r
|
plotDistributionAssay.R
|
#' @rdname plotDistributionAssay
#' @name plotDistributionAssay
#' @title Distribution of assay data per condition and timepoint.
#'
#' @description Plot the distribution of the data stored in an assay using
#' boxplots or density distributions.
#'
#' @param x A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or a
#' \code{SilacProteomicsExperiment} object.
#' @param assayName Name of the assay to use in the plot.
#' @param plotType A \code{character} indicating which geometry to plot:
#' 'boxplot' or 'density'. (default = 'density')
#' @param returnDataFrame A \code{logical} indicating if the \code{data.frame}
#' used for the plot should be returned instead.
#' @param mode A \code{character} indicating which level of data to use,
#' either "protein" or "peptide". Only relevant for ProteomicsExperiment
#' inputs.
#' @param conditionCol A \code{character}, which indicates the column name
#' in colData(x) that defines the different experiment conditions.
#' @param timeCol A \code{character}, which indicates the column name
#' in colData(x) that defines the different timepoints.
#' @param ... Unused.
#'
#' @return A ggplot2 object or a \code{data.frame} with the data that would be
#' plotted.
#'
#' @examples
#' data('wormsPE')
#' plotDistributionAssay(wormsPE, assayName = 'ratio')
#'
#' @importFrom ggridges geom_density_ridges
#' @import ggplot2
#' @export
setGeneric('plotDistributionAssay', function(x, ...){
standardGeneric('plotDistributionAssay')
})
#' @rdname plotDistributionAssay
#' @export
setMethod('plotDistributionAssay', 'SilacProteinExperiment',
function(x,
assayName,
plotType = 'boxplot',
returnDataFrame = FALSE,
conditionCol,
timeCol) {
## argument checker ----------------------------------------------------------
if (!assayName %in% names(assays(x))) {
txt <- sprintf('%s not found in assay names', assayName)
stop(txt)
}
if (!plotType %in% c('boxplot', 'density')) {
txt <- c('plotType must be "boxplot" or "density"')
stop(txt)
}
## cb palette
cbPalette <- c("#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
if (!missing(conditionCol)) {
metadata(x)[['conditionCol']] <- conditionCol
}
if (!missing(timeCol)) {
metadata(x)[['timeCol']] <- timeCol
}
## Data and options processing -----------------------------------------------
mat <- assays(x)[[assayName]]
loopCols <- .loopWrapper(x, 'conditionCol')
## make a data.frame for each condition
for (i in seq_along(loopCols)) {
if (i == 1) {
dfList <- list()
}
timeCol <- .giveMetaoption(x, 'timeCol')
if (is.na(timeCol)) {
timeVec <- seq_along(loopCols[[i]])
} else {
timeVec <- colData(x)[, .giveMetaoption(x, 'timeCol')][loopCols[[i]]]
}
values <- as.vector(mat[, loopCols[[i]]])
tempDf <- data.frame(value = values,
time = rep(timeVec, each = nrow(mat)))
if (!is.null(names(loopCols)[i])) {
tempDf$condition <- names(loopCols)[i]
} else {
tempDf$condition <- 'condition'
}
dfList[[i]] <- tempDf
}
## join all the data.frames
plotDf <- do.call('rbind', dfList)
plotDf$time <- as.factor(plotDf$time)
plotDf$condition <- as.factor(plotDf$condition)
if (returnDataFrame) {
colnames(plotDf)[1] <- assayName
return(plotDf)
}
## plotting ------------------------------------------------------------------
if (plotType == 'density') {
p <- ggplot(data = plotDf) +
geom_density_ridges(aes_string(x = 'value',
y = 'time',
fill = 'condition')) +
scale_fill_manual(values = cbPalette) +
facet_wrap(~condition) +
labs(x = assayName) +
theme_bw()
} else if (plotType == 'boxplot') {
p <- ggplot(data = plotDf) +
geom_boxplot(aes_string(x = 'time',
y = 'value',
fill = 'condition')) +
scale_fill_manual(values = cbPalette) +
labs(y = assayName) +
theme_bw()
}
p
})
#' @rdname plotDistributionAssay
#' @export
setMethod('plotDistributionAssay', 'SilacPeptideExperiment',
function(x,
assayName,
plotType = 'boxplot',
returnDataFrame = FALSE,
conditionCol,
timeCol) {
callNextMethod()
})
#' @rdname plotDistributionAssay
#' @export
setMethod('plotDistributionAssay', 'SilacProteomicsExperiment',
function(x,
assayName,
mode = 'protein',
plotType = 'boxplot',
returnDataFrame = FALSE,
conditionCol,
timeCol) {
experiment <- switch(mode,
protein = x@SilacProteinExperiment,
peptide = x@SilacPeptideExperiment)
plotDistributionAssay(x = experiment,
assayName = assayName,
plotType = plotType,
returnDataFrame = returnDataFrame,
conditionCol = conditionCol,
timeCol = timeCol)
})
|
226a815ee3027bc9bc88628e0f0866927706b549
|
9021558019864ccb907fc7ce25432563f932bbd3
|
/R/bnnSurvivalBaseLearner.R
|
a39e673effd7e92acac20beae63207286c0b52b8
|
[] |
no_license
|
mnwright/bnnSurvival
|
5cd58925995ce3e2d55c249f392575bc93828c94
|
694072bc55d210dde9e3cccf03e1fe7a29dfd0e3
|
refs/heads/master
| 2021-01-10T06:06:34.396035
| 2017-07-26T20:02:25
| 2017-07-26T20:02:25
| 52,605,495
| 1
| 1
| null | 2016-02-29T12:59:40
| 2016-02-26T13:15:46
|
R
|
UTF-8
|
R
| false
| false
| 3,014
|
r
|
bnnSurvivalBaseLearner.R
|
setClass("bnnSurvivalBaseLearner",
representation(
bootstrap_sample = "integer",
feature_space = "integer")
)
## Constructor: Randomly generate bootstrap sample and feature space
bnnSurvivalBaseLearner <- function(num_samples, num_features, num_features_per_base_learner,
replace, sample_fraction) {
## Bootstrap samples
if (!replace & sample_fraction == 1) {
bootstrap_sample <- 1:num_samples
} else {
bootstrap_sample <- sample(num_samples, num_samples * sample_fraction, replace = replace)
}
## Select a subset of features if not all
if (num_features_per_base_learner == num_features) {
feature_space = 1:num_features
} else {
feature_space = sample(num_features_per_base_learner,
num_features_per_base_learner, replace = FALSE)
}
## Create object
new("bnnSurvivalBaseLearner",
bootstrap_sample = bootstrap_sample,
feature_space = feature_space)
}
##' Compute prediction for all samples.
##' @param object bnnSurvivalBaseLearner object
##' @param train_data Training data (with response)
##' @param test_data Test data (without response)
##' @param timepoints Timepoint to predict at
##' @param metric Metric used
##' @param weighting_function Weighting function used
##' @param k Number of nearest neighbors
##' @import stats
setMethod("predict",
signature("bnnSurvivalBaseLearner"),
function(object, train_data, test_data, timepoints, metric, weighting_function, k) {
## Bootstrap sample and subsample features of training data
train_features <- train_data[object@bootstrap_sample,
object@feature_space+2, drop = FALSE]
train_response <- train_data[object@bootstrap_sample,
c(1,2), drop = FALSE]
## Compute distances to training obs for all test obs
if (metric == "mahalanobis") {
train_cov <- cov(train_features)
## Ignore all-equal features
idx_nonzero <- rowMeans(train_cov) != 0
## Compute distances
distances <- apply(test_data[, object@feature_space[idx_nonzero], drop = FALSE], 1,
mahalanobis,
x = train_features[, idx_nonzero, drop = FALSE],
cov = train_cov[idx_nonzero, idx_nonzero, drop = FALSE],
tol = 1e-25)
} else {
stop("Currently no other distance metrics supported.")
}
## Sort rows or columns, get indices
temp <- apply(distances, 2, sort, index.return = TRUE)
## Compute Kaplan-Meier estimator using the k nearest neighbors for each test obs
survival <- t(sapply(temp, function(x) {
weighted_kaplan_meier(response = train_response[x$ix[1:k], , drop = FALSE],
weights = weighting_function(x$x[1:k]),
timepoints = timepoints)
}))
## Return a matrix with predictions for all test samples and timepoints
return(survival)
}
)
|
bb6000e316f03b230bbcd887f8d257d3f1f1100e
|
d048d1c7b18ee45baac7af24d1d63fb6f4aa94f4
|
/R/checkbehaviour.R
|
da401e78b7cce3580a4c5558745ef1533309e3b8
|
[] |
no_license
|
gobbios/socialindices
|
723eacb83b0d631e6e5c188d905edaf3e9c05511
|
511f282ef35bdbb253273f9fc0fbaed3ff66f909
|
refs/heads/master
| 2023-02-18T02:53:23.729892
| 2023-02-04T10:37:43
| 2023-02-04T10:37:43
| 96,256,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 966
|
r
|
checkbehaviour.R
|
#' data checks
#'
#' check interaction data for inconsistencies
#'
#' @param b.source data frame with behavioural data
#'
#' @return NULL
#' @export
#'
#' @examples
#' # nothing yet
checkbehaviour <- function(b.source) {
x <- b.source
temp <- varname("IDs", x)
if(length(temp)==2) {
res <- which(as.character(x[,temp[1]])==as.character(x[,temp[2]]))
if(length(res)>0) {
warning("focal and partner were identical")
res <- x[res, ]
}
}
if(length(temp)==3) {
res1 <- which(as.numeric(apply(x, 1, function(x)length(unique(c(x[temp[1]], x[temp[2]], x[temp[3]]))))) == 3)
res2 <- which(as.character(x[,temp[3]])==as.character(x[,temp[2]]))
if(length(c(res1,res2))>0) {
warning("focal was not actor or receiver; or actor and receiver were identical")
res <- x[sort(c(res1,res2)), ]
}
}
if(exists("res")) {
return(res)
} else {
return(paste(as.character(temp), "found in the data set"))
}
}
|
ba3b53ebaa1374c762430bf2a89fb74b225beacc
|
358c57a5a40607272997d8428390fb1a4b11b815
|
/R/fastvarImp.R
|
15764b7a23b6bdca9e6d43a68e404578fe73aa98
|
[] |
no_license
|
nicolas-robette/moreparty
|
6ae430db14c807b78077807b2fe6d8cc419d52b3
|
ee447af5def57b964e523e108020b40e048b0df7
|
refs/heads/master
| 2023-08-10T18:41:29.628127
| 2023-07-20T22:28:49
| 2023-07-20T22:28:49
| 214,511,417
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,795
|
r
|
fastvarImp.R
|
#' @import party measures varImp
#' @importFrom stats complete.cases
#' @export
fastvarImp = function(object, mincriterion = 0, conditional = FALSE, threshold = 0.2,
nperm = 1, OOB = TRUE, pre1.0_0 = conditional, measure = "multiclass.Brier",
parallel = TRUE, ...) {
# Some tests
measureList = measures::listAllMeasures()
if (!(measure %in% measureList[, 1]))
stop("measure should be a measure of the measures package")
# Test the Class
response = object@responses
CLASS = all(response@is_nominal | response@is_ordinal)
PROB = measureList$probabilities[measureList[,1] == measure]
MEASURECLASS = measureList$task[measureList[,1] == measure]
if (CLASS & (MEASURECLASS %in% c("regression", "multilabel")))
stop("Measure is not suitable for classification")
if (!CLASS & !(MEASURECLASS %in% "regression"))
stop("Measure is not suitable for regression")
MEASUREMINIMIZE = measureList$minimize[measureList[,1] == measure]
input = object@data@get("input")
xnames = colnames(input)
inp = initVariableFrame(input, trafo = NULL)
y = object@responses@variables[[1]]
if (length(response@variables) != 1)
stop("cannot compute variable importance measure for multivariate response")
if (conditional || pre1.0_0) {
if (!all(complete.cases(inp@variables)))
stop("cannot compute variable importance measure with missing values")
}
if (CLASS) {
if (PROB) {
error = function(x, oob, ...) {
xoob = t(sapply(x, function(x) x))[oob,]
colnames(xoob) = levels(y)
yoob = y[oob]
return(do.call(measure, list(xoob, yoob, ...)))
}
}else {
error = function(x, oob, ...) {
xoob = t(sapply(x, function(x) x))[oob,]
colnames(xoob) = levels(y)
xoob = colnames(xoob)[max.col(xoob,ties.method="first")]
yoob = y[oob]
return(do.call(measure, list(yoob, xoob, ...)))
}
}
} else {
error = function(x, oob, ...) {
xoob = unlist(x)[oob]
yoob = y[oob]
return(do.call(measure, list(xoob, yoob, ...)))
}
}
w = object@initweights
if (max(abs(w - 1)) > sqrt(.Machine$double.eps))
warning(sQuote("varImp"), " with non-unity weights might give misleading results")
foo <- function(b) {
perror = matrix(0, nrow = nperm * length(object@ensemble), ncol = length(xnames))
colnames(perror) = xnames
tree <- object@ensemble[[b]]
if (OOB) {
oob = object@weights[[b]] == 0
} else {
oob = rep(TRUE, length(xnames))
}
p = party_intern(tree, inp, mincriterion, -1L, fun = "R_predict")
eoob = error(p, oob, ...)
for (j in unique(varIDs(tree))) {
for (per in 1:nperm) {
if (conditional || pre1.0_0) {
tmp = inp
ccl = create_cond_list(conditional, threshold,
xnames[j], input)
if (is.null(ccl)) {
perm = sample(which(oob))
}
else {
perm = conditional_perm(ccl, xnames, input,
tree, oob)
}
tmp@variables[[j]][which(oob)] = tmp@variables[[j]][perm]
p = party_intern(tree, tmp, mincriterion, -1L, fun = "R_predict")
} else {
p = party_intern(tree, inp, mincriterion, as.integer(j), fun = "R_predict")
}
minSign = ifelse(MEASUREMINIMIZE, 1, -1)
perror[(per + (b - 1) * nperm), j] = minSign * (error(p,oob, ...) - eoob)
}
}
return(perror)
}
liste_perror <- plyr::alply(1:length(object@ensemble), 1, .fun=foo, .parallel=parallel, .paropts=list(.packages="party"))
all_perror <- Reduce('+',liste_perror)
all_perror = as.data.frame(all_perror)
return(MeanDecrease = colMeans(all_perror, na.rm = TRUE))
}
|
685e19b8d96bf0b9633367a604d5ef84bcbae26d
|
8b0dee9d51374e8bced0f0fd8efa8b6f0c14c9d7
|
/man/pullstrength.Rd
|
eeeb1f691f6d041f5734cbd7d7a328d602ad6b79
|
[] |
no_license
|
rwoldford/qqtest
|
4b9c595ea4c8f7e9ee6f1947e5f94e20c72be0a0
|
f3737db73bfd00e36067d394d749a7232c3f3bb9
|
refs/heads/master
| 2021-02-11T16:26:56.146877
| 2020-03-16T15:49:28
| 2020-03-16T15:49:28
| 244,509,892
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,589
|
rd
|
pullstrength.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pullstrength.R
\docType{data}
\name{pullstrength}
\alias{pullstrength}
\title{Strength of pull for 519 males aged 23-26.}
\format{A data frame with 7 rows and 4 variates:
\describe{
\item{strength}{Pull strength lower bound in pounds.}
\item{nCases}{Number of cases observed with pull strength between this bound and the next.}
\item{percentCases}{Percent of cases observed with pull strength between this bound and the next.}
\item{percentCumulative}{Cumulative percent of cases observed with pull strength up to this bound.}
\item{percentAdjustedCumulative}{Adjust Galton's cumulative percent to include only half the cases between this bound and the next.}
}}
\source{
"Natural Inheritance",
Francis Galton, (1889), Table 1, page 199.
}
\usage{
pullstrength
}
\description{
From measurements made by Francis Galton at the International Health Exhibition in 1884.
}
\details{
\code{qqtest(pullstrength$strength, p=pullstrength$percentCumulative/100, np=519, dist="uniform", main="Galton's ogive of pull strength for 519 males aged 23-26", xlab="Cumulative Proportions (Adjusted)",yAxisAsProbs=FALSE, ylab="Strength in lbs.", type="o")} will effect Galton's Ogive.
\code{qqtest(pullstrength$strength, p=pullstrength$percentAdjustedCumulative/100, np=519, dist="normal", main="Gaussian qqplot of pull strength for 519 males aged 23-26", xlab="Cumulative Proportions (Adjusted)",yAxisAsProbs=FALSE, ylab="Strength in lbs.", type="o")} will effect a normal qqplot for this data.
}
\keyword{datasets}
|
e61fa485f4ee3733506839560cc2b8ccd2158062
|
cb20852e1607f6145194199bcca82b040090edd4
|
/code/var_gamma/join_files.R
|
03b8898ed30da1fb0f5f2d83d0fe72a523e372ed
|
[
"MIT"
] |
permissive
|
yangchuhua/gtex-gwas-analysis
|
7f2f60950ae362ffd1dc8734804dfe36d230cef6
|
300aa123dd769ab175fbf7bb9b26c9e3e4439fa0
|
refs/heads/master
| 2023-07-04T16:50:41.747961
| 2021-08-06T17:35:09
| 2021-08-06T17:35:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
r
|
join_files.R
|
library(dplyr)
library(stringr)
rows <- vector(mode="list", length=0)
dir <- "var_gammas"
for (file in list.files(dir)) {
df <- read.table(file.path(dir, file), header=TRUE)
tissue <- str_match(pattern="(.*)_var_gamma.txt", string=file)[2]
df$tissue <- tissue
rows <- c(rows, list(df))
}
df <- bind_rows(rows)
write.table(df, row.names=FALSE, quote=FALSE, "kk.txt", sep="\t")
|
345332ec2b7bb1258ddfb9b51cae2369e2140bc5
|
fdab0c18eab28477d0980723c5ac5b4ba10c506f
|
/veryoldpictures/shocks/low/span2/1yr/RUNME.r
|
3d319d3f97d8bbbd717c9223a953e8bf2ae1fc32
|
[
"MIT"
] |
permissive
|
MIT-Informatics/PreservationSimulation
|
58b53595841c39e1fe00a05241be43ed0bcf6430
|
38c6641a25108022ce8f225a352f566ad007b0f3
|
refs/heads/master
| 2021-08-25T10:35:46.066554
| 2021-08-24T20:17:13
| 2021-08-24T20:17:13
| 17,369,426
| 9
| 0
|
NOASSERTION
| 2021-03-20T02:55:37
| 2014-03-03T15:03:30
|
R
|
UTF-8
|
R
| false
| false
| 143
|
r
|
RUNME.r
|
# where to run?
# setwd("C:/cygwin64/home/landau/working/PreservationSimulation/shocks/low/span2/1yr")
source("./GetShockLowData_1yr_span2.r")
|
c855dad24708277529b6554aaffaaa91b008a6f8
|
722b1b22e1c2cf99d79d23dbe1181c103236b3b2
|
/Scripts/plotmaker.R
|
b2968127e058f899042ae1e909d21c7d3a01ba71
|
[] |
no_license
|
GFJudd33/UNHRCprocedures
|
4bd15c1a2410f64cf0a08386499260965b885859
|
8af19f845c4460535476cf579fa3b3eba71825ae
|
refs/heads/master
| 2020-12-24T16:07:33.194168
| 2016-03-10T13:49:28
| 2016-03-10T13:49:28
| 40,205,597
| 0
| 0
| null | 2015-08-04T19:57:55
| 2015-08-04T19:45:42
| null |
UTF-8
|
R
| false
| false
| 1,508
|
r
|
plotmaker.R
|
################################################################################
###
### Plot generator for UNHRC engagement.
### Created: 9/23/2015
### Last Modified: 9/23/2015
### Author: B C Smith
###
### Script for user-friendly plot generation.
###
### Inputs:
### COMPARE = A vector of COW country codes for states to be used in plot
### STYEAR = First year for comparison.
### ENDYEAR = Last year for comparison.
###
###
################################################################################
require(ggplot2)
### Set WD
setwd("~/Google Drive/Research/IO_latent.engage")
load("Output/output.data.RData")
plotmaker <- function(compare,
styear,
endyear){
Year <- c(styear:endyear)
# Create time series dataset
ts <- output.data[output.data$COWid %in% compare,]
ts <- ts[ts$Year %in% Year,]
tsplot <- qplot(Year, theta, data = ts, colour = Country)
tsplot <- tsplot + geom_smooth(aes(ymin = theta80lower,
ymax = theta80upper),
data = ts,
stat = "identity")
tsplot <- tsplot + theme_bw() + ggtitle("Posterior Mean and 80% Credible Interval")
tsplot <- tsplot + ylab(expression(theta)) + xlab("Year")
return(tsplot) }
# Compare US and Canada from 2004-2008 just for kicks.
plotmaker(compare = c(2, #USA
20), #Canada
styear = 2004,
endyear = 2008
)
|
8ac3a72b228e5916194a39aa6d2094d7ad6ce859
|
1764b413b0650aa8c9c7a9382bf1f93e26df9eda
|
/benchmark/benchmark_real_kddcup_final.R
|
a27a335d89fc11f7a9f7f202fc968467aeb72e03
|
[
"MIT"
] |
permissive
|
pombredanne/bloomstream
|
f0eb3d265d2c5c742a4726edf5977dc3d07dd131
|
9195e3dea4bc7321a9dbf7a968d9c66285e38f7a
|
refs/heads/master
| 2020-12-06T10:48:36.656806
| 2019-01-08T11:50:32
| 2019-01-08T11:50:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,187
|
r
|
benchmark_real_kddcup_final.R
|
library("stream")
library("streamMOA")
library(hcstream)
source("DSC_HCStream.R")
source("benchmark_extract_utils.R")
source("benchmark_comparison_utils_final.R")
source("benchmark_algs_utils_final.R")
source("benchmark_plot_utils.R")
# KDDCUP99 NETWORK INTRUSION DETECTION DATASET
# 24 attack types + non attack
# continuous attributes: 1, 5, 6, 8:11, 13:20, 23:41
ds.props <- new.env()
ds.props$ds = "kddcup99"
ds.props$fbase = "~/Desktop/benchmarks/datasets/kddcup99/"
ds.props$dsFile = "kddcup99.data.gz"
ds.props$dataCols = c(1, 5, 6, 8:11, 13:20, 23:41)
ds.props$labelCol = 42
ds.props$k = 25
ds.props$entryNo = 4898430
ds.props$wSize = 1000
ds.props$topNo = 150 # only 16 from 150 meet minLabelNo criteria
ds.props$wPadding = 3
ds.props$minLabelNo = 3
#ds.props$measures = c("numClasses", "numMicroClusters", "numMacroClusters", "purity", "cRand", "NMI", "precision", "Rand")
#ds.props$measures = c("numMicroClusters", "numMacroClusters", "purity", "cRand", "NMI", "precision", "Rand")
ds.props$measures = c("numMicroClusters","numMacroClusters", "purity", "Rand")
#type - Use micro- or macro-clusters for evaluation. Auto used the class of dsc to decide.
#assign - Assign points to micro or macro-clusters?
# D-Stream : type="macro", assign="micro",
ds.props$algAssign <- list(
"D-Stream" = "micro",
"CluStream" = "micro",
"DenStream" = "micro",
"BloomStream" = "macro"
)
ds.props$algType <- list(
# "D-Stream" = "micro", original
"D-Stream" = "macro", # micro
"CluStream" = "macro", # micro
"DenStream" = "macro", # micro
"BloomStream" = "macro" # macro
)
ds.props$streamScale <- list(
# "D-Stream" = TRUE, original
"D-Stream" = TRUE,
"CluStream" = TRUE,
"DenStream" = TRUE,
"BloomStream" = TRUE
)
#1 scan and save most interesting windows
#saveTopWindows()
#2 create separate file with each window interval
#ds.props$top.windows = loadTopWindows()
#genTopRows()
#3 compare algorithms against each window interval
#compareAlgorithms()
#compareAlgorithm(2)
#4 plot algorithm comparison evaluation
#topIdxs = c(2,3,5,8,10) # c(1,2,3,4,5)
#topIdxs = c(1,2,3,4,5,6,7,8,9,10) # c(1,2,3,4,5)
# top 8 rand : 7,6, 46,45, 12,13, 2,13
# top 4 rand: 149, 76, 86, 55
# top purity bloom: 12, 3, 2, 10
# top rand bloom: 2, 13, 6, 14 | 13 2 1 11 |
topIdxs = c(1, 2, 4, 6, 7, 13) # 7,6, 1,7, 13,4, 13,2
#topIdxs = c(7,1, 13, 2) # 7,6, 1,7, 13,4, 13,2
plotEvaluation(allIdxs = c(1:14), topIdxs = topIdxs, measure = "Rand")
# "purity", "cRand", "NMI", "precision", "Rand")
#"numClasses", "numMicroClusters", "numMacroClusters", NMI
# "classNo","winNo","labels"
# 7,"4000","1-26, 2-21, 3-185, 4-536, 5-77, 6-138, 7-17"
# 7,"5000","1-26, 2-25, 3-229, 4-287, 5-31, 6-341, 7-61"
# 7,"6000","1-142, 2-123, 3-140, 4-84, 5-185, 6-246, 7-80"
# 7,"12000","1-27, 2-40, 3-188, 4-427, 5-145, 6-146, 7-27"
# 7,"13000","1-95, 2-74, 3-178, 4-113, 5-143, 6-245, 7-152"
# 7,"233000","1-220, 2-523, 3-99, 4-2, 5-20, 6-74, 7-62"
# 7,"234000","1-253, 2-490, 3-90, 4-3, 5-22, 6-72, 7-70"
# 7,"235000","1-290, 2-453, 3-60, 4-3, 5-22, 6-94, 7-78"
# 7,"237000","1-204, 2-494, 3-85, 4-1, 5-44, 6-168, 7-4"
# 7,"248000","1-62, 2-519, 3-271, 4-1, 5-39, 6-94, 7-14"
|
0ac0c8f717066a11dc0b3259c8ed436fe4a526b9
|
0fb039e61656457c70a7466e73f1324ce898013c
|
/pdf_text.R
|
c157c67c75f0fdbf68d69b10de1504a04c8a7a8d
|
[] |
no_license
|
vijayrajmeena/pdf-text-via-tiff
|
3636c89f290a43fe65e35390bd9f468565cf23d2
|
12d4ce326a805a7a63613b741e07899f48a45a50
|
refs/heads/master
| 2021-01-01T18:51:45.564339
| 2017-07-26T18:20:44
| 2017-07-26T18:20:44
| 98,452,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 791
|
r
|
pdf_text.R
|
library(pdftools)
library(tiff)
library(tesseract)
dest <- "C:/Users/XXXX/YYY/ZZZ"
# make a vector of PDF file names
myfiles <- list.files(path = dest, pattern = ".pdf", full.names = TRUE)
sapply(myfiles, FUN = function(i){
file.rename(from = i, to = paste0(dirname(i), "\\", gsub(" ", "", basename(i))))
})
lapply(myfiles, function(i){
for(j in 1:pdf_info(i)$pages){
bitmap <- pdf_render_page(i, dpi = 300, numeric = TRUE,page = j)
tiff::writeTIFF(bitmap,paste0(strsplit(i,"[.]")[[c(1,1)]], j , ".tiff", collapse = "_"))
}
})
myfilestiff <- list.files(path = dest, pattern = ".tiff", full.names = TRUE)
for(k in 1:(length(myfilestiff))){
out <- ocr(myfilestiff[[k]])
write.csv(out, paste0(strsplit(myfilestiff[[k]],"[.]")[[c(1,1)]],".txt"), row.names = FALSE)
}
|
9720a108b0f30b2d89108f7fa5b6b9b0f893461f
|
c2f1df344efb95a137b6449a9ec7221d654afcd8
|
/iris.r
|
d164d244c3b571b48411732660ad007b29b600ec
|
[] |
no_license
|
gtg7784/R_svm
|
091b629fe5a9644c9e6e5e2abe516bfa1a0a3037
|
964a75d5ce1cf6d43152374e2a3c6708457c3156
|
refs/heads/master
| 2022-06-25T12:51:49.944035
| 2020-05-02T17:18:43
| 2020-05-02T17:18:43
| 253,235,690
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,539
|
r
|
iris.r
|
# SVM : Iris Prediction
install.packages("e1071")
library(e1071)
set.seed(123)
x <- matrix(rnorm(20*2),ncol=2) # Normal distribution
y <- c(rep(-1,10),rep(1,10)) # Scatter
x[y==1, ] <-x[y==1, ] +1
plot(x, col=(3-y)) #
dataset <- data.frame(x=x,y=as.factor(y)) # Binary Classification as.factor(y)
str(dataset)
svm_fit <- svm(y ~ ., data=dataset, kernel='linear',cost=10 , scale = F) # cost Margin for learning The higher the cost, the wider the margin
svm_fit
plot(svm_fit, dataset)
attributes(svm_fit)
svm_fit$index # Check Vector
summary(svm_fit)
# Changing Cost
svm_fit <- svm(y ~ ., data=dataset, kernel='linear',cost=0.1 , scale = F) # cost 0.1
plot(svm_fit,dataset)
svm_fit$index # result: 14
# finding best cost - cross validation tune()
set.seed(123) # making Random Number
tune.out <-tune(svm, y ~., data = dataset, kernel='linear', range=list(cost=c(0.001,0.01,0.1,1.5,10,50)))
summary(tune.out) # best cost = 1.5
bestmod <-tune.out$best.model
bestmod
summary(bestmod)
# Prediction
xtest <- matrix(rnorm(20 * 2),ncol=2) # Sampling
ytest <- sample(c(-1,1),20,rep=T) # Sampling
xtest
ytest
xtest[ytest==1, ] <-xtest[ytest==1, ] +1
testda <- data.frame(x=xtest, y=as.factor(ytest))
testda
ypred <- predict(bestmod,testda)
table(예측값=ypred, 실제값=testda$y)
(5+10)/nrow(testda)
# Iris Dataset Prediction
model <- svm(Species ~., data=iris) #Species을 기준으로 분류
model # Support Vectors 만듦
pred <- predict(model, iris[,-5])
pred
table(pred,iris$Species) #교차분할표
(50 + 48 + 48) / nrow(iris)
|
0e814c624b3b4271842a32c1256e05773f441593
|
717ba891c2624890e482e48abcde055ed87a830c
|
/variantComparisons.R
|
aeea9c5d673f3e6fff1592909865bc75015bd86e
|
[] |
no_license
|
yob1997/testing_repo
|
14713c0c562c1ee803d5581a60e0d796e3b68d96
|
1f47f0d7dc51b12581466380ae382cce09b01b2d
|
refs/heads/master
| 2020-11-25T20:03:16.150406
| 2019-12-18T13:31:25
| 2019-12-18T13:31:25
| 228,823,648
| 0
| 0
| null | 2019-12-18T13:31:27
| 2019-12-18T11:14:28
|
Shell
|
UTF-8
|
R
| false
| false
| 9,301
|
r
|
variantComparisons.R
|
# - Comparison of all samples against the T=0h sample;
# o make a table which lists the number of differential variants found for each of those samples,
# and also include the corresponding experimental metadata (experiment type like DCTK, SCTK; time point of sample; replicate ID, strain ID)
# o an R list object for each sample ID, containing another list with:
# - the specific variant identifiers;
# - the genes IDs to which the variants are related (if known
#
# https://pfam.xfam.org/search#tabview=tab1
# https://www.ebi.ac.uk/interpro/
## LOAD PACKAGES #########################################################################################################################
pkgs <- c("vcfR", "adegenet", "adegraphics", "pegas", "StAMPP", "lattice", "gplots", "ape", "ggmap", "ggplot2", "reshape","readxl")
pkgs <- c("vcfR", "readxl","writexl","tidyr","stringr","readr")
for (pkg in pkgs){
if (!pkg %in% installed.packages()) {
install.packages(pkg, dependencies = TRUE)
}
}
lapply(pkgs, library, character.only = TRUE)
remove (pkgs,pkg)
## READ DATA #########################################################################################################################
# meta data of samples
meta <- read_excel("Sample overview_seq.xlsx")
meta <- as.data.frame(meta)
meta$varCountRaw <- 0
meta$varCountFilt <- 0
strainL <- list()
strainDF <- data.frame()
hyponames <- c("KPN700603" = "NPBKJIEB",
"KPN9884" = "OLPKOKPN",
"KPN9749" = "AGIDONFK")
## loop through samples
for (strain in c("KPN9884", "KPN9749", "KPN700603")){
print (strain)
strain <- "KPN9749"
# read vcf file with vcfR
vcf <- read.vcfR(paste0("genome_data/toolOut/GATK_pipe_output/",strain,"/annotated.vcf.gz"))
## cleaning ####
# remove STK_52 & FSTK_120, sample introduces problems.
if (strain == "KPN9884"){vcf <- vcf[,head(colnames(vcf@gt),-1)]}
if (strain == "KPN9749"){vcf <- vcf[,colnames(subset(vcf@gt,select=-c(FSTK_120)))]}
# add varcount per sample to meta
gt <- extract.gt(vcf, element = "GT",as.numeric = T)
for (col in colnames(gt)){
meta[which(meta$`new lable`==col),]$varCountRaw <- sum(gt[,col],na.rm = T)
}
# get all NG c0 samples and put their genotypes in a dataframe.
NGsamples <- meta[which(meta$replicate=="NG" & meta$strain==strain & meta$CONC=="c0"),]$`new lable`
NGvariations <- data.frame(vcf@gt[,c(colnames(vcf@gt)%in%NGsamples)])
# loop though the columns of the NG dataframe, check if the NG samples have the variation (TRUE/FALSE)
for (column in NGsamples){
NGvariations[,column] <- startsWith(vcf@gt[,column], "1:")
}
# only keep the variations that don't occur in NG c0 samples
keepvar <- sapply(1:nrow(NGvariations),function(x) all(NGvariations[x,]==F))
# apply keep to vcfR object
vcf1 <- vcf[keepvar,]
# remove variants where all samples have the same as the reference (artifact of removed problematic samples)
filtempty <- data.frame(vcf1@gt)
for (column in colnames(vcf1@gt[,-1])){
filtempty[,column] <- startsWith(vcf1@gt[,column],"0:") | startsWith(vcf1@gt[,column],".:")
}
# get variants that can cause colistin resistance
keepvar <- sapply(1:nrow(filtempty),function(x) any(filtempty[x,-1]==F))
# apply keep to vcfR object
vcf2 <- vcf1[keepvar,]
## potentional variants that cause colR ####
gt <- extract.gt(vcf1, element = "GT",as.numeric = T)
for (col in colnames(gt)){
meta[which(meta$`new lable`==col),]$varCountFilt <- sum(gt[,col],na.rm = T)
}
# create dataframe for all possible colR variants in the strain
if (strain == "KPN9884"){strainR <- "KPN9596-R"}
if (strain == "KPN9749"){strainR <- "KPN9497-R"}
sampleDF <- data.frame()
for (sample in colnames(vcf2@gt[,-1])){
# get vcf for the sample, select it's variations and get the annotation for the variations
tempvcf <- vcf2[,c("FORMAT", sample)]
pick <- startsWith(tempvcf@gt[,sample],"1:")
if (length(which(pick))==0){next()}
info <- extract.info(tempvcf, element = "ANN",as.numeric = F)
# create a dataframe for all variations of the sample & add info
variations <- data.frame("sample"=sample, "POS" = tempvcf@fix[pick,c("POS")], "REF"=tempvcf@fix[pick,c("REF")], "ALT"=tempvcf@fix[pick,c("ALT")] )
variations$info <- info[pick]
# bind the variationsDF to the sampleDF, creating one DF with all variants for the strain
sampleDF <- rbind(sampleDF, variations)
rownames(sampleDF) <- NULL
}
sampleDF <- merge(meta[,c("strain","replicate","Time point","CONC","new lable")], sampleDF, by.y="sample", by.x="new lable")
# split sampleDF$info into multiple columns save into strainDF
annotationCols <- c("ALT.info","annotation","putative_impact","gene_name","gene_ID","feature_type","feature_ID",
"Transcript_biotype","total","HGVS.c","HGVS.p","cDNA_position","CDS_position","Protein_position")
strainDF <- data.frame(separate(sampleDF, info, into=annotationCols ,sep="\\|"))
strainDF <- strainDF[!duplicated(strainDF),]
strainDF$ALT.info <- NULL
##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@testing@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
strainDF$gene_name
ID <- "_00001"
gff_line <- system(paste0("cat genome_data/toolOut/prokka/",strain,"/",strain,".gff | egrep ",ID),intern = T)
product <- sub("^.*;product=", "", gff_line)
inference <- sub(";.*","",sub("^.*;inference=", "", gff_line))
##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@testing@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# add dataframe of strain to the list of dataframes
strainL[[strain]] <- strainDF
## create FASTA of hypothetical proteins ####
# get list of sequences for the hypothetical proteins
hypo_prots <- strainDF[contains(hyponames[strain],vars = strainDF$gene_name),c("gene_name","gene_ID", "new.lable")]
hypo_prots <- unique(hypo_prots$gene_ID)
# read Nucleotide FASTA file of all the prediction transcripts (CDS, rRNA, tRNA, tmRNA, misc_RNA)
sequences <- readDNAStringSet(paste0("genome_data/toolOut/prokka/",strain,"/",strain,".ffn"))
# set file save name
file_s <- "genome_data/toolOut/variantComparison_output/hypothetical_proteins.fa"
for (protein in hypo_prots){
if (grepl("-", protein)){ # two gene names
write.table(paste0("## ~~>",protein,"|",strain), file = file_s, append = T,row.names = F, quote = F,col.names = F) #write protein names to .fa file
protein_u <- unlist(unique(strsplit(protein, "-")))
for (i in protein_u){
write.table(paste0(">",i,"|",strain), file = file_s, append = T, quote = F, row.names = F,col.names = F)
prot_seq <- sequences[grep(i, names(sequences))]
write.table(prot_seq, file = file_s, append = T, quote = F, row.names = F,col.names = F)
}
write.table(paste0("## <~~",protein,"|",strain), file = file_s, append = T,row.names = F, quote = F,col.names = F)
}else{ # one gene name
write.table(paste0(">",protein,"|",strain), file = file_s, append = T,row.names = F, quote = F,col.names = F) #write protein names to .fa file
prot_seq <- sequences[grep(protein, names(sequences))]
write.table(prot_seq, file = file_s, append = T, quote = F, row.names = F,col.names = F)
}
}
}
# write variation dataframes to xlsx file
write_xlsx(strainL, "genome_data/toolOut/variantComparison_output/snpList.xlsx")
# get important columns and write to csv file
meta_save <- na.omit(meta[,c("strain", "experiment", "Time point", "CONC", "replicate", "New ID", "new lable","varCountRaw", "varCountFilt")])
write.csv(meta_save, file = "genome_data/toolOut/variantComparison_output/numberOfSNPs.csv")
quit()
#https://cran.rstudio.com/web/packages/g3viz/vignettes/introduction.html#31_example_1:_visualize_genetic_mutation_data_from_maf_file
# create plots ###EXPERIMENTAL
# Unreachable: quit() above terminates the script before this point.
#vcfR
gff <- read.delim(paste0("genome_data/toolOut/prokka/",strain,"/",strain,".gff"), header = F, comment.char = "#")
gff.genes <- gff[gff[,3]=="gene",]
dna <- ape::read.dna(gzfile("genome_data/toolOut/consensus/DTK_35_consensus.fa"), format = "fasta")
mat.dna <- as.matrix(dna)
chrom <- create.chromR(vcf=vcf1, ann = gff.genes, seq = mat.dna)
gt <- extract.gt(vcf1, element = "GT",as.numeric = T)
barplot(apply(gt, MARGIN=2, count, na.rm=TRUE),las = 3, main = strain)
plot(chrom)
chrom <- proc.chromR(chrom, verbose = T)
chromoqc(chrom, dp.alpha = 22)
# GenVisR
library(GenVisR)
waterfall(vcf1)
# variantannotation
library(VariantAnnotation)
# NOTE(review): this closing brace has no visible matching "{" (the strain
# loop already closed above); it would be a parse error if this file were
# source()d as a whole -- confirm and remove if it is a leftover.
}
## Edit the worked-hours ("gewerkte uren") timesheet in place:
## read the workbook, normalise the Excel serial date and the time columns
## to display strings, patch two rows by hand, and write the file back.
#
# Fix: the original attached library(xlsx), but read_xlsx() is exported by
# readxl and write_xlsx() by writexl -- the xlsx package provides neither.
library(readxl)
library(writexl)
library(dplyr)
uren <- read_xlsx("gewerkte uren_n.xlsx") %>%
  mutate(
    # Excel stores dates as serial day numbers with origin 1899-12-30.
    datum = as.Date(as.numeric(datum), origin = "1899-12-30"),
    begin = format(begin, "%H:%M"),
    eind = format(eind, "%H:%M"),
    totaal = format(totaal, "%H:%M"),
    `totaal week` = format(`totaal week`, "%H:%M")
  )
# (Removed a stray `as.Date(uren$begin, ...)` call: after the mutate above,
# `begin` holds "HH:MM" strings, so as.Date() would error, and the result
# was never used.)
# Manual corrections for two specific entries (rows 67 and 68).
uren[68,c("begin","eind","totaal")] <- c("11:00","14:00","3:00")
uren[67,c("begin","eind","totaal")] <- c("10:30","17:30","7:00")
write_xlsx(uren, "gewerkte uren_n.xlsx")
|
1782cdf5342a33435de2a57852c430619973b1f1
|
590bcfd7d763bcde72a2dd7b114c72d830138af1
|
/finalvisualizations/shinyanimation.R
|
85ce473e697295934639ff68623d38ee70267a24
|
[] |
no_license
|
stelmacm/budwormvisualizationStats744
|
d76a8edea81e8956902e8c32fc9449faed077463
|
a19d72fc4a30d670c592bb3c9d7e47832306796b
|
refs/heads/master
| 2020-09-10T17:18:17.409876
| 2020-01-02T03:36:53
| 2020-01-02T03:36:53
| 221,774,988
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,538
|
r
|
shinyanimation.R
|
library(plotly)
library(ggplot2)
library(tidyverse)
library(gapminder)
library(gganimate)
library(shiny)
library(grid)
library(colorspace)
# Discrete colour palette for the larval stages (sequential, reversed so
# later stages are darker).
cbPalette <- rev(sequential_hcl(7, 'BurgYl'))
# Load the long-format budworm development data from the project repository.
newdata <- readr::read_csv("https://raw.githubusercontent.com/stelmacm/budwormvisualizationStats744/master/datasets/ttm_tall2.csv")
# Fix: toupper() is vectorised; the original sapply() call was redundant.
newdata$prov <- toupper(newdata$prov)
# Categorical variables used for grouping/shaping/colouring in the plot.
newdata$prov <- factor(newdata$prov)
newdata$temp <- factor(newdata$temp)
newdata$individual <- factor(newdata$individual)
newdata$stage <- factor(newdata$stage)
# Shiny UI: a colony selector plus two temperature selectors, so the user
# can compare larval development at two temperatures on one animated plot.
ui <- fluidPage(
  titlePanel("Budworm Evolution over Time"),
  sidebarLayout(
    sidebarPanel(
      # Colony (province/territory) of origin.
      selectInput("prov", "Colony:",
                  c("Ontario" = "ON",
                    "North West Territories" = "NWT",
                    "New Brunswick" = "NB",
                    "Quebec" = "QC")),
      br(),
      # First rearing temperature.
      selectInput("temp", "Temperature:",
                  c("5" = "5",
                    "10" = "10",
                    "15" = "15",
                    "20" = "20",
                    "25" = "25",
                    "30" = "30",
                    "35" = "35")),
      br(),
      # Second rearing temperature; defaults to 10 so the two selectors
      # start on different values.
      selectInput("temp1", "Temperature:",
                  c("5" = "5",
                    "10" = "10",
                    "15" = "15",
                    "20" = "20",
                    "25" = "25",
                    "30" = "30",
                    "35" = "35"),
                  selected = "10")
    ),
    mainPanel(
      imageOutput("budwormplot")
    )
  )
)
# Shiny server: filters the data to the chosen colony and the two chosen
# temperatures, then renders an animated GIF of larval development.
server <- function(input, output){
  # Subset of newdata for the selected colony, with the two selected
  # temperatures stacked and tagged ("df1" / "df2") so they can be told
  # apart in the plot.
  target_data <- reactive({
    a <- subset(newdata, newdata$prov %in% input$prov)
    a <- droplevels(a)
    r <- subset(a, a$temp %in% input$temp)
    r$labs <- "df1"
    b <- subset(a, a$temp %in% input$temp1)
    b$labs <- "df2"
    d <- rbind(r, b)
    d <- droplevels(d)
    d
  })

  output$budwormplot <- renderImage({
    # Fix: the original created a tempfile but then saved and served a fixed
    # "tempe.gif" in the working directory, which breaks with concurrent
    # sessions. Render each animation to its own temp file instead.
    outfile <- tempfile(fileext = ".gif")
    df <- target_data()
    # y-position for the dashed reference line: the highest individual id
    # present at the second temperature.
    # NOTE(review): as.numeric() on a factor yields level codes, not labels;
    # confirm that is the intended behaviour here.
    d1 <- df %>%
      filter(labs == "df2" & (temp == input$temp1)) %>%
      pull(individual) %>%
      as.numeric() %>%
      max()
    # Animated development trajectory, revealed over rearing days.
    tempe <- ggplot(df, aes(progress, individual, group = individual, shape = temp)) +
      geom_point(size = 2, aes(color = stage), stroke = 2) +
      scale_color_manual(values = cbPalette) +
      scale_shape_manual(values = c(1, 16)) +
      transition_reveal(day) +
      coord_cartesian(clip = 'off') +
      theme_classic() +
      theme(plot.margin = margin(5.5, 40, 5.5, 5.5),
            axis.text.y = element_blank(),
            axis.ticks.y = element_blank(),
            text = element_text(size = 15)) +
      labs(colour = "Stage", x = "Larval Stage", y = "Budworm",
           title = "Development of Budworm Larvae Through Time", shape = "Temperature in °") +
      scale_x_continuous(breaks = 0:5, labels = c("L2", "L3", "L4", "L5", "L6", "Pupa")) +
      geom_hline(yintercept = d1, linetype = "dashed")
    # Save the rendered animation and hand the file to Shiny.
    anim_save(outfile, tempe)
    list(src = outfile,
         contentType = 'image/gif',  # fix: was 'gif', not a valid MIME type
         width = 600,
         height = 600,
         alt = "This is alternate text"
    )
  }, deleteFile = TRUE)  # per-render temp files can be removed once served
}
# Create Shiny app ----
# Launch the app with the UI and server defined above.
shinyApp(ui, server)
|
878f154c1914ed03e20c675a6767ca64dde54c6e
|
6c584706e6eab645e11357bde8f393013c69e4c9
|
/Linguagem de Programação Estatística/Aula 1/exercicio1.r
|
bed4eb834b7b792660cbba02657dd97b77d154c2
|
[] |
no_license
|
charlesartbr/fiap-mba-big-data-data-science
|
cce1b64c301187a049cd9929d5fafd7e6985503e
|
de4d8372a7ce26ac8e4556925416e5c9e1932020
|
refs/heads/master
| 2022-09-05T00:33:21.367281
| 2022-08-09T14:01:28
| 2022-08-09T14:01:28
| 185,289,505
| 0
| 1
| null | 2021-01-15T16:47:00
| 2019-05-07T00:10:35
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 467
|
r
|
exercicio1.r
|
# Introductory exercises: arithmetic, comparison, and vectorised operations.
# (Comments translated to English; assignments normalised to `<-`.)
# Exercise 1
aa <- ((sqrt(16) / 2) * 3 ^ 2) / 2 * (9 - 2 ^ 3)
aa
# Exercise 2 (note: -2 ^ 3 parses as -(2^3) = -8 in R)
bb <- -(-2 ^ 3) + ((-1) ^ 0) - sqrt(25 - 3 ^ 2) - (5 ^ 3) / 25
bb
# Exercise 3: compare the two results
aa > bb
# Exercise 4 (fix: was `a = c(1, 2, 3)`, inconsistent with `<-` elsewhere)
a <- c(1, 2, 3)
# Exercise 5: vectorised scalar addition
a + 1
# Exercise 6
a / 2
# Exercise 7
a * 2
# Exercise 8
b <- c(4, 5, 6)
# Exercise 9: element-wise sum
a + b
# Exercise 10
a - b
# Exercise 11: element-wise comparison
c <- c(1, 3, 5)
c == a
# Exercise 12
a < c
# Exercise 13
sqrt(a + b)
|
60d114a7747d5362f07cbd94a1d0c01b845cb549
|
b2180e2c3ceb047c1103f042fff2af2d54146815
|
/man/SAENET.predict.Rd
|
dfc1cc906dc271e8b5c6c8b7d023bd3edcc2d1f2
|
[] |
no_license
|
cran/SAENET
|
323c57888760b6f91731fab0d0ea69cfab2634e5
|
f37c03c673dceea8052dcea5e961fa768974a966
|
refs/heads/master
| 2020-05-07T12:39:06.840609
| 2015-06-04T00:00:00
| 2015-06-04T00:00:00
| 30,255,903
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,813
|
rd
|
SAENET.predict.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/SAENET.R
\name{SAENET.predict}
\alias{SAENET.predict}
\title{Obtain the compressed representation of new data for specified layers from a stacked autoencoder.}
\usage{
SAENET.predict(h, new.data, layers = c(1), all.layers = FALSE)
}
\arguments{
\item{h}{The object returned from \code{SAENET.train()}}
\item{new.data}{A matrix of training data.}
\item{layers}{A numeric vector indicating which layers of the stacked autoencoder to return output for}
\item{all.layers}{A boolean value indicating whether to override \code{layers} and return the encoded output for all layers. Defaults to \code{FALSE}}
}
\value{
A list, for which each element corresponds to the output of \code{predict.autoencoder()} from package \code{autoencoder} for the specified layers of the stacked autoencoder.
}
\description{
Obtain the compressed representation of new data for specified layers from a stacked autoencoder.
}
\examples{
library(autoencoder)
data(iris)
#### Train a stacked sparse autoencoder with a (5,3) architecture and
#### a relatively minor sparsity penalty. Try experimenting with the
#### lambda and beta parameters if you haven't worked with sparse
#### autoencoders before - it's worth inspecting the final layer
#### to ensure that output activations haven't simply converged to the value of
#### rho that you gave (which is the desired activation level on average).
#### If the lambda/beta parameters are set high, this is likely to happen.
output <- SAENET.train(as.matrix(iris[1:100,1:4]), n.nodes = c(5,3),
lambda = 1e-5, beta = 1e-5, rho = 0.01, epsilon = 0.01)
predict.out <- SAENET.predict(output, as.matrix(iris[101:150,1:4]), layers = c(2))
}
|
07bf80bd59a269fea90faa81d55b09424156b3b5
|
f81ac43a1d02013a9cb9eebc2a7d92da4cae9169
|
/R/values2labels.R
|
a5cc2d316e03ee94109b6cf2d0f0926f65c8c13a
|
[] |
no_license
|
gdemin/expss
|
67d7df59bd4dad2287f49403741840598e01f4a6
|
668d7bace676b555cb34d5e0d633fad516c0f19b
|
refs/heads/master
| 2023-08-31T03:27:40.220828
| 2023-07-16T21:41:53
| 2023-07-16T21:41:53
| 31,271,628
| 83
| 15
| null | 2022-11-02T18:53:17
| 2015-02-24T17:16:42
|
R
|
UTF-8
|
R
| false
| false
| 1,576
|
r
|
values2labels.R
|
#' Replace vector/matrix/data.frame/list values with corresponding value labels.
#'
#' \code{values2labels} replaces vector/matrix/data.frame/list values with
#' corresponding value labels. If there are no labels for some values they are
#' converted to characters in most cases. If there are no labels at all for
#' variable it remains unchanged. \code{v2l} is just shortcut to \code{values2labels}.
#'
#' @param x vector/matrix/data.frame/list
#' @return Object of the same form as x but with value labels instead of values.
#'
#' @seealso \link{names2labels}, \link{val_lab}, \link{var_lab}
#' @examples
#' data(mtcars)
#' var_lab(mtcars$mpg) = NULL
#' val_lab(mtcars$am) = c(" automatic" = 0, " manual" = 1)
#'
#' summary(lm(mpg ~ ., data = values2labels(mtcars[,c("mpg","am")])))
#' @export
values2labels = function(x){
# S3 generic: dispatch on the class of x (default/matrix/data.frame/list).
UseMethod("values2labels")
}
#' @export
values2labels.default = function(x){
    # Map each value of x onto the name of its matching value label.
    labels = val_lab(x)
    if (is.null(labels)) {
        # No value labels attached: return the input untouched.
        return(x)
    }
    out = names(labels)[match(x, labels, incomparables = NA)]
    # Values without a label keep their original (character-coerced) value.
    unlabelled = is.na(out)
    if (any(unlabelled)) out[unlabelled] = x[unlabelled]
    # Carry the variable label over to the result.
    var_lab(out) = var_lab(x)
    out
}
#' @export
values2labels.matrix = function(x){
    # Translate the values, then restore the original matrix dimensions
    # (the labelled result comes back as a plain vector).
    labelled = values2labels.default(x)
    matrix(labelled, nrow = nrow(x), ncol = ncol(x))
}
#' @export
values2labels.data.frame = function(x){
# A data.frame is a list of columns, so delegate to the list method.
values2labels.list(x)
}
#' @export
values2labels.list = function(x){
    # Translate every element in place; [[<- keeps the container's
    # names, class and other attributes intact (works for data.frames too).
    for (i in seq_along(x)) {
        x[[i]] = values2labels(x[[i]])
    }
    x
}
# v2l: short alias for values2labels(), documented on the same Rd page.
#' @export
#' @rdname values2labels
v2l = values2labels
|
54bbbcc0eec5a7864f915d673b89810f03fdfde9
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/gutenbergr/tests/testthat/test-metadata.R
|
946909b186565754919d6de86d0f967bcb3e077b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 706
|
r
|
test-metadata.R
|
context("Gutenberg metadata")
# NOTE(review): these tests exercise the bundled Gutenberg catalogue and the
# count thresholds (40000, 30, 50) are deliberately loose lower bounds that
# should keep passing as the catalogue grows.
test_that("gutenberg_works does appropriate filtering by default", {
w <- gutenberg_works()
# Default filtering keeps only English-language, non-copyrighted works.
expect_true(all(w$language == "en"))
expect_false(any(grepl("Copyright", w$rights)))
expect_gt(nrow(w), 40000)
})
test_that("gutenberg_works takes filtering conditions", {
# A bare expression is forwarded as a filter condition.
w2 <- gutenberg_works(author == "Shakespeare, William")
expect_gt(nrow(w2), 30)
expect_true(all(w2$author == "Shakespeare, William"))
})
test_that("gutenberg_works does appropriate filtering by language", {
w_de <- gutenberg_works(languages = "de")
expect_true(all(w_de$language == "de"))
# languages = NULL disables the language filter entirely.
w_lang <- gutenberg_works(languages = NULL)
expect_gt(length(unique(w_lang$language)), 50)
})
|
168c3272de3e2d2dfe1393d2b032bf1b8bf29eed
|
e953c138d3808d92fcc9848824985be5bc42f034
|
/r/ifnotnull/2.r
|
bcb42da5ccf4c5ed557f13e04534f5cdf941fc4f
|
[] |
no_license
|
hotoku/samples
|
1cf3f7006ae8ba9bae3a52113cdce6d1e1d32c5a
|
ce0d95d87e08386d9eb83d7983bd2eaff0682793
|
refs/heads/main
| 2023-08-09T09:05:15.185012
| 2023-08-04T09:29:06
| 2023-08-04T09:29:06
| 222,609,036
| 0
| 0
| null | 2022-03-30T01:44:03
| 2019-11-19T04:35:27
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 810
|
r
|
2.r
|
# Return `val` unless it is NULL, in which case return `default`.
# `default` is evaluated lazily, so it may be an expression that errors --
# the demos below rely on this (a poor man's null-coalescing operator).
if_not_null <- function(val, default = NULL){
  if (is.null(val)) default else val
}
# TRUE when the current front end is RStudio (reported as the first
# command-line argument of the running R process).
is.rstudio <- function(){
  front_end <- commandArgs()[1]
  front_end == "RStudio"
}
# Thin wrapper around stop(), kept as a named hook for the demos below.
mystop <- function(msg) stop(msg)
# Demonstrations of if_not_null() chained with %>%.
# NOTE(review): %>% requires magrittr (or dplyr) to be attached; no library()
# call is visible in this file -- confirm it is loaded before sourcing.
# `l1` below is never defined, but the expression stays unevaluated because
# the piped value (1) is non-NULL.
if_not_null(1) %>%
if_not_null(l1$val) # => 1
if_not_null(1) %>%
if_not_null(stop("val is not set")) # => 1
if_not_null(NULL) %>%
if_not_null(10) # => 10
# if_not_null(NULL) %>%
# if_not_null(stop("val is not set")) # error
if_not_null(1) %>%
if_not_null(if(is.rstudio()) 10 else stop("val is not set")) # => 1
if_not_null(1) %>%
if_not_null(if(!is.rstudio()) 10 else stop("val is not set")) # => 1
if_not_null(NULL) %>%
if_not_null(if(is.rstudio()) 10 else stop("val is not set")) # => 10
# The final line errors deliberately (outside RStudio the default branch stops).
if_not_null(NULL) %>%
if_not_null(if(!is.rstudio()) 10 else stop("val is not set")) # error
|
ea57475281e5c6826685ab10f909be268d3b64e5
|
20c8677824f667184fb4c3c1eb83ed97fe205121
|
/Plot1.R
|
1c56752f6f74a83ee66bbc64afff2ba5b6b87211
|
[] |
no_license
|
acsabarreto/ExData_Plotting1
|
22cba7a0a6d4fd3add8ced73eecff09f2908882b
|
92f4ac451c00a6dee64f58b1f4437626257d2786
|
refs/heads/master
| 2020-12-30T14:01:15.221203
| 2017-05-15T00:30:05
| 2017-05-15T00:30:05
| 91,279,853
| 0
| 0
| null | 2017-05-15T00:22:35
| 2017-05-15T00:22:35
| null |
UTF-8
|
R
| false
| false
| 470
|
r
|
Plot1.R
|
# Plot 1: histogram of Global Active Power for a two-day window of the UCI
# household power consumption data, written to plot1.png.
# NOTE(review): with skip = 66636 and header = TRUE, read.table() skips the
# real header and then treats the first *data* row after the skip as the
# header row, consuming one observation -- confirm the row arithmetic.
holding <- read.table("household_power_consumption.txt", header = TRUE, skip = 66636, nrows = 2881, sep = ";")
colnames(holding) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Parse the day-first date strings (strptime returns POSIXlt).
holding$Date <- strptime(holding$Date, "%d/%m/%Y")
# NOTE(review): "Actrive" typo in the x-axis label below (runtime string,
# left untouched here).
png("plot1.png")
hist(holding$Global_active_power, col = "red", main = "Global Active Power",xlab = "Global Actrive Power(Kilowatts)")
dev.off()
|
8eb03768706c6e2d674de455637a5d50181c3994
|
1d7b8d97be6d3b3aed26bc19ea6855bbdb2d21bc
|
/man/estimate-Diffusion-method.Rd
|
e050bcda9f4e97eb8466c34bb4e79acbad280284
|
[] |
no_license
|
cran/BaPreStoPro
|
18633df8e18b518225e7c01147473684d9369a46
|
f3e8f06b07ec4b4ca0be9de3d481734d5e154c31
|
refs/heads/master
| 2021-01-19T01:11:31.691784
| 2016-06-07T14:28:11
| 2016-06-07T14:28:11
| 60,611,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,464
|
rd
|
estimate-Diffusion-method.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/estimate.R
\docType{methods}
\name{estimate,Diffusion-method}
\alias{estimate,Diffusion-method}
\title{Estimation for diffusion process}
\usage{
\S4method{estimate}{Diffusion}(model.class, t, data, nMCMC, propSd,
adapt = TRUE, proposal = c("normal", "lognormal"))
}
\arguments{
\item{model.class}{class of the diffusion process model including all required information, see \code{\link{Diffusion-class}}}
\item{t}{vector of time points}
\item{data}{vector of observation variables}
\item{nMCMC}{length of Markov chain}
\item{propSd}{vector of proposal variances for \eqn{\phi}}
\item{adapt}{if TRUE (default), proposal variance is adapted}
\item{proposal}{proposal density: "normal" (default) or "lognormal" (for positive parameters)}
}
\description{
Bayesian estimation of the parameters \eqn{\phi} and \eqn{\gamma^2} of the stochastic process
\eqn{dY_t = b(\phi,t,Y_t)dt + \gamma \widetilde{s}(t,Y_t)dW_t}.
}
\examples{
model <- set.to.class("Diffusion", parameter = list(phi = 0.5, gamma2 = 0.01))
t <- seq(0, 1, by = 0.01)
data <- simulate(model, t = t, y0 = 0.5, plot.series = TRUE)
est_diff <- estimate(model, t, data, 1000)
plot(est_diff)
}
\references{
Hermann, S., K. Ickstadt and C. H. Mueller (2016).
Bayesian Prediction of Crack Growth Based on a Hierarchical Diffusion Model.
Applied Stochastic Models in Business and Industry, DOI: 10.1002/asmb.2175.
}
|
ea8446bc7670a4a23d4f738dd2c4652397600535
|
c3d47a7fd6b256fa25d7a801d6403ed098436e27
|
/cachematrix.R
|
e3919db8e571444a25ec74e233f9933fa9fda909
|
[] |
no_license
|
jiyanez/ProgrammingAssignment2
|
5946c92c93bf3597f089e6a2e457f391e9db65fd
|
d93411a74a61f01a4b5f9a59d27b147d7df4686e
|
refs/heads/master
| 2022-11-15T08:51:39.200122
| 2020-07-06T11:05:34
| 2020-07-06T11:05:34
| 277,514,307
| 0
| 0
| null | 2020-07-06T10:41:11
| 2020-07-06T10:41:10
| null |
UTF-8
|
R
| false
| false
| 1,294
|
r
|
cachematrix.R
|
## cachematrix.R -- a matrix wrapper that memoises its inverse.
##
## makeCacheMatrix() builds a "special matrix": a list of four closures that
## share the wrapped matrix `x` and a cached inverse. cacheSolve() (below)
## computes the inverse once and reuses the cached value on later calls.
## Note: when a matrix is passed as the constructor argument there is no
## need to call $set() afterwards.
makeCacheMatrix <- function(x = matrix()) {
    # Cache slot for the inverse; NULL means "not computed yet" (or stale).
    cached_inverse <- NULL
    # Replace the wrapped matrix and invalidate any cached inverse.
    set <- function(y) {
        x <<- y
        cached_inverse <<- NULL
    }
    # Accessors for the matrix and the cached inverse.
    get <- function() x
    setinv <- function(inverse) cached_inverse <<- inverse
    getinv <- function() cached_inverse
    list(set = set, get = get,
         setinv = setinv,
         getinv = getinv)
}
## cacheSolve() returns the inverse of the special matrix built by
## makeCacheMatrix(), computing it only on the first call and serving the
## cached value afterwards.
cacheSolve <- function(x, ...) {
    cached <- x$getinv()
    if (is.null(cached)) {
        # First call: compute the inverse, store it, return it.
        cached <- solve(x$get(), ...)
        x$setinv(cached)
    } else {
        message("getting cached data")
    }
    cached
}
|
585592576f56b64d47bd112fdf9dfff40558397e
|
e754c3be8ed820b03964fcd0d9717315c3451061
|
/man/performance_distribution.Rd
|
d124c3ed67b198d26c8507fc53c86c47034936e8
|
[] |
no_license
|
lejarx/Rnumerai
|
73b6791a1cc0ab0655c2fd9c97f3683d4e12f291
|
9bb22342c4ceb1c3b47a311b91967e6062f58bc1
|
refs/heads/master
| 2022-04-20T12:35:04.516834
| 2020-04-17T19:12:49
| 2020-04-17T19:12:49
| 258,499,341
| 1
| 0
| null | 2020-04-24T11:56:29
| 2020-04-24T11:56:28
| null |
UTF-8
|
R
| false
| true
| 635
|
rd
|
performance_distribution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/source.R
\name{performance_distribution}
\alias{performance_distribution}
\title{Get the performance of the user as a distribution}
\usage{
performance_distribution(
username,
metric,
merge = FALSE,
round_aggregate = TRUE
)
}
\arguments{
\item{username}{A vector of one or more usernames}
\item{metric}{A statistic, as a character vector.}
\item{merge}{If TRUE, combine the usernames into a single result}
\item{round_aggregate}{If TRUE, aggregate the submission data by round}
}
\description{
Get the performance of the user as a distribution
}
|
74e9fa70b75f989e3ee9f65465a556481c29db0f
|
16db8326cd3a6ef625f82e21719c5a3b01b4f0bd
|
/project one week 2/project one/Population, Samples, and Estimates Exercises.R
|
c72f48b0c0db4134b8aa24685986adff3b48cc3d
|
[] |
no_license
|
diegomali/Statistics-and-R
|
8d27091b52bf0b0743d3c4b5daa2938bdf392d2b
|
86c8b00baf6434a277a8a9daf481cceba315bd72
|
refs/heads/master
| 2020-06-22T13:06:58.481172
| 2016-11-30T20:06:07
| 2016-11-30T20:06:07
| 74,591,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,981
|
r
|
Population, Samples, and Estimates Exercises.R
|
# Population vs. sample estimates on the mice body-weight data.
# NOTE: statement order matters -- each set.seed(1) must immediately precede
# its sample() call so the exercises reproduce the expected draws.
library(downloader)
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/mice_pheno.csv"
filename <- basename(url)
download(url, destfile=filename)
dat <- read.csv(filename)
##We will remove the lines that contain missing values:
dat <- na.omit( dat )
library(dplyr)
controlMales <- filter(dat, Sex=="M") %>% filter(Diet=="chow") %>% select(Bodyweight) %>% unlist
#Use dplyr to create a vector x with the body weight of all males on the control (chow) diet. What is this population's average?
x <-mean(controlMales)
#Now use the rafalib package and use the popsd function to compute the population standard deviation.
library(rafalib)
popsd(controlMales)
#Set the seed at 1. Take a random sample X of size 25 from x. What is the sample average?
set.seed(1)
XOb <- mean(sample(controlMales, 25))
#Use dplyr to create a vector y with the body weight of all males on the high fat hf) diet. What is this population's average?
controlMalesHF <- filter(dat, Sex=="M") %>% filter(Diet=="hf") %>% select(Bodyweight) %>% unlist
y <- mean(controlMalesHF)
#Now use the rafalib package and use the popsd function to compute the population standard deviation.
popsd(controlMalesHF)
#Set the seed at 1. Take a random sample Y of size 25 from y. What is the sample average?
set.seed(1)
YOb <- mean(sample(controlMalesHF, 25))
#What is the difference in absolute value between y¯−x¯ and Y¯−X¯?
(y - x) - (YOb - XOb)
#Repeat the above for females. Make sure to set the seed to 1 before each sample call. What is the difference in absolute value between y¯−x¯ and X¯−Y¯?
controlFemales <- filter(dat, Sex=="F" & Diet=="chow") %>% select(Bodyweight) %>% unlist
controlFemalesHF <- filter(dat, Sex=="F" & Diet=="hf") %>% select(Bodyweight) %>% unlist
x <- mean(controlFemales)
y <- mean(controlFemalesHF)
set.seed(1)
XOb <- mean(sample(controlFemales, 25))
set.seed(1)
YOb <- mean(sample(controlFemalesHF, 25))
abs((y - x) - (YOb - XOb))
|
3a1b3767e05c0bbf7f596bbf0b0cd38c35ff93fb
|
d73d1fffc2c69ed18638262380186db28ef129c8
|
/R/pdfinalsignal.R
|
f0af336e1ad16d9eb035ec5b79bb00b9f8e99442
|
[] |
no_license
|
estalenberg/ptrm
|
7f48a545e679fcefcddaf7009c8f7304e21883bf
|
262755ead3ee8b6e0900775134ac401e799ddc4c
|
refs/heads/master
| 2020-04-30T12:26:35.587724
| 2019-07-31T09:22:56
| 2019-07-31T09:22:56
| 176,826,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,957
|
r
|
pdfinalsignal.R
|
#' Final dynamic price signal
#'
#' Builds the 24 x length(yearslabel) table of peak demand after a dynamic
#' price signal is applied: evening peak hours (18:00-21:00) are reduced by
#' the signal, compounded per projection year, and the shaved demand is
#' redistributed to overnight hours (20%) and daytime hours (80%).
#'
#' @param yearslabel label of projection years from 2020
#' @param pd_ev peak demand adjusted for offgrid, ee, solar, batteries and ev
#'   (24 rows = hours 1..24, one column per projection year)
#' @param signal.in strength of the dynamic price signal: one of
#'   "none", "mid-strength" or "strong"
#' @return data.frame with 24 rows (hours) and one column per year
#'
#' @export
#'
pdfinalsignal_fun = function(yearslabel, pd_ev, signal.in){
  # Map the signal label onto its percentage strength.
  sig.num = c(0, 0.375, 0.75)
  sig.lab = c("none", "mid-strength", "strong")
  idx = match(signal.in, sig.lab)
  # Fix: the original silently produced a zero-length signal (and a cryptic
  # downstream error) for an unknown label; fail fast instead.
  if (is.na(idx)) {
    stop("signal.in must be one of: ", paste(sig.lab, collapse = ", "))
  }
  signal.in = sig.num[idx] / 100  # convert percent to proportion

  # Empty 24 x n-years result table; rows 1:24 are 24-hour clock hours,
  # columns are named by projection year.
  pd = as.data.frame(matrix(NA_real_, ncol = length(yearslabel), nrow = 24))
  names(pd) = yearslabel

  # Steps 20-21: 17:00 and 22:00-24:00 pass through unchanged (rounded).
  for (j in c(17, 22, 23, 24)) {
    pd[j, ] = round(pd_ev[j, ], digits = 5)
  }

  # Step 22: 18:00-21:00 shrink by the signal, compounded per year.
  # Fix: seq_along() instead of 1:length(), safe when yearslabel is empty.
  for (i in seq_along(pd)) {
    for (j in 18:21) {
      pd[j, i] = round(pd_ev[j, i] * (1 - signal.in)^((yearslabel[i] + 1) - yearslabel[1]), digits = 5)
    }
  }

  # Step 23: 01:00-08:00 absorb 20% of the shaved evening demand,
  # spread evenly over the 8 hours.
  for (i in seq_along(pd)) {
    for (j in 1:8) {
      pd[j, i] = round((pd_ev[j, i] + ((sum(pd_ev[18:21, i]) - sum(pd[18:21, i])) / 8 * 0.2)), digits = 5)
    }
  }

  # Step 24: 09:00-16:00 absorb the remaining 80%.
  for (i in seq_along(pd)) {
    for (j in 9:16) {
      pd[j, i] = round((pd_ev[j, i] + ((sum(pd_ev[18:21, i]) - sum(pd[18:21, i])) / 8 * 0.8)), digits = 5)
    }
  }

  pd
}
|
4fb0e1d28f8d415d100859c8f7b7de63d6a22258
|
b74674a867526ef01ad189adda2da14b8be4fa59
|
/man/filter_mentions.Rd
|
109521f62c20768a242941179fb4b848e13d093d
|
[
"MIT"
] |
permissive
|
brandseye/brandseyer2
|
d33a84c7194b397068793da6a305154addf73aa2
|
59fab98f11b9b05cbe5cac6409b2453374b7267e
|
refs/heads/master
| 2021-09-15T17:17:38.686089
| 2021-09-01T19:04:48
| 2021-09-01T19:04:48
| 132,138,395
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,482
|
rd
|
filter_mentions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_language.R
\name{filter_mentions}
\alias{filter_mentions}
\title{Filter for only specific mentions}
\usage{
filter_mentions(.account, filter)
}
\arguments{
\item{.account}{An account or \code{\link[=query]{query()}} object.}
\item{filter}{A filter string.}
}
\value{
A query object
}
\description{
\code{filter_mentions()} causes counting and mention operations
to occur on only those mentions matching the filter. This is
part of the \code{\link[=query]{query()}} language.
}
\section{Data models}{
The filter language often uses IDs to represent countries, languages, and so on.
These can be found by using either the filter panel in \href{https://analyse.brandseye.com}{Analyse},
or the data model functions.
\itemize{
\item \code{\link[=data_model_countries]{data_model_countries()}}
\item \code{\link[=data_model_languages]{data_model_languages()}}
\item \code{\link[=data_model_networks]{data_model_networks()}}
}
}
\examples{
account("TEST01AA") \%>\%
filter_mentions("published inthelast week")
}
\seealso{
Other verbs for the query language: \code{\link[=group_mentions_by]{group_mentions_by()}},
\code{\link[=compare_mentions]{compare_mentions()}}, \code{\link[=with_mention_fields]{with_mention_fields()}}, \code{\link[=with_mention_order]{with_mention_order()}}, \code{\link[=with_account]{with_account()}}.
\code{\link[=query]{query()}} is a way to manually create queries.
}
|
83cf941a3af6834c4d7d3e905bf3d6cfb8623a7b
|
d308f49b3ad7e7b9a63c2e88386b3d6c64bd0959
|
/homework/lab02.R
|
3609f9746de28fd0a64a62517c00fdbcd0a750b9
|
[] |
no_license
|
jihunsuk/DS_lecture
|
f982aacb4b5e3ce7b41311aef66b90d973488812
|
d50c829224443aef6fca75073dc4b75932c18a6f
|
refs/heads/master
| 2020-03-25T19:45:49.732218
| 2018-08-09T04:10:32
| 2018-08-09T04:10:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
r
|
lab02.R
|
# Lab 02: k-means and hierarchical clustering demos.
# NOTE(review): neither kmeans() nor rnorm() is seeded here, so cluster
# assignments and the dendrogram differ between runs.
data(iris)
newiris <- iris
newiris$Species <- NULL
kc <- kmeans(newiris, 3)
str(kc)
# Cross-tabulate true species against assigned cluster.
table(iris$Species , kc$cluster)
plot(newiris[,c("Sepal.Length", "Sepal.Width")], col = kc$cluster)
points(kc$centers[,c("Sepal.Length", "Sepal.Width")], col = 1:3, pch = 8, cex = 2)
# Silhouette diagnostic for the k-means solution.
library(cluster)
dissE <- daisy(newiris)
sk <- silhouette(kc$cluster, dissE)
plot(sk)
# Hierarchical clustering of 10 random "genes" on Spearman-correlation distance.
y <- matrix(rnorm(50), 10, 5, dimnames=list(paste("g", 1:10, sep=""), paste("t", 1:5, sep="")))
c <- cor(t(y), method="spearman")
d <- as.dist(1-c)
hr <- hclust(d, method="complete", members=NULL)
par(mfrow = c(2,2))
plot(hr, hang = 0.1)
plot(hr, hang = -1)
plot(as.dendrogram(hr), edgePar=list(col=3, lwd=4), horiz=T)
# Cut the tree at half its height and list cluster membership in plot order.
mycl <- cutree(hr, h=max(hr$height)/2)
mycl[hr$labels[hr$order]]
plot(hr)
rect.hclust(hr, k=5, border="red")
|
9fdb32821e669e2a91028b76ac44787f57fe7078
|
4440ab544b02ff806e7386494bb959f633c05cfb
|
/tests/testthat.R
|
2076dd575845e5fe32e3678ad6ac5adf020e45f8
|
[] |
no_license
|
AndBooth/deconcat
|
da2208f809de910852db2e1dc1676508ae4c9010
|
32b8d30ce73193f593c0508ee3dc4cf1f2d8c0e9
|
refs/heads/master
| 2020-03-10T14:10:44.947833
| 2018-04-13T15:20:18
| 2018-04-13T15:20:18
| 129,419,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Standard testthat entry point: runs all tests under tests/testthat/.
library(testthat)
library(deconcat)
test_check("deconcat")
|
244cd05549947227cb7b132075f951ca5e1b6d54
|
91f90bb3b4df7dafe932f9d1ed1466938957d90e
|
/R/slsPkgs.R
|
d712949cf832060f1f93095687c35ade01249490
|
[] |
no_license
|
nemochina2008/paper_kilimanjaro_scintillometer
|
1c7447d5135923bf2088c9b4b89404edbfb26f8b
|
9d90e42345c95a4129c9976fb27aa0e7eafad49c
|
refs/heads/master
| 2021-05-16T17:52:53.016234
| 2017-08-22T06:48:01
| 2017-08-22T06:48:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
slsPkgs.R
|
# Packages required by the SLS analysis scripts.
lib <- c("doParallel", "caret", "lubridate", "plotrix", "plyr", "dplyr",
         "reshape2", "scales", "ggplot2", "latticeExtra", "gridExtra", "grid",
         "TSdist", "Rsenal", "Orcs", "rgeos", "rasterVis",
         "rgdal", "MODIS", "zoo", "raster", "kza", "stargazer")
# Attach every package quietly; the assignment to jnk merely suppresses
# auto-printing of sapply()'s result.
jnk <- sapply(lib, function(pkg) {
  suppressMessages(library(pkg, character.only = TRUE))
})
# library(OpenStreetMap)
|
b6b954c689c6cbcb3835248b26beac6fe7b4aff9
|
045c744b365954f244104044e6c539b3a4b31584
|
/getDATRAS.R
|
13c64e93e332663e6abc06380443ff2c50194fe4
|
[] |
no_license
|
CrystalTu/DATRASscript
|
cfaf7029e0d50b26195362cf94bbdd6f229c6f37
|
a4a9aa0cdf61f3fc37932c1c1ff2aa04acac7ac7
|
refs/heads/master
| 2021-01-10T17:38:45.711559
| 2015-10-08T11:04:35
| 2015-10-08T11:04:35
| 43,882,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,013
|
r
|
getDATRAS.R
|
#' getDATRAS
#'
#' Download DATRAS survey records (haul metadata, length- or age-based data)
#' from the ICES DATRAS web service for a range of years and quarters.
#'
#' @param record Record type: "HH" (haul meta-data), "HL" (species
#'   length-based data) or "CA" (species age-based data).
#' @param survey Survey name(s) understood by the DATRAS web service.
#' @param startyear,endyear First and last survey year to download.
#' @param quarters Survey quarter(s) to download.
#' @param parallel Download in parallel via foreach/doParallel?
#' @param cores Number of worker processes (required when parallel = TRUE).
#' @return A data.table combining the parsed records from every request.
#' @details the update is slow, avoiding straining the server or client.
#' please allow this call to run overnight for a complete upgrade.
#' @keywords download, DATRAS, survey, age, length
#' @examples \dontrun{
#' getDATRAS()
#' }
#' @export
#
getDATRAS <- function(record, survey, startyear, endyear, quarters, parallel = FALSE, cores = NULL) {
# Required packages (expected to be attached by the caller):
# library(XML)
# library(doParallel)
# library(parallel)
# library(foreach)
# library(data.table)
# Record start time so the total download duration can be reported at the end.
strt <- Sys.time()
#
seqYear <- startyear:endyear
#
if(!record %in% c("HL", "HH", "CA") ) stop("Please specify record type: HH (haul meta-data),
HL (Species length-based data),
CA (species age-based data)")
# One web-service URL per (record, survey, year, quarter) combination.
getURL <- apply(expand.grid(record, survey, seqYear, quarters),
1,
function(x) paste0("http://datras.ices.dk/WebServices/DATRASWebService.asmx/get",
x[1],
"data?survey=", x[2],
"&year=", x[3],
"&quarter=", x[4]))
#
if(parallel == TRUE) {
# NOTE(review): missing() is documented for bare argument names
# (missing(cores)); confirm the quoted form "cores" behaves as intended.
if(missing("cores")) stop("Please specify how many cores you wish to devote to this task.")
#
# Drop foreach's registered backend so later %do%/%dopar% calls start clean.
unregister <- function() {
env <- foreach:::.foreachGlobals
rm(list=ls(name=env), pos=env)
} # close unregister
#
cl <- makeCluster(cores)
registerDoParallel(cores = cl)
#
# Each worker fetches one URL, parses the XML response and flattens it
# into a data.table (one row per record); results are rbindlist-ed.
getDATA <- foreach(temp = getURL,
.combine = function(...) rbindlist(list(...), fill = TRUE),
.multicombine = T,
.inorder = F,
.maxcombine = 1000,
.packages = c("XML", "data.table")) %dopar% {
data.table(t(xmlSApply(xmlRoot(xmlTreeParse(temp, isURL = T, options = HUGE, useInternalNodes = T)),
function(x) xmlSApply(x, xmlValue))))
} # close foreach %dopar%
stopCluster(cl)
unregister()
} # close parallel == TRUE
#
if(parallel == FALSE) {
# Sequential variant of the same fetch/parse/combine pipeline.
getDATA <- foreach(temp = getURL,
.combine = function(...) rbindlist(list(...), fill = TRUE),
.multicombine=T,
.inorder=F,
.maxcombine=1000,
.packages = c("XML", "data.table")) %do% {
data.table(t(xmlSApply(xmlRoot(xmlTreeParse(temp, isURL = T, options = HUGE, useInternalNodes = T)),
function(x) xmlSApply(x, xmlValue))))
} # close foreach %do%
} # close parallel == FALSE
print(Sys.time()-strt)
return(getDATA)
} # close function
|
e677c2c4251ff8e868f44b975721da64dcb45c83
|
2f3c391660eb3c5da973eabb83bd1a645efb330d
|
/Efficiency_LY_20200306.r
|
c2dbe2344b633ae2c547935b527e1db418881ad4
|
[] |
no_license
|
ly129/MiCM2020
|
0a8d82b64042d95ebee38f770d3739ad15cfa76b
|
1e3c721fb31156cdf5d3c1b1f97062d3734fcf1d
|
refs/heads/master
| 2021-01-06T02:08:25.833551
| 2020-03-07T19:56:33
| 2020-03-07T19:56:33
| 241,197,307
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,825
|
r
|
Efficiency_LY_20200306.r
|
# Workshop material: R data types and data structures (interactive demos;
# most lines just print a value when run line-by-line).
# double
class(5); is.double(5)
# integer
class(5L); is.double(5L)
# How precise is double precision?
options(digits = 22) # show more digits in output
print(1/3)
options(digits = 7) # back to the default
object.size(rep(5, 10))
object.size(rep(5L, 10))
# logical
class(TRUE); class(F)
# character
class("TRUE")
# Not important for this workshop
fac <- as.factor(c(1, 5, 11, 3))
fac
class(fac)
# R has an algorithm to decide the order of the levels
fac.ch <- as.factor(c("B", "a", "1", "ab", "b", "A"))
fac.ch
# Scalar - a vector of length 1
myscalar <- 5
myscalar
class(myscalar)
# Vector
myvector <- c(1, 1, 2, 3, 5, 8)
myvector
class(myvector)
# Matrix - a 2d array
mymatrix <- matrix(c(1, 1, 2, 3, 5, 8), nrow = 2, byrow = FALSE)
mymatrix
class(mymatrix)
str(mymatrix)
# Array - not important for this workshop
myarray <- array(c(1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144), dim = c(2, 2, 3))
print(myarray) # print() is not needed if run in R or Rstudio.
class(myarray)
# List - very important for the workshop
mylist <- list(Title = "R Beyond the Basics",
Duration = c(2, 2),
sections = as.factor(c(1, 2, 3, 4)),
Date = as.Date("2020-03-06"),
Lunch_provided = FALSE,
Feedbacks = c("Amazing!", "Great workshop!", "Yi is the best!", "Wow!")
)
print(mylist) # No need for print if running in R or Rstudio
class(mylist)
# Access data stored in lists: $name or [[index]]
mylist$Title
# or
mylist[[6]]
# Further
mylist$Duration[1]
mylist[[6]][2]
# Elements in lists can have different data types
lapply(mylist, class) # We will talk about lapply() later
# Elements in list can have different lengths
lapply(mylist, length)
# Data frames - most commonly used for analyses
head(mtcars)
# Access a column (variable) in data frames
mtcars$mpg
# Timing comparisons: the slow loops below are *deliberately* bad, to show
# how much faster vectorised code is than element-wise loops.
# Vectorized operation
# system.time(operation) returns the time needed to run the 'operation'
t <- system.time( x1 <- sqrt(1:1000000) )
head(x1)
# For loop with memory pre-allocation
x2 <- rep(NA, 1000000)
t0 <- proc.time()
for (i in 1:1000000) {
x2[i] <- sqrt(i)
}
t1 <- proc.time()
identical(x1, x2) # Check whether results are the same
# For loop without memory pre-allocation (worst case: the vector grows)
x3 <- NULL
t2 <- proc.time()
for (i in 1:1000000) {
x3[i] <- sqrt(i)
}
t3 <- proc.time()
# As we can see, R is not very fast with loops.
t; t1 - t0; t3 - t2
# ?proc.time
# microbenchmark runs the code multiple times and take a summary
# Use well-developped R function
library(microbenchmark)
result <- microbenchmark(sqrt(500),
500^0.5,
unit = "ns", times = 1000
)
summary(result)
# Result in nanoseconds
# Load the workshop survey data (handedness, height, pulse, smoking, ...).
data <- read.csv("https://raw.githubusercontent.com/ly129/MiCM2020/master/sample.csv", header = TRUE)
head(data, 8)
summary(data)
mean(data$Wr.Hnd)
mean(data$Height)
mean(data$Height, na.rm = T)
# Choose the continuous variables
names(data)
cts <- c("Wr.Hnd", "NW.Hnd", "Pulse", "Height", "Age")
cts.data <- data[, cts]
head(cts.data)
# Calculate the mean of every column at once with apply()
apply(X = cts.data, MARGIN = 2, FUN = mean)
apply(cts.data, MARGIN = 2, FUN = mean, na.rm = TRUE)
sex.tab <- table(data$Sex)
sex.tab
prop.table(sex.tab)
smoke.tab <- table(data$Smoke)
smoke.tab
table(data$Sex, data$Smoke)
# I prefer this...
table(data[, c("Sex", "Smoke")])
# Grouped summaries: the same statistic computed with aggregate(), by()
# and tapply(), in both the x/by and the formula syntax.
# aggregate() syntax 1
aggregate(x = data$Wr.Hnd, by = list(data$Sex), FUN = sd)
# aggregate() syntax 2
aggregate(Wr.Hnd~Sex, data = data, FUN = sd)
# by()
by(data = data$Wr.Hnd, INDICES = list(data$Sex), FUN = sd)
# tapply()
tapply(X = data$Wr.Hnd, INDEX = list(data$Sex), FUN = sd)
# Return a list using tapply()
tapply(X = data$Wr.Hnd,
INDEX = list(data$Sex),
FUN = sd,
simplify = F)
# Two grouping variables: Sex x Smoke.
# Syntax 1
aggregate(x = data$Wr.Hnd,
by = list(data$Sex, data$Smoke),
FUN = sd)
# Syntax 2
aggregate(Wr.Hnd~Sex+Smoke, data = data, FUN = sd)
# Two response variables at once (writing and non-writing hand spans).
# Syntax 1
aggregate(x = cbind(wh = data$Wr.Hnd, nwh = data$NW.Hnd),
by = list(sex = data$Sex, smoke = data$Smoke),
FUN = sd)
# Syntax 2
aggregate(cbind(Wr.Hnd, NW.Hnd)~Sex+Smoke, data = data, FUN = sd)
# FUN can be any function, including ones used for side effects:
aggregate(Wr.Hnd~Sex+Smoke, data = data, FUN = print)
aggregate(Wr.Hnd~Sex+Smoke, data = data, FUN = length)
par(mfrow = c(2,2))
aggregate(Wr.Hnd~Smoke, data = data, FUN = hist, main = "hist")
vec
ifelse(vec>3, yes = "big", no = "small")
data$Adult <- ifelse(data$Age>=18, yes = "Yes", no = "No")
head(data)
if (data$Age >= 18) {
data$Adult2 = "Yes"
} else {
data$Adult2 = "No"
}
head(data)
# Delete Adult2
data <- subset(data, select=-c(Adult2))
cut.points <- c(0, 16, 18, 20, 22, Inf)
data$Hn.Grp <- cut(data$Wr.Hnd, breaks = cut.points, right = T)
head(data)
# labels as default
# Set labels to false
data$Hn.Grp <- cut(data$Wr.Hnd, breaks = cut.points,
right = T, labels = FALSE)
head(data)
# Customized labels
custom.label <- c("TP/XS", "P/S", "M/M", "G/L", "TG/XL")
data$Hn.Grp <- cut(data$Wr.Hnd, breaks = cut.points,
right = T, labels = custom.label)
head(data)
aggregate(Wr.Hnd~Hn.Grp, data = data, FUN = mean)
# cut.points <- c(0, 16, 18, 20, 22, Inf)
num <- 1:10
let <- sample(letters[1:3], size = 10, replace = T)
cbind(num, let)
split(num, let)
wr.hn.grp <- split(x = data$Wr.Hnd, f = data$Hn.Grp)
wr.hn.grp
# lapply
la <- lapply(wr.hn.grp, FUN = mean)
la
# sapply
sapply(wr.hn.grp, FUN = mean, simplify = T)
sapply(wr.hn.grp, FUN = mean, simplify = F)
# vapply *
# Safer than sapply(), and a little bit faster
# because FUN.VALUE has to be specified that length and type should match
va <- vapply(wr.hn.grp, summary, FUN.VALUE = c("Min." = numeric(1),
"1st Qu." = numeric(1),
"Median" = numeric(1),
"Mean" = numeric(1),
"3rd Qu." = numeric(1),
"Max." = numeric(1)))
va
# aggregate(Wr.Hnd~Smoke, data = data, FUN = ...)
# tapply(X = data$Wr.Hnd, INDEX = list(data$Smoke), FUN = ...)
sample.means <- aggregate(Wr.Hnd~Smoke, data = data, FUN = mean)[,2]
sample.var <- aggregate(Wr.Hnd~Smoke, data = data, FUN = var)[,2]
n <- aggregate(Wr.Hnd~Smoke, data = data, FUN = length)[,2]
t <- qt(p = 0.025, df = n - 1, lower.tail = FALSE)
# sample.means; sample.var
lb <- sample.means - t * sqrt(sample.var / n); lb
ub <- sample.means + t * sqrt(sample.var / n); ub
# How many times did we aggregate according to the group? Can on aggregate only once?
# The structure
func_name <- function(argument){
statement
}
# Build the function
# Double a numeric input.
#
# @param x A numeric value (or vector; `*` is vectorized).
# @return `x` multiplied by two.
times2 <- function(x) {
  x * 2
}
# Use the function
times2(x = 5)
# or
times2(3)
# R has operators that do this
9 %/% 2
9 %% 2
# Integer division of `a` by `b`.
#
# @param a Numerator (numeric).
# @param b Denominator (numeric).
# @return A list with `integer` (floor quotient) and `modulus` (remainder).
int.div <- function(a, b) {
  quotient <- floor(a / b)
  remainder <- a - quotient * b
  list(integer = quotient, modulus = remainder)
}
# class(result)
# Recall: how do we access the modulus?
result <- int.div(21, 4)
result
result$modulus
# Integer division of `a` by `b`, printed as a small formatted report via cat().
# Side effect only: writes to stdout; returns cat()'s NULL.
int.div <- function(a, b) {
  q <- a %/% b
  r <- a %% b
  return(cat(a, "%%", b, ": \n integer =", q, "\n ------------------", " \n modulus =", r, "\n"))
}
int.div(21,4)
# Echo the two operands of an integer division as a named vector.
#
# @param a Numerator (numeric).
# @param b Denominator (numeric).
# @return c(numerator = a, denom = b).
# Fix: the original also computed a %/% b and a %% b into locals that were
# never used; the dead computations are removed. The return value is unchanged.
int.div <- function(a, b){
  c(numerator = a, denom = b)
}
int.div(21, 4)
# No need to worry about the details here.
# Just want to show that functions do not always have to return() something.
# Toy "Canadian chatbot": always replies "Sorry!" out loud by shelling out to
# the macOS `say` command with the requested voice.
#
# @param who Name of a macOS text-to-speech voice (e.g. "Alex").
# @param reply_to Message being replied to.
#   NOTE(review): accepted but never used in the body — presumably a stub for
#   context-aware replies; confirm before removing.
# Side effect only (spawns a shell); returns system()'s exit status.
# NOTE(review): macOS-specific — `say` will not exist on Linux/Windows.
AIcanadian <- function(who, reply_to) {
  system(paste("say -v", who, "Sorry!"))
}
# AIcanadian("Alex", "Sorry I stepped on your foot.")
# Train my chatbot - AlphaGo style.
# I'll let Alex and Victoria talk to each other.
# MacOS has their voices recorded.
# chat_log <- rep(NA, 8)
# for (i in 1:8) {
# if (i == 1) {
# chat_log[1] <- "Sorry I stepped on your foot."
# system("say -v Victoria Sorry, I stepped on your foot.")
# } else {
# if (i %% 2 == 0)
# chat_log[i] <- AIcanadian("Alex", chat_log[i - 1])
# else
# chat_log[i] <- AIcanadian("Victoria", chat_log[i - 1])
# }
# }
# chat_log
# Download the workshop sample data from GitHub and apply `func` to the
# Wr.Hnd column within each smoking group.
#
# @param func A summary function (e.g. mean, var, sd) applied per group.
# @return A `by` object: one result of `func` per level of Smoke.
# NOTE(review): fetches the CSV over the network on every call; cache the
# data frame locally if this is called repeatedly or offline.
data_summary <- function(func) {
  data <- read.csv("https://raw.githubusercontent.com/ly129/MiCM2020/master/sample.csv", header = TRUE)
  by(data = data$Wr.Hnd, INDICES = list(data$Smoke), FUN = func)
}
data_summary(var)
# Multiply `a` by `b` (default 2), demonstrating stop() and warning():
# b == 3 raises an error; b == 4 succeeds but emits a warning.
#
# @param a Numeric value to multiply.
# @param b Multiplier; defaults to 2. 3 is refused, 4 warns.
# @return a * b (unless b == 3, which errors).
a_times_2_unless_you_want.something.else.but.I.refuse.3 <- function(a, b = 2) {
  if (b == 3) stop("I refuse 3!")
  if (b == 4) warning("4 sucks too")
  a * b
}
a_times_2_unless_you_want.something.else.but.I.refuse.3(a = 5)
a_times_2_unless_you_want.something.else.but.I.refuse.3(a = 5, b = 4)
# a_times_2_unless_you_want.something.else.but.I.refuse.3(a = 5, b = 3)
# Two-sided 95% t confidence interval for the mean of a numeric sample.
#
# @param x Numeric vector of observations (needs length >= 2 and no NAs for
#   a finite interval, as in the original).
# @return Named numeric vector c(lower, upper).
# Fixes: the original shadowed base functions by assigning to `mean`, `var`
# and `t`, and ended two lines with dead `; lb` / `; ub` expressions whose
# values were discarded. Locals are renamed and the dead code removed; the
# returned interval is numerically identical.
sample.ci <- function(x){
  xbar <- mean(x)
  s2 <- var(x)
  n <- length(x)
  t.crit <- qt(p = .025, df = n - 1, lower.tail = FALSE)
  half.width <- t.crit * sqrt(s2 / n)
  c(lower = xbar - half.width, upper = xbar + half.width)
}
sample.ci(c(1453,45,14,51,235,123,4,123,412))
aggregate(Wr.Hnd~Smoke, data = data, FUN = sample.ci)
# Simulate a small clinical-style data set: height/weight/age ~ Normal,
# binary treatment (30% TRUE) and heart-failure outcome HF (10% TRUE).
set.seed(20200306)
N <- 200
height <- round(rnorm(n = N, mean = 180, sd = 10)) # in centimeter
weight <- round(rnorm(n = N, mean = 80, sd = 10)) # in kilograms
age <- round(rnorm(n = N, mean = 50, sd = 10))
treatment <- sample(c(TRUE, FALSE), size = N, replace = T, prob = c(0.3,0.7))
HF <- sample(c(TRUE, FALSE), size = N, replace = T, prob = c(0.1,0.9))
fake <- data.frame(height, weight, age, treatment, HF)
head(fake)
names(fake)
# BMI = kg / m^2 (height is stored in cm, hence the /100).
fake$BMI <- fake$weight/(fake$height/100)^2
head(fake)
# WHO BMI bands; right = F makes intervals left-closed: [18.5, 25) etc.
cut.pts <- c(-Inf, 18.5, 25, 30, Inf)
labs <- c("Underweight", "Normal weight", "Overweight", "Obesity")
fake$BMI.cat <- cut(fake$BMI, breaks = cut.pts, labels = labs, right = F)
head(fake)
# aggregate() or tapply()
aggregate(BMI~BMI.cat, data = fake, FUN = mean)
# split() and lapply()
BMI.grp <- split(fake$BMI, f = fake$BMI.cat)
lapply(BMI.grp, FUN = mean)
# Trick: logicals coerce to 0/1 under arithmetic, so sum() counts TRUEs.
FALSE + TRUE + TRUE
F + F + T + T
# Count of HF events per BMI category x treatment cell.
aggregate(HF~BMI.cat+treatment, data = fake, FUN = sum)
# Cross-tabulate two columns of a data frame (treatment vs outcome).
#
# @param data A data frame.
# @param treatment,outcome Column names (strings) in `data`.
# @return A contingency table of the two columns.
tab2by2 <- function(data, treatment, outcome) {
  cols <- c(treatment, outcome)
  table(data[, cols])
}
tab2by2(fake, treatment = "treatment", outcome = "HF")
# Cross-tabulate a (possibly continuous) treatment against a (possibly
# continuous) outcome as a 2x2 table.
#
# Columns with more than two distinct observed values are treated as
# continuous and dichotomized at the supplied threshold into "<= t" vs "> t";
# already-binary columns are used as-is and their threshold may be missing.
#
# @param data A data frame.
# @param treatment,outcome Column names (strings) in `data`.
# @param treatment.threshold,outcome.threshold Cut points for dichotomizing;
#   required only when the corresponding column is non-binary (otherwise an
#   informative error is raised via stop()).
# @return A 2x2 contingency table with dimnames "treatment" and "outcome".
tab2by2.pro <- function(data, treatment, outcome, treatment.threshold, outcome.threshold){
  tx <- data[, treatment]
  rx <- data[, outcome]
  # More than two distinct observed values => treat as continuous.
  if (length(table(tx))>2) {
    if (missing(treatment.threshold)) {
      stop("Non-binary treatment. Please provide a threshold.")
    } else {
      binary.treatment <- ifelse(tx<=treatment.threshold,
                                 yes = paste("<=", treatment.threshold),
                                 no = paste(">", treatment.threshold))
    }
  } else {
    binary.treatment <- tx
  }
  if (length(table(rx))>2) {
    if (missing(outcome.threshold)) {
      stop("Non-binary outcome. Please provide a threshold.")
    } else {
      binary.outcome <- ifelse(rx<=outcome.threshold,
                               yes = paste("<=", outcome.threshold),
                               no = paste(">", outcome.threshold))
    }
  } else {
    binary.outcome <- rx
  }
  return(table(treatment = binary.treatment, outcome = binary.outcome))
}
tab2by2.pro(fake, treatment = "age", outcome = "BMI")
tab2by2.pro(fake, treatment = "age", outcome = "BMI", treatment.threshold = 50)
tab2by2.pro(fake, treatment = "age", outcome = "BMI", treatment.threshold = 50, outcome.threshold = 20)
tab2by2.pro(fake, treatment = "age", outcome = "HF")
# HF is binary, so it is OK if "outcome.threshold" is missing.
tab2by2.pro(fake, treatment = "age", outcome = "HF", treatment.threshold = 50)
|
bce0c4c16c2cb27351b118e2e77476043fb030aa
|
1757fb5db6b90cfb4a09e73cf1178fdbe851bf3d
|
/train_bs_par.R
|
a69ed97a504401d8568862380078299e7f9a0e8b
|
[] |
no_license
|
gvschweinitz/BLS_19_Does-Machine-Learning-Help-Us-Predict-Banking-Crises
|
b03a2ea494926033fed547d1b0e5a11f80696e8f
|
578a204b6f4fade8bf05793535607b68a463ea3a
|
refs/heads/master
| 2023-01-21T10:03:37.193918
| 2020-12-01T09:28:38
| 2020-12-01T09:28:38
| 313,629,287
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,785
|
r
|
train_bs_par.R
|
train_bs_par <- function(data.db.train,data.db.test,parameters,methodz,do.bootstrap=TRUE,R.BS=1000,blocklength=12,sample.countries = FALSE,min_cr = 5){
  # ------------------------------------
  # CALL
  # train_bs(data.db.train,data.db.test,parameters,methodz,R.BS,blocklength)
  # train_bs(data.db.train,data.db.test,parameters,methodz,R.BS,blocklength,sample.countries)
  # train_bs(data.db.train,data.db.test,parameters,methodz,R.BS,blocklength,sample.countries,min_cr)
  # ------------------------------------
  # DESCRIPTION
  # performs a bootstrap on data from data.db.train in order to provide a bootstrapped forecast on data.db.test
  # In case of in-sample estimation, data.db.train (for bootstrapping) and data.db.test (for forecasting) should be identical
  # Bootstrap specificities:
  # - block bootstrap
  # - cross sectional blocks (cannot be excluded at the current moment)
  # - panel out-of-sample (dropping whole countries)
  # Note that this function transforms input data.tables into data.frames for estimation purposes
  # ------------------------------------
  # INPUT
  # data.db.train: in-sample estimation data.table with country, year, quarter, c0, pre, latepre, post, and early warning indicators
  # data.db.test: out-of-sample prediction data.table with country, year, quarter, c0, pre, latepre, post, and early warning indicators
  # cols.string: column names (pre and estimation variables)
  # parameters: parameters from preparation scripts
  # methodz: vector with methods to be estimated
  # do.bootstrap: Boolean if bootstrap on training data should be performed or not
  # R.BS: number of bootstrap draws
  # blocklength: number of adjacent periods in a block; RECOMMENDED: EARLY WARNING HORIZON. Reduced if only smaller blocks are available.
  # sample.countries: TRUE for panel out-of-sample bootstrap, FALSE otherwise (NOT RECOMMENDED FOR RECURSIVE ESTIMATION).
  # min_cr: minimum number of pre-crisis observations in bootstrap sample. Needed for valid estimations
  # ------------------------------------
  # OUTPUT
  # list containing
  # - data.out: T x output x R.BS array, where
  # T is the number of observations in data.db.test
  # output is the number of outputs (methodz x c(Prob, PrioP, AUC, MSE))
  # Gregor von Schweinitz 2020-11-17
  # No bootstrap requested: delegate to the plain (non-parallel) trainer.
  if (!do.bootstrap){
    return(train_bs(data.db.train,data.db.test,parameters,methodz,do.bootstrap=do.bootstrap))
  }
  # have parallel framework only for bootstrap estimations
  b <- blocklength
  # Define all possible start and end-dates for bootstrap blocks
  # Account for the fact that blocks might need to be shorter than b periods in order to give every observation an equal chance to be drawn
  # In order to manage shorter blocks at the beginning, we give the additional (b-1) starting observations "fictitious earlier starting dates"
  # Dates are quarterly, hence the 1/4 steps below.
  time.vec <- sort(unique(data.db.train[,Date]))
  time.add <- seq(-(b-1)/4,0,1/4)
  time.vec <- c(time.vec[1]+time.add,time.vec[2:length(time.vec)])
  # sample.master maps every admissible block start (per country) to the row
  # range [pos1, pos2] in data.db.train that the block would cover.
  sample.master <- data.frame(pos1=NA,pos2=NA,Date=NA,Country=NA)
  for (coun in unique(data.db.train$Country)){
    pos <- which(data.db.train$Country==coun)
    pos1 <- c(rep(pos[1],b-1),pos)
    pos2 <- c(pos,rep(pos[length(pos)],b-1))
    time.coun <- data.db.train[pos,Date]
    time.coun1 <- data.db.train[pos1,Date]
    time.coun2 <- data.db.train[pos2,Date]
    time.add1 <- c(time.add[1:min(b,length(pos1))],rep(0,max(0,length(pos1)-b)))
    # Account for missing observations, as latepre, c0, post are not included in data.db.train
    pos.na <- which(time.coun1+b/4-0.25<time.coun2)
    for (k in pos.na){pos2[k] <- pos[max(which(time.coun<=time.coun1[k]+b/4-0.25))]}
    time.coun2 <- data.db.train[pos2,Date]
    # NOTE(review): cbind() with the character Country coerces the whole row
    # block to character, so pos1/pos2/Date become strings here — that is why
    # as.numeric() is needed when the positions are read back below.
    sample.master <- rbind(sample.master,cbind(pos1=pos1,pos2=pos2,Date=time.coun1+time.add1,Country=coun))
  }
  # Drop the all-NA seed row used to initialize sample.master.
  sample.master <- sample.master[2:dim(sample.master)[1],]
  rm(time.add,coun,pos,pos1,pos2,time.coun,time.coun1,time.coun2,time.add1,pos.na)
  # One output column per method for the predicted probability and for the
  # optimized threshold, plus a leading "PriorP" column.
  # NOTE(review): the "PriorP" column of data.out.r is declared here but never
  # assigned inside the loop below, so it comes back as NA in every draw —
  # confirm whether the caller expects this (the header also mentions AUC/MSE
  # outputs that are not produced here).
  output.names <- c("PriorP",
                    paste("Prob(", methodz, ")", sep=""),
                    paste("OT(", methodz, ")", sep=""))
  data.out.r <- array(dim=c(dim(data.db.test)[1],length(output.names)),
                      dimnames=list(rownames(data.db.test),output.names))
  # NOTE(review): methodz.agg is assigned but not used anywhere below — TODO
  # confirm it can be dropped.
  methodz.agg <- parameters$methodz.agg # adjusted, if a method yields no results
  Nobs <- dim(data.db.train)[1]
  data.test <- cbind(obs=Nobs+1:dim(data.db.test)[1],data.db.test)
  # FOREACH LOOP OVER BOOTSTRAP DRAWS
  # acomb stacks per-draw matrices into a T x output x R.BS array.
  acomb <- function(...) abind(...,along=3)
  packages.export <- c("data.table","e1071","kknn","nnet","optimx","plyr","pROC","randomForest","rpart")
  vars.export <- c("columns","calc.train.bs","calculate.threshold")
  print("starting parallel loop")
  # NOTE(review): %dopar% assumes a parallel backend has been registered
  # (e.g. registerDoParallel) by the calling script — confirm; otherwise
  # foreach falls back to sequential execution with a warning.
  # data.out <- foreach(r=1:R.BS,.combine=acomb,.packages=packages.export,.export=vars.export) %do% {
  data.out <- foreach(r=1:R.BS,.combine=acomb,.packages=packages.export,.export=vars.export) %dopar% {
    if(r%%10==0) print(r)
    #### sample data ####
    # Redraw until the bootstrap sample contains at least min_cr pre-crisis
    # observations (PriorP is the sample share of pre == 1).
    PriorP <- 0
    while (PriorP<min_cr/Nobs){
      # randomly drawn countries (with replacement)
      if (sample.countries) {
        countries.bs <- sample(unique(data.db.train$Country),replace=TRUE)
      }else{
        countries.bs <- unique(data.db.train$Country)
      }
      # NOTE(review): grep() matches country names as regular expressions /
      # substrings, so a country name that is a substring of another (or that
      # contains regex metacharacters) would over-select rows — confirm the
      # country labels are safe, or consider exact matching via which(... %in% ...).
      pos.bs1 <- unlist(lapply(countries.bs,grep,sample.master$Country))
      sample.bs <- sample.master[pos.bs1,] # restrict sample.master to selected countries
      # randomly drawn starting times
      # NOTE(review): same substring-matching caveat applies to matching the
      # numeric dates in sample.bs$Date with grep().
      time.bs <- sample(time.vec,replace=TRUE)
      pos.bs2 <- unlist(lapply(time.bs,grep,sample.bs$Date))
      # get blocks from data.train
      pos.bs <- unlist(apply(sample.bs[pos.bs2,],1,function(x) seq(as.numeric(x["pos1"]),as.numeric(x["pos2"]))))
      # Truncate the concatenated blocks to exactly Nobs rows.
      data.train <- data.db.train[pos.bs[1:Nobs],]
      PriorP <- mean(data.train[,pre])
      rm(countries.bs,pos.bs1,sample.bs,time.bs,pos.bs2,pos.bs)
    }
    realizations <- data.train[,pre]
    #### Standardization ####
    if (parameters$standardizeList){
      sdvars <- unique(unlist(parameters$cols.expl))
      # Per-column mean (row 1) and sd (row 2) of the bootstrap sample; the
      # same training statistics are applied to the test data.
      colStats <- data.train[,lapply(.SD, function(x) c(mean(x),sd(x))),.SDcols = sdvars]
      # NOTE(review): plain assignment does not copy a data.table, so
      # data.train.norm aliases data.train and the := below mutates both.
      # Each column is read before it is written within the same statement,
      # so the scaled values look correct, but data.train is modified as a
      # side effect — confirm this aliasing is intended (copy() would avoid it).
      data.train.norm = data.train
      data.test.norm = data.test
      for (sdcol in sdvars){
        data.train.norm[,(sdcol) := scale(data.train[,sdcol,with=FALSE],colStats[1,sdcol,with=FALSE],colStats[2,sdcol,with=FALSE])]
        data.test.norm[,(sdcol) := scale(data.test[,sdcol,with=FALSE],colStats[1,sdcol,with=FALSE],colStats[2,sdcol,with=FALSE])]
      }
    }else { # no standardization:
      data.train.norm = data.train
      data.test.norm = data.test
    }
    for (method in methodz){
      # SET PARAMETERS BY METHOD
      # Method labels follow the pattern "<estimator>.<dataset index>",
      # e.g. "logit.1": first part picks the estimator, second the column set.
      method.calc <- strsplit(method,".",fixed=TRUE)[[1]][1]
      dnum <- as.numeric(strsplit(method,".",fixed=TRUE)[[1]][2])
      usevars <- c("Country","Date",parameters$cols.expl[[dnum]],"c0",parameters$col.resp,"latepre","post")
      data.train.use <- as.data.frame(data.train.norm[,usevars,with=FALSE])
      data.test.use <- as.data.frame(data.test.norm[,usevars,with=FALSE])
      # parameters$logitvariables <- parameters$cols.expl[[dnum]]
      # parameters$datavariables <- parameters$cols.expl[[dnum]]
      cols.string <- columns(parameters$col.resp, parameters$cols.expl[[dnum]])
      if (!parameters$optimizationMode){
        # Set hyperparameters of method based on paramtable (which comes from an exogenous csv-file specifying hyperparameters for all estimation methods)
        namecols <- grep("_name",colnames(parameters$paramtable))
        valcols <- grep("_val",colnames(parameters$paramtable))
        for (col in 1:length(namecols)){
          if (!is.na(parameters$paramtable[method,namecols[col]])){
            parameters[[parameters$paramtable[method,namecols[col]]]] <- parameters$paramtable[method,valcols[col]]
          }
        }
      }
      # Estimate; failures are tolerated (try) and reported as NA below.
      train.out <- NULL
      try(train.out <- calc.train.bs(method.calc, cols.string, data.train.use, data.test.use, parameters),silent=FALSE)
      if (!is.null(train.out)){
        data.out.r[,paste("Prob(", method, ")", sep="")] <- train.out$temp.test
        threshold <- calculate.threshold(realizations, train.out$temp.train, parameters$mu, optimizethreshold = parameters$optimizethreshold, evaluate = FALSE, PriorP=PriorP)$threshold
        data.out.r[,paste("OT(", method, ")", sep="")] <- threshold
      }else{
        # Estimation failed: NA probabilities, and the sample prior as a
        # fallback threshold.
        data.out.r[,paste("Prob(", method, ")", sep="")] <- NA
        data.out.r[,paste("OT(", method, ")", sep="")] <- mean(realizations)
      }
    }
    return(data.out.r)
  }
  return(list(data.out=data.out))
}
|
277bce6f74fcfd2d772e4b19b12efbe09ea9afa7
|
e1289344fb044f39fa637744fe32e1f1cd12771e
|
/User_Info
|
ebb407f47084a49e9e0ca1c6cb1e80a32de48bc9
|
[] |
no_license
|
sarachronister/BioSense_RODBC
|
6575618c1e1c2755dce7bb9b12e81ea262d8160d
|
c31c72680395c6e410a0ea7d658ab797760aac86
|
refs/heads/master
| 2020-05-07T19:37:28.344391
| 2019-04-12T23:12:08
| 2019-04-12T23:12:08
| 180,821,258
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 312
|
User_Info
|
# Replace the *********** with your BioSense username and password in the appropriate lines
# Update when you need to change your password
# Save to your home directory "~/"
# SECURITY NOTE(review): charToRaw() only re-encodes the string as raw bytes;
# it is obfuscation, not encryption. Anyone who can read this file recovers
# the credentials with rawToChar(). Prefer a credential store (e.g. the
# keyring package) or environment variables if policy allows.
password <- dput(charToRaw("***********"))
UserID <- dput(charToRaw("***********"))
username <- dput(charToRaw("BIOSENSE\\***********"))
|
|
7c60ddf388ee21b70958904ea7a7d726a5633780
|
291681834ef34a96b587ecc3c5ba46dd9661019c
|
/1-simple-deep-net.R
|
39088b1d604120f9a7550bb39f92e14a7fb358f9
|
[] |
no_license
|
Vassar-COGS-319/lab-8-f20-jodeleeuw
|
5a84303feb2d6e478e58d03f2f507373b0e269e6
|
478174e4e97c8872ae8993a29018767489540d2c
|
refs/heads/main
| 2023-01-08T20:37:37.667736
| 2020-11-02T21:33:00
| 2020-11-02T21:33:00
| 309,488,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,233
|
r
|
1-simple-deep-net.R
|
Sys.setenv('CUDA_VISIBLE_DEVICES'="0")
# The aim of this lab exercise is to show you how to construct and train a deep
# neural network using modern machine learning software packages.
# We are going to use tensorflow and keras.
# tensorflow is a machine learning library developed by Google. It is open source,
# widely used, has lots of instructional content available online, and works with
# R. keras is an abstraction layer on top of tensorflow. It simplifies the process
# of creating and training deep neural networks by making the language more
# like the kind of language we use to talk about neural networks conceptually.
# PART 1: LOADING IN A DATASET
# Training any deep neural network from scratch requires a large set of data.
# For this exercise we are going to use a data set called Fashion MNIST.
# This data set has 70,000 grayscale images of 10 different categories of clothing:
# 0 = T-shirt/top
# 1 = Trouser
# 2 = Pullover
# 3 = Dress
# 4 = Coat
# 5 = Sandal
# 6 = Shirt
# 7 = Sneaker
# 8 = Bag
# 9 = Ankle boot
# Each image in the dataset is 28x28 pixels (784 pixels total).
# Fashion MNIST is meant to be a kind of "benchmark" test. It's not too hard to
# create a neural network that can classify the images correctly, but it's also
# not trivial. Showing that a network can work on Fashion MNIST is a way to
# demonstrate feasibility. Because it is a commonly-used benchmark, keras includes
# a function to easily get it.
library(keras) # this is installed on our GPU-server
fashion_mnist <- dataset_fashion_mnist()
# If you look at fashion_mnist in your environment panel, you'll see that it is a
# list with two elements: train and test. The train list contains data for 60,000
# images, used to train the network, and the test list contains data for 10,000
# images, used to test the network.
# Why do you think we need to test the network with different images than the ones
# that we train the network with? What problem does this relate to from our
# previous discussions of model fitting?
# Let's create four variables here to simplify access to the data:
train_images <- fashion_mnist$train$x
train_labels <- fashion_mnist$train$y
test_images <- fashion_mnist$test$x
test_labels <- fashion_mnist$test$y
# We can look at the raw data associated with one image like this:
train_images[1,,]
# It's a 28 x 28 matrix, with values from 0-255. 0 = black; 255 = white.
# It's going to be a lot easier to work with the data if we change the
# scale from 0-1 instead of 0-255. Let's do that for both the training
# and test images.
train_images <- train_images / 255
test_images <- test_images / 255
# Here's a function to visualize one image:
# Render one 28x28 grayscale matrix (values in [0, 1]) with graphics::image().
#
# @param img A 28x28 numeric matrix.
# Side effect only: draws on the active graphics device.
visualize.image <- function(img) {
  # image() fills column-major from the bottom-left, so rotate the matrix
  # 90 degrees clockwise first to display the digit/garment upright.
  rotated <- t(apply(img, 2, rev))
  image(1:28, 1:28, rotated,
        col = gray((0:255) / 255),
        xaxt = "n", yaxt = "n", xlab = NA, ylab = NA)
}
# Let's try it on the first image:
visualize.image(train_images[60,,])
# What is it? We can look at the category label for each image by looking
# at the corresponding label.
train_labels[60]
# This is category 9. If you scroll up you'll see that category 9 is "ankle boot".
# PART 2: BUILD THE NEURAL NETWORK!
# The real power of Keras is that we can assemble a set of layers to construct different
# neural network architectures. Here we are going to build a simple dense network where
# each layer is fully connected to each other layer.
# Initialize the model
model <- keras_model_sequential()
# Add layers
model %>%
layer_flatten(input_shape=c(28,28)) %>%
layer_dense(units=128, activation = 'relu') %>%
layer_dense(units=64, activation = 'relu') %>%
layer_dense(units=10, activation = 'softmax')
# The first layer (layer_flatten) simply takes our 28 x 28 images and flattens them out
# into a 784 element vector. There are no trainable weights associated with this layer.
# In fact, we could do this flattening ourselves outside of the neural network, but
# this layer makes it so easy that it would be silly to not do it here.
# The second layer has 128 units. Each unit is fully connected to all 784 inputs.
# The activation function for this unit is relu.
# The final layer has 10 units. Each unit is fully connected to all 128 units from the
# previous layer. These 10 output units can be used to represent each of the 10 categories
# that the network is learning. Our goal will be that every time an ANKLE BOOT is shown
# as input, the activation of UNIT 9 will be 1.0 and the activation of the other units
# will be 0. The softmax activation function ensures that the total activation of the
# layer adds up to 1.0. This allows us to treat the output as a probability distribution.
# PART 3: COMPILE THE MODEL
# Once we have the network architecture in place we need to specify a few more features
# before we are ready to train the model.
# We need to specify the discrepancy function.
# This is what we will use to compare the desired and actual output. Keras has many different
# choices, each appropriate for a different kind of data. Here we use
# 'sparse_categorical_crossentropy', which is an appropriate discrepancy function when
# you are classifying each item into one of many categories and the output represents
# the probability of being in a particular category. Note that in machine learning lingo
# the discrepancy function is called the loss function.
# We also need to specify the optimizer.
# The optimizer is the algorithm used to update the weights of the neural network.
# It's analogous to choosing Nelder-Mead vs. Genetic Algorithms for parameter estimation.
# There are several different optimizers available, and for more complicated models
# it might be worth trying different optimizers out to find the one the works best.
# Here we can just pick one and go with it.
# Finally, we can specify metrics that we want keras to track while the network is training.
# This will help us understand if the network is learning to categorize the objects correctly.
model %>% compile(
optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = c('accuracy')
)
# PART 4: TRAIN THE MODEL!
# To train the model we provide it with the training input, the output we want it to produce,
# and the length of training. In machine learning lingo, an epoch is one presentation of all
# the inputs to the network. Here we will train the network for 10 epochs. This will mean
# the network sees each of the 60,000 images in the training set 10 times.
model %>% fit(train_images, train_labels, epochs=10, validation_data = list(test_images, test_labels))
# PART 5: CHECK THE MODEL
# We can use the predict() function to see how the model classifies the images
# in the test data.
test.predictions <- model %>% predict_classes(test_images)
# We can compare these predictions with the test_labels to see where the model
# gets the right classification
wrong.answers <- which(test.predictions != test_labels)
# We can visualize a few of these to see what the network had trouble with.
visualize.image(test_images[wrong.answers[2],,]) # show the image
test.predictions[wrong.answers[2]] # category that the model predicted
test_labels[wrong.answers[2]] # correct answer
|
fec30acc5404526ff6a4190841bfb726f58f7812
|
9fae6e827ec9da28342f4fbc495f817706c7042a
|
/.Rprofile
|
96eb73b697e2c56dda0e8f45ad85114cb56aff2b
|
[] |
no_license
|
marionhr/Spatial-Extreme-Value-Modelling-of-Sea-Level-Data-in-the-Oslo-Fjord
|
5d8feb64079c8708f6b9680679fc336257a3c549
|
85d019616689a07f71438481d751cd50f37e54b2
|
refs/heads/master
| 2023-06-17T05:45:27.344478
| 2021-07-17T14:55:30
| 2021-07-17T14:55:30
| 382,840,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 113
|
rprofile
|
.Rprofile
|
# Startup hook run by R on session start: prepend the personal library
# folder to the library search path so packages installed there are found.
.First <- function() {
  .libPaths(c("C:/Users/mario/Git/R", .libPaths()))
}
|
aaf92aeca93e0efedcb6512b74127a9341e4b3c0
|
7062d1f3dea3e1481e00062e3aa11f9d9db3ced7
|
/run_analysis.R
|
4b264f47db020d88fdf4b92346c956d503e5b785
|
[] |
no_license
|
MarcoVH/Getting-Project
|
f09f479e46c4f2a332d142ae71910f7df4685ba1
|
dfd8a653b5a2e68b6edff802e930145989a1b168
|
refs/heads/master
| 2021-01-10T16:19:08.795995
| 2015-10-25T14:52:35
| 2015-10-25T14:52:35
| 44,799,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,447
|
r
|
run_analysis.R
|
# run_analysis: builds a tidy summary (Neat.txt) of the UCI HAR smartphone
# data set — per subject and activity, the mean of every mean()/std() feature.
#Set your working directory to where the "UCI HAR Dataset" folder is, as in:
#setwd("C:/Users/Marco/Documents/Data Science Specialization/Getting and Cleaning Data/Project")
# Package check -----------------------------------------------------------
# NOTE(review): install-if-missing at run time is acceptable for coursework;
# in controlled environments prefer plain library() calls.
if (!require("data.table")) {
  install.packages("data.table")
}
require("data.table")
if (!require("reshape2")) {
  install.packages("reshape2")
}
require("reshape2")
# Loading data ------------------------------------------------------------
# Test
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# Train
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# Labels
# Column 2 holds the label text (column 1 is just an index).
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
# Merge train and test data (Part 1) --------------------------------------
X<-rbind(X_test, X_train)
y<-rbind(y_test, y_train)
subject<-rbind(subject_test, subject_train)
# Removes old data
rm(X_test, X_train, y_test, y_train, subject_test, subject_train)
# Labeling data -----------------------------------------------------------
# Column names for X
names(X)<-features
# Adds descriptive activity names to name the activities in y
# (writing to column 2 of the one-column data frame appends the label column).
y[,2] = activity_labels[y[,1]]
# Column names for y and subject
names(y) = c("Activity_ID", "Activity_Label")
names(subject) = "Subject"
# Extracts only measurements on the mean and standard deviation -----------
# fixed=T matches the literal strings "mean()" / "std()", which keeps the
# mean/std features and excludes e.g. "meanFreq()" — presumably intended;
# confirm against the data set's codebook.
Indicator1<-grepl("mean()", features, fixed=T)
Indicator2<-grepl("std()", features, fixed=T)
# Sum of logicals acts as elementwise OR once coerced back to logical.
Indicator<-as.logical(Indicator1+Indicator2)
X<-X[,Indicator]
# Merge train and test data (Part 2) --------------------------------------
Data<-cbind(subject, X, y)
# Removes old data
rm(subject, X, y)
# Tidy data ---------------------------------------------------------------
# Fixed
id_labels<-c("Subject","Activity_Label", "Activity_ID")
# Variable
variable_labels<- setdiff(colnames(Data), id_labels)
# Melt and cast (reshape2): long format, then the mean of every feature per
# subject x activity combination.
melt_data<-melt(Data, id=id_labels, measure = variable_labels)
Neat<-dcast(melt_data, Subject + Activity_Label ~ variable, mean)
# Write tidy data to a file
write.table(Neat, file = "./Neat.txt", row.names=F)
|
7fc087521580ed2f5f7b832d087bb45aa348f34a
|
eee62499791ec04e22614ccc2a46f4e2210da9f6
|
/man/smoothfda.Rd
|
06c9fe1164b9c50f0a1260cd4b9f3e552a992e54
|
[] |
no_license
|
dapr12/fda.classification
|
df0f9936744d84a945c6be13ebb467986bfbdd54
|
2c129c59f71bf645c88fc715f62699cf7458bebf
|
refs/heads/master
| 2020-04-01T09:32:53.097672
| 2014-08-21T19:19:55
| 2014-08-21T19:19:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 847
|
rd
|
smoothfda.Rd
|
% Generated by roxygen2 (4.0.1.99): do not edit by hand
\name{smoothfda}
\alias{smoothfda}
\title{Smoothfda - Smooths the functional data using a local polynomial of the given degree and bandwidth}
\usage{
smoothfda(fdaobj, bandwidth, degree = 1)
}
\arguments{
\item{fdaobj}{An object of the FDA Class.}
\item{bandwidth}{Bandwidth}
\item{degree}{Degree of the polynomial}
}
\value{
A component list with:
\itemize{
\item{"Data"}{}
\item{"argvals"}{}
\item{"rangevals"}{}
\item{YSmooth}{}
\item{"CVScore"}{}
\item{Mean}{}
}
}
\description{
Smoothfda - Computes the functional spatial median of the functional data
}
\examples{
\dontrun{
#Computing the Median
matplot(time,CVScores$YSmooth, type="l")
lines(time, Median$median, col="red",lwd = 4)
}
}
\references{
Ramsay, James O., and Silverman, Bernard W. (2002), Applied Functional Data Analysis, Springer,New York.
}
|
187631a66ebd2a9ca214a68be8a5c4c5fb04acbe
|
8cc7a398b6616ee30692bef651d67fea406d546e
|
/Market Response.R
|
8a49abf3c46c595f806053a2e437990a0cd385cc
|
[] |
no_license
|
sinafakhar/Market-Response
|
435ca5218c24c183782916bfeb5072b80c891078
|
c0bf7290309d6e59dd375b57e6cba280b55deff0
|
refs/heads/main
| 2023-02-02T11:55:57.202269
| 2020-11-25T21:23:40
| 2020-11-25T21:23:40
| 313,301,914
| 0
| 1
| null | 2020-12-19T11:49:59
| 2020-11-16T12:54:30
|
HTML
|
UTF-8
|
R
| false
| false
| 18,211
|
r
|
Market Response.R
|
# Load packages and the raw weekly deodorant sales workbook.
# NOTE(review): setwd() with a relative path makes the script depend on the
# directory it is launched from; prefer project-relative paths (e.g. RStudio
# projects or here::here()).
setwd("./Market Response/")
# getwd()
library(readxl)
library(ggplot2)
library(skimr)
library(DataExplorer)
library(dplyr)
library(Hmisc)
library(corrplot)
library(PerformanceAnalytics)
library(coefplot)
library(gpairs)
library(lmtest)
library(caret)
library(MLmetrics)
# Weekly sales / price / promotion data per retail chain.
data = read_excel("Data.xls")
str(data)
# Quick completeness check: total count of missing cells.
sum(is.na(data))
# What is the market share of the brand, in value and in volume?
##### Market shares in Chains
# Total sales volume per brand (columns 3:10), then the split of REXONA's
# volume across retail chains, drawn as a pie chart.
colnames(data)
# Fix: funs() was deprecated in dplyr 0.8; passing the function directly to
# summarise_all() is the supported replacement and returns the same sums.
data %>% dplyr::select(3:10) %>% summarise_all(sum)
# REXONA volume per chain and each chain's share of the brand's total.
pie_data <- data %>%
  group_by(Chain) %>%
  summarise(total = sum(REXONASales))
pie_data_p <- pie_data %>%
  mutate(total_p = round(total / sum(total), 2))
pie_data_p
mycols <- c("#0073C2FF", "#EFC000FF", "#868686FF", "#CD534CFF","#CC79A7")
# Stacked bar in polar coordinates = pie chart, with % labels per slice.
ggplot(pie_data_p, aes(x = "", y = total_p, fill = Chain)) +
  geom_bar(width = 1, stat = "identity", color = "white") +
  coord_polar("y", start = 0) +
  geom_text(aes(y = total_p, label = paste0(round(total_p * 100), "%")),
            position = position_stack(vjust = 0.5), color = "black") +
  scale_fill_manual(values = mycols) +
  theme_void()
##### Market shares of brands in total market
pie_share= data%>%dplyr::select(3:10)%>%colSums()
pie_share=as.data.frame(t(pie_share))
pie_share=as.data.frame(t(pie_share))
colnames(pie_share)='sales_volume'
pie_share
pie_share$sales_percent= round(pie_share$sales_volume/sum(pie_share$sales_volume),2)
pie_share <- cbind(brands = rownames(pie_share), pie_share)
rownames(pie_share) <- 1:nrow(pie_share)
mycols2 <- c("#999999", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
ggplot(pie_share, aes(x = "", y = sales_percent, fill = brands)) +
geom_bar(width = 1, stat = "identity", color = "white") +
coord_polar("y", start = 0)+
geom_text(aes(y = sales_percent, label = paste0(round(sales_percent*100), "%")),position = position_stack(vjust = 0.5), color = "black")+
scale_fill_manual(values = mycols2) +
theme_void()
##################Is there an evolution in that market share?
odd_numbers= data%>% filter(REXONASales>200)
####### Evolution of sales in each retailer
data_albert= data%>% filter(Chain=="ALBERT HEIJN")%>%dplyr::select(1:10)
data_albert$WEEK=(seq(1:nrow(data_albert)))
data_c= data%>% filter(Chain=="C-1000")%>%dplyr::select(1:10)
data_c$WEEK=(seq(1:nrow(data_c)))
data_jumbo= data%>% filter(Chain=="JUMBO")%>%dplyr::select(1:10)
data_jumbo$WEEK=(seq(1:nrow(data_jumbo)))
data_edah= data%>% filter(Chain=="EDAH")%>%dplyr::select(1:10)
data_edah$WEEK=(seq(1:nrow(data_edah)))
data_super= data%>% filter(Chain=="SUPER DE BOER")%>%dplyr::select(1:10)
data_super$WEEK=(seq(1:nrow(data_super)))
data_total= rbind(data_albert,data_c,data_jumbo,data_edah,data_super) # this is market sales
data_total%>%filter(Chain=='ALBERT HEIJN')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_total%>%filter(Chain=='C-1000')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_total%>%filter(Chain=='JUMBO')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_total%>%filter(Chain=='EDAH')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_total%>%filter(Chain=='SUPER DE BOER')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
#######
####### Evolution of market share in each retailer
data_total_n= data_total%>%dplyr:: select(c(-1,-2))
data_total_nn=data_total%>%dplyr::select(c(1,2))
data_total_percent= round(data_total_n/rowSums(data_total_n),2)
data_share = cbind(data_total_nn,data_total_percent)
View(data_share) # This is market shares
data_share%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_share%>%filter(Chain=='ALBERT HEIJN')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_share%>%filter(Chain=='C-1000')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_share%>%filter(Chain=='JUMBO')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_share%>%filter(Chain=='EDAH')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
data_share%>%filter(Chain=='SUPER DE BOER')%>%ggplot( aes(x=WEEK, y=REXONASales)) +
geom_line()
###### ODD NUMBERS
odd_numbers= data_share%>% filter(REXONASales>0.4 |REXONASales<0.1 )
View(odd_numbers)
rexo_mean=mean(data$REXONASales)
rexo_std= sd(data$REXONASales)
odd_numbers= data%>% filter(REXONASales>rexo_mean+(3*(rexo_std)) |REXONASales< rexo_mean-(1*(rexo_std)))
View(odd_numbers)
rexo_mean=mean(data$REXONADISP)
rexo_std= sd(data$REXONADISP)
odd_numbers= data%>% filter(REXONADISP>rexo_mean+(3*(rexo_std)) |REXONADISP< rexo_mean-(1*(rexo_std)))
View(odd_numbers)
rexo_mean=mean(data$REXONAFEAT)
rexo_std= sd(data$REXONAFEAT)
odd_numbers= data%>% filter(REXONAFEAT>rexo_mean+(3*(rexo_std)) |REXONAFEAT< rexo_mean-(1*(rexo_std)))
View(odd_numbers)
#How does this market share differ between retailers?
data_share%>% group_by(Chain)%>% dplyr::select(3:10)%>%summarise(mean(REXONASales)) #Volume
# Value sales per brand = volume * price; stored as new columns (51:58).
data$rexona_value = (data$REXONASales*data$REXONAPrice)
data$nivea_value = (data$NIVEASales*data$NIVEAPrice)
data$dove_value = (data$DOVESales*data$DOVEPrice)
data$fa_value = (data$FASales*data$FAPrice)
data$vogue_value = (data$VOGUESales*data$VOGUEPrice)
data$axe_value = (data$AXESales*data$AXEPrice)
data$a8X4_value = (data$`8X4Sales`*data$`8X4Price`)
data$sanex_value = (data$SANEXSales*data$SANEXPrice)
## Calculations for market value
# Row-wise value shares across the eight brand value columns.
data_value= round(data[51:58]/rowSums(data[51:58]),2)
data_value_final= cbind(data[1:2],data_value)
data_value_final%>% group_by(Chain)%>%summarise(mean(rexona_value))
# Mean value share per brand. NOTE: single-letter names here (c, f, ...)
# shadow base objects in the global environment; function calls like c(...)
# still dispatch to the base function, but this is fragile naming.
b=data_value_final%>%summarise(mean(rexona_value))
c=data_value_final%>%summarise(mean(axe_value))
d= data_value_final%>%summarise(mean(nivea_value))
e= data_value_final%>%summarise(mean(fa_value))
f= data_value_final%>%summarise(mean(a8X4_value))
g= data_value_final%>%summarise(mean(vogue_value))
h= data_value_final%>%summarise(mean(sanex_value))
k= data_value_final%>%summarise(mean(dove_value))
b+c+d+e+f+g+h+k #Sanity check to show sum of value shares is 1
###Evolution in total market
# Convert week labels like "W12" to numeric so they sort/plot correctly.
data_value_final$WEEK= gsub("W","",data$WEEK)
data_value_final$WEEK=as.numeric(data_value_final$WEEK)
evolution=data_value_final%>% group_by(WEEK)%>% summarise(median(rexona_value))
colnames(evolution)=c("Week","Rexona_Market_Share")
ggplot(evolution, aes(x=Week)) +
geom_line(aes(y = Rexona_Market_Share), color = "darkred")
# Total market size: sum of all brand volumes (columns 3:10) per week.
data$sum= rowSums(data[3:10])
summarize = data%>%dplyr::select(c(2,51))%>%group_by(WEEK)%>% summarise(Score = sum(sum))
summarize$WEEK=gsub("W","",summarize$WEEK)
summarize$WEEK=as.numeric(summarize$WEEK)
colnames(summarize)=c("Week","Market_Size_Volume")
ggplot(summarize, aes(x=Week)) +
geom_line(aes(y = Market_Size_Volume), color = "darkred")
#How does the brand's price level and promotional support levels compare to the
#levels observed with competing brands? Are the insights retailer specific?
##Price Display Feature Feature&Display
# Per-chain means of the price/promo columns (43:50), then overall means.
a= aggregate (data[,43:50],list(data$Chain),mean)
colMeans(a[,2:8])
#Can you visually detect evidence of a price war among the brands and/or
#supermarkets? If yes, explain how you got to that conclusion.
colnames(data)
# Rebuild the per-chain subsets and renumber WEEK 1..n within each chain so
# the series are aligned before differencing below.
data_albert= data%>% filter(Chain=="ALBERT HEIJN")
data_c= data%>% filter(Chain=="C-1000")
data_jumbo= data%>% filter(Chain=="JUMBO")
data_edah= data%>% filter(Chain=="EDAH")
data_super= data%>% filter(Chain=="SUPER DE BOER")
data_albert$WEEK=(seq(1:nrow(data_albert)))
data_c$WEEK=(seq(1:nrow(data_c)))
data_jumbo$WEEK=(seq(1:nrow(data_jumbo)))
data_edah$WEEK=(seq(1:nrow(data_edah)))
data_super$WEEK=(seq(1:nrow(data_super)))
data_cor= rbind(data_albert,data_c, data_jumbo,data_edah,data_super)
# First differences of weekly prices per brand; correlated price *changes*
# across brands are read as evidence of a price war. Note diff() also spans
# the seams between stacked chains -- those boundary differences mix chains;
# TODO confirm that is acceptable.
Rexona = diff(data_cor$REXONAPrice,1,1)
Dove = diff(data_cor$DOVEPrice,1,1)
Fa= diff(data_cor$FAPrice,1,1)
Nivea = diff(data_cor$NIVEAPrice,1,1)
Sanex = diff(data_cor$SANEXPrice,1,1)
Vogue = diff(data_cor$VOGUEPrice,1,1)
`8X4Price` = diff(data_cor$`8X4Price`,1,1)
df = data.frame(Rexona, Dove,Fa, Nivea, Sanex, Vogue,`8X4Price` )
cormat<- rcorr(as.matrix(df))
# Flatten the upper triangle of a square correlation matrix (plus its matching
# p-value matrix) into a long data frame with one row per variable pair.
flattenCorrMatrix <- function(cormat, pmat) {
  upper <- upper.tri(cormat)
  row_idx <- row(cormat)[upper]
  col_idx <- col(cormat)[upper]
  data.frame(
    row = rownames(cormat)[row_idx],
    column = rownames(cormat)[col_idx],
    cor = cormat[upper],
    p = pmat[upper]
  )
}
# Long-format correlations (r with p-values) of the price differences.
flattenCorrMatrix(cormat$r, cormat$P)
res= cor(df)
corrplot(res, type = "upper", order = "hclust",
tl.col = "black", tl.srt = 45)
# Is there any evidence of seasonality in the market shares? Describe what evidence
# you use to make the seasonality assessment. Is there evidence of an up-or
# downward trend in the share of your selected brand?
### Seasonality
#Based on time serries chart there is no seasonality.
library(data.table)
# Keep only Rexona-related columns (data.table's %like% does a regex match).
rexona_data = data[,colnames(data) %like% "REXONA" ]
############## MODEL1: LINEAR ###################
data$WEEK=(seq(1:nrow(data)))
# Rename the feature&display columns (43:50) to model-friendly names.
colnames(data)[43]='FADF'
colnames(data)[44]='NIVEADF'
colnames(data)[45]='REXONADF'
colnames(data)[46]='SANEXDF'
colnames(data)[47]='VOGUEDF'
colnames(data)[48]='a8X4DF'
colnames(data)[49]='DOVEDF'
colnames(data)[50]='AXEDF'
# Modelling frame for ALBERT HEIJN: ids + brand volume shares + marketing mix.
# NOTE: `test` is computed on the full `data` and then truncated to 124 rows
# to match the Albert Heijn subset -- this assumes the Albert Heijn rows come
# first in `data`; TODO confirm.
data_date= data%>% filter(Chain=='ALBERT HEIJN')%>% dplyr::select(c(1,2))
test = round(data[3:10]/rowSums(data[3:10]),2)
test= test[1:124,]
data_c=data%>% filter(Chain=='ALBERT HEIJN')%>% dplyr::select(11:50)
data1 = cbind(data_date,test,data_c)
View(data1)
#######PRICE WAR
# Rexona vs Dove weekly prices within Albert Heijn (ggplot version; other
# brands left commented out below).
colors <- c("Rexona" = "darkred", "Dove" = "steelblue")
ggplot(data1, aes(x=WEEK)) +
geom_line(aes(y = REXONAPrice), color = "darkred") +
geom_line(aes(y = DOVEPrice), color="steelblue", linetype="twodash")+
labs(x = "Weeks", y = "Price", color = "Legend") ## Legend
# geom_line(aes(y = NIVEAPrice), color = "#0073C2FF")+
# geom_line(aes(y = FAPrice), color = "#EFC000FF")+
# geom_line(aes(y = AXEPrice), color = "#868686FF")+
# geom_line(aes(y = `8X4Price`), color = "#CD534CFF")+
# geom_line(aes(y = VOGUEPrice), color = "#CC79A7")
# Base-graphics version of the same comparison.
plot(data1$WEEK, data1$REXONAPrice, type = "b", frame = FALSE, pch = 19,
col = "red", xlab = "Week", ylab = "Price")
# Add a second line
lines(data1$WEEK, data1$DOVEPrice, pch = 18, col = "blue", type = "b", lty = 2)
# Add a legend to the plot
# NOTE: par(cex = 0.6) changes a global graphics setting and is never
# restored (op is unused afterwards).
op <- par(cex = 0.6)
legend("topright", legend=c("Rexona Price", "Dove Price"),
col=c("red", "blue"), lty = 1:2, pch=1, bty = 'n')
cormat<- rcorr(as.matrix(data1[,11:20])) #This shows the main price wars is for DOVE, VOGUE,SANEX
# Re-definition of flattenCorrMatrix, identical to the earlier copy.
flattenCorrMatrix <- function(cormat, pmat) {
ut <- upper.tri(cormat)
data.frame(
row = rownames(cormat)[row(cormat)[ut]],
column = rownames(cormat)[col(cormat)[ut]],
cor =(cormat)[ut],
p = pmat[ut]
)
}
flattenCorrMatrix(cormat$r, cormat$P)
# Correlation of brand shares within Albert Heijn.
market_share_albert = data_share%>% filter(Chain=="ALBERT HEIJN")
cormat<- rcorr(as.matrix(market_share_albert[,3:10])) #This shows the main competitors is AXE
# Another identical re-definition of flattenCorrMatrix.
flattenCorrMatrix <- function(cormat, pmat) {
ut <- upper.tri(cormat)
data.frame(
row = rownames(cormat)[row(cormat)[ut]],
column = rownames(cormat)[col(cormat)[ut]],
cor =(cormat)[ut],
p = pmat[ut]
)
}
flattenCorrMatrix(cormat$r, cormat$P)
corrplot(cor(market_share_albert[,3:10]),type= 'upper', method = 'number')
############################## MODELS##########################################
# Log and standardized versions of the modelling frame, used for exploration.
data1_log= log(data1[,c(-1,-2)])
data1_log= cbind(data1_log[1:8],data1[11:50])
data1_scaled= scale(data1[,-1])
data1_scaled_log= scale(data1_log)
data1_scaled = as.data.frame(data1_scaled)
data1_scaled_log = as.data.frame(data1_scaled_log)
gpairs(data1[3:10])
gpairs(data1_log[2:9])
gpairs(data1_scaled_log[1:8])
# Split the data into training and test set
# Chronological split: first 100 weeks train, last 24 weeks test.
train.data <- data1[1:100, ]
test.data <- data1[101:124, ]
chart.Correlation(train.data[,c(6,14,29,45,37,34,19)], histogram=TRUE, pch=19) # Transforing is necessary
# MODEL 1: additive linear sales-response model for Rexona.
m1 = lm(REXONASales~REXONADISP+REXONAFEAT+REXONADF+REXONAPrice+DOVEPrice+AXEDISP,train.data) #R2=0.77
summary(m1)
# In-sample fit statistics computed from the residuals.
RSS <- c(crossprod(m1$residuals))
MSE <- RSS / length(m1$residuals)
RMSE1 <- sqrt(MSE)
sig2 <- RSS / m1$df.residual
predictions1 <- m1 %>% predict(train.data)
data.frame(
RMSE = RMSE(predictions1, train.data$REXONASales),
R2 = R2(predictions1, train.data$REXONASales)
)
MAPE(predictions1,train.data$REXONASales)
coefplot(m1, intercept= F,outerCI=1.96, lwdOuter = 1.5,
ylab= "Variables",xlab= 'Association with Rexona market share')
plot(m1)
colMeans( data[,colnames(data) %like% "AXE"])
#Multicolinearity test
car::vif(m1)
cormat<- rcorr(as.matrix(train.data[,c("REXONADISP","REXONAFEAT",
'REXONADF','REXONAPrice',
'DOVEPrice','AXEDISP')]))
# Yet another identical re-definition of flattenCorrMatrix.
flattenCorrMatrix <- function(cormat, pmat) {
ut <- upper.tri(cormat)
data.frame(
row = rownames(cormat)[row(cormat)[ut]],
column = rownames(cormat)[col(cormat)[ut]],
cor =(cormat)[ut],
p = pmat[ut]
)
}
flattenCorrMatrix(cormat$r, cormat$P)
#Heterosedasticity test
gqtest(REXONASales~REXONADISP+REXONAFEAT+REXONADF+REXONAPrice+DOVEPrice+AXEDISP,data=train.data)
bptest(m1)
# Residual-vs-response plot for a visual heteroskedasticity check.
train.data$resi <- m1$residuals
ggplot(data = train.data, aes(y = resi, x = REXONASales)) + geom_point(col = 'blue') + geom_abline(slope = 0)
# Out of sample performance
predictions <- m1 %>% predict(test.data)
data.frame(
RMSE = RMSE(predictions, test.data$REXONASales),
R2 = R2(predictions, test.data$REXONASales)
)
MAPE(predictions,test.data$REXONASales)
#########Multiplicative model####################
# MODEL 2: log-log (multiplicative) model; +1 guards the zero-valued promo
# dummies before taking logs.
m2 = lm(log(REXONASales)~log(REXONADISP+1)+log(REXONAFEAT+1)+log(REXONADF+1)+log(AXEDISP+1)+log(DOVEPrice)+log(REXONAPrice),train.data) #R2=0.72
summary(m2)
# NOTE: the next statement evaluates the fitted equation with bare column
# names -- at top level these names are not defined, so it presumably errors
# and was kept only as a template for the mutate() calls below.
lnsales = -1.3196 + 1.1117 * log(REXONADISP + 1) -1.4443 * log(REXONAFEAT + 1) +
2.5353 * log(REXONADF + 1) -0.6392 * log(AXEDISP + 1) + 0.9476 * log(DOVEPrice) -1.4025 * log(REXONAPrice)
coefplot(m2, intercept= F,outerCI=1.96, lwdOuter = 1.5,
ylab= "Variables",xlab= 'Association with Rexona market share')
#Heterosedasticity test
gqtest(log(REXONASales)~log(REXONADISP+1)+log(REXONAFEAT+1)+log(REXONADF+1)+log(AXEDISP+1)+log(DOVEPrice)+log(REXONAPrice),data=train.data)
# Manual back-transformed predictions. The coefficients are hard-coded copies
# from summary(m2) -- they will silently go stale if the model is refit.
train.data <- train.data %>%
mutate(lnsales = -1.3196 + 1.1117 * log(REXONADISP + 1) -1.4443 * log(REXONAFEAT + 1) +
2.5353 * log(REXONADF + 1) -0.6392 * log(AXEDISP + 1) + 0.9476 * log(DOVEPrice) -1.4025 * log(REXONAPrice))
train.data <- train.data %>%
mutate(predicted_sales = exp(1) ^ lnsales)
train.data <- train.data %>%
mutate(se = (REXONASales - predicted_sales)^2)
train.data <- train.data %>%
mutate(ape = abs((REXONASales - predicted_sales)/REXONASales))
mape_train = sum(train.data$ape)/length(train.data$ape)
mape_train
rmse_train <- sqrt(sum(train.data$se)/length(train.data$se))
rmse_train
cor(train.data$REXONASales,train.data$predicted_sales)^2
# Same back-transformation on the hold-out set.
test.data <- test.data %>%
mutate(lnsales = -1.3196 + 1.1117 * log(REXONADISP + 1) -1.4443 * log(REXONAFEAT + 1) +
2.5353 * log(REXONADF + 1) -0.6392 * log(AXEDISP + 1) + 0.9476 * log(DOVEPrice) -1.4025 * log(REXONAPrice))
test.data <- test.data %>%
mutate(predicted_sales = exp(1) ^ lnsales)
test.data <- test.data %>%
mutate(se = (REXONASales - predicted_sales)^2)
test.data <- test.data %>%
mutate(ape = abs((REXONASales - predicted_sales)/REXONASales))
mape_test = sum(test.data$ape)/length(test.data$ape)
mape_test
rmse_test <- sqrt(sum(test.data$se)/length(test.data$se))
rmse_test
cor(test.data$REXONASales,test.data$predicted_sales)^2
#Multicolinearity test
car::vif(m2)
cormat<- rcorr(as.matrix(train.data[,c("REXONADISP","REXONAFEAT",
'REXONADF','REXONAPrice',
'DOVEPrice','AXEDISP')]))
# Identical re-definition of flattenCorrMatrix (again).
flattenCorrMatrix <- function(cormat, pmat) {
ut <- upper.tri(cormat)
data.frame(
row = rownames(cormat)[row(cormat)[ut]],
column = rownames(cormat)[col(cormat)[ut]],
cor =(cormat)[ut],
p = pmat[ut]
)
}
flattenCorrMatrix(cormat$r, cormat$P)
#Heterosedasticity test
gqtest(lm(log(REXONASales)~log(REXONADISP+1)+log(REXONAFEAT+1)+log(REXONADF+1)+log(AXEDISP+1)+log(DOVEPrice)+log(REXONAPrice),train.data))
bptest(m2)
train.data$resi <- m2$residuals
ggplot(data = train.data, aes(y = resi, x = REXONASales)) + geom_point(col = 'blue') + geom_abline(slope = 0)
# Out of sample performance
# NOTE: predict(m2) returns log-scale predictions while REXONASales is on the
# raw scale, so these RMSE/R2/MAPE values mix scales -- TODO confirm intent.
predictions <- m2 %>% predict(test.data)
data.frame(
RMSE = RMSE(predictions, test.data$REXONASales),
R2 = R2(predictions, test.data$REXONASales)
)
MAPE(predictions,test.data$REXONASales)
############ Range Constrainst
# MODEL 3: semi-log model -- log sales regressed on untransformed drivers.
m3 = lm(log(REXONASales)~ REXONADISP+REXONAFEAT+REXONADF+REXONAPrice+DOVEPrice+AXEDISP,train.data) #R2=0.77
summary(m3)
coefplot(m3, intercept= F,outerCI=1.96, lwdOuter = 1.5,
ylab= "Variables",xlab= 'Association with Rexona market share')
#Heterosedasticity test
gqtest(log(REXONASales)~REXONADISP+REXONAFEAT+REXONADF+REXONAPrice+DOVEPrice+AXEDISP,data=train.data)
# Back-transformed predictions; coefficients hard-coded from summary(m3),
# same staleness caveat as for model 2.
train.data <- train.data %>%
mutate(lnsales_2 = -1.12278 + 0.75306 * REXONADISP -0.72086 * REXONAFEAT +
1.83470 * REXONADF -0.76310 * REXONAPrice + 0.51854* DOVEPrice -0.44669 * AXEDISP)
train.data <- train.data %>%
mutate(predicted_sales_2 = exp(1)^lnsales_2)
train.data <- train.data %>%
mutate(se_2 = (REXONASales - predicted_sales_2)^2)
train.data <- train.data %>%
mutate(ape_2 = abs((REXONASales - predicted_sales_2)/REXONASales))
mape_train_2 = sum(train.data$ape_2)/length(train.data$ape_2)
mape_train_2
rmse_train_2 <- sqrt(sum(train.data$se_2)/length(train.data$se_2))
rmse_train_2
cor(train.data$REXONASales,train.data$predicted_sales_2)^2
# Hold-out evaluation with the same hard-coded equation.
test.data <- test.data %>%
mutate(lnsales_2 = -1.12278 + 0.75306 * REXONADISP -0.72086 * REXONAFEAT +
1.83470 * REXONADF -0.76310 * REXONAPrice + 0.51854* DOVEPrice -0.44669 * AXEDISP)
test.data <- test.data %>%
mutate(predicted_sales_2 = exp(1) ^ lnsales_2)
test.data <- test.data %>%
mutate(se_2 = (REXONASales - predicted_sales_2)^2)
test.data <- test.data %>%
mutate(ape_2 = abs((REXONASales - predicted_sales_2)/REXONASales))
mape_test_2 = sum(test.data$ape_2)/length(test.data$ape_2)
mape_test_2
rmse_test_2 <- sqrt(sum(test.data$se_2)/length(test.data$se_2))
rmse_test_2
cor(test.data$REXONASales,test.data$predicted_sales_2)^2
|
8e99ae77bf14cee39642cd33e4a7bba598597609
|
8672c4a4e0b99cd479e93ead76b34b290ed6cbdd
|
/Markdown-Notebooks/predimred.R
|
870e21d1d70d278ff1133117151b54384dd6f56a
|
[] |
no_license
|
knudson1/dimred
|
7f308f9a1bdb58687d3904dba959832b1f527493
|
93d3954450e391862a1ecd99518284e17fc882b1
|
refs/heads/master
| 2022-11-26T02:43:18.363686
| 2020-08-06T18:28:19
| 2020-08-06T18:28:19
| 281,415,534
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30
|
r
|
predimred.R
|
# Load the glmm package and attach its bundled salamander mating dataset.
library(glmm)
data(salamander)
|
a48fd1fbd906b4d9ea38b7f1b57ef58cddf4184a
|
9fd5e51e5dd1b4541c5c1a6052530d801073a026
|
/man/print.deseasonalize.Rd
|
fb371ff4031dcf7c5861538aeb5d397faa1b0efe
|
[] |
no_license
|
cran/deseasonalize
|
8d056778bd4909b61a1687ee7219e6fd537984e8
|
fde22060bb555ec3a8ec8cfa3b5ffd0c85bd5022
|
refs/heads/master
| 2016-09-06T04:25:01.167970
| 2013-04-10T00:00:00
| 2013-04-10T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 511
|
rd
|
print.deseasonalize.Rd
|
\name{print.deseasonalize}
\alias{print.deseasonalize}
\title{ Print Method for "deseasonalize" Object }
\description{
A terse summary is given.
}
\usage{
\method{print}{deseasonalize}(x, ...)
}
\arguments{
\item{x}{ object of class "deseasonalize" }
\item{...}{ optional arguments }
}
\value{
A terse summary is displayed
}
\author{ A.I. McLeod}
\seealso{
\code{\link{summary.deseasonalize}}
}
\examples{
ds(nottem, Fm=6, Fs=6, type="monthly", searchQ=FALSE)
}
\keyword{ ts }
|
92bd876ce66893ed5e42390569286d9a1e036924
|
56b6b269936453d4730e2f14759d42b1082849af
|
/R/column_or_string.R
|
a30f004ca9ce632bc1a07cf941fcece912ffd535
|
[
"MIT"
] |
permissive
|
AJFOWLER/adjham
|
c9b4e3aa902756ea8d3705e9bbd2f4244035404a
|
12133bdf21db283a8fdfda465212cb6d0a7e3e41
|
refs/heads/master
| 2023-03-09T00:17:06.984643
| 2021-02-24T11:24:09
| 2021-02-24T11:24:09
| 340,010,746
| 0
| 0
| null | 2021-02-24T11:24:10
| 2021-02-18T10:16:18
|
R
|
UTF-8
|
R
| false
| false
| 175
|
r
|
column_or_string.R
|
#' Column or strings
#'
#' @description Placeholder that will eventually determine whether an input is
#'   a column or a pasted string; not needed yet, so it currently always
#'   reports \code{TRUE}.
#' @return Always \code{TRUE} (stub implementation).
column_or_string <- function() {
  TRUE
}
|
9ea68ff286ee53ebda459d07fa7058ab938ff4d0
|
011ef657c4c1fd79cb4ac68d86a9d04e70056e6e
|
/R/import_json.R
|
384d90a7325e48dcd130f88ee12beaa6f1fc4e23
|
[] |
no_license
|
cdcepi/predx
|
8cf3d52c6a2c75a277f5de08b8f8793810c74ee3
|
82917511064bdaebaf9982eec98be2169b8d1288
|
refs/heads/master
| 2021-06-24T16:26:32.315548
| 2019-12-27T16:35:16
| 2019-12-27T16:35:16
| 174,559,039
| 6
| 3
| null | 2019-10-28T16:40:07
| 2019-03-08T15:13:15
|
R
|
UTF-8
|
R
| false
| false
| 1,288
|
r
|
import_json.R
|
#' Import a \code{predx}-formatted JSON file as a \code{predx} data frame
#'
#' @param file Path to a \code{predx}-formatted JSON file (as produced by
#'   \code{export_json}).
#'
#' @return A \code{predx} data frame. If any prediction fails conversion, the
#'   partially-converted data frame is returned and a message is printed so
#'   the caller can inspect the \code{predx} column.
#' @export
#'
#' @examples
#' predx_demo <- as.predx_df(list(
#'   location = c('Mercury', 'Venus', 'Earth'),
#'   target = 'habitability',
#'   predx = list(Binary(1e-4), Binary(1e-4), Binary(1))
#' ))
#' json_tempfile <- tempfile()
#' export_json(predx_demo, json_tempfile)
#' import_json(json_tempfile)
import_json <- function(file) {
x <- jsonlite::fromJSON(file, flatten = T)
# predx_class is mandatory metadata -- fail fast if it is absent.
if (!('predx_class' %in% names(x))) {
stop('predx_class missing')
}
# identify and convert predx columns from import
# Flattening prefixed prediction fields with "predx."; strip that prefix.
these_predx_cols <- names(x)[stringr::str_detect(names(x), 'predx\\.')]
these_predx_cols <- stringr::str_remove(these_predx_cols, 'predx\\.')
names(x) <- stringr::str_remove(names(x), 'predx\\.')
# convert to predx_df
x <- dplyr::as_tibble(x)
x <- tidyr::nest(x, predx = these_predx_cols)
# Each nested value arrives wrapped in a length-1 list; unwrap it.
x$predx <- lapply(x$predx,
function(x) {
lapply(x, { function(x) x[[1]] } )
})
x$predx <- to_predx(x$predx, x$predx_class)
if (any(check_conversion_errors(x$predx))) {
print("Conversion errors found. Check predx column for errors.")
return(x)
} else {
validate_predx_df(x)
return(x)
}
}
|
41dd51f9d5fd417ba21f5d750c3fe005bf7c6276
|
138c2843cd7f8aff3bd326f669961260a2c77b8f
|
/R/clevr_dataset.R
|
64106acc38817c74e79458d2ce3b0a67102e0588
|
[] |
no_license
|
leslie-arch/R-vqa
|
30a27c39c0b562ddbcaf31cca85446bffcc6469b
|
6bec4b883c40583326cbff64afc02932524834b3
|
refs/heads/main
| 2023-08-08T04:48:08.471098
| 2021-09-23T03:10:07
| 2021-09-23T03:10:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,875
|
r
|
clevr_dataset.R
|
source("mask_utils.R")
# Construct a ClevrDataset S3 object from the config list `cfg`.
# Resolves the image directory and annotation file, loads the scene
# annotations from JSON, builds the category -> id map (either composite
# color+material+shape categories or just the three shapes), and prepares a
# cache directory under cfg$DATA_DIR.
new_clevr_dataset <- function(name, cfg)
{
message('Creating dataset: ', name)
value <- list()
image_dir <- paste(cfg$DATASETS$DIR, cfg$DATASETS[[toupper(name)]][['IMAGE']], sep = '/')
anns_file <- paste(cfg$DATASETS$DIR, cfg$DATASETS[[toupper(name)]][['ANNS']], sep = '/')
class(value) <- "ClevrDataset"
value[['name']] <- name
value[['image_directory']] <- image_dir
value[['anns_file']] <- anns_file
# fromJSON(file = ...) -- presumably the rjson package's signature; verify
# which JSON library the caller attaches.
scenes <- fromJSON(file = anns_file)
value[['scenes']] <- as.list(scenes$scenes)
if (cfg$CLEVR$COMP_CAT)
{
# Composite categories: one id per color x material x shape combination.
colors = c('blue', 'brown', 'cyan', 'gray', 'green', 'purple', 'red', 'yellow')
materials = c('rubber', 'metal')
shapes = c('cube', 'cylinder', 'sphere')
category_ids = list()
categories = list()
cat_id = 1
for (c in colors)
{
for (m in materials)
{
for (s in shapes)
{
categories <- append(categories, paste(c, m, s, sep = ' '))
category_ids <- append(category_ids, cat_id)
cat_id <- cat_id + 1
}
}
}
}
else
{
# Simple mode: only the three shape categories.
category_ids = list(1, 2, 3)
categories = list('cube', 'cylinder', 'sphere')
}
names(category_ids) <- categories
#(category_ids)
value[['category_to_id_map']] <- category_ids
# Class 1 is reserved for the background, as in typical detection setups.
value[['classes']] <- c('__background__', categories)
value[['num_classes']] <- length(value$classes)
#cache dir
cache_path <- paste(cfg$DATA_DIR, 'cache', sep = '/')
#message('--------- cache path: ', cache_path)
if (! file.exists(cache_path))
{
dir.create(cache_path)
}
value[['cache_path']] <- cache_path
return(value)
}
# S3 generic: names of the fields a cached roidb entry must contain.
valid_cached_keys <- function(obj) UseMethod('valid_cached_keys')

# ClevrDataset method: fixed vector of the cacheable annotation fields.
valid_cached_keys.ClevrDataset <- function(obj) {
  c(
    'boxes', 'segms', 'gt_classes', 'seg_areas',
    'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map'
  )
}
# get_roidb is defined as an S3 generic function.
# A <- function(obj) UseMethod(B)
# When A is called, R looks up and dispatches to a method named B.<class>;
# by convention A and B are therefore the same name.
get_roidb <- function(obj, ...) UseMethod('get_roidb', obj)
# Build the region-of-interest database (roidb) for a ClevrDataset:
# one entry per image, optionally augmented with cached or freshly computed
# ground-truth annotations and externally supplied proposals.
get_roidb.ClevrDataset <- function(obj,
gt_cache = FALSE,
proposal_file = NULL,
min_proposal_size = 2,
proposal_limit = -1,
crowd_filter_thresh = 0)
{
nimgs <- length(obj$scenes)
entries <- list()
#message('------------ images: ', nimgs)
# Skeleton entry per scene; CLEVR images are a fixed 480x320.
for(i in 1:nimgs)
{
entries[[i]] <- list(file_name = obj$scenes[[i]]$image_filename,
height = 320,
width = 480,
id = obj$scenes[[i]]$image_index)
}
# Fill in the empty annotation placeholders for every entry.
for(i in 1:length(entries))
{
entries[[i]] <- proc_roidb_entry(obj, entries[[i]])
}
if (gt_cache)
{
# Load ground truth from the cache when available; otherwise compute it.
gt_cache_path <- paste(obj$cache_path, paste0(obj$name, '_gt_roidb.pkl'), sep = '/')
message('ground truth: ', gt_cache_path)
if(file.exists(gt_cache_path))
{
entries <- add_gt_from_cache(obj, entries, gt_cache_path)
}
else
{
for(i in 1:length(entries))
{
entries[[i]] <- add_gt_annotations(obj, entries[[i]])
}
}
}
if(!is.null(proposal_file))
{
# Proposal loading is not implemented yet; only logs the file name.
#add_proposals_from_file(obj,
#                       proposal_file,
#                       min_proposal_size,
#                       proposal_limit,
#                       crowd_filter_thresh)
message('--------: ', proposal_file)
}
# NOTE: `entries` is built but not returned -- the function returns the
# dataset object itself; presumably unfinished, TODO confirm intent.
#roidb <- add_class_assignments(obj)
return(obj)
}
# S3 generic: add empty annotation placeholders to a raw roidb entry.
proc_roidb_entry <- function(obj, entry) UseMethod('proc_roidb_entry', obj)
proc_roidb_entry.ClevrDataset <- function(obj, entry)
{
# """Adds empty metadata fields to an roidb entry."""
# Reference back to the parent dataset
#entry['dataset'] = self
# Make file_name an abs path
im_path = paste(obj$image_directory, entry$file_name, sep = '/')
if(!file.exists(im_path)) stop(sprintf('Image not found: %s', im_path))
entry[['image']] <- im_path
entry[['flipped']] <- FALSE
entry[['has_visible_keypoints']] = FALSE
# Empty placeholders
entry[['boxes']] <- matrix(nrow = 0, ncol = 4)
entry[['segms']] = list()
entry[['gt_classes']] = vector(mode = 'numeric')
entry[['seg_areas']] = vector(mode = 'numeric')
#entry[['gt_overlaps']] = scipy.sparse.csr_matrix(
#    np.empty((0, self.num_classes), dtype=np.float32)
#)
# Dense 0 x num_classes matrix in place of the original sparse matrix.
entry[['gt_overlaps']] <- matrix(nrow = 0, ncol = obj$num_classes)
entry[['is_crowd']] = vector(mode = 'logical')
# 'box_to_gt_ind_map': Shape is (#rois). Maps from each roi to the index
# in the list of rois that satisfy np.where(entry['gt_classes'] > 0)
entry[['box_to_gt_ind_map']] = vector(mode = 'logical')
# Remove unwanted fields that come from the json file (if they exist)
entry[['file_name']] <- NULL
return(entry)
}
# S3 generic: merge cached ground-truth annotations (read from
# `gt_cache_path`) into the given roidb entries.
# Fixes two defects in the original: (1) UseMethod() was called as
# UseMethod('add_gt_from_cache', obj, ...), but UseMethod() accepts only the
# generic name and the dispatch object (and `...` was not even a formal of
# this function), so every call errored; (2) the method was misspelled
# `add_gt_fram_cache.ClevrDataset`, so S3 dispatch could never find it.
add_gt_from_cache <- function(obj, entries, gt_cache_path) UseMethod('add_gt_from_cache')

# ClevrDataset method: not yet implemented; returns NULL as a placeholder.
add_gt_from_cache.ClevrDataset <- function(obj, entries, gt_cache_path)
{
  return(NULL)
}
# S3 generic: compute ground-truth annotations (masks, boxes) for one roidb
# entry. Fixed: the original called UseMethod('add_gt_annotations', obj, ...),
# but UseMethod() takes at most the generic name and the dispatch object, and
# `...` was not a formal argument of this function -- every call errored.
add_gt_annotations <- function(obj, entry) UseMethod('add_gt_annotations')

add_gt_annotations.ClevrDataset <- function(obj, entry)
{
  print('---------------------- add_gt_annotations:')
  # Scene annotations are 0-indexed by image id; R lists are 1-indexed.
  en_id <- entry[['id']] + 1
  en_objs <- obj$scenes[[en_id]][['objects']]
  valid_objs <- list()
  valid_segms <- list()
  width <- entry[['width']]
  height <- entry[['height']]
  for(en_obj in en_objs)
  {
    # Decode each object's RLE mask and derive its bounding box.
    rle <- preprocess_rle(obj, en_obj[['mask']])
    mask <- decode_rle(rle)
    bbox <- rle_mask_to_boxes(as.list(rle))
  }
  # Incomplete implementation: the decoded masks/boxes are not yet attached
  # to the entry; returns NULL for now (matching the original behavior).
  return(NULL)
}
# S3 generic: normalise an RLE-encoded mask before decoding. Fixed: the
# original called UseMethod('preprocess_rle', obj, ...), which is invalid --
# UseMethod() accepts only the generic name and the dispatch object, and
# `...` was not a formal of this function.
preprocess_rle <- function(obj, mask) UseMethod('preprocess_rle')

preprocess_rle.ClevrDataset <- function(obj, mask)
{
  print('------------- preprocess_rle:')
  # Delegates to mask_utils.R. NOTE: the decoded result is discarded and the
  # original mask is returned unchanged -- presumably work in progress;
  # TODO confirm whether the decoded mask should be returned instead.
  mask_utils_decode(mask)
  return(mask)
}
|
e749985afc40aa16dfeb0ccdf0dd861f7d07ecb5
|
13e11079ba6fcd554a2f6015cd4e40204d1cb292
|
/man/get_all_datasets.Rd
|
af17085a3cccd063101873edcf48165869cb9ad3
|
[
"MIT"
] |
permissive
|
hut34/hutr-package
|
cdae26631d0f4e640b7c22f1eff703692a47b8ad
|
3519c7f5acc5fe2bdb12a96f42539542ecd11b32
|
refs/heads/master
| 2020-12-28T18:33:10.945324
| 2020-04-24T03:25:38
| 2020-04-24T03:25:38
| 238,440,955
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
get_all_datasets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_all_datasets.R
\name{get_all_datasets}
\alias{get_all_datasets}
\title{get_all_datasets}
\usage{
get_all_datasets()
}
\value{
The metadata of all uploaded or purchased data is returned as a dataframe.
}
\description{
\code{get_all_datasets} provides information about all datasets immediately available to the user.
}
\details{
This function calls the hut and returns a dataframe of all data that the current user
has either purchased or uploaded themselves.
}
\examples{
get_all_datasets()
}
|
8800bec897b1f19890419893dbbff036726feefb
|
2873487ba14d0f0658baa0aca0c804cbd53bd055
|
/tests/testthat/helper-file.R
|
d42786caf0e8a5e3ebc496838e9f36a259de5458
|
[] |
no_license
|
colearendt/xlsx
|
ca930d09339f795d41427333369f95ef6e73538e
|
4ed1d0127fdec1581808656b9c636b0fbd0ac4fd
|
refs/heads/main
| 2022-07-11T14:21:46.066697
| 2022-01-30T09:30:59
| 2022-01-30T09:30:59
| 39,347,430
| 53
| 26
| null | 2022-06-29T18:42:07
| 2015-07-19T20:13:53
|
R
|
UTF-8
|
R
| false
| false
| 447
|
r
|
helper-file.R
|
# Resolve a path inside tests/testthat/ref/ for reference fixture files.
test_ref <- function(file) {
rprojroot::is_testthat$find_file(paste0('ref/',file))
}
# Resolve a path inside tests/testthat/tmp/, creating the tmp folder on first
# use so tests have a scratch directory to write into.
test_tmp <- function(file) {
tmp_folder <- rprojroot::is_testthat$find_file('tmp')
if (!file.exists(tmp_folder))
dir.create(tmp_folder)
rprojroot::is_testthat$find_file(paste0('tmp/',file))
}
# Delete the tests/testthat/tmp/ scratch folder (and all its contents) if it
# exists; used for post-test cleanup.
remove_tmp <- function() {
tmp_folder <- rprojroot::is_testthat$find_file('tmp')
if (file.exists(tmp_folder))
unlink(tmp_folder, recursive=TRUE)
}
|
6f3cd37a0bb2b62eac7667e2e41bf220e15f2b38
|
62095a14e8bb00d56d0ab904d67e092f40792cfd
|
/r-base-dev-ext/requirements.R
|
cd8b856f1f93bce58abc4a1507d98b5705dc8dca
|
[] |
no_license
|
mikebirdgeneau/dockerfiles
|
3b5ff40b7f947e5734bdf1755340b48d58d83ee4
|
4dc0c8167df6508463f00a4920a5502c42648746
|
refs/heads/master
| 2021-01-20T20:37:04.883579
| 2017-04-26T04:49:35
| 2017-04-26T04:49:35
| 65,513,473
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
requirements.R
|
# Packages to install into the r-base-dev-ext Docker image; this vector is
# consumed by an install.packages() step during the image build.
pkg_list <- c(
"roxygen2",
"data.table",
"ggplot2",
"gridExtra",
"lubridate",
"stringr",
"foreach",
"sp",
"gstat",
"rgdal",
"GA",
"R6",
"lintr",
"testthat",
"caret",
"forecast",
"shiny",
"bookdown",
"FinCal",
"documair",
"rmarkdown",
"mvtnorm",
"miniUI",
"pander")
a9c366cb4cf6b88df0ff5486d17fd678405cf50a
|
c18729fa6d0bf8b882588c4566e1125f0e3a0c4b
|
/R/simulate_ar1.R
|
6eeaa284ad927b6f505015a685a32f5df6e02204
|
[] |
no_license
|
yiqunchen/SpikeInference
|
6cf1b05777fb27bd673b73c80cb4e4923dc7c74d
|
f92ebb3686e1c831bec27a08df3083fb7dd0ea55
|
refs/heads/main
| 2023-07-08T11:17:03.983740
| 2021-07-29T01:38:25
| 2021-07-29T01:38:25
| 332,932,190
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,628
|
r
|
simulate_ar1.R
|
#' Simulate fluorescence trace based on a simple AR-1 generative model
#'
#' @details
#' Simulate fluorescence trace based on a simple AR-1 generative model:
#' \deqn{y_t = c_t + \epsilon_t, \epsilon_t \sim N(0, \sigma^2),}
#' \deqn{c_t = gam * c_{t-1} + s_t,}
#' \deqn{s_t \sim Poisson(poisMean).}
#'
#' @param n Numeric; length of the time series (must be a scalar >= 1)
#' @param gam Numeric; AR-1 decay rate
#' @param poisMean Numeric; mean for Poisson distributed spikes
#' @param sd Numeric; standard deviation
#' @param seed Numeric; random seed
#' @param c0 Numeric; initial calcium concentration, default to 0
#' @return
#' \itemize{
#' \item \code{spikes} -- A list of timesteps at which a spike occurs
#' \item \code{fl} -- The noisy fluorescence \eqn{y_t}
#' \item \code{conc} -- The true calcium concentration \eqn{c_t}
#' }
#' @examples
#' sim <- simulate_ar1(n = 500, gam = 0.998, poisMean = 0.009, sd = 0.05, seed = 1)
#' plot(sim)
#' @import stats
#' @export
simulate_ar1 <- function(n, gam, poisMean, sd, seed, c0 = 0)
{
  set.seed(seed)
  # Validate inputs up front. n must be a positive scalar: the original
  # `for (i in 1:n)` loop would iterate over c(1, 0) for n = 0.
  stopifnot(length(n) == 1, n >= 1)
  stopifnot(poisMean >= 0)
  stopifnot(sd >= 0)
  stopifnot(c0 >= 0)
  eta <- numeric(n)  # Poisson spike sizes s_t
  c <- numeric(n)    # latent calcium concentration c_t
  f <- numeric(n)    # observed fluorescence y_t
  # Draw rpois then rnorm within each iteration (same RNG order as the
  # original, so results are reproducible for a given seed).
  for (i in seq_len(n))
  {
    eta[i] <- rpois(1, poisMean)
    if (i > 1){
      c[i] <- gam * c[i - 1] + eta[i]
    }
    else{
      c[i] <- c0 + eta[i]
    }
    f[i] <- c[i] + rnorm(n = 1, mean = 0, sd = sd)
  }
  # Spike times, reported 0-indexed. This reproduces the original computation
  # exactly: `unique((eta > 0) * t)` yields 0 plus the spike positions, and
  # `[-1]` drops the first unique value. That is correct when there is no
  # spike at t = 1 (the dropped value is the 0) but silently drops a genuine
  # spike at t = 1 -- presumably the model assumes none; TODO confirm.
  spikesOut <- unique((eta > 0) * seq_len(n))
  out <- list(spikes = spikesOut[-1] - 1, fl = f, conc = c, call = match.call(),
              gam = gam, poisMean = poisMean, type = "ar1",
              sd = sd, seed = seed)
  class(out) <- "simdata"
  return(out)
}
|
2202ca20e9d365eda774fc8a6a743e1d41abea76
|
0ea4a50d0feb866b1ed722ce7a29c867f1f44184
|
/rprog-009/Projs/3/best.R
|
6c342c5603746531aad79c69b5be78d058953a8e
|
[] |
no_license
|
agarxiv/datasciencecoursera
|
13d594ff5ff05451dff3d6259fd6781112a9e002
|
21a76db991512f8753729ee79d0281d2e38c71a8
|
refs/heads/master
| 2021-01-19T14:56:30.513623
| 2015-02-03T22:09:34
| 2015-02-03T22:09:34
| 26,572,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 882
|
r
|
best.R
|
best <- function(state, outcome) {
  ## Read outcome data
  ## Check that state and outcome are valid
  ## Return hospital name in that state with lowest 30-day death rate
  ## (ties broken alphabetically by hospital name).
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  if (state %in% data$State) {
    data.state <- data[data$State == state, ]
  } else {
    stop('invalid state')
  }
  # Map each valid outcome to its 30-day mortality-rate column index,
  # replacing the original triplicated if/else branches.
  outcome.cols <- c('heart attack' = 11, 'heart failure' = 17, 'pneumonia' = 23)
  if (!(outcome %in% names(outcome.cols))) {
    stop('invalid outcome')
  }
  col <- outcome.cols[[outcome]]
  # Suppress as.numeric() coercion warnings ("NAs introduced...") locally and
  # restore the option on exit -- the original set options(warn = -1)
  # globally and never restored it.
  old.opts <- options(warn = -1)
  on.exit(options(old.opts), add = TRUE)
  # Order by mortality rate ascending, then by hospital name (column 2) for
  # ties; na.last = NA drops hospitals with missing rates.
  ord <- with(data.state, order(as.numeric(data.state[, col]), data.state[, 2], na.last = NA))
  res <- as.character(data.state[ord, ][1, ][2])
  res
}
|
753d527bbe73e73cbed0df8966b586c27cda9484
|
1da61f5738750db2e5bbec4ac0562d5a09b6e7c7
|
/R/admin-panel.R
|
d6c8d54add9dd1e2386aca5a2e60e41d3e560fe0
|
[
"MIT"
] |
permissive
|
fmhoeger/psychTestR
|
03607ed434d8cec5164224b691ae58d2e2c1999b
|
01d06ea2b3b37d5283c02bb81450118374385127
|
refs/heads/master
| 2020-12-14T03:34:36.205333
| 2020-10-04T21:50:54
| 2020-10-04T21:50:54
| 274,735,900
| 2
| 0
|
NOASSERTION
| 2020-10-04T21:50:55
| 2020-06-24T18:01:22
|
R
|
UTF-8
|
R
| false
| false
| 19,221
|
r
|
admin-panel.R
|
# Modal dialog showing participation statistics; opened by the "Statistics"
# button (trigger id "admin_panel.statistics.open") in the admin misc panel.
admin_panel.statistics.ui <- shinyBS::bsModal(
"admin_panel.statistics.ui",
title = "Statistics",
trigger = "admin_panel.statistics.open",
shiny::uiOutput("admin_panel.statistics.num_participants"),
shiny::uiOutput("admin_panel.statistics.average_time"),
shiny::uiOutput("admin_panel.statistics.latest_results"),
shiny::actionButton("admin_panel.statistics.refresh", "Refresh")
)
# UI shown when no administrator is logged in: a single "Admin login" link.
admin_panel.ui.logged_out <- shiny::tags$div(
id = "admin_panel.ui.logged_out",
shinyBS::tipify(
el = shiny::tags$p(shiny::actionLink("admin_login_trigger", "Admin login")),
title = "Click here to enter your administrator credentials."
)
)
# "Results" section of the admin panel: download buttons for the current
# participant's or all participants' results (RDS and CSV), plus a
# delete-all button guarded by a JS confirmation dialog.
admin_panel.ui.results <- shiny::fluidRow(shiny::fluidRow(
shiny::column(
6,
shinyBS::tipify(
el = shiny::p(shiny::downloadButton("admin_panel.download_current_results.rds",
"Current (RDS)")),
title = paste0("Downloads current participant's results as an RDS file. ",
"Downloaded results can be read into R using the ",
"function readRDS()."),
placement = "top"
),
shinyBS::tipify(
el = shiny::p(shiny::downloadButton("admin_panel.download_all_results.rds",
"All (RDS)")),
title = paste0("Downloads all participants' results as a zip file of RDS files. ",
"Individual participant's files can then be read into R ",
"using the function <em>readRDS()</em>."),
placement = "top")
),
shiny::column(
6,
shinyBS::tipify(
el = shiny::p(shiny::downloadButton("admin_panel.download_current_results.csv",
"Current (CSV)")),
title = paste0("Downloads current participant's results as a CSV file. ",
"CSV files will typically contain less detailed results ",
"than equivalent RDS files."),
placement = "top"
),
shinyBS::tipify(
el = shiny::p(shiny::downloadButton("admin_panel.download_all_results.csv",
"All (CSV)")),
title = paste0("Downloads all participants' results as a single CSV file. ",
"CSV files will typically contain less detailed results ",
"than equivalent RDS files."),
placement = "top")
)),
shinyBS::tipify(
shiny::p(shiny::actionButton("admin_panel.delete_results",
"Delete all",
onclick = "confirm_delete_results();")),
title = "Backs up and then deletes all results.",
placement = "top"
))
# "Error logs" section: download all error logs as a zip, or delete them
# (guarded by a JS confirmation dialog).
admin_panel.ui.errors <- shiny::fluidRow(
shinyBS::tipify(
el = shiny::p(shiny::downloadButton("admin_panel.download_errors",
"Download")),
title = paste0("Downloads error logs as a zip file. ",
"Explore individual error logs in R by loading the file ",
"with <em>load()</em> and then calling <em>debugger()</em> ",
"on the resulting object."),
placement = "top"),
shinyBS::tipify(
shiny::p(shiny::actionButton("admin_panel.delete_errors",
"Delete",
onclick = "confirm_delete_errors();")),
title = "Deletes all error logs",
placement = "top"
))
# "Piloting" section: toggle rendered server-side into admin_panel_pilot_ui.
admin_panel.ui.pilot <- shinyBS::tipify(
# periods in the uiOutput label seem to break shinyBS
el = shiny::tags$div(shiny::uiOutput("admin_panel_pilot_ui")),
title = paste0("Pilot mode affects only the current participant. ",
"In pilot mode, testing proceeds as normal, ",
"but the saved results are marked as pilot results."),
placement = "top"
)
# "Availability" section: open/close test buttons rendered server-side.
admin_panel.ui.availability <- shiny::uiOutput("admin_panel_open_close_buttons")
# "Misc." section: statistics modal trigger, session clearing (JS-confirmed),
# and the admin logout button.
admin_panel.ui.misc <- shiny::fluidRow(
shinyBS::tipify(
el = shiny::p(shiny::actionButton("admin_panel.statistics.open", "Statistics")),
title = "Displays participation statistics."
),
shinyBS::tipify(
shiny::p(shiny::actionButton("admin_panel.clear_sessions",
"Clear sessions",
onclick = "confirm_clear_sessions();")),
placement = "top",
title = paste0("Clears session files. ",
"Current testing sessions will not be interrupted. ",
"However, participants will be not be able to use URLs ",
"to resume testing sessions last active ",
"before session clearing.")),
shinyBS::tipify(
el = shiny::p(shiny::actionButton("admin_logout", "Exit admin mode",
style = "color: white; background-color: #c62121")),
title = "Signs out of administration mode.",
placement = "top"
))
# Full admin panel shown when an administrator is logged in: assembles the
# sections above plus a slot for page-specific and custom admin UI.
admin_panel.ui.logged_in <- shiny::fluidRow(
shiny::fluidRow(
shiny::h3("Admin"),
align = "center",
shiny::uiOutput("page_admin_ui"),
admin_panel.statistics.ui,
shiny::column(4, shiny::h4("Results"), admin_panel.ui.results),
shiny::column(2, shiny::h4("Error logs"), admin_panel.ui.errors),
shiny::column(2, shiny::h4("Piloting"), admin_panel.ui.pilot),
shiny::column(2, shiny::h4("Availability"), admin_panel.ui.availability),
shiny::column(2, shiny::h4("Misc."), admin_panel.ui.misc)
),
shiny::fluidRow(shiny::uiOutput("custom_admin_panel"))
)
# Render the admin panel's dynamic UI outputs.
#
# state  - app state object; admin(state) gates what is shown
# output - shiny output object the rendered UI is attached to
# elts   - test elements (queried via get_current_elt) -- presumably the
#          test timeline; confirm against caller
# opt    - options list passed through to get_current_elt
admin_panel.render_ui <- function(state, output, elts, opt) {
  # Show the full admin panel only after a successful admin login;
  # otherwise show the logged-out placeholder UI.
  output$admin_panel.ui <- shiny::renderUI({
    if (admin(state)) admin_panel.ui.logged_in else admin_panel.ui.logged_out
  })
  # The current test element carries its own admin UI in the @admin_ui
  # slot; render it only while in admin mode.
  output$page_admin_ui <- shiny::renderUI({
    if (admin(state)) get_current_elt(state, elts, opt, eval = TRUE)@admin_ui
  })
}
admin_panel.modals <- shinyBS::bsModal(
"admin_login_popup", "Admin login",
"null_trigger", size = "small",
shiny::wellPanel(
align = "center",
shiny::tags$p(shiny::passwordInput("admin_password", label = "Password")),
shiny::tags$p(shiny::actionButton(inputId = "submit_admin_password", "Submit")),
onkeypress = paste0("if (event.keyCode == 13) ",
"document.getElementById('submit_admin_password').click()")))
# Opens the admin login modal whenever the login trigger input fires.
admin_panel.observe.admin_login_trigger <- function(input, session) {
  shiny::observeEvent(input$admin_login_trigger, {
    shinyBS::toggleModal(session, "admin_login_popup", toggle = "open")
    # Disabled: auto-focusing the password field via JS.
    # shinyjs::runjs('document.getElementById("admin_password").focus();')
  })
}
# Checks the submitted admin password against the configured one.
# On success, enters admin mode and closes the login modal;
# on failure, shows an alert and leaves the state unchanged.
admin_panel.observe.submit_admin_password <- function(state, input, session,
                                                      opt) {
  shiny::observeEvent(input$submit_admin_password, {
    password_ok <- input$admin_password == opt$admin_password
    if (!password_ok) {
      shinyjs::alert("Incorrect password.")
    } else {
      admin(state) <- TRUE
      shinyBS::toggleModal(session, "admin_login_popup", toggle = "close")
    }
  })
}
# Signs the user out of admin mode when the logout button is pressed,
# and clears the password field so the next login starts from scratch.
# (Removed a leftover debug print of the admin flag.)
admin_panel.observe.admin_logout <- function(state, input, session) {
  shiny::observeEvent(input$admin_logout, {
    admin(state) <- FALSE
    shiny::updateTextInput(session, "admin_password", value = "")
  })
}
# Registers all observers used by the admin panel and returns them as a
# list, so the caller retains handles to every observer created here.
admin_panel.observers <- function(state, input, output, session, opt) {
  list(
    admin_panel.observe.admin_login_trigger(input, session),
    admin_panel.observe.submit_admin_password(state, input, session, opt),
    admin_panel.observe.admin_logout(state, input, session),
    admin_panel.observe_open_close_buttons(input, output, session, opt),
    admin_panel.delete_results.observers(state, input, opt),
    admin_panel.delete_errors.observers(state, input, opt),
    admin_panel.clear_sessions.observers(state, input, opt),
    admin_panel.statistics.num_participants(input, output, opt),
    admin_panel.statistics.latest_results(input, output, opt),
    admin_panel.statistics.average_time(input, output, opt),
    admin_panel.statistics.open(input, session),
    admin_panel.observe.pilot_mode(state, input, output, session)
  )
}
admin_panel.observe.pilot_mode <- function(state, input, output, session) {
output$admin_panel_pilot_ui <- shiny::renderUI({
pilot <- pilot(state)
highlight_style <- "color: white; background-color: #c62121"
btn.pilot <- shiny::actionButton("admin_panel.pilot_mode", "Pilot",
style = if (pilot) highlight_style)
btn.live <- shiny::actionButton("admin_panel.live_mode", "Live",
style = if (!pilot) highlight_style)
shiny::div(
shiny::p(btn.pilot), shiny::p(btn.live)
)
})
list(
shiny::observeEvent(input$admin_panel.pilot_mode, {
if (pilot(state)) {
shiny::showNotification("Already in pilot mode.", type = "warning")
} else {
pilot(state) <- TRUE
shiny::showNotification("Entering pilot mode.", type = "message")
}
}),
shiny::observeEvent(input$admin_panel.live_mode, {
if (!pilot(state)) {
shiny::showNotification("Already in live mode.", type = "warning")
} else {
pilot(state) <- FALSE
shiny::showNotification("Entering live mode.", type = "message")
}
})
)
}
# Opens the statistics modal when the "Statistics" button is pressed.
admin_panel.statistics.open <- function(input, session) {
  shiny::observeEvent(input$admin_panel.statistics.open, {
    shinyBS::toggleModal(session, "admin_panel.statistics.ui", toggle = "open")
  })
}
admin_panel.statistics.num_participants <- function(input, output, opt) {
output$admin_panel.statistics.num_participants <- shiny::renderUI({
input$admin_panel.statistics.refresh
input$admin_panel.statistics.open
shiny::showNotification("Refreshing statistics...")
df <- tabulate_results(opt, include_pilot = FALSE)
n_complete <- sum(df$complete)
n_part_complete <- sum(!df$complete)
shiny::p(
"The output directory contains results for ",
shiny::strong(format(n_complete, scientific = FALSE)),
" completed ",
ngettext(n_complete, "session", "sessions"),
" and ",
shiny::strong(format(n_part_complete, scientific = FALSE)),
" partly completed ",
ngettext(n_part_complete, "session.", "sessions.")
)
})
}
admin_panel.statistics.latest_results <- function(input, output, opt) {
output$admin_panel.statistics.latest_results <- shiny::renderUI({
input$admin_panel.statistics.refresh
input$admin_panel.statistics.open
files <- tabulate_results(opt, include_pilot = FALSE)
if (nrow(files) > 0L) {
latest_file <- files$file[[which.max(files$id)]]
latest_path <- file.path(opt$results_dir, latest_file)
latest_data <- readRDS(latest_path)
latest_time <- as.list(latest_data)$session$current_time
if (!is.null(latest_time)) {
time_diff <- Sys.time() - latest_time
time_diff_formatted <- sprintf("(%s %s ago)",
format(as.numeric(time_diff), digits = 3),
units(time_diff))
shiny::p("Last data saved at: ",
shiny::strong(format(latest_time, format = "%Y-%m-%d %H:%M:%S %Z")),
time_diff_formatted)
}
}
})
}
admin_panel.statistics.average_time <- function(input, output, opt) {
output$admin_panel.statistics.average_time <- shiny::renderUI({
input$admin_panel.statistics.refresh
input$admin_panel.statistics.open
files <- tabulate_results(opt, include_pilot = FALSE)
files <- files[files$complete, ]
if (nrow(files) > 0L) {
data <- lapply(files$full_file, readRDS)
time_taken <- vapply(data, function(x) {
difftime(x$session$current_time, x$session$time_started, units = "mins")
}, numeric(1))
M <- mean(time_taken)
SD <- sd(time_taken)
shiny::p(shiny::HTML(sprintf(
"Mean completion time: <strong>%s</strong> min. (SD = <strong>%s</strong>)",
format(M, digits = 3L),
format(SD, digits = 3L))))
}
})
}
admin_panel.delete_results.observers <- function(state, input, opt) {
shiny::observeEvent(
input$admin_panel.confirm_delete_results,
if (admin(state)) admin_panel.delete_results.actual(opt))
}
admin_panel.delete_errors.observers <- function(state, input, opt) {
shiny::observeEvent(
input$admin_panel.confirm_delete_errors,
if (admin(state)) admin_panel.delete_errors.actual(opt))
}
admin_panel.clear_sessions.observers <- function(state, input, opt) {
shiny::observeEvent(
input$admin_panel.confirm_clear_sessions,
if (admin(state)) admin_panel.clear_sessions.actual(opt))
}
# Backs up all results into a timestamped zip archive, and only if the
# backup file was actually written deletes and recreates the results
# directories. If the backup cannot be found, nothing is deleted.
admin_panel.delete_results.actual <- function(opt) {
  dir <- opt$results_archive_dir
  R.utils::mkdirs(dir)
  # Archive name encodes the current date/time/timezone so successive
  # backups never collide.
  file <- paste0(format(Sys.time(),
                        format = "date=%Y-%m-%d&time=%H-%M-%S&tz=%Z"),
                 ".zip")
  path <- file.path(dir, file)
  shiny::showNotification("Creating results backup...")
  zip_dir(dir = opt$results_dir, output_file = path)
  # Only proceed with deletion when the backup zip demonstrably exists.
  if (file.exists(path)) {
    shiny::showNotification("Backup created.")
    unlink(opt$results_dir, recursive = TRUE)
    # Brief pause between deleting and recreating the directory --
    # presumably to let the filesystem settle; TODO confirm necessity.
    Sys.sleep(0.01)
    dir.create(opt$results_dir)
    dir.create(opt$supplementary_results_dir)
    shiny::showNotification("Deleted results.")
  } else {
    shiny::showNotification(
      "Backup failed, deleting cancelled.")
  }
}
admin_panel.delete_errors.actual <- function(opt) {
unlink(opt$error_dir, recursive = TRUE)
Sys.sleep(0.01)
dir.create(opt$error_dir)
shiny::showNotification("Deleted error logs.")
}
# Deletes and recreates the session directory, discarding all saved
# session files, then notifies the admin of the result.
admin_panel.clear_sessions.actual <- function(opt) {
  session_dir <- opt$session_dir
  unlink(session_dir, recursive = TRUE)
  # Brief pause between deleting and recreating the directory
  # (mirrors the other deletion helpers in this file).
  Sys.sleep(0.01)
  dir.create(session_dir)
  shiny::showNotification("Successfully cleared session files.")
}
admin_panel.observe_open_close_buttons <- function(input, output, session, opt) {
output$admin_panel_open_close_buttons <- shiny::renderUI({
shiny::invalidateLater(500, session)
input$admin_panel.close_test
input$admin_panel.open_test
highlight_style <- "color: white; background-color: #c62121"
closed <- is_test_closed(opt)
btn.open <- shiny::actionButton("admin_panel.open_test", "Open test",
style = if (!closed) highlight_style)
btn.close <- shiny::actionButton("admin_panel.close_test", "Close test",
style = if (closed) highlight_style)
btn.open <- shinyBS::tipify(
el = btn.open,
title = "Allows new participants to take the test.",
placement = "top"
)
btn.close <- shinyBS::tipify(
el = btn.close,
title = "Prevents new participants from starting the test.",
placement = "top"
)
shiny::div(shiny::p(btn.open),
shiny::p(btn.close))
})
list(
shiny::observeEvent(input$admin_panel.close_test, close_test(opt)),
shiny::observeEvent(input$admin_panel.open_test, open_test(opt))
)
}
admin_panel.handle_downloads <- function(state, output, opt) {
admin_panel.handle_downloads.current_results.rds(state, output)
admin_panel.handle_downloads.all_results.rds(state, output, opt)
admin_panel.handle_downloads.current_results.csv(state, output)
admin_panel.handle_downloads.all_results.csv(state, output, opt)
admin_panel.handle_download.errors(state, output, opt)
}
admin_panel.handle_downloads.current_results.rds <- function(state, output) {
output$admin_panel.download_current_results.rds <- shiny::downloadHandler(
filename = "results.rds",
content = function(file) saveRDS(get_results(state, add_session_info = TRUE,
complete = FALSE),
file = file)
)
}
admin_panel.handle_downloads.current_results.csv <- function(state, output) {
output$admin_panel.download_current_results.csv <- shiny::downloadHandler(
filename = "results.csv",
content = function(file) {
df <- tryCatch({
as.data.frame(get_results(
state, complete = FALSE, add_session_info = TRUE)) %>%
list_cols_to_json()
}, error = function(e) {
msg <- "Failed to create csv file. Try saving an RDS file instead."
shiny::showNotification(msg, type = "error")
data.frame(result = msg)
})
write.csv(df, file, row.names = FALSE)
}
)
}
admin_panel.handle_download.errors <- function(state, output, opt) {
output$admin_panel.download_errors <- shiny::downloadHandler(
filename = "errors.zip",
content = function(file) zip_dir(opt$error_dir, file)
)
}
admin_panel.handle_downloads.all_results.rds <- function(state, output, opt) {
output$admin_panel.download_all_results.rds <- shiny::downloadHandler(
filename = "results.zip",
content = function(file) zip_dir(opt$results_dir, file)
)
}
admin_panel.handle_downloads.all_results.csv <- function(state, output, opt) {
output$admin_panel.download_all_results.csv <- shiny::downloadHandler(
filename = "results.csv",
content = function(file) {
df <- tryCatch({
df_all_results(opt$results_dir) %>%
list_cols_to_json()
}, error = function(e) {
print(e)
msg <- "Failed to create csv file. Try saving an RDS file instead."
shiny::showNotification(msg, type = "error")
data.frame(result = msg)
})
write.csv(df, file, row.names = FALSE)
}
)
}
# Serialise every list-column of <df> into a JSON character column,
# leaving all other columns untouched. Returns the modified data frame.
list_cols_to_json <- function(df) {
  list_cols <- which(purrr::map_lgl(df, is.list))
  for (col in list_cols) {
    df[[col]] <- purrr::map_chr(df[[col]], jsonlite::toJSON)
  }
  df
}
# Creates a zip archive of <dir> at <output_file>.
#
# The archive is built from <dir>'s parent so entries are stored relative
# to the directory name. The output path is resolved to an absolute path
# up front because the working directory is changed below. The working
# directory is now restored via on.exit() on EVERY exit path (the original
# restored it manually in two places and could leave the process in the
# wrong directory if the error handler itself failed).
zip_dir <- function(dir, output_file) {
  if (!dir.exists(dir)) stop("<dir> doesn't exist")
  dir <- gsub("/$", "", dir)
  dir_parent <- dirname(dir)
  dir_name <- basename(dir)
  output_full_path <- file.path(normalizePath(dirname(output_file)),
                                basename(output_file))
  old_wd <- getwd()
  # Guarantee the working directory is restored no matter how we exit.
  on.exit(setwd(old_wd), add = TRUE)
  tryCatch({
    setwd(dir_parent)
    utils::zip(zipfile = output_full_path, files = dir_name)
  }, error = function(e) {
    # Best-effort: surface the failure to the user rather than crashing.
    shinyjs::alert("failed to create zip file")
  })
}
# Loads every saved results file from <results_dir> and combines them
# into a single data frame (one row per participant file), ordered by
# the session's last-save time. Returns an empty data frame when there
# are no result files. Aborts (with a user-facing notification) if any
# file yields duplicated column names, since rbind.fill cannot merge
# those meaningfully.
df_all_results <- function(results_dir) {
  files <- list_results_files(results_dir, full.names = TRUE)
  if (length(files) == 0L) return(data.frame())
  data <- lapply(files, readRDS)
  data_df <- lapply(data, as.data.frame)
  # TRUE if any individual results data frame repeats a column name.
  any_cols_duplicated <- any(vapply(data_df,
                                    function(df) anyDuplicated(names(df)),
                                    integer(1)) > 0L)
  if (any_cols_duplicated) {
    msg <- "CSV export cannot cope with duplicated fields in results objects."
    shiny::showNotification(msg, type = "error")
    stop(msg)
  }
  # rbind.fill pads missing columns with NA so heterogeneous result
  # files can still be stacked.
  df <- do.call(plyr::rbind.fill, data_df)
  df <- df[order(df$session.current_time, decreasing = FALSE), ]
  df
}
# Entry point for the admin panel's server-side logic: wires up the UI
# renderers, download handlers, and observers. Does nothing when the
# admin panel is disabled in the options.
admin_panel.server <- function(state, input, output, session, opt, elts) {
  if (!opt$enable_admin_panel) {
    return(invisible(NULL))
  }
  admin_panel.render_ui(state, output, elts, opt)
  admin_panel.handle_downloads(state, output, opt)
  admin_panel.observers(state, input, output, session, opt)
}
|
9a7633c7c6de3e363ae620a755ec7d66e047b1ac
|
a693cd98dd57799c39a48c3b2433d91dbc7d1217
|
/R/invoicer_email.R
|
67e7994710e92aed09322fa1c23d5bac6481bf9b
|
[] |
no_license
|
anthonypileggi/invoicer
|
c9bf75904fa30f9f66ff098b794e6391e01c40ed
|
5bfbf876a7bc03fb6e82055f00427e3d46f4dffb
|
refs/heads/master
| 2020-03-29T06:34:17.966329
| 2018-11-12T14:22:06
| 2018-11-12T14:22:06
| 149,630,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,534
|
r
|
invoicer_email.R
|
#' Send an invoice via email
#'
#' Provide either the invoice record itself (\code{x}) or an invoice id
#' (\code{id}) to look the record up.
#'
#' @param x invoice object (a one-row data frame); if \code{NULL}, the
#'   invoice is looked up by \code{id} instead
#' @param id invoice id; used only when \code{x} is \code{NULL}
#' @export
invoicer_email <- function(x = NULL, id = NULL) {
  # NOTE: x and id now default to NULL; previously `invoicer_email(id = 5)`
  # errored with "argument x is missing, with no default".
  # get details for the invoice we are sending
  if (is.null(x)) {
    if (is.null(id))
      return("You must provide either an invoice object (x) or an invoice id (id).")
    # identify file using invoice id
    invoices <- invoicer_get_invoices()
    x <- invoices[invoices$id == id, ]
  }
  # check if email can/should be sent: refuse unless exactly one match
  if (nrow(x) == 0)
    return(message("No matching invoice found. No email sent."))
  if (nrow(x) > 1)
    return(message("More than 1 matching invoice found. No email sent."))
  # compose/send email
  invoicer_email_send(x)
}
# Helpers --------------------
#' Set up email credentials (gmail only)
#'
#' Writes a blastula credentials file for the company gmail account. The
#' password is read from the INVOICER_EMAIL_PASSWORD environment variable
#' so it never appears in source code.
#' @export
invoicer_email_setup <- function() {
  company <- invoicer_get_company()
  blastula::create_email_creds_file(
    user = company$email,
    sender = company$name,
    password = Sys.getenv("INVOICER_EMAIL_PASSWORD"),
    provider = "gmail",
    creds_file_name = "~/.e_creds"
  )
}
#' Compose/send an invoice email
#'
#' Downloads the invoice PDF from Google Drive to a temp file, composes a
#' templated email for the billing period, CCs the sender, attaches the
#' PDF, and deletes the temp file afterwards. Assumes \code{x} is a
#' one-row invoice record with columns \code{client}, \code{drive_id},
#' \code{id}, \code{start_date} and \code{end_date} (the columns read
#' below) -- confirm against \code{invoicer_get_invoices()}.
#'
#' @param x invoice data
#' @param preview preview email
invoicer_email_send <- function(x, preview = FALSE) {
  # load info: sender (company) details and the matching client row
  me <- invoicer_get_company()
  client <- dplyr::filter(invoicer_get_clients(), client == x$client)
  # download invoice file to a temp path; removed with unlink() at the end
  pdf_file <- file.path(tempdir(), "invoice.pdf")
  #pdf_file <- paste0("invoice_", invoice$id, ".pdf")
  googledrive::drive_download(
    googledrive::as_id(x$drive_id),
    path = pdf_file
  )
  # compose email: {curly} placeholders are interpolated by blastula from
  # the named arguments below
  # TODO: include image inline: blastula::add_image(pdf_file)
  email <-
    blastula::compose_email(
      body = "
Dear {client},
Your invoice for the billing period {format(start_date, '%m/%d/%Y')} - {format(end_date, '%m/%d/%Y')} is attached.
Thanks!
{sender}",
      footer = "Sent on {blastula::add_readable_time()}",
      id = x$id,
      start_date = x$start_date,
      end_date = x$end_date,
      sender = me$name,
      client = x$client
    )
  # preview email in the viewer instead of proceeding silently
  if (preview)
    blastula::preview_email(email = email)
  # send email using the credentials file written by invoicer_email_setup()
  blastula::send_email_out(
    email,
    from = me$email,
    to = stringr::str_trim(stringr::str_split(client$email, ",")[[1]]), # parse the comma-separated list of client email addresses
    cc = me$email,
    subject = paste0("Invoice from ", me$name, " (", format(x$start_date, "%m/%d/%Y"), " - ", format(x$end_date, "%m/%d/%Y"), ")"),
    attachments = pdf_file,
    creds_file = "~/.e_creds"
  )
  unlink(pdf_file)
}
|
cd05720161704f9e1ae0356e39981a3863cb4779
|
b90990e94b63b4a18eb6f3b0332bdae16f38591a
|
/code_to_run_AD_Github.R
|
d748043dc60f2838f0fc95c4a1bce595a653fcd8
|
[] |
no_license
|
Yuanzhi-H/Real-Time-Detection-Methods
|
228c7a85c3140816a6ffd0cc996955eef020caa4
|
5cd5bf5910dc282e41a82efe67e328af346a11b1
|
refs/heads/master
| 2020-04-11T13:02:47.722425
| 2018-12-14T15:22:42
| 2018-12-14T15:22:42
| 161,801,860
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,230
|
r
|
code_to_run_AD_Github.R
|
# Use the code below for the real-time simulation of the turning process
# Last edited on 20/11/2018
# Yuanzhi Huang
# Clear workspace and load in packages ------------------------------------
rm(list=ls())
require(ggplot2)
# Load data ---------------------------------------------------------------
DATBASE=readRDS(file="Turn_DATBASE.rds")
# Source in functions -----------------------------------------------------
source("RealTimeMachine.R") # the main function for the test
source("runGESD.R") # function for the GESD test
# Run "RealTimeMachine" on the training data ------------------------------
# Define the candidate weights
wset=seq(0.8,3,0.2)
# Define the threshold-based rule
minfval=0.5;mindiff=minfval*0.1
ntrain=12 # number of runs for training
ntests= 4 # number of runs for testing
nbroke= 5 # number of runs with broken tool
ntotal=ntrain+ntests+nbroke
# Create a new csv file to save all the results
newcsv=TRUE
# Turn on the training mode (runmode=1) and take data from the 12 runs of progressive wear
runmode=1
denominator=ntrain
# Choose the detection method (first digit of Methodset=1,2,3) and the data (second digit of Methodset=1,2) to use
Methodset=c(11,12,21,22)
# Choose a team streaming scheme
for(RW in c( 0,60)){
# If we use the rolling window scheme, it is feasible to run the GESD test
if (RW!=0){Methodset=c(Methodset,31,32)}
for(Method in Methodset){
Result=RealTimeMachine(DATBASE,Method,wset,RW,minfval,mindiff,runmode)
out=Result[[1]]
# Calculate the mean of the tool wear prediction scores
out[,c(4,7)]=round(out[,c(4,7)]/denominator,2)
tosave=cbind(out[,1],out[,4],out[,7],Method%/%10,RW,Method%%10)
if (newcsv){
colnames(tosave)=c("W","Score1","Score2","Method","Stream","Difference")
write.table(tosave,"out.csv",F,F,",",row.names=F,col.names=T)
newcsv=FALSE
}else{
write.table(tosave,"out.csv",T,F,",",row.names=F,col.names=F)
}
# save the results:
# 1. weight
# 2. mean score in the univariate case
# 3. mean score in the multivariate case based on the Euclidean distance
# 4. detection method (1 for statistical process control; 2 for chi-squared test; 3 for GESD test)
# 5. time streaming (0 for rolling origin; 60 for rolling window of 60 seconds each)
# 6. data (1 for first-order difference; 2 for minimum successive difference)
}}
# Test performance --------------------------------------------------------
# Use the rolling origin scheme
RW=0
# Use statistical process control method for the first-order difference data
Method=11
# Turn on the test mode
runmode=2
denominator=ntests
Result=RealTimeMachine(DATBASE,Method,wset,RW,minfval,mindiff,runmode)
out=Result[[1]]
out[,c(4,7)]=round(out[,c(4,7)]/denominator,2)
write.table(cbind(out[,1],out[,4],out[,7],Method%/%10,RW,Method%%10),"out_tests.csv",T,F,",",row.names=F,col.names=F)
# Run a second test on the broken tool cases (OPTIONAL)
runmode=3
denominator=nbroke
Result=RealTimeMachine(DATBASE,Method,wset,RW,minfval,mindiff,runmode)
out=Result[[1]]
out[,c(4,7)]=round(out[,c(4,7)]/denominator,2)
write.table(cbind(out[,1],out[,4],out[,7],Method%/%10,RW,Method%%10),"out_broke.csv",T,F,",",row.names=F,col.names=F)
# Plot results ---------------------------------------------------------
# FIG. 5 of the paper
# Load the results to plot the mean scores across different scenarios
allscores=read.csv("out.csv",T,)
allscores[,4]=factor(allscores[,4])
allscores[,5]=factor(allscores[,5])
allscores[,6]=factor(allscores[,6])
levels(allscores[,4])[1]= "Control"
levels(allscores[,4])[2]= "Chi2 Test"
levels(allscores[,4])[3]= "GESD Test"
levels(allscores[,5])[1]= "RO"
levels(allscores[,5])[2]= "RW=60"
levels(allscores[,6])[1]= "FOD"
levels(allscores[,6])[2]= "MSD"
# Mean scores in the univariate case
qplot(W ,Score1,data=allscores,color=Method,shape=Method,facets=Stream~Difference,geom="point",
xlab="Weight",ylab="Mean Performance Score",ylim=c(0,0.75))+geom_line(aes(linetype=Method))+
theme_bw()
# Mean scores in the multivariate case
qplot(W*0.5,Score2,data=allscores,color=Method,shape=Method,facets=Stream~Difference,geom="point",
xlab="Weight",ylab="Mean Performance Score",ylim=c(0,0.75))+geom_line(aes(linetype=Method))+
theme_bw()
# FIG. 6 of the paper
# Below we plot and examine how accurate our predictions are
# Define the weight
wset=1.2
# Turn on the mode when data of all the 21 runs are fed to "RealTimeMachine"
runmode=4
# The vector below contains the time when tool wear has reached 150 micrometres
brokeset=c(86.9,115.9,625.9,165.6,45.0,107.8,538.3,383.4,75.0,48.7,68.4,193.8,20.0,65.7,351.6,361.5,30.0,39.5,142.5,144.6,55.0)
# The vector below indicates the order of the 21 runs, which was chosen in random
randruns=c(1:4,8,10:12,15:16,18:19,6:7,14,20,5,9,13,17,21)
brokeset=brokeset[randruns]
Result=RealTimeMachine(DATBASE,Method,wset,RW,minfval,mindiff,runmode)
Test=as.factor(c(0,0,0,0,2,1,1,0,2,0,0,0,2,1,0,0,2,0,0,1,2))
Test=Test[randruns]
levels(Test)=c("No","Yes","Broken")
Score=round(Result[[2]][-nrow(Result[[2]]),8],2)
qplot(brokeset,Result[[2]][-nrow(Result[[2]]),4],color=Score,shape=Test,xlab="Tool Wear Time (Seconds)",ylab="Predicted Time (Seconds)",xlim=c(0,650),ylim=c(0,650))+
geom_abline(intercept=0,slope=1,lty=2,col="red")+
geom_point(size=2)+
theme_bw()
|
be1481e001cda45d17c1d9489bfc2d50795bd387
|
d064a4ee7b4f3d11afb5c089e55b77f2e6882692
|
/man/Etna_boundary.Rd
|
4f7ad8e17f4eb3f907f8b2d48e442ef605f01add
|
[] |
no_license
|
cran/movecost
|
9a35613d033990acf2104d98fff4a242dd7178ee
|
2d1b2e6d39f569bbc4d20e0434f7ee1800344128
|
refs/heads/master
| 2023-04-13T16:03:42.612617
| 2023-04-12T08:20:07
| 2023-04-12T08:20:07
| 145,903,913
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 459
|
rd
|
Etna_boundary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Etna_boundary.r
\docType{data}
\name{Etna_boundary}
\alias{Etna_boundary}
\title{Dataset: bounding polygon representing a study area on Mount Etna (Sicily, Italy)}
\format{
SpatialPolygonDataFrame
}
\usage{
data(Etna_boundary)
}
\description{
A SpatialPolygonDataFrame representing an area on Mount Etna (Sicily, Italy), to be used to download elevation data.
}
\keyword{datasets}
|
b9d45d6f5b0995b89eb6e6168279a6ac0ad815e4
|
452a067dc37b86e4370045f3b799cbaf17b7d7d5
|
/man/print.mjca.rd
|
4b0f5c1528cb488b8b1240def3f71ddc1f9c948b
|
[] |
no_license
|
cran/ca
|
3752d68a550b2531561f8cfac07e3190ee13ab49
|
00cc046a4c3158f017c0ddfc03fd9acb6d38d2b8
|
refs/heads/master
| 2021-01-22T07:13:55.357336
| 2020-01-24T05:59:53
| 2020-01-24T05:59:53
| 17,694,909
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,215
|
rd
|
print.mjca.rd
|
\name{print.mjca}
\alias{print.mjca}
\title{Printing mjca objects}
\description{Printing method for multiple and joint correspondence analysis objects}
\usage{\method{print}{mjca}(x, ...) }
\arguments{
\item{x}{Multiple or joint correspondence analysis object returned by \code{\link{mjca}}}
\item{...}{Further arguments are ignored}
}
\details{
The function \code{print.mjca} gives the basic statistics of the \code{mjca} object. First the eigenvalues (that is, principal inertias) and their percentages with respect to total inertia are printed. Then for the rows and columns respectively, the following are printed: the masses, chi-square distances of the points to the centroid (i.e., centroid of the active points), point inertias (for active points only) and principal coordinates on the first \code{nd} dimensions requested (default = 2 dimensions). The function \code{\link{summary.mjca}} gives more detailed results about the inertia contributions of each point on each principal axis.\cr
For supplementary points, masses and inertias are not applicable.
}
\seealso{\code{\link{mjca}}}
\examples{
data("wg93")
print(mjca(wg93[,1:4]))
# equivalent to:
mjca(wg93[,1:4])
}
|
e13fd1edeec59a5a24c42a6e72c15cc0e55fa0f7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/macleish/examples/mass_gis.Rd.R
|
49e022333b88aaea120ff577b1d4ffe6a3793111
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
mass_gis.Rd.R
|
library(macleish)
### Name: mass_gis
### Title: Retrieve elevation layers from MassGIS
### Aliases: mass_gis macleish_intersect
### ** Examples
## Not run:
##D # have to download the shapefiles...could take a while...
##D elevation <- mass_gis()
##D macleish_elevation <- macleish_intersect(elevation)
##D if (require(sp)) {
##D plot(macleish_elevation)
##D }
##D
##D dcr_trails <- mass_gis("dcrtrails")
##D
## End(Not run)
|
3e93442cdbbf8a4135534514207c1b5ceec672f4
|
da5acef6a354bf77a53c0c538afc671f91c77abe
|
/cachematrix.R
|
2d47e29e75587848dbdef9a0cc0812d1b33ab8a3
|
[] |
no_license
|
Matsolof/ProgrammingAssignment2
|
6a4403b4af250160b9bec476e02c5805550a689c
|
43d95cd1185953f6fc5f93facc6c1b5f1230fd68
|
refs/heads/master
| 2021-01-21T07:53:18.217199
| 2014-12-17T12:48:08
| 2014-12-17T12:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,030
|
r
|
cachematrix.R
|
## With these two functions any invertible matrix may be
## inverted and the inverse cached.
## makeCacheMatrix creates a wrapper around a matrix that can also
## store its cached inverse (computed via cacheSolve).
makeCacheMatrix <- function(x = matrix()) {
  # Wraps a matrix together with a cache slot for its inverse.
  # Returns a list of accessor closures sharing this environment:
  #   set / get       - replace / retrieve the stored matrix
  #   setinv / getinv - store / retrieve the cached inverse
  inv <- NULL
  set <- function(y) {
    # Storing a new matrix invalidates any previously cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(solve) {
    inv <<- solve
  }
  getinv <- function() {
    inv
  }
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve calculates the inverse of the wrapped matrix,
## first checking for its existence in the cache;
## if a cached inverse exists, it is returned directly.
cacheSolve <- function(x, ...) {
  # Returns the inverse of the matrix wrapped in <x> (a makeCacheMatrix
  # object), computing it with solve() on the first call and serving the
  # cached copy on every later call. Extra arguments are passed to solve().
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
amatrix<-makeCacheMatrix(matrix(c(1,2,3,4),nrow=2, ncol=2))
amatrix$get()
cacheSolve(amatrix)
|
4629da5a0ba9362d8d8bd6904038fef14d4f304f
|
07e2aa9f2a3a3f1fb365254045705208906aa913
|
/Week 6 R Challenge.R
|
8b3fba6f4960aace33d0948393edd9a88008434e
|
[] |
no_license
|
LSESUCODING/Bootcamps
|
8e43f70fd9c984e56af215494375357895e147e8
|
19476ef724eb58be033fd313017ec08deb5c86df
|
refs/heads/master
| 2020-08-02T09:03:27.279066
| 2020-03-01T10:02:08
| 2020-03-01T10:02:08
| 211,296,460
| 3
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
Week 6 R Challenge.R
|
# Minimal 3D demo with the rgl package: draw a unit sphere with axes.
install.packages("rgl")
library(rgl)
open3d() # create a new plot
spheres3d(x = 0, y = 0, z = 0, radius = 1) # unit sphere centered at origin
axes3d() # add axes
|
2a8da1345fc67517dbcfcc698938bfd29b6469b7
|
f18f32ed6fc1a8d132fdc697210b3e78d9381893
|
/src/ConvertModelPerfDataFrame.R
|
363f69094e694dac90d0666fe9bfcba723c4b9e8
|
[] |
no_license
|
jimthompson5802/kaggle-OttoGroup
|
43b98fd800101e80cf3bf601d86263a079ade68c
|
1e14c6187f69af1511d4c0677a7274116bc6d74b
|
refs/heads/master
| 2021-01-10T09:11:15.892890
| 2015-07-15T23:32:34
| 2015-07-15T23:32:34
| 36,255,130
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,042
|
r
|
ConvertModelPerfDataFrame.R
|
##
# code to convert to new model Performance data frame
#
WORK.DIR <- "./src/gbm2_model"
# convert current bestTune data to string
bestTune <- apply(modPerf.df[,9:35],1,function(x){as.list(x)})
bestTune <- sapply(bestTune,function(x){paste(names(x),as.character(x),sep="=",collapse=",")},
simplify=TRUE)
# model.parms <- apply(modPerf.df["ntree"],1,function(x){as.list(x)})
# model.parms <- sapply(model.parms,function(x){paste(names(x),as.character(x),sep="=",collapse=",")},
# simplify=TRUE)
n <- length(bestTune)
modelPerf.df <- data.frame(modPerf.df[,1:8],
improved=rep("",n),
bestTune=bestTune,
tune.grid=rep("",n),
model.parms=rep("",n),
comment=rep("",n),
features=modPerf.df[,"features"],
stringsAsFactors=FALSE)
save(modelPerf.df,file=paste0(WORK.DIR,"/modelPerf.RData"))
|
e35f58b67203c4195996b12df60d5d13370e8d29
|
68d55860e526f82501a8db3b1afc5a869142feef
|
/PredApp/findWords.R
|
b41137ccbfa5d68ef89b9364362ad25acd03a1b7
|
[] |
no_license
|
diegommora/Capstone_Project
|
3b4d9392e8b0cf8766ec0d36d6d0ca79d825a981
|
640222d174baadcf64496c8f16ee0a183f630e22
|
refs/heads/master
| 2020-07-02T14:11:15.540351
| 2019-08-09T23:21:57
| 2019-08-09T23:21:57
| 201,552,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 879
|
r
|
findWords.R
|
# Back-off next-word prediction.
#
# Tries the quadgram table on the last three tokens, then the trigram
# table on the last two, then the bigram table on the last one, and
# returns the placeholder string when nothing matches.
#
# sentkn       - character vector of input tokens (assumed length >= 3)
# *gramFinal   - lookup tables with token columns X1..Xn whose final
#                column holds the predicted word
# Returns a single character string (the predicted next word).
#
# Bug fix: the original fallback assigned a character scalar to predictW
# and then indexed it with predictW[1, ], which raised
# "incorrect number of dimensions" whenever no n-gram matched.
findwordsQuad <- function(sentkn, quadgramFinal, trigramFinal, bigramFinal) {
  l <- length(sentkn)
  predictquad <- quadgramFinal %>% filter(X1 == sentkn[l - 2] &
                                            X2 == sentkn[l - 1] &
                                            X3 == sentkn[l])
  predicttri <- trigramFinal %>% filter(X1 == sentkn[l - 1] &
                                          X2 == sentkn[l])
  predictbi <- bigramFinal %>% filter(X1 == sentkn[l])
  if (nrow(predictquad) != 0) {
    predictW <- predictquad[1, 4]
  } else if (nrow(predicttri) != 0) {
    predictW <- predicttri[1, 3]
  } else if (nrow(predictbi) != 0) {
    predictW <- predictbi[1, 2]
  } else {
    # Return the placeholder directly instead of indexing a scalar.
    return("no hay nada")
  }
  as.character(predictW)
}
|
04a310df9bbcfdb78bfc0654ea1c51e55c018550
|
656dc61dba9cba909935d2e50cd6c23ee4f49977
|
/RforML/Code/1. Data Types.R
|
7115cbd610263085def4641569d9380b41cc6d08
|
[] |
no_license
|
akshafmulla/RProgrammingforML
|
e391696c70d9c3e11e0a03b8ae00f976e8c6b680
|
57666331b7a2bccf5ddb1d7e464ce25a6bc620a2
|
refs/heads/master
| 2020-04-25T18:50:30.152156
| 2019-02-27T22:46:49
| 2019-02-27T22:46:49
| 172,998,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,448
|
r
|
1. Data Types.R
|
#shortcut to run Ctrl+enter
2+3
a = 2+3
a
## Assignment ##
x = 3
x
a3 = 3
x <- 3 # alt + - "<-"
x = 3
x = x+1
x
############################################################
## Data Types in R ##
############################################################
# Numeric - real and decimals
num1 <- 1
num2 <- 1.7
class(num1)
class(num2)
# Integer
num <- 3L
class(num)
is.integer(num)
is.integer(num1)
as.integer(num1)
#Complex
num4 <- as.complex(num1)
num4
a <- -20
class(a)
sqrt(a)
as.complex(a)
sqrt(as.complex(a))
sqrt(-20)
sqrt(-20+0i)
#Character
ch1 <- "RProgramming"
class(ch1)
ch1 <- c("st", "at", "is")
#Logical
x = c(3,7,1,2)
class(x)
y <- x > 2
y
class(y)
u = TRUE; v = FALSE
u & v # U AND v
u | v # u OR v
!u # negation of u
###################################
## coercing data type ##
###################################
z = c(1,2,3,4,5,6)
z
class(z)
y <- 1:6
y
class(y)
#------------------------
z = 3
class(z)
z <- as.integer(z)
class(z)
############################################################
## Type coercion examples ##
############################################################
z = c(0, 1,2,3,4,5,6)
class(z)
z <- as.integer(z)
class(z)
z <- as.logical(z)
z
class(z)
z <- as.character(z)
z
class(z)
a = 10
a = as.logical(a)
a
# Nonsensical coercion results in NAs
x <- c("a", "b", "c")
as.numeric(x)
as.logical(x)
############################################################
## Operators in R ##
############################################################
###################################
## Arithmetic Operators ##
###################################
# Arithmetic Operators '+' Addition #
# Addition in vectors of same length #
v <- c(2, 3, 4)
v
print (v)
t <- c(5, 6, 7)
t
p <- v + t
p
# Addition in vectors of different length; lengths are multiples of each other #
# (the shorter vector is recycled silently, without a warning)
v <- c(2, 3, 4, 8, 9, 10)
v
print (v)
t <- c(5, 6, 7)
t
p <- v + t
p
# Addition in vectors of different length; lengths are not multiples of each other #
v <- c(2, 3, 4, 8, 9, 10, 11, 12)
v
print (v)
t <- c(5, 6, 7)
t
p <- v + t
p
# Warning message: In v + t : longer object length is not a multiple of shorter object length
# Arithmetic Operators '-' Subtraction #
v <- c(2, 3, 4)
v
t <- c(5, 6, 7)
t
p <- v - t
p
q <- t - v
q
# Arithmetic Operators '*' Multiplication #
p = v * t
p
# Arithmetic Operators '/' Division #
v <- c(20, 30, 40)
v
t <- c(5, 6, 7)
t
p <- v / t
p
v <- c(20, 30, 42)
p <- v / t
p
# Arithmetic Operators '%%' remainder of the first vector when it is divided by the second vector #
v <- c(22, 37, 42)
v
t <- c(5, 6, 7)
t
p <- v %% t
p
#--------------
v <- c(22,37,41)
v
t <- c(5,6,7)
t
p <- v %% t
p
#--------------
# Arithmetic Operators '%/%' Quotient (integer division) #
v <- c(22, 37, 42)
v
t <- c(5, 6, 7)
t
p <- v %/% t
p
# Arithmetic Operators '^' the first vector is raised to the power of the second vector #
v <- c(10, 11, 12)
v
t <- c(2, 3, 4)
t
p <- v ^ t
p
#----------------
v <- c(10.5, 11, 12)
v
t <- c(2, 3, 4.5)
t
p <- v ^ t
p
###################################
## Relational Operators ##
###################################
# Relational operator ">" , Checks if each element of the first vector is
# greater than the corresponding element of the second vector
v <- c(2,5.5,6,9)
t <- c(8,2.5,14,9)
print(v>t)
# Relational operator "<", Checks if each element of the first vector is less
# than the corresponding element of the second vector.
print(v < t)
# Relational operator "==", Checks if each element of the first vector is
# equal to the corresponding element of the second vector.
v <- c(2,5.5,6,9)
t <- c(8,2.5,14,9)
print(v == t)
# Relational operator "<=", Checks if each element of the first vector is less than or equal
# to the corresponding element of the second vector.
v <- c(2,5.5,6,9)
t <- c(8,2.5,14,9)
print(v<=t)
# Relational operator ">=", Checks if each element of the first vector is
# greater than or equal to the corresponding element of the second vector.
v <- c(2,5.5,6,9)
t <- c(8,2.5,14,9)
print(v>=t)
v <- c(100,1,1)
t <- c(2,2.5,3)
v>=t
# Relational operator "!=", checks if each element of the first vector is unequal
# to the corresponding element of the second vector.
v <- c(2,5.5,6,9)
t <- c(8,2.5,14,9)
print(v!=t)
|
0da0dba9792ae9effa4ed5a04d91cb430e9e9000
|
2cf8d3bfa83c0c36f29731c2a58f4f86ca88b1ff
|
/QuGene/Data-processing/var-files.r
|
a33b31624676c9acb15104c67c67c4b82fcee6fa
|
[] |
no_license
|
Cork18/peas-andlove
|
169b42b6015da424411e51d6dddbc218f772f9fd
|
f9a3ab1a49333e49dedbd715ae5c79baf10b1e21
|
refs/heads/master
| 2023-08-23T10:31:52.221291
| 2021-11-04T17:33:30
| 2021-11-04T17:33:30
| 400,336,812
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,384
|
r
|
var-files.r
|
#############################################################
# VAR FILES
# Post-process QU-GENE *.var output: summarise additive genetic
# variance (VarADD_1) per breeding strategy and cycle, and build
# line, point, and boxplot views.
# Fixes: bare section-header lines were missing their "#" and made
# the script unparseable; a stray trailing comma in read_table()
# was removed; ggplot2 is now loaded before its first use.
#############################################################

# ---- Reading in the data ----
# NOTE(review): setwd() with an absolute personal path is not portable;
# prefer project-relative paths.
setwd("/Users/jennylin/files/McGill/Thesis/QUGENE/3run_output")
library("readr")
library("dplyr")
library("ggplot2")  # moved up: ggplot() is used before the original library() call

# skip = 1 drops the one-line header above the column names.
SWvar <- read_table("z_out2.var", col_names = TRUE, locale = default_locale(), skip = 1)

# Keep the seed-weight ("SW") trait and the identifier/variance columns.
varsort <- SWvar %>% filter(Trait == "SW") %>% dplyr::select(2, 3, 4, 12, 13, 14)
varsort$Strategy <- as.factor(varsort$Strategy)
varsort$Run <- as.factor(varsort$Run)
varsort$Cycle <- as.factor(varsort$Cycle)

# ---- Line graph (mean additive variance per strategy x cycle) ----
vargrp <- varsort %>% group_by(Strategy, Cycle) %>% summarize(VarADD_1 = mean(VarADD_1, na.rm = TRUE))
varline <- ggplot(data = vargrp, aes(x = Cycle, y = VarADD_1, group = Strategy)) +
  geom_line(aes(color = Strategy)) +
  scale_colour_discrete(name = "Strategy", labels = c("Bulk", "Mass", "SSD", "Pedigree")) +
  ylab("Variance") +
  ggtitle("Genetic variance")

# ---- Alternatively (no averaging): one point per run ----
vargrp2 <- varsort %>% group_by(Strategy, Cycle)
varpoint2 <- ggplot(data = vargrp2, aes(x = Cycle, y = VarADD_1, group = Strategy)) +
  geom_point(aes(color = Strategy)) +
  scale_colour_discrete(name = "Strategy", labels = c("Bulk", "Mass", "SSD", "Pedigree")) +
  ylab("Variance") +
  ggtitle("Genetic variance")

# ---- Boxplot (all cycles together) ----
varplot <- ggplot(varsort, aes(x = Cycle, y = VarADD_1, fill = Strategy)) +
  geom_boxplot() +
  labs(fill = "Strategy") +
  ylab("Variance") +
  ggtitle("Genetic variance")

# ---- Separate boxplots per cycle ----
var5 <- varsort %>% filter(Cycle == "5")
var10 <- varsort %>% filter(Cycle == "10")
var15 <- varsort %>% filter(Cycle == "15")
var20 <- varsort %>% filter(Cycle == "20")
varplot5 <- ggplot(var5, aes(x = Cycle, y = VarADD_1, fill = Strategy)) +
  geom_boxplot() +
  labs(fill = "Strategy") +
  ylab("Variance") +
  ggtitle("Genetic variance at cycle 5")
varplot10 <- ggplot(var10, aes(x = Cycle, y = VarADD_1, fill = Strategy)) +
  geom_boxplot() +
  labs(fill = "Strategy") +
  ylab("Variance") +
  ggtitle("Genetic variance at cycle 10")
varplot15 <- ggplot(var15, aes(x = Cycle, y = VarADD_1, fill = Strategy)) +
  geom_boxplot() +
  labs(fill = "Strategy") +
  ylab("Variance") +
  ggtitle("Genetic variance at cycle 15")
varplot20 <- ggplot(var20, aes(x = Cycle, y = VarADD_1, fill = Strategy)) +
  geom_boxplot() +
  labs(fill = "Strategy") +
  ylab("Variance") +
  ggtitle("Genetic variance at cycle 20")
|
cc9388991c75130ba3e6d04011bf096c4f90d870
|
1289c7c8bc395d751cc3fdc89041967e331785fd
|
/9.29/beta estimate.r
|
6dfb620cbc961944e2dfa5e8bacb799e289c7ae0
|
[] |
no_license
|
Quincy15/Statistical-Software
|
fb67e35b46f89bc3cdb54a4eb2989ac3e0f1e98a
|
5171371b797dd46694261adc78938cda9890b8ea
|
refs/heads/master
| 2022-12-22T15:06:25.975713
| 2020-09-29T01:57:44
| 2020-09-29T01:57:44
| 296,006,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,409
|
r
|
beta estimate.r
|
# Estimate the CAPM beta of MSFT against the S&P 500, using overnight
# USD LIBOR as the risk-free rate. Data come from Quandl and Yahoo Finance.
rm(list = ls())   # NOTE(review): rm(list = ls()) in scripts is discouraged
#install.packages('Quandl')
#install.packages('quantmod')
library(zoo)
library(xts)
library(Quandl)
library(TTR)
library(quantmod)
#---------------------1. Download and clean the data-------------------------------
# NOTE(review): hard-coded API key; consider reading it from an environment variable.
Quandl.api_key("QGc4jxwf72tdN3h9BzSs")
# Single-stock data / Microsoft Corporation (MSFT) Stock Prices
G1 <- Quandl('EOD/MSFT', start_date = '2013-01-31',
end_date = '2016-11-30')
data1 <- G1$Close
# Whole-market data / S&P500
getSymbols("^GSPC", src="yahoo", from = '2013-01-31', to = '2016-11-30')
G2 <- as.data.frame(GSPC)
G2$Date <- as.Date(row.names(G2))
# Sort newest-first; column 7 is the Date column just added above.
G2 <- G2[order(G2[, 7], decreasing = TRUE), ]
row.names(G2) <- 1:nrow(G2)
data2 <- G2$GSPC.Adjusted
# Risk-free asset data / Overnight London Interbank Offered Rate
#LIBOR: https://www.quandl.com/data/FRED/USDONTD156N-Overnight-
#London-Interbank-Offered-Rate-LIBOR-based-on-U-S-Dollar
G3 <- Quandl("FRED/USDONTD156N", start_date = '2013-01-31',
end_date = '2016-11-30')
data3 <- G3$VALUE
# Check the sample sizes of the 3 datasets
sapply(list(data1, data2, data3), length)
# Identify the dates common to all three series
date_inter12 <- intersect(G1$Date, G2$Date)
date_inter23 <- intersect(G1$Date, G3$DATE)
cdates1 <- intersect(date_inter12, date_inter23)
cdates <- Reduce(intersect, list(G1$Date, G2$Date, G3$DATE))
sum(cdates == cdates1)
data1 <- G1[G1$Date %in% cdates, 'Close']
data2 <- G2[G2$Date %in% cdates, 'GSPC.Adjusted']
data3 <- G3[G3$DATE %in% cdates, 'VALUE']
#--------------------------2. Beta estimation--------------------------
# Simple (moment-based) estimate
# Daily log-return function
logreturn <- function(x) log(head(x, -1) / tail(x, -1))
# Log return of the risk-free asset; the /36000 scaling presumably converts
# the percent-per-annum LIBOR quote on a 360-day basis -- TODO confirm
rft <- log(1 + tail(data3, -1)/36000 * diff(cdates))
# Risk-premium function (excess return over the risk-free rate)
riskpremium <- function(x) logreturn(x) - rft
beta <- cov(riskpremium(data1), riskpremium(data2)) / var(riskpremium(data2))
# Estimate beta via linear regression (with a non-zero intercept)
(fit <- lm(riskpremium(data1) ~ riskpremium(data2)))
# Visualization
plot(riskpremium(data2), riskpremium(data1))
abline(fit, col = 'red')
# Intercept constrained to zero
fit2 <- lm(riskpremium(data1) ~ -1 + riskpremium(data2)) # -1 means no intercept, +1 means with intercept
summary(fit2)
# Residual diagnostics
par(mfrow = c(2, 2))
#par(mfcol = c(2, 2))
plot(fit2)
# Plot 1: residuals vs fitted values (checks independence of residuals)
# Plot 2: normal Q-Q plot (checks residual normality)
# Plot 3: scale-location (checks the equal-variance assumption)
# Plot 4: residuals vs leverage (checks for influential points)
|
1b354fd59fe196ed00f72bb5f1612a83129fede8
|
8d4b2f0ec29916d0e9c0b72aa6d3d37d6afa1dcd
|
/inst/slurm2/sim.R
|
ef9effb370212d266edf7e8f2d93c5bf8072a5a8
|
[] |
no_license
|
EpiModel/EpiABC
|
fbffee553b2d384990dba43c18fac790bb56f5bf
|
ad80ccb31734e24d42d282232796716afbd00383
|
refs/heads/master
| 2021-06-24T00:03:20.664401
| 2020-12-05T14:09:47
| 2020-12-05T14:09:47
| 164,778,789
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
sim.R
|
# SLURM array worker for one batch of one wave of an ABC-SMC run (EpiABC).
library("methods")
suppressMessages(library("EpiABC"))
suppressMessages(library("EpiModel"))
# Prepared ABC input object built upstream (priors/model/targets -- per EpiABC).
prep <- readRDS("data/abc.prep.rda")
# Batch index comes from the SLURM job array; wave number from the environment.
batch <- as.numeric(Sys.getenv("SLURM_ARRAY_TASK_ID"))
wave <- as.numeric(Sys.getenv("wave"))
# Wave 0
# Wave 0 is seeded from `prep`; later waves omit `input` and presumably
# resume from the previous wave's saved state -- confirm in EpiABC docs.
if (wave == 0) {
abc_smc_wave(input = prep, wave = wave, batch = batch)
} else {
abc_smc_wave(wave = wave, batch = batch)
}
# Combine all batches of this wave, then post-process the wave's results.
merge_abc(wave = wave)
abc_smc_process(wave = wave)
|
d7e01a35699a90d477d730dec90893b705bf1659
|
5f323f30bd7641a4990d9e7f4890f474c785c2f0
|
/R/resource.R
|
42fed2c3a15f1ba4691e8df2d8b5337370793aff
|
[] |
no_license
|
cadnza/pkgTestTools
|
810023fedee454fc47e1bb3318c6fa2c06ecb6c3
|
34a75894d764bce34671846f6b5daaa9b9988a71
|
refs/heads/master
| 2023-06-26T07:45:47.854291
| 2021-08-02T00:31:09
| 2021-08-02T00:31:09
| 391,668,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 324
|
r
|
resource.R
|
# Resolve a resource path from inst ----

# Return the path to a package resource: if the package is installed the
# file lives directly under the package root; in a source checkout it
# lives under inst/. The uninstalled path is returned without checking
# for existence, exactly as before.
resource <- function(path) {
  pkg_root <- rprojroot::find_package_root_file()
  installed_path <- file.path(pkg_root, path)
  if (file.exists(installed_path)) {
    installed_path
  } else {
    file.path(pkg_root, "inst", path)
  }
}
|
1e3286d58cb1203a560670de7c1dd31ecf31463b
|
dc82fe420c0433e256bc5245bba1793ade7e530c
|
/ui.R
|
86ac6321bfb08b2a9ad660ccb797a07c19e040ec
|
[] |
no_license
|
lytze/QPRD
|
8582a61ef3fea5cede316a12a0d1d4c7836b187e
|
3417173f8079ce2db88e0f8a5f886e1d84b57734
|
refs/heads/master
| 2020-07-24T17:12:18.702142
| 2019-09-12T10:16:19
| 2019-09-12T10:16:19
| 207,992,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,242
|
r
|
ui.R
|
# ui.R -- Shiny front end for exploring quantiles, probabilities and
# statistics of the standard normal (Z) and Student's t (T) distributions.
library(shiny)        # library() errors on a missing package; require() only warns
library(shinythemes)

shinyUI(fluidPage(theme = shinytheme("united"),
  tags$head(
    tags$title('QPRD by Lytze'),
    tags$link(rel = "stylesheet", href = "lib/font-awesome/css/font-awesome.min.css")
  ),
  titlePanel(
    # Typo fixed: "Distributoin" -> "Distribution"
    h2("Quantile / Probability / Random Distribution")
  ),
  sidebarLayout(position = "right",
    sidebarPanel(
      # Distribution picker: the values "Z"/"T" drive the conditional
      # panels below. Fixes: label typo "Standarized" -> "Standardized";
      # `selected` must be a choice *value*, so "Normal" was silently
      # ignored -- it is now "Z" (same effective default, stated explicitly).
      selectInput(
        inputId = "I_select_distribution",
        label = "Select Distribution",
        choices = c("Standardized Normal" = "Z", "Student's T" = "T"),
        selected = "Z"
      ),
      tags$hr(),
      # Inputs shown only for the standard normal distribution.
      conditionalPanel(
        condition = "input.I_select_distribution == 'Z'",
        sliderInput(
          label = "Quantile",
          inputId = "I_Z_quantile",
          value = 0.50, min = 0.00, max = 1.00, step = 0.001
        ),
        sliderInput(
          label = "Statistic (Z)",
          inputId = "I_Z_stat",
          value = 0.00, min = -5, max = 5, step = 0.001
        )
      ),
      # Inputs shown only for Student's t distribution.
      conditionalPanel(
        condition = "input.I_select_distribution == 'T'",
        numericInput(
          label = "Degree of Freedom",
          inputId = "I_T_df",
          value = 10, min = 2, max = Inf, step = 1
        ),
        sliderInput(
          label = "Quantile",
          inputId = "I_T_quantile",
          value = 0.50, min = 0.00, max = 1.00, step = 0.001
        ),
        sliderInput(
          label = "Statistic (T)",
          inputId = "I_T_stat",
          value = 0.00, min = -5, max = 5, step = 0.01
        )
      )
    ),
    mainPanel(
      conditionalPanel(condition = "input.I_select_distribution == 'Z'",
        plotOutput(outputId = "O_Z_distribution")),
      conditionalPanel(condition = "input.I_select_distribution == 'T'",
        plotOutput(outputId = "O_T_distribution"))
    )
  )
))
|
62dd1f1cc6745c1ff409a8085f1a8e8843169816
|
0b28447d54df04e8261b2149851ed0bcc6505566
|
/man/plotMakefile.Rd
|
d4d7a2f13cd81125140edcb38018cb454ff68dd9
|
[] |
no_license
|
tdhock/plotMakefile
|
fc86557596eb56f26a0ed07601972631f9b80fcc
|
5d1776847168490d9ac9d149c9baa33cb8e42b13
|
refs/heads/master
| 2021-01-19T00:44:35.194949
| 2014-02-21T14:21:05
| 2014-02-21T14:21:05
| 16,812,564
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,833
|
rd
|
plotMakefile.Rd
|
\name{plotMakefile}
\alias{plotMakefile}
\title{plotMakefile}
\description{plot a Makefile using library(diagram).}
\usage{plotMakefile(makefile, sort.fun = sortDefault, colors = NULL,
categories = NULL, curve = 0, segment.from = 0.1, segment.to = 0.9,
box.type = "none", legend.x = "bottom", ...)}
\arguments{
\item{makefile}{Path to the Makefile.}
\item{sort.fun}{Function that takes a numeric vector of vertical/y values of the
file names to plot, and returns the files names in the order they
should appear on the plot.}
\item{colors}{Named character vector that maps categories to plot colors.}
\item{categories}{Named character vector that maps each category to a regular
expression for matching filenames.}
\item{curve}{passed to plotmat.}
\item{segment.from}{passed to plotmat.}
\item{segment.to}{passed to plotmat.}
\item{box.type}{passed to plotmat.}
\item{legend.x}{passed to legend as x.}
\item{\dots}{passed to plotmat.}
}
\value{Return value of plotmat, plus colors used in the legend, and
categories for the text labels.}
\author{Toby Dylan Hocking}
\examples{
## Default sorting may result in a plot with edge crossings.
f <- system.file(file.path("Makefiles", "custom-capture.mk"),
package="plotMakefile")
plotMakefile(f)
## You can adjust this by providing a custom sort.fun, and you can
## adjust other plotmat parameters such as main (plot title).
sorter <- sortValues("table-variants.R", "trios.RData",
"sample.list.RData", "table-noise.R")
plotMakefile(f, sort.fun=sorter,
main="custom capture variant detection project")
## If you want to just plot everything in black without a legend,
## specify that all files belong to the same category.
plotMakefile(f, categories=".*")
}
|
400ee004c71e3fc1d1c227a7c1e7755c48777493
|
5916f0096fb77d11bfd9a69b607095d205a6f333
|
/OTUtable-package/OTUtable/man/metadata.Rd
|
af5aa8f74e2602a3d0f3a760419b5c3b5dcf186a
|
[] |
no_license
|
McMahonLab/North_Temperate_Lakes-Microbial_Observatory
|
35123ec4ec8f591a7d9f864bc1f30a67a71e006e
|
63543ee4cf7424125ac3064a5429b66855ccff73
|
refs/heads/master
| 2021-01-18T01:57:02.207024
| 2017-06-29T16:16:40
| 2017-06-29T16:16:40
| 47,776,636
| 2
| 2
| null | 2017-06-29T16:16:40
| 2015-12-10T17:21:50
|
R
|
UTF-8
|
R
| false
| false
| 653
|
rd
|
metadata.Rd
|
\name{metadata}
\alias{metadata}
\title{
Lake metadata for OTU table
}
\description{
A dataset containing temperature and oxygen profiles from the lakes in this study
}
\usage{
data(metadata)
}
\format{
A dataframe with 6 columns (measured variables) and 13,607 rows (depth profiles)
}
\details{
Missing data indicated by NA
Some sample dates and metadata dates may not match up exactly; if this presents an issue, please email and I will look at our written records for the right date
Epilimnion and hypolimnion samples each have an identical depth profile entry; search for just one or the other
}
\author{
Alexandra Linz <amlinz16@gmail.com>
}
|
4f68392f9bdb99b7d712723a478d1c15877c61d1
|
92e240738a4ccf673b9f3610386eaa08eef26d6f
|
/volatility/low-vol/returns.R
|
cce3889a7641677003aef6679b87b90b14c9db0d
|
[] |
no_license
|
stockviz/blog
|
564a4671202b92a2d63f13f0207fd8a35810c0b6
|
e00c055742a1229c612669ee29d846a6e2475a43
|
refs/heads/master
| 2023-09-01T15:59:07.746886
| 2023-08-31T04:01:37
| 2023-08-31T04:01:37
| 138,372,618
| 12
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,028
|
r
|
returns.R
|
# Compare NIFTY 50 TR vs NIFTY LOW VOLATILITY 50 TR: monthly return
# differentials and the count of negative differentials by holding period.
library('RODBC')
library('quantmod')
library('PerformanceAnalytics')
library('lubridate')
library('ggplot2')
library('ggthemes')
library('reshape2')
library('dplyr')
source("d:/stockviz/r/config.r")
source("D:/StockViz/public/blog/common/plot.common.R")
options(stringsAsFactors = FALSE)
options("scipen"=100)
reportPath <- "."
indexName1<-"NIFTY 50 TR"
indexName2<-"NIFTY LOW VOLATILITY 50 TR"
startDate<-as.Date("2004-01-01")
endDate<-as.Date("2019-04-30")
# Holding periods to evaluate, in months: 2 months up to 5 years.
cumLb<-seq(2, 12*5)
# DB credentials (ldbserver etc.) come from config.r sourced above.
lcon <- odbcDriverConnect(sprintf("Driver={ODBC Driver 13 for SQL Server};Server=%s;Database=%s;Uid=%s;Pwd=%s;", ldbserver, ldbname, ldbuser, ldbpassword), case = "nochange", believeNRows = TRUE)
pxDf1<-sqlQuery(lcon, sprintf("select PX_CLOSE, TIME_STAMP from BHAV_INDEX where INDEX_NAME='%s' and time_stamp >= '%s' and time_stamp <= '%s'", indexName1, startDate, endDate))
pxDf2<-sqlQuery(lcon, sprintf("select PX_CLOSE, TIME_STAMP from BHAV_INDEX where INDEX_NAME='%s' and time_stamp >= '%s' and time_stamp <= '%s'", indexName2, startDate, endDate))
pXts1<-xts(pxDf1[,1], as.Date(pxDf1[,2]))
pXts2<-xts(pxDf2[,1], as.Date(pxDf2[,2]))
# Monthly returns of both indices, aligned by date.
dXts<-merge(monthlyReturn(pXts1), monthlyReturn(pXts2))
# Drop the first and last rows (presumably partial months -- confirm).
dXts<-dXts[-1,]
dXts<-dXts[-nrow(dXts),]
names(dXts)<-c(indexName1, indexName2)
# Low-volatility minus benchmark, per month.
diffXts<-dXts[,2]-dXts[,1]
sdDf<-data.frame(100*diffXts)
names(sdDf)<-c('DIFF')
sdDf$T<-index(diffXts)
firstDate<-first(index(diffXts))
lastDate<-last(index(diffXts))
xAxisTicks<-seq(from=firstDate, to=lastDate, length.out=10)
pdf(NULL)   # null device: plots are only written to disk via ggsave()
ggplot(sdDf, aes(x=T, y=DIFF)) +
theme_economist() +
geom_line() +
scale_x_date(breaks = xAxisTicks) +
labs(x='', y='%', color='', title=sprintf("%s - %s Return Differential", indexName2, indexName1), subtitle=sprintf("monthly returns [%s:%s]", first(index(dXts)), last(index(dXts)))) +
annotate("text", x=lastDate, y=min(sdDf$DIFF, na.rm=T), label = "@StockViz", hjust=1.1, vjust=-1.1, col="white", cex=6, fontface = "bold", alpha = 0.8)
ggsave(sprintf("%s/%s.%s.monthly.return-diff.png", reportPath, indexName1, indexName2), width=16, height=8, units="in")
########################
# Rolling cumulative-return differentials for each holding period.
cumDiffXts<-NULL
for(lb in cumLb){
cdiff1<-rollapply(dXts[,1], lb, Return.cumulative)
cdiff2<-rollapply(dXts[,2], lb, Return.cumulative)
cumDiffXts<-merge.xts(cumDiffXts, cdiff2-cdiff1)
}
# For each holding period, count how often low-vol underperformed.
numNegatives<-apply(data.frame(cumDiffXts), 2, function(X) sum(ifelse(X < 0, 1, 0), na.rm=T))
lbVsNeg <- data.frame(LB=cumLb, NN=numNegatives)
ggplot(lbVsNeg, aes(x=LB, y=NN)) +
theme_economist() +
geom_point() +
labs(x='Holding Period', y='#', color='', title=sprintf("%s - %s Number of Negative Differentials vs. Holding Period", indexName2, indexName1), subtitle=sprintf("monthly returns [%s:%s]", first(index(dXts)), last(index(dXts)))) +
annotate("text", x=max(lbVsNeg$LB, na.rm=T), y=min(lbVsNeg$NN, na.rm=T), label = "@StockViz", hjust=1.1, vjust=-1.1, col="white", cex=6, fontface = "bold", alpha = 0.8)
ggsave(sprintf("%s/%s.%s.negative.holding-period.png", reportPath, indexName1, indexName2), width=16, height=8, units="in")
|
ca9f9cd36146e3f423c501c80c0bc7d4bdd4db54
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8229_1/rinput.R
|
a44be156df5b356bb9a4891470b3c37dc72e94c0
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read Newick input, drop the root, write it back out.
library(ape)
testtree <- read.tree("8229_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8229_1_unrooted.txt")
|
de0e8603f17d945c2655167a6b09617ca9ef73d2
|
469e730e10ba8e75ed7dcced6797a881400265ed
|
/bn_learn_prac_discrete.R
|
aa69aac9ff9f8f0435598a0d8ecb52ea9978910f
|
[] |
no_license
|
msank00/bayesianNetworkLearning
|
d9d38b71bcf986cb999e6a466ef757952652790a
|
9e9432af4bbf2ab6447c97f6dcd361ada863a38c
|
refs/heads/master
| 2020-05-16T21:05:09.421188
| 2019-04-24T20:21:10
| 2019-04-24T20:21:10
| 183,296,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,190
|
r
|
bn_learn_prac_discrete.R
|
# Discrete Bayesian-network structure/parameter learning with bnlearn,
# run on the packaged learning.test data and on a regenerated copy.
# URL: http://www.bnlearn.com/examples/fit/
dir_name = "/home/sankarshan/Documents/code/bayesianLearningInR"
setwd(dir_name)   # NOTE(review): absolute personal path; not portable
library(bnlearn)
data(learning.test)
# data generation.
# NOTE(review): `c` and `f` below shadow base functions of the same name;
# calls like c(...) still resolve to the function, but this is fragile style.
LV3 = c("a", "b", "c")
a = sample(LV3, 5000, prob = rep(1/3, 3), replace = TRUE)
c = sample(LV3, 5000, prob = c(0.75, 0.2, 0.05), replace = TRUE)
f = sample(c("a", "b"), 5000, prob = rep(1/2, 2), replace = TRUE)
b = a
# NOTE(review): these replacements run sequentially, so values written by an
# earlier line (new "b"s produced while rewriting b == "a") are matched again
# by later lines; if one-pass conditional sampling was intended, compute the
# three masks from the original b before replacing.
b[b == "a"] = sample(LV3, length(which(b == "a")), prob = c(0.8, 0.1, 0.1), replace = TRUE)
b[b == "b"] = sample(LV3, length(which(b == "b")), prob = c(0.4, 0.2, 0.4), replace = TRUE)
b[b == "c"] = sample(LV3, length(which(b == "c")), prob = c(0.1, 0.1, 0.8), replace = TRUE)
# d depends on (a, c): encode the parent pair as "a:c" and resample per combination.
d = apply(cbind(a,c), 1, paste, collapse= ":")
d[d == "a:a"] = sample(LV3, length(which(d == "a:a")), prob = c(0.8, 0.1, 0.1), replace = TRUE)
d[d == "a:b"] = sample(LV3, length(which(d == "a:b")), prob = c(0.2, 0.1, 0.7), replace = TRUE)
d[d == "a:c"] = sample(LV3, length(which(d == "a:c")), prob = c(0.4, 0.2, 0.4), replace = TRUE)
d[d == "b:a"] = sample(LV3, length(which(d == "b:a")), prob = c(0.1, 0.8, 0.1), replace = TRUE)
d[d == "b:b"] = sample(LV3, length(which(d == "b:b")), prob = c(0.9, 0.05, 0.05), replace = TRUE)
d[d == "b:c"] = sample(LV3, length(which(d == "b:c")), prob = c(0.3, 0.4, 0.3), replace = TRUE)
d[d == "c:a"] = sample(LV3, length(which(d == "c:a")), prob = c(0.1, 0.1, 0.8), replace = TRUE)
d[d == "c:b"] = sample(LV3, length(which(d == "c:b")), prob = c(0.25, 0.5, 0.25), replace = TRUE)
d[d == "c:c"] = sample(LV3, length(which(d == "c:c")), prob = c(0.15, 0.45, 0.4), replace = TRUE)
# e depends on (b, f).
e = apply(cbind(b,f), 1, paste, collapse= ":")
e[e == "a:a"] = sample(LV3, length(which(e == "a:a")), prob = c(0.8, 0.1, 0.1), replace = TRUE)
e[e == "a:b"] = sample(LV3, length(which(e == "a:b")), prob = c(0.4, 0.5, 0.1), replace = TRUE)
e[e == "b:a"] = sample(LV3, length(which(e == "b:a")), prob = c(0.2, 0.2, 0.6), replace = TRUE)
e[e == "b:b"] = sample(LV3, length(which(e == "b:b")), prob = c(0.3, 0.4, 0.3), replace = TRUE)
e[e == "c:a"] = sample(LV3, length(which(e == "c:a")), prob = c(0.1, 0.1, 0.8), replace = TRUE)
e[e == "c:b"] = sample(LV3, length(which(e == "c:b")), prob = c(0.25, 0.5, 0.25), replace = TRUE)
learning.test2 = data.frame(
A = factor(a, levels = LV3),
B = factor(b, levels = LV3),
C = factor(c, levels = LV3),
D = factor(d, levels = LV3),
E = factor(e, levels = LV3),
F = factor(f, levels = c("a", "b"))
)
# Learn an equivalence class (partially directed graph) with IAMB.
pdag = iamb(learning.test)
pdag
# Score both orientations of the undirected A-B arc.
score(set.arc(pdag, from = "A", to = "B"), learning.test)
score(set.arc(pdag, from = "B", to = "A"), learning.test)
dag = set.arc(pdag, from = "B", to = "A")
# NOTE(review): this overwrites the `dag` from the previous line --
# only the pdag2dag() result is used below.
dag = pdag2dag(pdag, ordering = c("A", "B", "C", "D", "E", "F"))
fit = bn.fit(dag, learning.test)
fit
fit$D
#=======================
# Repeat the same workflow on the regenerated dataset.
pdag2 = iamb(learning.test2)
pdag2
score(set.arc(pdag2, from = "A", to = "B"), learning.test2)
score(set.arc(pdag2, from = "B", to = "A"), learning.test2)
dag2 = set.arc(pdag2, from = "B", to = "A")
dag2 = pdag2dag(pdag2, ordering = c("A", "B", "C", "D", "E", "F"))
fit2 = bn.fit(dag2, learning.test2)
fit2$D
fit$D
# Compare the conditional probability tables of node D visually.
bn.fit.barchart(fit$D)
bn.fit.barchart(fit2$D)
bn.fit.dotplot(fit$D)
bn.fit.dotplot(fit2$D)
|
6d9cba76f0e5490c9c5427c59c92cded24df144e
|
3c37e20c65d3918fede609648fccc7e997527c47
|
/R/noise.R
|
2380d276b8ae32b5f62a49034b264b787ff13b6c
|
[] |
no_license
|
cran/Rwave
|
67cb1e7b8e234c1f49b1ac4cbadef024a04afc21
|
72a5fc18fdb0c4ae4cf6aa9985617268585ffae5
|
refs/heads/master
| 2022-11-04T20:48:09.750339
| 2022-10-21T22:17:49
| 2022-10-21T22:17:49
| 17,713,902
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,984
|
r
|
noise.R
|
#########################################################################
# $Log: Noise.S,v $
# Revision 1.2 1995/04/05 18:56:55 bruno
# *** empty log message ***
#
# Revision 1.1 1995/04/02 01:04:16 bruno
# Initial revision
#
# (c) Copyright 1997
# by
# Author: Rene Carmona, Bruno Torresani, Wen-Liang Hwang
# Princeton University
# All right reserved
#########################################################################
#########################################################################
# tfpct:
# ------
# compute a percentile of a time-frequency representation, frequency
# by frequency (one quantile per column/scale)
#
# input:
# ------
# input: modulus of the continuous wavelet transform (2D array)
# percent: value of the percentile to be computed
# plot: if set to TRUE, displays the values of the energy as a
#    function of the scale
#
# output:
# -------
# output: 1D array of size nbscales containing the noise estimate
#
#########################################################################
tfpct <- function(input, percent = .8, plot = TRUE)
{
  # Vectorized replacement for the original element-by-element 1:nscale
  # loop; unname() preserves the historical unnamed numeric output.
  output <- unname(apply(input, 2, quantile, probs = percent))
  if (plot) plot.ts(output)
  output
}
#########################################################################
# tfmean:
# -------
# compute the mean of a time-frequency representation, frequency
# by frequency (one mean per column/scale)
#
# input:
# ------
# input: modulus of the continuous wavelet transform (2D array)
# plot: if set to TRUE, displays the values of the energy as a
#    function of the scale
#
# output:
# -------
# output: 1D array of size nbscales containing the noise estimate
#
#########################################################################
tfmean <- function(input, plot = TRUE)
{
  # colMeans() replaces the original per-column 1:nscale loop; unname()
  # preserves the historical unnamed numeric output.
  output <- unname(colMeans(input))
  if (plot) plot.ts(output)
  output
}
#########################################################################
# tfvar:
# ------
# compute the variance of a time-frequency representation, frequency
# by frequency (one variance per column/scale)
#
# input:
# ------
# input: modulus of the continuous wavelet transform (2D array)
# plot: if set to TRUE, displays the values of the energy as a
#    function of the scale
#
# output:
# -------
# output: 1D array of size nbscales containing the noise estimate
#
#########################################################################
tfvar <- function(input, plot = TRUE)
{
  # Vectorized replacement for the original per-column 1:nscale loop;
  # unname() preserves the historical unnamed numeric output.
  output <- unname(apply(input, 2, var))
  if (plot) plot.ts(output)
  output
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.