blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30b9b6e29c32675425a82d925a55f6fa9ea16d36
|
1080ae69aa33d9cadbdaad080c6142f10c4ea704
|
/cachematrix.R
|
4fdaf77c3c79d248e37a0990122c3b31646fc1e4
|
[] |
no_license
|
zergood/ProgrammingAssignment2
|
e7e7224cb38a228965f07f0fa3ac0f9852dac30e
|
f7f8dce6988c6a89c5d7456a53c916a2f51041dc
|
refs/heads/master
| 2021-01-18T05:24:23.080023
| 2015-02-19T05:28:22
| 2015-02-19T05:28:22
| 31,001,799
| 0
| 0
| null | 2015-02-19T04:53:33
| 2015-02-19T04:53:33
| null |
UTF-8
|
R
| false
| false
| 777
|
r
|
cachematrix.R
|
## Create a "cache matrix": a list of closures around a matrix `x` that can
## also store (cache) the inverse of `x`.  Used together with cacheSolve(),
## which computes the inverse once and stores it here via setInv().
##
## x : the matrix to wrap (defaults to an empty matrix)
## Returns a list with accessors:
##   get()      - return the stored matrix
##   setInv(i)  - cache a value (intended: the inverse of the matrix)
##   getInv()   - return the cached value, or NULL if none cached yet
##   set(y)     - replace the stored matrix and invalidate the cache
##                (added; appended last so positional access to the
##                original three accessors is unchanged)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  get <- function() x
  setInv <- function(inverse) inv <<- inverse
  getInv <- function() inv
  set <- function(y) {
    x <<- y
    inv <<- NULL  # any previously cached inverse no longer applies
  }
  list(get = get, setInv = setInv, getInv = getInv, set = set)
}
## Compute (and cache) the inverse of the special "cache matrix" produced by
## makeCacheMatrix().  On the first call the inverse is computed with solve()
## and stored via x$setInv(); subsequent calls return the cached copy without
## recomputing.
##
## x   : list with get()/setInv()/getInv() accessors (see makeCacheMatrix)
## ... : further arguments forwarded to solve()
## Returns the inverse of x$get().
cacheSolve <- function(x, ...) {
  inv <- x$getInv()
  if (!is.null(inv)) {
    message("inverse was cached")
    return(inv)
  }
  # Bug fix: `...` was previously accepted but never forwarded to solve().
  inv <- solve(x$get(), ...)
  x$setInv(inv)
  inv
}
|
d12f6b7c453f219e3c4f6178f2253bb823a33dc9
|
9fecce6f3ef41202cdcc855f4b0baff36131eacc
|
/Analysis/old_analysis/VMS/results/2014-07-24/jitter_VMS.R
|
6007d1d6866b8a7826142f6e8d6e7208077ca504
|
[] |
no_license
|
emfuller/cnh
|
0487e9647837d8fc999850b5951ff6331f9a5159
|
8b36faf8c73607d92e59e392fff3c0094b389d26
|
refs/heads/master
| 2021-05-01T08:02:52.200343
| 2019-04-06T18:25:48
| 2019-04-06T18:25:48
| 28,717,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,790
|
r
|
jitter_VMS.R
|
# Hard-code some shrimp vessels: use FTL tickets to find vessel IDs (veid)
# and gear, taking any boat with a grgroup of TWS (shrimp trawl).  This can
# include prawn shrimp. (for the moment)

# Fix: these packages are used below (ddply, select, map) but were never
# loaded, so the script could not run top-to-bottom as written.
library(plyr)
library(dplyr)
library(maps)

# load data
FTL <- read.csv("/Volumes/NOAA_Data/CNH/Data/Catch/FTL_2009-2013_w-o-c_samhouri.csv", stringsAsFactors = FALSE)
# find vessel IDs for those that land shrimp
sv <- unique(subset(FTL, grgroup == "TWS", select = veid)$veid)
# find vessels that do shrimp
shrimpers <- subset(FTL, veid %in% sv)
# what kinds of gears besides shrimp - aggregate
barplot(table(shrimpers$grgroup), las = 2, border = FALSE, col = "steelblue")

# what kind of gears besides shrimp - by vessel: build one combined gear-code
# string per vessel (loop kept; result frame is preallocated)
gear_combo <- data.frame(veid = sv, gc = rep("hi", length(sv)),
                         stringsAsFactors = FALSE)
for (i in seq_along(sv)) {  # seq_along() is safe even if sv is empty
  vessel <- subset(shrimpers, veid == sv[i])
  gears <- sort(unique(vessel$grgroup))
  # collapse the sorted gear codes into a single key, e.g. "POT TWS" -> "POTTWS"
  no_spaces <- gsub(" ", "", x = toString(gears))
  no_commas <- gsub(",", "", no_spaces)
  gear_combo[i, 2] <- as.character(no_commas)
}
barplot(table(gear_combo$gc), col = "steelblue", border = FALSE, las = 2,
        ylab = "number of vessels with combination of gear")

# number of vessels that have just TWS. Do any of them have VMS?
js <- subset(gear_combo, gc == "TWS")
# load VMS
VMS <- read.csv("/Volumes/NOAA_Data/CNH/VMS_cleaning/results/2014-03-02/VMS_woDups.csv", stringsAsFactors = FALSE)
just_s <- subset(VMS, Doc_Number %in% js$veid)
plot(just_s$Longitude, just_s$Latitude, asp = 1, pch = 19, cex = .15,
     col = as.numeric(as.factor(just_s$Vessel_Name)))
map("state", add = TRUE)
# results in 8 vessels

jittered <- just_s
# change lat/lon into a complex number so the track can be anonymized
jittered$complex <- jittered$Longitude + 1i * jittered$Latitude
# For each vessel, subtract the mean so tracks are centered and their
# relation to one another is hidden: keeps each vessel's relationship to
# itself but erases relationships between vessels.
changed <- ddply(jittered, .(Vessel_Name), mutate,
                 newcomplex = complex - mean(complex))
changed$vessel_id <- as.numeric(as.factor(changed$Vessel_Name))
# lookup code
changed$ref <- seq_len(nrow(changed))
forAngela <- select(changed, vessel_id, Latitude, Longitude, Date_Time,
                    Avg_Speed, Avg_Direction, onland, newcomplex)
forAngela$Longitude <- Re(forAngela$newcomplex)
forAngela$Latitude <- Im(forAngela$newcomplex)
forAngela$newcomplex <- NULL
foo <- subset(forAngela, vessel_id == 8)
plot(foo$Longitude, foo$Latitude, pch = 19, cex = .15)
write.csv(forAngela, file = "/Volumes/NOAA_Data/CNH/Analysis/VMS/2014-07-24/jittered_VMS.csv")
write.csv(changed, file = "/Volumes/NOAA_Data/CNH/Analysis/VMS/2014-07-24/ref_jittered.csv")
|
5d054f84df48a8c67becf48962cd1c13dccaf91e
|
57744ab6fedc2d4b8719fc51dce84e10189a0a7f
|
/rrdfqbpresent/R/MakeHTMLfromQb.R
|
7a37676da88c81b2d780eb9cafef96952e0ada26
|
[] |
no_license
|
rjsheperd/rrdfqbcrnd0
|
3e808ccd56ccf0b26c3c5f80bec9e4d1c83e4f84
|
f7131281d5e4a415451dbd08859fac50d9b8a46d
|
refs/heads/master
| 2023-04-03T01:00:46.279742
| 2020-05-04T19:10:43
| 2020-05-04T19:10:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,869
|
r
|
MakeHTMLfromQb.R
|
##' Make HTML table representing RDF data cube
##'
##' Writes an HTML table for the cube's observations to \code{htmlfile},
##' laying cells out by the row/column/cell-part numbering carried in the
##' \code{observationsDesc} attribute of \code{GetTwoDimTableFromQb}'s result.
##' @param store RDF data store containing cube
##' @param rowdim Row dimensions
##' @param coldim Column dimensions
##' @param idrow idrows
##' @param idcol idcols
##' @param htmlfile path to file with HTML
##' @param useRDFa if TRUE include RDFa markup (default)
##' @param compactDimColumns if TRUE compact dimension columns and add pretty header (default)
##' @param showProcedure If TRUE show in each row the projection procedure value
##' @param debug If TRUE give debug information
##' @return path to file with HTML
##' @inheritParams GetObservationsSparqlQuery
##' @export
MakeHTMLfromQb<- function( store, forsparqlprefix, dsdName, domainName,
                           dimensions, rowdim, coldim, idrow, idcol,
                           htmlfile=NULL, useRDFa=TRUE, compactDimColumns=TRUE,
                           showProcedure=TRUE, debug=FALSE ) {
  # ToDo(mja): the result from GetTwoDimTableFromQb is wrong
  qbtest<- GetTwoDimTableFromQb( store, forsparqlprefix, domainName, rowdim, coldim )
  ## names(attributes(qbtest))
  ## options(width=200)
  ## knitr::kable(qbtest[order(strtoi(qbtest$rowno)),])
  ## The observation-description table travels as an attribute of the result.
  oDx<-attr(qbtest,"observationsDesc")
  ## knitr::kable(oDx)
  ## Drop rows without a subject, then order by (numeric) row number.
  oDxx<- oDx[! is.na(oDx$s),]
  oD<- oDxx[order(strtoi(oDxx$rowno)),]
  ## print(colnames(oD))
  ## TODO(mja): ensure measurefmt is always defined - this is a quick fix
  if (!("measurefmt" %in% names(oD))) {
    oD$measurefmt<- " "
  }
  ## Distinct row / column / cell-part indices drive all the nested loops
  ## below; `or` (observation row cursor) advances through oD in this order.
  presrowvarindex<- unique(oD$rowno)
  colvarindex<- unique(oD$colno)
  cellpartnoindex<- unique(oD$cellpartno)
  ## Debug helper: dump the index vectors and the layout columns of oD.
  Showit<- function() {
    print(presrowvarindex)
    print(colvarindex)
    print(cellpartnoindex)
    print(oD[,c("s","rowno","colno","cellpartno")])
  }
  if (debug) { Showit() }
  # Determine variable names in the oD dataframe by rewriting the dimension
  # prefix into value/IRI/label column-name suffixes.
  presrowvarvalue<- gsub("(crnd-dimension:|crnd-attribute:|crnd-measure:)(.*)","\\2value", rowdim)
  presrowvarIRI <- gsub("(crnd-dimension:|crnd-attribute:|crnd-measure:)(.*)","\\2IRI", rowdim)
  presrowvarlabel<- gsub("(crnd-dimension:|crnd-attribute:|crnd-measure:)(.*)","\\2label", rowdim)
  presidcolvalue<- gsub("(crnd-dimension:|crnd-attribute:|crnd-measure:)(.*)","\\2value", idcol)
  presidcollabel<- gsub("(crnd-dimension:|crnd-attribute:|crnd-measure:)(.*)","\\2label", idcol)
  ## add code for embedding the cube as turtle
  ## determine cube compontents except observations,
  ## as the observations are stored as RDFa
  if (is.null(htmlfile)) {
    htmlfile<- file.path(system.file("extdata/sample-cfg", package="rrdfqbpresent"), "test.html")
    # htmlfile<- file.path(tempdir(),"test.html")
  }
  ## Emit the HTML skeleton.  NOTE(review): a closing </style> tag is written
  ## below although the opening <style> block is commented out - confirm the
  ## intended markup.
  cat("<!DOCTYPE HTML>\n", file=htmlfile, append=FALSE)
  cat('
<html>
<head>
<meta charset="UTF-8">
<title>DEMO table as html</title>
',
      ifelse(useRDFa,
'
<script src="jquery-2.1.3.min.js"></script>
<link rel="stylesheet" href="jquery-ui-1.11.3.custom/jquery-ui.css"/>
<script src="jquery-ui-1.11.3.custom/jquery-ui.min.js"></script>
<script src="RDFa.min.1.4.0.js"></script>
', ''),
      ## '
      ## <style>
      ## #table {
      ## line-height:30px;
      ## background-color:#eeeeee;
      ## height:1000px;
      ## width:750px;
      ## float:left;
      ## padding:5px;
      ## }
      ## #drop{
      ## width:300px;
      ## background-color:green;
      ## float:left;
      ## padding:10px;
      ## }
      ## ',
'
</style>
</head>
<script>
"use strict";
'
      ,
      ## '
      ## function allowDrop(ev)
      ## {
      ## ev.preventDefault();
      ## }
      ## function drag(ev)
      ## {
      ## ev.dataTransfer.setData("Text",ev.target.id);
      ## console.log("Dragging: ", ev.target.id);
      ## }
      ## function drop(ev)
      ## {
      ## ev.preventDefault();
      ## var data=ev.dataTransfer.getData("Text");
      ## console.log("Dropping: ", data);
      ## // from http://stackoverflow.com/questions/13007582/html5-drag-and-copy
      ## var nodeCopy = document.getElementById(data).cloneNode(true);
      ## nodeCopy.id = "copy"+nodeCopy.id;
      ## // end from http://stackoverflow.com/questions/13007582/html5-drag-and-copy
      ## var newelem = document.createElement("P");
      ## newelem.appendChild(nodeCopy);
      ## ev.target.appendChild(newelem);
      ## }
      ## $(document).ready(function(){
      ## GreenTurtle.attach(document)
      ## })
      ## ',
'
function obsclick(obssubject)
{
alert("Observation " + obssubject )
}
'
      ,
'
</script>
<body>
<h1>',
      dsdName,
'</h1>
'
      , file=htmlfile, append=TRUE
  )
  cat('<div id="container">', file=htmlfile, append=TRUE)
  cat("<div id='table'>\n", file=htmlfile, append=TRUE)
  cat("<table border>\n", file=htmlfile, append=TRUE)
  ## First pass over all observations: find which row-id columns ever carry
  ## "_ALL_" (or NA) so they can be hidden in compact mode.
  ## NOTE(review): `TRUE ||` makes this branch unconditional regardless of
  ## compactDimColumns - looks like a debugging leftover, confirm.
  if (TRUE || compactDimColumns) {
    useidrow<- vector(mode="character",length=0)
    hasallidrow<- vector(mode="character",length=0)
    useidheader<- vector(mode="character",length=0)
    maxNoOfNonALL<-0
    # has to use to OR approach to identify the cells that goes in the same rowno!!
    or<- 1
    for (rr in presrowvarindex) {
      thisNoOfNonALL<-0
      for (rowidname in idrow) {
        if (debug) { cat("Row ", rr, ", observation (or) ", or, ", rowidname", rowidname, ", contents: ", oD[or,rowidname], "\n") }
        if ( is.na(oD[or,rowidname]) || oD[or,rowidname]=="_ALL_" ) {
          if (!is.element(rowidname,hasallidrow) ) {
            hasallidrow<- c(hasallidrow, rowidname)
          }
        } else {
          thisNoOfNonALL<-thisNoOfNonALL+1
        }
      }
      ## cat("THis no columns with not ALL ", thisNoOfNonALL, "\n" )
      maxNoOfNonALL<- max(maxNoOfNonALL, thisNoOfNonALL)
      ## Advance to next row: step `or` past every cell of this table row.
      for (cc in colvarindex) {
        cpindex<-0
        for (cp in cellpartnoindex) {
          cpindex<- cpindex+1
          if (oD$rowno[or]==rr & oD$colno[or]==cc & oD$cellpartno[or]==cp ) {
            or<- or+1
          }
        }
      }
    }
    ## cat("Max no columns with not ALL ", maxNoOfNonALL, "\n" )
    ## cat("ID rows ", idrow, "\n")
    ## cat("ID rows with at least one _ALL_ value", hasallidrow, "\n")
    alwaysshowidrow<- setdiff(idrow, hasallidrow)
    ## cat("ID rows with no _ALL_ value", alwaysshowidrow, "\n")
  }
  ## make the header row(s) for the columns
  headerrowvarindex<- c(1)
  or<- 1
  for (rr in headerrowvarindex) {
    cat("<tr>", file=htmlfile, append=TRUE)
    # print(rr)
    if (!compactDimColumns) {
      ## START make the row identification
      for (rowidname in idrow) {
        ## cat("<th>", rowidname, "</th>", file=htmlfile, append=TRUE)
        ## this is not a long term approach
        cat("<th><a href=\"",oD[or,gsub("(^.*)value$","\\1IRI",rowidname)],"\">", oD[or,gsub("(^.*)value$","\\1label",rowidname)], "</a></th>", file=htmlfile, append=TRUE)
      }
      ## END make the row identification
    } else {
    }
    ## START identify all column related information to be projected into column
    if (showProcedure) {
      cat("<th>", "Variable", "</th>", file=htmlfile, append=TRUE)
      cat("<th>", "Statistics", "</th>", file=htmlfile, append=TRUE)
    }
    ## END identify all column related information to be projected into column
    ## One <th> per column, spanning its cell parts; repeated values collapsed.
    for (cc in colvarindex) {
      cpindex<-0
      cat("<th colspan=\"", length(cellpartnoindex), "\">", file=htmlfile, append=TRUE)
      prevvalue<- " "
      for (cp in cellpartnoindex) {
        cpindex<- cpindex+1
        ## cat( oD[or, presidcolvalue ] , file=htmlfile, append=TRUE)
        ## TODO: make better solution
        if (prevvalue != oD[or,presidcolvalue]) {
          cat("<a href=\"",oD[or,gsub("(^.*)value$","\\1",presidcolvalue)],"\">", oD[or,presidcolvalue], "</a>", file=htmlfile, append=TRUE)
          prevvalue<- oD[or,presidcolvalue]
        }
        or<- or+1
      }
      cat("</th>\n", file=htmlfile, append=TRUE)
    }
    cat("</tr>", "\n", file=htmlfile, append=TRUE)
  }
  ## data rows
  or<- 1
  if (debug) { cat("Start data rows\n") }
  for (rr in presrowvarindex) {
    ## NOTE(review): on the first iteration `rowidname` still holds the last
    ## value left over from the header loop above - looks unintended, confirm.
    if (debug) { cat("Data rows: Row ", rr, ", observation (or) ", or, ", rowidname", rowidname, ", contents: ", oD[or,rowidname], "\n") }
    cat("<tr>", file=htmlfile, append=TRUE)
    # print(rr)
    ## START make the row identification
    if (oD$rowno[or]==rr) {
      for (rowidname in idrow) {
        ## this is not a long term approach
        cat("<td>",
            "<a href=\"",oD[or,gsub("(^.*)value$","\\1",rowidname)],"\">",
            oD[or,rowidname],
            "</a>",
            "</td>", file=htmlfile, append=TRUE)
      }
    }
    ## END make the row identification
    ## START identify all column related information to be projected into column
    ## Look-ahead pass (cursor `xor`) collecting factor/procedure links per
    ## cell part without moving the main cursor `or`.
    if (showProcedure) {
      xor<- or
      xrowid<- rep("", length(cellpartnoindex))
      yrowid<- rep("", length(cellpartnoindex))
      for (cc in colvarindex) {
        cpindex<-0
        for (cp in cellpartnoindex) {
          cpindex<- cpindex+1
          if (oD$rowno[xor]==rr & oD$colno[xor]==cc & oD$cellpartno[xor]==cp ) {
            if (!is.na(oD$factorvalue[xor]) && yrowid[cpindex]=="") {
              yrowid[cpindex]<-paste0("<a href=\"",oD$factor[xor],"\">",
                                      oD$factorvalue[xor], "</a>",collapse="")
            }
            if (!is.na(oD$procedurevalue[xor]) && xrowid[cpindex]=="") {
              xrowid[cpindex]<-paste0("<a href=\"",oD$procedure[xor],"\">",
                                      oD$procedurevalue[xor], "</a>",collapse="")
            }
            xor<- xor+1
          }
        }
      }
      cat("<td>", paste(yrowid,collapse=", ",sep=""), "</td>", file=htmlfile, append=TRUE)
      cat("<td>", paste(xrowid,collapse=", ",sep=""), "</td>", file=htmlfile, append=TRUE)
    }
    ## END identify all column related information to be projected into column
    ##
    ## Emit the observation cells themselves; `or` advances only on a match.
    for (cc in colvarindex) {
      cpindex<-0
      for (cp in cellpartnoindex) {
        cpindex<- cpindex+1
        cat("<td>", file=htmlfile, append=TRUE)
        ## if (cpindex>1) {
        ## ## separator between cells should be taken from data
        ## cat(" ", file=htmlfile, append=TRUE)
        ## }
        if (debug) {
          cat("colvarindex:",
              " rowno ", oD$rowno[or],"==", rr,
              " colno ", oD$colno[or], "==", cc,
              " cellparno ", oD$cellpartno[or], "==", cp,
              "\n" )
        }
        if (oD$rowno[or]==rr & oD$colno[or]==cc & oD$cellpartno[or]==cp ) {
          ## The observation
          ## next line is for simple fly-over
          if (useRDFa) {
            cat(paste0("<a title=\"", oD$measureIRI[or], "\"",
                       " onclick=obsclick(\"", oD$measureIRI[or], "\")",
                       ">\n" ), file=htmlfile, append=TRUE)
            cat(paste0('<span ', 'id="', gsub("ds:","",oD$s[or]), '"',
                       'resource="', oD$s[or],'"',
                       ' typeof="qb:Observation" ',
                       ## TODO(mja): how to use draggable: Disable draggable for now
                       ## ' draggable="true" ondragstart="drag(event)"',
                       '>\n' ),
                file=htmlfile, append=TRUE)
          } else {
            cat(paste0("<a href=\"", oD$measureIRI[or], "\"",
                       ">\n" ), file=htmlfile, append=TRUE)
          }
          ## TODO(mja) how to store dataSet information
          ## cat(paste0('<span property="qb:dataSet" resource="', 'ds:', dsdName,'">\n' ), file=htmlfile, append=TRUE)
          ## TODO(mja) how to show dimensions
          ## for (prop in dimensions) {
          ## cat( paste0('<span property="', prop, '"', ' resource="', oD[or, gsub("crnd-dimension:|crnd-attribute:|crnd-measure:", "", prop)], '">\n' ), file=htmlfile, append=TRUE)
          ## }
          if (debug) { cat("Observation: ", oD$measure[or],"\n" ) }
          ## formatting to applied to measure: sprintf format if one is given,
          ## otherwise the raw measure value
          if (oD$measurefmt[or] != " ") {
            cat(sprintf(oD$measurefmt[or],as.numeric(oD$measure[or])), file=htmlfile, append=TRUE)
          }
          else {
            cat(paste0(oD$measure[or]), file=htmlfile, append=TRUE)
          }
          ## for (prop in dimensions) {
          ## cat( '</span>\n', file=htmlfile, append=TRUE)
          ## }
          ## dataSet information
          ## cat( '</span>\n', file=htmlfile, append=TRUE)
          if (useRDFa) {
            cat( '</span>\n', file=htmlfile, append=TRUE)
          }
          cat(paste0("</a>\n"), file=htmlfile, append=TRUE)
          or<- or+1
          cat("</td>\n", file=htmlfile, append=TRUE)
        }
      }
      # cat("</td>\n", file=htmlfile, append=TRUE)
    }
    cat("</tr>", "\n", file=htmlfile, append=TRUE)
    if (debug) { cat("End of for, or ", or, "\n" ) }
  }
  cat("</table>\n", file=htmlfile, append=TRUE)
  cat("</div>\n", file=htmlfile, append=TRUE)
  ## TODO(mja): consider how to use this with dropping
  ## cat('
  ## <div id="droparea">
  ## Drag and drop over the green text below.
  ## <table>
  ## <tr><td>
  ## <span style="width:100px" id="drop" ondrop="drop(event)" ondragover="allowDrop(event)">Drop here...</span>
  ## </td></tr>
  ## </table>
  ## </div>
  ## ', file=htmlfile, append=TRUE)
  cat('
</div>
</body>
</html>
', file=htmlfile, append=TRUE)
  ## Return the path to the generated HTML file.
  htmlfile
}
|
0d30bbf79bf9bc4004de0a8df238d1a5a49ed63c
|
1fa7298bebfe9676c1910fefbf2fe38b70b36145
|
/Proj2_Q1.r
|
65f9334537bc7b2fef5d2269818efe678f218544
|
[] |
no_license
|
unmeshphadke/EDA-Coursera-CourseProject2
|
da9b32e2bd0e451f6bed4d091c8a03d9606e5dff
|
5507936a3ea9cf895577ddf18aaced2e2fa5cc31
|
refs/heads/master
| 2016-09-05T11:32:54.993973
| 2015-06-27T13:16:11
| 2015-06-27T13:16:11
| 38,110,717
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
r
|
Proj2_Q1.r
|
# EDA Course Project 2, Question 1: total PM2.5 emitted in the US per year.
library(reshape2)
library(ggplot2)
library(plyr)
library(dplyr)

# The files are assumed to be present in the current working directory in R.
# Read the input data sets.
nei_data <- readRDS("summarySCC_PM25.rds")
scc_data <- readRDS("Source_Classification_Code.rds")

# Sum emissions within each year, then reshape into a two-column frame.
yearly_totals <- tapply(nei_data$Emissions, nei_data$year, sum)
totals_df <- data.frame(PM25 = yearly_totals,
                        Year = as.numeric(names(yearly_totals)))

# Plot total emissions by year to a PNG device.
png(file="plot.png")
plot(totals_df$Year, totals_df$PM25,
     xlab="Year", ylab="PM25 emitted (in tonnes)",
     col="blue", pch=20, cex=3,
     main="Total PM25 Emissions in the US")
dev.off()
# From the plot, it is clearly seen that the PM25 emission has decreased
# significantly from 1999 to 2008.
|
eacce2165603a2f961b5238f5f4ccdaa30fafe92
|
d07d3fb9860333579866a73a9b027b2ef9b5d21c
|
/R/zi_zipfpss.R
|
3ccbefccc6922ef7c77314252faa77f672b17dc5
|
[] |
no_license
|
ardlop/zipfextR
|
ceac463a21c74dcf765af7751dfa50959d215a36
|
cf0ec21cd7d6ccb4a81935692b179309922885d4
|
refs/heads/master
| 2021-06-21T16:35:14.384189
| 2020-05-07T10:20:49
| 2020-05-07T10:20:49
| 98,761,867
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,185
|
r
|
zi_zipfpss.R
|
#' The Zero Inflated Zipf-Poisson Stop Sum Distribution (ZI Zipf-PSS).
#'
#' Probability mass function for the zero inflated Zipf-PSS distribution with parameters \eqn{\alpha}, \eqn{\lambda} and \eqn{w}.
#' The support of the zero inflated Zipf-PSS distribution are the positive integer numbers including the zero value.
#'
#' @name zi_zipfpss
#' @aliases d_zi_zipfpss
#'
#' @param x Vector of positive integer values.
#' @param alpha Value of the \eqn{\alpha} parameter (\eqn{\alpha > 1} ).
#' @param lambda Value of the \eqn{\lambda} parameter (\eqn{\lambda > 0} ).
#' @param w Value of the \eqn{w} parameter (0 < \eqn{w < 1} ).
#' @param log Logical; if TRUE, probabilities p are given as log(p).
#'
#' @details
#' The support of the \eqn{\lambda} parameter increases when the distribution is truncated at zero being
#' \eqn{\lambda \geq 0}. It has been proved that when \eqn{\lambda = 0} one has the degenerated version of the distribution at one.
#'
#' @references {
#' Panjer, H. H. (1981). Recursive evaluation of a family of compound
#' distributions. ASTIN Bulletin: The Journal of the IAA, 12(1), 22-26.
#'
#' Sundt, B., & Jewell, W. S. (1981). Further results on recursive evaluation of
#' compound distributions. ASTIN Bulletin: The Journal of the IAA, 12(1), 27-39.
#' }
NULL
#> NULL
#' Internal: validate the ZI Zipf-PSS parameters.
#'
#' Stops with an informative error unless alpha > 1, lambda >= 0 and
#' 0 < w <= 1.  Returns NULL invisibly when all parameters are valid.
.prec.zi_zipfpss.checkparams <- function(alpha, lambda, w){
  # `||` short-circuits, so the numeric comparison is skipped (instead of
  # evaluated against a non-numeric value) when the type check already failed.
  if(!is.numeric(alpha) || alpha <= 1){
    stop('Incorrect alpha parameter. This parameter should be greater than one.')
  }
  # Message fixed: the check rejects negative values, not non-numeric only.
  if(!is.numeric(lambda) || lambda < 0){
    stop('Incorrect lambda parameter. This parameter should be a non-negative numeric value.')
  }
  if(!is.numeric(w) || any(w <= 0) || any(w > 1)){
    stop('Incorrect w parameter. This parameter should be a numeric value in (0, 1].')
  }
  invisible(NULL)
}
#' @rdname zi_zipfpss
#' @export
d_zi_zipfpss <- function(x, alpha, lambda, w, log = FALSE){
  # Validate the support and the parameters before evaluating the pmf.
  .prec.zipfpss.checkXvalue(x)
  .prec.zi_zipfpss.checkparams(alpha, lambda, w)
  # Per-point mass: the (1 - w)-weighted base Zipf-PSS value, with the extra
  # zero-inflation weight w added at k == 0.  The `log` flag is forwarded to
  # dzipfpss exactly as supplied by the caller.
  mass_at <- function(k) {
    weighted <- (1 - w) * dzipfpss(k, alpha, lambda, log)
    if (k == 0) {
      w + weighted
    } else {
      weighted
    }
  }
  sapply(x, mass_at)
}
|
027bdb7d1bb8f328545a3e301d19a5ed54a65671
|
756585020e5b9051fe4ebd4450e81b114fc18d1e
|
/configuration/rmarkdown/shared_variables.R
|
2af422411587a72227238ee73da5a549052dffed
|
[] |
no_license
|
TheAcetoLab/diamantopoulou-ctc-dynamics
|
bc70b4445058ca60eb901ce8dd6f7162c0028dff
|
e3cc57d99de30669acb505180f10287b686ddf93
|
refs/heads/master
| 2023-04-17T18:38:32.249089
| 2022-06-28T08:59:15
| 2022-06-28T08:59:15
| 485,323,596
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
shared_variables.R
|
# Official gene symbols for the core circadian clock genes, keyed by the
# names commonly used in the literature (e.g. BMAL1 is the symbol ARNTL).
key_circadian_genes <- setNames(
  c('ARNTL', 'CLOCK', 'CRY1', 'CRY2', 'PER1', 'PER2', 'PER3',
    'NR1D1', 'NR1D2', 'CSNK1D', 'CSNK1E'),
  c('BMAL1', 'CLOCK', 'CRY1', 'CRY2', 'PER1', 'PER2', 'PER3',
    'NR1D1', 'NR1D2', 'CSNK1D', 'CSNK1E'))

# Gene biotypes retained for downstream analyses.
use_feature_type <- c('protein_coding', 'lncRNA', 'snoRNA', 'miRNA')
|
345c62a046647bc334820528fc26a067d1144cc9
|
ba3e9adb05f6fd7e9dbfb69f608f147a00c149b2
|
/lecture/데청캠_동국대/수업/빅데이터 개론 & R/03/day3_실습.R
|
87822898b9f2134f41bae343f08b31307a7e652a
|
[] |
no_license
|
friendly-blackdog/To_FullStack
|
193f247429dff935ecfebc2b170937c6f620f89d
|
04cdebadd8214b84f6d7e8fe3021dcf80eb98244
|
refs/heads/master
| 2022-11-26T01:31:45.948805
| 2020-08-04T13:01:42
| 2020-08-04T13:01:42
| 258,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,866
|
r
|
day3_실습.R
|
# Day 3 practice: basic data exploration with base R, dplyr and ggplot2.
# (Comments translated to English; obvious console typos fixed - see notes.)
library(readxl)

# Read and inspect the exam data
exam <- read.csv("csv_exam.csv")
exam
head(exam)
tail(exam, 10)
tail(exam)
View(exam)
dim(exam)
str(exam)
summary(exam)

# Load ggplot2's mpg data set as a data frame
mpg <- as.data.frame(ggplot2::mpg)
head(mpg)
tail(mpg)
View(mpg)
dim(mpg)
str(mpg)

library(dplyr)

# rename(): new_name = old_name
df_raw <- data.frame(var1 = c(1, 2, 1),
                     var2 = c(2, 3, 2))
df_raw
df_new <- df_raw
df_new
df_new <- rename(df_new, v2 = var2)
df_new

mpg_copy <- mpg
mpg_copy <- rename(mpg_copy, city = cty, highway = hwy)
head(mpg_copy, 2)

# Derived variables
df <- data.frame(var1 = c(4, 3, 8),
                 var2 = c(2, 6, 1))
df$var_sum <- df$var1 + df$var2
df
df$var_mean <- (df$var1 + df$var2) / 2
df

mpg$total <- (mpg$cty + mpg$hwy) / 2
head(mpg, 2)
mean(mpg$total)
summary(mpg$total)
hist(mpg$total)

# Pass/fail flag and grades
mpg$test <- ifelse(mpg$total >= 20, "pass", "fail")
head(mpg, 7)  # fixed: was head(mpg, 7>1), i.e. head(mpg, TRUE) -> one row
table(mpg$test)
library(ggplot2)
qplot(mpg$test)

mpg$grade <- ifelse(mpg$total > 30, "A",
                    ifelse(mpg$total >= 20, "B", "C"))
table(mpg$grade)
qplot(mpg$grade)  # fixed: "qplt" was a typo for "qplot" (repeated retries collapsed)

mpg$grade2 <- ifelse(mpg$total >= 30, "A",
                     ifelse(mpg$total >= 25, "B",
                            ifelse(mpg$total >= 20, "C", "D")))
table(mpg$grade2)

# midwest exercise
ggplot(midwest)
# fixed: midwest has no "totla"/"asian" columns; the intended population
# columns are poptotal and popasian
midwest <- rename(midwest, total = poptotal, asian = popasian)
head(midwest)
midwest$asian_per <- (midwest$asian / midwest$total)
hist(midwest$asian_per)
mean(midwest$asian_per)
# (a stray bare `ifelse()` call, which errors, was removed here)
midwest$asian

# Practice --------------------------------------
midwest <- as.data.frame(ggplot2::midwest)  # fixed: was assigned to "midiwest"
head(midwest)
library(dplyr)
# fixed for consistency: the fresh copy above has the raw column names again
midwest$ratio <- (midwest$popasian / midwest$poptotal) * 100
hist(midwest$ratio)
asia_mean <- mean(midwest$ratio)
midwest$asia_group <- ifelse(midwest$ratio > asia_mean, "large", "small")
table(midwest$asia_group)
qplot(midwest$asia_group)

# filter() examples
exam <- read.csv("csv_exam.csv")
exam
exam %>% filter(class == 1)
exam %>% filter(class == 2)
exam %>% filter(class != 1)
exam %>% filter(math > 50)
exam %>% filter(math < 50)
exam %>% filter(class %in% c(1, 3, 5))

# Practice ------------------------------------------------------
mpg_low <- mpg %>% filter(displ <= 4)
mpg_high <- mpg %>% filter(displ > 5)
mean(mpg_low$hwy)
mean(mpg_high$hwy)
mpg_audi <- mpg %>% filter(manufacturer == "audi")
mpg_toyota <- mpg %>% filter(manufacturer == "toyota")
mean(mpg_audi$cty)
mean(mpg_toyota$cty)
mpg_chev <- mpg %>% filter(manufacturer == "chevrolet")
mpg_ford <- mpg %>% filter(manufacturer == "ford")
mpg_honda <- mpg %>% filter(manufacturer == "honda")
mpg_group <- mpg %>% filter(manufacturer %in% c("chevrolet", "ford", "honda"))
mean(mpg_group$hwy)

# select() examples
exam
library(readxl)
library(dplyr)
exam <- read.csv("csv_exam.csv")
exam
exam %>% select(class, math, english)
exam %>%
  filter(class == 1) %>%
  select(english)
mpg <- as.data.frame(ggplot2::mpg)
|
88e2f007c56a40f960e7a8591802266160e9db56
|
fef283e0284fe54beae26cb4145b6314c6858727
|
/man/put_imageset.Rd
|
974283efcd1142c1b19115ad97cbbee343f3ae30
|
[] |
no_license
|
hadley/rifftron
|
f76c4b1b2f04cb8611182b360b5b5e10803abb66
|
b67a740d289eb0f04bc892603914f75dcf8c9154
|
refs/heads/master
| 2021-01-13T01:55:03.798989
| 2014-03-12T14:30:12
| 2014-03-12T14:30:12
| 17,642,261
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 560
|
rd
|
put_imageset.Rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{put_imageset}
\alias{put_imageset}
\title{PUT an imageset.}
\usage{
put_imageset(project, set, path, key = api_key())
}
\arguments{
\item{project,set}{Name of project and image set}
\item{path}{character vector of images to upload}
\item{key}{difftron api key, see \code{\link{api_key}}
for more details}
}
\description{
PUT an imageset.
}
\examples{
\donttest{
png("test.png"); plot(runif(10)); dev.off()
put_imageset("test", "test2", "test.png")
unlink("test.png")
}
}
\keyword{internal}
|
5dff16146f0e70917c0951cb9f878c6ebd0fa150
|
bd34186a17415b174073bbb3f2d2e8054f845ccc
|
/data/sample-laser-radar-measurement-data-1.out.txt.R
|
ee827fe8d05b4c3cb6b6baa920194f2ce2e48076
|
[] |
no_license
|
binliu-base/T2P2-CarND-Unscented-Kalman-Filter
|
446ca2f32564fbaebc913d21dc6a520977c8d582
|
3d1c6b252c96c34a36bd7ab3ba4b6fd5e04e2827
|
refs/heads/master
| 2021-01-19T02:46:24.678760
| 2017-04-15T15:23:08
| 2017-04-15T15:23:08
| 87,291,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 69,839
|
r
|
sample-laser-radar-measurement-data-1.out.txt.R
|
type px py v yaw_angle yaw_rate vx vy px_measured py_measured px_true py_true vx_true vy_true NIS
R 8.46292 0.243462 3.04035 0.0287681 0 3.03909 0.0874531 8.46292 0.243462 8.6 0.25 -3.00029 0 0
R 8.47496 0.248178 1.81352 0.0196726 -0.000786221 1.81317 0.0356745 8.56759 0.241943 8.45 0.25 0 0 3.04699
R 8.36251 0.249987 -0.673436 0.0324491 0.000618854 -0.673081 -0.0218486 8.42544 0.254042 8.35 0.25 -1.81979 0 17.2324
R 8.03625 0.234816 -2.46629 0.0125169 -0.00416142 -2.4661 -0.0308696 7.93286 0.188391 8.05 0.2 -3.99976 -0.99994 46.7421
R 7.7304 0.210522 -2.69264 0.0143196 0.00925105 -2.69236 -0.0385562 7.61269 0.155818 7.7 0.15 -2.99982 0 4.32795
R 7.49533 0.122819 -2.59337 0.227371 0.141343 -2.52662 -0.58459 7.50815 0.0953827 7.45 0.100001 -1.8165 -0.908239 1.70849
R 7.20336 0.0631811 -2.67517 0.1993 0.0229903 -2.62222 -0.52964 7.20598 0.00492725 7.2 9.49949e-07 -2.72851 -0.909507 1.11567
R 6.96268 -0.128844 -2.59064 0.441551 0.448449 -2.34217 -1.10709 6.74585 -0.143185 6.95 -0.15 -1.81898 -1.81898 6.32685
R 6.73333 -0.306304 -2.60543 0.571553 0.584117 -2.19133 -1.40938 6.66966 -0.346668 6.75 -0.349999 -2.00076 -2.00075 0.183518
R 6.58864 -0.588202 -2.61816 0.951983 1.33298 -1.51871 -2.13266 6.60862 -0.557035 6.6 -0.549999 -0.998836 -1.99769 6.66339
R 6.48224 -0.853036 -2.63316 1.21568 1.62363 -0.915553 -2.46887 6.40696 -0.842921 6.5 -0.85 -0.908747 -3.63498 1.39756
R 6.3317 -1.08477 -2.57373 1.16204 1.12698 -1.02297 -2.3617 6.38626 -1.15695 6.35 -1.15 -1.81897 -1.81898 8.64943
R 6.36757 -1.4273 -2.6497 1.59169 1.6921 0.055354 -2.64912 6.43125 -1.48148 6.3 -1.45 0.909739 -3.63894 24.1706
R 6.30754 -1.68878 -2.58575 1.58329 1.35734 0.0323062 -2.58554 6.3228 -1.81431 6.3 -1.8 -0.999494 -2.9985 14.4479
R 6.34922 -1.98806 -2.65316 1.77998 1.44833 0.550951 -2.59533 6.31851 -2.10969 6.3 -2.1 0.999984 -2.99994 3.73202
R 6.36115 -2.2737 -2.63383 1.83491 1.29559 0.687577 -2.54249 6.32593 -2.40018 6.35 -2.4 0 -2.72613 6.57146
R 6.43699 -2.58026 -2.66731 1.9609 1.27121 1.01435 -2.46691 6.43125 -2.71878 6.4 -2.7 0.909499 -2.72851 0.473346
R 6.49539 -2.86058 -2.65391 2.01663 1.1652 1.14439 -2.3945 6.46536 -3.00452 6.45 -3 0 -2.99754 5.24227
R 6.58555 -3.15027 -2.6856 2.09714 1.11789 1.34918 -2.3221 6.52249 -3.32007 6.5 -3.3 0.910402 -2.7312 0.74624
R 6.71596 -3.44641 -2.73206 2.19271 1.09417 1.59168 -2.22052 6.6358 -3.59197 6.65 -3.6 1.81722 -2.72583 1.57132
R 6.85915 -3.72685 -2.73956 2.26175 1.03423 1.74584 -2.11122 6.83625 -3.92723 6.8 -3.9 0.907988 -2.72396 2.10527
R 6.99774 -4.00745 -2.74251 2.30848 0.9615 1.84455 -2.02954 6.86943 -4.18507 6.9 -4.2 0.909334 -2.72801 2.55822
R 7.1159 -4.28332 -2.70836 2.32133 0.858972 1.84718 -1.98069 7.05156 -4.49706 7 -4.45 0.910054 -1.8201 11.1522
R 7.2512 -4.54724 -2.69364 2.34604 0.788225 1.88525 -1.92393 7.10252 -4.67934 7.1 -4.7 0.908302 -2.72489 2.42341
R 7.40696 -4.82938 -2.72725 2.37255 0.735052 1.95973 -1.89666 7.3138 -5.05204 7.25 -5 1.81907 -2.72861 2.6398
R 7.5417 -5.10605 -2.71808 2.37217 0.656142 1.95243 -1.89103 7.29446 -5.23429 7.4 -5.3 0.908194 -2.7246 1.84618
R 7.68703 -5.38955 -2.72107 2.36742 0.581721 1.94557 -1.90236 7.63082 -5.61863 7.55 -5.55 1.81974 -1.81974 0.966176
R 7.83621 -5.65191 -2.7075 2.3665 0.518342 1.93412 -1.89466 7.60376 -5.74774 7.7 -5.8 0.90983 -2.72951 1.69637
R 7.9982 -5.92529 -2.75461 2.36938 0.476716 1.97332 -1.92194 7.82165 -6.06822 7.85 -6.1 1.9986 -2.9979 9.29235
R 8.15187 -6.19421 -2.75707 2.35569 0.414654 1.94856 -1.95052 8.17338 -6.44684 8.05 -6.35 1.81738 -1.81739 1.15246
R 8.31562 -6.45343 -2.74742 2.34962 0.364388 1.92991 -1.95545 8.08628 -6.51855 8.2 -6.6 0.909293 -2.72787 1.18187
R 8.46621 -6.72861 -2.74669 2.32786 0.306465 1.8864 -1.99644 8.25648 -6.78122 8.35 -6.85 1.81994 -1.81993 0.505981
R 8.61259 -6.97853 -2.73693 2.31196 0.260394 1.84783 -2.01898 8.39359 -7.01743 8.5 -7.1 1.00006 -3.00018 0.760386
R 8.76676 -7.24063 -2.77559 2.29944 0.230714 1.84815 -2.07082 8.64403 -7.39047 8.65 -7.4 1.99807 -2.99712 6.27025
R 8.89189 -7.51685 -2.75412 2.25453 0.160434 1.73975 -2.13505 8.87176 -7.71912 8.8 -7.65 0.909955 -1.8199 5.40492
R 9.02883 -7.78536 -2.75955 2.22822 0.121484 1.68631 -2.18437 8.88333 -7.88636 8.9 -7.9 0.999086 -2.9973 0.386987
R 9.1176 -8.05989 -2.72036 2.16625 0.0429773 1.5258 -2.25217 8.8564 -8.01424 9 -8.15 0.909309 -1.8186 7.77541
R 9.24277 -8.32716 -2.72764 2.1417 0.018743 1.474 -2.29507 9.17781 -8.47056 9.1 -8.4 1.0004 -3.00119 0.541572
R 9.31682 -8.60998 -2.70026 2.07428 -0.0548738 1.30283 -2.36517 9.22922 -8.6898 9.2 -8.65 0.909508 -1.819 4.93481
R 9.48985 -8.87396 -2.76898 2.11185 0.0150988 1.42613 -2.37348 9.37748 -9.00609 9.35 -8.95 1.99965 -3.99928 31.9231
R 9.56961 -9.15607 -2.76261 2.05601 -0.0433673 1.28847 -2.44373 9.50096 -9.25642 9.5 -9.25 0.999584 -1.99917 2.07328
R 9.64649 -9.42423 -2.75055 2.00906 -0.0882569 1.16725 -2.4906 9.47534 -9.42627 9.55 -9.5 0 -3.00023 1.54718
R 9.72898 -9.70909 -2.77021 1.97557 -0.104032 1.09094 -2.54636 9.69942 -9.88416 9.6 -9.8 0.999005 -2.997 1.24459
R 9.76468 -10.0071 -2.75306 1.89987 -0.174476 0.889694 -2.60533 9.60087 -9.96395 9.7 -10.05 0.909855 -1.81969 3.69667
R 9.83601 -10.2901 -2.752 1.86558 -0.191026 0.799554 -2.63329 9.92627 -10.4745 9.75 -10.3 0 -3.00065 0.559969
R 9.88786 -10.5823 -2.75953 1.82944 -0.199954 0.7058 -2.66774 9.71825 -10.5087 9.8 -10.6 0.90817 -2.72451 0.846486
R 9.91299 -10.8877 -2.74759 1.76466 -0.249986 0.529319 -2.69612 9.88465 -10.9383 9.85 -10.9 0 -2.73074 2.23952
R 9.93181 -11.1787 -2.74645 1.71652 -0.270199 0.398802 -2.71734 9.83952 -11.1692 9.85 -11.2 0 -2.99965 0.0799399
R 9.93817 -11.4647 -2.75296 1.66964 -0.286686 0.271668 -2.73952 9.87961 -11.5637 9.85 -11.5 0 -2.99843 0.0784782
R 9.85043 -11.7499 -2.72806 1.5356 -0.419636 -0.0960038 -2.72637 9.85311 -11.8523 9.8 -11.75 -1.0003 -2.00061 17.0946
R 9.79864 -12.0185 -2.72086 1.46655 -0.449529 -0.283123 -2.70609 9.69694 -11.9706 9.7 -12 -1.0004 -3.00119 0.387972
R 9.6829 -12.2908 -2.71439 1.34569 -0.543627 -0.605881 -2.6459 9.72311 -12.3964 9.6 -12.25 -0.999885 -1.99981 6.13563
R 9.50672 -12.5324 -2.69583 1.1834 -0.683634 -1.01843 -2.49606 9.39039 -12.3902 9.45 -12.45 -1.99803 -1.99805 14.0567
R 9.35061 -12.7727 -2.68676 1.07567 -0.727475 -1.27659 -2.3641 9.34456 -12.7106 9.3 -12.65 -0.909773 -1.81953 0.825099
R 9.08881 -12.9676 -2.67813 0.86091 -0.915409 -1.74547 -2.03119 9.15326 -13.0031 9.05 -12.85 -3.99257 -1.99629 25.4705
R 8.81357 -13.1337 -2.68075 0.683953 -1.0137 -2.0778 -1.69386 8.71632 -12.9613 8.75 -13 -1.8213 -0.910651 4.68252
R 8.53413 -13.2492 -2.6775 0.505551 -1.11031 -2.34257 -1.29669 8.54518 -13.1702 8.5 -13.1 -2.997 -0.999005 5.40292
R 8.21692 -13.3075 -2.68793 0.297188 -1.23164 -2.5701 -0.787112 8.22926 -13.1698 8.2 -13.15 -2.72741 0 9.32367
R 7.90254 -13.3219 -2.68255 0.115319 -1.28737 -2.66473 -0.308663 7.82914 -13.0525 7.9 -13.15 -2.72986 0 1.85621
R 7.65256 -13.3468 -2.64033 0.0273599 -1.21687 -2.63934 -0.0722301 7.66146 -13.3079 7.6 -13.2 -2.9979 -0.999304 7.69996
R 7.3623 -13.3131 -2.63937 -0.115068 -1.22657 -2.62192 0.303037 7.21108 -13.0985 7.3 -13.25 -2.72846 0 0.820386
R 7.06669 -13.2161 -2.66278 -0.293035 -1.28608 -2.54927 0.769168 6.88472 -12.9588 7 -13.2 -2.99784 0.999284 4.087
R 6.79529 -13.1222 -2.66825 -0.423196 -1.27637 -2.43286 1.09579 6.77424 -13.2337 6.7 -13.1 -2.72787 0.909275 0.196704
R 6.53058 -13.0321 -2.63648 -0.507279 -1.1964 -2.30447 1.28081 6.40137 -13.0799 6.4 -13.05 -2.72678 0 10.3934
R 6.26156 -12.9224 -2.63591 -0.59458 -1.14046 -2.18355 1.47654 6.10143 -13.0076 6.1 -13 -2.72966 0.909888 2.269
R 5.99873 -12.7562 -2.69967 -0.720575 -1.15747 -2.0286 1.78129 5.79854 -12.8478 5.8 -12.85 -2.99777 1.99851 6.61348
R 5.74918 -12.6159 -2.68721 -0.777461 -1.0758 -1.91517 1.885 5.50547 -12.5721 5.55 -12.7 -1.81798 0.908995 8.54187
R 5.49583 -12.4322 -2.72447 -0.861246 -1.04425 -1.77497 2.06693 5.27506 -12.4782 5.3 -12.55 -2.72627 1.81753 1.05169
R 5.24006 -12.261 -2.73501 -0.907318 -0.972785 -1.68438 2.15479 5.04446 -12.3808 5.05 -12.35 -1.8199 1.81989 1.22858
R 5.01024 -12.0624 -2.76023 -0.963931 -0.927741 -1.57415 2.26736 4.75061 -11.9803 4.85 -12.15 -1.99928 1.99929 0.659497
R 4.80145 -11.852 -2.77873 -1.02126 -0.891079 -1.4513 2.36961 4.69113 -11.9084 4.7 -11.9 -0.908236 2.72471 0.319699
R 4.63818 -11.6145 -2.74587 -1.09223 -0.860607 -1.26449 2.43738 4.59746 -11.6205 4.6 -11.65 -0.909847 1.81971 5.53596
R 4.47937 -11.3391 -2.82507 -1.1766 -0.860878 -1.08501 2.6084 4.47442 -11.3673 4.5 -11.35 -1.0008 4.0032 26.6335
R 4.41579 -11.0622 -2.77836 -1.28788 -0.882848 -0.775595 2.6679 4.43713 -11.024 4.45 -11.05 0 1.81614 10.286
R 4.37083 -10.763 -2.76906 -1.40142 -0.899659 -0.466761 2.72944 4.44745 -10.786 4.45 -10.8 0 2.72555 0.100068
R 4.3723 -10.4629 -2.75491 -1.52442 -0.925157 -0.127712 2.75195 4.46329 -10.5756 4.45 -10.5 0 2.729 0.323625
R 4.41065 -10.1625 -2.74303 -1.65278 -0.95486 0.224628 2.73381 4.51895 -10.2367 4.5 -10.2 0.908962 2.72687 0.491175
R 4.48845 -9.86662 -2.7381 -1.78077 -0.976041 0.57072 2.67796 4.63224 -9.9707 4.6 -9.9 0.909756 2.72927 0.361345
R 4.56419 -9.57105 -2.74581 -1.87407 -0.949565 0.820032 2.6205 4.60757 -9.4335 4.7 -9.6 0.90859 2.72578 1.73065
R 4.71813 -9.29541 -2.74498 -2.0124 -0.982875 1.17319 2.48164 4.89179 -9.39473 4.85 -9.3 1.81967 2.72952 0.514703
R 4.90305 -9.03891 -2.75738 -2.14879 -1.01392 1.50647 2.30948 4.95209 -8.96272 5 -9.05 0.999884 1.99977 0.186222
R 5.06427 -8.8042 -2.75303 -2.23501 -0.980874 1.69707 2.16774 5.20806 -8.88942 5.15 -8.8 1.81805 2.72708 1.55491
R 5.27844 -8.58091 -2.76628 -2.34976 -0.987817 1.94343 1.9686 5.3785 -8.56768 5.35 -8.55 1.99956 1.99957 0.116443
R 5.47841 -8.36758 -2.76587 -2.42295 -0.939948 2.08187 1.82096 5.56465 -8.44845 5.5 -8.35 0.909045 1.81807 2.06898
R 5.66481 -8.14515 -2.75122 -2.46382 -0.856751 2.14313 1.72518 5.64921 -8.02513 5.7 -8.1 2.72608 2.72608 4.62009
R 5.88877 -7.92797 -2.75511 -2.5131 -0.790507 2.22864 1.6198 5.93492 -7.84119 5.95 -7.85 1.81801 1.81801 3.03598
R 6.11232 -7.71258 -2.75441 -2.54306 -0.708779 2.27559 1.55192 6.13741 -7.63243 6.15 -7.65 1.81798 1.81798 4.49793
R 6.34352 -7.51009 -2.75178 -2.57177 -0.638749 2.31699 1.48453 6.39143 -7.46358 6.35 -7.45 1.82027 1.82027 3.32336
R 6.58192 -7.3382 -2.75822 -2.62367 -0.623607 2.39648 1.36553 6.47916 -7.09783 6.6 -7.25 2.99862 1.99908 0.809926
R 6.78768 -7.1179 -2.74461 -2.58876 -0.481009 2.33577 1.44119 6.68682 -6.93297 6.8 -7.05 0.908005 1.816 15.2294
R 7.04001 -6.94605 -2.74813 -2.6285 -0.467797 2.39425 1.34898 7.08741 -6.92602 7 -6.85 2.72856 1.81904 0.0426362
R 7.26553 -6.73424 -2.7363 -2.59894 -0.354475 2.34321 1.41305 7.29976 -6.68807 7.25 -6.65 1.8202 1.8202 9.04677
R 7.53302 -6.60016 -2.75546 -2.67101 -0.413985 2.45595 1.24934 7.53544 -6.5372 7.5 -6.5 2.99964 0.999884 4.42173
R 7.77674 -6.4408 -2.75317 -2.68192 -0.371819 2.46739 1.22146 7.67559 -6.34628 7.75 -6.4 1.81709 0.908549 1.24876
R 8.03056 -6.29391 -2.74147 -2.69802 -0.33798 2.47617 1.17655 8.03231 -6.28406 8 -6.25 2.72931 1.81954 0.903081
R 8.25551 -6.11418 -2.72633 -2.66381 -0.242295 2.42103 1.25359 8.21488 -6.01843 8.25 -6.05 1.99977 1.99976 7.07798
R 8.51802 -5.97865 -2.7403 -2.70169 -0.269627 2.4794 1.16696 8.50318 -5.89335 8.5 -5.9 2.72796 0.909326 1.64106
R 8.76063 -5.7993 -2.72517 -2.66409 -0.174647 2.42035 1.25239 8.77185 -5.74314 8.75 -5.75 1.81776 1.81775 7.86413
R 8.98688 -5.61795 -2.71528 -2.62438 -0.0960498 2.36012 1.34261 9.02394 -5.6024 8.95 -5.55 1.99657 1.99656 4.53271
R 9.23677 -5.47088 -2.72539 -2.64843 -0.128118 2.40063 1.29023 9.04088 -5.29691 9.2 -5.4 2.72728 0.909094 3.00794
R 9.46276 -5.27078 -2.71018 -2.5915 -0.0313967 2.31037 1.41678 9.39579 -5.21659 9.45 -5.25 1.81816 1.81815 7.45623
R 9.68896 -5.07432 -2.70002 -2.5435 0.0399376 2.23132 1.5203 9.75019 -5.14652 9.65 -5.05 1.82036 1.82036 4.1364
R 9.90188 -4.86286 -2.6903 -2.48559 0.112424 2.13189 1.64096 9.9198 -4.86941 9.85 -4.85 1.81578 1.81577 3.75345
R 10.0954 -4.63691 -2.67772 -2.41569 0.1901 2.00266 1.77751 10.0029 -4.54771 10.05 -4.6 1.82039 2.7306 4.33097
R 10.2815 -4.39333 -2.67641 -2.34721 0.24867 1.87542 1.90944 10.1981 -4.26556 10.25 -4.3 1.81769 2.72652 1.85086
R 10.4972 -4.16934 -2.69196 -2.33183 0.222651 1.85656 1.94931 10.5985 -4.12247 10.45 -4.05 1.81792 1.81792 1.2181
R 10.6856 -3.9233 -2.69989 -2.28815 0.240191 1.77489 2.0345 10.7092 -3.85166 10.65 -3.85 1.81903 1.81904 0.0651886
R 10.832 -3.64933 -2.6976 -2.19369 0.333384 1.57374 2.19098 10.795 -3.60617 10.8 -3.6 0.909773 2.72931 5.99176
R 11.0058 -3.41932 -2.70369 -2.1861 0.285009 1.56059 2.20783 10.9584 -3.30584 10.95 -3.3 1.99953 2.99928 2.91561
R 11.1204 -3.12575 -2.71589 -2.0886 0.371271 1.34429 2.35986 11.1178 -2.98807 11.1 -3 0.907725 2.72317 4.41194
R 11.2328 -2.82286 -2.72798 -2.00994 0.41423 1.15985 2.46913 11.1491 -2.68183 11.2 -2.7 0.908929 2.72678 0.919259
R 11.234 -2.45837 -2.74152 -1.80687 0.632319 0.641202 2.66548 11.1188 -2.36202 11.2 -2.4 -0.909276 2.72782 32.0451
R 11.3494 -2.19051 -2.72929 -1.82869 0.488636 0.69609 2.63903 11.2029 -2.09837 11.25 -2.1 1.81915 2.72871 18.5092
R 11.3856 -1.87286 -2.74019 -1.73911 0.533544 0.459048 2.70146 11.3011 -1.79507 11.35 -1.8 0 2.73035 1.22074
R 11.3374 -1.5539 -2.75034 -1.5904 0.658709 0.0539087 2.74981 11.0622 -1.44342 11.3 -1.5 -0.999165 2.99748 10.8112
R 11.2763 -1.24535 -2.76167 -1.46367 0.73527 -0.295279 2.74584 11.1658 -1.23674 11.2 -1.25 -0.909822 1.81964 3.78426
R 11.12 -0.951782 -2.77315 -1.26846 0.900821 -0.825709 2.64737 10.8534 -0.929069 11.05 -0.95 -1.99817 3.99632 20.1793
R 11.0062 -0.666613 -2.77666 -1.17396 0.889454 -1.07318 2.56089 10.8608 -0.663121 10.9 -0.649999 -0.908549 1.8171 0.697967
R 10.8316 -0.398323 -2.78439 -1.03279 0.948615 -1.42679 2.39105 10.8451 -0.437482 10.75 -0.45 -1.8174 1.81738 2.5337
R 10.5688 -0.170424 -2.8105 -0.828425 1.08747 -1.9 2.07096 10.5062 -0.245062 10.5 -0.25 -2.72796 1.81864 16.389
R 10.3461 0.0258755 -2.78594 -0.713612 1.07546 -2.10618 1.82359 10.4884 -0.0812124 10.25 -0.0999996 -1.81964 0.909826 1.30369
R 10.0648 0.16745 -2.77713 -0.552168 1.12804 -2.36442 1.4567 9.92588 0.00395802 10 9.49949e-07 -2.73049 0.910169 1.58317
R 9.7619 0.255594 -2.76198 -0.384745 1.17721 -2.56007 1.03663 9.63973 0.0585714 9.7 0.0500011 -2.72439 0 1.08702
R 9.44292 0.280824 -2.74192 -0.205664 1.2303 -2.68413 0.559947 9.32242 0.112393 9.4 0.100001 -2.72773 0.90923 0.707014
R 9.12955 0.27922 -2.73043 -0.0404957 1.26041 -2.72819 0.110541 9.0614 0.20077 9.1 0.2 -2.72652 0.908843 0.145947
R 8.84266 0.243444 -2.68135 0.110922 1.27445 -2.66487 -0.296811 8.90199 0.247143 8.85 0.25 -1.81847 0 7.98971
R 8.54279 0.244902 -2.73292 0.210279 1.22668 -2.67273 -0.570451 8.46355 0.258809 8.55 0.25 -3.6326 0 10.719
R 8.24299 0.18297 -2.74568 0.330921 1.20934 -2.59671 -0.892109 8.05161 0.241868 8.2 0.25 -2.72841 0 0.546575
R 7.95242 0.11269 -2.75505 0.4297 1.17317 -2.50459 -1.14775 7.78653 0.205765 7.9 0.2 -2.72708 -0.909025 0.553012
R 7.66864 0.0279338 -2.7655 0.509315 1.11781 -2.4145 -1.3484 7.77346 0.0969346 7.6 0.100001 -2.73169 -0.910554 1.53974
R 7.37683 -0.035277 -2.78935 0.538507 1.01185 -2.39459 -1.43053 7.15329 0.0425189 7.3 0.0500011 -2.99789 0 7.54076
R 7.13592 -0.179567 -2.77386 0.643753 1.02374 -2.21867 -1.66488 7.17215 -0.0469302 7.05 -0.0499995 -1.81811 -1.81813 2.58295
R 6.89398 -0.345665 -2.76531 0.739864 1.0138 -2.04234 -1.86434 6.83008 -0.250179 6.85 -0.25 -1.81646 -1.81646 0.500617
R 6.6833 -0.541546 -2.7521 0.844672 1.0148 -1.82733 -2.05789 6.74798 -0.457727 6.65 -0.45 -1.81901 -1.819 0.461506
R 6.52812 -0.809022 -2.74889 1.01165 1.09784 -1.45819 -2.33025 6.565 -0.754748 6.5 -0.749999 -0.909789 -3.63914 6.44287
R 6.4003 -1.07623 -2.75377 1.14452 1.12071 -1.13863 -2.50734 6.44213 -1.11266 6.4 -1.1 -1.0002 -3.00061 0.352186
R 6.30089 -1.33763 -2.75631 1.25491 1.11426 -0.856276 -2.61993 6.39474 -1.36501 6.3 -1.35 -1.00035 -2.00072 0.193213
R 6.26254 -1.62825 -2.77112 1.41655 1.18784 -0.42574 -2.73822 6.24787 -1.61119 6.25 -1.6 0 -2.99288 4.53751
R 6.27791 -1.94224 -2.7915 1.59758 1.26837 0.0747474 -2.7905 6.41111 -1.93027 6.3 -1.9 0.909739 -2.72921 5.62853
R 6.3066 -2.22883 -2.76367 1.71824 1.23773 0.406019 -2.73368 6.53066 -2.26346 6.35 -2.2 0 -2.72782 2.50024
R 6.35181 -2.51191 -2.74167 1.82727 1.19887 0.695482 -2.65199 6.27034 -2.51837 6.35 -2.55 0 -3.63689 2.12395
R 6.44224 -2.81571 -2.76352 1.96024 1.20116 1.04923 -2.55659 6.45883 -2.92313 6.4 -2.9 0.908854 -2.72658 0.511768
R 6.54542 -3.10662 -2.77082 2.06586 1.1667 1.31639 -2.43814 6.49154 -3.20004 6.5 -3.2 0.908863 -2.72658 0.742133
R 6.67021 -3.38546 -2.77108 2.16 1.12297 1.53987 -2.30383 6.70146 -3.53446 6.6 -3.5 0.908714 -2.72613 1.62325
R 6.80535 -3.66078 -2.77712 2.24026 1.07296 1.72339 -2.17769 6.70956 -3.81739 6.7 -3.8 0.909565 -2.72871 1.40071
R 6.934 -3.9357 -2.77249 2.29231 0.998584 1.83129 -2.08161 6.73758 -4.06653 6.8 -4.1 0.908566 -2.72568 3.47242
R 7.09875 -4.21953 -2.81424 2.35557 0.954068 1.98871 -1.99122 7.10395 -4.49091 6.95 -4.4 1.81821 -2.72732 2.39182
R 7.24992 -4.49375 -2.8136 2.38786 0.878129 2.05151 -1.92553 7.02393 -4.64459 7.1 -4.7 0.91085 -2.73253 2.41744
R 7.37761 -4.76476 -2.79762 2.3907 0.78473 2.04528 -1.9088 7.24619 -4.97656 7.2 -4.95 1.00033 -2.00068 4.69676
R 7.52697 -5.02246 -2.83193 2.41145 0.73425 2.11001 -1.88883 7.22054 -5.09604 7.35 -5.2 1.9976 -2.9964 5.81777
R 7.65811 -5.27991 -2.78883 2.40321 0.645588 2.06249 -1.87715 7.40989 -5.38617 7.5 -5.45 0.908252 -1.8165 10.2528
R 7.816 -5.54871 -2.79198 2.40754 0.583608 2.07295 -1.87031 7.39379 -5.52612 7.65 -5.7 1.81785 -2.72677 2.63357
R 7.96285 -5.80876 -2.74574 2.3952 0.504815 2.01576 -1.86435 7.84699 -6.00602 7.8 -5.95 0.909078 -1.81815 11.0463
R 8.12706 -6.06899 -2.75031 2.39464 0.453586 2.01807 -1.86859 7.89511 -6.1416 7.95 -6.2 1.8197 -2.72955 1.39198
R 8.29974 -6.34335 -2.76878 2.38635 0.400344 2.01597 -1.8979 8.2234 -6.53735 8.15 -6.5 1.8187 -2.72807 1.5443
R 8.44201 -6.61191 -2.73084 2.35578 0.325477 1.9302 -1.93179 8.32206 -6.77114 8.3 -6.75 0.908979 -1.81795 7.56976
R 8.5756 -6.87652 -2.70769 2.32575 0.262752 1.85547 -1.97201 8.2713 -6.91098 8.4 -7 0.909094 -2.72732 1.44448
R 8.72035 -7.16032 -2.72392 2.29697 0.212995 1.80871 -2.03674 8.54154 -7.25975 8.55 -7.3 1.81751 -2.72628 2.525
R 8.84001 -7.45074 -2.69834 2.24294 0.134569 1.68015 -2.11143 8.83598 -7.68162 8.7 -7.55 0.908615 -1.81723 5.60425
R 8.99034 -7.71675 -2.71371 2.22988 0.114331 1.66186 -2.14533 8.77916 -7.72457 8.85 -7.8 1.8215 -2.73224 3.06091
R 9.131 -7.98808 -2.73074 2.20605 0.0839144 1.62038 -2.19803 9.1081 -8.22433 9 -8.1 0.999025 -2.99707 1.06791
R 9.21216 -8.26968 -2.69327 2.1354 -0.000946109 1.44112 -2.27528 9.13825 -8.4037 9.1 -8.35 0.908285 -1.81657 9.20118
R 9.32622 -8.54748 -2.69112 2.10129 -0.0316239 1.36159 -2.32125 9.22081 -8.59155 9.2 -8.6 0.91012 -2.73036 0.119191
R 9.42646 -8.83725 -2.69319 2.05863 -0.0666862 1.26232 -2.37904 9.30208 -8.90831 9.3 -8.9 0.909524 -2.72856 0.165679
R 9.52247 -9.11798 -2.70651 2.02599 -0.0843177 1.18988 -2.43092 9.39993 -9.18842 9.4 -9.2 0.999066 -2.99725 1.08255
R 9.59908 -9.41233 -2.7168 1.97851 -0.117432 1.07724 -2.49411 9.48751 -9.46421 9.5 -9.5 0.909706 -2.7291 0.152892
R 9.68169 -9.70265 -2.73182 1.94507 -0.131312 0.99874 -2.54271 9.57122 -9.75525 9.6 -9.8 1.00048 -3.00143 0.623881
R 9.72861 -9.98624 -2.73191 1.88917 -0.173961 0.85514 -2.59462 9.61433 -9.96417 9.7 -10.05 0.998785 -1.99755 0.551529
R 9.77122 -10.2706 -2.71403 1.82678 -0.229061 0.687183 -2.62559 9.77731 -10.3295 9.75 -10.3 0 -2.72851 4.52841
R 9.84763 -10.5714 -2.72937 1.80827 -0.215332 0.642085 -2.65277 9.89592 -10.7137 9.8 -10.6 0.909012 -2.72704 1.00373
R 9.88 -10.8604 -2.72401 1.75596 -0.250476 0.501504 -2.67745 9.84397 -10.9287 9.85 -10.9 0 -3.00318 1.2066
R 9.90556 -11.1379 -2.72821 1.71697 -0.261773 0.397363 -2.69911 9.74773 -11.0482 9.85 -11.2 0 -3.00013 0.368768
R 9.92223 -11.4266 -2.74288 1.68076 -0.264382 0.300997 -2.72631 9.79495 -11.4686 9.85 -11.5 0 -2.99592 0.460097
R 9.8382 -11.7271 -2.72078 1.54668 -0.396859 -0.0656096 -2.71999 9.76634 -11.6976 9.8 -11.75 -0.908731 -1.81746 16.6133
R 9.77564 -12.0272 -2.71384 1.45918 -0.450117 -0.302283 -2.69695 9.7856 -12.1262 9.7 -12 -0.909591 -2.72875 1.87509
R 9.6861 -12.3201 -2.70928 1.37173 -0.491955 -0.53577 -2.65577 9.57233 -12.2948 9.6 -12.3 -0.908879 -2.72667 0.649289
R 9.50884 -12.5973 -2.69695 1.21175 -0.625872 -0.947659 -2.52497 9.37132 -12.4833 9.45 -12.55 -1.82006 -1.82006 11.767
R 9.2706 -12.8243 -2.68299 1.01142 -0.802302 -1.42374 -2.27406 9.16109 -12.6848 9.2 -12.75 -3.00047 -2.00033 19.8247
R 8.99626 -13.0178 -2.68217 0.815102 -0.941636 -1.83942 -1.95207 8.85918 -12.7953 8.95 -12.9 -1.81543 -0.907707 9.7594
R 8.74826 -13.1867 -2.67083 0.671922 -0.991522 -2.09026 -1.66257 8.66865 -13.0114 8.7 -13.05 -2.73134 -1.82089 1.28509
R 8.40163 -13.2662 -2.6862 0.415613 -1.19404 -2.45753 -1.08456 8.27267 -12.996 8.4 -13.15 -2.99683 0 25.7507
R 8.13707 -13.3372 -2.66486 0.288112 -1.19088 -2.55502 -0.757202 8.04571 -13.0646 8.1 -13.2 -2.72707 -0.909028 1.29021
R 7.83339 -13.3707 -2.6671 0.123913 -1.23322 -2.64665 -0.329644 7.86599 -13.4154 7.8 -13.25 -2.7295 0 0.929325
R 7.54273 -13.3614 -2.66321 -0.0228051 -1.24886 -2.66251 0.0607293 7.50783 -13.2944 7.5 -13.25 -2.99994 0 0.0663591
R 7.24139 -13.2907 -2.67997 -0.201721 -1.3071 -2.62563 0.536949 7.19781 -13.1973 7.2 -13.2 -2.72698 0.908995 2.45316
R 6.95242 -13.1952 -2.67364 -0.350299 -1.30431 -2.51127 0.917537 6.79473 -13.0433 6.85 -13.15 -3.63709 0 0.877318
R 6.67051 -13.0868 -2.69877 -0.486716 -1.30061 -2.38538 1.26229 6.54114 -13.2079 6.5 -13.1 -3.0006 1.00018 0.290707
R 6.41314 -13.0143 -2.65005 -0.536008 -1.17775 -2.2784 1.3534 6.27012 -13.1139 6.25 -13.05 -1.81729 0 24.7574
R 6.14218 -12.8688 -2.68265 -0.654469 -1.16909 -2.12834 1.63303 5.99498 -12.9442 6 -12.95 -2.72861 1.81906 0.896504
R 5.89653 -12.7251 -2.68698 -0.737461 -1.12161 -1.98884 1.80675 5.73761 -12.8672 5.7 -12.8 -2.99988 0.999964 1.39353
R 5.65284 -12.6073 -2.67671 -0.775425 -1.0337 -1.9115 1.87375 5.45236 -12.7392 5.45 -12.7 -2.00001 1 6.60102
R 5.4198 -12.4541 -2.69748 -0.836076 -0.990775 -1.80834 2.00158 5.26451 -12.5511 5.25 -12.55 -1.99732 1.99733 0.213724
R 5.19161 -12.2832 -2.70364 -0.892203 -0.942107 -1.69707 2.10466 5.1317 -12.5744 5.05 -12.35 -1.8206 1.82059 1.1736
R 4.98088 -12.0574 -2.76046 -0.982363 -0.943732 -1.53222 2.29618 4.89215 -12.1623 4.85 -12.1 -1.9998 2.99969 8.09506
R 4.80055 -11.8463 -2.75581 -1.04289 -0.906305 -1.38818 2.38064 4.6421 -11.7788 4.7 -11.85 -1.00002 2.00003 2.13436
R 4.64038 -11.6039 -2.79009 -1.12073 -0.896549 -1.21376 2.51225 4.58347 -11.632 4.6 -11.6 -0.998955 2.99688 3.03873
R 4.51896 -11.3347 -2.81788 -1.2106 -0.89615 -0.993185 2.63705 4.46846 -11.2092 4.5 -11.3 -1.00106 3.00319 2.20346
R 4.45809 -11.057 -2.77657 -1.32256 -0.916657 -0.682187 2.69147 4.43672 -11.0976 4.45 -11.05 0 1.8168 9.1081
R 4.39884 -10.7749 -2.77682 -1.41663 -0.914372 -0.426408 2.74388 4.40556 -10.7068 4.45 -10.8 0 3.00139 0.0965441
R 4.44072 -10.5025 -2.73335 -1.56412 -0.982831 -0.0182507 2.73329 4.44716 -10.5583 4.45 -10.55 0 2.00041 9.27064
R 4.46158 -10.239 -2.7219 -1.66988 -0.994438 0.269257 2.70855 4.45602 -10.2142 4.5 -10.3 0.999584 2.99875 0.271189
R 4.50142 -9.95907 -2.72456 -1.76403 -0.977144 0.523209 2.67385 4.56608 -10.005 4.55 -10 0 2.72717 1.02723
R 4.59843 -9.67289 -2.71996 -1.89273 -1.00304 0.860601 2.58022 4.62339 -9.6559 4.65 -9.7 1.8193 2.72895 0.41792
R 4.67507 -9.39663 -2.72857 -1.95877 -0.944513 1.03225 2.52578 4.74852 -9.27389 4.8 -9.4 1.00006 3.00019 4.15638
R 4.87544 -9.15815 -2.73675 -2.12995 -1.03541 1.45175 2.31996 4.98605 -9.20516 4.95 -9.15 2.00004 2.00005 4.57447
R 4.97951 -8.91583 -2.73117 -2.16353 -0.93551 1.5257 2.26528 5.12819 -8.95913 5.1 -8.9 0.999704 2.99911 8.81084
R 5.19489 -8.69053 -2.74533 -2.29818 -0.976751 1.82542 2.05053 5.33883 -8.81618 5.25 -8.65 1.81848 1.81849 1.29954
R 5.34387 -8.45171 -2.73616 -2.32612 -0.869934 1.8757 1.99206 5.38848 -8.31037 5.45 -8.4 2.00068 3.00103 9.29867
R 5.55673 -8.24303 -2.75751 -2.41651 -0.869688 2.06384 1.82878 5.56632 -8.04704 5.65 -8.15 1.99952 1.99953 0.455283
R 5.73415 -8.02193 -2.75712 -2.43701 -0.771882 2.10061 1.78582 5.78426 -7.91676 5.8 -7.95 0.999944 1.99989 7.62784
R 5.92672 -7.82185 -2.75593 -2.47564 -0.717759 2.16708 1.70263 5.92249 -7.69769 5.95 -7.75 1.99772 1.99772 2.29017
R 6.13864 -7.61792 -2.75679 -2.51229 -0.663902 2.22869 1.6226 6.16773 -7.55437 6.15 -7.55 1.8199 1.8199 2.00174
R 6.37684 -7.43919 -2.76858 -2.58104 -0.669748 2.34488 1.47193 6.25322 -7.17734 6.4 -7.35 2.99605 1.99736 1.27406
R 6.6354 -7.27263 -2.78489 -2.64389 -0.660324 2.44703 1.32953 6.67172 -7.23041 6.65 -7.2 1.8206 0.910303 0.0389024
R 6.85041 -7.08949 -2.76286 -2.64089 -0.557861 2.4237 1.32629 6.79888 -6.99594 6.85 -7.05 1.99848 1.99848 8.9519
R 7.10294 -6.94096 -2.76498 -2.68534 -0.539713 2.48215 1.21822 7.17955 -6.9446 7.1 -6.85 2.72688 1.81792 0.222897
R 7.34148 -6.73928 -2.7537 -2.66023 -0.420792 2.44078 1.27492 7.42782 -6.72337 7.35 -6.65 1.81821 1.81821 10.8787
R 7.59532 -6.56273 -2.75419 -2.66893 -0.373165 2.45222 1.25387 7.54452 -6.5069 7.55 -6.5 1.81726 0.908632 1.17527
R 7.86654 -6.43751 -2.75491 -2.72314 -0.396697 2.51722 1.11945 7.8635 -6.45394 7.8 -6.4 2.72951 0.909839 0.720735
R 8.08413 -6.2683 -2.72627 -2.69262 -0.296817 2.45608 1.18332 7.89525 -6.11857 8.05 -6.25 2.00047 2.00048 9.06956
R 8.32161 -6.11765 -2.71778 -2.69337 -0.258983 2.44931 1.1778 8.31423 -6.05705 8.3 -6.05 2.72702 1.81801 0.831878
R 8.56361 -5.95855 -2.71478 -2.68221 -0.208512 2.43333 1.20372 8.53167 -5.90349 8.55 -5.9 2.00127 1.00064 1.6916
R 8.80749 -5.83169 -2.7197 -2.70178 -0.211411 2.46087 1.15797 8.88764 -5.82241 8.8 -5.75 2.99963 1.99976 0.172421
R 9.03698 -5.65992 -2.71926 -2.66349 -0.130799 2.41435 1.25112 9.21669 -5.64514 9.05 -5.55 1.99699 1.997 5.33729
R 9.26168 -5.45806 -2.71638 -2.60754 -0.04134 2.33813 1.38271 9.16723 -5.29709 9.25 -5.35 1.8166 1.81659 5.36494
R 9.48634 -5.23844 -2.71354 -2.54695 0.0401146 2.24775 1.52017 9.39027 -5.1041 9.45 -5.15 1.8222 1.82219 3.85899
R 9.71206 -5.05822 -2.71628 -2.53385 0.0432752 2.22989 1.55105 9.62802 -5.02338 9.65 -5 1.99604 0.998028 0.209559
R 9.91032 -4.85652 -2.70232 -2.47812 0.113094 2.12905 1.66423 9.88102 -4.86216 9.85 -4.85 1.81895 1.81894 3.80269
R 10.1035 -4.63764 -2.68584 -2.41272 0.187084 2.00344 1.78884 10.0747 -4.62454 10.05 -4.6 1.81966 2.72951 4.41546
R 10.2949 -4.40486 -2.68361 -2.3593 0.22479 1.90349 1.89169 10.1743 -4.32841 10.25 -4.35 1.81908 1.81907 0.717433
R 10.4777 -4.18531 -2.68336 -2.31452 0.25171 1.81673 1.97482 10.56 -4.15868 10.45 -4.1 2.00061 3.0009 0.476413
R 10.6722 -3.97475 -2.70051 -2.30426 0.221432 1.80784 2.0061 10.7738 -3.89129 10.65 -3.85 1.99963 1.99964 1.58308
R 10.7924 -3.70008 -2.70322 -2.18907 0.34689 1.56687 2.2028 10.7674 -3.58281 10.8 -3.6 0.999844 2.99952 11.5491
R 10.968 -3.4688 -2.72159 -2.18352 0.293534 1.56519 2.22648 10.9601 -3.36033 10.95 -3.35 2.00065 2.00064 3.77694
R 11.0791 -3.19336 -2.73014 -2.08879 0.381203 1.3518 2.37198 11.0282 -3.07488 11.1 -3.1 0.998546 2.99563 5.72627
R 11.1485 -2.87239 -2.74398 -1.94672 0.522066 1.0074 2.55237 11.1386 -2.78926 11.15 -2.8 0 2.73104 14.9825
R 11.1995 -2.55082 -2.75095 -1.82763 0.603761 0.698782 2.66072 11.1042 -2.54354 11.15 -2.55 0 1.81785 4.21125
R 11.3151 -2.30978 -2.73068 -1.84845 0.477548 0.748479 2.6261 11.1658 -2.25651 11.25 -2.25 1.99733 3.99465 14.6952
R 11.3616 -2.00143 -2.74753 -1.76464 0.520152 0.529274 2.69607 11.3117 -1.92025 11.35 -1.95 0 1.81854 1.3028
R 11.3972 -1.70253 -2.74545 -1.69446 0.538025 0.33866 2.72448 11.4185 -1.70062 11.35 -1.7 0 2.72762 0.203679
R 11.345 -1.3812 -2.75239 -1.54828 0.660859 -0.0619609 2.75169 11.2541 -1.38373 11.3 -1.4 -0.908846 2.72653 10.9969
R 11.2247 -1.06493 -2.76293 -1.36954 0.809965 -0.552316 2.70717 11.0754 -1.08747 11.15 -1.1 -1.82014 2.73021 15.8863
R 11.0969 -0.781745 -2.76678 -1.2358 0.871578 -0.909614 2.61299 10.9247 -0.791254 11 -0.799999 -1.00036 3.00107 2.28088
R 10.9128 -0.520775 -2.78926 -1.06766 0.981462 -1.34491 2.4436 10.7146 -0.485949 10.85 -0.5 -2.00003 3.00006 9.57975
R 10.7331 -0.269857 -2.79658 -0.945457 1.00279 -1.63704 2.26737 10.6895 -0.304531 10.65 -0.3 -1.81779 0.908892 0.240943
R 10.48 -0.072638 -2.81251 -0.763121 1.11008 -2.03255 1.94395 10.5032 -0.154501 10.4 -0.15 -3.00061 2.00041 10.2687
R 10.2535 0.0910346 -2.79032 -0.649313 1.10433 -2.22249 1.68714 10.1407 -0.0721585 10.15 -0.0499992 -1.99645 0 1.49454
R 9.98019 0.201985 -2.77207 -0.493572 1.15247 -2.44121 1.31334 10.0898 -0.00166396 9.9 9.49949e-07 -2.73175 0.910584 1.20792
R 9.67073 0.263883 -2.77119 -0.315722 1.21837 -2.63422 0.860463 9.50401 0.0833188 9.6 0.100001 -2.99544 0.998471 2.95795
R 9.38719 0.291127 -2.71659 -0.174294 1.22412 -2.67543 0.471091 9.22819 0.16178 9.35 0.15 -1.81963 0 7.73139
R 9.09227 0.282559 -2.71651 -0.0161661 1.25675 -2.71615 0.0439133 9.0729 0.205961 9.1 0.2 -3.00059 1.0002 0.882581
R 8.81884 0.26775 -2.72467 0.111825 1.25912 -2.70765 -0.304051 8.84168 0.257617 8.8 0.25 -2.99995 0 0.449839
R 8.54211 0.226914 -2.73816 0.233977 1.25258 -2.66355 -0.634837 8.43549 0.247188 8.5 0.25 -2.99323 0 0.472318
R 8.26146 0.164337 -2.74518 0.350131 1.23573 -2.57863 -0.941655 8.27073 0.243988 8.2 0.25 -2.73233 0 0.116323
R 7.9805 0.117943 -2.76376 0.424633 1.17477 -2.51831 -1.13863 7.98053 0.204587 7.9 0.2 -3.00234 -1.00078 1.57513
R 7.69582 0.0668908 -2.79606 0.472295 1.09056 -2.48997 -1.27202 7.46173 0.143776 7.6 0.15 -2.99599 0 5.83512
R 7.44061 -0.0622329 -2.78268 0.584404 1.09965 -2.32087 -1.53521 7.37986 0.0951006 7.35 0.100001 -1.81801 -0.908998 3.73265
R 7.16439 -0.142516 -2.77706 0.616981 1.0016 -2.26505 -1.60674 7.32548 -0.0461763 7.1 -0.0499995 -2.72495 -1.81664 4.66789
R 6.95791 -0.371339 -2.75822 0.787333 1.09099 -1.94658 -1.95413 7.09668 -0.264116 6.9 -0.25 -0.910634 -1.82126 12.8952
R 6.69434 -0.457435 -2.74518 0.762292 0.91462 -1.98546 -1.89577 6.64638 -0.45061 6.7 -0.45 -2.99641 -1.99759 19.3828
R 6.5251 -0.710301 -2.74806 0.915639 0.993966 -1.67435 -2.17908 6.50462 -0.718 6.5 -0.7 -0.909955 -2.72986 6.07078
R 6.39658 -0.990321 -2.7556 1.0754 1.06833 -1.30995 -2.42433 6.42547 -0.999512 6.4 -1 -0.999224 -2.99766 3.64878
R 6.2726 -1.26122 -2.76275 1.19034 1.06863 -1.02593 -2.56521 6.14523 -1.27904 6.3 -1.3 -0.908722 -2.72618 0.227849
R 6.26141 -1.62666 -2.8002 1.43099 1.23945 -0.390216 -2.77288 6.37158 -1.60661 6.3 -1.6 0.910485 -2.73144 20.4586
R 6.26248 -1.91734 -2.79094 1.57522 1.25734 0.0123513 -2.79091 6.34 -1.88987 6.35 -1.9 0 -2.99515 0.168137
R 6.28998 -2.19805 -2.77083 1.69789 1.23872 0.351207 -2.74848 6.45002 -2.23466 6.35 -2.2 0 -2.72856 1.27014
R 6.37204 -2.50359 -2.78066 1.85705 1.26871 0.785152 -2.66751 6.4979 -2.56206 6.4 -2.5 0.908227 -2.7247 0.961552
R 6.43466 -2.77069 -2.72826 1.93669 1.17939 0.976138 -2.54766 6.46538 -2.81029 6.45 -2.8 0 -2.72732 13.6768
R 6.54575 -3.05482 -2.74292 2.05984 1.17466 1.28857 -2.42141 6.36717 -3.08903 6.5 -3.15 0.909078 -3.6363 0.705754
R 6.66329 -3.34111 -2.75105 2.15011 1.12442 1.50606 -2.30219 6.6454 -3.53435 6.6 -3.5 0.910038 -2.7301 1.94473
R 6.78696 -3.62518 -2.7544 2.21848 1.05603 1.66184 -2.1966 6.70981 -3.81998 6.7 -3.8 0.908095 -2.7243 3.46417
R 6.90382 -3.90466 -2.74359 2.26088 0.969551 1.74658 -2.11583 6.72525 -4.05329 6.8 -4.1 0.910949 -2.73284 5.9923
R 7.00554 -4.17272 -2.71786 2.27573 0.868464 1.76113 -2.07006 6.86491 -4.32753 6.9 -4.35 0.999834 -1.99968 10.1433
R 7.14713 -4.42859 -2.75335 2.32273 0.833001 1.88068 -2.01097 6.99339 -4.57763 7.05 -4.6 1.99653 -2.99479 2.43765
R 7.27641 -4.67726 -2.74016 2.33969 0.759625 1.90536 -1.96929 7.11889 -4.79341 7.2 -4.85 1.00098 -2.00196 4.80265
R 7.42389 -4.92639 -2.72956 2.36145 0.698338 1.94022 -1.91992 7.28536 -5.09276 7.3 -5.1 0.90969 -2.72906 2.79899
R 7.58318 -5.19736 -2.77393 2.38199 0.653409 2.01141 -1.91021 7.33914 -5.30864 7.45 -5.4 1.99976 -2.99964 6.82863
R 7.7136 -5.4527 -2.75517 2.37074 0.573611 1.97633 -1.91966 7.59167 -5.64447 7.6 -5.65 1.00038 -2.00076 5.62655
R 7.87103 -5.71359 -2.77081 2.37582 0.522828 1.99734 -1.92043 7.6402 -5.81769 7.75 -5.9 1.81659 -2.7249 1.57093
R 8.01418 -5.98404 -2.73536 2.35598 0.442796 1.93378 -1.9346 7.91394 -6.15143 7.9 -6.15 0.909251 -1.81851 9.43931
R 8.17367 -6.25193 -2.73558 2.34863 0.389657 1.91967 -1.94892 8.10134 -6.38026 8.05 -6.35 1.81576 -1.81577 0.266544
R 8.35904 -6.50501 -2.75215 2.3592 0.361303 1.9519 -1.94021 8.3424 -6.65648 8.25 -6.6 1.8218 -2.73269 1.18125
R 8.52156 -6.7651 -2.74085 2.3466 0.310533 1.91938 -1.95659 8.46635 -6.96022 8.4 -6.9 0.907987 -2.72401 1.39772
R 8.66636 -7.04234 -2.73437 2.31618 0.249329 1.85459 -2.0093 8.53661 -7.23635 8.5 -7.2 0.910054 -2.73014 0.786815
R 8.79272 -7.32755 -2.74073 2.27379 0.186162 1.7719 -2.09093 8.70486 -7.48959 8.65 -7.45 1.99827 -1.99828 0.340957
R 8.92852 -7.58954 -2.7354 2.24699 0.14213 1.71189 -2.13351 8.74707 -7.66927 8.8 -7.7 0.909061 -2.72717 0.491998
R 9.05056 -7.87676 -2.72731 2.20334 0.0853921 1.61237 -2.19966 8.88546 -7.96913 8.9 -8 0.908945 -2.72688 0.636698
R 9.15799 -8.17515 -2.72346 2.15082 0.02682 1.49258 -2.27803 9.03618 -8.239 9.05 -8.25 1.82026 -1.82027 0.333284
R 9.29696 -8.43318 -2.7367 2.14134 0.020561 1.47806 -2.30323 9.14011 -8.44049 9.2 -8.5 0.999884 -2.99963 1.59885
R 9.39928 -8.6987 -2.74124 2.10663 -0.0116503 1.39957 -2.35703 9.23666 -8.722 9.3 -8.8 0.999284 -2.99783 0.343471
R 9.47775 -8.98877 -2.7229 2.04157 -0.0810024 1.23505 -2.42669 9.47823 -9.13791 9.4 -9.05 0.909209 -1.81844 5.24094
R 9.57469 -9.27345 -2.71846 2.00481 -0.107401 1.14316 -2.46642 9.44585 -9.27158 9.5 -9.3 0.908483 -2.72543 0.177783
R 9.6732 -9.56494 -2.72422 1.97309 -0.124097 1.06661 -2.50673 9.6872 -9.68187 9.6 -9.6 0.909756 -2.72927 0.161108
R 9.78407 -9.84858 -2.74691 1.96089 -0.115544 1.04459 -2.54055 9.86489 -10.0711 9.7 -9.9 0.998626 -2.99586 1.81715
R 9.82832 -10.139 -2.72934 1.89254 -0.179456 0.863087 -2.58928 9.75497 -10.1932 9.75 -10.2 0 -2.72857 4.16527
R 9.88316 -10.448 -2.73909 1.84772 -0.19826 0.748854 -2.63474 9.77249 -10.4428 9.8 -10.5 0.909773 -2.7293 0.292397
R 9.91033 -10.755 -2.72837 1.77879 -0.253373 0.563399 -2.66957 9.86786 -10.8223 9.85 -10.8 0 -2.72846 2.74439
R 9.92418 -11.0636 -2.7264 1.71618 -0.289789 0.394978 -2.69764 9.84594 -11.0973 9.85 -11.1 0 -2.72748 0.45125
R 9.93368 -11.3622 -2.73473 1.6685 -0.302136 0.266777 -2.72169 9.8926 -11.4631 9.85 -11.4 0 -2.9967 0.137206
R 9.89656 -11.6651 -2.73117 1.58047 -0.369483 0.0264177 -2.73104 9.89882 -11.7746 9.8 -11.7 -0.909227 -2.72768 3.49041
R 9.81238 -11.9789 -2.72857 1.47149 -0.447662 -0.270521 -2.71513 9.75738 -12.0161 9.7 -12 -0.908714 -2.72613 2.86958
R 9.67184 -12.2795 -2.72063 1.3288 -0.561368 -0.651976 -2.64135 9.60373 -12.2428 9.6 -12.25 -0.909639 -1.81931 7.22999
R 9.47305 -12.5408 -2.699 1.14879 -0.715689 -1.1055 -2.4622 9.38815 -12.3607 9.45 -12.45 -1.81824 -1.81825 15.1413
R 9.24823 -12.7737 -2.68704 0.966236 -0.856305 -1.52732 -2.21077 9.33568 -12.8372 9.2 -12.65 -2.72578 -1.81718 12.4513
R 9.01641 -12.9774 -2.67789 0.822595 -0.915637 -1.82183 -1.96266 8.97697 -12.8161 8.95 -12.85 -1.8192 -1.81921 1.36888
R 8.76813 -13.1481 -2.66795 0.682288 -0.963585 -2.07068 -1.68233 8.64934 -12.9537 8.7 -13.05 -2.72781 -1.81854 1.26811
R 8.42216 -13.2523 -2.69045 0.438123 -1.14807 -2.43634 -1.1414 8.43462 -13.263 8.4 -13.15 -2.72644 0 21.0465
R 8.11014 -13.2946 -2.69679 0.238167 -1.25412 -2.62066 -0.636232 8.10139 -13.1447 8.1 -13.15 -3.00083 0 6.53222
R 7.84254 -13.333 -2.67274 0.121923 -1.2266 -2.6529 -0.325063 7.85182 -13.2702 7.8 -13.2 -2.72553 -0.908516 1.64945
R 7.51671 -13.2995 -2.69563 -0.0754912 -1.30865 -2.68795 0.203303 7.51359 -13.2891 7.5 -13.2 -2.72544 0.908483 5.15149
R 7.23281 -13.2498 -2.67119 -0.206509 -1.28345 -2.61443 0.547713 7.19553 -13.2012 7.2 -13.15 -2.73035 0 1.98918
R 6.95007 -13.1725 -2.66251 -0.337766 -1.26533 -2.51207 0.882302 6.87644 -13.2105 6.85 -13.15 -3.63616 0 0.734305
R 6.67428 -13.1216 -2.62484 -0.400417 -1.15505 -2.41722 1.02317 6.53838 -13.1322 6.55 -13.15 -1.81656 0 16.3337
R 6.40084 -13.0271 -2.62637 -0.496546 -1.11599 -2.30919 1.25118 6.22921 -13.0167 6.3 -13.1 -2.73109 0.910352 0.999627
R 6.1292 -12.8658 -2.67229 -0.635237 -1.14183 -2.15101 1.58565 5.95573 -12.8757 6 -12.95 -2.72331 1.81553 3.97504
R 5.87241 -12.7273 -2.66693 -0.712466 -1.08312 -2.0182 1.74338 5.73901 -12.8863 5.7 -12.8 -2.72842 0.909475 2.98302
R 5.61062 -12.561 -2.70202 -0.797586 -1.05141 -1.88719 1.93376 5.41889 -12.699 5.4 -12.65 -2.731 1.82067 0.734478
R 5.34953 -12.3863 -2.72603 -0.860228 -0.995568 -1.77809 2.06631 5.16911 -12.5003 5.15 -12.45 -1.81821 1.8182 0.469394
R 5.11953 -12.2041 -2.7391 -0.917994 -0.947845 -1.66377 2.1759 4.99996 -12.3538 4.95 -12.25 -1.99957 1.99957 0.434968
R 4.92126 -12.022 -2.72363 -0.965168 -0.894137 -1.5505 2.23921 4.83937 -12.0439 4.8 -12.05 -0.999954 1.99993 3.78507
R 4.74941 -11.7787 -2.77209 -1.05588 -0.900953 -1.36515 2.41265 4.5839 -11.6711 4.65 -11.8 -2.0002 3.00031 6.04904
R 4.59911 -11.5572 -2.74539 -1.11696 -0.867762 -1.20363 2.46748 4.53697 -11.5591 4.55 -11.55 0 1.99749 6.4709
R 4.46574 -11.2746 -2.80022 -1.20857 -0.8693 -0.992283 2.61851 4.47604 -11.2089 4.5 -11.25 -0.90916 3.63662 12.3568
R 4.42909 -10.9763 -2.7452 -1.34293 -0.911313 -0.620146 2.67424 4.39651 -10.8822 4.45 -10.95 0 1.81807 15.5426
R 4.40151 -10.685 -2.72877 -1.45877 -0.929337 -0.305042 2.71167 4.493 -10.7885 4.45 -10.7 0 2.72865 0.472429
R 4.40792 -10.3859 -2.71975 -1.5788 -0.949051 0.0217756 2.71966 4.44399 -10.3941 4.45 -10.4 0 2.72392 0.216943
R 4.44854 -10.0906 -2.7116 -1.70064 -0.969335 0.351088 2.68877 4.53204 -10.1814 4.5 -10.1 0.909624 2.72885 0.384984
R 4.53546 -9.79382 -2.71207 -1.8319 -0.994642 0.700124 2.62015 4.57622 -9.70975 4.6 -9.8 0.909111 2.72733 0.208091
R 4.65697 -9.50992 -2.71396 -1.9621 -1.01511 1.0351 2.50881 4.76627 -9.54952 4.75 -9.5 1.81921 2.7288 0.186964
R 4.77946 -9.23278 -2.72009 -2.05277 -0.978067 1.26084 2.41023 4.89701 -9.18082 4.9 -9.2 0.908739 2.72622 2.05509
R 4.98424 -8.9782 -2.73178 -2.20413 -1.02709 1.61676 2.20198 5.09012 -9.06486 5.05 -8.95 1.8169 1.81688 1.18317
R 5.15394 -8.73086 -2.73262 -2.28297 -0.975937 1.78572 2.06843 5.21345 -8.63875 5.25 -8.7 1.8201 2.73014 2.15036
R 5.34463 -8.48878 -2.73906 -2.35575 -0.923202 1.93594 1.93767 5.35874 -8.39385 5.4 -8.45 0.910244 1.82049 2.06357
R 5.49423 -8.25733 -2.72052 -2.36622 -0.80001 1.9429 1.90431 5.63723 -8.3352 5.55 -8.2 1.99657 2.99484 11.824
R 5.70507 -8.0399 -2.73627 -2.436 -0.779481 2.08292 1.77444 5.69655 -7.84304 5.75 -7.95 1.81871 1.81872 0.57547
R 5.94045 -7.83314 -2.7486 -2.50996 -0.763973 2.21829 1.62295 5.9537 -7.7033 6 -7.75 2.72961 1.81974 0.247096
R 6.16379 -7.63297 -2.75258 -2.54424 -0.694404 2.27591 1.54819 6.27 -7.55894 6.25 -7.55 1.9992 1.9992 3.69481
R 6.39006 -7.43259 -2.75803 -2.56856 -0.624371 2.31746 1.49536 6.44365 -7.32743 6.45 -7.35 1.81911 1.81911 3.3131
R 6.62303 -7.24635 -2.76036 -2.59394 -0.566486 2.35666 1.43727 6.71635 -7.22979 6.65 -7.15 1.99996 1.99996 2.36173
R 6.85855 -7.07708 -2.77381 -2.63285 -0.547507 2.42252 1.35107 6.81357 -6.96259 6.85 -7 1.99752 0.998765 0.199432
R 7.09698 -6.90978 -2.77163 -2.65926 -0.50871 2.45543 1.28561 7.00414 -6.76319 7.1 -6.85 2.73035 1.82023 1.09854
R 7.33037 -6.72772 -2.76506 -2.65271 -0.425109 2.44115 1.29858 7.28718 -6.59248 7.35 -6.65 2.0006 2.0006 5.3772
R 7.56855 -6.57385 -2.77027 -2.67406 -0.40069 2.47298 1.24852 7.58756 -6.53752 7.55 -6.5 1.99841 0.999204 0.183794
R 7.83139 -6.46222 -2.78571 -2.74215 -0.4488 2.56641 1.08339 7.78988 -6.37954 7.8 -6.4 2.99952 0.999844 3.39372
R 8.05969 -6.28746 -2.75862 -2.70121 -0.321191 2.49542 1.17595 8.1289 -6.31809 8.05 -6.25 1.81603 1.81603 16.2744
R 8.29217 -6.09529 -2.7382 -2.65998 -0.217486 2.42672 1.26837 8.12981 -5.95044 8.25 -6.05 1.81921 1.81921 8.86804
R 8.56283 -5.96284 -2.74589 -2.70426 -0.256095 2.48746 1.16294 8.45627 -5.87026 8.5 -5.9 2.729 0.909673 2.66633
R 8.82061 -5.81211 -2.74172 -2.71057 -0.232332 2.49096 1.14549 8.68227 -5.68169 8.8 -5.75 2.72771 1.81848 0.570228
R 9.05039 -5.63097 -2.72914 -2.66566 -0.138555 2.42584 1.25041 8.99196 -5.51961 9.05 -5.55 1.99939 1.9994 7.02828
R 9.28737 -5.45589 -2.72797 -2.63862 -0.0864791 2.39012 1.31496 9.25593 -5.41629 9.25 -5.4 1.81776 0.90888 1.50284
R 9.48579 -5.22867 -2.69491 -2.53996 0.0648712 2.22172 1.52529 9.35852 -5.15981 9.45 -5.2 1.81545 2.72317 19.6371
R 9.70335 -5.01873 -2.69004 -2.49126 0.116818 2.14095 1.6287 9.60543 -4.91132 9.65 -4.95 1.82145 1.82147 1.59802
R 9.92032 -4.80914 -2.68945 -2.45054 0.150714 2.07243 1.71411 10.0131 -4.84834 9.85 -4.75 1.81961 1.8196 0.617499
R 10.1144 -4.60386 -2.68475 -2.41164 0.176487 2.0007 1.79028 9.99977 -4.51909 10.05 -4.55 1.99827 1.99828 0.417092
R 10.2855 -4.36706 -2.68033 -2.33952 0.244876 1.86342 1.92662 10.1567 -4.25503 10.25 -4.3 1.81693 2.72538 2.96823
R 10.483 -4.13842 -2.6859 -2.31372 0.233185 1.81687 1.97815 10.3715 -4.04505 10.45 -4.05 1.81951 1.8195 0.763996
R 10.678 -3.90364 -2.69368 -2.28585 0.228911 1.76613 2.03389 10.6459 -3.84961 10.65 -3.85 1.81817 1.81818 0.200301
R 10.867 -3.66821 -2.69792 -2.25636 0.231655 1.70807 2.08837 10.9384 -3.6422 10.85 -3.6 1.81621 2.7243 0.0618972
R 11.0066 -3.3812 -2.7031 -2.15788 0.331263 1.49735 2.25049 11.1095 -3.32648 11 -3.3 0.908632 2.72588 7.0594
R 11.1264 -3.08731 -2.70924 -2.07026 0.394922 1.29759 2.37828 10.9851 -2.95299 11.1 -3 0.909905 2.72971 2.36549
R 11.1824 -2.7565 -2.71573 -1.92147 0.537995 0.932941 2.55045 11.0233 -2.67356 11.15 -2.7 0 2.72847 13.2217
R 11.2202 -2.43703 -2.72083 -1.79638 0.626609 0.608577 2.6519 11.2541 -2.42353 11.15 -2.4 0 2.72732 4.58689
R 11.3344 -2.17275 -2.70751 -1.81846 0.485893 0.66372 2.6249 11.2203 -2.08998 11.25 -2.1 1.81905 2.72856 17.1394
R 11.3135 -1.83877 -2.72252 -1.65657 0.640598 0.233226 2.71252 11.3928 -1.80815 11.3 -1.8 -0.90916 2.72747 17.5708
R 11.3103 -1.52972 -2.72619 -1.57235 0.654336 0.00423495 2.72618 11.3341 -1.51762 11.25 -1.5 0 2.72321 0.0388188
R 11.2441 -1.21416 -2.73922 -1.43607 0.745646 -0.367942 2.7144 11.3121 -1.19064 11.2 -1.2 -0.909524 2.72856 6.09278
R 11.0925 -0.905017 -2.76095 -1.24617 0.898022 -0.880621 2.61674 10.9982 -0.893223 11.05 -0.9 -1.81945 2.72916 16.9106
R 10.9612 -0.618356 -2.76298 -1.13735 0.904271 -1.16047 2.50747 10.8362 -0.586 10.9 -0.599999 -0.909839 2.72952 0.203563
R 10.7179 -0.36217 -2.80958 -0.920574 1.07665 -1.70082 2.23628 10.6309 -0.359954 10.7 -0.349999 -2.99538 1.99692 26.151
R 10.5251 -0.139717 -2.79873 -0.816389 1.05751 -1.91673 2.03937 10.4322 -0.19515 10.45 -0.2 -1.81715 0.908579 1.04542
R 10.2621 0.0392616 -2.79938 -0.655984 1.11529 -2.21837 1.70745 10.2068 -0.109014 10.2 -0.0999996 -2.73194 0.910655 2.44566
R 9.97572 0.167453 -2.78591 -0.496998 1.15748 -2.44887 1.32829 9.89001 0.00816785 9.9 9.49949e-07 -2.72481 0.908268 0.753656
R 9.7014 0.255028 -2.7289 -0.368899 1.14719 -2.54531 0.98401 9.63528 0.0929341 9.65 0.100001 -1.81759 0.908784 6.22679
R 9.41468 0.305434 -2.70682 -0.221743 1.1717 -2.64055 0.595311 9.56225 0.148577 9.4 0.15 -2.72659 0 0.56684
R 9.11179 0.301819 -2.6877 -0.06099 1.20467 -2.68271 0.163822 9.0821 0.207802 9.1 0.2 -2.72925 0.909752 0.174674
R 8.8134 0.285855 -2.68964 0.0819246 1.21547 -2.68062 -0.220101 8.94634 0.26481 8.8 0.25 -2.72783 0 0.367493
R 8.51579 0.241511 -2.69672 0.216651 1.215 -2.63368 -0.579688 8.46866 0.251128 8.5 0.25 -2.72395 0 0.106114
R 8.22841 0.182565 -2.70692 0.331701 1.19433 -2.55937 -0.881513 8.28111 0.196974 8.2 0.2 -2.73278 -0.910929 0.452696
R 7.91862 0.176698 -2.77733 0.358405 1.06824 -2.60085 -0.974235 7.74678 0.154598 7.85 0.15 -3.9992 0 26.3375
R 7.66061 0.0500453 -2.77028 0.484853 1.09441 -2.45098 -1.29117 7.61982 0.157052 7.55 0.15 -1.99868 0 3.57738
R 7.37968 -0.0110184 -2.78042 0.525159 1.01207 -2.40574 -1.39396 7.34481 0.102019 7.3 0.100001 -2.72622 -0.908734 2.504
R 7.11889 -0.152339 -2.77101 0.625204 1.00932 -2.24686 -1.62177 7.05721 -0.05037 7.05 -0.0499995 -1.81887 -1.81889 1.22215
R 6.88157 -0.326783 -2.76198 0.731702 1.01294 -2.05502 -1.84538 6.87212 -0.243254 6.85 -0.25 -1.81792 -1.81792 0.705779
R 6.66117 -0.509836 -2.75091 0.81933 0.98888 -1.87808 -2.01006 6.75759 -0.453523 6.65 -0.45 -1.81881 -1.8188 0.325182
R 6.47315 -0.735231 -2.74377 0.928634 0.99272 -1.64332 -2.19722 6.60439 -0.765755 6.45 -0.749999 -1.81971 -3.63941 0.244557
R 6.38509 -1.08178 -2.77384 1.16466 1.16763 -1.09583 -2.5482 6.35624 -1.10001 6.35 -1.1 0 -2.99522 20.015
R 6.31957 -1.39433 -2.79106 1.33845 1.23509 -0.642664 -2.71606 6.30992 -1.40686 6.35 -1.4 0 -2.72817 2.68339
R 6.30015 -1.71042 -2.7921 1.50208 1.26957 -0.191701 -2.78552 6.50996 -1.74616 6.35 -1.7 0 -2.72628 0.96776
R 6.29432 -2.0003 -2.77069 1.62015 1.23682 0.136686 -2.76732 6.38849 -2.02257 6.35 -2 0 -2.73154 1.53994
R 6.30891 -2.27378 -2.74822 1.71985 1.1921 0.408128 -2.71775 6.28921 -2.27735 6.35 -2.3 0 -2.99503 2.48664
R 6.37828 -2.56517 -2.76343 1.85921 1.2126 0.786004 -2.64929 6.30986 -2.56694 6.4 -2.6 0.909996 -2.73 0.759924
R 6.48031 -2.85513 -2.76561 1.98767 1.20394 1.11981 -2.52876 6.40481 -2.87127 6.5 -2.9 0.907593 -2.72277 0.162781
R 6.5923 -3.12716 -2.74037 2.08314 1.1482 1.34338 -2.3885 6.70928 -3.32749 6.55 -3.25 0 -3.63617 6.35764
R 6.69975 -3.42072 -2.75778 2.16119 1.09129 1.53522 -2.29095 6.52015 -3.55608 6.6 -3.6 0.909673 -2.72901 1.54164
R 6.82258 -3.7048 -2.7663 2.22736 1.02934 1.68854 -2.19117 6.71319 -3.83967 6.75 -3.85 1.81795 -1.81795 1.60913
R 7.00055 -3.97168 -2.79039 2.31685 1.00308 1.89396 -2.04919 7.13409 -4.28567 6.9 -4.15 0.909979 -3.63994 1.61654
R 7.13248 -4.2447 -2.76576 2.34379 0.907651 1.93128 -1.97979 7.00947 -4.44692 7 -4.45 0.907889 -1.81577 10.13
R 7.28221 -4.51874 -2.77172 2.3752 0.837228 1.99679 -1.92231 7.15974 -4.75194 7.1 -4.7 0.909789 -2.72935 1.55766
R 7.43872 -4.80333 -2.80542 2.3997 0.775692 2.06812 -1.89558 7.2829 -5.01975 7.25 -5 1.81861 -2.72792 3.38416
R 7.57317 -5.07548 -2.79762 2.39666 0.690824 2.05663 -1.89657 7.32687 -5.26205 7.4 -5.3 0.999994 -3 2.49728
R 7.70479 -5.35468 -2.78964 2.38233 0.604433 2.02345 -1.92035 7.47116 -5.48352 7.55 -5.55 1.81749 -1.81749 1.99817
R 7.86163 -5.62598 -2.77314 2.37895 0.533453 2.00501 -1.91579 7.78563 -5.85993 7.7 -5.8 0.909598 -2.72881 3.21839
R 8.0018 -5.90872 -2.76074 2.35851 0.456401 1.95665 -1.94762 7.79663 -6.00945 7.85 -6.05 1.81964 -1.81964 1.31342
R 8.17931 -6.16904 -2.79943 2.36921 0.426658 2.00509 -1.95357 8.04642 -6.30578 8.05 -6.3 1.99731 -2.99599 6.76378
R 8.31981 -6.42456 -2.76228 2.34697 0.357382 1.93512 -1.97117 8.26534 -6.60186 8.2 -6.55 0.909111 -1.81822 8.50786
R 8.47036 -6.68589 -2.74693 2.33124 0.304512 1.8933 -1.99023 8.2109 -6.73286 8.3 -6.8 0.90845 -2.72534 1.04974
R 8.63127 -6.97071 -2.7718 2.31291 0.259942 1.87332 -2.04292 8.61718 -7.22425 8.45 -7.1 1.81925 -2.72886 3.19049
R 8.77557 -7.25366 -2.7697 2.28217 0.204191 1.80826 -2.09796 8.69659 -7.3744 8.65 -7.35 1.82079 -1.8208 0.229323
R 8.92831 -7.5027 -2.77209 2.27248 0.176926 1.78941 -2.1172 8.68846 -7.50096 8.8 -7.6 0.997629 -2.99288 1.039
R 9.03651 -7.7743 -2.74453 2.22333 0.108799 1.66649 -2.18065 8.77486 -7.80083 8.9 -7.9 0.909127 -2.72742 3.60655
R 9.12572 -8.07342 -2.71412 2.1531 0.0242024 1.49264 -2.26682 8.92041 -8.11276 9 -8.15 0.909524 -1.81903 5.36665
R 9.2284 -8.35094 -2.69896 2.11042 -0.0196185 1.38677 -2.31544 9.04724 -8.3711 9.1 -8.4 0.909922 -2.72975 0.485269
R 9.35551 -8.62756 -2.71623 2.09546 -0.0220939 1.36061 -2.35088 9.09379 -8.55764 9.25 -8.7 1.8169 -2.72535 4.15776
R 9.47124 -8.91535 -2.72076 2.06351 -0.0491714 1.28697 -2.39712 9.40597 -8.96981 9.4 -9 0.910219 -2.73069 0.0596287
R 9.57369 -9.20314 -2.73463 2.02926 -0.0736253 1.21026 -2.45223 9.51544 -9.36256 9.5 -9.3 0.997908 -2.99371 0.290016
R 9.63596 -9.49558 -2.71699 1.96055 -0.142871 1.03234 -2.51322 9.62712 -9.59131 9.6 -9.55 0.909971 -1.81993 5.8572
R 9.70417 -9.78529 -2.70079 1.90647 -0.187956 0.889654 -2.55006 9.63311 -9.79216 9.65 -9.8 0 -2.73054 2.37923
R 9.78169 -10.0642 -2.71324 1.88558 -0.178882 0.840046 -2.57992 9.61381 -9.98703 9.7 -10.1 0.998187 -2.99456 2.20218
R 9.85001 -10.3503 -2.72272 1.85641 -0.183925 0.767115 -2.61242 9.7698 -10.3568 9.8 -10.4 0.909806 -2.7294 0.317413
R 9.8935 -10.6493 -2.71253 1.80019 -0.227413 0.616789 -2.64147 9.8617 -10.6917 9.85 -10.7 0 -2.72718 2.18423
R 9.90336 -10.9549 -2.70487 1.72977 -0.277932 0.428194 -2.67077 9.75887 -10.887 9.85 -11 0 -2.72395 1.66029
R 9.91835 -11.2612 -2.70789 1.67737 -0.299897 0.288036 -2.69253 9.87245 -11.316 9.85 -11.3 0 -2.73074 0.129008
R 9.91636 -11.5705 -2.71311 1.62235 -0.321377 0.139817 -2.7095 9.93142 -11.6783 9.85 -11.6 0 -2.72396 0.183566
R 9.88019 -11.8609 -2.70951 1.55113 -0.363365 -0.0532963 -2.70899 9.80843 -11.8702 9.8 -11.9 -1.0005 -3.00149 0.808112
R 9.717 -12.1502 -2.68546 1.36386 -0.553356 -0.551775 -2.62817 9.64519 -12.1714 9.65 -12.15 -1.81799 -1.81797 28.5427
R 9.59251 -12.4301 -2.67963 1.25932 -0.602363 -0.821219 -2.55069 9.47014 -12.4324 9.45 -12.4 -1.82042 -2.73064 0.783885
R 9.36714 -12.6836 -2.68031 1.07617 -0.747725 -1.27235 -2.35907 9.21558 -12.6577 9.25 -12.65 -1.99893 -1.99891 10.2488
R 9.15298 -12.9086 -2.68134 0.932727 -0.825428 -1.59713 -2.15378 9.08802 -12.8351 9.05 -12.85 -1.81839 -1.81839 2.58695
R 8.84157 -13.0886 -2.69113 0.698102 -1.01598 -2.06157 -1.72976 8.8569 -13.1416 8.8 -13 -2.72942 -0.909806 22.0235
R 8.58936 -13.2303 -2.6829 0.564902 -1.04556 -2.26609 -1.43625 8.49168 -13.0983 8.5 -13.15 -2.99987 -1.99991 0.442372
R 8.25184 -13.2928 -2.71104 0.329705 -1.21579 -2.56502 -0.877737 8.19421 -13.0768 8.25 -13.2 -1.99797 0.998985 18.9286
R 7.99168 -13.3462 -2.68449 0.208758 -1.20465 -2.62621 -0.556348 7.9999 -13.1879 8 -13.2 -2.73 -0.910004 0.818618
R 7.6711 -13.331 -2.69381 0.0132648 -1.28744 -2.69358 -0.0357319 7.57824 -13.1345 7.65 -13.25 -3.62957 0 4.85147
R 7.37254 -13.31 -2.68505 -0.1219 -1.27101 -2.66513 0.326499 7.33241 -13.2718 7.3 -13.25 -2.7301 0 0.769621
R 7.07537 -13.2566 -2.67931 -0.250138 -1.24662 -2.59593 0.66323 6.97467 -13.1643 7 -13.25 -2.72703 0 1.36757
R 6.78308 -13.1728 -2.69088 -0.382813 -1.23944 -2.49611 1.00513 6.7113 -13.2598 6.7 -13.2 -2.72742 0.909144 0.0787708
R 6.49987 -13.0485 -2.7052 -0.515778 -1.23254 -2.35328 1.33423 6.32545 -12.9303 6.4 -13.1 -2.72703 0.908994 0.445395
R 6.23126 -12.9276 -2.70058 -0.610907 -1.18053 -2.21212 1.54908 6.09711 -13.0021 6.1 -13 -2.73 0.910004 2.33218
R 5.97639 -12.814 -2.69972 -0.68048 -1.11783 -2.09842 1.69858 5.89343 -13.1185 5.8 -12.9 -2.99275 0.99759 2.62157
R 5.7212 -12.6718 -2.7156 -0.753453 -1.06902 -1.98057 1.85791 5.61778 -12.8527 5.55 -12.75 -1.81993 1.81994 0.61288
R 5.4781 -12.4802 -2.73055 -0.842397 -1.03667 -1.81766 2.03765 5.30917 -12.4284 5.35 -12.55 -1.81722 1.81723 0.402433
R 5.25299 -12.2744 -2.74951 -0.928224 -1.01067 -1.64766 2.20114 5.13338 -12.4375 5.1 -12.35 -2.72881 1.8192 0.501929
R 5.02615 -12.0871 -2.73351 -0.974614 -0.941439 -1.53483 2.26194 4.92993 -12.1787 4.9 -12.15 -0.908467 1.81693 4.03049
R 4.83219 -11.8417 -2.77186 -1.06166 -0.931406 -1.35107 2.4203 4.79909 -11.9738 4.75 -11.9 -1.81788 2.72684 4.31159
R 4.66737 -11.5993 -2.74652 -1.13151 -0.896318 -1.16806 2.48576 4.50688 -11.4491 4.6 -11.65 -0.909963 1.81994 5.5762
R 4.50957 -11.3226 -2.7938 -1.20901 -0.877457 -0.988851 2.61295 4.46798 -11.2692 4.5 -11.35 -0.909227 3.63689 7.78268
R 4.44238 -11.045 -2.74756 -1.31805 -0.893743 -0.687065 2.66027 4.47788 -11.2387 4.4 -11.05 -0.909227 1.81845 7.7092
R 4.39385 -10.7639 -2.71483 -1.4247 -0.908827 -0.395205 2.68591 4.34978 -10.6819 4.4 -10.8 0.9094 2.72823 4.71674
R 4.36516 -10.4842 -2.71926 -1.51497 -0.900393 -0.151741 2.71503 4.45256 -10.5332 4.45 -10.5 0 3.00113 0.902273
R 4.40049 -10.2025 -2.71717 -1.63483 -0.93152 0.173868 2.7116 4.41237 -9.99477 4.5 -10.2 0.999524 2.99855 0.806161
R 4.47307 -9.92166 -2.72252 -1.7563 -0.956052 0.50214 2.67581 4.57379 -9.85032 4.6 -9.9 0.998267 2.9948 0.165361
R 4.65319 -9.63566 -2.7218 -1.95867 -1.0757 1.02944 2.51961 4.60255 -9.4741 4.7 -9.65 0.910924 1.82187 7.80025
R 4.75147 -9.36651 -2.71853 -2.04096 -1.0278 1.23159 2.42355 4.78405 -9.35872 4.8 -9.4 0.908038 2.72411 1.78167
R 4.88817 -9.10287 -2.71927 -2.13298 -0.996329 1.44947 2.30076 4.92663 -9.02336 4.95 -9.1 1.81851 2.72776 1.04639
R 5.04148 -8.84275 -2.72315 -2.20943 -0.946101 1.62327 2.18644 5.1055 -8.72095 5.15 -8.8 1.8165 2.72476 2.38695
R 5.27127 -8.60679 -2.74359 -2.33953 -0.971199 1.90741 1.97207 5.34134 -8.5519 5.35 -8.55 1.82159 1.8216 0.329926
R 5.45048 -8.36836 -2.73785 -2.38466 -0.882673 1.99027 1.88007 5.5122 -8.36624 5.5 -8.35 0.908054 1.81609 5.67374
R 5.6998 -8.18294 -2.74727 -2.51007 -0.923575 2.2174 1.62193 5.72065 -8.20245 5.7 -8.15 2.72851 1.81902 1.44884
R 5.884 -7.95648 -2.72231 -2.50776 -0.774067 2.19354 1.61225 5.99121 -7.96746 5.95 -7.9 1.81851 2.72777 16.9215
R 6.1129 -7.74817 -2.72619 -2.54726 -0.707768 2.25871 1.52655 6.20599 -7.74241 6.15 -7.65 1.81739 1.81739 2.89723
R 6.33675 -7.54058 -2.7227 -2.5668 -0.625441 2.28518 1.48022 6.39144 -7.48582 6.35 -7.45 1.8164 1.81641 4.45869
R 6.58358 -7.35843 -2.72837 -2.61256 -0.59592 2.35538 1.37701 6.66209 -7.31025 6.6 -7.25 2.72856 1.81904 0.431997
R 6.81857 -7.15176 -2.72851 -2.61144 -0.503282 2.35396 1.37972 6.83447 -7.05268 6.85 -7.05 1.82066 1.82066 5.36341
R 7.07354 -6.98727 -2.74486 -2.65255 -0.491024 2.42311 1.28949 7.13677 -6.88363 7.1 -6.85 2.99976 1.99984 0.17353
R 7.26855 -6.75939 -2.73457 -2.58397 -0.324923 2.32033 1.44705 7.234 -6.59525 7.3 -6.65 0.999384 1.99876 20.0674
R 7.55483 -6.64215 -2.76558 -2.68846 -0.437441 2.48648 1.21073 7.67988 -6.69347 7.5 -6.55 2.72747 0 13.7557
R 7.79468 -6.47684 -2.74043 -2.67476 -0.345414 2.4472 1.23336 7.94309 -6.60222 7.75 -6.45 1.81623 1.81624 8.67084
R 8.05993 -6.34024 -2.74302 -2.71941 -0.364404 2.50217 1.12397 7.9171 -6.24084 8 -6.3 2.72648 0.908821 1.21567
R 8.27768 -6.14444 -2.70521 -2.65208 -0.20903 2.38751 1.27199 8.37368 -6.18614 8.25 -6.1 1.81898 2.72847 22.6183
R 8.55166 -6.00698 -2.72201 -2.70065 -0.258358 2.46165 1.16173 8.43843 -5.87193 8.5 -5.9 2.72647 0.90883 4.3317
R 8.78679 -5.8242 -2.70546 -2.6613 -0.166312 2.39937 1.25003 8.76652 -5.75601 8.75 -5.75 1.81789 1.81788 6.47732
R 9.03988 -5.66106 -2.70976 -2.66602 -0.158907 2.40906 1.24065 8.91028 -5.47662 9 -5.55 2.73109 1.82073 0.684007
R 9.27618 -5.46227 -2.70497 -2.61898 -0.0753263 2.34391 1.35017 9.32693 -5.39468 9.25 -5.35 1.81796 1.81795 4.34172
R 9.52231 -5.28217 -2.71093 -2.61055 -0.0645072 2.33757 1.3729 9.4498 -5.12838 9.5 -5.15 2.72905 1.81937 0.39102
R 9.7176 -5.03774 -2.70107 -2.50381 0.0897539 2.17009 1.60826 9.83281 -5.00671 9.7 -4.95 0.998666 1.99733 17.4181
R 9.92147 -4.82751 -2.70001 -2.45624 0.135833 2.09034 1.70895 9.96003 -4.81123 9.85 -4.75 1.8172 1.81719 0.925553
R 10.1264 -4.61163 -2.69513 -2.41447 0.167104 2.01351 1.79151 10.1357 -4.58625 10.05 -4.55 1.82042 1.82043 0.351111
R 10.3589 -4.42577 -2.70175 -2.43294 0.105156 2.05128 1.75832 10.3185 -4.35401 10.3 -4.35 2.72672 1.81782 4.63192
R 10.5142 -4.14809 -2.68887 -2.305 0.26792 1.80153 1.99612 10.5826 -4.11494 10.5 -4.1 0.908764 2.72628 20.9265
R 10.6878 -3.89462 -2.69231 -2.24687 0.300423 1.68468 2.1001 10.6618 -3.80699 10.65 -3.8 1.8194 2.72911 0.409688
R 10.88 -3.64884 -2.70658 -2.22778 0.270364 1.65299 2.14318 10.8593 -3.57039 10.85 -3.55 1.81839 1.81838 1.43909
R 11.0104 -3.36361 -2.70652 -2.12931 0.364304 1.43426 2.29524 11.0183 -3.30073 11 -3.3 0.908549 2.72563 6.00329
R 11.1334 -3.07189 -2.71436 -2.04661 0.418912 1.24335 2.41285 11.1054 -3.01303 11.1 -3 0.90888 2.72663 1.65967
R 11.2373 -2.77493 -2.72136 -1.97053 0.453358 1.05908 2.50682 11.1758 -2.69942 11.2 -2.7 0.909988 2.72995 0.511632
R 11.2442 -2.42512 -2.73424 -1.77989 0.654588 0.567558 2.67468 11.4549 -2.46508 11.2 -2.4 -0.907692 2.72306 29.4999
R 11.3633 -2.16303 -2.71754 -1.81413 0.490163 0.654771 2.63748 11.2202 -2.08176 11.25 -2.1 1.8219 2.73284 23.1932
R 11.3812 -1.84881 -2.72318 -1.71184 0.553792 0.382823 2.69614 11.224 -1.77367 11.35 -1.8 0 2.71996 2.68504
R 11.3906 -1.53633 -2.73029 -1.62869 0.580115 0.157965 2.72572 11.2691 -1.47976 11.35 -1.5 0 2.73448 0.507625
R 11.2761 -1.21855 -2.75008 -1.42742 0.781626 -0.392955 2.72186 11.2507 -1.19626 11.25 -1.2 -1.99969 2.99952 29.8816
R 11.1697 -0.934881 -2.76174 -1.29748 0.847748 -0.745474 2.65923 10.9697 -0.89157 11.1 -0.9 -1.00006 3.00024 3.00584
R 11.0159 -0.647836 -2.7843 -1.14266 0.937386 -1.15596 2.533 10.8913 -0.655608 10.95 -0.649999 -1.81771 1.81773 6.31117
R 10.8404 -0.38578 -2.78431 -1.01005 0.9746 -1.48076 2.35791 10.827 -0.442912 10.75 -0.45 -1.81842 1.8184 0.725224
R 10.5752 -0.175524 -2.81479 -0.798857 1.12738 -1.96339 2.01697 10.5503 -0.248633 10.5 -0.25 -3.00035 2.00024 21.1749
R 10.3466 -0.00766637 -2.79848 -0.672454 1.13812 -2.18924 1.74319 10.2559 -0.115293 10.25 -0.0999996 -2.00053 1.00027 0.337139
R 10.1132 0.128918 -2.7685 -0.557965 1.13132 -2.34861 1.46581 10.0717 0.0165033 10.05 9.49949e-07 -2.00009 1.00004 1.62879
R 9.83831 0.220139 -2.77005 -0.401723 1.18496 -2.54952 1.0831 9.8478 0.0279858 9.8 0.0500011 -3.00025 0 2.55357
R 9.54975 0.256542 -2.76271 -0.23798 1.23722 -2.68484 0.651281 9.54682 0.0997641 9.5 0.100001 -2.99825 0.999409 1.51698
R 9.27364 0.260199 -2.70125 -0.0941705 1.24721 -2.68928 0.254003 9.46113 0.134275 9.25 0.15 -1.81915 0 9.05583
R 8.98344 0.24804 -2.71135 0.0530342 1.26511 -2.70754 -0.143727 9.02723 0.196492 9 0.2 -2.99927 0.99976 1.49693
R 8.70584 0.231213 -2.73288 0.1709 1.25389 -2.69306 -0.464778 8.67607 0.247512 8.7 0.25 -2.99873 0 1.28772
R 8.42722 0.179364 -2.7339 0.292106 1.2434 -2.61809 -0.787282 8.44086 0.253341 8.4 0.25 -2.72872 0 0.112992
R 8.14345 0.141771 -2.75713 0.373151 1.1877 -2.56739 -1.00511 8.1066 0.204722 8.1 0.2 -2.99909 -0.9997 2.09592
R 7.86994 0.0811786 -2.78233 0.442483 1.12759 -2.51437 -1.19135 7.82213 0.149733 7.8 0.15 -3.00101 0 3.89052
R 7.59109 -0.00293898 -2.7915 0.516631 1.08041 -2.42718 -1.37888 7.49691 0.101076 7.5 0.100001 -2.72698 -0.908982 0.650071
R 7.33004 -0.126105 -2.78848 0.608902 1.06505 -2.28732 -1.59492 7.13188 -0.00451978 7.25 9.49949e-07 -2.00032 -1.00016 1.37181
R 7.04724 -0.207554 -2.79787 0.626794 0.953986 -2.26603 -1.64109 6.77233 -0.150907 7 -0.15 -2.72534 -1.8169 7.39359
R 6.84587 -0.431986 -2.78633 0.785484 1.03956 -1.97006 -1.9704 6.70421 -0.347264 6.8 -0.349999 -1.00053 -2.00107 11.295
R 6.64508 -0.595826 -2.77431 0.851768 0.996162 -1.82731 -2.08752 6.72084 -0.550555 6.65 -0.549999 -2.00016 -2.00017 0.994523
R 6.49904 -0.837704 -2.77214 0.996299 1.06428 -1.50641 -2.32711 6.40088 -0.78131 6.5 -0.799999 -0.999884 -2.99963 5.02349
R 6.38313 -1.10065 -2.77666 1.136 1.11013 -1.1696 -2.51831 6.37766 -1.0844 6.4 -1.1 -0.999984 -2.99995 1.47677
R 6.33255 -1.40403 -2.79105 1.31769 1.20613 -0.698906 -2.70212 6.5527 -1.43518 6.35 -1.4 0 -3.00132 6.69634
R 6.28579 -1.68837 -2.78408 1.44604 1.20725 -0.346422 -2.76244 6.33086 -1.6454 6.35 -1.65 0 -1.81769 0.106065
R 6.30062 -1.98237 -2.78357 1.60577 1.25559 0.0973243 -2.78186 6.34522 -1.96081 6.35 -1.95 0 -3.99976 1.69333
R 6.30339 -2.24101 -2.75817 1.69367 1.19999 0.338045 -2.73738 6.3068 -2.23646 6.35 -2.25 0 -2 3.90414
R 6.33255 -2.49798 -2.73657 1.78549 1.15973 0.583012 -2.67374 6.26272 -2.46894 6.35 -2.5 0 -3 2.33852
R 6.41524 -2.76785 -2.74969 1.91464 1.17922 0.926955 -2.58874 6.38105 -2.77438 6.4 -2.8 1.00003 -3.00012 0.741687
R 6.53421 -3.03924 -2.78036 2.04898 1.20292 1.27942 -2.46849 6.50958 -3.15806 6.5 -3.15 0.999284 -3.99712 2.35417
R 6.63754 -3.31359 -2.77018 2.12486 1.13245 1.45752 -2.35575 6.64067 -3.46888 6.6 -3.45 0.909922 -1.81983 6.83165
R 6.77532 -3.57762 -2.78617 2.21935 1.10552 1.68294 -2.22046 6.70376 -3.69784 6.7 -3.7 0.999335 -2.99802 0.400922
R 6.90281 -3.83501 -2.7788 2.2803 1.04166 1.81027 -2.10824 6.65288 -3.91672 6.8 -4 0.908549 -2.72564 3.82659
R 7.03169 -4.10081 -2.74705 2.31724 0.949856 1.86534 -2.01662 6.9447 -4.2991 6.9 -4.25 0.90988 -1.81977 12.7002
R 7.17459 -4.3505 -2.74769 2.36149 0.892464 1.95316 -1.9326 7.09552 -4.55441 7 -4.5 1.00008 -3.00024 1.26388
R 7.32047 -4.61097 -2.79011 2.39505 0.842267 2.04806 -1.89477 7.25009 -4.88515 7.15 -4.8 1.99984 -2.99976 4.61012
R 7.45155 -4.85418 -2.76233 2.40283 0.760274 2.0422 -1.86007 7.33292 -5.08676 7.3 -5.05 1.00028 -2.00057 8.68343
R 7.58594 -5.1011 -2.75395 2.40734 0.688531 2.04435 -1.84523 7.40318 -5.28627 7.4 -5.3 1.00003 -3.00012 1.96005
R 7.71394 -5.36244 -2.75733 2.39759 0.612762 2.02874 -1.86737 7.46878 -5.50726 7.55 -5.55 1.99961 -1.99961 1.09293
R 7.85775 -5.60648 -2.74776 2.39556 0.549441 2.01791 -1.865 7.70927 -5.8194 7.7 -5.8 1.00027 -3.00084 1.96185
R 7.99089 -5.86844 -2.75228 2.37777 0.480211 1.98769 -1.90372 7.71187 -5.94164 7.85 -6.05 1.9998 -1.99981 1.0401
R 8.1254 -6.10972 -2.72419 2.36173 0.414642 1.93692 -1.91561 8.03808 -6.28852 8 -6.25 1.00004 -2.00008 5.60782
R 8.28316 -6.35784 -2.75324 2.36069 0.378775 1.95555 -1.93807 8.1071 -6.48544 8.15 -6.5 1.99951 -2.99928 5.25429
R 8.41847 -6.61445 -2.71626 2.33158 0.307732 1.87284 -1.96737 8.38768 -6.7963 8.3 -6.75 0.90921 -1.81841 10.3087
R 8.56625 -6.87899 -2.72434 2.31119 0.259072 1.83778 -2.01112 8.58336 -7.15056 8.4 -7 1.0001 -3.00036 0.905791
R 8.68759 -7.13598 -2.72532 2.2804 0.207651 1.77563 -2.06749 8.37938 -7.10665 8.55 -7.25 1.99907 -1.99908 1.31709
R 8.83075 -7.38384 -2.73535 2.26527 0.175772 1.75057 -2.10182 8.76035 -7.57239 8.7 -7.5 1.00058 -3.00174 0.704479
R 8.92968 -7.64647 -2.70808 2.2133 0.104528 1.62268 -2.16809 8.81118 -7.75569 8.8 -7.75 0.999844 -1.99968 5.31814
R 9.0828 -7.90077 -2.75102 2.21302 0.10597 1.6478 -2.20292 9.07758 -8.1058 8.95 -8 2.00017 -3.00024 10.5089
R 9.18412 -8.16296 -2.73065 2.16581 0.0460683 1.53058 -2.26137 9.05557 -8.22632 9.1 -8.25 0.998905 -1.99781 3.86379
R 9.30517 -8.4295 -2.72481 2.1348 0.0103912 1.45662 -2.30279 9.30408 -8.57245 9.2 -8.5 0.908863 -2.72657 0.724806
R 9.40856 -8.71325 -2.71422 2.09107 -0.0334461 1.34928 -2.35509 9.15971 -8.67035 9.3 -8.8 0.909475 -2.72841 1.02654
R 9.5255 -8.99813 -2.73995 2.06729 -0.0440478 1.30516 -2.40912 9.47941 -9.179 9.4 -9.1 0.999765 -2.99935 2.43685
R 9.57988 -9.29322 -2.71814 1.99091 -0.121543 1.10862 -2.48178 9.53008 -9.38561 9.5 -9.35 0.908962 -1.81792 5.59036
R 9.67429 -9.57542 -2.71243 1.95828 -0.141244 1.02492 -2.51134 9.60163 -9.60777 9.6 -9.6 0.909988 -2.72996 0.171861
R 9.76156 -9.8687 -2.72021 1.92796 -0.15058 0.951039 -2.54854 9.69155 -9.89958 9.7 -9.9 0.907972 -2.7239 0.350444
R 9.81096 -10.1628 -2.70232 1.86668 -0.202266 0.787945 -2.58489 9.69256 -10.1336 9.75 -10.2 0 -2.73106 2.80658
R 9.89439 -10.449 -2.72737 1.85881 -0.17359 0.774698 -2.61504 9.82419 -10.5024 9.8 -10.5 1 -2.99999 4.30191
R 9.93085 -10.7306 -2.73144 1.81236 -0.200799 0.653413 -2.65214 9.825 -10.7525 9.85 -10.8 0 -3.00047 0.125295
R 9.90346 -11.0234 -2.71403 1.70792 -0.30224 0.370999 -2.68855 9.91881 -11.1448 9.85 -11.05 0 -1.9956 10.1967
R 9.90705 -11.3079 -2.70673 1.65119 -0.331072 0.217363 -2.69799 9.83851 -11.2795 9.85 -11.3 0 -2.73184 0.511443
R 9.8882 -11.594 -2.70119 1.5873 -0.363633 0.0445877 -2.70082 9.79654 -11.5922 9.8 -11.6 -0.998566 -2.9957 0.501725
R 9.85785 -11.8873 -2.7082 1.5336 -0.372718 -0.100707 -2.70633 9.77699 -11.9246 9.75 -11.9 0 -2.73084 0.190953
R 9.75449 -12.1848 -2.69549 1.41206 -0.472644 -0.426077 -2.6616 9.63743 -12.1863 9.65 -12.2 -1.81598 -2.72396 7.3019
R 9.55148 -12.4715 -2.68443 1.21819 -0.650166 -0.927058 -2.51927 9.47157 -12.4849 9.45 -12.45 -1.82132 -1.82133 19.8862
R 9.3732 -12.7327 -2.68142 1.09067 -0.71475 -1.23852 -2.37825 9.32154 -12.7193 9.3 -12.65 -0.908038 -1.81606 1.41488
R 9.09091 -12.9362 -2.66971 0.859803 -0.918314 -1.74222 -2.02288 9.04013 -12.7738 9.05 -12.85 -3.64007 -1.82004 26.2575
R 8.80375 -13.1013 -2.67649 0.667356 -1.03777 -2.10228 -1.65651 8.73518 -13.0163 8.75 -13 -1.9959 -0.997948 6.72669
R 8.51507 -13.2139 -2.67631 0.483517 -1.13569 -2.36951 -1.2442 8.48414 -13.0416 8.5 -13.1 -2.73149 -0.910502 5.23926
R 8.23007 -13.306 -2.66901 0.335756 -1.16217 -2.51997 -0.879393 8.19497 -13.1991 8.2 -13.2 -2.72385 -0.907955 0.270039
R 7.91886 -13.3409 -2.6725 0.158889 -1.22478 -2.63883 -0.422847 7.88931 -13.1791 7.9 -13.25 -2.72673 0 2.27041
R 7.61099 -13.3295 -2.67204 -0.00922353 -1.26454 -2.67192 0.0246453 7.52542 -13.1457 7.6 -13.25 -2.72991 0 1.02584
R 7.2886 -13.2407 -2.70148 -0.224342 -1.36512 -2.63378 0.600983 7.19347 -13.1392 7.25 -13.2 -3.63887 0.909723 8.69156
R 7.00753 -13.1802 -2.67433 -0.333091 -1.30017 -2.52734 0.874416 6.90569 -13.1515 6.9 -13.15 -2.72312 0 6.78901
R 6.74015 -13.1174 -2.65914 -0.419834 -1.23005 -2.42821 1.08389 6.69426 -13.3705 6.6 -13.15 -3.00475 0 5.84338
R 6.47809 -13.0431 -2.66382 -0.497907 -1.17059 -2.34039 1.27221 6.4246 -13.2048 6.35 -13.1 -1.9976 0.998786 2.31155
R 6.22122 -12.9402 -2.66201 -0.580454 -1.12157 -2.22601 1.45985 6.162 -13.1569 6.1 -13 -2.72732 0.909111 1.78773
R 5.95543 -12.8088 -2.66477 -0.661674 -1.07038 -2.10241 1.63734 5.78184 -12.8825 5.8 -12.9 -2.72895 0.909657 1.6217
R 5.68957 -12.6572 -2.69021 -0.742939 -1.03381 -1.98129 1.81981 5.54035 -12.7925 5.55 -12.75 -1.81808 1.81809 0.286105
R 5.44499 -12.4749 -2.71384 -0.830092 -1.01 -1.83132 2.0028 5.33087 -12.5965 5.3 -12.55 -2.7236 1.81575 0.462093
R 5.19817 -12.3023 -2.71894 -0.883835 -0.95045 -1.72433 2.10222 5.13793 -12.5907 5.05 -12.35 -1.81967 1.81966 1.66666
R 4.97753 -12.0711 -2.75832 -0.971316 -0.938165 -1.55628 2.27735 4.81467 -12.1072 4.85 -12.1 -1.81844 2.72766 2.09763
R 4.78696 -11.8402 -2.74284 -1.04059 -0.899237 -1.3871 2.36625 4.6829 -11.7433 4.7 -11.85 -0.909557 1.8191 3.90138
R 4.63331 -11.5871 -2.74163 -1.1264 -0.887162 -1.17866 2.47534 4.61619 -11.6508 4.6 -11.6 -0.907782 2.72335 0.047921
R 4.48992 -11.289 -2.79989 -1.2198 -0.882608 -0.962708 2.62918 4.48104 -11.1618 4.5 -11.25 -0.910253 3.64099 10.2548
R 4.44756 -10.9946 -2.7466 -1.34877 -0.91864 -0.604821 2.67918 4.46178 -10.9843 4.45 -10.95 0 1.81804 13.6657
R 4.4528 -10.7157 -2.69056 -1.48805 -0.968646 -0.222375 2.68135 4.60972 -11.1274 4.45 -10.75 0 1.81614 10.4242
R 4.38565 -10.4296 -2.71598 -1.54146 -0.903499 -0.0796616 2.71481 4.43099 -10.4242 4.45 -10.45 0 3.6396 6.87961
R 4.43156 -10.124 -2.71223 -1.6722 -0.937174 0.274569 2.6983 4.52156 -10.1192 4.5 -10.1 0.910021 2.73005 0.528549
R 4.48724 -9.83092 -2.7251 -1.77269 -0.929922 0.546451 2.66975 4.5897 -9.73456 4.6 -9.8 0.999085 2.99725 0.964173
R 4.64075 -9.54547 -2.72978 -1.9426 -1.01272 0.991724 2.54326 4.61159 -9.38621 4.7 -9.55 0.910095 1.82021 3.5562
R 4.75311 -9.2867 -2.72807 -2.03759 -0.994916 1.22771 2.43621 4.80746 -9.23434 4.85 -9.3 1.99828 2.99743 0.495171
R 4.87595 -9.02634 -2.73117 -2.10366 -0.933488 1.38745 2.3525 5.03379 -9.03255 5 -9 0.908384 2.72514 4.36282
R 5.08868 -8.77809 -2.74513 -2.24358 -0.973419 1.71067 2.14693 5.21058 -8.83852 5.15 -8.75 1.81679 1.8168 0.800308
R 5.26605 -8.53606 -2.74268 -2.31165 -0.914344 1.85109 2.0238 5.33786 -8.49992 5.35 -8.5 1.82017 2.73024 3.00536
R 5.47905 -8.30818 -2.75076 -2.39693 -0.887942 2.02268 1.86426 5.58025 -8.28767 5.55 -8.25 1.81852 1.81852 0.6815
R 5.70917 -8.1021 -2.75413 -2.47855 -0.860095 2.1706 1.6952 5.83663 -8.14888 5.75 -8.05 1.81752 1.81753 0.90876
R 5.9275 -7.89998 -2.74723 -2.53207 -0.80297 2.25251 1.57273 5.99151 -7.92468 5.95 -7.85 1.8194 1.8194 2.22422
R 6.11557 -7.66919 -2.72632 -2.52078 -0.663656 2.21761 1.5859 6.05883 -7.47931 6.15 -7.6 1.81742 2.72613 14.0121
R 6.36637 -7.47231 -2.74526 -2.58101 -0.650547 2.32509 1.45959 6.3458 -7.29797 6.4 -7.35 2.7248 1.81653 0.366596
R 6.6259 -7.28344 -2.76193 -2.62889 -0.620105 2.40681 1.35483 6.61964 -7.17343 6.65 -7.2 1.81957 0.909789 0.388196
R 6.88275 -7.11847 -2.76186 -2.67226 -0.587047 2.46322 1.24917 6.91901 -7.04822 6.9 -7.05 2.72946 1.81964 0.661465
R 7.12339 -6.91944 -2.75256 -2.65688 -0.473055 2.43548 1.28258 7.12574 -6.81829 7.15 -6.85 1.81554 1.81554 9.33692
R 7.35696 -6.71715 -2.74144 -2.63342 -0.368814 2.39502 1.33393 7.30586 -6.61905 7.35 -6.65 1.82007 1.82007 7.33065
R 7.629 -6.56687 -2.75538 -2.68402 -0.392369 2.47193 1.21724 7.59546 -6.48319 7.6 -6.5 2.72628 0.908764 1.22785
R 7.88179 -6.40675 -2.74545 -2.6894 -0.340282 2.46951 1.19959 7.8486 -6.40709 7.85 -6.4 1.81656 0.908285 1.9719
R 8.13233 -6.26217 -2.73025 -2.7007 -0.304423 2.46916 1.16512 8.05398 -6.23227 8.1 -6.25 2.73006 1.82003 1.11768
R 8.39587 -6.11585 -2.73074 -2.71666 -0.284954 2.48789 1.12576 8.32369 -5.98136 8.4 -6.05 2.72692 1.81795 0.36695
R 8.64438 -5.92998 -2.72423 -2.67935 -0.192112 2.43833 1.21489 8.63806 -5.84559 8.65 -5.85 1.81844 1.81844 6.02105
R 8.9104 -5.76942 -2.73269 -2.68191 -0.173026 2.44901 1.21241 9.00947 -5.71389 8.9 -5.65 2.72464 1.81643 0.16805
R 9.15698 -5.5647 -2.73465 -2.63313 -0.0863831 2.38869 1.33133 9.13937 -5.44533 9.15 -5.45 1.81916 1.81918 4.25378
R 9.39822 -5.36579 -2.73715 -2.60071 -0.0397855 2.34642 1.40935 9.26221 -5.25032 9.35 -5.3 1.81895 0.909466 0.925183
R 9.61006 -5.15739 -2.71425 -2.52987 0.0669424 2.22205 1.55873 9.71784 -5.19928 9.55 -5.1 1.81986 2.72981 9.48878
R 9.81863 -4.93116 -2.70911 -2.47081 0.129054 2.12215 1.68398 9.67182 -4.82288 9.75 -4.85 1.81736 1.81735 1.95738
R 10.0217 -4.70269 -2.70571 -2.41445 0.180726 2.02137 1.79859 9.94653 -4.64316 9.95 -4.65 1.81806 1.81805 1.29305
R 10.2678 -4.52561 -2.71636 -2.44152 0.108003 2.07745 1.75008 10.4074 -4.52899 10.2 -4.45 2.72609 1.8174 5.24944
R 10.4239 -4.2494 -2.70155 -2.31638 0.264893 1.83273 1.98481 10.3837 -4.17982 10.4 -4.2 0.908945 2.72688 18.3359
R 10.621 -4.02263 -2.70504 -2.29586 0.244084 1.79394 2.0246 10.5531 -3.94939 10.55 -3.95 1.81903 1.81904 0.917741
R 10.7924 -3.77308 -2.7052 -2.2489 0.262643 1.69702 2.10671 10.6157 -3.63689 10.75 -3.7 1.81931 2.72896 0.579684
R 10.9272 -3.49698 -2.7123 -2.16263 0.336879 1.51315 2.25099 10.8149 -3.35552 10.9 -3.4 0.999745 2.99928 3.26671
R 11.0945 -3.24987 -2.72405 -2.15602 0.280546 1.50472 2.27073 10.9078 -3.10703 11.05 -3.15 1.81764 1.81765 4.89133
R 11.2176 -2.96288 -2.72656 -2.07656 0.344178 1.32095 2.3852 11.1507 -2.87978 11.2 -2.9 0.909657 2.72896 2.52294
R 11.2306 -2.62182 -2.73022 -1.87405 0.581305 0.815328 2.60564 11.2614 -2.58697 11.2 -2.6 -0.998725 2.99617 41.9155
R 11.3039 -2.34655 -2.72484 -1.82697 0.551668 0.690421 2.63592 11.2346 -2.28949 11.2 -2.3 0.908813 2.72643 1.43766
R 11.3688 -2.05413 -2.72085 -1.78195 0.518937 0.570252 2.66042 11.1623 -1.9797 11.3 -2 0.909855 2.72956 2.16256
|
4ea43fee62180182dfd87352c1e293c931f1ee50
|
cf93c2cbb6d4889a71416a5b45b27d9ac1fae9e6
|
/cachematrix.R
|
92fdabe39df4b3f3cf44087cd4391d96298c64b2
|
[] |
no_license
|
singh-manish/ProgrammingAssignment2
|
1eb059465a3a0e3df4b4ba0819a81e3745583e3c
|
fc0609c6c62261c9466e447cebbd8c0626ef665e
|
refs/heads/master
| 2021-01-18T15:01:52.282874
| 2014-12-22T09:39:50
| 2014-12-22T09:39:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,693
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
##
## Matrix inversion is usually a costly computation and there may be some benefit to caching the inverse of a matrix rather than computing it repeatedly
## These functions are pair of functions that cache the inverse of a matrix.
##
## Author : manish singh
## Date : 22 Dec 2014
##
## Following functions are available :
##
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then cacheSolve should retrieve the inverse from the cache.
## Computing the inverse of a square matrix can be done with the solve function in R. For example, if X is a square invertible matrix, then solve(X) returns its inverse.
##
## Below functions assumes that the matrix supplied is always invertible and square matrix.
##
## The function, makeCacheMatrix creates a special "vector", which is really a list containing a function to
## set the value of the vector
## get the value of the vector
## set the inverse matrix
## get the inverse matrix
##
## Build a cache-aware matrix wrapper: a list of closures sharing the matrix
## `x` and its cached inverse. The sentinel NA means "no inverse cached yet";
## set() replaces the matrix and invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  inverseMatrix <- NA
  list(
    ## Replace the stored matrix and drop any stale cached inverse.
    set = function(newMatrix) {
      x <<- newMatrix
      inverseMatrix <<- NA
    },
    ## Return the stored matrix.
    get = function() x,
    ## Store a computed inverse in the cache.
    setinverse = function(inv) inverseMatrix <<- inv,
    ## Return the cached inverse (length-1 NA when nothing is cached).
    getinverse = function() inverseMatrix
  )
}
## end of function makeCacheMatrix
##
## Compare two matrix and return true if both are equal
##
## matrixCompare: TRUE if x and y have identical dimensions and all
## corresponding entries are equal, FALSE otherwise.
##
## Fixes over the original: `dim(x) == dim(y)` is a length-2 logical, which
## `&&` rejects (an error in R >= 4.3); and comparing matrices of different
## dimensions raised a non-conformable error. The dimensions are now compared
## with identical(), and isTRUE() collapses the elementwise comparison to a
## single TRUE/FALSE (an NA entry yields FALSE rather than an error).
matrixCompare <- function(x, y) {
  ## Different shapes can never be equal (also guards the `==` below).
  if (!identical(dim(x), dim(y))) {
    return(FALSE)
  }
  ## isTRUE() maps an NA comparison result to FALSE.
  isTRUE(all(x == y))
}
## end of matrixCompare
##
## The following function calculates the inverse of the special "vector" created with the above function.
## However, it first checks to see if the inverse has already been calculated. If so, it gets the inverse from the cache and skips the computation.
## Otherwise, it calculates the inverse of the given matrix and sets the inverse matrix in the cache via the setinverse function.
##
## cacheSolve: return the inverse of the special "matrix" x (built by
## makeCacheMatrix), computing it with solve() and caching it on first use.
## Extra arguments in `...` are forwarded to solve().
##
## Fix: the original tested `if(!is.na(inverseOfx))`; once an inverse matrix
## is cached, is.na() returns a logical matrix and `if` on a length > 1
## condition is an error in R >= 4.2. The "empty cache" sentinel is a
## length-1 NA, so we test for exactly that.
cacheSolve <- function(x, ...) {
  inverseOfx <- x$getinverse()
  ## Cache hit unless the sentinel (a single NA) is stored.
  if (!(length(inverseOfx) == 1 && is.na(inverseOfx[1]))) {
    message("getting cached data")
    return(inverseOfx)
  }
  data <- x$get()
  inverseOfx <- solve(data, ...)
  x$setinverse(inverseOfx)
  inverseOfx
}
## end of function cacheSolve
|
0aae0e3bfc2394b0a4f8881416c4b02ce8cec732
|
77f1b2b22ae46e2785d9bd57b8d18ff7e51242b5
|
/WebGUI/CPTMLTools/EECalc/eeCalculate.R
|
4afd4dd5174dfdddf62f1130e2f5147d3f3449e3
|
[
"BSD-2-Clause"
] |
permissive
|
muntisa/RMarkovTI
|
6e4c93797a217bfe1253a539daf61010c1bb6445
|
61dc489de1b22b8889a4ce91516cdaae8e0b9cab
|
refs/heads/master
| 2021-01-01T04:46:56.066730
| 2018-04-04T14:47:14
| 2018-04-04T14:47:14
| 97,241,653
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,674
|
r
|
eeCalculate.R
|
## eeCalculate.R -- predict enantiomeric excess (ee%) for each input reaction
## by (1) locating the nearest reference reaction in a 9-descriptor Euclidean
## space and (2) applying a linear correction over a grid of reaction
## conditions (time, temperature, catalyst load).
##
## Command-line args: input CSV, result file path, then min/max/step for
## time, temperature and load, plus a chirality flag.
library(ChemmineR)
library(base)
library(expm)
library(MASS)
library(openxlsx)
source("F:/CPTMLTools/EECalc/RMarkovTI_functions_VPCR.R")

args <- commandArgs(TRUE)
fileInput <- args[1]
resultFile <- args[2]
minTime <- as.numeric(args[3])
maxTime <- as.numeric(args[4])
stepTime <- as.numeric(args[5])
minTemp <- as.numeric(args[6])
maxTemp <- as.numeric(args[7])
stepTemp <- as.numeric(args[8])
minLoad <- as.numeric(args[9])
maxLoad <- as.numeric(args[10])
stepLoad <- as.numeric(args[11])
quiral <- as.numeric(args[12])

refData <- read.xlsx("F:/CPTMLTools/EECalc/RefRecctions.xlsx", sheet = "All")
inputData <- read.table(fileInput, header = TRUE, sep = ",")

finalDf <- data.frame(matrix(ncol = 5, nrow = 0))
colnames(finalDf) <- c("Reacction", "*ee(%)[R]", "Time", "Temp", "Load")
print(inputData[2, 6])
print(inputData[2, 7])

for (i in seq_len(nrow(inputData))) {
  ## Individual descriptors (substrate, product, catalyst, solvent, nucleophile)
  subs_Vvdw_All <- calculateDescriptor("Zv,EA,aPolar,SAe", "All", inputData[i, 3])
  prod_EA_Csat <- calculateDescriptor("Zv,aPolar,Vvdw,SAe", "Csat", inputData[i, 5])
  prod_aPolar_HetNox <- calculateDescriptor("Zv,EA,Vvdw,SAe", "HetNoX", inputData[i, 5])
  cat_Zv_Cuns <- calculateDescriptor("EA,aPolar,Vvdw,SAe", "Cuns", inputData[i, 7])
  cat_Sae_HetNox <- calculateDescriptor("Zv,EA,aPolar,Vvdw", "HetNoX", inputData[i, 7])
  cat_aPolar_Cuns <- calculateDescriptor("Zv,EA,Vvdw,SAe", "Cuns", inputData[i, 7])
  cat_EA_HetNox <- calculateDescriptor("Zv,aPolar,Vvdw,SAe", "HetNoX", inputData[i, 7])
  solv_Zv_Cuns <- calculateDescriptor("EA,aPolar,Vvdw,SAe", "Cuns", inputData[i, 9])
  nuc_SAe_Het <- calculateDescriptor("Zv,EA,aPolar,Vvdw", "Het", inputData[i, 11])

  ## Euclidean distance from this reaction to reference row r in descriptor
  ## space. (The original indexed refData with an undefined `r2`; the loop
  ## index is `r` -- fixed here.)
  descDist <- function(r) {
    sqrt((cat_Zv_Cuns - refData[r, "Zv_Cuns_Cat_Mean_ref"])^2 +
         (prod_EA_Csat - refData[r, "EA_Csat_Prod_Mean_ref"])^2 +
         (solv_Zv_Cuns - refData[r, "Zv_Cuns_Solv_Mean_ref"])^2 +
         (nuc_SAe_Het - refData[r, "SAe_Het_Nuc_Mean_ref"])^2 +
         (cat_Sae_HetNox - refData[r, "SAe_HetNoX_Cat_Mean_ref"])^2 +
         (cat_aPolar_Cuns - refData[r, "aPolar_Cuns_Cat_Mean_ref"])^2 +
         (cat_EA_HetNox - refData[r, "EA_HetNoX_Cat_Mean_ref"])^2 +
         (prod_aPolar_HetNox - refData[r, "aPolar_HetNoX_Prod_Mean_ref"])^2 +
         (subs_Vvdw_All - refData[r, "Vvdw_All_Sub_Mean_ref"])^2)
  }

  ## Find the nearest reference reaction (original seeded minDist with the
  ## undefined `r2`; Inf seeding is equivalent and well-defined).
  minDist <- Inf
  reaRef <- 1
  for (r in seq_len(nrow(refData))) {
    dist <- descDist(r)
    if (dist < minDist) {
      minDist <- dist
      reaRef <- r
    }
  }

  ## Predict ee across the requested grid of conditions.
  for (time in seq(from = minTime, to = maxTime, by = stepTime)) {
    for (temp in seq(from = minTemp, to = maxTemp, by = stepTemp)) {
      for (load in seq(from = minLoad, to = maxLoad, by = stepLoad)) {
        ## NOTE(review): `minDfProp` is not defined anywhere in this script --
        ## presumably a per-reference table of optimal conditions with
        ## columns Load/Temp/Time; confirm where it should be loaded from.
        ##
        ## The original split this expression across lines with LEADING
        ## operators, so R only ever assigned the first line's value; the
        ## whole correction is now parenthesised so every term contributes.
        eeqq <- (-0.914038918667643 + refData[reaRef, "ee_ref"] -
                 0.821032133512764 * (load - minDfProp[reaRef, "Load"]) -
                 0.343919121414324 * (temp - minDfProp[reaRef, "Temp"]) +
                 0.211791266990752 * (time - minDfProp[reaRef, "Time"]) +
                 22.0406704292748 * (cat_Zv_Cuns - refData[reaRef, "Zv_Cuns_Cat_Mean_ref"]) -
                 215.982019256065 * (prod_EA_Csat - refData[reaRef, "EA_Csat_Prod_Mean_ref"]) -
                 12.4578151202493 * (solv_Zv_Cuns - refData[reaRef, "Zv_Cuns_Solv_Mean_ref"]) -
                 42.4863067259439 * (nuc_SAe_Het - refData[reaRef, "SAe_Het_Nuc_Mean_ref"]) +
                 750.757360483937 * (cat_Sae_HetNox - refData[reaRef, "SAe_HetNoX_Cat_Mean_ref"]) -
                 174.368536901798 * (cat_aPolar_Cuns - refData[reaRef, "aPolar_Cuns_Cat_Mean_ref"]) -
                 1747.11691314115 * (cat_EA_HetNox - refData[reaRef, "EA_HetNoX_Cat_Mean_ref"]) -
                 1534.17019704508 * (prod_aPolar_HetNox - refData[reaRef, "aPolar_HetNoX_Prod_Mean_ref"]) -
                 34.1870137382133 * (subs_Vvdw_All - refData[reaRef, "Vvdw_All_Sub_Mean_ref"]))
        ## Original appended the undefined `AVGee`; the computed prediction
        ## is `eeqq`.
        finalDf[nrow(finalDf) + 1, ] <- c(as.character(inputData[i, 1]),
                                          round(eeqq, digits = 1),
                                          time, temp, load)
      }
    }
  }
}

write.table(finalDf, resultFile, sep = ";", row.names = FALSE, quote = TRUE)
|
cab999b8b21bd7c1ba0a3acd1908fdaf9d02a978
|
c686e4be025390cf6b9bc2991ba2c34c644bc3cc
|
/GLM_01.R
|
dad3253fa2d2b77e19474a49eb18d22ce9928772
|
[] |
no_license
|
gargass/GLM
|
5b6a035d880f0ddffefacd37c460c67b5655df80
|
9c43d0dd67c5ba3e7e6fa0f64330b673cfaa96d1
|
refs/heads/master
| 2021-01-10T11:14:47.051102
| 2016-01-25T18:44:16
| 2016-01-25T18:44:16
| 50,208,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,739
|
r
|
GLM_01.R
|
## GLM_01.R -- exploratory/scratch script: logistic regression fitted with
## glm() and re-derived by hand (likelihood curves, nlm MLE, one IRLS step).
## NOTE(review): this is interactive code; several lines below are broken or
## depend on interactive context -- see inline notes.

## Toy perfectly-separated data: glm() will report convergence problems here
x<-c(1,2,3,4,5,6)
y<-c(1,1,1,0,0,0)
model<-glm(y~x, family="binomial")
model
summary(model)
model$iter  # number of IRLS iterations glm used
## Two binomial samples: y successes out of n trials each
n<-25
n<-c(25, 50)
y<-c(10, 20)
## NOTE(review): curve() expects a single expression in `x`; passing `x` and
## the joint likelihood as two separate arguments is almost certainly not
## what was intended -- TODO confirm.
curve(x, x^y[1]*(1-x)^(n[1]-y[1])*x^y[2]*(1-x)^(n[2]-y[2]))
## Binomial log-likelihood of success probability p.
## NOTE(review): log(choose(n, y)) was probably intended; choose(n, y) is
## added un-logged here. The term is constant in p, so the MLE is unaffected.
logL<-function(p, n, y){
choose(n, y)+y*log(p)+(n-y)*log(1-p)
}
p<-seq(0,1,length=100)
logL(p, n[1], y[1])
plot(logL(p, n[1], y[1]))
lines(logL(p, n[2], y[2]))
## Relative log-likelihood curves (zero at the MLE y/n)
curve(logL(x, n[2], y[2])-logL(y[2]/n[2],n[2], y[2]))
curve(logL(x, n[1], y[1])-logL(y[1]/n[1], n[1], y[1]), add=T, col="red")
?nlm
## Numerical MLE: minimise the negative log-likelihood with nlm()
f<-function(x) -logL(x, n[1], y[1])
nlm<-nlm(f, p=0.2, hessian=T)   # NOTE(review): this name shadows nlm()
hessian<-nlm$hessian
1/hessian                            # inverse observed information ~ Var(p-hat)
n[1]*nlm$estimate*(1-nlm$estimate)   # compare with binomial variance n*p*(1-p)
## Bliss beetle-mortality data, entered by hand.
## NOTE(review): data(bliss) is called before `bliss` is defined and no such
## dataset ships with base R -- this line likely errors; TODO remove/confirm.
data(bliss)
conc<-c(0,1,2,3,4)
dead<-c(2,8,15,23,27)
number<-c(30,30,30,30,30)
alive<-number-dead
bliss<-cbind(conc, dead, number, alive)
bliss<-as.data.frame(bliss)
## glm with a two-column (successes, failures) response matrix
y<-cbind(bliss$dead, bliss$alive)
model<-glm(y~bliss$conc, family="binomial")
model
summary(model)
## First attempt at expanding grouped data to one row per beetle (150 rows).
## NOTE(review): each assignment puts 3 values into 2x2 = 4 cells (ncol = 2
## but the literals look like 3 columns were intended) -- recycling error;
## superseded by the loop version below.
bliss2<-matrix(0,nrow=150, ncol=2)
bliss2[1:2,]<-c(0,1,0)
bliss2[3:30,]<-c(0,0,1)
bliss2[31:38,]<-c(1,1,0)
bliss2[39:60,]<-c(1,0,1)
bliss2[61:75,]<-c(2,1,0)
bliss2[76:90,]<-c(2,0,1)
bliss2[91:113,]<-c(3,1,0)
bliss2[114:120,]<-c(3,0,1)
bliss2[121:147,]<-c(4,1,0)
bliss2[148:150,]<-c(4,0,1)
## Second attempt: loop over dose groups, writing (conc, outcome) rows.
bliss2<-matrix(0,nrow=150, ncol=2)
k<-1
for(i in 1:nrow(bliss)){
bliss2[k:(30*(k+1)-1), 1]<-bliss[i,1]
bliss2[k:(30*(k+1)-1), 2]<-0
for(j in 1:bliss[i,2]){
bliss2[k, 1]<-bliss[i,1]
bliss2[k,2]<-1
k<-k+1
}
bliss2[k, 1]<-bliss[i,1]
bliss2[k,2]<-0
k<-k+1
}
## One IRLS (iteratively reweighted least squares) step by hand.
b_old<-c(0,0)
## NOTE(review): `pi` is set to alive/30 (and masks the constant pi); for
## IRLS it should be the fitted probabilities at b_old -- TODO confirm.
pi<-bliss[,4]/30
pi
W<-diag(pi*(1-pi))   # IRLS weight matrix
W
logit<-function(x){
log(x/(1-x))
}
ilogit<-function(x){
exp(x)/(1+exp(x))
}
logit(1/2)
X<-cbind(c(1,1,1,1,1), bliss$alive)   # design matrix: intercept + covariate
## NOTE(review): the dangling `+` merges this line with the next, so what is
## evaluated is `z <- logit(pi) + (z <- X %*% b_old + ...)`; also
## `solve(W)*(y-pi)` is elementwise -- `%*%` was probably intended.
z<-logit(pi)+
z<-X%*%b_old+solve(W)*(y-pi)
lm(conc~z, weights = pi*(1-pi))
|
d3c78e28582ae8ed9a042bbf4fec41d0383e39b4
|
d94071bc2582fc209e4974ac0cd93a7a9227982f
|
/cachematrix.R
|
f4d9414abb7b4c5745aa6c21277594b567f2bc69
|
[] |
no_license
|
Skrie/ProgrammingAssignment2
|
4832010a4e68b3259624a2295c6179d7ce12af2a
|
e53058ee1a3d57c00249d7d70667570ccad6f608
|
refs/heads/master
| 2021-01-13T04:14:42.779007
| 2016-12-28T20:33:32
| 2016-12-28T20:33:32
| 77,485,411
| 0
| 0
| null | 2016-12-27T22:08:35
| 2016-12-27T22:08:34
| null |
UTF-8
|
R
| false
| false
| 2,251
|
r
|
cachematrix.R
|
## The functions makeCacheMatrix and cacheSolve are used to create a matrix, calcualte the inverse of that matrix
## and then caches the inverse of that matrix. makeCacheMatrix provides a list of functions that can be used to get
## and set a cached matrix and its inverse. cacheSolve retrieves and returns an inverse matrix, if the inverse matrix has
## been cached by the makeCacheMatrix function, or calculates and caches a new inverse matrix if one has not been cached by
## the makeCacheMatrix already.
## makeCacheMatrix receives a matrix as an argument and caches the matrix in the variable x. The function provides a get and set
## method to retrieve and cache the matrix. The function also provides 2 additional functions to get and set a cached inverse of
## that matrix stored in the variable m. The inverse matrix is calculated in a seperate function named cacheSolve. makeCacheMatrix
## makes available the get and set functions for the cached matrix and its cached inverse through a list.
## Construct a matrix wrapper whose inverse can be cached. Returns a list of
## four closures over the matrix `x` and the cache `m`; NULL in `m` means no
## inverse has been cached yet, and set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  list(
    ## Replace the stored matrix; any cached inverse becomes stale.
    set = function(y) {
      x <<- y
      m <<- NULL
    },
    ## Return the stored matrix.
    get = function() x,
    ## Cache a computed inverse.
    setInverseMatrix = function(inv) m <<- inv,
    ## Return the cached inverse, or NULL if none has been stored.
    getInverseMatrix = function() m
  )
}
## cacheSolve accepts a variable x as an argument, variable x has had the function makeCacheMatrix assigned to it. cacheSolve
## the retrieves an inverse matrix from x and assigns the value to the variable m. If m is not null then cacheSolve retrieves
## and returns the cached inverse matrix assigned to m. If m is null then cacheSolve retrieves the cached matrix stored in x,
## calculates the inverse of that matrix and stores the result in the variable m, where the inverse matrix will be cached for
## future use, and returns the inverse matrix.
## Return the inverse of the special "matrix" x (from makeCacheMatrix).
## A previously cached inverse is reused (announced via message()); otherwise
## the inverse is computed with solve() and stored for next time.
cacheSolve <- function(x) {
  cached <- x$getInverseMatrix()
  if (is.null(cached)) {
    ## Cache miss: compute and remember the inverse.
    cached <- solve(x$get())
    x$setInverseMatrix(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
d687071ad812dfe2ba9a797b84999dbdaac74c99
|
6feee0acceea5634d6d3f87a4a49f02ac8aebe99
|
/simulations/popFunctions.R
|
4725f52209f9a3c8202638a867dfdf937e940ff4
|
[
"MIT"
] |
permissive
|
sjpeacock/OsterBou-pop
|
a54879c619af15dbebd8f0ef71aaae98fafbcbc4
|
52ae2fe33b13026c7dae3750c3750b9940099449
|
refs/heads/master
| 2023-04-10T10:34:04.961913
| 2022-06-13T03:05:49
| 2022-06-13T03:05:49
| 502,785,749
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,704
|
r
|
popFunctions.R
|
###############################################################################################
# Scenario for proportion inhibiting
###############################################################################################
###############################################################################################
# Scenario for proportion inhibiting
###############################################################################################
# Daily proportion of ingested larvae that enter arrested development, as a
# length-365 vector indexed by day of year.
#   scenario 1-3: constant 1 / 0.5 / 0 all year
#   scenario 4:   full arrest through DOY 109, then a logistic decline
#                 centred on DOY 172
# Fix: the original indexed c(1, 0.5, 0)[p] with an undefined `p`; the
# function's argument `scenario` is the intended index.
pArrestCalc <- function(scenario){
  if(scenario == 4){
    pIn <- c(rep(1, 109), 1/(1 + exp(-0.08*(c(110:365) - 172))))
  } else {
    pIn <- rep(c(1, 0.5, 0)[scenario], 365)
  }
  return(pIn)
}
###############################################################################################
# Time derivative of the PDE system
###############################################################################################
# Input variables:
# y: matrix with columns equal to the spatial grid and rows equal to the different variables:
# calf_stat
# calf_mov
# yearling_stat
# yearling_mov
# adult
# developingL4
# arrestedL4
# P (adult worm)
# L0 (free-living pre-infective larvae)
# L3 (free-living infective larvae)
# V_developingL4
# V_arrestedL4
# V_P
# params: vector of parameters, including:
# muC - mortality rate of calves per day
# muY - mortality rate of yearlings per day
# muA - mortality rate of adults per day
# alpha - per-parasite rate of parasite-induced mortality of adults
# beta - intake rate of parasites
# (*constant for now but may consider seasonal variation as with
# fecal output)
# ppnInhibit - the proportion of larvae that go into arrested development (0-1)
# rhoL4 - development rate of L4 larvae to adults (per day)
# muP - mortality of adult parasites per day
# *lambda* - time-varying rate of egg output per adult parasite
# gamma - density dependence of parasite fecundity (-0.49)
# *muL0* - mortality rate (per day) of pre-infective larvae
# *rho0* - development rate (per day) of pre-infectives to infectives
# *muL3* - mortality
# * parameters that will vary over time (temperature, etc.)
# #------------------------------------------------------------------------------
###############################################################################################
# Time derivative of the PDE system
###############################################################################################
# Computes d/dt of every state variable at every spatial grid cell.
#
# y: matrix (variables x space) whose rownames are used for indexing:
#      calf_stat/calf_mov, yearling_stat/yearling_mov, adult_stat/adult_mov:
#        host numbers by age class and movement state
#      L4A_stat/L4A_mov: arrested L4 larvae carried by stationary/moving adults
#      L4_stat/L4_mov:   developing L4 larvae
#      P_stat/P_mov:     adult parasites
#      L0, L3:           free-living pre-infective / infective larvae
#      L_uptake:         instantaneous per-host larval uptake (book-keeping)
# p: list of rates (muC, muY, muA, start, stop, beta, ppnInhibit, rho4, mu4,
#    muP, nuP, k, lambda, gamma, mu0, rho0, mu3); each may be a scalar or a
#    vector over the spatial grid.
#    NOTE(review): p$alpha and p$parasitStop are supplied by calcParams but
#    not used here -- confirm whether parasite-induced host mortality/
#    stopping was meant to enter these equations.
#
# Returns a matrix of derivatives with the same dimensions/names as y.
# Fix over original: removed a dead `unique(which(is.na(dy)...))` expression
# whose result was discarded (and which sat after the stop() guard anyway).
partial_t.Bou <- function(y, p){
  nx <- dim(y)[2]
  dy <- array(0, dim = dim(y), dimnames = dimnames(y))

  # Cells with adults present: guards the per-host parasite mortality terms
  # below against division by zero.
  adult_mov.nonzero <- which(y['adult_mov', ] > 0)
  adult_stat.nonzero <- which(y['adult_stat', ] > 0)

  # 1) Calves: mortality plus exchange between stationary and moving pools
  dy['calf_stat', ] <- - (p$muC + p$start) * y['calf_stat', ] + p$stop * y['calf_mov', ]
  dy['calf_mov', ] <- - (p$muC + p$stop) * y['calf_mov', ] + p$start * y['calf_stat', ]

  # 2) Yearlings
  dy['yearling_stat', ] <- - (p$muY + p$start) * y['yearling_stat', ] + p$stop * y['yearling_mov', ]
  dy['yearling_mov', ] <- - (p$muY + p$stop) * y['yearling_mov', ] + p$start * y['yearling_stat', ]

  # 3) Adults
  dy['adult_stat', ] <- - (p$muA + p$start) * y['adult_stat', ] + p$stop * y['adult_mov', ]
  dy['adult_mov', ] <- - (p$muA + p$stop) * y['adult_mov', ] + p$start * y['adult_stat', ]

  # 4) Arrested L4: fraction ppnInhibit of ingested L3 arrests; lost with
  #    host death (muA) and larval death (mu4); exchanged with movement state
  dy['L4A_stat', ] <- p$beta * p$ppnInhibit * y['L3', ] * y['adult_stat', ] - (p$muA + p$mu4 + p$start) * y['L4A_stat', ] + p$stop * y['L4A_mov', ]
  dy['L4A_mov', ] <- p$beta * p$ppnInhibit * y['L3', ] * y['adult_mov', ] - (p$muA + p$mu4 + p$stop) * y['L4A_mov', ] + p$start * y['L4A_stat', ]

  # 5) Developing L4: remaining fraction of ingested L3; also matures (rho4)
  dy['L4_stat', ] <- p$beta * (1 - p$ppnInhibit) * y['L3', ] * y['adult_stat', ] - (p$muA + p$mu4 + p$rho4 + p$start) * y['L4_stat', ] + p$stop * y['L4_mov', ]
  dy['L4_mov', ] <- p$beta * (1 - p$ppnInhibit) * y['L3', ] * y['adult_mov', ] - (p$muA + p$mu4 + p$rho4 + p$stop) * y['L4_mov', ] + p$start * y['L4_stat', ]

  # 6) Adult parasites: density-dependent mortality term uses the mean burden
  #    per host scaled by the aggregation factor (k + 1)/k (zero where no hosts)
  PMort_stat <- rep(0, nx)
  PMort_stat[adult_stat.nonzero] <- y['P_stat', adult_stat.nonzero] / y['adult_stat', adult_stat.nonzero] * (p$k + 1)/p$k + 1
  dy['P_stat', ] <- p$rho4 * y['L4_stat', ] - (p$muA + p$muP + (p$nuP * PMort_stat) + p$start) * y['P_stat', ] + p$stop * y['P_mov', ]

  PMort_mov <- rep(0, nx)
  PMort_mov[adult_mov.nonzero] <- y['P_mov', adult_mov.nonzero] / y['adult_mov', adult_mov.nonzero] * (p$k + 1)/p$k + 1
  dy['P_mov', ] <- p$rho4 * y['L4_mov', ] - (p$muA + p$muP + (p$nuP * PMort_mov) + p$stop) * y['P_mov', ] + p$start * y['P_stat', ]

  # 7) Free-living larvae: egg output with density-dependent fecundity
  #    (exponent 1 + gamma), development L0 -> L3, mortality, and uptake
  dy['L0', ] <- p$lambda * (y['adult_stat', ] * y['P_stat', ]^(1 + p$gamma) + y['adult_mov', ] * y['P_mov', ]^(1 + p$gamma)) - (p$mu0 + p$rho0) * y['L0', ]
  dy['L3', ] <- p$rho0 * y['L0', ] - p$mu3 * y['L3', ] - p$beta * y['L3', ]* (y['adult_stat', ] + y['adult_mov', ])

  # 8) Uptake rate per host, normalised by the total adult population
  dy['L_uptake', ] <- p$beta * y['L3', ] * (y['adult_stat', ] + y['adult_mov', ]) / sum(y['adult_stat', ] + y['adult_mov', ])

  if(sum(is.na(dy)) > 0) stop("\n\nNAs in derivative function.\n\n")
  return(dy)
} #end function
###############################################################################################
# Parasite egg output - lambda
###############################################################################################
# From Stien et al. 2002 Int J Parasit
###############################################################################################
# Parasite egg output - lambda
###############################################################################################
# Daily egg output per adult worm (eggs per gram of faeces times grams of
# faeces per day), each modelled as a Gaussian seasonal curve on the
# day-of-year fraction; parameters from Stien et al. 2002 Int J Parasit.
# Density dependence of fecundity is applied elsewhere (gamma exponent).
predict.lambda <- function(DOY){
  t_frac <- DOY/365

  # Eggs per gram of faeces per worm: baseline alpha1 plus a Gaussian bump
  # peaking at 52% through the year
  alpha1 <- 0.01
  alpha2 <- 0.345
  mu <- 0.52
  sigma <- 0.087
  eggs_per_gram <- alpha1 +
    alpha2/(sigma*sqrt(2*pi)) * exp(-(t_frac - mu)^2/(2*sigma^2))

  # Faecal production (g/day): winter floor alphaF1, summer peak maxF at
  # 58% through the year (peak plant biomass in August)
  muF <- 0.58
  alphaF1 <- 1300
  maxF <- 5400
  sigmaF <- 0.085
  alphaF2 <- (maxF - alphaF1) * sigmaF * sqrt(2*pi)
  grams_per_day <- alphaF1 +
    alphaF2/(sigmaF*sqrt(2*pi)) * exp(-(t_frac - muF)^2/(2*sigmaF^2))

  # Eggs per worm per day
  eggs_per_gram * grams_per_day
}
###############################################################################################
# MTE predictions for free-living larvae params
###############################################################################################
# MTE curve: daily mortality rate of free-living pre-infective larvae as a
# function of temperature (deg C). Arrhenius decline from a rate of 0.068 at
# 15 C, with an extra low-temperature inactivation term.
predict.mu0 <- function(temp){
  kB <- 8.62 * 10^-5                 # Boltzmann constant, eV/K
  invT <- 1/(temp + 273.15)
  base <- 0.068 * exp(-0.884/kB * (invT - 1/(15 + 273.15)))
  cold <- 1 + exp(2.928/kB * (invT - 1/(-3.377 + 273.15)))
  base * cold
}
# MTE curve: daily development rate of pre-infective to infective larvae as a
# function of temperature (deg C). Arrhenius rise from a rate of 0.032 at
# 15 C, damped by a high-temperature inactivation term.
predict.rho0 <- function(temp){
  kB <- 8.62 * 10^-5                 # Boltzmann constant, eV/K
  invT <- 1/(temp + 273.15)
  base <- 0.032 * exp(-0.686/kB * (invT - 1/(15 + 273.15)))
  hot <- 1 + exp(7.957/kB * (-invT + 1/(30.568 + 273.15)))
  base * hot^(-1)
}
# MTE curve: daily mortality rate of infective (L3) larvae as a function of
# temperature (deg C). Arrhenius base rate of 0.0211 at 15 C, inflated by
# both a cold-side and a hot-side inactivation term.
predict.mu3 <- function(temp){
  kB <- 8.62 * 10^-5                 # Boltzmann constant, eV/K
  invT <- 1/(temp + 273.15)
  base <- 0.0211 * exp(-0.208/kB * (invT - 1/(15 + 273.15)))
  cold <- exp(3.409/kB * (invT - 1/(-19.318 + 273.15)))
  hot <- exp(3.5543/kB * (-invT + 1/(27.6 + 273.15)))
  base * (1 + cold + hot)
}
###############################################################################################
# Parasite and host dependent fecundity
###############################################################################################
# Inputs:
# P_mean: mean parasite burden at conception (October, 222 daysprior to calving)
# numAdults: number of adult female caribou that survive to calving season
# pCalf0: fecundity (i.e., probability that female has a calf) in the absence of parasites
###############################################################################################
# Parasite and host dependent fecundity
###############################################################################################
# Expected number of calves given the mean parasite burden at conception.
#   P_mean:     mean adult-worm burden per female at conception
#   numFemales: number of adult females surviving to the calving season
#   pCalf0:     fecundity (probability of a calf) in the absence of parasites
# Pregnancy probability declines logistically with burden (Albon et al. 2002).
numCalves <- function(P_mean, numFemales, pCalf0){
  pregnancy_loss <- 1/(1 + exp(7.025 - 0.000328 * P_mean))
  numFemales * pCalf0 * (1 - pregnancy_loss)
}
# ###############################################################################################
# # Annual temperature cycle
# ###############################################################################################
#
# # Adjust tempDOY based on climate change scenario
# predict.temp <- function(temp, climateScenario = "current"){
# DOY <- c(1:365)
#
# if(climateScenario == "rcp26"){
# increase <- 2.2176 - 0.6511 * cos((DOY - 168.5002)* 2 * pi / 365)
# } else if(climateScenario == "rcp85"){
# increase <- 7.7867 - 3.1112 * cos((DOY - 179.0068)* 2 * pi / 365)
# } else {
# increase <- 0
# }
#
# temp + increase
#
# }
###############################################################################################
# Calculate parameters based on DOY
###############################################################################################
# Assemble the full parameter list for partial_t.Bou for a given day of year.
#   DOY:          day of year (1-365); indexes seasonal quantities
#   temp:         temperature(s) for that day, passed to the MTE curves
#   ppnInhibit:   proportion of ingested larvae entering arrested development
#   transmission: "base"/"high"/"low" presets for the intake rate beta, or a
#                 numeric value given as a string/number
# Returns a named list; fields may be scalars or vectors over the spatial grid.
# NOTE(review): startMat and stopMat are globals assumed to be defined before
# this is called (rows = DOY, columns = spatial grid) -- confirm they are
# built by the calling script.
calcParams <- function(DOY, temp, ppnInhibit = 0, transmission = "base"){
if(transmission == "base") beta <- 10^-6 else if(transmission == "high") beta <- 10^-5 else if(transmission == "low") beta <- 10^-7 else beta <- as.numeric(transmission)
# Need to have parameters as a list because the parameters for stationary larvae will vary in space and time
params <- list(
# muC - mortality rate of calves per day
muC = (1 - 0.45)/365, #approx. initial parameter from Boulanger
# muC = (1 - 0.375)/365, #approx. initial parameter from Boulanger
# muY - mortality rate of yearlings per day
muY = (1 - 0.86)/365, # annual Sy = 0.86 from Boulanger et al. 2011
# muA - mortality rate of adults per day
muA = (1 - 0.86)/365, #approx. initial parameter from Boulanger
# muA = (1 - 0.78)/365, #approx. initial parameter from Bathurst range for 2009-2012
# alpha - per-parasite rate of parasite-induced mortality of adults
alpha = 0,
# rate of starting (per day, by spatial cell; from global startMat)
start = startMat[DOY, ],
# rate of stopping (per day, by spatial cell; from global stopMat)
stop = stopMat[DOY, ],
# per-parasite rate of parasite-induced stopping
parasitStop = 0,
# beta - intake rate of parasites
# ********* Need to better resolve this. ****************************
# Also depends on dry matter intake and will vary thorughout the year?
# Assume constant for now. Grenfell et al. 1987 assumed three levels (0.0001, 0.001, 0.01)
# Seems common to do that so we may just need to look at sensitivity to this parameter.
beta = beta,
# ppnInhibit - the proportion of larvae that go into arrested development (0-1)
ppnInhibit = ppnInhibit,
# rho4 - development rate of L4 larvae to adults (per day)
# From Grenfell et al. 1987, development to adults can take 17 days.
rho4 = 0.06,
# mu4 - mortality of L4 larvae per day
mu4 = 0.002,
# muP - mortality of adult parasites per day
# Likely density-dependent, see Smith and Grenfell 1985 Parasit. Today.
# From Grenfell et al. 1987: mu5 = a + b * P where
# a = 0.1713 per day and b = 0.3082 * 10^-6 per worm per day
# plot(seq(1, 10^18, length.out = 100), 0.1713 + 0.3082 * 10^-6 * seq(1, 10^18, length.out = 100), "l")
# Likely insignificant over the ranges of parasites that we see, use mean
muP = 0.1713,
# nuP - density-dependent component of adult-parasite mortality (see muP note)
nuP = 0.3082e-6,
# *lambda* - time-varying rate of egg output per adult parasite
# (predict.lambda is eggs/worm/day; the 10^-2 rescaling's rationale is not
# recorded here -- TODO confirm units)
lambda = predict.lambda(DOY)*10^-2,
# gamma - density dependence of parasite fecundity (-0.49 Stien et al. 2002 Int J Parasit)
gamma = -0.49,
# *mu0* - mortality rate (per day) of pre-infective larvae
mu0 = predict.mu0(temp),
# *rho0* - development rate (per day) of pre-infectives to infectives
rho0 = predict.rho0(temp),
# *mu3* - mortality
# Estimated as constant, but apply in matrix to allow for changes.
mu3 = predict.mu3(temp),#rep(0.022, length(x)),
# Aggregation parameter
# Estimated from Bathurst data
k = 0.9940684
)
return(params)
}
###############################################################################################
# Function to set up initial distribution
###############################################################################################
# Initial spatial distribution
# Initial spatial distribution: a Gaussian pulse of totPop animals (standard
# deviation x.start.sd) centred at the middle grid cell, then rotated so the
# peak sits at the first cell (x = 0) -- the spatial grid is treated as a torus.
initDist <- function(totPop, x, x.start.sd = 80){
  mid <- round(length(x)/2)
  bell <- c(totPop / sqrt(2 * pi * (x.start.sd^2)) * exp(- (x - x[mid])^2 / (2 * x.start.sd^2)))
  # Rotate so the peak (originally at index `mid`) lands at index 1
  bell[c(mid:length(x), 1:(mid - 1))]
}
###############################################################################################
# Function to simulate caribou dynamics within a season
###############################################################################################
# Simulate within-season caribou-parasite dynamics on a circular (torus) 1-D
# migration track, stepping the full state matrix forward with Euler steps.
# Returns a 3-D array V indexed [state variable, space, time step].
#
# NOTE(review): relies on objects defined elsewhere in the file/session:
# timeDat, dt, dx, x (spatial grid), propFemale, and the helpers calcParams(),
# numCalves() and partial_t.Bou() -- confirm all are in scope before calling.
simBou <- function(
initBou, # Initial conditions for all x for 14 variables
temp, # matrix of temperatures for each day and location (dimension 365 x 1135)
ppnInhibit, # the proportion of larvae undergoing arrested development, between 0 and 1
transmission = "base", # the transmission coefficient (beta) can be three levels: base, low, or high
migSpeed = 14, # migration speed of caribou in km/day (can be zero for simulations without migration)
OctP = NULL # the adult parasite burden in October; if supplied then this affects the pregnancy rate of cows in year 1 of the simulation (for use when carrying on from previous sims)
){
# If only one value of ppnInhibit is supplied, then use that
# Otherwise use daily estimate in calcParams below
if(length(ppnInhibit) == 1) ppnInhibit <- rep(ppnInhibit, 365)
# Breeding date, when animals move up a class, is June 7 (DOY = 158)
breedDOY <- as.numeric(strftime(as.Date("1985-06-07"), "%j"))
# L4 resume development at the start of spring migration
# Hoar et al. 2012 show resumption in late March
L4startDOY <- as.numeric(strftime(as.Date("1985-04-20"), "%j"))
# Advection speed for each variable: number of grid spaces moved
# NOTE(review): u is used below as an integer index shift, so dt and dx must
# be chosen such that migSpeed * dt / dx is a whole number -- confirm upstream.
u <- migSpeed * dt / dx
nt <- dim(timeDat)[1]
# # 1) Set up matrices to store solutions every d days
# d <- 1
# ntKeep <- floor(dim(timeDat)[1]/d)
# nKeep <- seq(1, nt, d)
# Full solution array: every state variable at every grid cell and time step
V <- array(NA, dim = c(dim(initBou)[1], length(x), nt), dimnames = list(rownames(initBou), x, paste(timeDat$year, timeDat$time, sep="-")))
V[, , 1] <- initBou
# Zero matrix used with pmax() below to clip negative densities to zero
V0 <- array(0, dim = c(dim(initBou)[1], length(x)), dimnames = list(rownames(initBou), x))
# 2) Run through each timestep
for(n in 1:(nt-1)){
# Set parameters based on DOY
params.n <- calcParams(
DOY = timeDat$DOY[n],
temp = temp[timeDat$DOY[n], ],
ppnInhibit = ppnInhibit[timeDat$DOY[n]],
transmission = transmission)
# Calculate boundary conditions: torus for circular migration
Vn <- V[, , n]
Vnp1 <- Vn
# Set L_uptake to zero, since we want to record the instantaneous rate and not the accumulation
Vnp1['L_uptake', ] <- 0
# Spatial advection (upstream differencing) for moving stages
# Implemented as a circular shift of each moving row by u grid cells.
if(u > 0){
for(j in match(c("calf_mov", "yearling_mov", "adult_mov", "L4_mov", "L4A_mov", "P_mov"), rownames(initBou))){
Vnp1[j, ] <- Vn[j, c(c((length(x) - u + 1) : length(x)), 1:(length(x) - u))]
}
}
#---------------------------------------------------------------------------
# If breeding date, move caribou up and add calves
# *** we're going to get weird things happening if the population doesn't mix...
# ONLY stationary cows have calves, based on average parasite abundance previous October among all
if(round(timeDat$time[n], 2) == round(breedDOY, 2)){
# cat("breeding")
Vn.breed <- Vnp1
# NOTE(review): 240/dt steps back is ~240 days before June breeding, i.e.
# roughly the previous October -- confirm this intended lag.
if((n - 240/dt) < 0){ # for the first year of the simulation
if(length(OctP) == 0){
P_mean <- 0 # If not supplied, assume zero parasite burden
} else {
P_mean <- OctP
}
} else { # for next years
# adult_stat.nonzero <- which(V['adult_stat', , n - 240/dt] > 0)
# adult_mov.nonzero <- which(V['adult_mov', , n - 240/dt] > 0)
#
# # Mean parasite burden across all hosts = #parasites/#hosts
# P_mean <- sum(c(V['P_stat', adult_stat.nonzero, n - 240/dt], V['P_mov', adult_mov.nonzero, n - 240/dt]))/sum(c(V['adult_stat', adult_stat.nonzero, n - 240/dt], V['adult_mov', adult_mov.nonzero, n - 240/dt]))
P_mean <- sum(c(V['P_stat', , n - 240/dt], V['P_mov', , n - 240/dt]))/sum(c(V['adult_stat', , n - 240/dt], V['adult_mov', , n - 240/dt]))
}
newCalves <- numCalves(
P_mean = P_mean,
numFemales = (V['adult_stat', , n] + V['adult_mov', , n]) * propFemale,
pCalf0 = 0.8)
# # All calves start out in stat category
Vn.breed['calf_stat', ] <- newCalves
Vn.breed['calf_mov', ] <- rep(0, length(x))
# Yearlings and adults stay in respective categories
Vn.breed['yearling_stat', ] <- Vnp1['calf_stat', ]
Vn.breed['yearling_mov', ] <- Vnp1['calf_mov', ]
Vn.breed['adult_stat', ] <- Vnp1['adult_stat', ] + Vnp1['yearling_stat', ]
Vn.breed['adult_mov', ] <- Vnp1['adult_mov', ] + Vnp1['yearling_mov', ]
# Replace Vnp1 with updated matrix
Vnp1 <- Vn.breed
} # end if breed
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# If start of spring migration, L4 resume development
# (arrested L4A larvae are folded back into the developing L4 pool)
if(round(timeDat$time[n], 2) == round(L4startDOY, 2)){
# cat("L4 development resuming")
Vn.start <- Vnp1
Vn.start['L4_stat', ] <- Vnp1['L4_stat', ] + Vnp1['L4A_stat', ]
Vn.start['L4A_stat', ] <- rep(0, length(x))
Vn.start['L4_mov', ] <- Vnp1['L4_mov', ] + Vnp1['L4A_mov', ]
Vn.start['L4A_mov', ] <- rep(0, length(x))
Vnp1 <- Vn.start
}
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Temporal dynamics (Euler's formula)
# pmax() against V0 clips any negative densities produced by the step to zero
k1 <- partial_t.Bou(y = Vnp1, p = params.n)
V[, , n + 1] <- pmax(V0, Vnp1 + dt * k1)
# # Temporal dynamics (4th order Runge Kutta)
# k1 <- partial_t(Vnp1, params)
# k2 <- partial_t(Vnp1 + grid$dt / 2 * k1, params)
# k3 <- partial_t(Vnp1 + grid$dt / 2 * k2, params)
# k4 <- partial_t(Vnp1 + grid$dt * k3, params)
#
# V[, , n + 1] <- Vnp1 + grid$dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)#, 0) # Added in max to avoid negative values (Mar 14, 2019)
# if(is.element(n, nKeep) == TRUE) V[, , n] <- Vn
# if(timeDat$DOY[n] == 365 & timeDat$time[n] == timeDat$DOY[n]) cat(paste("Year", timeDat$year[n], "complete\n"))
} #end all timesteps n
return(V)
}
###############################################################################
# Plot output
###############################################################################
# Plot the spatial state of the simulation at a single time step n in three
# stacked panels: a) host densities by class (moving = solid, stationary =
# dotted), b) mean adult parasite burden per host, and c) free-living larval
# densities (L3 in black, L0 in the winter season colour).
#
# V is the 3-D array returned by simBou(); Nrange/Prange/Lrange optionally fix
# the y-axis limits (defaults: the range at time step n).
# NOTE(review): uses globals x, timeDat and seasonCols -- confirm in scope.
plot.timestep <- function(V, n, Nrange = NA, Prange = NA, Lrange = NA){
# Nrange <- range(V[c('adult_mov', "adult_stat"), , ]
if(is.na(Nrange[1]) == TRUE) Nrange <- range(V[c('adult_mov', "adult_stat"), , n])
if(is.na(Prange[1]) == TRUE) Prange <- range(V[c('P_mov', "P_stat"), , n])
if(is.na(Lrange[1]) == TRUE) Lrange <- range(V[c('L0', "L3"), , n])
par(mfrow = c(3,1), mar = c(2,5,1,4), oma = c(2, 0, 2, 0))
# Panel a: adults (black), calves and yearlings in season colours
plot(x, V['adult_mov', , n], "l", ylim = Nrange, bty = "l", xaxt="n", yaxt = "n", ylab = "", lwd = 1.5)
axis(side = 1, labels = FALSE)
yTick <- pretty(Nrange)
# Axis labels in thousands of animals per km
axis(side = 2, las = 1, at = yTick, labels = yTick/1000)
lines(x, V['adult_stat', , n], lty = 3, lwd = 1.5)
lines(x, V['calf_mov', , n], col = seasonCols['Calving'])
lines(x, V['calf_stat', , n], lty = 3, col = seasonCols['Calving'])
lines(x, V['yearling_mov', , n], col = seasonCols['Fall'])
lines(x, V['yearling_stat', , n], lty = 3, col = seasonCols['Fall'])
mtext(side = 3, adj = 0, "a) Host population density (1000s per km)")
# Panel b: mean adult parasite burden per host
plot(x, V['P_mov', , n], "l", ylim = Prange, bty = "l", ylab = "", lwd = 1.5, las = 1, xaxt="n")
axis(side = 1, labels = FALSE)
lines(x, V['P_stat', , n], lty = 3, lwd = 1.5)
mtext(side = 3, adj = 0, "b) Mean parasite burden per host")
# Panel c: free-living larvae on pasture
plot(x, V['L3', , n], "l", ylim = Lrange, bty = "l", ylab = "", lwd = 1.5, las = 1)#, yaxt="n")
# yTick <- pretty(range(V[c('L0', "L3"), , ], na.rm = TRUE))
# axis(side = 2, las = 1, at = yTick, labels = yTick*10^-10)
lines(x, V['L0', , n], lwd = 1.5, col = seasonCols['Winter'])
mtext(side = 3, adj = 0, "c) Density of free-living parasite larvae (per km)")
# Convert (simulation year, DOY) into a calendar date for the figure caption;
# 1984 is an arbitrary base year used only for formatting.
D <- as.Date(paste((1984 + timeDat$year[n]), timeDat$DOY[n], sep="-"), format = "%Y-%j")
mtext(side = 3, adj = 1, line = -1, outer = TRUE, paste("Year ", timeDat$year[n], "\n", strftime(D, format = "%b %d"), "\n timestep ", n, sep =""))
}
|
1bf8f9300621356835cc799eafb94d67f22328cc
|
5e94b522bd93166db71ffe6b214edb3daa706908
|
/Materiell fra tidl semestre/scripts/Seminar2.R
|
762abab1f6f97d789fae051e2dec5ca6ebe15ce2
|
[] |
no_license
|
liserodland/stv4020aR
|
c3411a6a61f0e1dcdab1f6d7577ae707b86100d0
|
78cd1fade56c05c55cc86dd9adcb72f77a9884d3
|
refs/heads/master
| 2023-08-05T04:26:54.567357
| 2021-09-10T10:42:10
| 2021-09-10T10:42:10
| 288,154,313
| 0
| 7
| null | 2020-08-17T10:47:59
| 2020-08-17T10:47:58
| null |
UTF-8
|
R
| false
| false
| 4,379
|
r
|
Seminar2.R
|
#################################
#### R seminar 2 ####
#### STV 4020A ####
#################################
## In this seminar we will go through:
## 1. organising R scripts
## 2. importing data
## 3. regression analysis
## The main focus will be on working with regression
## Remove objects from R:
## NOTE(review): rm(list=ls()) clears the user workspace; tolerated in a
## teaching script, but avoid it in reusable code.
rm(list=ls())
## Set the working directory - not needed if you are working from a project
#setwd("C:/Users/Navn/R/der/du/vil/jobbe/fra")
## Install packages (remove '#' and run if a package is not installed)
# install.packages("tidyverse")
# install.packages("moments")
# install.packages("stargazer")
# install.packages("xtable")
# install.packages("texreg")
#### Load packages:
library(tidyverse)
library(moments)
library(stargazer)
library(xtable)
library(texreg)
#### Heading 1 #####
## Briefly state what you are going to do/produce in this section
2+2 # here I start coding
### More tips:
# 1. Start a new section with a comment where you write what you will produce in the section;
# try to break the task down into as many small steps as you can. This often makes it
# easier to find an approach that works.
# 2. Test that new code works as you go; remove code that is not needed to solve the tasks
# you want to solve with your script (feel free to write in a separate R script used as a
# scratch pad if you are fumbling in the dark). Try to run through larger segments of the code once in a while.
### This organisation helps you and others find your way around your script,
### and helps you discover errors.
##############################
#### Linear regression OLS ####
##############################
### Syntax
#To run a linear regression in R, we use the function `lm()`, which has the following syntax:
#lm(dependent.variable ~ independent.variable, data=my_dataset)
# on a Mac you get ~ with alt + k + space
# Let's look at an example with the `aid` dataset we have used so far:
aid <- read_csv("https://raw.githubusercontent.com/langoergen/stv4020aR/master/data/aid.csv")
# Create the policy and region variables again, same code as in seminar 1:
aid <- aid %>% # same code as before, but now the data are overwritten so the variables are added - do this after you have tested that the code works
mutate(policy = elrsacw + elrinfl + elrbb,
policy2 = elrsacw*elrinfl*elrbb,
region = ifelse(elrssa == 1, "Sub-Saharan Africa",
ifelse(elrcentam == 1, "Central America",
ifelse(elreasia == 1, "East Asia", "Other"))))
m1 <- lm(elrgdpg ~ elraid, data = aid) # save m1 as an object
summary(m1) # look at the results with summary()
class(m1) # Note that we now have an object of a new class!
str(m1) # Gives us information about what the object contains.
### Multivariate regression
# We add more independent variables with `+`.
summary(m2 <- lm(elrgdpg ~ elraid + policy + region, data = aid))
# Here we combine summary() and creation of the model object on the same line
### Interactions
#We add an interaction by putting `*` between two variables. The individual
#regression coefficients of the variables we specify an interaction between are
#added automatically.
summary(m3 <- lm(elrgdpg ~ elraid*policy + region, data = aid))
### Squared terms and other recodings
#We can add squared terms or other recodings of variables in our regression
# equations. A squared term is added with `I(indep.var^2)`. Below I have added a `log()`
#recoding, an `as.factor()` recoding and a squared term. Note that you must add the
# first-order term separately when you add a squared term. If a
#variable transformation requires more than a simple function, it is best to create a new
#variable in the dataset. For squared terms/higher-order polynomials, however, the
# transformation should be done with I() inside the lm() function - this makes plotting easier.
# NOTE(review): log(elrgdpg) puts (a transform of) the dependent variable on the
# right-hand side of the formula below -- confirm this is intentional.
summary(m4 <- lm(elrgdpg ~ log(elrgdpg) + elricrge + I(elricrge^2) + region + elraid*policy + as_factor(period), data = aid, na.action = "na.exclude"))
#**Exercise:** what is the expected effect of aid at the median value of aid,
# and at the maximum value of aid, according to the regression above?
## For the suggested solutions to the exercises from seminar 1, see the end of
## today's seminar document!
|
fd4c0669dff73feb56bd4d192b66ef9dfb44eab1
|
627111cb72b2d1f4df38e1170b8e556b19395a44
|
/ribiosPlot/man/pcLabels-PCAScoreMatrix-method.Rd
|
2211a34ddeb619b1b3fca8d8c9297276d2b8b14c
|
[] |
no_license
|
RCBiczok/ribios
|
c387a4fa3daec8bbdaf86fc7d233afdf1835d1da
|
8fe6eb6b33e6065fa710906d0ee4a7c1a8540a74
|
refs/heads/main
| 2021-06-18T13:43:33.184416
| 2018-11-12T23:06:33
| 2018-11-12T23:06:33
| 133,314,150
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 508
|
rd
|
pcLabels-PCAScoreMatrix-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllMethods.R
\docType{methods}
\name{pcLabels,PCAScoreMatrix-method}
\alias{pcLabels,PCAScoreMatrix-method}
\title{Labels of principal components}
\usage{
\S4method{pcLabels}{PCAScoreMatrix}(object, variant = c("compact",
"full"))
}
\arguments{
\item{object}{A PCAScoreMatrix object}
\item{variant}{Character, either \code{compact} or \code{full}, to specify the label variant}
}
\description{
Labels of principal components
}
|
64313975f2b9c2516a0fc6fabf366e8a600296f6
|
6125f56ef5651c81bfbe85eebccc05c81f49398c
|
/R/rao_chr22_rep.R
|
6e067dc2fba56b30b9ab7390519641e91e9c3e78
|
[
"MIT"
] |
permissive
|
dozmorovlab/TADCompare
|
87612572160b43d754b1444f0357135d2d3965ed
|
f1b61b789eb6717ce9397cbe059b9f0f721d2bb1
|
refs/heads/master
| 2022-05-13T20:31:58.255606
| 2022-04-25T01:13:55
| 2022-04-25T01:13:55
| 207,209,435
| 19
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 321
|
r
|
rao_chr22_rep.R
|
#' Chromosome 22 combined intrachromosomal replicate contact matrix from
#' Rao et al. 2014.
#'
#' A 704x704 contact matrix from the GM12878 cell line (50kb Resolution)
#'
#' @format A data frame with 704 rows and 704 variables:
#' @source \url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE63525}
"rao_chr22_rep"
|
cc1c4ee8442ec57a53339839be83eff5e6b199c6
|
ecb1d037e50203f2e5e79da0d845598c5b6cdd99
|
/man/optimization_lambda.Rd
|
6aa1d1885f0ab67eb086982d2ef167502ee8da36
|
[] |
no_license
|
Z1chenZhao/bis557
|
59669f343f9764ba6aa4c132dfefe1d4eba0b8ab
|
b9c1f1d2fc60ec23d2ec18f274fe743769477ab3
|
refs/heads/master
| 2023-02-01T00:36:18.885873
| 2020-12-18T19:45:21
| 2020-12-18T19:45:21
| 296,169,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 676
|
rd
|
optimization_lambda.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimization_lambda.R
\name{optimization_lambda}
\alias{optimization_lambda}
\title{optimization_lambda}
\usage{
optimization_lambda(
form,
dat,
folds = 10,
lambdas = seq(0, 1, 0.1),
contrasts = NULL
)
}
\arguments{
\item{form}{A formula with the format of "Y ~ .".}
\item{dat}{A dataframe.}
\item{folds}{The number of folds to cross validate}
\item{lambdas}{A list of the ridge penalty term lambda.}
\item{contrasts}{A list of contrasts.}
}
\value{
The ridge regression parameter lambda that minimizes mse.
}
\description{
Optimizing the ridge parameter lambda by cross validation
}
|
654db3bee4cb903e74dc819429631ceb46594e93
|
d98b7d973db4770b573ffcf2e61a37ffa74ecb21
|
/walmart/stack_models/layer2_stack_v7.R
|
451d633245b9e72e6a822600d9d3e875e687c474
|
[] |
no_license
|
brandenkmurray/kaggle
|
a27a85f172c5ecd58d9fc58219b3e31400be597e
|
30924c37e15772b6e7125b341931d7c775b07d0b
|
refs/heads/master
| 2021-01-10T14:18:07.768888
| 2017-08-29T00:20:48
| 2017-08-29T00:20:48
| 44,857,026
| 30
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,033
|
r
|
layer2_stack_v7.R
|
# Layer-2 stacking model (v7): trains an XGBoost meta-learner on the
# out-of-fold (CV) predictions of four layer-1 models (xgb2, xgb3, xgb7,
# kknn1), then scores the test set using the corresponding layer-1 models
# trained on the full training data.
# NOTE(review): assumes xgboost and data.table are loaded by an earlier script,
# and that train.csv / sample_submission.csv and the ./stack_models/ prediction
# files exist relative to the working directory.
# Load CV predictions from models
# xgb1preds <- read.csv("./stack_models/cvPreds_xgb1.csv")
xgb2preds <- read.csv("./stack_models/cvPreds_xgb2.csv")
xgb3preds <- read.csv("./stack_models/cvPreds_xgb3.csv")
xgb7preds <- read.csv("./stack_models/cvPreds_xgb7.csv")
kknn1preds <- read.csv("./stack_models/cvPreds_kknn1.csv")
# Edit and bind predictions
# Drop the shared key column before column-binding the class probabilities.
# xgb1preds$VisitNumber <- NULL
xgb2preds$VisitNumber <- NULL
xgb3preds$VisitNumber <- NULL
xgb7preds$VisitNumber <- NULL
kknn1preds$VisitNumber <- NULL
lay1preds <- cbind(xgb2preds, xgb3preds, xgb7preds, kknn1preds)
# Add the class column to the dataset
# Map TripType labels to 0-based class indices (0..37) as xgboost requires.
t1 <- data.table(read.csv("train.csv"))
tripClasses <- data.frame(TripType=sort(unique(t1$TripType)), class=seq(0,37))
t1 <- merge(t1, tripClasses, by="TripType")
t1 <- t1[order(t1$VisitNumber),]
TripType <- t1$TripType
# Collapse to one row per visit (the count itself is unused; this is a
# convenient way to de-duplicate while keeping the class label).
t1 <- t1[,length(DepartmentDescription),by=list(VisitNumber,class)]
lay1preds <- data.table(cbind(class=t1$class, lay1preds))
# Create a validation set
# Hold out 2000 random visits for early stopping.
set.seed(1234)
h <- sample(nrow(lay1preds), 2000)
# Create DMatrices
dval <- xgb.DMatrix(data=data.matrix(lay1preds[h,2:ncol(lay1preds), with=FALSE]),label=data.matrix(lay1preds[h,"class", with=FALSE]))
dtrain <- xgb.DMatrix(data=data.matrix(lay1preds[-h,2:ncol(lay1preds), with=FALSE]),label=data.matrix(lay1preds[-h,"class", with=FALSE]))
watchlist <- list(val=dval,train=dtrain)
# Train Model
param <- list(objective="multi:softprob",
eval_metric="mlogloss",
num_class=38,
eta = .05,
max_depth=3,
min_child_weight=1,
subsample=1,
colsample_bytree=1
)
set.seed(201510)
(tme <- Sys.time())
# NOTE(review): print.every.n / early.stop.round are legacy xgboost argument
# names (newer releases use print_every_n / early_stopping_rounds) -- pin the
# xgboost version this script was written against.
xgbLay2_v7 <- xgb.train(data = dtrain,
params = param,
nrounds = 6000,
maximize=FALSE,
watchlist=watchlist,
print.every.n = 5,
early.stop.round=50)
Sys.time() - tme
save(xgbLay2_v7, file="./stack_models/xgbLay2_v7.rda")
# Load Test Set predictions from models trained on the entire training set
xgb2fullpreds <- read.csv("./stack_models/testPreds_xgb2full.csv")
xgb3fullpreds <- read.csv("./stack_models/testPreds_xgb3full.csv")
xgb7fullpreds <- read.csv("./stack_models/testPreds_xgb7full.csv")
kknn1fullpreds <- read.csv("./stack_models/testPreds_kknn1full.csv")
# Edit and bind test set predictions
xgb2fullpreds$VisitNumber <- NULL
xgb3fullpreds$VisitNumber <- NULL
xgb7fullpreds$VisitNumber <- NULL
kknn1fullpreds$VisitNumber <- NULL
lay1fullpreds <- cbind(xgb2fullpreds, xgb3fullpreds, xgb7fullpreds, kknn1fullpreds)
# Predict the test set using the XGBOOST stacked model
# predict() returns a flat vector of 38 probabilities per row; reshape to a
# (n x 38) matrix of per-class probabilities.
lay2preds <- predict(xgbLay2_v7, newdata=data.matrix(lay1fullpreds))
preds <- data.frame(t(matrix(lay2preds, nrow=38, ncol=length(lay2preds)/38)))
samp <- read.csv('sample_submission.csv')
cnames <- names(samp)[2:ncol(samp)]
names(preds) <- cnames
submission <- data.frame(VisitNumber=samp$VisitNumber, preds)
write.csv(submission, "./stack_models/xgbLay2_v7_preds.csv", row.names=FALSE)
|
a8b75679c536505c5a56029c3bf0ad327ad9a8c3
|
fc70b4b8f15ec7062ad57714ad81441015b559b8
|
/inst/app/server/optimisation/target.R
|
9f6c74ce735633a08b4d6fb31362d28ebbe213f8
|
[] |
no_license
|
jackolney/CascadeDashboard
|
02aa85dc78e6ab916ba6e01328b45f483d81b0c0
|
25e29abd233ba365501900c800f81ae0beadc0c6
|
refs/heads/master
| 2020-07-07T05:58:55.039907
| 2017-04-04T07:58:23
| 2017-04-04T07:58:26
| 66,279,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,696
|
r
|
target.R
|
# Shiny server logic for the 90-90-90 optimisation target controls: three
# sliders (diagnosed / on-treatment / virally suppressed), matching value
# boxes, cumulative value boxes, a reactive overall target (the product of
# the three proportions), and a reset button.
# NOTE(review): assumes shiny/shinydashboard, shinyjs and scales are loaded
# by the enclosing application.
# custom will be the editable target that will re-generate
# optimisation results
custom <- reactiveValues(target = 0.9)
# slider UI
output$UI_909090_1_slider <- renderUI({
sliderInput(inputId = "slider_909090_1", label = NULL, min = 0, max = 1, value = 0.9, step = 0.01, round = FALSE, ticks = FALSE, width = NULL)
})
output$UI_909090_2_slider <- renderUI({
sliderInput(inputId = "slider_909090_2", label = NULL, min = 0, max = 1, value = 0.9, step = 0.01, round = FALSE, ticks = FALSE, width = NULL)
})
output$UI_909090_3_slider <- renderUI({
sliderInput(inputId = "slider_909090_3", label = NULL, min = 0, max = 1, value = 0.9, step = 0.01, round = FALSE, ticks = FALSE, width = NULL)
})
# valueBox UI
# Each red box echoes one slider as a percentage.
output$VB_909090_1 <- renderValueBox({
valueBox(value = scales::percent(input$slider_909090_1), subtitle = "Diagnosed / PLHIV", color = "red", width = NULL, icon = icon("bullseye", lib = "font-awesome"))
})
output$VB_909090_2 <- renderValueBox({
valueBox(value = scales::percent(input$slider_909090_2), subtitle = "On Treatment / Diagnosed", color = "red", width = NULL, icon = icon("bullseye", lib = "font-awesome"))
})
output$VB_909090_3 <- renderValueBox({
valueBox(value = scales::percent(input$slider_909090_3), subtitle = "Virally Suppressed / On Treatment", color = "red", width = NULL, icon = icon("bullseye", lib = "font-awesome"))
})
# cumulative valueBox UI
# Orange boxes show cumulative proportions of all PLHIV (products of sliders).
output$VB_cum_909090_1 <- renderValueBox({
valueBox(value = scales::percent(input$slider_909090_1),
subtitle = "Diagnosed / PLHIV ", color = "orange", width = NULL, icon = icon("bullseye", lib = "font-awesome"))
})
output$VB_cum_909090_2 <- renderValueBox({
valueBox(value = scales::percent(round(input$slider_909090_1 * input$slider_909090_2, digits = 2)),
subtitle = "On Treatment / PLHIV", color = "orange", width = NULL, icon = icon("bullseye", lib = "font-awesome"))
})
output$VB_cum_909090_3 <- renderValueBox({
valueBox(value = scales::percent(round(input$slider_909090_1 * input$slider_909090_2 * input$slider_909090_3, digits = 2)),
subtitle = "Virally Suppressed / PLHIV", color = "orange", width = NULL, icon = icon("bullseye", lib = "font-awesome"))
})
# Observe function on any change to the sliders, and update custom$target
observe({
# dependency on sliders
input$slider_909090_1
input$slider_909090_2
input$slider_909090_3
# update reactiveValues
custom$target <- input$slider_909090_1 * input$slider_909090_2 * input$slider_909090_3
})
# reset targets button
# shinyjs::reset restores each slider to its initial (0.9) value.
observeEvent(input$resetTarget, {
shinyjs::reset("slider_909090_1")
shinyjs::reset("slider_909090_2")
shinyjs::reset("slider_909090_3")
})
|
ba57d41ad411d0739057a685f95f8282c59adfd0
|
7f25ad3bbea7b9c152911735c4b20666f9433b3f
|
/devtoolsintegrateit/integrateIt/man/Simpson.Rd
|
781cf3d99963b58110e7418a4e561d99b99168f8
|
[] |
no_license
|
benjaminschneider212/PS5
|
44acb31c25ee5b359bc265b82aa7e5f19a075826
|
e2f7b2aade542ef2a9307f88c93cf509dd283ebe
|
refs/heads/master
| 2021-01-25T12:36:24.375006
| 2018-03-16T18:25:57
| 2018-03-16T18:25:57
| 123,482,467
| 0
| 0
| null | 2018-03-01T19:28:13
| 2018-03-01T19:28:12
| null |
UTF-8
|
R
| false
| true
| 568
|
rd
|
Simpson.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Simpson-class.R
\docType{class}
\name{Simpson-class}
\alias{Simpson-class}
\alias{Simpson}
\title{An object of class Simpson containing an integration result and two vectors}
\description{
Object of class \code{Simpson}
}
\details{
An object of the class `Simpson' has the following slots:
\itemize{
\item \code{result} The result of the integral
\item \code{x} a vector of values
\item \code{y} a vector of values of same dimensionality as \code{x}
}
}
\author{
Jacob M. Montgomery: \email{jacob.montgomery@wustl.edu}
}
|
6802acfbf7a93e6a45384d8fb77e6b82f28f6682
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GLDEX/examples/fun.nclass.e.Rd.R
|
ad1c54320d1362bd33464593b253a3725a2b888f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 293
|
r
|
fun.nclass.e.Rd.R
|
# Auto-extracted example for GLDEX::fun.nclass.e (from the package's .Rd file).
library(GLDEX)
### Name: fun.nclass.e
### Title: Estimates the number of classes or bins to smooth over in the
### discretised method of fitting generalised lambda distribution to
### data.
### Aliases: fun.nclass.e
### Keywords: univar
### ** Examples
# Estimate the bin count for 100 random draws from Normal(mean = 3, sd = 2)
fun.nclass.e(rnorm(100,3,2))
|
d435c794823e3821906bef1be9d54b4363e16dc5
|
47ad4ea71e27ea8ed368174ef1ae2e8811eed71f
|
/R/tseg.R
|
0b6bda1b2562b9f1cd3e20d2290604f99fc8b5f6
|
[] |
no_license
|
cran/artfima
|
aa85e2dff332d12f97cb8a294ec59479082460e5
|
7466a702ce4cf2c9b38414d49bebcc097fbd04b1
|
refs/heads/master
| 2021-01-21T04:41:34.594119
| 2016-07-14T00:28:43
| 2016-07-14T00:28:43
| 48,076,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,283
|
r
|
tseg.R
|
# Generate a simulated benchmark time series of length n.
#
# 'which' selects one of eight models from the literature: Box-Jenkins
# AR(1)/AR(2)/AR(3) ("BJAR1"-"BJAR3"), a Percival & Walden AR(4) ("PWAR4"),
# a Box-Jenkins ARMA(1,1) ("BJARMA11"), the McLeod-Hipel-Lennox AR(9)
# ("MHAR9"), and two ARTFIMA processes ("NileMin", "SB32") that require
# artsim() from this package. Returns a plain numeric vector (any time-series
# attributes are stripped by as.vector()).
tseg <- function(n, which=c("BJAR2","BJAR1", "BJAR3", "PWAR4",
"BJARMA11", "MHAR9", "NileMin", "SB32")) {
which <- match.arg(which)
sim <- if (which == "BJAR1") {
# AR(1); constant term converted to the process mean via c/(1 - phi)
(1.17/(1-0.87)) +
arima.sim(model=list(ar=0.87), n=n, sd=sqrt(0.09))
} else if (which == "BJAR2") {
# AR(2), innovation variance 227.8
(14.35/(1-sum(c(1.42, -0.73)))) +
arima.sim(model=list(ar=c(1.42, -0.73)), n=n, sd=sqrt(227.8))
} else if (which == "BJAR3") {
(11.31/(1-sum(c(1.57, -1.02, 0.21)))) +
arima.sim(model=list(ar=c(1.57, -1.02, 0.21)), n=n, sd=sqrt(218.1))
} else if (which == "PWAR4") {
# Percival and Walden, p.45
arima.sim(model=list(ar=c(2.7607,-3.8106,2.6535,-0.9238)), n=n)
} else if (which == "BJARMA11") {
1.45/(1-0.92) +
arima.sim(model=list(ar=0.92, ma=-0.58), n=n, sd=sqrt(0.097), n.start=1000)
} else if (which == "MHAR9") {
# McLeod, Hipel & Lennox (1978, p.581)
11.17 +
arima.sim(model=list(ar=c(1.2434, -0.5192, 0,0,0,0,0,0, 0.1954)), n=n, sd=2.0569, n.start=1000)
} else if (which == "NileMin") {
11.48 + artsim(n, d=0.393, sigma2=0.4894)
} else {
# "SB32"
-0.5559 + artsim(n, d=5/6, lambda=0.045, sigma2 = 3.573)
}
as.vector(sim)
}
|
21df373f19696f03a6ef4f15201ec65dbcf7b8c6
|
c3edcbba1d73eda7aeca6a9459911c8ead348a89
|
/Logistic regression models - ARMITX data.R
|
240afed7e48e507d7a4dbdfa9f915cbe932d447d
|
[
"MIT"
] |
permissive
|
schrawj/GOBACK
|
33f200159531008443f5df140a60c8a6b2a1f454
|
c6aafcf5d874e44936a9bb0c5bf285f9adf7ae09
|
refs/heads/master
| 2023-06-22T03:44:19.194000
| 2023-06-13T14:18:51
| 2023-06-13T14:18:51
| 124,277,679
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,211
|
r
|
Logistic regression models - ARMITX data.R
|
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
#' GOBACK logistic regression modeling
#'
#' Discussed initial modeling approach at meeting on 10/23/2017.
#'
#' Will generate a table of logistic regression models for all potential
#' cancer x birth defect associations with at least 5 comorbid cases,
#' and heatmaps based on the one in the WA state paper.
#'
#' Two sets of tables: one for kids with chromosomal birth defects, one
#' for kids with non-chromosomal birth defects.
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
# prep environment --------------------------------------------------------
# NOTE(review): prefer library(dplyr); require() returns FALSE instead of
# erroring when the package is missing, so a failure would surface later.
require(dplyr)
#' For desktop
# NOTE(review): setwd() with an absolute path makes this script
# machine-specific; consider a project-relative path.
setwd('Z:/Jeremy/GOBACK/Datasets/Combined Arkansas Michigan and Texas/')
# Logistic regression: cancer in children w/o chromosomal defects ---------
load('./ar.mi.tx.nochromosomaldefects.v20171025.1.rdata')
# For each non-chromosomal defect column (22:103): find the cancers with at
# least 5 comorbid cases, fit a univariate logistic regression for each, and
# append the odds ratio and Wald 95% CI to the results CSV.
for (i in 22:103){
tmp <- table(armitx.nochrom[,i], armitx.nochrom$cancer1)
tmp <- which(tmp[2, ] >= 5)
# NOTE(review): the + 117 offset maps cancer1 table columns onto the
# corresponding indicator columns of armitx.nochrom (assumed to begin at
# column 118) -- confirm if the data layout ever changes.
tmp <- tmp + 117
if (length(tmp) > 0){
for (j in tmp){
z <- names(armitx.nochrom[i])
y <- names(armitx.nochrom[j])
x <- glm(armitx.nochrom[,j] ~ armitx.nochrom[,i], data = armitx.nochrom, family = binomial(link = 'logit'))
x.summary <- summary(x)$coefficients
tab <- as.numeric(table(armitx.nochrom[,i], armitx.nochrom[,j])[2,2])
# Exponentiate the defect coefficient and its Wald 95% CI to the OR scale
estimates <- data.frame(defect = z, cancer = y, OR = exp(x.summary[2,1]),
ci.lower = exp(x.summary[2,1]-(1.96*x.summary[2,2])),
ci.upper = exp(x.summary[2,1]+(1.96*x.summary[2,2])),
num.pos.events = tab)
write.table(estimates, file = 'C:/Users/schraw/Desktop/goback models/BD-CC associations.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
}
else{
# Log defects with no qualifying cancers to a text file for auditing
sink(file = 'C:/Users/schraw/Desktop/goback models/list of defects with no models.txt', append = TRUE)
print(paste('There were no cancers with 5 or more comorbid cases for',names(armitx.nochrom[i])))
sink()
}
}
rm(estimates, x.summary, i, j, tab, tmp, x, y, z)
gc()
#' Models for individual non-chromosomal birth defects and [cancer].any variables.
#' For each defect column (22:103) and each [cancer].any column (148:157), fit a
#' univariate logistic regression and append the OR and Wald 95% CI to the
#' results CSV, provided there are at least 5 comorbid cases.
#' BUG FIX: the threshold was '> 5' (i.e. 6+ cases), inconsistent with the
#' '>= 5' criterion used in the per-cancer loop above, with the "at least 5
#' comorbid cases" rule in the file header, and with this loop's own
#' "less than five" log message; changed to '>= 5'.
for (i in 22:103){
for (j in 148:157){
z <- names(armitx.nochrom[i])
y <- names(armitx.nochrom[j])
# Count of children positive for both the defect (row 2) and the cancer (col 2)
comorbid.cases <- table(armitx.nochrom[,i], armitx.nochrom[,j])[2,2]
if (comorbid.cases >= 5){
x <- glm(armitx.nochrom[,j] ~ armitx.nochrom[,i], data = armitx.nochrom, family = binomial(link='logit'))
x.summary <- summary(x)$coefficients
# Exponentiate the defect coefficient and its Wald 95% CI to the OR scale
estimates <- data.frame(defect = z, cancer = y, OR = exp(x.summary[2,1]),
ci.lower = exp(x.summary[2,1]-(1.96*x.summary[2,2])),
ci.upper = exp(x.summary[2,1]+(1.96*x.summary[2,2])),
num.pos.events = as.numeric(comorbid.cases))
write.table(estimates, file = 'C:/Users/schraw/Desktop/goback models/BD-CC associations.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
# Log skipped defect-cancer pairs for auditing
sink(file = 'C:/Users/schraw/Desktop/goback models/list of defects with no models.txt', append = TRUE)
print(paste('There were less than five comorbid instances of',z,'and',y))
sink()
}
}
}
# Clean up loop objects and free memory before the chromosomal-defect analyses
rm(armitx.nochrom, i, j, estimates, x, x.summary, y, z, comorbid.cases)
gc()
# Logistic regression: cancer in children w/chromosomal defects -----------
load('./ar.mi.tx.chromosomaldefects.v20171025.1.rdata')
#' Models for individual chromosomal birth defects and individual cancers
# Same approach as the non-chromosomal loop above, restricted to the
# chromosomal defect columns (104:111).
for (i in 104:111){
tmp <- table(armitx.chrom[,i], armitx.chrom$cancer1)
tmp <- which(tmp[2, ] >= 5)
# NOTE(review): + 117 assumes the cancer indicator columns begin at column
# 118 of armitx.chrom, mirroring the nochrom layout -- confirm.
tmp <- tmp + 117
if (length(tmp) > 0){
for (j in tmp){
z <- names(armitx.chrom[i])
y <- names(armitx.chrom[j])
x <- glm(armitx.chrom[,j] ~ armitx.chrom[,i], data = armitx.chrom, family = binomial(link = 'logit'))
x.summary <- summary(x)$coefficients
tab <- as.numeric(table(armitx.chrom[,i], armitx.chrom[,j])[2,2])
# Exponentiate the defect coefficient and its Wald 95% CI to the OR scale
estimates <- data.frame(defect = z, cancer = y, OR = exp(x.summary[2,1]),
ci.lower = exp(x.summary[2,1]-(1.96*x.summary[2,2])),
ci.upper = exp(x.summary[2,1]+(1.96*x.summary[2,2])),
num.pos.events = tab)
write.table(estimates, file = 'C:/Users/schraw/Desktop/goback models/BD-CC associations.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
}
else{
# Log defects with no qualifying cancers for auditing
sink(file = 'C:/Users/schraw/Desktop/goback models/list of defects with no models.txt', append = TRUE)
print(paste('There were no cancers with 5 or more comorbid cases for',names(armitx.chrom[i])))
sink()
}
}
rm(estimates, x.summary, i, j, tab, tmp, x, y, z)
gc()
#' Models for individual chromosomal birth defects and [cancer].any variables.
#' For each chromosomal defect column (104:111) and each [cancer].any column
#' (148:157), fit a univariate logistic regression and append the OR and Wald
#' 95% CI to the results CSV, provided there are at least 5 comorbid cases.
#' BUG FIX: the threshold was '> 5' (i.e. 6+ cases), inconsistent with the
#' '>= 5' criterion used in the per-cancer loops, with the "at least 5
#' comorbid cases" rule in the file header, and with this loop's own
#' "less than five" log message; changed to '>= 5'.
for (i in 104:111){
for (j in 148:157){
z <- names(armitx.chrom[i])
y <- names(armitx.chrom[j])
# Count of children positive for both the defect (row 2) and the cancer (col 2)
comorbid.cases <- table(armitx.chrom[,i], armitx.chrom[,j])[2,2]
if (comorbid.cases >= 5){
x <- glm(armitx.chrom[,j] ~ armitx.chrom[,i], data = armitx.chrom, family = binomial(link='logit'))
x.summary <- summary(x)$coefficients
# Exponentiate the defect coefficient and its Wald 95% CI to the OR scale
estimates <- data.frame(defect = z, cancer = y, OR = exp(x.summary[2,1]),
ci.lower = exp(x.summary[2,1]-(1.96*x.summary[2,2])),
ci.upper = exp(x.summary[2,1]+(1.96*x.summary[2,2])),
num.pos.events = as.numeric(comorbid.cases))
write.table(estimates, file = 'C:/Users/schraw/Desktop/goback models/BD-CC associations.csv', sep=',', append = TRUE,
row.names = FALSE, col.names = FALSE)
}
else{
# Log skipped defect-cancer pairs for auditing
sink(file = 'C:/Users/schraw/Desktop/goback models/list of defects with no models.txt', append = TRUE)
print(paste('There were less than five comorbid instances of',z,'and',y))
sink()
}
}
}
rm(armitx.chrom, estimates, x, x.summary, y, z, comorbid.cases)
gc()
# Model QC: Re-run a few models manually ----------------------------------
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
#' Some of these ORs are quite dramatic.
#'
#' Hopefully that reflects the biology of these associations.
#'
#' Check the diagnostic codes for some cancer and birth defects diagnoses
#' to make sure there are no errors in our variables. Just want to rule
#' out that there is some error in the input to the data.
#'-------------------------------------------------------------------------
#'-------------------------------------------------------------------------
# NOTE(review): armitx.nochrom was removed with rm() earlier in this script;
# reload the nochromosomaldefects .rdata before running this QC section.
model <- glm(armitx.nochrom$hepato ~ armitx.nochrom$atrialseptaldefect, data = armitx.nochrom, family = binomial(link = 'logit'))
summary(model)
model <- glm(armitx.nochrom$gct.any ~ armitx.nochrom$digestivesystem.other.major, data = armitx.nochrom, family = binomial(link = 'logit'))
summary(model)
model <- glm(armitx.nochrom$pns.any ~ armitx.nochrom$musculoskelsys.other.major, data = armitx.nochrom, family = binomial(link = 'logit'))
summary(model)
model <- glm(armitx.nochrom$all ~ armitx.nochrom$microcephalus, data = armitx.nochrom, family = binomial(link = 'logit'))
summary(model)
rm(model)
# Model QC: Verifying birth defects diagnoses -----------------------------
# Tabulate each defect indicator (including NAs) and write one row per defect
# to a CSV for manual review of missingness.
for (i in 22:112){
tmp <- table(armitx.nochrom[,i], useNA = 'always')
tmp <- data.frame(defect = names(armitx.nochrom[i]),
negative.for.def = tmp[1],
positive.for.def = tmp[2],
num.actually.na = tmp[3],
# NOTE(review): 479467 is a hard-coded cohort size -- confirm it
# equals nrow(armitx.nochrom).
num.should.be.na = 479467-(tmp[2]))
write.table(tmp, file = 'C:/Users/schraw/Desktop/goback models/number of missing observations by defect.csv', sep= ',',
row.names = FALSE, col.names = FALSE, append = TRUE)
}
rm(i, tmp)
ids <- select(armitx.nochrom, studyid)
#' Look through some of the original ICD codes in MI data and verify they match the number of
#' children DX'd with that anomaly.
load("Z:/Jeremy/GOBACK/Datasets/Michigan/mi.birthdefects.codes.rdata")
mi.bd$ebstein.code <- as.numeric(NA)
# NOTE(review): this outer loop iterates over the COLUMNS of mi.bd and never
# uses i; the inner ifelse() is already vectorised over all rows, so each
# outer pass repeats the same (idempotent) work -- the outer loop can likely
# be dropped.
for (i in mi.bd){
for (j in 110:133){
mi.bd$ebstein.code <- ifelse(is.na(mi.bd$ebstein.code) & round(mi.bd[,j], digits = 1) == 746.2, 1, mi.bd$ebstein.code)
}
}
# Compare the recomputed flag against the existing EbsteinAnomaly indicator
table(mi.bd$ebstein.code, useNA = 'ifany')
table(mi.bd$EbsteinAnomaly, useNA = 'ifany')
# NOTE(review): 'tmp' is not defined at this point (it was removed above);
# this spina bifida check presumably intended to operate on mi.bd -- confirm.
tmp$sb.code <- as.numeric(NA)
sb.codes <- c(741.0, 741.9)
for (i in tmp){
for (j in 110:133){
tmp$sb.code <- ifelse(is.na(tmp$sb.code) & round(tmp[,j], digits = 1) %in% sb.codes, 1, tmp$sb.code)
}
}
table(tmp$sb.code)
rm(mi.bd, tmp, sb.codes, i, j)
# Model QC: verifying some cancer diagnoses -------------------------------
# Pull study IDs for Texas children whose first cancer was hepatoblastoma,
# NHL, or ALL, then look up their original registry codes (morph31) in the
# TX cancer file to confirm the diagnosis variables were set correctly.
hepato <- filter(filter(armitx.nochrom, cancer1 == 'hepato'), state == 'TX')
hepato <- c(hepato$studyid)
nhl <- filter(filter(armitx.nochrom, cancer1 == 'nhl'), state == 'TX')
nhl <- c(nhl$studyid)
all <- filter(filter(armitx.nochrom, cancer1 == 'all'), state == 'TX')
all <- c(all$studyid)
load('Z:/Jeremy/GOBACK/Datasets/Texas/tx.cancer1.codes.rdata')
# Prefix registry birth IDs so they match the 'tx...' studyid format.
tx.can$birthid <- paste0('tx',tx.can$birthid)
# Hepatoblastoma: inspect the morphology codes for the flagged children.
tx.hepato <- tx.can[tx.can$birthid %in% hepato, ]
unique(tx.hepato$morph31)
table(tx.hepato$morph31, useNA = 'ifany')
# NHL: list codes and print the code/site columns.
tx.nhl <- tx.can[tx.can$birthid %in% nhl, ]
tx.nhl <- arrange(tx.nhl, morph31)
unique(tx.nhl$morph31)
print(tx.nhl[,2:3])
# ALL: list codes; spot-check sites for codes 9811 and 9823.
tx.all <- tx.can[tx.can$birthid %in% all, ]
tx.all <- arrange(tx.all, morph31)
unique(tx.all$morph31)
tmp <- filter(tx.all, morph31 == 9811)
unique(tmp$site.code1)
tmp <- filter(tx.all, morph31 == 9823)
unique(tmp$site.code1)
rm(tx.can, tmp, tx.all, tx.nhl, tx.hepato, all, hepato, nhl)
|
427b827c2b706a0c2cdf6db97653064bf2e17275
|
062a1c2dce842f8a75841e18f74110becb4dc7c2
|
/Data Science Specialization - JHU/7. Practical Machine Learning/quiz2_practical_ml.R
|
0c5c52309388b35f352d1280d61cb1facacdb6a6
|
[] |
no_license
|
MaazAmjad/MyDataScienceMasters_2015-16
|
a976abd221ece6b41816735ce80c8143276bf5bb
|
c69a3faebb1e9967fe4a8d8cfce363eecd215c96
|
refs/heads/master
| 2020-11-30T03:58:21.347882
| 2016-05-19T15:37:15
| 2016-05-19T15:37:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,618
|
r
|
quiz2_practical_ml.R
|
# Question 1
# Two candidate partitions of the Alzheimer data; in the second one the
# partition index (trainIndex) selects the TRAINING rows, which is the
# conventional usage of createDataPartition().
library(AppliedPredictiveModeling)
library(caret)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
testIndex = createDataPartition(diagnosis, p = 0.50,list=FALSE)
training = adData[-testIndex,]
testing = adData[testIndex,]
adData = data.frame(diagnosis,predictors)
trainIndex = createDataPartition(diagnosis, p = 0.50,list=FALSE)
training = adData[trainIndex,]
testing = adData[-trainIndex,]
# Question 2
# Inspect the distribution of the Superplasticizer predictor in the
# concrete training set.
library(AppliedPredictiveModeling)
data(concrete)
library(caret)
set.seed(1000)
inTrain = createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training = mixtures[ inTrain,]
testing = mixtures[-inTrain,]
hist(x = training$Superplasticizer)
# Question 3
# PCA on the interleukin ("IL*") predictors with an 80% variance cutoff.
library(AppliedPredictiveModeling)
library(caret)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
train_sub = subset(training[grepl("^IL",colnames(training))])
# Subset data where columns start by "IL"
subset_tr <- training[,grepl("^IL", names(training))]
View(subset_tr)
# thresh: cutoff for the cumulative percent of variance to be retained by PCA
preprop <- preProcess(subset_tr,method="pca",thresh=0.8)
preprop$rotation
# Question 4
# Compare a plain logistic model on the IL predictors against one fit on
# principal components retaining 80% of the variance.
library(AppliedPredictiveModeling)
library(caret)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
# Column 13 of newtrain/newtest is the diagnosis outcome appended above.
newtrain = data.frame(train_sub,training$diagnosis)
test_sub = subset(testing[grepl("^IL",colnames(testing))])
newtest = data.frame(test_sub,testing$diagnosis)
predict1 = train(data = newtrain[,-13],method = "glm",newtrain$training.diagnosis~.)
confusionMatrix(data = testing$diagnosis, predict(predict1,testing))
# Accuracy : 0.6463
preprop = preProcess(newtrain[,-13],method = "pca",thresh = 0.8)
preprop2 = predict(preprop, newtrain[,-13])
testpreprop2 = predict(preprop,newtest[,-13])
predict2 = train(newtrain$training.diagnosis~., method="glm",data=preprop2)
confusionMatrix(data=newtest$testing.diagnosis,predict(predict2,testpreprop2))
# Repeat of the comparison with a fixed seed for both fits.
set.seed(1235)
train_set <- data.frame(training[,grepl("^IL", names(training))],training$diagnosis)
View(train_set)
preprop_PCA <- preProcess(train_set[,-13],method="pca",thresh=0.8)
predict_PCA <- predict(preprop_PCA,train_set[,-13])
model_PCA <- train(train_set$training.diagnosis~.,method="glm",data=predict_PCA)
set.seed(1235)
modelfit <- train(train_set$training.diagnosis~.,data=train_set,method="glm")
model_PCA
modelfit
|
8f917cf4e99ce8b638678a0b91e97eaa3aea1f56
|
00e4b920da382cbd44767e05684f557478b98eaf
|
/EDA/NA-string check.R
|
c75e37c060f80fca4ad9e7c7970fb988c72af5c9
|
[] |
no_license
|
shenzijian/Prediction-of-Hospital-Readmission
|
e2f64f7085a8fce9d0e2e9b408997a4fc6b9a7d4
|
501a6fc23c5114a9295031ad1929d96a8d3280c0
|
refs/heads/master
| 2020-08-22T00:47:43.287934
| 2019-10-21T18:18:15
| 2019-10-21T18:18:15
| 216,283,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,100
|
r
|
NA-string check.R
|
# Working directory holds the raw project files (path left as-is).
setwd("~/Desktop/UMD/课程/第二学期/758T/Project Files")
######check NA string distribution##########
# Read the raw file once WITHOUT NA conversion so the RETURN distribution of
# the problematic values can be inspected later.
hos_whole <- read_csv("Hospitals_Train.csv")
hos_whole <- hos_whole[1:38221,]
##read valuable row and turn problematic values to NA
na_strings <- c("Unknown", "Declined to Answer","","#N/A","5 Purple","#VALUE!",'Expired','Deceased','Hospice/ Medical Facility','Hospice/Home')
hos <- read_csv("Hospitals_Train.csv", na=na_strings)
hos <- hos[1:38221,]
# Impute: unknown acuity becomes its own level; missing charges/consults -> 0.
hos$ACUITY_ARR[is.na(hos$ACUITY_ARR)] <- 'new_category'
hos$CHARGES[is.na(hos$CHARGES)] <- 0
hos$CONSULT_IN_ED[is.na(hos$CONSULT_IN_ED)] <- 0 ####because distribution of CHARGE's NA's RETURN similar to CHARGE == 0
# Apply the same imputation to the held-out predictor set.
test_X$ACUITY_ARR[is.na(test_X$ACUITY_ARR)] <- 'new_category'
test_X$CHARGES[is.na(test_X$CHARGES)] <- 0
test_X$CONSULT_IN_ED[is.na(test_X$CONSULT_IN_ED)] <- 0
## check distribution of "3-" and "new_cate"
urge <- hos %>%
  filter(ACUITY_ARR == '3-Urgent')
table(urge$RETURN)
newC <- hos %>%
  filter(ACUITY_ARR == 'new_category')
table(newC$RETURN)
#create bin
# Age bins: <=24, 25-34, 35-44, 45-59, 60-74, 75-89, 90+; arrival hour in
# four 6-hour blocks.
hos$AGE = cut(hos$AGE, c(-Inf, 24,34,44,59,74,89, Inf), labels=1:7)
hos$HOUR_ARR = cut(hos$HOUR_ARR, c(-Inf,6,12,18,Inf), labels = 1:4)
test_X$AGE = cut(test_X$AGE, c(-Inf, 24, 34, 44, 59, 74, 89, Inf), labels = 1:7)
test_X$HOUR_ARR = cut(test_X$HOUR_ARR, c(-Inf, 6, 12, 18, Inf), labels = 1:4)
#delete unneccessary column
clean1 = subset(hos, select=-c(WEEKDAY_DEP,HOUR_DEP,MONTH_DEP,ADMIT_RESULT,RISK,SEVERITY))
#omit NA value after subset
hos=na.omit(clean1)
dim(hos)
# Indices of rows that were dropped by na.omit(), i.e. records that
# contained one of the NA strings.
NA_list <- setdiff(hos_whole$INDEX, hos$INDEX)
####check how distribution in NA-string
NA_df <- hos_whole$RETURN[hos_whole$INDEX %in% NA_list]
table(NA_df)
############
# RETURN distribution among specific problem values in the raw data.
NV <- hos_whole %>%
  filter(CHARGES == "#VALUE!")
table(NV$RETURN)
Ngender <- hos_whole %>%
  filter(is.na(GENDER))
## ethnicity##
Unknow <- hos_whole %>%
  filter(ETHNICITY == 'Unknown')
table(Unknow$RETURN)
## 0.09311741
### race declined to answer###
decline <- hos_whole %>%
  filter(RACE == 'Declined to Answer')
table(decline$RETURN)
# 3 No#####
###############################
|
2f22398a5184ba52c25646b39bcaa8381dba51ee
|
3ed36c82ca3a4fea44a7bab117a2c46a22894e7f
|
/R/psychometric.R
|
05a8b8d2eec21cdae72d34b9c5333a8b127f3f97
|
[] |
no_license
|
ccamp83/mu
|
816d899b0e4491ae0aaaee40f25f33cceb2196aa
|
e57980c98e293bd27f5cc3bc04f8976798f3f00d
|
refs/heads/master
| 2023-08-31T14:27:30.922366
| 2023-08-29T12:34:19
| 2023-08-29T12:34:19
| 124,022,647
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,292
|
r
|
psychometric.R
|
#' Estimate PSE and JND from a fitted psychometric model
#'
#' Computes the point of subjective equality (PSE = -intercept/slope) and the
#' just-noticeable difference (JND = 1/slope) from a two-coefficient
#' (intercept + slope) fit, with delta-method standard errors and Wald
#' confidence limits.
#'
#' @param model A fitted model whose first two coefficients are intercept and
#'   slope (e.g. from \code{glm}), or an lme4 fit when \code{lme4 = TRUE}.
#' @param alpha Type-I error rate for the confidence limits (default 0.05,
#'   i.e. 95\% limits).
#' @param lme4 Set to \code{TRUE} when \code{model} is an lme4 fit; fixed
#'   effects are then extracted via \code{getME(model, "beta")}.
#' @return A 2 x 4 matrix with rows \code{pse}, \code{jnd} and columns
#'   Estimate, Std. Error, Inferior, Superior.
#' @export
psychometric <- function(model, alpha = 0.05, lme4 = FALSE) {
  # Extract intercept/slope; lme4 stores fixed effects separately.
  if (!lme4) {
    pse <- -model$coef[1] / model$coef[2]
    BETA <- model$coef[2]
  } else {
    # if extracting from mer, check lme4 version!
    fixed.par <- getME(model, "beta")
    pse <- -(fixed.par[1] / fixed.par[2])
    BETA <- fixed.par[2]
  }
  # Variances/covariance of the coefficient estimates.
  var.alpha <- vcov(model)[1, 1]
  var.beta <- vcov(model)[2, 2]
  cov.alpha.beta <- vcov(model)[2, 1]
  # Two-sided critical value, computed once instead of four times.
  z <- qnorm(1 - (alpha / 2))
  # Delta-method variance of the PSE.
  var.pse <- (1 / BETA^2) * (var.alpha + (2 * pse * cov.alpha.beta) + (pse^2 * var.beta))
  inferior.pse <- pse - z * sqrt(var.pse)
  superior.pse <- pse + z * sqrt(var.pse)
  # JND and its delta-method variance.
  jnd <- 1 / BETA
  var.jnd <- (-1 / BETA^2)^2 * var.beta
  inferior.jnd <- jnd - z * sqrt(var.jnd)
  superior.jnd <- jnd + z * sqrt(var.jnd)
  matrix(rbind(c(pse, sqrt(var.pse), inferior.pse, superior.pse),
               c(jnd, sqrt(var.jnd), inferior.jnd, superior.jnd)),
         nrow = 2,
         dimnames = list(c("pse", "jnd"),
                         c("Estimate", "Std. Error", "Inferior", "Superior")))
}
|
2f7583c85d0cae6e71f9b445d7f173bd5c16f242
|
15fe06f49cbeb087c03bf8b743f18bce470e8cb0
|
/search.R
|
c7f0d7b54eacba295dd7c989f5a418ab0f804bfd
|
[] |
no_license
|
PranavKrishnan1/assignment_3
|
8555a791b859402d21c790846adfd99c50a2d9a9
|
a3cf092805357f28695145c07f14544fcdb12f1a
|
refs/heads/master
| 2022-04-09T03:39:47.965388
| 2020-02-26T04:52:36
| 2020-02-26T04:52:36
| 243,171,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 383
|
r
|
search.R
|
# Collect Google autocomplete suggestions for "zoho" followed by each letter
# a-z, then print each result set and append it to test.csv.
# NOTE(review): googleSuggest() comes from the seoR package (see the attached
# packages echoed below); it must be loaded before running this script.
#[1] "rlist" "gtools" "seoR" "devtools"
#[5] "usethis" "stats" "graphics" "grDevices"
#[9] "utils" "datasets" "methods" "base"
x <- "zoho"
# FIX: removed the unused `m <- c()` accumulator; replaced `=` assignment
# with `<-` and `T` with `TRUE`.
p <- c()
for (i in 1:26) {
  # intToUtf8(96 + i) yields the letters "a".."z".
  y <- paste(x, intToUtf8(96 + i), sep = " ")
  p[i] <- (googleSuggest(y))
}
p[[2]]
for (i in 1:26) {
  print(p[[i]])
  # Append each suggestion set as new rows of the CSV.
  write.table(data.frame(p[[i]]), 'test.csv', append = TRUE, sep = ',')
}
|
08fc27785446e1b0d8ea70501a7205830b222e04
|
8591c35b0ed4035aee8b174fb003965dc0709031
|
/reeflandarea/3-extract_reef_area.R
|
51506a15a733d663ecf46a0aa26dcd5912846c42
|
[] |
no_license
|
ultimatemegs/msec
|
ad713d47caad0a712448c410b5eac9597e9006ff
|
576c3c842327198538c3e6cf8b82ec5cda555174
|
refs/heads/master
| 2020-03-23T14:13:48.590647
| 2017-05-09T13:53:16
| 2017-05-09T13:53:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,506
|
r
|
3-extract_reef_area.R
|
# Produce rasters of reef area within 15km and 200km of marine grid cells
library(raster)
library(rgeos)
source("utils.R")
# Placeholder: set to the local directory holding the Reefs at Risk raster.
reef_dir <- "{{Insert path to Reefs at Risk raster}}"
# Load coral reefs layer
reefs <- raster(file.path(reef_dir, "reef_500"))
# Cylindrical equal-area projection in meters, assigned to match the data.
projection(reefs) <- "+proj=cea +lon_0=-160 +lat_ts=0 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs"
# Load land mask
land <- raster("land_final.grd")
# Function to calculate reef area within dist of point (long, lat)
# Function to calculate reef area within dist of point (long, lat).
#
# long, lat: point coordinates in degrees; dist: buffer radius in meters.
# Returns a named vector c(long, lat, reef_area); area is in km^2 given the
# 0.25 km^2 cells, and is NA if buffering/extraction fails (the error is
# printed so failed points can be identified in the log).
reef_area <- function(long, lat, dist) {
  tryCatch({
    # Create a circular buffer in a equidistant projection centered on point
    pt <- SpatialPoints(cbind(0, 0),
                        proj = CRS(paste0("+proj=aeqd +lon_0=", long,
                                          " +lat_0=", lat, " +unit=m")))
    buf <- gBuffer(pt, width = dist, quadsegs = 20)
    # Reproject the buffer into the reef raster's CRS before extracting.
    buf <- spTransform(buf, projection(reefs))
    # Compute reef area in buffer (# cells x 0.25km^2 per cell)
    reef_crop <- crop(reefs, buf, snap = "out")
    cell_area <- 0.25
    rarea <- suppressWarnings(
      extract(reef_crop, buf, fun = sum, na.rm = TRUE) * cell_area)
    c(long = long, lat = lat, reef_area = rarea)
  }, error = function(e) {
    # On failure, report the point and the error, and return NA for the area.
    print(c(long, lat, e))
    c(long = long, lat = lat, reef_area = NA)
  })
}
#### Compute reef area within 15km radius ####
# 20km buffer around reef areas (pre-computed in ArcGIS)
reefs_buf20 <- raster("reeflandarea/buffers/reef_20km_rast.tif")
# inv_rotate() comes from utils.R -- presumably undoes a longitude rotation;
# confirm its behavior there.
reefs_buf20 <- inv_rotate(reefs_buf20)
# Resample reef buffer to final grid
land_crop <- crop(land, reefs_buf20, snap = "out")
reefs_buf20 <- resample(reefs_buf20, land_crop, method = "ngb")
# Remove points over land
reefs_buf20 <- mask(reefs_buf20, land_crop, maskvalue = 1)
# Get grid points for reef area calculation
grid_pts <- rasterToPoints(reefs_buf20)
grid_pts <- as.data.frame(grid_pts)
colnames(grid_pts) <- c("long", "lat", "dist")
# The third column is repurposed as the buffer radius (meters).
grid_pts$dist <- 15000
# Calculate reef area within 15km of each point
# NOTE: This calculation (and the one for 200km below) was processed in parallel on a HPC cluster.
res <- Map(reef_area, grid_pts$long, grid_pts$lat, grid_pts$dist)
# Convert result to SpatialPointsDataFrame and rasterize
res <- as.data.frame(do.call(rbind, res))
coordinates(res) <- ~long + lat
# Cells with no nearby reef default to 0; land cells are masked afterwards.
res_rast <- rasterize(res, land, field = "reef_area", background = 0,
                      filename = "reef_area_15km.grd")
res_mask <- mask(res_rast, land, maskvalue = 1,
                 filename = "reef_area_15km_masked.grd")
#### Repeat for 200km radius ####
# Load a 205km buffer around reef areas
reefs_buf200 <- raster("reeflandarea/buffers/reef_205kmbuff_rast.tif")
reefs_buf200 <- inv_rotate(reefs_buf200)
land_crop <- crop(land, reefs_buf200, snap = "out")
reefs_buf200 <- resample(reefs_buf200, land_crop, method = "ngb")
reefs_buf200 <- mask(reefs_buf200, land_crop, maskvalue = 1)
grid_pts <- rasterToPoints(reefs_buf200)
grid_pts <- as.data.frame(grid_pts)
colnames(grid_pts) <- c("long", "lat", "dist")
grid_pts$dist <- 200000
res <- Map(reef_area, grid_pts$long, grid_pts$lat, grid_pts$dist)
# Convert result to SpatialPointsDataFrame and rasterize
res <- as.data.frame(do.call(rbind, res))
coordinates(res) <- ~long + lat
res_rast <- rasterize(res, land, field = "reef_area", background = 0,
                      filename = "reef_area_200km.grd")
res_mask <- mask(res_rast, land, maskvalue = 1,
                 filename = "reef_area_200km_masked.grd")
|
107858ac98ab7583e1e96bb85d47cf220a7ade8c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PairedData/examples/mcculloch.Var.test.Rd.R
|
81b6630dfb56b07abce694e830c1f68d836c4c14
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
mcculloch.Var.test.Rd.R
|
library(PairedData)
### Name: mcculloch.Var.test
### Title: McCulloch test of scale for paired samples
### Aliases: mcculloch.Var.test mcculloch.Var.test.default
### mcculloch.Var.test.paired
### Keywords: htest
### ** Examples
# Two correlated samples sharing the component z; y has twice x's scale,
# so the test should detect a difference in variances.
z<-rnorm(20)
x<-rnorm(20)+z
y<-(rnorm(20)+z)*2
mcculloch.Var.test(x,y)
# Same test using the paired-object interface.
p<-paired(x,y)
mcculloch.Var.test(p)
# A variation with kendall tau
mcculloch.Var.test(p,method="kendall")
# equivalence with the PitmanMorgan test
mcculloch.Var.test(p,method="pearson")
Var.test(p)
|
7c79caaf99d95b682783f24822cb49afcd910ffb
|
d96889f52ab75b0e99e1cd308ac3cfda531e8f78
|
/R/fig07_01.R
|
1e9768a4d427f479b440357394129aa13b67a64b
|
[
"MIT"
] |
permissive
|
enguang2/stan-statespace
|
ce39eea82b6461a56795fc2960cc7677db3539db
|
5175c83aaf2197d2f33024eea04e8d81c8d4dcdb
|
refs/heads/master
| 2022-11-22T22:35:23.308194
| 2020-07-12T23:18:32
| 2020-07-12T23:18:32
| 278,282,945
| 0
| 0
|
MIT
| 2020-07-09T06:34:25
| 2020-07-09T06:34:24
| null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
fig07_01.R
|
source('common.R', encoding = 'utf-8')
## @knitr init_stan
# Assemble the Stan data list: UK drivers series (y), log petrol price (x),
# and the seat belt law indicator (w).
y <- ukdrivers
x <- ukpetrol
w <- ukseats
standata <- within(list(), {
  y <- as.vector(y)
  x <- as.vector(x)
  w <- as.vector(w)
  n <- length(y)
})
## @knitr show_model
model_file <- '../models/fig07_01.stan'
cat(paste(readLines(model_file)), sep = '\n')
## @knitr fit_stan
fit <- stan(file = model_file, data = standata,
            iter = 2000, chains = 4)
# Abort unless the chains converged (is.converged() comes from common.R).
stopifnot(is.converged(fit))
# Posterior means averaged over all chains.
yhat <- get_posterior_mean(fit, par = 'yhat')[, 'mean-all chains']
mu <- get_posterior_mean(fit, par = 'mu')[, 'mean-all chains']
beta <- get_posterior_mean(fit, par = 'beta')[, 'mean-all chains']
lambda <- get_posterior_mean(fit, par = 'lambda')[, 'mean-all chains']
sigma_irreg <- get_posterior_mean(fit, par = 'sigma_irreg')[, 'mean-all chains']
# Checks against reference parameter values; the first two are evaluated
# but intentionally not asserted (their stopifnot versions are commented out).
# stopifnot(is.almost.fitted(mu, 6.4016))
is.almost.fitted(mu, 6.4016)
# stopifnot(is.almost.fitted(beta, -0.45213))
is.almost.fitted(beta, -0.45213)
stopifnot(is.almost.fitted(lambda, -0.19714))
stopifnot(is.almost.fitted(sigma_irreg^2, 0.00740223))
## @knitr output_figures
title <- paste('Figure 7.1. Deterministic level plus variables',
               'log petrol price and seat belt law.', sep = '\n')
# The Japanese title below overwrites the English one above; only the
# Japanese version is used in the plot.
title <- paste('図 7.1 確定的レベルプラス対数石油価格と',
               'シートベルト法', sep = '\n')
p <- autoplot(y)
# Give the fitted series the observed series' time base before overlaying.
yhat <- ts(yhat, start = start(y), frequency = frequency(y))
p <- autoplot(yhat, p = p, ts.colour = 'blue')
p + ggtitle(title)
|
7f9a92a94bab3d120b41f457aae23df304611f5a
|
b92e6db7366fad56763e199ca312c67b8fe9a6d8
|
/man/rmd_code_block.Rd
|
8e6f2f5ab3e770d5b18646267d55990feec23654
|
[
"MIT"
] |
permissive
|
nemochina2008/spAddins
|
52e0ba73bbe27e5b3a9150b19c0f843116663b87
|
f9de69203760b692e9fa571a85cd0304e146f361
|
refs/heads/master
| 2021-07-16T11:11:12.615231
| 2017-10-14T13:24:53
| 2017-10-14T13:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 692
|
rd
|
rmd_code_block.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insert_code_block.R
\name{rmd_code_block}
\alias{rmd_r_code_block}
\alias{rmd_code_block}
\title{Convert rows into the block of code}
\usage{
rmd_r_code_block()
rmd_code_block()
}
\description{
RStudio addin to insert selected lines into code block: \itemize{
\item \code{rmd_r_code_block()} - R code block;
\item \code{rmd_code_block()} - verbatim code block.
}
\code{rs_enclose_all_with_lines} - function that adds lines above and below the selection.
}
\seealso{
Other R Markdown formatting addins: \code{\link{format_rmd}},
\code{\link{rmd_equations}}, \code{\link{rmd_list}}
}
|
90f902570ea5e12791001f6c7335c628e729861a
|
8ea31acfbb23dfaef5856e20176f5884efcdde74
|
/PTMC/DEG analysis with TRAPR.R
|
349e9ad5e8c009ed203a482647ab38e510d25e4d
|
[] |
no_license
|
gnsljw/Research
|
5977bc1af9d397577ec8cd47a2770030a06038a5
|
237742f623f1b9c13f7cca9106aa571fbb5c45d9
|
refs/heads/master
| 2016-09-15T22:11:39.755084
| 2015-02-26T05:19:43
| 2015-02-26T05:19:43
| 23,687,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,187
|
r
|
DEG analysis with TRAPR.R
|
# DEG analysis of PTMC (n=36) vs PTC (n=450) expression data using TRAPR.
source('TRAPR_Code.R')
ls()
# Columns 1:36 are PTMC samples; columns 37:486 are PTC samples.
PTMC <- TRAPR.Data.ReadExpressionTable('(1) PTMC(36)PTC(450) All gene symbol.txt', Exp1 = c(1:36), Exp2 = c(37:486), Tag = c('PTMC', 'PTC'))
str(PTMC)
# Pre-normalization diagnostics: boxplot, density, and MA plots.
TRAPR.DataVisualization(PTMC, 'box', logged = F)
TRAPR.DataVisualization(PTMC, 'DS', logged = F)
TRAPR.DataVisualization(PTMC, 'MA', logged = F)
PTMC <- TRAPR.Filter.ZeroValue(PTMC)# Filtering for zero values
PTMC <- TRAPR.Filter.LowVariance(PTMC) # Filtering for genes with low variance
PTMC <- TRAPR.Normalize(PTMC, Method = 'UpperQuartile')
# Post-normalization diagnostics (same three plots).
TRAPR.DataVisualization(PTMC, 'box', logged = F)
TRAPR.DataVisualization(PTMC, 'DS', logged = F)
TRAPR.DataVisualization(PTMC, 'MA', logged = F)
# DEG call: t-test with Benjamini-Hochberg FDR, p < 0.05, fold-change
# threshold 0.5.
PTMC <- TRAPR.StatisticalTest(PTMC, Method = 'ttest', FDRControl = 'BH', PvalueThre = 0.05, FCThre = 0.5)
# Row-standardize the DEG expression matrix for the heatmap.
# NOTE(review): this divides by the row VARIANCE, not the SD -- a
# conventional z-score would use apply(TestMatrix, 1, sd); confirm intent.
TestMatrix <- PTMC$CurrentMatrix[PTMC$DEGIndex,]
zTestMatrix <- (TestMatrix - rowMeans(TestMatrix)) / apply(TestMatrix, 1, var)
rownames(zTestMatrix) <- PTMC$DEGName
colnames(zTestMatrix) <- PTMC$SampleTag
heatmap(zTestMatrix)
# Volcano plot and heatmap from TRAPR, then write result/name-list files.
TRAPR.ResultVisualization(PTMC, 'VO')
TRAPR.ResultVisualization(PTMC, 'HM')
TRAPR.Data.DEGResulttoFile(PTMC, FileName = '(1) TRAPR Result.txt')
TRAPR.Data.DEGNameListtoFile(PTMC)
|
c82cf47625ea3104f8f083e9981ce01fc321ebbc
|
c4d6ad3fe0fdce49bb95f91e633fca14991cf47b
|
/R/scheffer.R
|
d3ec7d45dc601a5809de585e1a9d46a37f05eb87
|
[] |
no_license
|
HankStevens/primer
|
cbe56ce731ba47e056ad0eb6aa184ffb7d840239
|
945f7bb511314d4c9f6240d8f8b1ea57e5da0baf
|
refs/heads/master
| 2021-09-11T00:24:56.465932
| 2021-08-27T19:17:12
| 2021-08-27T19:17:12
| 290,855,188
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,833
|
r
|
scheffer.R
|
#' A resource-based model of alternative stable states
#'
#' A model of floating vs. submerged plant dominance in shallow aquatic
#' systems, after Scheffeer \emph{et al}. (2003). For use with \code{ode} in
#' the \code{deSolve} package. Floating plants are better competitors for
#' light, as long as submerged plants cannot drive down nitrogen levels.
#'
#'
#' @param t the time point for a realization of the integration.
#' @param y the vector of populations, at each time t.
#' @param p a vector or list containing the necessary parameters.
#' @return Returns a list of length one which is the vector of the rates of
#' increase (required by \code{ode}).
#' @author Hank Stevens <HStevens@@muohio.edu>
#' @seealso \code{\link{lvcompg}}, \code{\link{igp}}
#' @references Scheffer, M., Szabo, S., Gragnani, A., van Nes, E.H., Rinaldi,
#' S., Kautsky, N., Norberg, J., Roijackers, R.M.M. and Franken, R.J.M. (2003)
#' Floating plant dominance as a stable state. \emph{Proceeding of the National
#' Academy of Sciences, U.S.A.}, \bold{100}, 4040--4045.
#'
#' Stevens, M.H.H. (2009) \emph{A Primer of Ecology with R}. Use R! Series.
#' Springer.
#' @keywords methods
#' @export
#' @examples
#'
#' p <- c(N=2.5, as=0.01, af=0.01, b=0.02, qs=0.075, qf=0.005,
#' hs=0, hf=0.2, ls=0.05, lf=0.05, rs=0.5, rf=0.5, W=0)
#' t <- 1:200
#' Initial <- c(F=10, S=10)
#' S.out1 <- ode(Initial, t, scheffer, p)
#' matplot(t, S.out1[,-1], type='l')
#' legend('right', c("F", "S"), lty=1:2, col=1:2, bty='n')
#'
# Right-hand side of the floating/submerged plant ODE system (Scheffer et
# al. 2003), in the form required by deSolve::ode().
#
# t: time point (unused, but required by the ode() interface).
# y: state vector -- y[1] = floating plant biomass, y[2] = submerged.
# p: named parameter vector/list (see the examples above).
#
# Returns a one-element list holding c(dF, dS), the rates of change.
`scheffer` <-
function (t, y, p)
{
    # FIX: avoid `F` as a variable name (it masks FALSE); use `Fl` locally.
    Fl <- y[1]
    S <- y[2]
    with(as.list(p), {
        # Nutrient availability declines with total plant biomass.
        n <- N/(1 + qs * S + qf * Fl)
        # Floating plants: saturating nutrient response, self-limitation,
        # constant loss rate lf.
        dF <- rf * Fl * (n/(n + hf)) * (1/(1 + af * Fl)) - lf * Fl
        # Submerged plants: additionally limited by floating-plant shading
        # (b) and background turbidity (W).
        dS <- rs * S * (n/(n + hs)) * (1/(1 + as * S + b * Fl + W)) - ls * S
        return(list(c(dF, dS)))
    })
}
|
672d120dcf024caf25eb12154e6a0240015d6b82
|
0124b6a02692905922a3a8992cb123bb8e039e6b
|
/computeEntropyFromAlignedFasta.R
|
7aed4a4018391dec2b6b3b84b2217cf322c438ef
|
[
"MIT"
] |
permissive
|
pedlefsen/hiv-founder-id
|
ebcadf9563549fb9932237e148a0c93609d856f3
|
50ba8d2757cb3be15357b4bdeaea3fe0d2680eea
|
refs/heads/master
| 2020-12-24T05:58:03.939103
| 2019-07-07T21:36:09
| 2019-07-07T21:36:09
| 42,635,031
| 3
| 3
|
MIT
| 2019-02-08T12:36:25
| 2015-09-17T04:32:20
|
R
|
UTF-8
|
R
| false
| false
| 5,543
|
r
|
computeEntropyFromAlignedFasta.R
|
library( "ade4", warn.conflicts = FALSE ) # needed by something. ape?
library( "ape" ) # for "chronos", "as.DNAbin", "dist.dna", "read.dna", "write.dna"
library( "seqinr", warn.conflicts = FALSE ) # for "as.alignment", "consensus"
library( "entropy" );
# Compute per-site Shannon entropy summary statistics from an aligned fasta
# file and write them (one tab-delimited row) to a text file.
#
# NOTE(review): the comment block originally here described consensus-sequence
# output and appears to have been copied from a related script; this function
# computes entropies, not a consensus.
#
# input.fasta.file: path to an aligned fasta file.
# output.dir / output.file: optional output location.  When both are omitted
#   the output is "<input basename>.entropy.txt" in the input's directory;
#   an absolute directory in output.file overrides output.dir.
# Returns the path of the file written.
computeEntropyFromAlignedFasta <- function ( input.fasta.file, output.dir = NULL, output.file = NULL ) {
    # Split the input path into directory, short name, and suffix.
    if( length( grep( "^(.*?)\\/[^\\/]+$", input.fasta.file ) ) == 0 ) {
        input.fasta.file.path <- ".";
    } else {
        input.fasta.file.path <-
            gsub( "^(.*?)\\/[^\\/]+$", "\\1", input.fasta.file );
    }
    input.fasta.file.short <-
        gsub( "^.*?\\/?([^\\/]+?)$", "\\1", input.fasta.file, perl = TRUE );
    input.fasta.file.short.nosuffix <-
        gsub( "^([^\\.]+)(\\..+)?$", "\\1", input.fasta.file.short, perl = TRUE );
    input.fasta.file.suffix <-
        gsub( "^([^\\.]+)(\\..+)?$", "\\2", input.fasta.file.short, perl = TRUE );
    # Resolve the output directory/file from the (possibly partial) inputs.
    if( !is.null( output.file ) ) {
        if( length( grep( "^(.*?)\\/[^\\/]+$", output.file ) ) == 0 ) {
            output.file.path <- NULL;
            output.file.path.is.absolute <- NA;
        } else {
            output.file.path <-
                gsub( "^(.*?)\\/[^\\/]+$", "\\1", output.file );
            output.file.path.is.absolute <- ( substring( output.file.path, 1, 1 ) == "/" );
        }
        output.file.short <-
            gsub( "^.*?\\/?([^\\/]+?)$", "\\1", output.file, perl = TRUE );
        if( !is.null( output.file.path ) && output.file.path.is.absolute ) {
            output.dir <- output.file.path;
        } else if( is.null( output.dir ) ) {
            if( is.null( output.file.path ) ) {
                output.dir <- input.fasta.file.path;
            } else {
                output.dir <- output.file.path;
            }
        } else {
            output.dir <- paste( output.dir, output.file.path, sep = "/" );
        }
        output.file <- output.file.short;
    } else { # is.null( output.file )
        output.file <- paste( input.fasta.file.short.nosuffix, ".entropy.txt", sep = "" );
    }
    if( is.null( output.dir ) || ( nchar( output.dir ) == 0 ) ) {
        output.dir <- ".";
    }
    ## Remove "/" from end of output.dir
    output.dir <-
        gsub( "^(.*?)\\/+$", "\\1", output.dir );
    input.fasta <- read.dna( input.fasta.file, format = "fasta" );
    # IUPAC profile (may contain ambiguity chars)
    input.fasta.profile.iupac <- seqinr::consensus( as.character( input.fasta ), method = "profile" );
    input.fasta.profile <- input.fasta.profile.iupac[ intersect( c( "-", "a", "c", "g", "t" ), rownames( input.fasta.profile.iupac ) ), ];
    .iupacs <- setdiff( rownames( input.fasta.profile.iupac ), c( "-", "a", "c", "g", "t" ) );
    # DIVVY up ambiguous counts evenly among the component bases.
    for( .iupac in .iupacs ) {
        .component.bases <- amb( .iupac );
        input.fasta.profile[ .component.bases, ] <-
            input.fasta.profile[ .component.bases, ] + ( input.fasta.profile.iupac[ .iupac, ] / length( .component.bases) );
    }
    # Gaps are excluded from the entropy calculation.
    input.fasta.profile.nogap <- input.fasta.profile[ setdiff( rownames( input.fasta.profile ), "-" ), ];
    entropies <- apply( input.fasta.profile.nogap, 2, entropy.empirical, unit = "log2" );
    entropies.sd <- sd( entropies, na.rm = TRUE );
    # FIX: the original used length( !is.na( entropies ) ), which always
    # equals length( entropies ); the non-NA count was clearly intended
    # (K = number of sites with a defined entropy).
    num.entropies <- sum( !is.na( entropies ) );
    .results <- summary( entropies[ !is.na( entropies ) ] );
    .results.string <- sprintf( "%0.4f", .results );
    names( .results.string ) <- names( .results );
    # Assemble the output row: N sequences, K sites, five-number summary
    # (plus mean) and SD -- all as formatted strings.
    .results <- c( N = as.character( nrow( input.fasta ) ), K = as.character( num.entropies ), .results.string, SD = round( entropies.sd, digits = 4 ) );
    # Normalize the formatted negative zero.
    .results[ .results == "-0.0000" ] <- "0.0000";
    output.file.path <-
        paste( output.dir, "/", output.file, sep = "" );
    write.table( t( as.matrix( .results ) ), file = output.file.path, row.names = FALSE, sep = "\t" );
    # Return the file name.
    return( output.file.path );
} # computeEntropyFromAlignedFasta ( input.fasta.file, ... )
## Here is where the action is.
# Script entry point: configuration is passed via environment variables
# (computeEntropyFromAlignedFasta_inputFilename / _outputFilename /
# _outputDir); empty values are treated as unset.
input.fasta.file <- Sys.getenv( "computeEntropyFromAlignedFasta_inputFilename" );
output.fasta.file <- Sys.getenv( "computeEntropyFromAlignedFasta_outputFilename" );
if( nchar( output.fasta.file ) == 0 ) {
    output.fasta.file <- NULL;
}
output.dir <- Sys.getenv( "computeEntropyFromAlignedFasta_outputDir" );
if( nchar( output.dir ) == 0 ) {
    output.dir <- NULL;
}
## TODO: REMOVE
# warning( paste( "aligned fasta input file:", input.fasta.file ) );
# if( !is.null( output.dir ) ) {
#     warning( paste( "consensus fasta output dir:", output.dir ) );
# }
# if( !is.null( output.fasta.file ) ) {
#     warning( paste( "consensus fasta output file:", output.fasta.file ) );
# }
# Run only when the input exists; print the output path on success.
if( file.exists( input.fasta.file ) ) {
    print( computeEntropyFromAlignedFasta( input.fasta.file, output.dir = output.dir, output.file = output.fasta.file ) );
} else {
    stop( paste( "File does not exist:", input.fasta.file ) );
}
|
05450b00bf173483fbbc68979f7d834df158d56a
|
3f93c5dd6d33378299657cec6d3f7ae23889d380
|
/plot2.R
|
8e831d0397c648b7dce79e4bb95de25bc9fbddd6
|
[] |
no_license
|
data-sci20/ExData_Plotting1
|
ce77a8a2813aa856fa6a964a361fc22ac3f1d6a5
|
d3a19b1775394e7c40e575adbaa66966ab87fbba
|
refs/heads/master
| 2022-11-17T15:20:24.158946
| 2020-07-14T05:14:02
| 2020-07-14T05:14:02
| 279,482,984
| 0
| 0
| null | 2020-07-14T04:38:00
| 2020-07-14T04:37:59
| null |
UTF-8
|
R
| false
| false
| 735
|
r
|
plot2.R
|
# plot2.R -- line plot of Global Active Power (kW) for 1-2 Feb 2007,
# written to plot2.png (480 x 480).

# Columns: Date and Time as character; the seven measurements as numeric.
col_classes <- c(rep("character", 2), rep("numeric", 7))

power <- read.table(file = "household_power_consumption.txt",
                    header = TRUE,
                    sep = ";",
                    na.strings = "?",
                    stringsAsFactors = FALSE,
                    comment.char = "",
                    colClasses = col_classes)

# Keep only the two days of interest (dates are d/m/Y strings).
two_days <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combine date and time into a timestamp for the x axis.
two_days$date_time <- strptime(paste(two_days$Date, two_days$Time),
                               "%d/%m/%Y %H:%M:%S")

png("plot2.png", width = 480, height = 480)
with(two_days, plot(date_time, Global_active_power,
                    type = "l",  # line plot
                    ylab = "Global Active Power (kilowatts)",
                    xlab = ""))
dev.off()
|
284bd9484adfb8836a0116d041d14d1716effe76
|
c9bcc781570a06baad3c96f5a19b5bd60094a17b
|
/Kodak Sentiment.R
|
3985cbeeda40ea3fa127d127da45cf08f7d0690d
|
[] |
no_license
|
jtarnowski18/ScienceScholars
|
e76141e5ee7fb46d425c7e3fa91f414421a4ca01
|
7a324e29cd909dbbaa48850f6408180fa1860b01
|
refs/heads/main
| 2023-01-23T16:11:19.765957
| 2020-12-06T22:02:18
| 2020-12-06T22:02:18
| 319,139,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,545
|
r
|
Kodak Sentiment.R
|
require(ggplot2)
require(stringr)
require(tidyverse)
#Sentiment Analysis Function
# Clean one tweet and score its sentiment.
#
# string: raw tweet text.  Emoji are converted to text tokens, digits and
# punctuation are stripped, and words containing links ("http") or leftover
# emoji markup ("<...>") are dropped before scoring.
#
# Returns a two-element list: the continuous QDAP sentiment score and its
# binary (positive/negative) conversion.  Relies on textclean::replace_emoji,
# stringr, and the SentimentAnalysis package being loaded.
Companies_Sentiment <- function(string){
  string <- replace_emoji(string)
  # Split the tweet into words; work with a plain character vector.
  words <- str_split(string = string, " ")[[1]]
  # Strip digits and punctuation from every word (vectorized -- the original
  # looped over rows of a one-column data frame).
  words <- gsub('[0-9]+', '', words)
  words <- str_replace_all(words, "[[:punct:]]", "")
  # Drop words that contained links or emoji markup.
  # FIX: the original tested length(remove) > 1, so a single flagged word
  # was never removed; and when nothing was removed it called
  # paste(<data.frame>, collapse = " "), which deparses the whole column
  # instead of joining the words.
  flagged <- grepl("http", words, fixed = TRUE) | grepl("<", words, fixed = TRUE)
  if (any(flagged)) {
    words <- words[!flagged]
  }
  cleaned <- paste(words, collapse = " ")
  # Score the cleaned text and return both continuous and binary QDAP scores.
  sent <- analyzeSentiment(cleaned)
  sent_binary <- convertToBinaryResponse(sent$SentimentQDAP)
  sent_list <- list(sent$SentimentQDAP, sent_binary)
  return(sent_list)
}
Tweets$Sentiment <-NA
#Run all of the Kodak Tweets through the sentiment analysis function
for (j in 1:nrow(Tweets)) {
  sent_list <- Companies_Sentiment(Tweets$text[j])
  # Keep only the continuous QDAP score (first list element).
  Tweets$Sentiment[j] <- sent_list[[1]]
  # Progress indicator every 50 tweets.
  if (j%%50 == 0) {
    print(j)
  }
}
#Scatter Plot of Kodaks sentiments
Sentiments <- ggplot(Tweets,aes(created_at, Sentiment))+
  geom_point()+
  ggtitle("Kodak's Sentiment Scores")+
  theme(plot.title = element_text(hjust = 0.5))+
  xlab("Date")
#Kodaks Stock Price Plot
# NOTE(review): as.date() (lowercase) is not base R -- it needs the `date`
# package -- and `Beg` is never used afterwards; this line likely errors
# and could be removed.  The as.Date.character() values below are what the
# plot actually uses.
Beg <- as.date("2020-07-27")
beg <- as.Date.character("2020-07-27")
Aft <- as.Date.character("2020-08-05")
# Red vertical lines mark the date window of interest on the price series.
Price <- ggplot(KODK_Data, aes(Date, High))+
  geom_line()+
  geom_vline(xintercept = beg, col = "red")+
  geom_vline(xintercept = Aft, col = "red")+
  ggtitle("Kodak's Stock Price")+
  theme(plot.title = element_text(hjust = 0.5))
# Save the sentiment plot and the augmented tweet table.
ggsave("E:/Summer Fellows/Phase 2/Kodak_Plot.png", Sentiments, "png")
save(Tweets, file = "KODK.Rdata")
|
99f8f5662e8ee594e3151295650c43481f9d78d8
|
8866b741411e2edfa61972369143de26fde5f821
|
/R/MMInfKK.R
|
3941f61666a251c62dd45909c5e143c48aa6859b
|
[] |
no_license
|
cran/queueing
|
6232577c0eb67cae7c716ef1432cc54194fb26d4
|
7712782a0d82d73599f128f68e94536b0cf8d4e5
|
refs/heads/master
| 2020-12-25T17:36:19.513391
| 2019-12-08T21:10:02
| 2019-12-08T21:10:02
| 17,698,913
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,304
|
r
|
MMInfKK.R
|
############################################################
############################################################
## MODEL M/M/Infinite/K/K
############################################################
############################################################
# Build an input object for the M/M/Inf/K/K model: arrival rate lambda,
# service rate mu and population size k, tagged with class "i_MMInfKK".
NewInput.MMInfKK <- function(lambda=0, mu=0, k=1)
{
  structure(list(lambda = lambda, mu = mu, k = k), class = "i_MMInfKK")
}
# Validate an i_MMInfKK input object: correct class, sane (non-anomalous)
# values, and the model constraints mu > 0, lambda >= 0, k a non-negative
# whole number.  Stops with a descriptive message on the first violation.
# is.anomalous, is.wholenumber and the ALL_* messages are package-internal.
CheckInput.i_MMInfKK <- function(x, ...)
{
  MMInfKK_class <- "The class of the object x has to be M/M/Inf/K/K (i_MMInfKK)"
  # Fixed: the message referred to "n", but the fields checked here are
  # lambda, mu and k.
  MMInfKK_anomalous <- "Some value of lambda, mu, or k is anomalous. Check the values."
  if (!inherits(x, "i_MMInfKK"))
    stop(MMInfKK_class)
  if (is.anomalous(x$lambda) || is.anomalous(x$mu) || is.anomalous(x$k))
    stop(MMInfKK_anomalous)
  if (x$mu <= 0)
    stop(ALL_mu_positive)
  if (x$lambda < 0)
    stop(ALL_lambda_zpositive)
  if (x$k < 0)
    stop(ALL_k_warning)
  if (!is.wholenumber(x$k))
    stop(ALL_k_integer)
}
# Log of the n-th unnormalised state-probability term for M/M/Inf/K/K:
# n * log(lambda/mu) + log(choose(k, n)), computed via lfactorial for
# numerical stability.  Arguments c and m are unused here but required by
# the ProbFactCalculus callback signature.
MMInfKK_InitPn_Aprox_Aux <- function(n, lambda, mu, c, k, m)
{
  log_ratio <- log(lambda) - log(mu)
  log_binom <- lfactorial(k) - lfactorial(k - n) - lfactorial(n)
  n * log_ratio + log_binom
}
# Probability distribution of the number of customers in the system.
# Delegates to the package-internal ProbFactCalculus, supplying the same
# M/M/Inf/K/K log-term helper for all three computation regimes.
MMInfKK_InitPn <- function(x)
{
  aux <- MMInfKK_InitPn_Aprox_Aux
  ProbFactCalculus(x$lambda, x$mu, 1, x$k, x$k, x$k, aux, aux, aux)
}
# Solve the M/M/Inf/K/K model: validate the input, compute the state
# probabilities Pn and the standard performance measures (L, W,
# Throughput, Qn, ...), and return them as a classed "o_MMInfKK" list.
# Queue-related measures (Lq, Wq, ...) are zero/NA because with infinite
# servers no customer ever waits.
QueueingModel.i_MMInfKK <- function(x, ...)
{
  # Is everything fine??
  CheckInput.i_MMInfKK(x, ...)
  # we're going to calculate the probability distribution
  Pn <- MMInfKK_InitPn(x)
  # Offered load per source.
  u <- x$lambda/x$mu
  # Calculate the output parameters of the model
  L <- (x$k * u)/(1 + u)
  Throughput <- x$lambda * (x$k - L)
  # Little's law: mean time in system.
  W <- L / Throughput
  Lq <- 0
  VNq <- 0
  Wq <- 0
  VTq <- 0
  Wqq <- NA
  Lqq <- NA
  # Distribution of customers seen by an arriving customer.
  QnAux <- function(n){ Pn[n] * (x$k - (n-1)) / (x$k - L) }
  Qn <- sapply(1:x$k, QnAux)
  # NOTE(review): FW ignores its argument t and returns the constant
  # exp(x$mu) — this does not look like a distribution function; confirm
  # the intended formula (likely 1 - exp(-mu * t)).
  FW <- function(t){ exp(x$mu) }
  FWq <- function(t){ 0 }
  # if the sum(Pn) == 0, then too big K or lambda/mu is
  # NOTE(review): the branches below look inconsistent — the else branch
  # assigns VT (not VN), VN is then overwritten unconditionally, the VT
  # expression is missing a sum(), and neither VN nor VT is included in
  # the result list even though VN/VT accessors exist.  Left unchanged;
  # verify against the package's other models before fixing.
  if (sum(Pn) == 0)
  {
    VN <- NA
  }
  else
  {
    VT <- ( ((0:x$k)^2) * Pn) - (L^2)
  }
  VN <- 1/(x$mu^2)
  # The result
  res <- list(
    Inputs=x, RO = L, Lq = Lq, VNq = VNq, Wq = Wq, VTq = VTq, Throughput = Throughput,
    L = L, W = W, Lqq = Lqq, Wqq = Wqq, Pn = Pn, Qn = Qn, FW = FW, FWq = FWq
  )
  class(res) <- "o_MMInfKK"
  res
}
# S3 accessor methods for "o_MMInfKK" result objects: each returns the
# corresponding element of the solved model.
# NOTE(review): VN and VT accessors return NULL because
# QueueingModel.i_MMInfKK does not store those elements in its result list.
Inputs.o_MMInfKK <- function(x, ...) { x$Inputs }
L.o_MMInfKK <- function(x, ...) { x$L }
VN.o_MMInfKK <- function(x, ...) { x$VN }
W.o_MMInfKK <- function(x, ...) { x$W }
VT.o_MMInfKK <- function(x, ...) { x$VT }
RO.o_MMInfKK <- function(x, ...) { x$RO }
Lq.o_MMInfKK <- function(x, ...) { x$Lq }
VNq.o_MMInfKK <- function(x, ...) { x$VNq }
Wq.o_MMInfKK <- function(x, ...) { x$Wq }
VTq.o_MMInfKK <- function(x, ...) { x$VTq }
Wqq.o_MMInfKK <- function(x, ...) { x$Wqq }
Lqq.o_MMInfKK <- function(x, ...) { x$Lqq }
Pn.o_MMInfKK <- function(x, ...) { x$Pn }
Qn.o_MMInfKK <- function(x, ...) { x$Qn }
Throughput.o_MMInfKK <- function(x, ...) { x$Throughput }
# Print a formatted report of the model via the package-internal helper.
Report.o_MMInfKK <- function(x, ...)
{
reportAux(x)
}
# Summarise the model by comparison with the other queueing models.
# NOTE(review): the class is deliberately(?) set to "summary.o_MM1" so
# printing dispatches to the shared print.summary.o_MM1 method — confirm
# this matches the package-wide convention.
summary.o_MMInfKK <- function(object, ...)
{
aux <- list(el=CompareQueueingModels(object))
class(aux) <- "summary.o_MM1"
aux
}
# Print method for the summary object (package-internal print_summary).
print.summary.o_MMInfKK <- function(x, ...)
{
print_summary(x, ...)
}
|
1024579da7845d651706a8772fb4865cf521fd03
|
b73ba9d91f872931cbf88d50999411c0bb7c211e
|
/code_development/tests.R
|
3dd4fefa822427d6de38611d72819e13d86d61b7
|
[
"MIT"
] |
permissive
|
weecology/portalcasting
|
73347ce66f8c1e5c080a1f1029ec17026c912588
|
a35a77214d41dbdaa50bb39452b5fe49c3763a83
|
refs/heads/main
| 2023-08-20T12:48:59.392495
| 2023-05-23T01:16:33
| 2023-05-23T01:16:33
| 129,144,321
| 8
| 12
|
NOASSERTION
| 2023-05-23T01:16:34
| 2018-04-11T19:34:03
|
R
|
UTF-8
|
R
| false
| false
| 787
|
r
|
tests.R
|
# Run the package test files in order, one devtools::test() call per
# numeric filename prefix (same calls, same order as listing them out).
for (test_filter in c("00", "10", "11", "12",
                      "20", "21", "22", "23", "24", "25", "26", "27", "28",
                      "30", "31", "32", "33",
                      "40", "41", "42", "43",
                      "50",
                      "60", "61", "62")) {
  devtools::test(filter = test_filter)
}
#devtools::test(filter = "99")
|
6904d392bceb71fc6b07389aec20625cc1b3337f
|
f42b0f80be58ebd5cf581fb662bc12953739a0a7
|
/workstation/R/example_run.R
|
f3cbd44791a220fdfeeb731e4bd4493eb4295d25
|
[] |
no_license
|
UniK-INES/ines-cluster
|
45d023f0623cb3825053fb15d78ba0fad51ff9f3
|
840a03f28faf25766c547abb1f2be528bbe864c8
|
refs/heads/master
| 2023-07-02T10:01:38.785146
| 2020-11-28T15:30:07
| 2020-11-28T15:30:07
| 278,318,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,113
|
r
|
example_run.R
|
# Distribute and launch a simulation on a range of cluster nodes over SSH.
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require('ssh')
require('readr')
# Full path to your simulation directory
sim_path <- "/home/chh/ines-cluster/workstation/R/testsim"
# Name of the simulation
sim_name <- basename(sim_path)
# Executable in simulation directory. Something like "java .." is possible too.
# NOTE(review): sim_cmd is defined but never used in this script.
sim_cmd <- "cmd"
# Range of nodes that should run the simulation
node_beg <- 8
node_end <- 9
############### Internals ###############
# Ssh connection (hard-coded user/host; authentication handled by ssh keys
# or interactive prompt — see the ssh package).
session <- ssh_connect("outsider@141.51.123.55")
# Simulation directory on server node
pxe <- "/pxe/meta/simulation/"
# Init/Exec script on server node
sim_ctrl <- "/pxe/meta/sim_start_on_nodes"
# Distribution script on server node
sim_dist <- "/pxe/meta/sim_to_nodes"
# Current node we are working on used in global context
# (mutated via <<- in start() and read inside retrieve_pid()).
current_node <- node_beg
# Local simulation files
sim_files <- dir(sim_path)
# Retrieves the PID of the simulation process started on nodes
retrieve_pid <- function(cbstream) {
rpid <- rawToChar(cbstream)
pid <- parse_number(rpid)
Sys.sleep(2)
print(current_node)
start_sim(current_node, pid)
# return(pid)
}
# Upload the simulation files, then initialise it on every node in
# node_beg:node_end.  The remote 'init' command prints a PID which the
# retrieve_pid() callback captures and uses to start the run.
# The global current_node is updated (<<-) so the callback knows which
# node the streamed output belongs to.
start <- function() {
upload_sim(sim_files)
for (node in node_beg:node_end) {
current_node <<- node
# assign("current_node", node, envir = .GlobalEnv)
ssh_exec_wait(session, command = paste(sim_ctrl, 'init', node, sep=" "), std_out = function(x) { retrieve_pid(x)})
}
}
# Starts the simulation
start_sim <- function(node, pid) {
print(paste("Starting on ", node))
ssh_exec_wait(session, command = paste(sim_ctrl, 'start', node, pid, sep=" "))
}
# Uploads simulation files to nodes
upload_sim <- function(files) {
# Create simulation directory on server node
out <- ssh_exec_wait(session, command = paste('mkdir', paste(pxe, sim_name, sep="/"), sep=" "))
# Upload the simulation to the server node
for (f in files) {
out <- scp_upload(session, paste(sim_path, f, sep="/"), paste(pxe, sim_name, sep="/"))
}
# Distribute to nodes
for (node in node_beg:node_end) {
out <- ssh_exec_wait(session, command = paste(sim_dist, sim_name, node, sep=" "))
}
}
# upload_sim(sim_files)
# Entry point: start() uploads and launches on all nodes, then the SSH
# session is closed.
start()
ssh_disconnect(session)
|
c113fb3e21a3c2e08506a1b602ac9f1855415c94
|
33c29c05e7a9c0e90ed027c86466b6c2ff51f1c5
|
/Imputation_VI_Functions.R
|
6e46b3f91a6907044f288ec58d622993f5ec271a
|
[] |
no_license
|
AndrewjSage/Imputation_Variable_Importance
|
c95b5beafd391fd2d53c62c60c653f780844dbd9
|
193b97f76a82ea529b26c0ee8973dbddaa783304
|
refs/heads/master
| 2021-05-08T17:17:51.471301
| 2019-12-17T04:30:22
| 2019-12-17T04:30:22
| 120,190,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,727
|
r
|
Imputation_VI_Functions.R
|
#setwd("/work/STAT/ajsage")
#Load Packages
library(randomForest)
library(randomForestSRC)
library(CALIBERrfimpute)
library(mice)
library(MASS)
#function to generate data for simulation described in paper
# Simulate the paper's benchmark data set: six equicorrelated standard
# normal covariates (pairwise correlation rho) and a response generated
# from either a linear (simsetting == 1) or a nonlinear model, plus
# N(0, 1) noise.  Returns a data frame with columns X1..X6 and y.
# RNG call order (mvrnorm, then rnorm) matches the original exactly.
Generate_Sim_Data <- function(rho, size=1000, simsetting=1){
  Sigma <- matrix(rho, nrow = 6, ncol = 6)
  diag(Sigma) <- 1
  X <- mvrnorm(n = size, mu = rep(0, 6), Sigma = Sigma, tol = 1e-6,
               empirical = FALSE, EISPACK = FALSE)
  epsilon <- rnorm(size, mean = 0, sd = 1)
  if (simsetting == 1) {
    # Linear signal with decreasing coefficients; X6 is pure noise.
    y <- 0.5 * X[, 1] + 0.4 * X[, 2] + 0.3 * X[, 3] +
      0.2 * X[, 4] + 0.1 * X[, 5] + epsilon
  } else {
    # Nonlinear signal: exponential, quadratic and interaction terms.
    y <- 0.3 * exp(X[, 1]) - 0.4 * X[, 2]^2 +
      0.5 * X[, 3] * X[, 4] + 0.2 * X[, 5] + epsilon
  }
  data.frame(cbind(X, y))
}
#function to delete a percentage of values, p, for a variable x
# Delete a proportion p of the values in column xvar of `data` under a
# chosen missingness mechanism:
#   MCAR - rows removed uniformly at random;
#   MAR  - removal probability depends on another randomly chosen covariate;
#   MNAR - removal probability depends on the deleted variable itself.
# Returns `data` with NA in the selected rows of column xvar.
DeleteMissing <- function(data, xvar, p, missingness){
  # Standardise a numeric copy so the logistic weights are comparable
  # across variables; the original data (and its types) are what gets
  # returned.
  data1 <- as.data.frame(apply(data, 2, as.numeric)) #Need to convert to numeric for weighting
  data1 <- as.data.frame(apply(data1, 2, scale)) # standardize so weighting is consistent
  if (missingness == "MCAR") {
    Miss <- sample(1:nrow(data), p * nrow(data))
  } else if (missingness == "MAR") {
    # Weight by another randomly chosen x variable (never xvar itself and
    # never the response in the last column).
    cands <- setdiff(1:(ncol(data) - 1), c(xvar))
    # Fixed: sample(n, 1) on a length-1 candidate draws from 1:n, which
    # could pick xvar or the response; use a lone candidate directly.
    var <- if (length(cands) == 1) cands else sample(cands, 1)
    weights <- 1 / (1 + exp(-3 * data1[, var]))
    # Randomly decide whether high or low values are deleted more heavily.
    # Fixed: the original used rbinom(1, 0, 1) (size 0, always returns 0)
    # and `weights == 1 - weights` (a comparison, not an assignment), so
    # the flip was dead code.
    if (rbinom(1, 1, 0.5) == 1) {
      weights <- 1 - weights
    }
    Miss <- sample(1:nrow(data1), p * nrow(data1), prob = weights)
  } else if (missingness == "MNAR") {
    # Weight by the variable being deleted itself.
    weights <- 1 / (1 + exp(-3 * data1[, xvar]))
    if (rbinom(1, 1, 0.5) == 1) {
      weights <- 1 - weights
    }
    # Fixed: use the (possibly flipped) weights instead of recomputing the
    # unflipped probabilities inline.
    Miss <- sample(1:nrow(data1), p * nrow(data1), prob = weights)
  } else {
    stop("missingness must be set to either 'MCAR', 'MAR', or 'MNAR'")
  }
  data[Miss, xvar] <- NA
  return(data)
}
#Function to impute missing values and compute variable importance using Shah's method
#This function does the imputation and VI once. Apply it nmult times for multiple imputation, as is done in Impute_VI
# Single imputation + variable importance using Shah's CALIBER method.
# Missing values in column xvar are imputed with CALIBERrfimpute
# (mice.impute.rfcat for factors, mice.impute.rfcont otherwise), then a
# random forest (randomForestSRC::rfsrc) is fit to the completed data and
# its permutation importance vector is returned.  Call nmult times for
# multiple imputation (see Impute_and_VI).
#
# Args:
#   x: data frame of predictors (may contain NA in column xvar only).
#   y: response vector/factor.
#   ntreesimp: trees for the imputation forests.
#   ntrees: trees for the importance forest.
#   xvar: column index of the variable with missing values.
CaliberVI <- function(x, y, ntreesimp, ntrees, xvar){
  if(sum(is.na(x))>0){
    ImputedX <-x #first set imputed dataset equal to one with missing values then fill missing values
    if(is.factor(x[,xvar])){
      ImputedX[is.na(x[,xvar]),xvar] <- mice.impute.rfcat(x[,xvar],!is.na(x[,xvar]), x[,-xvar], iter=5, ntree=ntreesimp)
    } else{
      ImputedX[is.na(x[,xvar]),xvar] <- mice.impute.rfcont(x[,xvar],!is.na(x[,xvar]), x[,-xvar], iter=5, ntree=ntreesimp)
    }
    Imputed <- cbind(ImputedX, y)} else{
      Imputed <- cbind(x,y)}
  rfMiss=rfsrc(y~., data=Imputed, ntree=ntrees,importance=TRUE)
  # NOTE(review): the comment below says "3rd col" but the code reads
  # column 1; elsewhere in this file the same pattern is described as
  # "1st col" — confirm which column holds overall permutation importance.
  if(is.factor(y)){VIvec <- rfMiss$importance[,1]}else{VIvec <- rfMiss$importance} #If y is a factor, permutation importance is given in 3rd col. Otherwise first
  return(VIvec)
}
#Function to impute missing values and compute variable importance using Doove's method
#This function does the imputation and VI once. Apply it nmult times for multiple imputation, as is done in Impute_VI
# Single imputation + variable importance using Doove's method
# (mice::mice.impute.rf), mirroring CaliberVI: impute column xvar, fit a
# random forest to the completed data, and return the permutation
# importance vector.  Call nmult times for multiple imputation.
miceVI <- function(x, y, ntreesimp, ntrees, xvar){
  if (sum(is.na(x)) > 0) {
    # Start from the incomplete data, then fill in only the missing cells.
    ImputedX <- x
    # NOTE(review): unlike CaliberVI, the predictor matrix here drops
    # column ncol(x) in addition to xvar, excluding the last covariate
    # from the imputation model — confirm this is intended.
    ImputedX[is.na(x[,xvar]), xvar] <- mice.impute.rf(x[,xvar], !is.na(x[,xvar]),
                                                      x[,-c(xvar, ncol(x))],
                                                      iter = 5, ntree = ntreesimp)
    Imputed <- cbind(ImputedX, y)
  } else {
    Imputed <- cbind(x, y)
  }
  # Fixed: the original passed `data <- Imputed` (an assignment evaluated
  # inside the call), which only worked because `data` happens to be the
  # second positional argument of rfsrc; use `data =` as in CaliberVI.
  rfMiss <- rfsrc(y ~ ., data = Imputed, ntree = ntrees, importance = TRUE)
  # For a factor response permutation importance is read from column 1;
  # otherwise rfMiss$importance is already a plain vector.
  if (is.factor(y)) {
    VIvec <- rfMiss$importance[,1]
  } else {
    VIvec <- rfMiss$importance
  }
  return(VIvec)
}
#Function to delete values and perform imputation using each technique in question.
# Run all nine imputation techniques on `data` (response in the last
# column, missing values in predictor column xvar) and record, for each
# technique, the permutation variable importance of a random forest fit
# to the completed data.  Returns a ncol(x) x ntechs matrix; columns are:
#   1 median imputation, 2 rfImpute, 3 missForest-style (mf.q),
#   4/5 rfsrc na.impute (1/5 iterations), 6/7 unsupervised rfsrc impute
#   (1/5 iterations), 8 CALIBER (multiple), 9 mice rf (multiple).
Impute_and_VI <- function(data, ntreesimp=300, ntrees=500, nmult=5, ntechs=9, xvar){
#separate predictor variables from response since some techniques require one or other
x <- data[,-ncol(data)]
y <- data[,ncol(data)]
#setup dataframe to store variable importance results
VI <- array(NA, dim=c(ncol(x), ntechs)) #rows correspond to variables, columns to imputation techniques
#Strawman-median imputation
tech <- 1
if(sum(is.na(x))>0){
Imputed <- data
Imputed[which(is.na(Imputed[,xvar])), xvar] <- median(Imputed[, xvar], na.rm=TRUE)} else{ #reordered so y still last
Imputed <- data
}
rfMiss <- rfsrc(y~., data=Imputed, ntree=ntrees,importance=TRUE)
if(is.factor(y)){VI[,tech]<-rfMiss$importance[,1]}else{VI[,tech]<-rfMiss$importance} #If y is a factor, permutation importance is given in 1st col. Otherwise just a vector
#Impute using rfImpute
tech <- 2
if(sum(is.na(x))>0){
# rfImpute returns y first; reorder columns so y is last again.
Imputed <- rfImpute(x, y, iter=5, ntree=ntreesimp)[,c(2:(ncol(x)+1),1)]} else{ #reordered so y still last
Imputed <- data
}
rfMiss <- rfsrc(y~., data=Imputed, ntree=ntrees,importance=TRUE)
if(is.factor(y)){VI[,tech]<-rfMiss$importance[,1]}else{VI[,tech]<-rfMiss$importance} #If y is a factor, permutation importance is given in 1st col. Otherwise just a vector
#Impute using missForest
tech <- 3
if(sum(is.na(x))>0){
# mf.q = 1/ncol(x) makes randomForestSRC::impute behave missForest-like.
ImputedX <- impute(data = x, mf.q = 1/ncol(x))
Imputed <- cbind(ImputedX, y)} else{
Imputed <- data
}
rfMiss <- rfsrc(y~., data=Imputed, ntree=ntrees,importance=TRUE)
if(is.factor(y)){VI[,tech]<-rfMiss$importance[,1]}else{VI[,tech]<-rfMiss$importance} #If y is a factor, permutation importance is given in 1st col. Otherwise just a vector
#Impute using RFSRC-1 iteration
# (imputation happens inside rfsrc itself via na.action="na.impute")
tech <- 4
rfMiss <- rfsrc(y~., data=data, ntree=ntrees,importance=c("permute"), na.action=c("na.impute"), nimpute=1)
if(is.factor(y)){VI[,tech]<-rfMiss$importance[,1]}else{VI[,tech]<-rfMiss$importance} #If y is a factor, permutation importance is given in 1st col. Otherwise just a vector
#Impute using RFSRC-5 iterations
tech <- 5
rfMiss <- rfsrc(y~., data=data, ntree=ntrees,importance=c("permute"), na.action=c("na.impute"), nimpute=5)
if(is.factor(y)){VI[,tech]<-rfMiss$importance[,1]}else{VI[,tech]<-rfMiss$importance} #If y is a factor, permutation importance is given in 1st col. Otherwise just a vector
# RFSRC unsupervised - 1 iteration
tech <- 6
if(sum(is.na(x))>0){
ImputedX <- impute(data = x, nimpute = 1)
Imputed <- cbind(ImputedX, y)} else{
Imputed <- data
}
rfMiss <- rfsrc(y~., data=Imputed, ntree=ntrees,importance=TRUE)
# NOTE(review): the trailing comment below says "3rd col" while the code
# (and the comments above) use column 1 — confirm which is correct.
if(is.factor(y)){VI[,tech]<-rfMiss$importance[,1]}else{VI[,tech]<-rfMiss$importance} #If y is a factor, permutation importance is given in 3rd col. Otherwise first
# RFSRC unsupervised - 5 iterations
tech <- 7
if(sum(is.na(x))>0){
ImputedX <- impute(data = x, nimpute = 5)
Imputed <- cbind(ImputedX, y)} else{
Imputed <- data
}
rfMiss <- rfsrc(y~., data=Imputed, ntree=ntrees,importance=TRUE)
if(is.factor(y)){VI[,tech]<-rfMiss$importance[,1]}else{VI[,tech]<-rfMiss$importance} #If y is a factor, permutation importance is given in 3rd col. Otherwise first
#Impute using CALIBER
#since this is a multiple imputation technique, perform nmult times then average VI
tech <- 8
VImat <- replicate(n=nmult, CaliberVI(x,y, ntreesimp = ntreesimp, ntrees=ntrees, xvar=xvar))
VI[,tech] <- rowMeans(VImat)
#Impute using mice
#since this is a multiple imputation technique, perform nmult times then average VI
tech <- 9
VImat <- replicate(n=nmult, miceVI(x,y,ntreesimp = ntreesimp, ntrees=ntrees, xvar=xvar))
VI[,tech] <- rowMeans(VImat)
return(VI)
}
#Function to do deletion and imputation. Apply this for different xvars after data have been generated
# For one variable: delete each proportion in pvec under the given
# missingness mechanism, then run every imputation technique on each
# resulting data set.  Returns a list of VI matrices, one per p.
Del_Impute <- function(data, xvar, pvec, ntrees=500, missingness){
  with_missing <- lapply(X = pvec, data = data, FUN = DeleteMissing,
                         xvar = xvar, missingness = missingness)
  lapply(X = with_missing, FUN = Impute_and_VI, xvar = xvar)
}
#function to generated data, then delete and impute for all variables of interest and measure VI
# Full simulation pass: generate one data set, then for every variable in
# xvarvec run the delete-and-impute pipeline over all proportions in pvec.
# Returns a nested list indexed by variable, then by p.
Gen_Del_Impute <- function(rho, xvarvec, pvec, size=100, ntrees=500, missingness="MCAR", simsetting=1){
  DATA <- Generate_Sim_Data(rho = rho, size = size, simsetting = simsetting)
  lapply(X = xvarvec, pvec = pvec, data = DATA, FUN = Del_Impute,
         missingness = missingness)
}
#function to delete and impute for all variables of interest for given dataset and measure VI
# Same as Gen_Del_Impute but for a user-supplied data set: run the
# delete-and-impute pipeline for every variable in xvarvec.
Del_Impute_wrapper <- function(data, xvarvec, pvec, ntrees=500, missingness="MCAR"){
  lapply(X = xvarvec, pvec = pvec, data = data, FUN = Del_Impute,
         missingness = missingness)
}
|
cc79b77c755d2165f015e7ba8ec44aa7d8c9bfe7
|
c49aa09f1f83ee8f8c9d1e716ae38381ed3fafca
|
/feature_selection/ex_8/de8_2_2.R
|
724d7b23374149ce14b9b4846b0b296f6bfb7360
|
[] |
no_license
|
whtbowers/multiomics
|
de879d61f15aa718a18dc866b1e5ef3848e27c42
|
81dcedf2c491107005d184f93cb6318865d00e65
|
refs/heads/master
| 2020-04-11T03:25:40.635266
| 2018-09-24T08:51:06
| 2018-09-24T08:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,995
|
r
|
de8_2_2.R
|
# Differential-expression (limma) and elastic-net feature selection on the
# ex_8 protein training set.  Configuration and data loading:
# NOTE(review): setwd() in a script makes it non-portable; paths below are
# relative to this directory.
setwd("/home/whb17/Documents/project3/project_files/feature_selection/ex_8/")
library(limma)
library(heatmap3)
library(SNFtool)
library(glmnet)
set.seed(12)
#df.gene.body <- read.csv("../../data/ex_8/gene_train_body.csv", header=TRUE, row.names = 1) # Protein test/train set
df.prot.body <- read.csv("../../data/ex_8/prot_train_body.csv", header=TRUE, row.names = 1) # Protein train set
df.meta <- read.csv("../../data/ex_8/gp_train_meta.csv", header=TRUE, row.names = 1)
# Group labels are compared with numeric literals below; "1" == 1 still
# matches because R coerces the number to character.
df.meta$group <- as.character(df.meta$group)
# To direct to the correct folder
# NOTE(review): `date` shadows base::date here; harmless in this script
# but easy to trip over.
date <- "2018-07-30/recheck_auto/"
ex_dir <- "ex_8/"
# Parameters
#alphas = c(0, 0.5, 1)
# Elastic-net mixing parameter and number of CV folds for foldid.
alpha = 0.5
K = 20
# Each dataset entry: (data, metadata, verbose name, file-name abbreviation).
datasets = list(
#list(df.gene.body, df.meta, "gene", "gene")
#,
list(df.prot.body, df.meta, "protein", "prot")
)
# Main analysis loop: for each dataset, build the HIV- patient subsets
# (TB vs LTBI, TB vs OD), then for each comparison run a limma moderated
# t-test (adjusted for site and sex), save BH-significant features, and
# fit elastic-net models with glmnet, writing plots as PNGs.
for (i in 1:length(datasets)){
set.data <- datasets[[i]][[1]]
set.meta <- datasets[[i]][[2]]
set.verbose <- datasets[[i]][[3]]
set.abrv <- datasets[[i]][[4]]
#Select HIV- TB vs LTBI patients
ind.hiv_neg.tb_ltbi <- c()
ind.hiv_neg.tb_od <- c()
# NOTE(review): the inner loops reuse the index `i` of the outer loop.
# R's `for` re-assigns its own sequence each iteration so this still
# iterates correctly, but it is fragile and worth renaming.
# Group codes (per the subsetting below): 1 = TB, 3 = LTBI, 5 = OD —
# TODO confirm against the metadata dictionary.
for (i in 1:nrow(set.data)){
if((set.meta$group[i] == 1) || (set.meta$group[i] == 3)){
ind.hiv_neg.tb_ltbi <- c(ind.hiv_neg.tb_ltbi, i)
}
}
for (i in 1:nrow(set.data)){
if((set.meta$group[i] == 1) || (set.meta$group[i] == 5)){
ind.hiv_neg.tb_od <- c(ind.hiv_neg.tb_od, i)
}
}
set.data.hiv_neg.tb_ltbi <- set.data[ind.hiv_neg.tb_ltbi,]
set.meta.hiv_neg.tb_ltbi <- set.meta[ind.hiv_neg.tb_ltbi,]
set.data.hiv_neg.tb_od <- set.data[ind.hiv_neg.tb_od,]
set.meta.hiv_neg.tb_od <- set.meta[ind.hiv_neg.tb_od,]
# Each comparison entry: (data, metadata, verbose name, abbreviation).
comparisons <- list(
#list(set.data.hiv_neg.tb_ltbi, set.meta.hiv_neg.tb_ltbi, "TB vs LTBI", "tb_ltbi")
#,
list(set.data.hiv_neg.tb_od, set.meta.hiv_neg.tb_od, "TB vs OD", "tb_od")
)
for (j in 1:length(comparisons)){
comp.data <- comparisons[[j]][[1]]
comp.meta <- comparisons[[j]][[2]]
comp.verbose <- comparisons[[j]][[3]]
comp.abrv<- comparisons[[j]][[4]]
#############################
## Limma-based DE analysis ##
#############################
# Make factors for analysis to consider
fac.sex <- factor(comp.meta$sex)
fac.site <- factor(comp.meta$site) # Correct for site? Maybe use as intercept
fac.tb <- factor(comp.meta$tb.status)
# TB status is the last term, so coef=4 below tests its effect.
design <- model.matrix(~fac.site + fac.sex + fac.tb)
fit <- lmFit(t(comp.data), design)
fit <- eBayes(fit, trend=TRUE, robust=TRUE)
results <- decideTests(fit)
print(summary(results))
tab.res <- topTable(fit, coef=4, n=22)
print(tab.res)
# Mean-difference (MA) plot of the TB coefficient.
png(paste("../../img/", ex_dir, date, set.abrv, "_", comp.abrv, "_meandiff.png", sep=""),
width = 5*300, # 5 x 300 pixels
height = 5*300,
res = 300, # 300 pixels per inch
pointsize = 8 # smaller font size
)
plotMD(fit, coef=4, status=results[,4], values=c(1,-1), hl.col=c("red","blue"))
dev.off()
# Get significant BH corrected values (adjusted p < 0.05)
ind.dif_ex <- c()
for (i in 1:length(tab.res$adj.P.Val)){
if (tab.res$adj.P.Val[i] < 0.05){
ind.dif_ex <- c(ind.dif_ex, i)
}
}
sig_P = tab.res$adj.P.Val[ind.dif_ex]
sig_factor = rownames(tab.res)[ind.dif_ex]
sig_rows = tab.res[ind.dif_ex,]
write.csv(sig_rows, paste("../../data/", ex_dir, set.abrv, "_", comp.abrv, "_sig_factors.csv", sep=""))
###########################
## Elastic net selection ##
###########################
# Fit to elastic net
# NOTE(review): family="gaussian" treats the group label as continuous;
# for a two-class outcome family="binomial" may be more appropriate.
fit.glm <- glmnet(as.matrix(comp.data),
comp.meta$group,
family="gaussian",
alpha=alpha
)
png(paste("../../img/", ex_dir, date, set.abrv, "_", comp.abrv, "_glmnet_coeff.png", sep=""),
width = 5*300, # 5 x 300 pixels
height = 5*300,
res = 300, # 300 pixels per inch
pointsize = 8 # smaller font size
)
plot(fit.glm, label=TRUE)
dev.off()
# Cross-validated analysis of coefficients
cvfit <- cv.glmnet(data.matrix(comp.data),
data.matrix(as.numeric(comp.meta$group)),
family="gaussian",
alpha=alpha
)
png(paste("../../img/", ex_dir, date, set.abrv, "_", comp.abrv, "_cv_glmnet_coeff.png", sep=""),
width = 5*300, # 5 x 300 pixels
height = 5*300,
res = 300, # 300 pixels per inch
pointsize = 8 # smaller font size
)
plot(cvfit, main=paste("Cross-validated coefficient plot for HIV-", comp.verbose, "protein data", sep=" "))
dev.off()
#########################################################
## Out of curiosity, comparing ridge, elastic net, and ##
## lasso with a shared fold assignment                 ##
#########################################################
foldid=sample(1:K,size=length(data.matrix(as.numeric(comp.meta$group))),replace=TRUE)
cv1=cv.glmnet(data.matrix(comp.data),data.matrix(as.numeric(comp.meta$group)),foldid=foldid,alpha=1)
cv.5=cv.glmnet(data.matrix(comp.data),data.matrix(as.numeric(comp.meta$group)),foldid=foldid,alpha=.5)
cv0=cv.glmnet(data.matrix(comp.data),data.matrix(as.numeric(comp.meta$group)),foldid=foldid,alpha=0)
png(paste("../../img/", ex_dir, date, set.abrv, "_", comp.abrv, "_alpha_comp.png", sep=""),
width = 5*300, # 5 x 300 pixels
height = 5*300,
res = 300, # 300 pixels per inch
pointsize = 8 # smaller font size
)
par(mfrow=c(2,2))
plot(cv1);plot(cv.5);plot(cv0)
plot(log(cv1$lambda),cv1$cvm,pch=19,col="red",xlab="log(Lambda)",ylab=cv1$name)
points(log(cv.5$lambda),cv.5$cvm,pch=19,col="grey")
points(log(cv0$lambda),cv0$cvm,pch=19,col="blue")
legend("topleft",legend=c("alpha= 1","alpha= .5","alpha 0"),pch=19,col=c("red","grey","blue"))
dev.off()
}
}
|
9bd42a240ec3950bb3dda09b3931c72dd6e342c5
|
976115badd0ab44f6f4313f6abbdbfade86a4222
|
/R/tabItem.R
|
aa7e9f9e0fab2f0b702845bf4e6f128cea3ec4fa
|
[] |
no_license
|
ValeriaNiceria/operDash
|
fc10c099231271e9d5bf9e10294536d1b4db1534
|
27792aece56b17c8dbfcc2f093eb176c4b8754b6
|
refs/heads/master
| 2020-06-03T02:19:20.100745
| 2020-01-17T19:48:38
| 2020-01-17T19:48:38
| 191,392,514
| 8
| 0
| null | 2020-01-17T19:48:39
| 2019-06-11T14:48:00
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,073
|
r
|
tabItem.R
|
#' @title tabItem
#' @name tabItem
#'
#' @description Groups content to be displayed in the body of the page.
#'
#' @param tab_name A name for this tabItem; the same name must be given to
#'   \code{sidebarItem} or \code{sidebarSubItem}.
#' @param title A title for the tabItem.
#' @param ... Content added to the body of the page.
#'
#' @export
tabItem <- function(tab_name = NULL, title = NULL, ...) {
  if (is.null(tab_name))
    stop("E necessario adicionar o tab_name")
  # When a title is given, wrap it in a content-header section.
  if (!is.null(title)) {
    title = shiny::tags$section(
      class="content-header",
      shiny::tags$h1(title)
    )
  }
  shiny::tagList(
    # Fixed: use shiny::tags consistently — bare `tags` is not imported by
    # this package and fails when shiny is loaded but not attached.
    shiny::tags$div(
      class="shiny-oper-tab-content",
      # Tabs start hidden; shiny-oper-tabs.js toggles visibility on
      # sidebar clicks using this id.
      id = paste0("shiny-tab-", tab_name),
      style = "visibility:hidden; display: none;",
      title,
      shiny::tags$section(
        class="content",
        ...
      )
    ),
    # Inject the tab-switching script once per page.
    shiny::singleton(
      shiny::includeScript(
        system.file("oper-0.1.0/js/shiny-oper-tabs.js",
                    package = "operDash")
      )
    )
  )
}
|
f9891c94c82065765534dffdc6c8ed7b795b1ee7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OrdinalLogisticBiplot/examples/summary.ordinal.logistic.biplot.Rd.R
|
20598bf4e38f575fdfaa4b2c1b4527b82fe460a8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
summary.ordinal.logistic.biplot.Rd.R
|
# Extracted example from the Rd file of summary.ordinal.logistic.biplot:
# fit an ordinal logistic biplot to the LevelSatPhd data and summarise it.
library(OrdinalLogisticBiplot)
### Name: summary.ordinal.logistic.biplot
### Title: Summary Method Function for Objects of Class
###   'ordinal.logistic.biplot'
### Aliases: summary.ordinal.logistic.biplot
### Keywords: summary
### ** Examples
data(LevelSatPhd)
olbo = OrdinalLogisticBiplot(LevelSatPhd,sFormula=NULL,numFactors=2,
method="EM",penalization=0.2,show=FALSE)
summary(olbo)
|
98b68ff2724f485c8c4c7cef3c9b07a94ccd28cd
|
18da8146b3602430e2c5687aba08e8555de8cb87
|
/Assignment 9.R
|
8cef38abf29ee74dd4741ca87aee7b274614e914
|
[] |
no_license
|
cnmwebb/assignment9
|
65266f72de671e4b25960dd61a7cdb379266ee58
|
6bc131c88b747c50a4562118b85979f668da024e
|
refs/heads/master
| 2021-04-03T08:18:36.068280
| 2018-03-12T01:13:36
| 2018-03-12T01:13:36
| 124,815,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
Assignment 9.R
|
# JohnsonJohnson is a quarterly ts object; `$`-indexing a ts errors
# ("$ operator is invalid for atomic vectors").  Fixed: flatten it into a
# data frame with explicit time and EPS columns first.
JJ <- data.frame(time = as.numeric(time(JohnsonJohnson)),
                 JohnsonJohnson = as.numeric(JohnsonJohnson))
# Base graphics scatter plot of earnings per share over time.
plot(JJ$time, JJ$JohnsonJohnson, main = "EPS Over Time", xlab = "Year",
     ylab = "EPS", pch = 20, col = 2, cex = 2)
# Same plot with lattice.
library(lattice)
xyplot(JohnsonJohnson ~ time, data = JJ, pch = ".", cex = 5,
       main = "EPS Over Time", xlab = "Year", ylab = "EPS")
# Same plot with ggplot2.
library(ggplot2)
ggplot(JJ, aes(time, JohnsonJohnson)) + geom_point(col = "green") +
  ggtitle("EPS Over Time") + labs(y = "EPS", x = "Year")
|
cc08772c6b485850bb994bd0cd0d3e33c7207a66
|
3ae0832863c3fd6b00a684b309c3ba760b41918e
|
/R/selection.R
|
52eb1e0c3d1c22ed04227a38d5efedac451eb18c
|
[] |
no_license
|
sestelo/fwdselect
|
a0a86b2810f5057c6dc95514faf6fd15f2058c1f
|
1446635b14e75284dc035dacf718de1dc0d6316b
|
refs/heads/master
| 2021-01-01T05:38:42.817077
| 2015-12-21T11:19:01
| 2015-12-21T11:19:01
| 30,368,847
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,556
|
r
|
selection.R
|
#'Selecting a subset of \code{q} variables
#'
#'@description Main function for selecting the best subset of \eqn{q} variables.
#' Note that the selection procedure can be used with lm, glm or gam functions.
#'@param x A data frame containing all the covariates.
#'@param y A vector with the response values.
#'@param q An integer specifying the size of the subset of variables to be
#' selected.
#'@param prevar A vector containing the variable numbers of the best subset of
#' \code{q-1} variables. \code{NULL}, by default.
#'@param criterion The information criterion to be used.
#' Default is the deviance. Other functions provided
#' are the coefficient of determination (\code{"R2"}), the residual
#' variance (\code{"variance"}), the Akaike information criterion (\code{"aic"}),
#' AIC with a correction for finite sample sizes (\code{"aicc"})
#' and the Bayesian information criterion (\code{"bic"}). The deviance,
#' coefficient of determination and variance are calculated by cross-validation.
#'@param method A character string specifying which regression method is used,
#' i.e., linear models (\code{"lm"}), generalized linear models
#' (\code{"glm"}) or generalized additive models (\code{"gam"}).
#'@param family A description of the error distribution and link function to be
#' used in the model: (\code{"gaussian"}), (\code{"binomial"}) or
#' (\code{"poisson"}).
#'@param seconds A logical value. By default, \code{FALSE}. If \code{TRUE}
#' then, rather than returning the single best model only, the function returns
#' a few of the best models (equivalent).
#'@param nmodels Number of secondary models to be returned.
#'@param nfolds Number of folds for the cross-validation procedure, for
#'\code{deviance}, \code{R2} or \code{variance} criterion.
#'@param cluster A logical value. If \code{TRUE} (default), the
#' procedure is parallelized. Note that there are cases without enough
#' repetitions (e.g., a low number of initial variables) that R will gain in
#' performance through serial computation. R takes time to distribute tasks
#' across the processors also it will need time for binding them all together
#' later on. Therefore, if the time for distributing and gathering pieces
#' together is greater than the time need for single-thread computing, it does
#' not worth parallelize.
#'@param ncores An integer value specifying the number of cores to be used
#' in the parallelized procedure. If \code{NULL} (default), the number of cores to be used
#' is equal to the number of cores of the machine - 1.
#'@return
#'\item{Best model}{The best model. If \code{seconds=TRUE}, it returns
#' also the best alternative models.}
#' \item{Variable name}{Names of the variable.}
#' \item{Variable number}{Number of the variables.}
#' \item{Information criterion}{Information criterion used and its value.}
#' \item{Prediction}{The prediction of the best model.}
#'@author Marta Sestelo, Nora M. Villanueva and Javier Roca-Pardinas.
#'@examples
#' library(FWDselect)
#' data(diabetes)
#' x = diabetes[ ,2:11]
#' y = diabetes[ ,1]
#' obj1 = selection(x, y, q = 1, method = "lm", criterion = "variance", cluster = FALSE)
#' obj1
#'
#' # second models
#' obj11 = selection(x, y, q = 1, method = "lm", criterion = "variance",
#' seconds = TRUE, nmodels = 2, cluster = FALSE)
#' obj11
#'
#' # prevar argument
#' obj2 = selection(x, y, q = 2, method = "lm", criterion = "variance", cluster = FALSE)
#' obj2
#' obj3 = selection(x, y, q = 3, prevar = obj2$Variable_numbers,
#' method = "lm", criterion = "variance", cluster = FALSE)
#'
#'
#'@importFrom mgcv gam
#'@importFrom mgcv predict.gam
#'@importFrom parallel detectCores
#'@importFrom parallel makeCluster
#'@importFrom parallel parLapply
#'@importFrom parallel stopCluster
#'@importFrom stats as.formula
#'@importFrom stats deviance
#'@importFrom stats lm
#'@importFrom stats glm
#'@importFrom stats predict
#'@importFrom stats update
#'@importFrom stats var
#'@importFrom stats AIC
#'@importFrom stats BIC
#'@importFrom stats logLik
#'@export
selection <- function(x, y, q, prevar = NULL, criterion = "deviance",
                      method = "lm", family = "gaussian", seconds = FALSE,
                      nmodels = 1, nfolds = 5, cluster = TRUE, ncores = NULL) {
  # Forward stepwise selection of the best subset of 'q' covariates of 'x'
  # for modelling 'y', fitted via lm/glm/gam and scored by a cross-validated
  # criterion ("deviance", "R2", "variance") or an information criterion
  # ("aic", "aicc", "bic").  Optionally returns 'nmodels' alternative
  # ("second best") models and can parallelize the per-covariate search.
  if (missing(x)) {
    stop("Argument \"x\" is missing, with no default")
  }
  if (missing(y)) {
    stop("Argument \"y\" is missing, with no default")
  }
  if (missing(q)) {
    stop("Argument \"q\" is missing, with no default")
  }
  nvar <- ncol(x)       # number of candidate covariates
  inside <- integer(q)  # column indices of the covariates currently selected
  n <- length(y)
  if(q == nvar) {
    stop('The size of subset \'q\' is the same that the number of covariates')
  }
  if(!criterion %in% c("deviance", "R2", "variance", "aic", "aicc", "bic")) {
    stop('The selected criterion is not implemented')
  }
  if (cluster == TRUE & detectCores() == 2 & is.null(ncores)) {
    stop("The number of cores used in the parallelized procedure is just one.
         It is recommended to use cluster = FALSE ")
  }
  # Set up the parallel cluster (FORK on unix, PSOCK elsewhere); it is
  # stopped automatically when the function exits.
  if (cluster == TRUE){
    if (is.null(ncores)){
      ncores <- detectCores() - 1
    }else{
      ncores <- ncores
    }
    if(.Platform$OS.type == "unix"){par_type = "FORK"}else{par_type = "PSOCK"}
    cl <- makeCluster(ncores, type = par_type)
    on.exit(stopCluster(cl))
  }
  #dat = data.frame(y,x)
  # Start from the empty (intercept-only) model of the requested kind.
  if (method == "lm") {
    model <- lm(y ~ NULL)
  }
  if (method == "glm") {
    model <- glm(y ~ NULL, family = family)
  }
  if (method == "gam") {
    model <- gam(y ~ NULL, family = family)
  }
  # If 'prevar' supplies the q-1 variables from a previous run, seed the
  # model with them so the search only has to find the q-th variable.
  if (is.null(prevar)) { }else{
    xyes = c()
    for (l in 1:(q-1)){
      if (method == "gam" & is.factor(x[, prevar[l]]) == FALSE) {
        xnam = paste("s(x[,", prevar[l], "])", sep = "")
      } else {
        xnam = paste("x[,", prevar[l], "]", sep = "")
      }
      xyes[l] = xnam
    }
    form1 <- update(as.formula(model, env = environment(fun = NULL)), paste(". ~ ",
                    paste(xyes, collapse = "+")))
    if (method == "gam"){
      model <- gam(form1, family = family)
    }else{
      model <- glm(form1, family = family)
    }
    # model <- update(model, as.formula(paste(". ~ ", paste(xyes, collapse = "+"))))
  }
  # fwdstep: deviance of the current model augmented with covariate j
  # (smooth term s(.) for non-factor covariates under gam).
  fwdstep <- function(j){
    form0 <- as.formula(model, env = environment(fun = NULL))
    if (method == "gam" & is.factor(x[,j]) == FALSE) {
      form1 <- update(form0, . ~ . + s(x[,j]))
    }else{
      form1 <- update(form0, . ~ . + x[,j])
    }
    if (method == "gam"){
      models <- gam(form1, family = family)
    }else{
      models <- glm(form1, family = family)
    }
    return(deviance(models))
  }
  # fwdstep2: deviance of the model where the term in position 'bucle' is
  # replaced by covariate j (used by the exchange/swap phase below).
  fwdstep2 <- function(j, bucle){
    if (method == "gam" & is.factor(x[ ,j]) == FALSE) {
      xnam[bucle] <- paste("s(x[ ,", j, "])",sep="")
    } else {
      xnam[bucle] <- paste("x[ ,", j, "]",sep="")
    }
    form0 <- as.formula(model, env = environment())
    form1 <- update(form0, paste(". ~ ", paste(xnam, collapse = "+")))
    if (method == "gam"){
      model1 <- gam(form1, family = family)
    }else{
      model1 <- glm(form1, family = family)
    }
    return(deviance(model1))
  }
  out <- 1:nvar   # column indices not (yet) in the model
  if(is.null(prevar)){
    xyes = NULL
    bucle <- c(1:q)
  }else{
    bucle <- q
    inside <- prevar
    out <- out[-prevar]
  }
  # Forward phase: greedily add the covariate that minimizes the deviance
  # until the model contains q terms.
  for (k in bucle) {
    ic <- NULL
    if (cluster == TRUE){
      ic <- parLapply(cl = cl, out, fwdstep)
    }else{
      ic <- sapply(out, fwdstep)
    }
    ii = which.min(ic)
    inside[k] = out[ii]
    out = out[-ii]
    if (method == "gam" & is.factor(x[, inside[[k]]]) == FALSE) {
      xnam = paste("s(x[,", inside[[k]], "])", sep = "")
    } else {
      xnam = paste("x[,", inside[[k]], "]", sep = "")
    }
    xyes[k] = xnam
    form1 <- update(as.formula(model, env = environment(fun = NULL)), paste(". ~ ",
                    paste(xyes, collapse = "+")))
    if (method == "gam"){
      model <- gam(form1, family = family)
    }else{
      model <- glm(form1, family = family)
    }
    bestic = deviance(model)
  }
  ## At this point the first q variables have been introduced.
  stop <- integer(q)
  end <- 1
  if (q == 1 | q == nvar) {
    end <- 0
  }
  cont <- 0
  # Exchange phase: repeatedly try to swap each selected covariate for an
  # excluded one as long as some swap lowers the deviance.
  while (end != 0) {
    stop <- 0
    for (f in 1:q) {
      # collect the current term names in a vector
      for (num in 1:length(inside)) {
        if (method == "gam" & is.factor(x[, inside[num]]) == FALSE) {
          xnam[num] = paste("s(x[,", inside[num], "])", sep = "")
        } else {
          xnam[num] = paste("x[,", inside[num], "]", sep = "")
        }
      }
      ic <- NULL
      if (cluster == TRUE){
        ic <- parLapply(cl = cl, out, fwdstep2, bucle = f)
      }else{
        ic <- sapply(out, fwdstep2, bucle = f)
      }
      ii = which.min(ic)
      if (ic[ii] >= bestic) {
        stop[f] <- 0
      } else {
        ii = which.min(ic)
        oldinside = inside
        inside[f] = out[ii]
        out[ii] = oldinside[f]
        if (method == "gam" & is.factor(x[ ,inside[f]]) == FALSE) {
          xin = paste("s(x[,", inside[f], "])", sep = "")
        } else {
          xin = paste("x[,", inside[f], "]", sep = "")
        }
        xnam[f] = xin
        #model <- update(model, as.formula(paste(". ~ ", paste(xnam, collapse = "+"))))
        form1 <- update(as.formula(model, env = environment()), paste(". ~ ", paste(xnam, collapse = "+")))
        if (method == "gam"){
          model <- gam(form1, family = family)
        }else{
          model <- glm(form1, family = family)
        }
        bestic = deviance(model)
        stop[f] = 1
      }
    }
    cont = cont + 1
    end = sum(stop)
  }
  pred <- predict(model, type = "response")
  # cv: nfolds-fold cross-validated value of the selected criterion for the
  # final model (deviance per family, R2, or residual variance).
  cv <- function(nfolds){
    # compute the criterion for one fold (observations in the fold get
    # weight 0 when refitting, then are used as the test set)
    eachfold <- function(fold){
      test <- aux$which==fold
      Wtrainning = rep(1, n)
      Wtrainning[test] = 0
      formula <- eval(model$call$formula)
      dat <- data.frame(Wtrainning = Wtrainning)
      if (method == "lm") {
        Mtrainning = lm(formula, weights = Wtrainning, data = dat)
      }
      if (method == "glm") {
        Mtrainning = glm(formula, family = family, weights = Wtrainning, data = dat)
      }
      if (method == "gam") {
        Mtrainning = gam(formula, family = family, weights = Wtrainning, data = dat)
      }
      muhat = predict(Mtrainning, type = "response")
      muhat_test = muhat[test]
      y_test = y[test]
      # NOTE(review): this conversion happens after y_test was extracted, so
      # y_test keeps the original coding for a factor response — confirm
      # whether the conversion was meant to happen before the subset.
      if (family == "binomial") {y = as.numeric(as.character(y))}
      if (criterion == "deviance") {
        if (family == "gaussian"){
          dev_cv = sum((y_test - muhat_test)^2, na.rm = TRUE)
        }
        if (family == "binomial") {
          # clamp fitted probabilities away from 0 and 1 before taking logs
          ii = muhat_test < 1e-04
          muhat_test[ii] = 1e-04
          ii = muhat_test > 0.9999
          muhat_test[ii] = 0.9999
          # NOTE(review): length(test) is the full-sample length (test is a
          # logical mask), while entadd has test-set length — confirm the
          # intended vector lengths in the sum below.
          entrop = rep(0, length(test))
          ii = (1 - y_test) * y_test > 0
          if (sum(ii, na.rm = TRUE) > 0) {
            entrop[ii] = 2 * (y_test[ii] * log(y_test[ii])) +
              ((1 - y_test[ii]) * log(1 - y_test[ii]))
          } else {
            entrop = 0
          }
          entadd = 2 * y_test * log(muhat_test) +
            (1 - y_test) * log(1 - muhat_test)
          dev_cv = sum(entrop - entadd, na.rm = TRUE)
        }
        if (family == "poisson") {
          tempf = muhat_test
          ii = tempf < 1e-04
          tempf[ii] = 1e-04
          dev_cv = 2 * (-y_test * log(tempf) - (y_test - muhat_test))
          ii = y_test > 0
          dev_cv[ii] = dev_cv[ii] + (2 * y_test[ii] * log(y_test[ii]))
          dev_cv = sum(dev_cv, na.rm = TRUE)
        }
      } else if (criterion == "R2") {
        var_res = sum((y[test] - muhat[test])^2, na.rm = TRUE)/length(test)
        r2cv = 1 - (var_res/(var(y[test]) * (length(test) - 1)/length(test)))
      }else{
        var_res = sum((y[test] - muhat[test])^2, na.rm = TRUE)/length(test)
      }
      if (criterion == "deviance") {
        return(dev_cv)
      } else if (criterion == "R2") {
        return(r2cv)
      }else{
        return(var_res)
      }
    }
    aux <- cvTools::cvFolds(n, K = nfolds, type = "consecutive")
    if (cluster == TRUE){
      cv_ics <- parLapply(cl = cl, 1:nfolds, eachfold)
    }else{
      cv_ics <- sapply(1:nfolds, eachfold)
    }
    return(mean(unlist(cv_ics)))
  }
  # aicc: small-sample corrected AIC for a fitted model.
  aicc <- function(model){
    n <- length(model$y)
    k <- attr(logLik(model), "df")
    res <- AIC(model) + 2 * k * (k+1)/(n-k-1)
  }
  if(criterion %in% c("deviance", "R2", "variance")){
    icfin <- cv(nfolds)
  }else{
    if (criterion == "aic"){
      icfin <- AIC(model)
    }else if(criterion == "aicc"){
      icfin <- aicc(model)
    }else{
      icfin <- BIC(model)
    }
  }
  # NOTE(review): inherits(x, "data.frame") would be the more robust check.
  if(class(x) == "data.frame"){
    names1 = names(x[inside])
  }else{
    allnames <- colnames(x)
    names1 = allnames[inside]
  }
  if(is.null(names1)){names1=inside} #in case x carries no column names
  res <- list(Best_model = model, Variable_names = names1,
              Variable_numbers = inside, Information_Criterion = icfin,
              ic = criterion, seconds = seconds, nmodels = nmodels,
              Prediction = pred)
  # Second models: optionally search for 'nmodels' alternative subsets whose
  # deviance lies just above the best (or previous alternative) model.
  if (seconds == TRUE) {
    bestic1 = bestic
    besticn = 0
    cont = -1
    fin = 1
    for (h in 1:nmodels) {
      cont = -1
      fin = 1
      while (fin != 0) {
        fin = 0
        for (zz in 1:q) {
          # collect the current term names in a vector
          for (num in 1:length(inside)) {
            if (method == "gam" & is.factor(x[, inside[num]]) == FALSE) {
              xnam[num] = paste("s(x[,", inside[num], "])", sep = "")
            } else {
              xnam[num] = paste("x[,", inside[num], "]", sep = "")
            }
          }
          ic2 <- NULL
          if (cluster == TRUE){
            ic2 <- parLapply(cl = cl, out, fwdstep2, bucle = zz)
          }else{
            ic2 <- sapply(out, fwdstep2, bucle = zz)
          }
          ic2 <- unlist(ic2)
          # reset the incumbent on the first pass of each alternative search
          if ((zz == 1) & (cont == -1)) {
            bestic = 1e+11}
          # oldinside = inside
          # inside[zz] = out[1]
          # out[1] = oldinside[1]
          # }
          for (j in 1:length(out)) {
            # if ((zz == 1) & (cont == -1) &
            #     (j == 1)) {
            #   j = 2
            # }
            if (h == 1) {
              # accept a swap only if it improves on the incumbent while
              # staying strictly worse (rounded) than the overall best model
              if ((ic2[j] < bestic) &
                  (round(ic2[j],3) > round(bestic1,3))) {
                bestic = ic2[j]
                oldinside = inside
                inside[zz] = out[j]
                out[j] = oldinside[zz]
                fin = 1
              }
            } else {
              # subsequent alternatives must stay worse than the previous one
              if ((ic2[j] < bestic) &
                  (ic2[j] > besticn)) {
                bestic = ic2[j]
                oldinside = inside
                inside[zz] = out[j]
                out[j] = oldinside[zz]
                fin = 1
              }
            }
          }
        }
        cont = cont + 1
      }
      for (num in 1:length(inside)) {
        if (method == "gam" & is.factor(x[, inside[num]]) == FALSE) {
          xnam[num] = paste("s(x[,", inside[num], "])", sep = "")
        } else {
          xnam[num] = paste("x[,", inside[num], "]", sep = "")
        }
      }
      # model <- update(model, as.formula(paste(". ~ ",paste(xnam, collapse = "+"))))
      form1 <- update(as.formula(model, env = environment()), paste(". ~ ", paste(xnam, collapse = "+")))
      if (method == "gam"){
        model <- gam(form1, family = family)
      }else{
        model <- glm(form1, family = family)
      }
      besticn = deviance(model)
      if(criterion %in% c("deviance", "R2", "variance")){
        icfin <- cv(nfolds)
      }else{
        if (criterion == "aic"){
          icfin <- AIC(model)
        }else if(criterion == "aicc"){
          icfin <- aicc(model)
        }else{
          icfin <- BIC(model)
        }
      }
      if(class(x) == "data.frame"){
        names2 = names(x[inside])
      }else{
        allnames <- colnames(x)
        names2 = allnames[inside]}
      if(is.null(names2)){names2=inside} #in case x carries no column names
      res2 <- list(Alternative_model = model, Variable_names = names2,
                   Variable_numbers = inside, Information_Criterion = icfin,
                   ic = criterion)
      res = c(res, res2)
    }
  }
  class(res) <- "selection"
  return(res)
}
|
8ea6baf2748298a243f0b3b1b70bc87132951126
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/umx/examples/umx_check_names.Rd.R
|
99b78e3425892286556d38e4455fc8d43aad491b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 704
|
r
|
umx_check_names.Rd.R
|
# Auto-extracted example script for umx::umx_check_names (from the package's
# Rd examples).  Exercises the name-checking helper on a data frame, a
# matrix, and a covariance matrix, with and without strict options.
library(umx)
### Name: umx_check_names
### Title: umx_check_names
### Aliases: umx_check_names
### ** Examples
require(umx)
data(demoOneFactor) # "x1" "x2" "x3" "x4" "x5"
umx_check_names(c("x1", "x2"), demoOneFactor)
umx_check_names(c("x1", "x2"), as.matrix(demoOneFactor))
umx_check_names(c("x1", "x2"), cov(demoOneFactor[, c("x1","x2")]))
# die = FALSE: "z1" is absent from the data, but execution continues
umx_check_names(c("z1", "x2"), data = demoOneFactor, die = FALSE)
# no_others = TRUE additionally restricts the data to only the listed names
umx_check_names(c("x1", "x2"), data = demoOneFactor, die = FALSE, no_others = TRUE)
umx_check_names(c("x1","x2","x3","x4","x5"), data = demoOneFactor, die = FALSE, no_others = TRUE)
## Not run:
##D umx_check_names(c("bad_var_name", "x2"), data = demoOneFactor, die = TRUE)
## End(Not run)
|
6cdba5030fd4602f7e1fb80abb8eda190190f53d
|
d2f39a2258dbe6253bc28fd00717a67b131751f4
|
/man/EmptyTheme.Rd
|
705b02d03d629885c6de07668f4a892224ed112d
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
andrewzm/MVST
|
6e5d9d5c84ba0d28e38fdb69b12cfa8ba1bcc45f
|
2bf0835e66e04e120f78fe8673afe3dd9d6f42c0
|
refs/heads/master
| 2022-09-29T23:40:39.048820
| 2022-09-15T21:37:50
| 2022-09-15T21:37:50
| 20,478,703
| 10
| 9
| null | 2018-10-18T14:50:36
| 2014-06-04T10:13:03
|
R
|
UTF-8
|
R
| false
| true
| 452
|
rd
|
EmptyTheme.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plottingfns.R
\name{EmptyTheme}
\alias{EmptyTheme}
\title{Empty-plot theme}
\usage{
EmptyTheme()
}
\value{
Object of class \code{ggplot}
}
\description{
Formats a ggplot object for plotting with no annotations/grids.
}
\examples{
\dontrun{
X <- data.frame(x=runif(100),y = runif(100), z = runif(100))
EmptyTheme() + geom_point(data=X,aes(x,y,colour=z))
}
}
\keyword{ggplot}
|
36cc34ee11e64390f67026405e330c119519b9eb
|
75db022357f0aaff30d419c13eafb9dddfce885a
|
/R/habitat.model.data.r
|
8628f320390df543bebeea18db8e17bc59809517
|
[] |
no_license
|
LobsterScience/bio.lobster
|
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
|
b2af955291cb70c2d994e58fd99d68c6d7907181
|
refs/heads/master
| 2023-09-01T00:12:23.064363
| 2023-08-23T16:34:12
| 2023-08-23T16:34:12
| 60,636,005
| 11
| 5
| null | 2017-01-20T14:35:09
| 2016-06-07T18:18:28
|
R
|
UTF-8
|
R
| false
| false
| 12,420
|
r
|
habitat.model.data.r
|
#' @export
# Assemble habitat-model input data sets for lobster analyses.
#
# 'DS' selects the data source: "logs41", "nefsc.surveys", "dfo.summer",
# "dfo.georges" or "prediction.surface".  The ".redo" variants rebuild the
# cached .rdata products from the underlying survey/logbook databases; the
# plain variants load the cached product and return a trimmed data frame
# (response renamed to 'B') ready for habitat modelling.  'p' is the usual
# project parameter list (projection, grid, years, etc.).
habitat.model.data <- function(DS, p) {
  options(stringsAsFactors=F)
  fn.root = file.path( project.datadirectory('bio.lobster'), "data")
  fnProducts = file.path(fn.root,'products')
  dir.create( fn.root, recursive = TRUE, showWarnings = FALSE )
  dir.create( fnProducts, recursive = TRUE, showWarnings = FALSE )
  if(DS %in% c('logs41','logs41.redo')) {
    # NOTE(review): DS is 'logs41' or 'logs41.redo' here, so the
    # 'logs41.habitat' test below can never be true — confirm which value
    # this branch was meant to match.
    if(DS == 'logs41.habitat') {
      a = lobster.db('logs41.habitat')
      a$CPUE = a$ADJCATCH / a$NUM_OF_TRAPS
      vars.2.keep = c('dyear','plon','plat','timestamp',"CPUE",'z','dZ','ddZ','t','substrate.mean')
      a = a[,vars.2.keep]
      a = rename.df(a,c('CPUE'),c('B'))
      return(a)
    }
    a = lobster.db('logbook41.habitat.redo')
    return(a)
  }
  if(DS %in% c('nefsc.surveys', 'nefsc.surveys.redo')) {
    if(DS == 'nefsc.surveys') {
      load(file=file.path(fnProducts,'nefscHabitatData.rdata'))
      vars.2.keep = c('dyear','plon','plat','timestamp','z','dZ','ddZ','t','TOTWGT')
      ab = ab[,vars.2.keep]
      ab = rename.df(ab,c('TOTWGT'),c('B'))
      return(ab)
    }
    # Rebuild: pull NEFSC spring + fall stratified survey sets, then fill in
    # bathymetry and bottom temperature from the modelled surfaces.
    p$reweight.strata = F #this subsets
    p$years.to.estimate = c(1969:2016)
    p$length.based = T
    p$size.class = c(50,300)
    p$by.sex = F
    p$bootstrapped.ci=F
    p$strata.efficiencies=F
    p$clusters = c( rep( "localhost", 7) )
    p$strata.files.return=T
    p$season =c('spring')# p$series =c('spring');p$series =c('fall')
    p$define.by.polygons = F
    p$lobster.subunits=F
    p$area = 'all'
    p = make.list(list(yrs=p$years.to.estimate),Y=p)
    aout= nefsc.analysis(DS='stratified.estimates.redo',p=p,save=F)
    aa = do.call(rbind,lapply(aout,function(X) X[[2]])) #return just the strata data
    p$season ='fall'
    aout= nefsc.analysis(DS='stratified.estimates.redo',p=p,save=F)
    bb = do.call(rbind,lapply(aout,function(X) X[[2]])) #return just the strata data
    aa = rbind(aa,bb)
    aa = lonlat2planar(aa,input_names = c('X','Y'),newnames=c('plon','plat'),proj.type = p$internal.projection)
    aa$plon = grid.internal(aa$plon,p$plons)
    aa$plat = grid.internal(aa$plat,p$plats)
    # keep the observed depth aside, then replace missing values with the
    # modelled bathymetry looked up on the planar grid
    aa$zO = aa$z
    aa$z = aa$depth = NULL
    aa = completeFun(aa,c('plon','plat'))
    load(file.path(project.datadirectory('bio.bathymetry'),'modelled','bathymetry.baseline.canada.east.rdata'))
    baseLine = Z[,c('plon','plat')]
    locsmap = match(
      lbm::array_map( "xy->1", aa[,c("plon","plat")], gridparams=p$gridparams ),
      lbm::array_map( "xy->1", baseLine, gridparams=p$gridparams )
    )
    ab = cbind(aa,Z[locsmap,c('z','dZ','ddZ')])
    j = which(is.na(ab$zO))
    ab$zO[j] = ab$z[j]
    ab$z = ab$zO
    #time stamping for seasonal temperatures
    ab$timestamp = as.POSIXct(ab$BEGIN_GMT_TOWDATE,tz='America/Halifax',origin=lubridate::origin)
    ab$timestamp = with_tz(ab$timestamp,"UTC")
    ab$dyear = lubridate::decimal_date(ab$timestamp)- lubridate::year(ab$timestamp)
    ab$rdy <- round(ab$dyear,1)*10 #temperature column to pull from
    ab$ry <- ab$GMT_YEAR -1950 + 1 #index year for temperature
    ff = file.path(project.datadirectory('bio.temperature'),'modelled','t','canada.east','temperature.spatial.annual.seasonal.rdata')
    load(ff)
    print(paste('loading ',ff))
    # fill missing observed bottom temperatures from the seasonal surface O
    j = which(is.na(ab$BOTTEMP))
    locsmap = match(
      lbm::array_map( "xy->1", ab[j,c("plon","plat")], gridparams=p$gridparams ),
      lbm::array_map( "xy->1", baseLine, gridparams=p$gridparams )
    ) #to get the correct locations from the temperature surface
    k = ab[j,'ry']
    l = ab[j,'rdy']
    tp=c()
    for(i in 1:length(j)){
      tp[i] = O[locsmap[i],k[i],l[i]]
    }
    ab$BOTTEMP[j] <- tp
    ab$t = ab$BOTTEMP
    save(ab,file=file.path(fnProducts,'nefscHabitatData.rdata'))
    print('Done Aug 28, 2017')
  }
  if(DS %in% c('dfo.summer','dfo.summer.redo')) {
    if(DS =='dfo.summer') {
      load(file=file.path(fnProducts,'dfosummerHabitatData.rdata'))
      vars.2.keep = c('dyear','plon','plat','timestamp','t','z','dZ','ddZ','totwgt')
      ab = ab[,vars.2.keep]
      ab = rename.df(ab,c('totwgt'),c('B'))
      return(ab)
    }
    # Rebuild: DFO summer RV survey, same bathymetry/temperature infill
    # pattern as the NEFSC branch above.
    p$series =c('summer')# p$series =c('georges');p$series =c('fall')
    p$define.by.polygons = F
    p$lobster.subunits=F
    p$area = 'all'
    p$years.to.estimate = c(1970:2016)
    p$length.based = F
    p$by.sex = F
    p$bootstrapped.ci=F
    p$strata.files.return=F
    p$vessel.correction.fixed=1.2
    p$strat = NULL
    p$clusters = c( rep( "localhost", 7) )
    p$strata.efficiencies = F
    p$strata.files.return=T
    p = make.list(list(yrs=p$years.to.estimate),Y=p)
    # DFO survey All stations including adjacent
    p$define.by.polygons = F
    p$lobster.subunits=F
    p$reweight.strata = F #this subsets
    aout= dfo.rv.analysis(DS='stratified.estimates.redo',p=p,save=F)
    aa = do.call(rbind,lapply(aout,function(X) X[[2]])) #return just the strata data
    aa = lonlat2planar(aa,input_names = c('X','Y'),proj.type = p$internal.projection)
    aa$plon = grid.internal(aa$plon,p$plons)
    aa$plat = grid.internal(aa$plat,p$plats)
    aa$zO = aa$z
    aa$z = NA
    aa$depth = NULL
    aa = completeFun(aa,c('plon','plat'))
    load(file.path(project.datadirectory('bio.bathymetry'),'modelled','bathymetry.baseline.canada.east.rdata'))
    baseLine = Z[,c('plon','plat')]
    locsmap = match(
      lbm::array_map( "xy->1", aa[,c("plon","plat")], gridparams=p$gridparams ),
      lbm::array_map( "xy->1", baseLine, gridparams=p$gridparams )
    )
    ab = cbind(aa,Z[locsmap,c('z','dZ','ddZ')])
    j = which(is.na(ab$zO))
    ab$zO[j] = ab$z[j]
    ab$z = ab$zO
    #time stamping for seasonal temperatures
    ab$timestamp = as.POSIXct(ab$sdate,tz='America/Halifax',origin=lubridate::origin)
    ab$timestamp = with_tz(ab$timestamp,"UTC")
    ab$dyear = lubridate::decimal_date(ab$timestamp)- lubridate::year(ab$timestamp)
    ab$rdy <- round(ab$dyear,1)*10 #temperature column to pull from
    ab$ry <- year(ab$timestamp) -1950 + 1 #index year for temperature
    ff = file.path(project.datadirectory('bio.temperature'),'modelled','t','canada.east','temperature.spatial.annual.seasonal.rdata')
    load(ff)
    print(paste('loading ',ff))
    j = which(is.na(ab$bottom_temperature))
    locsmap = match(
      lbm::array_map( "xy->1", ab[j,c("plon","plat")], gridparams=p$gridparams ),
      lbm::array_map( "xy->1", baseLine, gridparams=p$gridparams )
    )
    k = ab[j,'ry']
    l = ab[j,'rdy']
    tp=c()
    for(i in 1:length(j)){
      tp[i] = O[locsmap[i],k[i],l[i]]
    }
    ab$bottom_temperature[j] <- tp
    ab$t = ab$bottom_temperature
    save(ab,file=file.path(fnProducts,'dfosummerHabitatData.rdata'))
  }
  if(DS %in% c('dfo.georges','dfo.georges.redo')) {
    if(DS == 'dfo.georges') {
      load(file=file.path(fnProducts,'dfogeorgesHabitatData.rdata'))
      vars.2.keep = c('dyear','plon','plat','timestamp','t','z','dZ','ddZ','totwgt')
      ab = ab[,vars.2.keep]
      ab = rename.df(ab,c('totwgt'),c('B'))
      return(ab)
    }
    # Rebuild: DFO Georges Bank RV survey, same infill pattern as above.
    p$series =c('georges')# p$series =c('georges');p$series =c('fall')
    p$define.by.polygons = F
    p$lobster.subunits=F
    p$years.to.estimate = c(1987:2016)
    p$length.based = F
    p$by.sex = F
    p$bootstrapped.ci=T
    p$strata.files.return=F
    p$vessel.correction.fixed=1.2
    p$strat = NULL
    p$clusters = c( rep( "localhost", 7) )
    p$strata.efficiencies = F
    p = make.list(list(yrs=p$years.to.estimate),Y=p)
    # DFO survey All stations including adjacent
    p$define.by.polygons = F
    p$lobster.subunits=F
    p$area = 'all'
    p$reweight.strata = F #this subsets
    p$strata.files.return=T
    aout= dfo.rv.analysis(DS='stratified.estimates.redo',p=p,save=F)
    aa = do.call(rbind,lapply(aout,function(X) X[[2]])) #return just the strata data
    aa = lonlat2planar(aa,input_names = c('X','Y'),proj.type = p$internal.projection)
    aa$plon = grid.internal(aa$plon,p$plons)
    aa$plat = grid.internal(aa$plat,p$plats)
    #aa$z = aa$z*1.8288
    aa$zO = aa$z
    aa$z = NA
    aa$depth = NULL
    aa = completeFun(aa,c('plon','plat'))
    load(file.path(project.datadirectory('bio.bathymetry'),'modelled','bathymetry.baseline.canada.east.rdata'))
    baseLine = Z[,c('plon','plat')]
    locsmap = match(
      lbm::array_map( "xy->1", aa[,c("plon","plat")], gridparams=p$gridparams ),
      lbm::array_map( "xy->1", baseLine, gridparams=p$gridparams )
    )
    ab = cbind(aa,Z[locsmap,c('z','dZ','ddZ')])
    j = which(is.na(ab$zO))
    ab$zO[j] = ab$z[j]
    ab$z = ab$zO
    #time stamping for seasonal temperatures
    ab$timestamp = as.POSIXct(ab$sdate,tz='America/Halifax',origin=lubridate::origin)
    ab$timestamp = with_tz(ab$timestamp,"UTC")
    ab$dyear = lubridate::decimal_date(ab$timestamp)- lubridate::year(ab$timestamp)
    ab$rdy <- round(ab$dyear,1)*10 #temperature column to pull from
    ab$ry <- year(ab$timestamp) -1950 + 1 #index year for temperature
    ff = file.path(project.datadirectory('bio.temperature'),'modelled','t','canada.east','temperature.spatial.annual.seasonal.rdata')
    load(ff)
    print(paste('loading ',ff))
    j = which(is.na(ab$bottom_temperature))
    locsmap = match(
      lbm::array_map( "xy->1", ab[j,c("plon","plat")], gridparams=p$gridparams ),
      lbm::array_map( "xy->1", baseLine, gridparams=p$gridparams )
    )
    k = ab[j,'ry']
    l = ab[j,'rdy']
    tp=c()
    for(i in 1:length(j)){
      tp[i] = O[locsmap[i],k[i],l[i]]
    }
    ab$bottom_temperature[j] <- tp
    ab$t = ab$bottom_temperature
    save(ab,file=file.path(fnProducts,'dfogeorgesHabitatData.rdata'))
  }
  if(DS %in% c('prediction.surface','prediction.surface.redo')) {
    if(DS == 'prediction.surface'){
      load(file=file.path(fnProducts,'CanadaEastPredictionSurface.rdata'))
      return(H)
    }
    # Rebuild: bathymetry baseline plus one temperature column per year
    # (annual mean, or a single seasonal slice when p$annual.T.means is FALSE)
    load(file.path(project.datadirectory('bio.bathymetry'),'modelled','bathymetry.baseline.canada.east.rdata'))
    H = Z[,c('z','dZ','ddZ','plon','plat')]
    ff = file.path(project.datadirectory('bio.temperature'),'modelled','t','canada.east','temperature.spatial.annual.seasonal.rdata')
    load(ff)
    for(y in p$yrs) {
      T = O[,y-1950+1,]
      if(p$annual.T.means) {
        T = rowMeans(T,na.rm=TRUE)
      } else {
        d= p$dyear * 10
        T = T[,d]
      }
      H = data.frame(H,T)
      names(H)[ncol(H)] <- paste('x',y,sep='.')
    }
    save(H,file=file.path(fnProducts,'CanadaEastPredictionSurface.rdata'))
    return(H)
  }
}
|
69c41df59b34d1951f79a9863ebab494b24f3a96
|
763b20bbaf2aa89bbe5ed27bd25d074d08e204cb
|
/scripts/tema1/03_analisis_HTML.R
|
70756e5de85bd01e6dd2c65222bbcacfd626d5fd
|
[] |
no_license
|
kepasa05/R_
|
37eb262e071a969031121465c5def64a85ddfb06
|
09d37a374e3f98d89aadcdc252724ef7275523ac
|
refs/heads/master
| 2022-12-02T22:02:51.764714
| 2020-08-01T11:44:04
| 2020-08-01T11:44:04
| 283,586,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 148
|
r
|
03_analisis_HTML.R
|
# Read tabular data from a remote plain-text file into a data frame,
# then inspect its class and open it in the data viewer.
src_url <- "https://datanalytics.com/uploads/datos_treemap.txt"
tabla <- read.table(src_url, header = TRUE)
class(tabla)
View(tabla)
|
57ac96aa94336dfbdee26a34d06499ddb4e9c63b
|
f3ed3d4dab1222fbc9132050b5f95044d1058ea1
|
/13.1.R
|
18ca872af72cc330f0e8ac503e7ee5d79f81c30e
|
[] |
no_license
|
Clairepp/Design-of-Experiments
|
e3e8971aea2093e68aabeb9e9663af364419f8f3
|
a72e75877612ee07b9640a72400d7e3d8b21939d
|
refs/heads/master
| 2021-01-20T22:29:38.910388
| 2016-06-21T19:55:40
| 2016-06-21T19:55:40
| 61,662,898
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 539
|
r
|
13.1.R
|
# One-way random-effects analysis: 25 observations in five groups of five
# ("looms"), fitted as a random-intercept model with lme4 and as a classical
# aov with an Error() stratum, followed by hand interval arithmetic for the
# variance-component ratio (presumably exercise 13.1 of a textbook).
library(lme4)
y <- c(14,14.1,14.2,14,14.1,13.9,13.8,13.9,14,14,14.1,14.2,14.1,14,13.9,13.6,13.8,14,13.9,13.7,13.8,13.6,13.9,13.8,14)
looms <- c(rep(1,5),rep(2,5),rep(3,5),rep(4,5),rep(5,5))
looms <- as.factor(looms)
# Random intercept per loom.
y.lmer1<-lmer(y~1+(1|looms))
summary(y.lmer1)
# Equivalent classical ANOVA with looms as the error stratum.
y.aov <- aov(y ~ Error(looms))
summary(y.aov)
qqnorm(residuals(y.lmer1))
# Manual confidence-interval arithmetic; 0.0854 and 0.0148 appear to be the
# two mean squares copied from the ANOVA output — verify against summary().
# NOTE(review): in pchisq/qchisq the third positional argument is the
# noncentrality parameter (ncp), not a second degrees-of-freedom value; an
# F distribution (pf/qf with df 4 and 20) may have been intended — confirm.
pchisq(0.0854/0.0148,4,20)
qchisq(0.025,4,20)
1/qchisq(0.025,4,20)
(0.0854/(0.0148*qchisq(0.025,4,20))-1)/5
((0.0854*qchisq(0.025,4,20))/0.0148-1)/5
-0.06553329/(1-0.06553329)
9.704613/(1+9.704613)
|
adf78a2d0585497e9105b4afdd48e4a0e620533a
|
fe8d2b3f50d9441fa12a35b0e84a8196e8d48702
|
/man/ps_read_table.Rd
|
e424cfb5fcdf5803c6e35a2386e12b408ab51924
|
[
"MIT"
] |
permissive
|
poissonconsulting/poissqlite
|
6593e26aac0a71c4a1298ec6d2e1ca386f400120
|
8a868df55dcebe2012a121d656ad13298267b324
|
refs/heads/master
| 2021-10-01T22:09:19.865823
| 2021-09-16T16:19:20
| 2021-09-16T16:19:20
| 98,579,666
| 0
| 0
|
NOASSERTION
| 2021-06-23T22:53:54
| 2017-07-27T21:04:22
|
R
|
UTF-8
|
R
| false
| true
| 402
|
rd
|
ps_read_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{ps_read_table}
\alias{ps_read_table}
\title{Read Table}
\usage{
ps_read_table(table_name, conn = getOption("ps.conn"))
}
\arguments{
\item{table_name}{A string of the name of the table.}
\item{conn}{An SQLiteConnection object.}
}
\description{
Returns a table in an SQLite database as a tibble or sf object.
}
|
84dfdf92a6158d6a12fed8385463ccc1c40ce7b7
|
eb1dfc035cd60dbda7a9b0f784c11599df7e32a4
|
/man/get_population_rate.Rd
|
0db2b99b6c8b685728967981c18e21ce1343680b
|
[
"MIT"
] |
permissive
|
grattan/covidReff
|
7ee699fdfcee0744de8e5b2d13f2d0d1cdc45713
|
7269127c4de6d6504d72ae2159ad88352773c7f1
|
refs/heads/master
| 2023-07-18T22:01:31.234608
| 2021-08-14T00:41:04
| 2021-08-14T00:41:04
| 381,024,756
| 4
| 1
|
NOASSERTION
| 2021-08-14T00:41:04
| 2021-06-28T12:33:22
|
R
|
UTF-8
|
R
| false
| true
| 627
|
rd
|
get_population_rate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get-vaccination-rates.R
\name{get_population_rate}
\alias{get_population_rate}
\title{Get population rate based on an age-specific input}
\arguments{
\item{age_rate}{a vector of length 10 with vaccination levels for age groups: 0-10, 11-20, ..., 81-90, and 90+.
Can be unnamed or named for clarity, eg: \code{c("0-10" = 0, "11-20" = 0.1, "21-30" = 0.2, "31-40" = 0.2, "41-50" = 0.2, "51-60" = 0.2, "61-70" = 0.2, "71-80" = 0.2, "81-90" = 0.2, "91-100" = 0.2)}}
}
\description{
determine the Australian population level rate for age-specific rates
}
|
32fa28a9819d488498ba532fed8e48de871a427e
|
b30a6a9d69305509e197bd36d5307578a05ad46f
|
/multipointcalcs.R
|
4b330d4574d5e9674396693deb52b16002cc0bde
|
[] |
no_license
|
amwootte/analysisscripts
|
49b4d6736d1701805a960425f96d01e7397ef852
|
9ab5dd1a7659664daf652c0138510e5a3644ee62
|
refs/heads/master
| 2022-07-20T05:09:10.418987
| 2022-07-06T15:02:10
| 2022-07-06T15:02:10
| 116,304,534
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,469
|
r
|
multipointcalcs.R
|
##################
#
# Multiple Location Point calculation
#
# Driver script: for a single location (planar lon/lat) and period, invokes
# point_calcs.R once per climate variable via system(), passing the full
# downscaled-member and scenario lists on the command line.
# NOTE(review): setwd() in a script affects the whole session; consider
# running from the target directory instead.
setwd("/data2/3to5/I35/scripts/")
location = "Guymon"
lon = 258.5185
lat = 36.6828
period = "2041-2070"
# Variables to process; all products are "absolute" change except pr,
# which is a "percent" change product (last element of type).
vars = c("tasmax","tasmin","tmax95","tmax100","tmin32","tmin28","heatwaves","gsl","frd","mdrn","pr25","pr50","rx1day","rx5day","cwd","cdd","pr")
type = c(rep("absolute",16),"percent")
# Fixed command fragments: input-path prefix, then the -s (member) and -p
# (member x scenario) lists shared by every invocation.
commandstart = "Rscript /data2/3to5/I35/scripts/point_calcs.R -i /data2/3to5/I35/all_mems/"
commandmiddle = " -s CCSM4_DeltaSD_Daymet,CCSM4_DeltaSD_Livneh,CCSM4_DeltaSD_PRISM,MIROC5_DeltaSD_Daymet,MIROC5_DeltaSD_Livneh,MIROC5_DeltaSD_PRISM,MPI-ESM-LR_DeltaSD_Daymet,MPI-ESM-LR_DeltaSD_Livneh,MPI-ESM-LR_DeltaSD_PRISM,CCSM4_QDM_Daymet,CCSM4_QDM_Livneh,CCSM4_QDM_PRISM,MIROC5_QDM_Daymet,MIROC5_QDM_Livneh,MIROC5_QDM_PRISM,MPI-ESM-LR_QDM_Daymet,MPI-ESM-LR_QDM_Livneh,MPI-ESM-LR_QDM_PRISM -p CCSM4_DeltaSD_Daymet_rcp26,CCSM4_DeltaSD_Livneh_rcp26,CCSM4_DeltaSD_PRISM_rcp26,CCSM4_DeltaSD_Daymet_rcp45,CCSM4_DeltaSD_Livneh_rcp45,CCSM4_DeltaSD_PRISM_rcp45,CCSM4_DeltaSD_Daymet_rcp85,CCSM4_DeltaSD_Livneh_rcp85,CCSM4_DeltaSD_PRISM_rcp85,MIROC5_DeltaSD_Daymet_rcp26,MIROC5_DeltaSD_Livneh_rcp26,MIROC5_DeltaSD_PRISM_rcp26,MIROC5_DeltaSD_Daymet_rcp45,MIROC5_DeltaSD_Livneh_rcp45,MIROC5_DeltaSD_PRISM_rcp45,MIROC5_DeltaSD_Daymet_rcp85,MIROC5_DeltaSD_Livneh_rcp85,MIROC5_DeltaSD_PRISM_rcp85,MPI-ESM-LR_DeltaSD_Daymet_rcp26,MPI-ESM-LR_DeltaSD_Livneh_rcp26,MPI-ESM-LR_DeltaSD_PRISM_rcp26,MPI-ESM-LR_DeltaSD_Daymet_rcp45,MPI-ESM-LR_DeltaSD_Livneh_rcp45,MPI-ESM-LR_DeltaSD_PRISM_rcp45,MPI-ESM-LR_DeltaSD_Daymet_rcp85,MPI-ESM-LR_DeltaSD_Livneh_rcp85,MPI-ESM-LR_DeltaSD_PRISM_rcp85,CCSM4_QDM_Daymet_rcp26,CCSM4_QDM_Livneh_rcp26,CCSM4_QDM_PRISM_rcp26,CCSM4_QDM_Daymet_rcp45,CCSM4_QDM_Livneh_rcp45,CCSM4_QDM_PRISM_rcp45,CCSM4_QDM_Daymet_rcp85,CCSM4_QDM_Livneh_rcp85,CCSM4_QDM_PRISM_rcp85,MIROC5_QDM_Daymet_rcp26,MIROC5_QDM_Livneh_rcp26,MIROC5_QDM_PRISM_rcp26,MIROC5_QDM_Daymet_rcp45,MIROC5_QDM_Livneh_rcp45,MIROC5_QDM_PRISM_rcp45,MIROC5_QDM_Daymet_rcp85,MIROC5_QDM_Livneh_rcp85,MIROC5_QDM_PRISM_rcp85,MPI-ESM-LR_QDM_Daymet_rcp26,MPI-ESM-LR_QDM_Livneh_rcp26,MPI-ESM-LR_QDM_PRISM_rcp26,MPI-ESM-LR_QDM_Daymet_rcp45,MPI-ESM-LR_QDM_Livneh_rcp45,MPI-ESM-LR_QDM_PRISM_rcp45,MPI-ESM-LR_QDM_Daymet_rcp85,MPI-ESM-LR_QDM_Livneh_rcp85,MPI-ESM-LR_QDM_PRISM_rcp85 -n "
for(i in 1:length(vars)){
  # Assemble "<start><var>_allmem_<type>_<period>_ann.nc<middle><loc> -x <lon> -y <lat>"
  # and run it synchronously, one variable at a time.
  command = paste(commandstart,vars[i],"_allmem_",type[i],"_",period,"_ann.nc",commandmiddle,location," -x ",lon," -y ",lat,sep="")
  system(command,wait=TRUE)
  message("Finished calcs for var ",vars[i])
}
|
89af9ab75f50808662b12bb1f7f6fde27a3815a6
|
a3a3a26d0ef80fba3d5c133d62e649dd55937f2c
|
/R/ToNCDFSG.R
|
eacc396f7475707a1b25dd7ceaf3dcc2dbaa098e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
dblodgett-usgs/NCDFSG
|
74181ab2d24d588072cf0add71f67141c5c48f14
|
ec92886d9da885125fd08a059312ca2c3d86c465
|
refs/heads/master
| 2021-01-19T00:18:01.959465
| 2017-04-07T00:50:41
| 2017-04-07T00:50:41
| 73,005,025
| 1
| 1
| null | 2017-03-25T14:29:45
| 2016-11-06T16:53:21
|
R
|
UTF-8
|
R
| false
| false
| 3,418
|
r
|
ToNCDFSG.R
|
#'@title Convert sp objects to NetCDF
#'
#'
#'@param nc_file A string file path to the nc file to be created.
#'@param instance_names A character vector of names for geometries.
#'If NULL, integers are used. If the geomData has a data frame, this is not used.
#'@param instance_dim_name If the file provided already has an instance dimension,
#'it needs to be provided as a character string otherwise a new instance dim may be created.
#'@param geomData An object of class \code{SpatialPoints}, \code{SpatialLines} or
#'\code{SpatialPolygons} with WGS84 lon in the x coordinate and lat in the y coordinate.
#'Note that three dimensional geometries is not supported.
#'@param lats Vector of WGS84 latitudes
#'@param lons Vector of WGS84 longitudes
#'@param variables If a an existing netcdf files is provided, this list of strings is used
#'to add the geometry container attribute to the named existing variables.
#'
#'@description
#'Creates a file with point, line or polygon instance data ready for the extended NetCDF-CF timeSeries featuretype format.
#'Will also add attributes if a sp dataframe object is passed in.
#'
#'@references
#'https://github.com/bekozi/netCDF-CF-simple-geometry
#'
#'@importFrom ncdf4 nc_open ncvar_add nc_close ncvar_def ncvar_put ncatt_put ncdim_def
#'@importFrom sp SpatialLinesDataFrame polygons SpatialPoints
#'@importFrom netcdf.dsg write_instance_data
#'
#'@export
ToNCDFSG = function(nc_file, geomData = NULL, instance_names = NULL, instance_dim_name = NULL, lats = NULL, lons = NULL, variables = list()){
  # Writes sp point/line/polygon geometries (plus any attribute table) to a
  # NetCDF-CF simple-geometry file.  Accepts either an sp object via
  # 'geomData' or raw WGS84 coordinates via 'lats'/'lons'.
  pointsMode <- FALSE
  # Default instance names: coordinate row names for point data, otherwise
  # sequential integers.
  if(is.null(instance_names) && !is.null(geomData)) {
    if(class(geomData)=="SpatialPoints" || class(geomData)=="SpatialPointsDataFrame") {
      instance_names <- as.character(unique(attributes(geomData@coords)$dimnames[[1]]))
    } else {
      instance_names <- as.character(c(1:length(geomData)))
    }
  }
  # Normalize the input: split *DataFrame objects into bare geometry plus an
  # attribute table, or construct SpatialPoints from lats/lons.  Exact
  # class() comparisons (rather than inherits()) keep the branch order
  # unambiguous for the sp class hierarchy.
  if(class(geomData) == "SpatialPolygonsDataFrame") {
    attData<-geomData@data
    geomData<-polygons(geomData)
  } else if(class(geomData) == "SpatialLinesDataFrame") {
    attData<-geomData@data
  } else if(class(geomData) == "SpatialPolygons") {
    geomData<-polygons(geomData)
  } else if(class(geomData) == "SpatialLines") {
    geomData<-SpatialLinesDataFrame(geomData,data=as.data.frame(instance_names,stringsAsFactors = FALSE))
  } else if(class(geomData) == "SpatialPoints") {
    pointsMode<-TRUE
  } else if(class(geomData) == "SpatialPointsDataFrame") {
    pointsMode<-TRUE
    attData<-geomData@data
  } else if(!is.null(lats)) {
    pointsMode<-TRUE
    geomData <- SpatialPoints(as.data.frame(list(x=lons, y=lats)),proj4string = CRS("+proj=longlat +datum=WGS84"))
    if(is.null(instance_names)) {
      instance_names<-as.character(c(1:length(lats)))
    }
  } else {
    stop("Did not find supported spatial data.")
  }
  if(!pointsMode && !is.null(geomData)) {
    if(length(instance_names)!=length(geomData)) stop('instance_names must be same length as data')
  }
  if(is.null(instance_dim_name)) {
    instance_dim_name <- pkg.env$instance_dim_name
  }
  # Write the attribute table (factors coerced to character) before the
  # geometry so the instance dimension exists for the geometry container.
  # NOTE(review): exists("attData") also searches enclosing environments;
  # exists("attData", inherits = FALSE) would be safer — confirm.
  if(exists("attData")) {
    itemp <- sapply(attData, is.factor)
    attData[itemp] <- lapply(attData[itemp], as.character)
    nc_file <- write_instance_data(nc_file, attData, instance_dim_name)
    variables <- c(variables, names(attData))
  }
  nc_file <- addGeomData(nc_file, geomData, instance_dim_name, variables = variables)
  return(nc_file)
}
|
d4346a716b29ab1cc2cee688456f0082cdd5c2ed
|
005d7e55b9be2b8fa9caf73ee8d6178b9ba07760
|
/network.R
|
e04ca2c41c39848efee500366c95599531bec5cf
|
[] |
no_license
|
statsjedi/hhco
|
63cf89f72e59b23123452179109c21010f4f4b97
|
fd02350c1c9247da7ead3bc400de8082fe94d294
|
refs/heads/master
| 2022-12-01T01:17:00.754025
| 2020-08-07T17:19:38
| 2020-08-07T17:19:38
| 284,848,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,491
|
r
|
network.R
|
# Build and plot the state-transition network for "Hi Ho! Cherry-O":
# 11 nodes (cherry counts 0-10, node 11 = terminal state) with edges read
# from an Excel sheet of transition probabilities.
library(tidyverse)
library(tidygraph)
library(ggraph)
library(readxl)
#make edges and nodes
edges <- read_excel("hhco_markov.xlsx", sheet = "network")
nodes <- tibble(node_key=seq(1, 11, 1), label=seq(0, 10, 1), color=c(rep("white", 10), "black"))
# shift node ids by one: edge endpoints can't have 0 in the list
edges$from <- edges$from + 1
edges$to <- edges$to + 1
#make network object
routes_tidy <- tbl_graph(nodes = nodes, edges = edges, directed = TRUE)
#calculate centrality (authority score) per node
routes_tidy <-
  routes_tidy %>%
  mutate(centrality = centrality_authority())
#This lets us get the fractional probability legend titles to use in the other plot
p.network_color <- ggraph(routes_tidy, layout = "focus", focus=11) +
  geom_edge_fan(aes(color = as.factor(probability)),
                arrow = arrow(length = unit(4, 'mm')),
                end_cap = circle(5, 'mm')) +
  geom_node_point(aes(color=cut_interval(centrality, 5)), shape=21, size=5, stroke=3, fill="white") +
  geom_node_text(aes(label = label), color="black", size=3) +
  labs(edge_color= "Probability", color="Centrality",
       title="Network Analysis of \"Hi Ho! Cherry-O\"") +
  scale_edge_color_brewer(palette = "Set1", labels=c("1/7", "2/7", "3/7", "4/7"))+
  scale_color_viridis_d()+
  theme_graph()+
  theme(legend.text = element_text(size = 10), legend.title = element_text(size = 10),
        plot.title = element_text(size = 12))
p.network_color
#the network plot
p.network <- ggraph(routes_tidy, layout = "focus", focus=11) +
geom_edge_fan(aes(edge_width = probability), color="#737373",
arrow = arrow(length = unit(4, 'mm')),
end_cap = circle(5, 'mm')) +
geom_edge_loop(aes(edge_width = probability),color="#737373",
arrow = arrow(length = unit(4, 'mm')),
end_cap = circle(5, 'mm')) +
geom_node_point(aes(color=cut_interval(centrality, 5)), shape=21, size=5, stroke=3, fill="white") +
geom_node_text(aes(label = label), color="black", size=3) +
labs(edge_width = "Probability", color="Centrality",
title="Network Analysis of \"Hi Ho! Cherry-O\"") +
scale_edge_width(range=c(0.2, 3))+ #set thickness
scale_color_viridis_d()+
theme_graph()+
theme(legend.text = element_text(size = 10), legend.title = element_text(size = 10),
plot.title = element_text(size = 12))
p.network
ggsave("hhco_network_color.png", p.network_color, width=7, height=5, units="in")
ggsave("hhco_network.png", p.network, width=7, height=5, units="in")
|
32041e1d341393aa29fc216f6f91e590b34989ff
|
1a3db847ec62cbc9c88a0742d7c67bd035069ede
|
/plot3.R
|
5a9237d7f3a63adccd7e75aadf224d7beb9b78b2
|
[] |
no_license
|
anpefi/ExData_Plotting1
|
b5756be4e4449e0e01149c5fcc3aa5765ac7115c
|
6e4c447aa098924d9e9b1f01394a7aeb2514db8c
|
refs/heads/master
| 2021-04-29T16:32:13.221317
| 2018-02-15T17:43:49
| 2018-02-15T17:43:49
| 121,651,795
| 0
| 0
| null | 2018-02-15T16:23:01
| 2018-02-15T16:23:01
| null |
UTF-8
|
R
| false
| false
| 837
|
r
|
plot3.R
|
# plot3.R — plot the three sub-metering series of the UCI household power
# consumption data for a fixed two-day window.
library(tidyverse)
library(lubridate)
# The first line of the file holds the ';'-separated column names.
variables <- read_lines("household_power_consumption.txt", n_max = 1) %>%
  stringr::str_split(";") %>%
  unlist
# skip/n_max select 2880 rows = 2 days of minute-level readings; the
# hard-coded offset 66637 is tied to this specific source file —
# NOTE(review): confirm it lands on the intended start date.
data <- read_delim("household_power_consumption.txt", delim = ";",
                   skip = 66637, n_max = 2880,
                   col_names = variables)
# Parse the date and assemble a proper POSIXct datetime column.
data <- data %>%
  mutate_at(vars(Date),funs(lubridate::dmy)) %>%
  mutate(datetime=paste(Date,Time)) %>%
  mutate_at(vars(datetime),funs(lubridate::ymd_hms))
# Draw all three sub-metering series on a single set of axes.
png(filename = "plot3.png",width = 480, height = 480)
plot(x=data$datetime, y=data$Sub_metering_1, type = "l", ylab="Energy sub metering", xlab="")
lines(x=data$datetime, y=data$Sub_metering_2,col="red")
lines(x=data$datetime, y=data$Sub_metering_3,col="blue")
# variables[7:9] are the three Sub_metering_* column names.
legend(x = "topright",legend = variables[7:9], col = c("black","red","blue"),lwd = 2)
dev.off()
|
b878783df2d05902c294b1ffbb56fad38330490c
|
7b8b0e7f3d0866f031dd66f0cb7affeef4ec308f
|
/R/col_density.R
|
a549e98e8c0a7a321d21a5d42c85f7c9d72a84de
|
[
"MIT"
] |
permissive
|
PizzaShift/r-codatools
|
ac11f97705e15c1d7ceacf54f65b0be28c094435
|
202ffb1c133d161a7bc1beecc557690fb02e40aa
|
refs/heads/master
| 2021-10-24T23:15:56.992115
| 2019-03-29T16:51:15
| 2019-03-29T16:51:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,616
|
r
|
col_density.R
|
#' @title Plots non-parametric density overlays for each column in a matrix
#'
#' @description
#' Calculate and plot the non-parametric density for the data in each
#' column of a matrix. All densities are plotted on the same graphic.
#'
#' @param x
#' a matrix
#'
#' @param xlim
#' vector of length 2 giving min and max for x-axis
#'
#' @param xlab
#' x-axis label
#'
#' @param main
#' main title for graphic
#'
#' @param plot.it
#' logical, should a plot be created
#'
#' @param \dots
#' additional arguments passed to \code{density}
#'
#' @return
#' Invisibly returns a list with x and y components for the plotted density
#' curves.
#'
#' @seealso \code{\link{density}}
#'
#' @export
#'
#' @author Michael Malick
#'
#' @examples
#' mat <- matrix(rnorm(100000), ncol = 100)
#' col_density(mat)
#' col_density(mat, main = "test")
#' col_density(mat, xlim = c(-4, 3))
#' xx <- col_density(mat, xlim = c(-4, 3))
#' class(xx)
col_density <- function(x, xlim = NULL, xlab = "", main = "", plot.it = TRUE, ...) {

    if (!is.matrix(x))
        stop("x is not a matrix")

    # NOTE: the original kept unused bookkeeping vectors (n.rows, x.min,
    # x.max, y.min, y.max); they were dead code and have been removed.
    n.cols <- dim(x)[2]

    ## Calculate column densities; one density estimate per column
    dens   <- vector("list", n.cols)
    x.dens <- vector("list", n.cols)
    y.dens <- vector("list", n.cols)
    for (i in seq_len(n.cols)) {
        dens[[i]]   <- stats::density(x[ , i], ...)
        x.dens[[i]] <- dens[[i]]$x
        y.dens[[i]] <- dens[[i]]$y
    }
    # One column per input column, rows are the density grid points
    x.dens <- do.call("cbind", x.dens)
    y.dens <- do.call("cbind", y.dens)

    ## Set color palette: evenly spaced hues, one per column
    ang <- seq(0, 240, length.out = n.cols)
    pal <- grDevices::hcl(h = ang, c = 100, l = 60, fixup = TRUE)

    if (is.null(xlim))
        xlim <- c(min(x.dens), max(x.dens))

    ## Create plot (skipped entirely when plot.it = FALSE)
    if (plot.it) {
        graphics::matplot(x.dens, y.dens,
            type = "l",
            lty  = 1,
            xlim = xlim,
            ylim = c(min(y.dens), max(y.dens)),
            col  = pal,
            main = main,
            ylab = "Density",
            xlab = xlab,
            axes = FALSE)
        graphics::axis(2, lwd = 1, lwd.ticks = 1, las = 1, col = "grey50")
        graphics::axis(1, lwd = 1, lwd.ticks = 1, col = "grey50")
        graphics::box(col = "grey50")
    }

    invisible(list(x = x.dens, y = y.dens))
}

colDensity <- col_density ## backwards compatibility
|
d8d6bce5b3d24a084f76b2e8f1042f6d5f6bc110
|
5949b4ceb8ef222b1feadac1cc93f1ba4101f8dd
|
/man/download_phoenix.Rd
|
9722e2f9b08c758569c1785d5a1507d2396c472e
|
[
"MIT"
] |
permissive
|
altaf-ali/phoxy
|
f49e5d4234613a5f9285d854a7f16cce865981a4
|
8718c4625fa119a20e495cd036a500a7bfef7237
|
refs/heads/master
| 2020-12-31T00:41:01.776707
| 2017-02-24T02:13:48
| 2017-02-24T02:13:48
| 80,634,679
| 0
| 0
| null | 2017-02-01T15:42:23
| 2017-02-01T15:42:23
| null |
UTF-8
|
R
| false
| true
| 1,047
|
rd
|
download_phoenix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_phoenix.R
\name{download_phoenix}
\alias{download_phoenix}
\title{Download the Phoenix Dataset}
\usage{
download_phoenix(destpath, phoenix_version = "current", start_date = NULL,
end_date = NULL)
}
\arguments{
\item{destpath}{The path to the directory where Phoenix should go.}
\item{phoenix_version}{Download a specific version of Phoenix ("v0.1.0" or the current version by default).}
\item{start_date}{Filter the dataset to only include events from start_date.}
\item{end_date}{Filter the dataset to only include events before end_date.}
}
\description{
Download and unzip all of the data files for the Phoenix dataset from the
Phoenix data website into a given directory.
}
\note{
This function, like Phoenix, is still in development and may contain errors and change quickly.
}
\examples{
download_phoenix("~/OEDA/phoxy_test/", phoenix_version = "current", start_date = "2017-01-01", end_date = "2017-01-31")
}
\author{
Andy Halterman, Altaf Ali
}
|
8972e1f893d43ed14dfe6c0ba21c75cd3ea85e8d
|
67c2a90c7edfac3cfd891cb332c45e71cf4a6ad1
|
/R/vcov.loglike.din.R
|
eb1e939f4ad79a3b97e86155d271cf455a0dcb4d
|
[] |
no_license
|
alexanderrobitzsch/CDM
|
48316397029327f213967dd6370a709dd1bd2e0a
|
7fde48c9fe331b020ad9c7d8b0ec776acbff6a52
|
refs/heads/master
| 2022-09-28T18:09:22.491208
| 2022-08-26T11:36:31
| 2022-08-26T11:36:31
| 95,295,826
| 21
| 11
| null | 2019-06-19T09:40:01
| 2017-06-24T12:19:45
|
R
|
UTF-8
|
R
| false
| false
| 1,415
|
r
|
vcov.loglike.din.R
|
## File Name: vcov.loglike.din.R
## File Version: 0.10
#########################################################################
# Compute the log-likelihood for DINA/DINO ("din") model objects, given
# item slipping/guessing parameters and skill-class probabilities.
#
# weights        : case weights — NOTE(review): only its length is used here
#                  (number of item-response patterns IP); N is assigned but unused.
# skillprobs0    : probabilities of the L latent skill classes
# slip0, guess0  : per-item slipping and guessing parameters (length J)
# latresp        : J x L matrix of latent (deterministic) item responses
# item.patt.split, resp.ind.list : response-pattern data passed through to
#                  cdm_calc_posterior (package-internal helper)
# return.p.xi.aj : if TRUE, also return the pattern-by-class likelihoods
vcov.loglike.din <- function( weights, skillprobs0, slip0, guess0,
        latresp, item.patt.split, resp.ind.list,
        return.p.xi.aj=FALSE )
{
    ########################
    IP <- N <- length(weights)
    L <- length(skillprobs0)
    J <- length(guess0)
    # calculate probabilities: P(correct) = (1-slip) if the latent response
    # is 1, else guess
    slipM <- matrix( slip0, nrow=nrow(latresp), ncol=ncol(latresp))
    guessM <- matrix( guess0, nrow=nrow(latresp), ncol=ncol(latresp))
    pj <- (1 - slipM )*latresp + guessM * ( 1 - latresp )
    # J x 2 x L array: slice 1 = P(incorrect), slice 2 = P(correct)
    pjM <- array( NA, dim=c(J,2,L) )
    pjM[,1,] <- 1 - pj
    pjM[,2,] <- pj
    skillprobsM <- matrix( skillprobs0, nrow=IP, ncol=L, byrow=TRUE )
    # calculate log-likelihood; gwt=1 means no prior weighting of classes
    h1 <- matrix( 1, nrow=IP, ncol=L )
    res.hwt <- cdm_calc_posterior(rprobs=pjM, gwt=h1, resp=item.patt.split,
        nitems=J, resp.ind.list=resp.ind.list, normalization=FALSE,
        thetasamp.density=NULL, snodes=0 )
    # p.xi.aj: likelihood of each response pattern under each skill class
    p.xi.aj <- res.hwt$hwt
    # Log-Likelihood (casewise): log of the class-probability-weighted sum
    ll2 <- log( rowSums( p.xi.aj * skillprobsM ) )
    if (return.p.xi.aj){
        res <- list( "ll"=ll2, "p.xi.aj"=p.xi.aj )
    } else {
        res <- ll2
    }
    return(res)
}
#########################################################################
|
f2a469cfb9fba8614c2d2beed3a24925de1bc78c
|
1d42d73011f65a104bbfa60a60585e831af31870
|
/cachematrix.R
|
639011ba3a6e5e76b640cbe9a93d53f9e701d05f
|
[] |
no_license
|
alnisb/ProgrammingAssignment2
|
53e2b9b47588f9a580f71e5eabf0bd65e93d08ec
|
0bd247b376767610313da75aceb8f937fd7e4c90
|
refs/heads/master
| 2021-01-19T06:56:06.993555
| 2015-05-22T03:25:34
| 2015-05-22T03:25:34
| 36,049,314
| 0
| 0
| null | 2015-05-22T03:01:41
| 2015-05-22T03:01:40
| null |
UTF-8
|
R
| false
| false
| 2,362
|
r
|
cachematrix.R
|
## Pair of functions to.
## 1. Store a matrix and cache its inverse.
## 2. Call function to compute, but try to get from cache as an optimisation.
## Assumption. NO validation that a supplied matrix is indeed invertible.
## Sample test case
## sampleMatrix <- matrix(c(1,0,5,2,1,6,3,4,0), ncol=3)
## s <- makeCacheMatrix(sampleMatrix)
## cacheSolve(s) # Not cached
## cacheSolve(s) # Cached, you'll see message "getting cached data"
################################################################################
# makeCacheMatrix.R
# Author. Alnis Bajars. 2015-05-20
#
# Caches invertible matrix as a performance optimisation.
# Exploits the ability to cache objects to another environment.
################################################################################
makeCacheMatrix <- function(x = matrix()) {
    # Closure state: the matrix `x` plus a lazily filled inverse cache.
    cached_inverse <- NULL
    # Replace the stored matrix; any previously cached inverse is now stale.
    set <- function(y) {
        x <<- y
        cached_inverse <<- NULL
    }
    # Return the stored matrix.
    get <- function() x
    # Record an inverse computed by the caller (cacheSolve).
    setMatrix <- function(inverse) cached_inverse <<- inverse
    # Return the cached inverse, or NULL if nothing has been stored.
    getMatrix <- function() cached_inverse
    # Expose the four accessors under their documented names.
    list(
        set = set,
        get = get,
        setMatrix = setMatrix,
        getMatrix = getMatrix
    )
}
################################################################################
# cacheSolve.R
# Author. Alnis Bajars. 2015-05-20
#
# Inverts matrix using standard solve function.
# Assumption. No validation that the matrix is invertible, need to check yourself.
# The "secret sauce" is a (not THE) performance optimisation that checks.
# 1. That the matrix is already cached.
# 2. Has not changed since being cached.
################################################################################
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object),
    ## reusing the cached inverse when one has already been computed.
    cached <- x$getMatrix()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(x$get(), ...)
    x$setMatrix(inverse)
    inverse
}
|
17c708b592f008303dcb39dbbe7ddd0c75da019d
|
f3cd125e975a2373bb7dc62b808b1bbcdb96e274
|
/R/dependency.R
|
d0ef54ea9951e5ffe20be7a898586515bad26373
|
[
"MIT"
] |
permissive
|
JohnCoene/ascatter
|
5260b1c46a5ba373a83d5d7481435d0983aa08a6
|
c814c8193b291e5ac8dfc44b5f17ece18b2a1531
|
refs/heads/master
| 2021-06-27T12:57:22.655312
| 2019-06-17T07:33:27
| 2019-06-17T07:33:27
| 145,391,406
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 754
|
r
|
dependency.R
|
#' Attach dependency
#'
#' Attach scatter plot dependency.
#'
#' @note The package also depends on d3.
#'
#' @examples
#' aframer::a_scene(
#' aframer::a_sky(),
#' list(
#' d3_dependency(),
#' as_dependency()
#' )
#' )
#'
#' # OR
#' aframer::a_scene(
#' aframer::a_sky(),
#' as_full_dependency()
#' )
#'
#' @rdname dependency
#' @export
as_dependency <- function(){
  # Resolve the bundled a-scatterplot JS asset (v0.0.1) as a dependency.
  # NOTE(review): .get_dependency is a package-internal helper; its return
  # type (presumably an htmltools dependency object) is not visible here.
  .get_dependency("a-scatterplot.min.js", "ascatter", "0.0.1", "ascatter")
}
#' @rdname dependency
#' @export
d3_dependency <- function(){
  # Resolve the bundled d3 asset (v4.4.1), which a-scatterplot requires.
  # NOTE(review): see as_dependency — relies on the internal .get_dependency.
  .get_dependency("d3.min.js", "d3", "4.4.1", "d3")
}
#' @rdname dependency
#' @export
as_full_dependency <- function(){
  # Bundle both dependencies, d3 first (a-scatterplot depends on it).
  deps <- list()
  for (make_dep in list(d3_dependency, as_dependency)) {
    deps <- append(deps, make_dep())
  }
  return(deps)
}
|
2e653d1c7f0c3cbd1578fa93361f69e5bc78f72e
|
eb9b5a5b759b10bfbf8421f3a67a025a9ff7c069
|
/pQTL/2_Generate_tables/6_druggability.R
|
2abb36bf21fc8a7b678fb66ad15b5be9506971c8
|
[] |
no_license
|
Jingning-Zhang/PlasmaProtein
|
fc42790f4eaea03e5b0285dbcc5ca7bc929ecc3e
|
1a3fd772782bf2b599f8c81054e4bf899ca41bd1
|
refs/heads/main
| 2023-04-15T22:55:54.912402
| 2022-11-29T06:42:31
| 2022-11-29T06:42:31
| 465,238,572
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
6_druggability.R
|
## results from revision_500Kb/4_AASK
# 6_druggability.R — collect the union of gene IDs from the EA and AA
# plasma-protein .pos files and write them out as a plain-text list.
rm(list=ls())
library(readr)
library(bigreadr)
library(stringr)
# NOTE(review): pos0 is built from the dcl01 copies of the .pos files but is
# never used below — apparently dead code superseded by `pos`; confirm
# before removing.
pos0 <- read_tsv("/dcl01/chatterj/data/jzhang2/PWAS_tutorial/Plasma_Protein_EA_hg38.pos")$ID
pos0 <- c(pos0, read_tsv("/dcl01/chatterj/data/jzhang2/PWAS_tutorial/Plasma_Protein_AA_hg38.pos")$ID)
pos0 <- unique(pos0)
# Union of EA and AA gene IDs from the shared-tutorial copies of the files.
pos <- read_tsv("/dcs01/arking/ARIC_static/ARIC_Data/GWAS/HRC/Aric_HRC_imputation/bedfiles/files_to_share/PWAS_tutorial/Plasma_Protein_EA_hg38.pos")$ID
pos <- c(pos, read_tsv("/dcs01/arking/ARIC_static/ARIC_Data/GWAS/HRC/Aric_HRC_imputation/bedfiles/files_to_share/PWAS_tutorial/Plasma_Protein_AA_hg38.pos")$ID)
pos <- unique(pos)
# One gene ID per line.
writeLines(pos, "/dcs01/arking/ARIC_static/ARIC_Data/GWAS/HRC/Aric_HRC_imputation/bedfiles/files_to_share/PWAS_tutorial/all_genes_EA_and_AA.txt")
|
e735e00d95a0da60bc099725cb2cb37e2e2f03bf
|
6b847b77a3d0ca15fcbb681fb1136c84644556c2
|
/stmpy/R/semanticCoherence.R
|
87fc39b24908c360c6591f1ad5c5dce09198c19c
|
[
"MIT"
] |
permissive
|
AntonioCoppola/stmpy
|
09163b2ef3969db1c64cf40743d1eebefad82c68
|
91550648fb4541297d117938a52877a56f392e61
|
refs/heads/master
| 2021-01-24T21:42:35.668501
| 2016-01-17T01:54:40
| 2016-01-17T01:54:40
| 43,222,627
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,943
|
r
|
semanticCoherence.R
|
# Per-topic semantic coherence of an STM topic model, computed from the top-M
# words per topic and their document co-occurrence counts (see semCoh1beta).
# For content-covariate models (several logbeta matrices), each covariate
# level is scored on its own document subset and the scores are averaged,
# weighted by subset size.
semanticCoherence <- function(model.out, documents, M){
  if(length(model.out$beta$logbeta)!=1) {
    result <- 0
    # One beta matrix per covariate level: score each level separately.
    for(i in 1:length(model.out$beta$logbeta)){
      subset <- which(model.out$settings$covariates$betaindex==i)
      triplet <- doc.to.ijv(documents[subset])
      mat <- simple_triplet_matrix(triplet$i, triplet$j,triplet$v, ncol=model.out$settings$dim$V)
      result = result + semCoh1beta(mat, M, beta=model.out$beta$logbeta[[i]])*length(subset)
    }
    return(result/length(documents))
  }
  else {
    beta <- model.out$beta$logbeta[[1]]
    # Get the Top N Words.
    # NOTE(review): top.words/wordlist are recomputed inside semCoh1beta;
    # these two lines appear to be dead code.
    top.words <- apply(beta, 1, order, decreasing=TRUE)[1:M,]
    wordlist <- unique(as.vector(top.words))
    # Build the sparse document-term matrix (slam triplet form).
    triplet <- doc.to.ijv(documents)
    mat <- simple_triplet_matrix(triplet$i, triplet$j,triplet$v, ncol=model.out$settings$dim$V)
    result = semCoh1beta(mat, M, beta=beta)
    return(result)
  }
}
# Semantic coherence for one beta matrix: for each topic, take its top-M
# words and sum over ordered word pairs (m, l) with m > l the quantity
# log((D(m,l) + .01) / (D(l) + .01)), where D counts documents containing
# the word(s). Returns one score per topic (row of beta).
semCoh1beta <- function(mat, M, beta){
  # Get the Top M words per topic (word indices into the vocabulary).
  top.words <- apply(beta, 1, order, decreasing=TRUE)[1:M,]
  wordlist <- unique(as.vector(top.words))
  # Restrict the doc-term matrix to the candidate words.
  mat <- mat[,wordlist]
  mat$v <- ifelse(mat$v>1, 1,mat$v) #binarize: presence/absence per document
  # do the cross product to get co-occurences (document counts)
  cross <- tcrossprod_simple_triplet_matrix(t(mat))
  # create a list object with the renumbered words (so now it corresponds to the rows in the table)
  temp <- match(as.vector(top.words),wordlist)
  labels <- split(temp, rep(1:nrow(beta), each=M))
  # Note this could be done with recursion in an elegant way, but let's just be simpler about it.
  # cross[m,l] = # docs with both words; cross[l,l] = # docs with word l.
  sem <- function(ml,cross) {
    m <- ml[1]; l <- ml[2]
    log(.01 + cross[m,l]) - log(cross[l,l] + .01)
  }
  result <- vector(length=nrow(beta))
  for(k in 1:nrow(beta)) {
    # All ordered pairs of this topic's top words, keeping m > l only.
    grid <- expand.grid(labels[[k]],labels[[k]])
    colnames(grid) <- c("m", "l") #corresponds to original paper
    grid <- grid[grid$m > grid$l,]
    calc <- apply(grid,1,sem,cross)
    result[k] <- sum(calc)
  }
  return(result)
}
|
88516cde922b0cbee30829376bdd61ecbfa48114
|
6313022214452f3f1645b8f1ec4b70a86029c86a
|
/cachematrix.R
|
5e1adc024f862a535efce2ce01df0f49c1b65ec5
|
[] |
no_license
|
benedictjg/ProgAssignment2
|
9e0d6ea488f68da36582c9802ced9b60ebef9a6d
|
2ed0b26c94a2898c46ac5ad11626e34bcd66f54e
|
refs/heads/master
| 2020-04-06T05:01:51.495973
| 2015-04-26T13:05:48
| 2015-04-26T13:05:48
| 34,163,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,411
|
r
|
cachematrix.R
|
##The following pair of functions can compute the inverse of a matrix
##and cache the solution to be used later instead of re-computing.
##The makeCacheMatrix function creates a list containing a function to
##1. set the value of the matrix
##2. get the value of the matrix
##3. set the value of the inverse of the matrix
##4. get the value of the inverse of the matrix
makeCacheMatrix <- function(mx = matrix()){
    # Closure holding the matrix `mx` and its (possibly NULL) cached inverse.
    cached <- NULL
    # Store a new matrix; the old cached inverse becomes invalid.
    set <- function(x) {
        mx <<- x
        cached <<- NULL
    }
    # Accessors for the matrix and its cached inverse.
    get <- function() mx
    setinv <- function(inv) cached <<- inv
    getinv <- function() cached
    # Return the four functions under their documented names.
    list(
        set = set,
        get = get,
        setinv = setinv,
        getinv = getinv
    )
}
##The cacheSolve function calculates the inverse of the "matrix" created
##with the makeCacheMatrix function. However, it first checks to see if
##the inverse has already been calculated. If so, it gets the inverse from
##the cache and skips the computation. Otherwise, it calculates the inverse of
##the matrix and sets the value of the inverse in the cache via the setinv
##function.
cacheSolve <- function(mx, ...) {
    # Return the inverse of the matrix held by `mx` (a makeCacheMatrix
    # object), reusing the cached value when available.
    cached <- mx$getinv()
    if (!is.null(cached)) {
        message("Getting cached data...")
        return(cached)
    }
    # Cache miss: compute, store, and return the inverse.
    inv <- solve(mx$get(), ...)
    mx$setinv(inv)
    inv
}
|
ef5e4e859765d6d91736d8d948d4d0c02453238f
|
9972106e39f5cc87ec7c85a3c890d09a253de6bb
|
/man/print.n.for.2p.rd
|
ab30a049f18ec71316e7b664704180ee6346ef63
|
[] |
no_license
|
cran/epiDisplay
|
ed688f06f5f7101d851a28857930dd43748b51ee
|
e90693c88a54fe1ecb7c64638de43c3992375de9
|
refs/heads/master
| 2022-05-28T12:16:44.261031
| 2022-05-18T13:20:02
| 2022-05-18T13:20:02
| 38,376,568
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 609
|
rd
|
print.n.for.2p.rd
|
\name{print n.for.2p}
\alias{print.n.for.2p}
\title{Print n.for.2p results}
\description{Print results for sample size for hypothesis testing of 2 proportions}
\usage{
\method{print}{n.for.2p}(x, ...)
}
\arguments{
\item{x}{object of class 'n.for.2p'}
\item{...}{further arguments passed to or used by methods.}
}
\author{Virasakdi Chongsuvivatwong
\email{ cvirasak@gmail.com}
}
\seealso{'n.for.2p'}
\examples{
n.for.2p(p1=.1, p2=.2)
n.for.2p(p1=seq(1,9,.5)/10, p2=.5)
}
\keyword{database}
|
b02d2407ae36f1ecaf0e0b56359c00b2732f7033
|
ab00bc7e17121d2dcf3741dc9f650a4e76ed4a44
|
/revdep/drake.R
|
55909ba47a6470046f26d1693036e56d263e6aa1
|
[
"MIT"
] |
permissive
|
tidyverse/dplyr
|
9b7fdc07e6a70bc8e802094e2e2a127af22bcc02
|
cf8031d00f406c6dc5d483d7e9e34639df797b81
|
refs/heads/main
| 2023-09-01T03:52:50.608019
| 2023-08-25T13:42:29
| 2023-08-25T13:42:29
| 6,427,813
| 3,290
| 1,982
|
NOASSERTION
| 2023-09-09T20:14:25
| 2012-10-28T13:39:17
|
R
|
UTF-8
|
R
| false
| false
| 6,173
|
r
|
drake.R
|
source("revdep/drake-base.R")
subset_available <- function(available, pkg) {
  # Return the single matching row of the availability matrix, or a
  # zero-row matrix with the same columns when `pkg` is unknown.
  rows <- if (pkg %in% rownames(available)) pkg else integer()
  available[rows, , drop = FALSE]
}
download <- function(pkg, available, ...) {
  # Download the source tarball for `pkg` into revdep/download and return
  # its path (column 2 of download.packages' result). warn = 2 promotes
  # download warnings to errors so partial downloads fail loudly.
  # `...` carries drake-plan dependency targets only; it is not used here.
  dir <- fs::dir_create("revdep/download")
  dir <- fs::path_real(dir)
  withr::with_options(
    list(warn = 2),
    download.packages(pkg, dir, available = available)[, 2]
  )
}
get_i_lib <- function() {
  # Absolute path to the shared library of CRAN-installed revdeps,
  # creating the directory on first use.
  path <- "revdep/libs/cran"
  fs::dir_create(path)
  fs::path_real(path)
}
install <- function(pkg, path, ...) {
  # Install the downloaded tarball at `path` into the shared CRAN library.
  # `...` are already-installed dependency objects (return values of this
  # very function); their on-disk presence is asserted before installing.
  dep_packages <- list(...)
  dep_paths <- map_chr(dep_packages, attr, "path")
  stopifnot(all(fs::dir_exists(dep_paths)))
  # Record pkg itself plus the sorted union of all dependencies' deps.
  deps <- c(pkg, sort(as.character(unique(unlist(map(dep_packages, attr, "deps"))))))
  lib <- get_i_lib()
  # Point R_LIBS_USER at the shared library so R CMD INSTALL targets it.
  withr::with_envvar(
    c(R_LIBS_USER = lib),
    # Suppress warnings about loaded packages
    retry(system(paste0("R CMD INSTALL ", path)))
  )
  # Fail fast if the install silently produced nothing.
  stopifnot(dir.exists(file.path(lib, pkg)))
  # Return value doubles as metadata: the package name carrying its install
  # path, installed version, and recorded dependency names as attributes.
  structure(
    pkg,
    path = file.path(lib, pkg),
    version = utils::packageVersion(pkg, lib),
    deps = deps
  )
}
get_old_lib <- function() {
  # Absolute path to the library used for checks against the released
  # ("old") version; created on first use.
  path <- "revdep/libs/old"
  fs::dir_create(path)
  fs::path_real(path)
}
get_new_lib <- function() {
  # Absolute path to the library used for checks against the local dev
  # ("new") version; created on first use.
  path <- "revdep/libs/new"
  fs::dir_create(path)
  fs::path_real(path)
}
create_lib <- function(pkg, lib) {
  # Populate `lib` with symlinks into the shared CRAN install library, one
  # per package name in `pkg`. Returns `lib` (invisibly used as a drake
  # target value).
  fs::dir_create(lib)
  target <- fs::path(lib, pkg)
  # Remove stale links first — NOTE(review): presumably fs::link_create
  # fails on pre-existing paths; confirm against fs docs.
  fs::link_delete(target[fs::link_exists(target)])
  fs::link_create(fs::path(get_i_lib(), pkg), target)
  lib
}
create_new_lib <- function(old_lib, new_lib) {
  # Install the local package source (".") into `new_lib`, with `old_lib`
  # available behind it on the library path for dependency resolution.
  lib <- c(new_lib, old_lib)
  withr::with_libpaths(lib, action = "replace", {
    remotes::install_local(".")
  })
  lib
}
get_pkg_and_deps <- function(i_pkg) {
  # Alias kept for readability at call sites: the "deps" attribute set by
  # install() already contains the package itself plus its dependencies.
  get_deps(i_pkg)
}
get_deps <- function(i_pkg) {
  # Read back the dependency vector recorded on an install() result
  # (the package name followed by its sorted dependency names).
  recorded <- attr(i_pkg, "deps")
  recorded
}
check <- function(tarball, lib, ...) {
  # Run R CMD check on `tarball` against `lib` plus a throwaway library of
  # symlinks for the extra packages passed via `...`. Returns the
  # rcmdcheck result object.
  pkgs <- c(...)
  check_lib <- fs::file_temp("checklib")
  create_lib(pkgs, check_lib)
  # NOTE(review): ignore(600) presumably hides the timeout value from
  # drake's code analysis so changing it doesn't invalidate targets — confirm.
  withr::with_libpaths(c(lib, check_lib), rcmdcheck::rcmdcheck(tarball, quiet = TRUE, timeout = ignore(600)))
}
compare <- function(old, new) {
  # Diff the old-version and new-version rcmdcheck results for one revdep.
  rcmdcheck::compare_checks(old, new)
}
# Assemble the full drake plan for the reverse-dependency check pipeline:
# availability lookups -> downloads -> installs -> old/new libraries ->
# old-vs-new R CMD check runs -> per-package and overall comparisons.
# Target naming: av_<pkg>, d_<pkg>, i_<pkg>, c_<pkg>_{old,new}, c_<pkg>.
get_plan <- function() {
  plan_deps <- get_plan_deps()
  config_deps <- drake_config(plan_deps)
  # The dependency metadata targets (deps, revdeps, base_pkgs, ...) must be
  # up to date in the drake cache before the main plan can be generated.
  if (length(outdated(config_deps, make_imports = FALSE)) > 0) {
    warning("Making dependencies first, rerun.", call. = FALSE)
    return(plan_deps)
  }

  # Avoid expensive and flaky check for build tools from pkgbuild
  # Leads to errors, need to check!
  #options(buildtools.check = identity)

  deps <- readd(deps)

  # One availability-lookup target per package.
  make_subset_available <- function(pkg) {
    expr(subset_available(available, !!pkg))
  }

  plan_available <-
    deps %>%
    enframe() %>%
    transmute(
      target = glue("av_{name}"),
      call = map(name, make_subset_available)
    ) %>%
    deframe() %>%
    drake_plan(list = .)

  # Download targets; packages outside this package's own dep set also
  # depend on old_lib so they are fetched after the base library exists.
  make_download <- function(pkg, my_pkgs) {
    av_pkg <- sym(glue("av_{pkg}"))
    deps <- list()
    if (!(pkg %in% my_pkgs)) {
      deps <- c(deps, expr(old_lib))
    }
    expr(download(!!pkg, available = !!av_pkg, !!!deps))
  }

  plan_download <-
    deps %>%
    enframe() %>%
    transmute(
      target = glue("d_{name}"),
      call = map(name, make_download, c(get_this_pkg(), deps[[get_this_pkg()]]))
    ) %>%
    deframe() %>%
    drake_plan(list = .)

  # Install targets: each i_<pkg> depends on its downloaded tarball and the
  # i_<dep> targets of its (non-base) dependencies.
  make_install <- function(pkg, dep_list) {
    d_pkg <- sym(glue("d_{pkg}"))
    expr(install(!!pkg, path = !!d_pkg, !!! dep_list))
  }

  # Map dependency names to their i_<dep> target symbols, skipping base
  # packages (which need no install target).
  create_dep_list <- function(deps, base_pkgs) {
    valid_deps <- setdiff(deps, base_pkgs)
    syms(glue("i_{valid_deps}"))
  }

  plan_install <-
    deps %>%
    enframe() %>%
    mutate(target = glue("i_{name}")) %>%
    mutate(
      dep_list = map(value, create_dep_list, readd(base_pkgs)),
      call = map2(name, dep_list, make_install)
    ) %>%
    select(target, call) %>%
    deframe() %>%
    drake_plan(list = .)

  # Two shared libraries: old (released version + deps) and new (dev build).
  plan_base_libs <- drake_plan(
    old_lib = create_lib(get_pkg_and_deps(!!sym(glue("i_{get_this_pkg()}"))), get_old_lib()),
    new_lib = create_new_lib(old_lib, get_new_lib())
  )

  # Check targets: run R CMD check for one revdep against a given library,
  # pulling in the revdep's own dependency closure as extra symlinks.
  make_check <- function(pkg, lib, deps, first_level_deps, base_pkgs) {
    lib <- enexpr(lib)
    req_pkgs <- first_level_deps[[pkg]]
    req_pkgs_deps <- deps[c(pkg, req_pkgs)] %>% unname() %>% unlist() %>% unique()
    all_deps <- c(req_pkgs, req_pkgs_deps) %>% unique()
    i_deps <- create_dep_list(all_deps, base_pkgs)
    d_dep <- sym(glue("d_{pkg}"))
    expr(check(!!d_dep, !!lib, !!! i_deps))
  }

  # One c_<pkg>_old and one c_<pkg>_new target per reverse dependency.
  plan_check <-
    readd(revdeps) %>%
    enframe() %>%
    mutate(
      old = map(value, make_check, old_lib, readd(deps), readd(first_level_deps), readd(base_pkgs)),
      new = map(value, make_check, new_lib, readd(deps), readd(first_level_deps), readd(base_pkgs))
    ) %>%
    gather(kind, call, old, new) %>%
    transmute(
      target = glue("c_{value}_{kind}"),
      call
    ) %>%
    deframe() %>%
    drake_plan(list = .)

  # Compare the old and new check results for each revdep.
  make_compare <- function(pkg) {
    old_result <- sym(glue("c_{pkg}_old"))
    new_result <- sym(glue("c_{pkg}_new"))
    expr(compare(!!old_result, !!new_result))
  }

  plan_compare <-
    readd(revdeps) %>%
    enframe() %>%
    transmute(
      target = glue("c_{value}"),
      call = map(value, make_compare)
    ) %>%
    deframe() %>%
    drake_plan(list = .)

  # Aggregate target: collect every per-package comparison, each wrapped in
  # try() so one failure doesn't sink the aggregate.
  make_compare_all <- function(pkg) {
    check_targets <- set_names(syms(glue("c_{pkg}")), pkg)
    check_targets <- map(check_targets, function(x) expr(try(!!x)))
    expr(list(!!! check_targets))
  }

  plan_compare_all <-
    readd(revdeps) %>%
    enframe() %>%
    summarize(
      target = "compare_all",
      call = list(make_compare_all(value))
    ) %>%
    deframe() %>%
    drake_plan(list = .)

  #future::plan(future.callr::callr)

  plan <-
    bind_rows(
      # Put first to give higher priority
      plan_check,
      plan_compare,
      plan_compare_all,
      plan_install,
      plan_base_libs,
      plan_download,
      plan_available,
      plan_deps
    )
  plan
}
# Build the plan and run it. keep_going = TRUE so one failing revdep check
# does not abort the rest; commented-out lines are debugging/parallelism
# switches left in for reference.
plan <- get_plan()
#trace(conditionCall.condition, recover)
make(
  plan,
  #"compare_all",
  keep_going = TRUE,
  #parallelism = "future"
  , verbose = 3
  , jobs = parallel::detectCores()
)
|
b96abcff3bfbcd4aacd868a787ba30836436393b
|
4bcb4b43befff86ce93476e5418b96274347ac50
|
/man/lowerdiag2mat.Rd
|
e92923714806d14c8d8eb7842fc51aa770e6f331
|
[] |
no_license
|
ashiklom/mvtraits
|
7a05e2c19925ea00da964c462cda38a05707a8db
|
7a8cbd4015adad8b187252b244e89eb25fe73c65
|
refs/heads/master
| 2020-09-12T07:32:21.259622
| 2019-09-03T01:15:07
| 2019-09-03T01:15:07
| 94,459,450
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,118
|
rd
|
lowerdiag2mat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lowerdiag2mat.R
\name{lowerdiag2mat}
\alias{lowerdiag2mat}
\title{Convert a vector of the lower diagonal of a symmetrical matrix to a matrix}
\usage{
lowerdiag2mat(vec, col_names = TRUE, corr = FALSE, colorder = NULL,
hier = FALSE)
}
\arguments{
\item{vec}{Vector of lower diagonal values}
\item{col_names}{(Logical) If \code{TRUE} (default), extract row and column names from
vector names formatted as \code{"row..column"}.}
\item{corr}{(Logical) If \code{TRUE}, assume this is a correlation matrix where
the diagonal is fixed at 1 and therefore not stored.}
\item{colorder}{Optional numeric or character vector specifying the desired
column order.}
\item{hier}{(Logical) Whether the vector names also include a group name.
Only used if \code{col_names} is \code{TRUE}.}
}
\description{
Storing just the lower diagonal is an efficient way to store MCMC samples of
a matrix (and, in fact, is how matrices are stored by the samplers in
\code{\link[=fit_mvnorm]{fit_mvnorm()}} and \code{\link[=fit_mvnorm_hier]{fit_mvnorm_hier()}}).
}
|
59e204220c2685ade025b654b17250ec91a753ba
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/2176_0/rinput.R
|
cd9c39f0df97496401f79ed5bc6d9135d91579e0
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# rinput.R — unroot a Newick phylogenetic tree (preparation for codeml).
library(ape)
# Read the tree produced upstream for this gene family.
testtree <- read.tree("2176_0.txt")
unrooted_tr <- unroot(testtree)
# Write the unrooted tree back out under a matching name.
write.tree(unrooted_tr, file="2176_0_unrooted.txt")
|
c90f54e4bf84de16d548ea2fb4924c26e48a9275
|
3fe1517654896fb0e0e821380c907660195b2e0f
|
/man/makeCDM.Rd
|
89b0db178c6d7e87f20eb43683b12e9aa882e9df
|
[] |
no_license
|
eliotmiller/metricTester
|
9462035d8342e49d766ec37463cd27c2090c85c1
|
976d9b43773f1a06bc0254d3f355d2ee9f4be659
|
refs/heads/master
| 2020-04-06T06:30:42.528226
| 2019-12-12T20:38:53
| 2019-12-12T20:38:53
| 11,936,920
| 8
| 5
| null | 2017-03-21T15:00:28
| 2013-08-06T23:25:26
|
R
|
UTF-8
|
R
| false
| true
| 1,716
|
rd
|
makeCDM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeCDM.R
\name{makeCDM}
\alias{makeCDM}
\title{Wrapper for creating a CDM from a spatial simulation result}
\usage{
makeCDM(single.simulation, no.plots, plot.length)
}
\arguments{
\item{single.simulation}{The results of a single spatial simulation, e.g. a call to
randomArena}
\item{no.plots}{The desired number of plots in the final CDM}
\item{plot.length}{The length of one side of each plot}
}
\value{
A list with the regional abundance from the single simulation result, if it
included such a result, or the results of a call to abundanceVector() if not. The list
also includes the CDM based on the parameters (number and size of plots) provided.
}
\description{
Given the results of a single spatial simulation, and a desired number of plots
and the length of one side of each plot, will place the plots down and output
a CDM. Importantly, also carries along the regional abundance vector from the
spatial simulation results if one was included.
}
\details{
Just a simple wrapper function to quickly turn spatial simulations into CDMs
for subsequent analysis.
}
\examples{
tree <- geiger::sim.bdtree(b=0.1, d=0, stop="taxa", n=50)
#prep the data for the simulation
prepped <- prepSimulations(tree, arena.length=300, mean.log.individuals=2,
length.parameter=5000, sd.parameter=50, max.distance=20, proportion.killed=0.2,
competition.iterations=3)
competition <- competitionArena(prepped)
test <- makeCDM(competition, 15, 30)
}
\references{
Miller, E. T., D. R. Farine, and C. H. Trisos. 2016. Phylogenetic community
structure metrics and null models: a review with new methods and software.
Ecography DOI: 10.1111/ecog.02070
}
|
6c26f7c93b15e89de4d71aab6ea05c358ec6b741
|
da45f8493930e97a28d1784ec5e5280275ff4a4d
|
/man/showall.Rd
|
b60c5fb2785d329e9367c09f50bef5d77ea2fd3a
|
[] |
no_license
|
cran/npde
|
ae704a7f3c7725756cb943f86bbd2037bf8ebebc
|
e612d833986a752ea2f2580f07f838478c3864a5
|
refs/heads/master
| 2023-06-24T21:23:39.188909
| 2023-06-15T08:10:02
| 2023-06-15T08:10:02
| 17,697,971
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 617
|
rd
|
showall.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NpdeData-methods.R, R/NpdeRes-methods.R,
% R/NpdeObject-methods.R
\name{showall}
\alias{showall}
\alias{showall.NpdeData}
\alias{showall,NpdeData-method}
\alias{showall.default}
\alias{showall,method}
\alias{showall.NpdeRes}
\alias{showall.NpdeObject}
\title{Contents of an object}
\usage{
showall(object)
\method{showall}{NpdeRes}(object)
\method{showall}{NpdeObject}(object)
}
\arguments{
\item{object}{a NpdeData object}
}
\value{
No return value, shows the object
}
\description{
Prints the contents of an object
}
\keyword{print}
|
863f93d69cb83b2f4902da354af51fc99f586859
|
2529bf4201dceabb19a0197e77e6e4a67064502c
|
/Ass3/test.r
|
a526d31988464bc4996f3f9b68ca09a374260503
|
[] |
no_license
|
adamzenith/Adv-data-analysis-projects
|
a5c468eb39e2e69da6ff45977693151c6f5652ea
|
1b897cb4f3dc58ae198fe3f03bab9c56236b96e1
|
refs/heads/main
| 2023-04-21T12:38:05.832092
| 2021-05-12T11:11:06
| 2021-05-12T11:11:06
| 343,458,516
| 0
| 0
| null | 2021-03-08T15:29:19
| 2021-03-01T15:10:44
|
R
|
UTF-8
|
R
| false
| false
| 960
|
r
|
test.r
|
### 1.1
# Fit a random-intercept mixed model of clothing insulation (clo) on sex
# with lme4 (ML, not REML), as the reference for the hand-coded
# likelihood implemented below.
library(lme4)
clothing <- read.table(file = 'clothingFullAss03.csv', sep = ",",header=TRUE)
# Highest subject id; subjIds apparently start at 0 (the likelihood loop
# below runs over 0:no.persons).
no.persons <- max(clothing$subjId)
## Test result
fit0 <- lmer(clo~sex+(1|subjId),data=clothing,REML=FALSE)
summary(fit0)
nll.0 <- function(theta,dat,X) {
params <- X %*% t(t(theta[1:2]))
sigma <- exp(theta[3])
sigma.u <- exp(theta[4])
L = 0
# loop over subjects
for(i in 0:no.persons){
y_i <- dat$clo[dat$subjId==i]
params_i <- params[dat$subjId==i]
n_i <- length(params_i)
ones <- matrix(1,n_i,n_i)
V_i <- diag(n_i)*sigma+ones*sigma.u
likelihood = log(1/((2*pi)^(n_i/2)*sqrt(det(V_i)))*exp(-0.5*t(y_i-params_i)%*%solve(V_i)%*%(y_i-params_i)))
L = L - likelihood
}
# output Likelihood
L
}
X <- model.matrix(fit0)
theta0 <- c(0.5, -0.1, 0.1, 0.1)
fit.nll.0 <- nlminb(theta0, nll.0, dat = clothing, X = X)
#COMPARING
print(c(fit.nll.0$objective,logLik(fit0)))
|
8665840b1a1370e1662d440ddeaa4d149b1582a8
|
9da2e50861397cadc365e39883a0dd4d409c72a6
|
/codeANM/code/experiments/ANM/experimentAltitude.R
|
87bbe70d6d762cc0de0f9deab3c8b2c23cab5418
|
[
"BSD-2-Clause"
] |
permissive
|
bquast/ANM
|
643c9c6168d7ed0767c6594683c9b07a8f7a6213
|
eaf5cec24af8143d4bbd96a3e469ed7bb479ed98
|
refs/heads/master
| 2021-01-19T08:49:04.670569
| 2014-12-22T09:48:24
| 2014-12-22T09:48:24
| 28,332,929
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,884
|
r
|
experimentAltitude.R
|
# Copyright (c) 2013 - 2013 Jonas Peters [peters@stat.math.ethz.ch]
# All rights reserved. See the file COPYING for license terms.
# Causal-structure experiment on the Altitude/Sun/Temp data set, comparing
# LINGAM, PC/CPC, brute-force SEM-IND search and ICML (linear and gam fits).
#source("../../startups/startupGES.R", chdir = TRUE)
source("../../util_DAGs/randomB.R")
source("../../util_DAGs/randomDAG.R")
source("../../util_DAGs/dag2cpdagAdj.R")
source("../../startups/startupSHD.R", chdir = TRUE)
source("../../startups/startupSID.R", chdir = TRUE)
source("../../startups/startupLINGAM.R", chdir = TRUE)
source("../../startups/startupICML.R", chdir = TRUE)
source("../../startups/startupBF.R", chdir = TRUE)
source("../../startups/startupGDS.R", chdir = TRUE)
source("../../startups/startupPC.R", chdir = TRUE)
source("../../startups/startupScoreSEMIND.R", chdir = TRUE)
# NOTE(review): this stop() aborts the script here, so everything below it is
# unreachable when the file is sourced; remove it once Altitude.RData exists.
stop("The data are not available in this code package.")
load("./Altitude.RData")
resLINGAM <- lingamWrap(cbind(Altitude,Sun,Temp))
cat("LINGAM:\n")
show(resLINGAM$Adj)
resPC <- pcWrap(cbind(Altitude,Sun,Temp),0.01,Inf)
cat("PC:\n")
show(resPC)
# NOTE(review): this call is byte-identical to the PC call above although the
# label says CPC -- confirm whether a conservative-PC option is missing here.
resPC <- pcWrap(cbind(Altitude,Sun,Temp),0.01,Inf)
cat("CPC:\n")
show(resPC)
#resGES <- gesWrap(cbind(Altitude,Sun,Temp))
#cat("GES:\n")
#show(resGES$Adj)
# linear
pars <- list(regr.method = train_linear, regr.pars = list(), indtest.method = indtestHsic, indtest.pars = list())
resBF <- BruteForce(cbind(Altitude,Sun,Temp), "SEMIND", pars, output = TRUE)
cat("BF linear:\n")
show(resBF$Adj)
# linear
resICML <- ICML(cbind(Altitude,Sun,Temp),0.05,model=train_linear,indtest=indtestHsic,output= TRUE)
cat("ICML linear:\n")
show(resICML)
# gam
pars <- list(regr.method = train_gam, regr.pars = list(), indtest.method = indtestHsic, indtest.pars = list())
resBFg <- BruteForce(cbind(Altitude,Sun,Temp), "SEMIND", pars, output = TRUE)
cat("BF gam:\n")
show(resBFg$Adj)
# gam
resICML <- ICML(cbind(Altitude,Sun,Temp),0.05,model=train_gam,indtest=indtestHsic,output= TRUE)
cat("ICML gam:\n")
show(resICML)
|
b35e67995054880ad87f1c5ad9d45caf62923d14
|
b5c4c7b13d97161d67ab8aafa4a124696331aa70
|
/loans v6.R
|
7e189b10d59d3e3f0d8c47e75c6271ead9b0c0e9
|
[] |
no_license
|
ISWARPRADHAN/Gramener-Case-Study
|
8abf5fc44adab11a785d16692421b3fb06c6c7ba
|
76f23f01afecaccb5a2c77bbd683715e20cd3e5e
|
refs/heads/master
| 2022-01-11T20:00:13.573840
| 2018-08-13T17:52:29
| 2018-08-13T17:52:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,216
|
r
|
loans v6.R
|
#PLEASE NOTE THIS CODE WILL TAKE 3-4 MINS TO COMPLETE THE EXECUTION
#MANY GRAPHS ARE GENERATED!!
#THANK YOU FOR YOUR PATIENCE
#--clearing workingspace and loading required libraries ------- --------
#clearing workingspace and loading required libraries
# NOTE(review): rm(list=ls()) wipes the user's workspace and setwd() below
# hard-codes a machine-specific path; both are discouraged in shared scripts.
rm(list=ls())
library(xlsx)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(stringr)
#install.packages("reshape2")
library(reshape2)
# NOTE(review): xlsx is attached a second time here; harmless but redundant.
library(xlsx)
#install.packages("corrplot")
library(corrplot)
#install.packages("GGally")
library(GGally)
#seting working directory
setwd("D:\\Loans Case Study\\")
getwd()
# Record start time so total runtime can be reported at the end of the script.
start_time <- Sys.time()
#--loading dataframe with loans data--------------
tmp_loans.data <- read.csv("loan.csv", header = TRUE, stringsAsFactors = FALSE)
#Data Understanding and Prepration
str(tmp_loans.data)
summary(tmp_loans.data)
#View(tmp_loans.data)
#Data(frame) contains 39717 obs. of 111 variables. With many NA's.
#checking NA's counts per variable / colomns.
# NA count per column (printed because of the surrounding parentheses).
(tmp_loan.data_NA_stats <- sapply(colnames(tmp_loans.data), function(x) length(which(is.na(tmp_loans.data[,x])))))
str(tmp_loan.data_NA_stats)
# Total number of rows; used later to express NA counts as percentages.
length_id <- length(tmp_loans.data$id)
#--1. Data preparation ----
#--1.1 checkinig for duplicates ------------
# Each which() prints integer(0) when there are no duplicate rows / ids.
which(duplicated(tmp_loans.data))
which(duplicated(tmp_loans.data$id))
which(duplicated(tmp_loans.data$member_id))
which(duplicated(tmp_loans.data))
#no duplicates found
#--1.2 Removing cols that have all NA's -----
#Many cols have all NA's!! Implying these variable have no meaningfull information for any analysis.
#Removing all the colomns that have all NA's (39717 NA's)
#is.na(loans.data[])
# TRUE for columns that are NOT entirely NA.
all_na_cols <- lapply(colnames(tmp_loans.data), function(x) length(which(is.na(tmp_loans.data[,x])))!= length_id)
class(all_na_cols)
# Keep only columns with at least one non-NA value.
loans.data <- tmp_loans.data[,which(all_na_cols[]==1)]
summary(loans.data)
str(loans.data)
# about 54 colomns had all observations as NA's; these are removed.
#--1.3 Data modification (format changes, derived fields etc) ---------------
#-- Date variable conversions
# issue_d is like "Dec-11"; prepend a day so as.Date() can parse it.
loans.data$issue_d_conv <- as.Date(paste('01-',loans.data$issue_d,sep=''),'%d-%b-%y')
str(loans.data$issue_d_conv)
summary(loans.data$issue_d_conv)
unique(loans.data$issue_d_conv)
loans.data$issue_d_conv_year <- as.integer(format(loans.data$issue_d_conv, "%Y"))
str(loans.data$issue_d_conv_year)
unique(loans.data$issue_d_conv_year)
loans.data$issue_d_conv_month_num <- as.integer(format(loans.data$issue_d_conv, "%m"))
# FIX: the original inspected loans.data$issue_d_conv_month, a column that was
# never created (str()/unique() silently reported NULL); inspect the column
# actually built above.
str(loans.data$issue_d_conv_month_num)
unique(loans.data$issue_d_conv_month_num)
#interest rate conversion, Revolving line utilization rate
loans.data$int_rate_conv <- as.numeric(gsub("%", "", loans.data$int_rate))
summary(loans.data$int_rate_conv)
loans.data$revol_util_conv <- as.numeric(gsub("%", "", loans.data$revol_util))
summary(loans.data$revol_util_conv)
#calculating the actual charged off amount; subtract net principle paid from principle
loans.data$derieved_chargedoff_amnt <- loans.data$funded_amnt - loans.data$total_rec_prncp
str(loans.data$derieved_chargedoff_amnt)
#calculating the charge amount as percentage of funded_amt
# FIX: spell the argument out ("digit" only worked via partial matching).
loans.data$derieved_chargedoff_per = round((loans.data$derieved_chargedoff_amnt * 100 / loans.data$funded_amnt), digits = 2)
#term conversion into number 36 / 60
loans.data$term_conv <- as.numeric(trimws((gsub(" months", "", loans.data$term))))
#employment duration to numeric
# Strip "years"/"year", "+" and "<" so e.g. "10+ years" -> 10, "< 1 year" -> 1.
loans.data$emp_length_conv <- gsub("years", "", loans.data$emp_length)
loans.data$emp_length_conv <- gsub("year", "", loans.data$emp_length_conv)
loans.data$emp_length_conv <- gsub("\\+", "", loans.data$emp_length_conv)
loans.data$emp_length_conv <- gsub("<", "", loans.data$emp_length_conv)
loans.data$emp_length_conv <- as.numeric(loans.data$emp_length_conv)
str(loans.data$emp_length_conv)
summary(loans.data$emp_length_conv)
unique(loans.data$emp_length_conv)
#--2. Quick statistics on numeric variables / continious variables-----
#geting the class of variables
(loans.data.col_class <- unlist(sapply(loans.data,class)))
# First pass: base summary() per numeric/integer column (printed, then
# overwritten by the quantile matrix built next).
(loans.data.var_continious_summary <- sapply(loans.data[,loans.data.col_class=="numeric" | loans.data.col_class=="integer"], function(x) summary(x)))
#geting the quantile for numeric and integer variables.
quantile_for <- c(0, 0.25, 0.50, 0.75, 1.00)
loans.data.var_continious_summary <- sapply(loans.data[,loans.data.col_class=="numeric" | loans.data.col_class=="integer"], function(x) quantile(x, quantile_for, na.rm = TRUE))
#View(loans.data.var_continious_summary)
#sapply(loans.data[,loans.data.col_class=="numeric" | loans.data.col_class=="integer"], function(x) length(which(is.na(x))))
#geting the mean for numeric and integer variables.
# FIX: the original passed rm.na = TRUE, which mean() silently ignores (the
# argument is na.rm), so every column containing NAs reported a mean of NA.
variable_means <- as.numeric(sapply(loans.data[,loans.data.col_class=="numeric" | loans.data.col_class=="integer"], function(x) mean(x, na.rm = TRUE)))
loans.data.var_continious_summary <- rbind(loans.data.var_continious_summary, variable_means)
#str(loans.data.var_continious_summary)
#geting count of NA's, not NA's and total for numeric and integer variables.
no_of_NAs <- unlist(sapply(loans.data[,loans.data.col_class=="numeric" | loans.data.col_class=="integer"], function(x) length(which(is.na(x)))))
loans.data.var_continious_summary <- rbind(loans.data.var_continious_summary, no_of_NAs)
#View(loans.data.var_continious_summary)
not_NAs <- unlist(sapply(loans.data[,loans.data.col_class=="numeric" | loans.data.col_class=="integer"], function(x) length(which(!is.na(x)))))
loans.data.var_continious_summary <- rbind(loans.data.var_continious_summary, not_NAs)
total <- no_of_NAs + not_NAs
loans.data.var_continious_summary <- rbind(loans.data.var_continious_summary, total)
#geting count of NA's in percentage for numeric and integer variables.
no_of_NAs_percentage <- (no_of_NAs / length_id) * 100
loans.data.var_continious_summary <- rbind(loans.data.var_continious_summary, no_of_NAs_percentage)
#str(loans.data.var_continious_summary)
#View(loans.data.var_continious_summary)
#transposeing for better readability
loans.data.var_continious_summary <- t(loans.data.var_continious_summary)
# After the transpose the 10 columns are: 5 quantiles + mean (1:6),
# NA / non-NA / total counts (7:9) and the NA percentage (10).
loans.data.var_continious_summary[,1:6] <- round(loans.data.var_continious_summary[,1:6], digits = 2)
loans.data.var_continious_summary[,7:9] <- round(loans.data.var_continious_summary[,7:9], digits = 0)
loans.data.var_continious_summary[,10] <- round(loans.data.var_continious_summary[,10], digits = 2)
View(loans.data.var_continious_summary)
#--3. Quick statistics on Categorical------------
#selecting categorical variables that has unique values < 55. Sub grade seem to have hi number of unique values.
#one can convert all the charector variables into factor to find out the no of unique.. but i want to summarize in a
#df the summary of categorical variables.
str(loans.data)
loans.data.col_class <- unlist(sapply(loans.data,class))
loans.data.colnames_categorical <- sapply(loans.data[,loans.data.col_class=="character"],
                                          function(x) length(unique(x)) < 55)
loans.data.var_categorical <- loans.data[,names(loans.data.colnames_categorical)]
str(loans.data.var_categorical)
length(unique(loans.data.var_categorical$issue_d))
View(loans.data.var_categorical)
#function to append the summary of categorical variables one after the other. the summary contains the name, count
#count % and the category
# x: column index into loans.data.var_categorical.
# Returns a tibble with Category, count, count_percentage and the variable name.
variable_sumary <- function(x) {
  result <- loans.data %>%
    group_by(loans.data.var_categorical[,x]) %>%
    summarise(count=n()) %>%
    arrange(desc(count))
  sum_count <- sum(result$count)
  result$count_percentage <- round((result$count *100 / sum_count),2)
  names(result)[1] <- "Category"
  result$categorical_Variable_names <- names(loans.data.var_categorical[x])
  return(result)
}
# Seed with the scalar 1 so rbind() works on the first iteration; the seed row
# is dropped below (categorical_Variable_names == "1").
loans.data.var_categorical_summary <- 1
# FIX: seq_len() instead of 1:ncol(); the latter iterates over c(1, 0) and
# uses invalid indices when there are no categorical columns.
for (i in seq_len(ncol(loans.data.var_categorical))) {
  if (length(unique(loans.data.var_categorical[,i])) <= 55){
    print (i)
    loans.data.var_categorical_summary <- rbind(loans.data.var_categorical_summary, variable_sumary(i))
    next
  }
}
#View(loans.data.var_categorical_summary)
# removing the first observation that was to initialize the df.
loans.data.var_categorical_summary <- loans.data.var_categorical_summary %>%
  filter(categorical_Variable_names != "1")
#quick check to see if we are misssing any observation. all count shoud be equal to 39717
loans.data.var_categorical_summary %>%
  group_by(categorical_Variable_names) %>%
  summarise(sum(count))
View(loans.data.var_categorical_summary)
#--4. quick plots on categorical variable -------------
#--4.1 univariate categorical plots--------
# Bar chart of category counts for one categorical variable, taken from the
# pre-computed loans.data.var_categorical_summary table.
# cat_x: a value of categorical_Variable_names (the variable to plot).
# Returns the ggplot object; callers wrap it in plot().
# NOTE(review): the dot in the name makes this look like an S3 plot() method
# for class "graphs"; kept as-is because the name is part of the interface.
plot.graphs <- function(cat_x) {
  # result <- loans.data %>%
  print(cat_x)
  print("processing graphs. thank you for your patience... Please wait...")
  graph_tmp <- loans.data.var_categorical_summary %>%
    filter(categorical_Variable_names == cat_x)
  result <- ggplot(data = graph_tmp, aes(x = Category, y = count, color = Category, fill = Category)) +
    geom_bar(alpha = 0.7, stat = "identity") +
    labs(x = cat_x) +
    geom_text(aes(label = count), position = position_dodge(1), color = "black")
  # labs(colour = x)
  return(result)
}
category_collection <- unique(loans.data.var_categorical_summary$categorical_Variable_names)
# FIX: seq_along() instead of 1:length(); the latter yields c(1, 0) and runs
# the loop with bad indices when the collection is empty.
for(j in seq_along(category_collection)){
  plot(plot.graphs(category_collection[j]))
}
##
# Key inferences
# - term - 36 months has 29096 and 10621 loans observation including closed, charged of and current.
# - grade 'B' and 'A' had the max observation
# - sub_grade B3, A4, A3, B5, B4 has max loan observations than the rest
# - emp_length '10+' has the significant loan observation 8879 than the rest.
# - home_ownership 'RENT' and 'MORTGAGE' combined has nearly all the observations.
# - addr_state 'CA' has the max observation 7099, next is NY - 3812
# - based on loan_status fully paid, charged off, current has 39250, 5627 and 1140 observations respectivly
# - purpose 'debt_consolidation' had the max observation next is 'credit_card'
#--4.2 bivariate categorical plots --------------
# Draws three bar charts for one categorical variable of loans.data:
#   1. counts stacked by cat_color
#   2. cat_y totals as proportions (position = "fill") by cat_color
#   3. cat_y totals stacked by cat_color
# cat_x / cat_y / cat_color are column names passed as strings (hence
# aes_string; NOTE(review): aes_string() is deprecated in current ggplot2 --
# consider aes(.data[[cat_x]], ...) when the package is upgraded).
# Returns a status string; the plots are emitted as a side effect.
plot.graphs_continious <- function(cat_x, cat_y, cat_color) {
  # print("start")
  print(cat_x)
  # print(cat_y)
  # print(cat_color)
  print("processing graphs. thank you for your patience... Please wait...")
  #1. stack categorical variable vs count
  plot(ggplot(data = loans.data) +
         geom_bar(aes_string(x = cat_x, fill = cat_color), alpha = 0.8,
                  stat = "count", position = "stack"))
  #2. fill categorical variable colored by another categorical values
  plot(ggplot(data = loans.data) +
         geom_bar(aes_string(x = cat_x, y = cat_y, fill = cat_color), alpha = 0.8,
                  stat = "identity", position = "fill"))
  #3. fill categorical variable vs agregated value, colored by another categorical values
  plot(ggplot(data = loans.data) +
         geom_bar(aes_string(x = cat_x, y = cat_y, fill = cat_color), alpha = 0.8,
                  stat = "identity", position = "stack"))
  result <- "successfuly ending"
  # print(result)
  return(result)
}
#category_collection <- unique(loans.data.var_categorical_summary$categorical_Variable_names)
# based on the earlier plots run plots
category_collection_ploting <- c("term", "grade", "sub_grade", "emp_length", "home_ownership", "verification_status",
                                 "issue_d", "loan_status", "purpose", "addr_state")
# FIX (all three loops): seq_along() instead of 1:length(), which misbehaves
# on empty collections.
#ploting categorical variables vs count colored by loan_status
for(j in seq_along(category_collection_ploting)){
  plot.graphs_continious(category_collection_ploting[j], "funded_amnt", "loan_status")
}
#ploting categorical variables vs count colored by purpose
for(j in seq_along(category_collection_ploting)){
  plot.graphs_continious(category_collection_ploting[j], "funded_amnt", "purpose")
}
#ploting categorical variables vs count colored by verification_status
for(j in seq_along(category_collection_ploting)){
  plot.graphs_continious(category_collection_ploting[j], "funded_amnt", "verification_status")
}
#--5.histogram graph on continious variable ------
#--5.1 on funded amount (actual amout that is funded) where is the risk to the financial institution-----
# Distribution of funded amount, split by loan status.
ggplot(data = loans.data) +
geom_freqpoly(aes(x=funded_amnt, color = loan_status)) +
# geom_histogram(aes(x=funded_amnt, fill = loan_status), binwidth=100,alpha = 0.7)
geom_histogram(aes(x=funded_amnt, fill = loan_status), binwidth=1000,alpha = 0.7)
#loans are geting rounded of to the neares 500 or 5000. spike at 5000, 10000, 15000 etc!!
ggplot(data = loans.data) +
geom_freqpoly(aes(x=funded_amnt, color = verification_status)) +
# geom_histogram(aes(x=funded_amnt, fill = loan_status), binwidth=100,alpha = 0.7)
geom_histogram(aes(x=funded_amnt, fill = verification_status), binwidth=1000,alpha = 0.7)
#theres a huge count of not verified once, faceting it by loan_status
ggplot(data = loans.data) +
geom_freqpoly(aes(x=funded_amnt, color = verification_status)) +
# geom_histogram(aes(x=funded_amnt, fill = loan_status), binwidth=100,alpha = 0.7)
geom_histogram(aes(x=funded_amnt, fill = verification_status), binwidth=1000,alpha = 0.7)+
facet_grid(.~ loan_status)
#calculating the actual charged off amount
#subtract net principle paid from principle
#
# NOTE(review): derieved_chargedoff_amnt was already created in section 1.3;
# this line recomputes the same column.
loans.data$derieved_chargedoff_amnt <- loans.data$funded_amnt - loans.data$total_rec_prncp
# NOTE(review): this keeps both "Charged Off" AND "Current" rows, not only
# charged-off loans -- confirm that is intended.
loans.data1 <- loans.data %>%
filter(loans.data$loan_status != "Fully Paid")
ggplot(data = loans.data1) +
geom_freqpoly(aes(x=derieved_chargedoff_amnt, color = verification_status)) +
# geom_histogram(aes(x=derieved_chargedoff_amnt, fill = loan_status), binwidth=100,alpha = 0.7)
geom_histogram(aes(x=derieved_chargedoff_amnt, fill = verification_status), binwidth=1000,alpha = 0.7)+
facet_grid(.~ loan_status)
#--5.2 funded amount frequency vs grade, funded amout frequency vs sub grades --------
funded_amnt_by_grade_vs_status <- loans.data %>%
select(funded_amnt, grade, loan_status) %>%
group_by(grade, loan_status) %>%
summarise(total_funded_amnt = sum(funded_amnt))
#View(funded_amnt_by_grade_vs_status)
ggplot(data = funded_amnt_by_grade_vs_status) +
geom_bar(aes(x = grade, y = total_funded_amnt, fill = loan_status), alpha = 0.8,
stat = "identity", position = "dodge")
#this is not the exat ammount charged off; this just shows the funded amount of fully paid,
#current and charged offin terms of actual amout at risk is in B, C, D, E
#ploting charged off amount againt subgrades
chargedoff_summary_data <- loans.data %>%
select(funded_amnt, derieved_chargedoff_amnt, sub_grade, loan_status) %>%
group_by(sub_grade, loan_status) %>%
summarise(total_funded_amnt = sum(funded_amnt),
total_derieved_chargedoff_amnt = as.integer(sum(derieved_chargedoff_amnt)),
# NOTE(review): despite its name this is (funded - chargedoff)/funded, i.e.
# the recovered percentage -- the per-loan derieved_chargedoff_per from
# section 1.3 is the opposite ratio; confirm which one is wanted.
derieved_chargedoff_per = (total_funded_amnt - total_derieved_chargedoff_amnt)* 100 / total_funded_amnt,
count=n())%>%
arrange(desc(total_derieved_chargedoff_amnt))
View(chargedoff_summary_data)
sum(chargedoff_summary_data$total_derieved_chargedoff_amnt)
sum(chargedoff_summary_data$total_funded_amnt)
str(chargedoff_summary_data$total_funded_amnt)
str(chargedoff_summary_data$total_derieved_chargedoff_amnt)
ggplot(data = chargedoff_summary_data) +
geom_bar(aes(x = sub_grade, y = total_derieved_chargedoff_amnt, fill = loan_status), alpha = 0.8,
stat = "identity", position = "dodge")
# Re-run the bivariate plots against the derived charge-off amount.
for(j in 1:length(category_collection_ploting)){
plot.graphs_continious(category_collection_ploting[j], "derieved_chargedoff_amnt", "verification_status")
}
#length(is.na(loans.data$derieved_chargedoff_amnt))
#--5.3----box plots for DTI, interest across categorical variables ----------
# Interest rate, revolving utilisation and DTI distributions by grade/sub-grade.
ggplot(data = loans.data) +
geom_boxplot(aes(x = grade, y = int_rate_conv, fill = grade), alpha=0.7)
ggplot(data = loans.data) +
geom_boxplot(aes(x = sub_grade, y = int_rate_conv, fill = sub_grade), alpha=0.7)
ggplot(data = loans.data) +
geom_boxplot(aes(x = grade, y = revol_util_conv, fill = grade), alpha=0.7)
ggplot(data = loans.data) +
geom_boxplot(aes(x = sub_grade, y = revol_util_conv, fill = sub_grade), alpha=0.7)
ggplot(data = loans.data) +
geom_boxplot(aes(x = grade, y = dti, fill = grade), alpha=0.7)
ggplot(data = loans.data) +
geom_boxplot(aes(x = sub_grade, y = dti, fill = sub_grade), alpha=0.7)
#--5.4 Scater plot---------
ggplot(data=loans.data)+
geom_point(aes(x = derieved_chargedoff_amnt, y = dti))
ggplot(data=loans.data)+
geom_point(aes(x = derieved_chargedoff_amnt, y = int_rate_conv))
ggplot(data=loans.data)+
geom_point(aes(x = dti, y = int_rate_conv, size=derieved_chargedoff_amnt, color = derieved_chargedoff_amnt,fill = derieved_chargedoff_amnt), alpha=0.6)+
geom_jitter(aes(x = dti, y = int_rate_conv), alpha = 0.5 , position="jitter")
ggplot(data=loans.data)+
geom_point(aes(x = funded_amnt, y = dti, size=derieved_chargedoff_amnt, color = derieved_chargedoff_amnt,fill = derieved_chargedoff_amnt), alpha=0.6)+
geom_jitter(aes(x = funded_amnt, y = dti), alpha = 0.5 , position="jitter")
# facet_grid(grade ~ verification_status)
ggplot(data=loans.data)+
geom_point(aes(x = funded_amnt, y = int_rate_conv, size=derieved_chargedoff_amnt, color = derieved_chargedoff_amnt,fill = derieved_chargedoff_amnt), alpha=0.6)+
geom_jitter(aes(x = funded_amnt, y = int_rate_conv), alpha = 0.5 , position="jitter")
# facet_grid(grade ~ verification_status)
ggplot(data=loans.data)+
geom_point(aes(x = funded_amnt, y = annual_inc, size=derieved_chargedoff_amnt, color = derieved_chargedoff_amnt,fill = derieved_chargedoff_amnt), alpha=0.6)+
geom_jitter(aes(x = funded_amnt, y = annual_inc), alpha = 0.5 , position="jitter")
# Focus on materially charged-off loans (>5% of principal lost) with annual
# income under 100k (or missing income).
tmp_graph_data <- loans.data %>%
filter(derieved_chargedoff_per > 5 & loan_status == "Charged Off" & (annual_inc < 100000 | is.na(annual_inc))) %>%
# filter(loan_status == "Charged Off") %>%
select(funded_amnt,
derieved_chargedoff_amnt,
annual_inc,
dti,
int_rate_conv,
revol_util_conv,
grade,
verification_status,
derieved_chargedoff_per)
ggplot(data=tmp_graph_data)+
geom_point(aes(x = funded_amnt, y = annual_inc, size=derieved_chargedoff_amnt, color = derieved_chargedoff_amnt,fill = derieved_chargedoff_amnt), alpha=0.6)+
geom_jitter(aes(x = funded_amnt, y = annual_inc), alpha = 0.5 , position="jitter")
ggplot(data=tmp_graph_data)+
geom_point(aes(x = funded_amnt, y = annual_inc, size=derieved_chargedoff_amnt, color = derieved_chargedoff_amnt,fill = derieved_chargedoff_amnt), alpha=0.6)+
geom_jitter(aes(x = funded_amnt, y = annual_inc), alpha = 0.5 , position="jitter") +
facet_grid(. ~ grade)
ggplot(data=tmp_graph_data)+
geom_point(aes(x = funded_amnt, y = annual_inc, size=derieved_chargedoff_per, color = derieved_chargedoff_per, fill = derieved_chargedoff_per), alpha=0.6)+
geom_jitter(aes(x = funded_amnt, y = annual_inc), alpha = 0.5 , position="jitter")
#dti vs revol_util %
ggplot(data=tmp_graph_data)+
geom_point(aes(x = dti, y = revol_util_conv, size=derieved_chargedoff_per, color = derieved_chargedoff_per, fill = derieved_chargedoff_per), alpha=0.6)+
geom_jitter(aes(x = dti, y = revol_util_conv), alpha = 0.5 , position="jitter")
#inference higher the dti and revol_util %, higher the chagrge off and charge off amount
ggplot(data=tmp_graph_data)+
geom_point(aes(x = dti, y = revol_util_conv, size=derieved_chargedoff_per, color = verification_status, fill = verification_status), alpha=0.6)+
geom_jitter(aes(x = dti, y = revol_util_conv), alpha = 0.5 , position="jitter")
#inference higher the dti and revol_util %, higher the chagrge off and charge off amount
ggplot(data=tmp_graph_data)+
geom_point(aes(x = dti, y = int_rate_conv, size=derieved_chargedoff_per, color = derieved_chargedoff_per, fill = derieved_chargedoff_per), alpha=0.6)+
geom_jitter(aes(x = dti, y = int_rate_conv), alpha = 0.5 , position="jitter")
ggplot(data=tmp_graph_data)+
geom_point(aes(x = dti, y = int_rate_conv, size=derieved_chargedoff_per, color = verification_status, fill = verification_status
), alpha=0.6)+
geom_jitter(aes(x = dti, y = int_rate_conv), alpha = 0.5 , position="jitter")
#by profession
# Aggregate funded / charged-off totals per (lower-cased) employer title.
tmp_graph_data <- loans.data %>%
select(funded_amnt,
derieved_chargedoff_amnt,
annual_inc,
dti,
int_rate_conv,
grade,
derieved_chargedoff_per,
emp_title) %>%
mutate(emp_title_conv = tolower(emp_title))%>%
group_by(emp_title_conv) %>%
summarise(sum(funded_amnt), sum(derieved_chargedoff_amnt),count=n()) %>%
arrange(desc(count))
#View(tmp_graph_data)
#--6. correlation matrix------------
# Numeric columns only, on the same charged-off subset used in section 5.4.
correlation_data <- loans.data %>%
# filter(loan_status == "Charged Off") %>%
filter(derieved_chargedoff_per > 5 & loan_status == "Charged Off" & (annual_inc < 100000 | is.na(annual_inc))) %>%
select(funded_amnt,
int_rate_conv,
revol_util_conv,
emp_length_conv,
# grade,
# home_ownership,
# purpose,
annual_inc,
dti,
derieved_chargedoff_amnt,
term_conv,
derieved_chargedoff_per)
#View(correlation_data)
# Pairwise Pearson correlations using complete cases only.
cor(correlation_data, method = "pearson", use = "complete.obs")
ggpairs(correlation_data)
#--7 writing dataframes to excel csv file for analysis in Tableau ----------
#--writing to csv file as xlsx write take hell lot of time, compromising writing to multile tabs in xlsx file. hence multiple csv's
write.csv(loans.data, "LOANS-R-OUTPUT.csv")
write.csv(loans.data.var_continious_summary, "loans.data.var_continious_summary.csv")
write.csv(loans.data.var_categorical_summary, "loans.data.var_categorical_summary.csv")
# Report wall-clock start and end times for the whole run.
print(start_time)
Sys.time()
print("Program ended, thank you!")
|
3e3362c4bfa362dc48711ffeac2e7cbe4f6b4b6b
|
988da62bee4a32aa9aaa4720926429a05539cba3
|
/code/Check_central_Australia.R
|
91acaf7239e524d43f69b33e3c4f6cb862be4015
|
[] |
no_license
|
mingkaijiang/Australia-precipitation-predictability
|
5acddd779e5e043d9d435703ddf5ddcd0564d42b
|
d42e2dc996c15d449d67ee9625b14d0aed0f93c3
|
refs/heads/master
| 2021-08-08T17:45:04.783533
| 2021-06-15T22:48:03
| 2021-06-15T22:48:03
| 129,040,038
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 928
|
r
|
Check_central_Australia.R
|
# Plot grid cells of the 0.05-degree Australian annual-rainfall product whose
# annual precipitation is at most 50 mm (check of the arid interior).
# Reads the pre-computed CSV and one ASCII grid (for the spatial extent),
# writes output/prec_less_than_50mm.pdf as a side effect; no return value.
Check_central_Australia <- function() {
    #### Read in 0.05 resolution gridded predictability data
    myDF <- read.csv("output/Australia_rainfall_annual_0.05_resolution.csv")
    ### prepare the finer resolution data (the grid header gives the extent)
    f <- read.ascii.grid("data/1980/rain_19800101.grid")
    ### Create grid info: cell-centre longitudes (x) and latitudes (y)
    x.list <- seq(f$header$xllcorner, f$header$xllcorner + (0.05 * (f$header$ncols-1)), by=0.05)
    y.list <- seq(f$header$yllcorner, f$header$yllcorner + (0.05 * (f$header$nrows-1)), by=0.05)
    nrows <- f$header$nrows
    ncols <- f$header$ncols
    ### Rows are assumed row-major (y varies slowest) -- TODO confirm against
    ### how the CSV was written.
    myDF$y <- rep(y.list, each=ncols)
    ### FIX: the original used rep(x.list, by=nrows); "by" is not an argument
    ### of rep() and was silently ignored, so the code relied on data.frame
    ### recycling to reach the right length. Make the repetition explicit.
    myDF$x <- rep(x.list, times=nrows)
    ### Extract grids with annual precipitation <= 50 mm
    test <- subset(myDF, annual_prec <= 50)
    ### plot (library() errors immediately if fields is missing, unlike
    ### require(), which only warns)
    library(fields)
    pdf("output/prec_less_than_50mm.pdf")
    quilt.plot(test$x, abs(test$y), test$annual_prec,
               nx=820, ny=660, nlevel=10)
    dev.off()
}
|
936bd69d7566905db7929497044c9d8a901fc831
|
70e4cdaf31689583d08cb8149eeeed8f7e97a54e
|
/Top 10 Web Pages Visited In A Website.R
|
19518d75609fd9419ad6d835d471fc9fb18ff072
|
[] |
no_license
|
cdevairakkam7/R-Projects
|
b16e832f9b0db605f966a62fc9eb1fbd2a45163f
|
a8269771de29e67d2ff1c41aa8039c97b701b474
|
refs/heads/master
| 2022-10-12T07:20:15.809701
| 2020-06-05T23:18:36
| 2020-06-05T23:18:36
| 269,787,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
Top 10 Web Pages Visited In A Website.R
|
# Loading A1-1_pages.csv
# NOTE(review): a Dropbox link ending in ?dl=0 usually serves an HTML preview
# page, not the raw CSV -- verify the download works (dl=1 is the
# direct-download form). sqldf and ggplot2 are assumed to be attached
# elsewhere in the session; they are not loaded here.
pages_dataset<-read.csv("https://www.dropbox.com/s/m8yjzsnwxs4ohbe/A1-1_pages.csv?dl=0",header =TRUE)
# Top 10 Visited pages
# Count page views per path via SQL and keep the 10 most visited.
top_10_visited_pages<-sqldf("SELECT count(path) as Count,Path from pages_dataset group by 2 order by 1 desc limit 10")
# Re-Ordering
# Fix the bar order by hard-coding the expected top-10 paths as factor levels.
# NOTE(review): paths not in this list become NA -- confirm the list still
# matches the current data.
top_10_visited_pages$path<-factor(top_10_visited_pages$path,levels = c("/","/category/food","/shop_all","/category/home-and-office","/category/beauty","/category/personal-care","/category/household-supplies","/category/health","/about","/checkout/email"))
# Top 10 pages visited in a ggplot
ggplot(top_10_visited_pages,aes(x=path,y=Count,label=Count))+labs(title="Top 10 pages visited",x="Path",y="# of times visited")+theme(axis.text.x = element_text(angle =45,vjust = 0.5))+geom_bar(stat="identity", width = 0.5, aes(fill=path))+geom_text(hjust=0.09,angle=45)+theme(text=element_text(size=10, family="Comic Sans MS"))
|
5227c15f8d9ad218aff62c4e831cbbc3d8aa4cf0
|
8de55d619d716b78e4f532e72811fb229a285841
|
/man/calculateTotal.Rd
|
e983042e4691cba66ca91235cec818ba6219f086
|
[] |
no_license
|
tbendsen/VIAreports
|
e9b99373586a2fe8bdd05585f76ee44fd73eccaf
|
2997d35368e03c803affaf7d12c69f10cf22279e
|
refs/heads/master
| 2020-09-10T17:10:39.841646
| 2019-11-14T19:39:05
| 2019-11-14T19:39:05
| 221,772,877
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 592
|
rd
|
calculateTotal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/termReport.R
\name{calculateTotal}
\alias{calculateTotal}
\title{calculateTotal}
\usage{
calculateTotal(roomuse, singleWeek = NA)
}
\arguments{
\item{roomuse}{RoomUse object}
\item{singleWeek}{if given, room utilization is calculated only for this week. However,
the number of rooms and the number of lessons are calculated from the entire dataset; the
number of days is calculated only for this week.}
}
\value{
fraction of periods used
}
\description{
Calculates total room utilization
}
\examples{
\dontrun{calculateTotal(roomuse)}
}
|
3d27a756240e43361cc14484ea7a14dda975a751
|
5c4b2b3e488d306151948f4ffdb648b54f0b4cee
|
/Table 4 code/HSregression_043020.R
|
927166abd4b6afb5ffa9f818a9fb687fc3796505
|
[] |
no_license
|
Anupreet-Porwal/Prelim-project
|
fb3e3b13fbe3f22f210b3b1465af91fa253c4c28
|
6c27e2f7cdfb38a1dcedb80757e824b102d40c16
|
refs/heads/master
| 2022-07-31T06:49:07.323089
| 2020-05-18T05:52:10
| 2020-05-18T05:52:10
| 264,840,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,310
|
r
|
HSregression_043020.R
|
library(truncdist)
library(mvtnorm)
# This paper implements the Horeshoe sampler discussed by
# Makalic, Enes, and Daniel F. Schmidt. "A simple sampler for the horseshoe estimator."
# IEEE Signal Processing Letters 23.1 (2015): 179-182.
# It uses the relation between Cauchy and Inverse gamma to make conditional distributions
# of all the parameters involved conjugate
# Another approach could be to do slice sampling which is implemented in another file
# HSnormalmean_041720_vAlt
# Gibbs sampler for the horseshoe linear regression model (the Makalic &
# Schmidt sampler described in the header comments above). The half-Cauchy
# priors are made conditionally conjugate via auxiliary inverse-gamma
# variables (nu, chi).
#
# X:    n x p design matrix
# y:    response vector of length n
# burn: number of burn-in iterations (discarded)
# nmc:  number of retained MCMC draws
# tau:  initial value of the global shrinkage parameter
#
# Returns a list with posterior means/medians and the full retained chains
# for Beta, Tau, Sigma2 and Lambda.
HS.regression <- function(X,y,burn=1000,nmc=5000,tau=1){
n <- length(y)
p <- ncol(X)
# Storage for retained draws (one row per post-burn-in iteration).
BetaSave = matrix(0, nmc, p)
LambdaSave = matrix(0, nmc, p)
TauSave = rep(0, nmc)
Sigma2Save = rep(0, nmc)
#Initialize
# NOTE(review): Beta is seeded with y (length n, not p) but is overwritten in
# the first Gibbs step before being read, so the odd initial value is harmless.
Beta = y
Tau = tau
Sigma2 = 0.95*stats::var(y)
Lambda = rep(1,ncol(X))
# Auxiliary inverse-gamma variables for the half-Cauchy priors on Lambda, Tau.
nu=1/rgamma(p, shape=1/2,rate=1)
chi=1/rgamma(1,shape=1/2,rate=1)
for(t in 1:(nmc+burn)){
if (t%%1000 == 0){print(t)}
# Update Beta
# Beta | rest ~ N(A^-1 X'y, Sigma2 * A^-1), A = X'X + diag(1/(Lambda^2 Tau^2)).
Lambda.star.inv=diag(p)*1/(Lambda^2*Tau^2)
A.inv=solve(t(X)%*%X+Lambda.star.inv)
Beta=t(rmvnorm(1, mean = A.inv%*%t(X)%*%y, sigma=A.inv*Sigma2))
#Update Sigma2
# Sigma2 | rest is inverse-gamma with shape (n+p)/2.
Sigma2.inv=rgamma(1,(n+p)/2,rate = 0.5*(sum((y-X%*%Beta)^2)+sum((Beta/Lambda)^2)/Tau^2))
Sigma2=1/Sigma2.inv
# Update Lambda
# Local shrinkage: each Lambda_j^2 | rest is inverse-gamma(1, b1_j).
b1=1/nu+Beta^2/(2*Tau^2*Sigma2)
Lambda=sqrt(1/rgamma(p, shape = 1, rate = b1))
# Update Tau
# Global shrinkage: Tau^2 | rest is inverse-gamma((p+1)/2, b2).
Theta=Beta/Lambda
b2=1/chi+sum(Theta^2)/(2*Sigma2)
Tau=sqrt(1/rgamma(1,(p+1)/2,b2))
# Update nu
nu=1/rgamma(p,1,1+1/Lambda^2)
# Update chi
chi=1/rgamma(1,1,1+1/Tau^2)
#Save results
# Keep draws only after burn-in.
if(t > burn){
BetaSave[t-burn, ] = Beta
TauSave[t-burn] = Tau
Sigma2Save[t-burn] = Sigma2
LambdaSave[t-burn, ] = Lambda
}
}
# Posterior point estimates from the retained draws.
BetaHat = colMeans(BetaSave)
BetaMedian = apply(BetaSave, 2, stats::median)
TauHat = mean(TauSave)
Sigma2Hat = mean(Sigma2Save)
result <- list("BetaHat" = BetaHat, "BetaMedian" = BetaMedian,
"Sigma2Hat" = Sigma2Hat,
"TauHat" = TauHat, "BetaSamples" = BetaSave,
"TauSamples" = TauSave, "Sigma2Samples" = Sigma2Save, "LambdaSamples"=LambdaSave)
return(result)
}
|
6b95e15940baba9f3486fc76b428ac0e95aa4376
|
cd7136449c4ea91fec7071948cce80da45d4244d
|
/man/swap.project.paths.Rd
|
a5553bcfa254771d216716cf5daa468da2b37c66
|
[] |
no_license
|
bokov/adapr
|
20428bb4c871b6ae02f3df3461d837513174d622
|
5af9860921a3e945a8ea4993d69e545c1cc5681e
|
refs/heads/master
| 2021-01-19T10:05:40.674036
| 2017-05-23T14:15:05
| 2017-05-23T14:15:05
| 87,824,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 645
|
rd
|
swap.project.paths.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swap_project_paths.R
\name{swap.project.paths}
\alias{swap.project.paths}
\title{Takes a list of dependency file data and changes the project path}
\usage{
swap.project.paths(list.deps,
new.path = get.project.path(get("source_info")$project.id))
}
\arguments{
\item{list.deps}{list of dependency file data}
\item{new.path}{file path for the new project path}
}
\value{
Updated list of dependency data
}
\description{
Takes a list of dependency file data and changes the project path
}
\details{
Not for direct use. Used when swapping branches by rework.project.path()
}
|
f48dd6abf7ef149c7fda49c098fdc2d0ef9df1cf
|
aea24dd12afe7bf9c93eaa0dc17c6744dc487d2a
|
/man/int_to_zip_str.Rd
|
7d64b1e6bcef0423f7c170a3c8dcbedcafd21910
|
[] |
no_license
|
NSAPH/rcehelp
|
9f6091e35072ac84a8a7a00cd15b959019cd6c99
|
06718559bf786ab11ce2fc473a5faf4b3a117c72
|
refs/heads/master
| 2020-03-22T20:35:01.226005
| 2019-04-22T18:26:53
| 2019-04-22T18:26:53
| 140,612,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 351
|
rd
|
int_to_zip_str.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_manipulation.R
\name{int_to_zip_str}
\alias{int_to_zip_str}
\title{Convert zips stored as ints to 5 digit strings}
\usage{
int_to_zip_str(zip)
}
\arguments{
\item{zip}{zipcode represented as an integer}
}
\description{
Convert zips stored as ints to 5 digit strings
}
|
5c41ca0ed8f312bdf5b788a22ef2ed0edeccddfe
|
22b4284758ec2bff26177271e9a099da21726fb1
|
/4.5 Adding Signatues Names.R
|
0af3672e080ae835ea57401fd716ba55841b9e8c
|
[] |
no_license
|
pmav99/SEC_Letters_Codes
|
38da3a9791f57729954b9526c3135c55dc22be3c
|
ce24117cdbe26a662190c0a77bacce9694733120
|
refs/heads/master
| 2021-05-03T14:38:58.649032
| 2017-07-24T22:16:54
| 2017-07-24T22:16:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,202
|
r
|
4.5 Adding Signatues Names.R
|
### load large file
require(data.table)
upload <- fread("./Projects/SEC Letter Project/Data After Review/upload_all.csv")
# Parse the "sign" field of each letter into signer/author names and map them
# to canonical codes from name_codes.
#
# upload:     data.table of letters; must have a "sign" column. It is
#             modified by reference (data.table `:=`) as well as returned.
# name_codes: data frame with a Code column and name variants in columns 2:5.
#
# Returns the (modified) upload table with who_wrote / who_authored /
# letter_sender / letter_author columns added. Unmatched names keep the
# literal string "NA" (not a real NA value).
find.names <- function(upload, name_codes)
{
  # Text before the word "for" is the signer; text after it (when present)
  # is the person the letter was signed on behalf of.
  upload[, who_wrote := gsub("\\bfor\\b.*", "", sign)]
  upload[, who_authored := "NA"]
  upload[grepl("\\bfor\\b", sign), who_authored := gsub(".*\\bfor\\b", "", sign)]
  upload[, `:=` (letter_author = "NA", letter_sender = "NA")]
  #name_codes <- read.csv(hand_names)
  # FIX: seq_along() instead of 1:length(); the latter iterates over c(1, 0)
  # when name_codes has no rows.
  for(i in seq_along(name_codes$Code))
  {
    print(i)
    # Columns 2:5 hold alternative spellings of this person's name; build a
    # regex matching any non-empty variant: "(v1)|(v2)|...".
    name_version <- name_codes[i,2:5]
    name_version <- name_version[!is.na(name_version) & name_version != ""]
    name_regex <- paste0(name_version, collapse = ")|(")
    name_regex <- paste0("(", name_regex, ")")
    ind <- grep(name_regex, upload$who_authored)
    upload$letter_author[ind] <- as.character(name_codes$Code[i])
    ind <- grep(name_regex, upload$who_wrote)
    upload$letter_sender[ind] <- as.character(name_codes$Code[i])
  }
  # Letters with no "for" clause: the signer is also the author.
  upload[letter_author == "NA", letter_author := letter_sender]
  return(upload)
}
require(xlsx)
names <- read.csv("./Projects/SEC Letter Project/Data After Review/Sorting SEC letters/Names and Offices.csv")
upload <- find.names(upload, names)
offices <- fread("./Projects/SEC Letter Project/Data After Review/Sorting SEC letters/All_CIKs.csv")
offices$AD_Office[offices$AD_Office == "2 & 3"] <- 23
upload$AD_Office <- offices$AD_Office[match(upload$CIK, as.numeric(as.character(offices$CIK)))]
tmp <- upload[upload$letter_author %in% names$Code[names$See.Comment == "Yes" ]]
tmp <- tmp[!letter_author %in% c("Andrew Mew", "Anne Nguyen Parker", "Cicely LaMothe",
"Gus Rodriguez", "Hugh West", "Jill Davis", "Joel Parker",
"Karen Garnett", "Kevin Vaughn", "Kyle Moffatt", "Mark Kronforst",
"Mark Shannon")]
require(lubridate)
tmp[, dates := mdy(dates)]
setkey(tmp, letter_author, dates)
tmp[, N := .N, by = letter_author]
tmp[, min_date := min(dates, na.rm = T), by = letter_author]
tmp[, max_date := max(dates, na.rm = T), by = letter_author]
write.csv(tmp, "tmp.csv", row.names = F)
|
da213a0f24ee2f77a01299cbb8cbfbf19678708d
|
ba35b3da4f99c0ee6d0955c47470fd7bc61af7cc
|
/Rotina Base Geral - Custo.R
|
70a008bd2aa90aba420915dca3602e13b3b60579
|
[] |
no_license
|
maguiiiar/projetos_unimed
|
9337009698cabb9069e0a09550383bea1fdc1a97
|
bf198eb42ee9b9755656bb7715c5b85410b7ec8a
|
refs/heads/master
| 2021-04-06T06:24:36.701981
| 2019-03-29T13:21:05
| 2019-03-29T13:21:05
| 125,254,839
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,545
|
r
|
Rotina Base Geral - Custo.R
|
#BASES GERAIS - ORNELAS
# Monthly cost bases from Jan 2014 through Apr 2017, all read with identical
# fread() settings.  The period list is generated programmatically instead of
# 40 copy-pasted fread lines.  Each monthly table is still assigned to its
# historical global name (basegeral201401, ..., basegeral201704) via assign(),
# so any downstream code referencing an individual month keeps working.
# Also fixes: TRUE/FALSE spelled out (not T/F) and the fread argument is
# `na.strings` (the original `na.string=` relied on partial matching).
periodos <- format(seq(as.Date("2014-01-01"), as.Date("2017-04-01"), by = "month"),
                   "%Y%m")
for (p in periodos) {
  assign(paste0("basegeral", p),
         fread(paste0("BaseCusto", p, ".txt"),
               header = TRUE, sep = "|", fill = TRUE, na.strings = "NA"))
}

# Stack every monthly base into a single table (same result as the original
# 40-argument bind_rows call; bind_rows accepts a list of data frames).
basegeral <- bind_rows(mget(paste0("basegeral", periodos)))
|
6cdfcd11fe57c4adfa2be41379501ebcdf2e4e35
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/irregulAR1/examples/ar1_prec_irregular.Rd.R
|
822002e2fe00da64bbffdcb768f3e49c0f54190a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 321
|
r
|
ar1_prec_irregular.Rd.R
|
library(irregulAR1)
### Name: ar1_prec_irregular
### Title: Precision matrix for a stationary Gaussian AR(1) process,
###   observed at irregularly spaced time points.
### Aliases: ar1_prec_irregular
### ** Examples

library(Matrix)
# Irregularly spaced observation times and AR(1) parameters for the example.
obs_times <- c(1, 4, 5, 7)
ar_coef <- 0.5
innov_sd <- 1
ar1_prec_irregular(obs_times, ar_coef, innov_sd)
|
d95ca9b2db602ff588c3bfce59695a692d5b1122
|
cee0adff8aaad5e20e69008e4c28e5b8543de90a
|
/BDA/visual4.R
|
b924ac25b5921ee446b4ff5b912e244607574f31
|
[] |
no_license
|
rohan-dhere/R-Project
|
d3fd096701e0919beecc3163f4468ff21a1d13ee
|
fefb2d638df0fa2edcfd58a1ab1c85c11def5d24
|
refs/heads/master
| 2020-08-10T13:15:32.469897
| 2019-10-16T07:55:07
| 2019-10-16T07:55:07
| 214,350,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
visual4.R
|
# Read the Pokemon stats table.  Columns are accessed explicitly with `$`
# below, so the original attach(pokemon) was redundant; attach() is removed
# because it pollutes the search path and can silently mask other objects.
pokemon <- read.csv("pokemon/Pokemon.csv")

# Scatter plot of total stat score against special defense.
x <- pokemon$Total
y <- pokemon$Special.Defense
plot(x, y, main = "Scatterplot Example", xlab = "Total ", ylab = "Special Defense ", pch = 19)
|
d85acfaee6120565e92db71a1452c258d6e4ff25
|
05434920dc36c9ff650d9dc030a4658998f80403
|
/CohortMaintenance.R
|
ae5429b04464062cc4b8901a5a6fe44d74a541d9
|
[] |
no_license
|
oxford-pharmacoepi/CoagulopathyInCovid19
|
edd5f3c86a0ea0108da3d32f06e931343f77bcb7
|
5aad6014bf1a19fffbe46fcf04b7adeb66e5efb8
|
refs/heads/main
| 2023-07-15T06:12:36.161076
| 2021-08-22T18:25:57
| 2021-08-22T18:25:57
| 386,275,756
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,405
|
r
|
CohortMaintenance.R
|
library(here)
library(stringr)

## Copy in sql for exposure cohorts from cohort diagnostics
# Path to the exposure cohort diagnostics package.
exposure.cohort.diag.path <- "/home/eburn/diagCovCoagExposures"

# Remove previously copied exposure cohort definitions.
unlink(paste0(here("Cohorts", "ExposureCohorts"), "/*"))

# Copy in the current exposure cohort SQL files.  file.copy() is vectorised
# over `from`, so no index loop is needed; unlike the original
# `for (i in 1:length(sqls))`, this is also safe when no files are found
# (1:length(x) yields c(1, 0) for an empty x).
sqls <- list.files(paste0(exposure.cohort.diag.path, "/inst/sql/sql_server"))
file.copy(from = paste0(exposure.cohort.diag.path, "/inst/sql/sql_server/", sqls),
          to = here("Cohorts", "ExposureCohorts"),
          overwrite = TRUE, recursive = FALSE,
          copy.mode = TRUE)

# Path to the outcome cohort diagnostics package.
outcome.cohort.diag.path <- "/home/eburn/CovCoagOutcomeDiagnostics-main/diagCovCoagOutcomes"

# Remove previously copied outcome cohort definitions.
unlink(paste0(here("Cohorts", "OutcomeCohorts"), "/*"))

# Bring in the current outcome cohort SQL files.
sqls <- list.files(paste0(outcome.cohort.diag.path, "/inst/sql/sql_server"))
# Drop hospitalisation cohorts.
sqls <- sqls[str_detect(sqls, "hosp", negate = TRUE)]
# For now, work with a subset of outcomes of interest.
sqls <- sqls[str_detect(sqls, paste("PE.sql", "DVT narrow.sql", "VTE narrow.sql", sep = "|"))]
file.copy(from = paste0(outcome.cohort.diag.path, "/inst/sql/sql_server/", sqls),
          to = here("Cohorts", "OutcomeCohorts"),
          overwrite = TRUE, recursive = FALSE,
          copy.mode = TRUE)
|
1226cf2dd096653dde55a3706a66246fd180c144
|
237bcbdc6b09c57b251191471359eeefb8014410
|
/forestPlot.r
|
4b4d065d4ed26d39751acd17b6fee1dcea2abfaa
|
[] |
no_license
|
achalneupane/rcodes
|
d2055b03ca70fcd687440e6262037507407ec7a5
|
98cbc1b65d85bbb6913eeffad62ad15ab9d2451a
|
refs/heads/master
| 2022-10-02T20:35:18.444003
| 2022-09-09T20:53:03
| 2022-09-09T20:53:03
| 106,714,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,367
|
r
|
forestPlot.r
|
#!/usr/bin/env Rscript
cat("Starting R\n")
"forestplot" <-
function(estimate, se,
labels=paste("Study", c(1:length(estimate))),
CI=0.95, xexp=FALSE, xlab=expression(beta), ylab="", ...) {
hoff <- 3
del <- 10
mea <- !is.na(estimate)
estimate <- estimate[mea]
se <- se[mea]
labels <- labels[mea]
w2 <- 1. / (se * se)
invsumw2 <- 1. / sum(w2)
mestimate <- sum(estimate * w2) * invsumw2
mse <- sqrt(invsumw2)
npop <- length(estimate)
estimate[npop+1] <- mestimate
se[npop+1] <- mse
labels[npop+1] <- "Pooled"
chi2 <- round(estimate * estimate / (se * se), 2)
p <- sprintf("%5.1e", pchisq(estimate * estimate / (se * se),
1, lower.tail=FALSE))
## p[as.numeric(p)<0] <- "<1.e-16"
if (CI > 1 || CI < 0) {
stop("CI argument should be between 0 and 1")
}
cimultip <- qnorm(1 - (1 - CI) / 2)
lower <- estimate - cimultip * se
upper <- estimate + cimultip * se
if (xexp) {
estimate <- exp(estimate)
lower <- exp(lower)
upper <- exp(upper)
}
cntr <- 0; if (xexp) cntr <- 1;
lbnd <- (-.1); if (xexp) lbnd <- 0.9
rbnd <- (.1); if (xexp) rbnd <- 1.1
minv <- min(lower)
minv <- minv - abs(minv / 10)
minv <- min(lbnd, minv)
maxv <- max(upper)
maxv <- maxv + abs(maxv / 10)
maxv <- max(rbnd, maxv)
hgt <- (length(estimate) + 1) * del
if (any(is.na(estimate))) stop("estimate contains NAs")
if (any(is.na(se))) stop("se contains NAs")
plot(x=c(cntr, cntr), y=c(0, hgt), xlim=c(minv, maxv),
ylim=c(0, hgt), type="l", lwd=2, lty=2,
xlab=xlab, ylab=ylab, yaxt='n', ...)
## Draw the bars for the individual studies
for (i in c(1:(length(estimate)-1))) {
points(x=c(lower[i], upper[i]), y=c((i) * del, (i) * del),
type="l", lwd=2)
points(x=c(estimate[i]), y=c((i) * del), pch=19, cex=1)
labeltext <- bquote(
.(labels[i]) ~ "(" * chi^2 ~ "=" ~ .(chi2[i]) * ","
~ italic(P) ~ "=" ~ .(p[i]) * ")"
)
text(estimate[i], i * del + 1, labeltext, pos=3, cex=.7)
}
## Draw diamond of the estimate
for (i in c(length(estimate))) {
points(x=c(lower[i], estimate[i]),
y=c((i) * del, (i) * del + hoff),
type="l", lwd=2)
points(x=c(estimate[i], upper[i]),
y=c((i) * del + hoff, (i) * del),
type="l", lwd=2)
points(x=c(upper[i], estimate[i]),
y=c((i) * del, (i) * del - hoff),
type="l", lwd=2)
points(x=c(lower[i], estimate[i]),
y=c((i) * del, (i) * del - hoff),
type="l", lwd=2)
labeltext <-bquote(
.(labels[i]) ~ "(" * chi^2 ~ "=" ~ .(chi2[i]) * ","
~ italic(P) ~ "=" ~ .(p[i]) * ")"
)
text(estimate[i], i * del + 5, labeltext, pos=3, cex=1)
}
}
|
da2e5748998e07dfd61061948b738f494a5c7f36
|
520b7ee4b967adab4aeb39f5a948889b13f5518d
|
/man/subBoot.Rd
|
0253eb60dddb56c9d606384540fd78e775164d6f
|
[] |
no_license
|
jknowles/merTools
|
d0e178a03b4da0af8ce133cf0c3b45d50741b8b6
|
178248e8ebf5eacb44c8a7e75951c65071678449
|
refs/heads/master
| 2023-04-07T11:31:45.834722
| 2023-03-20T16:44:14
| 2023-03-20T16:44:14
| 34,033,193
| 111
| 25
| null | 2023-03-16T20:52:29
| 2015-04-16T03:53:06
|
R
|
UTF-8
|
R
| false
| true
| 1,209
|
rd
|
subBoot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subBoot.R
\name{subBoot}
\alias{subBoot}
\title{Bootstrap a subset of an lme4 model}
\usage{
subBoot(merMod, n = NULL, FUN, R = 100, seed = NULL, warn = FALSE)
}
\arguments{
\item{merMod}{a valid merMod object}
\item{n}{the number of rows to sample from the original data
in the merMod object, by default will resample the entire model frame}
\item{FUN}{the function to apply to each bootstrapped model}
\item{R}{the number of bootstrap replicates, default is 100}
\item{seed}{numeric, optional argument to set seed for simulations}
\item{warn}{logical, if TRUE, warnings from lmer will be issued, otherwise they will be suppressed
default is FALSE}
}
\value{
a data.frame of parameters extracted from each of the R replications.
The original values are appended to the top of the matrix.
}
\description{
Bootstrap a subset of an lme4 model
}
\details{
This function allows users to estimate parameters of a
large merMod object using bootstraps on a subset of the data.
}
\examples{
\donttest{
(fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy))
resultMatrix <- subBoot(fm1, n = 160, FUN = thetaExtract, R = 20)
}
}
|
03ec8fb9deb9c356e45ed80b874aa64acf2807f9
|
e61a93f373ebb49f137783dbcc441b945c1c6d55
|
/3_Technical_change/1_Housing/4_2_Achat_2010_2024.R
|
3e6e38258643120eeac0947bdb66bc6819000d1e
|
[] |
no_license
|
eravigne/matisse
|
5eb2d9437d5e28542396370301737f2fa6846684
|
c6767f9495e4ad43d07c969b1ec5ee0362d95636
|
refs/heads/master
| 2021-03-18T07:02:01.002602
| 2020-03-15T15:53:35
| 2020-03-15T15:53:35
| 247,055,303
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,267
|
r
|
4_2_Achat_2010_2024.R
|
# New constructions between 2010 and 2024:
# - selection of the purchasing households
# - update of their budgets
# LIBRARIES ---------------------------------------------------------------
library(tidyverse)
library(dplyr)
# DATA --------------------------------------------------------------------
# NOTE(review): setwd() makes every path below relative to this directory;
# fragile outside the original machine.
setwd("D:/CIRED/Projet_Ademe")
# load("2025/Mat_gain_ener_2025.RData")
# load("2025/menage_DPE_neuf_2025.RData")
# `scenario`, `horizon`, `scenario_classement` and `redistribution` are
# expected to be defined by the calling script before this file is sourced.
load(paste(scenario,"/",horizon,"/",scenario_classement,"/",redistribution,"/Technical_change","/menage_DPE_neuf_",horizon,".RData",sep=""))
load(paste(scenario,"/",horizon,"/",scenario_classement,"/",redistribution,"/Technical_change","/menage_echelle_41.RData",sep=""))
# load("Technical_change/TC_renovation_DPE/menage_echelle_41.RData")
load("2010/depmen.RData")
load("2010/auto.RData")
# load("2025/menage_ener_dom_2025.RData")
# Working copy of the household base produced by step 4.1.
menage_echelle<-menage_echelle_41
# load("2025/c13_2025.RData")
load(paste(scenario,"/",horizon,"/",scenario_classement,"/",redistribution,"/Technical_change","/ident_accedants.RData",sep=""))
load(paste(scenario,"/",horizon,"/",scenario_classement,"/",redistribution,"/","Iteration_0/Input/FC_2010_",horizon,".RData",sep=""))
load("Donnees_brutes/Sorties ThreeMe/ThreeME.RData")
# Shared helper functions for the ADEME pipeline.
# NOTE(review): mutate_when.R is sourced twice (harmless but redundant).
source("Code_global_Ademe/mutate_when.R")
source("Code_global_Ademe/compute_share.R")
source("Code_global_Ademe/compute_share_export.R")
source("Code_global_Ademe/compute_savings_rate_export.R")
source("Code_global_Ademe/mutate_when.R")
source("Code_global_Ademe/verif_epargne_taux.R")
source("Code_global_Ademe/maj_dep_preeng.R")
source("Technical_change/Repayment.R")
load("Technical_change/TC_renovation_DPE/list_source_usage.RData")
sources=c("Elec","Gaz","Fuel","GPL","Urbain","Solides")
dep_sources=paste("dep",sources,sep="_")
list_dep=c("agriculture",
"dep_Elec",
"dep_Gaz",
"dep_GPL",
"dep_Fuel",
"dep_Urbain",
"dep_Solides",
"BTP",
"prod_veh",
"carb_lubr",
"transp_rail_air",
"transp_routes_eau",
"loisirs_com",
"autres_services",
"autres",
"loyers",
"veh_occasion",
"Hors_budget")
###
# CHOIX PESSIMISTE vs OPTIMISTE
###
# scenario="PESSIMISTE"
# scenario="OPTIMISTE"
# scenario="MEDIAN"
# scenario="RICH"
# scenario="POOR"
# print(paste("SCENARIO", scenario,sep=" "))
# DATA ThreeME ------------------------------------------------------------
# VOLUME CONSTRUCTION NEUF ------------------------------------------------
# New-build volumes (in m2) by final DPE class, for years 2010..horizon-1.

# Extract one ThreeME series as a (year, value) table restricted to
# 2010 <= year < horizon.  Factors out the three identical pipelines that
# differed only by the `Var` filter.  Reads globals `ThreeME` and `horizon`.
extract_newbuild <- function(var_name) {
  ThreeME %>%
    filter(year < horizon & year >= 2010) %>%
    filter(Var == var_name) %>%
    select(year, value)
}

NEWBUIL_H01_CA_2 <- extract_newbuild("NEWBUIL_H01_CA_2")
NEWBUIL_H01_CB_2 <- extract_newbuild("NEWBUIL_H01_CB_2")
NEWBUIL_H01_CC_2 <- extract_newbuild("NEWBUIL_H01_CC_2")
# SELECTION MENAGE --------------------------------------------------------
# # Exclusion des ménages accédants en horizon, on veut que les budgets réflètent un achat entre 2011 et 2024 donc sans dep c13711
# ident_accedants <- ident
# c13_horizon %>% filter(c13711>0) %>% select(ident_men,c13711)
# 174 ménages
# Attach housing tenure and credit variables from depmen.
menage_echelle <-
  menage_echelle %>%
  left_join(depmen %>% select(ident_men, stalog, ancons, prixrp_d, remb, totpre_d,
                              mcred1_d, mcred2_d, mcred3_d, mcred4_d, mcred5_d,
                              mcred6_d, mcred7_d, mcred8_d, mcred9_d),
            by = "ident_men")

# Missing credit instalments / pre-engaged totals mean "none": impute 0.
# (Replaces nine copy-pasted `x[which(is.na(x))] <- 0` lines.)
mcred_cols <- paste0("mcred", 1:9, "_d")
for (col in c(mcred_cols, "totpre_d")) {
  menage_echelle[[col]][is.na(menage_echelle[[col]])] <- 0
}

# Total credit repayments, inflated to horizon prices (FC$A12); pre-engaged
# spending inflated by FC$A05.
menage_echelle <-
  menage_echelle %>%
  mutate(mcred_tot = (mcred1_d + mcred2_d + mcred3_d + mcred4_d + mcred5_d +
                        mcred6_d + mcred7_d + mcred8_d + mcred9_d) * as.numeric(FC$A12)) %>%
  mutate(totpre_d = totpre_d * as.numeric(FC$A05))

# Solvency ratio: (credit + pre-engaged) / disposable income; 999 flags
# households with RDB == 0 (always excluded by the 0.28 threshold later).
menage_echelle <- menage_echelle %>%
  mutate(solv = ifelse(RDB == 0, 999, (mcred_tot + totpre_d) / RDB)) %>%
  select(-c(totpre_d, mcred1_d, mcred2_d, mcred3_d, mcred4_d, mcred5_d,
            mcred6_d, mcred7_d, mcred8_d, mcred9_d))
# Flag households excluded from the new-build selection:
# already flagged new at horizon, already first-time buyers, DPE class A,
# non-owner-occupiers (stalog > 2), or insolvent (solv > 0.28).
# NOTE(review): `solde_int_prov=0` appears twice in the mutate below; the
# second occurrence was presumably meant to be `solde_princ_prov=0` — left
# unchanged here (solde_princ_prov is created later in the year loop).
menage_echelle<-
menage_echelle %>%
mutate(exclus=FALSE,NEUF=FALSE) %>%
mutate_when(year_neuf==horizon,list(exclus=TRUE),
ident_men %in% ident_accedants,list(exclus=TRUE),
DPE_dep=="A",list(exclus=TRUE),
stalog>2,list(exclus=TRUE)) %>%
mutate_when(!year_neuf==horizon, list(classe_arr=DPE_dep))%>%
mutate(solde_int=0,solde_ener=0,principal_dette=0,solde_princ=0,solde_int_prov=0,solde_int_prov=0)%>%
mutate_when(solv>0.28,list(exclus=TRUE))%>%
mutate_when(ident_men==8063,list(exclus=TRUE))%>% # too-fragile household that creates NAs
mutate_when(ident_men==10583,list(exclus=TRUE)) # (household that crashes in AME 2025 Pess decile)
# Re-attach the raw (non-inflated) pre-engaged total for the loan helpers.
menage_echelle <-
menage_echelle %>%
left_join(depmen%>%select(ident_men,totpre_d),by="ident_men")
rm(depmen)
# rm(menage_echelle_41)
rm(menage_DPE_neuf_horizon)
# rm(menage_ener_dom_horizon)
# NEUF will flag the households selected to move into a new dwelling:
# they go from DPE_pred to classe_arr.
# Precision d'utiliser mutate de dplyr et pas de plyr
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
menage_echelle<- menage_echelle %>% mutate_when(is.na(ener_dom),list(ener_dom=0))
menage_echelle<-
menage_echelle %>%
dplyr::mutate(kWh_rank_opt =row_number(-ener_dom))
menage_echelle <-
menage_echelle %>%
dplyr::mutate(kWh_rank_pess =max(kWh_rank_opt,na.rm=T)-kWh_rank_opt+1)
menage_echelle<-
menage_echelle %>%
dplyr::mutate(kWh_rank_med =kWh_rank_pess-kWh_rank_opt) %>%
mutate(L=max(kWh_rank_opt,na.rm=T)) %>%
mutate_when(
kWh_rank_med<0,
list(
kWh_rank_med = ifelse(
is.wholenumber(L/2),
-kWh_rank_med+1,
-kWh_rank_med-1)
)
) %>%
select(-L)
menage_echelle <-
menage_echelle %>%
dplyr::mutate(kWh_rank_rich=row_number(-RDB/coeffuc))
menage_echelle <-
menage_echelle %>%
dplyr::mutate(kWh_rank_poor=max(kWh_rank_rich)-kWh_rank_rich+1)
if(str_detect(scenario_classement,"Pessimiste")){
menage_echelle <- menage_echelle %>% mutate(kWh_rank=kWh_rank_pess)
}
if(str_detect(scenario_classement,"Optimiste")){
menage_echelle <- menage_echelle %>% mutate(kWh_rank=kWh_rank_opt)
}
if(scenario_classement=="Median"){
menage_echelle <- menage_echelle %>% mutate(kWh_rank=kWh_rank_med)
}
if(scenario_classement=="Rich"){
menage_echelle <- menage_echelle %>% mutate(kWh_rank=kWh_rank_rich)
}
if(scenario_classement=="Poor"){
menage_echelle <- menage_echelle %>% mutate(kWh_rank=kWh_rank_poor)
}
# HOUSEHOLD SELECTION ------------------------------------------------------
# Excluded households are taken out of the ranking (rank 0 is never matched).
menage_echelle <-
menage_echelle %>%
mutate_when(exclus,list(kWh_rank=0))
# YEAR BY YEAR
# important so that households can undergo several REHABs
ident_rehab=data.frame("Year"=c(),"list_ident"=c())
A1<-menage_echelle
ident_r<-c()
# Main loop: for each year, draw households into new builds (classes A/B/C)
# until the ThreeME new-build surface for that year is exhausted, then apply
# the energy gain and loan-cost consequences.
for (Y in 2011:(horizon-1)){
# for (Y in 2010:2023){
print(Y)
ident_r<-c()
menage_echelle <- menage_echelle %>% mutate(principal_dette=0)
# Mat_gain_ener -----------------------------------------------------------
# Extract average consumption per m2 in kWh by DPE class for year Y.
conso_moy_dep=data.frame("A"=0, "B"=0, "C"=0, "D"=0, "E"=0, "F"=0, "G"=0)
for (i in LETTERS[1:7]){
conso_moy_dep[i]<-
as.numeric(
ThreeME %>%
filter(Var==
paste("ENER_BUIL_H01_C",i,"_2*11630/BUIL_H01_C",i,"_2",sep="")
) %>%
filter(year==Y) %>%
select(value)
)
}
# Relative energy gain when moving from DPE_before to DPE_after
# (negative value = consumption decrease).
Mat_gain_ener<-data.frame("DPE_before"=sort(rep(LETTERS[1:7],7)),"DPE_after"=rep(LETTERS[1:7],7))
Mat_gain_ener$value_after<-sapply(Mat_gain_ener$DPE_after,function(x) as.numeric(conso_moy_dep[x]))
Mat_gain_ener$value_before<-sapply(Mat_gain_ener$DPE_before,function(x) as.numeric(conso_moy_dep[x]))
Mat_gain_ener$value<-(Mat_gain_ener$value_after-Mat_gain_ener$value_before)/Mat_gain_ener$value_before
Mat_gain_ener <- Mat_gain_ener %>% select(-c(value_after,value_before))
# THREEME DATA -------------------------------------------------------------
# energy renovation work in volume per class jump (in M2)
# transition from L to M
# Spending on new constructions in value (current M-euros), year Y.
PNEWBUIL_H01_2_NEWBUIL_H01_2_Y <-
as.numeric(
ThreeME %>%
filter(Var=="PNEWBUIL_H01_2*NEWBUIL_H01_2") %>%
filter(year==Y) %>%
select(value)
)*10^6
NEWBUIL_H01_2_Y<-
as.numeric(
ThreeME %>%
filter(Var=="NEWBUIL_H01_2") %>%
filter(year==Y) %>%
select(value)
)
NEWBUIL_H01_2_2010<-
as.numeric(
ThreeME %>%
filter(Var=="NEWBUIL_H01_2") %>%
filter(year==2010) %>%
select(value)
)
# Spending on new constructions in value (current M-euros), 2010.
PNEWBUIL_H01_2_NEWBUIL_H01_2_2010 <-
as.numeric(
ThreeME %>%
filter(Var=="PNEWBUIL_H01_2*NEWBUIL_H01_2") %>%
filter(year==2010) %>%
select(value)
)*10^6
# Price of one m2 of new housing in year Y.
PNEWBUIL_H01_2_Y<-
PNEWBUIL_H01_2_NEWBUIL_H01_2_Y /
NEWBUIL_H01_2_Y
# Price of one m2 of new housing in 2010.
PNEWBUIL_H01_2_2010<-
PNEWBUIL_H01_2_NEWBUIL_H01_2_2010 /
NEWBUIL_H01_2_2010
# Ratio of the price of one new m2 between year Y and 2010.
ratio_prix_m2<-PNEWBUIL_H01_2_Y/PNEWBUIL_H01_2_2010
# Repayment rate of new-build loans.
R_RMBS_NEWBUIL_H01_CA<-
as.numeric(
ThreeME %>%
filter(Var=="R_RMBS_NEWBUIL_H01_CA") %>%
filter(year==Y) %>%
select(value)
)
# Interest rate on loans tied to new construction (%).
R_I_BUIL_H01_CG_2<-
as.numeric(
ThreeME %>%
filter(Var=="R_I_BUIL_H01_CG_2") %>%
filter(year==Y) %>%
select(value)
)
# SWITCHOVER ---------------------------------------------------------------
# Walk down the ranking, accumulating weighted floor area, until the
# new-build surface for class `arr` and year Y is filled.
for (arr in LETTERS[1:3]){
if(arr=="A"){stock_m2_trans=NEWBUIL_H01_CA_2 %>% filter(year==Y)%>%select(value)}
if(arr=="B"){
stock_m2_trans=NEWBUIL_H01_CB_2 %>% filter(year==Y)%>%select(value)
menage_echelle <-
menage_echelle %>%
mutate_when(DPE_dep=="B",list(kWh_rank=0))
}
if(arr=="C"){
stock_m2_trans=NEWBUIL_H01_CC_2 %>% filter(year==Y)%>%select(value)
menage_echelle <-
menage_echelle %>%
mutate_when(DPE_dep=="C",list(kWh_rank=0))
}
sum=0
i=1
# Skip ranks already removed from the pool (set to 0 above).
while(!i %in% menage_echelle$kWh_rank){i=i+1}
while(sum<stock_m2_trans){
sum =
sum +
as.numeric(menage_echelle %>% filter(kWh_rank==i) %>% summarise(sum(pondmen*surfhab_d)))
# identifier of the selected household
im<-as.numeric(menage_echelle %>% filter(kWh_rank==i) %>% select(ident_men))
# print(im)
ident_r<-c(ident_r,im)
# Update NEUF and classe_arr for this household in the global base.
menage_echelle<- menage_echelle %>%
mutate_when(ident_men==im,list(NEUF=TRUE,classe_arr=arr,year_neuf=Y,kWh_rank=0))
# Iterate; selected households disappear from the ranking, so some ranks
# no longer exist and must be skipped.
i=i+1
while(!i %in% menage_echelle$kWh_rank){i=i+1}
}
}
# Apply the energy gain and loan schedule to every household that moved
# from class `dep` to class `arr` this year.
for (dep in LETTERS[1:7]){
for (arr in LETTERS[1:7]){
rate_gain_ener<-as.numeric(
Mat_gain_ener %>%
filter(DPE_before==dep) %>%
filter(DPE_after==arr) %>%
select(value))
# print(rate_gain_ener)
if(dim(menage_echelle %>% filter(year_neuf==Y & DPE_dep==dep & classe_arr==arr) %>% select(ident_men))[1]>0){
menage_echelle <-
menage_echelle %>%
mutate_when(
# Condition
year_neuf==Y &
DPE_dep==dep &
classe_arr==arr,
# Action
list(
principal_dette=ifelse(is.na(prixrp_d) || prixrp_d<10^5,PNEWBUIL_H01_2_Y*surfhab_d,prixrp_d*ratio_prix_m2),# for safety only count the extra cost, to be sure not to double-count any surcharge
#Energie
Elec_ECS=Elec_ECS*(1+rate_gain_ener),
Gaz_ECS=Gaz_ECS*(1+rate_gain_ener),
GPL_ECS=GPL_ECS*(1+rate_gain_ener),
Fuel_ECS=Fuel_ECS*(1+rate_gain_ener),
Solides_ECS=Solides_ECS*(1+rate_gain_ener),
Urbain_ECS=Urbain_ECS*(1+rate_gain_ener),
Elec_chauff=Elec_chauff*(1+rate_gain_ener),
Gaz_chauff=Gaz_chauff*(1+rate_gain_ener),
GPL_chauff=GPL_chauff*(1+rate_gain_ener),
Fuel_chauff=Fuel_chauff*(1+rate_gain_ener),
Solides_chauff=Solides_chauff*(1+rate_gain_ener),
Urbain_chauff=Urbain_chauff*(1+rate_gain_ener),
Elec_clim=Elec_clim*(1+rate_gain_ener)
# ,
# Gaz_clim=Gaz_clim*(1+rate_gain_ener),
# GPL_clim=GPL_clim*(1+rate_gain_ener),
# Fuel_clim=Fuel_clim*(1+rate_gain_ener),
# Solides_clim=Solides_clim*(1+rate_gain_ener),
# Urbain_clim=Urbain_clim*(1+rate_gain_ener)
))
# Interest (solde_int_prov) and principal (solde_princ_prov) due at
# horizon for the new-build loan, net of 2/3 resp. 1/3 of the
# pre-engaged repayments (int_princ comes from Repayment.R).
menage_echelle$solde_int_prov <- sapply(menage_echelle$principal_dette, function(X) ifelse(X==0,0,as.numeric(int_princ(loan=X, n=1/R_RMBS_NEWBUIL_H01_CA,
year_purchase = Y,
horizon=horizon,
i=R_I_BUIL_H01_CG_2,
pf=1)[1])-2/3*if(is.na(menage_echelle$totpre_d)){0}else{menage_echelle$totpre_d*ratio_prix_m2}))
menage_echelle$solde_princ_prov<-sapply(menage_echelle$principal_dette, function(X) ifelse(X==0,0,as.numeric(int_princ(loan=X,
n=1/R_RMBS_NEWBUIL_H01_CA,
year_purchase = Y,
horizon=horizon,
i=R_I_BUIL_H01_CG_2,
pf=1
)[2])-1/3*if(is.na(menage_echelle$totpre_d)){0} else{menage_echelle$totpre_d*ratio_prix_m2}))
menage_echelle <-
menage_echelle %>%
mutate_when(year_neuf==Y,list(solde_int=solde_int_prov,solde_princ=solde_princ_prov))
}
}
}
# print(compute_share_export(menage_echelle))
}
rm(i,sum,stock_m2_trans)
menage_echelle <- menage_echelle %>% select(-solde_int_prov,-solde_princ_prov)
# Checkpoint copy before the budget re-balancing below.
sauv_int<-menage_echelle
# menage_echelle<-sauv_int
# SOLDE_ENER --------------------------------------------------------------
# Update the per-carrier totals from the per-usage columns.
menage_echelle<-
menage_echelle %>%
mutate(
Elec=rowSums(menage_echelle %>% select(list_source_usage) %>% select(starts_with("Elec"))),
Gaz=rowSums(menage_echelle %>% select(list_source_usage) %>% select(starts_with("Gaz"))),
GPL=rowSums(menage_echelle %>% select(list_source_usage) %>% select(starts_with("GPL"))),
Fuel=rowSums(menage_echelle %>% select(list_source_usage) %>% select(starts_with("Fuel"))),
Urbain=rowSums(menage_echelle %>% select(list_source_usage) %>% select(starts_with("Urbain"))),
Solides=rowSums(menage_echelle %>% select(list_source_usage) %>% select(starts_with("Solides")))
)
# Because of the merge, `sources` and `dep_sources` are redundant; updating
# `sources` lets us deduce the balance over all energy carriers.
menage_echelle$solde_ener<-
rowSums(menage_echelle[sources]) -
rowSums(menage_echelle[dep_sources])
A<-menage_echelle %>% filter(abs(solde_ener)>10^(-9))%>% select(ident_men)
# NOTE(review): the next pipeline's result is discarded (sanity check left
# over from interactive debugging).
menage_echelle %>% filter(NEUF) %>% filter(!year_neuf==horizon) %>% filter(!ident_men %in% A$ident_men) %>% select(ident_men)
# 5797
# View(rbind(menage_echelle))
# Propagate the updated carrier totals to the expenditure columns.
menage_echelle<-
menage_echelle %>%
mutate(
dep_Elec=Elec,
dep_Gaz=Gaz,
dep_GPL=GPL,
dep_Fuel=Fuel,
dep_Solides=Solides,
dep_Urbain=Urbain)
menage_echelle$dep_energie=rowSums(menage_echelle[dep_sources])
# Dwelling energy spending = hot water + heating + electric cooling.
menage_echelle$dep_energie_logement=rowSums(menage_echelle[
c("Elec_ECS","Gaz_ECS","GPL_ECS","Fuel_ECS","Solides_ECS","Urbain_ECS","Elec_chauff","Gaz_chauff",
"GPL_chauff","Fuel_chauff","Solides_chauff","Urbain_chauff","Elec_clim")])
# SOLDE_DETTE -------------------------------------------------------------
# Budget balance to be ventilated: energy savings plus interest due
# (the principal is handled off-budget below).
solde<-menage_echelle %>%
mutate(solde=solde_ener+solde_int
# +solde_princ
) %>%
select(ident_men,solde)
#
# menage_echelle %>%left_join(solde, by="ident_men")%>%filter(solde>RDB_reel) %>%select(ident_men)
# menage_echelle %>%left_join(solde, by="ident_men")%>%filter(solde>Rcons) %>%select(ident_men)
#
# df<-menage_echelle%>%select(starts_with("elast_rev"))
# df$max <- apply(df, 1, max)
# menage_echelle$elast_rev_max<-df$max
# menage_echelle %>%
#  left_join(solde, by="ident_men")%>%
#  filter(solde>(RDB_reel/elast_rev_max)) %>%select(ident_men)
# Interest flows into "autres_services"; the principal goes off-budget.
menage_echelle <-
menage_echelle %>%
mutate(autres_services=autres_services+solde_int,
solde_int_total=solde_int_total+solde_int,
solde_princ_total=solde_princ_total+solde_princ,
Hors_budget=Hors_budget+solde_princ)
A1<-menage_echelle
# VENTILATION -------------------------------------------------------------
# Ventil_solde() redistributes each household's balance across the budget
# items; it is defined in the sourced econometrics script and (presumably)
# leaves its result in `A` — TODO confirm against Econometrie_solde_budg_Logement.R.
source("Technical_change/Econometrie_solde_budg_Logement.R")
# source("Technical_change/Econometrie_solde_budg_bouclage_autres.R")
Ventil_solde(solde,menage_echelle)
menage_echelle <- A
# %>%
#  mutate(autres=autres+solde_int,Hors_budget=Hors_budget+solde_princ)
# Recompute every impacted variable: Rcons, savings, ratio_S, RDB.
# Rcons
menage_echelle$Rcons <-
rowSums(menage_echelle[list_dep])
# Budget shares
for (k in list_dep){
menage_echelle[paste("part",k,sep="_")]<-menage_echelle[k]/menage_echelle$Rcons
}
# Savings
menage_echelle$epargne <-
menage_echelle$RDB -
menage_echelle$Rcons +
menage_echelle$rev_exceptionnel
# Ratio_S
menage_echelle$ratio_S <-
menage_echelle$epargne /
menage_echelle$Rcons
# Savings rate (0 when disposable income is 0, to avoid division by zero)
menage_echelle$taux_epargne<- ifelse(menage_echelle$RDB==0,0,
menage_echelle$epargne /
menage_echelle$RDB)
# Recompute domestic energy per m2 and re-attach the per-usage columns.
source("Technical_change/TC_renovation_DPE/calc_energie_kWh_m2.R")
energie_dom_surf(menage_echelle)
menage_echelle<-
menage_echelle %>%
select(-ener_dom_surf,-ener_dom) %>%
left_join(dep_source_usage,by="ident_men")
menage_echelle <- menage_echelle %>% mutate_when(year_neuf>0,list(NEUF=TRUE))
A2<-menage_echelle %>% select(-kWh_rank_pess,-kWh_rank_opt,-kWh_rank,-solde_dette,-solde_ener)
# TOWARDS THE NEXT STEP ----------------------------------------------------
# ident_rehab=cbind(ident_rehab,c(Y,menage_echelle%>%filter(REHAB)%>%select(ident_men)))
# SAVE --------------------------------------------------------------------
menage_echelle_42<-menage_echelle
# %>% mutate(DPE_2024=DPE_dep) %>% select(-stalog,-propri,-REHAB,-DPE_dep,-classe_arr ,-kWh_rank_pess,-kWh_rank_opt,-kWh_rank,-REHAB,-classe_arr)
# load("Technical_change/TC_renovation_DPE/menage_echelle_42.RData")
# Budget shares ------------------------------------------------------------
print(compute_share_export(menage_echelle_42))
print(compute_savings_rate_export(menage_echelle_42))
# Maj_dep_preeng ----------------------------------------------------------
# Update pre-engaged spending relative to the step 4.1 base.
menage_echelle_42 <- maj_dep_preeng(bdd1= menage_echelle_41,bdd2=menage_echelle_42)
# SAVE --------------------------------------------------------------------
load(paste(scenario,"/",horizon,"/",scenario_classement,"/",redistribution,"/Technical_change","/menage_echelle_41.RData",sep=""))
#
# inter<-intersect(colnames(menage_echelle_42), colnames(menage_echelle_41))
# not_inter<-setdiff(colnames(menage_echelle_42), colnames(menage_echelle_41))
# menage_echelle_42<-menage_echelle_42 %>% select(inter)
save(menage_echelle_42, file=paste(scenario,"/",horizon,"/",scenario_classement,"/",redistribution,"/Technical_change","/menage_echelle_42.RData",sep=""))
#
#
#
#
#
#
#
# # Suppression des bases ---------------------------------------------------
#
# rm(
# tot_Constr_neuf_10_24,
# Constr_neuf_10_24,
# sum,
# i,
# scenario,
# dep_sources,
# len,
# A,
# A1,A3,A4,
# ThreeME,
# c13_2025,
# dep_ener_2025,
# ident_accedants,
# im,
# menage_echelle_prop,
# menage_echelle,
# solde,
# rate_gain_ener,
# list_source_usage,
# j,arr,dep,
# Mat_gain_ener_2025
# )
#
#
#
# # VERIF prix au M2 --------------------------------------------------------
#
# # CCL : impossible de vérifier que les prix de construction au m2
# # sont cohérents avec les données de THREEME.
# # La variable prixrp de DEPMEN est trop parcellaire.
#
#
# SUCCESS -----------------------------------------------------------------
print("4_2_Achat_2010_2024 : SUCCESS")
#
#
# # load("2010/depmen.RData")
# #
# # IM<-menage_echelle %>% filter(NEUF)%>% select(ident_men)
# #
# # Prix<-
# # depmen %>%
# # select(ident_men,prixrp_d,surfhab_d) %>%
# # filter(ident_men %in% IM$ident_men) %>%
# # mutate(prix_2=prixrp_d/surfhab_d) %>%
# # mutate_when(is.na(prix_2),list(prix_2=0))
# #
# # dim(Prix)
# #
# # head(Prix$surfhab_d)
# # head(Prix$prixrp_d)
# # table(is.na(Prix$prixrp_d)) #=> Tous des NA
# #
# # Prix %>% summarise(mean(prix_2))
#
# # rm(depmen)
#
|
7f08299c13cebe3e10871911f37fe2de118173ef
|
8c436b7886ec442c70a61ed51520312fb138ca8f
|
/CODE FOR LINEAR REGRESSION.R
|
48852c59d73eb0218921887cb4030e0eadc4ed93
|
[] |
no_license
|
ARYAN953/git-github
|
63a4ea532769cf6d74917e071fb996e49f13052b
|
bfccaaf37d6edc0fc9ec7085b35b4facb1a3785a
|
refs/heads/master
| 2023-02-17T11:08:58.032218
| 2021-01-09T19:26:15
| 2021-01-09T19:26:15
| 298,910,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,450
|
r
|
CODE FOR LINEAR REGRESSION.R
|
# NOTE(review): a hard-coded setwd() makes this script machine-specific;
# consider parameterizing the project directory.
setwd("D:\\R CLASS\\Linear Case Study\\Linear Regression Case")
#IMPORTING PACKAGES
library(dplyr)
library(lubridate)
library(readxl)
library(XLConnect)
library(openxlsx)
library(MASS)
library(car)
# NOTE(review): require() only returns FALSE on failure instead of erroring;
# library() would fail fast if sqldf is missing.
require(sqldf)
# Load the raw case-study data and build the response variable:
# total card spend across both credit cards.
custdata <- read_excel("linear regression case.xlsx")
custdata$total_spend = custdata$cardspent + custdata$card2spent
#USER DEFINED FUNCTION
# Profile a numeric vector: observation/missing counts, an outlier flag,
# mean/SD, min/max, a ladder of percentiles, and +/- 3 SD control limits.
# Returns a named numeric vector (quantile() names are appended by c(),
# e.g. "p1.1%"), matching the layout expected by the summary tables below.
cust_sum_fun <- function(x) {
  nmiss <- sum(is.na(x))
  vals <- x[!is.na(x)]
  n_obs <- length(vals)
  avg <- mean(vals)
  stdev <- sd(vals)
  lo <- min(vals)
  hi <- max(vals)
  p1 <- quantile(vals, 0.01)
  p5 <- quantile(vals, 0.05)
  p10 <- quantile(vals, 0.10)
  q1 <- quantile(vals, 0.25)
  q2 <- quantile(vals, 0.5)
  q3 <- quantile(vals, 0.75)
  p90 <- quantile(vals, 0.90)
  p95 <- quantile(vals, 0.95)
  p99 <- quantile(vals, 0.99)
  # Flag variables whose extremes fall outside the 5%/95% band.
  outlier_flag <- hi > p95 | lo < p5
  c(n = n_obs, nmiss = nmiss, outlier_flag = outlier_flag,
    mean = avg, stdev = stdev, min = lo,
    p1 = p1, p5 = p5, p10 = p10, q1 = q1, q2 = q2, q3 = q3,
    p90 = p90, p95 = p95, p99 = p99, max = hi,
    UC = avg + 3 * stdev, LC = avg - 3 * stdev)
}
# SEPRATING NUMERICAL AND CATEGORICAL VARIABLES
# (sapply over columns yields a logical mask: TRUE for numeric columns.)
Numeric_variables = custdata[,sapply(custdata,is.numeric)]
character_variable = custdata[,!sapply(custdata,is.numeric)]
#ANALYSIS OF COMPLETE DATASET
# Profile every numeric column with cust_sum_fun, then transpose so each
# row describes one variable.
dia_test <- apply(Numeric_variables,2,cust_sum_fun)
dia_test <- t(data.frame(dia_test))
#CREATING DATA FILE
write.csv(dia_test,"LR_Data.csv",row.names = TRUE)
# OUTLIER AND MISSING VALUE IMPUTATON
Numeric_chars1 <- c( "age","ed",'employ',"income","lninc","debtinc","creddebt","lncreddebt","othdebt","lnothdebt" ,
"spoused" ,"reside", "pets","pets_cats","pets_dogs" ,"pets_birds","pets_reptiles","pets_small",
"pets_saltfish","pets_freshfish","carvalue", "commutetime","carditems" , "cardspent" , "card2items" ,"card2spent",
"tenure","longmon","lnlongmon","longten" , "lnlongten","tollmon","lntollmon","tollten", "lntollten" ,
"equipmon","lnequipmon","equipten","lnequipten", "cardmon","lncardmon",
"cardten","lncardten","wiremon","lnwiremon","wireten","lnwireten","hourstv",'total_spend')
# APPLYING UDF
dia_test1 <- apply(Numeric_variables[Numeric_chars1], 2, cust_sum_fun)
dia_test1 <- t(data.frame(dia_test1))
write.csv(dia_test1,"data1.csv",row.names = TRUE)
#CAPPING THE OUTLIERS
# Winsorize a numeric vector: values below the 5th percentile are raised to
# it and values above the 95th percentile are lowered to it. NAs are ignored
# when computing the caps and remain NA in the output (length-1 replacement
# in a logical subassignment leaves NA positions untouched).
OT_function <- function(x){
  lower_cap <- quantile(x, 0.05, na.rm = TRUE)
  upper_cap <- quantile(x, 0.95, na.rm = TRUE)
  x[x < lower_cap] <- lower_cap
  x[x > upper_cap] <- upper_cap
  x
}
#TREATMENT OF OUTLIER
Numeric_variables[,Numeric_chars1] <- apply(data.frame(Numeric_variables[,Numeric_chars1]), 2, OT_function)
#MISSING VALUE TREATMENT
Numeric_variables[,Numeric_chars1] <- apply(data.frame(Numeric_variables[,Numeric_chars1]), 2,
function(x){x <- replace(x, is.na(x), mean(x, na.rm=TRUE))})
#APPLYING MISSING VALUE
dia_test2 <- apply(Numeric_variables[Numeric_chars1], 2, cust_sum_fun)
dia_test2 <- t(data.frame(dia_test2))
View(dia_test1)
write.csv(dia_test2,"output_data.csv",row.names = TRUE)
Categorical_varibales <- Numeric_variables[,!names(Numeric_variables) %in% c( "age" , "ed", 'employ' , "income" , "lninc", "debtinc" , "creddebt" ,"lncreddebt" , "othdebt", "lnothdebt" ,
"spoused" ,"reside", "pets","pets_cats","pets_dogs" , "pets_birds","pets_reptiles","pets_small",
"pets_saltfish","pets_freshfish", "carvalue", "commutetime", "carditems" , "cardspent" , "card2items" ,"card2spent",
"tenure","longmon", "lnlongmon","longten" , "lnlongten", "tollmon" , "lntollmon" ,"tollten", "lntollten" ,
"equipmon" , "lnequipmon" ,"equipten",
"lnequipten", "cardmon" , "lncardmon",
"cardten" , "lncardten" , "wiremon" ,
"lnwiremon", "wireten", "lnwireten",
"hourstv")]
# SAVE THE DATA THROUGH WHICH Y HAS BECOME NORMAL
Numeric_variables$ln_ttl_spnd <- log(Numeric_variables$total_spend)
names(Numeric_variables)
Categorical_varibales <- cbind(Categorical_varibales , lntotalspend = Numeric_variables$ln_ttl_spnd)
names(Categorical_varibales)
Categorical_varibales$total_spend = NULL
#APPLYING ANOVA
anova_test <- aov(lntotalspend ~. , data = Categorical_varibales)
options(scipen=999)
summary(anova_test)
View(Categorical_varibales)
Categorical_varibales1 <- Categorical_varibales[,c(1,3:5,7:10,23,33,42,48,62,64,71,72,82)]
names(Categorical_varibales1)
Categorical_varibales1 <- cbind(Categorical_varibales1 , lntotalspend = Categorical_varibales$lntotalspend)
#APPLYING ANOVA
anova_test1 <- aov(lntotalspend ~. , data = Categorical_varibales1 )
summary(anova_test1)
#COMBINING CONTINOUS AND CATEGORICAL VARIABLES
Num_characters <- c( "age","ed",'employ',"income","lninc","debtinc","creddebt","lncreddebt" , "othdebt", "lnothdebt" ,
"spoused","reside", "pets","pets_cats", "pets_dogs","pets_birds","pets_reptiles","pets_small",
"pets_saltfish","pets_freshfish", "carvalue", "commutetime", "carditems" , "cardspent" , "card2items","card2spent", "tenure","longmon", "lnlongmon","longten" , "lnlongten",
"tollmon","lntollmon" ,"tollten", "lntollten" ,"equipmon","lnequipmon" ,"equipten",
"lnequipten", "cardmon", "lncardmon", "cardten","lncardten" , "wiremon" ,
"lnwiremon", "wireten", "lnwireten","hourstv")
customer_data <- cbind(Numeric_variables[,Num_characters],Categorical_varibales1)
names(customer_data)
# CREATING A INITIAL MODEL
First_model <- lm(lntotalspend ~., data = customer_data)
summary(First_model)
step_1 <- stepAIC(First_model , direction = "both")
Model1 <- lm(lntotalspend ~ income + lninc + creddebt + lncreddebt + pets_dogs +
carditems + cardspent + card2items + card2spent + longmon +
lnlongmon + longten + tollten + lntollten + cardmon + lncardmon +
cardten + lncardten + lnwiremon + wireten + gender + edcat +
union + card + card2 + internet + owndvd + response_03 , data = customer_data)
summary(Model1)
step45 <- stepAIC(Model1, direction = "both")
vif(Model1)
Model_2 <- lm(lntotalspend ~ lninc + lncreddebt + pets_dogs +
carditems + lnlongmon + tollten + lncardmon +
lncardten + lnwiremon + wireten + gender + edcat +
union + card + card2 + internet + owndvd + response_03 , data = customer_data)
summary(Model_2)
vif(Model_2)
rest_variables <- c('lninc' , 'lncreddebt' , 'pets_dogs' ,'lnlongmon' , 'tollten' , 'lncardmon' , 'lncardten','lnwiremon' , 'gender' , 'edcat' ,
'union' , 'card' , 'card2' , 'internet' , 'response_03', 'owndvd' ,"lntotalspend")
cust_data1234 <- customer_data[,rest_variables]
names(cust_data1234)
View(cust_data1234)
cust_data1234$owndvd <- as.factor(cust_data1234$owndvd)
cust_data1234$gender <- as.factor(cust_data1234$gender)
cust_data1234$edcat <- as.factor(cust_data1234$edcat)
cust_data1234$union <- as.factor(cust_data1234$union)
cust_data1234$card <- as.factor(cust_data1234$card)
cust_data1234$card2 <- as.factor(cust_data1234$card2)
cust_data1234$response_03 <- as.factor(cust_data1234$response_03)
cust_data1234$internet <- as.factor(cust_data1234$internet)
#SPLITTING TRAINING AND TESTING DATA
# Fixed seed for a reproducible 70/30 train/test split.
set.seed(999)
Traning_data123 <- sample(1:nrow(cust_data1234), size = floor(0.70 * nrow(cust_data1234)))
training_dataset <- cust_data1234[Traning_data123,]
testing_dataset <- cust_data1234[-Traning_data123,]
# APPLYING DATASET
# Full model on all retained predictors (response: log of total spend).
final_model <- lm(lntotalspend ~. , data = training_dataset)
summary(final_model)
#Applying Cook's distance
# Drop influential observations. NOTE(review): 4/3500 looks like the usual
# 4/n rule with n hard-coded -- presumably the training set had ~3500 rows;
# verify against nrow(training_dataset).
training_dataset$Cd<- cooks.distance(final_model)
training_dataset1<-subset(training_dataset, Cd< (4/3500))
#Apply Model on variables from stepAIC
Final_model1 <- lm(lntotalspend ~ lninc+lncreddebt+pets_dogs+lnlongmon+tollten+lncardmon+
lncardten+lnwiremon+gender+edcat+union+card+card2+internet+owndvd+response_03 , data = training_dataset1)
summary(Final_model1)
ls(Final_model1)
anova(Final_model1)
step_3 <- stepAIC(Final_model1)
#FINAL MODEL
Final_model12 <- lm(lntotalspend ~lninc + gender + edcat + card + card2 + internet + owndvd , data = training_dataset1)
summary(Final_model12)
# TESTING DATASET
test_data1<-cbind(training_dataset, pred_spnd = exp(predict(Final_model12,training_dataset)))
test_data2<-cbind(testing_dataset, pred_spnd=exp(predict(Final_model12,testing_dataset)))
View(test_data1)
View(test_data2)
|
fdacca43a2278c8dede44ecf9087c502050d6641
|
20a4f4d42bad6d20437979af5d5100c851b5f947
|
/man/urbn_source.Rd
|
3f6ed3ed527c58978a15d9891c02ad545f703739
|
[] |
no_license
|
shanerock/jcathemes
|
3cfa1b9126c319d6ffe22d4c917e5631f4644da0
|
ab38f351bc19734e910d9378bde33fc301a30c79
|
refs/heads/master
| 2022-11-25T00:19:19.583039
| 2020-07-27T18:30:13
| 2020-07-27T18:30:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 379
|
rd
|
urbn_source.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/urbn_source.R
\name{urbn_source}
\alias{urbn_source}
\title{urbn_source}
\usage{
urbn_source(string, size = 8)
}
\arguments{
\item{string}{character string for a source statement}
\item{size}{font size for the source}
}
\value{
A grob containing the source statement, formatted for use in a ggplot
}
\description{
urbn_source
}
|
faeab45ae3e5a610b88b34d2ba1dc3754a834040
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802243-test.R
|
b8a4c3a1d1b8f228c126744ad3ba35e35f4e9a61
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
r
|
1612802243-test.R
|
testlist <- list(bytes1 = c(1903260017L, -1L, 1903260017L, 1903260017L, 1903260017L, 1903260017L, 1903260017L, 1903260017L, 1903260017L, 1903260017L, 1903260017L, 1903260017L, 1903233537L, 2105376125L, 587923455L, -65536L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), pmutation = 0)
# Replay the recorded fuzzer-generated input list against the internal
# mutation routine and print the structure of the result (smoke test for
# crashes / valgrind errors, not a correctness assertion).
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
be1d0fd8f172890496b4d7e34df3dff92845a34d
|
914c515a6c85356ee148950f5d93be59ee112b4c
|
/R/dependentData_Auxilliary.R
|
465b09960d672be42eb866298f7a50a601b6b7db
|
[] |
no_license
|
Mufabo/Rrobustsp
|
917fb8e9a353ffc7a838544fa5cd1d39eae34f6c
|
1700f4fed27e63cec6dfb67a14a208d8f46e2418
|
refs/heads/master
| 2022-06-14T22:15:50.045970
| 2022-06-05T12:13:32
| 2022-06-05T12:13:32
| 222,286,547
| 0
| 3
| null | 2019-12-19T14:17:46
| 2019-11-17T17:36:00
|
R
|
UTF-8
|
R
| false
| false
| 3,203
|
r
|
dependentData_Auxilliary.R
|
#' eta
#'
#' Redescending eta function, applied elementwise after rescaling by c:
#' identity for |x/c| <= 2, a smooth odd 7th-degree polynomial taper for
#' 2 < |x/c| <= 3, and 0 for |x/c| > 3. The result is rescaled back by c.
#'
#' @param x: numeric vector, the signal
#' @param c: numeric, default = 1; scaling constant
#'
#' @return numeric vector of the same length as \code{x}
#'
#' @examples
#' library(Rrobustsp)
#'
#' x <- rnorm(5)
#' eta(x)
#'
#' @note
#'
#' file location : dependentData_Auxilliary.R
#'
#' @export
eta <- function(x, c = 1){
  x <- x / c
  y <- x
  # Hoist the transition-band mask: the original recomputed
  # `abs(x) > 2 & abs(x) <= 3` five times (once per polynomial term).
  band <- abs(x) > 2 & abs(x) <= 3
  xb <- x[band]
  y[abs(x) > 3] <- 0
  y[band] <- 0.016 * xb^7 - 0.312 * xb^5 + 1.728 * xb^3 - 1.944 * xb
  y[abs(x) <= 2] <- x[abs(x) <= 2]
  # Undo the initial rescaling.
  y <- c * y
  return(y)
}
ma_infinity <- function(phi, theta, Q_long){
  # MA(infinity) representation of an ARMA model: the first Q_long impulse
  # response coefficients are obtained by polynomial long division of the
  # MA polynomial (1, theta) by the AR polynomial (1, -phi) using
  # pracma::deconv. The numerator is zero-padded so the quotient is long
  # enough to contain Q_long terms.
  n_ar <- length(phi)
  n_ma <- length(theta)
  padded_num <- c(1, theta, numeric(Q_long + n_ar + n_ma))
  quotient <- pracma::deconv(padded_num, c(1, -phi))$q
  # Drop the lag-0 coefficient and keep lags 1..Q_long.
  quotient[2:(Q_long + 1)]
}
muler_rho1 <- function(x){
  # Bounded rho function, evaluated elementwise after rescaling by the
  # tuning constant 0.405 (the constant's origin is not documented in the
  # original source -- TODO confirm against the Muler/Yohai reference).
  # Quadratic core for |x| <= 2, 8th-degree polynomial blend on (2, 3],
  # constant 3.25 beyond 3 (boundedness of the rho function).
  x <- x / 0.405
  rho <- numeric(length(x))
  # Hoisted: the original re-indexed x[intv] once per polynomial term.
  band <- abs(x) > 2 & abs(x) <= 3
  xb <- x[band]
  rho[abs(x) <= 2] <- 0.5 * x[abs(x) <= 2]^2
  rho[band] <- 0.002 * xb^8 - 0.052 * xb^6 + 0.432 * xb^4 - 0.972 * xb^2 + 1.792
  rho[abs(x) > 3] <- 3.25
  return(rho)
}
#' muler_rho2
#'
#' Bounded rho function without rescaling, evaluated elementwise:
#' 0.5*x^2 for |x| <= 2, an 8th-degree polynomial blend on 2 < |x| <= 3,
#' and the constant 3.25 for |x| > 3.
#'
#' @note
#' Location: .../Rrobustsp/dependentData_Auxiliary.R
#'
#' @export
muler_rho2 <- function(x){
  ax <- abs(x)
  out <- rep(3.25, length(x))  # default: the bounded tail value
  core <- ax <= 2
  band <- (ax > 2) & (ax <= 3)
  xb <- x[band]
  out[band] <- 0.002 * xb^8 - 0.052 * xb^6 + 0.432 * xb^4 - 0.972 * xb^2 + 1.792
  out[core] <- 0.5 * x[core]^2
  out
}
#' m_scale
#'
#' M-estimate of scale based on muler_rho1, computed by an iteratively
#' reweighted fixed-point iteration starting from the normalized MAD.
#'
#' @param x numeric vector of observations/residuals.
#'
#' @return numeric scalar: the M-scale estimate. Iterates at most 30 times,
#' stops early when the relative change drops below 1e-4, and abandons the
#' iteration if the scale exceeds 1e5 (divergence guard).
m_scale <- function(x){
N <- length(x)
# Initial scale: normalized median absolute deviation (madn).
sigma_k <- madn(x)
delta <- 3.25 / 2 # max(muler_rho1)/2
epsilon <- 1e-4
w_k <- rep(1, N)
max_iters <- 30
k <- 0
while(k<=max_iters & sigma_k < 10^5){
# Weights w(u) = rho(u)/u^2 for nonzero observations; zero observations
# keep their previous weight (their term in the weighted sum is 0 anyway).
w_k[x != 0] <- muler_rho1(x[x != 0] / sigma_k) / (x[x != 0] / sigma_k)^2
# Fixed-point update of the scale from the weighted sum of squares.
sigma_k_plus1 <- sqrt(1 / (N * delta) * sum(w_k * x^2))
# Continue only while the relative change is finite and above epsilon.
if(!is.nan(sigma_k_plus1 / sigma_k -1) & abs(sigma_k_plus1 / sigma_k -1) > epsilon){
sigma_k <- sigma_k_plus1
k <- k + 1
} else break
}
sigma_hat <- sigma_k
return(sigma_hat)
}
res_scale_approx <- function(phi_grid, a_bip_sc, fine_grid, a_sc) {
  # Approximates two residual-scale curves -- for BIP-AR(p) and AR(p)
  # tau-estimates -- over phi_grid by degree-5 polynomials, evaluates them
  # on the finer grid, and returns the index and value of each minimum.
  # (polyfit/polyval are expected from an attached pracma namespace, as in
  # the original.)
  minimize_on_fine_grid <- function(scale_values) {
    fit <- polyfit(phi_grid, scale_values, 5)
    curve_vals <- c(polyval(fit, fine_grid))
    list(ind = which.min(curve_vals), val = min(curve_vals))
  }
  bip <- minimize_on_fine_grid(a_bip_sc)  # BIP-AR(p) curve
  ar <- minimize_on_fine_grid(a_sc)       # AR(p) curve
  list('ind1' = bip$ind, 'min1' = bip$val, 'ind2' = ar$ind, 'min2' = ar$val)
}
#' tau_scale
#'
#' Tau-estimate of scale: an M-scale (m_scale) refined by averaging the
#' bounded rho function muler_rho2 over the standardized sample.
#'
#' @param x numeric vector.
#'
#' @return scale (numeric scalar)
#'
#' @note
#' Location: .../Rrobustsp/R/dependentData_Auxiliary
#'
#' @export
tau_scale <- function(x){
  # Consistency constant: E(muler_rho2) under the standard normal
  # distribution.
  b <- 0.398545548533895
  s_m <- m_scale(x)
  sqrt(s_m^2 / (length(x)) * 1 / b * sum(muler_rho2(x / s_m)))
}
|
0aa881f16283f241b9695044750e3cdb0012499c
|
ed633d145dfa8b32511b3cb13ba95b822e2559c8
|
/R/reff.chn.r
|
de23fdbf2765531a376e02eddfd740a1835fe169
|
[] |
no_license
|
wendellopes/rvswf
|
51a09f034e330fbb7fd58816c3de2b7f7fdba9dc
|
ee243c3e57c711c3259a76051a88cc670dfe9c4b
|
refs/heads/master
| 2020-05-19T19:38:18.987560
| 2016-09-11T22:57:37
| 2016-09-11T22:57:37
| 19,242,694
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 501
|
r
|
reff.chn.r
|
#' Reference function Cylindrical Hankel function (1,2).
#'
#' @details The Cylindrical Hankel
#' function given by \eqn{h_n^{(1,2)}=j_n(x)\pm y_n(x)}.
#' @param x The argument of the function
#' @param n The order of the function
#' @param type 1 or 2: first kind (J + iY) or second kind (J - iY).
#' @return Complex vector with the Hankel function values.
#' @export
reff.chn <- function(x, n, type = 1) {
  # Bug fix: the previous guard `abs(type) != 1` rejected type = 2, so the
  # documented second-kind branch was unreachable (and type = -1 passed the
  # guard but fell through, silently returning NULL). Accept exactly the
  # documented values 1 and 2.
  if (length(type) != 1 || !(type %in% c(1, 2))) {
    stop("type must be 1 or 2!")
  }
  if (type == 1) {
    return(besselJ(x, n) + 1i * besselY(x, n))
  }
  besselJ(x, n) - 1i * besselY(x, n)
}
|
235d0db78d5864b007d79aa8782ceec406c29b10
|
93005dac4be25d7fb42cc09a08ab303439c32c3c
|
/man/checkPolygonsHoles.Rd
|
73bea185b1f3db8e482a0aef5f129ee00bcc3c2e
|
[] |
no_license
|
cran/maptools
|
826901e049878087bdc9deb1b2e8076b43d5f79b
|
729e48b3254b5d8ccb85ca4bbbb32bbd7d07b026
|
refs/heads/master
| 2023-07-24T12:12:31.900627
| 2023-07-18T19:10:02
| 2023-07-18T20:30:36
| 17,697,281
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,044
|
rd
|
checkPolygonsHoles.Rd
|
\name{checkPolygonsHoles}
\alias{checkPolygonsHoles}
\alias{rgeosStatus}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Check holes in Polygons objects}
\description{
The function checks holes in Polygons objects. Use of the rgeos package functions is preferred, and if rgeos is available, they will be used automatically. In this case, member Polygon objects are checked against each other for containment, and the returned Polygons object has component hole slots set appropriately. In addition, the output Polygons object may be provided with a comment string, encoding the external and internal rings.
}
\usage{
checkPolygonsHoles(x, properly=TRUE, avoidGEOS=FALSE, useSTRtree=FALSE)
rgeosStatus()
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{A Polygons object as defined in package sp}
\item{properly}{default TRUE, use \code{\link[rgeos]{gContainsProperly}} rather than \code{\link[rgeos]{gContains}}}
\item{avoidGEOS}{default FALSE}
\item{useSTRtree}{default FALSE, if TRUE, use \pkg{rgeos} STRtree in checking holes, which is much faster, but uses a lot of memory and does not release it on completion (work in progress)}
}
\value{
A Polygons object re-created from the input object.
}
\author{Roger Bivand}
%\seealso{\code{\link[rgeos]{createPolygonsComment}}, \code{\link[rgeos]{gIsValid}}, \code{\link[rgeos]{gEquals}}, \code{\link[rgeos]{gContainsProperly}}}
\examples{
if (rgeosStatus()) {
nc1 <- readShapePoly(system.file("shapes/sids.shp", package="maptools")[1],
proj4string=CRS("+proj=longlat +ellps=clrk66"))
pl <- slot(nc1, "polygons")
sapply(slot(pl[[4]], "Polygons"), function(x) slot(x, "hole"))
pl[[4]] <- Polygons(list(slot(pl[[4]], "Polygons")[[1]],
Polygon(slot(slot(pl[[4]], "Polygons")[[2]], "coords"), hole=TRUE),
slot(pl[[4]], "Polygons")[[3]]), slot(pl[[4]], "ID"))
sapply(slot(pl[[4]], "Polygons"), function(x) slot(x, "hole"))
pl_new <- lapply(pl, checkPolygonsHoles)
sapply(slot(pl_new[[4]], "Polygons"), function(x) slot(x, "hole"))
srs <- slot(slot(pl[[1]], "Polygons")[[1]], "coords")
hle2 <- structure(c(-81.64093, -81.38380, -81.34165, -81.66833, -81.64093,
36.57865, 36.57234, 36.47603, 36.47894, 36.57865), .Dim = as.integer(c(5, 2)))
hle3 <- structure(c(-81.47759, -81.39118, -81.38486, -81.46705, -81.47759,
36.56289, 36.55659, 36.49907, 36.50380, 36.56289), .Dim = as.integer(c(5, 2)))
x <- Polygons(list(Polygon(srs), Polygon(hle2), Polygon(hle3)),
ID=slot(pl[[1]], "ID"))
sapply(slot(x, "Polygons"), function(x) slot(x, "hole"))
res <- checkPolygonsHoles(x)
sapply(slot(res, "Polygons"), function(x) slot(x, "hole"))
\dontrun{
opar <- par(mfrow=c(1,2))
SPx <- SpatialPolygons(list(x))
plot(SPx)
text(t(sapply(slot(x, "Polygons"), function(i) slot(i, "labpt"))),
labels=sapply(slot(x, "Polygons"), function(i) slot(i, "hole")), cex=0.6)
title(xlab="Hole slot values before checking")
SPres <- SpatialPolygons(list(res))
plot(SPres)
text(t(sapply(slot(res, "Polygons"), function(i) slot(i, "labpt"))),
labels=sapply(slot(res, "Polygons"), function(i) slot(i, "hole")), cex=0.6)
title(xlab="Hole slot values after checking")
par(opar)
p1 <- Polygon(cbind(x=c(0, 0, 10, 10, 0), y=c(0, 10, 10, 0, 0))) # I
p2 <- Polygon(cbind(x=c(3, 3, 7, 7, 3), y=c(3, 7, 7, 3, 3))) # H
p8 <- Polygon(cbind(x=c(1, 1, 2, 2, 1), y=c(1, 2, 2, 1, 1))) # H
p9 <- Polygon(cbind(x=c(1, 1, 2, 2, 1), y=c(5, 6, 6, 5, 5))) # H
p3 <- Polygon(cbind(x=c(20, 20, 30, 30, 20), y=c(20, 30, 30, 20, 20))) # I
p4 <- Polygon(cbind(x=c(21, 21, 29, 29, 21), y=c(21, 29, 29, 21, 21))) # H
p14 <- Polygon(cbind(x=c(21, 21, 29, 29, 21), y=c(21, 29, 29, 21, 21))) # H
p5 <- Polygon(cbind(x=c(22, 22, 28, 28, 22), y=c(22, 28, 28, 22, 22))) # I
p15 <- Polygon(cbind(x=c(22, 22, 28, 28, 22), y=c(22, 28, 28, 22, 22))) # I
p6 <- Polygon(cbind(x=c(23, 23, 27, 27, 23), y=c(23, 27, 27, 23, 23))) # H
p7 <- Polygon(cbind(x=c(13, 13, 17, 17, 13), y=c(13, 17, 17, 13, 13))) # I
p10 <- Polygon(cbind(x=c(24, 24, 26, 26, 24), y=c(24, 26, 26, 24, 24))) # I
p11 <- Polygon(cbind(x=c(24.25, 24.25, 25.75, 25.75, 24.25),
y=c(24.25, 25.75, 25.75, 24.25, 24.25))) # H
p12 <- Polygon(cbind(x=c(24.5, 24.5, 25.5, 25.5, 24.5),
y=c(24.5, 25.5, 25.5, 24.5, 24.5))) # I
p13 <- Polygon(cbind(x=c(24.75, 24.75, 25.25, 25.25, 24.75),
y=c(24.75, 25.25, 25.25, 24.75, 24.75))) # H
lp <- list(p1, p2, p13, p7, p6, p5, p4, p3, p8, p11, p12, p9, p10, p14, p15)
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
# 0 1 11 0 6 0 8 0 1 13 0 1 0 (7) (6)
# I H H I H I H I H H I H I ? ?
pls <- Polygons(lp, ID="1")
comment(pls)
pls1 <- checkPolygonsHoles(pls)
comment(pls1)
opar <- par(mfrow=c(1,2))
plot(SpatialPolygons(list(pls)), col="magenta", pbg="cyan", usePolypath=FALSE)
title(xlab="Hole slot values before checking")
plot(SpatialPolygons(list(pls1)), col="magenta", pbg="cyan", usePolypath=FALSE)
title(xlab="Hole slot values after checking")
par(opar)
}
}
}
\keyword{spatial}
|
ec8541c96d715e747b0a5a8a52e79547ed1b458d
|
3a3e3e050d6deb8544ff2838ab4b698a492d2eb7
|
/roadmap/scripts/03.correlated_regions_sig_heatmap.R
|
0b1fe3bff1a3941633ac42cb910be0438a670e02
|
[] |
no_license
|
jokergoo/epik
|
f9eb86c38eab46913a2787296fe5e023caf70f2b
|
16ae793be02554ddda89401a888327dce87c5a4a
|
refs/heads/master
| 2021-01-12T05:25:21.598897
| 2019-09-27T08:04:03
| 2019-09-27T08:04:03
| 77,924,435
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,781
|
r
|
03.correlated_regions_sig_heatmap.R
|
library(methods)
library(GetoptLong)
cutoff = 0.05
meandiff = 0.1
rerun = FALSE
GetoptLong("cutoff=f", "0.05",
"meandiff=s", "0",
"rerun!", "rerun")
BASE_DIR = "/icgc/dkfzlsdf/analysis/B080/guz/roadmap_analysis/re_analysis"
source(qq("@{BASE_DIR}/scripts/configure/roadmap_configure.R"))
neg_cr = readRDS(qq("@{OUTPUT_DIR}/rds/all_neg_cr_w6s3_fdr_less_than_@{cutoff}_methdiff_larger_than_@{meandiff}.rds"))
pos_cr = readRDS(qq("@{OUTPUT_DIR}/rds/all_pos_cr_w6s3_fdr_less_than_@{cutoff}_methdiff_larger_than_@{meandiff}.rds"))
foo_cr = c(neg_cr, pos_cr)
foo_cr$direction = c(rep("neg", length(neg_cr)), rep("pos", length(pos_cr)))
sample_id = attr(neg_cr, "sample_id")
## Attach per-sample mean methylation to each region in `gr`.
##
## For every chromosome, CpG-level methylation for the `sample_id` columns
## (read through the project-level `methylation_hooks` object) is averaged
## over the CpGs overlapping each region; the means are appended to mcols()
## as columns prefixed "mean_meth_". Returns the regions reordered
## chromosome by chromosome (gr2 is rebuilt in loop order).
##
## NOTE(review): a region with zero overlapping CpGs gets no row in
## mean_meth while it is still appended to gr2, which would misalign the
## final cbind -- presumably every region overlaps at least one CpG here;
## verify upstream.
add_mean_methylation = function(gr) {
gr2 = GRanges()
mean_meth = NULL
for(chr in unique(as.vector(seqnames(gr)))) {
# Point the methylation hooks at this chromosome's data.
methylation_hooks$set(chr)
sub_gr = gr[seqnames(gr) == chr]
gr_cpg = methylation_hooks$GRanges()
m = methylation_hooks$meth(col_index = sample_id)
# For each region (query index), average methylation over its CpGs.
mtch = as.matrix(findOverlaps(sub_gr, gr_cpg))
mean_m = do.call("rbind", tapply(mtch[, 2], mtch[, 1], function(ind) colMeans(m[ind, , drop = FALSE], na.rm = TRUE)))
mean_meth = rbind(mean_meth, mean_m)
gr2 = c(gr2, sub_gr)
}
rownames(mean_meth) = NULL
colnames(mean_meth) = paste0("mean_meth_", colnames(mean_meth))
mcols(gr2) = cbind(mcols(gr2), as.data.frame(mean_meth))
return(gr2)
}
## add mean methylation matrix to the `GRanges` object
rdata_file = qq("@{OUTPUT_DIR}/rds/sig_cr_mean_methylation_fdr_@{cutoff}_methdiff_@{meandiff}.rds")
if(file.exists(rdata_file) && !rerun) {
foo_cr2 = readRDS(rdata_file)
} else {
foo_cr2 = add_mean_methylation(foo_cr)
saveRDS(foo_cr2, file = rdata_file)
}
meth_mat = as.matrix(mcols(foo_cr2)[, grep("mean_meth", colnames(mcols(foo_cr2)))])
expr_mat = EXPR[foo_cr2$gene_id, sample_id]
meth_diff = rowMeans(meth_mat[, SAMPLE$subgroup == "subgroup1"]) -
rowMeans(meth_mat[, SAMPLE$subgroup == "subgroup2"])
gm = genes(TXDB)
gl = width(gm)
names(gl) = names(gm)
## since there are multiple samples in a subgroup, this function
## returns the common regions that cover regions in most of the samples
## For one chromHMM state `name`, read each sample's segmentation file and
## return the regions in that state common to most samples (a region must
## be covered in at least 50% of the samples).
get_chromatin_states = function(name, sample_id) {
# Keep only the segmentation files belonging to the requested samples
# (sample id is the leading "E###" token of the file name).
fn = dir(qq("@{BASE_DIR}/data/chromatin_states/"))
nm = gsub("^(E\\d+?)_.*$", "\\1", fn)
fn = fn[nm %in% sample_id]
all_sample_chromatin_states = lapply(fn, function(x) {
x = qq("@{BASE_DIR}/data/chromatin_states/@{x}")
qqcat("reading @{x}...\n")
gr = read.table(x, sep = "\t")
gr = gr[gr[[1]] %in% CHROMOSOME, ]
# start + 1: converts 0-based starts to 1-based GRanges coordinates
# (presumably BED-style input -- confirm file format).
GRanges(seqnames = gr[[1]], ranges = IRanges(gr[[2]] + 1, gr[[3]]), states = gr[[4]])
})
names(all_sample_chromatin_states) = gsub("^(E\\d+)_.*$", "\\1", fn)
# Restrict each sample to the requested chromatin state.
gf_list = lapply(all_sample_chromatin_states, function(gf) {
gf[gf$states == name]
})
# a given region should cover at least 50% of the samples
epic::common_regions(gf_list, min_width = 1000, min_coverage = ceiling(0.5*length(gf_list)), gap = 0)
}
subgroup = SAMPLE[sample_id, "subgroup"]
rdata_file = qq("@{OUTPUT_DIR}/rds/genomic_features_list_fdr_@{cutoff}_methdiff_@{meandiff}.rds")
if(file.exists(rdata_file) && !rerun) {
gr_list = readRDS(rdata_file)
} else {
df = read.table("/icgc/dkfzlsdf/analysis/B080/guz/roadmap_analysis/re_analysis/data/chromatin_states/E099_15_coreMarks_mnemonics.bed.gz", sep = "\t", stringsAsFactors = FALSE)
all_states = sort(unique(df[[4]]))
# separate by subgroups
cs_list_1 = lapply(all_states, get_chromatin_states, sample_id[subgroup == "subgroup1"])
names(cs_list_1) = paste0(all_states, "_1")
cs_list_2 = lapply(all_states, get_chromatin_states, sample_id[subgroup == "subgroup2"])
names(cs_list_2) = paste0(all_states, "_2")
tfbs = read.table("/icgc/dkfzlsdf/analysis/hipo/hipo_016/analysis/WGBS_final/bed/encode_uniform_tfbs_merged_1kb.bed", sep = "\t", stringsAsFactors = FALSE)
tfbs = GRanges(seqnames = tfbs[[1]], ranges = IRanges(tfbs[[2]], tfbs[[3]]))
gr_list = c(list(CGI = CGI, shore = CGI_SHORE, tfbs = tfbs), cs_list_1, cs_list_2)
saveRDS(gr_list, file = rdata_file)
}
## for each region in `foo_cr2`, how much is covered by regions in `gr_list`
foo_cr2 = epic::annotate_to_genomic_features(foo_cr2, gr_list)
## whether it is at tss, gene body or intergenic regions
ga = ifelse(foo_cr2$gene_tss_dist > -1000 & foo_cr2$gene_tss_dist < 2000, "tss",
ifelse(foo_cr2$gene_tss_dist > 2000 & foo_cr2$gene_tss_dist < gl[foo_cr2$gene_id], "gene", "intergenic"))
# a matrix for the overlapping of CGI/shore/tfbs
overlap_mat_0 = as.matrix(mcols(foo_cr2)[, grep("CGI|shore|tfbs", colnames(mcols(foo_cr2)))])
colnames(overlap_mat_0) = gsub("overlap_to_", "", colnames(overlap_mat_0))
# difference for the chromHMM segmentation overlapping in the two subgroups
overlap_mat_1 = as.matrix(mcols(foo_cr2)[, grep("overlap_to.*_1$", colnames(mcols(foo_cr2)))])
colnames(overlap_mat_1) = gsub("overlap_to_", "", colnames(overlap_mat_1))
overlap_mat_2 = as.matrix(mcols(foo_cr2)[, grep("overlap_to.*_2$", colnames(mcols(foo_cr2)))])
colnames(overlap_mat_2) = gsub("overlap_to_", "", colnames(overlap_mat_2))
overlap_mat_diff = overlap_mat_1 - overlap_mat_2
dim(overlap_mat_diff) = dim(overlap_mat_1)
dimnames(overlap_mat_diff) = dimnames(overlap_mat_1)
colnames(overlap_mat_diff) = gsub("_\\d$", "", colnames(overlap_mat_diff))
# when clustering columns, we cluster samples in each subgroup separately
dend1 = as.dendrogram(hclust(dist(t(meth_mat[, SAMPLE$subgroup == "subgroup1"]))))
hc1 = as.hclust(reorder(dend1, colMeans(meth_mat[, SAMPLE$subgroup == "subgroup1"])))
expr_col_od1 = hc1$order
dend2 = as.dendrogram(hclust(dist(t(meth_mat[, SAMPLE$subgroup == "subgroup2"]))))
hc2 = as.hclust(reorder(dend2, colMeans(meth_mat[, SAMPLE$subgroup == "subgroup2"])))
expr_col_od2 = hc2$order
expr_col_od = c(which(SAMPLE$subgroup == "subgroup1")[expr_col_od1], which(SAMPLE$subgroup == "subgroup2")[expr_col_od2])
abs_tss_dist = abs(foo_cr2$gene_tss_dist)
q = quantile(abs_tss_dist, 0.9); q = 5e4
abs_tss_dist[abs_tss_dist > q] = q
# rows are split into four slices for neg_cr and pos_cr separately and ordered by mean value
set.seed(123)
km_meth1 = kmeans(meth_mat[foo_cr2$direction == "neg", SAMPLE$subgroup == "subgroup1"], centers = 4)$cluster
x = tapply(rowMeans(meth_mat[foo_cr2$direction == "neg", ]), km_meth1, mean)
od = structure(rank(x), names = names(x))
km_meth1 = od[as.character(km_meth1)]
km_meth2 = kmeans(meth_mat[foo_cr2$direction == "pos", SAMPLE$subgroup == "subgroup1"], centers = 4)$cluster
x = tapply(rowMeans(meth_mat[foo_cr2$direction == "pos", ]), km_meth2, mean)
od = structure(rank(x), names = names(x))
km_meth2 = od[as.character(km_meth2)]
split = numeric(nrow(meth_mat))
split[foo_cr2$direction == "neg"] = paste0("neg", km_meth1)
split[foo_cr2$direction == "pos"] = paste0("pos", km_meth2)
## now we concatenate heatmaps
## 1. a one-column heatmap shows row slices
ht_list = Heatmap(split, name = "split", show_row_names = FALSE, show_column_names = FALSE, width = unit(5, "mm"),
col = c(neg1 = "darkgreen", neg2 = "darkgreen", neg3 = "darkgreen", neg4 = "darkgreen",
pos1 = "red", pos2 = "red", pos3 = "red", pos4 = "red"), show_heatmap_legend = FALSE) +
## 2. methylation for the CRs
Heatmap(meth_mat, name = "methylation", col = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red")),
show_row_names = FALSE, show_column_names = FALSE, cluster_columns = FALSE, column_order = expr_col_od,
top_annotation = HeatmapAnnotation(group = SAMPLE[sample_id, ]$group, sample_type = SAMPLE[sample_id, ]$sample_type, subgroup = subgroup,
col = list(group = COLOR$group, sample_type = COLOR$sample_type, subgroup = COLOR$subgroup)),
column_title = "methylation", show_row_dend = FALSE, combined_name_fun = NULL,
use_raster = TRUE, raster_quality = 2) +
Heatmap(meth_diff, name = "meth_diff", col = colorRamp2(c(-0.3, 0, 0.3), c("green", "white", "red")),
show_row_names = FALSE, width = unit(5, "mm"), show_heatmap_legend = FALSE) +
## 3. expression matrix
Heatmap(expr_mat, name = "expr", show_row_names = FALSE,
show_column_names = FALSE, cluster_columns = FALSE, column_order = expr_col_od,
top_annotation = HeatmapAnnotation(group = SAMPLE[sample_id, ]$group, sample_type = SAMPLE[sample_id, ]$sample_type, subgroup = subgroup,
col = list(group = COLOR$group, sample_type = COLOR$sample_type, subgroup = COLOR$subgroup), show_legend = FALSE, show_annotation_name = TRUE),
column_title = "Expression", show_row_dend = FALSE,
use_raster = TRUE, raster_quality = 2) +
## 4. overlapping matrix for CGI/shore/tfbs
Heatmap(overlap_mat_0, name = "overlap0", show_row_names = FALSE, col = colorRamp2(c(0, 1), c("white", "orange")),
show_column_names = TRUE, cluster_columns = FALSE,
column_title = "overlap to gf", show_row_dend = FALSE) +
## 5. overlapping matrix for the chromHMM segmentations
Heatmap(overlap_mat_diff, col = colorRamp2(c(-1, 0, 1), c("green", "white", "red")),
name = "overlap_diff", show_row_names = FALSE,
show_column_names = TRUE, cluster_columns = FALSE,
column_title = "overlap diff", show_row_dend = FALSE,
use_raster = TRUE, raster_quality = 2) +
## 6. dist to tss
rowAnnotation(tss_dist = row_anno_points(abs_tss_dist, size = unit(1, "mm"), gp = gpar(col = "#00000020"), axis = TRUE),
width = unit(2, "cm")) +
## 7. annotation to genes
Heatmap(ga, name = "anno", col = c("tss" = "red", "gene" = "blue", "intergenic" = "green"), show_row_names = FALSE,
width = unit(5, "mm"))
pdf(qq("@{OUTPUT_DIR}/plots/sig_cr_heatmap_fdr_@{cutoff}_methdiff_@{meandiff}.pdf"), width = 20, height = 16)
draw(ht_list, main_heatmap = "methylation", split = split,
column_title = qq("@{length(foo_cr)} cr, width=@{sum(width(foo_cr))}"))
decorate_annotation("tss_dist", slice = length(unique(split)), {
grid.text("tss_dist", 0.5, unit(0, "npc") - unit(1, "cm"), gp = gpar(fontsize = 10))
})
all_levels = sort(unique(split))
for(i in seq_along(all_levels)) {
decorate_heatmap_body("split", slice = i, {
grid.text(all_levels[i], rot = 90, gp = gpar(col = "white"))
})
decorate_heatmap_body("methylation", slice = i, {
grid.rect(gp = gpar(col = "black", fill = "transparent"))
})
decorate_heatmap_body("expr", slice = i, {
grid.rect(gp = gpar(col = "black", fill = "transparent"))
})
decorate_heatmap_body("overlap0", slice = i, {
grid.rect(gp = gpar(col = "black", fill = "transparent"))
})
decorate_heatmap_body("overlap_diff", slice = i, {
grid.rect(gp = gpar(col = "black", fill = "transparent"))
})
}
dev.off()
# Split the samples of interest into the two subgroups defined in the SAMPLE
# annotation table (SAMPLE, sample_id come from earlier in this script).
sample_id_subgroup1 = intersect(sample_id, rownames(SAMPLE[SAMPLE$subgroup == "subgroup1", ]))
sample_id_subgroup2 = intersect(sample_id, rownames(SAMPLE[SAMPLE$subgroup == "subgroup2", ]))
## barplots or boxplots for the annotation matrix in the eight row slices
# qq() presumably interpolates @{var} into the path (GetoptLong-style) — TODO confirm.
pdf(qq("@{OUTPUT_DIR}/plots/sig_cr_heatmap_annotation_barplots_fdr_@{cutoff}_methdiff_@{meandiff}.pdf"), width = 20, height = 12)
par(mfrow = c(3, 5), mar = c(4, 4, 4, 1))
# Per-slice mean methylation for each subgroup; `split` is the row-slice
# assignment (assumed to have 8 levels, matching the 1:8 x-axis below).
x1 = tapply(rowMeans(meth_mat[, paste0("mean_meth_", sample_id_subgroup1)]), split, mean)
x2 = tapply(rowMeans(meth_mat[, paste0("mean_meth_", sample_id_subgroup2)]), split, mean)
# Line plot of mean methylation per slice: solid = subgroup1, dashed = subgroup2,
# point fill encodes the methylation level on a blue-white-red scale.
plot(1:8, x1, ylim = c(0, 1), type = "l", main = "mean methylation", axes = FALSE, ylab = "mean methylation")
points(1:8, x1, cex = 1.5, bg = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))(x1), pch = 21)
lines(1:8, x2, lty = 2)
points(1:8, x2, cex = 1.5, bg = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))(x2), pch = 21)
axis(side = 1, at = 1:8, labels = names(x1))
axis(side = 2)
# One barplot per annotation track; tracks with uniformly small mean
# overlap (<= 0.05 in every slice) are skipped.
for(i in 1:ncol(overlap_mat_0)) {
x = tapply(overlap_mat_0[, i], split, mean)
if(max(abs(x)) > 0.05) {
barplot(x, main = colnames(overlap_mat_0)[i], col = "orange", ylim = c(0, 1))
}
}
# Signed overlap tracks: per slice, split the mean into its negative part
# (green) and positive part (red), stacked via `offset`.
for(i in 1:ncol(overlap_mat_diff)) {
x = tapply(overlap_mat_diff[, i], split, function(x) {
c(sum(x[x < 0])/length(x), sum(x[x > 0]/length(x)))
})
x = do.call("cbind", x)
if(max(abs(x)) > 0.05) {
barplot(abs(x), main = colnames(overlap_mat_diff)[i], col = c("green", "red"),
ylim = c(-0.3, 0.3), offset = x[1, ])
}
}
# Distance to the nearest TSS per slice.
boxplot(split(abs(foo_cr2$gene_tss_dist), split), outline = FALSE, main = "dist2tss")
# Per-slice fraction of regions annotated as tss/gene/intergenic (`ga`
# is assumed to be that categorical annotation — TODO confirm).
m = do.call("cbind", tapply(ga, split, table))
m = apply(m, 2, function(x) x/sum(x))
barplot(m, main = "annotation to genes", col = c("tss" = "red", "gene" = "blue", "intergenic" = "green")[rownames(m)])
dev.off()
# Commented-out batch submission of this script over the cutoff/meandiff grid.
# for(cutoff in c(0.1, 0.05, 0.01)) {
# for(meandiff in c(0, 0.1, 0.2, 0.3)) {
# cmd = qq("Rscript-3.1.2 /icgc/dkfzlsdf/analysis/B080/guz/roadmap_analysis/re_analysis/scripts/03.correlated_regions_sig_heatmap.R --cutoff @{cutoff} --meandiff @{meandiff} --no-rerun")
# cmd = qq("perl /home/guz/project/development/ngspipeline2/qsub_single_line.pl '-l walltime=30:00:00,mem=10G -N correlated_regions_sig_heatmap_fdr_@{cutoff}_meandiff_@{meandiff}' '@{cmd}'")
# system(cmd)
# }
# }
|
26b69072cf8cf980997d9eede2e4431aa0adfb57
|
a07d07ddc490dcbcef631a3aae46fe8cba0dc21b
|
/My Part/Power Comparison/compare_code.R
|
bc46684063b8cbe6f82fbef8dd02a8c3a0ec8d2d
|
[] |
no_license
|
Sweata1501/nonparametric_project
|
20a5e1df7b81bd247316869cccfe6c649eadfa79
|
3fff9660b0038ce5f097e56dd78f625f6806bca8
|
refs/heads/master
| 2021-04-18T10:13:33.207465
| 2020-04-13T15:59:06
| 2020-04-13T15:59:06
| 249,533,826
| 0
| 0
| null | 2020-03-31T05:19:59
| 2020-03-23T20:12:26
|
R
|
UTF-8
|
R
| false
| false
| 691
|
r
|
compare_code.R
|
# Compare the power curves of the parametric F test against the
# nonparametric Capon and Savage tests over five distributions.
library(ggplot2)
library(dplyr)
library(reshape2)

# Label vectors aligned with the column-major row order that melt()
# produces from cbind(Parametric, Capon, Savage):
# 1000 grid points x 5 distributions per statistic, 3 statistics.
dist_labels <- rep(c("Exponential", "Gamma", "Logistic", "Normal", "Weibull"),
                   each = 1000, times = 3)
stat_labels <- rep(c("F", "Capon", "Savage"), each = 5000)
theta_grid  <- rep(seq(1, 5, length = 1000), times = 15)

# `Parametric`, `Capon` and `Savage` are expected in the workspace
# (power values computed elsewhere).
power_long <- cbind(Parametric, Capon, Savage) %>%
  melt() %>%
  select(-variable) %>%
  mutate(Distribution = dist_labels,
         Statistic = stat_labels,
         theta = theta_grid)

# One facet per distribution, one colored curve per statistic.
ggplot(power_long, aes(x = theta, y = value, col = Statistic)) +
  geom_line() +
  facet_wrap(~Distribution, ncol = 3, scales = "free") +
  labs(title = "Comparison of power graph of F, Capon and Savage statistic",
       y = "Power") +
  theme(plot.title = element_text(hjust = 0.5))
|
22ae4a53b9461d0236f0abac574e236aeeb3ce89
|
bb3e734d36602499409c51f70cf30ea016898e39
|
/plot3.R
|
c6ec7188bee581b34a38441be6ed3c2261c4955f
|
[] |
no_license
|
CotrimJR/ExData_Plotting1
|
d853f0145f77dfd928e1befeba906356725b4f6b
|
58969fe17541ea8f079d57375da5def2d3bbd5f8
|
refs/heads/master
| 2021-01-19T07:16:51.723809
| 2017-04-07T23:08:58
| 2017-04-07T23:08:58
| 87,533,875
| 0
| 0
| null | 2017-04-07T10:19:17
| 2017-04-07T10:19:16
| null |
UTF-8
|
R
| false
| false
| 3,351
|
r
|
plot3.R
|
# ============================================================================================
# File: plot3.R
# ============================================================================================
# Reference: UC Irvine Machine Learning Repository,
# Dataset : https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Title : Individual household electric power consumption Data Set
# Variables:
# - Date: Date in format dd/mm/yyyy
# - Time: time in format hh:mm:ss
# - Global_active_power: household global minute-averaged active power (in kilowatt)
# - Global_reactive_power: household global minute-averaged reactive power (in kilowatt)
# - Voltage: minute-averaged voltage (in volt)
# - Global_intensity: household global minute-averaged current intensity (in ampere)
# - Sub_metering_1: energy sub-metering No. 1 (in watt-hour of active energy). It corresponds to the kitchen, containing mainly a dishwasher, an oven and a microwave (hot plates are not electric but gas powered).
# - Sub_metering_2: energy sub-metering No. 2 (in watt-hour of active energy). It corresponds to the laundry room, containing a washing-machine, a tumble-drier, a refrigerator and a light.
# - Sub_metering_3: energy sub-metering No. 3 (in watt-hour of active energy). It corresponds to an electric water-heater and an air-conditioner.
# ============================================================================================
# 1. Loading Data
# ============================================================================================
# Download the dataset once and cache the zip; later runs reuse it.
filename <- "household_power_consumption.zip"
if (!file.exists(filename)) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, destfile = filename)
}
# Read straight from the cached zip (reusing `filename` instead of a
# duplicated literal, so the two can never diverge) and keep only the
# two target days. "?" marks missing readings in this dataset.
data <- subset(
  read.csv(unz(filename, "household_power_consumption.txt"),
           header = TRUE,
           sep = ";",
           dec = ".",
           na.strings = c("?")),
  Date == "1/2/2007" | Date == "2/2/2007")
# ============================================================================================
# 2. Data Conversion
# ============================================================================================
# Date and time conversion. Wrapping strptime() in as.POSIXct() stores a
# compact POSIXct column instead of a list-based POSIXlt inside the
# data frame (POSIXlt columns in data frames are an R anti-pattern).
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$Time <- as.POSIXct(strptime(paste(data$Date, data$Time, sep = " "),
                                 format = "%Y-%m-%d %H:%M:%S"))
# ============================================================================================
# 3. Processing / Results
# ============================================================================================
# Render the three sub-metering series into plot3.png (480x480).
png(filename = "plot3.png", width = 480, height = 480)
# Empty frame first, then one line per sub-meter.
plot(x = data$Time,
     y = data$Sub_metering_1,
     xlab = "",
     ylab = "Energy sub metering",
     type = "n")
points(data$Time, data$Sub_metering_1, col = "black", type = "l")
points(data$Time, data$Sub_metering_2, col = "red", type = "l")
points(data$Time, data$Sub_metering_3, col = "blue", type = "l")
legend("topright",
       lty = c(1, 1),
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Close the device and save the file
dev.off()
|
041c38dc29f517a19db2a2c2cc4db5ea5914a654
|
e26420970229a0c55ec092168797ed6e42f5708f
|
/tests/testthat/test_preprocess_sample_colors.R
|
75b7c5f3b26e91ef77db4f9fead009a550591ff6
|
[
"MIT"
] |
permissive
|
kcha/psiplot
|
cd26b3d39301d20d82e453e4882b60a214afb600
|
7c840f69f7b321b8690e9f3f03191a0cd699a371
|
refs/heads/master
| 2022-06-03T02:54:17.403767
| 2022-03-18T15:30:05
| 2022-03-18T15:30:05
| 27,505,866
| 2
| 1
|
MIT
| 2022-03-18T15:30:06
| 2014-12-03T20:20:07
|
R
|
UTF-8
|
R
| false
| false
| 1,869
|
r
|
test_preprocess_sample_colors.R
|
context("Test re-ordering of event data using configuration file")

# Minimal two-sample configuration shared by all tests below.
config <- data.frame(
  Order = c(1, 2),
  SampleName = c("Sample1", "Sample4"),
  GroupName = c("ESC", "Neural"),
  RColorCode = c("red", "blue")
)

# Pre-formatted inputs: PSI (event) table and cRPKM (expression) table.
formatted_psi <- format_table(psi)
formatted_crpkm <- format_table(crpkm, expr = TRUE)

test_that("Only samples in config are retained", {
  res <- preprocess_sample_colors(formatted_psi, config = config)
  expect_equal(ncol(res$data), nrow(config))
})

context("Test re-ordering of expression data using configuration file")

test_that("Quality scores is NULL", {
  res <- preprocess_sample_colors(formatted_crpkm, config = config, expr = TRUE)
  expect_true(is.null(res$qual))
  expect_equal(ncol(res$data), nrow(config))
  expect_equal(nrow(res$data), nrow(formatted_crpkm))
})

test_that("Quality scores is NULL when config is not used", {
  res <- preprocess_sample_colors(formatted_crpkm, config = NULL, expr = TRUE)
  expect_true(is.null(res$qual))
  expect_equal(ncol(res$data), 8)
  expect_equal(nrow(res$data), nrow(formatted_crpkm))
})

context("Test absence of optional columns")

test_that("Natural order is used with no Order column is specified", {
  cfg <- config
  cfg$Order <- NULL
  res <- preprocess_sample_colors(formatted_psi, cfg)
  expect_true(all(res$sample_order$SampleOrder == 1:nrow(cfg)), "Using natural order")
  expect_true("Order" %in% colnames(res$config))
})

test_that("Default colors are used if RColorCode is missing", {
  cfg <- config
  cfg$RColorCode <- NULL
  res <- preprocess_sample_colors(formatted_psi, cfg)
  expect_true("RColorCode" %in% colnames(res$config))
})

context("Test that input PSI table is formattted correctly")

test_that("Error is returned if first column is not ID", {
  expect_error(preprocess_sample_colors(psi, config))
})
|
7271db13b63594f209ff07358dd5e7b03bc0c75a
|
16cc0f4bb42a2081e38f358cc202dcd8e9f30faf
|
/R - Básico Mapas/Script_R_Mapas.R
|
960dea15b8bcd89546858c26f8cadaecc082e370
|
[] |
no_license
|
Prof-Rodrigo-Silva/ScriptR
|
d71d76998a391656ffde4ca04111334df6775346
|
67bc2e7ba7bad07597849d1ecaf342632a913777
|
refs/heads/master
| 2022-07-31T15:15:07.568838
| 2022-07-15T17:52:07
| 2022-07-15T17:52:07
| 181,120,450
| 13
| 16
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 14,131
|
r
|
Script_R_Mapas.R
|
#####################################################################################
# Introduction
# Contents:
# 1. Basic maps
# 2. Maps with shapefile + ggplot2
# 3. Maps with the leaflet package
# 4. Maps with the Google API (time permitting)
#####################################################################################
# 1. Basic maps
# NOTE(review): install.packages() calls run on every execution of the
# script; in a teaching script this is deliberate, but they are normally
# run once interactively.
install.packages("maps")
library(maps) # simple maps, axes, scale bar, cities
install.packages("mapproj")
library(mapproj)
install.packages("rgdal", dependencies = T, force = T)
library(rgdal)
# World outline, then Brazil only, with axes and a scale bar.
map("world")
par(mar=c(1,1,1,1))
map("world","Brazil")
map.axes()
#map.scale(ratio = F, cex = 0.5)
map(,,add = T)
map.scale(x=-47,y=-30, ratio = F, cex = .5)
# Filled map with dashed reference lines at a fixed lat/long
# (coordinates appear to be Bagé, RS — TODO confirm).
map("world","Brazil",fill = T, col = "lightgray")
map.axes()
abline(h=-31.332952, lty = 2, lwd = 1)
abline(v=-54.099830, lty = 2, lwd = 1)
# Zoom into the southern region via xlim/ylim.
map("world","Brazil",fill = T, col = "lightgray",xlim = c(-58,-49),
ylim = c(-35,-27))
par(mar=c(1,1,1,1))
m = map("world","Brazil",fill = T, col = "lightgray", plot = T)
#map.grid(m,col = "grey50", font = 1, cex=0.7, pretty = T)
# Overlay a 5x5 graticule and label cities with population >= 2,000,000.
map.grid(m,nx = 5, ny = 5, col = "grey50", font = 1, cex=0.7, pretty = T)
map.cities(country = "Brazil", minpop = 2000000, pch = 20,cex = 1)
#install.packages("RgoogleMaps")
library(RgoogleMaps)
# Fetch a static terrain tile centered on `center` and save it as a PNG.
center = c(-31.335785, -54.095573)
zoom = 15
mapa.bage = GetMap(center = center, zoom = zoom, maptype = "terrain",
destfile = "mapa_bage.png")
######################################################################
# 2. Maps with shapefile + ggplot2
# Cartographic mesh = https://mapas.ibge.gov.br/bases-e-referenciais/bases-cartograficas/malhas-digitais
# Data file = http://datasus.saude.gov.br/informacoes-de-saude-tabnet/
library(ggplot2)
library(rgdal)
# Read the RS (Rio Grande do Sul) municipality shapefile; keep only the
# first 6 digits of the geocode so it matches the DATASUS/TABNET codes.
rs = readOGR("C:/Users/fermat/Documents/ScriptR/R - Básico Mapas","43MUE250GC_SIR")
head(rs@data)
rs$CD_GEOCMU = substr(rs$CD_GEOCMU,1,6)
# Population table exported from TABNET, chosen interactively.
populacao = read.csv2(file.choose(), header = T, sep = ",")
head(populacao)
populacao = na.omit(populacao)
names(populacao) = c("Municipio", "Populacao")
head(rs@data)
# Municipality geocode = first 6 characters of the Municipio field.
populacao$CD_GEOCMU = substr(populacao$Municipio,1,6)
head(populacao)
dim(populacao)
dim(rs@data)
head(rs@data)
# Sort both tables by geocode so their rows line up.
populacao = populacao[order(populacao$CD_GEOCMU),]
malhaRS = rs@data[order(rs@data$CD_GEOCMU),]
head(malhaRS)
dim(populacao)
dim(malhaRS)
# Drop the first two rows — presumably non-municipality entries
# (e.g. the state total); TODO confirm against the data.
linhas = c(1,2)
malhaRS = malhaRS[-linhas,]
dim(populacao)
dim(malhaRS)
# Tip
#malhaRS = subset(malhaRS,CD_GEOCMU!="430000")
head(malhaRS)
head(populacao)
rs2 = merge(malhaRS,populacao)
head(rs2)
#install.packages("ggplot2", dependencies = T)
library(ggplot2)
#install.packages("rgeos",dependencies = T)
library(rgeos)
#install.packages("gpclib", type="source")
library(gpclib)
#install.packages("maptools")
library(maptools)
head(rs)
# Flatten the spatial polygons into a plain data frame for ggplot,
# dropping the state-level id "430000".
rs.rsf = fortify(rs, region = "CD_GEOCMU")
head(rs.rsf)
rs.rsf = subset(rs.rsf,id!="430000")
rs.rsf = merge(rs.rsf, rs@data, by.x = "id", by.y = "CD_GEOCMU")
# Bin population into discrete classes for the choropleth legend.
rs2$PopulacaoCat = cut(rs2$Populacao, breaks = c(0,20000,40000,60000,80000,100000,2000000),
labels = c('0-20000',
'20000-40000',
'40000-60000',
'60000-80000',
'80000-100000',
'+100000'),
include.lowest = T)
head(rs2)
#rm(rs2)
#rm(rs.rsf)
rs.rsf = merge(rs.rsf, rs2, by.x = "id", by.y = "CD_GEOCMU")
head(rs.rsf)
#names(rs2)[1]=c("id")
#install.packages("RColorBrewer",dependencies = T)
library(RColorBrewer)
# Choropleth of population by municipality.
ggplot(rs.rsf, aes(rs.rsf$long,rs.rsf$lat, group=rs.rsf$group,fill=rs.rsf$PopulacaoCat)) +
geom_polygon(colour='green') + coord_equal() + ggtitle("População") +
labs(x = "Longitude", y = "Latitude", fill="População") +
scale_fill_manual(values = brewer.pal(9,'Reds')[4:9]) +
theme(plot.title = element_text(size = rel(1), lineheight = 0.9, face = "bold",
colour = 'blue'))
###########################################################################################
# 2.1 Maps with shapefile + ggplot (births/deaths version)
library(ggplot2)
library(rgdal)
rs = readOGR("C:/Users/fermat/Documents/ScriptR/R - Básico Mapas","43MUE250GC_SIR")
head(rs@data)
rs$CD_GEOCMU = substr(rs$CD_GEOCMU,1,6)
# Import the TABNET tables (files chosen interactively)!
populacao = read.csv2(file.choose(),header = T, sep = ",")
nascimentos = read.csv2(file.choose(),header = T, sep = ",")
obitos = read.csv2(file.choose(),header = T, sep = ",")
head(populacao)
head(nascimentos)
head(obitos)
populacao = na.omit(populacao)
nascimentos = na.omit(nascimentos)
obitos = na.omit(obitos)
names(populacao) = c("Municipio", "Populacao")
names(nascimentos) = c("Municipio", "Nascimentos")
names(obitos) = c("Municipio", "Obitos")
head(populacao)
head(nascimentos)
head(obitos)
# Municipality geocode = first 6 characters of the Municipio field.
populacao$CD_GEOCMU = substr(populacao$Municipio,1,6)
nascimentos$CD_GEOCMU = substr(nascimentos$Municipio,1,6)
obitos$CD_GEOCMU = substr(obitos$Municipio,1,6)
head(populacao)
head(nascimentos)
head(obitos)
head(rs@data)
#rs@data
dim(populacao)
dim(nascimentos)
dim(obitos)
dim(rs@data)
# Sort all tables by the geocode so rows align across them.
populacao = populacao[order(populacao$CD_GEOCMU),]
nascimentos = nascimentos[order(nascimentos$CD_GEOCMU),]
obitos = obitos[order(obitos$CD_GEOCMU),]
malhaRS = rs@data[order(rs@data$CD_GEOCMU),]
dim(malhaRS)
head(malhaRS)
linhas = c(1,2)
malhaRS = malhaRS[-linhas,]
dim(malhaRS)
head(malhaRS)
# Assemble one table with population, births and deaths per municipality.
# NOTE(review): column-wise assignment assumes all three tables cover the
# same municipalities in the same order after sorting — confirm.
dados = populacao
dados$Nascimentos = nascimentos$Nascimentos
dados$Obitos = obitos$Obitos
rs2 = merge(malhaRS,dados)
head(rs2)
# Births/deaths as a percentage of the population.
rs2$PercNascimentos = (rs2$Nascimentos*100)/rs2$Populacao
rs2$PercObitos = (rs2$Obitos*100)/rs2$Populacao
head(rs2)
rs.rsf = fortify(rs,region = "CD_GEOCMU")
head(rs.rsf)
rs.rsf = subset(rs.rsf,id!="430000")
head(rs.rsf)
rs.rsf = merge(rs.rsf, rs@data, by.x="id", by.y = "CD_GEOCMU")
head(rs.rsf)
head(rs2)
# Discretize counts and percentages into legend classes.
rs2$NascimentosCat = cut(rs2$Nascimentos, breaks = c(0,200,400,600,800,1000,20000),
labels = c('0-200',
'200-400',
'400-600',
'600-800',
'800-1000',
'+1000'),
include.lowest = T)
rs2$ObitosCat = cut(rs2$Obitos, breaks = c(0,200,400,600,800,1000,12000),
labels = c('0-200',
'200-400',
'400-600',
'600-800',
'800-1000',
'+1000'),
include.lowest = T)
# NOTE(review): the 7 breaks below define intervals 0-0.3, 0.3-0.6,
# 0.6-0.9, 0.9-1.2, 1.2-1.5, 1.5-1.8, 1.8-2.2, but the labels skip
# '0.9-1.2', so from the 4th class onward the labels do not describe
# the interval they are attached to — fix the label vector.
rs2$PercNascCat = cut(rs2$PercNascimentos, breaks = c(0,0.3,0.6,0.9,1.2,1.5,1.8,
2.2),
labels = c('0-0.3',
'0.3-0.6',
'0.6-0.9',
'1.2-1.5',
'1.5-1.8',
'1.8-2',
'+2'),
include.lowest = T)
rs2$PercObitosCat = cut(rs2$PercObitos, breaks = c(0,0.2,0.4,0.6,0.8,1.0,1.2,
1.6),
labels = c('0-0.2',
'0.2-0.4',
'0.4-0.6',
'0.6-0.8',
'0.8-1.0',
'1.0-1.2',
'+1.2'),
include.lowest = T)
head(rs2)
#rm(rs2)
#rm(rs.rsf)
rs.rsf = merge(rs.rsf, rs2, by.x = "id", by.y = "CD_GEOCMU")
head(rs.rsf)
#names(rs2)[1]=c("id")
#install.packages("RColorBrewer",dependencies = T)
library(RColorBrewer)
library(ggplot2)
# Four choropleths: births, deaths, % births, % deaths.
ggplot(rs.rsf, aes(rs.rsf$long,rs.rsf$lat, group=rs.rsf$group,fill=rs.rsf$NascimentosCat)) +
geom_polygon(colour='red') + coord_equal() + ggtitle("Nascimentos") +
labs(x = "Longitude", y = "Latitude", fill="Nascimentos") +
scale_fill_manual(values = brewer.pal(9,'Greens')[4:9]) +
theme(plot.title = element_text(size = rel(1), lineheight = 0.9, face = "bold",
colour = 'blue'))
ggplot(rs.rsf, aes(rs.rsf$long,rs.rsf$lat, group=rs.rsf$group,fill=rs.rsf$ObitosCat)) +
geom_polygon(colour='red') + coord_equal() + ggtitle("Obitos") +
labs(x = "Longitude", y = "Latitude", fill="Obitos") +
scale_fill_manual(values = brewer.pal(9,'Purples')[4:9]) +
theme(plot.title = element_text(size = rel(1), lineheight = 0.9, face = "bold",
colour = 'blue'))
ggplot(rs.rsf, aes(rs.rsf$long,rs.rsf$lat, group=rs.rsf$group,fill=rs.rsf$PercNascCat)) +
geom_polygon(colour='green') + coord_equal() + ggtitle("Percentual Nascimentos") +
labs(x = "Longitude", y = "Latitude", fill="Perc. Nascimentos") +
scale_fill_manual(values = brewer.pal(9,'Oranges')[3:9]) +
theme(plot.title = element_text(size = rel(1), lineheight = 0.9, face = "bold",
colour = 'blue'))
ggplot(rs.rsf, aes(rs.rsf$long,rs.rsf$lat, group=rs.rsf$group,fill=rs.rsf$PercObitosCat)) +
geom_polygon(colour='green') + coord_equal() + ggtitle("Percentual Obitos") +
labs(x = "Longitude", y = "Latitude", fill="Perc. Obitos") +
scale_fill_manual(values = brewer.pal(9,'OrRd')[3:9]) +
theme(plot.title = element_text(size = rel(1), lineheight = 0.9, face = "bold",
colour = 'blue'))
###########################################################################################
# 3. Maps with leaflet
#install.packages("dplyr")
library(dplyr)
#install.packages("ggplot2")
library(ggplot2)
#install.packages("rjson")
library(rjson)
#install.packages("jsonlite", dependencies = T)
library(jsonlite)
#install.packages("leaflet",dependencies = T)
library(leaflet)
#install.packages("RCurl")
library(RCurl)
# https://rstudio.github.io/leaflet/
# Empty base map, then a layered example with third-party tile providers.
leaflet() %>% addTiles()
leaflet() %>% addTiles() %>% addProviderTiles(providers$MtbMap) %>%
addProviderTiles(providers$Stamen.TonerLines,
options = providerTileOptions(opacity = 0.50)) %>%
addProviderTiles(providers$Stamen.TonerLabels,
options = providerTileOptions(opacity = 0.90))
# Single marker at a fixed coordinate.
lat = -31.333019
long = -54.100074
leaflet() %>% addTiles() %>% addMarkers(long,lat)
leaflet() %>% addTiles() %>% addCircleMarkers(long,lat)
# Several points on the map (`pontosMapa` is expected in the workspace,
# presumably a data frame with long/lat/ponto/tipo columns — confirm).
p = pontosMapa
leaflet() %>% addTiles() %>% addMarkers(p$long,p$lat)
class(p$lat)
# Coordinates arrive as text; coerce to numeric before plotting.
p$lat = as.numeric(p$lat)
p$long = as.numeric(p$long)
leaflet() %>% addTiles() %>% addMarkers(p$long,p$lat)
leaflet() %>% addTiles() %>% addMarkers(p$long,p$lat, popup = p$ponto)
leaflet() %>% addTiles() %>% addCircleMarkers(p$long,p$lat)
### Changing marker colors
#install.packages("dplyr")
library(dplyr)
#install.packages("ggplot2")
library(ggplot2)
#install.packages("rjson")
library(rjson)
#install.packages("jsonlite", dependencies = T)
library(jsonlite)
#install.packages("leaflet",dependencies = T)
library(leaflet)
#install.packages("RCurl")
library(RCurl)
p = pontosMapa
class(p$long)
p$lat = as.numeric(p$lat)
p$long = as.numeric(p$long)
# Map each point's `tipo` code to a marker color.
# NOTE(review): the loop grows `cor` element by element; this could be
# vectorized, but is left as-is for teaching purposes.
cor = c()
nrow(p)
for(i in 1 : nrow(p)){
if(p$tipo[i] == 1){
cor[i] = "green"
}else if(p$tipo[i] == 2){
cor[i] = "red"
}else if(p$tipo[i] == 3){
cor[i] = "pink"
}else{
cor[i] = "blue"
}
}
cor
icone = awesomeIcons(icon = "pin",library = "ion", markerColor = cor)
leaflet() %>% addTiles() %>% addAwesomeMarkers(p$long,p$lat, icon = icone,
popup = p$ponto, label = p$ponto)
# Clusters: nearby markers collapse into numbered cluster bubbles.
icone = awesomeIcons(icon=" ",markerColor = cor)
leaflet() %>% addTiles() %>% addAwesomeMarkers(p$long,p$lat, icon = icone,
popup = p$ponto, label = p$ponto,
clusterOptions = markerClusterOptions())
# Adjusting circle markers: larger radius for tipo == 1.
leaflet()%>% addTiles() %>% addCircleMarkers(p$long,p$lat, color = cor,
label = p$ponto,
stroke = T,
fillOpacity = 0.5,
radius = ifelse(p$tipo == 1, 10, 6)
)
# Adding shapes - circles
leaflet()%>% addTiles()
lat = -31.333019
long = -54.100074
leaflet()%>% addTiles() %>% addCircles(long,lat)
# `populacaoFronteira` is expected in the workspace; circle area is
# scaled by the square root of the estimated population.
pf = populacaoFronteira
class(pf$lat)
class(pf$log)
class(pf$Populacao_estimada)
pf$pop = as.character(pf$Populacao_estimada)
leaflet()%>% addTiles() %>% addCircles(lng = pf$log, lat = pf$lat,
radius = sqrt(pf$Populacao_estimada)*20,
stroke = F,fillOpacity = 0.5,
label = pf$pop)
# Adding shapes - rectangles (two opposite corners)
lat = -31.328593
lng = -54.101329
lat1 = -31.327218
lng1 = -54.100138
leaflet()%>% addTiles() %>% addRectangles(lng,lat,lng1,lat1,
fillOpacity = .5)
###########################################################################################
# 4. Maps with the Google API (NOT YET RECORDED)
# Create a new project at: https://console.cloud.google.com
# Generate an activation key, key: KEY
# Install the ggmap R package and set the API key
# in R by running the commands below.
# NOTE(review): this section mixes CRAN install, removal and a GitHub
# install of ggmap; it is a scratchpad, not a runnable sequence.
install.packages("ggmap")
remove.packages("ggmap")
if(!requireNamespace("devtools")) install.packages("devtools")
devtools::install_github("dkahle/ggmap", ref = "tidyup", force=TRUE)
#Load the library
library("ggmap")
install.packages("devtools")
library(ggmap)
library(ggplot2)
library(dplyr)
# API key ("Key" is a placeholder — replace with a real key).
ggmap::register_google(key = "Key")
#Notes: If you get still have a failure then I suggest to restart R and run the library and register google commands again.
# Fetch a static Google map centered on (lon, lat).
center = c(-54.106141,-31.331287)
get_googlemap(-54.106141,-31.331287)
|
dc143e090cfc5e958578c9dd12e1c82582111dac
|
a411bbff2c1718c7d1823155138ef10a0c27da89
|
/iLCM/global/match_language.R
|
27b9c331d942bb1a38c91b0c0ff2f36377a6e998
|
[] |
permissive
|
ChristianKahmann/data_science_image
|
5a0e805ca2cc2d3d8d99ab652dffb4b470dc102f
|
eb06582d6eaa521a59193ffbfc55c0a0a3eaa886
|
refs/heads/master
| 2020-10-01T13:53:17.130831
| 2020-01-14T12:48:00
| 2020-01-14T12:48:00
| 227,551,494
| 0
| 0
|
MIT
| 2019-12-12T07:58:15
| 2019-12-12T07:58:14
| null |
UTF-8
|
R
| false
| false
| 431
|
r
|
match_language.R
|
#' Map a two-letter language abbreviation to the full language name
#' used by udpipe models.
#'
#' @param lang_abbr Character scalar: one of "de", "en", "es", "fr",
#'   "it", "nl", "pt", "el".
#' @return The corresponding full language name (e.g. "german").
#'   Errors with a clear message for unsupported abbreviations
#'   (the original chained ifs failed with the cryptic
#'   "object 'lang' not found" instead).
match_language_udpipe <- function(lang_abbr) {
  lang <- switch(lang_abbr,
    de = "german",
    en = "english",
    es = "spanish",
    fr = "french",
    it = "italian",
    nl = "dutch",
    # NOTE(review): "portugese" looks like a typo for "portuguese";
    # kept as-is to preserve the existing return value — confirm
    # against the udpipe model names before changing.
    pt = "portugese",
    el = "greek"
  )
  if (is.null(lang)) {
    stop("Unsupported language abbreviation: ", lang_abbr, call. = FALSE)
  }
  lang
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.