blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bb847a1effb4b21a416874ccb62b00d288f6d873
|
05c6c5192018fa59f713e894dc3c3cf0d588036f
|
/man/fst.one.plink.Rd
|
68d9efa5736d5b8740217ad056dec22d86975cc3
|
[] |
no_license
|
dyerlab/gwscaR
|
029dd73185098375d6f136757cd4cf4855c8f605
|
226fef34caeceff472af8894ff5062a58ab77e5e
|
refs/heads/master
| 2023-03-22T08:30:42.405029
| 2019-11-13T02:47:17
| 2019-11-13T02:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,081
|
rd
|
fst.one.plink.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gwscaR_fsts.R
\name{fst.one.plink}
\alias{fst.one.plink}
\title{Calculate pairwise fsts from a dataset in ped format}
\usage{
fst.one.plink(raw, group1, group2, cov.thresh = 0.2, loc_names = NULL)
}
\arguments{
\item{raw}{A data.frame with data in ped format}
\item{group1}{A list with the individuals in group 1}
\item{group2}{A list with the individuals in group 2}
\item{cov.thresh}{A threshold for the number of individuals in the populations (default is 0.2)}
\item{loc_names}{Locus names to be included in the output (optional)}
}
\value{
fst.dat A data.frame with columns:
Locus Name = the locus name
Hs1 = expected heterozygosity in pop 1
Hs2 = expected heterozygosity in pop 2
Hs = expected heterozygosity within populations
Ht = expected heterozygosity among populations
Fst = Fst
NumAlleles = number of alleles at the locus
Num1 = the number of individuals in pop 1
Num2 = the number of individuals in pop 2
}
\description{
Calculate pairwise fsts from a dataset in ped format
}
|
6a6d4f18176c3564394b0478c8500de6139e54d1
|
9de3b2b8b28f89cfb13723b6be99f157fc13a313
|
/3_Analysis/1_Social/1_Experimental_analyses/Causal_inference/laavan example.R
|
3bf040de1116d7171db29d1c353240bf99436215
|
[] |
no_license
|
WWF-ConsEvidence/MPAMystery
|
0e730dd4d0e39e6c44b36d5f9244a0bfa0ba319b
|
6201c07950206a4eb92531ff5ebb9a30c4ec2de9
|
refs/heads/master
| 2023-06-22T04:39:12.209784
| 2021-07-20T17:53:51
| 2021-07-20T19:34:34
| 84,862,221
| 8
| 1
| null | 2019-07-24T08:21:16
| 2017-03-13T18:43:30
|
R
|
UTF-8
|
R
| false
| false
| 3,356
|
r
|
laavan example.R
|
# Example lavaan SEM workflow: CFA, full SEM, model comparison, and a
# nonlinear (quadratic) term. Requires the lavaan, qgraph and semPlot packages.
library(lavaan)
library(qgraph)
library(semPlot)

# --- Confirmatory factor analysis on the Holzinger-Swineford data ---
data(HolzingerSwineford1939)
HS.model <- 'visual =~ x1 + x2 + x3
textual =~ x4 + x5 + x6
speed =~ x7 + x8 + x9 '
fit <- cfa(HS.model, data = HolzingerSwineford1939)
summary(fit, fit.measures = TRUE)

# --- Full structural equation model on the Political Democracy data ---
data(PoliticalDemocracy)
?PoliticalDemocracy
head(PoliticalDemocracy)
# Alias used by several calls further down (the original script referenced
# an undefined `PD`).
PD <- PoliticalDemocracy
model <-
'# measurement model
ind60 =~ x1 + x2 + x3  # indicators that attempt to measure ind
dem60 =~ y1 + y2 + y3 + y4
dem65 =~ y5 + y6 + y7 + y8
# regressions
dem60 ~ ind60
dem65 ~ ind60 + dem60
# residual correlations
y1 ~~ y5  # these variables are somehow correlated with each other e.g.
y2 ~~ y4 + y6
y3 ~~ y7
y4 ~~ y8
y6 ~~ y8'
# NOTE: annotations inside a lavaan model string must be `#` comments;
# the bare parentheticals in the original broke lavaan's parser.
fit <- sem(model, data = PoliticalDemocracy)
summary(fit, standardized = TRUE, rsq=T)
#Plot it
qgraph(fit,edge.label.cex=1.5,label.cex=1.5)
semPaths(fit, "std",edge.label.cex=1, curvePivot=T, layout ="tree")
# did we miss any obvious relationships? shows you potential correlations that you might have missed
modindices(fit) # gives all possible paths given the data. Look for extremely large variables
#fit your SEM
fit <- sem(model, data = PD)
#summarize results
summary(fit, standardized = TRUE, rsq = T)
##plot results using semPaths function in qgraph
semPaths(fit, "std", edge.label.cex = 0.5, curvePivot = TRUE, layout = "tree")
##check to see if you missed anything. High mi values suggest that there is a path that you missed.
modindices(fit)
## looks good
##can also look at variance tables
vartable(fit)
## sometimes you get warnings about the scale of your variables
#Warning message:
# In getDataFull(data = data, group = group, group.label = group.label, :
# lavaan WARNING: some observed variances are (at least) a factor 100 times larger than others; please rescale
# in this case, all you have to do to make this error go away is rescale variables

# --- Model comparison ---
#you can compare alternative pathway models using AIC, BIC, etc:
#create second alternative model
names(PD)
model2 <- '
# measurement model
ind60 =~ x1 + x2 + x3
dem60 =~ y1 + y2 + y3 + y4
dem65 =~ y5 + y6 + y7 + y8
# regressions
dem60 ~ ind60
dem65 ~ dem60
#took out ind60 from regression
# residual correlations
y1 ~~ y5
y2 ~~ y4 + y6
y3 ~~ y7
y4 ~~ y8
y6 ~~ y8
'
fit2 <- sem(model2, data = PD)
summary(fit2)
AIC(fit, fit2)

## what about nonlinear data?
# Set your working directory (uncomment and adjust the path to your machine;
# the original line was a bare string with the setwd() call missing)
# setwd("~/Desktop/sem workshop")
# Load data and name the file "k.dat"
k.dat<-read.csv("./Keeley_rawdata_select4.csv")
# Examine contents of keeley data file
names(k.dat)
head(k.dat)
# Write lavaan code for this single equation model
mod <- '
rich ~ cover
cover ~ firesev
'
# add a squared term to model a quadratic effect of cover on richness
k.dat$cov2<-k.dat$cover^2
mod2<- '
rich ~ cover + cov2
cover ~ firesev
cover ~~ cov2
cov2 ~~ firesev
'
# Fit the model (i.e. est. parameters)
mod1.fit <- sem(mod, data=k.dat)
mod2.fit<- sem(mod2, data=k.dat,fixed.x=FALSE)
#need to rescale data.
vartable(mod1.fit)
# NOTE(review): this rescaling happens AFTER mod1.fit/mod2.fit were fit, so
# the models would need to be refit for it to take effect — confirm intent.
k.dat$rich<-k.dat$rich/100
# Output a summary of the computed results - summary of mod2 suggests that both cover and cover squared can impact
summary(mod1.fit, rsq=T) # rsq=T means output the r-sqr
summary(mod2.fit, rsq=T)
semPaths(mod1.fit, "std", edge.label.cex = 0.5, curvePivot = TRUE, layout = "tree")
semPaths(mod2.fit, "std", edge.label.cex = 0.5, curvePivot = TRUE, layout = "tree")
|
53f3cfb72d4c3c5a60e1da0accf8863f7eaabedd
|
55e8db068fbb5fae93e946b4d94ca7820a8b88b9
|
/man/getHeight.monerod.Rd
|
59c0afa932f6fd685a710dfbbb75a71887679bb6
|
[
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cnxtech/moneRo
|
cc80786ba5b85d21a1aeaaa5d39f2d8c47d770f1
|
f78f82a9714f8dd214e2b556d94615163268c70a
|
refs/heads/master
| 2020-07-02T02:02:34.994096
| 2017-09-24T04:08:43
| 2017-09-24T04:08:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 744
|
rd
|
getHeight.monerod.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monero.R
\name{getHeight.monerod}
\alias{getHeight.monerod}
\alias{getHeight}
\title{getHeight}
\usage{
getHeight.monerod(ip = getOption("monerod.ip", "127.0.0.1"),
port = getOption("monerod.port", 18081))
getHeight(ip = getOption("monerod.ip", "127.0.0.1"),
port = getOption("monerod.port", 18081))
}
\arguments{
\item{ip}{daemon ip address}
\item{port}{daemon port}
}
\value{
height unsigned int; Current length of the longest chain known to
the daemon.
}
\description{
Get the node's current height
}
\examples{
\dontrun{
getHeight()
}
}
\references{
\url{https://getmonero.org/knowledge-base/developer-guides/daemon-rpc#getheight}
}
\author{
Garrett See
}
|
ddbca7f45ace08fd15601b9266bbe4a396782189
|
0b3e9ec793e6eaff23ab3275be43326acf76f5aa
|
/rcppreqtl/man/makeXmatr.Rd
|
e34b7bc8fa1dedf2c99c38036457954c65794f3e
|
[] |
no_license
|
Sun-lab/rcppreqtl
|
ce213847efb32a5f7cab55db435fbe5f07d9b631
|
bc172c3fd5edec8ea5017d95540529613d27402b
|
refs/heads/master
| 2020-03-27T18:36:55.770576
| 2019-05-07T13:52:22
| 2019-05-07T13:52:22
| 146,932,849
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 815
|
rd
|
makeXmatr.Rd
|
\name{makeXmatr}
\alias{makeXmatr}
\title{Create example design matrix for simulations}
\description{
Produces a design matrix for several sample sizes which can be used to generate a simulated dataset
}
\usage{
makeXmatr(ss)
}
\arguments{
\item{ss}{Sample size class: ss=1 implies sample size 32, ss=2 implies sample size 64, etc}
}
\value{
a design matrix of 4 variables
}
\author{ Vasyl Zhabotynsky vasyl@unc.edu }
\seealso{\code{\link{fitsh}}, \code{\link{data}}, \code{\link{simu4}}, \code{\link{simu2}}, \code{\link{readCounts}}.}
\keyword{ methods }
\examples{\dontrun{
# fitting autosome data for a full model with allele-specific counts collected on gene level:
percase = 0.1
dblcnt = 0.2
mn = 100
b0 = 0
b1 = 0
phiNB = .5;
phiBB=phiNB/4
niter = 100
betas = c(3,.2,.05,.5)
ss=2
dep = makeXmatr(ss)
}}
|
414fa9d05cf0a35314de5c858edbcc055c42689b
|
224cdcad8036bc820f778be22097161d3adac513
|
/meetup-events/2meetupIBMBigData/codes/labs-codes/labs-bigr/BigRLab4.R
|
11ffbcf41eeaa3ba69e739ce0ce0db16cbc4ab68
|
[] |
no_license
|
caiomsouza/BigDataScienceEvents
|
4681ecb07a45c9340bbc716288a04a81545aaae1
|
5642a42a84fdb36e4121034d6397162a3523cb2b
|
refs/heads/master
| 2021-01-19T07:43:41.938455
| 2015-07-17T14:27:57
| 2015-07-17T14:27:57
| 39,254,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,897
|
r
|
BigRLab4.R
|
#___________________________________________________________________________#
# IBM Big R lab 4: connect to a BigInsights cluster and browse HDFS.
# This is an interactive tutorial script meant to be run line by line inside
# a pre-built lab VM (hence the setwd()/rm() workspace reset below, which
# would be anti-patterns in reusable code).
#
# Start BigInsights
# From the Desktop, click on the “Start BigInsights” icon. This action will
# start up various BigInsights components, including HDFS and Map/Reduce, on
# your machine. A Terminal window will pop up that will indicate progress.
# Eventually, the Terminal window will disappear. Once done, return to
# RStudio.
#___________________________________________________________________________#
# Back to RStudio
# Return to RStudio session in the browser. When in Rstudio, you can use the
# F11 key to go into "Full Screen" mode. This will maximize your viewing
# area. Use F11 again to go back to the original view.
# From within RStudio, run the following statements.
setwd("~/labs-bigr") # change directory
rm(list = ls()) # clear workspace
#___________________________________________________________________________#
# Load Big R
# Load the Big R package into your R session.
# (bigr is IBM's proprietary BigInsights client package, preinstalled in the
# lab environment — not on CRAN.)
library(bigr)
#___________________________________________________________________________#
# Connect to BigInsights
# Note how one needs to specify the right credentials for this call.
bigr.connect(host="localhost", port=7052,
user="biadmin", password="biadmin")
# Verify that the connection was successful.
is.bigr.connected()
# If you ever lose your connection during these exercises, run the following
# line to reconnect. You can also invoke bigr.connect() as we did above.
bigr.reconnect()
#___________________________________________________________________________#
# Browse files on HDFS
# Once connected, you will be able to browse the HDFS file system and examine
# datasets that have already been loaded onto the cluster.
bigr.listfs() # List files under root "/"
bigr.listfs("/user/biadmin") # List files under /user/biadmin
|
d7c5118de1261662f3096b6c4d7ebd4619c046cb
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diversityForest/inst/testfiles/numSmaller/libFuzzer_numSmaller/numSmaller_valgrind_files/1610037631-test.R
|
5805bf334456dd77492b501dc0df420979a40427
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,852
|
r
|
1610037631-test.R
|
# Auto-generated libFuzzer/valgrind regression input for the compiled
# diversityForest:::numSmaller routine. The extreme doubles / NaN / NA values
# are deliberate fuzzer-found edge cases — do not "clean" them.
testlist <- list(reference = c(NaN, NA, NaN, 3.03578768055078e-111, NaN, NaN, 3.4602157353272e-102, 1.36240696149202e-105, 9126801005.92792, -3.38087344363186e+221, -3.27638994300565e+296, NaN, NaN, NaN, 1.32024272918069e-192, NaN, 8.01967015274577e+168, NaN, -2.93379056240461e+304, NA, 8.81442357825871e-280, 1.30857323773385e+214, 1.37562548371163e+214, 4.87620583420809e-153, 1.66054346436334e+170, 1.58456325028525e+29, 8.96776278941048e-44, -7.15227739922054e-304, 1.84016936281088e-314, 7.54792484964308e+168, 8.09253754683264e+175, 1.69376375318878e+190, NaN, 0, 1.3202428078721e-192, NaN, -1.40507255244384e+295, 2.11381931680943e-307, 0), values = c(NaN, NA, 4.94660802946195e+173, NaN, NaN, NaN, NaN, NaN, -5.82526594016669e+303, -3.38862025531171e+221, 1.40585835498462e-96, -7.3701226551802e+111, NaN, 9.67140655688361e+25, 6.19976595872092e+223, 3.22270204450998e-115, 8.76431881444561e+252, 7.35876460944816e+223, 8.90389806738183e+252, 3.62481397672511e+228, 1.06399915245307e+248, 1.1461546353604e+247, 3.65027901756887e+180, 5.43230922380886e-312, NaN, 3.35069945751673e-308, NaN, 1.25986739689518e-321, 2.02284438507954e-53, -2.99902786516807e-241, -3.38084306397822e+221, 8.23141685644055e+107, 1.89834382482261e-130, NaN, -1.49222440869543e+306, 3.68069868587517e+180, 1.71721740627347e+262, 4.87620583420803e-153, 3.63372088255387e+228, 3.8710423954659e+233, 1.3090586576362e-310, -4.21660066445943e+305, 0, -1.34095520796864e+295, -2.63333821500444e+306, 3.52953696533713e+30, 3.52953696534134e+30, 3.52953696534134e+30, 0, 2.11338725991955e-307, -3.38084318264874e+221, -4.38889639062999e+305, NaN, 5.43230905664669e-312, 0, 0, 0, -5.48612406879369e+303, NaN, NaN, 3.87069807020594e+233, 2.14899131997207e+233, 9.2637000607593e+25, 0))
# Invoke the internal C++ entry point with the fuzzed arguments and dump the
# structure of whatever comes back (the test only checks it doesn't crash).
result <- do.call(diversityForest:::numSmaller,testlist)
str(result)
|
be949621c03e07542025c5cae7d4f09b54df50a2
|
a1445458bcd29f5f04ee367392d0ba0424710b8d
|
/j/ints/intsScript.R
|
ca3ca3adfdeab7edce13784cb0855a5133d6b92f
|
[] |
no_license
|
jamesmungall/jScripts
|
87e81b285f79cd707435aeb24676c515f96dc99f
|
4cfe9121ac1a0511ac87a9b406aab17974da593b
|
refs/heads/master
| 2021-01-10T10:34:36.309682
| 2015-06-02T12:01:39
| 2015-06-02T12:01:39
| 36,662,415
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,421
|
r
|
intsScript.R
|
# Integer Functions
# ==================================
# Start date: 21st July 2012
j.ints.startup=function(showPrints=FALSE){
# Load the package dependencies for the integer helpers in this script.
# sfsmisc provides digitsBase(), used by the digit-conversion functions below.
# Set showPrints=TRUE to echo a short loading message.
# dependent packages: sfsmisc
library(sfsmisc);
if(showPrints){
print("loading j/ints/intsScript.R");
print("dependent packages: sfsmisc");
}
}
#
# Aim: Functions for integer work
# Functions: (all preceeded by j.ints. as a namespace)
# 1. toDigitVector
# 2. intToBin
# 3. binToInt
# 4. digitMap
# 5. unique digits only - identifies numbers which have no repeating digits
j.ints.toDigitVector = function(int){
  # Break an integer into its base-10 digits, most-significant first.
  # Example: 23648 -> c(2,3,6,4,8)
  j.check.isInteger(int);  # validation call; its result is not used here
  digitMatrix = digitsBase(int,10);  # sfsmisc: returns a one-column matrix
  return(digitMatrix[,1]);
}
j.ints.intToBin = function(i){
  # Convert an integer to its binary digit vector, e.g. 7 -> c(1,1,1).
  # Uses digitsBase(i, 2) from the sfsmisc package, which yields a matrix;
  # flattening it gives the digit vector.
  if(!j.check.isInteger(i))return();
  return(as.vector(digitsBase(i,2)));
}
j.ints.binToInt <- function(b) {
  # Convert a binary digit vector (most-significant first) to an integer.
  # Example: c(1,1,1) -> 7
  # Adapted from http://tolstoy.newcastle.edu.au/R/e2/help/07/02/10596.html
  if(!j.check.isBoolean(b)){
    warning("input should contain only 1,0,TRUE,FALSE");
    return();
  }
  bits = rev(as.numeric(b));         # least-significant bit first
  exponents = which(bits==TRUE) - 1; # positions of the set bits
  return(sum(2^exponents))
}
j.ints.digitMap1701 = function(d){
  # Map a single decimal digit (0-9) to an associated count via table lookup.
  # The values (0->6, 1->2, 2->5, 3->5, 4->4, 5->5, 6->6, 7->3, 8->7, 9->6)
  # match seven-segment display segment counts — presumably that is the
  # intent of this map; confirm against the calling puzzle/problem.
  # Returns the string "digit not found" for any input outside 0-9, including
  # NA (which crashed the original `if (d == 0)` chain with a
  # "missing value where TRUE/FALSE needed" error).
  counts = c(6, 2, 5, 5, 4, 5, 6, 3, 7, 6);  # counts for digits 0..9
  if (d %in% 0:9) {
    return(counts[d + 1]);  # +1: R vectors are 1-indexed
  }
  return("digit not found");
}
j.ints.uniqueDigitsVector = function(v){
  # Keep only the elements of v whose decimal digits are all distinct.
  # Example: c(123,1432,222,98723,992) -> c(123,1432,98723)
  keepMask = sapply(v,j.ints.uniqueDigitsElement);
  return(v[keepMask]);
}
j.ints.uniqueDigitsElement= function(e){
  # TRUE when no decimal digit of e repeats.
  # Examples: 11 -> FALSE; 1234567890 -> TRUE
  digits = j.ints.toDigitVector(e);
  return(length(digits) == length(unique(digits)));
}
j.ints.uniqueDigitsVector.test = function(){
  # Smoke test for j.ints.uniqueDigitsVector: of the inputs below, only
  # 1234 and 35612 have all-distinct digits, so the expected result is
  # c(1234, 35612).
  input = c(1234,14312,13512,35612,577532);
  return(j.ints.uniqueDigitsVector(input));
}
|
b85b749ebd9d66c93ccd48d247dc848d360eb088
|
1e018375afab08fc10bc5456448234c788ff1aae
|
/packages/gist/R/gists.R
|
13af9742f1ca1046a8cf79e0a1d3da5442b273d1
|
[
"MIT"
] |
permissive
|
att/rcloud
|
5187a71e83726e9e7425adde8534cf66690cac7f
|
3630ec73cebfc8df1b2ee4bd4a07fbe81cb03bb0
|
refs/heads/develop
| 2023-08-23T18:14:45.171238
| 2022-08-25T23:49:52
| 2022-08-25T23:49:52
| 5,250,457
| 322
| 138
|
MIT
| 2023-05-22T19:46:48
| 2012-07-31T19:32:52
|
JavaScript
|
UTF-8
|
R
| false
| false
| 490
|
r
|
gists.R
|
# S3 generics for gist CRUD operations. Each generic dispatches on the gist
# context object `ctx` (the second argument to UseMethod), so alternative
# backends can plug in their own implementations.

get.gist <- function (id, version = NULL, ctx = current.gist.context()) {
  # Fetch a gist by id, optionally at a specific version.
  UseMethod("get.gist", ctx)
}

fork.gist <- function (id, ctx = current.gist.context()) {
  # Fork an existing gist.
  UseMethod("fork.gist", ctx)
}

get.gist.forks <- function (id, ctx = current.gist.context()) {
  # List the forks of a gist.
  UseMethod("get.gist.forks", ctx)
}

modify.gist <- function (id, content, ctx = current.gist.context()) {
  # Update an existing gist with new content.
  UseMethod("modify.gist", ctx)
}

create.gist <- function (content, ctx = current.gist.context()) {
  # Create a brand new gist from content.
  UseMethod("create.gist", ctx)
}
|
d716f9b95a25fedff87c01f5739b9e55054ccf46
|
bfe4fa7d35d25dbd4749c7db18284630743f943b
|
/for channels/compare_num_samples_variability.R
|
b12ac78bbe18f7944b77f9f48130dc9c895d28a1
|
[] |
no_license
|
innertron/REU_Kam
|
0ccfe2d4e178b241cdf836d9c066188dbbd65e82
|
bf4028b193f13cc202f66cd28963290722b312ac
|
refs/heads/master
| 2021-01-17T20:16:07.187792
| 2016-08-02T21:29:38
| 2016-08-02T21:29:38
| 61,396,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,046
|
r
|
compare_num_samples_variability.R
|
#calculate the cross map skill for different sample sizes in order to
#find most efficient sample size to use that does not compromise accuracy
#Import the rEDM and igraph libraries
library(rEDM)
#you need to import the neural_data.txt file by hand.
# assumes neural_data has at least 32 channel columns and >= 1000 rows —
# TODO confirm (the loops below index columns 1..32)
time_span = 1:1000
#get the first second of the data
nd <- neural_data[time_span,]
# NOTE(review): length() of a data.frame is its number of COLUMNS, not rows,
# so lib/pred span c(1, 32) here; c(1, nrow(nd)) may have been intended —
# confirm against rEDM's lib/pred semantics.
lib <- c(1, length(nd))
pred <- c(1, length(nd))
# candidate num_samples values to compare: 10, 20, ..., 100
sampleSizes <- seq(10, 100, by=10)
# edge-list accumulators, grown row-by-row inside the loops below
from <- c()
to <- c()
sample_size <- c()
strength <- c()
for (i in 1:31)
{
Ch1 <- nd[,i]
#run and plot the simplex algorithm to get the best embedding dimension
simplex_output <- simplex(Ch1, lib, pred, E=1:6)
bestE_i <- which.max(simplex_output$rho)
rhos <- c()
i2 = i+1
#for all nodes other than the current one, look for the edge that
#most causes this one (i) and form an edge from it to i.
for(j in i2:32)
{
Ch2 <- nd[,j]
#run and plot the simplex algorithm to get the best embedding dimension
simplex_output <- simplex(Ch2, lib, pred, E=1:6)
bestE_j <- which.max(simplex_output$rho)
# cross-map every unordered channel pair (i, j) at each candidate sample size
for (smp in sampleSizes)
{
#get the convergent cross map calculations
Ch2_xmap_Ch1 <- ccm(nd, E = bestE_i, lib_column = j, first_column_time = FALSE,
target_column = i, lib_sizes = 80, num_samples = smp)
#take the means of the ccm's and get the standard deviation
ch2_map_1_mean <- data.frame(ccm_means(Ch2_xmap_Ch1), sd.rho = with(Ch2_xmap_Ch1,
tapply(rho, lib_size, sd)))
#get the convergent cross map calculations
Ch1_xmap_Ch2 <- ccm(nd, E = bestE_j, lib_column = i, first_column_time = FALSE,
target_column = j, lib_sizes = 80, num_samples = smp)
#take the means of the ccm's and get the standard deviation
ch1_map_2_mean <- data.frame(ccm_means(Ch1_xmap_Ch2), sd.rho = with(Ch1_xmap_Ch2,
tapply(rho, lib_size, sd)))
# record both directed edges (i -> j and j -> i) for this sample size
from <- rbind(from, i)
to <- rbind(to, j)
strength <- rbind(strength, ch2_map_1_mean$rho)
sample_size <- rbind(sample_size, smp)
from <- rbind(from, j)
to <- rbind(to, i)
strength <- rbind(strength, ch1_map_2_mean$rho)
sample_size <- rbind(sample_size, smp)
print(paste("finished ",i,j,"with",smp))
}
}
}
samples_data <- data.frame(from=from, to=to, strength=strength, sample_size=sample_size)
#draw the histogram
# For each channel pair, measure how much each smaller sample size's rho
# deviates from the rho obtained at the full 100 samples.
histog <- data.frame(lib_size = integer(), rho_diff = double())
# NOTE(review): both loops stop at 31, so pairs involving channel 32 are
# never included in the plot even though they were computed above — confirm
# this is intended.
for (i in 1:31)
{
for (j in 1:31)
{
if(i!=j)
{
best <- subset(samples_data, select=strength, from==i & to==j & sample_size==100)
diffs <- subset(samples_data, select=c(strength,sample_size), from==i & to ==j & sample_size != 100)
histog <- rbind(histog, data.frame(lib_size=diffs$sample_size, rho_diff=diffs$strength-best$strength))
}
}
}
plot(histog$lib_size, histog$rho_diff, ylab=expression(paste("Difference in ", rho, " from 100 samples")), xlab="Sample size", main="Rho strength is not dramatically affected by a \n small sample size")
|
a56fffae39e1f6b49e9888d31145f33c2e11b1a9
|
fbc5705f3a94f34e6ca7b9c2b9d724bf2d292a26
|
/edX/DS R Basics/Indexing/counting obs below average.R
|
97f903c5079e0af9a37644d2e682f5283a17e953
|
[] |
no_license
|
shinichimatsuda/R_Training
|
1b766d9f5dfbd73490997ae70a9c25e9affdf2f2
|
df9b30f2ff0886d1b6fa0ad6f3db71e018b7c24d
|
refs/heads/master
| 2020-12-24T20:52:10.679977
| 2018-12-14T15:20:15
| 2018-12-14T15:20:15
| 58,867,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
counting obs below average.R
|
# Indexing exercise (edX "Data Science: R Basics").
# NOTE(review): assumes a `murders` data frame with `total` and `population`
# columns is already loaded — presumably from the dslabs package; confirm.

# Store the murder rate per 100,000 for each state, in murder_rate
murder_rate <- murders$total/murders$population*100000
# Compute average murder rate and store in avg using `mean`
avg <- mean(murder_rate)
# How many states have murder rates below avg ? Check using sum
# (logical TRUE/FALSE coerces to 1/0 when summed)
sum(murder_rate < avg)
|
0aa4882692705ae48887433f4a063c803743fe41
|
ba9c2741339f66bfd24c6dda7cd40b30919a0984
|
/chapter_1/analysis_scripts/kmer_pl/007a_gc_cor_filter.R
|
940119845dc266071481c93dafca1374e3f661c2
|
[
"Unlicense"
] |
permissive
|
cjfiscus/2022_Fiscus_Dissertation
|
928b38d55135e125ace47088eddad4615613cc00
|
25110d7120d52d94c99616ebd4eed40da5aed3bf
|
refs/heads/main
| 2023-04-17T19:35:12.737005
| 2022-09-05T20:26:09
| 2022-09-05T20:26:09
| 532,716,601
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 371
|
r
|
007a_gc_cor_filter.R
|
#!/usr/bin/env Rscript
# gc corr filter
# cjfiscus
#
# Flag rows whose V2 value falls outside the Tukey fences
# (quartiles +/- 1.5 * IQR) and write their V1 identifiers to a file.
# Usage: script.R <input_table> <output_bad_list>
args <- commandArgs(trailingOnly = TRUE)

# input table: column V1 = identifier, column V2 = value to filter on
df <- read.table(args[1])

# Tukey outlier fences on column V2
quartiles <- quantile(df$V2)
spread <- 1.5 * IQR(df$V2)
upper <- quartiles[4] + spread   # 75th percentile + 1.5 IQR
lower <- quartiles[2] - spread   # 25th percentile - 1.5 IQR

# rows beyond either fence form the new "bad" list
above <- df[df$V2 > upper, ]
below <- df[df$V2 < lower, ]
outliers <- rbind(above, below)

write.table(outliers$V1, args[2], sep = "\t", quote = FALSE,
            col.names = FALSE, row.names = FALSE)
|
9a116ac6249020da4d2b9147b9d662e865f4426d
|
a829fe89f88c687ff86f9a4a2939ebaf968c1c15
|
/predictRatings.R
|
a1e54958635e9a9b6414d03ea10cc853bd7b30f6
|
[] |
no_license
|
arman1371/Simple-Recommender-With-R
|
d49e267e1b6c475ea6f47b99d8be319a3fd64734
|
9ad99d35f967cff4693bd5f84664e508cfb75a1b
|
refs/heads/master
| 2016-09-15T23:11:31.487727
| 2015-08-23T07:45:38
| 2015-08-23T07:45:38
| 41,089,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
r
|
predictRatings.R
|
predictRatings <- function(userCors, usersItems, userMean, userSd){
  # Predict the target user's rating for every item as the correlation-
  # weighted mean of the other users' ratings, then map the result back
  # onto the target user's own rating scale.
  #
  # userCors:   two-column structure; column 2 holds each user's correlation
  #             with the target user (rows align with usersItems rows).
  # usersItems: user x item rating matrix (NA = not rated), with item
  #             identifiers as column names.
  # userMean, userSd: the target user's rating mean / standard deviation,
  #             used to de-normalise the standardized predictions.
  #
  # Returns a data.frame with columns ItemID (character) and Predict.
  n_items <- ncol(usersItems)
  pred <- data.frame(ItemID = colnames(usersItems),
                     Predict = rep(NA, n_items),
                     stringsAsFactors = F)
  # seq_len() (not 1:n_items) so a zero-column input yields an empty result
  # instead of an out-of-bounds indexing error.
  for (i in seq_len(n_items)) {
    # users with both a rating for this item and a known correlation
    sharingNums <- !is.na(usersItems[,i]) & !is.na(userCors[,2])
    # NOTE(review): the denominator uses the signed correlation sum; the
    # classic prediction formula divides by sum(abs(cor)) — confirm intended.
    pred[i, 2] <- sum(usersItems[sharingNums,i] * userCors[sharingNums,2], na.rm = T) /
      sum(userCors[sharingNums,2], na.rm = T)
  }
  # undo the z-score normalisation for the target user
  pred[,2] <- (pred[,2] * userSd) + userMean
  pred
}
|
b1cb854ad1eecbb82830855f63baacdec899caff
|
fa657cc59114f6a53a59f01471132076f115033f
|
/R/biblio_cocitation.R
|
622df7c6f68ce89cb6d7d391f135bc3f964cce96
|
[
"MIT"
] |
permissive
|
agoutsmedt/biblionetwork
|
c53142e8ce6eae15efd9ce023730bece1ebd9c7d
|
c05eb9a093bb3f63c1665f25550990479a448397
|
refs/heads/master
| 2023-04-17T13:49:06.551025
| 2023-02-25T18:50:23
| 2023-02-25T18:50:23
| 334,688,094
| 4
| 2
|
NOASSERTION
| 2021-02-26T14:21:10
| 2021-01-31T15:27:49
|
R
|
UTF-8
|
R
| false
| false
| 6,082
|
r
|
biblio_cocitation.R
|
biblio_cocitation <- function(dt, source, ref, normalized_weight_only=TRUE, weight_threshold = 1, output_in_character = TRUE)
{
  #' Calculating the Coupling Angle Measure for Edges in a Co-citation Network
  #'
  #' `r lifecycle::badge("experimental")`
  #'
  #' @description This function is basically the same as the [biblio_coupling()] function but it is explicitly framed
  #' for bibliographic co-citation network (and not for bibliographic coupling networks). It takes a data frame
  #' with direct citations, calculates the number of times two references are cited together, and calculates a measure
  #' similar to the coupling angle value \insertCite{sen1983}{biblionetwork}: it divides the number of times two references are
  #' cited together by the square root of the product of the total number of citations (in the whole corpus) of each reference.
  #' The more two references are cited in general, the more they have to be cited together for their link to be important.
  #'
  #' @details This function uses data.table package and is thus very fast. It allows the user to compute the coupling angle
  #' on a very large data frame quickly.
  #'
  #' @param dt
  #' The dataframe with citing and cited documents.
  #'
  #' @param source
  #' The column name of the source identifiers, that is the documents that are citing.
  #'
  #' @param ref
  #' The column name of the cited references identifiers. In co-citation network, these references are the nodes of the network.
  #'
  #' @param normalized_weight_only
  #' If set to FALSE, the function returns the weights normalized by the cosine measure,
  #' but also simply the number of times two references are cited together.
  #'
  #' @param weight_threshold
  #' Correspond to the value of the non-normalized weights of edges. The function just keeps the edges
  #' that have a non-normalized weight superior to the `weight_threshold`. In a large bibliographic co-citation network,
  #' you can consider for instance that being cited only once together is not sufficient/significant for two references to be linked together.
  #' This parameter could also be modified to avoid creating intractable networks with too many edges.
  #'
  #' @param output_in_character
  #' If TRUE, the function ends by transforming the `from` and `to` columns in character, to make the
  #' creation of a [tidygraph](https://tidygraph.data-imaginist.com/index.html) graph easier.
  #'
  #' @return A data.table with the articles (or authors) identifier in `from` and `to` columns,
  #' with one or two additional columns (the coupling angle measure and
  #' the number of shared citations). It also keeps a copy of `from` and `to` in the `Source` and `Target` columns. This is useful if you
  #' are using the tidygraph package then, where `from` and `to` values are modified when creating a graph.
  #'
  #' @examples
  #' library(biblionetwork)
  #' biblio_cocitation(Ref_stagflation,
  #' source = "Citing_ItemID_Ref",
  #' ref = "ItemID_Ref")
  #'
  #' # It is basically the same as:
  #' biblio_coupling(Ref_stagflation,
  #' source = "ItemID_Ref",
  #' ref = "Citing_ItemID_Ref")
  #'
  #' @references
  #' \insertAllCited{}
  #'
  #' @export
  # Listing the variables not in the global environment to avoid a "note" saying "no visible binding for global variable ..." when using check()
  # See https://www.r-bloggers.com/2019/08/no-visible-binding-for-global-variable/
  id_ref <- id_art <- N <- .N <- Source <- Target <- weight <- nb_cit_Target <- nb_cit_Source <- NULL
  # Making sure the table is a datatable
  dt <- data.table(dt)
  # Renaming and simplifying
  setnames(dt, c(source,ref), c("id_art", "id_ref"))
  dt <- dt[,c("id_art","id_ref")]
  setkey(dt,id_ref,id_art)
  # removing duplicated citations with exactly the same source and target
  dt <- unique(dt)
  # remove loop
  dt <- dt[id_art!=id_ref]
  # Computing how many times a reference is cited
  id_nb_cit <- dt[,list(nb_cit = .N),by=id_ref]
  # Removing articles with only one reference in the bibliography:
  dt <- dt[,N := .N, by = id_art][N > 1][, list(id_art,id_ref)]
  # Creating every combination of references per citing article
  bib_cocit <- dt[,list(Target = rep(id_ref[1:(length(id_ref)-1)],(length(id_ref)-1):1),
                        Source = rev(id_ref)[sequence((length(id_ref)-1):1)]),
                  by= id_art]
  # remove loop
  bib_cocit <- bib_cocit[Source!=Target]
  # Inverse Source and Target so that couple of Source/Target are always on the same side
  bib_cocit <- unique(bib_cocit[Source > Target, c("Target", "Source") := list(Source, Target)]) # exchanging and checking for duplicates
  # Calculating the weight: N is the number of times the two references are cited together
  bib_cocit <- bib_cocit[,.N,by=list(Target,Source)]
  # keeping edges over threshold
  bib_cocit <- bib_cocit[N>=weight_threshold]
  # We then do manipulations to normalize this number with the cosine measure
  bib_cocit <- merge(bib_cocit, id_nb_cit, by.x = "Target",by.y = "id_ref" )
  data.table::setnames(bib_cocit,"nb_cit", "nb_cit_Target")
  bib_cocit <- merge(bib_cocit, id_nb_cit, by.x = "Source",by.y = "id_ref" )
  data.table::setnames(bib_cocit,"nb_cit", "nb_cit_Source")
  bib_cocit[,weight := N/sqrt(nb_cit_Target*nb_cit_Source)] # cosine measure
  # Renaming columns
  data.table::setnames(bib_cocit, c("N"),
                       c("nb_shared_citations"))
  # Transforming the Source and Target columns in character (and keeping the Source and Target in copy)
  # Then selecting which columns to return
  if(output_in_character == TRUE){
    bib_cocit$from <- as.character(bib_cocit$Source)
    bib_cocit$to <- as.character(bib_cocit$Target)
    if(normalized_weight_only==TRUE){
      bib_cocit[, c("from","to","weight","Source","Target")]
    } else {
      bib_cocit[, c("from","to","weight","nb_shared_citations","Source","Target")]
    }
  } else{
    if(normalized_weight_only==TRUE){
      bib_cocit[, c("Source","Target","weight")]
    } else {
      # BUG FIX: this branch previously selected "nb_shared_references", a
      # column that does not exist here (the count column was renamed to
      # "nb_shared_citations" above), which made data.table error whenever
      # output_in_character = FALSE and normalized_weight_only = FALSE.
      bib_cocit[, c("Source","Target","weight","nb_shared_citations")]
    }
  }
}
|
28941e06342526d7fe413ccb2bddf078b37e3364
|
5eb1c6bf57c65103d0e51508a94f7631df52f857
|
/man/TNRS.Rd
|
a7f616029f7399589308cdf1a6e5c0baceb71377
|
[] |
no_license
|
EnquistLab/RTNRS
|
54f90ff42157afe6c3860cffd04a2124ee12a578
|
4f40c76d34f4be8a15282c0d0c146c3e4713c9a5
|
refs/heads/master
| 2023-06-25T01:29:56.950510
| 2023-06-13T00:28:19
| 2023-06-13T00:28:19
| 196,649,263
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,207
|
rd
|
TNRS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TNRS.R
\name{TNRS}
\alias{TNRS}
\title{Resolve plant taxonomic names}
\usage{
TNRS(
taxonomic_names,
sources = c("wcvp", "wfo"),
classification = "wfo",
mode = "resolve",
matches = "best",
accuracy = NULL,
skip_internet_check = FALSE,
...
)
}
\arguments{
\item{taxonomic_names}{Data.frame containing two columns: 1) Row number, 2) Taxonomic names to be resolved (or parsed). Alternatively, a character vector of names can be supplied.}
\item{sources}{Character. Taxonomic sources to use. Default is c("wcvp", "wfo"). Options include "wfo" and "wcvp".}
\item{classification}{Character. Family classification to use. Currently options include "wfo" (the default).}
\item{mode}{Character. Options are "resolve" and "parse". Default option is "resolve"}
\item{matches}{Character. Should all matches be returned ("all") or only the best match ("best", the default)?}
\item{accuracy}{numeric. If specified, only matches with a score greater than or equal to the supplied accuracy level will be returned. If left NULL, the default threshold will be used.}
\item{skip_internet_check}{Should the check for internet connectivity be skipped? Default is FALSE.}
\item{...}{Additional parameters passed to internal functions}
}
\value{
Dataframe containing TNRS results.
}
\description{
Resolve plant taxonomic names.
}
\note{
usda = United States Department of Agriculture, wfo = World Flora Online, wcvp = World Checklist of Vascular Plants.
For queries of more than 5000 names, the function will automatically divide the query into batches of 5000 names and then run the batches one after the other. Thus, for very large queries this may take some time. When this is the case, a progress bar will be displayed.
IMPORTANT: Note that parallelization of queries is automatically handled by the API, and so there is no need to further parallelize in R (in fact, doing so may actually slow things down!).
}
\examples{
\dontrun{
# Take a subset of the testfile to speed up runtime
tnrs_testfile <- tnrs_testfile[1:20, ]
results <- TNRS(taxonomic_names = tnrs_testfile)
# Inspect the results
head(results, 10)
}
}
|
ac3958d79328079afe5b9e268380ea279807dd40
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/MGDrivE/R/RcppExports.R
|
a2f6b11fb70065aa03986f7a4e3efb22c3aae306
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,222
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Dirichlet Distribution
#'
#' Make a single draw from a Dirichlet distribution with the shape parameter
#' one.
#'
#' @param migrationPoint Vector of weights for draws. Must be positive.
#'
#' @return A single draw from the Dirichlet distribution (computed in compiled
#'   C++; presumably a numeric probability vector the same length as
#'   \code{migrationPoint} -- confirm against the C++ source).
#'
rDirichlet <- function(migrationPoint) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
# Do not edit by hand; change the C++ source and re-run compileAttributes().
.Call('_MGDrivE_rDirichlet', PACKAGE = 'MGDrivE', migrationPoint)
}
#' Quantiles Function
#'
#' Calculate the given quantiles of a matrix.
#'
#' @usage quantileC(Trials, Probs)
#'
#' @param Trials Integer matrix to calculate quantiles over
#' @param Probs Vector of quantiles
#'
#' @details This function calculates the given quantiles over the rows of an
#' integer matrix. It uses method 8 of the stat::quantiles() function. It gives
#' the same result, to numerical accuracy, and is designed to handle matrix input.
#' It is only designed to work on integer matrices!
#'
#' @return Numeric Matrix
#'
quantileC <- function(Trials, Probs) {
.Call('_MGDrivE_quantileC', PACKAGE = 'MGDrivE', Trials, Probs)
}
#' Calculate Geodesic Distance - Cosine Method
#'
#' This function calculates geodesic distance using the cosine method.
#'
#' @param latLongs Two column matrix of latitudes/longitudes
#' @param r Earth radius. Default is WGS-84 radius
#'
#' @return Numeric matrix of pairwise geodesic distances between the input
#'   points, in the same units as \code{r} (metres by default).
#'
#' @examples
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # cosine distance formula
#' distMat = calcCos(latLongs = latLong)
#'
#' @export
calcCos <- function(latLongs, r = 6378137) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
.Call('_MGDrivE_calcCos', PACKAGE = 'MGDrivE', latLongs, r)
}
#' Calculate Geodesic Distance - Haversine Method
#'
#' This function calculates geodesic distance using the Haversine method.
#'
#' @param latLongs Two column matrix of latitudes/longitudes
#' @param r Earth radius. Default is WGS-84 radius
#'
#' @return Numeric matrix of pairwise geodesic distances between the input
#'   points, in the same units as \code{r} (metres by default).
#'
#' @examples
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # Haversine distance formula
#' distMat = calcHaversine(latLongs = latLong)
#'
#' @export
calcHaversine <- function(latLongs, r = 6378137) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
.Call('_MGDrivE_calcHaversine', PACKAGE = 'MGDrivE', latLongs, r)
}
#' Calculate Geodesic Distance - Vincenty Sphere Method
#'
#' This function calculates geodesic distance using the Vincenty sphere method.
#'
#' @param latLongs Two column matrix of latitudes/longitudes
#' @param r Earth radius. Default is WGS-84 radius
#'
#' @return Numeric matrix of pairwise geodesic distances between the input
#'   points, in the same units as \code{r} (metres by default).
#'
#' @examples
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # Vincenty Sphere distance formula
#' distMat = calcVinSph(latLongs = latLong)
#'
#' @export
calcVinSph <- function(latLongs, r = 6378137) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
.Call('_MGDrivE_calcVinSph', PACKAGE = 'MGDrivE', latLongs, r)
}
#' Calculate Geodesic Distance - Vincenty Ellipsoid Method
#'
#' This function calculates geodesic distance using the original Vincenty Ellipsoid method.
#'
#' @param latLongs Two column matrix of latitudes/longitudes
#' @param a Equatorial radius of the earth, default is WGS-84 radius
#' @param b Polar radius of the earth, default is WGS-84 radius
#' @param f Flattening or inverse eccentricity, default eccentricity is WGS-84
#' @param eps Convergence criteria
#' @param iter Maximum number of iterations to attempt convergence
#'
#' @return Numeric matrix of pairwise geodesic distances between the input
#'   points, in the same units as \code{a}/\code{b} (metres by default).
#'
#' @examples
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # Vincenty Ellipsoid distance formula
#' distMat = calcVinEll(latLongs = latLong)
#'
#' @export
calcVinEll <- function(latLongs, a = 6378137, b = 6356752.3142, f = 1.0/298.257223563, eps = 1e-12, iter = 100) {
# Auto-generated Rcpp wrapper: the iterative Vincenty solver lives in
# registered C++ code (eps/iter control its convergence loop).
.Call('_MGDrivE_calcVinEll', PACKAGE = 'MGDrivE', latLongs, a, b, f, eps, iter)
}
#' Calculate Lognormal Stochastic Matrix
#'
#' Given a distance matrix from \code{\link[MGDrivE]{calcVinEll}},
#' calculate a stochastic matrix where one step movement probabilities follow a lognormal density.
#'
#' The distribution and density functions for the lognormal kernel are given below:
#' \deqn{
#' F(x)=\frac{1}{2} + \frac{1}{2} \mathrm{erf}[\frac{\mathrm{ln}x-\mu}{\sqrt{2}\sigma}]
#' }
#' \deqn{
#' f(x)=\frac{1}{x\sigma\sqrt{2\pi}}\mathrm{exp}\left( -\frac{(\mathrm{ln}x-\mu)^{2}}{2\sigma^{2}} \right)
#' }
#' where \eqn{\mu} is the mean on the log scale, and \eqn{\sigma} is the standard deviation on the log scale.
#'
#' @param distMat Distance matrix from \code{\link[MGDrivE]{calcVinEll}}
#' @param meanlog Log mean of \code{\link[stats]{Lognormal}} distribution
#' @param sdlog Log standard deviation of \code{\link[stats]{Lognormal}} distribution
#'
#' @return Numeric stochastic matrix of one-step movement probabilities
#'   (computed in compiled C++; presumably row-normalised -- confirm in the
#'   C++ source).
#'
#' @examples
#' # setup distance matrix
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # Vincenty Ellipsoid distance formula
#' distMat = calcVinEll(latLongs = latLong)
#'
#' # calculate lognormal distribution over distances
#' # mean and standard deviation are just for example
#' kernMat = calcLognormalKernel(distMat = distMat, meanlog = 100, sdlog = 10)
#'
#' @export
calcLognormalKernel <- function(distMat, meanlog, sdlog) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
.Call('_MGDrivE_calcLognormalKernel', PACKAGE = 'MGDrivE', distMat, meanlog, sdlog)
}
#' Calculate Gamma Stochastic Matrix
#'
#' Given a distance matrix from \code{\link[MGDrivE]{calcVinEll}}, calculate a
#' stochastic matrix where one step movement probabilities follow a gamma density.
#'
#' The distribution and density functions for the gamma kernel are given below:
#' \deqn{
#' F(x)=\frac{1}{\Gamma(\alpha)}\gamma(\alpha,\beta x)
#' }
#' \deqn{
#' f(x)=\frac{\beta^{\alpha}}{\Gamma(\alpha)}x^{\alpha-1}e^{-\beta x}
#' }
#' where \eqn{\Gamma(\alpha)} is the Gamma function, \eqn{\gamma(\alpha,\beta x)} is the lower incomplete
#' gamma function, and \eqn{\alpha,\beta} are the shape and rate parameters, respectively.
#'
#' @param distMat Distance matrix from \code{\link[MGDrivE]{calcVinEll}}
#' @param shape Shape parameter of \code{\link[stats]{GammaDist}} distribution
#' @param rate Rate parameter of \code{\link[stats]{GammaDist}} distribution
#'
#' @return Numeric stochastic matrix of one-step movement probabilities
#'   (computed in compiled C++; presumably row-normalised -- confirm in the
#'   C++ source).
#'
#' @examples
#' # setup distance matrix
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # Vincenty Ellipsoid distance formula
#' distMat = calcVinEll(latLongs = latLong)
#'
#' # calculate gamma distribution over distances
#' # shape and rate are just for example
#' kernMat = calcGammaKernel(distMat = distMat, shape = 1, rate = 1)
#'
#' @export
calcGammaKernel <- function(distMat, shape, rate) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
.Call('_MGDrivE_calcGammaKernel', PACKAGE = 'MGDrivE', distMat, shape, rate)
}
#' Calculate Exponential Stochastic Matrix
#'
#' Given a distance matrix from \code{\link[MGDrivE]{calcVinEll}}, calculate a
#' stochastic matrix where one step movement probabilities follow an exponential density.
#'
#' The distribution and density functions for the exponential kernel are given below:
#' \deqn{
#' F(x)=1-e^{-\lambda x}
#' }
#' \deqn{
#' f(x)=\lambda e^{-\lambda x}
#' }
#' where \eqn{\lambda} is the rate parameter of the exponential distribution.
#'
#' @param distMat Distance matrix from \code{\link[MGDrivE]{calcVinEll}}
#' @param rate Rate parameter of \code{\link[stats]{Exponential}} distribution
#'
#' @return Numeric stochastic matrix of one-step movement probabilities
#'   (computed in compiled C++; presumably row-normalised -- confirm in the
#'   C++ source).
#'
#' @examples
#' # setup distance matrix
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # Vincenty Ellipsoid distance formula
#' distMat = calcVinEll(latLongs = latLong)
#'
#' # calculate exponential distribution over distances
#' # rate is just for example
#' kernMat = calcExpKernel(distMat = distMat, rate = 10)
#'
#' @export
calcExpKernel <- function(distMat, rate) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
.Call('_MGDrivE_calcExpKernel', PACKAGE = 'MGDrivE', distMat, rate)
}
#' Calculate Zero-inflated Exponential Stochastic Matrix
#'
#' Given a distance matrix from \code{\link[MGDrivE]{calcVinEll}}, calculate a
#' stochastic matrix where one step movement probabilities follow an zero-inflated
#' exponential density with a point mass at zero. The point mass at zero represents
#' the first stage of a two-stage process, where mosquitoes decide to stay at
#' their current node or leave anywhere. This parameter can be calculated from
#' lifetime probabilities to stay at the current node with the helper function
#' \code{\link[MGDrivE]{calcZeroInflation}}.
#'
#' If a mosquito leaves its current node, with probability \eqn{1-p_{0}}, it
#' then chooses a destination node according to a standard exponential density
#' with rate parameter \eqn{rate}.
#'
#' The distribution and density functions for the zero inflated exponential kernel are given below:
#' \deqn{
#' F(x)=p_{0}\theta(x) + (1-p_{0})(1-e^{-\lambda x})
#' }
#' \deqn{
#' f(x)=p_{0}\delta(x)+(1-p_{0})\lambda e^{-\lambda x}
#' }
#' where \eqn{\lambda} is the rate parameter of the exponential distribution,
#' \eqn{\theta(x)} is the Heaviside step function and \eqn{\delta(x)} is the
#' Dirac delta function.
#'
#' @param distMat Distance matrix from \code{\link[MGDrivE]{calcVinEll}}
#' @param rate Rate parameter of \code{\link[stats]{Exponential}} distribution
#' @param p0 Point mass at zero
#'
#' @return Numeric stochastic matrix of one-step movement probabilities
#'   (computed in compiled C++; presumably row-normalised with diagonal
#'   entries at least \eqn{p_{0}} -- confirm in the C++ source).
#'
#' @examples
#' # setup distance matrix
#' # two-column matrix with latitude/longitude, in degrees
#' latLong = cbind(runif(n = 5, min = 0, max = 90),
#'                 runif(n = 5, min = 0, max = 180))
#'
#' # Vincenty Ellipsoid distance formula
#' distMat = calcVinEll(latLongs = latLong)
#'
#' # calculate hurdle exponential distribution over distances
#' # rate and point mass are just for example
#' kernMat = calcHurdleExpKernel(distMat = distMat, rate = 1/1e6, p0 = 0.1)
#'
#' @export
calcHurdleExpKernel <- function(distMat, rate, p0) {
# Auto-generated Rcpp wrapper: the computation lives in registered C++ code.
.Call('_MGDrivE_calcHurdleExpKernel', PACKAGE = 'MGDrivE', distMat, rate, p0)
}
|
3c56718faaa0b7d481199dda8aa1a3f5293d5c5e
|
7bb3f64824627ef179d5f341266a664fd0b69011
|
/Statistics_For_Psychology_by_Arthur_Aron,_Elliot_J._Coups,_And_Elaine_N._Aron/CH2/EX2.3a/Ex2_3a.R
|
81593723f9174b7dc38e589b35ea1f49b9eb95bb
|
[
"MIT"
] |
permissive
|
prashantsinalkar/R_TBC_Uploads
|
8bd0f71834814b1d03df07ce90b2eae3b7d357f8
|
b3f3a8ecd454359a2e992161844f2fb599f8238a
|
refs/heads/master
| 2020-08-05T23:06:09.749051
| 2019-10-04T06:54:07
| 2019-10-04T06:54:07
| 212,746,586
| 0
| 0
|
MIT
| 2019-10-04T06:03:49
| 2019-10-04T06:03:48
| null |
UTF-8
|
R
| false
| false
| 790
|
r
|
Ex2_3a.R
|
# Page no. : 58 - 60
# Worked-out Examples

# a) Mean
scores <- c(8, 6, 6, 9, 6, 5, 6, 2)
value <- mean(scores)
cat("Mean of scores is", value)

# b) Median
scores <- c(1, 7, 4, 2, 3, 6, 2, 9, 7)
value <- median(scores)
cat("Median of value is", value)

# c) Sum of Squares, Variance and Standard Deviation
scores <- c(8, 6, 6, 9, 6, 5, 6, 2)
# Sum of squared deviations from the mean; `^` is the idiomatic R power
# operator (the original used the non-standard `**` alias).
value <- sum((scores - mean(scores))^2)
cat("Sum of Squares of scores is", value)

# Population variance = SS / n and population SD = sqrt(variance), computed
# directly in base R. This reproduces rafalib::popvar()/popsd() exactly while
# removing the package dependency and the unconditional install.packages()
# call, which attempted a network install on every run of the script.
variance <- sum((scores - mean(scores))^2) / length(scores)
cat("Variance of scores is", variance)

standard_deviation <- sqrt(variance)
cat("Standard Deviation of scores is", round(standard_deviation, 2))
|
584ea60d8d1e5380687d5351b4516045657f755e
|
0075a16293acba55aeacc0defb3a566e3eaa3740
|
/plot4.R
|
dc59897888f3231c55427ce51cbd9dc3326b61ef
|
[] |
no_license
|
charlesott/ExData_Plotting1
|
8343493809f5f7ea81dfb9435fca4f951ebbb660
|
922189372f53e61bfd2beb1c63f33514322f9814
|
refs/heads/master
| 2021-01-15T20:42:48.915543
| 2014-07-10T17:20:56
| 2014-07-10T17:20:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,123
|
r
|
plot4.R
|
# Get the data into memory (the source file is semicolon-separated with
# comma decimal marks, which is exactly what read.csv2 expects)
hp <- read.csv2("household_power_consumption.txt", header = T)

# Combine Date and Time into one datetime column.
# Fix: strptime() returns POSIXlt, a list-based class that misbehaves when
# stored in a data.frame column; as.POSIXct() converts it to an atomic
# datetime vector holding the same instants.
hp$Date <- as.POSIXct(strptime(paste(hp$Date, hp$Time), format = "%d/%m/%Y %H:%M:%S"))

# Subset to the two days important to this project (1-2 Feb 2007)
hp.sub <- subset(hp, Date >= as.POSIXct('2007-02-01 00:00')
                 & Date <= as.POSIXct('2007-02-02 23:59'))

# Prepare the data for plotting: the "?" missing-value marker forces these
# columns to be read as text, so convert via as.character -> as.numeric
# ("?" entries become NA, with a coercion warning).
# (An intermediate hp.v / hp.df built here in the original was dead code --
# both were overwritten before first use -- and has been removed.)
hp.sub$Sub_metering_1 <- as.numeric(as.character(hp.sub$Sub_metering_1))
hp.sub$Sub_metering_2 <- as.numeric(as.character(hp.sub$Sub_metering_2))
hp.sub$Sub_metering_3 <- as.numeric(as.character(hp.sub$Sub_metering_3))
hp.sub$Voltage <- as.numeric(as.character(hp.sub$Voltage))
hp.sub$Global_reactive_power <- as.numeric(as.character(hp.sub$Global_reactive_power))
hp.sub$Global_active_power <- as.numeric(as.character(hp.sub$Global_active_power))

# Use the png device to avoid squishing the legend
png("plot4.png", width = 480, height = 480)

# 2x2 grid of panels, filled column-wise
par(mfcol = c(2, 2))

# Panel 1: Global active power over time
hp.df <- data.frame(hp.sub$Date, hp.sub$Global_active_power)
plot(hp.df, type = "l", ylab = "Global Active Power", xlab = "")

# Panel 2: Energy sub metering (three series overlaid on an empty frame)
plot(hp.sub$Date, hp.sub$Sub_metering_1, type = "n",
     ylab = "Energy sub metering",
     xlab = "")
lines(hp.sub$Date, hp.sub$Sub_metering_1, type = "l")
lines(hp.sub$Date, hp.sub$Sub_metering_2, type = "l", col = "red")
lines(hp.sub$Date, hp.sub$Sub_metering_3, type = "l", col = "blue")
legend("topright",
       lty = "solid",
       bty = "n",
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Panel 3: Voltage over time
hp.df <- data.frame(hp.sub$Date, hp.sub$Voltage)
plot(hp.df, type = "l", xlab = "datetime", ylab = "Voltage")

# Panel 4: Global reactive power over time
hp.df <- data.frame(hp.sub$Date, hp.sub$Global_reactive_power)
plot(hp.df, type = "l", xlab = "datetime", ylab = "Global_reactive_power")

# Close the device to flush the plot to disk
dev.off()
|
5d22523a373ab9a5cf65f387fe92fc607ee884bb
|
34e79c005fbf68149d27283f7749a247992e0b35
|
/getting_and_cleaning_data/run_analysis.R
|
7d6b660419929637d6661e82c93e968ded1ea33c
|
[] |
no_license
|
brockwebb/datasciencecoursera
|
f9e2c0a90091f5cb3cbd785cd4176dbceadb77bc
|
7dc18a6f03c591b9d653b5e2f35652a0574bff1b
|
refs/heads/master
| 2016-09-16T14:16:02.231300
| 2014-09-21T22:31:38
| 2014-09-21T22:31:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,688
|
r
|
run_analysis.R
|
# This script was generated for the Coursera Getting and Cleaning Data class.
# The data file was obtained from the URL:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# to run the script, first unzip the data in your working directory
# The assignment is, (source: Getting and Cleaning Data (get-006) website)
# 1) "Merges the training and the test sets to create one data set.
# 2) Extracts only the measurements on the mean and standard deviation for each measurement.
# 3) Uses descriptive activity names to name the activities in the data set
# 4) Appropriately labels the data set with descriptive variable names.
# 5) Creates a second, independent tidy data set with the average of each variable for each activity and each subject. "
# First read in all tables
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt", header=F)
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt", header=F)
subtest <- read.table("./UCI HAR Dataset/test/subject_test.txt", header=F)
xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt", header=F)
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt", header=F)
subtrain <- read.table("./UCI HAR Dataset/train/subject_train.txt", header=F)
# Activity/Feature Labels (for assignment part 3)
actlabels = read.table("UCI HAR Dataset/activity_labels.txt", header=F)
features = read.table('UCI HAR Dataset/features.txt',header=F);
# Assign meaningful column names... keeping things the same on both sets for a merge
# test
colnames(actlabels) = c('act_ID','act_type')
colnames(subtest) = "sub_ID"
colnames(xtest) = features[,2]
colnames(ytest) = "act_ID"
# training
colnames(subtrain) = "sub_ID"
colnames(xtrain) = features[,2]
colnames(ytrain) = "act_ID"
# Bind the test data columns together into one table, same for training
testdata = cbind(ytest,subtest,xtest)
traindata = cbind(ytrain,subtrain,xtrain)
# Now we can bind all rows together...
test_train = rbind(testdata,traindata)
# Extracting mean and standard deviation
# Getting the columns
cnames = colnames(test_train)
# NOTE(review): the dots in these patterns are regex wildcards, not literal
# dots -- "mean.." matches any name containing "mean" plus two further
# characters (so meanFreq() features are kept too), and "act.."/"sub.."
# retain the ID columns. If only -mean()/-std() features were intended,
# escape the parentheses or use fixed = TRUE. TODO confirm intent before
# changing, since the graded output depends on the selected columns.
cselect = (grepl("act..",cnames) | grepl("sub..",cnames) | grepl("mean..",cnames) | grepl("std..",cnames) );
# Keeping only columns related to mean and std
mean_std = test_train[cselect==T];
# Using descriptive activity names by merging activity names
mean_std = merge(mean_std,actlabels,by='act_ID',all.x=T);
# Create a second, independent tidy data set with the average of each variable for each activity and each subject.
# NOTE(review): aggregate() applies mean() to every column, including the
# non-numeric act_type added by the merge; those entries come back NA with
# warnings -- confirm this is acceptable for the submitted tidy set.
tidy = aggregate(mean_std, by=list(activity = mean_std$act_ID, subject=mean_std$sub_ID), mean)
write.table(tidy, "tidy.csv", sep=",")
|
8830c9de044a3e1d49853d87254c8a527dbbe298
|
779c1eb6debd083c70271e1e6d38fc540c66a0ee
|
/main.r
|
be31ece884b9a8443c501d0bb7742def1c6c7a47
|
[] |
no_license
|
dvillaunal/Dia_55_10mayo
|
3cb51c3c071c241100393e92d9226edc4aac8a75
|
c9e067a8765cdd12490ec6ce2e2263b7f30aa9da
|
refs/heads/main
| 2023-04-19T21:42:43.856878
| 2021-05-11T06:41:32
| 2021-05-11T06:41:32
| 366,227,759
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,528
|
r
|
main.r
|
## ----Protocolo, eval=FALSE, include=TRUE----------------------------------------------------------------
## "Protocolo:
##
## 1. Daniel Felipe Villa Rengifo
##
## 2. Lenguaje: R
##
## 3. Tema: Manejo de archivos *.xlsx
## (realice al menos dos ejercicios que
## requieran cargar archivos externos,
## leer y procesar la información del archvo leído,
## y guardar las respuestas a los ejercicios
## en archivos independientes)
##
## 4. Fuentes:
## http://rpubs.com/BrendaAguilar/manual_uso_de_Paquete_xlsx_en_R"
## ----Ejemplo1 readxl, eval=FALSE, include=TRUE----------------------------------------------------------
## # install.packages("readxl")
## library(readxl)
##
## # Obtener la ruta de un archivo XLSX de ejemplo del paquete
## ruta_archivo <- readxl_example("Ruta del Archivo")
## ----Ejemplo2 readxl, eval=FALSE, include=TRUE----------------------------------------------------------
## read_excel(ruta_archivo)
## ----Ejemplo3 openxlsx, eval=FALSE, include=TRUE--------------------------------------------------------
## # install.packages("openxlsx")
## library(openxlsx)
##
## read.xlsx(ruta_archivo)
## ----Ejemplo xlsx, eval=FALSE, include=TRUE-------------------------------------------------------------
## # install.packages("xlsx")
## library(xlsx)
##
## read.xlsx(ruta_archivo)
## read.xlsx2(ruta_archivo)
## ----Ejemplo XLConnect, eval=FALSE, include=TRUE--------------------------------------------------------
## # install.packages("XLConnect")
## library(XLConnect)
##
## data <- readWorksheetFromFile(ruta_archivo, sheet = "list-column",
## startRow = 1, endRow = 5,
## startCol = 1, endCol = 2)
## ----Ejemplo rio, eval=FALSE, include=TRUE--------------------------------------------------------------
## # install.packages("rio")
## library(rio)
##
## convert(ruta_archivo, "file.csv")
## -------------------------------------------------------------------------------------------------------
# Load the sales database from the Excel workbook:
library(readxl)
base100 <- read_excel("100 Sales Records.xlsx")
## -------------------------------------------------------------------------------------------------------
# Convert the data to a data.frame, keeping only the columns we need
library(dplyr)
base100$"Item Type" <- as.factor(base100$"Item Type")
## Work from the order ID and the Item Type (converted to a data.frame up
## front so Item Type can be handled as a factor):
cal_ben <- data.frame(select(base100, "Item Type","Order ID", "Units Sold", "Unit Price"))
## Add a new column intended to hold [sale price x (1 - 0.19) - purchase
## price = benefit], i.e. the per-item benefit net of the 19% VAT.
## NOTE(review): the formula below multiplies Units.Sold (a quantity) by
## (1 - 0.19) and subtracts Unit.Price (a price) -- the units do not match
## the stated intent. TODO confirm whether Units.Sold should be a price
## column before relying on "beneficio".
cal_ben$beneficio <- ((cal_ben$Units.Sold * (1-0.19)) - cal_ben$Unit.Price)
# Note there are negative values, i.e. rows where there is a loss:
perdida <- filter(cal_ben, beneficio < 0)
names(perdida)
# Meaning the shipping company lost money on those individual products:
cat("\nNumero de productos donde se genera una perdida = ", length(perdida$beneficio))
cat("\n \nProdcutos que generaron perdida")
print(perdida)
## -------------------------------------------------------------------------------------------------------
# Export the files.
# NOTE(review): write.xlsx() belongs to openxlsx (or xlsx), neither of which
# is loaded in this purled script -- the library() call was presumably in an
# Rmd chunk that did not survive purling. Confirm and add library(openxlsx).
## 1) per-sale unit benefit:
write.xlsx(cal_ben, file = "Beneficio_Unit.xlsx")
## 2) the losses:
write.xlsx(perdida, file = "Perdidas_Unit.xlsx")
## -------------------------------------------------------------------------------------------------------
## Pull out the columns we need: total profit summed per item type
baseItem <- aggregate(base100$"Total Profit" ~ base100$"Item Type", data = base100, FUN = sum)
baseItem <- baseItem %>%
  rename(
    "Beneficio_Total" = "base100$\"Total Profit\"",
    "Producto_Envio" = "base100$\"Item Type\""
  )
# Convert one column to factor:
baseItem$"Producto_Envio" <- as.factor(baseItem$"Producto_Envio")
# Several observations follow.
# NOTE(review): the next line is a bare string literal, not a comment -- it
# is evaluated and discarded (or printed when sourced with echo). It was
# presumably meant to be commented out.
"que los cosmeticos son el producto con mayor ingrse para la compañia e envios"
## sort the data (ascending by total benefit):
baseItem <- arrange(baseItem, Beneficio_Total,Producto_Envio)
cat("\nAhora el 1° termino = MENOR GANANCIA, 12° termino = MAYOR GANANCIA\n")
print(baseItem)
cat("\nen la empresa las frutas no son tan rentables como los cosmeticos\n")
# Export the result:
write.xlsx(
  baseItem,
  file = "BeneficioXProdcutoEnviado.xlsx"
)
|
42d26dcb5dd04aee1cc33fb58ad31be863764b37
|
2726e7bdfc2b688ccf7ecaac75a259bcaa0e9817
|
/R/analysis/caselist.R
|
3d1fb3be3920e1b6e3572378b66789a0e618fe13
|
[
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"AGPL-3.0-or-later"
] |
permissive
|
marvinmarnold/oipm_annual_report_2018
|
e62c292c4398bc8f1222b75bb1003b6c5bc0100f
|
da5e5bf06b38280552ea8029cfa0ab4d6dc17c66
|
refs/heads/master
| 2023-03-12T06:24:14.192280
| 2022-02-14T19:47:34
| 2022-02-14T19:56:01
| 172,810,954
| 1
| 1
|
MIT
| 2023-02-28T12:16:21
| 2019-02-27T00:09:56
|
HTML
|
UTF-8
|
R
| false
| false
| 9,218
|
r
|
caselist.R
|
# Load the public list of PIB case numbers to analyse (single unnamed column V1)
case.list <- read.csv(file = "data_public/caselist.csv", header = FALSE)
# NOTE(review): the result of trimws() is discarded, so this line is a no-op
# (the filter below trims inline anyway). Assign it back if trimming
# case.list$V1 in place was intended.
trimws(case.list$V1)
# NOTE(review): this distinct() result is also discarded, and
# allegations.for.year / allegations.all are not defined in this file --
# presumably created by a script sourced beforehand. Confirm the load order.
allegations.for.year %>% distinct(PIB.Control.Number)
# Keep only allegations whose case number appears in the public case list,
# and add a short human-readable district label.
matching.cases <- allegations.all %>%
  filter(
    trimws(PIB.Control.Number) %in% trimws(case.list$V1)) %>%
  mutate(
    district = case_when(
      Assigned.division == "First District" ~ "1st District",
      Assigned.division == "Second District" ~ "2nd District",
      Assigned.division == "Third District" ~ "3rd District",
      Assigned.division == "Fourth District" ~ "4th District",
      Assigned.division == "Fifth District" ~ "5th District",
      Assigned.division == "Sixth District" ~ "6th District",
      Assigned.division == "Seventh District" ~ "7th District",
      Assigned.division == "Eighth District" ~ "8th District",
      TRUE ~ "Other"
    )
  )
# %>%
#   select(
#     PIB.Control.Number,
#     Allegation.Finding.OIPM,
#     Officer.Race,
#     Officer.sex,
#     Citizen.sex,
#     Citizen.race,
#     officer.age.bucket,
#     citizen.age.bucket,
#     Officer.years.exp.at.time.of.UOF,
#     officer.exp.bucket,
#     district
#   ) %>% distinct()
#write.csv(matching.cases, file = "data/case_demographics_v01.csv")
# Headline counts, printed interactively (results are not stored).
# Num officers
matching.cases %>% distinct(Officer.primary.key) %>% nrow()
# Num cases
matching.cases %>% distinct(PIB.Control.Number) %>% nrow()
# Num sustained allegations
matching.cases %>% filter(Allegation.Finding.OIPM == "Sustained") %>% nrow()
# Num sustained cases
matching.cases %>% distinct(PIB.Control.Number, .keep_all = TRUE) %>% filter(Disposition.NOPD == "Sustained") %>% nrow()
# Num allegations
matching.cases %>% nrow()
# Pending
matching.cases %>% filter(Allegation.Finding.OIPM == "Pending")
# Race
## Public
cases.public.race.count <- matching.cases %>%
distinct(PIB.Control.Number, Citizen.primary.key, .keep_all = TRUE) %>%
group_by(Citizen.race) %>%
summarise(num.allegations = n())
title <- "Race of complainant"
p.cases.public.race.count <- plot_ly(cases.public.race.count,
type = 'pie',
name = title,
labels = ~Citizen.race,
values = ~num.allegations,
textposition = 'inside',
textinfo = 'label+value+percent',
insidetextfont = list(color = '#FFFFFF')) %>%
layout(hovermode = "compare", title = title, showlegend = FALSE)
## NOPD
cases.nopd.race.count <- matching.cases %>%
distinct(PIB.Control.Number, Officer.primary.key, .keep_all = TRUE) %>%
group_by(Officer.Race) %>%
summarise(num.allegations = n())
title <- "Race of officer"
p.cases.officer.race.count <- plot_ly(cases.nopd.race.count,
type = 'pie',
name = title,
labels = ~Officer.Race,
values = ~num.allegations,
textposition = 'inside',
textinfo = 'label+value+percent',
insidetextfont = list(color = '#FFFFFF')) %>%
layout(hovermode = "compare", title = title, showlegend = FALSE)
# Sex
## Public
cases.public.sex.count <- matching.cases %>%
distinct(PIB.Control.Number, Citizen.primary.key, .keep_all = TRUE) %>%
group_by(Citizen.sex) %>%
summarise(num.allegations = n())
title <- "Sex of complainant"
p.cases.public.sex.count <- plot_ly(cases.public.sex.count,
type = 'pie',
name = title,
labels = ~Citizen.sex,
values = ~num.allegations,
textposition = 'inside',
textinfo = 'label+value+percent',
insidetextfont = list(color = '#FFFFFF')) %>%
layout(hovermode = "compare", title = title, showlegend = FALSE)
## NOPD
cases.nopd.sex.count <- matching.cases %>%
distinct(PIB.Control.Number, Officer.primary.key, .keep_all = TRUE) %>%
group_by(Officer.sex) %>%
summarise(num.allegations = n())
title <- "Sex of officer"
p.cases.nopd.sex.count <- plot_ly(cases.nopd.sex.count,
type = 'pie',
name = title,
labels = ~Officer.sex,
values = ~num.allegations,
textposition = 'inside',
textinfo = 'label+value+percent',
insidetextfont = list(color = '#FFFFFF')) %>%
layout(hovermode = "compare", title = title, showlegend = FALSE)
# Age
## Public
cases.public.age.count <- matching.cases %>%
distinct(PIB.Control.Number, Citizen.primary.key, .keep_all = TRUE) %>%
group_by(citizen.age.bucket) %>%
summarise(num.allegations = n())
title <- "Age of complainant"
p.cases.public.age.count <- plot_ly(cases.public.age.count,
type = 'pie',
name = title,
labels = ~citizen.age.bucket,
values = ~num.allegations,
textposition = 'inside',
textinfo = 'label+value+percent',
insidetextfont = list(color = '#FFFFFF')) %>%
layout(hovermode = "compare", title = title, showlegend = FALSE)
## NOPD
cases.nopd.age.count <- matching.cases %>%
distinct(PIB.Control.Number, Officer.primary.key, .keep_all = TRUE) %>%
group_by(officer.age.bucket) %>%
summarise(num.allegations = n())
title <- "Age of officer"
p.cases.nopd.age.count <- plot_ly(cases.nopd.age.count,
type = 'pie',
name = title,
labels = ~officer.age.bucket,
values = ~num.allegations,
textposition = 'inside',
textinfo = 'label+value+percent',
insidetextfont = list(color = '#FFFFFF')) %>%
layout(hovermode = "compare", title = title, showlegend = FALSE)
# Experience
# Complaints by officer years-of-experience bucket; dedupe per (case, officer).
cases.nopd.exp.count <- matching.cases %>%
  distinct(PIB.Control.Number, Officer.primary.key, .keep_all = TRUE) %>%
  group_by(officer.exp.bucket) %>%
  summarise(num.allegations = n())
title <- "Experience of officer"
p.cases.nopd.exp.count <- plot_ly(cases.nopd.exp.count,
                                  type = 'pie',
                                  name = title,
                                  labels = ~officer.exp.bucket,
                                  values = ~num.allegations,
                                  textposition = 'inside',
                                  textinfo = 'label+value+percent',
                                  insidetextfont = list(color = '#FFFFFF')) %>%
  layout(hovermode = "compare", title = title, showlegend = FALSE)
# District
# NOTE(review): unlike the counts above, this dedupes at the case level only
# (one row per PIB.Control.Number, keeping an arbitrary officer's district)
# -- confirm that is intended.
cases.nopd.district.count <- matching.cases %>%
  distinct(PIB.Control.Number, .keep_all = TRUE) %>%
  group_by(district) %>% summarise(num.allegations = n())
title <- "District assigned"
p.cases.nopd.district.count <- plot_ly(cases.nopd.district.count,
                                       type = 'pie',
                                       name = title,
                                       labels = ~district,
                                       values = ~num.allegations,
                                       textposition = 'inside',
                                       textinfo = 'label+value+percent',
                                       insidetextfont = list(color = '#FFFFFF')) %>%
  layout(hovermode = "compare", title = title, showlegend = FALSE)
# Allegation outcomes
# Stacked bar of OIPM findings per allegation type; one row per unique
# allegation (Allegation.primary.key).
cases.finding.count <- matching.cases %>%
  distinct(Allegation.primary.key, .keep_all = TRUE) %>%
  group_by(Allegation, Allegation.Finding.OIPM) %>%
  summarise(num.allegations = n())
title <- "Allegation finding"
p.cases.allegation.findings <- plot_ly(cases.finding.count,
                                       x = ~Allegation,
                                       y = ~num.allegations,
                                       type = 'bar',
                                       name = ~Allegation.Finding.OIPM,
                                       color = ~Allegation.Finding.OIPM) %>%
  layout(xaxis = list(title = "Type of allegation",
                      showgrid = F),
         yaxis = list(title = 'Number allegations'),
         barmode = 'stack',
         hovermode = 'compare',
         margin = list(r = 100, b = 200))
# Print the widget (auto-printed at top level in interactive use).
p.cases.allegation.findings
#write.csv(cases.finding.count, file = "data/export/caselist_findings.csv")
|
24d7fcb378ea20bf37a766b418c95a1818462b66
|
21428ae02e7a3181462ed2020a3b24f4cc65e53e
|
/man/write_s3.Rd
|
af8d67643dc351012a5d2dc1f4c5a8b14ceead25
|
[] |
no_license
|
plataformapreventiva/dbrsocial
|
9eadcafea990f27c5ba2e278bc5b5eb243ee0835
|
f20fea2580c99ab0205b9860295f4618ee4ebe5d
|
refs/heads/master
| 2021-09-19T10:31:13.443347
| 2018-06-22T22:53:11
| 2018-06-22T22:53:11
| 126,061,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 500
|
rd
|
write_s3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/retrieve.R
\name{write_s3}
\alias{write_s3}
\title{write_s3}
\usage{
write_s3(dataf, name, s3bucket = Sys.getenv("S3_DIR"))
}
\arguments{
\item{dataf}{The data.frame objet to write as a CSV}
\item{name}{The name of the file in the S3 bucket}
\item{s3bucket}{The name of the S3 bucket}
}
\description{
Writes a dataframe as a CSV in a S3 bucket
}
\examples{
write_s3(the_dic, "dict/fun_dict.csv", Sys.getenv("S3_DIR"))
}
|
0ba9b33c7fffe216c996fdda2a7091a2477d537e
|
57fdd20c9dc7ab94268be24fa69f59c1e09a22cf
|
/tests/testthat/test-est_power_root_test.R
|
9d347af9e93a7b97895ebc2720524527ddf43b6f
|
[] |
no_license
|
slzhao/RnaSeqSampleSize
|
bd991b858b432f097612c5563d03dcfcc52f23df
|
24a51fa945ec44d09cf17a54251dc9220684b023
|
refs/heads/master
| 2023-02-05T06:16:17.389538
| 2022-02-25T15:44:49
| 2022-02-25T15:44:49
| 16,217,256
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 95
|
r
|
test-est_power_root_test.R
|
# Regression check: power computed by est_power_root() at n = 65.
test_that("est_power_root", {
  observed <- est_power_root(n = 65)
  expect_equal(observed, 0.1538854, tolerance = 0.001)
})
|
1dc2b6909bcdc65675018a9ac803c502d3f4d46e
|
a9b64bf34b7d5fd005e4fc24c26a2c5d5500650c
|
/Dia19.R
|
d66b258e68f5654502f9a6208f269e23234a6290
|
[] |
no_license
|
gamerino/30diasdegraficosRFDS
|
877ed7a8bf70df88a557c6dc45e503897bdc1623
|
f3593e51baba0797ea290ae77f0b0b66452c7a93
|
refs/heads/master
| 2022-10-15T19:39:25.690351
| 2020-06-11T00:25:52
| 2020-06-11T00:25:52
| 263,490,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,269
|
r
|
Dia19.R
|
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
#### my black theme
# Dark ggplot2 theme: black panels/background with "lightseagreen" text,
# built on top of theme_minimal().
# base_size: base font size; base_family: base font family.
my_theme_black <- function(base_size = 14, base_family = "") {
  accent <- "lightseagreen"
  # Overrides applied on top of the minimal theme.
  dark_overrides <- theme(
    line = element_line(colour = "black", size = 0.5, linetype = 1,
                        lineend = "butt"),
    rect = element_rect(fill = "black", colour = "black", size = 0.5,
                        linetype = 1),
    text = element_text(family = base_family, face = "plain",
                        colour = accent, size = base_size, angle = 0,
                        lineheight = 0.9, hjust = 0.5, vjust = 0.5),
    plot.background = element_rect(colour = "black", fill = "black"),
    plot.title = element_text(size = rel(1.2)),
    panel.border = element_rect(fill = NA, colour = accent),
    panel.grid.major = element_line(colour = "black", size = 0.2),
    panel.grid.minor = element_line(colour = "black", size = 0.5),
    strip.background = element_rect(fill = "black", colour = "black"),
    # Axis labels slightly smaller than the base size.
    axis.text = element_text(family = base_family, face = "plain",
                             colour = accent, size = base_size - 4,
                             angle = 0, lineheight = 0.9,
                             hjust = 0.5, vjust = 0.5)
  )
  theme_minimal() + dark_overrides
}
# Build a streamgraph of UFC fights per country per year for the last
# 20 complete years (1999-2018).
library(streamgraph)  # fix: streamgraph()/sg_*() were used without loading it
datos <- read.csv("data.csv", sep = ",", header = TRUE)
# Drop incomplete rows.
datos <- datos %>%
  drop_na()
# Parse the date, extract the year, keep 1999-2018.
# 2019 is excluded because the data only runs through June.
datos <- datos %>%
  mutate(date = as.Date(date)) %>%
  mutate(anio = year(date)) %>%
  filter(anio >= 1999 & anio < 2019)
# Split "City, State, Country". Two-part locations leave Country as NA
# with the country in State, so copy it across.
datos <- datos %>%
  separate(location, c("City", "State", "Country"),
           sep = ", ")
datos$Country[is.na(datos$Country)] <- datos$State[is.na(datos$Country)]
# Number of fights per country per year.
datosFilt <- datos %>%
  group_by(anio, Country) %>%
  summarise(n = n())
# Interactive streamgraph of the top 30 countries.
datosFilt %>%
  streamgraph("Country", "n", "anio", interactive = TRUE,
              top = 30, height = 500, width = 700) %>%
  sg_annotate("# combates UFC últimos 20 años",
              x = as.Date("2000-01-01"), y = 400,
              size = 20, color = "Navy") %>%
  sg_axis_x(1, "year", "%Y") %>%
  sg_fill_brewer("Set3") %>%
  sg_legend(show = TRUE, label = "País: ")
|
a1f9856164b68269085e0a46d42e39046e80ea1e
|
88c996723dd3fba59692eadc9cbd0a14634e0d12
|
/man/nNormal.Rd
|
3ed6215dfe8e8ff8da8aa93309a5536830c104cf
|
[] |
no_license
|
AEBilgrau/gsDesign
|
58fc02d16e3034baf613c85c6f45a076cb27abeb
|
f2e1d13bc35aa5a0dc04fc8ecfa6c8a2573921f1
|
refs/heads/master
| 2020-04-09T11:37:22.043117
| 2017-09-12T18:59:05
| 2017-09-12T18:59:05
| 160,317,205
| 2
| 0
| null | 2018-12-04T07:33:26
| 2018-12-04T07:33:26
| null |
UTF-8
|
R
| false
| false
| 3,592
|
rd
|
nNormal.Rd
|
\name{nNormal}
\alias{nNormal}
\title{Normal distribution sample size (2-sample)}
\description{\code{nNormal()} computes a fixed design sample size for comparing 2 means where variance is known.
The function allows computation of sample size for a non-inferiority hypothesis.
Note that you may wish to investigate other R packages such as the \code{pwr} package which uses the t-distribution.
}
\usage{
nNormal(delta1=1,sd=1.7,sd2=NULL,alpha=.025,beta=.1,ratio=1, sided=1,
n=NULL,delta0=0,outtype=1)
}
\arguments{
\item{delta1}{difference between sample means under the alternate hypothesis.}
\item{delta0}{difference between sample means under the null hypothesis; normally this will be left as the default of 0.}
\item{ratio}{randomization ratio of experimental group compared to control.}
\item{sided}{1 for 1-sided test (default), 2 for 2-sided test.}
\item{sd}{Standard deviation for the control arm.}
\item{sd2}{Standard deviation of experimental arm; this will be set to be the same as the control arm with the default of \code{NULL}.}
\item{alpha}{type I error rate. Default is 0.025 since 1-sided testing is default.}
\item{beta}{type II error rate. Default is 0.10 (90\% power). Not needed if \code{n} is provided.}
\item{n}{Sample size; may be input to compute power rather than sample size. If \code{NULL} (default) then sample size is computed.}
\item{delta0}{default value of 0 is set to test for superiority; negative values used for non-inferiority (assuming \code{delta1>0}).} % NOTE(review): duplicates the delta0 item above; consider merging the two descriptions
\item{outtype}{controls output; see value section below.}
}
\details{
\code{nNormal()} computes sample size for comparing two normal means when the variance for observations in each group is known.
}
\value{
If \code{n} is \code{NULL} (default), total sample size (2 arms combined) is computed. Otherwise, power is computed.
If \code{outtype=1} (default), the computed value (sample size or power) is returned in a scalar or vector.
If \code{outtype=2}, a data frame with sample sizes for each arm (\code{n1}, \code{n2})is returned; if \code{n} is not input as \code{NULL}, a third variable, \code{Power}, is added to the output data frame.
If \code{outtype=3}, a data frame with is returned with the following columns:
\item{n}{A vector with total samples size required for each event rate comparison specified}
\item{n1}{A vector of sample sizes for group 1 for each event rate comparison specified}
\item{n2}{A vector of sample sizes for group 2 for each event rate comparison specified}
\item{alpha}{As input}
\item{sided}{As input}
\item{beta}{As input; if \code{n} is input, this is computed}
\item{Power}{If \code{n=NULL} on input, this is \code{1-beta}; otherwise, the power is computed for each sample size input}
\item{sd}{As input}
\item{sd2}{As input}
\item{delta1}{As input}
\item{delta0}{As input}
\item{se}{standard error for estimate of difference in treatment group means}
}
\seealso{\link{gsDesign package overview}}
\author{Keaven Anderson \email{keaven_anderson@merck.com}}
\references{
Lachin JM (1981),
Introduction to sample size determination and power analysis for clinical trials. \emph{Controlled Clinical Trials} 2:93-113.
Snedecor GW and Cochran WG (1989),
Statistical Methods. 8th ed. Ames, IA:
Iowa State University Press.
}
\examples{
# EXAMPLES
# equal variances
nNormal(delta1=.5,sd=1.1,alpha=.025,beta=.2)
# unequal variances
nNormal(delta1=.5,sd=1.1,sd2=2,alpha=.025,beta=.2)
# unequal sample sizes
nNormal(delta1=.5,sd=1.1,alpha=.025,beta=.2, ratio=2)
# non-inferiority assuming a better effect than null
nNormal(delta1=.5,delta0=-.1,sd=1.2)
}
\keyword{design}
|
22f296992a5bd4b7432a550905adb1733dbb2dca
|
0d52cf571dbc0a15096357a46c7a52b954ee090d
|
/karinaScripts/KarinaDateTimeScript.R
|
1266f04e17d315295c36edc45ea5228714cca2d7
|
[] |
no_license
|
arixha/MVTEC-Stats-Project1
|
6da88ca4dcfc767945e00ea54696117bc148dc06
|
6619df7860673f79c75471b005af8a5c2cf936bf
|
refs/heads/main
| 2023-02-15T20:07:54.299459
| 2021-01-08T18:51:54
| 2021-01-08T18:51:54
| 316,367,309
| 0
| 4
| null | 2020-12-04T15:24:43
| 2020-11-27T00:56:30
|
R
|
UTF-8
|
R
| false
| false
| 4,486
|
r
|
KarinaDateTimeScript.R
|
#Time Management
# Teaching script: read the treatment-plant data set, explore one series
# (Q.E, inflow) and plot every numeric column as a time series.
# NOTE(review): setwd() with an absolute path makes the script non-portable.
setwd("D:/karina/docencia/01areferenciesPPT/0DadesPractiques/planta")
#planta<- read.table("depdani41dates.csv",header=T);
planta<- read.table("plantaVminitabMisOK.csv",header=T, sep=";");
names(planta)
n<-dim(planta)[1]
sapply(planta,class)
# Re-read declaring the decimal separator and treating "*" as missing.
planta<- read.table("plantaVminitabMisOK.csv",header=T, sep=";", dec=".", na.strings="*");
attach(planta)
hist(Q.E)
boxplot(Q.E, horizontal=TRUE)
summary(Q.E)
sd(Q.E)
# sd() returns NA when missings are present unless na.rm=TRUE.
sd(Q.E, na.rm=TRUE)
#Graphical representation
plot.ts(Q.E)
# almost equivalent to:
index<-c(1:n)
plot(index, Q.E)
lines(index, Q.E)
#equivalent to
plot(index, Q.E, type="n")
lines(index, Q.E)
#for all numerical variables
K=dim(planta)[2]
par(ask=TRUE)
for(k in 4:K){plot.ts(planta[,k], main=paste("Time Series of", names(planta)[k]))}
par(ask=FALSE)
# Label the x axis with the dates
plot.ts(Q.E, xaxt ="n", main="Time Series of Caudal.E")
ticks<-seq(1,n,by=1)
axis(side = 1, at=ticks, labels = DATE, cex.axis = 0.7, las=2)
# label only every 14th observation so the axis stays readable
plot.ts(Q.E, xaxt ="n", main="Time Series of Caudal.E")
ticks<-seq(1,n,by=14)
axis(side = 1, at=ticks, labels = DATE[ticks], cex.axis = 0.7, las=2)
plot.ts(Q.E, xaxt ="n", main="Time Series of Caudal.E")
ticks<-seq(1,n,by=14)
axis(side = 1, at=ticks, labels = DATEformated[ticks], cex.axis = 0.7, las=2)
# equivalent, going through an explicit ts object:
CaudalE<-ts(Q.E)
class(Q.E)
class(CaudalE)
plot(CaudalE, xaxt ="n", main="Time Series of Caudal.E")
ticks<-seq(1,n,by=14)
axis(side = 1, at=ticks, labels = DATEformated[ticks], cex.axis = 0.7, las=2)
par(ask=TRUE)
for(k in 4:K){
plot.ts(planta[,k], xaxt="n",main=paste("Time Series of", names(planta)[k]))
axis(side = 1, at=ticks, labels = DATEformated[ticks], cex.axis = 0.7, las=2)
}
par(ask=FALSE)
plot.ts(NH4.S)
#NH4 seems empty!!!!
hist(NH4.S)
plot(index, NH4.S)
plot.ts(NH4.S)
points(index, NH4.S)
lines(index,NH4.S)
# Plot IM.B (a series with missing values), then fill the gaps by linear
# interpolation with zoo::na.approx() and plot the completed series.
plot.ts(IM.B, xaxt="n",main="Time Series of IM.B")
axis(side = 1, at=ticks, labels = DATEformated[ticks], cex.axis = 0.7, las=2)
#install.packages("zoo")   # run once if zoo is not installed
library(zoo)  # fix: was commented out, but na.approx() below needs it
IM.Bcomplete<-na.approx(IM.B)
plot.ts(IM.Bcomplete)
plot.ts(IM.Bcomplete, xaxt="n",main="Time Series of IM.B imputed")
axis(side = 1, at=ticks, labels = DATEformated[ticks], cex.axis = 0.7, las=2)
#Managing Dates
hist(DATEformated)
# R does not understand what this variable is
class(DATEformated)
# it treats it as qualitative!
summary(DATEformated)
Data<-as.Date(DATEformated, format="%d/%m/%y")
summary(Data)
#Symbol Meaning Example
# %d day as a number (0-31) 01-31
# %D Date format
# %a Abbreviated weekday Mon
# %A Unabbreviated weekdat Monday
# %m Month (01-12) 00-12
# %b Abbreviated month Jan
# %B Unabbreviated month January
# %y 2-digit year 07
# %Y 4-digit year 2007
##
# %c Date and time
# %C Century
# %H Hours (00-23) 15
# %I Hours (1-12) 3
# %j Day of the year (0-365) 250
# %M Minute (00-59) 34
# %S Second as integer (0-61) 07
# NOTE(review): DateNorm is not defined earlier in this script -- confirm
# it exists in the data / workspace (earlier code used DATEformated).
Data<-as.Date(DateNorm, format="%d/%m/%y")
class(Data)
summary(Data)
sd(Data)
sd(Data, na.rm=TRUE)
par(ask=FALSE)
# Date objects can be binned by calendar units when plotting.
hist(Data)
hist(Data,breaks="weeks")
hist(Data,breaks="months")
hist(Data,breaks="quarters")
fmt <- "%b-%d-%y" # format for axis labels
labs <- format(Data, fmt)
CaudalE<-ts(Q.E)
class(CaudalE)
plot(CaudalE, main="Time Series of Caudal.E")
plot(CaudalE, xaxt ="n", main="Time Series of Caudal.E")
ticks<-seq(1,n,by=14)
axis(side = 1, at=ticks, labels = Data[ticks], cex.axis = 0.7, las=2)
fmt <- "%b-%d-%y" # format for axis labels
labs <- format(Data, fmt)
CaudalE<-ts(Q.E)
plot(CaudalE, xaxt ="n", main="Time Series of Caudal.E")
ticks<-seq(1,n,by=14)
axis(side = 1, at=ticks, labels = labs[ticks], cex.axis = 0.7, las=2)
# denser labelling: every 7th observation
plot(CaudalE, xaxt ="n", main="Time Series of Caudal.E")
ticks<-seq(1,n,by=7)
axis(side = 1, at=ticks, labels = labs[ticks], cex.axis = 0.7, las=2)
# Hours
DataHoraria<-as.POSIXct(DateNorm, format="%d/%m/%y")
head(DataHoraria)
DH<-format(DataHoraria,"%d/%m/%y %H:%M" )
print(head(DH))
# the temporal-variable status does not propagate to copies
hist(DH, breaks="months")
summary(DH)
# they must always be declared again
DH<-as.POSIXct(DH, format="%d/%m/%y %H:%M")
hist(DH, breaks="months")
summary(DH)
# to decompose the date
weekdays(DataHoraria)
# NOTE(review): trending_date is undefined in this script -- verify.
diaSemana<-strftime(as.Date(trending_date, format="%y.%d.%m"), "%d")
# Warning! If we have several hourly measurements we can operate on them:
#difftime<-time1-time2
#hist(unclass(difftime))
#summary(as.numeric(difftime))
|
76b761ef924b967b80fe967d07cc0c7d523a4e93
|
ff9a90bbc356f91b643fc89e7360c4e69172b030
|
/R_code_pca_remote_sensing.r
|
cc0ce4a42c1c96d446469461ab2608cf14c3dfa7
|
[] |
no_license
|
GiuliaAdeleTuci/repository1
|
4e80d6b76067e6d488dcda3ef7e7ec7af0060de9
|
e73692ebd91e773f4e7fb2609d7c8a3b7633a464
|
refs/heads/master
| 2021-04-23T23:09:01.259237
| 2020-06-22T14:25:59
| 2020-06-22T14:25:59
| 250,028,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,351
|
r
|
R_code_pca_remote_sensing.r
|
# R code for PCA on remote sensing imagery (Landsat scene p224r63,
# 1988 vs 2011): visual comparison, band correlation, PCA, and change map.
setwd("/Users/giulia/lab")
library(raster)
library(RStoolbox)
# now we use the function brick that is used to import the whole image of the satellite.
p224r63_2011 <- brick("p224r63_2011_masked.grd")
# Landsat band layout:
#b1 blue
#b2 green
#b3 red
#b4 NIR
#b5 SWIR (short-wave infrared)
#b6 thermal infrared
#b7 SWIR
#b8 panchromatic
# now we plot the image in the RGB space
plotRGB(p224r63_2011, r=5, g=4, b=3, stretch="Lin")
# how to plot this data by ggplot2
library(ggplot2)
ggRGB(p224r63_2011,5,4,3)
# similar image, different way of plotting
# let's do the same with the 1988 image
p224r63_1988 <- brick("p224r63_1988_masked.grd")
plotRGB(p224r63_1988, r=5, g=4, b=3, stretch="Lin")
# now i put together the two images
par(mfrow=c(1,2))
plotRGB(p224r63_1988, r=5, g=4, b=3, stretch="Lin")
plotRGB(p224r63_2011, r=5, g=4, b=3, stretch="Lin")
# let's see if the bands are correlated to each other
# being correlated means that you are following the pattern of another variable,
# we are going to see if band3 is correlated to band1 so if having small values of B1 is related to the values on B3
# first we need to know the names of those bands
names(p224r63_2011)
#"B1_sre" "B2_sre" "B3_sre" "B4_sre" "B5_sre" "B6_bt" "B7_sre"
# $ links the bands to the image
plot(p224r63_2011$B1_sre, p224r63_2011$B3_sre)
# we see very high correlation, we can then see the correlation coefficient R.
# positive correlation: R=1, flat correlation: R=0, negative correlation: R=-1
# in this case it is very high
# in this case 90% of the data is in the PC1 and only a small amount on the PC2 (principal component)
# let's see this in R
# first we need to reduce the resolution, now there are too many pixels -> aggregate function, we are decreasing with a factor 10
p224r63_2011_res <- aggregate(p224r63_2011, fact=10)
#library RStoolbox is needed
p224r63_2011_pca <- rasterPCA(p224r63_2011_res)
plot(p224r63_2011_pca$map)
# $ is linking all the different pieces of the output, call, model and map
# let's change the colors
cl <- colorRampPalette(c("dark grey","grey","light grey"))(100)
plot(p224r63_2011_pca$map, col=cl)
# we want to see the info about the model
summary(p224r63_2011_pca$model)
# we can see that PC1 is accounting for 99.83% of the whole variation
pairs(p224r63_2011)
# to see the amount of correlation between the different data
# we see that the whole set is hugely correlated to each other
# now we can plot the first 3 components for example, with plotRGB
plotRGB(p224r63_2011_pca$map, r=1, g=2, b=3, stretch="Lin")
# let's do the same for the 1988
p224r63_1988_res <- aggregate(p224r63_1988, fact=10)
p224r63_1988_pca <- rasterPCA(p224r63_1988_res)
plot(p224r63_1988_pca$map, col=cl)
# here as well we can see that the PC1 has the highest amount of information
summary(p224r63_1988_pca$model) # we see that there is high correlation
pairs(p224r63_1988)
# now we can make a difference between the 1988 and 2011 and then plotting the difference.
# we are making the difference of every pixel
difpca <- p224r63_2011_pca$map - p224r63_1988_pca$map
plot(difpca)
# since the PC1 contains most of the information so we can also only plot this one, only 1 layer
cldif <- colorRampPalette(c('blue','black','yellow'))(100)
plot(difpca$PC1, col=cldif)
# we see the areas that have changed most
|
dc0f828681dbcbdb13369ce600020c73836734a4
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/elo/R/fitted.R
|
f06e439a515bc21f4982a845ddce9b8f64569138
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
fitted.R
|
#' Extract model values
#'
#' Extract model values from \code{elo} functions.
#'
#' @param object An object.
#' @param ... Other arguments.
#' @param running logical, denoting whether to use the running predicted values.
#' @name fitted.elo
NULL
#> NULL
# NULL above is the roxygen documentation stub shared by the methods below.
## stats:::terms.default and stats:::weights.default also work
#' @rdname fitted.elo
#' @export
fitted.elo.run <- function(object, ...)
{
  # The predicted-value column sits immediately after the per-player
  # Elo columns; restore NA positions removed during fitting.
  pred.col <- sum(object$n.players) + 1
  stats::napredict(object$na.action, object$elos[, pred.col])
}
#' @rdname fitted.elo
#' @export
residuals.elo.run <- function(object, ...)
{
  # Columns after the per-player Elos: (n+1) = predicted, (n+2) = observed.
  n.cols <- sum(object$n.players)
  observed <- object$elos[, n.cols + 2]
  predicted <- object$elos[, n.cols + 1]
  stats::naresid(object$na.action, observed - predicted)
}
#' @rdname fitted.elo
#' @export
fitted.elo.running <- function(object, running = TRUE, ...)
{
  # Use the stored running predictions unless the caller opts out, in
  # which case fall through to the next method in the class chain.
  if(running) {
    stats::napredict(object$na.action, object$running.values)
  } else {
    NextMethod()
  }
}
#' @rdname fitted.elo
#' @export
fitted.elo.glm <- function(object, ...)
{
  # Pad the stored fitted values back to the original row positions.
  fv <- object$fitted.values
  stats::napredict(object$na.action, fv)
}
#' @rdname fitted.elo
#' @export
fitted.elo.markovchain <- fitted.elo.glm  # these classes store fitted.values
#' @rdname fitted.elo
#' @export
fitted.elo.winpct <- fitted.elo.glm       # the same way glm fits do, so the
#' @rdname fitted.elo
#' @export
fitted.elo.colley <- fitted.elo.glm       # glm method is reused directly
|
8ae708b8ab67de3295bf7c80dd0186eca33fa31f
|
60128cee7625b0fb020c4387931964af7e498b36
|
/man/refineryLP.Rd
|
21f8d810f13725d1b51f86060d07d2440f017ed6
|
[] |
no_license
|
fmair/RTL
|
7b91cfde5d690dce43b28107e6fcf445a994b642
|
3892587a12caa1b403ccf10f99c1dc200fe5392b
|
refs/heads/master
| 2023-03-04T00:42:39.117858
| 2021-01-26T01:10:56
| 2021-01-26T01:10:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 554
|
rd
|
refineryLP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/refineryLP.R
\name{refineryLP}
\alias{refineryLP}
\title{\code{refineryLP}}
\usage{
refineryLP(crudes = ref.opt.inputs, products = ref.opt.outputs)
}
\arguments{
\item{crudes}{Data frame of crude inputs}
\item{products}{Data frame of product outputs and max outputs.}
}
\value{
Optimal crude slate and profits
}
\description{
Plain vanilla refinery optimization LP model.
}
\examples{
refineryLP(crudes = ref.opt.inputs, products = ref.opt.outputs)
}
\author{
Philippe Cote
}
|
942c6b0cb67256bdd976d1cbf24006bed7260686
|
13c0687d2111ea43aad32f319b46a6f67bd2f53a
|
/__09_week_01/__09_week_serialize_02blobs/__09_go_createxAudioTable_03segsFun.R
|
0acf904e52d287e4090204ffd9387c38af4476b3
|
[] |
no_license
|
davezes/MAS405_S2021
|
0a672892ca320dbad952697948b610adcffc0656
|
025a3ea1de268c05c1a426bdb8944d2367789847
|
refs/heads/master
| 2023-05-10T22:14:20.857391
| 2021-06-05T21:33:26
| 2021-06-05T21:33:26
| 350,537,508
| 4
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,234
|
r
|
__09_go_createxAudioTable_03segsFun.R
|
######################### The directory that encloses this file
######################### is your working directory
######################### __09_audioFiles_01 must be a sibling
######################### efficient
# Setup: DB driver + credentials from environment variables, list the
# .wav files to serialize, and sanity-check one by loading and playing it.
options(stringsAsFactors=FALSE, width=200)
projpath <- getwd()
# Default the MySQL unix-socket parameter when not supplied by the caller.
if(!exists("xdbsock")) {
xdbsock <- ""
cat("\n", "Parameter 'xdbsock' not found, setting to empty string for general usage", "\n")
}
source("__f_funs.R" )
library(RMySQL)
library(rjson)
#library(png)
library(audio)
drv <- dbDriver("MySQL")
#############
# Connection settings come from the environment; never hard-code them.
xdbuser <- Sys.getenv("MAS405_AWS_MY_DB_ADMIN_USER")
xdbpw <- Sys.getenv("MAS405_AWS_MY_DB_ADMIN_PW")
xdbname <- Sys.getenv("MAS405_AWS_MY_DB_ADMIN_DBNAME")
xdbhost <- Sys.getenv("MAS405_AWS_MY_DB_ADMIN_HOST")
xdbport <- as.integer( Sys.getenv("MAS405_AWS_MY_DB_ADMIN_PORT") )
########################################
#########################
#########################
###tools::file_path_sans_ext(xfl)
######################### example of "serializing" to raw then segmenting
xfl <- list.files( file.path( "..", "__09_week_audioFiles_01" ), pattern="*.wav$" )
ii <- 1
xthisFN <- xfl[ii]
xwav <- load.wave( file.path("..", "__09_week_audioFiles_01", xfl[ii]))
xdim <- dim(xwav) ; xdim
class(xwav)
# Interactive audio playback sanity check.
aobj <- play(x=xwav)
pause(x=aobj)
rewind(x=aobj)
resume(x=aobj)
########################### use function to write
# Serialize every .wav into the DB as xz-compressed BLOB segments of
# 10^6 bytes each, via f_dbwrite_raw() from __f_funs.R, timing the loop.
yynow <- Sys.time()
for(ii in seq_along(xfl)) {
  xthisFN <- xfl[ii]
  xwav <- load.wave( file.path("..", "__09_week_audioFiles_01", xfl[ii]))
  f_dbwrite_raw(
    xobj=xwav,
    xtableName="MAS405audio_serialSegs",
    xfile=xthisFN,
    xdrv=drv,
    xdbuser=xdbuser,
    xdbpw=xdbpw,
    xdbname=xdbname,
    xdbhost=xdbhost,
    xdbport=xdbport,
    xoverWrite=TRUE,
    xsegSize=10^6,
    xverbosity=2,
    xcomp="xz"
  )
  cat("\n\n\n\n", ii, "\n\n\n\n\n")
}
# Elapsed minutes. Fix: later time must come first, otherwise the
# reported duration is negative.
cat(difftime(Sys.time(), yynow, units="mins"))
########################### use function to read the segments back
# (the original header said "write"; this section reads and reassembles)
ii <- 2
xthisFN <- xfl[ii] ; xthisFN
zobj <-
f_dbread_raw(
xtableName="MAS405audio_serialSegs",
xfile=xthisFN,
xdrv=drv,
xdbuser=xdbuser,
xdbpw=xdbpw,
xdbname=xdbname,
xdbhost=xdbhost,
xdbport=xdbport,
xverbosity=2,
xcomp="xz"
)
zwav <- zobj
############################# convert to string -- use compression
############################# convert to string -- use compression
class(zwav)
# Play the round-tripped audio to verify it survived serialization.
cobj <- play(x=zwav)
pause(x=cobj)
rewind(x=cobj)
resume(x=cobj)
##### remember what I said earlier about full paths ?
### save.wave(zwav, file.path("~", "Desktop", "audio_file_out.wav"))
### save.wave(zwav, file.path("", "Users", "davezes", "Desktop", "audio_file_out.wav"))
####################################
# NOTE(review): consecutive reassignments -- only the last value is used.
xthis_file <- "thisTrackOut.wav"
xthis_file <- "Wheelfull4B.wav"
xthis_file <- "MassAttackIamHome.wav"
##########################
# Report per-table storage used, in MB, from information_schema.
con <- dbConnect(drv, user=xdbuser, password=xdbpw, dbname=xdbname, host=xdbhost, port=xdbport, unix.sock=xdbsock)
qstr <-
"
SELECT
table_name AS 'Table_Name',
round(((data_length + index_length) / 1024 / 1024), 3) 'Size_in_MB'
FROM
information_schema.TABLES
"
dbGetQuery(con, qstr)
####################### beautiful
dbDisconnect(con)
#################
|
384c86d495980a8e1d806e1c099e74e95d135264
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Devore7/examples/ex13.32.Rd.R
|
ad348c280cbf0d203c711a54f6dc04cf54929eab
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
ex13.32.Rd.R
|
library(Devore7)
### Name: ex13.32
### Title: R Data set: ex13.32
### Aliases: ex13.32
### Keywords: datasets
### ** Examples
# Load the Devore7 example data set into the workspace and inspect it.
data(ex13.32)
str(ex13.32)
|
c3fc668ad9f5c8548d6536bb0a516bddcc5917b6
|
e4755d1e2207edc616f4f20eb2d4e5fb65a71c42
|
/man/DiversityCurves.Rd
|
f0ce1bec0f56d6731867cc447b7a1bdb01592c54
|
[
"CC0-1.0"
] |
permissive
|
dwbapst/paleotree
|
14bbfd5b312848c109a5fc539a1e82978a760538
|
95c2f57e91c4204c04cd59d9662ba94c43c87a60
|
refs/heads/master
| 2022-09-23T03:57:35.959138
| 2022-08-25T18:29:50
| 2022-08-25T18:29:50
| 3,827,289
| 20
| 11
| null | 2022-08-25T18:30:48
| 2012-03-25T21:01:38
|
R
|
UTF-8
|
R
| false
| true
| 11,061
|
rd
|
DiversityCurves.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DiversityCurves.R
\name{DiversityCurves}
\alias{DiversityCurves}
\alias{taxicDivCont}
\alias{taxicDivDisc}
\alias{phyloDiv}
\title{Diversity Curves}
\usage{
taxicDivCont(
timeData,
int.length = 1,
int.times = NULL,
plot = TRUE,
plotLogRich = FALSE,
timelims = NULL,
drop.cryptic = FALSE
)
taxicDivDisc(
timeList,
int.times = NULL,
drop.singletons = FALSE,
plot = TRUE,
plotLogRich = FALSE,
timelims = NULL,
extant.adjust = 0.001,
split.int = TRUE
)
phyloDiv(
tree,
int.length = 0.1,
int.times = NULL,
plot = TRUE,
plotLogRich = FALSE,
drop.ZLB = TRUE,
timelims = NULL
)
}
\arguments{
\item{timeData}{Two-column matrix giving the per-taxon first and last
appearances in absolute time. The simulated data tables output by \code{fossilRecord2fossilTaxa}
following simulation with \code{simFossilRecord} can also be supplied to \code{taxicDivCont}.}
\item{int.length}{The length of intervals used to make the diversity curve.
Ignored if \code{int.times} is given.}
\item{int.times}{An optional two-column matrix of the interval start and end
times for calculating the diversity curve. If \code{NULL}, calculated internally.
If given, the argument \code{split.int} and \code{int.length} are ignored.}
\item{plot}{If \code{TRUE}, a diversity curve generated from the data is plotted.}
\item{plotLogRich}{If \code{TRUE}, taxic diversity is plotted on log scale.}
\item{timelims}{Limits for the x (time) axis for diversity curve plots. Only
affects plotting. Given as either \code{NULL} (the default) or as a vector of
length two as for \code{xlim} in the basic R function \code{plot}. Time axes
will be plotted \emph{exactly} to these values.}
\item{drop.cryptic}{If \code{TRUE}, cryptic taxa are merged to form one taxon for
estimating taxon curves. Only works for objects from \code{simFossilRecord}
via \code{fossilRecord2fossilTaxa}.}
\item{timeList}{A list composed of two matrices, giving interval start and end
dates and taxon first and last occurrences within those intervals. See details.}
\item{drop.singletons}{If \code{TRUE}, taxa confined to a single interval will be
dropped prior to the diversity curve calculation. This is sometimes done if
single intervals have overly high diversities due to the 'monograph' effect
where more named taxa are known in certain intervals largely due to
taxonomic expert effort and not real changes in historical biotic diversity.}
\item{extant.adjust}{Amount of time to be added to extend start time for
(0,0) bins for extant taxa, so that the that 'time interval' does not appear
to have an infinitely small width.}
\item{split.int}{For discrete time data, should calculated/input intervals
be split at discrete time interval boundaries? If \code{FALSE}, can create apparent
artifacts in calculating the diversity curve. See details.}
\item{tree}{A time-scaled phylogeny of class \code{phylo}.}
\item{drop.ZLB}{If \code{TRUE}, zero-length terminal branches are dropped from the
input tree for phylogenetic datasets, before calculating standing diversity.}
}
\value{
These functions will invisibly return a three-column matrix, where
the first two columns are interval start and end times and the third column
is the number of taxa (or lineages) counted in that interval.
}
\description{
Functions to plot diversity curves based on taxic range data, in both
discrete and continuous time, and for phylogenies.
}
\details{
First, some background.
Diversity curves are plots of species/taxon/lineage richness
over time for a particular group of organisms. For paleontological studies,
these are generally based on per-taxon range data while more recently in
evolutionary biology, molecular phylogenies have been used to calculate
lineage-through-time plots (LTTs). Neither of these approaches are without
their particular weaknesses; reconstructing the true history of biodiversity
is a difficult task no matter what data is available.
The diversity curves produced by these functions will always measure
diversity within binned time intervals (and plot them as rectangular bins).
For continuous-time data or phylogenies, one could decrease the int.length
used to get what is essentially an 'instantaneous' estimate of diversity.
This is warned against, however, as most historical diversity data will have
some time-averaging or uncertain temporal resolution and thus is probably
not finely-resolved enough to calculate instantaneous estimates of
diversity.
As with many functions in the \code{paleotree} library, absolute time is always
decreasing, i.e. the present day is zero.
As diversity is counted within binned intervals, the true standing diversity
may be somewhat lower than the measured (observed) quantity, particularly if
intervals are longer than the mean duration of taxa is used. This will be an
issue with all diversity curve functions, but particularly the discrete-time
variant. For diversity data in particularly large discrete time intervals,
plotting this data in smaller bins which do not line up completely with the
original intervals will create a 'spiky' diversity curve, as these smaller
intersecting bins will have a large number of taxa which may have been
present in either of the neighboring intervals. This will give these small
bins an apparently high estimated standing diversity. This artifact is
avoided with the default setting \code{split.int = TRUE}, which will split any input
or calculated intervals so that they start and end at the boundaries of the
discrete-time range bins.
The \code{timeList} object should be a list composed of two matrices, the first
matrix giving by-interval start and end times (in absolute time), the second
matrix giving the by-taxon first and last appearances in the intervals
defined in the first matrix, numbered as the rows. Absolute time should be
decreasing, while the intervals should be numbered so that the number
increases with time. Taxa alive in the modern should be listed as last
occurring in a time interval that begins at time 0 and ends at time 0.
See the documentation for the time-scaling function
\code{\link{bin_timePaleoPhy}} and the simulation function
\code{\link{binTimeData}} for more information on formatting.
Unlike some \code{paleotree} functions,
such as \code{\link{perCapitaRates}}, the intervals
can be overlapping or of unequal length. The diversity curve functions
deal with such issues by assuming taxa occur from the base of the interval
they are first found in until the end of the last interval they are occur
in. Taxa in wide-ranging intervals that contain many others will be treated
as occurring in all nested intervals.
\code{\link{phyloDiv}} will resolve polytomies to be dichotomous nodes separated by
zero-length branches prior to calculating the diversity curve. There is no
option to alter this behavior, but it should not affect the use of the
function because the addition of the zero-length branches should produce an
identical diversity history as a polytomy. \code{phyloDiv} will also drop
zero-length terminal branches, as with the function \code{\link{dropZLB}}. This the
default behavior for the function but can be turned off by setting the
argument \code{drop.zlb} to FALSE.
}
\examples{
# taxicDivDisc example with the retiolinae dataset
data(retiolitinae)
taxicDivDisc(retioRanges)
##################################################
# simulation examples
# 07-15-19
# note that the examples below are weird and rather old
# the incomplete sampling can now be done
# with the same function that simulates diversification
set.seed(444)
record <- simFossilRecord(
p = 0.1,
q = 0.1,
nruns = 1,
nTotalTaxa = c(30,40),
nExtant = 0)
taxa <- fossilRecord2fossilTaxa(record)
# let's see what the 'true' diversity curve looks like in this case
#plot the FADs and LADs with taxicDivCont
taxicDivCont(taxa)
# simulate a fossil record with imperfect sampling via sampleRanges
rangesCont <- sampleRanges(taxa, r = 0.5)
# plot the diversity curve based on the sampled ranges
layout(1:2)
taxicDivCont(rangesCont)
# Now let's use binTimeData to bin in intervals of 1 time unit
rangesDisc <- binTimeData(rangesCont,
int.length = 1)
# plot with taxicDivDisc
taxicDivDisc(rangesDisc)
# compare to the continuous time diversity curve
layout(1)
# Now let's make a tree using taxa2phylo
tree <- taxa2phylo(taxa,obs_time = rangesCont[,2])
phyloDiv(tree)
# a simple example with phyloDiv
# using a tree from rtree in ape
set.seed(444)
tree <- rtree(100)
phyloDiv(tree)
###########################################################
#a neat example of using phyDiv with timeSliceTree
#to simulate doing molecular-phylogeny studies
#of diversification...in the past
set.seed(444)
record <- simFossilRecord(
p = 0.1,
q = 0.1,
nruns = 1,
nTotalTaxa = c(30,40),
nExtant = 0)
taxa <- fossilRecord2fossilTaxa(record)
taxicDivCont(taxa)
#that's the whole diversity curve
#with timeSliceTree we could look at the lineage accumulation curve
#we'd get of species sampled at a point in time
tree <- taxa2phylo(taxa)
#use timeSliceTree to make tree of relationships up until time = 950
tree950 <- timeSliceTree(tree,
sliceTime = 950,
plot = TRUE,
drop.extinct = FALSE)
#use drop.extinct = TRUE to only get the tree of lineages extant at time = 950
tree950 <- timeSliceTree(tree,
sliceTime = 950,
plot = TRUE,
drop.extinct = TRUE)
#now its an ultrametric tree with many fewer tips...
#lets plot the lineage accumulation plot on a log scale
phyloDiv(tree950,
plotLogRich = TRUE)
##################################################
#an example of a 'spiky' diversity curve
# and why split.int is a good thing
set.seed(444)
record <- simFossilRecord(
p = 0.1,
q = 0.1,
nruns = 1,
nTotalTaxa = c(30,40),
nExtant = 0)
taxa <- fossilRecord2fossilTaxa(record)
taxaDiv <- taxicDivCont(taxa)
#simulate a fossil record with imperfect sampling with sampleRanges
rangesCont <- sampleRanges(taxa, r = 0.5)
rangesDisc <- binTimeData(rangesCont,
int.length = 10)
#now let's plot with taxicDivDisc
# but with the intervals from taxaDiv
# by default, split.int = TRUE
taxicDivDisc(rangesDisc,
int.times = taxaDiv[,1:2],
split.int = TRUE)
#look pretty!
#now let's turn off split.int
taxicDivDisc(rangesDisc,
int.times = taxaDiv[,1:2],
split.int = FALSE)
#looks 'spiky'!
}
\seealso{
\code{\link{multiDiv}}, \code{\link{timeSliceTree}},
\code{\link{binTimeData}}
There are several different functions for traditional LTT plots
(phylogenetic diversity curves), such as the function
\code{\link{ltt.plot}} in the package \code{ape}, the function \code{ltt} in the
package \code{phytools}, the function \code{plotLtt} in the package \code{laser} and the
function \code{LTT.average.root} in the package \code{TreeSim}.
}
\author{
David W. Bapst
}
|
df0f729fb01441cb974de8cc61d30f0b7532de24
|
9901e0bb747feddc319e723071a2723b424f1f1a
|
/sample30_transformdemo.R
|
2e3cf17787bb230cc3ee815d9843a1f6ed9dd87a
|
[] |
no_license
|
paeoigner/rps_citi_ml_r2018
|
9707bf35196b5dfbf34326aae5026d33a4d0326f
|
2dff20f380606b505c0eed9f93d64fd45955aaf3
|
refs/heads/master
| 2020-05-16T10:46:51.350889
| 2018-04-13T12:24:49
| 2018-04-13T12:24:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
sample30_transformdemo.R
|
# Demo of base::transform() on the built-in `airquality` data set.
# Rewritten without attach()/detach(): attaching a data frame puts its
# columns on the global search path, a well-known source of name-masking
# bugs, and is unnecessary here.
airquality
# Derive a log column. Wrapping the vector in a named data frame reproduces
# the old attach()-based `transform(Ozone, ...)` call, but with a meaningful
# column name instead of the auto-generated one transform.default creates.
transform(data.frame(Ozone = airquality$Ozone),
          logOzone = log(Ozone)) # marginally interesting ...
# Negate a column (transform returns a modified copy; airquality is untouched).
transform(airquality, Ozone = -Ozone)
# Add a new column and convert Temp from Fahrenheit to Celsius in one call.
transform(airquality, new = -Ozone, Temp = (Temp-32)/1.8)
|
c6cde54892742bdd6f5d9669db4d9635422254df
|
21095827293e28a04cee3d8c13b1ea24080e7334
|
/man/pichor.Rd
|
4b213e36bd2e12da76d5c32810dcfcddf0fc7e41
|
[] |
no_license
|
mikldk/pichor
|
791d572caaa9c8097b98bbcd70f1de719418a402
|
3f02903ac651dd70c2fe4a9200e58bd42307165d
|
refs/heads/master
| 2023-02-06T00:02:28.986768
| 2023-01-22T13:08:29
| 2023-01-22T13:08:29
| 184,456,422
| 11
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 198
|
rd
|
pichor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pichor.R
\docType{package}
\name{pichor}
\alias{pichor}
\title{\code{pichor} package}
\description{
Piano chords in R
}
|
698e8317e30c4936067dcea594252f8d500359ff
|
cec39fbac1e0086f328739d63b7a946b0efbd82a
|
/ShinyApp/data/ShinyAppStuff/app.R
|
3fa5981d6b2a1f2648f015c743f00c4d0428e9e1
|
[] |
no_license
|
DSPG-Young-Scholars-Program/dspg21zimbabwe
|
88c3c2feabb3d48ef14aa875d4ef67bf5ea3244c
|
3807aaf4cbc5b627d2169bf755dbe234b9880149
|
refs/heads/main
| 2023-07-12T20:20:59.691496
| 2021-08-04T22:15:44
| 2021-08-04T22:15:44
| 392,812,259
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,927
|
r
|
app.R
|
# Setup ---------------------------------------------------------
# NOTE(review): the original began with
#   setwd("/Users/mattb24/Zimbabwe/2021_DSPG_Zimbabwe/ShinyApp/ShinyAppStuff")
#   rm(list=ls())
# Both removed: shiny::runApp() evaluates app.R with the app directory as
# the working directory, the absolute path breaks on any other machine (and
# on shinyapps.io), and wiping the caller's workspace is a side effect an
# app should not perform. All file paths below are relative to the app dir.
library(shiny)
library(ggplot2)
library(shinydashboard)
library(DT)
library(shinyWidgets)
library(shinydashboardPlus)
library(leaflet)
library(dashboardthemes)
library(readr)
library(collapsibleTree)
library(tidyverse)
library(viridis)
library(sf)
library(mapview)
library(dplyr)
library(tidycensus)
library(sp)
library(readxl)
library(tigris)
library(shinyjs)
library(rgdal)
library(broom)
#library(RColorBrewer)
#library(osmdata)
#library(purrr)
#library(osrm)
#library(rmapzen)
#library(rgdal)
#library(ggplot2)
#library(scales)
#library(nycflights13)
#install.packages("rsconnect")
#library(rsconnect)
#rsconnect::deployApp('~/git/WytheFinalDash/ShinyFinalPresentation', account = "wythecountydash")
#rsconnect::deployApp('path/to/your/app')
# source("theme.R")
# #Get Data
## SETTING UP MPI Data
# Load the province boundary shapefile (relative to the app directory).
ZimMap <- readOGR(dsn = "ProvinceShapes", layer="zwe_admbnda_adm1_zimstat_ocha_20180911")
# Province names in shapefile order.
# NOTE(review): `id` is never used again -- the merge and aes() below use the
# "id" *column* of datapoly, not this vector. Kept for compatibility.
id <- ZimMap@data[["ADM1_EN"]]
# Per-province MPI measures; columns M0_k1..M2_k9 are read by the server.
MPIData <- read.csv(file = 'ProvinceData.csv')
# Rename the district to id so it matches the fortified polygon data.
colnames(MPIData)[1] <- "id"
### 'fortify' the data to get a dataframe format required by ggplot2 By Yang
ZimMap_fortified <- tidy(ZimMap, region = "ADM1_EN")
# Attach the MPI measures to every polygon-vertex row for plotting.
datapoly <- merge(ZimMap_fortified, MPIData, by = c("id"))
# Siderbar(LEFT) ----------------------------------------------------------
# Left-hand navigation: one menu entry per dashboard tab.
# `id = "tabs"` exposes the currently selected tab to the server as input$tabs.
sidebar <- dashboardSidebar(
  sidebarMenu(
    id = "tabs",
    menuItem(text = "Project Overview", tabName = "overview",
             icon = icon("globe-africa")),
    menuItem(text = "Data & Methodology", tabName = "data",
             icon = icon("database"),
             badgeLabel = "data", badgeColor = "green"),
    menuItem(text = "MPI", tabName = "MPI",
             icon = icon("map-marked-alt"),
             badgeLabel = "data", badgeColor = "green"),
    menuItem(text = "Poverty Index", tabName = "M0"),
    menuItem(text = "Adjusted Poverty Gap", tabName = "M1"),
    menuItem(text = "Adjusted Poverty Severity", tabName = "M2"),
    menuItem(text = "Team", tabName = "team",
             icon = icon("user-friends"))
  )
)
# Body ----------------------------------------------------------
# Dashboard body: one tabItem per sidebar menu entry (overview, data,
# MPI, team, plus the three measure tabs M0/M1/M2).
# Fixes relative to the original:
#  * removed `theme = shinytheme('superhero')` in the M0 tab -- shinythemes
#    is never attached, so evaluating this body raised
#    'could not find function "shinytheme"'.
#  * the three selectInputs all shared inputId "pillar_variable"; Shiny
#    input IDs must be unique, so they are suffixed per tab (the server
#    does not read them yet, so no caller breaks).
#  * stray trailing commas that produced empty arguments were removed.
body <- dashboardBody(
  tabItems(
    ## Tab Introduction to Zimbabwe --------------------------------------------
    tabItem(tabName = "overview",
            #fluidRow(
            box(
              title = "Introduction to Zimbabwe",
              closable = FALSE,
              width = NULL,
              status = "warning",
              solidHeader = TRUE,
              collapsible = TRUE,
              img(src = "Zimbabwe_Flag.png", height="100", width="200", alt="Image", style="float: left; margin: 3px 12px 3px 0px; border: 1px solid #000000;"),
              h2("Country Briefing"),
              br(),
              br(),
              p("Text"),
              br(),
              br(),
              h2("Recent History"),
              p("In the first decade of the 21st century, Zimbabwe suffered from significant hyperinflation resultant of an overall government budget deficit and a simultaneous period of monetary policy that increased the amount of money in circulation. This hyperinflation, in turn, led to economic crisis as foreign investment dropped and Zimbabwean currency eventually crashed. In 2009, Zimbabwe was dollarized in an effort to mitigate inflation. Although this move was relatively successful at stabilizing the economy, the effects of economic strife still linger throughout the country. A money metric approach to defining poverty is understandably insufficient in this case due to the extreme discrepancies between Zimbabwe’s modern currency and its antiquated currency. Additionally, variations in consumption, prices, and household income distribution can make it difficult to provide an accurate account of money metric poverty as the value of money is hardly standardized.")
            ),
            box(
              title = "Data Science for the Public Good",
              closable = FALSE,
              width = NULL,
              status = "warning",
              solidHeader = TRUE,
              collapsible = TRUE,
              h2("Potential Application of a Multidimensional Poverty Index"),
              p("To address these shortcomings of typical poverty analysis, Alkire-Foster developed methodology requisite for Multidimensional Poverty Indices (MPIs). An MPI is designed to account for such discrepancies by interpreting poverty as the inability to satisfy a certain list of needs. In this way, MPIs allow for an encompassing assessment of poverty that is applicable regardless of the predictability, or lack thereof, of money. This feature is especially helpful when measuring poverty in Zimbabwe due to the recent volatility of the country’s economy. Due to the demonstrated utility and applicability of such indexes, the DSPG Zimbabwe team has been tasked with creating an MPI that will accurately measure poverty in Zimbabwe and to map the calculated values across the country’s districts. The final result will include an interactive visualization of poverty in Zimbabwe as it exists in multiple dimensions, incidences, and intensities. ")
            ),
            box(
              title = "References",
              closable = FALSE,
              width = NULL,
              status = "warning",
              solidHeader = TRUE,
              collapsible = TRUE,
              p("Alkire, S., & Santos, M. “Measuring Acute Poverty in the Developing World: Robustness and Scope of the Multidimensional Poverty.” Index World Development 59 (2014): 251-274."),
              p("Coomer, J., & Gstraunthaler. “The Hyperinflation in Zimbabwe.” Quarterly journal of Australian economics 14.3 (2011): 311-346.")
            )
    ),
    ## Tab Data & Methodology--------------------------------------------
    tabItem(tabName = "data",
            #fluidRow(
            box(
              title = "Data",
              closable = FALSE,
              width = NULL,
              status = "warning",
              solidHeader = TRUE,
              collapsible = TRUE,
              # NOTE(review): width = 16 exceeds Bootstrap's 12-column grid -- confirm intended.
              tabBox(
                title = NULL, width = 16, height = "auto",
                tabPanel("PICES Overview",
                         img(src = "zimstat_logo.png", height="100", width="200", alt="Image", style="float: left; margin: 3px 12px 3px 0px; border: 1px solid #000000;"),
                         p("To gather data necessary for MPI construction, the DSPG team utilized the 2017 Poverty, Income, Consumption and Expenditure Survey (PICES) administered by the Zimbabwe National Statistics Agency (ZimStat). The country-wide survey is conducted every five years as a means of collecting comprehensive information regarding demographics, overall standards of living, poverty levels, and disparities between socio-economic groups. This data is categorized along individual, household, district, and country levels. The PICES Questionnaire is comprised of various modules that collect respondent input. The 2017 iteration included modules focused on population and household characteristics, household economy, household incomes, agriculture output and input, informal sector activities, international migration, and disability. These modules, completed by survey respondents in the civilian sector, provide insight on the general state of the population and will be used by our team to understand specific aspects of poverty in Zimbabwe."),
                         h3("References:"),
                         p("Zimbabwe National Statistics Agency. “Poverty, Income, Consumption and Expenditure Survey 2017 Report.” (2018): 1-160.")),
                tabPanel("Sampling"),
                tabPanel("Weights"))
            ),
            #Methodology box
            box(
              title = "Methodology Overview",
              closable = FALSE,
              width = NULL,
              status = "warning",
              solidHeader = TRUE,
              collapsible = TRUE,
              tabBox(
                title = NULL, width = 16, height = "auto",
                tabPanel("Multidimensional Poverty Index",
                         h2("MPI Overview"),
                         p("Work produced by the DSPG team will emulate that of Stoeffler, et al. which constructed an MPI and utilized 2001, 2007, and 2011-2012 PICES data to track Zimbabwean poverty over time in a 2015. Following their lead, our MPI will consist of eight dimensions and fourteen variables that indicate a populations status in said dimension. Each dimension and variable is weighted on the grounds of how impactful it is to the wellbeing of either the individual or the household.
                     The relevant dimensions, their respective variables, and the designated weights of the MPI can be found under the ‘Tables’ tab below:
                     ")),
                tabPanel("Measurements",
                         h2("Measurements"),
                         p("A deprivation count, k, falling between k = 1 and k = 4 is used as a threshold for determining poverty. Those who exceed or equal threshold k will be considered poor while those who do not exceed or equal threshold k will be considered not poor and thus will not be included. To achieve this result, an Alkire-Foster ‘counting approach’ will be employed and poverty will be assigned only to those whose weighted sum deprivation count is greater than k. This creates a double cut-off in which only those who are deprived in multiple dimensions are considered and those who are deprived in a single dimension are non-factors. Once these impoverished populations are determined, three main measurements can be calculated."),
                         h3("M0"),
                         p("M0 is the primary method for determining a population’s poverty. It is calculated by multiplying the Headcount Ratio, H, with the Average Deprivation Share. The Headcount Ratio is the number of people who are considered poor based on a threshold, k. The Average Deprivation Share is the number of actual deprivations collected from each dimension divided by the number of potential deprivations that the state could have. In our case, this is the sum of the weights of each dimension multiplied by the population of the country."),
                         h3("M1"),
                         p("M1 measures what is called the Adjusted Poverty Gap. This measure is obtained by taking the average of the gaps of poor individuals below some poverty line, k. If the individual is above this threshold, k, their poverty gap is zero. Otherwise, it is always positive. This ensures that the needs of poor people are not skewed by wealthier counterparts."),
                         h3("M2"),
                         p("M2 is a measure of the Adjusted Poverty Severity. This measure is obtained by taking the average of the square of the gaps of poor individuals below some poverty line, k. It is very similar to M1, the only difference is that the poverty gap is squared. The quadratic nature of this measurement helps to give more weight to the people who are significantly below the poverty line as opposed to those who fall just beneath it."),
                         p("The formulas needed to calculate these measures are indicated in the 'Formulas' tab below:")),
                tabPanel("Formulas",
                         img(src = "zimbabwe_formulas.png", height="700", width="500", alt="Image", style="float: middle; margin: 10px 10px 125px 125px; border: 5px solid #000000;"),
                         p("Source: Source: Stoeffler, Quentin, et al. “Multidimensional poverty in crisis: Lessons from Zimbabwe.” The journal of development studies 52.3 (2016): 428-446.")),
                tabPanel("Variables and Dimensions",
                         img(src = "zimbabwe_var_dim.png", height="700", width="500", alt="Image", style="float: middle; margin: 10px 10px 125px 125px; border: 5px solid #000000;"),
                         p("Source: Source: Stoeffler, Quentin, et al. “Multidimensional poverty in crisis: Lessons from Zimbabwe.” The journal of development studies 52.3 (2016): 428-446."))
              )),
            ####Methodology references
            box(
              title = "References",
              closable = FALSE,
              width = NULL,
              status = "warning",
              solidHeader = TRUE,
              collapsible = TRUE,
              p("Stoeffler, Quentin, et al. “Multidimensional poverty in crisis: Lessons from Zimbabwe.” The journal of development studies 52.3 (2016): 428-446.")
            )
    ),
    ## Tab 3 MPI--------------------------------------------
    tabItem(tabName = "MPI",
            fluidRow(
              h2("MPI"),
              # NOTE(review): the paragraphs and box title below describe Wythe /
              # Loudoun County, not Zimbabwe -- placeholder text carried over
              # from another dashboard; confirm and replace.
              p("To understand the full suite of amenities available to HGBs in Wythe,
              we used publicly available demographic and infrastructure data to provide
              an overview of the built capital amenities in Wythe."),
              p("In many respects, Wythe County is uniquely endowed with built amenities
              attractive to businesses (William and Lamb, 2010). It is situated at the
              intersection of two major interstates, and it is within a six-to-eight-hour
              drive of most of the population in the United States. As the map shows,
              it also has easy access to rail and other supporting infrastructure (e.g., powerplants)
              for commerce and manufacturing. From an “access to major markets” perspective,
              Wythe is an attractive location for both light and heavy industry."),
              box(
                title = "Loudoun County Programs/Services",
                closable = FALSE,
                status = "warning",
                solidHeader = TRUE,
                collapsible = TRUE,
                width = NULL,
                #enable_dropdown = TRUE,
                #dropdown_icon = "",
                #dropdown_menu = tagList(selectInput("var","Select a Variable",choices = c("Level of Education","Industry","Home Values","Household Income","Household Size"))),
                #leafletOutput("wythe_infrastructure"),
                tabBox(
                  title = NULL, width = 16,
                  # The id lets us use input$tabset1 on the server to find the current tab
                  id = "tabset1", height = "350px",
                  # NOTE(review): outputs label_1..3 / mapplot_1..3 have no
                  # corresponding render* calls in server() -- they render blank.
                  tabPanel("All",
                           sidebarLayout(
                             sidebarPanel(
                               selectInput("pillar_variable_all", "Pillar Variable:",
                                           c("Education", "Employment", "Housing","Insurance","Transportation","Policy and Funding","All"))
                               ### Can add more inputs????
                               # selectInput("time_variable", "Time Variable:",
                               #             c("60 minutes" = "60",
                               #               "45 minutes" = "45",
                               #               "30 minutes" = "30"))
                             ),
                             # Show a plot of the generated distribution
                             mainPanel(
                               tableOutput("label_1"),
                               leafletOutput("mapplot_1")
                               #mapview:::plainViewOutput("test")
                             )
                           )
                  ),
                  tabPanel("Juvenile",
                           sidebarPanel(
                             selectInput("pillar_variable_juvenile", "Pillar Variable:",
                                         c("Education", "Employment", "Housing","Insurance","Transportation","Policy and Funding","All"))
                           ),
                           # Show a plot of the generated distribution
                           mainPanel(
                             tableOutput("label_2"),
                             leafletOutput("mapplot_2")
                             #mapview:::plainViewOutput("test")
                           )
                  ),
                  tabPanel("Foster Care",
                           sidebarPanel(
                             selectInput("pillar_variable_foster", "Pillar Variable:",
                                         c("Education", "Employment", "Housing","Insurance","Transportation","Policy and Funding","All"))
                           ),
                           # Show a plot of the generated distribution
                           mainPanel(
                             tableOutput("label_3"),
                             leafletOutput("mapplot_3")
                             #mapview:::plainViewOutput("test")
                           )
                  )
                )
              )
            )),
    ## Tab 8 Team--------------------------------------------
    tabItem(tabName = "team",
            fluidRow(
              box(
                title = "Team",
                closable = FALSE,
                width = NULL,
                status = "warning",
                solidHeader = TRUE,
                collapsible = TRUE,
                h2("Data Science for the Public Good Program"),
                p("The Data Science for the Public Good (DSPG) Young Scholars program is a summer immersive program held at the Biocomplexity Institute’s Social and Decision Analytics Division (SDAD). In its seventh year, the program engages students from across the country to work together on projects that address state, federal, and local government challenges around critical social issues relevant in the world today. DSPG young scholars conduct research at the intersection of statistics, computation, and the social sciences to determine how information generated within every community can be leveraged to improve quality of life and inform public policy. For more information on program highlights, how to apply, and our annual symposium, please visit the official Biocomplexity DSPG website."),
                h2("2021 Loudoun County Summer Project"),
                p("Our project goal was to identify industries and the code that are expected to grow rapidly in the future. We visualized these code by the skills, education, experience and training needed to do them. We then created measures to visualize and assess the ability of Wythe County and the surrounding region to respond to training the workers of tomorrow. Our team is comprised of talented individuals with a broad range of skills and experience."),
                h2("DSPG Team Members"),
                img(src = 'Josh.Beverly.VT.jpg', height = "150", width = "140", align = "center"),
                img(src = 'Dylan.Glover.VT.jpg', height = "150", width = "140", align = "center"),
                img(src = 'Afrina.Tabassum.VT.jpg', height = "150", width = "140", align = "center"),
                img(src = 'Adam.Wells.VT.jpg', height = "150", width = "140", align = "center"),
                br(),
                br(),
                p("Yang Cheng, Fellow (Ph.D. Student at Virginia Tech, )"),
                p("JaiDa Robinson, Fellow (Ph.D. Student at Virginia State University, )"),
                p("Julie Rebstock, Intern (Undergraduate Student at Virginia Tech, Computational Modeling and Data Anaylytics and Economics)"),
                p("Austin Burcham, Intern (Undergraduate. Student at Virginia Tech, )"),
                p("Kyle Jacobs, Intern (Undergraduate Student at Virginia State University,)"),
                h2("Virginia Tech Faculty Team Members"),
                img(src = 'Susan.Chen.VT.jpg', height = "150", width = "140", align = "center"),
                img(src = 'Conaway.Haskins.VT.jpg', height = "150", width = "140", align = "center"),
                img(src = 'Matt.Holt.VT.jpg', height = "150", width = "140", align = "center"),
                img(src = 'Ford.Ramsey.VT.jpg', height = "150", width = "140", align = "center"),
                br(),
                br(),
                p("Susan Chen (Associate Professor, Food and Health Economics, DSPG Project Co-Lead)"),
                p("Chanita Homles ()"),
                p("Isabel Bradburn ()"),
                h2("Project Sponsors"),
                img(src = 'VCE.Logo.png', height = "150", width = "200", align = "center", style="display: block; margin-left: auto; margin-right: auto;"),
                p("Matthew Miller (Unit Coordinator and Extension Agent, Agriculture and Natural Resources - Farm Business Management)"),
                h2("Acknowledgements"),
                p("We would like to thank:"),
                p(" (),"),
                p(" ()"),
                p(" ()")
              )
            )),
    tabItem(
      tabName = "M0",
      # Everything has to be put in a row or column
      fluidPage(
        # (The original `theme = shinytheme('superhero')` argument was removed
        # here: shinythemes is never attached, so it raised an error.)
        # Make a box with a plot inside of it
        box(
          title = "Multidimensional Poverty Index (By Province)",
          plotOutput("M0_plot", height = 300, width = 400),
          width = 12
        ),
        box(
          title = 'Deprivation Cutoff',
          sliderInput("slider0", "K-Threshold Value", 1, 9, 3),
          width = 12))),
    tabItem(
      tabName = "M1",
      fluidPage(
        box(
          title = "Adjusted Poverty Gap (By Province)",
          plotOutput("M1_plot", height = 300, width = 440),
          width = 12
        ),
        box(
          title = "Deprivation Cutoff",
          sliderInput("slider1", "K-Threshold Value", 1, 9, 3),
          width = 12
        )
      )
    ),
    tabItem(
      tabName = "M2",
      fluidPage(
        box(
          title = "Adjusted Poverty Severity (By Province)",
          plotOutput("M2_plot", height = 300, width = 480),
          width = 12
        ),
        box(
          title = "Deprivation Cutoff",
          sliderInput("slider2", "K-Threshold Value", 1, 9, 3),
          width = 12
        )
      )
    )
  )
)
# UI--------------------------
# Assemble the full page from its parts (shinydashboardPlus layout;
# the "midnight" skin is a shinydashboardPlus extension).
ui <- dashboardPage(
  header = dashboardHeader(title = "Zimbabwe(Draft)"),
  sidebar = sidebar,
  body = body,
  skin = 'midnight'
)
# Server------------------------------------------
# Server: wires the three deprivation-cutoff sliders to their province maps.
server <- function(input, output, session) {

  # Build a province choropleth for one poverty measure.
  # `prefix` selects the measure family ("M0", "M1" or "M2") and `k` is the
  # deprivation cutoff chosen on a slider (1-9); together they name a column
  # of `datapoly` such as "M0_k3". `legend_title` labels the fill scale.
  # Replaces the original three copy-pasted 9-branch switch() ladders.
  measure_map <- function(prefix, k, legend_title) {
    fill_col <- paste0(prefix, "_k", k)
    ggplot(datapoly, aes(x = long, y = lat, group = group)) +
      # `.data[[fill_col]]` looks the column up by name (ggplot2 tidy eval).
      geom_polygon(aes(fill = .data[[fill_col]], group = id)) +
      scale_fill_gradient(low = 'grey', high = 'maroon') +
      labs(fill = legend_title)
  }

  # Demo scatterplot that highlights rows selected in `mytable`.
  # NOTE(review): no plotOutput("myplot") / DTOutput("mytable") exists in the
  # UI, so these two outputs are currently dead code -- confirm before removal.
  output$myplot <- renderPlot({
    gg <- ggplot(data = mtcars, aes(x = mpg, y = disp)) +
      geom_point()
    idx <- input$mytable_rows_selected
    if (!is.null(idx))
      gg + geom_point(size = 5, data = mtcars %>% slice(idx))
    else gg
  })
  output$mytable <- DT::renderDT({
    mtcars
  })

  # One choropleth per measure, driven by its own slider.
  output$M0_plot <- renderPlot({
    measure_map("M0", input$slider0, "Poverty Index")
  })
  output$M1_plot <- renderPlot({
    measure_map("M1", input$slider1, "Adj. Poverty Gap")
  })
  output$M2_plot <- renderPlot({
    measure_map("M2", input$slider2, "Adj. Poverty Severity")
  })
}
# Shiny App------------------------
# Entry point: launch the app from the `ui` layout and `server` logic above.
shinyApp(ui = ui, server = server)
|
3b3bdeb080c0b585ebd745a59078f397092fcca1
|
cfdf91f84e6d70e4c26f41e335a78cb553dd8e3e
|
/Desktop/binomial/man/bin_distribution.Rd
|
d48d3cab731bc19984355e83a3e68e252973a6c4
|
[] |
no_license
|
stat133-sp19/hw-stat133-eunicesou-1
|
e2747697708fabad9f18118a7d180bf6394df32d
|
bf5649a29b29a53bf943c4aebe5662fc23671ab1
|
refs/heads/master
| 2020-04-28T19:53:11.229228
| 2019-05-02T22:07:02
| 2019-05-02T22:07:02
| 175,525,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 608
|
rd
|
bin_distribution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomal.R
\name{bin_distribution}
\alias{bin_distribution}
\title{Binomial distribution Function}
\usage{
bin_distribution(trials, prob)
}
\arguments{
\item{trials}{Total number of trials}
\item{prob}{Probability of success}
}
\value{
A data frame representing the binomial distribution with two classes ("bindis" and "data.frame")
}
\description{
Creates a binomial distribution data frame using the binomial probability function with successes and probabilities as columns
}
\examples{
bin_distribution(trials = 5, prob = 0.5)
}
|
cff2711635a55e4733d779ee1a39980ad31041bd
|
6393059185fa3456b16d4e69193828686bcc3a22
|
/Code/step1_dictionary/step1_dictionary_PITF.R
|
5f431c3e46ba44ab20e4ba8f627fd53442ddb244
|
[] |
no_license
|
zhukovyuri/xSub_ReplicationCode
|
49c6f092a8dfdd44762e493ed4155dedcc37c371
|
a8c83d60ed1076d5a22f4d3fb3bc28fbbbcd0a81
|
refs/heads/master
| 2021-05-03T10:21:31.921202
| 2019-06-19T00:33:06
| 2019-06-19T00:33:06
| 120,532,879
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,956
|
r
|
step1_dictionary_PITF.R
|
## Step 1: build per-country xSub actor dictionaries from PITF event data.
# NOTE(review): the original started with rm(list=ls()); removed -- wiping
# the caller's workspace is a side effect a script should not have.
## Set directory
# NOTE(review): setwd() to a user-specific path makes the script non-portable;
# kept because every path below is relative to it, but consider an RStudio
# project or the here package instead.
setwd("~/Dropbox2/Dropbox (Zhukov research team)/XSub/Data/")
# setwd("C:/Users/nadiya/Dropbox (Zhukov research team)/XSub/Data")
## Install & load packages (all at once); "gdata" was listed twice originally.
list.of.packages <- c("gdata","countrycode","maptools","foreign","plotrix","sp","raster","rgeos")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)){install.packages(new.packages,dependencies=TRUE)}
lapply(list.of.packages, require, character.only = TRUE)
## Load custom functions (provides actorList(), used below)
source("Code/functions.R")
#############################
## Clean up data
#############################
# Three vintages of the PITF atrocities event data; X / X.1 are spreadsheet
# index columns left over from export -- drop them.
data1 <- read.csv("Input/Events/PITF/pitf_1995_2012.csv")
data1$X <- data1$X.1 <- NULL
data2 <- read.csv("Input/Events/PITF/pitf_2013_2015.csv")
data2$X <- data2$X.1 <- NULL
data3 <- read.csv("Input/Events/PITF/pitf.world.20160101-20170930.csv")
data3$X <- data3$X.1 <- NULL
# Make columns consistent across the three files (data3's odd names are
# renamed to data1's; the bare comparisons just print mismatches for review).
names(data1)[!names(data1)%in%names(data2)]
names(data2)[!names(data2)%in%names(data1)]
# names(data2)[!names(data2)%in%names(data1)] <- names(data1)[!names(data1)%in%names(data2)]
names(data1)[!names(data1)%in%names(data3)]
names(data3)[!names(data3)%in%names(data1)]
names(data3)[!names(data3)%in%names(data1)] <- names(data1)[!names(data1)%in%names(data3)]
names(data2)[!names(data2)%in%names(data3)]
names(data3)[!names(data3)%in%names(data2)]
# Coerce every latitude/longitude column (except *Direction) to numeric.
lvarz <- names(data1)[grepl("LAT_|LONG_",names(data1))&(!grepl("Direction$",names(data1)))]
for(j in seq_along(lvarz)){
  data1[,lvarz[j]] <- as.numeric(as.character(data1[,lvarz[j]]))
  data2[,lvarz[j]] <- as.numeric(as.character(data2[,lvarz[j]]))
  data3[,lvarz[j]] <- as.numeric(as.character(data3[,lvarz[j]]))
}
# Convert every factor column to character. (The original did this with three
# copy-pasted 1:ncol() loops; the `classez` vector they built was never used.)
factor_to_character <- function(df) {
  df[] <- lapply(df, function(col) if (is.factor(col)) as.character(col) else col)
  df
}
data1 <- factor_to_character(data1)
data2 <- factor_to_character(data2)
data3 <- factor_to_character(data3)
# Merge: stack the three vintages on their shared columns.
commonvars <- intersect(names(data1),names(data2))
commonvars <- intersect(commonvars,names(data3))
data <- rbind(data1[,commonvars],data2[,commonvars],data3[,commonvars])
head(data)
rm(data1,data2,data3,commonvars)
# Country codes: map PITF country labels to ISO3, then patch the
# non-standard / whitespace-polluted labels countrycode cannot resolve.
countrylist <- sort(unique(as.character(data$country)))
countrylist <- data.frame(country=countrylist,iso3=countrycode(countrylist,origin="iso3c",destination="iso3c"))
for(j in 1:2){countrylist[,j]<-as.character(countrylist[,j])}
countrylist[is.na(countrylist$iso3),]
countrylist[countrylist$country=="ALG","iso3"] <- countrycode("Algeria","country.name","iso3c")
countrylist[countrylist$country=="BRZ","iso3"] <- countrycode("Brazil","country.name","iso3c")
countrylist[countrylist$country=="CAR","iso3"] <- countrycode("Central African Republic","country.name","iso3c")
countrylist[countrylist$country=="ELS","iso3"] <- countrycode("El Salvador","country.name","iso3c")
countrylist[countrylist$country=="GZS","iso3"] <- countrycode("Gaza","country.name","iso3c")
countrylist[countrylist$country=="\nIRQ","iso3"] <- countrycode("Iraq","country.name","iso3c")
countrylist[countrylist$country=="IRQ ","iso3"] <- countrycode("Iraq","country.name","iso3c")
countrylist[countrylist$country=="NGR","iso3"] <- countrycode("Nigeria","country.name","iso3c")
countrylist[countrylist$country=="SOM ","iso3"] <- countrycode("Somalia","country.name","iso3c")
countrylist[countrylist$country=="SUD","iso3"] <- countrycode("Sudan","country.name","iso3c")
countrylist[countrylist$country=="SYR ","iso3"] <- countrycode("Syria","country.name","iso3c")
countrylist[countrylist$country=="THL","iso3"] <- countrycode("Thailand","country.name","iso3c")
countrylist[countrylist$country=="\nYEM","iso3"] <- countrycode("Yemen","country.name","iso3c")
# Attach ISO3 codes to the events; drop rows with no resolvable country.
pitf.raw <- merge(data,countrylist,by="country",all.x=TRUE,all.y=TRUE)
pitf.raw <- pitf.raw[!is.na(pitf.raw$iso3),]
# save(pitf.raw,file="Input/Events/PITF/pitf_1995_2017.RData")
#############################
## Create actor dictionary
#############################
countrylist <- countrylist[!is.na(countrylist$iso3),]
dir("Dictionaries/PITF/PITF_1995_2012/")
dir("Dictionaries/PITF/")
# Keep only countries whose dictionary file does not exist yet.
countrylist <- countrylist[countrylist$iso3%in%countrylist$iso3[!countrylist$iso3%in%gsub("PITF_|_Actors.RData","",dir("Dictionaries/PITF"))],]
# Loop by country (first line finds k where you left off)
k0 <- max(which(countrylist$iso3%in%gsub("PITF_|_Actors.RData","",dir("Dictionaries/PITF/"))))+1
k0
# NOTE(review): k0 is computed but the loop starts at 1, not k0 -- confirm
# whether the restart index was meant to be used.
# seq_len() (instead of 1:nrow) makes the loop a no-op when nothing remains.
for(k in seq_len(nrow(countrylist))){
  subdata <- pitf.raw[pitf.raw$country%in%c(countrylist[k,"country"]),]
  print(countrylist[k,"country"])
  actorlist <- actorList(subdata,sidea="Perp.State.Role",sideb="Perp.State.Role",timevar="year",countryvar="country")
  save(actorlist,file=paste0("Dictionaries/PITF/PITF_",countrylist[k,"iso3"],"_Actors.RData"))
}
|
6d040f4e1759d807ba1eda44281cad51799dc30b
|
7715230204185f9f0c8d7f1fe2d480c1e7056779
|
/Scripts/Personal Stuff/Lyse van Wijk/RYouReady/Scripts/RYouReady_visualization.R
|
5c4cb543b7423a5ce223ba20b9363cf293feedd8
|
[] |
no_license
|
lysevanwijk/RCourseHWStudyGroup
|
c8900133bb3f5518ab24bf582f935d2fc5e1aa0f
|
5af6054e405805503bcfaf9cd723c84349393e39
|
refs/heads/main
| 2023-08-30T18:11:15.073722
| 2021-10-20T09:08:48
| 2021-10-20T09:08:48
| 323,101,186
| 0
| 0
| null | 2020-12-20T15:17:53
| 2020-12-20T15:17:53
| null |
UTF-8
|
R
| false
| false
| 4,551
|
r
|
RYouReady_visualization.R
|
# Clear the global environment.
# NOTE(review): rm(list=ls()) in scripts is discouraged -- prefer
# restarting the R session instead.
rm(list=ls())
# Load packages: tidyverse for wrangling/plotting, ggbeeswarm for
# geom_quasirandom(), RColorBrewer for palettes.
library(tidyverse)
library(here)
library(janitor)
library(janeaustenr)
library(ggbeeswarm)
library(RColorBrewer)
# Plotting: sex (Geslacht) vs. mean pain score, three ways of spreading
# the points. Assumes a data frame `Example_LongFormat` with columns
# Geslacht and vasPijnGemiddeld_1 is already loaded -- not created here.
Example_LongFormat %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1))+
  geom_point()
# Jittered points to reduce overplotting.
Example_LongFormat %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1))+
  geom_jitter()
# Quasirandom (beeswarm-style) spread of the points.
Example_LongFormat %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1))+
  geom_quasirandom()
# Plotting with colour and styling (was: "plotten met kleur en opmaak").
# Drop rows with any NA first.
Example_LongFormat %>%
  na.omit()%>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1)) +
  geom_point()
# Colour by sex, flip the axes so round descriptions read horizontally.
Example_LongFormat %>%
  na.omit()%>%
  ggplot(aes(x=rounddescription, y=vasPijnGemiddeld_1,colour = Geslacht)) +
  geom_jitter() +
  coord_flip()
#using facet wrap: one panel per measurement round
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1)) +
  geom_jitter() +
  facet_wrap(~rounddescription)
# Add colour for sex (was: "kleur voor geslacht toevoegen").
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1, colour = Geslacht)) +
  geom_jitter() +
  facet_wrap(~rounddescription)
# Pipe filters into the plot: keep only pain scores strictly inside (0, 100).
Example_LongFormat %>%
  na.omit() %>%
  filter(vasPijnGemiddeld_1 <100) %>%
  filter(vasPijnGemiddeld_1 > 0 ) %>%
  ggplot(aes(x=Geslacht, y= vasPijnGemiddeld_1, colour = Geslacht)) +
  geom_jitter()+
  facet_wrap(~rounddescription)
# Save the last plot (was: "plot opslaan") -----
ggsave("plotgeslachtpijn.png")
#boxes and violins -----
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1, colour = Geslacht)) +
  geom_boxplot ()
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1, colour = Geslacht)) +
  geom_violin()
# Facet wrap and fill the violins (was: "facet wrap en inkleuren violin").
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1, colour = Geslacht, fill = Geslacht)) +
  geom_violin()+
  facet_wrap(~rounddescription)
#histogram -----
# Quick base-R histogram for comparison with the ggplot version below.
hist(Example_LongFormat$vasPijnGemiddeld_1)
Example_LongFormat %>%
  na.omit()%>%
  filter(vasPijnGemiddeld_1 <100) %>%
  ggplot(aes(x=vasPijnGemiddeld_1, colour = Geslacht, fill = Geslacht)) +
  geom_histogram(binwidth = 5)
#combination plot: boxplot with raw points overlaid, coloured by round -----
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1)) +
  geom_boxplot ()+
  geom_point(aes(colour = rounddescription))+
  coord_flip()
#bar and column plots -----
# geom_bar() counts rows per category...
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht))+
  geom_bar()
# ...while geom_col() plots the supplied y values (here: summed per group).
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=Geslacht, y=vasPijnGemiddeld_1))+
  geom_col()
# Add error bars (was: "errorbar toevoegen"): mean +/- standard error -----
Example_LongFormat%>%
  na.omit()%>%
  group_by(Geslacht) %>%
  summarise(mean=mean(vasPijnGemiddeld_1),
            sd = sd(vasPijnGemiddeld_1),
            n = n(),
            sterr = sd/sqrt(n)) %>%
  ggplot(aes(x = Geslacht, y=mean)) +
  geom_col()+
  geom_errorbar(aes(x=Geslacht, ymin=mean-sterr, ymax=mean+sterr, colour = Geslacht))
#scatter plots -----
# Pain vs. hand function with a smoothed trend per sex.
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=vasPijnGemiddeld_1, y=vasFunctie_1, color = Geslacht)) +
  geom_point() +
  geom_smooth()
# Changing the look of plots: themes -----
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=vasPijnGemiddeld_1, y=vasFunctie_1, color = Geslacht)) +
  geom_point() +
  geom_smooth() +
  theme_classic()
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=vasPijnGemiddeld_1, y=vasFunctie_1, color = Geslacht)) +
  geom_point() +
  geom_smooth()+
  theme_minimal()
# To set a default theme, put the following at the top of your script
# (was a Dutch note):
# theme_set(theme_classic())
# Adjust colours (was: "kleuren aanpassen"): continuous colour gradient.
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=vasPijnGemiddeld_1, y=vasFunctie_1, color = vasPijnGemiddeld_1)) +
  geom_point() +
  geom_smooth() +
  theme_classic() +
  scale_colour_gradient (low= "blue", high="red")
#use a palette for setting color (shows all RColorBrewer palettes first)
display.brewer.all()
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=vasPijnGemiddeld_1, y=vasFunctie_1, color = vasPijnGemiddeld_1)) +
  geom_point() +
  geom_smooth() +
  theme_classic() +
  scale_colour_distiller(palette = "Pastel2")
#add titles and change axis labels -----
Example_LongFormat %>%
  na.omit() %>%
  ggplot(aes(x=vasPijnGemiddeld_1, y=vasFunctie_1, color = vasPijnGemiddeld_1)) +
  geom_point() +
  geom_smooth() +
  theme_classic() +
  scale_colour_distiller(palette = "Pastel2") +
  labs(title = "Relationship between pain and handfunction",
       subtitle = "Shown for men and women",
       caption = "Data from HWSG",
       x = "VAS function 0-100",
       y = "VAS pain 0-100")
|
17bb6d0c99ad298ddb27c857298cba4aceae3929
|
992b81fbb548f19eae4af8be17a37ba3d4cdfb8a
|
/lab03.R
|
6fedfc147f72382488e1bd6f7955e30888df359b
|
[] |
no_license
|
OsamaNadeeem/lab03
|
39da5c0a6c7e8fa7646bf7ab40d877a23c23d742
|
3be0e7c647f7bbc78ef388ede148c51f93d498b2
|
refs/heads/master
| 2021-07-07T19:49:50.020055
| 2017-09-29T12:08:38
| 2017-09-29T12:08:38
| 105,267,901
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 576
|
r
|
lab03.R
|
# Four bar charts of mtcars variables, one bar per car, labelled via
# names.arg.
#
# Fixes vs. the original:
# * The first call passed mtcars$hp as a stray positional argument, which
#   barplot() matched to `width`, silently scaling every bar's width by
#   horsepower. hp is already shown as the bar labels, so the extra
#   argument is dropped and bars get the default uniform width.
# * mtcars$qs relied on `$` partial matching to reach the qsec column;
#   spelled out as mtcars$qsec (same column, no behavior change).
barplot(height = mtcars$mpg, col = rainbow(1), ylab = "MPG",
        xlab = "Horse Power", main = "Miles/h Vs Horse Power",
        names.arg = mtcars$hp, las = 2, cex.names = 0.8)
barplot(mtcars$qsec, ylab = "Qsec", xlab = "EngineCC", col = rainbow(1),
        names.arg = mtcars$disp, main = "Engine Vs Qsec", las = 2)
barplot(mtcars$gear, ylab = "Gears", xlab = "Transmission", col = rainbow(1),
        names.arg = mtcars$am, main = "Transmission Vs Gears", las = 2)
barplot(mtcars$wt, ylab = "Weight", xlab = "Horse Power", col = rainbow(1),
        names.arg = mtcars$hp, main = "Weight Vs Horse", las = 2)
|
11f814613fc54c2241b1a5c5010fcb00f7e95ea5
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/reliability/round_prob.R
|
ec90bd09a289f140aa19b2a8bcb4eb724630f758
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 863
|
r
|
round_prob.R
|
#
# round_prob.R, 23 Apr 18
# Data from:
# Example
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG example round-number number_round
# Project-local plotting defaults; this file must exist in the working
# directory for the script to run.
source("ESEUR_config.r")
# One line colour per value of k plotted below.
pal_col=rainbow(4)
# Formula from:
# Very, Many, Small, {Penguins}: {Vaguely} Related Topics
# Harald A. Bastiaanse
# Probability that the speaker actually rounded, given an estimate of the
# probability that they rounded (est) and rounding granularity k:
#   k / (k + 1/est - 1)
# Formula from Bastiaanse, "Very, Many, Small, {Penguins}".
#
# Generalized: `est` is now an explicit parameter instead of silently
# reading the global `x`. It defaults to `x` (resolved lazily at call
# time), so the existing calls speaker_rounded(2) etc. keep working.
speaker_rounded <- function(k, est = x)
{
  k / (k + (1 / est) - 1)
}
# Grid of estimates that the speaker rounded (x axis).
x=seq(0.05, 0.9, by=0.05)
# One curve per rounding granularity k = 2, 4, 6, 8; speaker_rounded()
# reads the global x defined just above.
plot(x, speaker_rounded(2), type="l", col=pal_col[4],
     ylim=c(0.1, 1.0),
     xlab="Estimate that speaker rounded", ylab="Probability speaker actually rounded\n")
lines(x, speaker_rounded(4), col=pal_col[3])
lines(x, speaker_rounded(6), col=pal_col[2])
lines(x, speaker_rounded(8), col=pal_col[1])
# 50% reference line.
lines(c(0, 1), c(0.5, 0.5), col="grey")
legend(x="bottomright", legend=c("k=8", "k=6", "k=4", "k=2"), bty="n", fill=pal_col, cex=1.2)
|
f2804e3e7de8e94588b61a10bd8caf95376162a9
|
7d43fe7a590ee1e03cc8629a1086603b20b3733e
|
/forecasting/03 - exponential.smoothing/06 - ets.R
|
be08dce16c3d5cd3ce94b66bcb0b0b8065625aea
|
[] |
no_license
|
aliawaischeema/DataCamp
|
5a0f27b26fe26f8f171fa3e4a2f865841eb82304
|
9f86505b46637fb299b7b2e9391c607ee0d1c285
|
refs/heads/master
| 2023-03-17T15:30:29.538126
| 2019-08-26T20:48:33
| 2019-08-26T20:48:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
r
|
06 - ets.R
|
# DataCamp forecasting exercise: ETS models and time-series cross-validation.
# Requires the forecast/fpp2 packages, which also supply the austa,
# hyndsight and cement series used below.
# Fit ETS model to austa in fitaus
fitaus <- ets(austa)
# Check residuals (Ljung-Box test + residual plots)
checkresiduals(fitaus)
# Plot forecasts
autoplot(forecast(fitaus))
# Repeat for hyndsight data in fiths
fiths <- ets(hyndsight)
checkresiduals(fiths)
autoplot(forecast(fiths))
# Which model(s) fails test? (TRUE or FALSE) -- exercise answers recorded
# as constants.
fitausfail <- FALSE
fithsfail <- TRUE
# Function to return ETS forecasts at horizon h (shape required by tsCV()).
fets <- function(y, h) {
  forecast(ets(y), h = h)
}
# Apply tsCV() for both methods (ETS vs. seasonal naive)
e1 <- tsCV(cement, fets, h = 4)
e2 <- tsCV(cement, snaive, h = 4)
# Compute MSE of resulting errors (watch out for missing values)
mean(e1 ^ 2, na.rm = TRUE)
mean(e2 ^ 2, na.rm = TRUE)
# Copy the best forecast MSE (value produced by the exercise run above)
bestmse <- 0.02910046
|
dbfe94fa60257eeed51d0082b06aae6739697e18
|
e5cff5c16e1ddbcd759da049b2614cdb5123cb1e
|
/Exploratory Data Analysis/week4/plot6.R
|
7a1940f32a9830b58a2be2233447e997e0032a67
|
[] |
no_license
|
kronik/datasciencecoursera
|
88afe7f2a9181237ea9774fbdf383529373be6d6
|
91e53c671e634f814c22b28b99008922afc76605
|
refs/heads/master
| 2021-01-21T13:49:36.792143
| 2016-05-22T09:26:42
| 2016-05-22T09:26:42
| 51,052,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 710
|
r
|
plot6.R
|
# Coursera EDA course, plot 6: compare total motor-vehicle PM2.5 emissions
# in Baltimore City vs. Los Angeles over 1999-2008.
library(dplyr)
library(ggplot2)
#Set new working directory
# NOTE(review): hard-coded setwd() makes the script machine-specific.
setwd("~/work/datasciencecoursera/Exploratory Data Analysis/week4/")
# National Emissions Inventory data, expected in the working directory.
NEI <- readRDS("summarySCC_PM25.rds")
# Keep only the two fips codes of interest and on-road (motor vehicle)
# sources.
cities <- NEI[(NEI$fips == "06037" | NEI$fips == "24510") & NEI$type=="ON-ROAD",]
# Total emissions per city per year.
s <- summarise(group_by(cities, fips, year), Emissions = sum(Emissions))
png("plot6.png", width=480, height=480)
g <- ggplot(s, aes(factor(year), Emissions))
# One panel per city.
g <- g + facet_grid(. ~ fips)
g <- g + geom_bar(stat="identity") + xlab("year") + ylab(expression('Total PM'[2.5]*" Emissions")) + ggtitle('Total Emissions from motor vehicle (type=ON-ROAD) in Baltimore City, MD (fips = "24510") vs Los Angeles, CA (fips = "06037") 1999-2008')
print(g)
dev.off()
|
57925b86db36231da6506db68d82823d7f8d6a1c
|
8ec42d6b5fd354f587bd666e5bae8f6e755af7fb
|
/basic/rep_repint.R
|
f4d3bd0610264f90e91cc48734ae934e94e7cb1c
|
[] |
no_license
|
hihumi/Learn-R
|
55c1451b67e53fa2fdbbdbf25b917d43ad0cd9d1
|
040c75f1ef6d2cf48c2ee02d34d5b6eb7f93bc3f
|
refs/heads/master
| 2021-01-10T10:41:53.651518
| 2016-10-19T06:02:00
| 2016-10-19T06:02:00
| 52,103,830
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 891
|
r
|
rep_repint.R
|
# Sequences built by repeating elements
# rep(), rep.int()
# rep()
x1 <- c(1, 5, 10)
rep(x1, 10)
typeof(rep(x1, length = 5))
class(rep(x1, length = 5))
# `length` argument sets the total output length
rep(x1, length = 5)
# `each` repeats every element of x1 that many times
rep(x1, each = 5)
# `times` repeats the whole of x1 that many times
rep(x1, times = 5)
# `length.out` gives the same result as `length`
rep(x1, length.out = 5)
# Combinations of the arguments above
rep(x1, length = 20, each = 3)
rep(x1, times = 20, each = 3)
rep(x1, length.out = 20, each = 3)
# `times` may also be given per element via c():
# below, each of c(1, 5, 10) is repeated 3 times
rep(c(1, 5, 10), c(3, 3, 3))
# rep.int()
# Same result as rep(), but with a more efficient internal implementation
rep.int(x1, 10)
typeof(rep.int(x1, 10))
class(rep.int(x1, 10))
# Only the `times` argument is supported
rep.int(x1, times = 10)
rep.int(x1, times = 20)
|
90e944b2466d507446866c8c9483f7a459318b26
|
06402333efefe2b1fd6a85eee57ece90e2374afe
|
/02_download_data.R
|
f69da04a5eb99fa025c2166c90a5f0e4b2362b44
|
[] |
no_license
|
chuckwong13/MISCADA_CoreIIA_Classification
|
6d36c19dfc5126415b6e84b1200e81c69e1fd359
|
bad63f4463d886cbaec36fa9253434d551c7c8f0
|
refs/heads/master
| 2021-03-12T02:47:34.798831
| 2020-03-20T11:30:29
| 2020-03-20T11:30:29
| 246,583,241
| 0
| 0
| null | 2020-03-11T13:47:07
| 2020-03-11T13:47:06
| null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
02_download_data.R
|
# Write code here to download the data you are using for your report.
# DO NOT push the data to your Github repository.
# For example, to download the simple Iris data from the UCI Machine Learning
# Repository
# uci.wine <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data",header = FALSE)
# uci.adult <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", header = FALSE, na.strings = "?")
# Download the UCI Adult (census income) data set. The raw file has no
# header and encodes missing values as " ?" (with a leading space).
uci.adult <- read.table(url("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"),
                        sep = ",",
                        header = FALSE,
                        na.strings = " ?")
library("dplyr")
library("forcats")
# Rename the positional V1..V15 columns to the attribute names documented
# at https://archive.ics.uci.edu/ml/datasets/Adult
uci.adult <- uci.adult %>%
  transmute(age = V1,
            workclass = V2,
            fnlwgt =V3,
            education = V4,
            education_num = V5,
            marital_status = V6,
            occupation = V7,
            relationship = V8,
            race = V9,
            sex = V10,
            capital_gain = V11,
            capital_loss = V12,
            hours_per_week = V13,
            native_country = V14,
            annual_income = V15)
# Save into Data directory which is not pushed to Github
# saveRDS(uci.wine, "Data/uci_wine.rds")
saveRDS(uci.adult, "Data/uci_adult.rds")
|
dce729cc9a6ef8fc18560f03a360602e18b9a5c3
|
445e123499826e42b0770384ba3a1fe9086f6875
|
/R/makeTranscriptDbFromBiomart.R
|
9f772de1cf987fe923c735dcdc46f3b79a3ead92
|
[] |
no_license
|
genome-vendor/r-bioc-genomicfeatures
|
aa5d2f9b0c7e3844546de95aaeddac3ffa5cb269
|
a9ffc2fb3682122dc28939aba0403364092e27b1
|
refs/heads/master
| 2016-09-06T01:33:31.530482
| 2013-02-14T17:34:25
| 2013-02-14T18:03:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,143
|
r
|
makeTranscriptDbFromBiomart.R
|
### =========================================================================
### makeTranscriptDbFromBiomart()
### -------------------------------------------------------------------------
###
### For people who want to tap BioMart.
### Typical use:
### txdb <- makeTranscriptDbFromBiomart(biomart="ensembl",
### dataset="hsapiens_gene_ensembl")
### Speed:
### - for biomart="ensembl" and dataset="hsapiens_gene_ensembl":
### (1) download takes about 8 min.
### (2) db creation takes about 60-65 sec.
###
## Extract the "Genus species" pair from a BioMart dataset description
## string, i.e. its first two whitespace-separated words. Errors (subscript
## out of bounds) if the description has fewer than two words.
.extractSpeciesFromDatasetDesc <- function(description)
{
    words <- unlist(strsplit(description, " "))
    genus <- words[[1]]
    species <- words[[2]]
    paste(genus, species)
}
## Look up the version string of a BioMart database via biomaRt::listMarts()
## and return it as a character scalar. Errors unless exactly one mart row
## matches `biomart`.
.getBiomartDbVersion <- function(biomart, host, port)
{
    marts <- listMarts(mart=biomart, host=host, port=port)
    mart_rowidx <- which(as.character(marts$biomart) == biomart)
    ## This should never happen.
    if (length(mart_rowidx) != 1L)
        stop("found 0 or more than 1 \"", biomart, "\" BioMart database")
    as.character(marts$version)[mart_rowidx]
}
## Pull the release token out of an Ensembl BioMart version string,
## e.g. "ENSEMBL GENES 67 (SANGER UK)" -> "67". Input that does not match
## the pattern is returned unchanged (standard sub() semantics).
.extractEnsemblReleaseFromDbVersion <- function(db_version)
{
    release_pattern <- "^ENSEMBL GENES ([^[:space:]]+) \\(SANGER UK\\)"
    sub(release_pattern, "\\1", db_version)
}
### Groups of BioMart attributes:
###   - A1, A2 and G are required attributes;
###   - B, C and D are optional attributes: C is required for inferring the
###     CDS (they cannot be inferred from D). Therefore, if C is missing,
###     the TranscriptDb object can still be made but won't have any CDS (no
###     row in the cds table). D is only used for sanity check.
.A1_ATTRIBS <- c("ensembl_transcript_id", "chromosome_name", "strand",
                 "transcript_start", "transcript_end")
.A2_ATTRIBS <- c("ensembl_transcript_id", "strand", "rank",
                 "exon_chrom_start", "exon_chrom_end")
.B_ATTRIB <- "ensembl_exon_id"
.C_ATTRIBS <- c("5_utr_start", "5_utr_end", "3_utr_start", "3_utr_end")
.D_ATTRIBS <- c("cds_start", "cds_end", "cds_length")
.G_ATTRIB <- "ensembl_gene_id"
### Summarize which attribute groups a dataset provides as a compact string
### such as "ABCDG" or "none". 'attribs' can be either a Mart object or a
### 2-col data frame as returned by 'listAttributes()'.
.getDatasetAttrGroups <- function(attribs)
{
    if (is(attribs, "Mart")) {
        attribs <- listAttributes(attribs)
    } else if (!is.data.frame(attribs) ||
               !identical(colnames(attribs), c("name", "description"))) {
        stop("invalid 'attribs' object")
    }
    available <- attribs$name
    ## Group A: required attributes.
    groups <- "none"
    if (all(unique(c(.A1_ATTRIBS, .A2_ATTRIBS)) %in% available))
        groups <- "A"
    ## Groups B, C and D are optional attributes.
    ## C is required for inferring the CDS (they cannot be inferred from D).
    ## Therefore, if C is missing, the TranscriptDb object can still be made
    ## but won't have any CDS (no row in the cds table).
    if (.B_ATTRIB %in% available)
        groups <- paste0(groups, "B")
    if (all(.C_ATTRIBS %in% available))
        groups <- paste0(groups, "C")
    if (all(.D_ATTRIBS %in% available))
        groups <- paste0(groups, "D")
    ## Group G: required attribute.
    if (.G_ATTRIB %in% available)
        groups <- paste0(groups, "G")
    groups
}
### Compute the attribute-group string (see .getDatasetAttrGroups) for every
### dataset of a Mart service.
### 'attrlist' can be a list (as returned by getMartAttribList()), a Mart
### object, or the name of a Mart service (single string).
### Typical use:
###   ensembl_attrgroups <-
###       GenomicFeatures:::.getAllDatasetAttrGroups("ensembl")
.getAllDatasetAttrGroups <- function(attrlist)
{
    ## Non-list input is resolved to a per-dataset attribute list first.
    if (!is.list(attrlist))
        attrlist <- getMartAttribList(attrlist)
    ## Named character vector, one group string per dataset.
    sapply(attrlist, .getDatasetAttrGroups)
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'transcripts' data frame.
###
### Fetches the group-A1 attributes via biomaRt::getBM() and reshapes them
### into the data frame expected by the 'transcripts' db table (columns
### tx_id, tx_name, tx_chrom, tx_strand, tx_start, tx_end). If
### 'transcript_ids' is non-NULL, every supplied id must be present in the
### downloaded table, otherwise an error is raised.
.makeBiomartTranscripts <- function(filters, values, mart, transcript_ids,
                                    biomartAttribGroups, id_prefix)
{
    message("Download and preprocess the 'transcripts' data frame ... ",
            appendLF=FALSE)
    bm_table <- getBM(biomartAttribGroups[['A1']], filters=filters,
                      values=values, mart=mart)
    ##bm_table_names <- sub(paste0("^", biomartAttribGroups[['id_prefix']]),
    ##                      "",
    ##                      colnames(bm_table))
    ##colnames(bm_table) <- bm_table_names
    ## e.g. "ensembl_transcript_id" for the default Ensembl prefix.
    tx_id_colname <- paste0(id_prefix, "transcript_id")
    if (!is.null(transcript_ids)) {
        ## Validate that all user-requested ids were actually returned.
        idx <- !(transcript_ids %in% bm_table[[tx_id_colname]])
        if (any(idx)) {
            bad_ids <- transcript_ids[idx]
            stop("invalid transcript ids: ",
                 paste0(bad_ids, collapse=", "))
        }
    }
    ## Those are the strictly required fields.
    transcripts0 <- data.frame(
        tx_id=integer(0),
        tx_chrom=character(0),
        tx_strand=character(0),
        tx_start=integer(0),
        tx_end=integer(0)
    )
    ## Empty result: return the zero-row skeleton (note: without a tx_name
    ## column, unlike the non-empty case below).
    if (nrow(bm_table) == 0L) {
        message("OK")
        return(transcripts0)
    }
    ## Internal ids are simply the row numbers of the downloaded table.
    tx_id <- seq_len(nrow(bm_table))
    tx_name <- bm_table[[tx_id_colname]]
    ##if (any(duplicated(tx_name)))
    ##    stop(paste("the '",
    ##               tx_id_colname,
    ##               "'transcript_id' attribute contains duplicated values"))
    if (any(duplicated(bm_table)))
        stop("The 'transcripts' data frame from biomart contains duplicated rows.")
    tx_chrom <- as.character(bm_table$chromosome_name)
    ## BioMart encodes strand as 1 / -1; convert to "+" / "-".
    tx_strand <- ifelse(bm_table$strand == 1, "+", "-")
    tx_start <- bm_table$transcript_start
    tx_end <- bm_table$transcript_end
    transcripts <- data.frame(
        tx_id=tx_id,
        tx_name=tx_name,
        tx_chrom=tx_chrom,
        tx_strand=tx_strand,
        tx_start=tx_start,
        tx_end=tx_end
    )
    message("OK")
    transcripts
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'chrominfo' data frame.
###
### Returns NULL if it fails to fetch the chromosome lengths from the
### remote resource, and also for any mart other than "ensembl" (only
### Ensembl is supported here).
.makeBiomartChrominfo <- function(mart, extra_seqnames=NULL,
                                  circ_seqs=character(0), host, port)
{
    ## biomaRt:::martBM / martDataset are unexported accessors for the
    ## Mart object's service name and dataset.
    biomart <- biomaRt:::martBM(mart)
    dataset <- biomaRt:::martDataset(mart)
    if (biomart == "ensembl") {
        message("Download and preprocess the 'chrominfo' data frame ... ",
                appendLF=FALSE)
        db_version <- .getBiomartDbVersion(biomart, host, port)
        ensembl_release <- .extractEnsemblReleaseFromDbVersion(db_version)
        ## Network fetch may fail; failure is reported and swallowed so the
        ## TranscriptDb can still be built without chromosome lengths.
        chromlengths <- try(fetchChromLengthsFromEnsembl(dataset,
                                release=ensembl_release,
                                extra_seqnames=extra_seqnames),
                            silent=TRUE)
        if (is(chromlengths, "try-error")) {
            message("FAILED! (=> skipped)")
            return(NULL)
        }
        chrominfo <- data.frame(
            chrom=chromlengths$name,
            length=chromlengths$length,
            is_circular=matchCircularity(chromlengths$name, circ_seqs)
        )
        message("OK")
        return(chrominfo)
    }
    NULL
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Allow users to discover 'chrominfo' data frame.
###
### Exported helper: downloads the transcripts table for the dataset (to
### know which sequences are in use) and returns a 2-column data frame
### (chrom, length). The is_circular column of the internal chrominfo is
### intentionally dropped.
### NOTE(review): .getBiomartAttribGroups and the .parseBM*Params helpers
### are defined elsewhere in this package -- contracts not visible here.
getChromInfoFromBiomart <- function(biomart="ensembl",
                                    dataset="hsapiens_gene_ensembl",
                                    id_prefix="ensembl_",
                                    host="www.biomart.org",
                                    port=80)
{
    biomartAttribGroups <- .getBiomartAttribGroups(id_prefix)
    mart <- .parseBMMartParams(biomart=biomart,
                               dataset=dataset,
                               host=host,
                               port=port)
    ## NULL transcript_ids => fetch all transcripts of the dataset.
    filters <- .parseBMFiltersParams(transcript_ids=NULL, id_prefix)
    values <- .parseBMValuesParams(transcript_ids=NULL)
    transcripts <- .makeBiomartTranscripts(filters, values, mart,
                                           transcript_ids=NULL,
                                           biomartAttribGroups,
                                           id_prefix)
    chrominfo <- .makeBiomartChrominfo(mart,
                                       extra_seqnames=transcripts$tx_chrom,
                                       host=host, port=port)
    chrominfo[,1:2]
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'splicings' data frame.
###
### Normalize a UTR coordinate column coming back from BioMart: numeric
### vectors pass through untouched; an all-NA logical column (what getBM()
### returns when no row has UTR info) is coerced to integer NAs; anything
### else is a data anomaly and raises an error.
.normUtrCoords <- function(coords)
{
    if (is.numeric(coords)) {
        coords
    } else if (is.logical(coords) && all(is.na(coords))) {
        as.integer(coords)
    } else {
        stop("BioMart data anomaly: utr coordinates don't ",
             "have a numeric type")
    }
}
## Build a human-readable, multi-line report describing a data anomaly found
## in a BioMart table. 'idx' flags the offending rows of 'bm_table'; 'msg'
## is a character vector describing the anomaly. The report shows the
## per-transcript rows (at most the first 6 affected transcripts).
## Temporarily shrinks options(width=) so the captured table printout fits
## after being indented.
.generateBioMartDataAnomalyReport <- function(bm_table, idx, id_prefix, msg)
{
    ## Part 3: per-transcript detail tables.
    tx_id_colname <- paste0(id_prefix, "transcript_id")
    tx_ids <- bm_table[[tx_id_colname]]
    first_tx_ids <- unique(tx_ids[idx])
    total_nb_tx <- length(first_tx_ids)
    ## Cap the report at the first 6 affected transcripts.
    first_six_only <- total_nb_tx > 6L
    if (first_six_only)
        first_tx_ids <- first_tx_ids[1:6]
    bm_table <- bm_table[tx_ids %in% first_tx_ids, , drop=FALSE]
    ## Drop the transcript-id column itself from the detail tables.
    bm_table0 <- bm_table[-match(tx_id_colname, names(bm_table))]
    f <- factor(bm_table[[tx_id_colname]], levels=first_tx_ids)
    first_tx_tables <- split(bm_table0, f)
    .DETAILS_INDENT <- " "
    ## Narrow the print width by the indent so indented lines still fit;
    ## restored symmetrically after the lapply below.
    options(width=getOption("width")-nchar(.DETAILS_INDENT))
    part3 <- lapply(seq_len(length(first_tx_tables)),
        function(i) {
            tx_table <- first_tx_tables[[i]]
            ## Show exons in rank order when a rank column is present.
            if ("rank" %in% colnames(tx_table)) {
                oo <- order(tx_table[["rank"]])
                tx_table <- tx_table[oo, , drop=FALSE]
            }
            row.names(tx_table) <- NULL
            subtitle <- paste0(" ", i, ". Transcript ",
                names(first_tx_tables)[i],
                ":")
            ## Render the data frame exactly as print() would show it.
            details <- capture.output(print(tx_table))
            c(subtitle, paste0(.DETAILS_INDENT, details))
        })
    options(width=getOption("width")+nchar(.DETAILS_INDENT))
    part3 <- unlist(part3, use.names=FALSE)
    if (first_six_only)
        part3 <- c(paste(" (Showing only the first 6 out of",
                         total_nb_tx,
                         "transcripts.)"),
                   part3)
    ## Part 1: fixed preamble.
    part1 <- "BioMart data anomaly: in the following transcripts, "
    ## Part 2: caller-supplied description, final line terminated with ".".
    msg[length(msg)] <- paste0(msg[length(msg)], ".")
    part2 <- paste0(" ", msg)
    ## Assemble the parts.
    paste(c(part1, part2, part3), collapse="\n")
}
## Raise an error carrying a full data-anomaly report. The warning.length
## option is temporarily enlarged (capped at 8170) so the multi-line report
## is not truncated by R's default message-length limit; on.exit() restores
## the previous value.
.stopWithBioMartDataAnomalyReport <- function(bm_table, idx, id_prefix, msg)
{
    msg <- .generateBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    new_length <- nchar(msg) + 5L
    ## 8170L seems to be the maximum possible value for the 'warning.length'
    ## option on my machine (R-2.15 r58124, 64-bit Ubuntu).
    if (new_length > 8170L)
        new_length <- 8170L
    if (new_length >= getOption("warning.length")) {
        old_length <- getOption("warning.length")
        on.exit(options(warning.length=old_length))
        options(warning.length=new_length)
    }
    stop(msg)
}
## Same as .stopWithBioMartDataAnomalyReport() but emits a warning instead
## of an error (used for anomalies that are suspicious but not fatal).
.warningWithBioMartDataAnomalyReport <- function(bm_table, idx, id_prefix, msg)
{
    msg <- .generateBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    new_length <- nchar(msg) + 5L
    ## 8170L seems to be the maximum possible value for the 'warning.length'
    ## option on my machine (R-2.15 r58124, 64-bit Ubuntu).
    if (new_length > 8170L)
        new_length <- 8170L
    if (new_length >= getOption("warning.length")) {
        old_length <- getOption("warning.length")
        on.exit(options(warning.length=old_length))
        options(warning.length=new_length)
    }
    warning(msg)
}
## Validate one UTR (5' or 3', per 'what_utr') against its exon coordinates
## and return a logical vector: TRUE where the exon has no such UTR.
## "No UTR" means either NA start/end, or the zero-width convention
## start == end + 1. Errors (with a detailed anomaly report) when NAs in
## start and end don't line up, when start > end + 1, or when the UTR falls
## outside its exon.
.utrIsNa <- function(utr_start, utr_end, exon_start, exon_end,
                     what_utr, bm_table, id_prefix)
{
    is_na <- is.na(utr_start)
    if (!identical(is_na, is.na(utr_end)))
        stop("BioMart data anomaly: ",
             "NAs in \"", what_utr, "_utr_start\" attribute don't match ",
             "NAs in \"", what_utr, "_utr_end\" attribute")
    ## start == end + 1 encodes an empty UTR, so only start > end + 1 is
    ## an anomaly.
    idx <- which(utr_start > utr_end + 1L)
    if (length(idx) != 0L) {
        msg <- paste0("the ", what_utr, "' UTRs have a start > end")
        .stopWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    }
    ## The UTR must lie within [exon_start, exon_end].
    idx <- which(utr_start < exon_start | exon_end < utr_end)
    if (length(idx) != 0L) {
        msg <- paste0("the ", what_utr, "' UTRs ",
                      "are not within the exon limits")
        .stopWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    }
    ## TRUE where the UTR is absent (NA or zero-width).
    is_na | utr_start == utr_end + 1L
}
## Infer one CDS range per exon row of 'bm_table' from the exon and UTR
## coordinates (BioMart's own cds_start/cds_end are relative to the coding
## mRNA and therefore not usable directly). The CDS is the exon with the
## strand-appropriate UTRs trimmed off. Returns an IRanges parallel to the
## rows of 'bm_table'; exons entirely covered by UTR yield zero-width
## ranges. When a cds_length column is present, the per-transcript CDS
## totals are cross-checked against it (warning on mismatch).
.extractCdsRangesFromBiomartTable <- function(bm_table, id_prefix)
{
    if (nrow(bm_table) == 0L)
        return(IRanges())
    strand <- bm_table[["strand"]]
    ## CDS starts out as the full exon, then gets trimmed by the UTRs.
    cds_start <- exon_start <- bm_table[["exon_chrom_start"]]
    cds_end <- exon_end <- bm_table[["exon_chrom_end"]]
    utr5_start <- .normUtrCoords(bm_table[["5_utr_start"]])
    utr5_end <- .normUtrCoords(bm_table[["5_utr_end"]])
    utr3_start <- .normUtrCoords(bm_table[["3_utr_start"]])
    utr3_end <- .normUtrCoords(bm_table[["3_utr_end"]])
    if (!all(strand %in% c(1, -1)))
        stop("BioMart data anomaly: \"strand\" attribute should be 1 or -1")
    if (!is.numeric(exon_start) || !is.numeric(exon_end))
        stop("BioMart data anomaly: exon coordinates don't ",
             "have a numeric type")
    ## TRUE where the exon has no 5'/3' UTR (see .utrIsNa for conventions).
    no_utr5 <- .utrIsNa(utr5_start, utr5_end, exon_start, exon_end,
                        "5", bm_table, id_prefix)
    no_utr3 <- .utrIsNa(utr3_start, utr3_end, exon_start, exon_end,
                        "3", bm_table, id_prefix)
    ## Plus strand: the 5' UTR is glued to the exon start...
    idx <- strand == 1 & !no_utr5
    if (!all(utr5_start[idx] == exon_start[idx])) {
        msg <- c("located on the plus strand, the 5' UTRs don't start",
                 "where their corresponding exon starts")
        .stopWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    }
    cds_start[idx] <- utr5_end[idx] + 1L
    ## ...and the 3' UTR to the exon end.
    idx <- strand == 1 & !no_utr3
    if (!all(utr3_end[idx] == exon_end[idx])) {
        msg <- c("located on the plus strand, the 3' UTRs don't end",
                 "where their corresponding exon ends")
        .stopWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    }
    cds_end[idx] <- utr3_start[idx] - 1L
    ## Minus strand: roles of the two UTRs are swapped.
    idx <- strand == -1 & !no_utr3
    if (!all(utr3_start[idx] == exon_start[idx])) {
        msg <- c("located on the minus strand, the 3' UTRs don't start",
                 "where their corresponding exon starts")
        .stopWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    }
    cds_start[idx] <- utr3_end[idx] + 1L
    idx <- strand == -1 & !no_utr5
    if (!all(utr5_end[idx] == exon_end[idx])) {
        msg <- c("located on the minus strand, the 5' UTRs don't end",
                 "where their corresponding exon ends")
        .stopWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
    }
    cds_end[idx] <- utr5_start[idx] - 1L
    ans <- IRanges(start=cds_start, end=cds_end)
    if (length(ans) != 0L) {
        ## Sanity check: sum of per-exon CDS widths per transcript should
        ## equal BioMart's cds_length attribute.
        tx_id_colname <- paste0(id_prefix, "transcript_id")
        cds_cumlength <-
            sapply(split(width(ans), bm_table[[tx_id_colname]]), sum)
        idx <- which(cds_cumlength[as.vector(bm_table[[tx_id_colname]])] !=
                     bm_table$cds_length)
        if (length(idx) != 0L) {
            msg <- c("the CDS total length inferred from the exon and UTR info",
                     "doesn't match the \"cds_length\" attribute from BioMart")
            .warningWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
        }
        #idx <- which(cds_cumlength %% 3L != 0L)
        #if (length(idx) != 0L) {
        #    msg <- c("the CDS total length (\"cds_length\" attribute) is not",
        #             "a multiple of 3")
        #    .warningWithBioMartDataAnomalyReport(bm_table, idx, id_prefix, msg)
        #}
    }
    ans
}
.makeCdsDataFrameFromRanges <- function(cds_ranges)
{
    ## Turn CDS ranges into a 2-col data frame (cds_start, cds_end).
    ## A zero-width range encodes "no CDS" and is reported as NA/NA.
    no_cds <- width(cds_ranges) == 0L
    starts <- start(cds_ranges)
    ends <- end(cds_ranges)
    starts[no_cds] <- NA_integer_
    ends[no_cds] <- NA_integer_
    data.frame(cds_start=starts, cds_end=ends)
}
### Surprisingly the cds_start and cds_end attributes that we get from
### BioMart are pretty useless because they are relative to the coding
### mRNA. However, the utr coordinates are relative to the chromosome so
### we use them to infer the cds coordinates. We also retrieve the
### cds_length attribute to do a sanity check.
.makeBiomartSplicings <- function(filters, values, mart, transcripts_tx_id,
                                  biomartAttribGroups, id_prefix)
{
    ## Download and preprocess the 'splicings' data frame: one row per
    ## (transcript, exon) pair with the exon rank and coordinates, plus the
    ## inferred CDS bounds when the dataset exposes UTR and cds_length
    ## attributes.
    ## Those are the strictly required fields.
    splicings0 <- data.frame(
        tx_id=integer(0),
        exon_rank=integer(0),
        exon_start=integer(0),
        exon_end=integer(0)
    )
    ## No transcripts => return the empty skeleton without querying BioMart.
    if (length(transcripts_tx_id) == 0L)
        return(splicings0)
    message("Download and preprocess the 'splicings' data frame ... ",
            appendLF=FALSE)
    ## Only request the optional attribute groups (exon ids 'B', UTRs 'C',
    ## cds_length) that this particular dataset actually provides.
    allattribs <- listAttributes(mart)$name
    attributes <- biomartAttribGroups[['A2']]
    if (biomartAttribGroups[['B']] %in% allattribs)
        attributes <- c(attributes, biomartAttribGroups[['B']])
    if (all(biomartAttribGroups[['C']] %in% allattribs))
        attributes <- c(attributes, biomartAttribGroups[['C']])
    if ("cds_length" %in% allattribs)
        attributes <- c(attributes, "cds_length")
    bm_table <- getBM(attributes, filters=filters, values=values, mart=mart)
    tx_id_colname <- paste0(id_prefix, "transcript_id")
    ## Map BioMart transcript names back to the internal integer tx ids
    ## ('transcripts_tx_id' is named by transcript name).
    splicings_tx_id <- transcripts_tx_id[bm_table[[tx_id_colname]]]
    exon_id_col_name <- paste0(id_prefix, "exon_id")
    ## NOTE(review): if the exon id attribute is unavailable,
    ## bm_table[[exon_id_col_name]] is NULL and the exon_name column is
    ## silently dropped from the data frame — presumably intentional.
    splicings <- data.frame(
        tx_id=splicings_tx_id,
        exon_rank=bm_table$rank,
        exon_name=bm_table[[exon_id_col_name]],
        exon_start=bm_table$exon_chrom_start,
        exon_end=bm_table$exon_chrom_end
    )
    ## CDS bounds can only be inferred when both the UTR coordinates and the
    ## cds_length sanity-check attribute were retrieved above.
    if (all(biomartAttribGroups[['C']] %in% allattribs)
     && ("cds_length" %in% allattribs)) {
        cds_ranges <- .extractCdsRangesFromBiomartTable(bm_table, id_prefix)
        splicings <- cbind(splicings, .makeCdsDataFrameFromRanges(cds_ranges))
    }
    message("OK")
    splicings
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Download and preprocess the 'genes' data frame.
###
### Query BioMart for the gene id of each transcript and return a 2-col
### data frame (tx_id, gene_id) mapping internal transcript ids to BioMart
### gene ids.
.makeBiomartGenes <- function(filters, values, mart,
                              transcripts_tx_id, biomartAttribGroups,
                              id_prefix)
{
    message("Download and preprocess the 'genes' data frame ... ",
            appendLF=FALSE)
    ## Fetch the gene id together with the transcript id so the two can be
    ## joined below.
    attributes <- c(biomartAttribGroups[['G']],
                    paste0(id_prefix, "transcript_id"))
    bm_table <- getBM(attributes, filters=filters, values=values, mart=mart)
    tx_id_colname <- paste0(id_prefix, "transcript_id")
    ## 'transcripts_tx_id' is named by transcript name, so indexing by the
    ## BioMart transcript id column yields the internal integer tx ids.
    genes_tx_id <- transcripts_tx_id[bm_table[[tx_id_colname]]]
    message("OK")
    gene_id_col_name <- paste0(id_prefix, "gene_id")
    data.frame(
        tx_id=genes_tx_id,
        gene_id=bm_table[[gene_id_col_name]]
    )
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Prepare the 'metadata' data frame.
###
### Assemble the name/value 'metadata' data frame recorded in the
### TranscriptDb: data source, species, BioMart database/dataset ids and
### versions, whether the full dataset was used, and the miRBase build.
.prepareBiomartMetadata <- function(mart, is_full_dataset, host, port,
                                    miRBaseBuild)
{
    message("Prepare the 'metadata' data frame ... ",
            appendLF=FALSE)
    ## NOTE(review): reaches into biomaRt's private accessors (:::); this can
    ## break whenever biomaRt changes its internal Mart representation.
    biomart <- biomaRt:::martBM(mart)
    dataset <- biomaRt:::martDataset(mart)
    mart_url <- biomaRt:::martHost(mart)
    ## Reduce the host URL to its bare hostname: strip the scheme, then keep
    ## everything before the first path separator.
    mart_url <- sub("^[^/]+//", "", mart_url)
    mart_url <- unlist(strsplit(mart_url, "/"))[1]
    db_version <- .getBiomartDbVersion(biomart, host, port)
    datasets <- listDatasets(mart)
    dataset_rowidx <- which(as.character(datasets$dataset) == dataset)
    ## This should never happen (the above call to useMart() would have failed
    ## in the first place).
    if (length(dataset_rowidx) != 1L)
        stop("the BioMart database \"", biomaRt:::martBM(mart),
             "\" has no (or more than one) \"", dataset, "\" datasets")
    description <- as.character(datasets$description)[dataset_rowidx]
    dataset_version <- as.character(datasets$version)[dataset_rowidx]
    species <- .extractSpeciesFromDatasetDesc(description)
    message("OK")
    ## A NULL miRBase build is stored as NA in the metadata table.
    if(is.null(miRBaseBuild)){ miRBaseBuild <- NA }
    metadata <- data.frame(
        name=c("Data source",
               "Genus and Species",
               "Resource URL",
               "BioMart database",
               "BioMart database version",
               "BioMart dataset",
               "BioMart dataset description",
               "BioMart dataset version",
               "Full dataset",
               "miRBase build ID"),
        value=c("BioMart",
                species,
                mart_url,
                biomart,
                db_version,
                dataset,
                description,
                dataset_version,
                ifelse(is_full_dataset, "yes", "no"),
                miRBaseBuild)
    )
    message("metadata: OK")
    metadata
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### makeTranscriptDbFromBiomart()
###
### Normalize the 'biomart'/'dataset' arguments (which may come straight from
### listMarts()/listDatasets(), i.e. as a factor or an "AsIs" character
### vector) and open the corresponding Mart connection.
.parseBMMartParams <- function(biomart="ensembl",
                               dataset="hsapiens_gene_ensembl",
                               host, port)
{
    ## listMarts() returns factor columns; listDatasets() "AsIs" characters.
    if (is.factor(biomart))
        biomart <- as.character(biomart)
    if (is(dataset, "AsIs"))
        dataset <- as.character(dataset)
    if (!isSingleString(biomart))
        stop("'biomart' must be a single string")
    useMart(biomart=biomart, dataset=dataset, host=host, port=port)
}
### Translate the 'transcript_ids' argument into the BioMart filter NAME to
### query on; "" means "no filter" (fetch the full dataset).
### Fix: the original left 'filters' undefined when 'transcript_ids' was
### neither NULL nor a NA-free character vector, surfacing as an obscure
### "object 'filters' not found" error.  Fail instead with the same explicit
### message used by .parseBMValuesParams().
.parseBMFiltersParams <- function(transcript_ids, id_prefix)
{
    if (is.null(transcript_ids)) {
        filters <- ""
    } else if (is.character(transcript_ids)
            && !any(is.na(transcript_ids))) {
        filters <- paste0(id_prefix, "transcript_id")
    } else {
        stop("'transcript_ids' must be a character vector with no NAs")
    }
    filters
}
### Translate the 'transcript_ids' argument into the BioMart filter VALUES
### to query with; "" means "no filtering".
.parseBMValuesParams <- function(transcript_ids)
{
    ## NULL means "no filtering": BioMart expects an empty string.
    if (is.null(transcript_ids))
        return("")
    if (!is.character(transcript_ids) || any(is.na(transcript_ids)))
        stop("'transcript_ids' must be a character vector with no NAs")
    ## An empty id vector must still yield a non-empty filter value,
    ## otherwise getBM() would return the full dataset.
    if (length(transcript_ids) == 0L)
        return("____a_very_unlikely_valid_transcript_id____")
    transcript_ids
}
## .testMakeTxDbFromBMParams <- function(biomart="ensembl",
## dataset="hsapiens_gene_ensembl",
## circ_seqs=DEFAULT_CIRC_SEQS,
## transcript_ids=NULL)
## {
## if (is.factor(biomart))
## biomart <- as.character(biomart)
## if (is(dataset, "AsIs"))
## dataset <- as.character(dataset)
## if (!isSingleString(biomart))
## stop("'biomart' must be a single string")
## mart <- useMart(biomart=biomart, dataset=dataset)
## if (is.null(transcript_ids)) {
## filters <- values <- ""
## } else if (is.character(transcript_ids)
## && !any(is.na(transcript_ids))) {
## filters <- "ensembl_transcript_id"
## if (length(transcript_ids) == 0L)
## values <- "____a_very_unlikely_valid_transcript_id____"
## else
## values <- transcript_ids
## } else {
## stop("'transcript_ids' must be a character vector with no NAs")
## }
## }
### Note that listMarts() and listDatasets() are returning data frames where
### the columns are character factors for the former and "AsIs" character
### vectors for the latter.
### Build a TranscriptDb object from a BioMart mart/dataset: download the
### transcripts, chromosome info, exon/CDS splicings and gene mappings, then
### assemble them with makeTranscriptDb().  'transcript_ids' optionally
### restricts the result to those transcripts; 'filters' is a named list of
### additional BioMart filters; 'circ_seqs' names the circular chromosomes;
### 'miRBaseBuild' is recorded in the db metadata.
makeTranscriptDbFromBiomart <- function(biomart="ensembl",
                                        dataset="hsapiens_gene_ensembl",
                                        transcript_ids=NULL,
                                        circ_seqs=DEFAULT_CIRC_SEQS,
                                        filters="",
                                        id_prefix="ensembl_",
                                        host="www.biomart.org",
                                        port=80,
                                        miRBaseBuild=NULL)
{
    ## Could be that the user got the 'biomart' and/or 'dataset' values
    ## programmatically via calls to listMarts() and/or listDatasets().
    mart <- .parseBMMartParams(biomart=biomart,
                               dataset=dataset,
                               host=host,
                               port=port)
    ## Combines the user-specified filters with the one derived from the
    ## supplied transcript ids, returning list(filters=<names>,
    ## values=<values>) in the parallel form expected by getBM().
    .mergeTxIDsAndFilters <- function(transcript_ids, filters, id_prefix) {
        ## "" is the documented "no filters" default: normalize to an
        ## empty list before validating.
        if (filters == "")
            filters <- list()
        if (class(filters) != "list")
            stop("filters parameter must be a named list")
        if(length(filters) != 0) {
            if(is.null(names(filters)))
                stop("filters parameter must be a named list")
        }
        transcript_filters <- .parseBMFiltersParams(transcript_ids, id_prefix)
        transcript_values <- .parseBMValuesParams(transcript_ids)
        ## Merge transcript_ids into filters as one more named entry.
        transcript_list <- list()
        if(transcript_filters != "") {
            transcript_list <- list(transcript_values)
            names(transcript_list) <- transcript_filters
        }
        transcripts_and_filters <- append(filters, transcript_list)
        f <- ""
        v <- ""
        if (length(transcripts_and_filters) > 0) {
            f <- names(transcripts_and_filters)
            v <- unname(transcripts_and_filters)
        }
        res <- list()
        res[['filters']] <- f
        res[['values']] <- v
        return(res)
    }
    return_list <- .mergeTxIDsAndFilters(transcript_ids,
                                         filters, id_prefix)
    filters <- return_list$filters
    values <- return_list$values
    biomartAttribGroups <- .getBiomartAttribGroups(id_prefix)
    ## Download the building blocks one by one, then assemble them.
    transcripts <- .makeBiomartTranscripts(filters, values, mart,
                                           transcript_ids,
                                           biomartAttribGroups,
                                           id_prefix)
    ## Internal integer tx ids, named by transcript name, used to join the
    ## splicings and genes data frames back to 'transcripts'.
    transcripts_tx_id <- transcripts$tx_id
    names(transcripts_tx_id) <- transcripts$tx_name
    chrominfo <- .makeBiomartChrominfo(mart,
                                       extra_seqnames=transcripts$tx_chrom,
                                       circ_seqs=circ_seqs,
                                       host, port)
    splicings <- .makeBiomartSplicings(filters, values, mart,
                                       transcripts_tx_id,
                                       biomartAttribGroups,
                                       id_prefix=id_prefix)
    genes <- .makeBiomartGenes(filters, values, mart, transcripts_tx_id,
                               biomartAttribGroups, id_prefix)
    metadata <- .prepareBiomartMetadata(mart, is.null(transcript_ids), host,
                                        port, miRBaseBuild)
    message("Make the TranscriptDb object ... ", appendLF=FALSE)
    txdb <- makeTranscriptDb(transcripts, splicings,
                             genes=genes, chrominfo=chrominfo,
                             metadata=metadata, reassign.ids=TRUE)
    message("OK")
    txdb
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Some non-exported tools to help exploring/scanning the BioMart landscape.
###
### 'mart' can be either a Mart object or the name of a Mart service (single
### string). Returns a named list of 2-col data frames with one elt per
### dataset in 'mart'. Each data frame describes the attributes that are
### available for the corresponding dataset.
### Typical use:
### ensembl_attrlist <- GenomicFeatures:::getMartAttribList("ensembl")
### sapply(ensembl_attrlist, nrow)
### 'mart' can be either a Mart object or the name of a Mart service (single
### string).  Returns a named list with one data frame per dataset in 'mart',
### each describing the attributes available for that dataset.
getMartAttribList <- function(mart)
{
    if (!is(mart, "Mart"))
        mart <- useMart(mart)
    datasets <- listDatasets(mart)
    ans_length <- nrow(datasets)
    ## Preallocate the result list, named by dataset.
    ans <- vector(mode="list", length=ans_length)
    names(ans) <- as.character(datasets$dataset)
    ## Switch the connection to each dataset in turn and record its attribute
    ## table (one web query per dataset, hence the progress messages).
    for (i in seq_len(ans_length)) {
        dataset <- names(ans)[i]
        mart <- useDataset(dataset, mart=mart)
        message("Getting attributes for dataset \"", dataset, "\"... ",
                appendLF=FALSE)
        ans[[i]] <- listAttributes(mart)
        message("OK")
    }
    ans
}
### 'biomart' and 'version' must be single character strings.
### Print a short summary of one BioMart service: its number of datasets and
### the distribution of attribute groups (as computed by
### .getAllDatasetAttrGroups) across them.
### 'biomart' and 'version' must be single character strings.
scanMart <- function(biomart, version)
{
    cat("Scanning ", biomart, "... ", sep="")
    suppressMessages(attrgroups <- .getAllDatasetAttrGroups(biomart))
    cat("OK\n")
    cat("biomart: ", biomart, "\n", sep="")
    cat("version: ", version, "\n", sep="")
    ## Show at most 3 dataset names, then an ellipsis.
    tmp <- names(attrgroups)
    if (length(tmp) > 3L)
        tmp <- c(tmp[1:3], "...")
    cat("nb of datasets: ", length(attrgroups),
        " (", paste(tmp, collapse=", "), ")\n",
        sep="")
    if (length(attrgroups) != 0L) {
        ## Tabulate how many datasets fall into each attribute group and
        ## render as "group:count, group:count, ...".
        tbl <- table(attrgroups)
        tbl2 <- as.integer(tbl)
        names(tbl2) <- names(tbl)
        tmp <- paste0(names(tbl2), ":", tbl2, collapse=", ")
        cat("table of attribute groups: ", tmp, "\n", sep="")
    }
    cat("\n")
}
### Scan every BioMart service listed in 'marts' (default: all the marts
### currently advertised by listMarts()), printing a summary for each.
scanMarts <- function(marts=NULL)
{
    if (is.null(marts))
        marts <- listMarts()
    ## listMarts() returns factor columns; coerce once up front.
    mart_names <- as.character(marts$biomart)
    mart_versions <- as.character(marts$version)
    for (row in seq_len(nrow(marts)))
        scanMart(mart_names[row], mart_versions[row])
}
### scanMarts() output as of 6/28/2010 (only biomarts with at least groups
### A and G are listed):
###
### biomart: ensembl
### version: ENSEMBL GENES 58 (SANGER UK)
### nb of datasets: 51 (hsapiens_gene_ensembl, oanatinus_gene_ensembl,
### tguttata_gene_ensembl, cporcellus_gene_ensembl, ...)
### NOTE: the mgallopavo_gene_ensembl dataset seems to be broken!
### table of attribute groups: ABCDG:50
###
### biomart: bacterial_mart_5
### version: ENSEMBL BACTERIA 5 (EBI UK)
### nb of datasets: 183 (str_57_gene, esc_20_gene, myc_25994_gene, ...)
### table of attribute groups: ABG:183
###
### biomart: fungal_mart_5
### version: ENSEMBL FUNGAL 5 (EBI UK)
### nb of datasets: 12 (aniger_eg_gene, aflavus_eg_gene, aterreus_eg_gene, ...)
### table of attribute groups: ABG:12
###
### biomart: metazoa_mart_5
### version: ENSEMBL METAZOA 5 (EBI UK)
### nb of datasets: 23 (dgrimshawi_eg_gene, ppacificus_eg_gene,
### dpseudoobscura_eg_gene, ...)
### table of attribute groups: ABG:23
###
### biomart: plant_mart_5
### version: ENSEMBL PLANT 5 (EBI UK)
### nb of datasets: 8 (sbicolor_eg_gene, bdistachyon_eg_gene,
### alyrata_eg_gene, ...)
### table of attribute groups: ABG:8
###
### biomart: protist_mart_5
### version: ENSEMBL PROTISTS 5 (EBI UK)
### nb of datasets: 6 (tpseudonana_gene, ptricornutum_gene, pknowlesi_gene, ...)
### table of attribute groups: ABG:6
###
### biomart: ensembl_expressionmart_48
### version: EURATMART (EBI UK)
### nb of datasets: 1 (rnorvegicus_expr_gene_ensembl)
### table of attribute groups: AG:1
###
### biomart: Ensembl56
### version: PANCREATIC EXPRESSION DATABASE (INSTITUTE OF CANCER UK)
### nb of datasets: 1 (hsapiens_gene_pancreas)
### table of attribute groups: ABCDG:1
###
### biomart: ENSEMBL_MART_ENSEMBL
### version: GRAMENE 30 ENSEMBL GENES (CSHL/CORNELL US)
### nb of datasets: 8 (sbicolor_eg_gene, bdistachyon_eg_gene,
### alyrata_eg_gene, ...)
### table of attribute groups: ABG:8
### Return the named groups of BioMart attributes used throughout this file:
### A1 (transcript coordinates), A2 (exon coordinates/rank), B (exon id),
### C (UTR coordinates), D (CDS coordinates/length), G (gene id), plus the
### id prefix itself under 'id_prefix'.
.getBiomartAttribGroups <- function(id_prefix) {
    transcript_id_attr <- paste0(id_prefix, "transcript_id")
    list(
        A1 = c(transcript_id_attr,
               "chromosome_name",
               "strand",
               "transcript_start",
               "transcript_end"),
        A2 = c(transcript_id_attr,
               "strand",
               "rank",
               "exon_chrom_start",
               "exon_chrom_end"),
        B = paste0(id_prefix, "exon_id"),
        C = c("5_utr_start",
              "5_utr_end",
              "3_utr_start",
              "3_utr_end"),
        D = c("cds_start",
              "cds_end",
              "cds_length"),
        G = paste0(id_prefix, "gene_id"),
        id_prefix = id_prefix
    )
}
|
fb41194749d72de2cbd168fc18ce12daf07c8b20
|
0d193831c182d0860777a08e4a923f33c42702bf
|
/cachematrix.R
|
5e67ee06d829e0e4fb057d7ef6bf83cf15cca6d3
|
[] |
no_license
|
wjbolles/ProgrammingAssignment2
|
947fb21ed007b057ce462eec24c1bb37bb2f1c64
|
91ac9158cd472a75f7d99ecd999655a98051b73c
|
refs/heads/master
| 2021-01-18T03:01:58.476316
| 2015-07-26T20:45:38
| 2015-07-26T20:45:38
| 39,698,217
| 0
| 0
| null | 2015-07-25T18:46:43
| 2015-07-25T18:46:41
| null |
UTF-8
|
R
| false
| false
| 1,117
|
r
|
cachematrix.R
|
## cachematrix.R: cache the inverse of a matrix so that repeated requests
## for the same inverse are served from memory instead of being recomputed.
# Returns a list of 4 functions for returning precomputed inverse matrices:
# get,
# set,
# setinverse,
# and getinverse.
# makeCacheMatrix: wrap a matrix together with a cached inverse.
# Returns a list of four accessor functions: set, get, setinverse,
# getinverse.  Replacing the matrix via set() clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    list(
        set = function(y) {
            x <<- y
            cached_inverse <<- NULL  # new matrix invalidates the cache
        },
        get = function() x,
        setinverse = function(inverse) cached_inverse <<- inverse,
        getinverse = function() cached_inverse
    )
}
# cacheSolve: calculates the inverse of the special "matrix" created by
# makeCacheMatrix.  If the inverse has already been computed, the cached
# copy is returned (with a "getting cached data" message) instead of
# recomputing it.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of the matrix wrapped by 'x'
    ## (a makeCacheMatrix-style list), computing and caching it on first use.
    cached <- x$getinverse()
    if (is.null(cached)) {
        cached <- solve(x$get(), ...)
        x$setinverse(cached)
    } else {
        message("getting cached data")
    }
    cached
}
# using makeCacheMatrix & cacheSolve
# Smoke test: wrap a 2x2 matrix and invert it through the cache.
testmat <- matrix(1:4, 2, 2)
test <- makeCacheMatrix(testmat)
cacheSolve(test)
# using solve
# Reference result computed directly, for comparison with the cached one.
solve(testmat)
|
ab07295a2f2063cbc3a6555e8129f2135b36307a
|
0363e9059653e5ce2a8fd4dfa1bcfe981072ea82
|
/R/getLapplyIndex.R
|
b28ca46f0ae0990574fb999499045bd98f03e021
|
[] |
no_license
|
mwrowe/microRutils
|
7725bd4d5e2ac60337932f384562ed39abcf86a1
|
654cd867bafe126593089441f63c88906ecf60ed
|
refs/heads/master
| 2021-07-07T19:59:43.732449
| 2021-06-10T16:59:33
| 2021-06-10T16:59:33
| 245,310,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,267
|
r
|
getLapplyIndex.R
|
#' getLapplyIndex: Access Index of List Element Within lapply()
#'
#' Get name/index of current list element from within FUN argument of lapply()
#'
#' \itemize{
#' \item This is meant to be called from within the FUN argument passed to
#' lapply(); it cannot be used as the FUN argument itself.
#' \item It will also return the index along the current dimension DIM if
#' called from a function within the apply() function, though without a
#' name.
#' \item This function enables the user to change the processing that occurs
#' within FUN according to which element is being operated on.
#' }
#'
#' @return
#' Returns an integer value indicating the current index of the list passed
#' as the X argument to lapply(). If X is named, the name of the current
#' element will be the name of the index.
#'
#' @examples
#' x <- list(a=1:10, b=1:10, c=1:10)
#' FUNS <- list(a=mean, b=prod, c=max)
#' FUN <- function(x){
#' ndx <- getLapplyIndex()
#' y <- FUNS[[names(ndx)]](x)
#' y
#' }
#' lapply(x, FUN)
#'
#' @author M.W.Rowe, \email{mwr.stats@gmail.com}
#' @export
getLapplyIndex <-
function(){
    ## Reach two frames up into lapply()'s evaluation frame, where the
    ## internal loop variable "i" and the input list "X" live.
    ## NOTE(review): relies on the frame layout of .Internal(lapply); only
    ## valid when called (indirectly) from within FUN inside lapply()/apply().
    ndx <- get("i", parent.frame(2))
    ## Carry the name of the current element (if X is named) on the index.
    names(ndx) <- names(get("X", parent.frame(2)))[[ndx]]
    ndx
}
|
1da6a42f5eebd5dfe7b6defc5fe61400ca698788
|
63e1231faa30a4cea6dd9f25e87c2372383aa2f4
|
/man/Choose.Rd
|
3a74b2254c4916a018ff7b2463763175d0fe4d60
|
[] |
no_license
|
cran/MSEtool
|
35e4f802f1078412d5ebc2efc3149c46fc6d13a5
|
6b060d381adf2007becf5605bc295cca62f26770
|
refs/heads/master
| 2023-08-03T06:51:58.080968
| 2023-07-19T22:10:23
| 2023-07-20T01:47:18
| 145,912,213
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,325
|
rd
|
Choose.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sketch_Functions.R
\name{Choose}
\alias{Choose}
\alias{ChooseEffort}
\alias{ChooseM}
\alias{ChooseSelect}
\title{Manually map parameters for the historical period of operating model}
\usage{
ChooseEffort(Fleet, Years = NULL)
ChooseM(OM, type = c("age", "length"), x = NULL, y = NULL)
ChooseSelect(Fleet, Stock, FstYr = NULL, SelYears = NULL)
}
\arguments{
\item{Fleet}{A fleet object.}
\item{Years}{An optional vector of years. Should be nyears long.}
\item{OM}{An object of class 'OM'}
\item{type}{A character string - is M to be mapped by 'age' or 'length'?}
\item{x}{Optional vector for x-axis}
\item{y}{Optional vector for y-axis}
\item{Stock}{Optional Stock object. If provided, average length-at-maturity
is included on plot for reference.}
\item{FstYr}{Optional value for first historical year. If empty, user must
specify the year in console.}
\item{SelYears}{Optional vector of values for each year where selectivity
pattern changed. If empty, user must specify the years in console (comma
separated).}
}
\value{
\code{ChooseEffort} and \code{ChooseSelect} return a Fleet object while
\code{ChooseM} returns an OM object.
}
\description{
Interactive plots to specify trends and variability in
fishing effort, fleet selectivity, and natural mortality for the
operating model.
}
\details{
\tabular{ll}{
\code{ChooseEffort} \tab Interactive plot which allows users to specify the
relative trajectory and variability in the historical fishing effort and
populates Fleet object. \cr
\code{ChooseM} \tab Interactive plot which allows users to specify M by age
or size class \cr
\code{ChooseSelect} \tab Input the first historical year, and all years where
selectivity pattern
changed (separated by comma). Interactive plot which allows users to
specify a range for the length at 5\\% and full selection (LFS), as well as
selectivity at maximum length for each year. Produces a simple plot which
shows the range in selectivity pattern for each break-point year.
Selectivity-at-length is fixed in between break-point years. Note that this
function replaces 'nyears' in the Fleet object with the value defined here
(FstYr:current year). \cr
}
}
\author{
A. Hordyk
}
|
9db708cdd6fc5bd9050881539ab520bd93cc537b
|
dc4ee159f747caade7c36cb7ee673fd5819f6e3b
|
/Exploratory Data Analysis/EDA1/plot3.R
|
7984a57a91aecadbc18e5cc41bb22073134cf875
|
[] |
no_license
|
SampurnR/Coursera
|
d59b1b4cb2c5b4826e24a34ae812225159fcb6a0
|
09db173f326a49d6d9a751803c86a74ac5482f5d
|
refs/heads/master
| 2020-06-04T19:35:35.176125
| 2015-04-22T03:23:12
| 2015-04-22T03:23:12
| 34,346,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,109
|
r
|
plot3.R
|
## plot3.R: plot the three energy sub-metering series for 2007-02-01 and
## 2007-02-02 with a colour-coded legend, then save the plot to plot3.png.
## NOTE(review): the hard-coded setwd() makes this script machine-specific;
## consider running it from the data directory instead.
setwd("C:\\Workspace\\R\\Codes\\Coursera\\EDA")

########## reading data
consumption_All <- read.table("household_power_consumption.txt", header=TRUE, sep=';', na.strings="?", stringsAsFactors=FALSE)
consumption_All$Date <- as.Date(consumption_All$Date, format = "%d/%m/%Y")

############## subsetting data: keep only the two target days
consumption_subset <- subset(consumption_All, Date >= as.Date("01/02/2007", format ="%d/%m/%Y") & Date <= as.Date("02/02/2007", format ="%d/%m/%Y"))
datetime <- paste(as.Date(consumption_subset$Date), consumption_subset$Time)
consumption_subset$Datetime <- as.POSIXct(datetime)

########## constructing plot
## Fix: the y axis shows the sub-metering series, not global active power,
## and the original label had an unbalanced parenthesis.
plot(consumption_subset$Sub_metering_1~consumption_subset$Datetime, type = "l", ylab = "Energy sub metering", xlab = "")
lines(consumption_subset$Sub_metering_2~consumption_subset$Datetime, col = "red")
lines(consumption_subset$Sub_metering_3~consumption_subset$Datetime, col = "blue")
## Fix: without lty= the legend draws no line samples, so the colours could
## not be matched to the series.
legend("topright", col=c("black", "red", "blue"), lty=1, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

## saving plot
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
17f487dea2016e5fdadba0d0faa51bd78ae78783
|
5532bd62517bbadf3a402e637eff9896024a44eb
|
/man/RcmdrPlugin.SM-internal.Rd
|
2f779c73ed717250a9738628db198610d9b61aab
|
[] |
no_license
|
cran/RcmdrPlugin.SM
|
f78e7a1a75c918e26644d157b6721e207e4442b0
|
d79f4c0800be272583f84f0cb7d5ea850d652a82
|
refs/heads/master
| 2021-01-01T19:11:09.974424
| 2012-12-21T00:00:00
| 2012-12-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 677
|
rd
|
RcmdrPlugin.SM-internal.Rd
|
\name{RcmdrSMPlugin-internal}
\title{Internal RcmdrSMPlugin objects}
\alias{dotGraph}
\alias{dotchartTable}
\alias{barGraph2}
\alias{pieChart2}
\alias{frequencyDistribution2}
\alias{Histogram2}
\alias{boxPlot2}
\alias{QQPlot2}
\alias{barCode}
\alias{barCode2}
\alias{signifNC}
\alias{oneWayAnova2}
\alias{twoWayTable1}
\alias{percent}
\alias{twoWayTable2}
\alias{twoWayTable3}
\alias{signifCC}
\alias{scatterPlot2}
\alias{statNN}
\alias{signifNN}
\alias{LM2}
\alias{plotLikert}
\alias{plotCor}
\alias{enConstruction}
\description{Internal RcmdrSMPlugin objects.}
\details{These are not to be called by the user.}
\keyword{internal}
|
a21d4a945627cd0a545ef05bb5224c794a30a62b
|
26e26aca4102f40bc848120c4ebc99bb40d4a3c1
|
/R/Archive/June 2020 (removed code files)/207-Homemade.R
|
d8226721bec89fab81ff0432897dc0395d6f3d4b
|
[] |
no_license
|
IPRCIRI/IRHEIS
|
ee6c00dd44e1e4c2090c5ef4cf1286bcc37c84a1
|
1be8fa815d6a4b2aa5ad10d0a815c80a104c9d12
|
refs/heads/master
| 2023-07-13T01:27:19.954174
| 2023-07-04T09:14:58
| 2023-07-04T09:14:58
| 90,146,792
| 13
| 6
| null | 2021-12-09T12:08:58
| 2017-05-03T12:31:57
|
R
|
UTF-8
|
R
| false
| false
| 1,638
|
r
|
207-Homemade.R
|
# 35-Homemade.R
# Builds the HomemadeIncome data.table for households
#
# Copyright © 2017: Arin Shahbazian
# Licence: GPL-3

## NOTE(review): rm(list=ls()) wipes the calling workspace; acceptable only
## because this file is always run as a standalone batch script.
rm(list=ls())
starttime <- proc.time()
cat("\n\n================ HHHomemadeIncome =====================================\n")

library(yaml)
Settings <- yaml.load_file("Settings.yaml")
library(data.table)
library(stringr)
library(readxl)

## Per-year metadata (one sheet of the Excel metadata workbook) describing
## where the homemade-income table lives and how its columns are named.
HomemadeTable <- data.table(read_excel(Settings$MetaDataFilePath,sheet=Settings$MDS_Homemade))

for(year in (Settings$startyear:Settings$endyear)){
    cat(paste0("\n------------------------------\nYear:",year,"\n"))
    ## Loads the raw survey tables for this year; presumably this populates
    ## the 'Tables' list used below -- TODO confirm against the raw .rda files.
    load(file=paste0(Settings$HEISRawPath,"Y",year,"Raw.rda"))
    Homemadewt <- HomemadeTable[Year==year]
    tab <- Homemadewt$Table
    ## Skip years for which no homemade-income table is defined.
    if(is.na(tab))
        next
    ## Urban ("U") and rural ("R") halves of the same survey table.
    UTHomemadeW <- Tables[[paste0("U",year,tab)]]
    RTHomemadeW <- Tables[[paste0("R",year,tab)]]
    THomemadeW <- rbind(UTHomemadeW,RTHomemadeW,fill=TRUE)
    ## Rename raw column names to the canonical names used in the metadata
    ## row (a column is renamed when its raw name appears in Homemadewt).
    for(n in names(THomemadeW)){
        x <- which(Homemadewt==n)
        if(length(x)>0)
            setnames(THomemadeW,n,names(Homemadewt)[x])
    }
    ## Keep only the identifier and value columns that exist this year.
    pcols <- intersect(names(THomemadeW),c("HHID","Code","homemade","IndivNo"))
    THomemadeW <- THomemadeW[,pcols,with=FALSE]
    ## For survey years 63-68 the table mixes several item codes; keep only
    ## the code range declared in the metadata.
    if(year %in% 63:68){
        THomemadeW <- THomemadeW[Code %in% Homemadewt$StartCode:Homemadewt$EndCode]
    }
    THomemadeW[is.na(THomemadeW)] <- 0
    save(THomemadeW, file = paste0(Settings$HEISProcessedPath,"Y",year,"THomemadeW.rda"))
    ## Collapse to one row per household (sum of all numeric columns).
    HomemadeWageData <- THomemadeW[,lapply(.SD,sum),by=HHID]
    save(HomemadeWageData, file = paste0(Settings$HEISProcessedPath,"Y",year,"HomemadeWage.rda"))
}

endtime <- proc.time()
cat("\n\n============================\nIt took ")
## Prints the user/system/elapsed timing components, in seconds.
cat(endtime-starttime)
1db5e1edbb36ef1b48ee4ccbb96dd45883437467
|
a252fb33f4fa269e78ce3fd35fd6af169d624acb
|
/model/lauren/mleFits/models/noToM_model.R
|
4f7d6160aa987d2e2e35f90bbe78d7373db9b415
|
[] |
no_license
|
la-oey/Bullshitter
|
a55347378c6293c78dc50f02476060bb96437388
|
38a068660e7dca8a369c0e18c3e7324abcf451b5
|
refs/heads/master
| 2021-11-27T17:34:24.706342
| 2021-11-22T07:21:15
| 2021-11-22T07:21:15
| 157,911,292
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,930
|
r
|
noToM_model.R
|
setwd("/Users/loey/Desktop/Research/FakeNews/Bullshitter/model/lauren/mleFits")
source("ToMModelFunctions.R")
library(tidyverse)
## Sender predictions without theory of mind.  Evaluates p.L_ksay.k.r (from
## the sourced ToMModelFunctions.R) over the 6 condition combinations
## (3 base rates 0.2/0.5/0.8 x 2 utility directions +1/-1), producing an
## 11 x 11 x 6 array.  'weight' (logit scale, clamped to [-10, 10] before
## converting to a probability) mixes the model's off-diagonal (lie)
## probabilities with a uniform 1/10 lapse over the 10 untrue reports; the
## diagonal (truthful) probabilities are kept and the off-diagonal mass is
## renormalized so each column still sums to 1.
noToM.s.pred <- function(alph, eta.S, weight){
  weight = logitToProb(pmin(10, pmax(-10, weight)))
  predS = mapply(function(i, j){
    p.L_ksay.k.r(j, alph, eta.S, i, lastlvl=TRUE, rep(0.5,11))
  },
  rep(c(0.2,0.5,0.8), 2), rep(c(1,-1), each=3))
  predS = array(predS, dim=c(11,11,6))
  ## Split the prediction into its lie (off-diagonal) and truth (diagonal)
  ## components before applying the lapse mixture to the lies only.
  offDiag <- array(1-diag(11), dim=c(11,11,6))*predS
  weightedOffDiag <- array(1-diag(11), dim=c(11,11,6))*(weight*offDiag + (1-weight)*1/10)
  onDiag <- array(diag(11), dim=c(11,11,6))*predS
  weightedS <- sweep(weightedOffDiag, MARGIN=c(2,3), (1-apply(onDiag, MARGIN=3, diag))/colSums(weightedOffDiag),`*`) + onDiag # lapse rate on lies only
  weightedS
}
## For each possible report, the marginal probability that the report is a
## lie (reported value != true value), given the true-count distribution
## p.k(., p) and a sender policy matrix 'p.L' (rows: true count k,
## columns: reported count k*).
p_t.ksay.r_p.L <- function(p, p.L) { #probability of not telling the truth
  P.K <- matrix(rep(p.k(0:numMarbles, p), each=numMarbles+1), nrow=numMarbles+1)
  P.L_KSAY.K <- matrix(p.L, nrow=numMarbles+1, ncol=numMarbles+1)
  ## LIE masks out the diagonal (truthful reports).
  LIE = 1-diag(numMarbles+1)
  rowSums(P.K*P.L_KSAY.K*LIE)/rowSums(P.K*P.L_KSAY.K)
}
## Null (uniform-sender) receiver baselines: the probability that each
## possible report 0..10 is true, assuming the sender reports uniformly,
## computed once per base rate.
null0.2 <- p_true.ksay(p.k(0:numMarbles, 0.2),
                       matrix(rep(1/11,121), nrow=numMarbles+1, ncol=numMarbles+1))
null0.5 <- p_true.ksay(p.k(0:numMarbles, 0.5),
                       matrix(rep(1/11,121), nrow=numMarbles+1, ncol=numMarbles+1))
null0.8 <- p_true.ksay(p.k(0:numMarbles, 0.8),
                       matrix(rep(1/11,121), nrow=numMarbles+1, ncol=numMarbles+1))

## Vectorized lookup of the null baseline for report 'kstar' at base rate
## 'br' (one of 0.2/0.5/0.8; any other value yields NA via case_when).
nullVec <- function(br, kstar){
  case_when(
    br == 0.2 ~ null0.2[kstar+1],
    br == 0.5 ~ null0.5[kstar+1],
    br == 0.8 ~ null0.8[kstar+1]
  )
}
## Receiver predictions without theory of mind: evaluates p.D_bs.ksay.r
## (from the sourced ToMModelFunctions.R) for every report 0..10 in each of
## the 6 conditions (3 base rates x 2 utility directions), seeding it with
## the null baseline from nullVec(); results are collected into a matrix
## with 11 rows (one per report).
noToM.r.pred <- function(alph, eta.R){
  matrix(
    mapply(
      function(i,j,k) p.D_bs.ksay.r(i, j, k, alph, eta.R, lastlvl=TRUE, nullVec(j, i)),
      rep(0:10,6),
      rep(rep(c(0.2,0.5,0.8),each=11),2),
      rep(c(1,-1), each=33)
    ),
    nrow=11
  )
}

## Smoke check: print the receiver predictions for one parameter setting.
noToM.r.pred(0.2, 5)
|
f4ced9679297ee5a6341a83596d6bb7bb1bd340a
|
8dc01e5b732f84181089d0b36be6edf049459c3f
|
/run_analysis.R
|
dce8b31ff52d0e6c54c8149fe9137322ae0f3815
|
[] |
no_license
|
clankford/getting-and-cleaning-data-project
|
2afa57d3c937e55ffbfcaf298ca97d38f2d3241f
|
40fb46b0406c998ec6e0ec1e9096ee6c9d506801
|
refs/heads/master
| 2021-01-10T13:43:00.421124
| 2015-05-24T06:27:18
| 2015-05-24T06:27:18
| 36,158,876
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,543
|
r
|
run_analysis.R
|
## run_analysis.R: merge the UCI HAR test/train data, keep only the mean/std
## measurements, attach descriptive activity and variable names, and output
## a tidy summary (mean of every variable per subject/activity pair).
## Fix: the script uses %>%, group_by() and summarise_each() but never
## loaded dplyr, so the final step could not run.
library(dplyr)

##setwd("~/Dropbox/Data Science Specialization/Getting and Cleaning Data/data")
rawFeatures <- read.table("UCI HAR Dataset/features.txt")
rawActivityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
## Remove factor levels and treat the activity labels as characters.
rawActivityLabels$V2 <- as.character(rawActivityLabels$V2)
names(rawActivityLabels)[1] <- "activity"

## ---- test set: bind activity key, subject id and measurements ----
rawTestData <- read.table("UCI HAR Dataset/test/X_test.txt")
rawSubjectTest <- read.table("UCI HAR Dataset/test/subject_test.txt")
rawActivityTest <- read.table("UCI HAR Dataset/test/y_test.txt")
mergedTest <- cbind(rawActivityTest, rawSubjectTest, rawTestData)
names(mergedTest)[1] <- "activity"
names(mergedTest)[2] <- "subject"
## Attach the descriptive activity labels.
mergedTest <- merge(rawActivityLabels, mergedTest, by="activity")

## ---- train set: same treatment ----
rawTrainData <- read.table("UCI HAR Dataset/train/X_train.txt")
rawSubjectTrain <- read.table("UCI HAR Dataset/train/subject_train.txt")
rawActivityTrain <- read.table("UCI HAR Dataset/train/y_train.txt")
mergedTrain <- cbind(rawActivityTrain, rawSubjectTrain, rawTrainData)
names(mergedTrain)[1] <- "activity"
names(mergedTrain)[2] <- "subject"
mergedTrain <- merge(rawActivityLabels, mergedTrain, by="activity")

## Merge test & train sets.
mergedComplete <- rbind.data.frame(mergedTest, mergedTrain)

## Full vector of column names: activity key, activity label (added by
## merge), subject id, then the feature names from features.txt.
vFeatures <- c("activityKey", "activityLabel", "subject",
               as.character(rawFeatures[["V2"]]))

## Keep only the mean and standard deviation measurements (plus the three
## id columns).
colsToKeep <- grepl("mean|std", vFeatures)
colsToKeep[1:3] <- TRUE
reducedComplete <- data.frame(mergedComplete[, colsToKeep])
vFeatures <- vFeatures[colsToKeep]

## Give descriptive variable names.
Names <- gsub("-std", "StandardDeviation", vFeatures)
Names <- gsub("-mean", "Mean", Names)
Names <- gsub("\\(\\)", "", Names)
Names <- gsub("[-_]", "", Names)
## Fix: vectorized assignment replaces the element-by-element 1:length loop.
names(reducedComplete) <- Names
## Removes the row names left over from merge().
rownames(reducedComplete) <- NULL

## Script outputs the summarized tidy data: the mean of every variable for
## each subject/activity pair.
reducedComplete %>%
    group_by(subject, activityLabel) %>%
    summarise_each(funs(mean))
|
2bd789d7bacc88d25484fa0eeb6931bcf7ed0da3
|
86e4124e3a2884a0a2bec25915c9476ecc97ddbf
|
/plot6.R
|
fd19c75bde94bab9e4bfd3cf44c650b42a52ae93
|
[] |
no_license
|
sjrocks/ExData_Plotting2
|
bdeda2f24ba479a39856a45394fbc63a83fa0e20
|
2b8a049ff14ebcd08b5dc8808ce944ae100cd912
|
refs/heads/master
| 2021-01-10T03:22:34.775229
| 2016-02-15T00:56:05
| 2016-02-15T00:56:05
| 51,721,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 863
|
r
|
plot6.R
|
## plot6.R: compare motor-vehicle PM2.5 emissions over time in Baltimore
## City (fips 24510) and Los Angeles County (fips 06037), saved to plot6.png.
library(dplyr)
library(ggplot2)

## NOTE(review): rm(list=ls()) wipes the calling workspace; kept because
## this script is run standalone, but avoid it in shared sessions.
rm(list=ls())

# Read the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Restrict to motor-vehicle sources via the source classification table.
SCC.motorVehicle <- filter(SCC, grepl("vehicle", EI.Sector, perl = T, ignore.case = T))
NEI.motorVehicle <- inner_join(NEI, SCC.motorVehicle, by="SCC")

# Aggregate by city and year.
# Fix: the original aggregated the full NEI table, so the motor-vehicle
# subset computed above was never used and the plot showed ALL emissions.
totalByYear <- NEI.motorVehicle %>%
    filter(fips=="24510" | fips=="06037") %>%
    group_by(fips, year) %>%
    summarize(total = sum(Emissions)) %>%
    mutate(city=ifelse(fips=="24510", "Baltimore", "Los Angeles"))

# Create Plot
png("plot6.png", width = 640, height = 480)
p <- ggplot(totalByYear, aes(x=year, y=total/1000, colour=city)) +
    geom_point(alpha=.3) +
    geom_smooth(alpha=.2, size=1, method="loess") +
    xlab("Year") + ylab("Emission in KTon") +
    # Fix: ggtitle() was a free-standing statement whose result was
    # discarded; it must be added to the plot with "+".
    ggtitle("Vehicle Emissions in Baltimore vs. Los Angeles")
print(p)
dev.off()
|
87b42cb4710b26d0ff6fba929bafe43664cd491d
|
6aead4c3fbbff49544c12c0d1b02d944cc3fce85
|
/makingValuesCategorical.r
|
a61f8989eb6780618f07e5725343ff5d46fc48b8
|
[] |
no_license
|
mycakmak/acikakademikodlari
|
d619ad69697193abaf7d5709e20f85931aaee232
|
4712c15a07ddd6714849b11934f9530b159a9117
|
refs/heads/master
| 2021-01-23T21:17:20.822709
| 2017-11-16T13:59:42
| 2017-11-16T13:59:42
| 102,891,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 187
|
r
|
makingValuesCategorical.r
|
# Azure ML "Execute R Script" module: read the input data frame and
# convert the categorical id/weekday columns from character to factor.
frame2 <- maml.mapInputPort(1)
# Bug fix: the lapply() previously read from `frame1`, which is never
# defined in this script; the module's input frame is `frame2`.
cat_cols <- c("customer_id", "product_id", "weekday")
frame2[, cat_cols] <- lapply(frame2[, cat_cols], as.factor)
maml.mapOutputPort("frame2");
|
41dc6257d7ada41819bd8f4ffefb203ffd7fe523
|
ab2749ed7092f3d79690a2495867b109daf9a48b
|
/R/app_ui_results.R
|
e9028e76d6f697627461e7f29a1dd8954e9b121a
|
[
"MIT"
] |
permissive
|
TileDB-Inc/gtexplorer
|
49f93d47f055fd1498dd52ed25ee1df1a972b2d5
|
df9a9814d095e53d8b93e97164f81ca42a8847d0
|
refs/heads/master
| 2023-08-26T22:16:00.507436
| 2021-09-30T16:00:45
| 2021-09-30T16:01:07
| 368,531,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 217
|
r
|
app_ui_results.R
|
app_ui_results <- function() {
  # Results panel: a gene table row stacked above a plotly results plot.
  gene_table <- shiny::fluidRow(
    DT::DTOutput("table_genes")
  )
  results_plot <- shiny::fluidRow(
    plotly::plotlyOutput("plot_results", height = "550px")
  )
  div(id = "options", gene_table, results_plot)
}
|
7c0968b03b3a179b52084615a26c9cdfcaefbc38
|
701b77156f818bc155617253405eeac6fe80b6a1
|
/misc/pdf_to_text.R
|
c86d566c43dfbd56578ec62cfbb212a235bf4a14
|
[
"MIT"
] |
permissive
|
kingwatam/csrp_hku
|
63c6f367587114d44f82215d15bfa78138ba6b86
|
5688966281dd0a7ea5cda793303268833f09274c
|
refs/heads/main
| 2023-03-19T19:58:14.538540
| 2021-03-05T09:31:16
| 2021-03-05T09:31:16
| 308,496,334
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
pdf_to_text.R
|
# Extract the text layer from a PDF and dump it to a plain-text file.

# NOTE(review): clearing the global environment and calling setwd() are
# discouraged side effects in scripts; kept as-is here.
rm(list=ls())
graphics.off()
# par(mar=c(0,0,0,0)) # set plot margins to 0
setpath <- "/MEGAsync/Work/HKU/CSRP"
setwd(sprintf("~%s", setpath))
source("helper_functions.R")
library(pdftools)
library(magrittr)
library(readr)
# Work inside the misc/ subfolder where the example PDF lives.
setwd(sprintf("~%s/misc", setpath))
# pdf_text() returns one character string per page of the PDF.
text <- pdf_text('example.pdf')
# Concatenate all page strings into a single text file.
cat(text, file = 'example.txt')
|
4a9d08790a96ec3d3bf0686f735b00c44d026752
|
92630399e7e476b1307eba0deceeab6048c69687
|
/R/optimaltrees.R
|
439433576f23fdcf327ee5952a27eea4b8f92d07
|
[] |
no_license
|
cran/iai
|
53bbc8fa096659614e4b288902cec2021feb35d0
|
591d213c1a7f9cafa0673e3f897f2c589f3e0435
|
refs/heads/master
| 2023-06-23T03:16:43.953505
| 2023-06-13T15:10:06
| 2023-06-13T15:10:06
| 197,544,870
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,047
|
r
|
optimaltrees.R
|
#' Learner for training Optimal Classification Trees
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreeClassifier}{\code{IAI.OptimalTreeClassifier}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_classifier()}
#'
#' @export
optimal_tree_classifier <- function(...) {
  # Consistency fix: every other learner constructor in this file calls the
  # "*_convert" Julia wrapper (which converts R arguments/results); this one
  # called the raw "IAI.OptimalTreeClassifier" entry point.
  set_obj_class(jl_func("IAI.OptimalTreeClassifier_convert", ...))
}
#' Learner for training Optimal Regression Trees
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreeRegressor}{\code{IAI.OptimalTreeRegressor}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_regressor()}
#'
#' @export
optimal_tree_regressor <- function(...) {
  # Build the Julia learner object, then tag it with the R S3 classes.
  learner <- jl_func("IAI.OptimalTreeRegressor_convert", ...)
  set_obj_class(learner)
}
#' Learner for training Optimal Survival Trees
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreeSurvivalLearner}{\code{IAI.OptimalTreeSurvivalLearner}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_survival_learner()}
#'
#' @export
optimal_tree_survival_learner <- function(...) {
  # The learner was renamed from OptimalTreeSurvivor in IAI v2.0.0;
  # pick the constructor matching the installed IAI version.
  constructor <- if (iai_version_less_than("2.0.0")) {
    "IAI.OptimalTreeSurvivor_convert"
  } else {
    "IAI.OptimalTreeSurvivalLearner_convert"
  }
  set_obj_class(jl_func(constructor, ...))
}
#' Learner for training Optimal Survival Trees
#'
#' This function was deprecated and renamed to [optimal_tree_survival_learner()]
#' in iai 1.3.0. This is for consistency with the IAI v2.0.0 Julia release.
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_survivor()}
#'
#' @export
#' @md
optimal_tree_survivor <- function(...) {
  # Deprecated alias kept for backwards compatibility; warns, then
  # forwards all arguments to optimal_tree_survival_learner().
  lifecycle::deprecate_warn(
    "1.3.0",
    "iai::optimal_tree_survivor()",
    "optimal_tree_survival_learner()"
  )
  optimal_tree_survival_learner(...)
}
#' Learner for training Optimal Prescriptive Trees where the prescriptions
#' should aim to minimize outcomes
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreePrescriptionMinimizer}{\code{IAI.OptimalTreePrescriptionMinimizer}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_prescription_minimizer()}
#'
#' @export
optimal_tree_prescription_minimizer <- function(...) {
  # Build the Julia learner object, then tag it with the R S3 classes.
  learner <- jl_func("IAI.OptimalTreePrescriptionMinimizer_convert", ...)
  set_obj_class(learner)
}
#' Learner for training Optimal Prescriptive Trees where the prescriptions
#' should aim to maximize outcomes
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreePrescriptionMaximizer}{\code{IAI.OptimalTreePrescriptionMaximizer}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_prescription_maximizer()}
#'
#' @export
optimal_tree_prescription_maximizer <- function(...) {
  # Build the Julia learner object, then tag it with the R S3 classes.
  learner <- jl_func("IAI.OptimalTreePrescriptionMaximizer_convert", ...)
  set_obj_class(learner)
}
#' Learner for training Optimal Policy Trees where the policy should aim to
#' minimize outcomes
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreePolicyMinimizer}{\code{IAI.OptimalTreePolicyMinimizer}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_policy_minimizer()}
#'
#' @section IAI Compatibility:
#' Requires IAI version 2.0 or higher.
#'
#' @export
optimal_tree_policy_minimizer <- function(...) {
  # Policy trees require IAI v2.0+; fail fast on older installations.
  requires_iai_version("2.0.0", "optimal_tree_policy_minimizer")
  learner <- jl_func("IAI.OptimalTreePolicyMinimizer_convert", ...)
  set_obj_class(learner)
}
#' Learner for training Optimal Policy Trees where the policy should aim to
#' maximize outcomes
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreePolicyMaximizer}{\code{IAI.OptimalTreePolicyMaximizer}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_policy_maximizer()}
#'
#' @section IAI Compatibility:
#' Requires IAI version 2.0 or higher.
#'
#' @export
optimal_tree_policy_maximizer <- function(...) {
  # Policy trees require IAI v2.0+; fail fast on older installations.
  requires_iai_version("2.0.0", "optimal_tree_policy_maximizer")
  learner <- jl_func("IAI.OptimalTreePolicyMaximizer_convert", ...)
  set_obj_class(learner)
}
#' Learner for training multi-task Optimal Classification Trees
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreeMultiClassifier}{\code{IAI.OptimalTreeMultiClassifier}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_multi_classifier()}
#'
#' @section IAI Compatibility:
#' Requires IAI version 3.2 or higher.
#'
#' @export
optimal_tree_multi_classifier <- function(...) {
  # Multi-task trees require IAI v3.2+; fail fast on older installations.
  requires_iai_version("3.2.0", "optimal_tree_multi_classifier")
  learner <- jl_func("IAI.OptimalTreeMultiClassifier_convert", ...)
  set_obj_class(learner)
}
#' Learner for training multi-task Optimal Regression Trees
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.OptimalTreeMultiRegressor}{\code{IAI.OptimalTreeMultiRegressor}}
#'
#' @param ... Use keyword arguments to set parameters on the resulting learner.
#' Refer to the Julia documentation for available parameters.
#'
#' @examples \dontrun{lnr <- iai::optimal_tree_multi_regressor()}
#'
#' @section IAI Compatibility:
#' Requires IAI version 3.2 or higher.
#'
#' @export
optimal_tree_multi_regressor <- function(...) {
  # Multi-task trees require IAI v3.2+; fail fast on older installations.
  requires_iai_version("3.2.0", "optimal_tree_multi_regressor")
  learner <- jl_func("IAI.OptimalTreeMultiRegressor_convert", ...)
  set_obj_class(learner)
}
#' Refit the models in the leaves of a trained learner using the supplied data
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.refit_leaves!}{\code{IAI.refit_leaves!}}
#'
#' @param lnr The learner to refit
#' @param ... Refer to the Julia documentation for available parameters
#'
#' @examples \dontrun{iai::refit_leaves(lnr, ...)}
#'
#' @section IAI Compatibility:
#' Requires IAI version 3.0 or higher.
#'
#' @export
refit_leaves <- function(lnr, ...) {
  # Leaf refitting requires IAI v3.0+.
  requires_iai_version("3.0.0", "refit_leaves")
  refitted <- jl_func("IAI.refit_leaves_convert", lnr, ...)
  set_obj_class(refitted)
}
#' Copy the tree split structure from one learner into another and refit the
#' models in each leaf of the tree using the supplied data
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.copy_splits_and_refit_leaves!}{\code{IAI.copy_splits_and_refit_leaves!}}
#'
#' @param new_lnr The learner to modify and refit
#' @param orig_lnr The learner from which to copy the tree split structure
#' @param ... Refer to the Julia documentation for available parameters
#'
#' @examples \dontrun{iai::copy_splits_and_refit_leaves(new_lnr, orig_lnr, ...)}
#'
#' @section IAI Compatibility:
#' Requires IAI version 3.0 or higher.
#'
#' @export
copy_splits_and_refit_leaves <- function(new_lnr, orig_lnr, ...) {
  # Split copying with leaf refitting requires IAI v3.0+.
  requires_iai_version("3.0.0", "copy_splits_and_refit_leaves")
  result <- jl_func("IAI.copy_splits_and_refit_leaves_convert",
                    new_lnr, orig_lnr, ...)
  set_obj_class(result)
}
#' Use the trained trees in a learner along with the supplied validation data to
#' determine the best value for the `cp` parameter and then prune the trees
#' according to this value
#'
#' Julia Equivalent:
#' \href{https://docs.interpretable.ai/v3.1.1/OptimalTrees/reference/#IAI.prune_trees!}{\code{IAI.prune_trees!}}
#'
#' @param lnr The learner to prune
#' @param ... Refer to the Julia documentation for available parameters
#'
#' @examples \dontrun{iai::prune_trees(lnr, ...)}
#'
#' @section IAI Compatibility:
#' Requires IAI version 3.0 or higher.
#'
#' @export
prune_trees <- function(lnr, ...) {
  # cp-based pruning against validation data requires IAI v3.0+.
  requires_iai_version("3.0.0", "prune_trees")
  pruned <- jl_func("IAI.prune_trees_convert", lnr, ...)
  set_obj_class(pruned)
}
|
efdc90dcc7598ffb2e4cf128d7728c46339ec763
|
6edb95a7572e2248c2c772e55953a452ee699dbc
|
/Web Crawling/한빛미디어 1.R
|
754c29d3da18f22d7ef73c1084086d84b0428c02
|
[] |
no_license
|
wotjr5477/R-Lecture-2021
|
064f53854f4d17a0c87cfdcc683f3723bc3d3b86
|
d1871cf852eda6fc5417db7ae62e785b649f8ece
|
refs/heads/main
| 2023-08-27T14:02:06.012255
| 2021-10-25T04:38:15
| 2021-10-25T04:38:15
| 359,694,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,437
|
r
|
한빛미디어 1.R
|
# 210510
# Web-crawling practice against the Hanbit Media book site
library(rvest)
library(stringr)
library(dplyr)
# Read the web site
base_url = "https://www.hanbit.co.kr/media/books"
sub_url = "new_book_list.html"
url = paste(base_url, sub_url, sep="/")
url
html = read_html(url)
container = html_node(html, "#container") # id = "container"
book_list = html_node(container, ".new_book_list_wrap") # class="new~"
sub_book_list = html_node(book_list, ".sub_book_list_area")
sub_book_list
lis = html_nodes(sub_book_list, "li") # find all <li> nodes
lis # same content as sub_book_list, filtered down to "li"
# Extract title/author of the first book as a worked example
li = lis[1]
info = html_node(li, ".info")
title = html_node(info, ".book_tit")
title = html_text(title)
title
writer = info %>% html_node(".book_writer") %>%
  html_text()
writer
# Collect title/author for every book on the listing page
# NOTE(review): growing vectors with c() in a loop is O(n^2); fine for a
# short book list, but preallocation would be better.
title_vector = c()
writer_vector = c()
for (li in lis) {
  info = html_node(li, ".info")
  title = info %>% html_node(".book_tit") %>%
    html_text()
  writer = info %>% html_node(".book_writer") %>%
    html_text()
  title_vector = c(title_vector, title)
  writer_vector = c(writer_vector, writer)
}
new_books = data.frame(
  title = title_vector,
  writer = writer_vector
)
View(new_books)
#########################
# Crawl book detail page #
#########################
li = lis[1]
li # first book
href = li %>% html_node(".info") %>%
  html_node("a") %>% html_attr("href")
href
# NOTE(review): the first book_url assignment is dead code -- it is
# immediately overwritten by the line below.
book_url = paste(base_url, substr(href,2, length(base_url)), sep="/" )
book_url = paste(base_url, href, sep="/" )
book_html = read_html(book_url)
# Page count: scan the info list for the entry labelled with the
# Korean word for "pages"
info_list = html_node(book_html, "ul.info_list"); info_list
lis = html_nodes(info_list, "li"); lis # info_list only contains li nodes, so this filter is optional
for (li in lis) {
  item = li %>% html_node("strong") %>% html_text() # select <strong> inside the li
  if(substring(item, 1, 3) == "페이지") { # keep only the "pages" entry
    page = li %>% html_node("span") %>% html_text()
    #print(page) # "284 쪽"
    #as.integer(substr(page, 1, 3))
    #print(as.integer(substr(page, 1, nchar(page)-2)))
    # Strip the trailing " 쪽" (pages) suffix and parse the number
    page = as.integer(substr(page, 1, nchar(page)-2))
    print(page)
    break;
  }
}
#page_li = lis[4]
#page = html_node(page_li, "span") %>% html_text()
#page
# Price: second <p> of the payment box, thousands separators removed
pay_info = html_node(book_html, ".payment_box.curr")
ps = html_nodes(pay_info, "p")
price = ps[2] %>%
  html_node(".pbr") %>%
  html_node("strong") %>%
  html_text()
price = as.integer(gsub(",","",price))
|
147791936161453950589a3f9f866324c52402a1
|
73590bd798f17fbc859c89726c3eb2c293394384
|
/curation/ovarian/curatedOvarianData/man/GSE19829.GPL570_eset.Rd
|
7dc312ba0b9d179f3e679b37417a2acb61d6299e
|
[] |
no_license
|
tgerke/catd
|
b8f4d68f61d92ad3d5e6264325137637d65079cd
|
ba2b6298532cf86cfb8d8223ca35f688b9c332f9
|
refs/heads/master
| 2021-05-11T19:45:09.159306
| 2018-01-17T20:39:26
| 2018-01-17T20:39:26
| 117,888,686
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,550
|
rd
|
GSE19829.GPL570_eset.Rd
|
\name{GSE19829.GPL570_eset}
\alias{GSE19829.GPL570_eset}
\docType{data}
\title{Gene Expression Profile of BRCAness That Correlates With Responsiveness to Chemotherapy and With Outcome in Patients With Epithelial Ovarian Cancer.}
\description{To define a gene expression profile of BRCAness that correlates with chemotherapy response and outcome in epithelial ovarian cancer (EOC).}
\usage{data(GSE19829.GPL570_eset)}
\format{
Authors: Konstantinopoulos PA, Spentzos D, Karlan BY, Taniguchi T et al.
Lab: Beth Israel Deaconess Medical Center, Harvard Medical School, Boston, MA, USA.
Contact: scannist@bidmc.harvard.edu
Title: Gene Expression Profile of BRCAness That Correlates With Responsiveness to Chemotherapy and With Outcome in Patients With Epithelial Ovarian Cancer.
Url: http://pubget.com/search?q=20547991
PubMedID: 20547991
}
\details{
Platform used: [HG-U133_Plus_2] Affymetrix Human Genome U133 Plus 2.0 Array.
frma normalization used from the frma bioconductor package.
assayData: 18708 features, 28 samples.
GEO_platform_accession: GSE19829-GPL570.
platform_summary: GPL570
biomart_ID: HG-U133_Plus_2 affy_hg_u133_plus_2
Overall survival time-to-event summary (in years):
Call: survfit(formula = Surv(time, cens) ~ -1)
records = 28.00 n.max = 28.00 n.start = 28.00 events = 17.00 median = 3.95 0.95LCL = 2.71 0.95UCL = NA
------------------------------------------------------------------------------
Available sample meta-data:
------------------------------------------------------------------------------
alt_sample_name:
Length = 28.
Class = character.
Mode = character.
sample_type:
tumor = 28.
primarysite:
ov = 28.
days_to_death:
Min. = 150 1st Qu.= 540 Median = 1050 Mean = 1291 3rd. Qu = 1688 Max. = 3450
vital_status:
deceased = 17. living = 11.
debulking:
unknown = 28.
batch:
2009-08-14
28
}
\source{
http://www.ncbi.nlm.nih.gov/projects/geo/query/acc.cgi?acc=GSE19829
}
\examples{
data(GSE19829.GPL570_eset)
## maybe str(GSE19829.GPL570_eset) ; plot(GSE19829.GPL570_eset) ...
if(require(affy)){
summary(GSE19829.GPL570_eset$vital_status)
time <- GSE19829.GPL570_eset$days_to_death / 365
cens <- ifelse(GSE19829.GPL570_eset$vital_status=="deceased",1,0)
library(survival)
fit <- survfit(Surv(time,cens)~-1)
##fit #for summary of survival
summary(fit)
plot(fit,xlab="Time (years)",ylab="Survivor function")
inverse.fit <- survfit(Surv(time,1-cens) ~ -1)
inverse.fit #for summary of follow-up time
}
}
\keyword{datasets}
|
4426d9259adabd39cbcba0c36a21c4417d1fc53a
|
6c2e5e53628f944665176749b7b29491789acc8d
|
/testes_hipotese.r
|
e2279c4dab68c94ea34a317de098f19628feb079
|
[] |
no_license
|
anselmobattisti/loadbalance_tests
|
367ea1fb1ac516b43dbda10ca88ada362a77a0d5
|
9be81726a59fd2ed2a55523fa8a9789ada282c9e
|
refs/heads/master
| 2020-04-07T00:38:31.727399
| 2018-11-29T23:09:13
| 2018-11-29T23:09:13
| 157,912,717
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 126
|
r
|
testes_hipotese.r
|
wilcox.test(dados_4$Round.Robin, dados_4$Round.Robin.Weight, paired = TRUE, exact = FALSE, conf.int = TRUE, conf.level = 0.95)
|
4890548047615cd66d8608cffd1199623aa8780d
|
caf56f313d6e34f4da4c5a0a29d31ff86262533a
|
/R/all-equal.r
|
003491d26a4a3f7cf4ed85c019406b3b02f26141
|
[] |
no_license
|
bhive01/tibble
|
c00b4894e4067a2d6443a33808649bf327367b3a
|
7c0aca252cba66ff02e48e9a9dffea816ffe4d4f
|
refs/heads/master
| 2021-01-17T04:56:06.995980
| 2016-03-19T00:43:30
| 2016-03-19T00:43:30
| 54,232,754
| 0
| 1
| null | 2016-03-19T00:43:30
| 2016-03-18T21:34:01
|
R
|
UTF-8
|
R
| false
| false
| 3,409
|
r
|
all-equal.r
|
#' Flexible equality comparison for data frames.
#'
#' When comparing two \code{tbl_df} using \code{\link{all.equal}}, column and
#' row order is ignored by default, and types are not coerced. The \code{dplyr}
#' package provides a much more efficient implementation for this functionality.
#'
#' @param target,current Two data frames to compare.
#' @param ignore_col_order Should order of columns be ignored?
#' @param ignore_row_order Should order of rows be ignored?
#' @param convert Should similar classes be converted? Currently this will
#' convert factor to character and integer to double.
#' @param ... Ignored. Needed for compatibility with \code{all.equal}.
#' @return \code{TRUE} if equal, otherwise a character vector describing
#' the reasons why they're not equal. Use \code{\link{isTRUE}} if using the
#' result in an \code{if} expression.
#' @examples
#' scramble <- function(x) x[sample(nrow(x)), sample(ncol(x))]
#' mtcars_df <- as_data_frame(mtcars)
#'
#' # By default, ordering of rows and columns ignored
#' all.equal(mtcars_df, scramble(mtcars_df))
#'
#' # But those can be overriden if desired
#' all.equal(mtcars_df, scramble(mtcars_df), ignore_col_order = FALSE)
#' all.equal(mtcars_df, scramble(mtcars_df), ignore_row_order = FALSE)
#'
#' # By default all.equal is sensitive to variable differences
#' df1 <- data_frame(x = "a")
#' df2 <- data_frame(x = factor("a"))
#' all.equal(df1, df2)
#' # But you can request to convert similar types
#' all.equal(df1, df2, convert = TRUE)
all_equal <- function(target, current, ignore_col_order = TRUE,
                      ignore_row_order = TRUE, convert = FALSE, ...) {
  # Classes must match exactly (e.g. both c("tbl_df", "tbl", "data.frame")).
  if (!identical(class(target), class(current))) {
    return(paste0("Different types: x ", paste(class(target), collapse = ", "),
                  ", y ", paste(class(current), collapse = ", ")))
  }

  if (nrow(target) != nrow(current)) {
    return("Different number of rows")
  }

  # Column sets must agree in both directions.
  extra_x <- setdiff(names(target), names(current))
  if (length(extra_x) > 0L) {
    return(paste0("Cols in x but not y: ", paste(extra_x, collapse = ", ")))
  }

  extra_y <- setdiff(names(current), names(target))
  if (length(extra_y) > 0L) {
    return(paste0("Cols in y but not x: ", paste(extra_y, collapse = ", ")))
  }

  # Bug fix: `names(target) != names(current)` yields a logical VECTOR,
  # which is not a valid scalar condition for if() (a hard error since
  # R 4.2). Compare the whole name vectors with identical() instead.
  if (!ignore_col_order && !identical(names(target), names(current))) {
    return("Column names same but in different order")
  }

  # Align current's column order with target's, preserving row names.
  current <- `rownames<-`(current[names(target)], rownames(current))

  # Column-by-column class comparison; mapply returns NULL for matches,
  # so unlist() keeps only the mismatch descriptions (named by column).
  types <- unlist(mapply(
    function(x, y) {
      if (!identical(class(x), class(y))) {
        paste0("x ", class(x), ", y ", class(y))
      }
    },
    target, current
  ))
  if (length(types) > 0L) {
    types <- paste0("Incompatible type for column ", names(types), ": ", types)
    if (convert) {
      # With convert = TRUE, type mismatches are downgraded to warnings.
      lapply(types, warning, call. = FALSE)
    } else {
      return(types)
    }
  }

  # Factor columns must agree on their level sets.
  factor_levels <- unlist(mapply(
    function(x, y) {
      if (!identical(levels(x), levels(y))) {
        TRUE
      }
    },
    target, current
  ))
  if (length(factor_levels) > 0L) {
    return(paste0("Factor levels not equal for column ", names(factor_levels)))
  }

  if (ignore_row_order) {
    # Sort both frames by all columns so row order becomes irrelevant.
    target <- target[do.call(order, target), ]
    current <- current[do.call(order, current), ]
  }

  all.equal(as.data.frame(target), as.data.frame(current), ...)
}

#' @export
#' @rdname all_equal
#' @method all.equal tbl_df
all.equal.tbl_df <- all_equal
|
f600993c3d5c17b427af6d279091f22713b34348
|
9e1a32fb410bbd83a8c3935eb820d5fd5e97b668
|
/analysis.R
|
cc0e159198524e05f88030bd23745ab4c96abd64
|
[
"MIT"
] |
permissive
|
inti/ASE
|
3f72b72c3cce8302c4f9a6f6eab67e7a94a13207
|
4f7d40487f80643a7981945598c03932c7d4a38e
|
refs/heads/master
| 2020-03-26T17:04:49.435801
| 2018-11-13T19:20:03
| 2018-11-13T19:20:03
| 145,140,411
| 0
| 0
|
MIT
| 2018-10-04T12:58:01
| 2018-08-17T16:00:34
| null |
UTF-8
|
R
| false
| false
| 770
|
r
|
analysis.R
|
# Allele-specific expression (ASE) analysis for a single gene.
library(brms)
library(VGAM)
library(plyr)
library(ggplot2)
library(lmerTest)
# Sample metadata (SRA run table) and per-gene ASE posterior values.
info = read.delim("SraRunTable.txt")
data = read.delim("test_all_10K",header=T)
# Posterior alpha/beta converted to integer columns x and n.
data$x = as.integer(data$alpha_post)
data$n = as.integer(data$beta_post)
head(data,3)
# Focus on one gene and join in treatment / colony covariates by run id.
s = subset(data, name =="LOC100650155")
s2 = merge(s[,c("bam","pASE","x","n")],info[,c("Run","treatment","colony_of_origin")],by.x="bam",by.y="Run")
# Frequentist random-effects model of pASE (colony + treatment intercepts).
r = lmer(pASE ~ 0 + (1|colony_of_origin) + (1|treatment),data=s2)
# Bayesian counterpart; sample_prior = TRUE retains prior draws,
# presumably for the hypothesis() call below -- confirm.
f = brm(formula = pASE ~ 0 + (1|colony_of_origin) + (1|treatment),data=s2, family="binomial",sample_prior = TRUE)
# Test whether colony-of-origin explains zero share of the total variance.
h <- paste("sd_colony_of_origin__Intercept^2 / (sd_colony_of_origin__Intercept^2 +",
           "sd_treatment__Intercept^2 + sd^2) = 0")
(hyp2 <- hypothesis(f, h, class = NULL))
|
be21fd57d4165cd4fb46e3978758444b80cf935a
|
04b2447224d6321e0eba324b1b786eab2e3c5f10
|
/digit_f.r
|
24a394c250fb15ea530f5a0bb373973aaeb56759
|
[] |
no_license
|
woofer30/tkfd_model
|
811b607cdd4cd93e8e9eb6d208cfd8ff4dc7f2ff
|
0af91bac794b10470902b403fbd64466ae6a0b39
|
refs/heads/master
| 2020-09-27T14:05:07.463202
| 2016-09-04T07:21:05
| 2016-09-04T07:21:05
| 67,331,323
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 515
|
r
|
digit_f.r
|
## Scenario variables: fiscal policy (comment translated from Japanese)
# Dummy that is 1 only in year F_DLT_Y0_MID (and only for years after 2016).
base$DMY_F_DLT_MID = ifelse(index(base)>2016,ifelse(index(base)==F_DLT_Y0_MID,1,0),0)
# NOTE(review): dexpd() is a project helper not defined in this file; it
# appears to splice the second series into the first from the given year
# onward -- confirm against its definition before relying on this.
base$F_PRB_FIX_RGDP = dexpd(0*base$one, 0.01*base$one ,2013)
base$F_DLT = dexpd(base$F_DLT,base$F_DLT+as.numeric(F_DLT_P1)*base$one,as.numeric(F_DLT_Y1)-1)
# The mid-point year receives only half of the F_DLT_P0 step (via the dummy).
base$F_DLT = dexpd(base$F_DLT,base$F_DLT+as.numeric(F_DLT_P0)*base$one-0.5*as.numeric(F_DLT_P0)*base$DMY_F_DLT_MID,as.numeric(F_DLT_Y0)-1)
base$F_DLT = dexpd(base$F_DLT,base$F_DLT+as.numeric(F_DLT_P)*base$one,as.numeric(F_DLT_Y)-1)
|
a0cbfceca7292f6437c6d92ce25cccb3c35d9af8
|
0b2690313a9a81596ea6ef899dabd192ca050a9d
|
/data-raw/build_fips.R
|
55d9c297f537b380d6f955ae117db5db9cb3fc73
|
[
"MIT"
] |
permissive
|
jeffreyawright/censable
|
e151daf5c7e1305d874741ef94a6a16e6a421252
|
127bfa87242155e0f18240426c5273c44d0116bf
|
refs/heads/main
| 2023-08-17T12:04:20.569014
| 2021-10-07T14:59:28
| 2021-10-07T14:59:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,427
|
r
|
build_fips.R
|
# Build county FIPS lookup tables for the 2000, 2010 and 2020 vintages.

# Create FIPS 2000: fixed-width codes in one raw column -- chars 1-2 are
# the state FIPS, chars 3-5 the county FIPS, the rest the county name.
fips_2000 <- readxl::read_excel('fips_2000.xlsx', col_names = FALSE)
names(fips_2000) <- 'raw'
fips_2000 <- fips_2000 %>%
  dplyr::mutate(
    raw = stringr::str_squish(raw),
    state = stringr::str_sub(raw, 1, 2),
    county = stringr::str_sub(raw, 3, 5)
  )
fips_2000 <- fips_2000 %>%
  dplyr::mutate(name = stringr::str_replace_all(raw, pattern = '[:digit:]', ''))
# county == '000' rows are state-level records; keep counties only.
fips_2000 <- fips_2000 %>%
  dplyr::filter(county != '000')
fips_2000 <- fips_2000 %>%
  dplyr::select(-raw)
# Create FIPS 2010: download the comma-delimited census code file per state.
data(stata)
fips_2010 <- lapply(1:57, function(x) {
  fip <- stata$fips[x]
  labb <- tolower(stata$abb)[x]
  x <- NULL
  # try() so a failed download leaves NULL instead of aborting the loop.
  try({
    x <- readr::read_lines(glue::glue('https://www2.census.gov/geo/docs/reference/codes/files/st{fip}_{labb}_cou.txt'))
  })
  x
})
fips_2010 <- lapply(fips_2010, function(x) {
  do.call('rbind', stringr::str_split(x, ','))
})
fips_2010 <- lapply(fips_2010, function(x) {
  # NOTE(review): names()<- on a matrix sets a flat names attribute, not
  # column names, and the select() below still uses V2/V3/V4 -- these two
  # steps look inconsistent; verify which naming actually takes effect.
  names(x) <- c('abb', 'fips', 'county', 'name', 'v')
  tibble::as_tibble(x)
})
fips_2010 <- do.call('rbind', fips_2010)
fips_2010 <- fips_2010 %>% dplyr::select(state = V2, county = V3, name = V4)
fips_2010 <- fips_2010 %>% dplyr::filter(state %in% unique(fips_2000$state))
# Create FIPS 2020: summary level '050' marks county-level records.
fips_2020 <- readxl::read_excel('all-geocodes-v2020.xlsx', skip = 4)
fips_2020 <- fips_2020 %>%
  dplyr::filter(`Summary Level` == '050')
# NOTE(review): the Excel sheet's columns are not renamed above, so a bare
# `state` column may not exist here -- confirm the sheet's headers.
fips_2020 <- fips_2020 %>% dplyr::filter(state %in% unique(fips_2000$state))
|
b2c0019afa5fb47d647830e3ae73321020f184a1
|
77b27c431d919ff6c463b1eb70576b811c5a84dc
|
/paper_specific_analyses/regional_select_countries_prevalence_and_counts.R
|
7e96d330fae5216eac01273d03538fafe15fbb8a
|
[] |
no_license
|
ihmeuw/cgf
|
5db05acb4778c6a1aa03e314d69f0ea3f3426939
|
0cf00065a42cc7e2bc824d2eb4ee3d9b5a4ae066
|
refs/heads/main
| 2023-04-07T09:31:00.515179
| 2022-03-01T22:06:52
| 2022-03-01T22:06:52
| 463,323,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,956
|
r
|
regional_select_countries_prevalence_and_counts.R
|
# Compute 2020 prevalence (and, for wasting in India, counts) of child
# growth failure (stunting/wasting/underweight) for selected GBD regions,
# printing formatted mean and 95% UI strings for each location.

# load functions and libraries
invisible(sapply(list.files("filepath", full.names = T), source))
library(data.table)
library(ggplot2)
library(dplyr)
library(scales)
library('cowplot', lib.loc = "filepath")
library(gridExtra)
library("ggridges", lib.loc = "filepath")
"%ni%" <- Negate("%in%")
# loading populations (used to convert prevalence draws into counts)
populations <- get_population(gbd_round_id = 7, decomp_step = 'iterative', location_id = 'all', location_set_id = 35,
                              year_id = c(1990:2020), age_group_id = c(1, 2, 3, 388, 389, 238, 34), sex_id = c(1, 2, 3))
populations$run_id <- NULL
# setting id vars to use in melts for this section
id.vars <- c("metric_id", "age_group_id", "location_id", "measure_id", "modelable_entity_id", "sex_id", "year_id", "model_version_id")
################################################################################################################
# 2020 Prevalences of CGF
################################################################################################################
# Stunting: Highlighting Global, South Asia, sub-Sahran Africa, and Southeast Asia
################################################################################################################
stunting.locs <- data.table(location_id = c(1, 158, 166, 9),
                            location_name = c("Global", "South Asia", "Sub-Saharan Africa", "Southeast Asia"))
exposure.cgf.stunting <- get_draws("modelable_entity_id", 10556, year_id=c(2020),
                                   source="epi", gbd_round_id=7, decomp_step="iterative",
                                   age_group_id = 1, sex_id = c(3), location_id = stunting.locs$location_id)
# Melt draws long, then attach population denominators.
exposure.cgf.stunting <- melt(exposure.cgf.stunting, id.vars = id.vars)
exposure.cgf.stunting <- merge(exposure.cgf.stunting, populations)
# Mean and 2.5/97.5 percentiles across draws, per location.
for (loc.id in unique(stunting.locs$location_id)) {
  stunting.2020.mean <- mean(exposure.cgf.stunting[location_id == loc.id]$value)
  stunting.2020.lower <- quantile(exposure.cgf.stunting[location_id == loc.id]$value, probs = .025)
  stunting.2020.upper <- quantile(exposure.cgf.stunting[location_id == loc.id]$value, probs = .975)
  print(paste0(stunting.locs[location_id == loc.id]$location_name, " stunting prevalence in 2020: ", signif(stunting.2020.mean*100,3), "% (",
               signif(stunting.2020.lower*100, 3), "% - ", signif(stunting.2020.upper*100, 3), "%)."))
}
# Wasting: Highlighting Global, South Asia, Southeast Asia, Sahel countries South Sudan and Chad
################################################################################################################
wasting.locs <- data.table(location_id = c(1, 158, 9, 435, 204, 163),
                           location_name = c("Global", "South Asia", "Southeast Asia", "South Sudan", "Chad", "India"))
exposure.cgf.wasting <- get_draws("modelable_entity_id", 10558, year_id=c(2020),
                                  source="epi", gbd_round_id=7, decomp_step="iterative",
                                  age_group_id = 1, sex_id = c(3), location_id = wasting.locs$location_id)
exposure.cgf.wasting <- melt(exposure.cgf.wasting, id.vars = id.vars)
exposure.cgf.wasting <- merge(exposure.cgf.wasting, populations)
# India (163) is excluded here because it is reported as a count below.
for (loc.id in unique(wasting.locs[location_id != 163]$location_id)) {
  wasting.2020.mean <- mean(exposure.cgf.wasting[location_id == loc.id]$value)
  wasting.2020.lower <- quantile(exposure.cgf.wasting[location_id == loc.id]$value, probs = .025)
  wasting.2020.upper <- quantile(exposure.cgf.wasting[location_id == loc.id]$value, probs = .975)
  print(paste0(wasting.locs[location_id == loc.id]$location_name, " wasting prevalence in 2020: ", signif(wasting.2020.mean*100,3), "% (",
               signif(wasting.2020.lower*100, 3), "% - ", signif(wasting.2020.upper*100, 3), "%)."))
}
# Prevalence draws times population give per-draw case counts.
exposure.cgf.wasting[, wasting.count := value * population]
india.wasting.2020.mean <- mean(exposure.cgf.wasting[location_id == 163]$wasting.count)
india.wasting.2020.lower <- quantile(exposure.cgf.wasting[location_id == 163]$wasting.count, probs = .025)
india.wasting.2020.upper <- quantile(exposure.cgf.wasting[location_id == 163]$wasting.count, probs = .975)
print(paste0("In 2020 in India, ", signif(india.wasting.2020.mean/1000000, 3), " Million (", signif(india.wasting.2020.lower/1000000, 3), " Million - ",
             signif(india.wasting.2020.upper/1000000, 3), " Million) children experienced wasting."))
# Underweight: Highlighting Global, South Asia, Southeast Asia, and sub-Saharal Africa
################################################################################################################
underweight.locs <- data.table(location_id = c(1, 158, 9, 166),
                               location_name = c("Global", "South Asia", "Southeast Asia", "Sub-Saharan Africa"))
exposure.cgf.underweight <- get_draws("modelable_entity_id", 10560, year_id=c(2020),
                                      source="epi", gbd_round_id=7, decomp_step="iterative",
                                      age_group_id = 1, sex_id = c(3), location_id = c(underweight.locs$location_id))
exposure.cgf.underweight <- melt(exposure.cgf.underweight, id.vars = id.vars)
exposure.cgf.underweight <- merge(exposure.cgf.underweight, populations)
for (loc.id in unique(underweight.locs$location_id)) {
  underweight.2020.mean <- mean(exposure.cgf.underweight[location_id == loc.id]$value)
  underweight.2020.lower <- quantile(exposure.cgf.underweight[location_id == loc.id]$value, probs = .025)
  underweight.2020.upper <- quantile(exposure.cgf.underweight[location_id == loc.id]$value, probs = .975)
  print(paste0(underweight.locs[location_id == loc.id]$location_name, " underweight prevalence in 2020: ", signif(underweight.2020.mean*100,3), "% (",
               signif(underweight.2020.lower*100, 3), "% - ", signif(underweight.2020.upper*100, 3), "%)."))
}
|
ae9ae16a1f67019f1e99a1661c39508302c60666
|
92445969b4bde82452b46bc4745a760d11f80863
|
/Exerciese/Ch9/Ch9-4.R
|
5597a7cd223426dd3f73c47f5a7366b2ff581470
|
[] |
no_license
|
ChenPH0522/ISLR
|
c44c3cd2ee5e080da922f4d9551bf5fed31b03cb
|
cfc961e5b3f26144ee60c8a0174d19773ae5387e
|
refs/heads/master
| 2020-04-22T18:52:36.910641
| 2019-08-08T01:54:35
| 2019-08-08T01:54:35
| 170,590,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
Ch9-4.R
|
# ISLR Chapter 9, Exercise 4: compare SVM kernels on a 2-D dataset whose
# class boundary is the unit circle (non-linearly separable).
library(e1071)
set.seed(1)
# ------------------------------------
# Simulate n standard-normal points in 2-D; label 1 inside the unit
# circle, 0 outside.
n = 100
p = 2
x = matrix(rnorm(n*p), ncol=p)
y = ifelse( x[,1]^2+x[, 2]^2 < 1, 1, 0 )
plot(x, col=(2-y))

# SVM: polynomial kernel (degree 2 matches the quadratic boundary)
dat = data.frame(x=x, y=as.factor(y))
svm.poly = svm(y~., data=dat, kernel='polynomial', degree=2, cost=1)
summary(svm.poly)
plot(svm.poly, dat)
svm.pred = predict(svm.poly, newdata=dat)
table(pred=svm.pred, actual=y)       # error rate: 4%

# SVM: radial kernel
dat = data.frame(x=x, y=as.factor(y))
svm.radial = svm(y~., data=dat, kernel='radial', gamma=0.1, cost=1)
summary(svm.radial)
plot(svm.radial, dat)
svm.pred = predict(svm.radial, newdata=dat)
table(pred=svm.pred, actual=y)       # error rate: 10%

# Support Vector Classifier (linear kernel -- expected to do poorly on a
# circular boundary)
dat = data.frame(x=x, y=as.factor(y))
svm.linear = svm(y~., data=dat, kernel='linear', cost=10, scale=FALSE)
summary(svm.linear)
plot(svm.linear, dat)
svm.pred = predict(svm.linear, newdata=dat)
table(pred=svm.pred, actual=y)       # error rate: 44%
|
8943a47c9b1fc7636ad15b89ab01bd02dbf84b3d
|
95e7aff56e3ca1db4a7968e24b8fcaa178a57237
|
/XGBOOST_importance_plots.R
|
0a4e28698ea805f3b773bb4cda391f51b7ba6116
|
[] |
no_license
|
eherdter/Seatrout_ENV_Chapter2
|
2525954cde07b41e8805523bb6b94f1975edac94
|
6bf05d4bd2efacd5ac2160cd878e424d4bb578f2
|
refs/heads/master
| 2021-10-08T14:59:07.902986
| 2018-12-13T19:59:08
| 2018-12-13T19:59:08
| 109,744,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,701
|
r
|
XGBOOST_importance_plots.R
|
# Importance plots for BRT (XGBoost) and GLM models; input tables live under
# "Seatrout_ENV_Chapter2/xgboost_results".
# NOTE(review): rm(list = ls()) wipes the caller's workspace -- tolerable for a
# standalone analysis script, but do not source() this file from other code.
rm(list=ls())
# Set Location: toggle IS_HOME to switch between home and work machine paths.
IS_HOME = FALSE
if (IS_HOME == TRUE) {
  data = "~/Desktop/PhD project/Projects/Seatrout/Data/Exported R Dataframes/Seatrou"
  source_location= "/Desktop/PhD project/Projects/Seatrout/Seatrout_ENV_Chapter2"
  out = "~/Desktop/PhD project/Projects/Seatrout/Data/Exported R Dataframes"
  setwd(data)
  source("~/Desktop/PhD project/Projects/Seatrout/Seatrout_ENV_Chapter2/brt.functions.R")
} else {
  data = "U:/PhD_projectfiles/Exported_R_Datafiles/Seatrout_ENV_Chapter2"
  setwd(data)
}
library(tidyverse)
library(xgboost)
# Master list of every candidate model feature.  Each estuary's importance
# table is full-joined against this below so that all regions carry the same
# feature set (features a model never used get importance 0).
var_names =data.frame(c("number", "year", "month", "salinity", "temperature", "riv_flow","allrivers", "Nit_val",
                        "Z_anom", "MaxT_anom", "MinT_anom", "TotalMonthlyRF",
                        "aten_ceof", "winter_dis" , "prev_autumn_dis",
                        "winter_Z_anom", "winter_MaxT_anom", "winter_MinT_anom" ,
                        "prev_autumn_Z_anom", "prev_autumn_MaxT_anom",
                        "prev_autumn_MinT_anom", "winter_RF", "prev_autumn_RF",
                        "winter_dis_ALL", "prev_autumn_dis_ALL", "atspawn_salinity",
                        "atspawn_waterT",
                        "DissolvedO2", "StartDepth",
                        "atspawn_nitro", "avg_last2_nitro", "avg_last3_nitro", "Clor_val") )
colnames(var_names) <- "Feature"
# data.table is needed for setDT() below.  library() rather than require():
# we want a hard error, not a FALSE return value, if the package is missing.
library(data.table)

# Read one estuary's XGBoost importance table and normalise it:
#   * keep only features with Gain > 0.01,
#   * full-join against the master feature list (var_names) so every region
#     carries the complete feature set -- unused features get NA importance,
#     which is then set to 0,
#   * tag each row with the region code for later faceting/fill.
#
# region  character estuary code used in the file name (e.g. "AP", "CK").
# Returns a data frame with columns Feature, Gain/Cover/Frequency and Area.
# (Replaces six copy-pasted, otherwise identical loading chunks.)
load_region_importance <- function(region) {
  imp <- read.csv(paste(data, "/xgboost_results/", region, "4_pos_importance.csv", sep=""), header=TRUE)
  setDT(imp)
  imp <- imp[imp$Gain > 0.01,]
  imp$Feature <- as.character(imp$Feature)
  imp <- full_join(var_names, imp)
  imp$Area <- region
  imp[is.na(imp)] <- 0
  imp
}

AP <- load_region_importance("AP")
CK <- load_region_importance("CK")
TB <- load_region_importance("TB")
CH <- load_region_importance("CH")
IR <- load_region_importance("IR")
JX <- load_region_importance("JX")
# Combine the six estuaries and fix the Area factor order used in the legends.
all <- rbind(AP, CK, TB, CH, JX, IR)
all$Area <- factor(all$Area, levels=c("AP", "CK", "TB", "CH", "JX", "IR"))
# Drop features that should not appear in the figures (row counter, at-spawn
# variables, lagged nitrogen averages, etc.).
all <- all[!(all$Feature %in% c("number", "aten_ceof", "avg_last3_nitro", "avg_last2_nitro", "astspawn_nitro", "atspawn_watert", "atspawn_salinity", "prev aut river flow all rivers")),]
# Re-label the remaining raw feature codes as human-readable, upper-case names.
all$Feature <- toupper(all$Feature)
all$Feature[all$Feature == "RIV_FLOW"] <- "RIVER FLOW (CLOSEST)"
all$Feature[all$Feature == "ALLRIVERS"] <- "RIVER FLOW (ALL)"
all$Feature[all$Feature == "TOTALMONTHLYRF"] <- "PRECIPITATION"
all$Feature[all$Feature == "WINTER_RF"] <- "WINTER PRECIPITATION"
all$Feature[all$Feature == "WINTER_DIS_ALL"] <- "WINTER RIVER FLOW ALL RIVERS"
all$Feature[all$Feature == "WINTER_DIS"] <- "WINTER RIVER FLOW (CLOSEST)"
all$Feature[all$Feature == "PREV_AUTUMN_RF"] <- "PREV AUT PRECIPITATION"
all$Feature[all$Feature == "PREV_AUTUMN_DIS"] <- "PREV AUT RIVER FLOW (CLOSEST)"
all$Feature[all$Feature == "NIT_VAL"] <- "DIN"
all$Feature[all$Feature == "CLOR_VAL"] <- "CHLOR A"
# NOTE(review): "EXT_CEOF" never occurs (the code is "ATEN_CEOF", and that
# feature was removed above), so this rename is a no-op -- confirm intent.
all$Feature[all$Feature == "EXT_CEOF"] <- "ATEN COEF"
all$Feature[all$Feature == "STARTDEPTH"] <- "DEPTH"
all$Feature[all$Feature == "Z_ANOM"] <- "Z ANOM"
all$Feature[all$Feature == "MAXT_ANOM"] <- "MAX T ANOM"
all$Feature[all$Feature == "MINT_ANOM"] <- "MIN T ANOM"
all$Feature[all$Feature == "WINTER_Z_ANOM"] <- "WINTER Z ANOM"
all$Feature[all$Feature == "WINTER_MAXT_ANOM"] <- "WINTER MAXT ANOM"
all$Feature[all$Feature == "WINTER_MINT_ANOM"] <- "WINTER MINT ANOM"
all$Feature[all$Feature == "PREV_AUTUMN_Z_ANOM"] <- "PREV AUTUMN Z ANOM"
all$Feature[all$Feature == "PREV_AUTUMN_MAXT_ANOM"] <- "PREV AUTUMN MAXT ANOM"
all$Feature[all$Feature == "PREV_AUTUMN_MINT_ANOM"] <- "PREV AUTUMN MINT ANOM"
# Feature-category vectors: climate/disturbance anomalies, FIM survey
# covariates, freshwater input, and biology.  Their concatenation fixes the
# y-axis order of the plots and restricts `all` to categorised features.
CD_vars = c("Z ANOM","MAX T ANOM","MIN T ANOM","PREV AUTUMN Z ANOM","PREV AUTUMN MAXT ANOM" , "PREV AUTUMN MINT ANOM", "WINTER Z ANOM","WINTER MAXT ANOM","WINTER MINT ANOM")
Fim_vars = c("YEAR" ,"MONTH","SALINITY" ,"TEMPERATURE" ,"DISSOLVEDO2" ,"DEPTH", "ATEN COEF")
water_vars =c("PRECIPITATION" ,"RIVER FLOW (CLOSEST)","RIVER FLOW (ALL)", "WINTER RIVER FLOW (CLOSEST)","PREV AUT RIVER FLOW (CLOSEST)","WINTER PRECIPITATION","PREV AUT PRECIPITATION","WINTER RIVER FLOW ALL RIVERS")
bio_vars = c("DIN", "CHLOR A" )
#xgb.ggplot.importance(AP) + guides(fill=FALSE) + theme_bw()
orderfeats = c(Fim_vars, bio_vars, CD_vars, water_vars)
all <- all[all$Feature %in% orderfeats,]
all$Feature <- factor(all$Feature, levels=orderfeats)
# --- all-region importance figure -------------------------------------------
# Each figure is written to a png device; the stop() guard refuses to
# overwrite an existing file, so delete old outputs before re-running.
File <- (paste("U:/PhD_projectfiles/Figures/importance_plots.png"))
if (file.exists(File)) stop(File, " already exists")
dir.create(dirname(File), showWarnings = FALSE)
png(File, units="in", width=7, height=9, res=300)
# Horizontal dodged bars of Gain per feature, coloured by estuary; the dashed
# vlines draw separators between adjacent features after coord_flip().
ggplot(all, aes(x=Feature, y=Gain, fill=Area)) + geom_bar(stat="identity", position=position_dodge(), width=0.75) +
  theme(panel.grid.minor=element_line(colour='grey'),panel.background=element_rect(fill='white', colour='black'),axis.text.x = element_text(angle=0, vjust=0.5, size=16)) +
  coord_flip() +
  xlab("Variable")+
  geom_vline(xintercept=seq(1.5, length(unique(all$Feature))-0.5, 1),
             lwd=0.25,linetype="dashed", colour="black")
dev.off()
# --- southwest management region (TB, CH) -----------------------------------
File <- (paste("U:/PhD_projectfiles/Figures/importance_plots_SWregion.png"))
if (file.exists(File)) stop(File, " already exists")
dir.create(dirname(File), showWarnings = FALSE)
png(File, units="in", width=7, height=9, res=300)
all_sw <- all[all$Area %in% c("TB", "CH"),]
# Drop features irrelevant (or absent) for the SW estuaries.
all_sw <- all_sw[!(all_sw$Feature %in% c("FIRST_SPAWN_WATERT", "FIRST_SPAWN_SALINITY", "PREV AUT PRECIPITATION", "WINTER PRECIPITATION", "PREV AUTUMN MINT ANOM", "WINTER Z ANOM")),]
ggplot(all_sw, aes(x=Feature, y=Gain, fill=Area)) + geom_bar(stat="identity", position=position_dodge(), width=0.75) +
  theme(panel.grid.minor=element_line(colour='grey'),panel.background=element_rect(fill='white', colour='black'),axis.text.x = element_text(angle=0, vjust=0.5, size=16)) +
  coord_flip() +
  xlab("Variable")+
  geom_vline(xintercept=seq(1.5, length(unique(all$Feature))-0.5, 1),
             lwd=0.25,linetype="dashed", colour="black")
dev.off()
# --- northwest management region (AP, CK) -----------------------------------
File <- (paste("U:/PhD_projectfiles/Figures/importance_plots_NWregion.png"))
if (file.exists(File)) stop(File, " already exists")
dir.create(dirname(File), showWarnings = FALSE)
png(File, units="in", width=7, height=9, res=300)
all_nw <- all[all$Area %in% c("AP", "CK"),]
all_nw <- all_nw[!all_nw$Feature %in% c("CHLOR A", "DIN", "WINTER RIVER FLOW ALL RIVERS", "PREV AUTUMN MAXT ANOM"),]
ggplot(all_nw, aes(x=Feature, y=Gain, fill=Area)) + geom_bar(stat="identity", position=position_dodge(), width=0.75) +
  theme(panel.grid.minor=element_line(colour='grey'),panel.background=element_rect(fill='white', colour='black'),axis.text.x = element_text(angle=0, vjust=0.5, size=16)) +
  coord_flip() +
  xlab("Variable")+
  geom_vline(xintercept=seq(1.5, length(unique(all$Feature))-0.5, 1),
             lwd=0.25,linetype="dashed", colour="black")
dev.off()
# --- per-category subsets used for the 4-panel figure below -----------------
# BUG FIX: the original filters matched the raw lower-case feature codes
# (e.g. "prev_autumn_Z_anom"), but at this point all$Feature has already been
# upper-cased and re-labelled above (e.g. "PREV AUTUMN Z ANOM"), so every
# subset came back empty and the category panels plotted nothing.  Filter on
# the current labels instead; the category vectors defined above hold exactly
# those labels, which also makes the old per-subset toupper()/re-label steps
# unnecessary.
all_CD <- all[all$Feature %in% CD_vars,]
all_fim <- all[all$Feature %in% Fim_vars,]
# Water panel keeps the original selection: the closest-river and all-rivers
# winter flows, closest-river autumn flow, and the precipitation series.
# (The all-rivers autumn flow was explicitly excluded in the original; the
# plain "RIVER FLOW (ALL)" series was never part of this panel either.)
all_water <- all[all$Feature %in% c("RIVER FLOW (CLOSEST)", "PRECIPITATION",
                                    "WINTER RIVER FLOW (CLOSEST)", "PREV AUT RIVER FLOW (CLOSEST)",
                                    "WINTER PRECIPITATION", "PREV AUT PRECIPITATION",
                                    "WINTER RIVER FLOW ALL RIVERS"),]
all_bio <- all[all$Feature %in% bio_vars,]
# Build one importance panel per variable category (climate/disturbance
# anomalies, freshwater input, FIM survey covariates, biology); each is a
# horizontal dodged bar chart with dashed separators between features.
CD <- ggplot(all_CD, aes(x=Feature, y=Gain, fill=Area)) + geom_bar(stat="identity", position=position_dodge(), width=0.75) +
  scale_x_discrete(labels=function(x) str_wrap(x, width=10))+  # wrap long axis labels
  theme(panel.grid.minor=element_line(colour='grey'),panel.background=element_rect(fill='white', colour='black'),axis.text.x = element_text(angle=0, vjust=0.5, size=16)) +
  coord_flip() + geom_vline(xintercept=seq(1.5, length(unique(all_CD$Feature))-0.5, 1),
                            lwd=1,linetype="dashed", colour="black")
Water <- ggplot(all_water, aes(x=Feature, y=Gain, fill=Area)) + geom_bar(stat="identity", position=position_dodge(), width=0.75) +
  theme(panel.grid.minor=element_line(colour='grey'),panel.background=element_rect(fill='white', colour='black'),axis.text.x = element_text(angle=0, vjust=0.5, size=16)) +
  coord_flip() + geom_vline(xintercept=seq(1.5, length(unique(all_water$Feature))-0.5, 1),
                            lwd=1,linetype="dashed", colour="black")
fim <- ggplot(all_fim, aes(x=Feature, y=Gain, fill=Area)) + geom_bar(stat="identity", position=position_dodge(), width=0.75) +
  theme(panel.grid.minor=element_line(colour='grey'),panel.background=element_rect(fill='white', colour='black'),axis.text.x = element_text(angle=0, vjust=0.5, size=16)) +
  coord_flip() + geom_vline(xintercept=seq(1.5, length(unique(all_fim$Feature))-0.5, 1),
                            lwd=1,linetype="dashed", colour="black")
bio <- ggplot(all_bio, aes(x=Feature, y=Gain, fill=Area)) + geom_bar(stat="identity", position=position_dodge(), width=0.75) +
  theme(panel.grid.minor=element_line(colour='grey'),panel.background=element_rect(fill='white', colour='black'),axis.text.x = element_text(angle=0, vjust=0.5, size=16)) +
  coord_flip() + geom_vline(xintercept=seq(1.5, length(unique(all_bio$Feature))-0.5, 1),
                            lwd=1,linetype="dashed", colour="black")
library(gridExtra)
# Arrange the four category panels in a 2x2 grid on the active device.
grid.arrange(CD, Water, fim, bio, ncol=2)
# --- quick-look plots of the "_pospos_" importance variant ------------------
# These use xgboost's built-in importance plot, one estuary at a time.
# Top-level calls auto-print each ggplot object to the active device.
require(data.table)
AP <- read.csv(paste(data, "/xgboost_results/AP4_pospos_importance.csv", sep=""), header=T)
setDT(AP)
xgb.ggplot.importance(AP) + guides(fill=FALSE) + theme_bw()
CK <- read.csv(paste(data, "/xgboost_results/CK4_pospos_importance.csv", sep=""), header=T)
setDT(CK)
xgb.ggplot.importance(CK) + guides(fill=FALSE) + theme_bw()
TB <- read.csv(paste(data, "/xgboost_results/TB4_pospos_importance.csv", sep=""), header=T)
setDT(TB)
xgb.ggplot.importance(TB) + guides(fill=FALSE) + theme_bw()
CH <- read.csv(paste(data, "/xgboost_results/CH4_pospos_importance.csv", sep=""), header=T)
setDT(CH)
xgb.ggplot.importance(CH) + guides(fill=FALSE) + theme_bw()
IR <- read.csv(paste(data, "/xgboost_results/IR4_pospos_importance.csv", sep=""), header=T)
setDT(IR)
xgb.ggplot.importance(IR) + guides(fill=FALSE) + theme_bw()
JX <- read.csv(paste(data, "/xgboost_results/JX4_pospos_importance.csv", sep=""), header=T)
setDT(JX)
xgb.ggplot.importance(JX) + guides(fill=FALSE) + theme_bw()
|
33c834b1e1f9e362e1b31f819d56372185997aef
|
6454ad6876db56ce0bdf0a6cbc45efe7502cecaa
|
/R/final_model.R
|
16ccec4c3626af7588001bf44d5a6740a95c1993
|
[] |
no_license
|
dpoursanidis/modleR
|
aa148a01556f3dbf2299f837bd90a7bf36725e2d
|
97ea4cfc95297cd244fbd58fd252f1cacd1bcd5f
|
refs/heads/master
| 2020-07-30T18:08:55.220380
| 2019-08-15T13:54:41
| 2019-08-15T13:54:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,802
|
r
|
final_model.R
|
#' Joins ENM from several partitions, creating a model per algorithm.
#'
#' This function reads the output from dismo.mod and creates a model per species
#' @param species_name A character string with the species name
#' @param algorithms Which algorithms will be processed. If no name is given it
#' will process all algorithms present in the evaluation files
#' @param weight_par Which performance statistic should be used to weight the
#' partitions. Defaults to NULL but either \code{c("AUC", "TSS")} can be used.
#' @param select_partitions Logical. Should partitions be filtered by
#' performance (see \code{select_par}/\code{select_par_val}) before averaging?
#' @param threshold Which selecting threshold will be used to cut the mean
#' models, it defaults to "spec_sens" but any dismo threshold
#' can be used: "kappa", "no_omission", "prevalence",
#' "equal_sens_spec", "sensitivity". A numeric value may also be supplied.
#' @param scale_models Logical. Whether input models should be scaled between 0
#' and 1
#' @param select_par Which performance statistic should be used to select the
#' partitions. Defaults to "TSS" but "AUC" can also be used.
#' @param select_par_val Threshold to select models from TSS values
#' @param consensus_level Which proportion of models will be kept when creating
#' \code{bin_consensus} (binary)
#' @param models_dir Character. Folder path where the input files are located
#' @param final_dir Character. Name of the folder to save the output files.
#' A subfolder will be created.
#' @param proj_dir Character. The name of the subfolder with the projection.
#' Defaults to "present" but can be set according to the other projections (i.e.
#' to execute the function in projected models)
#' @param which_models Which final model(s) will be created. Currently it can be:
#' \describe{
#' \item{\code{raw_mean}}{the (optionally weighted) mean of the selected raw
#' models}
#' \item{\code{raw_mean_th}}{the binary model created by cutting
#' \code{raw_mean} by the mean of the partition thresholds that maximize the
#' selected evaluation metric (e.g. \code{spec_sens} or another dismo
#' threshold)}
#' \item{\code{raw_mean_cut}}{the cut model created by recovering
#' \code{raw_mean} values above that mean threshold}
#' \item{\code{bin_mean}}{the mean of the selected binary models}
#' \item{\code{bin_consensus}}{the binary consensus from \code{bin_mean}.
#' \code{consensus_level} must be defined, 0.5 means a majority consensus}
#' \item{\code{cut_mean}}{the mean of the selected cut models}
#' }
#' @param uncertainty Whether an uncertainty map, measured as range (max-min)
#' should be calculated
#' @param write_png Writes png files of the final models
#' @param ... Other parameters from writeRaster
#' @return The joined evaluation statistics data frame (also written to disk);
#' a set of ecological niche models and figures (optional) is written in the
#' \code{final_dir} subfolder as a side effect
#' @import raster
#' @importFrom utils read.table write.csv read.csv
#' @export
final_model <- function(species_name,
                        algorithms = NULL,
                        weight_par = NULL,
                        select_partitions = TRUE,
                        threshold = c("spec_sens"),
                        scale_models = TRUE,
                        select_par = "TSS",
                        select_par_val = 0.7,
                        consensus_level = 0.5,
                        models_dir = "./models",
                        final_dir = "final_models",
                        proj_dir = "present",
                        which_models = c("raw_mean"),
                        uncertainty = FALSE,
                        write_png = TRUE,
                        ...) {
  # Output folder for the final models (created on first use).
  final_path <- paste(models_dir, species_name, proj_dir,
                      final_dir, sep = "/")
  if (!file.exists(final_path)) {
    dir.create(final_path)
  }
  print(date())
  cat(paste(species_name, "\n"))
  cat(paste("Reading evaluation files for", species_name, "in", proj_dir, "\n"))
  # Join every per-partition evaluation table into one statistics data frame.
  # NOTE(review): statistics are always read from the "present" projection,
  # even when proj_dir differs -- presumably because partition evaluations
  # only exist for the fitting period; confirm this is intended.
  evall <- list.files(
    path = paste0(models_dir, "/", species_name, "/present/partitions"),
    pattern = "^evaluate.+.csv$", full.names = TRUE)
  lista_eval <- lapply(evall, read.csv, header = TRUE)
  stats <- data.table::rbindlist(lista_eval)
  stats <- as.data.frame(stats)
  names(stats)[1] <- "species"
  write.csv(stats, file = paste0(models_dir, "/", species_name, "/present/",
                                 final_dir, "/", species_name,
                                 "_final_statistics.csv"))
  # If the user does not specify algorithms, process every algorithm present
  # in the evaluation files.
  if (is.null(algorithms)) {
    algorithms <- unique(stats$algoritmo)
  }
  algorithms <- as.factor(algorithms)
  for (algo in algorithms) {
    final_algo <- raster::stack()  # collects the final layers for this algorithm
    cat(paste("Extracting data for", species_name, algo, "\n"))
    stats.algo <- stats[stats$algoritmo == algo, ]
    n.part <- nrow(stats.algo)  # how many partitions there were
    cat(paste("Reading models from .tif files", "\n"))
    modelos.cont <-
      list.files(
        path = paste0(models_dir, "/", species_name, "/", proj_dir,
                      "/partitions"),
        full.names = TRUE,
        pattern = paste0(algo, "_cont_", ".*tif$")
      )
    mod.cont <- raster::stack(modelos.cont)
    # Partition selection: keep only partitions whose select_par statistic
    # (e.g. TSS) reaches select_par_val; otherwise use all partitions.
    sel.index <- 1:n.part
    if (isTRUE(select_partitions)) {
      cat(paste("selecting partitions for", species_name, algo, "\n"))
      sel.index <- which(stats.algo[, select_par] >= select_par_val)
    }
    # Optional weighting of the means by a performance statistic; TSS lives in
    # [-1, 1] and is rescaled to [0, 1] so the weights are non-negative.
    if (!is.null(weight_par)) {
      pond.stats <- stats.algo[, weight_par][sel.index]
      if ("TSS" %in% weight_par)
        pond.stats <- (pond.stats + 1) / 2
    } else {
      pond.stats <- rep(1, length(sel.index))  # equal weights
    }
    if (length(sel.index) == 0) {
      cat(paste("No partition selected", species_name, algo, proj_dir, "\n"))
    } else if (length(sel.index) != 0) {
      message(paste(length(sel.index), "/", n.part,
                    "partitions will be used for", species_name, algo, "\n"))
      if (length(sel.index) == 1) {
        warning(paste("when only one partition is selected some final models
                      are identical", "\n"))
        # Duplicate the single layer (and its weight) so the downstream stack
        # operations still receive a multi-layer object.
        cont.sel.1 <- mod.cont[[c(sel.index, sel.index)]]
        pond.stats <- c(pond.stats, pond.stats)
      }
      if (length(sel.index) > 1) {
        cont.sel.1 <- mod.cont[[sel.index]]
      }
      # --- models derived from the mean of the raw (continuous) partitions --
      raw_mean <- raster::weighted.mean(cont.sel.1, w = pond.stats)
      if ("raw_mean" %in% which_models) {
        names(raw_mean) <- "raw_mean"
        final_algo <- raster::addLayer(final_algo, raw_mean)
      }
      if (any(c("raw_mean_th", "raw_mean_cut") %in% which_models)) {
        # A numeric threshold is used as-is; a dismo threshold name is
        # averaged over the selected partitions.
        if (is.numeric(threshold)) {
          th.mean <- threshold
        } else {
          th.mean <- mean(stats.algo[, threshold][sel.index])
        }
        raw_mean_th <- (raw_mean > th.mean)  # binary version of raw_mean
        if ("raw_mean_th" %in% which_models) {
          names(raw_mean_th) <- "raw_mean_th"
          final_algo <- raster::addLayer(final_algo, raw_mean_th)
        }
        if ("raw_mean_cut" %in% which_models) {
          raw_mean_cut <- raw_mean * raw_mean_th  # raw values above threshold
          names(raw_mean_cut) <- "raw_mean_cut"
          final_algo <- raster::addLayer(final_algo, raw_mean_cut)
        }
      }
      # --- models built by binarising each partition individually -----------
      if (any(c("bin_mean", "cut_mean", "bin_consensus") %in% which_models)) {
        if (is.numeric(threshold)) {
          # Rescale first so a single numeric threshold is comparable
          # across layers.
          cont.sel.1_scaled <- rescale_layer(cont.sel.1)
          mod.sel.bin <- cont.sel.1_scaled > threshold
        } else {
          mod.sel.bin <- cont.sel.1 > (stats.algo[, threshold][sel.index])
        }
        if (any(c("bin_mean", "bin_consensus") %in% which_models)) {
          bin_mean <- raster::weighted.mean(mod.sel.bin, w = pond.stats)
          names(bin_mean) <- "bin_mean"
          final_algo <- raster::addLayer(final_algo, bin_mean)
          if ("bin_consensus" %in% which_models) {
            if (is.null(consensus_level)) {
              stop("consensus_level must be specified")
            }
            # Proportion-based consensus across the binary partitions
            # (0.5 = majority rule).
            bin_consensus <- (bin_mean > consensus_level)
            names(bin_consensus) <- "bin_consensus"
            final_algo <- raster::addLayer(final_algo, bin_consensus)
          }
        }
        if ("cut_mean" %in% which_models) {
          # Mean of the per-partition cut models (raw values kept only where
          # each partition exceeds its own threshold).
          mod.cut.sel <- mod.sel.bin * cont.sel.1
          cut_mean <- raster::weighted.mean(mod.cut.sel, w = pond.stats)
          names(cut_mean) <- "cut_mean"
          final_algo <- raster::addLayer(final_algo, cut_mean)
        }
      }
      if (isTRUE(scale_models)) {
        final_algo <- rescale_layer(final_algo)
      }
      # Uncertainty layer: per-pixel range (max - min) across the selected
      # raw partitions.  NOTE(review): the layer name does not include the
      # algorithm, so with several algorithms the name repeats per algorithm.
      if (isTRUE(uncertainty)) {
        raw_inctz <- raster::calc(cont.sel.1,
                                  fun = function(x) {max(x) - min(x)})
        names(raw_inctz) <- "raw_uncertainty"
        final_algo <- raster::addLayer(final_algo, raw_inctz)
      }
      cat(paste("selected final models for", species_name, algo, "DONE", "\n"))
    }
    # --- write the requested layers for this algorithm ----------------------
    if (raster::nlayers(final_algo) != 0) {
      if (isTRUE(uncertainty)) {
        which_f <- c(which_models, "raw_uncertainty")
      } else {
        which_f <- which_models
      }
      which_final <- final_algo[[which_f]]
      message(paste("writing models", algo, names(which_final), "\n"))
      if (raster::nlayers(which_final) > 1) {
        raster::writeRaster(which_final,
                            filename = paste0(final_path,
                                              "/", species_name, "_", algo),
                            suffix = "names",
                            bylayer = TRUE,
                            format = "GTiff", ...)
      }
      if (raster::nlayers(which_final) == 1) {
        raster::writeRaster(which_final,
                            filename = paste0(final_path,
                                              "/", species_name, "_", algo,
                                              "_", names(which_final)),
                            format = "GTiff", ...)
      }
      if (isTRUE(write_png)) {
        for (i in seq_len(raster::nlayers(which_final))) {
          png(filename = paste0(final_path, "/",
                                species_name, "_", algo, "_",
                                names(which_final)[i], ".png"))
          raster::plot(which_final[[i]], main = names(which_final)[i])
          dev.off()
        }
      }
    }
  }
  print(paste("DONE", algo, "\n"))
  # FIX: print(date()) used to come after return(stats) and was unreachable;
  # log the timestamp first, then return the joined statistics table.
  print(date())
  stats
}
|
116536b70e927b12d835c3581e13761ff4c25014
|
a6cf1dcd8e192497e2a78176d3d08a60db501b27
|
/WBYU.R
|
b2f5dba1c987f5ee5de8614454309610e403c0fa
|
[] |
no_license
|
aaronabraham311/World-Bank-Youth-Unemployment-EDA
|
360438a01fa0a97b11c56540188a12e815e05621
|
506e58fac7338cdbedb5f33b26faef005668b7a0
|
refs/heads/master
| 2021-06-29T18:27:18.401846
| 2017-09-19T14:46:16
| 2017-09-19T14:46:16
| 103,993,583
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,334
|
r
|
WBYU.R
|
# Including libraries
library(tidyverse)
library(rworldmap)
# Loading and viewing data
data <- read_csv("API_ILO_country_YU.csv")
View(data)
# Notice that there are rows that are not countries. Rather, it is a classification (based on income or continent)
# First question: how does youth unemployment change over years?
# NOTE(review): these means include the aggregate (non-country) rows noted
# above, so regions/income groups are double counted -- consider filtering.
averageYears <- tribble (~year, ~average,
                         "2010", mean(data$"2010"),
                         "2011", mean(data$"2011"),
                         "2012", mean(data$"2012"),
                         "2013", mean(data$"2013"),
                         "2014", mean(data$"2014"))
# geom_col() is the idiomatic equivalent of geom_histogram()/geom_bar() with
# stat = "identity": the y values are already computed, nothing to bin/count.
ggplot(data = averageYears) + geom_col(mapping = aes(x = year, y = average, fill = year)) # Plot shows that average unemployment doesn't change much over the year.
# Relative change in unemployment per country, 2010 -> 2014.
data <- data %>% mutate(averageChange = (`2014` - `2010`)/ `2014`)
ggplot(data) + geom_col(aes(x = `Country Name`, y = `averageChange`)) # Most generally showcase small changes, should investigate outliers
# Make a horizontal bar graph that displays % change in unemployment from 2010 to 2014 in BRICS
brics <- c("Brazil", "Russian Federation", "China", "India",
           "South Africa")
pcUnemBrics <- data %>% filter(`Country Name` %in% brics) %>%
  mutate(averageChange = (`2014` - `2010`)/`2014`)
ggplot(pcUnemBrics) + geom_col(mapping = aes(x = `Country Name`, y = `averageChange`,
                                             fill = `Country Name`)) + coord_flip()
# Conclusion: unemployment in Brazil and Russia fell. India barely changed
# Horizontal bar graph of G7
g7 <- c("Canada", "France", "Germany", "Italy", "Japan", "United Kingdom", "United States", "European Union")
pcUnemG7 <- data %>% filter(`Country Name` %in% g7) %>%
  mutate(averageChange = (`2014` - `2010`)/`2014`)
arrange(pcUnemG7, averageChange)
ggplot(pcUnemG7) + geom_col(mapping = aes(x = `Country Name`, y = `averageChange`,
                                          fill = `Country Name`)) + coord_flip()
# Conclusion: most G7 countries were able to have positive employment gains, but Italy (proabably due to bankruptcy) had unemployment skyrocket
# Horizontal bar graph of countries that almost bankrupted?!?
# Map unemployment in countries: http://blog.kaggle.com/2016/11/30/seventeen-ways-to-map-data-in-kaggle-kernels/
mapped_data <- joinCountryData2Map(data, joinCode = "ISO3",
                                   nameJoinColumn = "Country Code")
par(mai=c(0,0,0.2,0),xaxs="i",yaxs="i")
mapCountryData(mapped_data, nameColumnToPlot = "averageChange")
# Comparing youth unemployment in European regions
# NOTE(review): row 30 is assumed to be the "Central Europe" aggregate row --
# fragile positional indexing; confirm against the CSV, or select by name.
centralEurope <- data[30, 3:7]
centralEurope <- tribble (~year, ~rate,
                          "2010", centralEurope$`2010`,
                          "2011", centralEurope$`2011`,
                          "2012", centralEurope$`2012`,
                          "2013", centralEurope$`2013`,
                          "2014", centralEurope$`2014`)
ggplot(data = centralEurope) + geom_col(mapping = aes(x = year, y = rate, fill = year)) #2014 had a drastic drop
|
18fa640bfd24b0d9de393960a016431b9ab12dac
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/fmf/R/RcppExports.R
|
d87eb08404524491c5ced4f8de882043b50cbc3a
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Auto-generated R wrappers around the package's compiled C++ routines; each
# simply forwards its arguments unchanged via .Call().  Any edits here will be
# overwritten the next time compileAttributes() runs.

# k-nearest-neighbour search delegated to C++ (semantics of `method`,
# `threads`, `eps` are defined by the C++ implementation).
knn_index_dist_rcpp <- function(MATRIX, TEST_DATA, k, method, threads, eps = 1.0e-6) {
    .Call(`_fmf_knn_index_dist_rcpp`, MATRIX, TEST_DATA, k, method, threads, eps)
}

# k-nearest neighbours computed from a precomputed distance matrix (C++ side).
DIST_MATRIX_knn <- function(DIST_MAT, TEST_IDX = NULL, is_min = TRUE, k = 5L, threads = 1L, rcpp_list_names = FALSE) {
    .Call(`_fmf_DIST_MATRIX_knn`, DIST_MAT, TEST_IDX, is_min, k, threads, rcpp_list_names)
}
|
04874714d56ad3a245db7c623488d85ca5f7fc52
|
d4830fc3ad754c22929791f9b8153fdcdcf8cc11
|
/ELFGEN/internal/elf_assemble_batch.R
|
3dccb3e6fec35dd28d6d451050e7243acd6b1173
|
[] |
no_license
|
HARPgroup/r-dh-ecohydro
|
9afc44c5c332ca2db6fdfb5c3151f2c27364d1a9
|
f7c43a3da6fe68da8ec1e62f34e4f67147db63d2
|
refs/heads/master
| 2022-05-08T11:05:45.134772
| 2022-04-20T13:51:28
| 2022-04-20T13:51:28
| 104,229,291
| 1
| 1
| null | 2022-04-20T13:51:29
| 2017-09-20T14:53:28
|
HTML
|
UTF-8
|
R
| false
| false
| 34,233
|
r
|
elf_assemble_batch.R
|
library(quantreg);
library(ggplot2);
library(ggrepel);
library(ggpmisc);
library(grid);
library(httr);
library(data.table);
library(scales);
library(rgeos); #used for geospatial processing
library(sp); #contains SpatialPolygonsDataFrame()
library(ggsn); #used for adding scale bar and north arrow to map
library(tidyverse)
library(sf)
library(maps)
# Dispatch an ELF fitting/plotting routine by name and return its plot object.
#
# method   character; one of "quantreg", "ymax", "pwit", "twopoint",
#          "pwit_RS", "pw_it_RS_IFIM".  Any other value returns NULL
#          (invisibly).  All remaining arguments are passed through unchanged
#          to the selected elf_* routine; note "twopoint" and
#          "pw_it_RS_IFIM" do not take the `geom` argument.
#
# BUG FIX: the original branches ended with a bare `return;`.  In R that
# merely evaluates the `return` function object -- it does NOT exit the
# function -- so every branch except "pw_it_RS_IFIM" silently returned NULL.
# Each branch now returns its plot object explicitly.
elf_run_method <- function( method, inputs, data, x_metric_code, y_metric_code, ws_ftype_code,
                            Feature.Name_code, Hydroid_code, search_code, token, startdate, enddate, geom
) {
  if (method == "quantreg") {
    print(paste("PLOTTING - method quantreg breakpoint ...",sep=""))
    plt <- elf_quantreg (
      inputs, data, x_metric_code, y_metric_code, ws_ftype_code, Feature.Name_code,
      Hydroid_code, search_code, token, startdate, enddate, geom
    )
    return(plt)
  }
  if (method == "ymax") {
    print(paste("PLOTTING - method ymax quantreg breakpoint at y-max...",sep=""))
    plt <- elf_ymax (
      inputs, data, x_metric_code, y_metric_code, ws_ftype_code, Feature.Name_code,
      Hydroid_code, search_code, token, startdate, enddate, geom
    )
    return(plt)
  }
  if (method == "pwit") {
    print(paste("PLOTTING - method quantreg breakpoint using piecewise function...",sep=""))
    plt <- elf_pw_it (
      inputs, data, x_metric_code, y_metric_code, ws_ftype_code,
      Feature.Name_code, Hydroid_code, search_code, token, startdate, enddate, geom
    )
    return(plt)
  }
  if (method == "twopoint") {
    print(paste("PLOTTING - method two-point function...",sep=""))
    plt <- elf_twopoint (
      inputs, data, x_metric_code, y_metric_code, ws_ftype_code, Feature.Name_code,
      Hydroid_code, search_code, token, startdate, enddate
    )
    return(plt)
  }
  if (method == "pwit_RS") {
    print(paste("PLOTTING - method quantreg breakpoint using piecewise function (Including regression to the right of breakpoint)...",sep=""))
    plt <- elf_pw_it_RS (
      inputs, data, x_metric_code, y_metric_code, ws_ftype_code,
      Feature.Name_code, Hydroid_code, search_code, token, startdate, enddate, geom
    )
    return(plt)
  }
  if (method == "pw_it_RS_IFIM") {
    print(paste("PLOTTING - method quantreg breakpoint using piecewise function (Including regression to the right of breakpoint)...",sep=""))
    plt <- elf_pw_it_RS_IFIM (inputs, data, x_metric_code, y_metric_code, ws_ftype_code, Feature.Name_code, Hydroid_code, search_code, token, startdate, enddate)
    return(plt)
  }
  # Unknown method: nothing to run or plot.
  invisible(NULL)
}
# Clean and filter the raw ELF station dataset prior to curve fitting.
#
# data       data frame of station samples; the code below reads columns
#            hydrocode, x_value, y_value, tstime, drainage_area, qmean_annual
# inputs     list of user settings; station_agg ("max" keeps only the maximum
#            y_value per station) and xaxis_thresh are read here
# startdate, enddate  optional bounds on tstime; the logical default FALSE
#            disables the corresponding bound
#
# Returns the filtered data frame, or FALSE when too few points (or points
# degenerate into 1-2 vertical/horizontal lines) remain for a regression.
elf_cleandata <- function (data, inputs, startdate = FALSE, enddate = FALSE) {
  #makes sure all metric values are numeric and not factorial (fixes error with ni, total)
  data$y_value <- as.numeric(data$y_value)
  #Subset by date range (tstime arrives as days since the 1970-01-01 epoch)
  data$tstime <- as.Date(data$tstime,origin="1970-01-01")
  # The bounds default to logical FALSE, so any non-logical value (a Date or
  # date string) activates the corresponding filter.
  if (typeof(startdate) != 'logical') {
    data <- subset(data, tstime > startdate)
  }
  if (typeof(enddate) != 'logical') {
    data <- subset(data, tstime < enddate)
  }
  #ADD COLUMN OF RATIO OF DRAINAGE AREA TO MEAN FLOW
  data["ratio"] <- (data$drainage_area)/(data$qmean_annual)
  #REMOVE ALL STATIONS WHERE THE RATIO OF DA:Q IS GREATER THAN 1000
  data<-data[!(data$ratio > 1000),]
  #USE ONLY MAX NT VALUE FOR EACH STATION
  # (sort by station and descending y_value, then keep the first -- i.e.
  # largest -- row per hydrocode, and restore ascending order)
  if(inputs$station_agg == "max"){
    aa <- data[order(data$hydrocode, data$y_value, decreasing=TRUE),]
    aa <- aa[!duplicated(aa$hydrocode),]
    aa <- aa[order(aa$hydrocode, aa$y_value),]
    data <- aa
  }
  #subsets data to exclude anything with a flowmetric value greater than the "xaxis_thresh" specified in the user inputs file
  data <- subset(data, x_value >= .001 & x_value < inputs$xaxis_thresh);
  #Export data as spreadsheet
  ##write.table(data, paste(save_directory,"data.tsv",sep=""), sep="\t")
  print(paste("Found ", nrow(data), sep=''));
  #If statement needed in case geographic region does not contain more than 3 points
  if(nrow(data) <= 3) {
    print("... Skipping (fewer than 3 datapoints)")
    return(FALSE)
  }
  #Skip if there is only 1 or 2 unique flow metric values for this watershed (either only a single EDAS station, or multiple with the same flow metric, which would result in a vertical bar of points in the plot)
  station_x_value <- data$x_value
  remove_da_duplicates <- unique(station_x_value, incomparables = FALSE)
  if(length(remove_da_duplicates) == 1 | length(remove_da_duplicates) == 2) {
    print("... Skipping (the points are all organized in 1 or 2 vertical lines in )");
    return(FALSE)
  } #closes bar of points skip if-statement (rare)
  #Skip if there is only 1 or 2 unique biometric values for this watershed
  station_y_value <- data$y_value
  remove_metric_duplicates <- unique(station_y_value, incomparables = FALSE)
  if(length(remove_metric_duplicates) == 1 | length(remove_metric_duplicates) == 2) {
    print("... Skipping (the points are all organized in 1 or 2 horizontal lines in )");
    return(FALSE)
  } #closes bar of points skip if-statement (rare)
  return(data)
}
# Return the stations lying above the fitted upper quantile regression line.
#
# data      data frame with columns x_value and y_value
# quantile  tau for the quantile regression (e.g. 0.8 for the 80th percentile)
#
# Fits y_value ~ log(x_value) at the requested tau and keeps only the rows
# whose observed y_value exceeds the value predicted by that regression.
elf_upper <- function(data, quantile) {
  # quantile regression of the biometric on the log of the flow metric
  fit <- rq(y_value ~ log(x_value), data = data, tau = quantile)
  # predicted upper-quantile y for each station's x
  predicted_y <- coef(fit)[1] + coef(fit)[2] * log(data$x_value)
  # stations sitting above the fitted line
  above_line <- subset(data, data$y_value > predicted_y)
  return(above_line)
}
elf_assemble_batch <- function(inputs = list()) {
  # Assemble a batch table of elfgen runs: one row per combination of
  # watershed feature x flow metric (x_metric) x biometric (y_metric) x
  # feature type (ws_ftype).
  #
  # inputs: list of batch settings; offset_* entries allow a batch to resume
  #   part-way through each loop.  `site` is the VAHydro base URL used to
  #   export the feature list for each ftype.
  # Returns a data.frame of run parameters, or FALSE when no combinations
  # were generated (preserves the original contract).
  x_metric <- inputs$x_metric
  y_metric <- inputs$y_metric
  ws_ftype <- inputs$ws_ftype
  target_hydrocode <- inputs$target_hydrocode
  offset_x_metric <- inputs$offset_x_metric
  offset_y_metric <- inputs$offset_y_metric
  offset_ws_ftype <- inputs$offset_ws_ftype
  offset_hydrocode <- inputs$offset_hydrocode
  site <- inputs$site
  sampres <- inputs$sampres
  # NOTE(review): the original also extracted xaxis_thresh, analysis_timespan,
  # station_agg, quantreg, ymax, pw_it*, twopoint and token but never used
  # them; those dead locals are no longer copied.
  rows <- list() # one data.frame per combination; bound once at the end (avoids O(n^2) rbind)
  for (l in offset_ws_ftype:length(ws_ftype)) {
    print(paste("ws_ftype ",l,". of ",length(ws_ftype),". ",ws_ftype[l],sep=""))
    # Map the feature type onto its VAHydro storage bundle.
    # (Temporary until the VAHydro storage schema is updated.)
    bundle <- switch(ws_ftype[l],
      hwi_region = ,
      ecoregion_iii = ,
      ecoregion_iv = ,
      ecoiii_huc6 = "ecoregion",
      state = "landunit",
      "watershed"
    )
    # Pull the full list of Virginia watersheds for this ftype; a non-empty
    # target_hydrocode restricts the export to that single feature.
    HUClist_url_base <- paste(site,"/?q=elfgen_regions_export/",bundle, sep = "")
    if (!(target_hydrocode == '')) {
      HUClist_url_full <- paste(HUClist_url_base, ws_ftype[l], target_hydrocode, sep = "/")
    } else {
      HUClist_url_full <- paste(HUClist_url_base, ws_ftype[l], sep = "/")
    }
    print(paste("Searching ", HUClist_url_full, " for target_hydrocode ", target_hydrocode, sep=''))
    HUClist <- read.table(HUClist_url_full, header = TRUE, sep = ",")
    Watershed_Hydrocode <- HUClist$Hydrocode
    Feature.Name <- HUClist$Feature.Name
    Hydroid <- HUClist$HydroID
    for (k in offset_y_metric:length(y_metric)) {
      print(paste("y_metric ", k, ". of ",length(y_metric),". Beginning loop for ", y_metric[k], sep=''))
      for (j in offset_x_metric:length(x_metric)) {
        # BUG FIX: the total was hard-coded as "14"; report the real count.
        print(paste("x_metric ", j, ". of ", length(x_metric), ". Beginning loop for ", x_metric[j], sep=''))
        for (i in offset_hydrocode:length(Watershed_Hydrocode)) {
          print(paste("Feature ", i, ". of ",length(Watershed_Hydrocode),". Searching for stations from ", Watershed_Hydrocode[i], sep=''))
          # NOTE(review): leftover guard from a removed data-retrieval step.
          # `data` is not defined in this function, so it resolves to the
          # enclosing/global environment (or the utils::data closure) and the
          # `next` should never trigger.  Kept for behavioral parity — confirm
          # and delete.
          if (typeof(data) == 'logical') {
            next
          }
          # BUG FIX: the original tested `batchlist == FALSE`, which compares a
          # data.frame to a logical after the first row and errors on R >= 4.2.
          # Accumulating rows in a list sidesteps that entirely.
          rows[[length(rows) + 1]] <- data.frame(
            target_hydrocode = Watershed_Hydrocode[i],
            hydroid = Hydroid[i],
            name = as.character(Feature.Name[i]),
            method = inputs$method,
            ws_ftype = ws_ftype[l],
            bundle = bundle,
            dataset_tag = inputs$dataset_tag,
            x_metric = x_metric[j],
            y_metric = y_metric[k],
            sampres = sampres
          )
        } #closes watershed for loop
      } #closes x_metric for loop
    } #closes y_metric for loop
  } #closes ws_ftype for loop
  # Preserve the legacy contract: FALSE when nothing was assembled.
  if (length(rows) == 0) {
    return(FALSE)
  }
  do.call(rbind, rows)
} #close function
elf_plot_distribution <- function(
  data,
  x_metric_code,
  y_metric_code,
  ws_ftype_code,
  Feature.Name_code,
  Hydroid_code,
  search_code) {
  # Diagnostic histogram of the biometric (y_value) scaled by the natural log
  # of the flow metric (x_value); the histogram object is returned invisibly
  # by hist() as a side effect of plotting.
  # NOTE(review): only `data` is used; the remaining arguments appear to be
  # accepted for call-site compatibility with the other elf_* plot functions —
  # confirm before removing.
  # Assumes x_value > 1 so log(x_value) is positive and nonzero — TODO confirm
  # upstream filtering guarantees this.
  hist(data$y_value / log(data$x_value))
}
base.plot <- function(geom, data, full_dataset, upper.quant,
                      yaxis_thresh, quantile,
                      plot_title, xaxis_title, yaxis_title,
                      EDAS_upper_legend, EDAS_lower_legend, Reg_upper_legend, Quantile_Legend
) {
  # Build the ELF scatter plot (log flow metric vs. biometric) with quantile
  # and linear-regression overlays, plus an inset locator map showing the
  # watershed and monitoring stations.
  #
  # geom:         WKT polygon string for the watershed of interest.
  # data:         stations retained for this analysis (plotted blue).
  # full_dataset: every station (plotted gray).
  # upper.quant:  stations above the fitted quantile (plotted green).
  # yaxis_thresh: upper y-axis limit; also anchors the inset map placement.
  # quantile:     tau for geom_quantile.
  # *_legend:     legend labels for the corresponding layers.
  # Returns a ggplot object.
  #
  # REFACTOR: the clip/fortify/merge sequence was copy-pasted once per state
  # and the WKT point parsing once per station set; both are now local helpers.

  # Fixed lat/lon bounding box for the inset map (covers VA and neighbors).
  bb <- readWKT("POLYGON((-85 35, -74 35, -74 41, -85 41, -85 35))")
  bbProjected <- SpatialPolygonsDataFrame(bb, data.frame("id"), match.ID = FALSE)
  bbProjected@data$id <- rownames(bbProjected@data)
  bbPoints <- fortify(bbProjected, region = "id")
  bbDF <- merge(bbPoints, bbProjected@data, by = "id")

  # Parse "POINT (lon lat)" WKT strings from a station data frame into a
  # data frame of numeric plot coordinates.
  wkt_point_coords <- function(stations) {
    after_paren <- read.table(text = as.character(stations$geom), sep = "(", colClasses = "character")
    inner <- read.table(text = after_paren$V2, sep = ")", colClasses = "character")
    coords <- read.table(text = inner$V1, sep = " ", colClasses = "character")
    data.frame(x = as.numeric(coords$V1), y = as.numeric(coords$V2), X.id. = "id", id = "1")
  }
  STATIONSDF <- wkt_point_coords(full_dataset)   # all stations (gray)
  BLUSTATIONSDF <- wkt_point_coords(data)        # analysis subset (blue)
  GRNSTATIONSDF <- wkt_point_coords(upper.quant) # upper-quantile subset (green)

  # Clip the watershed geometry to the bounding box; fall back to the
  # unclipped geometry when the intersection is empty.
  watershed_geom <- readWKT(geom)
  watershed_geom_clip <- gIntersection(bb, watershed_geom)
  if (is.null(watershed_geom_clip)) {
    watershed_geom_clip <- watershed_geom
  }
  wsdataProjected <- SpatialPolygonsDataFrame(watershed_geom_clip, data.frame("id"), match.ID = FALSE)
  wsdataProjected@data$id <- rownames(wsdataProjected@data)
  watershedPoints <- fortify(wsdataProjected, region = "id")
  watershedDF <- merge(watershedPoints, wsdataProjected@data, by = "id")

  # State outlines.  NOTE(review): `fxn_locations` is a global path set by the
  # calling script — consider passing it in as an argument.
  STATES <- read.table(file = paste(fxn_locations, "STATES.tsv", sep = ""), header = TRUE, sep = "\t")
  # Clip one state's WKT outline to the bounding box and fortify it for ggplot.
  state_outline_df <- function(state_code) {
    st_geom <- readWKT(STATES[which(STATES$state == state_code), ]$geom)
    st_clip <- gIntersection(bb, st_geom)
    stProjected <- SpatialPolygonsDataFrame(st_clip, data.frame("id"), match.ID = TRUE)
    stProjected@data$id <- rownames(stProjected@data)
    stPoints <- fortify(stProjected, region = "id")
    merge(stPoints, stProjected@data, by = "id")
  }
  VADF <- state_outline_df("VA")
  TNDF <- state_outline_df("TN")
  NCDF <- state_outline_df("NC")
  KYDF <- state_outline_df("KY")
  WVDF <- state_outline_df("WV")
  MDDF <- state_outline_df("MD")
  DEDF <- state_outline_df("DE")
  PADF <- state_outline_df("PA")
  NJDF <- state_outline_df("NJ")
  OHDF <- state_outline_df("OH")

  # Inset locator map: gray state outlines, yellow watershed, colored stations.
  map <- ggplotGrob(ggplot(data = VADF, aes(x = long, y = lat, group = group)) +
    geom_polygon(data = VADF, color = "gray46", fill = "gray") +
    geom_polygon(data = TNDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = NCDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = KYDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = WVDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = MDDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = DEDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = PADF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = NJDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = OHDF, color = "gray46", fill = NA, lwd = 0.5) +
    geom_polygon(data = watershedDF, color = "khaki4", fill = "yellow", alpha = 0.25, lwd = 0.5) +
    geom_point(aes(x = x, y = y, group = id), data = STATIONSDF, color = "gray66", size = 0.025) +
    geom_point(aes(x = x, y = y, group = id), data = BLUSTATIONSDF, color = "blue", size = 0.025) +
    geom_point(aes(x = x, y = y, group = id), data = GRNSTATIONSDF, color = "forestgreen", size = 0.025) +
    geom_polygon(data = bbDF, color = "black", fill = NA, lwd = 0.5) +
    # North arrow and scale bar (ggsn)
    north(bbDF, location = 'topleft', symbol = 12, scale = 0.2) +
    scalebar(bbDF, dist = 100, dd2km = TRUE, model = 'WGS84', st.bottom = TRUE, st.size = 1.5, st.dist = 0.04) +
    scale_x_continuous(limits = c(-85, -74)) +
    scale_y_continuous(limits = c(35, 41)) +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          panel.border = element_blank())
  )

  # Main scatter plot with quantile and OLS regression overlays; the inset map
  # is pinned to the upper-right via annotation_custom (x in log10 units).
  result <- ggplot(data, aes(x = x_value, y = y_value)) + ylim(0, yaxis_thresh) +
    geom_point(data = full_dataset, aes(colour = "aliceblue")) +
    geom_point(data = data, aes(colour = "blue")) +
    stat_smooth(method = "lm", fullrange = FALSE, level = .95, data = upper.quant, aes(x = x_value, y = y_value, color = "red")) +
    geom_point(data = upper.quant, aes(x = x_value, y = y_value, color = "black")) +
    geom_quantile(data = data, quantiles = quantile, show.legend = TRUE, aes(color = "red")) +
    geom_smooth(data = data, method = "lm", formula = y ~ x, show.legend = TRUE, aes(colour = "yellow"), se = FALSE) +
    geom_smooth(data = upper.quant, formula = y ~ x, method = "lm", show.legend = TRUE, aes(x = x_value, y = y_value, color = "green"), se = FALSE) +
    annotation_custom(
      grob = map,
      xmin = 4.54,
      xmax = 7.72,
      ymin = yaxis_thresh - (0.1 * yaxis_thresh),
      ymax = yaxis_thresh + (0.3 * yaxis_thresh)
    ) +
    ggtitle(plot_title) +
    theme(
      plot.title = element_text(size = 12, face = "bold"),
      axis.text = element_text(colour = "blue"),
      panel.grid.minor.x = element_blank()
    ) +
    labs(x = xaxis_title, y = yaxis_title) +
    scale_x_log10(
      limits = c(0.001, 15000),
      breaks = c(0.001, 0.01, 0.1, 1.0, 10, 100, 1000, 10000),
      labels = c("0.001", "0.01", "0.1", "1.0", "10", "100", "1,000", "10,000")
    ) +
    annotation_logticks(sides = "b") +
    theme(legend.key = element_rect(fill = 'white')) +
    # Legend: colors/labels must stay aligned with the layer aes strings above.
    scale_color_manual(
      "Legend",
      values = c("gray66", "forestgreen", "blue", "orange", "black", "red"),
      labels = c("Full Dataset", EDAS_upper_legend, EDAS_lower_legend, Reg_upper_legend, Quantile_Legend, "Regression (Data Subset)")
    ) +
    guides(
      colour = guide_legend(
        override.aes = list(
          size = c(1, 1, 1, 1, 1, 1),
          linetype = c(0, 0, 0, 1, 1, 1),
          shape = c(16, 16, 16, NA, NA, NA)
        ),
        label.position = "right"
      )
    );
  return(result)
}
base.map <- function(geom, data, full_dataset, upper.quant,
                     yaxis_thresh, quantile,
                     plot_title, xaxis_title, yaxis_title,
                     EDAS_upper_legend, EDAS_lower_legend, Reg_upper_legend, Quantile_Legend
) {
  # Build a stand-alone locator map (grob) of the watershed, neighboring state
  # outlines, rivers/waterbodies, and the three station sets.
  #
  # geom:         WKT polygon string for the watershed of interest.
  # data:         stations retained for this analysis (plotted blue).
  # full_dataset: every station (plotted gray).
  # upper.quant:  stations above the fitted quantile (plotted green).
  # NOTE(review): yaxis_thresh, quantile, the title/axis arguments and the
  # legend arguments are unused here; they are kept so base.map stays
  # call-compatible with base.plot.
  # Returns a ggplotGrob.
  #
  # REFACTOR: the per-state clip/fortify/merge sequence (13 states) and the
  # per-station-set WKT parsing (3 sets) are now local helpers.

  # Map extent (lon x, lat y) used for the bounding box and axis limits.
  extent <- data.frame(x = c(-85.5, -74.5),
                       y = c(35, 41))
  bb <- readWKT(paste0("POLYGON((", extent$x[1], " ", extent$y[1], ",", extent$x[2], " ", extent$y[1], ",", extent$x[2], " ", extent$y[2], ",", extent$x[1], " ", extent$y[2], ",", extent$x[1], " ", extent$y[1], "))"))
  bbProjected <- SpatialPolygonsDataFrame(bb, data.frame("id"), match.ID = FALSE)
  bbProjected@data$id <- rownames(bbProjected@data)
  bbPoints <- fortify(bbProjected, region = "id")
  bbDF <- merge(bbPoints, bbProjected@data, by = "id")

  # Parse "POINT (lon lat)" WKT strings into a data frame of plot coordinates.
  wkt_point_coords <- function(stations) {
    after_paren <- read.table(text = as.character(stations$geom), sep = "(", colClasses = "character")
    inner <- read.table(text = after_paren$V2, sep = ")", colClasses = "character")
    coords <- read.table(text = inner$V1, sep = " ", colClasses = "character")
    data.frame(x = as.numeric(coords$V1), y = as.numeric(coords$V2), X.id. = "id", id = "1")
  }
  STATIONSDF <- wkt_point_coords(full_dataset)   # all stations (gray)
  BLUSTATIONSDF <- wkt_point_coords(data)        # analysis subset (blue)
  GRNSTATIONSDF <- wkt_point_coords(upper.quant) # upper-quantile subset (green)

  # Clip the watershed geometry to the bounding box; fall back to the
  # unclipped geometry when the intersection is empty.
  watershed_geom <- readWKT(geom)
  watershed_geom_clip <- gIntersection(bb, watershed_geom)
  if (is.null(watershed_geom_clip)) {
    watershed_geom_clip <- watershed_geom
  }
  wsdataProjected <- SpatialPolygonsDataFrame(watershed_geom_clip, data.frame("id"), match.ID = FALSE)
  wsdataProjected@data$id <- rownames(wsdataProjected@data)
  watershedPoints <- fortify(wsdataProjected, region = "id")
  watershedDF <- merge(watershedPoints, wsdataProjected@data, by = "id")

  # State, river, and waterbody layers.  NOTE(review): `hydro_tools` is a
  # global path set by the calling script.
  # BUG FIX: the STATES path used sep="\\", which only works on Windows and
  # was inconsistent with the sibling reads; file.path() is portable.
  STATES <- read.table(file = file.path(hydro_tools, "GIS_LAYERS", "STATES.tsv"), header = TRUE, sep = "\t") # state geometries
  RIVDF <- read.table(file = file.path(hydro_tools, "GIS_LAYERS", "RIVDF.csv"), header = TRUE, sep = ",")   # river geometries
  WBDF <- read.table(file = file.path(hydro_tools, "GIS_LAYERS", "WBDF.csv"), header = TRUE, sep = ",")     # waterbody geometries
  # Clip one state's WKT outline to the bounding box and fortify it for ggplot.
  state_outline_df <- function(state_code) {
    st_geom <- readWKT(STATES[which(STATES$state == state_code), ]$geom)
    st_clip <- gIntersection(bb, st_geom)
    stProjected <- SpatialPolygonsDataFrame(st_clip, data.frame("id"), match.ID = TRUE)
    stProjected@data$id <- rownames(stProjected@data)
    stPoints <- fortify(stProjected, region = "id")
    merge(stPoints, stProjected@data, by = "id")
  }
  VADF <- state_outline_df("VA")
  TNDF <- state_outline_df("TN")
  NCDF <- state_outline_df("NC")
  KYDF <- state_outline_df("KY")
  WVDF <- state_outline_df("WV")
  MDDF <- state_outline_df("MD")
  DEDF <- state_outline_df("DE")
  PADF <- state_outline_df("PA")
  NJDF <- state_outline_df("NJ")
  OHDF <- state_outline_df("OH")
  SCDF <- state_outline_df("SC")
  DCDF <- state_outline_df("DC")
  INDF <- state_outline_df("IN")

  # Assemble the map: water-blue background, gray states, yellow watershed,
  # river/waterbody points, then the three station sets on top.
  map <- ggplotGrob(ggplot(data = VADF, aes(x = long, y = lat, group = group)) +
    geom_polygon(data = bbDF, color = "black", fill = "powderblue", lwd = 0.5) +
    geom_polygon(data = VADF, color = "gray46", fill = "gray") +
    geom_polygon(data = TNDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = NCDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = SCDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = KYDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = WVDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = MDDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = DEDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = PADF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = NJDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = OHDF, color = "gray46", fill = "gray", lwd = 0.5) +
    geom_polygon(data = DCDF, color = "gray46", fill = "gray", lwd = 0.5) +
    # BUG FIX: the INDF layer was duplicated in the original; drawn once here.
    geom_polygon(data = INDF, color = "gray46", fill = "gray", lwd = 0.5) +
    # Watershed highlighted in translucent yellow
    geom_polygon(data = watershedDF, color = "khaki4", fill = "yellow", alpha = 0.25, lwd = 0.5) +
    # Rivers and waterbodies rendered as dense small points
    geom_point(data = RIVDF, aes(x = long, y = lat), color = "steelblue1", size = 0.09) +
    geom_point(data = WBDF, aes(x = long, y = lat), color = "steelblue1", size = 0.09) +
    # Stations: gray = full dataset, blue = analysis subset, green = upper quantile
    geom_point(aes(x = x, y = y, group = id), data = STATIONSDF, color = "gray66", size = 0.3) +
    geom_point(aes(x = x, y = y, group = id), data = BLUSTATIONSDF, color = "blue", size = 0.3) +
    geom_point(aes(x = x, y = y, group = id), data = GRNSTATIONSDF, color = "forestgreen", size = 0.3) +
    # Bold outer border (transparent fill so it only re-draws the frame)
    geom_polygon(data = bbDF, color = "black", fill = NA, lwd = 0.5) +
    # North arrow and scale bar (ggsn), anchored near the bottom center-left
    north(bbDF, location = 'topleft', symbol = 12, scale = 0.1) +
    scalebar(bbDF, location = 'bottomleft', dist = 100, dd2km = TRUE, model = 'WGS84', st.bottom = FALSE, st.size = 3.5,
             anchor = c(
               x = (((extent$x[2] - extent$x[1]) / 2) + extent$x[1]) - 1.1,
               y = extent$y[1] + (extent$y[1]) * 0.001
             )) +
    scale_x_continuous(limits = c(extent$x[1], extent$x[2])) +
    scale_y_continuous(limits = c(extent$y[1], extent$y[2])) +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          panel.border = element_blank())
  )
  result <- map
  return(result)
}
|
7e6a2eb6ff5b775d27c753638262cd3dbdb3ee36
|
02127f6ff5483e535efb0e0dd487e0d5bbdbe102
|
/Practise Assignment.R
|
1f576d3792b2caeaf1423c641b513ecfc7d8013f
|
[] |
no_license
|
helenatan/R_Programming_Github
|
25081548ca4f096878cb3d50f86b15989f2ff1c8
|
6229b6753d2731e607f72d177c222823bdc880e6
|
refs/heads/master
| 2020-05-15T06:03:40.150335
| 2015-03-05T08:09:42
| 2015-03-05T08:09:42
| 30,334,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,229
|
r
|
Practise Assignment.R
|
## Practice assignment: exploring the diet_data weight-tracking CSV files.
## Task Description
## Date: Feb 5, 2015
## 1. Download the file "diet_data.zip" from http://s3.amazonaws.com/practice_assignment/diet_data.zip
##    and unzip it into your R working directory
## 2. Get Steve's weight on the last day, and determine if he has lost any
##    weight compared to the first day
## 3. Among all 5 people, who weighs the most on the first day?
## 4. Develop a function that calculates the average weight for a given day
## QUESTION 1
## Double-check the working directory, then download and unpack the data
getwd()
dataset_url <- "http://s3.amazonaws.com/practice_assignment/diet_data.zip"
download.file(dataset_url, "diet_data.zip")
unzip("diet_data.zip", exdir="diet_data")
list.files("diet_data")
## QUESTION 2
## Open the Steve.csv file and see what's in it
steve <-read.csv("diet_data/Steve.csv")
head(steve)
## Confirm the span of Steve's records (expecting days 1 through 30)
max(steve$Day)
min(steve$Day)
## Steve's weight on Day 30
steve_30 <-subset(steve$Weight, steve$Day==30)
steve_30
## Steve's weight on Day 1
steve_1 <-subset(steve$Weight, steve$Day==1)
steve_1
## Negative difference => Steve lost weight between Day 1 and Day 30
steve_30-steve_1
## -11, so yes, Steve lost weight!
## QUESTION 3
## Combine all 5 csv files into one data frame
all_files <-list.files("diet_data", full.names=TRUE) ## full paths so read.csv can open each file directly
all_files[1]
tmp <-vector(mode="list",length=length(all_files))  # preallocate one list slot per file
summary(tmp)
for (i in seq_along(all_files)) {
tmp[[i]]<-read.csv(all_files[[i]])
}
## do.call(function_you_want_to_use, list_of_arguments)
## this approach avoids all the messy copying and pasting
combined <-do.call(rbind,tmp)
summary(combined)
day1_file <-subset(combined, Day==1)
## Row(s) with the maximum Day-1 weight identify the heaviest person
chub_day1 <-subset(day1_file, Weight==max(day1_file$Weight))
chub_day1
## Steve weighs the heaviest on Day 1, 225 lb
## QUESTION 4
## Function: average weight across all participants on a given day
mean_weight <- function(directory, day) {
  # Average the Weight column across every participant CSV in `directory`
  # for the given day, ignoring missing weigh-ins.
  #
  # directory: path to a folder of per-person CSV files with Day and Weight columns.
  # day:       day number to average over.
  # Returns the mean weight (numeric scalar); NaN when no rows match `day`.
  csv_files <- list.files(directory, full.names = TRUE)
  per_person <- vector(mode = "list", length = length(csv_files))
  for (i in seq_along(csv_files)) {
    # BUG FIX: the original read from the script's global `all_files` instead
    # of the files found in `directory`, silently ignoring the argument.
    per_person[[i]] <- read.csv(csv_files[[i]])
  }
  combined <- do.call(rbind, per_person)
  day_rows <- subset(combined, Day == day)
  mean(day_rows[, "Weight"], na.rm = TRUE)
}
mean_weight("diet_data",20)
|
9252ff4a40d6967b933f9973a2838086c2900973
|
f609028e25e06a84cffe690261f5f828fea2e857
|
/tests/testthat/test-request-object.R
|
e7ab08422a1807eab5be28522b8a7247994a2adc
|
[] |
no_license
|
nteetor/prairie
|
89dd08bf751a5c896e9b17d6355b07763265d655
|
f275f64392b0c52c0b6b905c156c33ee9e5fc57b
|
refs/heads/master
| 2020-04-16T04:51:56.702687
| 2017-07-08T20:09:54
| 2017-07-08T20:09:54
| 40,051,783
| 39
| 3
| null | 2017-07-21T01:51:55
| 2015-08-01T15:56:12
|
R
|
UTF-8
|
R
| false
| false
| 1,076
|
r
|
test-request-object.R
|
context('request object / as.request')
# A canned Rook-style request environment, mimicking what a Rook-compliant
# web server would hand to the application.  as.request() is expected to
# parse this environment into a prairie request object.
template_request <- list2env(
  list(
    HTTP_ACCEPT = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    HTTP_ACCEPT_ENCODING = "gzip, deflate, sdch",
    HTTP_ACCEPT_LANGUAGE = "en-US,en;q=0.8",
    HTTP_CACHE_CONTROL = "max-age=0",
    HTTP_CONTENT_TYPE = 'text/html; charset=utf-8',
    HTTP_CONNECTION = "keep-alive",
    # NOTE(review): HTTP_HOST appears twice in this list (here and below);
    # with list2env the later assignment presumably wins — confirm which
    # value the test is meant to exercise.
    HTTP_HOST = "localhost:3030",
    REQUEST_METHOD = 'GET',
    SCRIPT_NAME = '',
    PATH_INFO = '/foo/bar',
    QUERY_STRING = '',
    SERVER_NAME = '127.0.0.1',
    SERVER_PORT = '3030',
    HTTP_HOST = '127.0.0.1:3030',
    # Rook interface fields; rook.input stubs the body reader
    rook.version = 'nope',
    rook.url_scheme = 'https',
    rook.input = list(
      read_lines = function() '<p>Hello, world!</p>'
    ),
    rook.errors = 'Should I care?'
  )
)
test_that('request initialize with defaults', {
req <- as.request(template_request)
expect_equal(req$uri, '/foo/bar')
expect_equal(req$body, '<p>Hello, world!</p>')
expect_equal(req$headers$`Accept-Language`, 'en-US,en;q=0.8')
expect_equal(req$query, '')
})
|
ba9cd0b03770065f1087c41dfc96383bd250848a
|
f5d675843979c36e841b4940453147717806d0d3
|
/plot2.R
|
8a48c64d04c84d93392e476a6e18b8b5fbd870f0
|
[] |
no_license
|
nryedida/ExData_Plotting1
|
870512a845a27db3b10f1d0fab2a1a603f47cd6c
|
5e038c91a6a45af2df928357176a75db95f45da0
|
refs/heads/master
| 2020-03-20T06:37:56.825480
| 2018-06-13T18:28:30
| 2018-06-13T18:28:30
| 137,254,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 455
|
r
|
plot2.R
|
# plot2.R -- Global Active Power vs. time for 2007-02-01 and 2007-02-02.
# "?" marks missing values in the raw household power consumption file.
hpc <- read.table("household_power_consumption.txt", sep=";", header = TRUE, na.strings = "?")
# Parse Date so the two target days can be selected by date comparison.
hpc$Date <- as.Date(hpc$Date, format="%d/%m/%Y")
hpc_df <- hpc[(hpc$Date=="2007-02-01") | (hpc$Date=="2007-02-02"),]
# Combine Date + Time into a single timestamp for the x axis.
hpc_df$x <- strptime(paste(hpc_df$Date, hpc_df$Time), "%Y-%m-%d %H:%M:%S")
plot(hpc_df$x, hpc_df$Global_active_power, type="l", xlab="", ylab = "Global Active Power (kilowatts)")
# NOTE(review): dev.copy() duplicates the on-screen plot to the png device;
# plotting directly into png() gives more predictable text/legend sizing.
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
|
e7a3eab83b472a0f7bedba67b639c2612ae69ffa
|
9aaacc9395f7950012c60deb494eae3b35eed805
|
/field_abundances_and_field_fitness.R
|
93641a74b36173791bc85bf9d531acdef7161966
|
[] |
no_license
|
jensculrich/coexistence_manuscript_2019
|
4a78fe95fbdb29ae18246852da688465ffb087b8
|
28569319c540ffba42edb16ceb9e8b3a9b4cbde4
|
refs/heads/master
| 2022-12-02T21:52:56.322247
| 2020-08-12T22:23:02
| 2020-08-12T22:23:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,813
|
r
|
field_abundances_and_field_fitness.R
|
# this file contains code to analyze relationships
# between fecundity and hetero-/con-specific abundance
# and between abundance and hetero/conspecific abundance
#
# Approach: fit negative-binomial GLMMs (lme4::glmer.nb) with and without the
# competitor-density fixed effect, then compare via likelihood-ratio tests.
library(tidyverse)
library(lme4)
#################################
## Data Structuring #############
#################################
# Transect and Plot are coded as factors so they can act as grouping
# variables in the random-effect terms below.
fitness_and_abundances <- read.csv("fitness_and_abundances.csv")
fitness_and_abundances <- fitness_and_abundances %>%
mutate(Transect = factor(Transect)) %>%
mutate(Plot = factor(Plot))
fitness_and_abundances_PLCO <- read.csv("fitness_and_abundances_PLCO.csv")
fitness_and_abundances_PLCO <- fitness_and_abundances_PLCO %>%
mutate(Transect = factor(Transect)) %>%
mutate(Plot = factor(Plot))
fitness_and_abundances_VALO <- read.csv("fitness_and_abundances_VALO.csv")
fitness_and_abundances_VALO <- fitness_and_abundances_VALO %>%
mutate(Transect = factor(Transect)) %>%
mutate(Plot = factor(Plot))
### remove the big outlier from the data set (many more seeds predicted vs any other plot)
# NOTE(review): rows are removed by hard-coded index (-31, -9 below); these
# indices are only valid for the exact input files used here.
which.max(fitness_and_abundances_PLCO$num_seeds2)
fitness_and_abundances_PLCO2 <- fitness_and_abundances_PLCO[-31, ]
### remove the big outlier from the data set (more than twice as many Plectritis density than second highest value)
which.max(fitness_and_abundances_VALO$X.Plectritis..including.pollinator.focal.plants.)
fitness_and_abundances_VALO2 <- fitness_and_abundances_VALO[-9, ]
### remove thetis lake data, where no Valerianella found
fitness_and_abundances_VALO2_filtered <- fitness_and_abundances_VALO2 %>%
filter(Site != "Thetis")
fitness_and_abundances_PLCO2_filtered <- fitness_and_abundances_PLCO2 %>%
filter(Site != "Thetis")
############################################################
# Relationships between fecundity and competitor densities #
############################################################
# Presence of a relationship was determined by
# evaluating chi-squared statistic from a likelihood ratio test
# between model with and without competitor as a fixed effect
# Densities are reported from 0.1m^2 plots nested in 1m^2 plots, nested in a transects.
# 1m^2 plot, nested in transect, is included as a random effect
# to account for spatial correlation in the data.
# plectritis fitness against plectritis density with mixed effects glm.nb
str(fitness_and_abundances_PLCO2_filtered)
mixed_m1 <- glmer.nb(num_seeds2 ~ X.Plectritis..including.pollinator.focal.plants.
+ (1|Transect/Plot),
data = fitness_and_abundances_PLCO2_filtered, na.action=na.omit)
summary(mixed_m1)
## estimate intercept without independent variables
mixed_m2 <- glmer.nb(num_seeds2 ~ (1|Transect/Plot),
data = fitness_and_abundances_PLCO2_filtered, na.action=na.omit)
summary(mixed_m2)
anova(mixed_m1, mixed_m2, test = "LRT")
# valerianella fitness against plectritis density with mixed effects glm.nb
mixed_m3 <- glmer.nb(num_seeds2 ~ X.Plectritis..including.pollinator.focal.plants.
+ (1|Transect/Plot),
data = fitness_and_abundances_VALO2_filtered, na.action=na.omit)
summary(mixed_m3)
mixed_m4 <- glmer.nb(num_seeds2 ~
+ (1|Transect/Plot),
data = fitness_and_abundances_VALO2_filtered, na.action=na.omit)
summary(mixed_m4)
anova(mixed_m3, mixed_m4, test = "LRT")
#################
## plectritis fitness against valerianella density with mixed effects glm.nb
mixed_m5 <- glmer.nb(num_seeds2 ~ X.Valerianella
+ (1|Transect/Plot),
data = fitness_and_abundances_PLCO2_filtered, na.action=na.omit)
summary(mixed_m5)
# estimate intercept without independent variables
mixed_m6 <- glmer.nb(num_seeds2 ~ (1|Transect/Plot),
data = fitness_and_abundances_PLCO2_filtered, na.action=na.omit)
summary(mixed_m6)
anova(mixed_m5, mixed_m6, test = "LRT")
# valerianella fitness against valerianella density with mixed effects glm.nb
mixed_m7 <- glmer.nb(num_seeds2 ~ X.Valerianella
+ (1|Transect/Plot),
data = fitness_and_abundances_VALO2_filtered, na.action=na.omit)
summary(mixed_m7)
mixed_m8 <- glmer.nb(num_seeds2 ~ (1|Transect/Plot),
data = fitness_and_abundances_VALO2_filtered, na.action=na.omit)
summary(mixed_m8)
anova(mixed_m7, mixed_m8, test = "LRT")
###################################################
# analyze relationship between species abundances #
###################################################
df <- read.csv("fitness_and_abundances.csv")
# NOTE(review): View() is interactive-only; harmless but a no-op in Rscript.
View(df)
# Normalize the file's several missing-value encodings to NA.
df[df == "-"] <- NA
df[df == ""] <- NA
df[df == "#DIV/0!"] <- NA
# first filter out data from Thetis lake site which was not used in this study
df_filtered <- df %>%
filter(Site != "Thetis")
# relationship between abundances at 0.1 m^2 density
mixed_m9 <- glmer.nb(X.Valerianella ~ X.Plectritis..including.pollinator.focal.plants.
+ (1|Site/Transect/Plot),
data = df_filtered, na.action=na.omit)
summary(mixed_m9)
mixed_m9.2 <- glmer.nb(X.Valerianella ~
+ (1|Site/Transect/Plot),
data = df_filtered, na.action=na.omit)
summary(mixed_m9.2)
anova(mixed_m9, mixed_m9.2, test = "LRT")
# relationship between abundances at 1m^2 density
mixed_m10 <- glmer.nb(X.Valerianella.1m.2..including.subplots. ~ X.Plectritis.1m.2..including.subplots.
+ (1|Site/Transect/Plot),
data = df_filtered, na.action=na.omit)
summary(mixed_m10)
mixed_m10.2 <- glmer.nb(X.Valerianella.1m.2..including.subplots. ~
+ (1|Site/Transect/Plot),
data = df_filtered, na.action=na.omit)
summary(mixed_m10.2)
anova(mixed_m10, mixed_m10.2, test = "LRT")
|
a7a4c192109123a860c3734a7b52dc919cfc989f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GEOmap/examples/plotnicetix.Rd.R
|
575764566f476697d90cb45b7488a2ab0c5bf3ce
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
r
|
plotnicetix.Rd.R
|
# Extracted example for GEOmap::plotnicetix (adds Lat-Lon tick marks to an
# existing projected plot).
library(GEOmap)
### Name: plotnicetix
### Title: Plot Lat-Lon tick marks
### Aliases: plotnicetix
### Keywords: misc
### ** Examples
# Projection centered at LAT0=0, LON0=-93 (projection type 7; see setPROJ).
proj = setPROJ(7, LAT0 = 0 , LON0= -93)
# x/y extents of the plotting window in projected coordinates.
rx = c(652713.4, 656017.4)
ry = c(1629271, 1631755)
# Empty plot with equal aspect ratio; ticks are then drawn by plotnicetix.
plot(rx, ry, type='n', asp=1, axes=FALSE , ann=FALSE)
plotnicetix(rx, ry, proj, PMAT=NULL)
|
f845f8f45255bcdf0f8cf40c0b8fb0825adbb21d
|
8a1f7a8d83ed93b69195864d08024798fda75957
|
/sandbox/GGally/ggallyTest6-Plots.R
|
c9a6fbda054e4ab0d2a25347ee0bb7b13a103ddd
|
[] |
no_license
|
hudsonchaves/diss-floss
|
9f40c84ca041870f05bdade3808bbf9313f359ee
|
ed048c3a547c67216768e283922c971afc662664
|
refs/heads/master
| 2020-12-26T05:02:01.682801
| 2014-12-04T05:33:21
| 2014-12-04T05:33:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,634
|
r
|
ggallyTest6-Plots.R
|
# GGally @examples for correlation plots
# Loads the merged FLOSS dataset and draws two ggpairs scatterplot matrices.
rm(list = ls(all.names = TRUE))
library("GGally")
PRJ_HOME <- Sys.getenv("DISS_FLOSS_HOME")
source(file.path(PRJ_HOME, "utils/data.R"))
MERGED_DIR <- file.path(PRJ_HOME, "data/merged")
MERGED_FILE <- "flossData" # default
RDS_EXT <- ".rds"
fileName <- paste0(MERGED_FILE, RDS_EXT)
mergedFile <- file.path(MERGED_DIR, fileName)
# load data
message("\nLoading data...")
df <- loadData(mergedFile)
# make column names syntactically valid
#names(df) <- make.names(names(df)) #TODO: test & remove
# select columns
# NOTE(review): df2 and df3 select exactly the same columns -- presumably df3
# was meant to differ (it is used for the colored plot below); confirm intent.
df2 <- df[, c("Project.Age", "Development.Team.Size", "User.Community.Size",
"License.Restrictiveness", "Project.Stage")]
df3 <- df[, c("Project.Age", "Development.Team.Size", "User.Community.Size",
"License.Restrictiveness", "Project.Stage")]
# This plot might be useful (but separate plots are better?)
g1 <- ggpairs(df2, title = "Pairwise Scatterplots",
lower=list(continuous = "smooth", params = c(colour = "blue")),
upper=list(params = list(corSize = 6)),
diag=list(continuous = "bar", params = c(colour = "blue")),
axisLabels = "show")
print(g1)
# Plot with 'Project.Stage' as color (should be a factor)
# (this plot doesn't seem to be very informative, but...)
g2 <- ggpairs(df3, title = "Pairwise Scatterplots",
lower=list(continuous = "smooth", params = c(color = "blue")),
upper=list(params = list(corSize = 6)),
diag=list(continuous = "bar", params = c(color = "blue")),
axisLabels = "show",
color = "Project.Stage")
print(g2)
|
da6dc9d6c22d0146e31d8a54017819a4abda2743
|
e01a015d086bb9f0029e2c77070e3dc168a88562
|
/2-R programming/week4/assignment3/common.R
|
c90d74b4f79cdc6f3c5ee1d50e51c18838b737f1
|
[] |
no_license
|
YvesKamdjo/coursera
|
f471d1a9ec3267238c014624c7d31ecd99a8f57f
|
ba839d616ff4bf9c75cabd77dd060a9844a29c80
|
refs/heads/master
| 2021-01-21T01:43:47.186568
| 2016-06-29T06:04:24
| 2016-06-29T06:04:24
| 62,199,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
common.R
|
# Load the outcome-of-care-measures data set.
#
# fname:   path to the CSV file (defaults to the hospital outcomes file)
# returns: a data.frame with every column read in as character
readfile <- function(fname = "outcome-of-care-measures.csv") {
  read.csv(fname, colClasses = "character")
}
|
ec22ef399ee1d52a182e24815149615a72d590c9
|
3fd2648f81e40341fc3581efea365c9203b8fd90
|
/plot4.R
|
946858173d4333fc98fad49216c2adeb2a43af26
|
[] |
no_license
|
TimWise/Coursera-ExploratoryDataAnalysis-Plotting1
|
7c61d44cbaa8b127cbf1f97302fae129aee3464a
|
c94fd3ea896ccfeb36341f168d3fc28ad081cbb8
|
refs/heads/master
| 2020-12-11T09:23:53.402875
| 2014-11-04T01:41:02
| 2014-11-04T01:41:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,453
|
r
|
plot4.R
|
# Whoa, this package helped with the two new plots looking really black
#
# Using this we can control the linewidth easily so that we don't get large blocks
# of black when points are dense
#
# It also gives the gray box and black axis look we were trying to get
# manually
#
# Install Cairo only when it is missing: unconditionally calling
# install.packages() on every run is slow and requires network access.
if (!requireNamespace("Cairo", quietly = TRUE)) {
  install.packages("Cairo")
}
library(Cairo)
# ?Cairo  # open the package help page interactively if needed
# Plot 4: Multiple plots in a 'page'
#
# Notes:
# - Looks like we need show plot2 and plot3
# - Just replicate the code here (yuk) but do put them their own functions
# - TODO: Restructure to remove duplicate code
# - Removed the comments for plot2 and plot3 since we've already discussed it.
# - Renamed plot2 and plot3
# - Made functions for the 2 new plots.
# - Looks like we should've named our Timestamp column datetime ;-)
# - Do what we did in plot 3 and plot everything to png device, don't just copy.
# . Text and legends are mucked when we copy.
# Plot 3: overlaid line plot of the three sub-metering series over time.
#
# DT: data frame with Timestamp plus Sub_metering_1/2/3 columns.
# Draws the first series with plot() and layers the rest with lines(),
# then adds a boxless (bty='n'), slightly shrunken (cex=0.9) legend.
plot.submetering <- function (DT) {
  # Series name -> line colour, in draw (and legend) order.
  series <- c(Sub_metering_1 = 'black',
              Sub_metering_2 = 'red',
              Sub_metering_3 = 'blue')

  first <- names(series)[1]
  plot(x = DT$Timestamp,
       y = DT[[first]],
       type = 'l',
       col = series[[first]],
       main = "",
       xlab = "",
       ylab = "Energy sub metering")

  for (nm in names(series)[-1]) {
    lines(x = DT$Timestamp,
          y = DT[[nm]],
          type = 'l',
          col = series[[nm]])
  }

  # Different from plot3: no legend box and slightly smaller legend text.
  legend('topright',
         legend = names(series),
         lty = 1,
         col = unname(series),
         text.col = 'black',
         bty = 'n',
         cex = 0.9)
}
# Plot 2: line plot of Global Active Power over the selected days.
#
# DT: data frame with Timestamp and Global_active_power columns.
# Title and x-axis label are suppressed to match the reference chart.
plot.global_active_power <- function(DT) {
  plot(DT$Timestamp, DT$Global_active_power,
       type = 'l',
       main = "", xlab = "",
       ylab = "Global Active Power")
}
# New Plot: line plot of Voltage over time.
#
# DT: data frame with Timestamp and Voltage columns.
# lwd = 0.5 keeps dense regions from rendering as solid black.
plot.voltage <- function(DT) {
  plot(DT$Timestamp, DT$Voltage,
       type = 'l',
       lwd = 0.5,
       main = "",
       xlab = "datetime",
       ylab = "Voltage")
}
# New Plot: line plot of Global_reactive_power over time.
#
# DT: data frame with Timestamp and Global_reactive_power columns.
# lwd = 0.5 keeps dense regions from rendering as solid black.
plot.global_reactive_power <- function(DT) {
  plot(DT$Timestamp, DT$Global_reactive_power,
       type = 'l',
       lwd = 0.5,
       main = "",
       xlab = "datetime",
       ylab = "Global_reactive_power")
}
# Compose the four subplots into a single 2x2 chart on a transparent
# background. Panels fill row-wise: active power, voltage (top row),
# sub-metering, reactive power (bottom row).
plot.plot4 <- function (DT) {
  par(bg = 'transparent', mfrow = c(2, 2))

  plot.global_active_power(DT)
  plot.voltage(DT)
  plot.submetering(DT)
  plot.global_reactive_power(DT)
}
#
## Read in the data
##
## Get the data, using our helper function, into data table DT
## For first execution, set download=TRUE to get and unzip the data file
source('getdata.R')
DT <- getdata(download=FALSE)

# Quick sanity checks on the loaded data.
str(DT)
head(DT)  # BUG FIX: was head(D) -- `D` is undefined; inspect DT instead

# plot to screen to get idea if things are working
plot.plot4(DT)

## Plot again to png and hopefully all text, legends, etc. will size correctly
## Open a png device, redo entire plot, close device
CairoPNG(filename="plot4.png",
         bg='transparent',
         width=480,
         height=480
         )
plot.plot4(DT)
dev.off()
|
9ac5fdf1af569371c6f94b23e0a3ac5b3dda457f
|
0bc1712f713ba17764248c1c9f496c411d265861
|
/script/CategoryBox.R
|
9d18b317908f2ed644a374839179240b4f1d8c15
|
[] |
no_license
|
ZhikunWu/PGC
|
012dc3867189f35330265204308885c5629916a3
|
8925d020a8f74bfda8e83daa6607c1ca6e9cfac6
|
refs/heads/master
| 2023-04-07T06:39:46.462351
| 2021-12-30T06:56:01
| 2021-12-30T06:56:01
| 284,568,907
| 0
| 3
| null | 2021-09-22T08:48:00
| 2020-08-03T00:44:06
|
Python
|
UTF-8
|
R
| false
| false
| 3,115
|
r
|
CategoryBox.R
|
#!/usr/bin/Rscript
# CategoryBox.R: box plot of heterozygous/homozygous SV ratios per frequency
# category, driven from the command line via argparser.
library(ggplot2)
library(argparser)
library(reshape2)
library(readr)
#usage: Rscript /home/wuzhikun/github/NanoHub/script/CategoryBox.R --input /home/wuzhikun/Project/Population/population/genotype/Sample_SV_type_heter2homo.txt --pdf /home/wuzhikun/Project/Population/population/genotype/Sample_SV_type_heter2homo.pdf --width 4 --height 4
arg <- arg_parser('Box plot for structural variant types.')
arg <- add_argument(arg, '--input', help='The file with type and number of SV.')
arg <- add_argument(arg, '--pdf', help='output box file with pdf format.')
arg <- add_argument(arg, '--width', help='The width of picture.')
arg <- add_argument(arg, '--height', help='The height of picture.')
argv <- parse_args(arg)
# category_heter2homo_box: read a wide TSV (one column per sample), melt it to
# long form, and save a per-category boxplot to `pdf_file`.
#
# in_file:  tab-separated input with a category column plus sample columns
# pdf_file: output path for the PDF
# width/height: figure size in inches (arrive as strings from argparser,
#               hence the as.numeric coercion below)
category_heter2homo_box <- function(in_file, pdf_file, width, height){
width <- as.numeric(width)
height <- as.numeric(height)
data <- read_tsv(in_file)
# ## for type
# melt_dt <- reshape2::melt(data[1:nrow(data)-1, ])
# colnames(melt_dt) <- c("Category", "Sample", "Value")
# melt_dt$Category <- factor(melt_dt$Category, order=TRUE, levels=c("DEL", "INS", "DUP", "INV"))
# colors <-c("limegreen", "royalblue1", "gold2", "tomato")
### for category
# Melt to long form and fix the category display order for the x axis.
melt_dt <- reshape2::melt(data)
colnames(melt_dt) <- c("Category", "Sample", "Value")
colors <- c( "mediumseagreen", "dodgerblue3","gold2", "tomato2")
melt_dt$Category <- factor(melt_dt$Category, order=TRUE, levels=c("Singleton", "Rare", "Low", "Common"))
# ### for feature
# melt_dt <- reshape2::melt(data)
# colnames(melt_dt) <- c("Category", "Sample", "Value")
# colors <- c("aquamarine3", "royalblue2", "purple2", "orangered3", "orange", "black", "blue")
# melt_dt$Category <- factor(melt_dt$Category, order=TRUE, levels=c("Enhancer", "Promoter", "UTR5/3", "CDS", "NC_Exon", "Intron", "Up/DownStream"))
# One box per category; y axis clipped to [0, 15] and most theme chrome removed.
# NOTE(review): guides(fill=FALSE) is the pre-ggplot2-3.3 spelling of
# guides(fill = "none"); it still works but emits a deprecation warning.
BoxPlot <- ggplot(melt_dt, aes(x=Category, y=Value, fill=Category)) +
geom_boxplot() +
# theme_bw() +
ylim(0, 15) + #12 #15
xlab("") + ylab("Heter/Homo ratio") +
theme( panel.background=element_blank(),panel.grid.major=element_blank(), panel.grid.minor=element_blank(), panel.border = element_blank(), plot.background=element_blank(), axis.line = element_line(colour = "black")) +
# theme(plot.margin = margin(1,1,0.5,0, "cm")) +
scale_fill_manual(values=colors) +
theme(axis.text = element_text( size=rel(1.2))) +
theme(axis.text.x = element_text(angle = 0, hjust = 0.5)) + #angle = 90, hjust = 1
theme(axis.title = element_text( size=rel(1.4 ))) +
# theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust= 1)) + #angle = 90, hjust = 1
# theme(axis.title = element_text( size=rel(0.8))) +
theme(plot.margin = margin(1,1,1,1, "cm")) +
guides(fill=FALSE)
BoxPlot
ggsave(pdf_file, width=width, height=height)
}
category_heter2homo_box(argv$input, argv$pdf, argv$width, argv$height)
|
c18b2210ba04cb09ad088fd62ba095207488b5da
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EurosarcBayes/examples/binom_one_alpha.Rd.R
|
f1536405bca26060bdfffc7d142695d74c105ab5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 510
|
r
|
binom_one_alpha.Rd.R
|
# Extracted example for EurosarcBayes::binom_one_alpha: exact p-value for a
# multi-stage single-arm binomial trial (Simon's two-stage design).
library(EurosarcBayes)
### Name: binom_one_alpha
### Title: Single arm, exact p-value calculator for single or multi-stage
###   binomial trials.
### Aliases: binom_one_alpha
### ** Examples
# Simon's two stage design
# Stage-wise stopping rules: fail at <=0 successes after n=7, and at <=3
# (or succeed at >=4) after n=18; null response rate p0 = 0.1.
failure=c(0,3)
success=c(NA,4)
n=c(7,18)
p0=0.1
result.success=4
result.n=18
# without accounting for interim analysis when calculating
# the p-value (naive one-sided binomial tail probability)
1-pbinom(result.success-1,result.n,p0)
# account for interim analysis
binom_one_alpha(result.success,result.n,p0,failure,success,n)
|
4985e119cd94577a70485709fe9ecc7bb6877d06
|
5c0e5ff92e0ce763bce555f2da2522043aabdf0a
|
/survey/code/r/fa_04102020.R
|
f1943a2c9ce9e8a3f9d5c7053858211a94df91f2
|
[] |
no_license
|
ellelang/DCM
|
faad6cb457e43614a365d63ce2adb3a2241cad93
|
4c9b8a3d045606bbefb4d9b2ba47b685f444b4d6
|
refs/heads/master
| 2023-07-06T08:45:54.881813
| 2023-06-22T03:45:00
| 2023-06-22T03:45:00
| 159,539,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,245
|
r
|
fa_04102020.R
|
# fa_04102020.R: exploratory factor analysis (EFA), confirmatory factor
# analysis (CFA, lavaan) and score extraction for a landowner survey.
# NOTE(review): this is an interactive/exploratory script -- several sections
# reference objects created further down the file and only run correctly when
# executed piecemeal in the right order (flagged inline below).
rm(list = ls())
setwd("C:/Users/langzx/Desktop/github/DCM/survey/data")
#setwd("~/Documents/github/DCM/survey/data")
library(psych)
library(FactoMineR)
library(dplyr)
library(tidyverse)
library(fastDummies)
library(sem)
library(factoextra)
library(lavaan)
library(likert)
library(missMDA)
library(corrplot)
library(polycor)
library(semPlot)
library(semTools)
#################
# factor analysis results
# Merge previously computed factor scores with cluster assignments (as
# dummy columns) and write the combined file back out.
fa_dat <- read.csv( "wta_04112020.csv", header= TRUE)
cluster_data <- read.csv("fscore_04112020_cluster.csv")
names(cluster_data)
dim(cluster_data)
df_cluster <- select (cluster_data, c(id, Cluster))
dfcluster_dummy <- dummy_cols(df_cluster, select_columns = "Cluster")
head(dfcluster_dummy)
dim(fa_dat)
dat00 <- left_join(fa_dat,dfcluster_dummy)
write.csv(x = dat00, file= "wta_04122020.csv", row.names = FALSE)
dim(dat00)
##############
# Extract the four factor-score columns, dedupe, and plot their histograms.
names(fa_dat)
fscore <- select(fa_dat, c(id,aware,past,appreciate, resp))
df_fscore <- distinct(fscore)
dim(df_fscore)
write.csv(x=df_fscore, file = "fscore_04112020.csv", row.names = FALSE)
scores <- select(df_fscore, -id)
multi.hist(scores)
#############factor analysis EFA& CFA
# Load the Likert-scale and binary-scale survey matrices.
dat <- read.csv ("data_likertscale.csv", head = TRUE)
dat$id
p_data <- read.csv("../../data/wta_factors04112020.csv", head = TRUE)
head(dat)
dim(dat)
dat_dich <- read.csv ("data_binaryscale.csv", head = TRUE)
dim(dat_dich)
# Columns 2:42 = behavioral items, 43:65 = information-source items.
df <- dat_dich[,2:65]
nrow(na.omit(df))
colSums(is.na(df))
data_dich <- dat_dich[,2:42]
head(data_dich)
df_info <- dat_dich[,43:65]
names(df_info)
# NOTE(review): `df_dich` is never defined in this file; the next two lines
# error as written -- presumably `df` or `data_dich` was intended.
nrow(na.omit(df_dich))
colSums(is.na(df_dich))
impute_df_all = imputeMCA(df, ncp=4)$completeObs
# NOTE(review): `impute_all_dich` is only created further down (via cbind of
# the two imputed halves); this section works only on re-execution.
het.mat_alldich <- hetcor(impute_all_dich)$cor
corrplot(cor(het.mat_alldich))
fa.parallel(het.mat_alldich)
fa_dich_all <- fa(het.mat_alldich, nfactor = 5,rotate = "varimax")
fa_dich_all$loadings
fa_dich_all <- fa(het.mat_alldich, nfactor = 5,rotate = "varimax")
fa_dich_all$loadings
# Items dropped for poor loadings in the 5-factor EFA.
poorloading_DICH <- c('infofsa', 'infoces','infoces','infonpo',
'infomedial','infospecialists','infosfdealer','infomdealer',
'infoneighborfriends','preferinternet','prefervisual','prefertradeshows',
'otherswetland', 'otherscovercrop','othersnm',
'nowcrp','noweqip','nowfcsv','nowcsp',
'practicegd','practicemintill','practicerb','practicerp')
dich_all_se <- select(impute_all_dich , -poorloading_DICH) %>% mutate_if(is.factor,as.numeric)
fa_all_alpha <- psych::alpha(dich_all_se)
summary(fa_all_alpha)
# Five-factor CFA specification (lavaan model syntax).
f_5_CFAmodel <- 'aware =~ obssediment + obsnutrients + obsodor + obstrash + obslackfish + obsunsafeswim + obscolor + obsunsafedrink
past =~ pastcrp + pastfcp + pastmci + pastlongterm + pastgcdt + pastgcsv
appreciate =~ acfish + acswim + acexplore + ackayak + achunt + achike + acbike + acpicnic + achorseride + acgooffroading
resp =~ sptlandowners + sptfarmmanager + sptrenters + sptgovstaff + sptmrbboard
info =~ infonrcs + infomedian + infoscdc
+ infovai + infoprivateconsultants + prefercountymeeting + infomedian + preferfielddemo + prefertelevision + prefermagazines + preferradio + preferprinted + preferonfarmconsul
'
bq_cfa <- cfa(model = f_5_CFAmodel,
data = dich_all_se, estimator = 'MLR')
summary(bq_cfa, standardized = T, fit.measures = T )
reliability(bq_cfa)
all_scores <- as.data.frame(predict(bq_cfa))
#data_dich <- df_dich %>% mutate_if(is.numeric,as.factor)
# Impute missing categorical responses with missMDA, then combine the two
# imputed halves into `impute_all_dich` (used by the earlier section).
nb = estim_ncpMCA(data_dich,ncp.max=2)
tab.disj_dich = imputeMCA(data_dich, ncp=4)$tab.disj
res.mca = MCA(data_dich,tab.disj=tab.disj_dich)
impute_df_dich = imputeMCA(data_dich, ncp=4)$completeObs
write.csv(x = impute_df_dich, file = 'factorDICH_impute.csv', row.names = FALSE)
tab.disj_dich_info = imputeMCA(df_info, ncp=4)$tab.disj
#res.mca_info = MCA(data_dich,tab.disj=tab.disj_dich_info)
impute_df_dich_info = imputeMCA(df_info, ncp=4)$completeObs
impute_all_dich <-cbind(impute_df_dich,impute_df_dich_info)
het.mat_alldich <- hetcor(impute_all_dich)$cor
corrplot(cor(het.mat_alldich))
fa.parallel(het.mat_alldich)
fa_dich_all <- fa(het.mat_alldich, nfactor = 5,rotate = "varimax")
fa_dich_all$loadings
poorloading_DICH <- c('otherswetland', 'otherscovercrop','othersnm',
'nowcrp','noweqip','nowfcsv','nowcsp',
'practicegd','practicemintill','practicerb','practicerp')
poorloading_info <- c('infofsa', 'infoces','infoces','infonpo',
'infomedial','infospecialists','infosfdealer','infomdealer',
'infoneighborfriends','preferinternet','prefervisual','prefertradeshows')
dich_all_se <- select(impute_all_dich , -poorloading_info) %>% select(-poorloading_DICH) %>% mutate_if(is.factor,as.numeric)
names(dich_all_se)
fa_all_alpha <- psych::alpha(dich_all_se)
summary(fa_all_alpha)
het.mat_all <- hetcor(dich_all_se)$cor
corrplot(cor(het.mat_all))
f_5_CFAmodel <- 'aware =~ obssediment + obsnutrients + obsodor + obstrash + obslackfish + obsunsafeswim + obscolor + obsunsafedrink
past =~ pastcrp + pastfcp + pastmci + pastlongterm + pastgcdt + pastgcsv
appreciate =~ acfish + acswim + acexplore + ackayak + achunt + achike + acbike + acpicnic + achorseride + acgooffroading
resp =~ sptlandowners + sptfarmmanager + sptrenters + sptgovstaff + sptmrbboard
info =~ infonrcs + infomedian + infoscdc
+ infovai + infoprivateconsultants + prefercountymeeting + infomedian + preferfielddemo + prefertelevision + prefermagazines + preferradio + preferprinted + preferonfarmconsul
'
all_cfa <- cfa(model = f_5_CFAmodel ,
data = dich_all_se)
summary(all_cfa)
reliability(all_cfa)
# Single-factor analysis of the information-source items alone.
het.mat_info <- hetcor(impute_df_dich_info)$cor
fa_info_alpha <- psych::alpha(impute_df_dich_info)
summary(fa_info_alpha)
corrplot(cor(het.mat_info))
fa.parallel(het.mat_info)
scree(het.mat_info)
fa_info <- fa(het.mat_info, nfactor = 1,rotate = "varimax")
fa_info$loadings
fa_info$R2.scores
summary(fa_info_alpha)
splitHalf(het.mat_info)
info_CFAmodel <- 'info_so =~ infonrcs + infomedian + infoscdc
+ infovai + infoprivateconsultants + prefercountymeeting + infomedian + preferfielddemo + prefertelevision + prefermagazines + preferradio + preferprinted + preferonfarmconsul'
poorloading_info <- c('infofsa', 'infoces','infoces','infonpo',
'infomedial','infospecialists','infosfdealer','infomdealer',
'infoneighborfriends','preferinternet','prefervisual','prefertradeshows')
dich_info_se <- select(impute_df_dich_info , -poorloading_info) %>% mutate_if(is.factor,as.numeric)
names(dich_info_se)
summary(fa_info_alpha)
info_cfa <- cfa(model = info_CFAmodel,
data = dich_info_se)
reliability(info_cfa )
summary(info_cfa, standardized = T, fit.measures = T)
info_scores <- as.data.frame(predict(info_cfa))
# NOTE(review): `dat_dich$..id` looks like a mangled column name (possibly a
# BOM artifact such as `X..id`, or a typo for `id`) -- confirm against the CSV.
info_scores['id'] = dat_dich$..id
f7 <- read.csv(file = "../../data/factors7_0415.csv")
f7 <- left_join(f7, info_scores)
write.csv(x = f7, file = "../../data/factors7_0415.csv", row.names = FALSE)
info<- fa.poly(impute_df_dich_info, nfactor =1, rotate="varimax")
info$loadings
info$scores
####################3
# Four-factor EFA/CFA on the behavioral (dichotomous) items only.
impute_df_dich = imputeMCA(data_dich, ncp=4)$completeObs
het.mat <- hetcor(impute_df_dich)$cor
fa_alpha <- psych::alpha(het.mat)
summary(fa_alpha)
splitHalf(het.mat)
corrplot(cor(het.mat))
fa.parallel(het.mat)
scree(het.mat)
DICH_fa4_EFA <- fa(het.mat, nfactor = 4,rotate = "varimax")
DICH_fa4_EFA$loadings
DICH_fa4_EFA$score.cor
poorloading_DICH <- c('otherswetland', 'otherscovercrop','othersnm',
'nowcrp','noweqip','nowfcsv','nowcsp',
'practicegd','practicemintill','practicerb','practicerp')
# Build a 0/1 "adopted any practice" indicator from the four practice items
# (coded 1/2 after the factor-to-numeric conversion; 2 means "yes").
dep_var <- select(impute_df_dich , c('practicegd','practicemintill','practicerb','practicerp')) %>% mutate_if(is.factor,as.numeric)
dep_eng <- dep_var %>% mutate(practicegd = ifelse(practicegd==2, 1,0),
practicemintill = ifelse(practicemintill==2, 1,0),
practicerb = ifelse( practicerb ==2,1,0),
practicerp = ifelse( practicerp ==2,1,0))
practice_indicator <- apply(dep_eng, 1, sum)
practice_indicator1 <- ifelse(practice_indicator>0, 1, 0)
describe(practice_indicator)
dich_se <- select(impute_df_dich , -poorloading_DICH) %>% mutate_if(is.factor,as.numeric)
dich_alpha <- psych::alpha(dich_se)
summary(dich_alpha)
het.mat <- hetcor(dich_se)$cor
corrplot(cor(het.mat))
fa.parallel(het.mat)
scree(het.mat)
fa4_EFA_dich <- fa(het.mat, nfactor = 4,rotate = "varimax")
fa4_EFA_dich$loadings
f_4_CFAmodel <- 'aware =~ obssediment + obsnutrients + obsodor + obstrash + obslackfish + obsunsafeswim + obscolor + obsunsafedrink
past =~ pastcrp + pastfcp + pastmci + pastlongterm + pastgcdt + pastgcsv
appreciate =~ acfish + acswim + acexplore + ackayak + achunt + achike + acbike + acpicnic + achorseride + acgooffroading
social =~ sptlandowners + sptfarmmanager + sptrenters + sptgovstaff + sptmrbboard'
mardia(dich_se)
bq_cfa <- cfa(model = f_4_CFAmodel,
data = dich_se)
summary(bq_cfa, standardized = T, fit.measures = T )
reliability(bq_cfa)
bq4_scores <- as.data.frame(predict(bq_cfa))
##############################
# Logistic regression: does practice adoption relate to the four CFA scores?
# NOTE(review): the formula uses `resp`, but the 4-factor model above names
# that latent variable `social` -- confirm which score column exists here.
csat_scores <- as.data.frame(predict(bq_cfa)) %>% mutate (practice_ind = practice_indicator1)
names(csat_scores )
dim(csat_scores)
fit <- glm(practice_ind~aware+past+appreciate+resp,data=csat_scores, family = binomial())
summary(fit)
pra_predict <- as.vector(predict(fit))
csat_scores <- csat_scores %>% mutate(id =dat$id, prac_pred = pra_predict)
dim(p_data)
dim(csat_scores)
names(p_data)
names(csat_scores)
new_p_data <-left_join(p_data,csat_scores)
names(new_p_data)
dim(new_p_data)
write.csv(x = new_p_data, file = "wta_04112020.csv", row.names = FALSE)
# Impute the full 64-column matrix and save it for the Likert-scale analysis.
data <- df %>% mutate_if(is.numeric,as.factor)
nb = estim_ncpMCA(data,ncp.max=2)
tab.disj = imputeMCA(data, ncp=4)$tab.disj
res.mca = MCA(data,tab.disj=tab.disj)
impute_df = imputeMCA(data, ncp=4)$completeObs
write.csv(x = impute_df, file = 'factor_impute.csv', row.names = FALSE)
#############################################################################
# Likert-scale analysis restarted from the saved imputed file.
setwd("C:/Users/langzx/Desktop/github/DCM/survey/data")
impute_df <- read.csv("factor_impute.csv")
names(impute_df)
describe(impute_df)
head(impute_df)
# df_likert <- as.data.frame(impute_df) %>%
#   mutate_if(is.integer,
#             as.factor) %>%
#   likert()
df_num <- impute_df%>% mutate_if(is.factor,as.numeric)
corrplot(cor(df_num))
fa.parallel(df_num)
scree(df_num)
fa4_EFA <- fa(df_num, nfactor = 5)
fa4_EFA$loadings
fa4_EFA$e.values
fa4_EFA$score.cor
fa4_EFA$scores
# Items dropped for poor loadings in the Likert-scale EFA.
poorloadings <- c('pollutionobs',
'opwetlandopen',
'opwetlandrestored','opcovercropplant',
'opnmopen','opcovercropopen','valpaymentimportant','valinfluence'
)
# 'valundueblame','valpaymentimportant','valinfluence','familiar25' ,'opcovercroprp','opcovercroptimep','opcovercroptimeh'
# poorloadings <- c('valundueblame','vallandregulate',
# 'valpaymentimportant','valinfluence','pollutionobs',
# 'opwetlandequipment','opnmis','opwetlandopen',
# 'opwetlandrestored','opcovercropplant',
# 'opnmopen','opcovercropopen',
# 'valknowconservation', 'valwaterimportant', 'valtogether')
library(car)
df_se <- select(df_num, -poorloadings)
dfse_alpha <- psych::alpha(df_se, check.keys = TRUE)
summary(dfse_alpha)
splitHalf(df_se)
#df_se <- select(df_num, -c(alundueblame, vallandregulate,valinfluence,pollutionobs
#opwetlandrestored, opcovercropplant,
#opnmopen,opwetlandopen,opcovercropopen))
dim(df_num)
dim(df_se)
names(df_num)
#df_se <- df_num[,1:15]
#df_se<-scale(df_se)
dim(df_num)
fa.parallel(df_se)
fa3_EFA <- fa(df_se, nfactor = 5, rotate = "varimax")
fa3_EFA$loadings
fa3_EFA$score.cor
# Candidate CFA specifications for the attitudinal items (the 5-factor
# version below is the one actually fitted).
fa4_CFAmodel <- 'concern =~ scenicconcern + scenicmoderate + nutrientconcern + nutrientmoderate + nutrientmajor + habitatconcern + habitatmoderate + habitatmajor + sedimentconcern + sedimentmoderate + sedimentmajor + recreationconcern +recreationmoderate +
recreationmajor + valwaterproblem
fav =~ valwaterimportant + vallandregulate + +valtogether
+ valstaff + opwetlandhabitat+ opwetlandsoil + opcovercroprs + opcovercropis + opcovercropln + opnmrf + opnmis
unfav =~ opwetlandcostr + opwetlandcontrol + opwetlandequipment + opwetlandcostm + valundueblame + opcovercroprp + opcovercroptimep + opcovercroptimeh + opnmrs + opnmrl + opnmcost
norm =~ valknowconservation + valsteward + opwetlandfamiliar + opcovercropfamiliar + opnmfamiliar'
fa5_CFAmodel <- 'concern =~ scenicconcern + scenicmoderate + nutrientconcern + nutrientmoderate + nutrientmajor + habitatconcern + habitatmoderate + habitatmajor + sedimentconcern + sedimentmoderate + sedimentmajor + recreationconcern +recreationmoderate +
recreationmajor + valwaterproblem
att_wld_unfav =~ opwetlandcostr + opwetlandcontrol + opwetlandequipment + opwetlandcostm
att_nm_unfav = ~opnmrs + opnmrl + opnmcost
comp =~ valwaterimportant +valtogether + valstaff + opwetlandhabitat + opwetlandsoil + opcovercroprs + opcovercropis + opcovercropln + opnmis
norm_control =~ valknowconservation + valsteward + opwetlandfamiliar + opcovercropfamiliar + opnmfamiliar +familiar25 '
fa_5cfa <- cfa(model = fa5_CFAmodel,data = df_se)
summary(fa_5cfa, standardized = T, fit.measures = T)
reliability(fa_5cfa)
fitMeasures(fa_5cfa)
f5_scores <- as.data.frame(predict(fa_5cfa))
# Attach respondent ids and combine the Likert-scale and dichotomous scores.
daaa <- read.csv("data_likertscale.csv", head = TRUE)
daaa$id
scores_all <- cbind(f5_scores,bq4_scores)
scores_all['id'] = daaa$id
dim(scores_all)
write.csv(x= scores_all, file = "scoresall_0501.csv", row.names = FALSE)
all <- read.csv( "wta_04122020.csv")
all_fa <- left_join(all, scores_all, by = 'id')
dim(all_fa)
write.csv(x= all_fa, file = "wta_factorsall_0501.csv", row.names = FALSE)
# fa3_CFAmodel <- 'concern =~ scenicconcern + scenicmoderate + nutrientconcern + nutrientmoderate + nutrientmajor + habitatconcern + habitatmoderate + habitatmajor + sedimentconcern + sedimentmoderate + sedimentmajor + recreationconcern +recreationmoderate +
# recreationmajor + valwaterproblem + valwaterimportant + valtogether
# value =~ valknowconservation + valsteward + opnmfamilr + opwetlandfamiliar+ familiar25 + opcovercropfamiliar
# landcontrol =~ valundueblame + opwetlandcostr + opwetlandcontrol + opwetlandcostm + opwetlandsoil + opwetlandhabitat + valstaff +
# opcovercroprp + opcovercroptimep +opcovercroptimeh + opcovercroprs + opcovercropis + opcovercropln + opnmrs + opnmrl + opnmis + opnmrf + opnmcost+
# vallandregulate+valpaymentimportant + valinfluence + opwetlandequipment '
df_se_SCALE<-scale(df_se)
fa_cfa <- cfa(model = fa4_CFAmodel,data = df_se)
summary(fa_cfa, standardized = T, fit.measures = T)
reliability(fa_cfa)
f_scores <- as.data.frame(predict(fa_cfa))
scores_all <- cbind(f_scores,all_scores)
daaa <- read.csv("data_likertscale.csv", head = TRUE)
daaa$id
scores_all['id'] = daaa$id
write.csv(scores_all, file = "factorscores_all0429.csv", row.names = FALSE)
all <- read.csv( "wta_04122020.csv")
all_fa <- left_join(all, scores_all, by = 'id')
dim(all_fa)
write.csv(x= all_fa, file = "wta_factorsall_0429.csv", row.names = FALSE)
names(all_fa)
#lavResiduals(fa_cfa)
library(semTools)
reliability(fa_cfa)
f_scores <- as.data.frame(predict(fa_cfa))
names(f_scores)
daaa <- read.csv("data_likertscale.csv", head = TRUE)
daaa$id
f_scores['id'] = daaa$id
write.csv(x= f_scores, file = "factorscores_likertlike.csv", row.names = FALSE)
di_fscores <- read.csv("fscore_04112020.csv")
dim(di_fscores)
names(di_fscores)
all <- read.csv( "wta_04112020.csv")
all_fa <- left_join(all, f_scores)
dim(all_fa)
write.csv(x= all_fa, file = "wta_factorsall_0415.csv", row.names = FALSE)
fscoresall <- left_join (di_fscores, f_scores)
dim(fscoresall)
names(fscoresall)
write.csv(x= fscoresall, file = "factors7_0415.csv", row.names = FALSE)
##############
f_dich <- read.csv("fscore_04112020_cluster.csv")
f_likert <- read.csv("factorscores_likertlike.csv")
dim(f_likert)
f_all <- left_join(f_dich, f_likert) %>% select(-c(Cluster,id))
fa.parallel(f_all)
f_all <- scale(f_all)
fa3_EFA <- fa(f_all, nfactor = 2)
fa3_EFA$loadings
|
94d39977c3cb01ff0de493ae3dc533756fb38105
|
8b063cda337ef9be263e3ebb1a132cbcbf453022
|
/assignment/9.15/Assignment 03.R
|
02cbf98a181edc8d13b76c17d90b9564563102d9
|
[] |
no_license
|
linleiwen/Course_Programming-_for_analytics
|
7285946965ff5521d841e22ceaf7d2c3e6f7532a
|
4b08a89ce0e49e1eb04b1c4128294de37deb67b0
|
refs/heads/master
| 2021-08-23T16:59:31.038610
| 2017-12-05T19:46:30
| 2017-12-05T19:46:30
| 111,733,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 358
|
r
|
Assignment 03.R
|
# Assignment 03: sampling distribution of the mean via bootstrap resampling.

# Generate n normal deviates rescaled to have *exactly* the requested mean
# and standard deviation (scale() centres and standardises first).
rnorm2 <- function(n, mean, sd) {
  mean + sd * scale(rnorm(n))
}

set.seed(1239)

# Sampling frame: mixture of two populations, 100 obs ~ N(25, 4) and
# 50 obs ~ N(10, 3).
r1 <- rnorm2(100, 25, 4)
r2 <- rnorm2(50, 10, 3)
samplingframe <- c(r1, r2)
hist(samplingframe, breaks = 20, col = "pink")

# Mean of one bootstrap sample of size 50.
mean(sample(samplingframe, size = 50, replace = TRUE))

# Sampling distribution of the mean: 50 bootstrap replicates.
# (Preallocate the result vector; the original `a = apply(a, 2)` call was
# invalid -- apply() requires an array and a FUN argument.)
a <- numeric(50)
for (i in seq_along(a)) {
  a[i] <- mean(sample(samplingframe, size = 50, replace = TRUE))
}
hist(a)
|
077e029b6c51f166a6b2c87281b0121b66ccada2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/greyzoneSurv/examples/mydata.Rd.R
|
6e58b683665d82a8c0962e116443a13b1791568d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
mydata.Rd.R
|
# Auto-extracted examples script for the 'mydata' help page of the
# greyzoneSurv package (format matches R's Rd-to-example extraction).
library(greyzoneSurv)
### Name: mydata
### Title: Package Data
### Aliases: mydata
### ** Examples
# Load the packaged example data set 'mydata' into the workspace.
data(mydata)
|
87403e3cfe8d613b8f58baa14c3a49d3e941a78d
|
c555092c911699a657b961a007636208ddfa7b1b
|
/man/qplot.Rd
|
bbe53e883e7d493720f330bd082054aa6a7075c6
|
[] |
no_license
|
cran/ggplot2
|
e724eda7c05dc8e0dc6bb1a8af7346a25908965c
|
e1b29e4025de863b86ae136594f51041b3b8ec0b
|
refs/heads/master
| 2023-08-30T12:24:48.220095
| 2023-08-14T11:20:02
| 2023-08-14T12:45:10
| 17,696,391
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,971
|
rd
|
qplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quick-plot.R
\name{qplot}
\alias{qplot}
\alias{quickplot}
\title{Quick plot}
\usage{
qplot(
x,
y,
...,
data,
facets = NULL,
margins = FALSE,
geom = "auto",
xlim = c(NA, NA),
ylim = c(NA, NA),
log = "",
main = NULL,
xlab = NULL,
ylab = NULL,
asp = NA,
stat = deprecated(),
position = deprecated()
)
quickplot(
x,
y,
...,
data,
facets = NULL,
margins = FALSE,
geom = "auto",
xlim = c(NA, NA),
ylim = c(NA, NA),
log = "",
main = NULL,
xlab = NULL,
ylab = NULL,
asp = NA,
stat = deprecated(),
position = deprecated()
)
}
\arguments{
\item{x, y, ...}{Aesthetics passed into each layer}
\item{data}{Data frame to use (optional). If not specified, will create
one, extracting vectors from the current environment.}
\item{facets}{faceting formula to use. Picks \code{\link[=facet_wrap]{facet_wrap()}} or
\code{\link[=facet_grid]{facet_grid()}} depending on whether the formula is one-
or two-sided}
\item{margins}{See \code{facet_grid()}: display marginal facets?}
\item{geom}{Character vector specifying geom(s) to draw. Defaults to
"point" if x and y are specified, and "histogram" if only x is specified.}
\item{xlim, ylim}{X and y axis limits}
\item{log}{Which variables to log transform ("x", "y", or "xy")}
\item{main, xlab, ylab}{Character vector (or expression) giving plot title,
x axis label, and y axis label respectively.}
\item{asp}{The y/x aspect ratio}
\item{stat, position}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}}
}
\description{
\code{qplot()} is now deprecated in order to encourage the users to
learn \code{\link[=ggplot]{ggplot()}} as it makes it easier to create complex graphics.
}
\examples{
# Use data from data.frame
qplot(mpg, wt, data = mtcars)
qplot(mpg, wt, data = mtcars, colour = cyl)
qplot(mpg, wt, data = mtcars, size = cyl)
qplot(mpg, wt, data = mtcars, facets = vs ~ am)
\donttest{
set.seed(1)
qplot(1:10, rnorm(10), colour = runif(10))
qplot(1:10, letters[1:10])
mod <- lm(mpg ~ wt, data = mtcars)
qplot(resid(mod), fitted(mod))
f <- function() {
a <- 1:10
b <- a ^ 2
qplot(a, b)
}
f()
# To set aesthetics, wrap in I()
qplot(mpg, wt, data = mtcars, colour = I("red"))
# qplot will attempt to guess what geom you want depending on the input
# both x and y supplied = scatterplot
qplot(mpg, wt, data = mtcars)
# just x supplied = histogram
qplot(mpg, data = mtcars)
# just y supplied = scatterplot, with x = seq_along(y)
qplot(y = mpg, data = mtcars)
# Use different geoms
qplot(mpg, wt, data = mtcars, geom = "path")
qplot(factor(cyl), wt, data = mtcars, geom = c("boxplot", "jitter"))
qplot(mpg, data = mtcars, geom = "dotplot")
}
}
|
36ed1a7ce1c12cedee12f34d619fa403ff337a59
|
419a2bf110b5bfe2a0ae3e56d26a32347cb2e191
|
/My_func.R
|
f379e868d892061b477736701650aec68c472474
|
[] |
no_license
|
mshariful/TestRipo
|
83af60701fadef8c562c5c6b9d51849f453e6bc4
|
4c2fabae607f41ef5128715602af74f02ed7898e
|
refs/heads/master
| 2020-12-17T21:04:41.346951
| 2020-01-21T15:04:59
| 2020-01-21T15:04:59
| 235,295,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
My_func.R
|
# Quick exploratory look at the built-in iris data set.
library(dplyr)

# Inspect the structure and summary statistics of the data frame.
str(iris)
summary(iris)

# Scatterplot of sepal length against sepal width.
plot(iris$Sepal.Length, iris$Sepal.Width)
|
339a98f94b6a94bbd4151019810887ef74d7f87e
|
55ebbf72246f8c4addab7acfd0a2fbaae20eb5e1
|
/man/sensitivitiesSymb.Rd
|
0662b6f517f60407ac1d90c004d3352e97c53882
|
[
"MIT"
] |
permissive
|
dkaschek/cOde
|
07571c98397610f8bea5c2187eced6f9defc7b55
|
3c9ddff4108262615f148858848ba583fc1dde86
|
refs/heads/master
| 2022-05-07T07:06:44.496680
| 2022-04-19T07:30:18
| 2022-04-19T07:30:18
| 43,353,323
| 11
| 3
| null | 2019-07-09T11:44:54
| 2015-09-29T07:59:42
|
R
|
UTF-8
|
R
| false
| true
| 7,254
|
rd
|
sensitivitiesSymb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/derivedEquations.R
\name{sensitivitiesSymb}
\alias{sensitivitiesSymb}
\title{Compute sensitivity equations of a function symbolically}
\usage{
sensitivitiesSymb(
f,
states = names(f),
parameters = NULL,
inputs = NULL,
events = NULL,
reduce = FALSE
)
}
\arguments{
\item{f}{named vector of type character, the functions}
\item{states}{Character vector. Sensitivities are computed with respect to initial
values of these states}
\item{parameters}{Character vector. Sensitivities are computed with respect to initial
values of these parameters}
\item{inputs}{Character vector. Input functions or forcings. They are excluded from
the computation of sensitivities.}
\item{events}{data.frame of events with columns "var" (character, the name of the state to be
affected), "time" (numeric or character, time point),
"value" (numeric or character, value), "method" (character, either
"replace" or "add"). See \link[deSolve]{events}.
Within \code{sensitivitiesSymb()} a \code{data.frame} of additional events is generated to
reset the sensitivities appropriately, depending on the event method.}
\item{reduce}{Logical. Attempts to determine vanishing sensitivities, removes their
equations and replaces their right-hand side occurences by 0.}
}
\value{
Named vector of type character with the sensitivity equations. Furthermore,
attributes "chi" (the integrand of the chisquare functional), "grad" (the integrand
of the gradient of the chisquare functional), "forcings" (Character vector of the
additional forcings being necessare to compute \code{chi} and \code{grad}) and "yini" (
The initial values of the sensitivity equations) are returned.
}
\description{
Compute sensitivity equations of a function symbolically
}
\details{
The sensitivity equations are ODEs that are derived from the original ODE f.
They describe the sensitivity of the solution curve with respect to parameters like
initial values and other parameters contained in f. These equtions are also useful
for parameter estimation by the maximum-likelihood method. For consistency with the
time-continuous setting provided by \link{adjointSymb}, the returned equations contain
attributes for the chisquare functional and its gradient.
}
\examples{
\dontrun{
######################################################################
## Sensitivity analysis of ozone formation
######################################################################
library(deSolve)
# O2 + O <-> O3
f <- c(
O3 = " build_O3 * O2 * O - decay_O3 * O3",
O2 = "-build_O3 * O2 * O + decay_O3 * O3",
O = "-build_O3 * O2 * O + decay_O3 * O3"
)
# Compute sensitivity equations
f_s <- sensitivitiesSymb(f)
# Generate ODE function
func <- funC(c(f, f_s))
# Initialize times, states, parameters and forcings
times <- seq(0, 15, by = .1)
yini <- c(O3 = 0, O2 = 3, O = 2, attr(f_s, "yini"))
pars <- c(build_O3 = .1, decay_O3 = .01)
# Solve ODE
out <- odeC(y = yini, times = times, func = func, parms = pars)
# Plot solution
par(mfcol=c(2,3))
t <- out[,1]
M1 <- out[,2:4]
M2 <- out[,5:7]
M3 <- out[,8:10]
M4 <- out[,11:13]
M5 <- out[,14:16]
M6 <- out[,17:19]
matplot(t, M1, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="solution")
legend("topright", legend = c("O3", "O2", "O"), lty=1, col=1:3)
matplot(t, M2, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="d/(d O3)")
matplot(t, M3, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="d/(d O2)")
matplot(t, M4, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="d/(d O)")
matplot(t, M5, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="d/(d build_O3)")
matplot(t, M6, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="d/(d decay_O3)")
}
\dontrun{
######################################################################
## Estimate parameter values from experimental data
######################################################################
library(deSolve)
# O2 + O <-> O3
# diff = O2 - O3
# build_O3 = const.
f <- c(
O3 = " build_O3 * O2 * O - decay_O3 * O3",
O2 = "-build_O3 * O2 * O + decay_O3 * O3",
O = "-build_O3 * O2 * O + decay_O3 * O3"
)
# Compute sensitivity equations and get attributes
f_s <- sensitivitiesSymb(f)
chi <- attr(f_s, "chi")
grad <- attr(f_s, "grad")
forcings <- attr(f_s, "forcings")
# Generate ODE function
func <- funC(f = c(f, f_s, chi, grad), forcings = forcings,
fcontrol = "nospline", modelname = "example3")
# Initialize times, states, parameters
times <- seq(0, 15, by = .1)
yini <- c(O3 = 0, O2 = 2, O = 2.5)
yini_s <- attr(f_s, "yini")
yini_chi <- c(chi = 0)
yini_grad <- rep(0, length(grad)); names(yini_grad) <- names(grad)
pars <- c(build_O3 = .2, decay_O3 = .1)
# Initialize forcings (the data)
data(oxygenData)
forcData <- data.frame(time = oxygenData[,1],
name = rep(
colnames(oxygenData[,-1]),
each=dim(oxygenData)[1]),
value = as.vector(oxygenData[,-1]))
forc <- setForcings(func, forcData)
# Solve ODE
out <- odeC(y = c(yini, yini_s, yini_chi, yini_grad),
times = times, func = func, parms = pars, forcings = forc,
method = "lsodes")
# Plot solution
par(mfcol=c(1,2))
t <- out[,1]
M1 <- out[,2:4]
M2 <- out[,names(grad)]
tD <- oxygenData[,1]
M1D <- oxygenData[,2:4]
matplot(t, M1, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="states")
matplot(tD, M1D, type="b", lty=2, col=1:3, pch=4, add=TRUE)
legend("topright", legend = names(f), lty=1, col=1:3)
matplot(t, M2, type="l", lty=1, col=1:5,
xlab="time", ylab="value", main="gradient")
legend("topleft", legend = names(grad), lty=1, col=1:5)
# Define objective function
obj <- function(p) {
out <- odeC(y = c(p[names(f)], yini_s, yini_chi, yini_grad),
times = times, func = func, parms = p[names(pars)],
forcings = forc, method="lsodes")
value <- as.vector(tail(out, 1)[,"chi"])
gradient <- as.vector(
tail(out, 1)[,paste("chi", names(p), sep=".")])
hessian <- gradient\%*\%t(gradient)
return(list(value = value, gradient = gradient, hessian = hessian))
}
# Fit the data
myfit <- optim(par = c(yini, pars),
fn = function(p) obj(p)$value,
gr = function(p) obj(p)$gradient,
method = "L-BFGS-B",
lower=0,
upper=5)
# Model prediction for fit parameters
prediction <- odeC(y = c(myfit$par[1:3], yini_s, yini_chi, yini_grad),
times = times, func = func, parms = myfit$par[4:5],
forcings = forc, method = "lsodes")
# Plot solution
par(mfcol=c(1,2))
t <- prediction[,1]
M1 <- prediction[,2:4]
M2 <- prediction[,names(grad)]
tD <- oxygenData[,1]
M1D <- oxygenData[,2:4]
matplot(t, M1, type="l", lty=1, col=1:3,
xlab="time", ylab="value", main="states")
matplot(tD, M1D, type="b", lty=2, col=1:3, pch=4, add=TRUE)
legend("topright", legend = names(f), lty=1, col=1:3)
matplot(t, M2, type="l", lty=1, col=1:5,
xlab="time", ylab="value", main="gradient")
legend("topleft", legend = names(grad), lty=1, col=1:5)
}
}
|
e7f2c8552ecd0e4bcf3683d14df8bb895b052480
|
287f022f6e04b20aaa27f165d1dea80994b0e492
|
/curvefit/man/curvefit-package.Rd
|
7b2b8fd2b31bd137782ebbc2089e7585c9c748aa
|
[] |
no_license
|
prsteele/curvefit
|
6559d72d746dc1035eb3860f591b3ab597ae4831
|
720276f5263e966bfb95ef9d2e296ceaa361f634
|
refs/heads/master
| 2020-05-30T15:45:57.799241
| 2012-05-08T15:06:42
| 2012-05-08T15:06:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,333
|
rd
|
curvefit-package.Rd
|
\name{curvefit-package}
\alias{curvefit-package}
\alias{curvefit}
\docType{package}
\title{Bayesian curve fitting}
\description{
Fits data using Bayesian techniques.
}
\details{
\tabular{ll}{
Package: \tab curvefit\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-04-22\cr
License: \tab GPLv3\cr
LazyLoad: \tab yes\cr
}
The function curvefit fits a piecewise polynomial to data. The
function `curvefit.at' is used to evaluate the fitted function at
specified values.
}
\usage{
curvefit(formula, data, ...)
\method{curvefit}{default}(formula, data, prior_mean,
max_knots=length(data), c_param=.4, poly_deg=2, knot_continuity=1,
mse_relative=10^-3, mse_absolute=10^3, burnin=10, diagnostics=FALSE, ...)
\method{curvefit}{print}(fit, ...)
\method{curvefit}{at}(fit, x, ...)
}
\arguments{
\item{formula}{ a formula specifying the predictor and response
variable. }
\item{data}{ the data to be fit. If the formula specified is "y ~ x",
then "data" must contain columns "y" and "x". }
\item{prior_mean}{ the prior mean of the number of knots in the fitted
model. Must be a nonnegative integer. }
\item{max_knots}{ the maximum number of knot locations in the
model. Increase to increase the granularity of fits. }
\item{c_param}{ a parameter that controls the ratio between 'birth'
and 'death' steps and 'move' steps. Must be in (0, .5). }
\item{poly_deg}{ the degree of fitted polynomials. Must be a
positive integer. }
\item{knot_continuity}{ controls the smoothness of the resulting
fit. If zero, the result is possibly noncontinuous. If positive, the
fit will have "knot_continuity - 1" continuous derivatives. }
\item{mse_relative}{ the relative mean-squared error required to
halt. Decrease to improve fit, at the expense of running time. }
\item{mse_absolute}{ the absolute mean-squared error required to
halt. Decrease to improve fit, at the expense of running time. }
\item{burnin}{ minimum number of iterations we require before testing
for absolute and relative halting conditions. }
\item{diagnostics}{ if TRUE, prints out diagnostics as the fit
runs. Can be useful to see how fast a fit is converging. }
\item{fit}{ a curvefit object. }
\item{x}{ a vector of values to evaluate the fit at. }
\item{\dots}{ ignored. }
}
\value{
An object of class \code{curvefit}, a list including elements
\item{coef}{ a named vector of fit coefficients. }
\item{niters}{ the number of iterations required to terminate. }
\item{mse}{ the mean-squared error of the fitted model. }
\item{knots}{ a vector of interior knot indices in the fitted
model. The value of knot points can be found by accessing the
corresponding index of the "locations" parameter. }
\item{locations}{ a vector of candidate knot locations. }
\item{l}{ the degree of polynomials used. }
\item{l0}{ the degree of continuity of the fit. If 0, the fit is not
guaranteed to be continuous. If positive, the fit has l0 - 1
continuous derivatives. }
}
\author{
Patrick Steele
Maintainer: Patrick Steele, <prs233@cornell.edu>
}
\references{
"Automatic Bayesian curve fitting", Denison et al., 1998
}
\keyword{ regression }
\examples{
# data(EuStockMarkets)
# x = 1:200
# y = EuStockMarkets[x, 1]
# fit = curvefit(y ~ x, list(y=y, x=x), prior_mean=10)
# plot(x, fit.at(fit, x))
}
|
ef93b0e8f1f9e58359f9a79077f3ca59a3cb936b
|
e96286a6c47c377596b8857a6661d113ad75b5b8
|
/Code/Source/predictive_fit_hmnl.R
|
deed4c8977d4f8dc4d5a28613e149a90b679e5cc
|
[
"MIT"
] |
permissive
|
marcdotson/conjoint-ensembles
|
1dcaf6413b12851d9bfd2d05285703f6ebf199bc
|
fbb1163b03e77d14238584565247630f934fdefa
|
refs/heads/main
| 2022-06-17T07:16:00.905779
| 2021-09-16T21:55:34
| 2021-09-16T21:55:34
| 134,318,937
| 0
| 1
|
MIT
| 2023-08-30T16:59:39
| 2018-05-21T20:00:33
|
R
|
UTF-8
|
R
| false
| false
| 2,483
|
r
|
predictive_fit_hmnl.R
|
# Compute hold-out predictive fit (hit rate and hit probability) for a
# hierarchical multinomial logit (HMNL) model.
#
# Arguments:
#   hmnl_draws - posterior draws; must contain $Gamma, an array of dimension
#                (ndraw x ncol(test_Z) x nlvls) of upper-level coefficients.
#   test_X     - hold-out design array, dimension (nresp x nscns x nalts x nlvls).
#   test_Y     - matrix of hold-out choices (nresp x nscns), entries in 1..nalts.
#   test_Z     - matrix of respondent covariates (nresp x ncov); if NULL an
#                intercept-only column of ones is used.
#
# Returns a list with:
#   hit_prob - mean predicted probability of the actually-chosen alternative.
#   hit_rate - share of hold-out choices for which the chosen alternative has
#              the highest predicted probability.
predictive_fit_hmnl <- function(hmnl_draws, test_X, test_Y, test_Z) {
  ndraw <- dim(hmnl_draws$Gamma)[1]  # number of posterior draws
  nresp <- nrow(test_Y)              # number of respondents
  nscns <- dim(test_X)[2]            # number of choice tasks
  nalts <- dim(test_X)[3]            # number of alternatives per task
  nlvls <- dim(test_X)[4]            # number of attribute levels

  if (is.null(test_Z)) {
    test_Z <- matrix(1, nrow = nresp, ncol = 1)
  }

  # Stack respondents and scenarios into one tall (nresp*nscns*nalts x nlvls)
  # design matrix to avoid per-task loops later.
  # (This needs changing if hold-out tasks are used.)
  test_X_stacked <- do.call(rbind, lapply(seq_len(nresp), function(resp) {
    do.call(rbind, lapply(seq_len(nscns), function(scn) test_X[resp, scn, , ]))
  }))

  # Stack choices the same way (respondent-major, then task).
  test_Y_stacked <- matrix(t(test_Y), ncol = 1)

  # Utilities evaluated at the mean (over draws) of the distribution of
  # heterogeneity. meangammas is loop-invariant, so compute it once.
  meangammas <- apply(hmnl_draws$Gamma, c(2, 3), mean)  # ncov x nlvls
  Umat <- matrix(0, nrow = nresp * nscns * nalts)
  for (resp in seq_len(nresp)) {
    # Mean part-worths for this respondent, via the upper-level covariates.
    betas <- matrix(test_Z[resp, ] %*% meangammas, ncol = 1)
    rows <- ((resp - 1) * nalts * nscns + 1):(resp * nalts * nscns)
    Umat[rows, ] <- exp(test_X_stacked[rows, ] %*% betas)
  }

  # Multinomial-logit choice probabilities, one column per task.
  Umat_byscn <- matrix(Umat, nrow = nalts)
  sums <- t(matrix(rep(colSums(Umat_byscn), nalts), ncol = nalts))
  probs <- Umat_byscn / sums

  # Predicted choice = alternative with the highest probability in each task.
  locs <- apply(probs, 2, which.max)

  # NOTE(review): hits is sized nresp*nscns*ndraw while locs has length
  # nresp*nscns; for ndraw > 1 the logical index recycles and the extra
  # entries dilute the hit rate. Kept as-is to preserve existing behaviour;
  # confirm the intended denominator with the model owners.
  hits <- double(nresp * nscns * ndraw)
  hits[locs == test_Y_stacked] <- 1

  # Probability assigned to the actually-chosen alternative in each task.
  hit_probs <- colSums(probs * diag(nalts)[, test_Y_stacked])

  list(hit_prob = mean(hit_probs), hit_rate = mean(hits))
}
|
2060cc26143da1249455659e0523e1edf8d48155
|
0c4914d3c503a936f0f7531aefd56de756012a9d
|
/man/simData2.Rd
|
c75d521085db54e6d9d5e6b6972f94db35873192
|
[] |
no_license
|
feiliu135/FADPclust
|
88ebf030a00692b15ec3536fd7dbef628f782435
|
d81d53f13ef261b51e132e65802a09a983732cf1
|
refs/heads/master
| 2023-08-19T05:00:59.197574
| 2021-10-08T06:10:09
| 2021-10-08T06:10:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 439
|
rd
|
simData2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simData2.R
\docType{data}
\name{simData2}
\alias{simData2}
\title{Simulated multivariate functional data for method FADPclust}
\format{fd, see FDA R package for details.}
\description{
Simulated three-dimensional multivariate functional data, with 2 clusters each containing 100 sample curves, were for users to apply the method FADPclust.
}
\keyword{datasets}
|
6dc97f617e255d435e9e2aafcf9ddb07d36bebfe
|
3e51c995b0766e621819e8d5804156e7d29b2c43
|
/R2OpenBUGS_ex1.R
|
7f25a4ed15d5285049eacab84cfbbccc98c82a5d
|
[] |
no_license
|
miaomiao7777/Bayesian-Stats
|
39efd0a0eb0440ecf6a638b402b2f74b37996eeb
|
94a38b26b8d95eba725f104c1aff7346d9f44f5a
|
refs/heads/master
| 2020-04-29T14:45:05.246126
| 2019-03-18T04:49:35
| 2019-03-18T04:49:35
| 176,206,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
R2OpenBUGS_ex1.R
|
# Beta-binomial example run through OpenBUGS via the R2OpenBUGS package.
# NOTE(review): installing inside a script mutates the user's library;
# consider removing once the package is available.
install.packages("R2OpenBUGS")
library(R2OpenBUGS)
# BUGS model: beta(1,1) prior on pi truncated to (0.2, 0.45), binomial
# likelihood for r. (%_%I(...) is R2OpenBUGS notation for BUGS-style
# interval truncation when the model is written as an R function.)
bugsex1 <- function() {
pi ~ dbeta(1,1)%_%I(0.2,0.45)
r ~ dbin(pi,N)
}
# Observed data: r successes out of N trials.
N <- 14
r <- 4
# NOTE(review): hard-coded Windows path -- portability hazard; prefer
# tempdir() or a relative project path.
filename <- file.path("C:/Users/wangsaja/Desktop/openbugs/","bugsex1.txt")
# Serialise the model function to a BUGS model file.
write.model(bugsex1,filename)
filename
# Names of the R objects passed to OpenBUGS as data.
data <- list("N","r")
#inits <- function() {list(pi=0.5)}
# Parameter(s) to monitor.
parameters <- c("pi")
#bugsex1.sim <- bugs(data,inits=NULL,parameters,filename,codaPkg=TRUE,n.iter=1000)
# Run the sampler; with inits=NULL OpenBUGS generates its own initial values.
bugsex1.sim <- bugs(data,inits=NULL,parameters,filename,n.iter=1000)
print(bugsex1.sim)
plot(bugsex1.sim)
#print(mean(pi))
#print(quantile(pi, c(.25, .75)))
#library("coda")
#codaex1 <- read.bugs(bugsex1.sim)
#plot(codaex1)
|
0eb8d053270a39eb6476f0a14ce0d0e4f1953d9e
|
25d6eec99ea583edb64b56cc113e59364de65038
|
/eibshiny2018/app2/ui.R
|
2a8117620fd888829cce1014f983d2f164815deb
|
[
"MIT"
] |
permissive
|
ShinyHub/ShinyEiB
|
3c4af4d9c559440ebf66e6af3f45f02311a7eae7
|
d2fcf437fdd59d8fd61b55b77c7457df6ef6df45
|
refs/heads/master
| 2020-03-31T04:07:06.001451
| 2018-10-07T01:05:22
| 2018-10-07T01:05:22
| 151,891,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,441
|
r
|
ui.R
|
#
# User-interface definition of the Shiny web application ("Application 2").
# Run it with the 'Run App' button in RStudio.
#
# More on building Shiny applications: http://shiny.rstudio.com/
#

library(shiny)
library(ggplot2)

shinyUI(fluidPage(

  # Page title
  titlePanel("Application 2"),

  # Sidebar holds the upload control and chart-specific inputs;
  # the main panel renders the selected graphic.
  sidebarLayout(
    sidebarPanel(

      # Dataset upload (Excel files)
      fileInput(inputId = "fileUp", label = "Upload dataset",
                placeholder = "select file...", accept = ".xlsx"),

      # Chart type selector
      selectInput(inputId = "selChart", label = "Choose graphic",
                  choices = c("boxplot", "scatterplot"), selected = 1),

      # Variable pickers rendered server-side, shown conditionally
      # depending on the chart type chosen above.
      conditionalPanel(
        condition = "input.selChart=='boxplot'",
        uiOutput("outVarTrait"),
        uiOutput("outGroup")
      ),
      conditionalPanel(
        condition = "input.selChart=='scatterplot'",
        uiOutput("outX"),
        uiOutput("outY")
      ),

      # Axis labels for the graphic
      textInput(inputId = "xlabel", label = "X label", value = ""),
      textInput(inputId = "ylabel", label = "Y label", value = "")
    ),

    # Rendered plot
    mainPanel(
      plotOutput("outchart")
    )
  )
))
|
e2b23aec2a7ee3b2399f7d521fdea72cc36b9fe9
|
a0e4276f3f4d002263655d00f67d0878cacf6d3c
|
/lib/PKfit/R/plotting.sim.R
|
802754c06f9a3951e98480a5b2a043bb5f4d034a
|
[] |
no_license
|
shanmdphd/mypkfit
|
a4dff0b1181d89acce8ba7838dffde081a825ace
|
508012aea85b47f77976a7d6cd8dfe55f090332c
|
refs/heads/master
| 2021-01-01T16:43:18.814912
| 2017-07-21T03:20:37
| 2017-07-21T03:20:37
| 97,901,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,347
|
r
|
plotting.sim.R
|
### ----------------plot for simulation----------------
### Draw 2 windows, and consider that 2 more windows are coming

# Plot one subject's simulated profile as a pair of panels: a linear plot
# and a semi-log plot.
#
# Arguments:
#   i     - subject identifier (used in the plot title).
#   x     - x-axis values (e.g. time).
#   y     - y-axis values; must be positive for the semi-log panel.
#   xaxis - x-axis label.
#   yaxis - y-axis label.
#   MD    - if TRUE draw lines only (type 'l'); otherwise draw lines and
#           symbols (type 'b').
#
# Called for its plotting side effect; the caller is expected to have set
# up the device layout (par(mfrow=...), per the original module comments).
plotting.sim <- function(i, x, y, xaxis, yaxis, MD = FALSE)
{
  main <- paste(c("Subject:- ", i), collapse = "")
  # The MD and non-MD branches differed only in plot type, so fold them.
  plot_type <- if (MD) "l" else "b"

  ## linear plot
  plot(y ~ x, type = plot_type, main = main, ylim = c(0, max(y) * 1.1),
       xlab = xaxis, ylab = yaxis, pch = 15, col = "black", bty = "l",
       font.lab = 2, cex.lab = 1, cex.axis = 1, cex.main = 1)
  # Bug fix: the original called text("linear plot", side=3, ...), but
  # side= is an mtext() argument; text() was handed an invalid coordinate.
  # Use mtext(), matching the semi-log panel below.
  mtext("linear plot", side = 3, cex = 0.88)

  ## semi-log plot
  plot(x, y, log = "y", type = plot_type, main = main,
       ylim = c(1, max(y) * 1.1),
       xlab = xaxis, ylab = yaxis, pch = 15, col = "black", bty = "l",
       font.lab = 2, cex.lab = 1, cex.axis = 1, cex.main = 1)
  mtext("semi-log plot", side = 3, cex = 0.88)
}
|
113fd1067680a8e54374ee2a2b88411eb1983a58
|
3b1e41af7baf9feec33697fc842268af76625f41
|
/17. Figure S8.R
|
bfb93b5f72ef0718513acfbb5a7e17de5a246c75
|
[] |
no_license
|
Barardo/ASM_HumanLT
|
4daa8b1f659f57d8ce33c916dedea125a6395a5e
|
d6bf86afbf62a615ed9393f8a5d7e062e8e291a3
|
refs/heads/master
| 2023-01-02T11:49:37.071803
| 2020-10-30T09:52:25
| 2020-10-30T09:52:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,322
|
r
|
17. Figure S8.R
|
# Script: builds Figure S8 — nutrient-supply surfaces for the alpha and beta
# mortality parameters (females top, males bottom), one panel per PCF slice.
# Depends on local data files and "scripts/0. Header_Functions.R" (which
# presumably defines ggSurface()) — TODO confirm against that file.
# Clean up
# NOTE(review): rm(list=ls()) and hard-coded setwd() in scripts are
# discouraged; kept here as-is because the script relies on relative paths.
rm(list=ls())
# Working directory
wd<-"/Users/alistairsenior/Dropbox (Sydney Uni)/Human lifetables and Nutrition" # Work iMac
#wd<-"/Users/asenior/Dropbox (Sydney Uni)/Human lifetables and Nutrition" # Macbook
# NOTE(review): this second assignment overwrites the iMac path above —
# machine-switching convenience, apparently intentional.
wd<-"/Users/asenior/Dropbox (Sydney Uni)/Human lifetables and Nutrition"
setwd(wd)
# Load libraries
library(arm)
library(plyr)
library(ggplot2)
library(mgcv)
library(gridExtra)
library(MortalityLaws)
library(Cairo)
source("scripts/0. Header_Functions.R")
# Read in the standard from Wilmoth et al 2012
standard<-read.csv("clean_data/wilmoth_standard.csv")
#################################################
#################### FIGURE S8 ##################
#################################################
# Load the full data
full_data<-read.csv("brass_data/Brass_imputed.csv")
# So the model with macros * time + GDP has the best fit
# AIC_favoured_models comes from the .rdata file; element [[2]] is
# presumably the female model (element [[1]] is used for males below).
load("models/Imputed_AIC_GAMS.rdata")
model_aic<-AIC_favoured_models[[2]]
# Get year of choice surfaces for PCF in females
# Predictions are made for 2016 at the median GDP per capita of that year.
year_plot<-2016
dataset_plot<-full_data[which(full_data$Year == year_plot), ]
med_GDP<-round(median(dataset_plot$GDP_perCapita, na.rm=T))
predict_val<-data.frame(Year=year_plot, GDP_perCapita=med_GDP, Sex=as.factor("Females"))
# Specify the desired layout for the surfaces
# Each entry is c(x-axis nutrient, y-axis nutrient, nutrient held at its median).
XYZ_list<-list()
XYZ_list[[1]]<-c("Protein.kcal", "Carbo.kcal", "Fat.kcal")
XYZ_list[[2]]<-c("Protein.kcal", "Fat.kcal", "Carbo.kcal")
XYZ_list[[3]]<-c("Carbo.kcal", "Fat.kcal", "Protein.kcal")
# Limits for the y.axis
y_limits<-list()
y_limits[[1]]<-c(1000, 2100)
y_limits[[2]]<-c(400, 1600)
y_limits[[3]]<-c(750, 1600)
labels_list<-c("Protein kcal/capita/day", "Carbohydrate kcal/capita/day", "Fat kcal/capita/day")
# Find the min and max set of predictions
# First pass: fit all three surfaces just to record the fitted-value range
# (columns = alpha, beta) so the colour scale can be shared across panels.
mins<-array(NA, c(3,2))
maxs<-mins
for(j in 1:3){
	# Set the parameters for XYZ set j
	XYZ<-XYZ_list[[j]]
	z.val<-round(quantile(dataset_plot[,XYZ[3]])[3])
	gg_surfaces<-ggSurface(GAM=model_aic, data=dataset_plot, XYZ=XYZ, labels=XYZ, exclude=c("s(Country)", "s.1(Country)"), predict_val=predict_val, traits=c("alpha", "beta"), z.val=z.val, y.limits=y_limits[[j]])
	# Save the min and max values for scaling
	mins[j,1]<-min(gg_surfaces[[1]]$data$fit)
	maxs[j,1]<-max(gg_surfaces[[1]]$data$fit)
	mins[j,2]<-min(gg_surfaces[[2]]$data$fit)
	maxs[j,2]<-max(gg_surfaces[[2]]$data$fit)
}
# Find the min and max
min_use<-apply(mins, 2, min)
max_use<-apply(maxs, 2, max)
# Now refit to normalised scaling across surfaces and save those for presentation
# Second pass: rebuild each surface with the common colour limits and add
# the alpha/beta annotation in the top-left corner of each panel.
surfaces_list_F<-list()
for(j in 1:3){
	# Set the parameters for XYZ set j
	XYZ<-XYZ_list[[j]]
	labels<-labels_list[match(XYZ, XYZ_list[[1]])]
	z.val<-round(quantile(dataset_plot[,XYZ[3]])[3])
	# Remake the surfaces sacles by the corss-surface min and max
	surfaces<-ggSurface(GAM=model_aic, data=dataset_plot, XYZ=XYZ, labels=labels, exclude=c("s(Country)", "s.1(Country)"), predict_val=predict_val, traits=c("alpha", "beta"), surf_min=min_use, surf_max=max_use, subtitle=paste0(labels[3], " = ", z.val), z.val=z.val, y.limits=y_limits[[j]])
	# Annotate
	surfaces[[1]]<-surfaces[[1]] + annotate("text", x = floor(min(dataset_plot[,XYZ[1]])), y = max(y_limits[[j]]), label = expression(italic(alpha)~~Females), hjust = 0, vjust = 1, size = 7)
	surfaces[[2]]<-surfaces[[2]] + annotate("text", x = floor(min(dataset_plot[,XYZ[1]])), y = max(y_limits[[j]]), label = expression(italic(beta)~~Females), hjust = 0, vjust = 1, size = 7)
	# Save them
	surfaces_list_F[[j]]<-surfaces
}
################# REPEAT for males
# Get year of choice surfaces for PCF
# Same two-pass procedure as above, now with the male model ([[1]]).
model_aic<-AIC_favoured_models[[1]]
predict_val<-data.frame(Year=year_plot, GDP_perCapita=med_GDP, Sex=as.factor("Males"))
# Find the min and max set of predictions
mins<-array(NA, c(3,2))
maxs<-mins
for(j in 1:3){
	# Set the parameters for XYZ set j
	XYZ<-XYZ_list[[j]]
	z.val<-round(quantile(dataset_plot[,XYZ[3]])[3])
	gg_surfaces<-ggSurface(GAM=model_aic, data=dataset_plot, XYZ=XYZ, labels=XYZ, exclude=c("s(Country)", "s.1(Country)"), predict_val=predict_val, traits=c("alpha", "beta"), z.val=z.val, y.limits=y_limits[[j]])
	# Save the min and max values for scaling
	mins[j,1]<-min(gg_surfaces[[1]]$data$fit)
	maxs[j,1]<-max(gg_surfaces[[1]]$data$fit)
	mins[j,2]<-min(gg_surfaces[[2]]$data$fit)
	maxs[j,2]<-max(gg_surfaces[[2]]$data$fit)
}
# Find the min and max
min_use<-apply(mins, 2, min)
max_use<-apply(maxs, 2, max)
# Now refit to normalised scaling across surfaces and save those for presentation
surfaces_list_M<-list()
for(j in 1:3){
	# Set the parameters for XYZ set j
	XYZ<-XYZ_list[[j]]
	labels<-labels_list[match(XYZ, XYZ_list[[1]])]
	z.val<-round(quantile(dataset_plot[,XYZ[3]])[3])
	# Remake the surfaces sacles by the corss-surface min and max
	surfaces<-ggSurface(GAM=model_aic, data=dataset_plot, XYZ=XYZ, labels=labels, exclude=c("s(Country)", "s.1(Country)"), predict_val=predict_val, traits=c("alpha", "beta"), surf_min=min_use, surf_max=max_use, subtitle=paste0(labels[3], " = ", z.val), z.val=z.val, y.limits=y_limits[[j]])
	# Annotate
	surfaces[[1]]<-surfaces[[1]] + annotate("text", x = floor(min(dataset_plot[,XYZ[1]])), y = max(y_limits[[j]]), label = expression(italic(alpha)~~Males), hjust = 0, vjust = 1, size = 7)
	surfaces[[2]]<-surfaces[[2]] + annotate("text", x = floor(min(dataset_plot[,XYZ[1]])), y = max(y_limits[[j]]), label = expression(italic(beta)~~Males), hjust = 0, vjust = 1, size = 7)
	# Save them
	surfaces_list_M[[j]]<-surfaces
}
################################################
# Now lets arrange all those plots
# 4x3 grid: rows are (F alpha, F beta, M alpha, M beta), columns are the
# three nutrient-pair projections; written as a single PDF page.
CairoPDF("figures/Figure_S8.pdf", height=20, width=15)
grid.arrange(surfaces_list_F[[1]][[1]]+labs(title="A"),
             surfaces_list_F[[2]][[1]]+labs(title="B"),
             surfaces_list_F[[3]][[1]]+labs(title="C"),
             surfaces_list_F[[1]][[2]]+labs(title="D"),
             surfaces_list_F[[2]][[2]]+labs(title="E"),
             surfaces_list_F[[3]][[2]]+labs(title="F"),
             surfaces_list_M[[1]][[1]]+labs(title="G"),
             surfaces_list_M[[2]][[1]]+labs(title="H"),
             surfaces_list_M[[3]][[1]]+labs(title="I"),
             surfaces_list_M[[1]][[2]]+labs(title="J"),
             surfaces_list_M[[2]][[2]]+labs(title="K"),
             surfaces_list_M[[3]][[2]]+labs(title="L"),
             layout_matrix=rbind(c(1,2,3),
                                 c(4,5,6),
                                 c(7,8,9),
                                 c(10,11,12)))
dev.off()
|
4bc18033e1c516287363309049c2b13f307d274d
|
95fe9b8378d3a5d70037ea0fe1e52f8840ac2587
|
/tree/prob.R
|
d369f60169987ce176c24fae28f79efc5753b34f
|
[
"MIT"
] |
permissive
|
tor4z/RKDD98
|
6780f1d9fa8ef68819150010d9b75574d507d44a
|
0e80ffd1c341bd880ff76317c3cc0605836e30f3
|
refs/heads/master
| 2021-01-11T14:13:47.311165
| 2017-02-12T07:15:25
| 2017-02-12T07:15:25
| 81,192,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
prob.R
|
source("./const.R")
# Cross-tabulate one candidate predictor column against the binary target
# TARGET_D2, draw a mosaic plot of that table, then print the column name
# and the chi-squared test of independence.
# Assumes the data frame `cup98pos` exists in the calling environment
# (presumably loaded by const.R — verify there).
cup98.chisqTest <- function(name) {
  contingency <- table(cup98pos[, name], cup98pos$TARGET_D2)
  plot(contingency, main = name, las = 1)
  print(name)
  print(chisq.test(contingency))
}
|
bde4c7de6c4f0c5d2802b8ebd8e60cebe42d7914
|
4a7f4e3c1f830349bbf52fbf73ae4dbc4114375b
|
/tmev_package/mevr/man/predict.mevr.Rd
|
3573922707efa89547a16c6a83f37d9b68a790d1
|
[
"MIT"
] |
permissive
|
Falke96/extreme_precipitation_austria
|
f3f96328db1fd3459f8b6beff57703c0cb2c5e7e
|
e001fcea31c7e1fb2d30bf8ea3a2bb56707a18ec
|
refs/heads/main
| 2023-08-16T23:40:06.534867
| 2023-08-08T13:05:36
| 2023-08-08T13:05:36
| 611,209,754
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,747
|
rd
|
predict.mevr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mevr.R
\name{predict.mevr}
\alias{predict.mevr}
\title{TMEV prediction}
\usage{
\method{predict}{mevr}(object, newdata, term, ...)
}
\arguments{
\item{object}{Object of class \code{mevr}, fitted with the TMEV.}
\item{newdata}{A data frame with the model covariates (year, yday) at which predictions are required.
Note that depending on argument term, only covariates that are needed by the corresponding model terms need to be supplied.
If not supplied, predictions are made on the data supplied by the fitted object \code{x}.}
\item{term}{Character of the model terms for which predictions shall be calculated.
Can only be \code{"year"} or \code{"yday"}. If not specified, predictions for all terms are calculated.}
\item{...}{Arguments passed to prediction functions that are part of a bamlss.family object, i.e., the objects has a $predict() function that should be used instead.}
}
\value{
A data.frame with the supplied covariates and the predicted parameters.
}
\description{
Takes a \code{mevr} object where the TMEV has been fitted to rainfall data and calculates
\code{bamlss} predictions for the distributional parameters and the model terms. Basically
a wrapper to the corresponding function \code{predict.bamlss}
}
\details{
See also the details of \code{\link{ftmev}} for an explanation of the model terms used to fit the temporal trend
of the Weibull parameters.
}
\examples{
data(dailyrainfall)
# restrict for the sake of speed
idx <- which(as.POSIXlt(dailyrainfall$date)$year + 1900 < 1976)
data <- dailyrainfall[idx, ]
f <- ftmev(data, minyears = 5)
predict(f, term = "year")
}
\seealso{
\code{\link{ftmev}}, \code{\link{predict.bamlss}}
}
|
486a7f98718904af9001b6ce7e27e6dced090668
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.management/man/cloudtrail_list_import_failures.Rd
|
36a7ad243a01bf2fe846f8fb5d793d7cb44c63a7
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 742
|
rd
|
cloudtrail_list_import_failures.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudtrail_operations.R
\name{cloudtrail_list_import_failures}
\alias{cloudtrail_list_import_failures}
\title{Returns a list of failures for the specified import}
\usage{
cloudtrail_list_import_failures(ImportId, MaxResults = NULL, NextToken = NULL)
}
\arguments{
\item{ImportId}{[required] The ID of the import.}
\item{MaxResults}{The maximum number of failures to display on a single page.}
\item{NextToken}{A token you can use to get the next page of import failures.}
}
\description{
Returns a list of failures for the specified import.
See \url{https://www.paws-r-sdk.com/docs/cloudtrail_list_import_failures/} for full documentation.
}
\keyword{internal}
|
f7a0e183ef0eebaf0ca033f7f936aa3c79380069
|
265606ff19843908088e74461a2e3f2c2153cdb5
|
/Section_4_Cross_validation.R
|
eb1ca4d6fdcfcd3f875823f894ea78bd8805bbc8
|
[] |
no_license
|
masa951125/machine_learning
|
474b598a94f48c690d2bf60f3884d22112e76d8b
|
b0e647c4d61a2604d4a3cac0c7f0c2ad3ee3ba48
|
refs/heads/main
| 2023-05-03T02:32:42.272870
| 2021-05-26T14:06:09
| 2021-05-26T14:06:09
| 343,765,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,491
|
r
|
Section_4_Cross_validation.R
|
# Course-exercise script (HarvardX ML, Section 4): cross-validation and
# bootstrap comprehension checks. Results depend on the exact seeds and on
# sample.kind="Rounding" (pre-3.6 RNG behaviour), so statement order matters.
library(tidyverse)
library(caret)
# Simulate p = 10,000 uninformative predictors for n = 1,000 random labels;
# any apparent accuracy from selected predictors is overfitting.
set.seed(1996, sample.kind="Rounding")
n <- 1000
p <- 10000
x <- matrix(rnorm(n*p), n, p)
colnames(x) <- paste("x", 1:ncol(x), sep = "_")
y <- rbinom(n, 1, 0.5) %>% factor()
x_subset <- x[ ,sample(p, 100)]
fit <- train(x_subset,y, method="glm")
fit
# NOTE(review): install.packages()/BiocManager::install() inside a script
# re-installs on every run; normally done once interactively.
install.packages("BiocManager")
BiocManager::install("genefilter")
library(genefilter)
# Select "significant" predictors using the whole data set — this leaks the
# outcome into predictor selection (the point of the exercise).
tt <- colttests(x, y)
ind <- which(tt$p.value <0.01)
length(ind)
x_subset <- x[,ind]
x_subset
fit <- train(x_subset,y, method="glm")
fit
fit <- train(x_subset, y, method= "knn",
             tuneGrid =data.frame(k = seq(101, 301, 25)))
# Correct approach: redo predictor selection inside each resample so the
# test fold never informs the selection.
indexes <- createDataPartition(y, times = 5, p = 0.2)
dat <- data.frame(y=y, data.frame(x))
res <- sapply(indexes, function(test_index){
  train_set <- slice(dat, -test_index)
  test_set <- slice(dat, test_index)
  pvals <- colttests(as.matrix(train_set[,-1]), train_set$y)$p.value
  # Leading TRUE keeps the outcome column y when subsetting dat's columns.
  ind <- c(TRUE, pvals <= 0.01)
  train_set <- train_set[, ind]
  fit <- glm(y ~ ., data = train_set, family = "binomial")
  y_hat <- ifelse(predict(fit, newdata = test_set[, ind], type = "response") > 0.5, 1, 0) %>%
    factor()
  mean(y_hat == test_set$y)
})
res
# kNN on the dslabs tissue_gene_expression data, tuning k over 1,3,5,7.
fit <- train(tissue_gene_expression$x, tissue_gene_expression$y,
             method="knn", tuneGrid =data.frame(k = seq(1,7,2)))
max(fit$results$Accuracy)
#bootstrap
# Monte Carlo sampling distribution of the sample median of a log-normal
# income population, then the bootstrap approximation from one sample X.
n <- 10^6
income <- 10^(rnorm(n, log10(45000), log10(3)))
qplot(log10(income), bins = 30, color = I("black"))
m <- median(income)
m
set.seed(1, sample.kind="Rounding")
N <- 250
X <- sample(income, N)
M<- median(X)
M
library(gridExtra)
B <- 10^5
M <- replicate(B, {
  X <- sample(income, N)
  median(X)
})
p1 <- qplot(M, bins = 30, color = I("black"))
p2 <- qplot(sample = scale(M)) + geom_abline()
grid.arrange(p1, p2, ncol = 2)
p1
B <- 10^5
M_star <- replicate(B, {
  X_star <- sample(X, N, replace = TRUE)
  median(X_star)
})
# Bootstrap quantiles should track the Monte Carlo quantiles.
tibble(monte_carlo = sort(M), bootstrap = sort(M_star)) %>%
  qplot(monte_carlo, bootstrap, data = .) +
  geom_abline()
quantile(M, c(0.05, 0.95))
quantile(M_star, c(0.05, 0.95))
median(X) + 1.96 * sd(X) / sqrt(N) * c(-1, 1)
mean(M) + 1.96 * sd(M) * c(-1,1)
mean(M_star) + 1.96 * sd(M_star) * c(-1, 1)
#comprehension check
library(dslabs)
library(caret)
data(mnist_27)
set.seed(1995, sample.kind="Rounding")
indexes <- createResample(mnist_27$train$y, 10)
sum(indexes$Resample01 == 4)
sum(indexes ==3)
# Long-hand count of the value 3 across the 10 resamples...
sum(indexes[[1]]==3) +
  sum(indexes[[2]]==3)+
  sum(indexes[[3]]==3)+
  sum(indexes[[4]]==3)+
  sum(indexes[[5]]==3)+
  sum(indexes[[6]]==3)+
  sum(indexes[[7]]==3)+
  sum(indexes[[8]]==3)+
  sum(indexes[[9]]==3)+
  sum(indexes[[10]]==3)
# ...and the equivalent sapply version.
x <- sapply(indexes, function(ind){
  sum(ind == 3)
})
sum(x)
# Expected 75th percentile of N(0,1) vs one sample estimate.
y <- rnorm(100, 0, 1)
qnorm(0.75)
quantile(y, 0.75)
# Monte Carlo distribution of the sample 75th percentile.
set.seed(1, sample.kind="Rounding")
dat <- replicate(10000,{
  y <-rnorm(100,0,1)
  quantile(y, 0.75)
})
mean(dat)
sd(dat)
#Q4
# Bootstrap the 75th percentile from a single sample q, using only the
# first and tenth resamples (N <- c(1,10)).
set.seed(NULL)
set.seed(1, sample.kind = "Rounding")
q <- rnorm(100,0,1)
set.seed(1, sample.kind = "Rounding")
ind <- createResample(q, 10)
ind.data <-as.data.frame(ind)
N <- c(1,10)
q_75 <-sapply(N, function(k){
  quantile(q[ind.data[,k]],0.75)
})
mean(q_75)
sd(q_75)
#model method
# NOTE(review): this indexes y[k], but the resample indices in `ind` were
# drawn from q; presumably q[k] was intended (y was reassigned above) —
# verify against the course solution.
q_75_answer <- sapply(ind, function(k){
  q_star <- y[k]
  quantile(q_star, 0.75)
})
#Q5
# Same bootstrap with 10,000 resamples.
set.seed(1, sample.kind = "Rounding")
ind_10000 <- createResample(q,10000)
ind_10000_data <-as.data.frame(ind_10000)
N <- c(1:10000)
q_75 <-sapply(N, function(k){
  quantile(q[ind_10000_data[,k]],0.75)
})
mean(q_75)
sd(q_75)
|
9a73bd7ff666ed00db435ee19af5cacc498cd462
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/KoulMde/R/MdeFunc31.R
|
7def768aaf2e01b27c7e201afb2e750247c47226
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,293
|
r
|
MdeFunc31.R
|
#' Minimum distance estimation in linear regression model.
#'
#' Estimates the regression coefficients in the model \eqn{Y=X\beta + \epsilon}.
#'@param Y - Vector of response variables in linear regression model.
#'@param X - Design matrix of explanatory variables in linear regression model.
#'@param D - Weight Matrix. Dimension of D should match that of X. Default value is XA where A=(X'X)^(-1/2).
#'@param b0 - Initial value for beta.
#'@param IntMeasure - Symmetric and \eqn{\sigma}-finite measure: Lebesgue, Degenerate, and Robust
#'@param TuningConst - Used only for Robust measure.
#'@return betahat - Minimum distance estimator of \eqn{\beta}.
#'@return residual - Residuals after minimum distance estimation.
#'@return ObjVal - Value of the objective function at minimum distance estimator.
#'@examples
#'####################
#'n <- 10
#'p <- 3
#'X <- matrix(runif(n*p, 0,50), nrow=n, ncol=p) #### Generate n-by-p design matrix X
#'beta <- c(-2, 0.3, 1.5) #### Generate true beta = (-2, 0.3, 1.5)'
#'eps <- rnorm(n, 0,1) #### Generate errors from N(0,1)
#'Y <- X%*%beta + eps
#'
#'D <- "default" #### Use the default weight matrix
#'b0 <- solve(t(X)%*%X)%*%(t(X)%*%Y) #### Set initial value for beta
#'IntMeasure <- "Lebesgue" ##### Define Lebesgue measure
#'
#'
#'MDEResult <- KoulLrMde(Y,X,D, b0, IntMeasure, TuningConst=1.345)
#'
#'betahat <- MDEResult$betahat ##### Obtain minimum distance estimator
#'resid <- MDEResult$residual ##### Obtain residual
#'objVal <- MDEResult$ObjVal ##### Obtain the value of the objective function
#'
#'
#'IntMeasure <- "Degenerate" ##### Define degenerate measure at 0
#'
#'MDEResult <- KoulLrMde(Y,X,D, b0, IntMeasure, TuningConst=1.345)
#'betahat <- MDEResult$betahat ##### Obtain minimum distance estimator
#'resid <- MDEResult$residual ##### Obtain residual
#'objVal <- MDEResult$ObjVal ##### Obtain the value of the objective function
#'
#'
#'
#'IntMeasure <- "Robust" ##### Define "Robust" measure
#'TuningConst <- 3 ##### Define the tuning constant
#'MDEResult <- KoulLrMde(Y,X,D, b0, IntMeasure, TuningConst)
#'
#'
#'betahat <- MDEResult$betahat ##### Obtain minimum distance estimator
#'resid <- MDEResult$residual ##### Obtain residual
#'objVal <- MDEResult$ObjVal ##### Obtain the value of the objective function
#'@references
#'[1] Kim, J. (2018). A fast algorithm for the coordinate-wise minimum distance estimation. J. Stat. Comput. Simul., 3: 482 - 497
#'@references
#'[2] Kim, J. (2020). Minimum distance estimation in linear regression model with strong mixing errors. Commun. Stat. - Theory Methods., 49(6): 1475 - 1494
#'@references
#'[3] Koul, H. L (1985). Minimum distance estimation in linear regression with unknown error distributions. Statist. Probab. Lett., 3: 1-8.
#'@references
#'[4] Koul, H. L (1986). Minimum distance estimation and goodness-of-fit tests in first-order autoregression. Ann. Statist., 14 1194-1213.
#'@references
#'[5] Koul, H. L (2002). Weighted empirical process in nonlinear dynamic models. Springer, Berlin, Vol. 166
#'@export
#'@seealso KoulArMde() and Koul2StageMde()
#'@importFrom Rcpp evalCpp
#'@importFrom expm sqrtm
#'@useDynLib KoulMde
######################################################################
KoulLrMde <- function(Y, X, D, b0, IntMeasure, TuningConst = 1.345){

  # Validate argument count (D, b0, IntMeasure are required; TuningConst
  # is optional).
  if ((nargs() != 5) && (nargs() != 6)) {
    message("Number of arguments should be five or six.")
    stop()
  }

  # The tuning constant is only used by the Robust measure; invalid values
  # fall back to the default 1.345 with an explanatory message.
  if (IntMeasure == "Robust") {
    if (is.numeric(TuningConst) == FALSE) {
      message("Tuning constant should be numeric. Default value will be tried.")
      TuningConst <- 1.345
    } else {
      if (TuningConst <= 0) {
        message("Tuning constant should be positive. Default value will be tried.")
        TuningConst <- 1.345
      }
    }
  }

  if (is.vector(X) == TRUE) {
    # ----- X supplied as a vector: treat as an n-by-1 design matrix -----
    nXRow <- length(X)
    nXCol <- 1
    LengY <- length(Y)

    XMat <- matrix(X, nXRow, nXCol)
    if (nXRow != LengY) {
      message("Dimension of X does not match dimension of Y.")
      stop()
    }

    # GENERALIZATION: D = "default" now also works when X is a vector
    # (previously this path errored with a dimension message). Mirrors the
    # matrix branch: D = X A with A = (X'X)^(-1/2).
    if (is.character(D) && length(D) == 1 && D == "default") {
      tempA <- t(XMat) %*% XMat
      A <- sqrtm(solve(tempA))
      D <- as.vector(XMat %*% A)
    }

    if (is.vector(D) == TRUE) {
      nDRow <- length(D)
      nDCol <- 1
      DMat <- matrix(D, nDRow, nDCol)
    } else {
      message("When X is a vector, D should be a vector too.")
      stop()
    }
    if (nDRow != nXRow) {
      str <- paste("D should be ", nXRow, "-by-1 vector.")
      message(str)
      stop()
    }

  } else if (is.matrix(X) == TRUE) {
    # ----- X supplied as an n-by-p design matrix ------------------------
    DimMat <- dim(X)
    LengY <- length(Y)
    nXRow <- DimMat[1]
    nXCol <- DimMat[2]
    XMat <- X

    # BUG FIX: the original tested is.matrix(D) first and then evaluated
    # `else if (D == "default")`; for a non-matrix, non-scalar D that is a
    # length > 1 condition (an error in recent R). Test for the "default"
    # sentinel explicitly before rejecting non-matrix D.
    if (is.character(D) && length(D) == 1 && D == "default") {
      # Default weight: D = X A where A = (X'X)^(-1/2), via expm::sqrtm.
      tempA <- (t(X) %*% X)
      A <- sqrtm(solve(tempA))
      D <- X %*% A
    } else if (is.matrix(D) == FALSE) {
      message("D should be a matrix.")
      stop()
    }

    DMat <- D
    DDimMat <- dim(D)
    nDRow <- DDimMat[1]
    nDCol <- DDimMat[2]

    if (nXRow != LengY) {
      message("Dimension of X does not match dimension of Y.")
      stop()
    }
    if (nXCol != length(b0)) {
      message("b0 is not conformable to X.")
      stop()
    }
    if ((nXRow != nDRow) || ((nXCol != nDCol))) {
      message("Dimesion of D should match dimension of X.")
      stop()
    }
  } else {
    message("X is not a valid design matrix.")
    stop()
  }

  # Optimization settings handed to the compiled routine.
  iter <- 3000
  critVal <- 0.001

  # Map the integrating measure to the integer code expected by
  # EstimateBetaMDESimple().
  type <- 0
  if (IntMeasure == "Lebesgue") {
    type <- 1
  } else if (IntMeasure == "Robust") {
    type <- 2
  } else if (IntMeasure == "Degenerate") {
    type <- 3
  } else {
    message("Integrating measure should be Lebesgue, Degenerate or Robust.")
    stop()
  }

  YMat <- matrix(Y, LengY, 1)
  b0Mat <- matrix(b0, nXCol, 1)

  # Compiled workhorse (Rcpp): returns c(betahat, objective value).
  bhat_ObjVal <- EstimateBetaMDESimple(YMat, XMat, DMat, b0Mat, iter, critVal, type, TuningConst)
  bhat <- bhat_ObjVal[1:nXCol]
  ObjVal <- bhat_ObjVal[(nXCol + 1)]

  # Residuals. (The original wrapped this in an if (is.vector(X)) / else
  # whose two branches were textually identical — collapsed to one line.)
  res <- YMat - XMat %*% bhat

  lst <- list(betahat = bhat, residual = res, ObjVal = ObjVal)
  return(lst)
}
####################################
#' Minimum distance estimation in the autoregression model of the known order.
#'
#' Estimates the autoregressive coefficients in the \eqn{X_t = \rho' Z_t + \xi_t } where \eqn{Z_t} is the vector of \eqn{q} observations at times \eqn{t-1,...,t-q}.
#'@param X - Vector of \code{n} observed values.
#'@param AR_Order - Order of the autoregression model.
#'@param IntMeasure - Symmetric and \eqn{\sigma}-finite measure: Lebesgue, Degenerate, and Robust
#'@param TuningConst - Used only for Robust measure.
#'@return rhohat - Minimum distance estimator of \eqn{\rho}.
#'@return residual - Residuals after minimum distance estimation.
#'@return ObjVal - Value of the objective function at minimum distance estimator.
#'@examples
#'##### Generate stationary AR(2) process with 10 observations
#'n <- 10
#'q <- 2
#'rho <- c(-0.2, 0.8) ##### Generate true parameters rho = (-0.2, 0.8)'
#'eps <- rnorm(n, 0,1) ##### Generate innovations from N(0,1)
#'X <- rep(0, times=n)
#'for (i in 1:n){
#' tempCol <- rep(0, times=q)
#' for (j in 1:q){
#' if(i-j<=0){
#' tempCol[j] <- 0
#' }else{
#' tempCol[j] <- X[i-j]
#' }
#' }
#'X[i] <- t(tempCol)%*% rho + eps[i]
#'}
#'
#'IntMeasure <- "Lebesgue" ##### Define Lebesgue measure
#'
#'MDEResult <- KoulArMde(X, q, IntMeasure, TuningConst=1.345)
#'rhohat <- MDEResult$rhohat ##### Obtain minimum distance estimator
#'resid <- MDEResult$residual ##### Obtain residual
#'objVal <- MDEResult$ObjVal ##### Obtain the value of the objective function
#'
#'
#'IntMeasure <- "Degenerate" ##### Define degenerate measure at 0
#'MDEResult <- KoulArMde(X, q, IntMeasure, TuningConst=1.345)
#'rhohat <- MDEResult$rhohat ##### Obtain minimum distance estimator
#'resid <- MDEResult$residual ##### Obtain residual
#'objVal <- MDEResult$ObjVal ##### Obtain the value of the objective function
#'
#'
#'IntMeasure <- "Robust" ##### Define "Robust" measure at 0
#'TuningConst <- 3 ##### Define the tuning constant
#'MDEResult <- KoulArMde(X, q, IntMeasure, TuningConst)
#'
#'resid <- MDEResult$residual ##### Obtain residual
#'objVal <- MDEResult$ObjVal ##### Obtain the value of the objective function
#'
#'@references
#'[1] Kim, J. (2018). A fast algorithm for the coordinate-wise minimum distance estimation. J. Stat. Comput. Simul., 3: 482 - 497
#'@references
#'[2] Kim, J. (2020). Minimum distance estimation in linear regression model with strong mixing errors. Commun. Stat. - Theory Methods., 49(6): 1475 - 1494
#'@references
#'[3] Koul, H. L (1985). Minimum distance estimation in linear regression with unknown error distributions. Statist. Probab. Lett., 3: 1-8.
#'@references
#'[4] Koul, H. L (1986). Minimum distance estimation and goodness-of-fit tests in first-order autoregression. Ann. Statist., 14 1194-1213.
#'@references
#'[5] Koul, H. L (2002). Weighted empirical process in nonlinear dynamic models. Springer, Berlin, Vol. 166
#'@export
#'@seealso KoulLrMde() and Koul2StageMde()
KoulArMde <- function(X, AR_Order, IntMeasure, TuningConst = 1.345){

  # The tuning constant is only used by the Robust measure; invalid values
  # fall back to the default 1.345 with an explanatory message.
  if (IntMeasure == "Robust") {
    if (is.numeric(TuningConst) == FALSE) {
      message("Tuning constant should be numeric. Default value will be tried.")
      TuningConst <- 1.345
    } else {
      if (TuningConst <= 0) {
        message("Tuning constant should be positive. Default value will be tried.")
        TuningConst <- 1.345
      }
    }
  }

  # Validate the integrating measure once and map it to the integer code
  # expected by EstimateBetaMDESimple(). (The original validated it twice —
  # first via an `Hx` alias and again when assigning `type` — so the second
  # else branch was unreachable; consolidated here.)
  if (IntMeasure == "Lebesgue") {
    type <- 1
  } else if (IntMeasure == "Robust") {
    type <- 2
  } else if (IntMeasure == "Degenerate") {
    type <- 3
  } else {
    message("Integrating measure should be Lebesgue, Degenerate or Robust.")
    stop()
  }

  nLength <- length(X)
  if (nLength <= AR_Order) {
    message("Length of vector X should be greater than AR_Order.")
    stop()
  }

  # Build the lagged regression form of the AR(q) model: Xres[i] holds the
  # response X_t (in reverse time order) and row i of Xexp holds the q
  # preceding observations; Dmat is the scaled copy of Xexp used as the
  # MDE weight matrix.
  Xres <- rep(0, times = (nLength - AR_Order))
  tempvec <- rep(0, times = AR_Order * (nLength - AR_Order))
  Xexp <- matrix(tempvec, nrow = (nLength - AR_Order), ncol = AR_Order)
  Dmat <- matrix(tempvec, nrow = (nLength - AR_Order), ncol = AR_Order)

  for (i in 1:(nLength - AR_Order)) {
    Xres[i] <- X[nLength - (i - 1)]
    for (j in 1:AR_Order) {
      Xexp[i, j] <- X[nLength - (i + j - 1)]
      Dmat[i, j] <- X[nLength - (i + j - 1)] / sqrt(nLength - AR_Order)
    }
  }
  XresMat <- matrix(Xres, (nLength - AR_Order), 1)

  # Initial value: least squares when X'X is away from singular, otherwise
  # a flat 0.5 vector. (The 0.01 determinant threshold is inherited from
  # the original implementation.)
  tempdet <- det(t(Xexp) %*% Xexp)
  if (tempdet < 0.01) {
    rho0 <- 0.5 * rep(1, times = AR_Order)
  } else {
    rho0 <- solve(t(Xexp) %*% Xexp) %*% (t(Xexp) %*% Xres)
  }
  rho0Mat <- matrix(rho0, AR_Order, 1)

  # Optimization settings handed to the compiled routine.
  iter <- 1000
  critVal <- 0.001

  # Compiled workhorse (Rcpp): returns c(rhohat, objective value).
  rhohat_ObjVal <- EstimateBetaMDESimple(XresMat, Xexp, Dmat, rho0Mat, iter, critVal, type, TuningConst)
  rho_hat <- rhohat_ObjVal[1:AR_Order]
  ObjVal <- rhohat_ObjVal[(AR_Order + 1)]

  resid <- XresMat - Xexp %*% rho_hat
  lst <- list(rhohat = rho_hat, residual = resid, ObjVal = ObjVal)
  return(lst)
}
#'Two-stage minimum distance estimation in linear regression model with autoregressive error.
#'
#'Estimates both regression and autoregressive coefficients in the model \eqn{Y=X\beta + \epsilon} where \eqn{\epsilon} is autoregressive process of known order \code{q}
#'@param Y - Vector of response variables in linear regression model.
#'@param X - Design matrix of explanatory variables in linear regression model.
#'@param D - Weight Matrix. Dimension of D should match that of X. Default value is XA where A=(X'X)^(-1/2).
#'@param b0 - Initial value for beta.
#'@param RegIntMeasure - Symmetric and \eqn{\sigma}-finite measure used for estimating \eqn{\beta}: Lebesgue, Degenerate or Robust.
#'@param AR_Order - Order of the autoregressive error.
#'@param ArIntMeasure - Symmetric and \eqn{\sigma}-finite measure used for estimating autoregressive coefficients of the error: Lebesgue, Degenerate or Robust.
#'@param TuningConst - Used only for Robust measure.
#'@return MDE1stage - The list of the first stage minimum distance estimation result. It contains betahat1stage, residual1stage, and rho1stage.
#'\itemize{
#' \item betahat1stage - The first stage minimum distance estimators of regression coefficients.
#' \item residual1stage - Residuals after the first stage minimum distance estimation.
#' \item rho1stage - The first stage minimum distance estimators of autoregressive coefficients of the error.
#'}
#'@return MDE2stage - The list of the second stage minimum distance estimation result. It contains betahat2stage, residual2stage, and rho2stage.
#'\itemize{
#' \item betahat2stage - The second stage minimum distance estimators of regression coefficients.
#' \item residual2stage - Residuals after the second stage minimum distance estimation.
#' \item rho2stage - The second stage minimum distance estimators of autoregressive coefficients of the error.
#'}
#'@examples
#'####################
#'n <- 10
#'p <- 3
#'X <- matrix(runif(n*p, 0,50), nrow=n, ncol=p) #### Generate n-by-p design matrix X
#'beta <- c(-2, 0.3, 1.5) #### Generate true beta = (-2, 0.3, 1.5)'
#'rho <- 0.4 #### True rho = 0.4
#'eps <- vector(length=n)
#'xi <- rnorm(n, 0,1) #### Generate innovation from N(0,1)
#' #### Generate autoregressive process of order 1
#'for(i in 1:n){
#' if(i==1){eps[i] <- xi[i]}
#' else{eps[i] <- rho*eps[i-1] + xi[i]}
#'}
#'Y <- X%*%beta + eps
#'#####################
#'D <- "default" #### Use the default weight matrix
#'b0 <- solve(t(X)%*%X)%*%(t(X)%*%Y) #### Set initial value for beta
#'
#'IntMeasure <- "Lebesgue" ##### Define Lebesgue measure
#'MDEResult <- Koul2StageMde(Y,X, "default", b0, IntMeasure, 1, IntMeasure, TuningConst = 1.345)
#'MDE1stageResult <- MDEResult[[1]]
#'MDE2stageResult <- MDEResult[[2]]
#'
#'beta1 <- MDE1stageResult$betahat1stage
#'residual1 <- MDE1stageResult$residual1stage
#'rho1 <- MDE1stageResult$rhohat1stage
#'
#'beta2 <- MDE2stageResult$betahat2stage
#'residual2 <- MDE2stageResult$residual2stage
#'rho2 <- MDE2stageResult$rhohat2stage
#'@references
#'[1] Kim, J. (2018). A fast algorithm for the coordinate-wise minimum distance estimation. J. Stat. Comput. Simul., 3: 482 - 497
#'@references
#'[2] Kim, J. (2020). Minimum distance estimation in linear regression model with strong mixing errors. Commun. Stat. - Theory Methods., 49(6): 1475 - 1494
#'@references
#'[3] Koul, H. L (1985). Minimum distance estimation in linear regression with unknown error distributions. Statist. Probab. Lett., 3: 1-8.
#'@references
#'[4] Koul, H. L (1986). Minimum distance estimation and goodness-of-fit tests in first-order autoregression. Ann. Statist., 14 1194-1213.
#'@references
#'[5] Koul, H. L (2002). Weighted empirical process in nonlinear dynamic models. Springer, Berlin, Vol. 166
#'@seealso KoulArMde() and KoulLrMde()
#'@export
Koul2StageMde <- function(Y,X,D, b0, RegIntMeasure, AR_Order, ArIntMeasure, TuningConst=1.345){
  # Two-stage MDE for a linear model with AR(q) errors: estimate beta and rho,
  # pre-whiten (Y, X) with the stage-1 rho, then re-estimate on the
  # transformed data.  Returns list(MDE1stage, MDE2stage).
  dims <- dim(X)
  nObs <- dims[1]
  nPar <- dims[2]

  # A tuning constant is only consumed by the robust measure; repair invalid
  # input by falling back to the default 1.345.
  if (RegIntMeasure == "Robust" || ArIntMeasure == "Robust") {
    if (!is.numeric(TuningConst)) {
      message("Tuning constant should be numeric. Default value will be tried.")
      TuningConst <- 1.345
    } else if (TuningConst <= 0) {
      message("Tuning constant should be positive. Default value will be tried.")
      TuningConst <- 1.345
    }
  }

  # ---- Stage 1: MDE on the raw data, then fit AR(q) to its residuals. ----
  stage1 <- KoulLrMde(Y, X, D, b0, RegIntMeasure, TuningConst)
  bhat1 <- stage1$betahat
  res1 <- stage1$residual
  obj1 <- stage1$ObjVal

  arFit1 <- KoulArMde(res1, AR_Order, ArIntMeasure, TuningConst)
  rhohat1 <- arFit1$rhohat

  MDE1 <- list(betahat1stage = bhat1, residual1stage = res1,
               rhohat1stage = rhohat1, ObjVal1 = obj1)

  # ---- Stage 2: pre-whiten with rhohat1 and re-run the regression MDE. ----
  nEff <- nObs - AR_Order
  Ytilde <- numeric(nEff)
  Xtilde <- matrix(0, nrow = nEff, ncol = nPar)
  for (j in seq_len(nEff)) {
    # Rows q+j-1, ..., j : the q lags entering observation q+j.
    lagRows <- AR_Order + j - seq_len(AR_Order)
    Xtilde[j, ] <- X[AR_Order + j, ] -
      as.vector(crossprod(rhohat1, X[lagRows, , drop = FALSE]))
    Ytilde[j] <- Y[AR_Order + j] - sum(rhohat1 * Y[lagRows])
  }

  stage2 <- KoulLrMde(Ytilde, Xtilde, D, bhat1, RegIntMeasure, TuningConst)
  bhat2 <- stage2$betahat
  obj2 <- stage2$ObjVal
  # Residuals are taken on the ORIGINAL (untransformed) data.
  res2 <- Y - X %*% bhat2

  arFit2 <- KoulArMde(res2, AR_Order, ArIntMeasure, TuningConst)

  MDE2 <- list(betahat2stage = bhat2, residual2stage = res2,
               rhohat2stage = arFit2$rhohat, ObjVal2 = obj2)

  list(MDE1stage = MDE1, MDE2stage = MDE2)
}
#' Detecting Non-numeric Values.
#'
#' Check whether or not an input matrix includes any non-numeric values (NA, NULL, "", character, etc) before being used for training. If any non-numeric values exist, then TrainBuddle() or FetchBuddle() will return non-numeric results.
#'@param X an n-by-p matrix.
#'
#'@return A list of (n+1) values where n is the number of non-numeric values. The first element of the list is n, and all other elements are entries of X where non-numeric values occur. For example, when the (1,1)th and the (2,3)th entries of a 5-by-5 matrix X are non-numeric, then the list returned by CheckNonNumeric() will contain 2, (1,1), and (2,3).
#'
#'@examples
#'
#'n = 5;
#'p = 5;
#'X = matrix(0, n, p) #### Generate a 5-by-5 matrix which includes two NA's.
#'X[1,1] = NA
#'X[2,3] = NA
#'
#'lst = CheckNonNumeric(X)
#'
#'lst
#'
#'@export
CheckNonNumeric <- function(X) {
  # Scan an n-by-p matrix for non-numeric entries (NA, NULL, character, ...).
  #
  # Returns a list whose first element is the count of offending entries;
  # each subsequent element is the c(row, col) index of one offending entry,
  # in row-major scan order.
  #
  # Fix over the original: seq_len() instead of 1:n / 1:p, so a matrix with
  # zero rows or columns is handled cleanly instead of indexing out of bounds.
  dimm <- dim(X)
  n <- dimm[1]
  p <- dimm[2]

  nInc <- 0       # number of non-numeric entries found so far
  lst <- list()   # slot 1 reserved for the count; indices appended after it
  nIndex <- 2

  for (i in seq_len(n)) {
    for (j in seq_len(p)) {
      val <- X[i, j]
      if (is.na(val) || is.null(val) || !is.numeric(val)) {
        nInc <- nInc + 1
        lst[[nIndex]] <- c(i, j)
        nIndex <- nIndex + 1
      }
    }
  }

  lst[[1]] <- nInc
  return(lst)
}
|
f1aa84a074ecc65aef71ceebd543d41f944817e9
|
2a811f8be6af323af6d0fe823a3f1e62b2b79a46
|
/man/plot.hglasso.Rd
|
1c2109839655de47e0eef1a65d111d141a7615b3
|
[] |
no_license
|
cran/hglasso
|
681f0fe36f1a10c5e53dbf144d144b13a4a5111d
|
da7e8d5f000b176dffff2d68d5a0118ea78e1d24
|
refs/heads/master
| 2022-06-01T20:17:07.281739
| 2022-05-13T07:20:02
| 2022-05-13T07:20:02
| 18,368,817
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,025
|
rd
|
plot.hglasso.Rd
|
\name{plot.hglasso}
\alias{plot.hglasso}
\alias{plot.hcov}
\alias{plot.hbn}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot an object of class \code{hglasso}, \code{hcov}, or \code{hbn}
}
\description{
This function plots an object hglasso or hcov --- graphical representation of the estimated inverse covariance matrix from \code{\link{hglasso}}, covariance matrix from \code{\link{hcov}}, or binary network from \code{\link{hbn}}
}
\usage{
\method{plot}{hglasso}(x, layout=NULL,\dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
an object of class \code{\link{hglasso}}, \code{\link{hcov}}, or \code{\link{hbn}}.
}
\item{layout}{
the layout of the graph to use. If not specified, \code{\link{layout.kamada.kawai}} is used.
}
\item{\dots}{
additional parameters to be passed to \code{\link{plot.igraph}}.
}
}
\details{
This function plots a graphical representation of the estimated inverse covariance matrix or covariance matrix. The hubs are colored in red and have a large vertex size. Feature indices for hubs are shown.
}
\references{
Tan et al. (2014). Learning graphical models with hubs. To appear in Journal of Machine Learning Research. arXiv.org/pdf/1402.7349.pdf.
}
\author{
Kean Ming Tan
}
\seealso{
\code{\link{image.hglasso}}
\code{\link{summary.hglasso}}
\code{\link{hglasso}}
\code{\link{hcov}}
\code{\link{hbn}}
}
\examples{
##############################################
# Example from Figure 1 in the manuscript
# A toy example to illustrate the results from
# Hub Graphical Lasso
##############################################
library(mvtnorm)
set.seed(1)
n=100
p=100
# A network with 4 hubs
Theta<-HubNetwork(p,0.99,4,0.1)$Theta
# Generate data matrix x
x <- rmvnorm(n,rep(0,p),solve(Theta))
x <- scale(x)
# Run Hub Graphical Lasso to estimate the inverse covariance matrix
res1 <- hglasso(cov(x),0.3,0.3,1.5)
# Graphical representation of the estimated Theta
plot(res1,main="conditional independence graph")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.