blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d43463d9d2268247051ccea445444689d091aace
|
7f2f87ede305731816bbbb76e2d7fd605e0d966f
|
/analise-preditiva-credit-card.r
|
c9ca1d7b34ade54d9b51ea4bc4becda1a8b663eb
|
[] |
no_license
|
junqueira/azure-ml
|
ee9b0111d2dc58255afc765037786c58d9ab8e34
|
ef2c2c154bcbfa7c295abc79f2b601093316735b
|
refs/heads/master
| 2020-03-26T19:04:42.646531
| 2018-09-02T13:47:54
| 2018-09-02T13:47:54
| 145,247,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,994
|
r
|
analise-preditiva-credit-card.r
|
library("RCurl")
library("rjson")

# Accept SSL certificates issued by public Certificate Authorities
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))

# Gatherers that accumulate the HTTP response body and response headers.
h <- basicTextGatherer()
hdr <- basicHeaderGatherer()

# Request payload for the Azure ML web service: 21 feature columns and two
# identical example rows (German-credit-style coded values, e.g. "A11").
req <- list(
  Inputs = list(
    "input1" = list(
      "ColumnNames" = list("Col1", "Col2", "Col3", "Col4", "Col5", "Col6", "Col7", "Col8", "Col9", "Col10", "Col11", "Col12", "Col13", "Col14", "Col15", "Col16", "Col17", "Col18", "Col19", "Col20", "Col21"),
      "Values" = list( list( "A11", "6", "A34", "A43", "1169", "A65", "A75", "4", "A93", "A101", "4", "A121", "67", "A143", "A152", "2", "A173", "1", "A192", "A201", "1" ), list( "A11", "6", "A34", "A43", "1169", "A65", "A75", "4", "A93", "A101", "4", "A121", "67", "A143", "A152", "2", "A173", "1", "A192", "A201", "1" ) )
    ) ),
  GlobalParameters = setNames(fromJSON('{}'), character(0))
)

body <- enc2utf8(toJSON(req))

# SECURITY NOTE(review): do not commit real API keys; load the key from an
# environment variable (Sys.getenv) instead.
api_key <- "vLwltwL06uyeCcREsvzohAFJyMmnanSvShhJDaAoM2QSmCIZ9LlMtZXM19K6EzridYbQMrIRh9BNfqnuxc2Q5g==" # Replace this with the API key for the web service
authz_hdr <- paste('Bearer', api_key, sep = ' ')

h$reset()
# POST the JSON payload; body text lands in h, headers in hdr.
curlPerform(url = "https://ussouthcentral.services.azureml.net/workspaces/244cb34e3a684e3392e20fadd97fa92a/services/ae662f8f66a9464ebaf78e1a8c68f94c/execute?api-version=2.0&details=true",
            httpheader = c('Content-Type' = "application/json", 'Authorization' = authz_hdr),
            postfields = body,
            writefunction = h$update,
            headerfunction = hdr$update,
            verbose = TRUE
)

headers <- hdr$value()
# BUG FIX: headers["status"] is a character string; convert before comparing so
# the check is numeric rather than lexicographic string ordering.
httpStatus <- as.integer(headers["status"])
if (httpStatus >= 400)
{
  print(paste("The request failed with status code:", httpStatus, sep = " "))
  # Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure
  print(headers)
}

print("Result:")
result <- h$value()
print(fromJSON(result))
|
01c9423cfbe3b5b96ffc2af01c55eec74757137c
|
d7233bc626c84dee685eada4632075f9c060f4a2
|
/tweet_stream.R
|
62011404f33b3afba085acdef5b8ab66efded138
|
[] |
no_license
|
jasoncanney/DataAnalytics_Public
|
036d37c6371896685930c5bd5581abf34374ce49
|
e0bec3a597fa266ce1e6c8d474dfbea14856c5a4
|
refs/heads/master
| 2021-01-20T21:35:03.678295
| 2017-08-29T14:58:53
| 2017-08-29T14:58:53
| 101,771,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,768
|
r
|
tweet_stream.R
|
# Description: This file is meant to recreate the tweetstream.py in R.
# It loads necessary packages, makes a connection to Twitter api and
# then captures tweets based on your search string
# Author: Jason Canney
# Date: 11/05/2014
# ------ Execute sections of code as instructed by highlighting and running each section

# Install the necessary packages
# Run sessionInfo() from your RStudio console to make sure the following packages are not loaded
# Highlight and run the next five lines
install.packages('ROAuth',repos='http://cran.cnr.Berkeley.edu')
install.packages('twitteR', repos='http://cran.cnr.Berkeley.edu')
install.packages('wordcloud', repos='http://cran.cnr.Berkeley.edu')
install.packages('tm', repos='http://cran.cnr.Berkeley.edu')
install.packages("RJSONIO", repos='http://cran.cnr.Berkeley.edu')

# Load the libraries
# Highlight and run the next five lines to load the libraries into the session
# After you run these lines then run sessionInfo() in the console to see that the packages
# are now active in your session as well as any other required packages for these libraries
library('ROAuth')
library('twitteR')
library('wordcloud')
library('tm')
library('RJSONIO')

# Download the cert file - Run this single line next
download.file(url="http://curl.haxx.se/ca/cacert.pem", destfile="cacert.pem")

# To get your consumerKey and consumerSecret see the twitteR documentation for instructions
# Put your consumerkey and consumersecret values in here and then highlight next five lines and run
# SECURITY NOTE(review): real OAuth credentials are hard-coded below; they should
# be revoked and supplied via environment variables or a git-ignored config file.
cred <- OAuthFactory$new(consumerKey='z7Fw4JCMnO2haKXljvyyXNGqg',
                         consumerSecret='KfOxmOQXSPS4nJ1rxBuU9oP76jPezWKsPHDMN2g6emZlU6n637',
                         requestURL='https://api.twitter.com/oauth/request_token',
                         accessURL='https://api.twitter.com/oauth/access_token',
                         authURL='https://api.twitter.com/oauth/authorize')

# Required to make the connection with twitter
# Highlight and run the next line. You will be prompted to copy a URL, put it in your web browser
# and approve the authorization of your twitter app. Do that and then enter the numeric value in your
# RStudio console window and hit enter
cred$handshake(cainfo="cacert.pem")

# Highlight and run next line (saves the credential object for reuse in later sessions)
save(cred, file="twitter authentication.Rdata")

# Highlight and run next line. Checks whether your Rstudio session is now ready to search twitter via R
# Should return a TRUE value. If it does not, go back and run previous steps until you get a TRUE response
registerTwitterOAuth(cred)

# Capture the tweets (search term "TWD", up to 1500 results)
tweet_stats<- searchTwitter("TWD", n=1500, cainfo="cacert.pem")

# Save just the text from the tweets (one character string per tweet)
tweet_stats_text <- sapply(tweet_stats, function(x) x$getText())
|
2bba61544c68476fbfe042bedc00df43711f178c
|
d6ab4726dfa802b465117ac81e480b58ec54cd3e
|
/man/is_big_endian.Rd
|
06b1b20e3fc4caf7b07100cce5fdafc2d4bbb4b5
|
[] |
no_license
|
zbarutcu/qs
|
60c288f1ab9ca47bea63bf51dc258b6985cbdecd
|
ee080cdc8b6f1d8a94b1229e8ffb5d0f5b6fb501
|
refs/heads/master
| 2023-03-21T00:15:14.978802
| 2021-03-20T07:23:11
| 2021-03-20T07:23:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 601
|
rd
|
is_big_endian.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_files.R
\name{is_big_endian}
\alias{is_big_endian}
\title{System Endianness}
\usage{
is_big_endian()
}
\value{
`TRUE` if big endian, `FALSE` if little endian.
}
\description{
Tests system endianness. Intel and AMD based systems are little endian, and so this function will likely return `FALSE`.
The `qs` package is not capable of transferring data between systems of different endianness. This should not matter for the large majority of use cases.
}
\examples{
is_big_endian() # returns FALSE on Intel/AMD systems
}
|
6a1b2f88b9244deed71d4f1c762fc4b9348a4326
|
99b903076bd1a723c9f0bef40e76b6ba21024344
|
/tests/testthat/test_proximity_matrix.R
|
0a6f210c93083d6be4415c6343089163598cdc01
|
[] |
no_license
|
bbest/prioritizr
|
f61962f2dd32bd350b7dd3d4880ce3c3c0373fc2
|
814d0ec1cdfea6bffe0ac8208e8bf28f62475662
|
refs/heads/master
| 2022-06-04T19:57:43.122107
| 2022-04-25T05:16:21
| 2022-04-25T05:16:21
| 185,241,796
| 0
| 0
| null | 2019-05-06T17:28:35
| 2019-05-06T17:28:34
| null |
UTF-8
|
R
| false
| false
| 7,284
|
r
|
test_proximity_matrix.R
|
context("proximity matrix")

# Adjacent non-NA pixels should be flagged as proximal; the result is
# cross-checked against a binarised boundary matrix (shared borders -> 1).
test_that("RasterLayer (adjacent non-NA pixels are proximal)", {
  # data
  x <- raster::raster(matrix(c(NA, 2:9), ncol = 3),
                      xmn = 0, ymn = 0, xmx = 3, ymx = 3)
  m <- proximity_matrix(x, distance = 1)
  s <- boundary_matrix(x)
  s[s > 0] <- 1
  Matrix::diag(s) <- 0
  s <- Matrix::drop0(as(s, "symmetricMatrix"))
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_true(all(m == s))
})
# With distance = 100 (far larger than the 3x3 extent) every pair of non-NA
# pixels is proximal; the NA cells (cells 1 and 9) get all-zero rows/columns.
test_that("RasterLayer (all non-NA pixels are proximal)", {
  # data
  x <- raster::raster(matrix(c(NA, 2:8, NA), byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  m <- proximity_matrix(x, distance = 100)
  s <- matrix(1, ncol = 9, nrow = 9)
  diag(s) <- 0
  s[, 1] <- 0
  s[, 9] <- 0
  s[1, ] <- 0
  s[9, ] <- 0
  s <- as(as(s, "dgCMatrix"), "symmetricMatrix")
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})
# With a distance far smaller than the 1-unit cell size no pair of pixels can
# be proximal, so the expected matrix is all-zero with one row/column per
# raster cell (9 cells, matching the 9x9 matrices used by the sibling tests).
test_that("RasterLayer (none are proximal)", {
  # data
  x <- raster::raster(matrix(c(NA, 2:8, NA), byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  m <- proximity_matrix(x, distance = 1e-3)
  # BUG FIX: `m` was previously overwritten by a stray
  # proximity_matrix(sim_pu_polygons[c(1, 3), ], ...) line pasted from the
  # SpatialPolygons test, so the raster result was never actually tested.
  s <- Matrix::sparseMatrix(i = integer(0), j = integer(0),
                            x = numeric(0), dims = c(9, 9))
  s <- as(s, "symmetricMatrix")
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})
# NOTE(review): despite the "RasterLayer" label, this test converts the raster
# to polygons first -- the description looks like a copy-paste slip; confirm.
test_that("RasterLayer (all polygons are proximal)", {
  # data
  x <- raster::raster(matrix(0:8, byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  x <- raster::rasterToPolygons(x, n = 4)
  m <- proximity_matrix(x, distance = 100)
  s <- matrix(1, ncol = 9, nrow = 9)
  diag(s) <- 0
  s <- as(as(s, "dgCMatrix"), "symmetricMatrix")
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})

# Multi-layer stack: both layers have NA in cell 1, so row/column 1 must be
# all zero while every other cell keeps at least one proximal neighbour.
test_that("RasterLayer (multiple layers)", {
  # data
  x <- raster::stack(
    raster::raster(
      matrix(c(NA, NA, 3:9), ncol = 3), xmn = 0, ymn = 0, xmx = 3, ymx = 3
    ),
    raster::raster(
      matrix(c(NA, 2:9), ncol = 3), xmn = 0, ymn = 0, xmx = 3, ymx = 3
    )
  )
  m <- proximity_matrix(x, distance = 1)
  s <- boundary_matrix(x)
  s[s > 0] <- 1
  Matrix::diag(s) <- 0
  s <- Matrix::drop0(as(s, "symmetricMatrix"))
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_true(all(m == s))
  expect_true(all(m[1, ] == 0))
  expect_true(all(m[, 1] == 0))
  expect_gt(min(Matrix::rowSums(m)[-1]), 0)
})
# Polygons converted from a raster grid: at distance 0.1 only touching
# polygons are proximal, which should match the adjacency matrix.
test_that("SpatialPolygons (adjacent polygons are proximal)", {
  # data
  x <- raster::raster(matrix(0:8, byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  x <- raster::rasterToPolygons(x, n = 4)
  s <- adjacency_matrix(x)
  m <- proximity_matrix(x, distance = 0.1)
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})

# A distance larger than the whole grid extent makes every polygon pair proximal.
test_that("SpatialPolygons (all polygons are proximal)", {
  # data
  x <- raster::raster(matrix(0:8, byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  x <- raster::rasterToPolygons(x, n = 4)
  m <- proximity_matrix(x, distance = 100)
  s <- matrix(1, ncol = 9, nrow = 9)
  diag(s) <- 0
  s <- as(as(s, "dgCMatrix"), "symmetricMatrix")
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})

# Two example polygons at a tiny distance threshold: expect an all-zero 2x2 matrix.
test_that("SpatialPolygons (no polygons are proximal)", {
  data(sim_pu_polygons)
  s <- Matrix::sparseMatrix(i = integer(0), j = integer(0),
                            x = numeric(0), dims = c(2, 2))
  s <- as(s, "symmetricMatrix")
  m <- proximity_matrix(sim_pu_polygons[c(1, 3), ], distance = 0.01)
  expect_equal(s, m)
})
# sf equivalent of the SpatialPolygons adjacency case.
test_that("sf (adjacent polygons are proximal)", {
  # data
  x <- raster::raster(matrix(0:8, byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  x <- sf::st_as_sf(raster::rasterToPolygons(x, n = 4))
  s <- adjacency_matrix(x)
  m <- proximity_matrix(x, distance = 0.1)
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})

# sf equivalent of the all-proximal SpatialPolygons case.
test_that("sf (all polygons are proximal)", {
  # data
  x <- raster::raster(matrix(0:8, byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  x <- sf::st_as_sf(raster::rasterToPolygons(x, n = 4))
  m <- proximity_matrix(x, distance = 100)
  s <- matrix(1, ncol = 9, nrow = 9)
  diag(s) <- 0
  s <- as(as(s, "dgCMatrix"), "symmetricMatrix")
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})
# Two example polygons at a tiny distance threshold: expect an all-zero 2x2 matrix.
test_that("sf (no polygons are proximal)", {
  data(sim_pu_polygons)
  # FIX: use the namespaced call for consistency with every other sf call in
  # this file, so the test does not rely on sf being attached to the search path.
  x <- sf::st_as_sf(sim_pu_polygons[c(1, 3), ])
  s <- Matrix::sparseMatrix(i = integer(0), j = integer(0),
                            x = numeric(0), dims = c(2, 2))
  s <- as(s, "symmetricMatrix")
  m <- proximity_matrix(x, distance = 0.01)
  expect_equal(s, m)
})
# Line 1 touches line 2 at (2,2) and crosses line 4 at (1,1); line 3 is
# isolated -- so only pairs (1,2) and (1,4) appear in the expected matrix.
test_that("SpatialLines (intersecting lines are proximal)", {
  # data
  x <- sp::SpatialLines(list(
    sp::Lines(ID = "1", list(sp::Line(matrix(
      c(
        0, 0,
        1, 1,
        2, 2), ncol = 2, byrow = TRUE)))),
    sp::Lines(ID = "2", list(sp::Line(matrix(
      c(
        2, 2,
        3, 3,
        4, 4), ncol = 2, byrow = TRUE)))),
    sp::Lines(ID = "3", list(sp::Line(matrix(
      c(
        5, 5,
        7, 7), ncol = 2, byrow = TRUE)))),
    sp::Lines(ID = "4", list(sp::Line(matrix(
      c(
        0, 1,
        4, 1), ncol = 2, byrow = TRUE))))))
  m <- proximity_matrix(x, distance = 1e-3)
  s <- Matrix::sparseMatrix(
    i = c(1, 1),
    j = c(2, 4),
    x = c(1, 1),
    dims = c(4, 4), symmetric = TRUE)
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})

# Two example lines at a tiny distance threshold: expect an all-zero 2x2 matrix.
test_that("SpatialLines (no proximal lines)", {
  data(sim_pu_lines)
  x <- sim_pu_lines[c(1, 3), ]
  m <- proximity_matrix(x, distance = 1e-3)
  s <- Matrix::sparseMatrix(i = integer(0), j = integer(0),
                            x = numeric(0), dims = c(2, 2))
  s <- as(s, "symmetricMatrix")
  expect_equal(s, m)
})
# Centroids of 1x1 grid cells sit exactly 1 unit apart, so distance = 1
# should reproduce the raster's adjacency structure.
test_that("SpatialPoints (some points are proximal)", {
  # data
  r <- raster::raster(matrix(0:8, byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  x <- suppressWarnings({
    sf::as_Spatial(
      sf::st_centroid(sf::st_as_sf(raster::rasterToPolygons(r, n = 4)))
    )
  })
  s <- adjacency_matrix(r)
  m <- proximity_matrix(x, distance = 1.0)
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})

# Same centroids with a tiny threshold: expect an all-zero 9x9 matrix.
test_that("SpatialPoints (no points are proximal)", {
  # data
  r <- raster::raster(matrix(0:8, byrow = TRUE, ncol = 3),
                      xmn = 0, xmx = 3, ymn = 0, ymx = 3)
  x <- suppressWarnings({
    sf::as_Spatial(
      sf::st_centroid(sf::st_as_sf(raster::rasterToPolygons(r, n = 4)))
    )
  })
  m <- proximity_matrix(x, distance = 1e-3)
  s <- Matrix::sparseMatrix(i = integer(0), j = integer(0),
                            x = numeric(0), dims = c(9, 9))
  s <- as(s, "symmetricMatrix")
  # tests
  expect_true(inherits(m, "dsCMatrix"))
  expect_equal(s, m)
})
# proximity_matrix must error on geometry-collection features and on
# non-spatial input.
test_that("invalid input", {
  x <- sf::st_sf(
    id = c(1, 2),
    geom = sf::st_sfc(
      sf::st_point(x = c(1, 2)),
      sf::st_geometrycollection(
        list(
          sf::st_point(x = c(10, 20)),
          sf::st_point(x = c(100, 200))
        )
      )
    )
  )
  expect_error(proximity_matrix(x, 2))
  expect_error(proximity_matrix(4, 1))
})
|
37e5dc4c7679bffbd18a8f8196a1baba3a82a465
|
b5f9a63e54964eec69cdbb5c6f18c420cbacc577
|
/Scripts/BSB_MiraRiver_OverWintering_Migration_Temperature_plots.R
|
ae2342afdb395eb7ea84373c90c7d5873ce61396
|
[] |
no_license
|
BuhariwallaCF/Thesis-Tracking-Chapter
|
2c6fed601d6dd1a91001c3a57af4ed664474efcb
|
9cd1f6c4cf323582ba68710445c5515436c9467b
|
refs/heads/master
| 2020-04-15T12:46:52.265236
| 2016-11-07T17:25:39
| 2016-11-07T17:25:39
| 62,088,810
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,628
|
r
|
BSB_MiraRiver_OverWintering_Migration_Temperature_plots.R
|
#2016-10-19 Colin F Buhariwalla
# OVERWINTERING - plot departure/arrival times with temperature across years
# last updated:
# This script is meant to plot the overwintering departure/return events from Albert bridge into the OW area + Leaving overwintering area
# I need: 1) Temperature at BSB MR07 from Hobo Temp Logger (include relevant metadata in the script for easy reference)
# 2) a file with albert bridge departure events, arrival in OW events, and first out events
# 3) Need to combine this event file with temperature file (take the temperature at the closest time stamp and give it to the departure event)
# Want to Produce: a plot with date on the X axis, temperature on the y axis, have events (leaving ab, arrival in ow, exit of ow) as different symbols
# along the temperature line, by year?
# I need to decide on:
# 1) What temperature to use (mean daily?, min, max)
#Steps:
#4) figure out how to fill in the temperature for each fish at the time of it's departure
require(ggplot2)
require(scales)
require(dplyr)
require(tidyr)
#require(grid) #?
#require(gridExtra)#?

# "not in" operator: TRUE for LHS elements absent from the RHS
"%nin%" <- Negate("%in%")

####
# temp df: hobo temperature-logger readings (read as character; parsed later)
tdf <- read.csv("data/MR07t_2012-2015_UTC_temperature.csv", colClasses = "character")
names(tdf) <- c("date", "temp")
tdf$event <- "hobo"
tdf$station <- "007"

#events df
edf <- read.csv("data/overwintering_events.csv", stringsAsFactors = F)

#stn df to eliminate associated hobo loggings of non deployed station
stn.df <- read.csv("data/stndf.csv", stringsAsFactors = F)

#get some posix on the go
# NOTE(review): hours(), ymd(), interval(), %within%, year() and month() used
# below look like lubridate functions, which this file never loads directly --
# presumably dates_and_times_fun.R loads lubridate; confirm.
source("functions/dates_and_times_fun.R")
tdf <- dates_and_times_fun(tdf)
tdf <- mutate(tdf, date = date + hours(3)) ### ATTENTION: this needs to happen with the combined hobo file from above since UTC correction wasn't applied
edf <- dates_and_times_fun(edf)
#edf$ddate <- ymd(substr(edf$date, 1, 10))

# recovery/deploy metadata
rdf <- read.csv("raw data/bsb_metadata_recovery_2012-2015_update20160829.csv", colClasses = "character")
ddf <- read.csv("raw data/bsb_metadata_deployment_2012-2015_update20160829.csv", colClasses = "character") # careful, this file had quotations (" ") in it and R had trouble reading it in

# keep only the columns needed to match loggers to deployments, then rename
rdf <- select(rdf, STATION_NO, CONSECUTIVE_DEPLOY_NO, INS_SERIAL_NO, RECOVER_DATE_TIME..yyyy.mm.ddThh.mm.ss.)
ddf <- select(ddf, STATION_NO, CONSECUTIVE_DEPLOY_NO, INS_SERIAL_NO, DEPLOY_DATE_TIME....yyyy.mm.ddThh.mm.ss.,DEPLOY_LAT, DEPLOY_LONG, BOTTOM_DEPTH, INSTRUMENT_DEPTH)
names(rdf) <- c("station", "deploy_no", "serial_no","recov_date")
names(ddf) <- c("station", "deploy_no", "serial_no", "depl_date", "lat", "long", "depth", "ins_depth")
#rdf$stn <- as.numeric(rdf$station) # the formats aren't the same during the imports..
#ddf$stn <- as.numeric(ddf$station)

# one row per station/deployment with both deploy and recovery info
stndf <- full_join(ddf, rdf, by = c("station", "deploy_no", "serial_no"))
stndf <- stndf[1:106,] # the blank rows that excel always has in it get imported, need to delete.
stndf <- dates_and_times_fun(stndf)

source("functions/hobo_cleanup_fun.R") # to see how this works, see "hobo_cleanup_fun.R"/ page 102 of Lab book
df <- hobo_cleanup_fun(tdf, stndf, "007")
df$temp <- as.numeric(df$temp)

### now there are also some problems where a logger was removed without being redeployed with the station (for rebatterying)
df <- df[!df$date %within% interval("2013-04-24 23:00:00", "2013-05-03 21:00:00") & !df$date %within% interval("2014-05-03 16:30:00", "2014-05-03 17:00:00"),]
#df <- df %>% mutate(year = year(date), month = month(date), ddate = ymd(substr(df$date, 1, 10)))
#df$winter <- ifelse(df$month > 6, paste(df$year, df$year+1, sep="-"), paste(df$year-1,df$year, sep="-"))
#### feel the construction love ####
# idea: join the events df with df and then use the closest temp value to insert into the dataframe (lead for next possible temp event)
adf <- union_all(df, edf)
adf$temp <- as.numeric(adf$temp)
adf <- adf %>% arrange(date)
# carry the last observed hobo temperature forward onto each event row
adf <- adf %>% fill(temp)

# add a winter/year/months/ddate column to data
adf <- adf %>% mutate(year = year(date), month = month(date), ddate = ymd(substr(adf$date, 1, 10)))
# label each row with its overwintering season (Jul-Jun split), e.g. "2012-2013"
adf$winter <- ifelse(adf$month > 6, paste(adf$year, adf$year+1, sep="-"), paste(adf$year-1,adf$year, sep="-"))
adf <- adf %>% group_by(ddate) %>% mutate(mtemp = mean(temp))
adf$event <- factor(adf$event, levels = c("Last Albert Bridge", "Last Outside Overwintering", "First Overwintering","End Overwintering", "hobo"))

#### PLOT: Mean Daily Temperature (°C) vs Date of EVENTs (last albert, first ow, first outside) ####
daily12 <- filter(adf, winter == "2012-2013")
daily13 <- filter(adf, winter == "2013-2014")
daily14 <- filter(adf, winter == "2014-2015")

# One panel per winter: mean daily temperature line with jittered event symbols
p1 <- ggplot(data = filter(daily12, month %in% c(10:12,1:5)), aes(ddate, mtemp)) + geom_line(data = filter(daily12, month %in% c(10:12,1:5))) + geom_line(data = filter(daily12, month %in% c(4,5))) +
  geom_point(data = filter(daily12, event %nin% c("hobo", "Last Outside Overwintering")),aes(shape = event, size = 5), position = "jitter") + scale_shape_manual(values=c(20,3)) + theme_bw() + xlab("")+ ylab("") +
  theme(legend.position = "none", axis.text.x = element_blank()) + scale_x_date(breaks = pretty_breaks(8),limits = c(ymd("2012-10-01"), ymd("2013-05-31"))) + guides(size = F)
p2 <- ggplot(data = filter(daily13, month %in% c(10:12,4,5)), aes(ddate, mtemp)) + geom_line(data = filter(daily13, month %in% c(10:12,1:5))) + geom_line(data = filter(daily13, month %in% c(4,5))) +
  geom_point(data = filter(daily13, event %nin% c("hobo", "Last Outside Overwintering")),aes(shape = event, size = 5), position = "jitter") + scale_shape_manual(values=c(20, 8, 3)) + theme_bw() + ylab("Mean Daily Temperature (°C)") + xlab("") +
  theme(legend.position = c(0.5, 0.7), legend.direction = "horizontal", legend.title = element_blank() , axis.text.x = element_blank()) + scale_x_date(breaks = pretty_breaks(8),limits = c(ymd("2013-10-01"), ymd("2014-05-31"))) + guides(size = F)
p3 <- ggplot(data = filter(daily14, month %in% c(10:12,4,5)), aes(ddate, mtemp)) + geom_line(data = filter(daily14, month %in% c(10:12,1:5))) + geom_line(data = filter(daily14, month %in% c(4,5))) +
  geom_point(data = filter(daily14, event %nin% c("hobo", "Last Outside Overwintering")),aes(shape = event, size = 5), position = "jitter") + scale_shape_manual(values=c(20, 8, 3)) + theme_bw() + xlab("Date") + ylab("") +
  theme(legend.position = "none", legend.title = element_blank()) + scale_x_date(breaks = pretty_breaks(8),labels = date_format("%B"), limits = c(ymd("2014-10-01"), ymd("2015-05-31"))) + guides(size = F)

# stack the three winters into one figure
source("functions/multiplot_fun.R")
multiplot_fun(p1, p2, p3)
|
d88f2a6faab02811ebf33fd251c3c6db12fdc058
|
631447642ac9cedef030c952a1cf752c3fdb0a3e
|
/Melanoma_Survival/server.R
|
a2c716dfc35cb4d7fe6c7de1c5d5f38da99c8564
|
[] |
no_license
|
droogdim83/dataProducts_Melanoma
|
248a41b92f911a2356674e7882d7e7d12901942a
|
448ad8265ee4122e54c21eaf6f017f2d7172d718
|
refs/heads/master
| 2021-01-20T07:18:56.752407
| 2017-08-27T05:05:52
| 2017-08-27T05:05:52
| 101,531,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,111
|
r
|
server.R
|
library(shiny)
library(MASS)
library(caret)

# Melanoma survival data from MASS; recode the numeric columns as labelled factors.
data("Melanoma")
Melanoma$status <- as.factor(Melanoma$status)
levels(Melanoma$status) <- c("Died_Melanoma", "Alive", "Died_Other_Causes")
Melanoma$sex <- as.factor(Melanoma$sex)
levels(Melanoma$sex) <- c("Female","Male")
Melanoma$year <- as.factor(Melanoma$year)
Melanoma$ulcer <- as.factor(Melanoma$ulcer)
levels(Melanoma$ulcer) <- c("Absence", "Presence")
shinyServer(function(input, output) {
  ## Fit a linear model to the Melanoma survival dataset from the MASS package
  ## (trained once per session, when this server function runs)

  ## Create Dummy Variables and remove colinearity
  dummies <- dummyVars(~ ., data = Melanoma, fullRank = TRUE)
  MelanTrans <- data.frame(predict(dummies, newdata = Melanoma))
  MelanTrans$time <- Melanoma$time
  MelanTrans$age <- Melanoma$age
  MelanTrans$thickness <- Melanoma$thickness

  ## Remove Near Zero Variance Columns
  nzv_cols <- nearZeroVar(MelanTrans)
  if(length(nzv_cols) > 0) MelanTrans <- MelanTrans[, -nzv_cols]

  ## Split the data into Training and Test sets
  inTrain <- createDataPartition(y = MelanTrans$time, p = 0.7, list = FALSE)
  training <- MelanTrans[inTrain,]
  testing <- MelanTrans[-inTrain,]

  ## Train linear model (10-fold CV, centred/scaled predictors)
  set.seed(123)
  melanModFit <- train(time ~ ., data = training, method = "lm",
                       trControl = trainControl(method = "cv", number = 10),
                       preProcess = c("center", "scale"))
  summary(melanModFit)

  ## Reactive prediction: rebuild a one-row predictor data frame from the UI
  ## inputs and return the model's predicted survival time, rounded.
  model1pred <- reactive({
    ## The following values need to be set to input into the predict function
    # status.Alive
    # status.Died_Other_Causes
    # sex.Male
    # age
    # year.1965
    # year.1966
    # year.1967
    # year.1968
    # year.1969
    # year.1970
    # year.1971
    # year.1972
    # year.1973
    # thickness
    # ulcer.Presence

    ## Get status from Radio Button (two fullRank dummy columns; the base
    ## level is represented by both columns being 0)
    statusSelect <- switch(input$status,
                           Alive = matrix(c(1,0), 1, 2),
                           Other = matrix(c(0,1), 1, 2),
                           Melanoma = matrix(c(0,0), 1, 2))
    genderSelect <- switch(input$gender,
                           Female = 0,
                           Male = 1)
    ageInput <- input$sliderAge
    ## One-hot row for the 9 retained year dummies (1965-1973)
    yearSelect <- switch(input$year,
                         six_five = matrix(c(1, 0, 0, 0, 0, 0, 0, 0, 0), 1, 9),
                         six_six = matrix(c(0, 1, 0, 0, 0, 0, 0, 0, 0), 1, 9),
                         six_seven = matrix(c(0, 0, 1, 0, 0, 0, 0, 0, 0), 1, 9),
                         six_eight = matrix(c(0, 0, 0, 1, 0, 0, 0, 0, 0), 1, 9),
                         six_nine = matrix(c(0, 0, 0, 0, 1, 0, 0, 0, 0), 1, 9),
                         seven_zero = matrix(c(0, 0, 0, 0, 0, 1, 0, 0, 0), 1, 9),
                         seven_one = matrix(c(0, 0, 0, 0, 0, 0, 1, 0, 0), 1, 9),
                         seven_two = matrix(c(0, 0, 0, 0, 0, 0, 0, 1, 0), 1, 9),
                         seven_three = matrix(c(0, 0, 0, 0, 0, 0, 0, 0, 1), 1, 9))
    thickInput <- input$sliderTumorThick
    ulcerSelect <- switch(input$ulcer,
                          Presence = 1,
                          Absence = 0)
    ## Assemble the 15 predictor columns in the order the model expects
    newDataCombine <- cbind(statusSelect, genderSelect, ageInput, yearSelect,
                            thickInput, ulcerSelect)
    newDataDF <- data.frame(newDataCombine)
    colnames(newDataDF) <- c("status.Alive", "status.Died_Other_Causes",
                             "sex.Male", "age", "year.1965", "year.1966",
                             "year.1967", "year.1968", "year.1969", "year.1970",
                             "year.1971", "year.1972", "year.1973", "thickness", "ulcer.Presence")
    round(predict(melanModFit, newdata = newDataDF), digits = 0)
  })

  ## Display the prediction from Model 1 as numeric text
  output$pred1 <- renderText({
    model1pred()
  })

  ## Full lm summary shown in the UI
  output$modelSummary <- renderPrint({
    summary(melanModFit)
  })
})
|
ab6e9c699b6239119896aeeba2e537289e0cbf56
|
1203cc14b7416390beb8149c3a75c28c9177a681
|
/scripts/partnership_transactions.R
|
9740334f99d94f0e9c4b785bf32445544576be49
|
[] |
no_license
|
AZASRS/DB_R_Testing_Environment
|
7801fc9bcf078be259ed8ef80335729623bdb1dd
|
bf65706a2244dc67e63cd25382488d2219756b11
|
refs/heads/master
| 2020-03-20T06:49:08.414225
| 2018-07-27T00:31:20
| 2018-07-27T00:31:20
| 137,261,803
| 0
| 1
| null | 2018-07-16T22:23:26
| 2018-06-13T19:33:14
|
R
|
UTF-8
|
R
| false
| false
| 2,315
|
r
|
partnership_transactions.R
|
library('tidyverse')
# Read the raw partnership investment transaction download (pipe-delimited) and
# reduce it to one row per transaction with Date, Asset Class, total management
# fees and total carry.
partnership_transactions = function(){
  Fund.raw = read_delim("data/201805 - Partnership_Investment_Transaction_Download.csv", delim = "|")
  df_group_fees = Fund.raw %>%
    # rename the raw download columns to reporting-friendly names
    transmute(`Asset Class` = `FUND_SHORT_NAME`,
              `Fund Name` = `INVESTMENT_NAME`,
              Date = `CONTRIB_DISTRIB_DATE`,
              `Carry in Commit` = `BASE_FEES_IN_COMMIT`,
              `Mgmt Fees in Commit` = `BASE_MF_IN_COMMIT`,
              `Carry outside Commit` = `BASE_FEES_OUT_COMMIT`,
              `Mgmt Fees outside Commit` = `BASE_MF_OUT_COMMIT`) %>%
    # dates arrive as e.g. "01 May 2018"
    mutate(Date = as.Date(Date, format='%d %b %Y')) %>%
    # collapse in/outside-commitment amounts into totals; strip "ASRS - " prefix
    transmute(Date = Date,
              `Asset Class` = gsub("ASRS - ", "", `Asset Class`),
              `Total Mgmt Fees` = `Mgmt Fees in Commit` + `Mgmt Fees outside Commit`,
              `Total Carry` = `Carry in Commit` + `Carry outside Commit`)
  return(df_group_fees)
}
# Summarise fee/carry totals by asset class, dropping rows dated before the
# cut-off and appending a grand-total row labelled "Total All".
#
# df: data frame as returned by partnership_transactions()
# DO_NOT_INCLUDE_BEFORE_DATE: rows dated before this (as.Date-coercible) are excluded
partnership_transactions_report = function(df, DO_NOT_INCLUDE_BEFORE_DATE = "2018-01-01"){
  DO_NOT_INCLUDE_BEFORE_DATE = as.Date(DO_NOT_INCLUDE_BEFORE_DATE)
  # BUG FIX: this pipeline previously started from the global `dat` instead of
  # the `df` argument, silently ignoring whatever the caller passed in.
  df = df %>%
    filter(Date >= DO_NOT_INCLUDE_BEFORE_DATE) %>%
    select(-Date) %>%
    group_by(`Asset Class`) %>%
    summarize_all(sum) %>%
    replace_na(list(`Total Mgmt Fees` = 0,
                    `Total Carry` = 0)) %>%
    bind_rows(summarise_all(., funs(if(is.numeric(.)) sum(.) else "Total All")))
}
# Build the full transaction table once, then the summarised report from it.
dat = partnership_transactions()
report = partnership_transactions_report(df = dat)

# Database-shaped table: snake_case names, character dates, and max() of the
# fee/carry values within each date/asset-class group before de-duplicating.
pt = dat %>%
  rename(date = Date,
         asset_class = `Asset Class`,
         total_mgmt_fees = `Total Mgmt Fees`,
         total_carry = `Total Carry`) %>%
  mutate(date = as.character(date)) %>%
  group_by(date, asset_class) %>%
  summarise(total_mgmt_fees = max(total_mgmt_fees),
            total_carry = max(total_carry)) %>%
  unique() %>%
  na.omit()

# NOTE(review): `fundinfo` is not defined in this file -- presumably created by
# a previously-sourced script; confirm before running this standalone.
fi = fundinfo
colnames(fi) = tolower(colnames(fi))
fi = fi %>%
  rename(unfunded_date = unfunded.date,
         yield_amt = yield,
         class_type = class)

# Append both tables to the SQLite database used by the DB application.
library('DBI')
library('RSQLite')
con <- dbConnect(RSQLite::SQLite(), "../DB_Application/temporary4.db")
dbWriteTable(con, name='partnership_transactions', value = pt %>% unique(), row.names=FALSE, append=TRUE)
dbWriteTable(con, name='fundinfo', value = fi %>% unique(), row.names=FALSE, append=TRUE)
|
cbeea2d69b243dd45fbd07d920d229125a4a0ccc
|
c42b40622b8b1a1305a0fc99ecc2c365cb994816
|
/sna_script.r
|
b70e40b2646652872a15f01bd352ffa7210e81ff
|
[] |
no_license
|
gourabchanda1990/SNA
|
f7af7233853c2964fbd4a5c85ebb3fbd00f96222
|
c79df98adbb8dc4e96f81afd6e94b954d651479e
|
refs/heads/master
| 2020-04-24T18:25:43.800752
| 2019-02-23T09:50:14
| 2019-02-23T09:50:14
| 172,179,281
| 1
| 0
| null | 2019-02-23T10:31:59
| 2019-02-23T06:19:29
|
R
|
UTF-8
|
R
| false
| false
| 483
|
r
|
sna_script.r
|
# Load the required packages into the R script, installing any that are missing.
# FIX: "dplyr" was listed twice, causing library() to be called on it twice.
# NOTE(review): "reader" may be a typo for "readr" -- confirm before relying on it.
packages.req <- c("igraph", "sna", "dplyr", "stringr", "ggplot2", "network", "reader")
packages.diff <- setdiff(packages.req, rownames(installed.packages()))
if (length(packages.diff) > 0) {
  install.packages(packages.diff)
}
invisible(sapply(packages.req, library, character.only = TRUE))

# Read the input file into the script (edge/actor list hosted on GitHub)
input.data <- read.csv("https://raw.githubusercontent.com/OPER682-Tucker/Social-Network-Analysis/master/Actors.csv")
|
724f2358ef0ab3b540d21af64936b9971d9a18ac
|
86f7d5a0c079d7422cfad5774f3e4910b0e1a263
|
/Old code/reduced.plots.R
|
9c1e979189b8f177fee384da5d3a9b4dd5218182
|
[] |
no_license
|
peterbenmeyer/MeyerPlanes
|
03fc66e48c44fd32a714c9cd87d7a321b610c323
|
4be7b2d6ae82c537d15b6860d16547a58aed1697
|
refs/heads/master
| 2020-12-24T23:28:42.428557
| 2012-11-17T00:32:23
| 2012-11-17T00:32:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,910
|
r
|
reduced.plots.R
|
# Plots of aeronautically-relevant patent counts by year and by country.
# Modernised from pre-0.9.2 ggplot2: opts(), theme_rect(), theme_text()
# and theme_blank() were removed from ggplot2; replaced here with
# ggtitle()/theme()/element_*(), and show_guide -> show.legend.
library(plyr)
library(ggplot2)

# Expects working directory set to "MeyerPlanes"
reduced.path <- file.path(getwd(), "Data", "Patents", "patents_small.csv")
reduced.df <- read.csv(file = reduced.path, as.is = TRUE, header = FALSE)

# --- define languages in order listed in file ---
langs.str <- c('Britain','Germany','France','United States')
names(reduced.df) <- c("Year", "Country", langs.str)

# Replace values in country column with full names
reduced.df[, "Country"] <- langs.str[match(reduced.df[, "Country"], c("br", "de", "fr", "us"))]

# Construct by-year and by-country-year count tables
by.year.df <- ddply(reduced.df, "Year", "nrow")
by.year.country.df <- ddply(reduced.df, c("Year", "Country"), "nrow")
names(by.year.country.df)[3] <- names(by.year.df)[2] <- "Patents"

# Convert to factor for plotting
by.year.country.df[, "Country"] <- factor(by.year.country.df[, "Country"])

# Adjustable title and plot limits
beg_plot <- 1850 ## beg_year
end_plot <- 1910 ## end_year
country.title <- paste0("Aeronautically-relevant patents by country ", beg_plot, '-', end_plot)

# Summed by year
year.plot <- ggplot(data = subset(by.year.df, Year > beg_plot & Year <= end_plot),
                    aes(Year, Patents)) + geom_line() + xlab("") +
  ylab('Patents') +
  ggtitle(sub(" by country", "", country.title))

# Summed by country (several presentation variants below). Each can have
# a publication theme (b/w, grayscale, etc.) added on later.
country.plot <- ggplot(data = subset(by.year.country.df, Year > beg_plot & Year <= end_plot),
                       aes(Year, Patents, colour = Country)) +
  geom_line(size = 1) + ggtitle(country.title) +
  xlab("") + ylab('Patents')

# Variant closer to the original figure: legend inset in the top-left
# corner, no line-type changes.
inset.legend <- country.plot +
  theme(legend.background = element_rect(fill = "white"),
        legend.justification = c(0, 1), legend.position = c(0, 1),
        legend.text = element_text(size = 16)) +
  ggtitle(country.title)

# Show all countries separately with a common x axis for time.
# The labeller argument drops the facet strip labels; country names are
# drawn inside each panel instead.
# NOTE(review): aes_string() is deprecated in ggplot2 >= 3.0 but still
# functional; aes() with .data could replace it.
country.facet <- country.plot + facet_grid(Country ~ . , labeller = label_bquote('')) +
  guides(colour = "none") +
  theme(strip.background = element_rect(colour = NA, fill = NA),
        plot.title = element_text(size = 20)) +
  geom_text(aes_string(x = 1855, y = 40, label = "Country"),
            show.legend = FALSE, hjust = 0, size = 7)

# Same, with major/minor grid lines removed (gray background kept).
country.facet.degrid <- country.facet +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
|
39abbb89cc16f9a23b76d5c370ac79ea1972be95
|
4f99d63538e2ef3c97c7e72837247311e49c6019
|
/script/detect-backout-commits.R
|
ac7d873b3fee5d97d85eceefd136acb397151928
|
[] |
no_license
|
rodrigorgs/withdrawal-firefox
|
724869530079fbaf4a4673429e4b87fff1f8f8d3
|
d54e0d3a9a5ca5055fc61135cc1959b5f71bf530
|
refs/heads/master
| 2016-09-06T18:43:34.078152
| 2014-12-01T09:38:28
| 2014-12-01T09:38:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,143
|
r
|
detect-backout-commits.R
|
# Detect "backout" commits in the Firefox repository and map each one to
# the bug number(s) whose changes it backs out.
# NOTE(review): rm(list=ls()) wipes the caller's global environment on
# source() — generally discouraged in scripts.
rm(list=ls())
library(dplyr)
library(stringr)
source("../lib/unlist-column.R")

commits <- readRDS("../data/firefox-commits.rds")

###
# Bug fixed by each commit: a leading "Bug NNNNNN"-style message prefix.
commits$bug_fixed <- str_match(commits$message, "(?i)^ *bug *([0-9]{5,6})\\b")[,2]

#' # Find the bugs backed out by each backout commit
#' First, let's treat the case where the commit message specifies the bugs it backs out.
# Keep only commits whose message mentions "back...out", excluding merge
# and re-land commits (str_locate adds start/end columns used to slice
# the message later).
backout_commits <- commits
backout_commits <- cbind(backout_commits, str_locate(commits$message, "(?i)\\bback.{0,5}out\\b"))
backout_commits <- subset(backout_commits, !is.na(start))
backout_commits <- subset(backout_commits, !grepl("(?i)merg(e|ing)", message))
backout_commits <- subset(backout_commits, !grepl("(?i)re.?land", message))

#' Extract part of message that refers to bugs being backed out
msg <- substring(backout_commits$message, backout_commits$end + 1)
# Ignore bug references after these expressions ("caused by bug X",
# "fixes bug X", etc.), which name other bugs, not the backed-out ones.
# NOTE(review): ignore.case=T uses the reassignable T shortcut; TRUE is
# safer style.
msg <- gsub("caus.*", "", msg, ignore.case=T)
msg <- gsub("\\bfix.*", "", msg, ignore.case=T)
msg <- gsub("due to .*", "", msg, ignore.case=T)
msg <- gsub("\\bsee .*", "", msg, ignore.case=T)
msg <- gsub("\\bresolv.*", "", msg, ignore.case=T)
msg <- gsub("\\bsuspic.*", "", msg, ignore.case=T)
backout_commits$msg_after_backout <- msg

# 6-digit numbers remaining in the trimmed message are bug ids.
backout_commits$bugs_backedout <- str_match_all(backout_commits$msg_after_backout, "\\b[0-9]{6}\\b")

###########################################
#' Now, the case where the commit message specifies the changesets (commits) it backs out (so we have to look further to the bug fixed in the changesets that were backed out).
backout_commits$commits_backedout <- str_match_all(backout_commits$msg_after_backout, "\\b[0-9a-f]{7,12}\\b")
# Resolve each backed-out changeset hash to the bug that changeset fixed.
bch <- backout_commits %>%
  select(commit, commits_backedout) %>%
  unlist.column(commit, commits_backedout, "id", "commit_backedout") %>%
  merge(commits, by.x="commit_backedout", by.y="commit") %>%
  select(commit = id, bug = bug_fixed)

#' Mix the two
backouts <- unlist.column(backout_commits, commit, bugs_backedout, "commit", "bug") %>%
  rbind(bch) %>%
  arrange(commit, bug) %>%
  mutate(bug = as.integer(bug)) %>%
  unique()

###
saveRDS(backouts, "../data/firefox-backouts.rds")
|
ae38bbccb9f84a0ee5b60e4aa940158dbf99a922
|
decd805a323a5bdb863f9d5501c2963a4fb51ba0
|
/MarketSimulator/R/Orders.R
|
26e4e19752095c136323b9968c14474a7e9dd378
|
[] |
no_license
|
markhocky/MarketSimulator
|
eda0d2d11f01c480cc7a40506ae695284b77fee5
|
d7000d90bc822521cc084b4c245321c565b716b5
|
refs/heads/master
| 2016-08-09T20:51:56.167101
| 2016-04-09T01:56:27
| 2016-04-09T01:56:27
| 55,815,590
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,356
|
r
|
Orders.R
|
#'
#' Parent object from which different Order types may be derived.
#'
#' The quantity slot is signed: positive = buy, negative = sell (see the
#' concrete constructors below).
setClass("Order",
  representation(
    instrument = "character",       # instrument/ticker identifier
    ID = "character",               # broker-assigned order ID
    status = "OrderStatus",         # status object (class defined elsewhere)
    quantity = "integer",
    execution.price = "xts",        # price at which the order executed
    txn.cost.model = "function",    # maps an order to its transaction fees
    submission.time = "POSIXct",
    status.time = "POSIXct"         # time of the last status change
  ))

# Setter for the broker-assigned order ID; returns the modified copy.
setID <- function(order, ID) {
  order@ID <- ID
  return(order)
}

# Getter for the broker-assigned order ID.
getID <- function(order) {
  return(order@ID)
}
# Delegate status queries to the wrapped OrderStatus object.
setMethod("status",
  signature("Order"),
  function(object) {
    status(object@status)
  })

# Replace the order's status object; returns the modified copy.
setMethod("status<-",
  signature("Order"),
  function(object, value) {
    object@status <- value
    return(object)
  })

# Price at which the order was executed (empty xts until execution).
execution_price <- function(order) {
  return(order@execution.price)
}

# Attach the function used to compute transaction fees for this order.
setTxnCostModel <- function(order, model) {
  return(order)
}

# Transaction fees implied by the order's attached cost model.
txnFees <- function(order) {
  return(order@txn.cost.model(order))
}

# Submission-time setter/getter; the setter coerces to POSIXct.
"submissionTime<-" <- function(order, value) {
  order@submission.time <- as.POSIXct(value)
  return(order)
}

submissionTime <- function(order) {
  return(order@submission.time)
}

# Status-time setter/getter; the setter coerces to POSIXct.
"statusTime<-" <- function(order, value) {
  order@status.time <- as.POSIXct(value)
  return(order)
}

statusTime <- function(order) {
  return(order@status.time)
}
# Two orders are "similar" by default when they have exactly the same
# class; LimitOrder refines this to also compare limit prices.
setGeneric("areSimilar",
  function(order1, order2, ...) {
    return(identical(class(order1), class(order2)))
  })

# Combine an existing order with a new one for the same instrument.
# The generic performs the shared pre-checks (same instrument, same
# inheritance depth / order type), lets the class-specific method build
# the merged order, then carries over the existing order's cost model
# and ID. A merged quantity of zero nulls the order out.
# NOTE(review): instrumentOf(), quantity() and NullStatus() are defined
# elsewhere in the package.
setGeneric("mergeOrders",
  function(existing.order, new.order) {
    if (instrumentOf(existing.order) != instrumentOf(new.order)) {
      stop("Instruments differ between orders")
    }
    if (inherits(existing.order, "Order", which = TRUE) !=
        inherits(new.order, "Order", which = TRUE)) {
      stop("Attempt to merge different order types")
    }
    order <- standardGeneric("mergeOrders")
    order <- setTxnCostModel(order, existing.order@txn.cost.model)
    order <- setID(order, getID(existing.order))
    if (quantity(order) == 0) {
      order@status <- NullStatus()
    }
    return(order)
  })
# One-row data frame describing the executed order as a transaction,
# keyed (row-named) by instrument.
writeTransaction <- function(order) {
  data.frame(
    size = quantity(order),
    price = as.numeric(execution_price(order)),
    costs = txnFees(order),
    row.names = instrumentOf(order))
}

# Flatten an order into a one-row data frame for display; drops any
# field that is empty (e.g. an unexecuted price) or still an S4 object.
asDataFrame <- function(x) {
  order <- list(
    type = class(x),
    ID = getID(x),
    instrument = instrumentOf(x),
    status = status(x),
    quantity = quantity(x),
    price = execution_price(x),
    submitted = submissionTime(x),
    updated = statusTime(x))
  order <- order[sapply(order, length) == 1 & sapply(order, class) != "S4"]
  as.data.frame(order)
}

# Render the order as a one-row xts in a blotter-style order book layout.
# NOTE(review): the timestamp is hard-coded to 2010-04-20 and the side is
# always "long" — looks like placeholder/test scaffolding; confirm before
# relying on this in production.
bookEntry <- function(order) {
  timestamp <- as.Date("2010-04-20")
  ordertemplate <- xts(t(c(
    "Order.Qty" = quantity(order),
    "Order.Price" = execution_price(order),
    "Order.Type" = class(order),
    "Order.Side" = "long",
    "Order.Threshold" = 0,
    "Order.Status" = status(order),
    "Order.StatusTime" = as.character(as.POSIXct(timestamp)),
    "Prefer" = "",
    "Order.Set" = "",
    "Txn.Fees" = txnFees(order),
    "Rule" = "")),
    order.by = submissionTime(order))
  return(ordertemplate)
}

# Print an order via its flattened data-frame representation.
setMethod("show",
  signature(object = "Order"),
  function(object) {
    show(asDataFrame(object))
  })
#' notify order of market activity
#'
#' \code{notify} will generally be called by the broker when a new price bar is available.
#' If the instrument of the price bar and order are the same, and provided that the price
#' bar has both sufficient volume and price information, then the order will check to see
#' if it was executed.
#' If executed the order will change its status with the broker.
#'
#' @param order the order to be notified
#' @param broker the broker with which the order is held
#' @param price.bar the OHLCV information representing the day's activity.
#'
setGeneric("notify",
  function(order, broker, price.bar, ...) {
    # Only dispatch when the bar is for this order's instrument and the
    # market actually traded (active_market() is defined elsewhere).
    if (are_related(order, price.bar) && active_market(price.bar)) {
      standardGeneric("notify")
    }
  })

# TRUE when the price bar's column names mention the order's instrument.
are_related <- function(order, price.bar) {
  return(any(grepl(instrumentOf(order), names(price.bar))))
}

# Record the execution price, mark the order closed, stamp it with the
# broker's current date, then push the status change back to the broker.
execute <- function(order, at, broker) {
  order@execution.price <- at
  order@status <- ClosedStatus()
  statusTime(order) <- today(broker)
  updateOrder(order, broker)
}

# Let the status object run its class-specific broker update procedure.
updateOrder <- function(order, broker) {
  callUpdateProcedure(order@status, order, broker)
}
#' Market Order
#'
#' Opens position at first opportunity: fills at the next price bar's
#' open (see the notify method for MarketOrder).
setClass("MarketOrder",
  contains = "Order")
#' Create market order
#'
#' Creates an order to be submitted to the broker which is to be executed at market.
#' This order will effectively be executed at the next day's open price.
#' The order is to be specified with exactly one of either 'buy' or 'sell'
#' amounts; supplying both (or neither) throws an error.
#'
#' @param instrument character identifying the instrument the order is related to.
#' @param buy the number of shares to buy
#' @param sell the number of shares to sell
#' @return a MarketOrder with positive quantity for a buy, negative for a sell.
#'
Order <- function(instrument, buy = NULL, sell = NULL) {
  check_order_parameters(instrument, buy, sell)
  # Plain if/else rather than the original scalar ifelse(): the condition
  # is a single logical, and ifelse() can strip attributes and behaves
  # unexpectedly on zero-length input.
  if (is.null(buy)) {
    quantity <- -as.integer(abs(sell))
  } else {
    quantity <- as.integer(abs(buy))
  }
  order <- new("MarketOrder",
               instrument = instrument,
               status = OpenStatus(),
               quantity = quantity,
               execution.price = xts(),
               submission.time = initDate(),
               status.time = initDate())
  return(order)
}
# Shared argument validation for the order constructors: requires an
# instrument plus exactly one of `buy` / `sell`. Called purely for its
# error side effects; returns NULL invisibly when the arguments are valid.
check_order_parameters <- function(instrument, buy, sell) {
  if (missing(instrument)) {
    stop("Require an instrument identifier")
  }
  buy.given <- !is.null(buy)
  sell.given <- !is.null(sell)
  if (!buy.given && !sell.given) {
    stop("Require an order quantity")
  }
  if (is.numeric(buy) && is.numeric(sell)) {
    stop("Must have only one of buy or sell")
  }
}
# A market order fills unconditionally at the bar's open price.
setMethod("notify",
  signature(order = "MarketOrder"),
  function(order, broker, price.bar) {
    execute(order, at = Op(price.bar), broker)
  })

# Merging two market orders nets their signed quantities; Order() takes
# the absolute value internally, so the sign only selects buy vs sell.
# (A net quantity of zero is nulled out by the mergeOrders generic.)
setMethod("mergeOrders",
  signature(existing.order = "MarketOrder", new.order = "MarketOrder"),
  function(existing.order, new.order) {
    quantity <- quantity(existing.order) + quantity(new.order)
    if (quantity > 0) {
      order <- Order(instrumentOf(existing.order), buy = quantity)
    } else {
      order <- Order(instrumentOf(existing.order), sell = quantity)
    }
    return(order)
  })
#' Limit Order
#'
#' An order that executes only at its limit price or better; the buy and
#' sell variants encode the comparison direction in their class.
setClass("LimitOrder",
  representation(
    limit.price = "xts"
  ),
  contains = "MarketOrder")

setClass("BuyLimitOrder",
  contains = "LimitOrder")

setClass("SellLimitOrder",
  contains = "LimitOrder")
#' Create a limit order
#'
#' Builds a Buy/SellLimitOrder for `instrument` with exactly one of
#' `buy` or `sell` shares, to execute only at the limit price `at` or
#' better.
#'
#' @param instrument character instrument identifier.
#' @param buy number of shares to buy (mutually exclusive with `sell`).
#' @param sell number of shares to sell.
#' @param at the limit price.
#' @return a BuyLimitOrder (positive quantity) or SellLimitOrder
#'   (negative quantity).
Limit <- function(instrument, buy = NULL, sell = NULL, at) {
  check_order_parameters(instrument, buy, sell)
  if (missing(at)) {
    stop("Limit order must have a limit price")
  }
  # The original duplicated the whole new() call per side; the branches
  # differed only in class name and quantity sign.
  if (is.null(buy)) {
    order.class <- "SellLimitOrder"
    quantity <- -abs(as.integer(sell))
  } else {
    order.class <- "BuyLimitOrder"
    quantity <- abs(as.integer(buy))
  }
  order <- new(order.class,
               instrument = instrument,
               status = OpenStatus(),
               quantity = quantity,
               execution.price = xts(),
               limit.price = at,
               submission.time = initDate(),
               status.time = initDate())
  return(order)
}
# Accessor for a limit (or stop) order's trigger price.
limit_price <- function(limit.order) {
  return(limit.order@limit.price)
}

# Buy limit: fill at the open when the bar gaps below the limit,
# otherwise at the limit price if the bar's low trades through it.
setMethod("notify",
  signature(order = "BuyLimitOrder"),
  function(order, broker, price.bar) {
    if (as.numeric(Op(price.bar)) < as.numeric(limit_price(order))) {
      execute(order, at = Op(price.bar), broker)
    } else {
      if (as.numeric(Lo(price.bar)) < as.numeric(limit_price(order))) {
        execute(order, at = limit_price(order), broker)
      }
    }
  })

# Sell limit: mirror image of the buy case, using the open / high.
setMethod("notify",
  signature(order = "SellLimitOrder"),
  function(order, broker, price.bar) {
    if (as.numeric(Op(price.bar)) > as.numeric(limit_price(order))) {
      execute(order, at = Op(price.bar), broker)
    } else {
      if (as.numeric(Hi(price.bar)) > as.numeric(limit_price(order))) {
        execute(order, at = limit_price(order), broker)
      }
    }
  })
# Limit orders are similar only when they sit at the same depth of the
# LimitOrder hierarchy (i.e. same concrete type) AND share a limit price.
setMethod("areSimilar",
  signature(order1 = "LimitOrder", order2 = "LimitOrder"),
  function(order1, order2) {
    limit.dist1 <- inherits(order1, "LimitOrder", which = TRUE)
    limit.dist2 <- inherits(order2, "LimitOrder", which = TRUE)
    same.type <- limit.dist1 == limit.dist2
    same.limit <- limit_price(order1) == limit_price(order2)
    return(same.type && same.limit)
  })

# Merging plain limit orders is deliberately unsupported.
setMethod("mergeOrders",
  signature(existing.order = "LimitOrder", new.order = "LimitOrder"),
  function(existing.order, new.order) {
    stop("merge for Limit orders not defined")
  })
#' Stop Loss Order
#'
#' A protective order that triggers once price moves through its stop
#' level; reuses the LimitOrder limit.price slot for that level.
setClass("StopLossOrder",
  contains = "LimitOrder")

setClass("BuyStopLoss",
  contains = "StopLossOrder")

setClass("SellStopLoss",
  contains = "StopLossOrder")
#' Create a stop loss order
#'
#' Builds a Buy/SellStopLoss for `instrument` that triggers at the stop
#' price `at` (stored in the inherited limit.price slot).
#'
#' @param instrument character instrument identifier.
#' @param buy number of shares to buy (mutually exclusive with `sell`).
#' @param sell number of shares to sell.
#' @param at the stop (trigger) price.
#' @return a BuyStopLoss (positive quantity) or SellStopLoss (negative
#'   quantity).
Stop <- function(instrument, buy = NULL, sell = NULL, at) {
  check_order_parameters(instrument, buy, sell)
  if (missing(at)) {
    # BUG FIX: error message previously said "Limit order" — a
    # copy-paste from Limit().
    stop("Stop order must have a stop price")
  }
  # Branches differed only in class name and quantity sign; build the
  # shared part once.
  if (is.null(buy)) {
    order.class <- "SellStopLoss"
    quantity <- -abs(as.integer(sell))
  } else {
    order.class <- "BuyStopLoss"
    quantity <- abs(as.integer(buy))
  }
  order <- new(order.class,
               instrument = instrument,
               status = OpenStatus(),
               quantity = quantity,
               execution.price = xts(),
               limit.price = at,
               submission.time = initDate(),
               status.time = initDate())
  return(order)
}
# Buy stop: triggers when price rises through the stop level — fills at
# the open on a gap above, otherwise at the stop once the high trades
# through it.
setMethod("notify",
  signature(order = "BuyStopLoss"),
  function(order, broker, price.bar) {
    if (as.numeric(Op(price.bar)) > as.numeric(limit_price(order))) {
      execute(order, at = Op(price.bar), broker)
    } else {
      if (as.numeric(Hi(price.bar)) > as.numeric(limit_price(order))) {
        execute(order, at = limit_price(order), broker)
      }
    }
  })

# Sell stop: mirror image — triggers when price falls through the stop,
# filling at the open on a gap below, otherwise at the stop level.
setMethod("notify",
  signature(order = "SellStopLoss"),
  function(order, broker, price.bar) {
    if (as.numeric(Op(price.bar)) < as.numeric(limit_price(order))) {
      execute(order, at = Op(price.bar), broker)
    } else {
      if (as.numeric(Lo(price.bar)) < as.numeric(limit_price(order))) {
        execute(order, at = limit_price(order), broker)
      }
    }
  })

# Merging stop losses nets the signed quantities and keeps the EXISTING
# order's stop level; the sign selects buy vs sell (Stop() takes the
# absolute value internally).
setMethod("mergeOrders",
  signature(existing.order = "StopLossOrder", new.order = "StopLossOrder"),
  function(existing.order, new.order) {
    quantity <- quantity(existing.order) + quantity(new.order)
    limit <- limit_price(existing.order)
    if (quantity > 0) {
      order <- Stop(instrumentOf(existing.order), buy = quantity, at = limit)
    } else {
      order <- Stop(instrumentOf(existing.order), sell = quantity, at = limit)
    }
    return(order)
  })
#' Combination order - Market with Stop loss
#'
#' A market order that, once filled, lodges a protective stop a fixed
#' fraction (stop.point) away from the fill price.
setClass("MarketWithStop",
  representation(
    stop.point = "numeric"   # fractional distance of the stop from entry
  ),
  contains = "MarketOrder")

setClass("LongMarketWithStop",
  contains = "MarketWithStop")

setClass("ShortMarketWithStop",
  contains = "MarketWithStop")
#' Create a market order with an attached stop loss
#'
#' @param instrument character instrument identifier.
#' @param buy number of shares to buy (mutually exclusive with `sell`).
#' @param sell number of shares to sell.
#' @param stop.point fractional distance from the entry price at which
#'   the protective stop is placed (e.g. 0.05 = 5%); stored as abs().
#' @return a LongMarketWithStop (buy) or ShortMarketWithStop (sell).
MarketWithStop <- function(instrument, buy = NULL, sell = NULL, stop.point) {
  # BUG FIX: previously passed the misspelled name 'instrumnet', which
  # raised "object not found" at run time.
  check_order_parameters(instrument, buy, sell)
  if (missing(stop.point)) {
    stop("Stop point must be supplied")
  }
  if (is.null(buy)) {
    order.class <- "ShortMarketWithStop"
    quantity <- -abs(as.integer(sell))
  } else {
    order.class <- "LongMarketWithStop"
    quantity <- abs(as.integer(buy))
  }
  order <- new(order.class,
               instrument = instrument,
               status = OpenStatus(),
               quantity = quantity,
               execution.price = xts(),
               stop.point = abs(stop.point),
               submission.time = initDate(),
               status.time = initDate())
  # BUG FIX: the original fell off the end of the if/else, so the new
  # order was only returned invisibly; return it explicitly like the
  # sibling constructors do.
  return(order)
}
# Fill the market leg at the open, then lodge a protective sell stop
# `stop.point` below the open with the broker.
# NOTE(review): no matching notify method for ShortMarketWithStop is
# visible in this file — confirm it exists elsewhere in the package.
setMethod("notify",
  signature(order = "LongMarketWithStop"),
  function(order, broker, price.bar) {
    notify(asMarketOrder(order), broker, price.bar)
    stop.price <- Op(price.bar) * (1 - order@stop.point)
    addOrder(broker,
             Stop(instrumentOf(order), sell = quantity(order), at = stop.price))
  })
# Strip the stop-loss decoration and return the equivalent plain market
# order, preserving ID, cost model and submission time.
setGeneric("asMarketOrder",
  function(order) {
    standardGeneric("asMarketOrder")
  })

setMethod("asMarketOrder",
  signature(order = "LongMarketWithStop"),
  function(order) {
    market.order <- Order(instrumentOf(order), buy = quantity(order))
    market.order <- setID(market.order, getID(order))
    market.order <- setTxnCostModel(market.order, order@txn.cost.model)
    submissionTime(market.order) <- submissionTime(order)
    return(market.order)
  })
# Convert a ShortMarketWithStop into its plain market-order equivalent,
# preserving ID, cost model and submission time.
setMethod("asMarketOrder",
  signature(order = "ShortMarketWithStop"),
  function(order) {
    market.order <- Order(instrumentOf(order), sell = quantity(order))
    market.order <- setID(market.order, getID(order))
    market.order <- setTxnCostModel(market.order, order@txn.cost.model)
    submissionTime(market.order) <- submissionTime(order)
    # BUG FIX: previously returned the original `order`, discarding the
    # converted market.order built above (unlike the Long variant).
    return(market.order)
  })
|
5d38657b0a7f28d18d956a83766c9afb0c9ada19
|
5b361b730ddba75ede1111b3da7b1c04b4df75d3
|
/plot3.R
|
fcf369a8b532c5d0799d889af36442e21155cb09
|
[] |
no_license
|
akumar98/ExData_Plotting1
|
2c1845a67c2b730b05d69f0bea462e8efcf12183
|
f0a8a4f475f531dfa40b1af9d4efb1a35fc19244
|
refs/heads/master
| 2020-12-28T14:51:29.946535
| 2014-12-06T05:11:45
| 2014-12-06T05:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
plot3.R
|
## Read the raw household power-consumption text file (';'-separated).
powerconsumption <- read.table("./data/household_power_consumption.txt", sep=";", stringsAsFactors=FALSE, dec=".", header=TRUE)

## Create a subset with only days "1/2/2007" and "2/2/2007"
## (Date is still a d/m/Y character string at this point).
powersubset<-powerconsumption[powerconsumption$Date %in% c("1/2/2007", "2/2/2007"),]

## Combine Date and Time into one POSIXlt timestamp, and pull the
## numeric series to be plotted.
datewithtime <- strptime(paste(powersubset$Date, powersubset$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(powersubset$Global_active_power)
subMeter1 <- as.numeric(powersubset$Sub_metering_1)
subMeter2 <- as.numeric(powersubset$Sub_metering_2)
subMeter3 <- as.numeric(powersubset$Sub_metering_3)

## Open the png device and draw the three sub-metering series with a
## legend (black / red / blue).
png("plot3.png", width=480, height=480)
plot(datewithtime, subMeter1, type="l", xlab="", ylab="Energy Submetering")
lines(datewithtime, subMeter2, type="l", col="red")
lines(datewithtime, subMeter3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))

## Close the png device to write the plot to disk.
dev.off()
|
d5a273dd808621800be559f07eee7cd5f11b2196
|
0266f06762c63dc2e742ae26e1a636246f999089
|
/man/sim_sptree_bdp_time.Rd
|
e1770725b539682a50ac59c6dd3b4b27cdbab445
|
[] |
no_license
|
jjustison/rtreeducken
|
eedc498564140d43a6ddb39aefb76d47e13565d9
|
b5a4c4092912f2db17a336ebb711c0b555126c32
|
refs/heads/master
| 2022-08-31T08:15:14.519329
| 2020-05-11T20:49:17
| 2020-05-11T20:49:17
| 263,359,298
| 0
| 0
| null | 2020-05-12T14:23:21
| 2020-05-12T14:23:21
| null |
UTF-8
|
R
| false
| true
| 1,179
|
rd
|
sim_sptree_bdp_time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{sim_sptree_bdp_time}
\alias{sim_sptree_bdp_time}
\title{Simulates species tree using constant rate birth-death process to a time}
\usage{
sim_sptree_bdp_time(sbr_, sdr_, numbsim_, t_)
}
\arguments{
\item{sbr_}{species birth rate (i.e. speciation rate)}
\item{sdr_}{species death rate (i.e. extinction rate)}
\item{numbsim_}{number of species trees to simulate}
\item{t_}{time to simulate to}
}
\value{
List of objects of the tree class (as implemented in APE)
}
\description{
Simulates species tree using constant rate birth-death process to a time
}
\details{
Forward simulates a tree until a provided time is reached.
}
\examples{
mu <- 0.5 # death rate
lambda <- 2.0 # birth rate
numb_replicates <- 10
time <- 4
sim_sptree_bdp_time(sbr_ = lambda,
sdr_ = mu,
numbsim_ = numb_replicates,
t_ = time)
}
\references{
K. Hartmann, D. Wong, T. Stadler. Sampling trees from evolutionary models.
Syst. Biol., 59(4): 465-476, 2010.
T. Stadler. Simulating trees on a fixed number of extant species.
Syst. Biol., 60: 676-684, 2011.
}
|
aa3f052d13fbb088d3b073929d06ef188397e2c7
|
ede41b362f24057224cbcd608695ad2ad2a0144c
|
/packages/oldWeather5/R/oldWeather5.R
|
3f36de39430cbc551a4a818aec951ba75f277495
|
[] |
no_license
|
oldweather/oldWeather5
|
57ac0a899ea5e3ee811decd2a452aec7e59ffd50
|
fc0eeda44e8bd4aae99ce545f4dec9d1bce2823e
|
refs/heads/master
| 2020-04-06T10:03:03.464364
| 2016-11-10T17:11:16
| 2016-11-10T17:11:16
| 48,338,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
oldWeather5.R
|
#' oldWeather5 - Process data for Panoptes oldWeather
#'
#' Reads the data files downloaded from
#' http://www.zooniverse.org/lab/1253/data-exports (project data) &
#' http://www.zooniverse.org/lab/28/data-exports (Talk data).
#'
#' @section functions:
#' \itemize{
#' \item \code{\link{ReadClassifications}} - Get latest classifications dump
#' }
#'
#' @section data structure:
#' Records read in are put into data frames; rows are records,
#' columns are variables (YR, MO, SST, etc.)
#'
#' @docType package
#' @name oldWeather5
NULL
#> NULL
|
3f797ea721d6e8e39ce0d3f002ebab556dca7214
|
163d25753fbb4d5042f744f9379e7df7c004988b
|
/R/ggtaugplot.R
|
e1deb49445f2ece837d17558a99b59768639c832
|
[] |
no_license
|
cran/tensorBSS
|
bafb41acdad89b11c2f33f4c307f64991bc39e9c
|
14be1b8df1f3d6a4bcfe4dac13f048198e927cac
|
refs/heads/master
| 2021-06-20T06:19:24.496911
| 2021-06-02T05:20:02
| 2021-06-02T05:20:02
| 66,539,007
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,561
|
r
|
ggtaugplot.R
|
#' Plot criterion values per tensor mode with ggplot2
#'
#' Extracts the chosen criterion ("gn", "fn", "phin" or "lambda") and
#' the component indices from each element of x$ResMode and draws one
#' facet per mode, side by side ("horizontal") or stacked ("vertical").
#'
#' @param x object whose ResMode list holds, per mode, the criterion
#'   vectors and a `comp` vector of component indices.
#' @param crit which criterion to plot.
#' @param type "l" joins the points with a line; anything else plots
#'   points only.
#' @param scales passed to facet_wrap().
#' @param position facet layout, "horizontal" or "vertical".
#' @param ylab,xlab,main axis labels and plot title.
#' @param ... currently unused.
ggtaugplot <- function (x, crit = "gn", type = "l", scales = "free", position = "horizontal",
                        ylab = crit, xlab = "component", main = deparse(substitute(x)),
                        ...)
{
  crit <- match.arg(crit, c("gn", "fn", "phin", "lambda"))
  position <- match.arg(position, c("horizontal", "vertical"))
  # The four criteria were handled by four near-identical switch
  # branches; extract by name instead. lapply (not sapply) guarantees a
  # named list even when every mode has the same number of components —
  # sapply would simplify to a matrix and break lengths()/names() below.
  crit.list <- lapply(x$ResMode, "[[", crit)
  comp.list <- lapply(x$ResMode, "[[", "comp")
  if (crit == "lambda") {
    # lambda has one fewer value per mode; drop the second component
    # index, as the original lambda branch did.
    comp.list <- lapply(comp.list, function(v) v[-2])
  }
  L <- lengths(crit.list)
  DF <- data.frame(mode = factor(rep(names(L), L)),
                   comp = unlist(comp.list, use.names = FALSE),
                   crit = unlist(crit.list, use.names = FALSE))
  # (A second, redundant match.arg(crit, ...) was removed here.)
  # Scalar if/else instead of ifelse() for a single logical condition.
  line.alpha <- if (type == "l") 1 else 0
  # The horizontal and vertical chains differed only in the facet call.
  facets <- if (position == "horizontal") {
    facet_wrap(. ~ mode, scales = scales, ncol = length(x$AllSigHat2))
  } else {
    facet_wrap(mode ~ ., scales = scales, nrow = length(x$AllSigHat2))
  }
  ggplot(DF, aes(x = comp, y = crit)) +
    geom_point() +
    geom_line(alpha = line.alpha) +
    facets +
    labs(x = xlab, y = ylab, title = main) +
    theme_bw() +
    theme(plot.title = element_text(hjust = 0.5), panel.spacing = unit(1, "lines"))
}
|
6c6e8aca170e2932b180eacc7e38c6182655f2b5
|
acfc4a18c41c8bcd76ff898ec3899b9e59737474
|
/R/Prices.MB.R
|
f80a036eebcd00e10fb063e46a5c1b0fa937c38c
|
[] |
no_license
|
tomvar/bundling
|
8a2d135b69973df75320d2a78ba2a7457147af71
|
e8fc6e6a1f7b006a3d9ff59a33bb795bbf677a15
|
refs/heads/master
| 2021-01-10T21:54:43.301831
| 2018-03-14T16:22:51
| 2018-03-14T16:22:51
| 39,305,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,486
|
r
|
Prices.MB.R
|
#' This function creates a grid of price combinations from sequences of
#' prices p.1, p.2 and p.b. It will be searched to find prices that
#' maximize profits in the MB (mixed bundling) strategy.
#'
#' @param p.1.min.max Minimum and maximum value of price p1 [p.1.min.max <- c(p.1.min, p.1.max)]
#' @param p.2.min.max Minimum and maximum value of price p2 [p.2.min.max <- c(p.2.min, p.2.max)]
#' @param p.b.min.max Minimum and maximum value of the bundle price
#'   [p.b.min.max <- c(p.b.min, p.b.max)] (fixed: roxygen previously
#'   documented this argument as `pb.min.max`, which did not match).
#' @param step Increment of the sequences
#'
#' @return A numeric matrix with columns `fullp1`, `fullp2`, `fullpb`:
#'   one row per combination of p.1, p.2 and p.b drawn from the sequences
#'   (from p.1.min to p.1.max by step), (from p.2.min to p.2.max by step)
#'   and (from p.b.min to p.b.max by step).
#'
#' @export
Prices.MB <- function(p.1.min.max, p.2.min.max, p.b.min.max, step) {
  p.1 <- seq(p.1.min.max[1], p.1.min.max[2], by = step)
  p.2 <- seq(p.2.min.max[1], p.2.min.max[2], by = step)
  p.b <- seq(p.b.min.max[1], p.b.min.max[2], by = step)
  # Expand to all combinations: within each bundle-price block p.1 varies
  # slowest and p.2 fastest; cbind recycles fullp1/fullp2 across the p.b
  # blocks (their length divides the total, so no recycling warning).
  fullp1 <- rep(p.1, each = length(p.2))
  fullp2 <- rep(p.2, times = length(p.1))
  fullpb <- rep(p.b, each = length(p.1) * length(p.2))
  p1.p2.pb <- cbind(fullp1, fullp2, fullpb)
  return(p1.p2.pb)
}
|
ae6c18aea41601c342df9ed8e8c5df6db02ecc6d
|
c31ccca5b190016df5d85ad36d5dcfeb2c1dadde
|
/man/cell_extract_spatial.Rd
|
9a1f28129a28770a3e9039b16465add6559bf2fa
|
[] |
no_license
|
tremenyi/AgroClimGrapeIndices
|
837116651acac560c81bc8ae406eb529fb89ce09
|
e4e702b1a6eabb14e048e1fbd7f3616595f66638
|
refs/heads/master
| 2020-03-11T01:20:50.340888
| 2018-04-16T06:06:42
| 2018-04-16T06:06:42
| 129,689,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 827
|
rd
|
cell_extract_spatial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ACGI_functions.R
\name{cell_extract_spatial}
\alias{cell_extract_spatial}
\title{Spatial cell extract}
\usage{
cell_extract_spatial(raster_filename, spatial_layer, subset_layer = NULL,
fun_list = list(mean = mean, var = var, n = length))
}
\arguments{
\item{raster_filename}{File to process}
\item{spatial_layer}{Spatial layer to categorise data values by}
\item{subset_layer}{Permits extracting a subset of layers from the raster file}
\item{fun_list}{Functions to apply to each data point in the brick}
}
\value{
A data frame with extracted values from the raster file, grouped by each object in the spatial layer
}
\description{
Identify the cells that are touched by the regions in the spatial layer and extract the data from those cells
}
|
9320fcafd7c988cb1222d1727364ef9eeaacaff4
|
c9777199c78b32abc5230b74430312a6fe044bd8
|
/man/tabSERE.Rd
|
97323ca6c20d19a8c3a51d50cebc20b7fe052640
|
[] |
no_license
|
PF2-pasteur-fr/SARTools
|
114b64482549b6f91512ee7e63402be6e0717c74
|
3e504afd40caaa62bd6b75b7f0cfca34f6daa65e
|
refs/heads/master
| 2023-06-22T23:28:32.002032
| 2022-03-23T09:21:09
| 2022-03-23T09:21:09
| 27,759,216
| 112
| 70
| null | 2022-03-23T09:21:10
| 2014-12-09T09:31:15
|
R
|
UTF-8
|
R
| false
| true
| 403
|
rd
|
tabSERE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tabSERE.R
\name{tabSERE}
\alias{tabSERE}
\title{SERE statistics for several samples}
\usage{
tabSERE(counts)
}
\arguments{
\item{counts}{\code{matrix} of raw counts}
}
\value{
The \code{matrix} of SERE values
}
\description{
Compute the SERE statistic for each pair of samples
}
\author{
Marie-Agnes Dillies and Hugo Varet
}
|
9962097160544f589d227347c3d03e94e462ac2b
|
abf39cb642af0fdaf237a37ea84e5875a8ce0d29
|
/plot3.R
|
1e3878e1ed84dd1fe2f6531a1b4d979bad28d91b
|
[] |
no_license
|
xuxiao0330/ExData_Plotting1
|
3e0239ed9a586e77c72aa1be1deeca7356b77180
|
9d7b5b56177cf623dc149db4f7fd5cfdede2588c
|
refs/heads/master
| 2020-12-25T17:05:37.625844
| 2016-01-11T15:20:36
| 2016-01-11T15:20:36
| 42,379,508
| 0
| 0
| null | 2015-09-13T01:31:52
| 2015-09-13T01:31:52
| null |
UTF-8
|
R
| false
| false
| 949
|
r
|
plot3.R
|
# BUG FIX: this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... ======= ... >>>>>>>), which is a syntax error in R.
# Resolved by keeping the HEAD side, which prepares the `dt1` data frame
# required by the plotting code below.

# Read the raw power-consumption table (';'-separated).
dt <- read.table("household_power_consumption.txt", sep=";", header = TRUE, stringsAsFactors=FALSE)
# Parse the Date column (d/m/Y).
dt$Date <- as.Date(dt$Date, format="%d/%m/%Y")
# Subset to 2007-02-01 ~ 2007-02-02.
dt1 <- subset(dt, dt$Date == "2007-02-01" | dt$Date == "2007-02-02")
# Combine Date and Time into a single timestamp.
dt1$Time <- strptime(paste(dt1$Date, dt1$Time), format = "%Y-%m-%d %H:%M:%S")
# Coerce Global Active Power to numeric.
dt1$Global_active_power <- as.numeric(dt1$Global_active_power)

# Plot 3: the three sub-metering series on one chart.
png("plot3.png", height = 480, width = 480)
# Fixed label typo: was "Engergy sub metering".
plot(dt1$Time, dt1$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
## Add the remaining series
lines(dt1$Time, dt1$Sub_metering_2, col = "red")
lines(dt1$Time, dt1$Sub_metering_3, col = "blue")
## Add legend
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
8e669e38db11e989f60f13646ac0acb8f1f799b1
|
e33b78e73c726c361d4e7c66f01df56f63f7a76c
|
/TIDYR.R
|
33980c8423cbf3da0d068bbaa5270d47bb9d6b9c
|
[] |
no_license
|
Shalini-cmd/hello-world
|
cf4fc16e037ecb732d55232ebf6e9f6e127864a4
|
075ce2f7912f20f38da4c55b71b7593b2260fd63
|
refs/heads/master
| 2021-03-10T13:04:58.081805
| 2020-05-20T05:13:27
| 2020-05-20T05:13:27
| 246,455,821
| 0
| 0
| null | 2020-03-11T02:41:27
| 2020-03-11T02:27:38
| null |
UTF-8
|
R
| false
| false
| 3,580
|
r
|
TIDYR.R
|
# Worked examples of tidyr/dplyr reshaping (gather/spread, separate/
# unite, missing values), following the "tidy data" chapter of R4DS.
# NOTE(review): gather()/spread() are superseded by pivot_longer()/
# pivot_wider() in tidyr >= 1.0; kept here because they are the topic.
#
# Three rules for a tidy dataset:
# 1. Each variable should have a column
# 2. Each observation must have its own row
# 3. Each value must have its own cell

# Cases summed per year and country.
table1 %>%
  group_by(year, country) %>%
  summarize(n_cases = sum(cases))

# Cases per 10,000 of population, within each country.
table1 %>%
  group_by(country) %>%
  mutate(per_cases = sum(cases) * 10000 / sum(population))

# Spreading and gathering -------------------------------------------
table4a
# Variable values (years) are column names here; gather them into a
# year/cases pair of columns.
tidy4a <- table4a %>%
  gather('1999', '2000', key = 'year', value = 'cases')
tidy4b <- table4b %>%
  gather('1999', '2000', key = 'year', value = 'population')
left_join(tidy4a, tidy4b)

library(tidyr)
table2
# Each observation is spread across 2 rows; spread the type/count pair
# back into separate columns.
table2 %>%
  spread(key = 'type', value = 'count')

stocks <- tibble(
  year = c(2015, 2015, 2016, 2016),
  half = c(1, 2, 1, 2),
  return = c(1.88, 0.59, 0.92, 0.17)
)
stocks
stocks %>%
  spread(key = 'half', value = 'return')
table4a
table4a %>%
  gather('1999', '2000', key = 'year', value = 'cases')

# Duplicate (name, key) pairs: spread() needs unique combinations.
people <- tribble(
  ~name, ~key, ~value,
  #-----------------|--------|------
  "Phillip Woods", "age", 45,
  "Phillip Woods", "height", 186,
  "Phillip Woods", "age", 50,
  "Jessica Cordero", "age", 37,
  "Jessica Cordero", "height", 156
)
glimpse(people)
people %>%
  spread(key = 'key', value = 'value')
# Fix: add an observation counter so each (name, key, obs) is unique.
people2 <- people %>%
  group_by(name, key) %>%
  mutate(obs = row_number())
people2 %>%
  spread(key, value)

preg <- tribble(
  ~pregnant, ~male, ~female,
  "yes", NA, 10,
  "no", 20, 12
)
preg
preg %>%
  gather('male', 'female', key = 'gender', value = 'age')

# Separating and uniting --------------------------------------------
table3
# Split a column holding two variable values into separate columns.
tidy3 <- table3 %>%
  separate(rate, c('cases', 'population'))
# By default the derived columns are of type character.
class(tidy3$cases)
tidy3 <- table3 %>%
  separate(rate, c('cases', 'population'), convert = TRUE)
class(tidy3$cases)
table3
# Split the year column at a fixed position (after 2 characters).
tidy3_t <- table3 %>%
  separate(year, c('century', 'decade'), sep = 2) %>%
  separate(rate, c('cases', 'population'), convert = TRUE)
tidy3_t

# unite() is the inverse of separate(): combines multiple columns into
# one.
table5
table5 %>%
  unite(new, century, year)
table5 %>%
  unite(new, century, year, sep = '')

# Handling ragged splits: drop extras, merge extras, or pad with NA.
tibble(x = c("a,b,c", "d,e,f,g", "h,i,j")) %>%
  separate(x, c("one", "two", "three"), extra = 'drop')
tibble(x = c("a,b,c", "d,e,f,g", "h,i,j")) %>%
  separate(x, c("one", "two", "three"), extra = 'merge')
tibble(x = c("a,b,c", "d,e,f,g", "h,i,j")) %>%
  separate(x, c("one", "two", "three"), fill = 'right')

# Missing values -----------------------------------------------------
stocks <- tibble(
  year = c(2015, 2015, 2015, 2015, 2016, 2016, 2016),
  qtr = c(1, 2, 3, 4, 2, 3, 4),
  return = c(1.88, 0.59, 0.35, NA, 0.92, 0.17, 2.66)
)
stocks
# Round-tripping through spread/gather makes implicit missings explicit.
stocks %>%
  spread(year, return) %>%
  gather('2015', '2016', key = 'year', value = 'return')
stocks %>%
  complete(year, qtr)
# complete() takes a set of columns, finds all unique combinations and
# makes sure the data has the complete set of rows.

# CASE STUDY: WHO tuberculosis data ----------------------------------
who
# Long format: one row per (country, year, demographic-key) with counts.
who1 <- who %>%
  gather(new_sp_m014:newrel_f65, key = 'key', value = 'cases', na.rm = TRUE)
# Normalise the inconsistent 'newrel' keys to 'new_rel'.
who1 <- who1 %>%
  mutate(key = stringr::str_replace(key, 'newrel', 'new_rel'))
# Split the key into its three components (new/type/sex+age).
who2 <- who1 %>%
  separate(key, c('type1', 'type2', 'type3'), sep = '_')
who2
View(who2)
# Split sex (1 char) from the age range.
who3 <- who2 %>%
  separate(type3, c('gender', 'age_Range'), sep = 1)
who3
View(who3)
# Normalise age codes, then split into lower/upper bounds.
who3 <- who3 %>%
  mutate(age_Range = stringr::str_replace(age_Range, '014', '0014')) %>%
  mutate(age_Range = stringr::str_replace(age_Range, '65', '65++')) %>%
  separate(age_Range, c('ll', 'ul'), sep = 2)
# Re-unite the bounds as "ll-ul" and drop redundant columns.
tidywho <- who3 %>%
  unite(age_range, ll, ul, sep = '-') %>%
  select(-iso2, -iso3, -type1)
head(tidywho)
|
29e37f932c8f857c70f97b710a133f82d039335e
|
1aa413301db92dd918278f7d309260b0130f8cd8
|
/R/plot_rendite_comp.R
|
87183291d2878c3eed62f4d22e70e6c9c3d73dd5
|
[] |
no_license
|
philgee1981/btcecho
|
19467886cb335ddd3a6f28e67ac3edf883d979ab
|
37b01871ecb72da58e11643394c89428b9c8adf9
|
refs/heads/master
| 2022-01-10T01:23:24.811566
| 2019-05-27T15:50:27
| 2019-05-27T15:50:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,095
|
r
|
plot_rendite_comp.R
|
#' Plot Returns of different portfolios
#'
#' Plot one portfolio-development in comparison to two other ones
#'
#' @param returns_safe A xts with several columns consisting of data
#' @param portf_safe A vector defining the portfolio weights
#' @param returns_medium A xts with several columns consisting of data
#' @param portf_medium A vector defining the portfolio weights
#' @param returns_risky A xts with several columns consisting of data
#' @param portf_risky A vector defining the portfolio weights
#' @param date_margin A string specifying the start- and enddates
#' @param legend_pos where to set the legend
#'
#' @author Philipp Giese
#' @return A plot with development of three portfolios
#' @export
#' @examples
#' returns_safe<-calculate_mutliple_cc("CCCAGG",c("BTC","LTC","DASH"),"USD",7,T)
#' portf_safe<-c(0.3,0.4,0.3)
#' returns_medium<-calculate_mutliple_cc("CCCAGG",c("ADA","EOS","ZRX"),"USD",7,T)
#' portf_medium<-c(0.2,0.4,0.4)
#' returns_risky<-calculate_mutliple_cc("CCCAGG",c("PAY","BNB","ZCL"),"USD",7,T)
#' portf_risky<-c(0.2,0.4,0.4)
#' plot_rendite_comp(returns_safe,portf_safe,returns_medium,portf_medium,returns_risky,portf_risky,"20180915/","bottomleft")
plot_rendite_comp <-
  function(returns_safe,portf_safe,returns_medium,portf_medium,returns_risky,portf_risky,date_margin,legend_pos){
    # Cumulative development of one portfolio, re-based to 0 at the first
    # observation so all three curves start at the same level.
    # (The original recomputed btcecho::get_portf_dev twice per series --
    # six calls in total; hoisting it here removes the redundancy.)
    cum_dev <- function(returns, weights) {
      dev <- btcecho::get_portf_dev(returns, weights, date_margin)
      cumsum(dev) - as.numeric(dev[1])
    }
    cols <- c("dark green", "orange", "red")
    series <- 100 * merge(cum_dev(returns_safe, portf_safe),
                          cum_dev(returns_medium, portf_medium),
                          cum_dev(returns_risky, portf_risky))
    plot(series, lwd = c(3, 3, 3), main = "Tägliche Rendite in Prozent",
         yaxis.right = FALSE, col = cols)
    addLegend(legend_pos, on = 1, legend.names = c("Konservativ", "Medium", "Risiko"),
              lty = 1, lwd = c(3, 3, 3), col = cols, bty = "o")
  }
|
aeaa0c318c79871dd7daf976b27e3f2721c86605
|
9afab26de81a32255d270beb335543d73759e4f0
|
/Practica2/Ejercicio3/Ejercicio3.R
|
597eafcfec1d02f37ad3486f0580587567c34ee0
|
[] |
no_license
|
Llampi/Probabilidad-y-estadistca
|
aa6a8f1506d4aae24f42991388f99a319cfde43b
|
f89fee10e3b94bf0baf09732e50a1059acce28d8
|
refs/heads/master
| 2020-03-09T21:03:08.615779
| 2018-06-01T13:17:46
| 2018-06-01T13:17:46
| 128,999,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,273
|
r
|
Ejercicio3.R
|
### PREG3
# Part 3.a -- conditional printing exercises.
vec1 <- c(2, 1, 1, 3, 2, 1, 0)
vec2 <- c(3, 8, 2, 2, 0, 0, 0)
# vec1[1] + vec2[2] = 2 + 8 = 10, so the condition holds and the message prints.
if (vec1[1] + vec2[2] == 10) {
  cat("Imprime el resultado!")
}
# Both scalar tests are TRUE (vec1[1] = 2, vec2[1] = 3), so the message prints.
if (vec1[1] >= 2 && vec2[1] >= 2) {
  cat("Imprime el resultado!")
}
# vec2 - vec1 = (1, 7, 1, -1, -2, -1, 0); all() requires elements 2 and 6
# (7 and -1) to be below 7. Since 7 < 7 is FALSE, nothing is printed.
if (all((vec2 - vec1)[c(2, 6)] < 7)) {
  cat("Imprime el resultado!")
}
# is.na() tests element-wise for NA; vec2[3] = 2 is not NA, so this prints.
if (!is.na(vec2[3])) {
  cat("Imprime el resultado!")
}
# Part 3.b -- element-wise combination of vec1 and vec2: the product where
# the pairwise sum exceeds 3, the plain sum otherwise (vectorized form of
# the original for-loop).
vec3 <- ifelse(vec1 + vec2 > 3, vec1 * vec2, vec1 + vec2)
vec3
# Part 3.c
# Replace every diagonal entry of a square character matrix that starts
# with "g"/"G" by "AQUI". If no diagonal entry qualifies, return the
# identity matrix of the same dimension instead (as the exercise requires).
#
# Improvements over the original: `seq_len(nrow(mymat))` instead of the
# fragile `1:sqrt(length(mymat))`, and a single case-insensitive first-
# character test via toupper() instead of two substring comparisons.
#
# @param mymat a square character matrix.
# @return `mymat` with matching diagonal entries set to "AQUI", or
#   `diag(nrow(mymat))` when nothing was replaced.
verificarMatr <- function(mymat)
{
  n <- nrow(mymat)  # matrix is square, so nrow() is its dimension
  replaced <- 0L    # counts how many diagonal entries were replaced
  for (i in seq_len(n))
  {
    # Case-insensitive test of the first character of the diagonal entry.
    if (toupper(substr(mymat[i, i], 1, 1)) == "G")
    {
      mymat[i, i] <- "AQUI"
      replaced <- replaced + 1L
    }
  }
  if (replaced == 0L)
  {
    mymat <- diag(n)
  }
  mymat
}
# Try the function on the given matrices.
# All-numeric strings: no diagonal entry starts with g/G, so the identity
# matrix is returned.
mymat <- matrix(as.character(1:16),4,4)
verificarMatr(mymat)
# byrow=T puts "GREAT" at [1,1] (replaced) and "hey" at [2,2] (kept).
mymat <- matrix(c("GREAT","ejercicioss","agua","hey"),2,2,byrow=T)
verificarMatr(mymat)
# Diagonal is "DANDELION", "geranium", "GLADIOLUS": the last two start
# with g/G and are replaced by "AQUI".
mymat <- matrix(c("DANDELION","Hyacinthus","Gerbera","MARIGOLD","geranium","ligularia","Pachysandra","SNAPDRAGON","GLADIOLUS"),3,3)
verificarMatr(mymat)
|
97a9d292fb7faef359ef87b2c0f8598297f22b91
|
2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89
|
/man/getXAM.ChipEffectFile.Rd
|
4c9082764e6c7fc97d2ac264d8f54f30092e9d47
|
[] |
no_license
|
HenrikBengtsson/aroma.affymetrix
|
a185d1ef3fb2d9ee233845c0ae04736542bb277d
|
b6bf76f3bb49474428d0bf5b627f5a17101fd2ed
|
refs/heads/master
| 2023-04-09T13:18:19.693935
| 2022-07-18T10:52:06
| 2022-07-18T10:52:06
| 20,847,056
| 9
| 4
| null | 2018-04-06T22:26:33
| 2014-06-15T03:10:59
|
R
|
UTF-8
|
R
| false
| false
| 1,663
|
rd
|
getXAM.ChipEffectFile.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ChipEffectFile.xam.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{getXAM.ChipEffectFile}
\alias{getXAM.ChipEffectFile}
\alias{ChipEffectFile.getXAM}
\alias{getXAM,ChipEffectFile-method}
\title{Gets the physical position, log-intensities and log-ratios of chip effects for two arrays}
\description{
Gets the physical position, log-intensities and log-ratios of chip effects for two arrays of units on a certain chromosome.
}
\usage{
\method{getXAM}{ChipEffectFile}(this, other, chromosome, units=NULL, ..., verbose=FALSE)
}
\arguments{
\item{other}{The second \code{\link{ChipEffectFile}} object used as the
reference.}
\item{chromosome}{The chromosome for which results should be returned.}
\item{units}{The subset of units to be matched.
If \code{\link[base]{NULL}}, all units are considered.}
\item{...}{Not used.}
\item{verbose}{See \code{\link[R.utils]{Verbose}}.}
}
\value{
Returns a Nx3 matrix where N is the number of units returned.
The names of the columns are X (physical position in a given chromosome),
A (log-intensities) and M (log-ratios).
The names of the rows are the unit indices (as indexed by the CDF).
\emph{Note: The rows are ordered according to chromosomal position.}
}
\author{Henrik Bengtsson}
\seealso{
\code{\link[aroma.affymetrix:getAM.ChipEffectFile]{*getAM}()}.
For more information see \code{\link{ChipEffectFile}}.
}
\keyword{internal}
\keyword{methods}
|
2e28959006ababb0cc63fb3f98aa9fb6df0b78b2
|
d5fd9fcb1f81093a5d16184b889cf74edd985b2e
|
/haskell/statisticsScripts/rq4_tests.R
|
0787366b8c35bff0a2d9e16117376af8cad3a450
|
[] |
no_license
|
doyougnu/VSat
|
90e4f55a67f420e87e833bc94fd65d4b3263af29
|
4c9acbd1280792c0e9665da737131ebf2d9ea0aa
|
refs/heads/master
| 2021-10-11T06:10:34.149842
| 2021-10-01T13:35:43
| 2021-10-01T13:35:43
| 105,307,042
| 4
| 1
| null | 2018-07-12T17:02:33
| 2017-09-29T18:56:43
|
Haskell
|
UTF-8
|
R
| false
| false
| 5,629
|
r
|
rq4_tests.R
|
library(ggplot2)
library(dplyr)
library(tidyr)
library(broom)
library(scales)
# Input CSVs produced by the munging step (one row per benchmark observation).
finRawFile <- "../munged_data/financial_raw_singletons.csv"
autoRawFile <- "../munged_data/auto_raw_singletons.csv"
# Load the Financial dataset: factor the grouping columns, prettify the
# algorithm labels ("-->" becomes a long rightwards arrow), tag the rows,
# and convert the cumulative Time column into per-observation durations
# (TimeCalc) within each Algorithm/DataSet/Config group.
finRawDF <- read.csv(file=finRawFile) %>%
mutate(Algorithm = as.factor(Algorithm), Config = as.factor(Config)) %>%
mutate(Algorithm = gsub("-->", "\U27f6", Algorithm), data = "Financial") %>%
group_by(Algorithm, DataSet, Config) %>%
mutate(TimeCalc = Time - lag(Time, default = 0))
# Same preparation for the Auto dataset.
autoRawDF <- read.csv(file=autoRawFile) %>%
mutate(Algorithm = as.factor(Algorithm), Config = as.factor(Config)) %>%
mutate(Algorithm = gsub("-->", "\U27f6", Algorithm), data = "Auto") %>%
group_by(Algorithm, DataSet, Config) %>%
mutate(TimeCalc = Time - lag(Time, default = 0))
##################### Financial #############################
# Kruskal-Wallis rank-sum tests on the per-observation running times.
## Algorithms are significant
fin.alg.res <- kruskal.test(TimeCalc ~ Algorithm, finRawDF)
## Versions are significant as expected
fin.vers.res <- kruskal.test(TimeCalc ~ Config, finRawDF)
## Solvers also significant
fin.slvr.res <- kruskal.test(TimeCalc ~ DataSet, finRawDF)
## Interaction between algorithm and version significant as expected
fin.alg.conf.inters <- interaction(finRawDF$Algorithm, finRawDF$Config)
fin.alg.slvr.inters <- interaction(finRawDF$Algorithm, finRawDF$DataSet)
fin.alg.conf.slvr.inters <- interaction(finRawDF$DataSet, finRawDF$Algorithm, finRawDF$Config)
fin.alg.conf.res <- kruskal.test(TimeCalc ~ fin.alg.conf.inters, finRawDF)
fin.alg.slvr.res <- kruskal.test(TimeCalc ~ fin.alg.slvr.inters, finRawDF)
fin.alg.conf.slvr.res <- kruskal.test(TimeCalc ~ fin.alg.conf.slvr.inters, finRawDF)
## Find the pairs which are significant
# Post-hoc pairwise Wilcoxon tests on the algorithm-x-version groups.
# separate() at character positions 3 and 4 splits the interaction label
# back into algorithm and version; this assumes 3-character algorithm
# codes followed by the "." separator -- TODO confirm against the labels.
fin.pairs <- pairwise.wilcox.test(finRawDF$TimeCalc, fin.alg.conf.inters,
p.adj="bonf", exact=FALSE, method="holm",
paired=FALSE) %>%
tidy %>%
separate(group1, sep=c(3,4), into = c("AlgLeft", "Dump", "ConfigLeft")) %>%
separate(group2, sep=c(3,4), into = c("AlgRight", "Dump2", "ConfigRight")) %>%
select(-Dump, -Dump2) %>%
filter(ConfigRight == ConfigLeft) %>%
mutate(data = "Financial") %>%
arrange(p.value)
## We notice here that the p-values are all 1 after the bonferroni adjustment.
## Solver is not statistically significant
# Post-hoc pairwise Wilcoxon tests on the algorithm-x-solver groups; the
# interaction labels are "ALG.Solver", so splitting on "." recovers both.
# FIX: the fall-through case of case_when() was written `... = TRUE ~ ...`,
# which names the formula "..." instead of supplying the plain catch-all
# `TRUE ~ ...` used elsewhere in this script (see the combined data frame).
fin.slvr.pairs <- pairwise.wilcox.test(finRawDF$TimeCalc, fin.alg.slvr.inters,
p.adj="bonf", exact=TRUE, method="holm",
paired=FALSE) %>%
tidy %>%
separate(group1, sep="\\.", into = c("AlgLeft", "SolverLeft")) %>%
separate(group2, sep="\\.", into = c("AlgRight", "SolverRight")) %>%
filter(AlgLeft == AlgRight) %>%
mutate(Significance = case_when(p.value <= 0.05 ~ "Significant",
TRUE ~ "Not Significant")) %>%
arrange(p.value, SolverLeft)
##################### Auto #############################
# The same analysis repeated for the Auto dataset.
## Algorithms are significant
auto.alg.res <- kruskal.test(TimeCalc ~ Algorithm, autoRawDF)
## Versions are significant as expected
auto.vers.res <- kruskal.test(TimeCalc ~ Config, autoRawDF)
## Solvers are actually not significant by themselves
auto.slvr.res <- kruskal.test(TimeCalc ~ DataSet, autoRawDF)
## Interaction between algorithm and version significant as expected
auto.alg.conf.inters <- interaction(autoRawDF$Algorithm, autoRawDF$Config)
auto.alg.slvr.inters <- interaction(autoRawDF$Algorithm, autoRawDF$DataSet)
auto.alg.conf.slvr.inters <- interaction(autoRawDF$DataSet, autoRawDF$Algorithm, autoRawDF$Config)
auto.alg.conf.res <- kruskal.test(TimeCalc ~ auto.alg.conf.inters, autoRawDF)
auto.alg.slvr.res <- kruskal.test(TimeCalc ~ auto.alg.slvr.inters, autoRawDF)
auto.alg.conf.slvr.res <- kruskal.test(TimeCalc ~ auto.alg.conf.slvr.inters, autoRawDF)
## Find the pairs which are significant
# Post-hoc pairwise Wilcoxon tests on the algorithm-x-version groups; the
# positional separate() assumes 3-character algorithm codes -- TODO confirm.
auto.pairs <- pairwise.wilcox.test(autoRawDF$TimeCalc, auto.alg.conf.inters,
p.adj="bonf", exact=FALSE, method="holm",
paired=FALSE) %>%
tidy %>%
separate(group1, sep=c(3,4), into = c("AlgLeft", "Dump", "ConfigLeft")) %>%
separate(group2, sep=c(3,4), into = c("AlgRight", "Dump2", "ConfigRight")) %>%
select(-Dump, -Dump2) %>%
filter(ConfigRight == ConfigLeft) %>%
mutate(data = "Auto") %>%
arrange(p.value)
## We notice here that the p-values are all 1 after the bonferroni adjustment.
## Solver is not statistically significant for both datasets
# Post-hoc pairwise Wilcoxon tests on the algorithm-x-solver groups.
# FIX: as in the Financial section, the mistyped `... = TRUE ~` catch-all
# of case_when() is replaced by the intended plain `TRUE ~`.
auto.slvr.pairs <- pairwise.wilcox.test(autoRawDF$TimeCalc, auto.alg.slvr.inters,
p.adj="bonf", exact=TRUE, method="holm",
paired=FALSE) %>%
tidy %>%
separate(group1, sep="\\.", into = c("AlgLeft", "SolverLeft")) %>%
separate(group2, sep="\\.", into = c("AlgRight", "SolverRight")) %>%
filter(AlgLeft == AlgRight) %>%
mutate(Significance = case_when(p.value <= 0.05 ~ "Significant",
TRUE ~ "Not Significant")) %>%
arrange(p.value,SolverLeft)
## ########################## Combined data frame ##################
# Disable scientific notation so p-values print in full.
options(scipen = 999)
# Stack the Auto and Financial pairwise results, flag significance at the
# 0.05 level, and order the version factor numerically (V1..V10).
rq4pvDF <- rbind(auto.pairs, fin.pairs) %>%
arrange(p.value) %>%
mutate(Significance = case_when(p.value <= 0.05 ~ "Significant",
TRUE ~ "Not Significant"),
Version = factor(ConfigLeft, levels =
c("V1", "V2", "V3", "V4", "V5", "V6",
"V7", "V8", "V9", "V10")))
# Count significant / non-significant pairs per algorithm and dataset.
rq4_counts <- rq4pvDF %>% count(AlgLeft, data, Significance)
|
44a43d3fb75fb4d471b2be6e7045eeb1c09b52c4
|
d431a10beb2e7f84ca6765c3d91bf5057196afb2
|
/Ecological Modelling/MatrixModel/LeslieMatrix.R
|
d2f48b5f6e61554288177b42be640a01b989de52
|
[] |
no_license
|
yaghan/bio7_examples
|
89d0de241e6e58dc9f37cee5b777f6dd63d96281
|
3815086a3500cdf1026c2652ed726e5575a14d15
|
refs/heads/master
| 2021-01-16T22:07:32.996426
| 2015-05-21T15:20:43
| 2015-05-21T15:20:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
LeslieMatrix.R
|
# Start the RServer and then interpret the source!
# Open the Expression dialog and then enter
# n1 to see the results in the console !
# Leslie projection matrix: the first row holds age-specific birth rates,
# the sub-diagonal holds the survival probabilities between age classes.
L <- rbind(
  c(0,   4,   3,   1),   # birth rates
  c(0.7, 0,   0,   0),   # survival age class 1 -> 2
  c(0,   0.9, 0,   0),   # survival age class 2 -> 3
  c(0,   0,   0.5, 0)    # survival age class 3 -> 4
)
# Current abundance per age class
n0 <- c(9, 5, 4, 2)
# Abundance after one timestep (t + 1): matrix-vector product
n1 <- L %*% n0
print("The result is:")
n1
|
b93c7b989c56e5bc9ce4ccf7583387c52b8411e8
|
8cd239031820b73aa8b6a7400c9651d8a80ba043
|
/plot4.R
|
07486f44303e140c4af9fae744dbe1c50af83a8c
|
[] |
no_license
|
radh07/ExData_Plotting1
|
84fc80437648fc6d5f6144926fe8e7ccf79882f9
|
25504a1c678f8dcdb5bd0c952a20c604fda578b4
|
refs/heads/master
| 2021-01-18T08:49:36.675456
| 2015-03-04T18:45:21
| 2015-03-04T18:45:21
| 31,574,854
| 0
| 0
| null | 2015-03-03T02:19:16
| 2015-03-03T02:19:16
| null |
UTF-8
|
R
| false
| false
| 1,832
|
r
|
plot4.R
|
# Read the data into R. All columns come in as character (colClasses), so
# the measurement columns must be converted to numeric before plotting.
data<-read.csv(file="household_power_consumption.txt", header=TRUE,sep=";",colClasses="character")
# Grab the data for the two dates of interest
relData<-subset(data, Date == "1/2/2007" | Date == "2/2/2007")
# FIX: the original passed character columns straight to plot()/lines(),
# which fails because plot() needs numeric y values. Convert every
# measurement column explicitly (non-numeric entries such as "?" become NA
# with a warning).
numCols <- c("Global_active_power", "Global_reactive_power", "Voltage",
             "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
relData[numCols] <- lapply(relData[numCols], as.numeric)
# Combine the Date and Time columns into a single time object using as.POSIXct
timeobj <- as.POSIXct(paste(as.Date(relData$Date,"%d/%m/%Y"), relData$Time), format="%Y-%m-%d %H:%M:%S")
# Open a png device
png(filename="plot4.png", width=480, height=480)
# Create plot window for 4 graphs in a single plot (2 x 2 layout)
par(mfrow=c(2,2))
# Top-left: line graph of Global Active Power over time
plot(timeobj, relData$Global_active_power, type="l", main="", xlab="", ylab="Global Active Power")
# Top-right: line graph of Voltage over time
plot(timeobj, relData$Voltage, type="l", main="", xlab="datetime", ylab="Voltage")
# Bottom-left: the three sub-metering series drawn on a single axis
plot(timeobj, relData$Sub_metering_1, type="l", main="", xlab="", ylab="Energy sub metering")
lines(timeobj, relData$Sub_metering_2, col="red")
lines(timeobj, relData$Sub_metering_3, col="blue")
# Add the legend at the top right corner with line symbols in
# appropriate colors, corresponding text, no box around legend
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1), col=c("black","red","blue"),bty="n")
# Bottom-right: line graph of Global Reactive Power over time
plot(timeobj, relData$Global_reactive_power, type="l", main="", xlab="datetime", ylab="Global_reactive_power")
# Close the graphics device
dev.off()
|
4cb0b7512b70cf5876691a4754fddb55236d32ac
|
fed93c5054545d927f3695b51f3a8c9dafb90086
|
/R/tagtools/R/hilbert_env.R
|
5be8984de3cb139298c90ccc734e6e8f206df220
|
[] |
no_license
|
spluque/TagTools
|
34629e360afd3170aa167437cccfd72001b2c69c
|
5f150109114cbbdf551cbf8a02e335006613d332
|
refs/heads/master
| 2021-12-07T10:54:11.656760
| 2021-10-14T20:36:29
| 2021-10-14T20:36:29
| 233,162,704
| 0
| 0
| null | 2020-01-11T02:11:30
| 2020-01-11T02:11:29
| null |
UTF-8
|
R
| false
| false
| 2,224
|
r
|
hilbert_env.R
|
#' Compute the envelope of X using Hilbert transform.
#'
#' Compute the envelope of the signal matrix X using the Hilbert transform.
#' To avoid long transforms, this function uses the overlap and add method.
#'
#' @param X a vector or matrix of signals. If X is a matrix, each column is treated as a separate signal.
#' The signals must be regularly sampled for the result to be correctly interpretable as the envelope.
#' @param N (optional) specifies the transform length used.
#' The default value is 1024 and this may be fine for most situations.
#' @return E, the envelope of X. E is the same size as X: it has the same number of columns
#' and the same number of samples per signal. It has the same units as
#' X but being an envelope, all values are >=0.
#' @export
#' @examples \dontrun{
#' s <- matrix(sin(0.1 * c(1:10000)), ncol = 1) *
#' matrix(sin(0.001 * c(1:10000)), ncol = 1)
#' E <- hilbert_env(s)
#' plot(c(1:length(s)), s, col = 'grey34')
#' lines(c(1:length(E)), E, col = 'black')
#' }
hilbert_env <- function(X, N = 1024) {
  # note: N must be even
  if (is.matrix(X)) {
    if (nrow(X) == 1) { # make sure X is a column vector or matrix
      X <- t(X)
    }
  } else {
    X <- matrix(X, ncol = 1)
  }
  # Triangular taper, replicated across channels, for overlap-and-add.
  taper <- signal::triang(N) %*% matrix(1, nrow = 1, ncol = ncol(X))
  nbuffs <- floor(nrow(X) / (N / 2) - 1)
  iind <- c(1:N)          # input indices for one full buffer
  oind <- c(1:(N / 2))    # output indices: first half of a buffer
  lind <- c((N / 2 + 1):N) # second half, carried over to the next buffer
  E <- matrix(0, nrow = nrow(X), ncol = ncol(X))
  if (nbuffs == 0) {
    # Signal shorter than ~1.5 transform lengths: one transform suffices.
    E <- Mod(hht::HilbertTransform(X))
    E <- check_mat(E)
    return(E)
  }
  # first buffer
  H <- hht::HilbertTransform(X[c(1:N), ])
  H <- check_mat(H)
  E[oind, ] <- Mod(H[oind, ])
  lastH <- H[lind, ] * taper[lind, ]
  # middle buffers -- these only exist when nbuffs >= 3.
  # FIX: the original unconditional `for (k in 2:(nbuffs - 1))` counted
  # DOWNWARDS for nbuffs < 3 (e.g. 2:0), producing invalid/negative indices.
  if (nbuffs > 2) {
    for (k in c(2:(nbuffs - 1))) {
      kk <- (k - 1) * N / 2
      H0 <- check_mat(hht::HilbertTransform(X[kk + iind, ]))
      H <- H0 * taper
      E[kk + oind, ] <- Mod(H[oind, ] + lastH)
      lastH <- H[lind, ]
    }
  }
  # last buffer (covers the remainder of the signal)
  kk <- (nbuffs - 1) * N / 2
  H <- hht::HilbertTransform(X[c((kk + 1):nrow(X)), ])
  H <- check_mat(H)
  E[kk + oind, ] <- Mod(H[oind, ] * taper[oind, ] + lastH)
  E[c((kk + N / 2 + 1):nrow(E)), ] <- Mod(H[c((N / 2 + 1):nrow(H)), ])
  # FIX: return the envelope explicitly. The original fell off the end of
  # the function after a sub-assignment, so it returned the last assigned
  # slice instead of E in the multi-buffer path.
  E
}
# Coerce `xx` to a matrix.
#
# Vectors become single-column matrices (length(xx) rows); inputs that are
# already matrices are returned unchanged.
# FIX: the original only returned inside the `if` branch, so when `xx` was
# already a matrix the function returned NULL and clobbered its caller's
# data (e.g. `E <- check_mat(E)` in hilbert_env).
#
# @param xx a vector or matrix.
# @return `xx` as a matrix.
check_mat <- function(xx) {
  if (!is.matrix(xx)) {
    xx <- matrix(xx, nrow = length(xx))
  }
  xx
}
|
2380239725d28a83f05776f86e3f9ea98c016705
|
f1b04a74ebfe7b0023d3d3c9af14791a4c2eef22
|
/R/call_coaccessible.R
|
b0bbe5d49070324fd82c8af8b9f58a088d175824
|
[
"MIT"
] |
permissive
|
Zeyu618/maize_single_cell_cis_regulatory_atlas
|
2a98385f0ddde5e534a4c01f0ce4e1798aec1cdd
|
3178a0c2458d9fe229635b1bd94902f579518c12
|
refs/heads/master
| 2023-05-02T07:12:54.782679
| 2020-10-08T12:32:25
| 2020-10-08T12:32:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,884
|
r
|
call_coaccessible.R
|
###############################################################################
## Cicero trajectories
###############################################################################
# default variables
threads <- 1  # number of worker processes for the parallel sections below
dims <- 50    # reduced-dimension count (not used further in this script)
# commandline arguments
# Usage: Rscript <script> <threads> <output-prefix> <input-file> <metadata-file>
args = commandArgs(TRUE)
if(length(args)==4){
# read in commandline arguments
threads <- as.numeric(args[1])
output <- as.character(args[2])   # prefix used in all output file names
input <- as.character(args[3])    # peak/cell table read below
metafile <- as.character(args[4]) # per-cell metadata table
}else{
stop("wrong number of arguments")
}
###############################################################################
## Load data, functions, and wd
###############################################################################
# setwd
setwd(getwd())  # effectively a no-op; kept from the original script
# load libraries
source("call_coaccessible_UTILs.R")
# load data
message(" ... Loading data")
# `a`: peak/cell table; V2 holds cell barcodes (matched against the
# metadata rownames below), V1 presumably peak identifiers -- TODO confirm.
a <- read.table(input)
# Reference files -- NOTE(review): hard-coded cluster paths; consider
# making these command-line arguments.
zm <- read.table("/scratch/apm25309/single_cell/ATACseq/v3/bedfiles/resources/Zm.v4.genome")
genes <- read.table("/scratch/apm25309/single_cell/ATACseq/v3/bedfiles/resources/Zm.geneAnnotation.bed", header=T)
meta <- read.table(metafile)  # rownames are cell barcodes
###################################################################################################
## Cluster cells
###################################################################################################
# create cicero object
message(" ... Creating CDS")
# Keep only entries whose cell barcode appears in the metadata.
a <- a[as.character(a$V2) %in% rownames(meta),]
a$V1 <- droplevels(a$V1)
a$V2 <- droplevels(a$V2)
# Build a shuffled null model by independently permuting the peak and the
# cell columns, breaking any real peak/cell association.
shuf <- a
shuf$V1 <- shuf$V1[sample(length(shuf$V1))]
shuf$V2 <- shuf$V2[sample(length(shuf$V2))]
cds <- make_atac_cds(a, binarize=T)
shufcds <- make_atac_cds(shuf, binarize=T)
# Summaries used only for the sanity-check messages below.
c.colSums <- Matrix::colSums(exprs(cds))
c.rowSums <- Matrix::rowSums(exprs(cds))
s.colSums <- Matrix::colSums(exprs(shufcds))
s.rowSums <- Matrix::rowSums(exprs(shufcds))
# check shuffled
message(" * orig. matrix = ",nrow(cds), " | ", ncol(cds))
message(" * shuf. matrix = ",nrow(shufcds), " | ", ncol(shufcds))
message(" * orig. matrix colSums = ", paste(c.colSums[1:5], collapse=", "))
message(" * shuf. matrix colSums = ", paste(s.colSums[1:5], collapse=", "))
message(" * orig. matrix rowSums = ", paste(c.rowSums[1:5], collapse=", "))
message(" * shuf. matrix rowSums = ", paste(s.rowSums[1:5], collapse=", "))
# add metadata, filter, and run TFIDF/library regression/batch effect removal
# Keep only cells present in the metadata.
cds <- cds[,colnames(cds) %in% rownames(meta)]
# FIX: the shuffled object must be masked by ITS OWN column names. The
# original used `colnames(cds)`, but cds was already subset on the previous
# line, so the logical mask was computed against the wrong (and possibly
# differently sized/ordered) set of columns.
shufcds <- shufcds[,colnames(shufcds) %in% rownames(meta)]
# Attach per-cell metadata in matrix column order.
pData(cds) <- meta[colnames(exprs(cds)),]
pData(shufcds) <- meta[colnames(exprs(shufcds)),]
# Drop empty peaks and empty cells.
cds <- cds[Matrix::rowSums(exprs(cds))>0,]
cds <- cds[,Matrix::colSums(exprs(cds))>0]
shufcds <- shufcds[Matrix::rowSums(exprs(shufcds))>0,]
shufcds <- shufcds[,Matrix::colSums(exprs(shufcds))>0]
# process basic
cds <- detectGenes(cds)
shufcds <- detectGenes(shufcds)
cds <- estimateSizeFactors(cds)
shufcds <- estimateSizeFactors(shufcds)
# load results from jaccard
message(" ... Loading Jaccard-based clustering results and reduced dimensions")
cds <- loadMeta(cds, meta)
shufcds <- loadMeta(shufcds, meta)
###################################################################################################
## Estimate connections, modules and gene activities
###################################################################################################
# run cicero to get co-accessible sites and modules BY CLUSTER
meta2 <- pData(cds)
meta2$Cluster <- as.character(meta2$Cluster)
print(table(meta2$Cluster))
clusts <- unique(meta2$Cluster)
cell_ids <- c()
# iterate
its <- 0
# foreach parameters: SNOW worker pool plus a text progress bar for the
# (currently commented-out) parallel cicero runs below.
cl <- makeSOCKcluster(threads)
registerDoSNOW(cl)
tasks <- length(clusts)
pb <- txtProgressBar(max = tasks, style = 3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
package.labs <- c("cicero", "Matrix")
message(" ... Initializing per cluster cicero run")
# # run in parallel
# gact <- list()
# gact <- foreach(i=clusts, .combine='c', .packages=package.labs, .options.snow=opts) %dopar% {
#
# # get umap coordinates and make cicero CDS
# message("###--- Creating cicero object, cluster",i, " ---###")
# ids <- rownames(meta2[meta2$Cluster==i,])
# index.keep <- colnames(exprs(cds)) %in% ids
# s.cds <- cds[,index.keep]
#
# # only consider sites accessible in at least 1% of cells in cluster
# s.cds <- s.cds[Matrix::rowSums(exprs(s.cds))>(ncol(exprs(s.cds))*0.00),]
# s.cds <- s.cds[,Matrix::colSums(exprs(s.cds))>0]
# print(head(exprs(s.cds)[,1:5]))
# message(" - number of sites for cluster ", i, " = ", nrow(s.cds))
#
# # get UMAP coordinates
# umap_coords <- t(reducedDimA(s.cds))
# umap_coords <- umap_coords[colnames(s.cds),]
# message("# UMAP coords = ", nrow(umap_coords), " | # cells = ", ncol(s.cds))
# rownames(umap_coords) <- colnames(exprs(s.cds))
# cicero_cds <- make_cicero_cds(s.cds, reduced_coordinates=umap_coords, k=15)
#
# # run cicero (connections)
# message(" ... Running cicero")
# conns <- run_cicero(cicero_cds, zm, window=500000, sample_num=100)
#
# # write results to disk
# write.table(conns, file=paste("bycluster",i,".",output,".cicero.loops.txt",sep=""),
# sep="\t",quote=F, col.names=F, row.names=F)
# }
# close(pb)
# stopCluster(cl)
#
#
# ###################################################################################################
# ## SHUFFLE ----------------------------------------------------------------------------------------
# ###################################################################################################
#
# # for each cluster
# meta2 <- pData(shufcds)
# meta2$Cluster <- as.character(meta2$Cluster)
# print(table(meta2$Cluster))
# clusts <- unique(meta2$Cluster)
# cell_ids <- c()
#
# # iterate
# its <- 0
#
# # foreach parameters
# cl <- makeSOCKcluster(threads)
# registerDoSNOW(cl)
# tasks <- length(clusts)
# pb <- txtProgressBar(max = tasks, style = 3)
# progress <- function(n) setTxtProgressBar(pb, n)
# opts <- list(progress = progress)
# package.labs <- c("cicero", "Matrix")
# message(" ... Initializing shuffled per cluster cicero run")
#
# ## shuffled ##
# gact <- list()
# gact <- foreach(i=clusts, .combine='c', .packages=package.labs, .options.snow=opts) %dopar% {
#
# # get umap coordinates and make cicero CDS
# message("###--- Creating cicero object, cluster",i, " ---###")
# ids <- rownames(meta2[meta2$Cluster==i,])
# index.keep <- colnames(exprs(shufcds)) %in% ids
# s.cds <- shufcds[,index.keep]
#
# # only consider sites accessible in at least 1% of cells in cluster
# s.cds <- s.cds[Matrix::rowSums(exprs(s.cds))>(ncol(exprs(s.cds))*0.00),]
# s.cds <- s.cds[,Matrix::colSums(exprs(s.cds))>0]
# print(head(exprs(s.cds)[,1:5]))
# message(" - number of sites for cluster ", i, " = ", nrow(s.cds))
#
# # get UMAP coordinates
# umap_coords <- t(reducedDimA(s.cds))
# umap_coords <- umap_coords[colnames(s.cds),]
# message("# UMAP coords = ", nrow(umap_coords), " | # cells = ", ncol(s.cds))
# rownames(umap_coords) <- colnames(exprs(s.cds))
# cicero_cds <- make_cicero_cds(s.cds, reduced_coordinates=umap_coords, k=15)
#
# # run cicero (connections)
# message(" ... Running cicero")
# conns <- run_cicero(cicero_cds, zm, window=500000, sample_num=100)
#
# # write results to disk
# write.table(conns, file=paste("shuffled_bycluster",i,".",output,".cicero.loops.txt",sep=""),
# sep="\t",quote=F, col.names=F, row.names=F)
#
# }
# close(pb)
# stopCluster(cl)
###################################################################################################
## COMPUTE GENE ACTIVITY --------------------------------------------------------------------------
###################################################################################################
# For each cluster: load the real and shuffled cicero loops written by the
# (commented-out) runs above, pick an empirical FDR threshold from the
# shuffled loops, filter the real loops, and derive per-cell gene activity
# scores with cicero.
meta2 <- pData(cds)
meta2$Cluster <- as.character(meta2$Cluster)
print(table(meta2$Cluster))
clusts <- unique(meta2$Cluster)
cell_ids <- c()
# iterate
its <- 0
# foreach parameters
message(" ... Initializing per cluster cicero run - GENE ACTIVITY")
gascores <- mclapply(clusts, function(x){
# load connections (file names follow the prefixes used when writing them)
id.true <- paste("bycluster",x,".",output,".cicero.loops.txt",sep="")
id.false <- paste("shuffled_bycluster",x,".",output,".cicero.loops.txt",sep="")
t.conns <- read.table(id.true)
s.conns <- read.table(id.false)
# filter loops --------------------------------------------------------------------------------
# empty vector
b.sub <- c()
lims <- seq(from=0, to=0.99, length.out=100)
# find cut-off: for each candidate co-accessibility cutoff, count how many
# shuffled (null) loops survive; the empirical FDR is that fraction.
for(j in lims){
b.sub <- c(b.sub, nrow(subset(s.conns, s.conns$V3 >= j)))
}
fdr <- b.sub/nrow(s.conns)
# Smallest cutoff whose empirical FDR drops below 5%.
# NOTE(review): if no cutoff reaches FDR < 0.05 this yields Inf (with a
# warning) and the filter below keeps nothing -- confirm this is intended.
threshold <- min(lims[which(fdr < 0.05)])
message(" - threshold = ", threshold, " | ", id.true)
# filter loops
a.sub <- subset(t.conns, t.conns$V3 >= threshold)
id <- gsub("bycluster", "filtered", id.true)
write.table(a.sub, file=id, quote=F, row.names=F, col.names=F, sep="\t")
colnames(a.sub) <- c("Peak1", "Peak2", "coaccess")
# get gene activity scores --------------------------------------------------------------------
message("--- estimating gene activity scores for cluster ",x)
ids <- rownames(meta2[meta2$Cluster==x,])
index.keep <- colnames(exprs(cds)) %in% ids
s.cds <- cds[,index.keep]
# only consider sites accessible in at least 1% of cells in cluster
# NOTE(review): the code actually keeps every peak with >0 cells; the 1%
# comment does not match the `>0` filter -- confirm which is intended.
s.cds <- s.cds[Matrix::rowSums(exprs(s.cds))>0,]
s.cds <- s.cds[,Matrix::colSums(exprs(s.cds))>0]
print(head(exprs(s.cds)[,1:5]))
message(" - number of sites for cluster ", x, " = ", nrow(s.cds))
# get UMAP coordinates (stored in the reduced dimension slot of the CDS)
umap_coords <- t(reducedDimA(s.cds))
umap_coords <- umap_coords[colnames(s.cds),]
message("# UMAP coords = ", nrow(umap_coords), " | # cells = ", ncol(s.cds))
rownames(umap_coords) <- colnames(exprs(s.cds))
cicero_cds <- make_cicero_cds(s.cds, reduced_coordinates=umap_coords, k=15)
# estimate gene activity
message(" ... Estimating gene activity scores")
# Build a 1 kb promoter window upstream of each transcript's TSS,
# strand-aware: for "+" genes the window ends at `start`, for "-" genes it
# begins at `end`.
pos <- subset(genes, strand == "+")
pos <- pos[order(pos$start),]
pos <- pos[!duplicated(pos$transcript),]
pos$end <- pos$start
pos$start <- pos$start - 1000
neg <- subset(genes, strand == "-")
neg <- neg[order(neg$start, decreasing = T),]
neg <- neg[!duplicated(neg$transcript),]
neg$start <- neg$end
neg$end <- neg$end + 1000
# merge both strands and keep chrom/start/end plus the gene id column
gene_ann2 <- rbind(pos, neg)
gene_ann2 <- gene_ann2[,c(1:3, 8)]
gene_ann2 <- gene_ann2[order(gene_ann2$start, decreasing=F),]
colnames(gene_ann2)[4] <- "gene"
# annotate genes
message(" - annotate genes by peaks ...")
s.cds <- annotate_cds_by_site(s.cds, gene_ann2, all=F)
# estimate un-normalized activity from the filtered co-accessible loops
message(" - build gene activity matrix ... ")
unnorm_ga <- build_gene_activity_matrix(s.cds, a.sub, dist_thresh = 500000, coaccess_cutoff = 0)
unnorm_ga <- unnorm_ga[!Matrix::rowSums(unnorm_ga) == 0, !Matrix::colSums(unnorm_ga) == 0]
# gene activity per cluster: per-cell expressed-gene counts used as the
# normalization covariate
num_genes <- pData(s.cds)$num_genes_expressed
names(num_genes) <- row.names(pData(s.cds))
# normalize and write the sparse result as (gene, cell, score) triplets
cicero_gene_activities <- normalize_gene_activities(unnorm_ga, num_genes)
geneact <- as.data.frame(summary(cicero_gene_activities))
geneact$i <- rownames(cicero_gene_activities)[geneact$i]
geneact$j <- colnames(cicero_gene_activities)[geneact$j]
# output
write.table(geneact, file=paste("filtered",x,".",output,".cicero.geneActivity.txt",sep=""),
sep="\t",quote=F, col.names=F, row.names=F)
# return
return(unnorm_ga)
}, mc.cores=threads)
|
4968e3dc292b829f834c3d15d04637b24525b8e8
|
324f2e9fab245df3190adacd93de54eb313c23cf
|
/ui.R
|
f0cc003205cc58c068b57e609ed850a8bc2f347c
|
[] |
no_license
|
Yambcn/Shiny-Application
|
c46c3a36d3c81c7ae6613b7697af09d8b46557a7
|
5696282e3929f670f01c5941a40b6b0380550c70
|
refs/heads/master
| 2020-05-31T03:50:09.858800
| 2014-08-24T10:03:17
| 2014-08-24T10:03:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,624
|
r
|
ui.R
|
# ui.R — Shiny UI for a fixed-rate loan amortization calculator.
# The sidebar collects three inputs (principal, annual interest rate in %,
# term in years); the main panel shows three server-rendered outputs:
# monthly payment ("pm"), total interest ("pi"), and total repayment ("P").
library(shiny)
shinyUI(pageWithSidebar(
        headerPanel("Calculation of the total cost of a Loan"),
        # Sidebar: documentation text plus the three numeric inputs.
        sidebarPanel(
                h4('Introduction / Documentation'),
                h5('The goal of this application is to calculate to cost of amortization of a
                   loan. In this application just amortization for fixed loans could be
                   calculated. The user should introduce 3 inputs, the total money that want
                   to request, the interest rate that is required to deliver this money and
                   the years that are need to pay back that money. Once that the user
                   introduce these 3 inputs,just should press the bar Submit and the
                   results will be shown on the Main Panel'),
                h4('Inputs'),
                # numericInput(inputId, label, initial value, min, max)
                numericInput('money', 'Quantity of money that we request', 0, min=0, max=100000000000),
                numericInput('interest', 'Interest ratio in %', 0, min=0, max=100),
                numericInput('years', 'number of years to pay back', 0, min=0, max=100),
                # submitButton defers all reactive updates until pressed
                submitButton('Submit')
        ),
        # Main panel: explanation plus the three computed results.
        mainPanel(
                h4('Output information'),
                h5('Here the results are displayed, first one is the quantity that the user
                   should pay every month to return the loan in the required period, second
                   is the total interest that the user will pay by the end of the loan
                   considering the introduced inputs, the last one will be the total amount
                   that the user will have to pay back for that loan'),
                h3('Results of the calculation'),
                h4('Monthly payment'),
                verbatimTextOutput("pm"),   # rendered by output$pm on the server
                h4('Total Interest payment'),
                verbatimTextOutput("pi"),   # rendered by output$pi on the server
                h4('Total quantity to return'),
                verbatimTextOutput("P")     # rendered by output$P on the server
        )
))
|
46756b42cc262c7a98c4fea524dcbc3d2c22ad83
|
e596c317d5f14ec9675b4b245d08a5e13a550f8b
|
/sipp.R
|
eed035413247fc8d9609262c1100223e59c248e9
|
[
"MIT"
] |
permissive
|
jw3315/CensusSIPP
|
03c47770afdfb57e47597ee3b328ea5ca4714246
|
a242b711613b814264eadc0fa4bb746a6e872b0f
|
refs/heads/master
| 2020-03-28T02:32:29.966026
| 2018-09-06T20:37:04
| 2018-09-06T20:37:04
| 147,578,171
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,321
|
r
|
sipp.R
|
## Example: pull all 16 waves of SIPP Panel 2008 core data, unzip the raw
## files, and assemble them into a single data set in R.
library(readr)
library(SAScii)
library(data.table)
library(dplyr)  # FIX: the %>% / group_by / summarise pipeline below needs dplyr; it was never loaded
# library(rstudioapi) # load it
# current_path <- getActiveDocumentContext()$path
# setwd(dirname(current_path ))

## Download and unzip one wave of the 2008 core files.
## FIX: renamed from `save`, which masked base::save.
download_wave <- function(i) {
  url <- paste("https://thedataweb.rm.census.gov/pub/sipp/2008/l08puw", i, ".zip", sep = '')
  path <- tempfile(fileext = '.zip')
  download.file(url, path, mode = 'wb')
  unzip(zipfile = path, exdir = 'A place you wanna save the raw data')
}
for (i in 1:16) download_wave(i)

## Process the SAS input statement from Census (thanks to the SAScii
## package from Anthony Damico) to recover fixed-width column positions.
sasinput2008.url <- 'https://thedataweb.rm.census.gov/pub/sipp/2008/l08puw1.sas'
sasinput2008 <- parse.SAScii(sasinput2008.url, beginline = 5)
sasinput2008$end <- cumsum(sasinput2008$width)
sasinput2008$start <- sasinput2008$end - sasinput2008$width + 1

## Keep only the variables needed for the research question (education
## attainment, focus on high-quality certificates): age, gender, education
## attainment, income, plus weight, panel, wave, month.
var <- c('SPANEL',
         'SWAVE',
         'MONTHCODE',
         'WPFINWGT',
         'TAGE', 'ESEX', 'TFIPSST', 'EEDUCATE', 'TPTOTINC')
sasinput2008 <- sasinput2008[sasinput2008$varname %in% var, ]

## Read the 16 waves and stack them.
## FIX: read into a list and bind once instead of rbind() inside a loop
## (quadratic copying), and keep the result a data frame — the original
## `apply(df, 2, as.numeric)` coerced it to a matrix, which would break
## the dplyr pipeline below.
wave_paths <- paste0('C:/Users/jwang/Downloads/LWC/sipp/unziped/l08puw', 1:16, '.dat')
waves <- lapply(wave_paths, function(p) {
  read_fwf(p, fwf_positions(sasinput2008$start, sasinput2008$end, sasinput2008$varname))
})
df <- bind_rows(waves)
df[] <- lapply(df, as.numeric)

## Person-weighted share of each education level by wave, state, and sex.
cert <- df %>%
  group_by(SWAVE, TFIPSST, ESEX, EEDUCATE) %>%
  summarise(weight = sum(WPFINWGT)) %>%
  group_by(SWAVE, TFIPSST, ESEX) %>%
  mutate(percent = weight / sum(weight))
cert <- cert[cert$EEDUCATE == 41, ]  # EEDUCATE == 41: the attainment level of interest

write.csv(df, file = ' ', row.names = FALSE)    # TODO: fill in real output paths
write.csv(cert, file = ' ', row.names = FALSE)

## Use the same methodology to have other panels
## example Panel 2014
sipp2014wave2.location <- "https://www2.census.gov/programs-surveys/sipp/data/datasets/2014/w2/pu2014w2_dat.zip"
sipp2014.sasinput <- 'https://thedataweb.rm.census.gov/pub/sipp/2014/pu2014w1.sas'
sipp2014wave1.location <- "http://thedataweb.rm.census.gov/pub/sipp/2014/pu2014w1_dat.zip"
|
ee747f473786b23209a63d492a454cd5573ce74c
|
1c814d6da3c3c612e8cafc5c337f9636eb415440
|
/tests/testthat/test-save_ts.R
|
0eb09020d4fc28919984668d9070a909ffe41732
|
[
"MIT"
] |
permissive
|
difiore/mapmate
|
28fdcf92bf67ccc50c3c3bb63c7b337f611fa0fa
|
7c36274078c4b6ffbd9dcc278f3176b7535ba86f
|
refs/heads/master
| 2022-01-09T03:26:22.252531
| 2018-05-05T23:37:50
| 2018-05-05T23:37:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,129
|
r
|
test-save_ts.R
|
library(mapmate)
suppressMessages(library(dplyr))

context("save_ts [functions.R]")

# Yearly mean temperature with a 1-based frame id per year.
data(annualtemps)
temps <- annualtemps %>%
  mutate(frameID = Year - min(Year) + 1) %>%
  group_by(Year, frameID) %>%
  summarise(z = mean(z))

xlm <- range(temps$Year)
ylm <- range(temps$z)

# Arguments shared by every call, plus the four per-plot variants
# (cap / colour / axes switches) exercised by the original tests.
common_args <- list(temps, "Year", "z", id = "frameID", xlm = xlm, ylm = ylm,
                    save.plot = FALSE, return.plot = TRUE)
variants <- list(
  list(cap = round(nrow(temps) / 2), col = "red", axes.only = FALSE, axes.space = TRUE),
  list(cap = nrow(temps), col = "#0000FF", axes.only = FALSE, axes.space = FALSE),
  list(cap = 2, col = "red", axes.only = TRUE, axes.space = FALSE),
  list(cap = 2, col = 2, axes.only = TRUE, axes.space = TRUE)
)
plots <- lapply(variants, function(v) do.call(save_ts, c(common_args, v)))

test_that("save_ts returns ggplot objects", {
  for (p in plots) expect_is(p, "ggplot")
})
|
bf754289a4e749daa13ea1a3a89d4fdce23c54d2
|
829e9ea609fff4c100edc4140c3aa528edc58973
|
/heterogeneity/SNonparaAnalysis.R
|
8dbbd0d22e484f834d1ce2589126d6c31d8a5fda
|
[] |
no_license
|
applXcation/udacity
|
c4794f846812a29de0f15f68e8e43b0a165a2bfd
|
de522d0c31dd01e2a50837d5b9d1b7ba1f678dbe
|
refs/heads/master
| 2021-01-11T22:12:21.788806
| 2017-01-14T13:59:54
| 2017-01-14T13:59:54
| 78,938,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,734
|
r
|
SNonparaAnalysis.R
|
SNonparaAnalysis <- function(self.eval, v1, ..., grouping.var, moment, jointly = FALSE) {
  # Simplified Non-Parametric Analysis.
  #
  # Derives the comparability variable "c" from the self-assessment and the
  # vignette variables via NonparaComparable(), then uses NonparaAnalysis()
  # to study the link between moments of c and moments of the vignettes.
  #
  # Only usable observations enter the second step: we drop cases where the
  # observation could belong to several c-categories ("multiple") or where
  # the respondent ranks the vignettes inconsistently ("inconsistencies").
  #
  # Args:
  #   self.eval:    respondent self-evaluation
  #   v1:           first vignette
  #   ...:          further vignettes, ordered so the first corresponds to the
  #                 domain level closest to label "1" and the last to the level
  #                 closest to label "J"
  #   grouping.var: data.frame containing the grouping variables
  #   moment:       name (character) of the moment function to use (mean, var, ...)
  #   jointly:      passed through to NonparaAnalysis()
  #
  # Returns:
  #   The result of NonparaAnalysis(): plots the regression result(s) and
  #   returns, as a list, a matrix of per-group moments of c and of each
  #   vignette, plus (as a sublist) the regression results.

  # Sanity checks
  # FIX: the original message ended in "\ n", an invalid escape sequence
  # that is a parse error in R; corrected to "\n".
  if (!is.data.frame(grouping.var)) stop("SNonparaAnalysis: grouping.var is not a data.frame \n")
  if (!is.character(moment)) stop("SNonparaAnalysis: moment should be of type character")

  # Obtain the c variable and index the consistent observations.
  c.list <- NonparaComparable(self.eval = self.eval, v1 = v1, ... = ...)
  relevant.cases <- !is.na(c.list$c[, 1]) & c.list$multiple == 0 & c.list$inconsistencies == 0

  # FIX: removed `assign(deparse(substitute), moment)` — it deparsed the
  # base `substitute` function itself (not a call) and assigned `moment`
  # under a garbage name; a broken no-op.

  # Study the link between c and the vignettes on the consistent subset.
  non.para.analysis <- NonparaAnalysis(c = c.list$c[, 1], v1 = v1, ... = ...,
                                       grouping.var = grouping.var, moment = moment,
                                       jointly = jointly, subset.index = relevant.cases)
  return(non.para.analysis)
}
|
95058a6c7a6f6ebbf2fe6418909f21aed54b4688
|
fdc4be9a687bbb9c66343cdfd72c22afd8221723
|
/cachematrix.R
|
581c41bcd09464156d356396cf71d9cb7c0aa9a5
|
[] |
no_license
|
ebecker54/ProgrammingAssignment2
|
af5e8187472e9a20fd3d33a2a8780dce216fa81f
|
ca81ced008ee23f201deedf35beba2d18b39e8bb
|
refs/heads/master
| 2020-02-26T15:26:50.956478
| 2015-10-25T16:19:07
| 2015-10-25T16:19:07
| 44,875,868
| 0
| 0
| null | 2015-10-25T16:19:08
| 2015-10-24T17:04:17
|
R
|
UTF-8
|
R
| false
| false
| 1,070
|
r
|
cachematrix.R
|
## makeCacheMatrix / cacheSolve implement a matrix whose inverse is
## computed at most once and then served from a cache.
##
## makeCacheMatrix wraps a matrix in a list of four accessor closures:
## set/get for the matrix itself, setInverse/getInverse for the cached
## inverse. Storing a new matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
        cached_inverse <- NULL
        list(
                set = function(value) {
                        x <<- value
                        cached_inverse <<- NULL  # new matrix -> cache no longer valid
                },
                get = function() x,
                setInverse = function(inverse) cached_inverse <<- inverse,
                getInverse = function() cached_inverse
        )
}
## cacheSolve returns the inverse of the matrix wrapped by makeCacheMatrix:
## on the first call it computes the inverse with solve() and stores it in
## the cache; subsequent calls return the cached copy (with a message).
## `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
        inv <- x$getInverse()
        if (is.null(inv)) {
                # cache miss: compute and remember the inverse
                inv <- solve(x$get(), ...)
                x$setInverse(inv)
        } else {
                message("getting cached data")
        }
        inv
}
|
146aebe967bac488a5c1145543b99c7d2bfd0cb4
|
509919d0d70bab8a9c4e8618b7e99656948d3e6b
|
/lmer_vs_anova/lmer_tab.R
|
79baa8aa1f5d53c047b5c43824848318384fb861
|
[
"MIT"
] |
permissive
|
debruine/proveit
|
50981bd0ae9a06bcf2f1c2a9b148c5e828ff589a
|
95d4842b204164308eea668a5da759d38e9dcfda
|
refs/heads/master
| 2021-07-23T09:22:14.499489
| 2019-01-15T15:51:29
| 2019-01-15T15:51:29
| 142,279,290
| 0
| 0
|
MIT
| 2018-09-20T10:40:14
| 2018-07-25T09:37:38
|
R
|
UTF-8
|
R
| false
| false
| 422
|
r
|
lmer_tab.R
|
### lmer_tab ----
# UI for the "lmer" tab: random-effects plots for subjects (left, 1/3
# width) and stimuli (right, 2/3 width), plus an initially-collapsed box
# showing the model's VarCor table.
lmer_tab <- tabItem(
  tabName = "lmer_tab",
  fluidRow(
    column(4, plotOutput(outputId = "ranef_sub_plot", height = "auto")),
    column(8, plotOutput(outputId = "ranef_stim_plot", height = "auto"))
  ),
  box(
    title = "VarCor",
    width = 12,
    solidHeader = TRUE,
    collapsible = TRUE,
    collapsed = TRUE,
    tableOutput("lmer_varcor")
  )
)
|
1816c6eea22a7e7e1e478916d657ea8aec7a9294
|
2553cdad83162e35aefef486816184d520302665
|
/R/taxa.meansdn.R
|
7368a6b65ac0aa6de42900b4564ac27341d5fa6e
|
[] |
no_license
|
cran/metamicrobiomeR
|
b9cf404e83f0e8ae4cc024289ccca8dc1e590b56
|
a55866e6cdda36d6d59a59cbd5f20baccb8bd970
|
refs/heads/master
| 2021-07-08T18:42:53.689765
| 2020-11-09T10:20:05
| 2020-11-09T10:20:05
| 206,034,869
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,774
|
r
|
taxa.meansdn.R
|
#' Summarize abundance by group
#'
#' This function summarizes taxa/pathway abundance tables to provide mean, sd, count by groups.
#' @param taxtab taxa/pathway abundance table from phylum to species or any preferred highest taxa level.
#' @param sumvar main variable for summary
#' @param groupvar variable to be stratified.
#' @param percent.filter prevalence threshold (the percentage of number of samples the taxa/pathway available). Default is 0.05.
#' @param relabund.filter relative abundance threshold (the minimum of the average relative abundance for a taxa/pathway to be retained). Default is 0.00005.
#' @param othervar vector of variables that are not abundance variables to be summarized. Default is "none".
#' @return table of mean, sd, count by group.
#' @keywords abundance summary
#' @import dplyr
#' @importFrom stats sd
#' @export
#' @examples
#' #Load summary tables of bacterial taxa relative abundance from Bangladesh data
#' data(taxtab6)
#' taxa.meansdn.rm<-taxa.meansdn(taxtab=taxtab6,sumvar="bf",groupvar="age.sample")
taxa.meansdn <- function(taxtab, sumvar, groupvar, percent.filter = 0.05,
                         relabund.filter = 0.00005, othervar = "none") {
  taxdat <- as.data.frame(taxtab)
  taxdat$sumvar <- taxdat[, sumvar]
  taxdat$groupvar <- taxdat[, groupvar]
  # Identify the abundance columns.
  # FIX: `othervar` is documented as a vector; comparing it with != inside
  # if() errors when length > 1 (R >= 4.2), so use identical() instead.
  if (identical(othervar, "none")) {
    # default: keep only taxonomically assigned columns ("k__" prefix)
    taxlist <- colnames(taxdat)[grep("k__", colnames(taxdat))]
  } else {
    # everything that is not listed in othervar is treated as abundance
    taxlist <- colnames(taxdat)[!colnames(taxdat) %in% othervar]
  }
  # prevalence filter: taxa present (>0) in at least percent.filter of samples
  taxtest <- apply(taxdat[, taxlist], 2, function(x) {length(x[!is.na(x) & x > 0])})
  taxget <- taxtest[taxtest >= percent.filter * (nrow(taxdat))]
  # relative abundance filter: mean abundance above relabund.filter
  taxtestm <- apply(taxdat[, taxlist], 2, mean, na.rm = TRUE)
  taxgetm <- taxtestm[taxtestm > relabund.filter]
  taxname <- names(taxget)[names(taxget) %in% names(taxgetm)]
  sumdat <- taxdat[, c("sumvar", "groupvar", taxname)]
  # coerce abundances (possibly factor/character) to plain numeric
  sumdat[, taxname] <- lapply(sumdat[, taxname], as.character)
  sumdat[, taxname] <- lapply(sumdat[, taxname], as.numeric)
  sumdat <- sumdat[!is.na(sumdat[, "sumvar"]), ]
  if (is.numeric(sumdat$groupvar)) {
    # numeric grouping variable: round before summarising, then restore
    # numeric type and sort for a stable output order
    estisum <- sumdat %>%
      dplyr::mutate(groupvar = as.factor(as.character(round(as.numeric(as.character(groupvar)), 0)))) %>%
      dplyr::group_by(sumvar, groupvar) %>%
      dplyr::summarise_all(list(~mean(.), ~sd(.), ~n()))
    estisum <- estisum %>%
      dplyr::mutate(groupvar = as.numeric(as.character(groupvar))) %>%
      dplyr::arrange(sumvar, groupvar)
    estisum <- stats::na.omit(estisum)
  } else {
    estisum <- sumdat %>%
      dplyr::group_by(sumvar, groupvar) %>%
      dplyr::summarise_all(list(~mean(.), ~sd(.), ~n()))
  }
  # restore the caller-supplied column names
  colnames(estisum)[colnames(estisum) %in% c("sumvar", "groupvar")] <- c(sumvar, groupvar)
  return(estisum)
}
|
740abe8ab04af416d865f8f99982072ddedf5f32
|
0d6c580a2fd19cd76b2f94e7800278ebfffa82c9
|
/src/fare_count/Plot_Perform.R
|
9a6d98756edb5b6ac5f9dfaad19af585ee678eb0
|
[] |
no_license
|
haoyu987/bigdata2016taxi_weather
|
38021f5e0c92b9aecff73b751200ebd12373074c
|
410a9d212fd6be2baf58142737e4138701c529ff
|
refs/heads/master
| 2020-06-15T09:41:44.148389
| 2016-05-16T21:47:30
| 2016-05-16T21:47:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 923
|
r
|
Plot_Perform.R
|
#
# Compare the run time of the In-Mapper Combiner v.s. the Classic scheme
# across input sizes, and write the chart to fig/ as both PNG and PDF.
#
library(ggplot2)

theme_set(theme_bw(base_size = 22))
myGthm <- theme(text = element_text(size = 25))

perf <- read.delim("src/fare_count/perform_benchmark.tab")
# Ordered factor of input sizes, pretty-printed with thousands separators.
# FIX: use FALSE rather than F (F is an ordinary, reassignable binding).
perf$SizeLevel <- factor(perf$Size, sort(unique(perf$Size)),
                         format(sort(unique(perf$Size)), big.mark = ',', scientific = FALSE))

# Horizontal dodged bar chart: one bar per scheme per input size,
# labelled with the raw timing values.
p <- ggplot(data = perf, aes(x = SizeLevel, y = Seconds, fill = Scheme)) +
  geom_bar(position = 'dodge', stat = "identity") +
  geom_text(aes(label = Seconds), position = position_dodge(width = .9),
            hjust = -0.05) +
  xlab('Amount of Tuples') + ylab('Time (s)') +
  ggtitle('In-Mapper Combiner v.s. Classic') +
  coord_flip() +
  myGthm

png(filename = file.path(getwd(), 'fig/Perf_InMapCombiner.png'), width = 1200, height = 700)
print(p)
dev.off()

pdf(file = file.path(getwd(), 'fig/Perf_InMapCombiner.pdf'), 12, 7)
print(p)
dev.off()
|
02253e98b02d3b76b47e875c3609823dc49a6f66
|
f131a2e7dd6a0eee65e0fc6dc8325c95c3b19623
|
/Homework/Day_2_R.R
|
0fbd92cca6d081220aaa465c0ee2a452fb7453c9
|
[] |
no_license
|
EthanUWC/Intro_R_UWC
|
30636212b8a86651f72a46974463e9a1d0448d0f
|
90779359951fd5959bececfacf70d8c8d8252278
|
refs/heads/master
| 2020-04-19T09:20:06.281981
| 2019-05-14T20:11:34
| 2019-05-14T20:11:34
| 168,107,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,106
|
r
|
Day_2_R.R
|
# Plotting in R using ggplot2
# day2
# Ethan
# 30/01/2019

# Load libraries (ggplot2 is attached as part of the tidyverse)
library(tidyverse)

lam <- read_csv("~/R/R/data/laminaria.csv")
chicks <- datasets::ChickWeight
??ChickWeight # the "??" searches the help system (interactive use only)

# Scatter plot with one growth line per chick. In ggplot, layers are
# chained with "+" (not the pipe); aes() maps data columns to aesthetics.
ggplot(data = chicks, aes(x = Time, y = weight)) +
  geom_point() +
  geom_line(aes(group = Chick))

# Same plot, coloured by diet
ggplot(chicks, aes(x = Time, y = weight, colour = Diet)) +
  geom_point() +
  geom_line(aes(group = Chick))

# Linear-model trend per diet; method = "lm" fits a straight line
ggplot(chicks, aes(x = Time, y = weight, colour = Diet)) +
  geom_point() +
  geom_smooth(method = "lm")

# A colour given outside aes() applies to every point uniformly
ggplot(chicks, aes(x = Time, y = weight, colour = Diet)) +
  geom_point( colour = "blue") +
  geom_line(aes(group = Chick))

# Mapping point size to weight adds a third variable to the plot
ggplot(chicks, aes(x = Time, y = weight, colour = Diet)) +
  geom_point(aes(size = weight)) +
  geom_smooth(method = "lm") +
  labs(x = "Days", y = "Weight(kg)") +
  ggtitle("Weight gain based on chick diets") +
  theme_bw() # FIX: theme_bw() was on its own line with no "+", so it was created and discarded instead of being applied to the plot
# Two views of blade length vs total length, coloured by site.
# Build the shared base (points + title) once, then add the layer that
# differs between the two plots.
lam_base <- ggplot(lam, aes(x = blade_length, y = total_length, colour = site)) +
  geom_point() +
  ggtitle("blade length vs total length")

# Per-site linear-model smoother
lam_base +
  geom_smooth(method = "lm") +
  theme_bw()

# Lines grouped by blade length
lam_base +
  geom_line(aes(group = blade_length)) +
  theme_bw()
# Facetting in ggplot ----
library(ggpubr) # allows multiple plots to be arranged on one frame

# One panel per diet, stacked in a single column; facet_wrap wraps the
# panels into a grid controlled by ncol/nrow.
ggplot(chicks, aes(x = Time, y = weight, colour = Diet)) +
  geom_point() +
  geom_smooth(method = "lm") +
  facet_wrap(~Diet, ncol = 1, nrow = 4)

# Keep only the final (day 21) measurements
chicks_2 <- chicks %>%
  filter(Time == 21)

# plot_1: growth trajectories, one line per chick
plot_1 <- ggplot(chicks, aes(x = Time, y = weight, colour = Diet)) +
  geom_point() +
  geom_line(aes(group = Chick)) +
  ggtitle("Chick diets") +
  labs(x = "days", y = "weight(kg)") +
  theme_bw()

# plot_2: linear trend per diet
plot_2 <- ggplot(chicks, aes(x = Time, y = weight, colour = Diet)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("Weight gain based on chick diets")

# plot_3: histogram of final mass; position = "dodge" places the diet
# bars side by side instead of stacking them
plot_3<- ggplot(data = chicks_2, aes(x = weight)) +
  geom_histogram(aes(fill = Diet), position = "dodge", binwidth = 100) +
  labs(x = "Final Mass", y = "Count") +
  ggtitle("Weight gain based on chick diets") +
  theme_bw()

# plot_4: boxplot of final mass by diet
plot_4 <- ggplot(data = chicks_2, aes(x = Diet, y = weight)) +
  geom_boxplot(aes(fill = Diet)) +
  labs(x = "Diet", y = "Final Mass") +
  ggtitle("Weight gain based on chick diets") +
  theme_bw()

# Arrange the four plots in one frame
plot_combined <- ggarrange(plot_1, plot_2, plot_3, plot_4)

# 3rd library ----
library(boot)
urine <- boot::urine
??urine # interactive help lookup
# NOTE(review): the result of this select() is printed, not assigned —
# presumably a quick look at the data; confirm intent.
urine %>%
  select(-cond)
ggplot(data = urine, aes(x = osmo, y = ph)) +
  geom_point(aes(colour = as.factor(r)))
# Homework ----
# Formulate hypotheses for 3 built-in datasets (via the datasets package),
# produce 2 graphs per dataset, and calculate the mean of one column in
# each. Then produce 3 graphs using the laminaria dataset.

# Class assignment
# 30-01-2019
# Ethan

# Load libraries
library(tidyverse)
library(ggpubr) # allows multiple plots to be arranged on one frame

beaver <- datasets::beaver1 # built-in beaver body-temperature time series
??beaver # interactive help, to place the data in context

# Beaver data ----
# 1. Hypothesis: beaver activity will decrease as body temperature increases.
# NOTE(review): `activ` is numeric (0/1) here, so colour is mapped to a
# continuous scale — wrap in as.factor() if discrete colours are intended.
Beav_plot <- ggplot(beaver, aes(x = time, y = temp, colour = activ)) +
  geom_point() +
  geom_smooth(method = "lm") + # method = "lm" fits a linear model
  ggtitle("Beaver activity based on temperature") +
  labs(x = "time (sec)", y = "temp")

# Mean beaver body temperature (prints 36.86219 degrees Celsius)
beaver %>%
  select(temp) %>%
  summarise(mean_temp = mean(temp))

# 2. Hypothesis: beaver body temperature decreased over time.
Beav_plot_2 <- ggplot(beaver, aes(x = time, y = temp)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("Temperature fluctuations over time") +
  labs(x = "time (sec)", y = "temp")
# Iris data ----
iris <- datasets::iris
??iris # interactive help

# 3. Hypothesis: petal sizes are the same across Iris species.
Iris_plot <- ggplot(iris, aes(x = Petal.Width, y = Petal.Length, colour = Species)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("Comparison of petals of species of Iris") +
  labs(x = "Petal Width", y = "Petal Length") # FIX: "Petal Length" was passed unnamed to labs(); it must be named y =

# 4. Hypothesis: sepal lengths and sepal widths are longer in Iris setosa
# than in the other species.
# FIX: this plot was also assigned to `Iris_plot`, silently overwriting
# plot 3; renamed so both plots survive.
# NOTE(review): the y label reads "Petal Length" on a sepal plot — likely
# a typo in the original; left as written.
Iris_plot_2 <- ggplot(iris, aes(x = Sepal.Width, y = Sepal.Length, colour = Species)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("Comparison of Sepals of species of Iris") +
  labs(x = "Sepal Width", y = "Petal Length")

# Mean sepal length (prints 5.843333)
iris %>%
  select(Sepal.Length) %>%
  summarise(mean_sepal_Length = mean(Sepal.Length))
# Trees data ----
trees <- datasets::trees
??trees # interactive help

# 5. Hypothesis: girth (circumference) of trees increases as tree height increases.
Trees_plot <- ggplot(trees, aes(x = Girth, y = Height)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("Girth vs height") +
  labs(x = "Girth", y = "Height") # FIX: "Height" was passed unnamed to labs(); it must be named y =

# 6. Hypothesis: the greater the girth, the more timber (volume) a tree has.
# FIX: this plot was also assigned to `Trees_plot`, overwriting plot 5;
# renamed so both plots survive.
Trees_plot_2 <- ggplot(trees, aes(x = Girth, y = Volume)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("Girth vs Volume") +
  labs(x = "Girth", y = "Volume") # FIX: unnamed second argument, named y =

# Mean tree height (prints 76)
trees %>%
  select(Height) %>%
  summarise(mean_height = mean(Height))
# Laminaria data ----
# FIX: these three plots referenced `Lam`, which is never defined anywhere
# in this script; the data frame loaded at the top is `lam` (R object
# names are case-sensitive), so every plot below errored at run time.

# Scatter plot: blade weight vs thickness by site, with per-site lm fit
lam_plot_1 <- ggplot(lam, aes(x = blade_weight, y = blade_thickness, colour = site)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("blade Weight vs blade thickness")

# Histogram: blade weight distribution, bars dodged by site
lam_plot_2 <- ggplot(lam, aes(x = blade_weight)) +
  geom_histogram(aes(fill = site), position = "dodge", binwidth = 10) +
  labs(x = "blade_weight", y = "count") +
  ggtitle("blade Weight based on blade height") +
  theme_bw()

# Boxplot: stipe length against total length, filled by site
lam_plot_3 <- ggplot(lam, aes(x = total_length, y = stipe_length)) +
  geom_boxplot(aes(fill = site)) +
  labs(x = "Total length", y = "Stipe length") +
  ggtitle("Stipe length compared to total length") +
  theme_bw()
|
995538bcd6a65963ea27c8ed1d8fc2f17c03b0fb
|
ee50ce44675c7181d1ea5bf6b9a7539f4b42cc42
|
/plots.r
|
5443f8aff2fc81038ab1711e6979ac221c76cb3e
|
[
"MIT"
] |
permissive
|
mdthorn/nyc_restaurant_grades
|
16bd3bbc93e01b25430f2b88674ac18ea447d846
|
abd228588babf5fd99528d6eb8d7fa1718e6fa92
|
refs/heads/master
| 2020-08-01T10:56:49.453299
| 2016-11-12T20:35:01
| 2016-11-12T20:35:01
| 73,576,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 635
|
r
|
plots.r
|
library(ggplot2)
library(reshape)
library(scales)
library(choroplethr)
# Line chart: share of restaurants holding an "A" grade by calendar month,
# one facet per grouping variable; the dashed grey line marks each series'
# overall average (avg). Legend is suppressed since facets already label
# the series.
ggplot(grades_by_month, aes(x=month, y=value, group=variable, color=variable)) + geom_line() +
geom_hline(aes(yintercept=avg), linetype="dashed", color="gray57") + xlab("") +
ylab("% of Restaurants with 'A' Grade") + scale_y_continuous(labels=percent) +
theme(legend.position="none") + ggtitle("NYC Restaurant Grades by Month of Year") +
theme(plot.title=element_text(face="bold")) + facet_wrap(~ variable, ncol=1)
# Choropleth: Manhattan zip codes shaded by the percentage of restaurants
# graded "A" (zip_choropleth from the choroplethr package).
zip_choropleth(total_clean, county_zoom=manhattan, title="Manhattan Restaurant Grades by Zip Code", legend="% of Restaurants Graded A")
|
47a775fd1896914a3f0ae9a45741573701794599
|
4ed740aeec1366c7647bd599406f65ef78f7786b
|
/man/replace_number.Rd
|
f37c36abc60dc6fc5030cd424f864313e8c2f313
|
[] |
no_license
|
trinker/qdap2
|
00f97557a43eeee487c6a11074f940f0204d042c
|
b9244fe90c5f5cec9cd891b1ba1f55b157467e5f
|
refs/heads/master
| 2021-01-01T17:21:41.171478
| 2013-01-29T03:55:12
| 2013-01-29T03:55:12
| 7,884,841
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,191
|
rd
|
replace_number.Rd
|
\name{replace_number}
\alias{replace_number}
\title{Replace Numbers With Text Representation}
\usage{
replace_number(text.var, num.paste = "separate")
}
\arguments{
\item{text.var}{The text variable.}
\item{num.paste}{A character vector of either
\code{"separate"} or \code{"combine"}. Of
\code{"separate"} is specified the elements of larger
numbers are separated with spaces. If \code{"combine"}
is selected the elements will be joined without spaces.}
}
\value{
Returns a vector with numbers replaced by their text representation.
}
\description{
Replaces numeric represented numbers with words (e.g.
1001 becomes one thousand one).
}
\examples{
x <- c("I like 346,457 ice cream cones.", "They are 99 percent good")
y <- c("I like 346457 ice cream cones.", "They are 99 percent good")
replace_number(x)
replace_number(y)
replace_number(x, "combine")
}
\references{
Fox, J. (2005). Programmer's niche: How do you spell that
number? R News. Vol. 5(1), pp. 51-55.
}
\seealso{
\code{\link[qdap]{bracketX}}, \code{\link[qdap]{qprep}},
\code{\link[qdap]{replace_abbreviation}},
\code{\link[qdap]{replace_contraction}},
\code{\link[qdap]{replace_symbol}}
}
\keyword{number-to-word}
|
74f70999be81c2a4b4bf2997bd83a6529cd104cd
|
da317c3ad3a26dd1fbde456c191f41dbc438a4ef
|
/shiny/server.R
|
dd96735c4b0c1d601263c3db43556899f3feb05e
|
[
"MIT"
] |
permissive
|
maxfilip98/APPR-2017-18
|
e17cf82207865cf7a68cff2833ea3a26601e1dda
|
f9fd04f90ca6f8b711d443ea1f52b03d97ea9890
|
refs/heads/master
| 2021-09-06T20:00:48.464904
| 2018-02-10T18:23:54
| 2018-02-10T18:23:54
| 110,083,843
| 0
| 0
| null | 2017-11-09T07:47:04
| 2017-11-09T07:47:03
| null |
UTF-8
|
R
| false
| false
| 1,597
|
r
|
server.R
|
library(shiny)
function(input, output) {
output$grafi1 <- renderPlot({
tabela1 <- BDP %>% filter(drzava == input$drzava1, sestava ==input$sestava)
print(ggplot(tabela1, aes(x = leta, y = vrednost/1000)) + geom_line() +
xlab("leta")+ ylab("vrednost") +
ggtitle("Vrednost BDP-ja v miljardah"))
})
output$grafi2 <- renderPlot({
tabela2 <- inflacija %>% filter(drzava == input$drzava2)
print(ggplot(tabela2, aes(x = leto, y = stopnja)) + geom_line() +
xlab("leto")+ ylab("stopnja") + scale_x_continuous(breaks = seq(2005, 2017, 2)) +
ggtitle("Stopnja inflacije"))
})
output$grafi3 <- renderPlot({
tabela3 <- pomozna_BDP_obrestne_mere %>% filter(sestava == "Gross domestic product at market prices",
vrsta == "deposit_facility", drzava == input$drzava3)
print(ggplot(tabela3, aes(x = BDP, y = obrestna.mera)) + geom_point() +
xlab("BDP")+ ylab("obrestna mera") + geom_smooth(method = "lm") +
ggtitle("Odvisnost BDP-ja od obrestne mere v posameznih državah"))
})
output$grafi4 <- renderPlot({
tabela4 <- pomozna_inflacija_BDP %>% filter(sestava == "Gross domestic product at market prices",
drzava == input$drzava4)
print(ggplot(tabela4, aes(x = vrednost, y = stopnja)) + geom_point() +
xlab("BDP")+ ylab("inflacija") + geom_smooth(method = "lm") +
ggtitle("Odvisnost BDP-ja od inflacije v posameznih državah"))
})
}
|
ca178c4053c2afd2bc044efebfb243e1a446e5b6
|
a4b8d053e3936d63c09c29a7338509c7bf49e738
|
/man/add_kml.Rd
|
8c43a83bc210e837ec921dcc7b6d8af628cf2cb4
|
[] |
no_license
|
ManuelDeFrancisco/googleway
|
1d7b35d368579f4123b86a44c40b33a5d2b90d8b
|
cfc53dcf019de587f30b2f3c04648c626bc6459c
|
refs/heads/master
| 2020-05-20T23:42:34.050917
| 2017-03-10T10:24:46
| 2017-03-10T10:24:46
| 84,547,853
| 0
| 0
| null | 2017-03-10T10:21:28
| 2017-03-10T10:21:27
| null |
UTF-8
|
R
| false
| true
| 423
|
rd
|
add_kml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/google_map_layers.R
\name{add_kml}
\alias{add_kml}
\title{Add KML}
\usage{
add_kml(map, kml_data, layer_id = NULL)
}
\arguments{
\item{map}{a googleway map object created from \code{google_map()}}
\item{kml_data}{kml data layer}
\item{layer_id}{single value specifying an id for the layer.}
}
\description{
Adds a kml layer to a google map
}
|
7075cb8afa82b74e8a836ae645d54f4ed0f79b22
|
a189b6b9003ae77bc2d774ea5845f4842f06f5ba
|
/man/summary.bootAverageDominanceAnalysis.Rd
|
912089539bfb68a6798ca4b56cbd242e40b38fcb
|
[] |
no_license
|
clbustos/dominanceAnalysis
|
28a95b324aa65167f4556a59f4f6cfeb9ad55962
|
94846f37de1617f40d9381bd42e49c14e6717761
|
refs/heads/master
| 2023-05-25T13:27:37.933994
| 2023-05-12T21:13:32
| 2023-05-12T21:13:32
| 13,853,056
| 22
| 12
| null | 2020-06-20T13:18:39
| 2013-10-25T06:20:48
|
R
|
UTF-8
|
R
| false
| true
| 630
|
rd
|
summary.bootAverageDominanceAnalysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.bootAverageDominanceAnalysis.r
\name{summary.bootAverageDominanceAnalysis}
\alias{summary.bootAverageDominanceAnalysis}
\title{Summary for bootAverageDominanceAnalysis.}
\usage{
\method{summary}{bootAverageDominanceAnalysis}(object, fit.functions = NULL, ...)
}
\arguments{
\item{object}{a \code{\link{bootAverageDominanceAnalysis}} object}
\item{fit.functions}{name of the fit indices to retrieve. If NULL, all fit indices will be retrieved}
\item{...}{ignored}
}
\description{
Summary for bootAverageDominanceAnalysis.
}
\keyword{internal}
|
617fe8f0322ebc58dfddedab2644596e1438125b
|
57854e2a3731cb1216b2df25a0804a91f68cacf3
|
/tests/testthat/test-variable-type.R
|
03c24b48a05ae6ffd48d0f561badb5078af17bca
|
[] |
no_license
|
persephonet/rcrunch
|
9f826d6217de343ba47cdfcfecbd76ee4b1ad696
|
1de10f8161767da1cf510eb8c866c2006fe36339
|
refs/heads/master
| 2020-04-05T08:17:00.968846
| 2017-03-21T23:25:06
| 2017-03-21T23:25:06
| 50,125,918
| 1
| 0
| null | 2017-02-10T23:23:34
| 2016-01-21T17:56:57
|
R
|
UTF-8
|
R
| false
| false
| 1,775
|
r
|
test-variable-type.R
|
context("Variable types")
# Unit tests run against canned HTTP fixtures -- no live Crunch server is hit.
with_mock_HTTP({
ds <- loadDataset("test ds")
test_that("Variable type method", {
expect_identical(type(ds[["birthyr"]]), "numeric")
expect_identical(type(ds$gender), "categorical")
})
# Assigning a new type should issue a POST to the variable's cast/ endpoint
# with the target type in the request body.
test_that("Changing numeric type by <- makes requests", {
expect_POST(type(ds$birthyr) <- "categorical",
'api/datasets/1/variables/birthyr/cast/',
'{"cast_as":"categorical"}')
expect_POST(type(ds$birthyr) <- "text",
'api/datasets/1/variables/birthyr/cast/',
'{"cast_as":"text"}')
})
# Re-assigning the current type must not generate any HTTP traffic.
test_that("Setting the same type is a no-op", {
expect_no_request(type(ds$birthyr) <- "numeric")
})
test_that("Attempting to set an unsupported type fails", {
for (i in c("multiple_response", "categorical_array", "datetime", "foo")) {
expect_error(type(ds$birthyr) <- i,
"is not a Crunch variable type that can be assigned",
info=i)
}
})
test_that("Changing multiple_response type by <- fails", {
for (i in c("categorical", "text", "numeric", "categorical_array", "datetime", "foo")) {
expect_error(type(ds$mymrset) <- i,
"Cannot change the type of a MultipleResponseVariable by type<-",
info=i)
}
})
})
# Integration tests: only run when real test credentials are configured.
with_test_authentication({
test_that("Type changing alters data on the server", {
ds <- newDataset(df[,1,drop=FALSE])
testvar <- ds$v1
expect_true(is.Numeric(testvar))
type(testvar) <- "text"
expect_true(is.Text(testvar))
ds <- refresh(ds)  # re-fetch to confirm the change persisted server-side
expect_true(is.Text(ds$v1))
type(ds$v1) <- "numeric"
expect_true(is.Numeric(ds$v1))
})
})
|
cfed98e63adff8d2fc77fee4d169513114213bf5
|
c4abf97a3641c2a3adf1ab7adab87b47a036fb86
|
/TP2/plot-eje.r
|
77951ba1713f58bd6b3ef94efec7d012ac780379
|
[] |
no_license
|
elopez/IAA
|
b35ad98cdabbcb92f8e1aa37358222acde42425d
|
865838ec5e36771d2dbb849970c39133b4471786
|
refs/heads/master
| 2021-01-21T04:40:50.635383
| 2016-06-12T20:22:33
| 2016-06-12T20:22:33
| 54,748,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,206
|
r
|
plot-eje.r
|
library("ggplot2")
# Mean decision-tree train/test error per value of d, for datasets 'a' and 'b'.
x = read.csv("out-ej7/plot-error-after-prune.csv", header=F)
a = aggregate(x[x[,1]=='a',3:4], list(x[x[,1]=='a',2]), mean)
b = aggregate(x[x[,1]=='b',3:4], list(x[x[,1]=='b',2]), mean)
# Median neural-network test error per value of d, same two datasets.
w = read.csv("out-ej7/discrete-error.csv", header=F)
ann = aggregate(w[w[,1]=='a',3], list(w[w[,1]=='a',2]), median)
bnn = aggregate(w[w[,1]=='b',3], list(w[w[,1]=='b',2]), median)
# Reusable aesthetic mappings: one colour label per curve so all six series
# share a single legend.
atrain = aes(y = V3, colour = "A - DT - Train")
atest = aes(y = V4, colour = "A - DT - Test")
atestnn = aes(y = x, colour = "A - NN - Test")
btrain = aes(y = V3, colour = "B - DT - Train")
btest = aes(y = V4, colour = "B - DT - Test")
btestnn = aes(y = x, colour = "B - NN - Test")
# Plot all six error curves (points + lines) against d (Group.1).
ggplot(a, aes(Group.1)) +
# A
geom_point(atrain) +
geom_point(atest) +
geom_point(data = ann, atestnn) +
geom_line(atrain) +
geom_line(atest) +
geom_line(data=ann, atestnn) +
# B
geom_point(data = b, btrain) +
geom_point(data = b, btest) +
geom_point(data = bnn, btestnn) +
geom_line(data = b, btrain) +
geom_line(data = b, btest) +
geom_line(data = bnn, btestnn) +
labs(y = "Error porcentual promedio") +
labs(x = "Valor de d") +
labs(title = "Error porcentual") +
theme(legend.title=element_blank())
|
fb6ee1a99db3b4fa7c77d0451925e22b828b469b
|
5710b0ca77732f863616e6de4b8b68977bcce7a3
|
/Downstream_Analysis_WGS/Allele_freq_plots.R
|
d6283c7b9cee47dfe47c053887610e9d118b556a
|
[] |
no_license
|
mcadamme/FieldHz_Pop_Genomics
|
f96f566dc71c88bd3540b644adb3cbc3df23486a
|
bc52a6e0fe325c526ed1629508d6ddec6b0473cf
|
refs/heads/master
| 2021-11-05T20:45:34.482927
| 2021-10-05T18:04:21
| 2021-10-05T18:04:21
| 154,679,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,922
|
r
|
Allele_freq_plots.R
|
#This is the script I used to plot allele frequency change at Cry1 and Cry2 associated loci over time
#04122021 MF
library(tidyr); library(ggplot2)
setwd("~/Desktop/Hz_fieldColl_pop_gen/Reanalysis_PNAS")
#loading resistance-associated loci with signatures of temporal change in the field
Cry1 <- read.table("./hiCry1FST2002to2017.txt", header = T)
Cry2 <- read.table("./hiCry2FST2002to2017.txt", header = T)
#loading allele frequency data
freqs_2002 <- read.table("./thinned_FieldHzea_variantsonly_2002.freq.frq", header = F, sep="\t", stringsAsFactors = F)
freqs_2012 <- read.table("./thinned_FieldHzea_variantsonly_2012.freq.frq", header = F, sep="\t", stringsAsFactors = F)
freqs_2017 <- read.table("./thinned_FieldHzea_variantsonly_2017.freq.frq", header = F, sep="\t", stringsAsFactors = F)
#filtering based on n Chr - must have at least 6 individs per year
freqs_2002_filt <- subset(freqs_2002, V4 > 12)
freqs_2012_filt <- subset(freqs_2012, V4 > 12)
freqs_2017_filt <- subset(freqs_2017, V4 > 12)
#prepping for merge with Cry1 and Cry2 datasets
#Cry1
uniq_Cry1_scafs <- data.frame(unique(Cry1$Scaf))
names(uniq_Cry1_scafs) <- "scaf"
min_HiCry1ByScaf <- as.numeric(as.character(as.vector(tapply(Cry1$WinStart, Cry1$Scaf, min))))
max_HiCry1ByScaf <- as.numeric(as.character(as.vector(tapply(Cry1$WinStart, Cry1$Scaf, max))))
minMax_cry1 <- cbind(min_HiCry1ByScaf, max_HiCry1ByScaf)
str(minMax_cry1)
names(minMax_cry1) <- c("Min", "Max")
#Cry2
uniq_Cry2_scafs <- data.frame(unique(Cry2$Scaf))
names(uniq_Cry2_scafs) <- "scaf"
min_HiCry2ByScaf <- as.numeric(as.character(as.vector(tapply(Cry2$WinStart, Cry2$Scaf, min))))
max_HiCry2ByScaf <- as.numeric(as.character(as.vector(tapply(Cry2$WinStart, Cry2$Scaf, max))))
minMax_cry2 <- cbind(min_HiCry2ByScaf, max_HiCry2ByScaf)
str(minMax_cry2)
names(minMax_cry2) <- c("Min", "Max")
#Split allele from freqs
freqs_2002_spl <- freqs_2002_filt %>% separate(col = "V5", into = c("Allele1", "Freq1"), sep = ":") %>% separate(col = "V6", into = c("Allele2", "Freq2"), sep = ":")
freqs_2012_spl <- freqs_2012_filt %>% separate(col = "V5", into = c("Allele1", "Freq1"), sep = ":") %>% separate(col = "V6", into = c("Allele2", "Freq2"), sep = ":")
freqs_2017_spl <- freqs_2017_filt %>% separate(col = "V5", into = c("Allele1", "Freq1"), sep = ":") %>% separate(col = "V6", into = c("Allele2", "Freq2"), sep = ":")
#adding year for eventual rbind
freqs_2002_spl$Year <- rep("2002", times = nrow(freqs_2002_spl))
freqs_2012_spl$Year <- rep("2012", times = nrow(freqs_2012_spl))
freqs_2017_spl$Year <- rep("2017", times = nrow(freqs_2017_spl))
#adding ScafPos for rbind
freqs_2002_spl$ScafPos <- paste0(freqs_2002_spl$V1,"_",freqs_2002_spl$V2)
freqs_2012_spl$ScafPos <- paste0(freqs_2012_spl$V1,"_",freqs_2012_spl$V2)
freqs_2017_spl$ScafPos <- paste0(freqs_2017_spl$V1,"_",freqs_2017_spl$V2)
##### Cry1 First #####
#merging Cry1 with freq datasets by scaffold
freqs_2002_Cry1 <- merge(freqs_2002_spl, uniq_Cry1_scafs, by.x = "V1", by.y = "scaf")
freqs_2012_Cry1 <- merge(freqs_2012_spl, uniq_Cry1_scafs, by.x = "V1", by.y = "scaf")
freqs_2017_Cry1 <- merge(freqs_2017_spl, uniq_Cry1_scafs, by.x = "V1", by.y = "scaf")
merged <- rbind(freqs_2002_Cry1[,c(4:10)], freqs_2012_Cry1[,c(4:10)], freqs_2017_Cry1[,c(4:10)])#long format
#reshaping long to wide for subsetting
merged_W <- reshape(merged, idvar = c("ScafPos"), timevar = "Year", direction = "wide")
merged_W_spl <- merged_W %>% separate(col = "ScafPos", into = c("Scaf", "Pos"), sep = "_")
str(merged_W_spl)
merged_W_spl$Pos <- as.numeric(as.character(merged_W_spl$Pos))
#subsetting by Cry1 scaffolds & windows - commented out scaffolds that were not significant after we changed how we defined the QTL windows.
#Cry1_KZ118241.1 <- subset(merged_W_spl, Scaf == "KZ118241.1" & Pos > 140000 & Pos < (147000+1000))#adding 1000 to get full window
Cry1_KZ118067.1 <- subset(merged_W_spl, Scaf == "KZ118067.1" & Pos > 144000 & Pos < (146000+1000))
Cry1_KZ116099.1 <- subset(merged_W_spl, Scaf == "KZ116099.1" & Pos > 2000 & Pos < (7000+1000))
Cry1_KZ117975.1 <- subset(merged_W_spl, Scaf == "KZ117975.1" & Pos > 43000 & Pos < (46000+1000))
#Cry1_KZ118133.1 <- subset(merged_W_spl, Scaf == "KZ118133.1" & Pos > 34000 & Pos < (35000+1000))
#writing position tables
#write.csv(Cry1_KZ118241.1[,c(1,2)], file = "Cry1_KZ118241.1_pos.csv")
#write.csv(Cry1_KZ118067.1[,c(1,2)], file = "Cry1_KZ118067.1_pos.csv")
#write.csv(Cry1_KZ116099.1[,c(1,2)], file = "Cry1_KZ116099.1_pos.csv")
#write.csv(Cry1_KZ117975.1[,c(1,2)], file = "Cry1_KZ117975.1_pos.csv")
#write.csv(Cry1_KZ118133.1[,c(1,2)], file = "Cry1_KZ118133.1_pos.csv")
#All Cry1
#All_Cry1 <- rbind(Cry1_KZ118241.1, Cry1_KZ118067.1, Cry1_KZ116099.1, Cry1_KZ117975.1, Cry1_KZ118133.1)
All_Cry1 <- rbind(Cry1_KZ118067.1, Cry1_KZ116099.1, Cry1_KZ117975.1)
All_Cry1 <- na.omit(All_Cry1)
str(All_Cry1)
All_Cry1$Freq1.2002 <- as.numeric(as.character(All_Cry1$Freq1.2002))
All_Cry1$Freq2.2002 <- as.numeric(as.character(All_Cry1$Freq2.2002))
All_Cry1$Freq1.2012 <- as.numeric(as.character(All_Cry1$Freq1.2012))
All_Cry1$Freq2.2012 <- as.numeric(as.character(All_Cry1$Freq2.2012))
All_Cry1$Freq1.2017 <- as.numeric(as.character(All_Cry1$Freq1.2017))
All_Cry1$Freq2.2017 <- as.numeric(as.character(All_Cry1$Freq2.2017))
#checking for same ref allele (Allele1 across years)
all(All_Cry1$Allele1.2002 == All_Cry1$Allele1.2012)
all(All_Cry1$Allele1.2002 == All_Cry1$Allele1.2017)#good
#getting only changes > 0.1
All_Cry1$TotDiff <- abs(All_Cry1$Freq1.2002 - All_Cry1$Freq1.2017)
sub_All_Cry1 <- subset(All_Cry1, TotDiff > 0.1)
#getting highest 2002-2017 cry1 allele frequency change on each scaffold
Cry1_changes <- tapply(All_Cry1$TotDiff, All_Cry1$Scaf, max)
# Build a long-format data frame for plotting: for each retained SNP, keep
# the frequency trajectory of whichever allele INCREASED between 2002 and
# 2012. Column indices: 5/7 = 2002 freqs (allele 1 / allele 2),
# 10/12 = 2012, 15/17 = 2017.
dat_for_Plot <- data.frame()
for (i in 1:nrow(sub_All_Cry1)){
if (sub_All_Cry1[i,5] - sub_All_Cry1[i,10] < 0) {
# Allele 1 rose from 2002 to 2012: track its three-year trajectory.
scaf <- rep(sub_All_Cry1[i,1], times = 3)
row <- rep(i, times = 3)
dat <- c(sub_All_Cry1[i,5], sub_All_Cry1[i,10], sub_All_Cry1[i,15])
year <- c("2002", "2012", "2017")
freq_change <- data.frame(cbind(scaf, row, dat, year))
dat_for_Plot <- rbind(dat_for_Plot, freq_change)}
else {
# Otherwise track allele 2's trajectory instead.
scaf <- rep(sub_All_Cry1[i,1], times = 3)
row <- rep(i, times = 3)
dat <- c(sub_All_Cry1[i,7], sub_All_Cry1[i,12], sub_All_Cry1[i,17])
year <- c("2002", "2012", "2017")
freq_change <- data.frame(cbind(scaf,row, dat, year))
dat_for_Plot <- rbind(dat_for_Plot, freq_change)}
}
str(dat_for_Plot)
# cbind() coerced everything to character; restore numeric frequencies.
dat_for_Plot$dat <- as.numeric(as.character(dat_for_Plot$dat))
# One line per SNP (group = row), coloured by scaffold.
# NOTE(review): the title names a single scaffold but the data span several
# Cry1 scaffolds -- confirm the intended label.
ggplot(dat_for_Plot, aes(x = year, y = dat)) + ggtitle("Cry1_KZ118133.1" ) + geom_line(aes(color = scaf, group = row))
##### Cry2 #####
#merging Cry2 with freq datasets by scaffold
freqs_2002_Cry2 <- merge(freqs_2002_spl, uniq_Cry2_scafs, by.x = "V1", by.y = "scaf")
freqs_2012_Cry2 <- merge(freqs_2012_spl, uniq_Cry2_scafs, by.x = "V1", by.y = "scaf")
freqs_2017_Cry2 <- merge(freqs_2017_spl, uniq_Cry2_scafs, by.x = "V1", by.y = "scaf")
merged2 <- rbind(freqs_2002_Cry2[,c(4:10)], freqs_2012_Cry2[,c(4:10)], freqs_2017_Cry2[,c(4:10)])#long format
#reshaping long to wide for subsetting
merged_W2 <- reshape(merged2, idvar = c("ScafPos"), timevar = "Year", direction = "wide")
merged_W2_spl <- merged_W2 %>% separate(col = "ScafPos", into = c("Scaf", "Pos"), sep = "_")
str(merged_W2_spl)
merged_W2_spl$Pos <- as.numeric(as.character(merged_W2_spl$Pos))
#subsetting by Cry1 scaffolds & windows
Cry2_KZ117108.1 <- subset(merged_W2_spl, Pos > 257000 & Pos < (270000+1000))#adding 1000 to get full window
#All Cry2
All_Cry2 <- na.omit(Cry2_KZ117108.1)
All_Cry2$Freq1.2002 <- as.numeric(as.character(All_Cry2$Freq1.2002))
All_Cry2$Freq2.2002 <- as.numeric(as.character(All_Cry2$Freq2.2002))
All_Cry2$Freq1.2012 <- as.numeric(as.character(All_Cry2$Freq1.2012))
All_Cry2$Freq2.2012 <- as.numeric(as.character(All_Cry2$Freq2.2012))
All_Cry2$Freq1.2017 <- as.numeric(as.character(All_Cry2$Freq1.2017))
All_Cry2$Freq2.2017 <- as.numeric(as.character(All_Cry2$Freq2.2017))
#checking for same ref allele (Allele1 across years)
all(All_Cry2$Allele1.2002 == All_Cry2$Allele1.2012)
all(All_Cry2$Allele1.2002 == All_Cry2$Allele1.2017)#good
#getting only changes > 0.1
All_Cry2$TotDiff <- abs(All_Cry2$Freq1.2002 - All_Cry2$Freq1.2017)
sub_All_Cry2 <- subset(All_Cry2, TotDiff > 0.1)
#getting highest 2002-2017 cry2 allele frequency change on each scaffold
Cry2_changes <- tapply(All_Cry2$TotDiff, All_Cry2$Scaf, max)
#plotting function
dat_for_Plot <- data.frame()
for (i in 1:nrow(sub_All_Cry2)){
if (sub_All_Cry2[i,5] - sub_All_Cry2[i,10] < 0) {
scaf <- rep(sub_All_Cry2[i,1], times = 3)
row <- rep(i, times = 3)
dat <- c(sub_All_Cry2[i,5], sub_All_Cry2[i,10], sub_All_Cry2[i,15])
year <- c("2002", "2012", "2017")
freq_change <- data.frame(cbind(scaf, row, dat, year))
dat_for_Plot <- rbind(dat_for_Plot, freq_change)}
else {
scaf <- rep(sub_All_Cry2[i,1], times = 3)
row <- rep(i, times = 3)
dat <- c(sub_All_Cry2[i,7], sub_All_Cry2[i,12], sub_All_Cry2[i,17])
year <- c("2002", "2012", "2017")
freq_change <- data.frame(cbind(scaf,row, dat, year))
dat_for_Plot <- rbind(dat_for_Plot, freq_change)}
}
str(dat_for_Plot)
dat_for_Plot$dat <- as.numeric(as.character(dat_for_Plot$dat))
ggplot(dat_for_Plot, aes(x = year, y = dat)) + geom_line(aes(color = scaf, group = row))
##### LG5 changes #####
#freq datasets by scaffold - scaffold 569
freqs_2002_569 <- subset(freqs_2002_spl, V1 == "KZ118395.1")
freqs_2012_569 <- subset(freqs_2012_spl, V1 == "KZ118395.1")
freqs_2017_569 <- subset(freqs_2017_spl, V1 == "KZ118395.1")
merged_569 <- rbind(freqs_2002_569, freqs_2012_569, freqs_2017_569)
#reshaping long to wide for subsetting
merged_569 <- reshape(merged_569, idvar = c("ScafPos"), timevar = "Year", direction = "wide")
merged_569_spl <- merged_569 %>% separate(col = "ScafPos", into = c("Scaf", "Pos"), sep = "_")
str(merged_569_spl)
merged_569_spl$Pos <- as.numeric(as.character(merged_569_spl$Pos))
nonCry_KZ118395.1 <- subset(merged_569_spl, Pos < (151000+1000))#adding 1000 to get full window
nonCry_KZ118395.1 <- na.omit(nonCry_KZ118395.1)
nonCry_KZ118395.1$Freq1.2002 <- as.numeric(as.character(nonCry_KZ118395.1$Freq1.2002))
nonCry_KZ118395.1$Freq2.2002 <- as.numeric(as.character(nonCry_KZ118395.1$Freq2.2002))
nonCry_KZ118395.1$Freq1.2012 <- as.numeric(as.character(nonCry_KZ118395.1$Freq1.2012))
nonCry_KZ118395.1$Freq2.2012 <- as.numeric(as.character(nonCry_KZ118395.1$Freq2.2012))
nonCry_KZ118395.1$Freq1.2017 <- as.numeric(as.character(nonCry_KZ118395.1$Freq1.2017))
nonCry_KZ118395.1$Freq2.2017 <- as.numeric(as.character(nonCry_KZ118395.1$Freq2.2017))
#checking for same ref allele (Allele1 across years)
all(nonCry_KZ118395.1$Allele1.2002 == nonCry_KZ118395.1$Allele1.2012)
all(nonCry_KZ118395.1$Allele1.2002 == nonCry_KZ118395.1$Allele1.2017)#good
#getting only changes > 0.1
nonCry_KZ118395.1$TotDiff <- abs(nonCry_KZ118395.1$Freq1.2002 - nonCry_KZ118395.1$Freq1.2017)
#getting highest 2002-2017 cry2 allele frequency change on each scaffold
nonCry_KZ118395.1_changes <- tapply(nonCry_KZ118395.1$TotDiff, nonCry_KZ118395.1$Scaf, max)
#scaffold 1612
freqs_2002_1612 <- subset(freqs_2002_spl, V1 == "KZ117131.1")
freqs_2012_1612 <- subset(freqs_2012_spl, V1 == "KZ117131.1")
freqs_2017_1612 <- subset(freqs_2017_spl, V1 == "KZ117131.1")
merged_1612 <- rbind(freqs_2002_1612, freqs_2012_1612, freqs_2017_1612)
#reshaping long to wide for subsetting
merged_1612 <- reshape(merged_1612, idvar = c("ScafPos"), timevar = "Year", direction = "wide")
merged_1612_spl <- merged_1612 %>% separate(col = "ScafPos", into = c("Scaf", "Pos"), sep = "_")
str(merged_1612_spl)
merged_1612_spl$Pos <- as.numeric(as.character(merged_1612_spl$Pos))
nonCry_KZ117131.1 <- subset(merged_1612_spl, Pos < (151000+1000))#adding 1000 to get full window
nonCry_KZ117131.1 <- na.omit(nonCry_KZ117131.1)
nonCry_KZ117131.1$Freq1.2002 <- as.numeric(as.character(nonCry_KZ117131.1$Freq1.2002))
nonCry_KZ117131.1$Freq2.2002 <- as.numeric(as.character(nonCry_KZ117131.1$Freq2.2002))
nonCry_KZ117131.1$Freq1.2012 <- as.numeric(as.character(nonCry_KZ117131.1$Freq1.2012))
nonCry_KZ117131.1$Freq2.2012 <- as.numeric(as.character(nonCry_KZ117131.1$Freq2.2012))
nonCry_KZ117131.1$Freq1.2017 <- as.numeric(as.character(nonCry_KZ117131.1$Freq1.2017))
nonCry_KZ117131.1$Freq2.2017 <- as.numeric(as.character(nonCry_KZ117131.1$Freq2.2017))
#checking for same ref allele (Allele1 across years)
all(nonCry_KZ117131.1$Allele1.2002 == nonCry_KZ117131.1$Allele1.2012)
all(nonCry_KZ117131.1$Allele1.2002 == nonCry_KZ117131.1$Allele1.2017)#good
#getting only changes > 0.1
nonCry_KZ117131.1$TotDiff <- abs(nonCry_KZ117131.1$Freq1.2002 - nonCry_KZ117131.1$Freq1.2017)
#getting highest 2002-2017 cry2 allele frequency change on each scaffold
nonCry_KZ117131.1_changes <- tapply(nonCry_KZ117131.1$TotDiff, nonCry_KZ117131.1$Scaf, max)
|
caceec1aa6cc0eaebf9b1d124aaf396507ac06d7
|
eff0ebd361b8bb944c37771b7418e9087ad3307e
|
/man/with_check_disabled.Rd
|
da2e57cdccc4d0459bfb6e833e3f9ab423ca5824
|
[
"MIT"
] |
permissive
|
hongyuanjia/eplusbuildr
|
715d0a0be7fd51ca1a68c1db5cfd1ae85a3d6b89
|
f7683130fde1b128e36bd814f5f06a392189d3c0
|
refs/heads/master
| 2020-09-08T03:23:11.218913
| 2020-01-10T15:59:46
| 2020-01-10T15:59:46
| 221,000,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 530
|
rd
|
with_check_disabled.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/with.R
\name{with_check_disabled}
\alias{with_check_disabled}
\title{Evaluate an expression with eplusr checking components disabled}
\usage{
with_check_disabled(disable, expr)
}
\arguments{
\item{disable}{Names of checking components to disable during evaluation of
\code{expr}.}
\item{expr}{An expression to be evaluated with specified checking components
disabled.}
}
\description{
Evaluate an expression with eplusr checking components disabled
}
|
72edc3675118405ef4ee3e2d6a7fdd4574b404bb
|
e914260953075b7b6233a936251e37d329643a38
|
/cachematrix.R
|
76cec6a9a0851d33b1557982da18759ebb8b6f80
|
[] |
no_license
|
mustehsanikram/ProgrammingAssignment2
|
bf3788d845f836d2a5bc1c172a0da7e1f8f408b1
|
127ca37b69745ba22be1ed0de1057f65022b11a3
|
refs/heads/master
| 2021-01-14T09:49:47.362793
| 2019-08-06T05:18:09
| 2019-08-06T05:18:09
| 35,603,148
| 0
| 0
| null | 2015-05-14T09:35:13
| 2015-05-14T09:35:13
| null |
UTF-8
|
R
| false
| false
| 1,246
|
r
|
cachematrix.R
|
## Save actual matrix and return inverse of matrix from cache,
## (if calculation is already done) or first calculate inverse,cache it and then
## return
## This function caches the actual and inverse of matrix
## Build a cache-aware wrapper around a matrix.
## Returns a list of accessor closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # invalidate the stale cache
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## This function calculates inverse of matrix and cache it,
## if cache is already present then it returns it without calculation
## Compute (or retrieve from cache) the inverse of the matrix held in a
## "cacheMatrix" object created by makeCacheMatrix().
##
## x   : list of accessors returned by makeCacheMatrix()
## ... : additional arguments forwarded to solve()
##       (fix: the original accepted '...' but silently dropped it)
##
## Returns the inverse matrix. On a cache hit a message is emitted and the
## stored inverse is returned without recomputation.
cacheSolve <- function(x, ...) {
  invs <- x$getinverse()          ## previously computed inverse, if any
  if (!is.null(invs)) {
    message("getting cached data")
    return(invs)                  ## cache hit: skip the solve() call
  }
  data <- x$get()                 ## underlying matrix
  inv <- solve(data, ...)         ## forward '...' so solver options apply
  x$setinverse(inv)               ## store for future calls
  inv
}
|
e44782cfdce1ad9302bf511b56b32de41c36657c
|
b86924757435aed14d06e65fed2190c71f49617d
|
/beiliao/server.R
|
dca357ae89ed3322cc7aad34ddc23fb8bea5c8b3
|
[] |
no_license
|
yichunsung/Nanhua
|
06f00f3e16a33e154cec17788074c6719dfa340e
|
3c25da4b481d594b435e1e823b26112dfaa97cbb
|
refs/heads/master
| 2021-01-18T07:55:49.637059
| 2017-03-09T09:27:02
| 2017-03-09T09:27:02
| 84,296,213
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,551
|
r
|
server.R
|
library(magrittr)
library(httr)
library(rvest)
library(stringr)
library(reshape2)
library(knitr)
library(ggplot2)
library(plotly)
Sys.setlocale(category = "LC_ALL", locale = "")
## Scrape one hourly weather variable from the CWB (Taiwan Central Weather
## Bureau) day-history pages for station C0O830, covering the last four days.
##
## iterm : one of "Rain", "Hum", "Tem", "Press" -- selects the table column
##         to extract via its XPath.
## Returns a data.frame with a POSIXct `time` column (GMT) and one data
## column named after `iterm`. Requires network access.
getDataformCWB <- function(iterm){
fromdate <- Sys.Date()-4 # start of the four-day scrape window
todate <- Sys.Date()-1 # yesterday (most recent complete day)
date <- seq.Date(fromdate, todate, "day")
lengthDate <- as.numeric(length(date))
lengthDatep <- as.numeric(lengthDate+1) # +1 accounts for the leading Hour column
# Station id C0O830; the stname query parameter is URL-encoded Chinese text.
C0O830_url <- "http://e-service.cwb.gov.tw/HistoryDataQuery/DayDataController.do?command=viewMain&station=C0O830&stname=%25E7%25AB%25B9%25E6%259D%25B1&datepicker="
C0O830_date_dataFrame <- data.frame(date=date, urldate = paste(C0O830_url, date ,sep=""))
# ---------- XPath lookup: variable name -> table column ---------- #
inputxpathName <- c(iterm) # e.g. "Press"
xpathrain <- "//table[@id='MyTable']/tbody/tr/td[11]" # precipitation column
xpathHum <- "//table[@id='MyTable']/tbody/tr/td[6]" # relative humidity column
xpathTtem <- "//table[@id='MyTable']/tbody/tr/td[4]" # temperature column
xpathPres <- "//table[@id='MyTable']/tbody/tr/td[2]" # station pressure column
XpathName <- c("Rain", "Hum", "Tem", "Press")
xpathurl <- c(xpathrain, xpathHum, xpathTtem, xpathPres)
xpathList <- data.frame(XpathName, xpathurl)
xpathselect_dataframe <- subset(xpathList, xpathList$XpathName == inputxpathName)
xpathSelect_result <- as.vector(xpathselect_dataframe$xpathurl)
# Scrape each day's page and bind its 24 hourly values as a new column.
hr24 <- data.frame(Hour=1:24)
for (i in 1:lengthDate){
urlhtml <- as.vector(C0O830_date_dataFrame$urldate[i])
datadoc <-read_html(urlhtml)
data <- datadoc %>%
html_nodes(., xpath = xpathSelect_result)%>%
html_text
# NOTE(review): the pattern "<U+00A0>" is matched as literal text, not as a
# non-breaking-space character -- confirm this sub() actually strips anything.
data_renew <- str_trim(sub("<U+00A0>",replacement ="",data)) # strip unwanted markup, then trim
hr24 <-cbind(hr24, data_renew)
}
names(hr24)[2:lengthDatep] <- as.vector(as.factor(date)) # label day columns by date
hr24_all <- melt(hr24, id=c("Hour") ) # wide (one column per day) -> long
names(hr24_all) <- c("hour", "date", "data")
POStime <- as.POSIXct(paste(hr24_all$date, hr24_all$hour, sep = " "), "%Y-%m-%d %H", tz="GMT")
resultTable <- data.frame(time=POStime, data= hr24_all$data)
names(resultTable)[2] <-c(iterm) # rename the data column to the requested variable
return(resultTable)
}
## Build a single plotly time-series panel (markers + lines) for one
## weather variable.
## NOTE(review): reads the data frame `Beiliao` from the enclosing/global
## environment rather than taking it as an argument -- the function must be
## called after `Beiliao` is defined.
##
## iterm           : numeric vector for the y axis (a Beiliao column)
## nameforIterm    : trace name shown in the legend
## chooserangemode : plotly y-axis rangemode, e.g. "tozero" or "auto"
DrawFigure <- function(iterm, nameforIterm, chooserangemode){
Beiliao_Plotly <- plot_ly(
data = Beiliao,
x = Beiliao$time,
y = iterm,
type = "scatter",
mode = "markers+lines",
name = nameforIterm
) %>% layout(yaxis =list(title="", rangemode = chooserangemode),
xaxis = list(zeroline = TRUE, showline = TRUE))
return(Beiliao_Plotly)
}
# Fetch each weather variable exactly once.  The original code called
# getDataformCWB() five times ("Rain" twice), and every call re-scrapes the
# CWB pages for all requested dates, so this cuts network traffic roughly
# in half and keeps the time axis consistent with the rain series.
rain_tbl  <- getDataformCWB("Rain")
hum_tbl   <- getDataformCWB("Hum")
tem_tbl   <- getDataformCWB("Tem")
press_tbl <- getDataformCWB("Press")
Beiliao <- data.frame(time  = rain_tbl[[1]],
                      rain  = as.numeric(as.vector(rain_tbl[[2]])),
                      hum   = as.numeric(as.vector(hum_tbl[[2]])),
                      Tem   = as.numeric(as.vector(tem_tbl[[2]])),
                      Press = as.numeric(as.vector(press_tbl[[2]]))
)
# Shiny server: render the four stacked weather panels (rain, humidity,
# temperature, pressure) as one plotly subplot with a shared x axis.
shinyServer(function(input, output) {
  output$plotlyData <- renderPlotly({
    p_rain  <- DrawFigure(Beiliao$rain, "Rain", "tozero")
    p_hum   <- DrawFigure(Beiliao$hum, "hum", "tozero")
    p_tem   <- DrawFigure(Beiliao$Tem, "Tem", "tozero")
    p_press <- DrawFigure(Beiliao$Press, "Press", "auto")
    subplot(p_rain, p_hum, p_tem, p_press,
            nrows = 4,
            shareX = TRUE)
  })
})
|
f8d0c57d17d018298624d0c87a456e83759a3b94
|
d3c1254c2aefd9978c3ef22b094c498ed738bff6
|
/master.R
|
c25b18af019b87416eaef6e49f464d100f5ee496
|
[] |
no_license
|
joebrew/maltem_cost_effectiveness
|
543d0f6ca658debee010d9c7714c9d6f60ed5048
|
8aed2fba47fae30edda30051864b20fbcfabdf4e
|
refs/heads/master
| 2021-08-08T11:53:40.833966
| 2021-01-23T09:22:40
| 2021-01-23T09:22:40
| 79,894,422
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,124
|
r
|
master.R
|
# Libraries
library(tidyverse)
# NOTE(review): the sourced scripts below are expected to define the global
# objects used later in this file (df, pop, itn, irs, weather_weekly,
# irs_protect, distances_to_border) -- confirm against the sourced files.
# BES data
source('get_bes_data.R', encoding = "UTF-8")
# Weather data
source('get_weather_data.R')
# # Lagged weather data
# source('get_wide_weather.R')
# IRS data
source('get_irs_data.R')
# ITN data
source('get_itn_data.R')
# Get population into itn: aggregate the (presumably finer-grained)
# population table up to district level, then attach it to the ITN table.
itn <-
  left_join(x = itn,
            y = pop %>%
              group_by(year, district, province) %>%
              summarise(population = sum(population, na.rm = TRUE)) %>%
              ungroup,
            by = c('province', 'district', 'year'))
# Get percentage coverage of itn (nets distributed per 100 inhabitants).
itn <-
  itn %>%
  mutate(itn_coverage = nets / population * 100) %>%
  dplyr::select(province, district, year, itn_coverage)
# Join irs/itn information to df
df <- left_join(x = df,
                y = itn,
                by = c('province',
                       'district', 'year'))
# Set to 0 those NAs for itn (district-years with no recorded
# distribution are treated as zero coverage).
df$itn_coverage[is.na(df$itn_coverage)] <- 0
# Get population information in irs: same district-level population
# aggregation as for the ITN table above.
irs <-
  left_join(x = irs,
            y = pop %>%
              group_by(year, district, province) %>%
              summarise(population = sum(population, na.rm = TRUE)) %>%
              ungroup,
            by = c('province', 'district', 'year'))
# Get percentage coverage of irs, separately for houses sprayed and
# people protected (both per 100 inhabitants).
irs <-
  irs %>%
  mutate(irs_coverage_houses = irs_houses / population * 100,
         irs_coverage_people = irs_people / population * 100)
# Make weeks instead of days since campaign end
irs$weeks_since_last_irs_campaign_end <-
  round(irs$days_since_last_irs_campaign_end / 7)
# Narrow down to the columns used downstream.
irs <- irs %>%
  dplyr::select(province, district, year, week,
                irs_houses, irs_people,
                weeks_since_last_irs_campaign_end,
                irs_coverage_houses,
                irs_coverage_people)
# Get which number irs campaign
# Cumulative sum that treats NA as 0 (base cumsum would propagate NA
# through the rest of the vector).
cum_summer <- function(x) {
  cumsum(replace(x, is.na(x), 0))
}
# Number the IRS campaigns per district: a week with
# weeks_since_last_irs_campaign_end == 1 marks the week right after a
# campaign ended, so the running count of those transitions numbers the
# completed campaigns; weeks inside an ongoing campaign (== 0) belong to
# the next campaign, hence the + 1.
irs <- irs %>%
  arrange(district, year, week) %>%
  mutate(dummy = ifelse(weeks_since_last_irs_campaign_end == 1, 1, 0)) %>%
  group_by(district) %>%
  mutate(irs_campaign = cum_summer(dummy)) %>%
  mutate(irs_campaign = ifelse(weeks_since_last_irs_campaign_end == 0,
                               irs_campaign + 1,
                               irs_campaign))
# Create a protection variable based on decline.
# NOTE(review): irs_protect() comes from a sourced script; presumably it
# maps weeks-since-campaign to a decaying [0, 1] factor -- confirm there.
# The result is clamped to [0, 100] and NAs become 0.
irs <- irs %>%
  mutate(irs_protection = irs_protect(weeks_since_last_irs_campaign_end) * irs_coverage_people) %>%
  mutate(irs_protection = ifelse(irs_protection < 0, 0,
                                 ifelse(irs_protection > 100, 100,
                                        irs_protection))) %>%
  mutate(irs_protection = ifelse(is.na(irs_protection), 0, irs_protection))
# Remove unnecessary variables
irs <- irs %>%
  dplyr::select(-dummy, -irs_campaign)
# # During an IRS campaign increase protection linearly (not working yet)
# x <-
#   irs %>%
#   # filter(!is.na(irs_campaign)) %>%
#   arrange(year, week) %>%
#   group_by(district, irs_campaign) %>%
#   mutate(irs_protection = ifelse(is.na(irs_protection),
#                                  dplyr::first(irs_protection[!is.na(irs_protection)]),
#                                  irs_protection)) %>%
#   mutate(irs_protection_campaign_start = dplyr::first(irs_protection[weeks_since_last_irs_campaign_end == 0]),
#          irs_portection_campaign_end = dplyr::first(irs_protection[weeks_since_last_irs_campaign_end == 1])) %>%
#   mutate(irs_protection_campaign_weeks = length(which(weeks_since_last_irs_campaign_end == 0))) %>%
#   mutate(irs_protection_increase = irs_portection_campaign_end - irs_protection_campaign_start) %>%
#   mutate(irs_protection_increase = ifelse(irs_protection_increase < 0,
#                                           0,
#                                           irs_protection_increase)) %>%
#   mutate(irs_protection_weekly_increase = irs_protection_increase / irs_protection_campaign_weeks) %>%
#   group_by(district, irs_campaign) %>%
#   mutate(x = irs_protection_campaign_start + cumsum(irs_protection_weekly_increase))
# Join irs data to df (bes + population + itn)
df <-
  left_join(x = df,
            y = irs,
            by = c("year", "week", "province", "district"))
# Join df (bes + population) to weather
df <-
  left_join(x = df,
            y = weather_weekly,
            by = c('district', 'date'))
# Make irs NAs be 0 (district-weeks with no IRS record get zero protection).
df <- df %>%
  mutate(irs_protection = ifelse(is.na(irs_protection),
                                 0,
                                 irs_protection))
# Get distance to south africa (sourced script defines distances_to_border).
source('get_distance_to_border.R')
df <-
  left_join(x = df,
            y = distances_to_border,
            by = 'district')
# Make an older and younger dataset
df_young <-df %>% filter(age_group == '0-4')
df_old <- df %>% filter(age_group == '5+')
# Make an aggregated/pooled dataset: cases and population are summed over
# age groups; the covariates (weather, ITN/IRS, distance) are constant
# within a district-week, so first() just keeps the single shared value.
df_agg <- df %>%
  group_by(year, week, date, month, day,
           province, district, disease) %>%
  summarise(x = n(),
            cases = sum(cases),
            population = sum(population),
            itn_coverage = first(itn_coverage),
            irs_houses = first(irs_houses),
            irs_people = first(irs_people),
            weeks_since_last_irs_campaign_end = first(weeks_since_last_irs_campaign_end),
            irs_coverage_houses = first(irs_coverage_houses),
            irs_coverage_people = first(irs_coverage_people),
            irs_protection = first(irs_protection),
            precipitation = first(precipitation),
            temp = first(temp),
            temp_max = first(temp_max),
            temp_min = first(temp_min),
            dew_point = first(dew_point),
            wind_speed = first(wind_speed),
            wind_speed_max = first(wind_speed_max),
            distance_to_land_border = first(distance_to_land_border))
# # # Write data for sharing with collaborators
# write_csv(df_agg, 'data/outputs/cases_population_weather_itn_irs_pooled_age_groups.csv')
# write_csv(df_young, 'data/outputs/cases_population_weather_itn_irs_young_only.csv')
# write_csv(df_old, 'data/outputs/cases_population_weather_itn_irs_old_only.csv')
# write_csv(df, 'data/outputs/cases_population_weather_itn_irs.csv')
# write_csv(bes, 'data/outputs/cases_only.csv')
# write_csv(pop, 'data/outputs/population.csv')
# # Write csv for weather data
# write_csv(weather_weekly, 'data/outputs/weather_weekly.csv')
# write_csv(weather, 'data/outputs/weather_daily.csv')
# # write_csv(wide_weather, 'data/outputs/weather_wide.csv')
# # Write csv for itn data
# write_csv(itn, 'data/outputs/itn.csv')
# # Write csv for irs data
# write_csv(irs, 'data/outputs/irs.csv')
# # Join with wide weather
# df_wide <-
# left_join(x = df,
# y = wide_weather)
# # Write wide weather too
# write_csv(df_wide, 'data/outputs/cases_population_weather_wide_itn_irs.csv')
# ggplot(data = df_agg,
# aes(x = date,
# y = irs_protection)) +
# geom_line(color = 'darkred',
# alpha = 0.6) +
# facet_wrap(~district) +
# labs(x = 'Date',
# y = 'Protection score',
# title = 'IRS protection by district') +
# ggthemes::theme_hc()
|
66352c0f3d8151dddef6d3076aa41bd9f8da4125
|
9a581658d45500655d37957dd05dd6488524ff47
|
/development/R-main/unliked-markers-main-dev.R
|
f0814f1046d7427ad0baf9c05505c54714ee34f3
|
[] |
no_license
|
eriqande/CKMRsim
|
04def67a7f82539afb918f97e81b1361c100261d
|
652193e2e9e6dcac268b9816665cc2cf16f1d7ed
|
refs/heads/master
| 2022-10-31T01:40:18.174183
| 2022-10-26T16:25:22
| 2022-10-26T16:25:22
| 55,812,404
| 11
| 6
| null | 2021-04-30T13:43:51
| 2016-04-08T22:03:36
|
R
|
UTF-8
|
R
| false
| false
| 798
|
r
|
unliked-markers-main-dev.R
|
library(dplyr)
library(CKMRsim)
library(ggplot2)
# Deliberately treat the "linked mhaps" as if they were unlinked markers,
# keeping only the relationship categories of interest.
# read in the "linked mhaps" but treat them as unlinked
mhlist <- long_markers_to_X_l_list(D = linked_mhaps,
                                   kappa_matrix = kappas[c("MZ", "PO", "FS", "HS", "U"), ])
# add the matrices that account for genotyping error
mhlist2 <- insert_C_l_matrices(mhlist,
                               snp_err_rates = 0.005,
                               scale_by_num_snps = TRUE)
# do the matrix multiplication that gives the Y_l matrices
mhlist3 <- insert_Y_l_matrices(mhlist2)
# simulate values from each relationship, assuming unlinked. This takes
# about 12 seconds
Qvals <- simulate_and_calc_Q(mhlist3, reps = 10^3)
# collect the MC averages for the non-self relationships
mc_sample_simple(Qvals, nu = c("PO", "FS", "HS"), method = "both")
|
faba9f9dd635bb28e55b956544f88ab7a8b04945
|
c9ebf93e5aa135373f5200b51f80016e9e8b0739
|
/map_file.R
|
69ae37bb305a4d2d648d77399abe2b56ba6b3934
|
[] |
no_license
|
humanpowered/COVID19Shiny
|
3b82ddc61354f96627e28c080f09fc3d15115278
|
eae98cecde423b60f159477d0b6510ab9042dd9d
|
refs/heads/master
| 2022-11-10T23:03:06.498995
| 2020-06-29T22:09:57
| 2020-06-29T22:09:57
| 271,098,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
map_file.R
|
# Combine the pre-built nation / state / CA-county map data frames into a
# single long table keyed by a `granularity` column, then save it.
# The shared directory is factored out so the path appears only once.
# NOTE(review): this absolute Windows path makes the script machine-specific;
# consider a relative path or a config option.
map_dir <- "C:/Users/craig/OneDrive/Documents/R Projects/COVID19Shiny/COVID_Tracker_1.0"
state_maps <- readRDS(file.path(map_dir, "state_maps.rds"))
nation_map <- readRDS(file.path(map_dir, "nation_map.rds"))
ca_counties_map <- readRDS(file.path(map_dir, "ca_counties_maps.rds"))
# Fill in the level-identifying columns so the three tables stack cleanly.
nation_map$granularity <- 'Nation'
nation_map$region <- 'N/A'
nation_map$subregion[is.na(nation_map$subregion)] <- 'N/A'
state_maps$granularity <- 'State'
state_maps$subregion[is.na(state_maps$subregion)] <- 'N/A'
ca_counties_map$granularity <- 'County'
ca_counties_map$region <- 'N/A'
map_file <- bind_rows(nation_map, state_maps, ca_counties_map)
names(map_file) <- c('long', 'lat', 'group', 'order', 'state', 'county', 'granularity')
# Use TRUE (not the reassignable alias T) for fixed-string matching.
map_file$granularity <- gsub('County', "County (California Only)", map_file$granularity, fixed = TRUE)
saveRDS(map_file, file.path(map_dir, "combined_map_files.rds"))
|
4e8ae26af263bfd70b318bf0f25d4573c05c69e4
|
3c4550342213168a849defab116cb734bf35cd50
|
/code/reference/summaries_JT.R
|
5a6cecaca0505cbf0132e32505202c5a104f4ab3
|
[] |
no_license
|
guizar/Pest-MS
|
b8eaaaa8acb9390f6947624fa0dcdf6c031dbd37
|
95622943a8f23dd89215a49c82d37c5a500b4b8e
|
refs/heads/master
| 2020-03-31T09:01:09.293619
| 2015-05-09T06:06:58
| 2015-05-09T06:06:58
| 28,886,343
| 0
| 1
| null | 2015-01-14T23:12:01
| 2015-01-06T22:20:53
|
R
|
UTF-8
|
R
| false
| false
| 41,715
|
r
|
summaries_JT.R
|
# summary tables ----------------------------------------------------------
# NOTE(review): setwd() in a script is fragile -- paths are user-specific
# and the working directory is left changed for everything that follows.
setwd("~/Dropbox/climate change/food security/climate and crop pressure MS/data/ascii_crops_hires")
C_TO_R<-data.frame(read.csv("COUNTRY_TO_REGION.csv", header=TRUE))
# now just need to use plyr and ddply to aggregate by region and subregion!
setwd("~/Dropbox/climate change/pest MS shared")
# Per-group medians of every pest/climate/yield variable in ALL.
# The ~100-line summarise specification below was originally triplicated
# verbatim for the country, subregion and region groupings; a single helper
# removes that duplication.  `group` is a character vector of grouping
# column name(s) -- ddply accepts this interchangeably with .(col), and the
# output column keeps the same name, so the results are identical.
# YLPH_* = yield (tonnes) lost per ha of planted area due to climate.
median_summary <- function(data, group) {
  ddply(data, group, summarise,
        IPM_M2=median(IPM_M2,na.rm=TRUE),
        IPM_M3=median(IPM_M3,na.rm=TRUE),
        IPM_M4=median(IPM_M4,na.rm=TRUE),
        IPM_R2=median(IPM_R2,na.rm=TRUE),
        IPM_R3=median(IPM_R3,na.rm=TRUE),
        IPM_R4=median(IPM_R4,na.rm=TRUE),
        IPM_W2=median(IPM_W2,na.rm=TRUE),
        IPM_W3=median(IPM_W3,na.rm=TRUE),
        IPM_W4=median(IPM_W4,na.rm=TRUE),
        IY_M2=median(IY_M2,na.rm=TRUE),
        IY_M3=median(IY_M3,na.rm=TRUE),
        IY_M4=median(IY_M4,na.rm=TRUE),
        IY_R2=median(IY_R2,na.rm=TRUE),
        IY_R3=median(IY_R3,na.rm=TRUE),
        IY_R4=median(IY_R4,na.rm=TRUE),
        IY_W2=median(IY_W2,na.rm=TRUE),
        IY_W3=median(IY_W3,na.rm=TRUE),
        IY_W4=median(IY_W4,na.rm=TRUE),
        CLF_M=median(CLF_M,na.rm=TRUE),
        CL_M=median(CL_M,na.rm=TRUE),
        CY_M=median(CY_M,na.rm=TRUE),
        CA_M=median(CA_M,na.rm=TRUE),
        CGS_M=median(CGS_M,na.rm=TRUE),
        CLF_R=median(CLF_R,na.rm=TRUE),
        CL_R=median(CL_R,na.rm=TRUE),
        CY_R=median(CY_R,na.rm=TRUE),
        CA_R=median(CA_R,na.rm=TRUE),
        CGS_R=median(CGS_R,na.rm=TRUE),
        CLF_W=median(CLF_W,na.rm=TRUE),
        CL_W=median(CL_W,na.rm=TRUE),
        CY_W=median(CY_W,na.rm=TRUE),
        CA_W=median(CA_W,na.rm=TRUE),
        CGS_W=median(CGS_W,na.rm=TRUE),
        MET_M2=median(MET_M2,na.rm=TRUE),
        MET_M3=median(MET_M3,na.rm=TRUE),
        MET_M4=median(MET_M4,na.rm=TRUE),
        MET_R2=median(MET_R2,na.rm=TRUE),
        MET_R3=median(MET_R3,na.rm=TRUE),
        MET_R4=median(MET_R4,na.rm=TRUE),
        MET_W2=median(MET_W2,na.rm=TRUE),
        MET_W3=median(MET_W3,na.rm=TRUE),
        MET_W4=median(MET_W4,na.rm=TRUE),
        POP_M2=median(POP_M2,na.rm=TRUE),
        POP_M3=median(POP_M3,na.rm=TRUE),
        POP_M4=median(POP_M4,na.rm=TRUE),
        POP_R2=median(POP_R2,na.rm=TRUE),
        POP_R3=median(POP_R3,na.rm=TRUE),
        POP_R4=median(POP_R4,na.rm=TRUE),
        POP_W2=median(POP_W2,na.rm=TRUE),
        POP_W3=median(POP_W3,na.rm=TRUE),
        POP_W4=median(POP_W4,na.rm=TRUE),
        MET_AVG2=median(MET_AVG2,na.rm=TRUE),
        MET_AVG3=median(MET_AVG3,na.rm=TRUE),
        MET_AVG4=median(MET_AVG4,na.rm=TRUE),
        POP_AVG2=median(POP_AVG2,na.rm=TRUE),
        POP_AVG3=median(POP_AVG3,na.rm=TRUE),
        POP_AVG4=median(POP_AVG4,na.rm=TRUE),
        IPM_AVG2=median(IPM_AVG2,na.rm=TRUE),
        IPM_AVG3=median(IPM_AVG3,na.rm=TRUE),
        IPM_AVG4=median(IPM_AVG4,na.rm=TRUE),
        YLD_HA_M=median(YLD_HA_M,na.rm=TRUE),
        YLD_HA_R=median(YLD_HA_R,na.rm=TRUE),
        YLD_HA_W=median(YLD_HA_W,na.rm=TRUE),
        YLD_TOT_M=median(YLD_TOT_M,na.rm=TRUE),
        YLD_TOT_R=median(YLD_TOT_R,na.rm=TRUE),
        YLD_TOT_W=median(YLD_TOT_W,na.rm=TRUE),
        CL2050_M2=median(CL2050_M2,na.rm=TRUE),
        CL2050_M3=median(CL2050_M3,na.rm=TRUE),
        CL2050_M4=median(CL2050_M4,na.rm=TRUE),
        CL2050_R2=median(CL2050_R2,na.rm=TRUE),
        CL2050_R3=median(CL2050_R3,na.rm=TRUE),
        CL2050_R4=median(CL2050_R4,na.rm=TRUE),
        CL2050_W2=median(CL2050_W2,na.rm=TRUE),
        CL2050_W3=median(CL2050_W3,na.rm=TRUE),
        CL2050_W4=median(CL2050_W4,na.rm=TRUE),
        CLP2050_M2=median(CLP2050_M2,na.rm=TRUE),
        CLP2050_M3=median(CLP2050_M3,na.rm=TRUE),
        CLP2050_M4=median(CLP2050_M4,na.rm=TRUE),
        CLP2050_R2=median(CLP2050_R2,na.rm=TRUE),
        CLP2050_R3=median(CLP2050_R3,na.rm=TRUE),
        CLP2050_R4=median(CLP2050_R4,na.rm=TRUE),
        CLP2050_W2=median(CLP2050_W2,na.rm=TRUE),
        CLP2050_W3=median(CLP2050_W3,na.rm=TRUE),
        CLP2050_W4=median(CLP2050_W4,na.rm=TRUE),
        IYCC_M2=median(IYCC_M2,na.rm=TRUE),
        IYCC_M3=median(IYCC_M3,na.rm=TRUE),
        IYCC_M4=median(IYCC_M4,na.rm=TRUE),
        IYCC_R2=median(IYCC_R2,na.rm=TRUE),
        IYCC_R3=median(IYCC_R3,na.rm=TRUE),
        IYCC_R4=median(IYCC_R4,na.rm=TRUE),
        IYCC_W2=median(IYCC_W2,na.rm=TRUE),
        IYCC_W3=median(IYCC_W3,na.rm=TRUE),
        IYCC_W4=median(IYCC_W4,na.rm=TRUE),
        YLPH_M2=median(IPM_M2*CLF_M*(CY_M/CA_M),na.rm=TRUE), # yield (tonnes) lost per ha of planted area due to climate
        YLPH_M3=median(IPM_M3*CLF_M*(CY_M/CA_M),na.rm=TRUE),
        YLPH_M4=median(IPM_M4*CLF_M*(CY_M/CA_M),na.rm=TRUE),
        YLPH_R2=median(IPM_R2*CLF_R*(CY_R/CA_R),na.rm=TRUE),
        YLPH_R3=median(IPM_R3*CLF_R*(CY_R/CA_R),na.rm=TRUE),
        YLPH_R4=median(IPM_R4*CLF_R*(CY_R/CA_R),na.rm=TRUE),
        YLPH_W2=median(IPM_W2*CLF_W*(CY_W/CA_W),na.rm=TRUE),
        YLPH_W3=median(IPM_W3*CLF_W*(CY_W/CA_W),na.rm=TRUE),
        YLPH_W4=median(IPM_W4*CLF_W*(CY_W/CA_W),na.rm=TRUE))
}
# MEDIANS by country
MED_c <- median_summary(ALL, "NAME")
# MEDIANS by subregion
MED_sr <- median_summary(ALL, "Subregion")
# MEDIANS by region
MED_r <- median_summary(ALL, "Region")
# Per-group crop-loss summaries (mean yields, total yields, and total crop
# loss due to climate change under the three scenarios).  As with the
# median tables, this specification was originally triplicated for the
# country/subregion/region groupings; one helper replaces all three.
crop_loss_summary <- function(data, group) {
  ddply(data, group, summarise,
        MEAN_YLD_HA_M=median(YLD_HA_M,na.rm=TRUE),
        MEAN_YLD_HA_R=median(YLD_HA_R,na.rm=TRUE),
        MEAN_YLD_HA_W=median(YLD_HA_W,na.rm=TRUE),
        TOT_YLD_TOT_M=sum(YLD_TOT_M,na.rm=TRUE),
        TOT_YLD_TOT_R=sum(YLD_TOT_R,na.rm=TRUE),
        TOT_YLD_TOT_W=sum(YLD_TOT_W,na.rm=TRUE),
        TOT_CL2050_M2=sum(CL2050_M2,na.rm=TRUE), # total crop loss due to climate change in group
        TOT_CL2050_M3=sum(CL2050_M3,na.rm=TRUE),
        TOT_CL2050_M4=sum(CL2050_M4,na.rm=TRUE),
        TOT_CL2050_R2=sum(CL2050_R2,na.rm=TRUE),
        TOT_CL2050_R3=sum(CL2050_R3,na.rm=TRUE),
        TOT_CL2050_R4=sum(CL2050_R4,na.rm=TRUE),
        TOT_CL2050_W2=sum(CL2050_W2,na.rm=TRUE),
        TOT_CL2050_W3=sum(CL2050_W3,na.rm=TRUE),
        TOT_CL2050_W4=sum(CL2050_W4,na.rm=TRUE))
}
CL_c <- crop_loss_summary(ALL, "NAME")
CL_sr <- crop_loss_summary(ALL, "Subregion")
CL_r <- crop_loss_summary(ALL, "Region")
# Attach the crop-loss summaries to the corresponding median tables.
MED_r<-merge(MED_r,CL_r,by="Region",all=TRUE,sort=TRUE)
MED_sr<-merge(MED_sr,CL_sr,by="Subregion",all=TRUE,sort=TRUE)
MED_c<-merge(MED_c,CL_c,by="NAME",all=TRUE,sort=TRUE)
# country tables ----------------------------------------------------------
# making a datatables for IPM and yield impact by country
# NOTE(review): columns 7:39 are presumably the maize variables -- confirm
# against the column layout of ALL; magic column ranges are fragile.
MAIZE<-ALL[complete.cases(ALL[ ,7:39]),] # subsets ALL to give only maize data
# First compute country totals, merge them back in, then use them as
# weights for the weighted means (WMEAN_*) below.
TOT_YLD_MAIZE<-ddply(MAIZE,.(NAME),summarise,
                     TOT_YLD_TOT_M=sum(YLD_TOT_M))
MAIZE<-merge(MAIZE,TOT_YLD_MAIZE, by=c("NAME"),all=TRUE,sort=TRUE)
MAIZE_c<-ddply(MAIZE,.(NAME),summarise,
               MEAN_LAT_M=mean(LAT),
               WMEAN_LAT_M=sum((LAT*YLD_TOT_M)/TOT_YLD_TOT_M),
               MED_YLD_HA_M=median(YLD_HA_M),
               MEAN_YLD_HA_M=mean(YLD_HA_M),
               WMEAN_YLD_HA_M=sum((YLD_HA_M*YLD_TOT_M)/TOT_YLD_TOT_M),
               MIN_YLD_HA_M=min(YLD_HA_M),
               MAX_YLD_HA_M=max(YLD_HA_M),
               TOT_YLD_TOT_M=sum(YLD_TOT_M),
               TOT_CL2050_M2=sum(CL2050_M2),
               TOT_CL2050_M3=sum(CL2050_M3),
               TOT_CL2050_M4=sum(CL2050_M4),
               MED_IPM_M2=median(IPM_M2),
               MEAN_IPM_M2=mean(IPM_M2),
               WMEAN_IPM_M2=sum((IPM_M2*YLD_TOT_M)/TOT_YLD_TOT_M),
               MIN_IPM_M2=min(IPM_M2),
               MAX_IPM_M2=max(IPM_M2),
               MED_IPM_M3=median(IPM_M3),
               MEAN_IPM_M3=mean(IPM_M3),
               WMEAN_IPM_M3=sum((IPM_M3*YLD_TOT_M)/TOT_YLD_TOT_M),
               MIN_IPM_M3=min(IPM_M3),
               MAX_IPM_M3=max(IPM_M3),
               MED_IPM_M4=median(IPM_M4),
               MEAN_IPM_M4=mean(IPM_M4),
               WMEAN_IPM_M4=sum((IPM_M4*YLD_TOT_M)/TOT_YLD_TOT_M),
               MIN_IPM_M4=min(IPM_M4),
               MAX_IPM_M4=max(IPM_M4),
               MED_IYCC_M2=median(IYCC_M2),
               MEAN_IYCC_M2=mean(IYCC_M2),
               MIN_IYCC_M2=min(IYCC_M2),
               MAX_IYCC_M2=max(IYCC_M2),
               MED_IYCC_M3=median(IYCC_M3),
               MEAN_IYCC_M3=mean(IYCC_M3),
               MIN_IYCC_M3=min(IYCC_M3),
               MAX_IYCC_M3=max(IYCC_M3),
               MED_IYCC_M4=median(IYCC_M4),
               MEAN_IYCC_M4=mean(IYCC_M4),
               MIN_IYCC_M4=min(IYCC_M4),
               MAX_IYCC_M4=max(IYCC_M4),
               MED_YLPH_M2=median(IPM_M2*CLF_M*(CY_M/CA_M)),
               MEAN_YLPH_M2=mean(IPM_M2*CLF_M*(CY_M/CA_M)),
               WMEAN_YLPH_M2=sum(((IPM_M2*CLF_M*(CY_M/CA_M))*YLD_TOT_M)/TOT_YLD_TOT_M),
               MIN_YLPH_M2=min(IPM_M2*CLF_M*(CY_M/CA_M)),
               MAX_YLPH_M2=max(IPM_M2*CLF_M*(CY_M/CA_M)),
               MED_YLPH_M3=median(IPM_M3*CLF_M*(CY_M/CA_M)),
               MEAN_YLPH_M3=mean(IPM_M3*CLF_M*(CY_M/CA_M)),
               WMEAN_YLPH_M3=sum(((IPM_M3*CLF_M*(CY_M/CA_M))*YLD_TOT_M)/TOT_YLD_TOT_M),
               MIN_YLPH_M3=min(IPM_M3*CLF_M*(CY_M/CA_M)),
               MAX_YLPH_M3=max(IPM_M3*CLF_M*(CY_M/CA_M)),
               MED_YLPH_M4=median(IPM_M4*CLF_M*(CY_M/CA_M)),
               MEAN_YLPH_M4=mean(IPM_M4*CLF_M*(CY_M/CA_M)),
               WMEAN_YLPH_M4=sum(((IPM_M4*CLF_M*(CY_M/CA_M))*YLD_TOT_M)/TOT_YLD_TOT_M),
               MIN_YLPH_M4=min(IPM_M4*CLF_M*(CY_M/CA_M)),
               MAX_YLPH_M4=max(IPM_M4*CLF_M*(CY_M/CA_M)),
               CELLS_M=sum(CELLS))
# NOTE(review): columns 40:72 are presumably the rice variables -- confirm
# against the column layout of ALL.
RICE<-ALL[complete.cases(ALL[ ,40:72]),] # subsets ALL to give only rice data
# First compute country totals, merge them back in, then use them as
# weights for the weighted means (WMEAN_*) below.
TOT_YLD_RICE<-ddply(RICE,.(NAME),summarise,
                    TOT_YLD_TOT_R=sum(YLD_TOT_R))
RICE<-merge(RICE,TOT_YLD_RICE, by=c("NAME"),all=TRUE,sort=TRUE)
RICE_c<-ddply(RICE,.(NAME),summarise,
              MEAN_LAT_R=mean(LAT),
              WMEAN_LAT_R=sum((LAT*YLD_TOT_R)/TOT_YLD_TOT_R),
              MED_YLD_HA_R=median(YLD_HA_R),
              MEAN_YLD_HA_R=mean(YLD_HA_R),
              WMEAN_YLD_HA_R=sum((YLD_HA_R*YLD_TOT_R)/TOT_YLD_TOT_R),
              MIN_YLD_HA_R=min(YLD_HA_R),
              MAX_YLD_HA_R=max(YLD_HA_R),
              TOT_YLD_TOT_R=sum(YLD_TOT_R),
              TOT_CL2050_R2=sum(CL2050_R2),
              TOT_CL2050_R3=sum(CL2050_R3),
              TOT_CL2050_R4=sum(CL2050_R4),
              MED_IPM_R2=median(IPM_R2),
              MEAN_IPM_R2=mean(IPM_R2),
              WMEAN_IPM_R2=sum((IPM_R2*YLD_TOT_R)/TOT_YLD_TOT_R),
              MIN_IPM_R2=min(IPM_R2),
              MAX_IPM_R2=max(IPM_R2),
              MED_IPM_R3=median(IPM_R3),
              MEAN_IPM_R3=mean(IPM_R3),
              WMEAN_IPM_R3=sum((IPM_R3*YLD_TOT_R)/TOT_YLD_TOT_R),
              MIN_IPM_R3=min(IPM_R3),
              MAX_IPM_R3=max(IPM_R3),
              MED_IPM_R4=median(IPM_R4),
              MEAN_IPM_R4=mean(IPM_R4),
              WMEAN_IPM_R4=sum((IPM_R4*YLD_TOT_R)/TOT_YLD_TOT_R),
              MIN_IPM_R4=min(IPM_R4),
              MAX_IPM_R4=max(IPM_R4),
              MED_IYCC_R2=median(IYCC_R2),
              MEAN_IYCC_R2=mean(IYCC_R2),
              MIN_IYCC_R2=min(IYCC_R2),
              MAX_IYCC_R2=max(IYCC_R2),
              MED_IYCC_R3=median(IYCC_R3),
              MEAN_IYCC_R3=mean(IYCC_R3),
              MIN_IYCC_R3=min(IYCC_R3),
              MAX_IYCC_R3=max(IYCC_R3),
              MED_IYCC_R4=median(IYCC_R4),
              MEAN_IYCC_R4=mean(IYCC_R4),
              MIN_IYCC_R4=min(IYCC_R4),
              MAX_IYCC_R4=max(IYCC_R4),
              MED_YLPH_R2=median(IPM_R2*CLF_R*(CY_R/CA_R)),
              MEAN_YLPH_R2=mean(IPM_R2*CLF_R*(CY_R/CA_R)),
              WMEAN_YLPH_R2=sum(((IPM_R2*CLF_R*(CY_R/CA_R))*YLD_TOT_R)/TOT_YLD_TOT_R),
              MIN_YLPH_R2=min(IPM_R2*CLF_R*(CY_R/CA_R)),
              MAX_YLPH_R2=max(IPM_R2*CLF_R*(CY_R/CA_R)),
              MED_YLPH_R3=median(IPM_R3*CLF_R*(CY_R/CA_R)),
              MEAN_YLPH_R3=mean(IPM_R3*CLF_R*(CY_R/CA_R)),
              WMEAN_YLPH_R3=sum(((IPM_R3*CLF_R*(CY_R/CA_R))*YLD_TOT_R)/TOT_YLD_TOT_R),
              MIN_YLPH_R3=min(IPM_R3*CLF_R*(CY_R/CA_R)),
              MAX_YLPH_R3=max(IPM_R3*CLF_R*(CY_R/CA_R)),
              MED_YLPH_R4=median(IPM_R4*CLF_R*(CY_R/CA_R)),
              MEAN_YLPH_R4=mean(IPM_R4*CLF_R*(CY_R/CA_R)),
              WMEAN_YLPH_R4=sum(((IPM_R4*CLF_R*(CY_R/CA_R))*YLD_TOT_R)/TOT_YLD_TOT_R),
              MIN_YLPH_R4=min(IPM_R4*CLF_R*(CY_R/CA_R)),
              MAX_YLPH_R4=max(IPM_R4*CLF_R*(CY_R/CA_R)),
              CELLS_R=sum(CELLS))
# NOTE(review): columns 73:105 are presumably the wheat variables -- confirm
# against the column layout of ALL.
WHEAT<-ALL[complete.cases(ALL[ ,73:105]),] # subsets ALL to give only wheat data
# First compute country totals, merge them back in, then use them as
# weights for the weighted means (WMEAN_*) in the summary that follows.
TOT_YLD_WHEAT<-ddply(WHEAT,.(NAME),summarise,
                     TOT_YLD_TOT_W=sum(YLD_TOT_W))
WHEAT<-merge(WHEAT,TOT_YLD_WHEAT, by=c("NAME"),all=TRUE,sort=TRUE)
WHEAT_c<-ddply(WHEAT,.(NAME),summarise,
MEAN_LAT_W=mean(LAT),
WMEAN_LAT_W=sum((LAT*YLD_TOT_W)/TOT_YLD_TOT_W),
MED_YLD_HA_W=median(YLD_HA_W),
MEAN_YLD_HA_W=mean(YLD_HA_W),
WMEAN_YLD_HA_W=sum((YLD_HA_W*YLD_TOT_W)/TOT_YLD_TOT_W),
MIN_YLD_HA_W=min(YLD_HA_W),
MAX_YLD_HA_W=max(YLD_HA_W),
TOT_YLD_TOT_W=sum(YLD_TOT_W),
TOT_CL2050_W2=sum(CL2050_W2),
TOT_CL2050_W3=sum(CL2050_W3),
TOT_CL2050_W4=sum(CL2050_W4),
MED_IPM_W2=median(IPM_W2),
MEAN_IPM_W2=mean(IPM_W2),
WMEAN_IPM_W2=sum((IPM_W2*YLD_TOT_W)/TOT_YLD_TOT_W),
MIN_IPM_W2=min(IPM_W2),
MAX_IPM_W2=max(IPM_W2),
MED_IPM_W3=median(IPM_W3),
MEAN_IPM_W3=mean(IPM_W3),
WMEAN_IPM_W3=sum((IPM_W3*YLD_TOT_W)/TOT_YLD_TOT_W),
MIN_IPM_W3=min(IPM_W3),
MAX_IPM_W3=max(IPM_W3),
MED_IPM_W4=median(IPM_W4),
MEAN_IPM_W4=mean(IPM_W4),
WMEAN_IPM_W4=sum((IPM_W4*YLD_TOT_W)/TOT_YLD_TOT_W),
MIN_IPM_W4=min(IPM_W4),
MAX_IPM_W4=max(IPM_W4),
MED_IYCC_W2=median(IYCC_W2),
MEAN_IYCC_W2=mean(IYCC_W2),
MIN_IYCC_W2=min(IYCC_W2),
MAX_IYCC_W2=max(IYCC_W2),
MED_IYCC_W3=median(IYCC_W3),
MEAN_IYCC_W3=mean(IYCC_W3),
MIN_IYCC_W3=min(IYCC_W3),
MAX_IYCC_W3=max(IYCC_W3),
MED_IYCC_W4=median(IYCC_W4),
MEAN_IYCC_W4=mean(IYCC_W4),
MIN_IYCC_W4=min(IYCC_W4),
MAX_IYCC_W4=max(IYCC_W4),
MED_YLPH_W2=median(IPM_W2*CLF_W*(CY_W/CA_W)),
MEAN_YLPH_W2=mean(IPM_W2*CLF_W*(CY_W/CA_W)),
WMEAN_YLPH_W2=sum(((IPM_W2*CLF_W*(CY_W/CA_W))*YLD_TOT_W)/TOT_YLD_TOT_W),
MIN_YLPH_W2=min(IPM_W2*CLF_W*(CY_W/CA_W)),
MAX_YLPH_W2=max(IPM_W2*CLF_W*(CY_W/CA_W)),
MED_YLPH_W3=median(IPM_W3*CLF_W*(CY_W/CA_W)),
MEAN_YLPH_W3=mean(IPM_W3*CLF_W*(CY_W/CA_W)),
WMEAN_YLPH_W3=sum(((IPM_W3*CLF_W*(CY_W/CA_W))*YLD_TOT_W)/TOT_YLD_TOT_W),
MIN_YLPH_W3=min(IPM_W3*CLF_W*(CY_W/CA_W)),
MAX_YLPH_W3=max(IPM_W3*CLF_W*(CY_W/CA_W)),
MED_YLPH_W4=median(IPM_W4*CLF_W*(CY_W/CA_W)),
MEAN_YLPH_W4=mean(IPM_W4*CLF_W*(CY_W/CA_W)),
WMEAN_YLPH_W4=sum(((IPM_W4*CLF_W*(CY_W/CA_W))*YLD_TOT_W)/TOT_YLD_TOT_W),
MIN_YLPH_W4=min(IPM_W4*CLF_W*(CY_W/CA_W)),
MAX_YLPH_W4=max(IPM_W4*CLF_W*(CY_W/CA_W)),
CELLS_W=sum(CELLS))
####BY REGION (original header said "subregion", but grouping is .(Region))
# Region-level maize summary.  Same statistic naming scheme as the
# country-level tables.  na.rm=TRUE is required here because ALL is not
# pre-filtered to complete maize cases, so maize columns contain NAs.
MAIZE_r<-ddply(ALL,.(Region),summarise,
MED_YLD_HA_M=median(YLD_HA_M,na.rm=TRUE),
MEAN_YLD_HA_M=mean(YLD_HA_M,na.rm=TRUE),
MIN_YLD_HA_M=min(YLD_HA_M,na.rm=TRUE),
MAX_YLD_HA_M=max(YLD_HA_M,na.rm=TRUE),
TOT_YLD_TOT_M=sum(YLD_TOT_M,na.rm=TRUE),
TOT_CL2050_M2=sum(CL2050_M2,na.rm=TRUE), #total crop loss due to climate change in region
TOT_CL2050_M3=sum(CL2050_M3,na.rm=TRUE),
TOT_CL2050_M4=sum(CL2050_M4,na.rm=TRUE),
MED_IPM_M2=median(IPM_M2,na.rm=TRUE),
MEAN_IPM_M2=mean(IPM_M2,na.rm=TRUE),
MIN_IPM_M2=min(IPM_M2,na.rm=TRUE),
MAX_IPM_M2=max(IPM_M2,na.rm=TRUE),
MED_IPM_M3=median(IPM_M3,na.rm=TRUE),
MEAN_IPM_M3=mean(IPM_M3,na.rm=TRUE),
MIN_IPM_M3=min(IPM_M3,na.rm=TRUE),
MAX_IPM_M3=max(IPM_M3,na.rm=TRUE),
MED_IPM_M4=median(IPM_M4,na.rm=TRUE),
MEAN_IPM_M4=mean(IPM_M4,na.rm=TRUE),
MIN_IPM_M4=min(IPM_M4,na.rm=TRUE),
MAX_IPM_M4=max(IPM_M4,na.rm=TRUE),
MED_IYCC_M2=median(IYCC_M2,na.rm=TRUE),
MEAN_IYCC_M2=mean(IYCC_M2,na.rm=TRUE),
MIN_IYCC_M2=min(IYCC_M2,na.rm=TRUE),
MAX_IYCC_M2=max(IYCC_M2,na.rm=TRUE),
MED_IYCC_M3=median(IYCC_M3,na.rm=TRUE),
MEAN_IYCC_M3=mean(IYCC_M3,na.rm=TRUE),
MIN_IYCC_M3=min(IYCC_M3,na.rm=TRUE),
MAX_IYCC_M3=max(IYCC_M3,na.rm=TRUE),
MED_IYCC_M4=median(IYCC_M4,na.rm=TRUE),
MEAN_IYCC_M4=mean(IYCC_M4,na.rm=TRUE),
MIN_IYCC_M4=min(IYCC_M4,na.rm=TRUE),
MAX_IYCC_M4=max(IYCC_M4,na.rm=TRUE),
MED_YLPH_M2=median(IPM_M2*CLF_M*(CY_M/CA_M),na.rm=TRUE), # yield (tonnes) lost per ha of planted area due to climate
MEAN_YLPH_M2=mean(IPM_M2*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MIN_YLPH_M2=min(IPM_M2*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MAX_YLPH_M2=max(IPM_M2*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MED_YLPH_M3=median(IPM_M3*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MEAN_YLPH_M3=mean(IPM_M3*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MIN_YLPH_M3=min(IPM_M3*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MAX_YLPH_M3=max(IPM_M3*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MED_YLPH_M4=median(IPM_M4*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MEAN_YLPH_M4=mean(IPM_M4*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MIN_YLPH_M4=min(IPM_M4*CLF_M*(CY_M/CA_M),na.rm=TRUE),
MAX_YLPH_M4=max(IPM_M4*CLF_M*(CY_M/CA_M),na.rm=TRUE),
CELLS_M=sum(CELLS,na.rm=TRUE))
# Region-level rice summary; structure parallels MAIZE_r above
# (na.rm=TRUE because ALL is not pre-filtered to complete rice cases).
RICE_r<-ddply(ALL,.(Region),summarise,
MED_YLD_HA_R=median(YLD_HA_R,na.rm=TRUE),
MEAN_YLD_HA_R=mean(YLD_HA_R,na.rm=TRUE),
MIN_YLD_HA_R=min(YLD_HA_R,na.rm=TRUE),
MAX_YLD_HA_R=max(YLD_HA_R,na.rm=TRUE),
TOT_YLD_TOT_R=sum(YLD_TOT_R,na.rm=TRUE),
TOT_CL2050_R2=sum(CL2050_R2,na.rm=TRUE), #total crop loss due to climate change in region
TOT_CL2050_R3=sum(CL2050_R3,na.rm=TRUE),
TOT_CL2050_R4=sum(CL2050_R4,na.rm=TRUE),
MED_IPM_R2=median(IPM_R2,na.rm=TRUE),
MEAN_IPM_R2=mean(IPM_R2,na.rm=TRUE),
MIN_IPM_R2=min(IPM_R2,na.rm=TRUE),
MAX_IPM_R2=max(IPM_R2,na.rm=TRUE),
MED_IPM_R3=median(IPM_R3,na.rm=TRUE),
MEAN_IPM_R3=mean(IPM_R3,na.rm=TRUE),
MIN_IPM_R3=min(IPM_R3,na.rm=TRUE),
MAX_IPM_R3=max(IPM_R3,na.rm=TRUE),
MED_IPM_R4=median(IPM_R4,na.rm=TRUE),
MEAN_IPM_R4=mean(IPM_R4,na.rm=TRUE),
MIN_IPM_R4=min(IPM_R4,na.rm=TRUE),
MAX_IPM_R4=max(IPM_R4,na.rm=TRUE),
MED_IYCC_R2=median(IYCC_R2,na.rm=TRUE),
MEAN_IYCC_R2=mean(IYCC_R2,na.rm=TRUE),
MIN_IYCC_R2=min(IYCC_R2,na.rm=TRUE),
MAX_IYCC_R2=max(IYCC_R2,na.rm=TRUE),
MED_IYCC_R3=median(IYCC_R3,na.rm=TRUE),
MEAN_IYCC_R3=mean(IYCC_R3,na.rm=TRUE),
MIN_IYCC_R3=min(IYCC_R3,na.rm=TRUE),
MAX_IYCC_R3=max(IYCC_R3,na.rm=TRUE),
MED_IYCC_R4=median(IYCC_R4,na.rm=TRUE),
MEAN_IYCC_R4=mean(IYCC_R4,na.rm=TRUE),
MIN_IYCC_R4=min(IYCC_R4,na.rm=TRUE),
MAX_IYCC_R4=max(IYCC_R4,na.rm=TRUE),
MED_YLPH_R2=median(IPM_R2*CLF_R*(CY_R/CA_R),na.rm=TRUE), # yield (tonnes) lost per ha of planted area due to climate
MEAN_YLPH_R2=mean(IPM_R2*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MIN_YLPH_R2=min(IPM_R2*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MAX_YLPH_R2=max(IPM_R2*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MED_YLPH_R3=median(IPM_R3*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MEAN_YLPH_R3=mean(IPM_R3*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MIN_YLPH_R3=min(IPM_R3*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MAX_YLPH_R3=max(IPM_R3*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MED_YLPH_R4=median(IPM_R4*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MEAN_YLPH_R4=mean(IPM_R4*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MIN_YLPH_R4=min(IPM_R4*CLF_R*(CY_R/CA_R),na.rm=TRUE),
MAX_YLPH_R4=max(IPM_R4*CLF_R*(CY_R/CA_R),na.rm=TRUE),
CELLS_R=sum(CELLS,na.rm=TRUE))
# Region-level wheat summary; structure parallels MAIZE_r/RICE_r above
# (na.rm=TRUE because ALL is not pre-filtered to complete wheat cases).
WHEAT_r<-ddply(ALL,.(Region),summarise,
MED_YLD_HA_W=median(YLD_HA_W,na.rm=TRUE),
MEAN_YLD_HA_W=mean(YLD_HA_W,na.rm=TRUE),
MIN_YLD_HA_W=min(YLD_HA_W,na.rm=TRUE),
MAX_YLD_HA_W=max(YLD_HA_W,na.rm=TRUE),
TOT_YLD_TOT_W=sum(YLD_TOT_W,na.rm=TRUE),
TOT_CL2050_W2=sum(CL2050_W2,na.rm=TRUE), #total crop loss due to climate change in region
TOT_CL2050_W3=sum(CL2050_W3,na.rm=TRUE),
TOT_CL2050_W4=sum(CL2050_W4,na.rm=TRUE),
MED_IPM_W2=median(IPM_W2,na.rm=TRUE),
MEAN_IPM_W2=mean(IPM_W2,na.rm=TRUE),
MIN_IPM_W2=min(IPM_W2,na.rm=TRUE),
MAX_IPM_W2=max(IPM_W2,na.rm=TRUE),
MED_IPM_W3=median(IPM_W3,na.rm=TRUE),
MEAN_IPM_W3=mean(IPM_W3,na.rm=TRUE),
MIN_IPM_W3=min(IPM_W3,na.rm=TRUE),
MAX_IPM_W3=max(IPM_W3,na.rm=TRUE),
MED_IPM_W4=median(IPM_W4,na.rm=TRUE),
MEAN_IPM_W4=mean(IPM_W4,na.rm=TRUE),
MIN_IPM_W4=min(IPM_W4,na.rm=TRUE),
MAX_IPM_W4=max(IPM_W4,na.rm=TRUE),
MED_IYCC_W2=median(IYCC_W2,na.rm=TRUE),
MEAN_IYCC_W2=mean(IYCC_W2,na.rm=TRUE),
MIN_IYCC_W2=min(IYCC_W2,na.rm=TRUE),
MAX_IYCC_W2=max(IYCC_W2,na.rm=TRUE),
MED_IYCC_W3=median(IYCC_W3,na.rm=TRUE),
MEAN_IYCC_W3=mean(IYCC_W3,na.rm=TRUE),
MIN_IYCC_W3=min(IYCC_W3,na.rm=TRUE),
MAX_IYCC_W3=max(IYCC_W3,na.rm=TRUE),
MED_IYCC_W4=median(IYCC_W4,na.rm=TRUE),
MEAN_IYCC_W4=mean(IYCC_W4,na.rm=TRUE),
MIN_IYCC_W4=min(IYCC_W4,na.rm=TRUE),
MAX_IYCC_W4=max(IYCC_W4,na.rm=TRUE),
MED_YLPH_W2=median(IPM_W2*CLF_W*(CY_W/CA_W),na.rm=TRUE), # yield (tonnes) lost per ha of planted area due to climate
MEAN_YLPH_W2=mean(IPM_W2*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MIN_YLPH_W2=min(IPM_W2*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MAX_YLPH_W2=max(IPM_W2*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MED_YLPH_W3=median(IPM_W3*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MEAN_YLPH_W3=mean(IPM_W3*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MIN_YLPH_W3=min(IPM_W3*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MAX_YLPH_W3=max(IPM_W3*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MED_YLPH_W4=median(IPM_W4*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MEAN_YLPH_W4=mean(IPM_W4*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MIN_YLPH_W4=min(IPM_W4*CLF_W*(CY_W/CA_W),na.rm=TRUE),
MAX_YLPH_W4=max(IPM_W4*CLF_W*(CY_W/CA_W),na.rm=TRUE),
CELLS_W=sum(CELLS,na.rm=TRUE))
# Attach region/subregion lookup columns to the country- and subregion-level
# summary tables, then write every summary table out as CSV.
setwd("~/Dropbox/climate change/food security/climate and crop pressure MS/data/ascii_crops_hires")
C_TO_R <- data.frame(read.csv("COUNTRY_TO_REGION.csv", header = TRUE))
# Country-level tables gain Region/Subregion columns via a NAME join.
for (tbl in c("MAIZE_c", "RICE_c", "WHEAT_c")) {
  assign(tbl, merge(C_TO_R, get(tbl), by = c("NAME"), all = TRUE, sort = TRUE))
}
# Collapse the country lookup to unique Region/Subregion pairs
# (summarySE is only used here to deduplicate; columns 1:2 are kept).
C_TO_R$NUM <- 1
SR_TO_R <- summarySE(C_TO_R, measurevar = "NUM", groupvars = c("Region", "Subregion"))
SR_TO_R <- SR_TO_R[1:2]
# Subregion-level tables gain the Region column via a Subregion join.
for (tbl in c("MED_sr", "MAIZE_sr", "RICE_sr", "WHEAT_sr")) {
  assign(tbl, merge(SR_TO_R, get(tbl), by = c("Subregion"), all = TRUE, sort = TRUE))
}
# Write all summary tables to the shared manuscript folder.
setwd("~/Dropbox/climate change/pest MS shared")
outputs <- list(
  "Medians by region.csv"    = MED_r,
  "Medians by subregion.csv" = MED_sr,
  "Medians by country.csv"   = MED_c,
  "MAIZE by country.csv"     = MAIZE_c,
  "MAIZE by subregion.csv"   = MAIZE_sr,
  "MAIZE by region.csv"      = MAIZE_r,
  "RICE by country.csv"      = RICE_c,
  "RICE by subregion.csv"    = RICE_sr,
  "RICE by region.csv"       = RICE_r,
  "WHEAT by country.csv"     = WHEAT_c,
  "WHEAT by subregion.csv"   = WHEAT_sr,
  "WHEAT by region.csv"      = WHEAT_r
)
for (f in names(outputs)) {
  write.table(outputs[[f]], file = f, sep = ",", col.names = NA)
}
|
8b43ac48b23af06c468849a88a5a808032f7cd99
|
6ee38ee6cfe82f0385330c9f27c148fdad82ca1d
|
/Tableau/IBRD Loan/Data Preprocessing.R
|
7e904cceab9999c0314c8410268b4c4da22da8c4
|
[] |
no_license
|
NitinNandeshwar/Cork-Institute-of-Technology
|
4f0aeeb78bf26e4cc65e17163c90b9791af3e45a
|
88c0efc6da1c5dd948a69d73a09a584e7d179c02
|
refs/heads/master
| 2020-12-27T06:36:13.387901
| 2020-11-05T18:16:59
| 2020-11-05T18:16:59
| 237,798,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,431
|
r
|
Data Preprocessing.R
|
################################################################
################################################################
### ###
### DATA INPUT AND Preprocessing ###
### ###
################################################################
################################################################
##=======================================================
## Libraries & Dataset information
##=======================================================
library(tidyverse)
library(caret)
library(VIM)
library(readxl)
# Loading the comma separated file
A <- read.csv("ibrd-statement-of-loans-historical-data.csv")
summary(A)
#----------Dimensions of Dataset----------------------------------
# BUG FIX: dim()[1] is the ROW count and dim()[2] the COLUMN count;
# the original printout had the two labels swapped.
cat("Number of Rows :", dim(A)[1], "\n", "Number of Columns :", dim(A)[2])
#--------- Column names of Dataset-------------------------------
colnames(A)
#----------Number of NA (Null) values in dataset -------------------
# Print, for every column of data frame `n` that contains missing
# values, the column name and its NA count.  Output goes to stdout
# via cat(); nothing useful is returned.
#   n -- a data frame
col_sumofNA <- function(n) {
  for (i in seq_len(ncol(n))) {      # seq_len() is safe even when ncol(n) == 0
    if (sum(is.na(n[i])) > 0)        # n[i] keeps a one-column data frame
      cat(colnames(n[i]), " contains ", sum(is.na(n[i])), " NA values \n")
  }
  invisible(NULL)
}
# Print per-column NA counts for the raw data
col_sumofNA(A)
# Visualization of missing values (VIM::aggr plots the NA pattern).
# Note aggr() is called twice: once for the plot alone, once stored in `a`.
aggr(A)
a <- aggr(A)
# Summary of missing values; element 2 of $missings is presumably the
# per-variable count column -- verify against VIM's summary output
summary(a)$missings[2]
##=============================================================
## DATA CLEANING
##=============================================================
#----------------- Deleting columns from a dataset-------------
# Column Currency.of.Commitment doesn't contain any data.
# Negated %in% replaces -which(): if a listed column were ever absent,
# -which() would return -integer(0) and silently drop EVERY column.
B <- A[ , !(names(A) %in% c("Currency.of.Commitment"))]
# Unique identifier columns not containing any useful information
B <- B[ , !(names(B) %in% c("Loan.Number",
                            "Country.Code",
                            "Guarantor.Country.Code",
                            "Project.ID"))]
# Dimensions of new dataset (dim()[1] = rows, dim()[2] = columns;
# the original labels were swapped)
cat("Number of Rows :", dim(B)[1], "\n", "Number of Columns :", dim(B)[2])
#-------- Unique factor level in Loan Status column ---------
# Number of Factors in loan status column
str(B$Loan.Status)
# Factor levels are not unique (whitespace variants of the same label)
levels(B$Loan.Status)
# Strip whitespace from every factor column without changing its class.
# funs() is deprecated in dplyr; a purrr-style lambda is used instead.
B <- B %>% mutate_if(is.factor, ~ factor(trimws(.x)))
# Unique factor levels in Loan status column after trimming
levels(B$Loan.Status)
#--------------------Near Zero Variance -----------------------
# Removing NA rows for finding near-zero-variance columns
C <- na.omit(B)
# Dimensions of new dataset
cat("Number of Rows :", dim(C)[1], "\n", "Number of Columns :", dim(C)[2])
# Finding near zero variance columns (caret::nearZeroVar)
nzv_1 <- nearZeroVar(C, freqCut = 15, uniqueCut = 12)
nzv_1
# Removing near zero variance columns from original dataset
# containing NA values
filter_NZV <- B[, -nzv_1]
#filter_NZV <- C[,-c(9,11,12,14,15,16,17,18,19,20,21,28)]
# Dimensions of new dataset
cat("Number of Rows :", dim(filter_NZV)[1], "\n", "Number of Columns :", dim(filter_NZV)[2])
#-----------------Adding Continent Column --------------------
C <- filter_NZV
# loading the Country-to-Continent lookup (an Excel workbook)
Continent <- read_xlsx("Countries.xlsx")
str(Continent)
# Matching Country with Continent by exact name.
# NOTE(review): this is an O(rows x countries) double loop with cat()
# progress output; match() or a join would do the same lookup in one
# vectorized pass.
C$continent=0
for (i in c(1:nrow(C))) {
for (j in c(1:nrow(Continent))) {
if(C$Country[i]==Continent$Country[j]){
cat(i,Continent$Continent[j],"\n")
C$continent[i]= Continent$Continent[j]
}
}
}
# Keep the rows whose Country found no exact match (continent still "0")
z <- C %>%
filter(continent=="0")
# Second pass: partial keyword match for the unmatched countries.
# NOTE(review): grep() treats the lookup country name as a regular
# expression, so names containing regex metacharacters could mis-match;
# fixed = TRUE would be safer -- confirm before changing behavior.
for (i in c(1:nrow(z))) {
for (j in c(1:nrow(Continent))) {
if(any(grep(Continent$Country[j],z$Country[i]))){
cat(i,Continent$Continent[j],"\n")
z$continent[i]= Continent$Continent[j]
}
}
}
# Drop rows that still have no continent after both passes
zy <- z %>%
filter(!continent=="0")
zz <- C %>%
filter(!continent=="0")
# Combine exact matches (zz) with keyword matches (zy)
Final <- rbind(zz,zy)
#----------------------Final csv -------------------
write.csv(Final,"Final Data.csv")
a <- aggr(Final)
# summary of missing values
summary(a)$missings[2]
|
083bf9d57ef27b3293858abeabfc85c23f9584b9
|
6e4f004782186082b73025cda95f31bcae76afcf
|
/man/gl.assign.Rd
|
ba22d853915485ef2ad110e3ee5e640adaec8154
|
[] |
no_license
|
carlopacioni/dartR
|
319fbff40a385ca74ab7490b07857b0b027c93a8
|
06614b3a328329d00ae836b27616227152360473
|
refs/heads/master
| 2023-08-23T00:32:10.850006
| 2021-09-08T06:52:44
| 2021-09-08T06:52:44
| 262,468,788
| 0
| 0
| null | 2020-05-09T02:07:08
| 2020-05-09T02:07:07
| null |
UTF-8
|
R
| false
| true
| 4,253
|
rd
|
gl.assign.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gl.assign.r
\name{gl.assign}
\alias{gl.assign}
\title{Assign an individual of unknown provenance to population}
\usage{
gl.assign(
x,
unknown,
nmin = 10,
dim = NULL,
alpha = 0.05,
threshold = 0,
verbose = 3
)
}
\arguments{
\item{x}{-- name of the input genlight object [required]}
\item{unknown}{-- identity label of the focal individual whose provenance is unknown [required]}
\item{nmin}{-- minimum sample size for a target population to be included in the analysis [default 10]}
\item{dim}{-- number of dimensions to retain in the dimension reduction [default k, number of populations]}
\item{alpha}{-- probability level for bounding ellipses in the PCoA plot [default 0.05]}
\item{threshold}{-- populations to retain for consideration; those for which the focal individual has less than or equal to threshold loci with private alleles [default 0]}
\item{verbose}{-- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2 or as specified using gl.set.verbosity]}
}
\value{
A genlight object containing the focal individual (assigned to population "unknown") and populations for which the focal individual is not distinctive (number of loci with private alleles less than or equal to threshold t).
}
\description{
This script assigns an individual of unknown provenance to one or more target populations based on first, an analysis
of private alleles, and then, if the assignment remains ambiguous, on the basis of a weighted likelihood index.
}
\details{
The algorithm first identifies those target populations for which the individual has no private alleles. If no single
population emerges from this analysis, or if a higher threshold than 0 is chosen for the number of tollerable private
alleles, then the following process is followed.
(a) The space defined by the loci is ordinated to yield a series of orthogonal axes (independent), a necessary condition
for combining likelihoods calculated from each axis.
(b) A workable subset of dimensions is chosen, normally equal to the number of target populations or the number of dimensions
with substantive eigenvalues, whichever is the smaller.
(c) The log-likelihood of the value for the unknown on each axis is calculated, weighted by the eigenvalue for that axis,
and summed over all dimensions as an assignment index. The assignment index is calculated for a point on the boundary of
the 95% (or as specified) confidence envelope.
There are three considerations to the assignment. First, consider only those populations for which the unknown has no
private alleles. Private alleles are an indication that the unknown does not belong to a target population (provided that
the sample size is adequate, say >=10).
Second, consider the PCoA plot for populations where no private alleles have been
detected and the position of the unknown in relation to the confidence ellipses. Note, this is considering only the
top two dimensions of the ordination, and so an unknown lying outside the confidence ellipse can be interpreted as it lying
outside the confidence envelope. However, if the unknown lies inside the confidence ellipse in two dimensions, then it may still lie outside
the confidence envelope. This is good for eliminating populations from consideration, but does not provide confidence in
assignment.
Third, consider the assignment probabilities. This approach calculates the squared Generalised Linear Distance (Mahalanobis
distance) of the unknown from the centroid for each population, and calculates the probability associated with its quantile
under the zero truncated normal distribution. This index takes into account position of the unknown in relation to the
confidence envelope in all selected dimensions of the ordination.
Each of these approaches provides evidence, none are 100% definitive. They need to be interpreted cautiously.
}
\examples{
# Test run with a focal individual from the Macleay River (EmmacMaclGeor)
x <- gl.assign(testset.gl, unknown="UC_00146", nmin=10,
alpha=0.05, threshold=1)
}
\author{
Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
}
|
d0fb4e895f5633a67060d09b2fd57bb9e24cfacb
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/models/stics/man/model2netcdf.STICS.Rd
|
7fc4ce7524aca139b62c83da27566c720e6e8564
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| true
| 728
|
rd
|
model2netcdf.STICS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model2netcdf.STICS.R
\name{model2netcdf.STICS}
\alias{model2netcdf.STICS}
\title{Code to convert STICS' output into netCDF format}
\usage{
model2netcdf.STICS(
outdir,
sitelat,
sitelon,
start_date,
end_date,
overwrite = FALSE
)
}
\arguments{
\item{outdir}{Location of model output}
\item{sitelat}{Latitude of the site}
\item{sitelon}{Longitude of the site}
\item{start_date}{Start time of the simulation}
\item{end_date}{End time of the simulation}
\item{overwrite}{Whether or not to overwrite existing output files}
}
\description{
Convert STICS output into the NACP Intercomparison format (ALMA using netCDF)
}
\author{
Istem Fer
}
|
b936110458234ae71850c8d0673ba02b3d66de47
|
dac4a8f2b14dbb92dd07e9ca9642410ae407a2f2
|
/man/event1s.df.Rd
|
d6d77c85afd485c0d32df975661f5fd2cf13bc8c
|
[] |
no_license
|
dstgithub/GrpString
|
0710f0b5d1e8a90ee1e94e5a2f6facb19bc48c97
|
45b4da9cc59c71ddb8b53d7b6753665b7ff960fe
|
refs/heads/master
| 2021-01-12T03:26:45.555515
| 2017-11-15T21:40:25
| 2017-11-15T21:40:25
| 78,210,123
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
rd
|
event1s.df.Rd
|
\name{event1s.df}
\alias{event1s.df}
\docType{data}
\title{
Data frame containing event names
}
\description{
A data frame containing event names. There are 45 rows.
Each row has 26 event names.
}
\usage{data(event1s.df)}
\format{
A data frame with 45 observations or rows.
}
\note{
The event names are from an eye tracking study.
Thus, each event name is actually an area of interest (AOI).
}
\examples{
data(event1s.df)
}
\keyword{datasets}
|
90556f6a13a58393a7a5dda31349ca1785dd767f
|
189a7cf9828675253d12d941a80623c137ecc74f
|
/man/createLabtestDataFrame.Rd
|
fc90d7519d6bafa4b9afef07c50ace80795959f5
|
[] |
no_license
|
OHDSI/Cert
|
773415562764f9aa1fff9dd944810539ebb18cdd
|
f1584475686b9664e9e0d086cc365218ab4e4921
|
refs/heads/master
| 2020-04-16T19:41:52.600806
| 2016-08-30T06:06:44
| 2016-08-30T06:06:44
| 36,716,779
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 531
|
rd
|
createLabtestDataFrame.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cert.R
\name{createLabtestDataFrame}
\alias{createLabtestDataFrame}
\title{Create data frame for a laboratory test information}
\usage{
createLabtestDataFrame(id, name, type)
}
\arguments{
\item{id}{OMOP CONCEPT ID of laboratory test}
\item{name}{Name of Laboratory test
this name will be used for aggregation}
\item{type}{Laboratory test abnormality type}
}
\value{
A data frame
}
\description{
Create data frame for a laboratory test information
}
|
68217ab6827d593dacc3a1b8afff3f0eddbd821e
|
e646416a1bbc302f73d2fdcbe78c5a8069e40fc8
|
/metacommunity/mc_dit.R
|
59a9e3cbbaaed88854be50190e905f25389087b5
|
[
"MIT"
] |
permissive
|
jusinowicz/info_theory_eco
|
c0ef0c0f94eca2df3b7308098f05b72233261c43
|
b0770b10464732aa32d13f46ba3c5ef958a74dcc
|
refs/heads/master
| 2022-05-28T19:34:50.642858
| 2022-05-05T17:37:19
| 2022-05-05T17:37:19
| 140,295,569
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,020
|
r
|
mc_dit.R
|
#=============================================================================
#Load libraries
#=============================================================================
library(tidyverse)
library(RandomFields)
library(vegan)
#Patrick Thompson's metacommunity model functions:
source("./mcomsimr-master/R/MC_simulate.R")
source("./mcomsimr-master/R/sim_setup.R")
source("../info_theory_functions/info_theory_functions.R")
#=============================================================================
#=============================================================================
# I. Run the metacommunity models!
#=============================================================================
###This is an example of just running the model.
# a1=simulate_MC(patches = 16, species = 1, env1Scale = 100, dispersal = 0.001, min_inter = 1, max_inter = 1, env_niche_breadth = 1)
# env.df = a1$env.df
# g<-ggplot(env.df, aes(x = time, y = env1, group = patch, color = factor(patch)))+
# geom_line()+
# scale_color_viridis_d(guide=F)
# print(g)
############################################################
### Remove any temporal variance in the landscape
### (cannot figure out how to do this with RandomFields):
############################################################
# Simulation constants
patches = 16
nspp = 2
landscape = landscape_generate(patches = patches, plot = TRUE)
var_b = 1
env1Scale = 100
timesteps = 1200
burn_in = 1200
initialization = 200
#Make the env.df outside of the simulate_MC function with RandomFields.
#This code makes a single time realization of the RandomFields model, then just repeats it over the right number of
#time steps (timesteps + burn_in)
model = RMexp(var=var_b, scale=env1Scale) + # exponential covariance with variance var_b and scale env1Scale
RMnugget(var=0) + # nugget
RMtrend(mean=0.01) # and mean
RF = RFsimulate(model = model, x = landscape$x*10, y = landscape$y*10, spConform=FALSE)
# env1 (length = patches) and patch are recycled by data.frame() across the
# full time vector, so every time step re-uses the same spatial field.
env.df = data.frame(env1 = decostand(RF,method = "range"), patch = 1:nrow(landscape), time = rep(1:(timesteps+burn_in), each = nrow(landscape)))
env.initial = env.df[env.df$time == 1,]
#I've added the "constant = TRUE" option to the function to remove demographic stochasticity.
#Added mortality to the model. With m = 0, this is the original model. This was necessary to get extinction without
#stochasticity.
mcm1 =simulate_MC(patches = patches , species = nspp, env1Scale = env1Scale, dispersal = .000001, min_inter = 1.1, max_inter = 1.1,
env_niche_breadth = 1, max_r = c(5,4), m = 0.5, env.df = env.df, landscape = landscape, constant = TRUE)
#Plot the environment by patch
g<-ggplot(mcm1$dynamics.df, aes(x = time, y = env, group = patch, color = factor(patch)))+
geom_line()+
scale_color_viridis_d(guide=F)
print(g)
############################################################
#=============================================================================
#=============================================================================
# II. Get the information theoretic metrics!
# There are two levels of metrics: a per-species environmental
# information, and a community-level environmental information.
#=============================================================================
###Environmental information per species:
#1. Make a time series out of the environmental state and the population.
# This is to get the per-species environmental information:
series1 = data.frame(cbind (env = as.factor(mcm1$dynamics.df$env), species = mcm1$dynamics.df$species, N = mcm1$dynamics.df$N) ,
time = mcm1$dynamics.df$time)
series1 = series1[series1$time>=1,]
for (s in 1:nspp){
for (t in 1:(timesteps)) {
#s1_temp = subset(series1, species == s & time == t)
s1_temp = spread( subset(series1, time == t), species, N)
#s1_temp = subset(series1, time == t)
s1_temp = s1_temp[,colnames(s1_temp)!="time" ]
#Expand this into an n-dimensional table where each species is
#considered jointly with every other (for interactions?)
#I think this is the right way to expand this.
# 1. When you look at s2_temp[1,,], it should be like a matrix of pairwise interactions
# per environment
# 2. If these were e.g. invasions, then s2_temp[,1,] gives species 1 invading into
# every other species, while s2_temp[,,1] shows 1 as resident and species invading
# into 1.
s2_temp = array(as.matrix(s1_temp[,2:(nspp+1)] ), dim= c(dim(s1_temp)[1], nspp,nspp) )
#Joint probability distribution:
#Now there are two interesting dimensions of information: one in the p(Ni|E), and the other
#in p(Ni|Nj). Also the simultaneous consideration as p(Ni|E, Nj).
#Also, without the actual factorial treatment of both intra and interspecific environment
#treatments there is an aspect of the information that is missing.
s1_joint = prop.table(s2_temp)
}
}
#2. Make an alphabet out of the possible environmental states.  For now, just
#   round to 2 digits so anything at that level of similarity gets lumped
#   together (done at the bottom of this section: env_approx / env_alph).
#   The original file had this comment's second half interleaved into the
#   code below as a bare, syntactically invalid line; that is fixed here.
# Accumulators for the information-theoretic output
# (Shannon entropy, Mutual Information, Conditional Entropy):
SD <- NULL
MI <- NULL
CE <- NULL
nseries <- dim(series1)[2]  # Assuming each column is its own time series
ngens <- dim(series1)[1]    # Time series length
blocks_k <- matrix(0.0, ngens, nseries)  # marginal of X(k)
# Need some additional pre-formatting to make this work:
# Find number of digits in the largest integer:
width_A <- nchar(trunc(max(series1)))
k <- 1  # This should always be 1 for the classic MMI
######################
# p( X(k) )
######################
for (f in seq_len(nseries)) {
  blocks_k[, f] <- get_k(series1[, f], k, width_A)
}
marg_k <- lapply(apply(blocks_k, 2, table), prop.table)
######################
# p( X(k),P(k) )
######################
joint_kp <- c(prop.table(table(as.data.frame(series1))))
# Multi-information (MMI): sum of the marginal entropies minus the joint
# entropy.  BUG FIX: the original referenced undefined objects X_probs and
# X_joint; marg_k and joint_kp computed above are the corresponding
# marginal and joint distributions.
MMI1 <- sum(unlist(lapply(marg_k, shannon_D2))) - shannon_D2(joint_kp)
# Alphabet of environmental states, rounded to 2 digits (see comment above).
env_approx <- round(unique(mcm1$dynamics.df$env), 2)
env_alph <- get_k(env_approx, k = 1, width_A = 2)
|
463908a84bdbfdad6e8b5c70e8dd31b04bd882ec
|
b700538ed9715b7a0e208097d07d044dd84e6e02
|
/R4CouchDB/man/cdbAddAttachment.Rd
|
ec1112587d15caaf451a1d6457399d7bc6d9d80f
|
[] |
no_license
|
lmilev/R4CouchDB
|
75849f83e61fc8d74bd2120c2188fffde7b8cca4
|
e5450d23ca2187c1fc4cfb27714daa31ab5c4781
|
refs/heads/master
| 2021-01-24T09:57:20.211744
| 2017-03-05T16:26:12
| 2017-03-05T16:26:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,542
|
rd
|
cdbAddAttachment.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/cdbAddAttachment.R
\name{cdbAddAttachment}
\alias{cdbAddAttachment}
\title{Add attachments}
\usage{
cdbAddAttachment(cdb)
}
\arguments{
\item{cdb}{The list \code{cdb} has to contain
\code{cdb$fileName},\code{cdb$serverName}, \code{cdb$DBName} and a
\code{cdb$id}.}
}
\value{
\item{cdb}{The result is stored in \code{cdb$res} }
}
\description{
This function adds attachments to a database document that already
exists.
}
\details{
The function uses the \code{RCurl}- function
\code{guessMIMEType()} to do exactly this: guessing the mime type of
\code{cdb$fileName}.
If the switch \code{cdb$attachmentsWithPath} is set to \code{TRUE}
the attachment is saved with the path. This behavior is default
since version 0.2.5 of R4CouchDB
}
\examples{
\dontrun{
ccc <- cdbIni(DBName="r4couch_db")
ccc$dataList <- list(normalDistRand = rnorm(20))
ccc <- cdbAddDoc(ccc)
# make a 3d plot (stolen from ?persp)
x <- seq(-10, 10, length= 30)
y <- x
f <- function(x,y) {r <- sqrt(x^2+y^2); 10 * sin(r)/r }
z <- outer(x, y, f)
z[is.na(z)] <- 1
op <- par(bg = "black")
ccc$fileName <- "3dplot.pdf"
pdf(ccc$fileName)
persp(x, y, z,
theta = 30,
phi = 30,
expand = 0.5,
col = "lightblue")
dev.off()
# add the plot as attachment to the database
# it works via ccc$fileName
ccc <- cdbAddAttachment(ccc)
}
}
\author{
wactbprot
}
\keyword{misc}
|
f06d8bdb26397fcaf26b117eda506f8a584c9ad7
|
714287e4d7253d7ae41c2e178bf405e06cee7587
|
/oishi.week3.R
|
cf3f67abf6ff192be1d38dcc66e935fde71cf677
|
[] |
no_license
|
tfoishi/com521_tanya
|
7186a64ab40387d524a42d82ede9fe64b6420dc9
|
55fa22b5ba9b8abfe4170267aa1d2c4081624fd5
|
refs/heads/master
| 2021-04-29T06:20:04.481754
| 2017-02-21T23:07:10
| 2017-02-21T23:07:10
| 77,967,777
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,379
|
r
|
oishi.week3.R
|
#PC 4 -- basic descriptives of the week-3 dataset
#Next time do more notes.
summary(week3_dataset.tanya)
ncol(week3_dataset.tanya)
nrow(week3_dataset.tanya)
sd(week3_dataset.tanya$x)
sd(week3_dataset.tanya$y)
var(week3_dataset.tanya$x)
var(week3_dataset.tanya$y)
hist(week3_dataset.tanya$x)
hist(week3_dataset.tanya$j)
hist(week3_dataset.tanya$i)
hist(week3_dataset.tanya$k)
hist(week3_dataset.tanya$y)
table(week3_dataset.tanya$j)
table(week3_dataset.tanya$i)
table(week3_dataset.tanya$k)
#PC5 -- compare a single-column summary against the week-2 dataset
summary(week3_dataset.tanya$x)
summary(week2.dataset)
#PC6 -- recode columns: k becomes a factor, i and j become logical.
# NOTE(review): k is built from column j, not from column k -- confirm
# this is intentional and not a typo.
week3_dataset.tanya$k <- factor(week3_dataset.tanya$j)
week3_dataset.tanya$i <- as.logical(week3_dataset.tanya$i)
week3_dataset.tanya$j <- as.logical(week3_dataset.tanya$j)
ggplot(data = week3_dataset.tanya) + geom_point() + aes(x = x, y = y, color = i, shape = j, size = k)
#PC 7 -- give the factor levels of k descriptive labels
levels(week3_dataset.tanya$k)[levels(week3_dataset.tanya$k) == "3"] <- "all"
levels(week3_dataset.tanya$k)[levels(week3_dataset.tanya$k) == "2"] <- "lots"
levels(week3_dataset.tanya$k)[levels(week3_dataset.tanya$k) == "1"] <- "some"
levels(week3_dataset.tanya$k)[levels(week3_dataset.tanya$k) == "0"] <- "none"
week3_dataset.tanya$k
#PC 8 -- recode column i: mark zeros (FALSE) as missing, then back-fill
# the NAs with 0.
week3_dataset.tanya[week3_dataset.tanya$i == 0, "i"] <- NA
# BUG FIX: the original indexed the whole data frame with a row-length
# mask and no comma (which selects COLUMNS, not rows); the evident intent
# is to replace the NAs just created in column i.
week3_dataset.tanya$i[is.na(week3_dataset.tanya$i)] <- 0
#PC 9 -- re-inspect after recoding
#now they are frequency
summary(week3_dataset.tanya)
ggplot(data = week3_dataset.tanya) + geom_point() + aes(x = x, y = y, color = i, shape = j, size = k)
f5af84c6ad088d88f2ae35663711765b8d621808
|
01e6f98609708ebdfd6d1db5fda9cb443f9f7856
|
/man/year-day-arithmetic.Rd
|
68500ad98376cd4963c59c7f7b3fa75ddcc8b779
|
[
"MIT"
] |
permissive
|
isabella232/clock-2
|
3258459fe4fc5697ce4fb8b54d773c5d17cd4a71
|
1770a69af374bd654438a1d2fa8bdad3b6a479e4
|
refs/heads/master
| 2023-07-18T16:09:11.571297
| 2021-07-22T19:18:14
| 2021-07-22T19:18:14
| 404,323,315
| 0
| 0
|
NOASSERTION
| 2021-09-08T13:28:17
| 2021-09-08T11:34:49
| null |
UTF-8
|
R
| false
| true
| 1,777
|
rd
|
year-day-arithmetic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gregorian-year-day.R
\name{year-day-arithmetic}
\alias{year-day-arithmetic}
\alias{add_years.clock_year_day}
\title{Arithmetic: year-day}
\usage{
\method{add_years}{clock_year_day}(x, n, ...)
}
\arguments{
\item{x}{\verb{[clock_year_day]}
A year-day vector.}
\item{n}{\verb{[integer / clock_duration]}
An integer vector to be converted to a duration, or a duration
corresponding to the arithmetic function being used. This corresponds
to the number of duration units to add. \code{n} may be negative to subtract
units of duration.}
\item{...}{These dots are for future extensions and must be empty.}
}
\value{
\code{x} after performing the arithmetic.
}
\description{
These are year-day methods for the
\link[=clock-arithmetic]{arithmetic generics}.
\itemize{
\item \code{add_years()}
}
Notably, \emph{you cannot add days to a year-day}. For day-based arithmetic,
first convert to a time point with \code{\link[=as_naive_time]{as_naive_time()}} or \code{\link[=as_sys_time]{as_sys_time()}}.
}
\details{
\code{x} and \code{n} are recycled against each other.
}
\examples{
x <- year_day(2019, 10)
add_years(x, 1:5)
# A valid day in a leap year
y <- year_day(2020, 366)
y
# Adding 1 year to `y` generates an invalid date
y_plus <- add_years(y, 1)
y_plus
# Invalid dates are fine, as long as they are eventually resolved
# by either manually resolving, or by calling `invalid_resolve()`
# Resolve by returning the previous / next valid moment in time
invalid_resolve(y_plus, invalid = "previous")
invalid_resolve(y_plus, invalid = "next")
# Manually resolve by setting to the last day of the year
invalid <- invalid_detect(y_plus)
y_plus[invalid] <- set_day(y_plus[invalid], "last")
y_plus
}
|
2512487df8adfe4810509d585800d8feabb42d42
|
07283623f9530c8c1ac7408eb099059d6deb7919
|
/man/salvage_model.Rd
|
caab4724f2eabc1a5e5ef56ee763740afc97e6b8
|
[] |
no_license
|
hinkelman/DSM2Analysis
|
75314f00a8a0a0723d0f43813558148b29c80035
|
ebbe09bb57f504b6e1acb8f2939d5491b1abae4a
|
refs/heads/master
| 2023-04-30T16:50:14.140023
| 2021-05-12T20:12:59
| 2021-05-12T20:12:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 365
|
rd
|
salvage_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/salvage_model.R
\name{salvage_model}
\alias{salvage_model}
\title{Salvage model}
\usage{
salvage_model(facility)
}
\arguments{
\item{facility}{Water export facility: CVP, SWP, both}
}
\description{
Zero-inflated model of salvage as function of Freeport flow, exports, and fish length.
}
|
ce0bbdc4ec3190d1669781f6b61c1e4a094ac151
|
8ecb53df18a2d1a3975368baaa892a5fc01086f8
|
/general_analysis.R
|
8d0f5525546a5f337fffc57ef7bf840b0274e20b
|
[] |
no_license
|
AndersenLab/CrossSim
|
1a39f48e18e447eed296b5587f51b4ce6d06d4b5
|
7d8f47107004f40d8bd1cd32450bb6188c5f41f9
|
refs/heads/master
| 2021-01-20T16:55:08.556304
| 2015-06-11T19:01:48
| 2015-06-11T19:01:48
| 16,160,349
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 251
|
r
|
general_analysis.R
|
# Plot back-cross simulation statistics: percent of the selected chromosome
# retained as a function of the number of back-crosses.
library(ggplot2)  # the script calls qplot() but never attached ggplot2

# Read the simulation output and order rows by number of back-crosses.
data <- read.csv(file = "~/GitHub/CrossSim/general_statistics_1000_1.csv", header = TRUE, sep = ',')
data <- data[order(data$Number.of.Back.Crosses), ]
# Jittered scatter plot of the two statistics of interest.
qplot(data$Number.of.Back.Crosses, data$Percent.Selected.Chromosome, data = data, geom = "jitter")
|
3b7ed817b6b61bc05840e8d9ced852867ddab8ff
|
b47aa2e09add49ab85ec3b04c3e3279f28706c1c
|
/man/tailindexplot.Rd
|
e8459440ce7f71a668b070ba4f46d0469da35d3e
|
[] |
no_license
|
ceesfdevalk/EVTools
|
db232bc94b0a22b1a0fdfbd8ba5e6e9e94e8ad3c
|
0e3440f031b6a8abcfd6fc00d981d0656710d93e
|
refs/heads/master
| 2022-09-30T06:25:51.784236
| 2022-08-22T07:48:01
| 2022-08-22T07:48:01
| 130,972,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 754
|
rd
|
tailindexplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tailindexplot.R
\name{tailindexplot}
\alias{tailindexplot}
\title{tailindexplot}
\usage{
tailindexplot(es, params)
}
\arguments{
\item{es}{list containing tail estimates from a single sample}
\item{params}{(optional) list (see below)}
}
\value{
A plot file (.png)
}
\description{
Plot of tail index estimates with confidence interval.
}
\details{
The parameter list params may contain:
\itemize{
\item{$pconf: coverage probability of confidence interval (0.9 by default) (double(1))}
\item{$plim: plot limits for sample fraction (double(2))}
\item{$tailindexlim: plot limits for tail index (double(2))}
}
}
\author{
Cees de Valk \email{ceesfdevalk@gmail.com}
}
|
0b046eea5866b1a941f8efe98511f8b6469e23b4
|
07a42f5c19d8007013051d1f825d5d782249494b
|
/R/Mooran.R
|
408a6247def9a457976d0a9afd061305c2e0bf3f
|
[] |
no_license
|
jcms2665/WorkshopR_3
|
b5267858e91c11c6020a6beb42bbda014b9daa38
|
57559b55956e0a6cebe22042d2c73a7878896ea1
|
refs/heads/master
| 2021-04-30T01:00:19.736606
| 2018-02-14T08:39:54
| 2018-02-14T08:39:54
| 121,471,750
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 4,724
|
r
|
Mooran.R
|
# Spatial autocorrelation analysis of robberies ("robos") with Moran's I:
# choropleth map, Moran scatter plot, global and local Moran's I, and a
# significance map of the local statistic.
library(rgdal)
library(sp)
library(GISTools)
library(RColorBrewer)
library(ggplot2)
library(reshape2)
library(grid)
library(gridExtra)
library(foreign)
library(spdep)
# List of required packages:
list.of.packages <- c("rgdal", "sp", "GISTools", "RColorBrewer", "ggplot2",
"reshape2", "grid", "gridExtra")
# Check which of them are not installed yet
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
# Install anything that is missing
if (length(new.packages)) install.packages(new.packages)
# Set the working directory (machine-specific path)
setwd("C:/Users/jmartinez/Desktop/ANA/teTra-Red-master/teTra-Red-master")
# Read the robberies point layer from the "data" folder
robos <- readOGR("data", "robos")
colnames(robos@data)
# Set margins so the plot uses the whole device area
par(mar = c(0, 0, 1.5, 0))
# Define a shading scheme that colors the map by standard-deviation
# cut points
shades <- auto.shading(robos$N_robos, cutter = sdCuts, n = 6, cols = rev(brewer.pal(6,"RdYlBu")))
# Draw the thematic (choropleth) map
choropleth(robos, robos$N_robos, shades)
# Add a title
title(main = "Número de robos",cex.main = 0.75)
# 2. Moran's I
# Packages:
list.of.packages <- c("spdep")
# Check which of them are not installed yet
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,
"Package"])]
# Install anything that is missing
if (length(new.packages)) install.packages(new.packages)
# Load the boundary layer
frontera <- readOGR("data", "BOUNDARY")
# Find each point's single nearest neighbor
k1 <- knn2nb(knearneigh(robos, k = 1, longlat = TRUE))
# Maximum nearest-neighbor distance, used as the neighborhood radius
distancia <- max(unlist(nbdists(k1, robos)))
# Neighbors within that distance band
vecinos <- dnearneigh(robos, 0, distancia)
# Spatial weights list
robos.lw <- nb2listw(vecinos)
# Standardize the variable of interest
robos$zScaled <- scale(robos$N_robos)
# Compute and store the spatially lagged variable
robos$lagZ <- lag.listw(robos.lw, robos$N_robos)
# Moran scatter plot
plot(robos$zScaled, robos$lagZ)
# Axes through the origin
abline(h = 0, v = 0)
# Linear fit between the two variables
abline(lm(robos$lagZ ~ robos$zScaled), lty = 2, lwd = 2, col = "red")
title("Diagrama de dispersión de Moran")
# Same plot with both variables centered/standardized
plot(robos$zScaled,robos$lagZ-mean(robos$lagZ))
# or equivalently: plot(robos$zScaled,scale(robos$lagZ))
abline(h = 0, v = 0)
abline(lm(robos$lagZ-mean(robos$lagZ) ~ robos$zScaled), lty = 2, lwd = 2, col = "red")
title("Diagrama de dispersión de Moran")
# Global Moran's I test
moran.test(robos$N_robos, robos.lw)
# Local Moran's I
lmoran <- localmoran(robos$N_robos, robos.lw)
# Cluster map
# Quadrant vector
# NOTE(review): `cuadrante`, `significancia`, `centerZ` and `centerLag` are
# defined but never used below -- confirm whether a cluster (quadrant) map
# was meant to be drawn here.
cuadrante <- vector(mode = "numeric", length = nrow(lmoran))
# Significance threshold
significancia <- 0.05
# Center the variable of interest around its mean
centerZ <- scale(robos$N_robos) # same as (robos$Z-mean(robos$Z))/sd(robos$Z)
# Center the local Moran's I around its mean
# NOTE(review): robos$lagZscaled is never created above (only lagZ and
# zScaled exist), so centerLag is NULL -- confirm intent.
centerLag <- robos$lagZscaled
# Colors for the significance levels
colorsPValue <- c(rgb(0.74, 0.74, 0.74), rgb(0.22, 0.98, 0.3), rgb(0, 0.75,
0), rgb(0, 0.44, 0), rgb(0, 0, 0))
# gray, light green, medium green, dark green, black
# Second map. Set margins so the plot uses the whole device area
par(mar = c(0, 0, 1, 0))
# Significance-class vector
pValues <- vector(mode = "numeric", length = nrow(lmoran))
# Assign significance classes (later, stricter thresholds overwrite earlier
# ones, so 4 = p<=0.05 down to 1 = p<=0.0001)
pValues[(lmoran[, 5] > 0.05)] <- 0
pValues[(lmoran[, 5] <= 0.05)] <- 4
pValues[(lmoran[, 5] <= 0.01)] <- 3
pValues[(lmoran[, 5] <= 0.001)] <- 2
pValues[(lmoran[, 5] <= 0.0001)] <- 1
plot(frontera)
# NOTE(review): given the coding above, pValues == 1 is the *most*
# significant class (p <= 0.0001) but is drawn light green, which the legend
# labels "p = 0.05"; the per-class comments/colors below appear inverted
# relative to the legend -- verify before publishing these maps.
# Plot not significant
plot(robos[pValues == 0, ], col = rgb(0.74, 0.74, 0.74, alpha = 0.2), add = T,
pch = 16, cex = 0.75)
# Plot 0.05
plot(robos[pValues == 1, ], col = rgb(0.22, 0.98, 0.3, alpha = 0.2), add = T,
pch = 16, cex = 0.75)
# Plot 0.01
plot(robos[pValues == 2, ], col = rgb(0, 0.75, 0, alpha = 0.2), add = T, pch = 16,
cex = 0.75)
# Plot 0.001
plot(robos[pValues == 3, ], col = rgb(0, 0.44, 0, alpha = 0.2), add = T, pch = 16,
cex = 0.75)
# Plot 0.0001
plot(robos[pValues == 4, ], col = rgb(0, 0, 0, alpha = 0.75), add = T, pch = 16,
cex = 0.75)
legend("right", legend = c("No significativo", "p = 0.05", "p = 0.01", "p = 0.001",
"p = 0.0001"), fill = colorsPValue, bty = "n", cex = 0.7, y.intersp = 1,
x.intersp = 1)
title("p-value Local")
|
996b984b849cfa3064af028feecb1d2732901762
|
93948587ecb19bd226dd7c6b499f90a5ae3c472e
|
/R/canClu.R
|
99682d1a3897a53536d360b6b8714521877c80ef
|
[] |
no_license
|
cran/blockmodeling
|
2aee1677bc94cd7daf7b48f5a7b7a662100a648e
|
f1766c756496c05040f8a8015f31036f13224659
|
refs/heads/master
| 2022-12-01T06:35:33.586891
| 2022-11-22T11:30:02
| 2022-11-22T11:30:02
| 17,694,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
canClu.R
|
#' @encoding UTF-8
#' @title Create canonical partition and find unique canonical partitions in a list of partitions.
#'
#' @description
#' Relabels cluster ids so that clusters are numbered in order of first
#' appearance: the first unit belongs to cluster 1, the first unit not in
#' cluster 1 belongs to cluster 2, and so on. Taking first appearances of
#' clusters in the order they occur therefore yields the integers 1 to the
#' number of clusters.
#'
#' @param clu A partition - a vector or a list of vectors/partitions.
#' @param cluList A list of partitions(vectors).
#' @return For function \code{canClu} - a canonical partition or a list of such partitions.
#' For function \code{canCluUniqe} - A list of unique canonical partitions.
#' @seealso \code{\link{clu}}
#' @examples
#' clu<-c(3,2,2,3,1,2)
#' canClu(clu)
#' @export
canClu <- function(clu) {
  # A list of partitions is canonicalized element-wise via recursion;
  # a single partition is recoded by order of first appearance.
  if (is.list(clu)) {
    lapply(clu, canClu)
  } else {
    as.numeric(factor(clu, levels = unique(clu)))
  }
}
#' @rdname canClu
#'
#' @export
canCluUniqe <- function(cluList) {
  # Canonicalize each partition, then keep only the first occurrence of each
  # distinct partition, comparing via a collapsed string key. Using
  # duplicated() replaces the original O(n^2) grow-in-a-loop construction and
  # also handles an empty list correctly (the old 1:length() loop did not).
  if (!is.list(cluList)) {
    stop("cluList must be a list of partitions!")
  }
  cluList <- lapply(cluList, canClu)
  cluListStr <- vapply(cluList, paste, character(1), collapse = ",")
  cluList[!duplicated(cluListStr)]
}
|
cbe1ec0c377891aafcf76bd5d4329d228622f7eb
|
e59105c4b262f1203231f5ea05c87c1d922eea81
|
/manuscript/code/eu_o3.R
|
7b6b15225223a4aa456910a5907e9285903b388b
|
[] |
no_license
|
RedCobbler/business_intelligence_with_r
|
d869050a9c8c63c66cdbd391d025845c45f1625d
|
fad45dc881d23108764f2a02a68b629d0ff6f02c
|
refs/heads/master
| 2021-01-12T00:39:57.191026
| 2016-09-28T21:06:38
| 2016-09-28T21:06:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,465
|
r
|
eu_o3.R
|
# Code used to set up Ozone Pollution in EU
# Chapter 7 Mapping section
# Net result is eu_o3.csv in this directory

# Data requires manual download from
# http://www.eea.europa.eu/data-and-maps/data/airbase-the-european-air-quality-database-8
# Download both the stations and the statistics zip files

# AirBase files are tab-separated and unquoted; read station metadata and
# per-station statistics as plain character/numeric columns.
o3stations <- read.table("~/Downloads/AirBase_v8_stations.csv", sep = "\t", header = TRUE,
stringsAsFactors = FALSE, quote = "")
o3data <- read.table("~/Downloads/AirBase_v8_statistics.csv", sep = "\t", header = TRUE,
stringsAsFactors = FALSE, quote = "")

# library() fails immediately if sqldf is missing; the original require()
# only returns FALSE and would let the script fail later with a confusing
# "could not find function sqldf" error.
library(sqldf)

# Daily median (P50) ozone per station for 2012, joined to station
# coordinates. NOTE(review): station_european_code is selected from both
# tables, which yields a duplicated column in the result -- confirm whether
# one of the two should be dropped before writing eu_o3.csv.
eu_o3 <- sqldf("SELECT
a.station_european_code
, a.component_caption
, a.statistics_year
, a.statistics_average_group
, a.statistic_shortname
, a.statistic_value
, b.station_european_code
, b.country_iso_code
, b.country_name
, b.station_city
, b.station_longitude_deg as longitude
, b.station_latitude_deg as latitude
, b.station_altitude as altitude
FROM o3data a
INNER JOIN o3stations b
ON a.station_european_code = b.station_european_code
WHERE a.component_caption = 'O3'
AND a.statistics_year = 2012
AND a.statistics_average_group = 'day'
AND a.statistic_shortname = 'P50'
")
|
80d3e1327783e6d53f1b1bff61c7d2cc824e4ef5
|
67c4efb56a9a6611325ac89d2914a7467fa378c0
|
/R/discard.R
|
c0647c2d325547e184006bf73ebef0641fb23a59
|
[] |
no_license
|
rpahl/container
|
0e197616a12bd0daff0d38c38740ab404dea7cde
|
e14d3c027c364de02be8198a7d3cc382577bf865
|
refs/heads/master
| 2022-12-06T09:05:31.094044
| 2022-12-05T15:27:44
| 2022-12-05T15:27:44
| 124,919,709
| 18
| 2
| null | 2022-12-05T15:57:48
| 2018-03-12T16:37:36
|
R
|
UTF-8
|
R
| false
| false
| 1,269
|
r
|
discard.R
|
#' Discard Container Elements
#'
#' Search and remove an element from an object. If the element is not found,
#' ignore the attempt.
#' @param .x any `R` object.
#' @param ... elements to be discarded.
#' @details `discard()` works on (and returns) a modified copy, while
#' `ref_discard()` modifies `.x` in place and returns it invisibly -- see the
#' `Container` methods in this file.
#' @export
discard <- function(.x, ...) UseMethod("discard")
#' @rdname discard
#' @export
ref_discard <- function(.x, ...) UseMethod("ref_discard")
#' @rdname discard
#' @return For `Container`, an object of class `Container` (or one of the
#' respective derived classes).
#' @examples
#'
#' s = setnew("a", num = 1:3, data = iris)
#' print(s)
#' discard(s, 1:3, "a")
#' discard(s, iris)
#' discard(s, "b")   # ignored
#' @export
discard.Container <- function(.x, ...) {
    # Operate on a deep copy so the original container is left untouched.
    # The parentheses around the call make the (invisible) result of
    # ref_discard() visible again, so the modified copy prints at top level.
    copy <- .x$clone(deep = TRUE)
    (ref_discard(copy, ...))
}
#' @name ContainerS3
#' @rdname ContainerS3
#' @details
#' * `discard(.x, ...)` and `ref_discard(.x, ...)` find and discard elements.
#' Elements that don't exist, are ignored.
#' @examples
#'
#' co = container("a", num = 1:3, data = iris)
#' print(co)
#' discard(co, 1:3, "a")
#' discard(co, iris)
#' discard(co, "b") # ignored
NULL
#' @rdname discard
#' @export
ref_discard.Container <- function(.x, ...) {
    # Collect the candidate elements; with none given there is nothing to do
    # and the container is returned unchanged (visibly, as before).
    candidates <- list(...)
    if (length(candidates) == 0L) {
        return(.x)
    }
    # Discard each candidate in place; Container$discard() ignores elements
    # that are not present.
    for (elem in candidates) {
        .x$discard(elem)
    }
    invisible(.x)
}
|
e0cc49be9a2df93646fad6d737f499109ed578d6
|
7dc56416fcc41def35bae8065ece00f89421a608
|
/man/dcleaner.Rd
|
ac78b6951bb0a69520da763e3cafff6b26774784
|
[] |
no_license
|
Muscade/dcleaner
|
bc9f65eef1cf62315c92099ff4b51ed055021226
|
6a66040ec804a0b4ce29b21758f72774c7ca4536
|
refs/heads/master
| 2020-12-26T12:52:38.054351
| 2020-02-02T20:45:51
| 2020-02-02T20:45:51
| 237,514,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 480
|
rd
|
dcleaner.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dcleaner.R
\docType{package}
\name{dcleaner}
\alias{dcleaner}
\title{dcleaner: A package for cleaning and munging data, and for importing and exporting data to a database.}
\description{
The dcleaner package provides three categories of important functions:
load_table, import_table, clean_table
}
\section{Loading files}{
dcleaner can work with Excel files that are accompanied by additional files describing their schemas.
}
|
861959ef7240187d923a480bd35f3d17bb164d2e
|
7cb268fcda08de51d8cdcf224f0d204e5fcbda22
|
/differential_expression/differential.expression.ages.resampled.pc.lncRNA.R
|
c8d5a0c837a560aa312b23fbd19a8fdb022929c0
|
[] |
no_license
|
anecsulea/LncEvoDevo
|
2fa8169c06875675166521ce44fe270225b2dd1d
|
5b22ccdc4e280c02bd58352cfd9f23b9cb69c44a
|
refs/heads/master
| 2020-06-01T07:51:34.135059
| 2019-06-30T17:48:19
| 2019-06-30T17:48:19
| 190,705,719
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,625
|
r
|
differential.expression.ages.resampled.pc.lncRNA.R
|
########################################################################
# Differential expression across ages (DESeq2 likelihood-ratio test), run
# per species and tissue on combined protein-coding + lncRNA read counts:
# once with resampled protein-coding reads, once with all reads.
path="LncEvoDevo/"
pathResults=paste(path, "results/differential_expression/", sep="")
########################################################################
# Fixed seed so any random steps downstream are reproducible.
set.seed(19)
options(stringsAsFactors=F)
library(DESeq2)
########################################################################
for(sp in c("Mouse", "Rat")){
print(sp)
# Loads the count matrices and sample annotations used below
# (reads.pc.resampled, reads.pc, reads.lnc, tissues, ages) -- TODO confirm
# the exact contents of the RData file.
load(paste("RData/reads.resampling.pc.lncRNA.",sp,".RData",sep=""))
print("resampled")
# First pass: resampled protein-coding counts stacked on lncRNA counts.
reads=rbind(reads.pc.resampled, reads.lnc)
for(tiss in c("Brain", "Kidney", "Liver", "Testis")){
print(tiss)
# Subset the samples of this tissue; age is the tested condition.
this.reads=reads[,which(tissues==tiss)]
this.ages=as.factor(ages[which(tissues==tiss)])
print(this.ages)
colnames(this.reads)=paste("NbReads", colnames(this.reads), sep=".")
colData=data.frame("condition"=this.ages)
# LRT: full model ~ condition vs intercept-only reduced model, i.e. test
# for any age effect on expression.
dds=DESeqDataSetFromMatrix(countData = this.reads, colData=colData, design = ~ condition)
dds=DESeq(dds,test="LRT", reduced = ~ 1, minReplicatesForReplace=15 )
res=results(dds)
# Order by p-value; keep p-value/FDR and append the per-sample raw counts.
res=res[order(res$pvalue),]
res=as.data.frame(res)
res=res[,c("pvalue", "padj")]
colnames(res)=c("PValue", "FDR")
res$GeneID=rownames(res)
res=cbind(res, this.reads[rownames(res),])
# Put GeneID first in the output table.
res=res[,c("GeneID", setdiff(colnames(res), "GeneID"))]
write.table(res, file=paste(pathResults, sp, "/DifferentialExpression_AllAges_ResampledReads_PCLncRNAs_",tiss, ".txt", sep=""), row.names=F, col.names=T, sep="\t", quote=F)
}
print("not resampled")
# Second pass: full (non-resampled) protein-coding counts.
# NOTE(review): this loop duplicates the one above except for the input
# matrix, the print(this.ages) call and the output file name -- a shared
# helper function would remove the duplication.
reads=rbind(reads.pc, reads.lnc)
for(tiss in c("Brain", "Kidney", "Liver", "Testis")){
print(tiss)
this.reads=reads[,which(tissues==tiss)]
this.ages=as.factor(ages[which(tissues==tiss)])
colnames(this.reads)=paste("NbReads", colnames(this.reads), sep=".")
colData=data.frame("condition"=this.ages)
dds=DESeqDataSetFromMatrix(countData = this.reads, colData=colData, design = ~ condition)
dds=DESeq(dds,test="LRT", reduced = ~ 1, minReplicatesForReplace=15 )
res=results(dds)
res=res[order(res$pvalue),]
res=as.data.frame(res)
res=res[,c("pvalue", "padj")]
colnames(res)=c("PValue", "FDR")
res$GeneID=rownames(res)
res=cbind(res, this.reads[rownames(res),])
res=res[,c("GeneID", setdiff(colnames(res), "GeneID"))]
write.table(res, file=paste(pathResults, sp, "/DifferentialExpression_AllAges_AllReads_PCLncRNAs_", tiss, ".txt", sep=""), row.names=F, col.names=T, sep="\t", quote=F)
}
}
########################################################################
|
22e37a3026013373619d6207e632f211a0ec33a4
|
b5e4d4f4c36d574393ab069e568d83885e61e1ca
|
/R/genPoints.R
|
6ae94a680de14977b50049ae5286f27d2563fcc8
|
[] |
no_license
|
robiRagan/voteR
|
9ed860de34e167d1ff0bc89c3e47962cd4cc064e
|
c20de1f2f8fcffe928d97e29d939a3cf272ba948
|
refs/heads/master
| 2020-12-24T15:40:49.572281
| 2020-02-20T15:46:13
| 2020-02-20T15:46:13
| 155,420,318
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,459
|
r
|
genPoints.R
|
#' genPoints
#' Generates one or two dimensional points.
#'
#' Generates one or two dimensional points from one of the distributions in
#' base R. The user can set bounds on each dimension. Points are drawn
#' directly from the corresponding truncated distribution via
#' \code{truncdist::rtrunc}, so no draws outside the bounds are produced and
#' no resampling is needed.
#'
#' This is rarely called directly and is wrapped by \code{\link{genIdeals}} and
#' \code{\link{genAlts}}. \code{\link{genIdeals}} is further wrapped by
#' \code{\link{genVoters}}.
#' @section TODO:
#' Add calibration functionality and move this into C++.
#'
#' @param numberOfDimensionsGenPoints The number of policy dimensions (1 or 2).
#' @param numberOfPointsGenPoints Number of points to generate.
#' @param distributionTypeGenPoints A string identifying the base R
#'    distribution to draw the points from, using the standard R abbreviation
#'    (see ?distributions). Currently supported are: "norm", "unif", "beta",
#'    "cauchy", "chisq", "weibull".
#' @param distributionParametersGenPoints A vector that contains the two
#'    additional parameters needed by the particular rxxxx function of the
#'    chosen distribution (see ?rxxxx where xxxx is a function listed under
#'    ?distributions). Example for a Normal(0,1), use: c(0,1).
#' @param dimOneBoundsGenPoints A vector with the lower and upper bounds of
#'    the first dimension, e.g. c(0,1). Defaults to c(-Inf, Inf) (unbounded).
#' @param dimTwoBoundsGenPoints A vector with the lower and upper bounds of
#'    the second dimension, e.g. c(0,1). Defaults to c(-Inf, Inf) (unbounded).
#' @return A matrix of points with numberOfPointsGenPoints rows and
#'    numberOfDimensionsGenPoints columns.
#' @examples
#' genPoints(numberOfDimensionsGenPoints = 2, numberOfPointsGenPoints = 100, distributionTypeGenPoints = "norm", distributionParametersGenPoints = c(0,.2), dimOneBoundsGenPoints = c(0,1), dimTwoBoundsGenPoints = c(-1,1))
#'
#' genPoints(numberOfDimensionsGenPoints = 2, numberOfPointsGenPoints = 100, distributionTypeGenPoints = "beta", distributionParametersGenPoints = c(.1,.1), dimOneBoundsGenPoints = c(0,1), dimTwoBoundsGenPoints = c(0,1))
#'
#' genPoints(numberOfDimensionsGenPoints = 1, numberOfPointsGenPoints = 100, distributionTypeGenPoints = "unif", distributionParametersGenPoints = c(-1,1))
#'
#'
#'
#' @export
genPoints <- function(numberOfDimensionsGenPoints=1, numberOfPointsGenPoints=100, distributionTypeGenPoints ="unif", dimOneBoundsGenPoints = c(-Inf,Inf), dimTwoBoundsGenPoints = c(-Inf,Inf), distributionParametersGenPoints = c(-1,1)){
  # Fail fast on unsupported dimensionality; the original fell through and
  # errored later on an undefined `rawPoints`.
  if (!(numberOfDimensionsGenPoints %in% c(1, 2))) {
    stop("numberOfDimensionsGenPoints must be 1 or 2")
  }
  if (numberOfDimensionsGenPoints == 1) {
    # One truncated draw per point.
    dimOne <- truncdist::rtrunc(n = (numberOfPointsGenPoints*numberOfDimensionsGenPoints), spec = distributionTypeGenPoints, a = dimOneBoundsGenPoints[1], b = dimOneBoundsGenPoints[2], distributionParametersGenPoints[1], distributionParametersGenPoints[2])
    rawPoints <- cbind(dimOne)
  }
  if (numberOfDimensionsGenPoints == 2) {
    # (numberOfPoints * 2) / 2 draws per dimension, i.e. one draw per point
    # and dimension, each truncated to that dimension's bounds.
    dimOne <- truncdist::rtrunc(n = (numberOfPointsGenPoints*numberOfDimensionsGenPoints)/2, spec = distributionTypeGenPoints, a = dimOneBoundsGenPoints[1], b = dimOneBoundsGenPoints[2], distributionParametersGenPoints[1], distributionParametersGenPoints[2])
    dimTwo <- truncdist::rtrunc(n = (numberOfPointsGenPoints*numberOfDimensionsGenPoints)/2, spec = distributionTypeGenPoints, a = dimTwoBoundsGenPoints[1], b = dimTwoBoundsGenPoints[2], distributionParametersGenPoints[1], distributionParametersGenPoints[2])
    rawPoints <- cbind(dimOne,dimTwo)
  }
  rawPoints
}
|
114f956ff082ba51c16d47ded4e7f9f1d9bc274a
|
38d166ede31183e2121388be0f66fe9d7ac4e93a
|
/man/get_max_taxonomic_rank.Rd
|
7f0b0a6e1bf5dbba3861dccc53802a732fa7e31d
|
[
"MIT"
] |
permissive
|
vmikk/metagMisc
|
a01151347b620745b278265700e503dc74669af5
|
310b1a40951de46348084e150d7471ed66feb0c8
|
refs/heads/master
| 2023-08-31T08:41:27.684905
| 2023-08-28T10:09:50
| 2023-08-28T10:09:50
| 76,531,351
| 38
| 12
|
MIT
| 2019-07-29T06:12:12
| 2016-12-15T06:40:05
|
R
|
UTF-8
|
R
| false
| true
| 1,335
|
rd
|
get_max_taxonomic_rank.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_max_taxonomic_rank.R
\name{get_max_taxonomic_rank}
\alias{get_max_taxonomic_rank}
\title{Determine the lowest level of taxonomic classification}
\usage{
get_max_taxonomic_rank(x, return_rank_only = FALSE)
}
\arguments{
\item{x}{Either a phyloseq object, or a data frame with columns as taxonomic ranks and rows as entries (e.g., OTUs). Columns in the data frame should be ordered from the highest level of classification (e.g., Kingdom) to the lowest level (e.g., Species), missing data are coded as NA}
\item{return_rank_only}{Logical, if TRUE only name of the taxonomic rank will be returned}
}
\value{
Data frame with taxonomy and additional column containing the name of the lowest level of taxonomic classification. Alternatively, if 'return_rank_only = TRUE', a vector of the lowest taxonomic ranks for each OTU.
}
\description{
Determine the lowest level of taxonomic classification
}
\details{
This function will find the last non-NA column in the taxonomy table and return it as the lowest level of taxonomic classification (in the \code{RankName} column, or as a vector when \code{return_rank_only = TRUE}).
}
\examples{
data(GlobalPatterns)
# phyloseq-class as input
taxx <- get_max_taxonomic_rank(GlobalPatterns)
summary(taxx$RankName)
# data frame as input
taxtbl <- as.data.frame(tax_table(GlobalPatterns))
taxx <- get_max_taxonomic_rank(taxtbl)
summary(taxx$RankName)
}
|
a09bdd9bea7cadac6e04f877d52734d2b0656948
|
fc8014537f843be228bc5808cc379b66dc75be94
|
/man/Daniels_2020C.Rd
|
4c1558b69dd944d361764d141b633147c89f6511
|
[
"MIT"
] |
permissive
|
mrc-ide/SIMPLEGEN
|
db09a91438b788853ec24541f73dc516fc07f4b0
|
a8500cecb0345f16b36226aa4c5b386f397d7981
|
refs/heads/master
| 2023-04-14T16:55:50.814805
| 2023-03-30T16:23:49
| 2023-03-30T16:23:49
| 186,381,846
| 13
| 1
|
MIT
| 2023-03-30T16:23:51
| 2019-05-13T08:51:43
|
R
|
UTF-8
|
R
| false
| true
| 1,705
|
rd
|
Daniels_2020C.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Daniels_2020C}
\alias{Daniels_2020C}
\title{24-SNP barcode data from Richard Toll, Senegal (Daniels et al., 2020)}
\format{
A dataframe with 30 columns, giving sample ID and year (columns 1:2),
genomic data at 24 SNPs (columns 3:26), and details of missingness and
designated mono/polyclonality (columns 27:30). Heterozygous genotyping
calls are identified by "N", and missing alleles are identified by "X".
}
\source{
\href{https://malariajournal.biomedcentral.com/articles/10.1186/s12936-020-03346-x}{Supplementary materials}
}
\usage{
data(Daniels_2020C)
}
\description{
Data from Daniels et al. (2020). Here we give a brief summary of the data -
see the original paper for full details.
\cr
\cr
Samples were obtained from routine case investigation carried out in Richard
Toll, Senegal between September 2012 and December 2015. Rapid diagnostic
tests (RDTs) were used to diagnose malaria cases either through
facility-based passive case detection (PCD) or through reactive case
detection (RACD). A standardized questionnaire was also filled out for all
participants to collect information on basic demographic information
including travel history. RDTs were used to genotype malaria infections using
a 24-SNP barcode. Samples were designated polygenomic if multiple alleles
were observed at two or more positions, otherwise they were designated
monogenomic. Samples with missing data at 5 or more loci were deemed to have
"failed" for the purposes of subsequent analyses, but are included in the
data anyway.
}
\references{
\insertRef{daniels_genetic_2020}{SIMPLEGEN}
}
\keyword{datasets}
|
4390f4ecdd8d5814560fda226427f58e7376defc
|
4455b3494711314fc826125fb0251b24de546c98
|
/cytometry.R
|
3745db45688d3cfd47cf69cfde7d8b11354158c9
|
[] |
no_license
|
hhhh5/HRS
|
908d5c5248cd9f9c3198cb6f3a1deca48aa49158
|
3be07971e4008e17beaa00ee12cf726ebb907b1f
|
refs/heads/main
| 2023-04-12T02:47:22.581679
| 2022-11-27T18:49:54
| 2022-11-27T18:49:54
| 345,698,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,884
|
r
|
cytometry.R
|
# Flow cytometry measurements
flow = '/nfs/turbo/bakulski1/Datasets/HRS/jonheiss/sensitive/flow/hrsflowdata2016.sas7bdat'
vbs = '/nfs/turbo/bakulski1/Datasets/HRS/jonheiss/sensitive/flow/hrs2016vbs.sas7bdat'
xwalk = '/nfs/turbo/bakulski1/Datasets/HRS/jonheiss/sensitive/flow/xwalk.csv'
flow %<>% read_sas %>% as.data.table
vbs %<>% read_sas %>% as.data.table
xwalk %<>% fread
# Make sure that the combination of `HHID` and `PN` is unique
# Result should be `0`
anyDuplicated(flow,by=c('HHID','PN'))
anyDuplicated(vbs ,by=c('HHID','PN'))
# Inner join
LC = flow[vbs,on=c('HHID','PN'),nomatch=NULL];
# 9933 subjects
LC[,.N]
xwalk = xwalk[,.(
FID = Sample_Name
,HHID = stri_pad_left(HHID,width=6,pad='0')
,PN = paste0('0',PN)
)]
LC = LC[xwalk,on=c('HHID','PN'),nomatch=NULL]
# Select the relevant variables
# I use PNEUT instead of PANEU/PWBC as PANEU seems to be a rounded value. Same goes for the other cell types.
LC = LC[,.(
HHID # Household Identifier
,PN # PERSON NUMBER
,FID
# ,PANEU # NEUTROPHIL COUNT - X10E9/L
# ,PAEOS # EOSINOPHIL COUNT - X10E9/L
# ,PABAS # BASOPHIL COUNT - X10E9/L
# ,PALYM # LYMPHOCYTE COUNT - X10E9/L
# ,PAMON # MONOCYTE COUNT - X10E9/L
,PWBC # WHITE BLOOD CELL COUNT - 10E9/L
# of the following 5 cell types I won't use lymphocytes but the more granular subtypes from `flow`
,NE = PNEUT / 100 # PERCENT NEUTROPHILS
,EO = PEOS / 100 # PERCENT EOSINOPHILS
,BA = PBASO / 100 # PERCENT BASOPHILS
,LY = PLYMP / 100 # PERCENT LYMPHOCYTES
,MO = PMONO / 100 # PERCENT MONOCYTES
,T = PTcell_count / PWBC # T cells
,CT = PCT_count / PWBC # cytotoxic T cells
,EM_CT = PEM_CT_count / PWBC # Effector memory cytotoxic T cells
,CM_CT = PCM_CT_count / PWBC # Central memory cytotoxic T cells
,E_CT = PE_CT_count / PWBC # Effector cytotoxic T cells
,N_CT = PN_CT_count / PWBC # Naive cytotoxic T cells
,HT = PHT_count / PWBC # Helper T cells
,EM_HT = PEM_HT_count / PWBC # Effector memory helper T cells !!!!!! Should I use the new measurements here?
,CM_HT = PCM_HT_count / PWBC # Central memory helper T cells
,E_HT = PE_HT_count / PWBC # Effector Memory
,N_HT = PN_HT_count / PWBC # Naive helper T cells
,B = PBcell_count / PWBC # B lymphocytes
,NaiveB = PNaiveB_count / PWBC # CD27- naive B cells
,IgD_Plus_MemB = PIgD_Plus_MemB_count / PWBC # IgD+ Memory B cells
,IgD_Minus_MemB = PIgD_Minus_MemB_count / PWBC # IgD- Memory B cells
,MON = PMONO_count / PWBC # Monocytes
,MONC = PMONOc_count / PWBC # classical monocytes
,MONNC = PMONOnc_count / PWBC # non-classical monocytes
,DC = PDC_count / PWBC # Dendritic cells
,DCm = PDCm_count / PWBC # CD11c+ myeloid dendritic cells
,DCp = PDCp_count / PWBC # CD123+ plasmacytoid dendritic cells
,NK = PNK_count / PWBC # Natural killer cells
,NKHI = PNKHI_count / PWBC # CD56 high NK cells
,NKLO = PNKLO_count / PWBC # CD56 low NK cells
# Percentages for the same blood cell types
# ,PDC_pct
# ,PDCm_pct
# ,PDCp_pct
# ,PNK_pct
# ,PNKHI_pct
# ,PNKLO_pct
# ,PMONO_pct
# ,PMONOc_pct
# ,PMONOnc_pct
# ,PBcell_pct
# ,PCM_CT_pct
# ,PCM_HT_pct
# ,PCT_pct
# ,PEM_CT_pct
# ,PEM_HT_pct
# ,PE_CT_pct
# ,PE_HT_pct
# ,PHT_pct
# ,PIgD_Plus_MemB_pct
# ,PIgD_Minus_MemB_pct
# ,PN_CT_pct
# ,PN_HT_pct
# ,PTcell_pct
# ,PNaiveB_pct
## Irrelevant variables
# ,PALB # ALBUMIN - G/DLLB
# ,PALKP2 # ALKALINE PHOSPHATASE - U/L
# ,PALT # ALANINE AMINOTRANSFERASE - U/L
# ,PAST # ASPARTATE AMINOTRANSFERASE - U/L
# ,PBILT # BILIRUBIN, TOTAL - MG/DL
# ,PBUN # UREA NITROGEN (BUN) - MG/DL
# ,PCA # CALCIUM - MG/DL
# ,PCHOL # CHOLESTEROL, TOTAL - MG/DL
# ,PCL # CHLORIDE - MMOL/L
# ,PCMVGE # CMV IGG - COI
# ,PCO2 # BICARBONATE (CO2) - MMOL/L
# ,PCR # CREATININE - MG/DL
# ,PCRP # C-REACTIVE PROTEIN (HIGH SENSITIVITY) - MG/L
# ,PCYSC # CYSTATIN C - MG/L
# ,PDHEASE # DEHYDROEPIANDROSTERONE SULFATE (DHEAS) - UMOL/L
# ,PFERTN # FERRITIN - UG/L
# ,PGLUFF # GLUCOSE, FASTING - MG/DL
# ,PHCT # HEMATOCRIT
# ,PHDLD # HDL-CHOLESTEROL, DIRECT-MEASURE - MG/DL
# ,PHGB # HEMOGLOBIN - G/DL
# ,PK # POTASSIUM - MMOL/L
# ,PLDLC # LDL-CHOLESTEROL, CALCULATED - MG/DL
# ,PMCH # MEAN CORPUSCULAR HEMOGLOBIN - PG
# ,PMCHC # MEAN CORPUSCULAR HEMOGLOBIN CONCENTRATION - G/DL
# ,PMCV # MEAN CORPUSCULAR VOLUME - FL
# ,PMPV # MEAN PLATELET VOLUME - FL
# ,PNA # SODIUM - MMOL/L
# ,PNTBNPE # B-TYPE NATRIURETIC PEPTIDE,N-TERMINAL PRO (NT-PROBNP)-PG/ML
# ,PPDW # PLATELET DISTRIBUTION WIDTH - FL
# ,PPLT # PLATELET COUNT - 10E9/L
# ,PRBC # RED BLOOD CELL COUNT - 10E12/L
# ,PRDW # RED CELL DISTRIBUTION WIDTH - %
# ,PTGF # RIGLYCERIDES - MG/DL
# ,PTP # PROTEIN, TOTAL - G/DL
# ,PVBSWGTR # VBS RESPONDENT WEIGHT
# ,PVBSWHY0WGT # VBS WHY ZERO WEIGHT
# ,PVBS_N_DAYS # NUMBER OF DAYS BETWEEN HRS IW DATE AND COLLECTION DATE
# ,PCMVGINT # CMV IGG (NON-REACTIVE, REACTIVE, BORDERLINE)
# ,PFASTYN # FASTING STATUS (Y/N)
# ,VERSION # DATASET VERSION
)]
# Cell types whose proportions we want to estimate downstream.
celltypes = c('NE','EO','BA','MO','EM_CT','CM_CT','E_CT','N_CT','EM_HT','CM_HT','E_HT','N_HT','B','DC','NK')

# Keep only samples with a complete set of the required cell-type columns.
i = rowSums(is.na(LC[,..celltypes])) == 0
LC = LC[i]
LC[,.N]

# Proportions should sum to approximately 1 for each sample; drop samples
# whose total falls outside the tolerance window [0.9, 1.02].
LC[,total:=rowSums(.SD),.SDcols=celltypes]
LC = LC[total %between% c(0.9,1.02)]
LC[,total:=NULL]

# Cleanup of intermediates no longer needed.
rm(flow,vbs,xwalk,i)
|
601454029fec0159fdf945c65f09a486f2926527
|
35e702005356610d62e60fcf11efdaaca770398e
|
/gt:dp.dup.R
|
84e6d709d8807445b0a9987a5fcf86efe9340ae4
|
[] |
no_license
|
jingg46/Research-oral-cleft
|
744d1a23ca59f4aec4acacd8eaf52961b49e8f83
|
db42552e66cad11ec3cebb220858800a866507fb
|
refs/heads/master
| 2020-03-11T03:38:46.903604
| 2018-07-21T18:29:59
| 2018-07-21T18:29:59
| 129,754,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,132
|
r
|
gt:dp.dup.R
|
# --- Load duplicate-sample VCFs and align genotype (GT) calls ---------------
# Reads the targeted-sequencing and GMKF whole-genome VCFs for the same
# duplicated samples, recodes missing genotype calls as NA, removes duplicated
# positions, and restricts both GT tables to the positions shared by the two
# datasets.
library(VariantAnnotation)
##target sequencing data
target <- readVcf("/users/jli/target.dup.vcf", "hg19")
target.gt <- geno(target)$GT
target.gt <- as.data.frame(target.gt, stringsAsFactors = F)
# "." encodes a missing genotype call in the targeted data
target.gt[target.gt == "."] <- NA
gmkf <- readVcf("/users/jli/gmkf.dup.vcf", "hg19")
gmkf.gt <- geno(gmkf)$GT
gmkf.gt <- as.data.frame(gmkf.gt, stringsAsFactors = F)
# Half-missing ("0/.", "1/.") and fully missing (".") calls all become NA
gmkf.gt[gmkf.gt == "0/."] <- NA
gmkf.gt[gmkf.gt == "1/."] <- NA
gmkf.gt[gmkf.gt == "."] <- NA
# Variant start positions for the GMKF data
temp <- rowRanges(gmkf)
temp1 <- temp@ranges
gmkf.position <- temp1@start
##map rs name and position together for gmkf data
rs.name <- names(temp)
length(unique(rs.name))
gmkf.rs.position <- data.frame(rs.name, gmkf.position)
# Variant start positions for the targeted data
temp <- rowRanges(target)
temp1 <- temp@ranges
target.position <- temp1@start
###check for duplicate
length(unique(target.position))
length(unique(gmkf.position))
###get rid of ALL duplicated positions
# NOTE(review): `which(duplicated(...)) - 1` assumes every duplicated position
# occurs exactly twice and on adjacent rows -- confirm this holds for the VCF.
gmkf.dup.inx <- c(which(duplicated(gmkf.position)), which(duplicated(gmkf.position))-1)
gmkf.dup.inx <- sort(gmkf.dup.inx)
gmkf.gt <- gmkf.gt[-gmkf.dup.inx,]
gmkf.position <- gmkf.position[-gmkf.dup.inx]
# Keep only positions present in both datasets
target.inx <- which(target.position %in% gmkf.position)
gmkf.inx <- which(gmkf.position %in% target.position)
gmkf.position <- gmkf.position[gmkf.inx]
gmkf.rs.position <- gmkf.rs.position[gmkf.rs.position$gmkf.position %in% gmkf.position,]
write.table(gmkf.rs.position$rs.name, "gmkf.dup.rs.name.txt",sep = " ", quote = FALSE)
gmkf.gt <- gmkf.gt[gmkf.inx,]
target.gt <- target.gt[target.inx,]
##get duplicate positions for snp plot
##gmkf.dup.position <- gmkf.position[which(gmkf.position %in% target.position)]
##gmkf.dup.position <- data.frame(cbind(rep("chr8", length(gmkf.dup.position)), gmkf.dup.position))
##write.table(gmkf.dup.position, "gmkf.dup.position.txt", quote = F, row.names = F, col.names = F, sep = " ")
# --- Match sample IDs between platforms and tally GT discordance ------------
# combine.id maps each GMKF sample ID to the corresponding targeted-data ID.
combine.id <- read.csv("/users/jli/combine.id.csv", stringsAsFactors = F)
###change gmkf id into target id
gmkf.gt.id <- colnames(gmkf.gt)
inx = rep(NA, length(gmkf.gt.id))
for (i in 1:length(gmkf.gt.id)){
inx[i] <- which(combine.id$gmkf.id == gmkf.gt.id[i])
}
colnames(gmkf.gt) <- combine.id$target.id[inx]
###order colnames in both data sets
gmkf.gt <- gmkf.gt[ ,order(names(gmkf.gt))]
target.gt <- target.gt[ ,order(names(target.gt))]
###check
###by position
# Number of sample pairs with discordant GT calls at each position
position.count <- rowSums(gmkf.gt!= target.gt, na.rm=T)
###check abnormal positions
list.pos <- sort(position.count, decreasing = T)[1:10]
##other than 213 (rs1119880, gmkf: 8:129320002_C/T, target: 8:129320002_C/A), all (error rate larger than 10%) are from the same positions
# Report the minor allele and its frequency for one variant.
#
# `x` holds genotype calls ("0/0", "0/1", "1/1", or NA) across samples, either
# as a character vector or as a one-row data frame (as produced by row
# subsetting above).  Returns a character vector c(label, frequency) where the
# label names the *minor* allele: "ref" is reported (with the REF frequency)
# when ALT is the major allele, "alt" otherwise -- kept exactly as in the
# original for backward compatibility.
#
# Generalized: the sample count was hard-coded as 402; it is now derived from
# the input so the function works for any cohort size.
maf <- function(x){
  # Number of non-missing genotype calls (402 in the original duplicate set).
  n_called <- length(x) - sum(is.na(x))
  alt <- (sum(x == "0/1", na.rm = TRUE) + sum(x == "1/1", na.rm = TRUE) * 2) / (n_called * 2)
  ref <- (sum(x == "0/1", na.rm = TRUE) + sum(x == "0/0", na.rm = TRUE) * 2) / (n_called * 2)
  if (alt > ref) {
    paste(c("ref", ref))
  } else {
    paste(c("alt", alt))
  }
}
# Print minor-allele calls from both platforms for the 10 most discordant
# positions, to see whether the two datasets disagree on the minor allele.
for (i in 1:length(list.pos)){
  print(c(maf(target.gt[which(position.count == list.pos[i]),]), maf(gmkf.gt[which(position.count == list.pos[i]),])))
}###only 1 position has different alleles as minor allele (8:129950945_G/A)
###by sample
# Number of discordant GT calls for each sample pair
sample.count <- colSums(gmkf.gt != target.gt, na.rm=T)
pdf("hist.dup.gt.by.position.pdf")
hist(position.count, main = "Histogram of mismatch in GT by position", breaks = 30, xlab = "Number of pairs with mismatch calls")
dev.off()
pdf("hist.dup.gt.by.sample.pdf")
hist(sample.count, main = "Histogram of mismatch in GT by sample", breaks = 30, xlab = "Number of mismatch calls per pair")
dev.off()
###check DP
# --- Repeat the same alignment for read depth (DP) --------------------------
target.dp <- geno(target)$DP
target.dp <- as.data.frame(target.dp, stringsAsFactors = F)
target.dp[target.dp == "."] <- NA
gmkf.dp <- geno(gmkf)$DP
gmkf.dp <- as.data.frame(gmkf.dp, stringsAsFactors = F)
gmkf.dp[gmkf.dp == "."] <- NA
# Recompute positions and drop duplicated GMKF positions (same assumption of
# adjacent duplicate pairs as in the GT section above)
temp <- rowRanges(gmkf)
temp1 <- temp@ranges
gmkf.position <- temp1@start
temp <- rowRanges(target)
temp1 <- temp@ranges
target.position <- temp1@start
gmkf.dup.inx <- c(which(duplicated(gmkf.position)), which(duplicated(gmkf.position))-1)
gmkf.dup.inx <- sort(gmkf.dup.inx)
gmkf.dp <- gmkf.dp[-gmkf.dup.inx,]
gmkf.position <- gmkf.position[-gmkf.dup.inx]
target.inx <- which(target.position %in% gmkf.position)
gmkf.inx <- which(gmkf.position %in% target.position)
gmkf.dp <- gmkf.dp[gmkf.inx,]
target.dp <- target.dp[target.inx,]
# Map GMKF sample IDs onto the targeted-data IDs, then sort columns
gmkf.dp.id <- colnames(gmkf.dp)
inx = rep(NA, length(gmkf.dp.id))
for (i in 1:length(gmkf.dp.id)){
inx[i] <- which(combine.id$gmkf.id == gmkf.dp.id[i])
}
colnames(gmkf.dp) <- combine.id$target.id[inx]
###order colnames in both data sets
gmkf.dp <- gmkf.dp[ ,order(names(gmkf.dp))]
target.dp <- target.dp[ ,order(names(target.dp))]
###check
###by position
# Sum of per-sample DP differences at each position
position.count <- rowSums(gmkf.dp - target.dp, na.rm=T)
###by sample
# Mean difference of within-sample z-scored DP between the two platforms
sample.count <- rep(NA, ncol(gmkf.dp))
for (i in 1:ncol(gmkf.dp)){
sample.count[i] <- mean((gmkf.dp[,i]-mean(gmkf.dp[,i], na.rm = T))/sd(gmkf.dp[,i], na.rm = T)
                        - (target.dp[,i] - mean(target.dp[,i], na.rm = T))/sd(target.dp[,i], na.rm = T), na.rm=T)
}
pdf("hist.dup.dp.by.position.pdf")
hist(position.count, main = "Histogram of mismatch in DP by position")
dev.off()
pdf("hist.dup.dp.by.sample.pdf")
hist(sample.count, main = "Histogram of mismatch in DP by sample")
dev.off()
###check for gq
# --- Align genotype quality (GQ) between platforms and summarise agreement --
# Mirrors the GT/DP alignment above for the GQ field.
target.gq <- geno(target)$GQ
target.gq <- as.data.frame(target.gq, stringsAsFactors = FALSE)
target.gq[target.gq == "."] <- NA
gmkf.gq <- geno(gmkf)$GQ
gmkf.gq <- as.data.frame(gmkf.gq, stringsAsFactors = FALSE)
gmkf.gq[gmkf.gq == "."] <- NA
# Recompute positions and drop duplicated GMKF positions (assumes duplicates
# occur as adjacent pairs, as in the GT section)
temp <- rowRanges(gmkf)
temp1 <- temp@ranges
gmkf.position <- temp1@start
temp <- rowRanges(target)
temp1 <- temp@ranges
target.position <- temp1@start
gmkf.dup.inx <- c(which(duplicated(gmkf.position)), which(duplicated(gmkf.position)) - 1)
gmkf.dup.inx <- sort(gmkf.dup.inx)
gmkf.gq <- gmkf.gq[-gmkf.dup.inx, ]
gmkf.position <- gmkf.position[-gmkf.dup.inx]
# Restrict both datasets to shared positions
target.inx <- which(target.position %in% gmkf.position)
gmkf.inx <- which(gmkf.position %in% target.position)
gmkf.gq <- gmkf.gq[gmkf.inx, ]
target.gq <- target.gq[target.inx, ]
# Map GMKF sample IDs onto the targeted-data IDs
gmkf.gq.id <- colnames(gmkf.gq)
inx <- rep(NA, length(gmkf.gq.id))
for (i in seq_along(gmkf.gq.id)) {
  inx[i] <- which(combine.id$gmkf.id == gmkf.gq.id[i])
}
colnames(gmkf.gq) <- combine.id$target.id[inx]
###order colnames in both data sets
gmkf.gq <- gmkf.gq[, order(names(gmkf.gq))]
target.gq <- target.gq[, order(names(target.gq))]
###check overall gq distribution for WGS sample-wise
gmkf.avg.gq <- colMeans(gmkf.gq, na.rm = TRUE)
gmkf.avg.gq <- as.numeric(gmkf.avg.gq)
pdf("hist.gmkf.dup.gq.by.sample.pdf")
hist(gmkf.avg.gq, main = "Histogram of GQ distribution for WGS by sample", breaks = 30)
dev.off()
###check
###by position
position.count <- rowSums(gmkf.gq - target.gq, na.rm = TRUE)
###by sample
# Mean difference of within-sample z-scored GQ between the two platforms.
# BUG FIX: the original loop z-scored gmkf.dp/target.dp (copy-pasted from the
# DP section) even though this section compares GQ; it now uses the GQ tables.
sample.count <- rep(NA, ncol(gmkf.gq))
for (i in seq_len(ncol(gmkf.gq))) {
  sample.count[i] <- mean((gmkf.gq[, i] - mean(gmkf.gq[, i], na.rm = TRUE)) / sd(gmkf.gq[, i], na.rm = TRUE)
                          - (target.gq[, i] - mean(target.gq[, i], na.rm = TRUE)) / sd(target.gq[, i], na.rm = TRUE), na.rm = TRUE)
}
##abnormal sample (< -0.002, > 0.002): "H_ME-DS10829_1-DS10829_1" "H_ME-DS10829_2-DS10829_2"
##3"H_ME-DS10895_1-DS10895_1" "H_ME-DS11323_3-DS11323_3" "H_ME-DS11337_2-DS11337_2"
# NOTE(review): the abnormal-sample list above was derived from the buggy
# DP-based values; re-derive it after this fix.
pdf("hist.dup.gq.by.position.pdf")
hist(position.count, main = "Histogram of mismatch in GQ by position")
dev.off()
pdf("hist.dup.gq.by.sample.pdf")
hist(sample.count, main = "Histogram of mismatch in GQ by sample")
dev.off()
########gmkf data############
###check normalized dp by mismatch in GT across individual
# For each sample, average the GMKF read depth separately over positions where
# the two platforms agree on GT ("match") vs disagree ("mismatch").
# inx[[s]] = row indices of discordant positions for sample s.
inx <- mapply(function(x,y) which(x != y), gmkf.gt, target.gt)
avg.mismatch.dp <- vector(length = ncol(gmkf.dp))
avg.match.dp <- vector(length = ncol(gmkf.dp))
for (i in 1: ncol(gmkf.dp)){
# NOTE(review): `length(unlist(inx[i]) != 0)` parenthesizes the comparison
# inside length(); it coincidentally works (length of the logical vector is
# nonzero iff there are mismatches) but `length(unlist(inx[i])) != 0` was
# presumably intended.
if (length(unlist(inx[i]) != 0)){
a <- unlist(inx[i])
avg.mismatch.dp[i] <- mean(gmkf.dp[a,i], na.rm = T)
avg.match.dp[i] <- mean(gmkf.dp[-a,i], na.rm = T)
}
else{
# No mismatches for this sample: mismatch average is undefined
avg.mismatch.dp[i] <- NA
avg.match.dp[i] <- mean(gmkf.dp[,i], na.rm = T)
}
}
# Long format: X1 = average DP, X2 = "match"/"mismatch" label
avg.dp <- data.frame(cbind(c(avg.match.dp, avg.mismatch.dp), c(rep("match", length(avg.match.dp)), rep("mismatch", length(avg.mismatch.dp)))),stringsAsFactors=FALSE)
avg.dp[,1] <- as.numeric(avg.dp[,1])
##create summary statistics for avg.dp based on gt match
summary(avg.dp[avg.dp$X2 == "match",1])
sd(avg.dp[avg.dp$X2 == "match",1])
summary(avg.dp[avg.dp$X2 == "mismatch",1])
sd(avg.dp[avg.dp$X2 == "mismatch",1], na.rm = T)
library(ggplot2)
pdf("hist.avg.mismatch.dp.pdf")
ggplot(avg.dp, aes(x=X1, fill=X2)) +
geom_histogram(alpha=0.4, position="identity") +
ggtitle("Histogram of average DP based on mismatch GT by sample") +
xlab("Avg DP per individual") +
scale_fill_discrete(name = "GT match type")
dev.off()
###check normalized gq by mismatch in GT across individual
# Same comparison for GMKF genotype quality (GQ), reusing inx from above.
avg.mismatch.gq <- vector(length = ncol(gmkf.gq))
avg.match.gq <- vector(length = ncol(gmkf.gq))
for (i in 1: ncol(gmkf.gq)){
if (length(unlist(inx[i]) != 0)){
a <- unlist(inx[i])
avg.mismatch.gq[i] <- mean(gmkf.gq[a,i], na.rm = T)
avg.match.gq[i] <- mean(gmkf.gq[-a,i], na.rm = T)
}
else{
avg.mismatch.gq[i] <- NA
avg.match.gq[i] <- mean(gmkf.gq[,i], na.rm = T)
}
}
avg.gq <- data.frame(cbind(c(avg.match.gq, avg.mismatch.gq), c(rep("match", length(avg.match.gq)), rep("mismatch", length(avg.mismatch.gq)))),stringsAsFactors=FALSE)
avg.gq[,1] <- as.numeric(avg.gq[,1])
##create summary statistics for avg.gq based on gt match
summary(avg.gq[avg.gq$X2 == "match",1])
sd(avg.gq[avg.gq$X2 == "match",1])
summary(avg.gq[avg.gq$X2 == "mismatch",1])
sd(avg.gq[avg.gq$X2 == "mismatch",1], na.rm = T)
library(ggplot2)
pdf("hist.avg.mismatch.gq.pdf")
ggplot(avg.gq, aes(x=X1, fill=X2)) +
geom_histogram(alpha=0.4, position="identity") +
ggtitle("Histogram of average GQ based on mismatch GT by sample") +
xlab("Avg GQ per individual") +
scale_fill_discrete(name = "GT match type")
dev.off()
###########targeted data############
###check normalized dp by mismatch in GT across individual
# Same analyses on the targeted data.  inx is recomputed with the arguments
# swapped; the discordant row indices per sample are the same either way.
inx <- mapply(function(x,y) which(x != y), target.gt, gmkf.gt)
avg.mismatch.dp <- vector(length = ncol(target.dp))
avg.match.dp <- vector(length = ncol(target.dp))
for (i in 1: ncol(target.dp)){
if (length(unlist(inx[i]) != 0)){
a <- unlist(inx[i])
avg.mismatch.dp[i] <- mean(target.dp[a,i], na.rm = T)
avg.match.dp[i] <- mean(target.dp[-a,i], na.rm = T)
}
else{
avg.mismatch.dp[i] <- NA
avg.match.dp[i] <- mean(target.dp[,i], na.rm = T)
}
}
avg.dp <- data.frame(cbind(c(avg.match.dp, avg.mismatch.dp), c(rep("match", length(avg.match.dp)), rep("mismatch", length(avg.mismatch.dp)))),stringsAsFactors=FALSE)
avg.dp[,1] <- as.numeric(avg.dp[,1])
##create summary statistics for avg.dp based on gt match
summary(avg.dp[avg.dp$X2 == "match",1])
sd(avg.dp[avg.dp$X2 == "match",1])
summary(avg.dp[avg.dp$X2 == "mismatch",1])
sd(avg.dp[avg.dp$X2 == "mismatch",1], na.rm = T)
library(ggplot2)
pdf("hist.avg.mismatch.dp.target.pdf")
ggplot(avg.dp, aes(x=X1, fill=X2)) +
geom_histogram(alpha=0.4, position="identity") +
ggtitle("Histogram of average DP based on mismatch GT by sample (Targeted Data)") +
xlab("Avg DP per individual") +
scale_fill_discrete(name = "GT match type")
dev.off()
###check normalized gq by mismatch in GT across individual
avg.mismatch.gq <- vector(length = ncol(target.gq))
avg.match.gq <- vector(length = ncol(target.gq))
for (i in 1: ncol(target.gq)){
if (length(unlist(inx[i]) != 0)){
a <- unlist(inx[i])
avg.mismatch.gq[i] <- mean(target.gq[a,i], na.rm = T)
avg.match.gq[i] <- mean(target.gq[-a,i], na.rm = T)
}
else{
avg.mismatch.gq[i] <- NA
avg.match.gq[i] <- mean(target.gq[,i], na.rm = T)
}
}
avg.gq <- data.frame(cbind(c(avg.match.gq, avg.mismatch.gq), c(rep("match", length(avg.match.gq)), rep("mismatch", length(avg.mismatch.gq)))),stringsAsFactors=FALSE)
avg.gq[,1] <- as.numeric(avg.gq[,1])
##create summary statistics for avg.gq based on gt match
summary(avg.gq[avg.gq$X2 == "match",1])
sd(avg.gq[avg.gq$X2 == "match",1])
summary(avg.gq[avg.gq$X2 == "mismatch",1])
sd(avg.gq[avg.gq$X2 == "mismatch",1], na.rm = T)
library(ggplot2)
pdf("hist.avg.mismatch.gq.target.pdf")
ggplot(avg.gq, aes(x=X1, fill=X2)) +
geom_histogram(alpha=0.4, position="identity") +
ggtitle("Histogram of average GQ based on mismatch GT by sample (Targeted Data)") +
xlab("Avg GQ per individual") +
scale_fill_discrete(name = "GT match type")
dev.off()
###look at individual level, pick some random samples, compare GQ based on match in GT
# One histogram per randomly chosen sample, split by whether the GT call at
# each position matched between platforms.  The five hand-copied blocks of the
# original are collapsed into a loop; file names and titles are preserved
# exactly ("sample.gq.comparison.pdf", "sample.gq.comparison2.pdf", ...).
# NOTE(review): `inx` here is the target/gmkf mismatch index list computed in
# the "targeted data" section above -- the indices are the same either way.
sample.inx <- sample(ncol(gmkf.gq), 5, replace = FALSE)
for (i in seq_along(sample.inx)) {
  a <- unlist(inx[sample.inx[i]])
  individual.gq <- data.frame(gmkf.gq[, sample.inx[i]])
  individual.gq$mark <- "match"
  individual.gq$mark[a] <- "mismatch"
  # Column kept named "dp" for the ggplot aes() below (as in the original)
  colnames(individual.gq)[1] <- "dp"
  fname <- if (i == 1) "sample.gq.comparison.pdf" else paste0("sample.gq.comparison", i, ".pdf")
  pdf(fname)
  # Inside a loop ggplot objects are not auto-printed; print() explicitly so
  # each PDF actually contains the plot.
  print(
    ggplot(individual.gq, aes(x = dp, fill = mark)) +
      geom_histogram(alpha = 0.4, position = "identity") +
      ggtitle(paste0("Histogram of sample-level GQ based on mismatch GT by sample ", i)) +
      xlab("GQ for this sample across all positions") +
      scale_fill_discrete(name = "GT match type")
  )
  dev.off()
}
|
162739b10b0bb361c3daeb160d3f41b2cb8d4f1d
|
6112802e8e27d4550c851c06fd39a1091e77e71a
|
/ProjectEuler/problem002.R
|
59fb6d12cbca9729262c5ce2f5a47cce9317dc6f
|
[] |
no_license
|
jlopezsi/r.prevos.net
|
f4e2e79f208cf913cc721178be4e9d1e4bc4e6e9
|
5b21be9b8b2f5a19d32506f06fca3bcba2829a18
|
refs/heads/master
| 2020-03-15T10:31:21.442805
| 2018-04-17T01:15:14
| 2018-04-17T01:15:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 664
|
r
|
problem002.R
|
# Problem 2: Even Fibonacci numbers
# https://projecteuler.net/problem=2
# By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
# http://r.prevos.net/euler-problem-2/

# Solution 1: build the sequence with base R, then sum the even terms.
# (Vector growth via c() is fine here: only ~33 terms are below 4e6.)
fib <- c(1, 2) # Define first two numbers
while (max(fib) < 4E+06) {
  # Generate Fibonacci numbers until limit is reached
  len <- length(fib)
  fib <- c(fib, fib[len - 1] + fib[len])
}
answer <- sum(fib[fib %% 2 == 0])
print(answer)

# Solution 2: arbitrary-precision Fibonacci numbers via gmp::fibnum().
# FIX: the original evaluated fibnum(i) twice per iteration (once in the
# loop condition and again in the body); compute each term exactly once.
library(gmp)
i <- 1
answer <- 0
fib <- fibnum(i)
while (fib <= 4E6) {
  if (fib %% 2 == 0) answer <- answer + fib
  i <- i + 1
  fib <- fibnum(i)
}
print(answer)
|
e4bd3e4995f141450491bd6c27a412961bacbe10
|
ee503bac3ea764666106b3eff49406903f066d7d
|
/R/compute_hydat_peak_frequencies.R
|
9a51bbadc1aae8db77e2a04683a784bc32e749e6
|
[
"Apache-2.0"
] |
permissive
|
bcgov/fasstr
|
a90a88702543084c7d36c7f7386745d4c24672b7
|
10da0bb28e2f55d0b9c2b71de8b028f5a4071c21
|
refs/heads/main
| 2023-04-02T17:38:35.947960
| 2023-03-22T20:25:08
| 2023-03-22T20:25:08
| 108,884,386
| 61
| 14
|
Apache-2.0
| 2023-03-22T20:26:18
| 2017-10-30T17:23:30
|
R
|
UTF-8
|
R
| false
| false
| 8,323
|
r
|
compute_hydat_peak_frequencies.R
|
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Perform a frequency analysis on annual peak statistics from HYDAT
#'
#' @description Performs a volume frequency analysis on annual peak statistics (instantaneous minimums or maximums) extracted from
#' HYDAT. Calculates statistics from all years, unless specified. The \code{data} argument is not available. Analysis
#' methodology replicates that from \href{https://www.hec.usace.army.mil/software/hec-ssp/}{HEC-SSP}. Returns a list of tibbles
#' and plots.
#'
#' @inheritParams compute_frequency_analysis
#' @inheritParams compute_annual_frequencies
#' @param station_number A character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of
#' which to extract annual peak minimum or maximum instantaneous streamflow data from a HYDAT database. Requires \code{tidyhydat}
#' package and a HYDAT database.
#'
#' @return A list with the following elements:
#' \item{Freq_Analysis_Data}{Data frame with computed annual summary statistics used in analysis.}
#' \item{Freq_Plot_Data}{Data frame with co-ordinates used in frequency plot.}
#' \item{Freq_Plot}{ggplot2 object with frequency plot}
#' \item{Freq_Fitting}{List of fitted objects from fitdistrplus.}
#' \item{Freq_Fitted_Quantiles}{Data frame with fitted quantiles.}
#'
#' @seealso \code{\link{compute_frequency_analysis}}
#'
#' @examples
#' \dontrun{
#'
#' # Working examples (see arguments for further analysis options):
#'
#' # Compute an annual peak frequency analysis using default arguments (instantaneous lows)
#' results <- compute_hydat_peak_frequencies(station_number = "08NM116",
#' start_year = 1980,
#' end_year = 2010)
#'
#' # Compute an annual peak frequency analysis using default arguments (instantaneous highs)
#' results <- compute_hydat_peak_frequencies(station_number = "08NM116",
#' start_year = 1980,
#' end_year = 2010,
#' use_max = TRUE)
#'
#' }
#' @export
compute_hydat_peak_frequencies <- function(station_number,
                                           use_max = FALSE,
                                           use_log = FALSE,
                                           prob_plot_position = c("weibull", "median", "hazen"),
                                           prob_scale_points = c(.9999, .999, .99, .9, .5, .2, .1, .02, .01, .001, .0001),
                                           fit_distr = c("PIII", "weibull"),
                                           fit_distr_method = ifelse(fit_distr == "PIII", "MOM", "MLE"),
                                           fit_quantiles = c(.975, .99, .98, .95, .90, .80, .50, .20, .10, .05, .01),
                                           start_year,
                                           end_year,
                                           exclude_years,
                                           plot_curve = TRUE) {

  # Replicates the frequency analysis of the HEC-SSP program
  # (refer to Chapter 7 of the HEC-SSP user manual).

  ## ARGUMENT CHECKS
  ## ---------------

  # Missing optional arguments fall back to "no filtering" sentinels.
  if (missing(station_number)) {
    station_number <- NULL
  }
  if (missing(start_year)) {
    start_year <- 0
  }
  if (missing(end_year)) {
    end_year <- 9999
  }
  if (missing(exclude_years)) {
    exclude_years <- NULL
  }
  years_checks(start_year, end_year, exclude_years)

  if (!is.logical(use_log))
    stop("use_log must be logical (TRUE/FALSE).", call. = FALSE)
  if (!is.logical(use_max))
    stop("use_max must be logical (TRUE/FALSE).", call. = FALSE)
  if (!all(prob_plot_position %in% c("weibull", "median", "hazen")))
    stop("prob_plot_position must be one of weibull, median, or hazen.", call. = FALSE)
  if (!is.numeric(prob_scale_points))
    stop("prob_scale_points must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(prob_scale_points > 0 & prob_scale_points < 1))
    stop("prob_scale_points must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(fit_distr %in% c("weibull", "PIII")))
    stop("fit_distr must be one of weibull or PIII.", call. = FALSE)
  if (!is.numeric(fit_quantiles))
    stop("fit_quantiles must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  if (!all(fit_quantiles > 0 & fit_quantiles < 1))
    stop("fit_quantiles must be numeric and between 0 and 1 (not inclusive).", call. = FALSE)
  # Scalar conditions use short-circuit && (the original used elementwise &).
  if (fit_distr[1] == "weibull" && use_log)
    stop("Cannot fit Weibull distribution on log-scale.", call. = FALSE)
  if (fit_distr[1] != "PIII" && fit_distr_method[1] == "MOM")
    stop("MOM only can be used with PIII distribution.", call. = FALSE)

  if (is.null(station_number)) stop("A station_number must be provided.", call. = FALSE)
  if (length(station_number) != 1) stop("Only one station_number can be provided for this function.", call. = FALSE)
  if (!all(station_number %in% dplyr::pull(suppressMessages(tidyhydat::hy_stations()[1]))))
    stop("station_number listed does not exist in HYDAT.", call. = FALSE)

  # Get peak data: annual instantaneous peaks for the station, flow only,
  # restricted to MAX or MIN records depending on use_max.
  inst_peaks <- suppressMessages(suppressWarnings(tidyhydat::hy_annual_instant_peaks(station_number)))
  if (nrow(inst_peaks) == 0) stop("No peak data available for this station_number.", call. = FALSE)
  inst_peaks <- dplyr::filter(inst_peaks, Parameter == "Flow")
  if (nrow(inst_peaks) == 0) stop("No peak flow data available for this station_number.", call. = FALSE)
  inst_peaks <- dplyr::filter(inst_peaks, PEAK_CODE == ifelse(use_max, "MAX", "MIN"))
  if (use_max && nrow(inst_peaks) == 0) stop("No maximum peak flow data available for this station_number.", call. = FALSE)
  if (!use_max && nrow(inst_peaks) == 0) stop("No minimum peak flow data available for this station_number.", call. = FALSE)

  inst_peaks$Year <- as.numeric(format(as.Date(inst_peaks$Date), format = "%Y"))
  inst_peaks <- dplyr::select(inst_peaks, Year, Measure = PEAK_CODE, Value)
  inst_peaks <- dplyr::mutate(inst_peaks, Measure = paste0("Instantaneous ", ifelse(use_max, "Maximum", "Minimum")))

  # Filter peak data by the requested year range (vector filter: elementwise &)
  inst_peaks <- inst_peaks[inst_peaks$Year >= start_year & inst_peaks$Year <= end_year, ]
  inst_peaks <- dplyr::filter(inst_peaks, !(Year %in% exclude_years))

  # Data checks: the fitting routines need a minimum sample size.
  if (nrow(inst_peaks) < 3) stop(paste0("Need at least 3 years of observations for analysis. There are only ",
                                        nrow(inst_peaks),
                                        " years available."), call. = FALSE)

  Q_stat <- inst_peaks

  ## COMPUTE THE ANALYSIS
  ## -------------------------------
  analysis <- compute_frequency_analysis(data = Q_stat,
                                         events = "Year",
                                         values = "Value",
                                         measures = "Measure",
                                         use_max = use_max,
                                         use_log = use_log,
                                         prob_plot_position = prob_plot_position,
                                         prob_scale_points = prob_scale_points,
                                         fit_distr = fit_distr,
                                         fit_distr_method = fit_distr_method,
                                         fit_quantiles = fit_quantiles,
                                         plot_curve = plot_curve)
  return(analysis)
}
|
0ab86c9e14508212c8028d3c7ec88970f2b69351
|
65396ca6147fbb0dbed9c8bdf9c7671afb8dc210
|
/sbs_bayes/Untitled.R
|
e85c378743f99c180fc95a0fba6d6c7d3ae0eb01
|
[
"MIT"
] |
permissive
|
elahi/sbs_analysis
|
587f4352389cc7b208ffd1fd5b241137203bb25b
|
a3c05a84d4e74a6a6e6d9fd9a6d1bc8211748c91
|
refs/heads/master
| 2021-01-17T12:09:45.289489
| 2019-11-15T04:23:03
| 2019-11-15T04:23:03
| 39,157,467
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,605
|
r
|
Untitled.R
|
# Bayesian quantile regression (p-th quantile) fitted with JAGS via the
# asymmetric-Laplace exponential-mixture representation.
library(rjags)  # provides jags.model()/coda.samples(); was never loaded in the original

# Set-up the data generating mechanism
set.seed(666)
N <- 500
x <- runif(N, max = 10)
alpha <- 1
beta <- 2
y <- alpha + beta * x + rnorm(N, sd = .6 * x)  # heteroscedastic noise
p <- 0.95  # quantile of interest

# The dataset to be used for estimation
data_set <- list(y = y, x = x, p = p)
data_frame <- as.data.frame(data_set)[-3]

jags_code <- "
model{
for(i in 1:length(y)){
mu[i] <- alpha + beta * x[i]
w[i] ~ dexp(tau)
me[i] <- (1 - 2 * p) / (p * (1 - p)) * w[i] + mu[i]
pe[i] <- (p * (1 - p) * tau) / (2 * w[i])
y[i] ~ dnorm(me[i], pe[i])
y.new[i] ~ dnorm(me[i], pe[i])
}
# Regression Priors
alpha ~ dnorm(0, 1E-6)
beta ~ dnorm(0, 1E-6)
lsigma ~ dunif(-5, 15)
sigma <- exp(lsigma / 2)
tau <- pow(sigma, -2)
# bayesian p-values
sd.data <- sd(y)
sd.new <- sd(y.new)
p.sd <- step(sd.new - sd.data)
mean.data <- mean(y)
mean.new <- mean(y.new)
p.mean <- step(mean.new - mean.data)
}
"

# Init the model
n_iter <- 1000
jags_model <- jags.model(file = textConnection(jags_code), data = data_set,
                         n.chains = 4, n.adapt = n_iter / 2)

# Run some MCMC iterations
params <- c("alpha", "beta", "sigma", "y.new", "p.sd", "p.mean")
zj <- coda.samples(jags_model, params, n.iter = n_iter)

# Results.
# BUG FIX: coda.samples() returns an mcmc.list (one matrix per chain), so the
# original `zj$p.mean` / `zj$y.new` returned NULL, giving NaN p-values and an
# error in density().  Combine the chains into one data frame first.
post <- data.frame(do.call(rbind, zj))
t(apply(
  post, 2, function(x)
    c(mean = mean(x), quantile(x, c(0.005, 0.25, 0.5, 0.75, 0.95)))
))
c(p.mean = mean(post$p.mean), p.sd = mean(post$p.sd))

# Compare observed vs simulated (posterior predictive check)
hist(y, breaks = 20, freq = FALSE)
y_new_draws <- as.matrix(post[, grep("^y\\.new", names(post))])
lines(density(y_new_draws), col = "red")
|
ba7775242e8cced3e1ad8a29aa19e6cb8864a960
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.mobile/man/devicefarm_create_upload.Rd
|
399c1727802f8cfa107647d4203edfe086bf7b46
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 4,716
|
rd
|
devicefarm_create_upload.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/devicefarm_operations.R
\name{devicefarm_create_upload}
\alias{devicefarm_create_upload}
\title{Uploads an app or test scripts}
\usage{
devicefarm_create_upload(projectArn, name, type, contentType)
}
\arguments{
\item{projectArn}{[required] The ARN of the project for the upload.}
\item{name}{[required] The upload's file name. The name should not contain any forward slashes
(\code{/}). If you are uploading an iOS app, the file name must end with the
\code{.ipa} extension. If you are uploading an Android app, the file name
must end with the \code{.apk} extension. For all others, the file name must
end with the \code{.zip} file extension.}
\item{type}{[required] The upload's upload type.
Must be one of the following values:
\itemize{
\item ANDROID_APP
\item IOS_APP
\item WEB_APP
\item EXTERNAL_DATA
\item APPIUM_JAVA_JUNIT_TEST_PACKAGE
\item APPIUM_JAVA_TESTNG_TEST_PACKAGE
\item APPIUM_PYTHON_TEST_PACKAGE
\item APPIUM_NODE_TEST_PACKAGE
\item APPIUM_RUBY_TEST_PACKAGE
\item APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE
\item APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE
\item APPIUM_WEB_PYTHON_TEST_PACKAGE
\item APPIUM_WEB_NODE_TEST_PACKAGE
\item APPIUM_WEB_RUBY_TEST_PACKAGE
\item CALABASH_TEST_PACKAGE
\item INSTRUMENTATION_TEST_PACKAGE
\item UIAUTOMATION_TEST_PACKAGE
\item UIAUTOMATOR_TEST_PACKAGE
\item XCTEST_TEST_PACKAGE
\item XCTEST_UI_TEST_PACKAGE
\item APPIUM_JAVA_JUNIT_TEST_SPEC
\item APPIUM_JAVA_TESTNG_TEST_SPEC
\item APPIUM_PYTHON_TEST_SPEC
\item APPIUM_NODE_TEST_SPEC
\item APPIUM_RUBY_TEST_SPEC
\item APPIUM_WEB_JAVA_JUNIT_TEST_SPEC
\item APPIUM_WEB_JAVA_TESTNG_TEST_SPEC
\item APPIUM_WEB_PYTHON_TEST_SPEC
\item APPIUM_WEB_NODE_TEST_SPEC
\item APPIUM_WEB_RUBY_TEST_SPEC
\item INSTRUMENTATION_TEST_SPEC
\item XCTEST_UI_TEST_SPEC
}
If you call \code{\link[=devicefarm_create_upload]{create_upload}} with \code{WEB_APP}
specified, AWS Device Farm throws an \code{ArgumentException} error.}
\item{contentType}{The upload's content type (for example, \code{application/octet-stream}).}
}
\value{
A list with the following syntax:\preformatted{list(
upload = list(
arn = "string",
name = "string",
created = as.POSIXct(
"2015-01-01"
),
type = "ANDROID_APP"|"IOS_APP"|"WEB_APP"|"EXTERNAL_DATA"|"APPIUM_JAVA_JUNIT_TEST_PACKAGE"|"APPIUM_JAVA_TESTNG_TEST_PACKAGE"|"APPIUM_PYTHON_TEST_PACKAGE"|"APPIUM_NODE_TEST_PACKAGE"|"APPIUM_RUBY_TEST_PACKAGE"|"APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE"|"APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE"|"APPIUM_WEB_PYTHON_TEST_PACKAGE"|"APPIUM_WEB_NODE_TEST_PACKAGE"|"APPIUM_WEB_RUBY_TEST_PACKAGE"|"CALABASH_TEST_PACKAGE"|"INSTRUMENTATION_TEST_PACKAGE"|"UIAUTOMATION_TEST_PACKAGE"|"UIAUTOMATOR_TEST_PACKAGE"|"XCTEST_TEST_PACKAGE"|"XCTEST_UI_TEST_PACKAGE"|"APPIUM_JAVA_JUNIT_TEST_SPEC"|"APPIUM_JAVA_TESTNG_TEST_SPEC"|"APPIUM_PYTHON_TEST_SPEC"|"APPIUM_NODE_TEST_SPEC"|"APPIUM_RUBY_TEST_SPEC"|"APPIUM_WEB_JAVA_JUNIT_TEST_SPEC"|"APPIUM_WEB_JAVA_TESTNG_TEST_SPEC"|"APPIUM_WEB_PYTHON_TEST_SPEC"|"APPIUM_WEB_NODE_TEST_SPEC"|"APPIUM_WEB_RUBY_TEST_SPEC"|"INSTRUMENTATION_TEST_SPEC"|"XCTEST_UI_TEST_SPEC",
status = "INITIALIZED"|"PROCESSING"|"SUCCEEDED"|"FAILED",
url = "string",
metadata = "string",
contentType = "string",
message = "string",
category = "CURATED"|"PRIVATE"
)
)
}
}
\description{
Uploads an app or test scripts.
}
\section{Request syntax}{
\preformatted{svc$create_upload(
projectArn = "string",
name = "string",
type = "ANDROID_APP"|"IOS_APP"|"WEB_APP"|"EXTERNAL_DATA"|"APPIUM_JAVA_JUNIT_TEST_PACKAGE"|"APPIUM_JAVA_TESTNG_TEST_PACKAGE"|"APPIUM_PYTHON_TEST_PACKAGE"|"APPIUM_NODE_TEST_PACKAGE"|"APPIUM_RUBY_TEST_PACKAGE"|"APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE"|"APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE"|"APPIUM_WEB_PYTHON_TEST_PACKAGE"|"APPIUM_WEB_NODE_TEST_PACKAGE"|"APPIUM_WEB_RUBY_TEST_PACKAGE"|"CALABASH_TEST_PACKAGE"|"INSTRUMENTATION_TEST_PACKAGE"|"UIAUTOMATION_TEST_PACKAGE"|"UIAUTOMATOR_TEST_PACKAGE"|"XCTEST_TEST_PACKAGE"|"XCTEST_UI_TEST_PACKAGE"|"APPIUM_JAVA_JUNIT_TEST_SPEC"|"APPIUM_JAVA_TESTNG_TEST_SPEC"|"APPIUM_PYTHON_TEST_SPEC"|"APPIUM_NODE_TEST_SPEC"|"APPIUM_RUBY_TEST_SPEC"|"APPIUM_WEB_JAVA_JUNIT_TEST_SPEC"|"APPIUM_WEB_JAVA_TESTNG_TEST_SPEC"|"APPIUM_WEB_PYTHON_TEST_SPEC"|"APPIUM_WEB_NODE_TEST_SPEC"|"APPIUM_WEB_RUBY_TEST_SPEC"|"INSTRUMENTATION_TEST_SPEC"|"XCTEST_UI_TEST_SPEC",
contentType = "string"
)
}
}
\examples{
\dontrun{
# The following example creates a new Appium Python test package upload
# inside an existing project.
svc$create_upload(
name = "MyAppiumPythonUpload",
type = "APPIUM_PYTHON_TEST_PACKAGE",
projectArn = "arn:aws:devicefarm:us-west-2:123456789101:project:EXAMPLE-GUID-123-456"
)
}
}
\keyword{internal}
|
8834bcfac0aa07144eaacb3138781825ee72e27c
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH4/EX4.4.6/Ex4_4_6.R
|
1eaf8804a67cece9375e1d23224d5c7f8f3fc956
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 144
|
r
|
Ex4_4_6.R
|
# PAGE=94
# Interpolated quartiles from a grouped frequency table (Spiegel, Theory and
# Problems of Statistics, Example 4.4.6). Q1 and Q3 are obtained by linear
# interpolation within their classes, then the result is reported as the
# midpoint q2 plus/minus the semi-interquartile range q, in kg.
q1 <- 65.5 + (2 * 3) / 42   # first quartile (class interpolation)
q3 <- 68.5 + (10 * 3) / 27  # third quartile (class interpolation)
q <- (q3 - q1) / 2          # semi-interquartile range
q2 <- (q3 + q1) / 2         # midpoint of Q1 and Q3
q2 <- round(q2, 2)
q <- round(q, 2)
# Bug fix: cat() does not append a newline, so the two lines previously ran
# together in the output; "\n" added to separate them.
cat(q2, '+', q, 'kg\n')
cat(q2, '-', q, 'kg\n')
|
98e5bfd15aa6e4eb2b35ec7f615e27f8d8f7721f
|
7b73aadd5d76910a714e16ebfc31a27d2448aea2
|
/cachematrix.R
|
253e44fd2f37d06cef6a446cca968fcfb5a1e821
|
[] |
no_license
|
SUNILKUMARCHINNAMGARI/ProgrammingAssignment2
|
e64baae9c32cb2b7932b455813b35c9d4c7bf3bc
|
6883c67bb8cf6894003e3e905c4a788674fee23b
|
refs/heads/master
| 2020-12-25T05:02:37.790457
| 2014-06-21T17:14:15
| 2014-06-21T17:14:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 855
|
r
|
cachematrix.R
|
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. Returns a list of four accessor functions closing over the
## matrix `x` and its cached inverse `matinv`.
##
## Bug fix: the original did `matinv <<- solve(x)` at the top, which (a)
## eagerly inverted the matrix even if the inverse was never needed, and
## (b) used `<<-`, writing the inverse into the *global* environment rather
## than this closure. The cache must start as a local NULL and be filled
## lazily by cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  matinv <- NULL  # cached inverse; NULL until cacheSolve() computes it
  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    matinv <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a computed inverse in the cache.
  setmatinv <- function(inv) matinv <<- inv
  # Return the cached inverse (NULL if not yet computed).
  getmatinv <- function() matinv
  list(set = set, get = get,
       setmatinv = setmatinv,
       getmatinv = getmatinv)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), cacheSolve retrieves it from the cache.
## Extra arguments in `...` are forwarded to solve().
##
## Bug fix: the original computed the inverse into `m` but then called
## x$setmatinv(matinv) -- caching the stale NULL -- and returned `matinv`
## (also NULL). The freshly computed inverse must be cached and returned.
cacheSolve <- function(x, ...) {
  matinv <- x$getmatinv()
  if (!is.null(matinv)) {
    message("getting cached data")
    return(matinv)
  }
  data <- x$get()
  matinv <- solve(data, ...)   # compute the inverse
  x$setmatinv(matinv)          # cache it for subsequent calls
  matinv
}
|
6ae36b63ae4b4cbb9bb31f10b07b87d1e4a0c139
|
880c8d4a9401d2e08b62a23306fe5b5f4dfeeb78
|
/man/intersect_prices.Rd
|
b96bfd1db8e96f7d413f1699084f56711129453e
|
[
"MIT"
] |
permissive
|
zumthor86/OptionsAnalytics
|
6717ea8a76238f6a304171352e17e123db8dc088
|
a1a9d56a0c635729b333086272d8f8d3c4e8642c
|
refs/heads/master
| 2021-07-07T07:51:51.645454
| 2020-10-16T12:50:35
| 2020-10-16T12:50:35
| 196,432,023
| 8
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 388
|
rd
|
intersect_prices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/intersect_prices.R
\name{intersect_prices}
\alias{intersect_prices}
\title{Return common time series}
\usage{
intersect_prices(prices)
}
\arguments{
\item{prices}{List of price dataframes}
}
\value{
Input dataframes with common time series to all input dataframes
}
\description{
Return common time series
}
|
de311a9d097e2415ce30cc64b3a367750d19ccf0
|
5227e8fb4619e3b4212613f3f779df8b7c3706b2
|
/man/fake.phenos.Rd
|
206bc147588bcac30c3a502a4c20dfacde29b48f
|
[] |
no_license
|
phamasaur/qtlpvl
|
5a5b930b4f12d67c7cc00840a63e9725da3dbc2b
|
9c199f47c21a8deb5a50cda5c8b5423c49efa15c
|
refs/heads/master
| 2023-04-19T05:30:01.716383
| 2015-08-26T05:32:19
| 2015-08-26T05:32:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,222
|
rd
|
fake.phenos.Rd
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/qtlpvl-package.R
\docType{data}
\name{fake.phenos}
\alias{fake.phenos}
\title{Simulated Small Gene Expression Data Set}
\format{A matrix with 120 individuals in rows, 10 traits in columns.}
\description{
Simulated gene expression data, with 10 expression traits for 120
individuals.
}
\details{
These data were simulated using the genotype data from
the `listeria` data set (an F$_2$ population) provided with
R/qtl. The phenotypes were simulated using two markers from
chromosome 1 as QTL, with the first QTL having an additive allelic
effect, and with one of the alleles at the second QTL being
strictly dominant. There are 10 phenotypes. The first 5 are
controlled by the first QTL, and the other 5 traits are controlled
by the second QTL (and with a negative and larger effect). The 10
phenotypes were generated with these QTL effects plus independent,
normally distributed residual variation. Treating these traits as
gene expression measurements, we assigned genomic positions at
random. The phenotype data is stored in matrix `fake.phenos` and their
positions are stored in data frame `fake.probepos`.
}
\keyword{datasets}
|
596db34a6c5de08bf7220ad7c3ec5bb32d1665eb
|
f090937cedacfd819294faa2224cb3c8f24737f3
|
/R/dotplot.R
|
f7d49767c312e57ea5ccabc9e0b3987b84fb37dd
|
[] |
no_license
|
vitkl/DOSE
|
1334bfa29e7eeba322550232c98d75d39b8d1707
|
9c09b0928f6d31014733d3248b6a638b07b604a8
|
refs/heads/master
| 2021-01-20T13:17:45.418994
| 2017-05-08T09:18:54
| 2017-05-08T09:18:54
| 90,470,418
| 0
| 0
| null | 2017-05-06T14:57:30
| 2017-05-06T14:57:29
| null |
UTF-8
|
R
| false
| false
| 1,991
|
r
|
dotplot.R
|
##' @importFrom ggplot2 fortify
##' @importFrom ggplot2 ggplot
##' @importFrom ggplot2 aes_string
##' @importFrom ggplot2 geom_point
##' @importFrom ggplot2 scale_color_gradient
##' @importFrom ggplot2 xlab
##' @importFrom ggplot2 ylab
##' @importFrom ggplot2 ggtitle
##' @author Guangchuang Yu; modified by Vitalii Kleshchevnikov to order and
##'     color by any column and to allow a custom x-axis message. On the
##'     plot, terms are ordered high to low, top to bottom.
dotplot_internal <- function(object, x="geneRatio", colorBy="p.adjust", orderBy = "GeneRatio", showCategory=10, split=NULL, font.size=12, title="", xlabel = "") {
    # Restrict colorBy to the supported statistics.
    colorBy <- match.arg(colorBy, c("pvalue", "p.adjust", "qvalue", "enrichmentScore"))
    # x selects the x aesthetic; the other quantity is mapped to point size.
    if (x == "geneRatio" || x == "GeneRatio") {
        x <- "GeneRatio"
        size <- "Count"
    } else if (x == "count" || x == "Count") {
        x <- "Count"
        size <- "GeneRatio"
    } else {
        stop("x should be geneRatio or count...")
    }
    df <- fortify(object, showCategory = showCategory, split = split)
    ## GeneRatio already parsed in fortify
    ## df$GeneRatio <- parse_ratio(df$GeneRatio)
    # Gradient direction: for enrichment scores high values are highlighted
    # (red); for p/q-values low values are highlighted (red).
    if (colorBy == "enrichmentScore") { lows <- "blue"; highs <- "red" }
    if (colorBy != "enrichmentScore") { lows <- "red"; highs <- "blue" }
    # Order terms by orderBy so the strongest terms appear at the top.
    idx <- order(df[, orderBy], decreasing = FALSE)
    df$Description <- factor(df$Description, levels = df$Description[idx])
    ggplot(df, aes_string(x = x, y = "Description", size = size, color = colorBy)) +
        geom_point() + scale_color_gradient(low = lows, high = highs) +
        ggtitle(title) + theme_dose(font.size) +
        ylab(ifelse(orderBy == "p.adjust", ">> adjusted p-value increasing >>", "")) +
        # Use inherits() rather than class(x) == "cls": class() can return a
        # vector of classes, making the comparison length > 1.
        xlab(ifelse(x == "GeneRatio" & inherits(object, "gseaResult") & xlabel != "",
                    xlabel, # fraction of proteins from a gene set which are over- or underrepresented
                    ifelse(x == "GeneRatio" & inherits(object, "enrichResult") & xlabel != "",
                           xlabel, # fraction of proteins from a gene set in the analysed set
                           x)))
}
|
4689da920998233ce8b0f9337aeec53dce26b2ec
|
7f6a97022dfd69ee3c166d4fa2915ff70dd38ae2
|
/R/Rkoder/unrate/lasso_generaliseringer/lars/lasso/bic/covtest.R
|
dc9fa504f69ebd05a65715651bb1ebd2ebc13022
|
[] |
no_license
|
TrineGraff/p10
|
1fc59edab01c802721a5c730ecfa7c08be9e86e5
|
a6c3088af873507af4824f52a1e73c5c6e5c172e
|
refs/heads/master
| 2021-04-30T14:39:44.881380
| 2018-06-20T08:51:04
| 2018-06-20T08:51:04
| 121,222,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
r
|
covtest.R
|
# Covariance-test significance analysis for the lasso path fitted in
# insample.R (the sourced script is expected to define lasso_fit, x_train,
# y_train and lasso_bic -- TODO confirm against that file).
source("shrinkage_metoder/lars/lasso/bic/insample.R")
# Lockhart et al. covariance test along the LARS/lasso path.
covTest(lasso_fit, x_train, y_train)
# Indices of the coefficients that are nonzero at the BIC-selected fraction.
which(coef(lasso_fit, s = lasso_bic$f_hat, mode = "fraction")!=0)
# variables 21, 35, 31, 32, 19, 79 are added and then removed again
colnames(x_train)[79]
# variable 78 is added, removed, and added again.
|
bb497267ae3420255758b4d341b441453fe2b0ea
|
b844a7aa7d03d64383931bf0b1dce19888e55e33
|
/11dtametadata/detecterror.R
|
5d21f0dd4d2c8718c5ab97cad49246a0828d65d9
|
[
"MIT"
] |
permissive
|
mephas/datasets
|
0c5d99944ac0eeac67b3c7c6d432dc424394a615
|
f89d3511efe7efe2f96a1fe45ac9644247055ebf
|
refs/heads/master
| 2023-01-04T09:28:11.020770
| 2022-12-23T08:32:43
| 2022-12-23T08:32:43
| 231,333,695
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
detecterror.R
|
data <- read.csv("anti-ccp.csv")
library(dtametasa)
# Refit the sensitivity-analysis model over a grid of selection
# probabilities p; a failed fit yields a column of five NAs instead of
# aborting the whole sweep.
sapply(
  seq(1, 0.1, -0.2),
  function(p) {
    fit <- try(dtametasa.rc(data, p = p))
    if (inherits(fit, "try-error")) rep(NA, 5) else fit$par
  }
)
|
685f079f8e67924bd9671b3f4072c5d34c390164
|
818eaa5d5a84d25a61f16e9567b82d80344984aa
|
/presentation_times/presentation_times.R
|
e8a68688a28bf3d98cfce8cd17811de322e55879
|
[] |
no_license
|
justone/r
|
6c7157774aa55a80c9352162b92c3cd1d685116c
|
5ed460ed95fb9bd650090d1cf59581e75796a49d
|
refs/heads/master
| 2021-01-10T18:30:16.596262
| 2012-05-11T03:45:42
| 2012-05-11T03:45:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,919
|
r
|
presentation_times.R
|
#!/usr/bin/env Rscript
# Plot weekly presentation lengths: one faceted chart per team and one chart
# of the total time, each written to its own PNG file.
# for melt
library("reshape")
# for pretty plotting
library("ggplot2")
# for help with date labels and scaling
library("scales")
# Read in raw times (wide format: one row per team, one column per date)
times.raw <- read.csv('decimal_times.csv')
# make a copy to clean up
times.clean <- times.raw
# use the first column (team names) as row names, then drop it
rownames(times.clean) <- times.clean[,1]
times.clean <- times.clean[,-1]
# transpose and melt into long format: one row per (date, team) observation
times.melted <- melt(t(times.clean))
# set column names
colnames(times.melted) <- c('Date', 'Team', 'Time')
# parse the "X%m.%d.%Y" column headers into POSIXct dates
times.melted$Date <- strptime(times.melted$Date, "X%m.%d.%Y")
times.melted$Date <- as.POSIXct(times.melted$Date)
# subset for testing
#times.test <- times.melted[times.melted$Team == 'Lulu',]
# or could do it this way
# times.test <- subset(times.melted, Team == 'Lulu')
# just the teams (drop the precomputed Total row)
times.teams <- times.melted[times.melted$Team != 'Total',]
# order the factor so facets appear in presentation order
times.teams$Team <- factor(times.teams$Team, levels=c('Intro','Lulu','Blinky','Rosco','Fizbo','Skiddle','Checker','Twinkle','Bonker','Finish'))
# filter out the NAs (weeks a team did not present)
times.teams.clean <- times.teams[!is.na(times.teams$Time),]
# just the total
times.total <- times.melted[times.melted$Team == 'Total',]
# plot all teams
png(filename = "team_times.png", width=1200, height=750)
ggplot(times.teams.clean, aes(Date, Time)) + scale_x_datetime(labels = date_format("%m/%d"), breaks = date_breaks("1 week")) + geom_line(aes(colour=Team)) + geom_point(aes(colour=Team)) + facet_wrap(~ Team) + geom_hline(aes(yintercept=0.75)) + labs(x = "Presentation Date", y = "Length (Minutes)") + opts(title = 'Team Presentation Time')
# Bug fix: close the first PNG device before opening the second. Previously
# only one dev.off() appeared at the end, so it closed the second device and
# left team_times.png's device open (file truncated/unfinished).
dev.off()
# plot total time
png(filename = "total_time.png", width=750, height=425)
ggplot(times.total, aes(Date, Time)) + geom_smooth() + geom_point() + geom_hline(aes(yintercept=5)) + labs(x = "Presentation Date", y = "Length (Minutes)") + opts(title = 'Total Presentation Time')
dev.off()
# NOTE(review): opts() was removed from modern ggplot2; if the ggplot2
# version is upgraded, replace opts(title = ...) with ggtitle(...).
|
868bc5650d7f704955c24f05679210f0e7aa3c09
|
ac8ba549475fe7caa6cc414b64c48bbc8c78dbdb
|
/chapter5/chapter5_resampling_lab.R
|
28d0f1a090fbaf67a85f96e4141d5169154f3d7c
|
[] |
no_license
|
Stochastic-Squirrel/ISLR_notes
|
5c2f1eec0b71b258e491c929e8ff7e0e141a07dc
|
3044dcffca23aba2077ff4f727bad339e2f9e843
|
refs/heads/master
| 2021-09-12T11:06:23.026588
| 2018-04-16T08:18:06
| 2018-04-16T08:18:06
| 113,603,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,203
|
r
|
chapter5_resampling_lab.R
|
# ISLR Chapter 5 lab: resampling methods -- the validation-set approach,
# leave-one-out cross-validation (LOOCV), k-fold cross-validation, and the
# bootstrap -- applied to the Auto and Portfolio data sets from ISLR.
library(tidyverse)
library(ISLR)
library(boot)
# Fix the RNG so the random train/validation split is reproducible.
set.seed(1)
# THE VALIDATION SET APPROACH
# sample 196 indices without replacement from 1:392 (Auto has 392 rows)
train <- sample(392,196)
lm_model <- lm(mpg~horsepower, data = Auto , subset = train)
# Validation MSE = average of (actual - predicted)^2 on the held-out half
mse_lm_model <- (Auto[-train,"mpg"] - predict(lm_model , newdata=Auto[-train,]))^2 %>% mean()
# Validation MSE of the quadratic polynomial regression
poly2_model <- lm(mpg~poly(horsepower,2), data = Auto , subset = train)
mse_poly2_model <- (Auto[-train,"mpg"] - predict(poly2_model , newdata=Auto[-train,]))^2 %>% mean()
# ... and of the cubic polynomial regression
poly3_model <- lm(mpg~poly(horsepower,3), data = Auto , subset = train)
mse_poly3_model <- (Auto[-train,"mpg"] - predict(poly3_model , newdata=Auto[-train,]))^2 %>% mean()
# NOTE: it is advisable to use poly(x, power) rather than I(x^power) or
# x^power when specifying the model formula -- poly() uses orthogonal
# polynomials (read notes on orthogonal polynomials for regression).
# LEAVE-ONE-OUT CROSS-VALIDATION (LOOCV)
# note: glm() with no family argument fits the same model as lm(), but
# produces an object cv.glm() can use
glm_fit <- glm(mpg~horsepower , data = Auto)
cv_error <- cv.glm(data = Auto , glm_fit)
# cv.glm() computes the cross-validated estimate of test MSE;
# cv_error$delta contains the estimated test MSE, the average of the
# per-observation squared errors. (A case where its two components differ
# is explored below in the k-fold section.)
# LOOCV test-MSE estimates for polynomial degrees 1..5
cv_error <- numeric(5)
for (i in 1:5){
  glm_fit <- glm(mpg~poly(horsepower,i) , data = Auto)
  cv_error[i] <- cv.glm(data = Auto , glm_fit)$delta[1]
}
# NOTE: LOOCV MSE estimates are deterministic -- every run tests the same
# models against the same left-out points, so there is no variability in
# the estimates.
# K-FOLD CROSS-VALIDATION
# cv.glm() also does K-fold: leaving K unset gives K = n, i.e. LOOCV
set.seed(17)
cv_error <- numeric(10)
for (i in 1:10){
  glm_fit <- glm(mpg~poly(horsepower,i) , data = Auto)
  cv_error[i] <- cv.glm(data = Auto , glm_fit, K = 10)$delta[1]
}
# Much faster than LOOCV, and a more useful estimate of test MSE: although
# k-fold has slightly more bias, it reduces variance -- instead of averaging
# MSE_i's of one point each (LOOCV), it averages MSE_k's that are themselves
# averages (an average of averages).
# NOTE: the two delta values differ slightly here -- the first is the
# standard K-fold MSE estimate, the second is a bias-corrected estimate.
# BOOTSTRAP
alpha_fn <- function( data , index){ # the statistic to bootstrap
  X <- data$X[index]
  Y <- data$Y[index]
  return((var(Y)- cov(X,Y)) / (var(X)+var(Y)-2*cov(X,Y)) )
}
# estimates alpha, the portfolio allocation fraction that minimizes risk
set.seed(1)
alpha_fn(Portfolio,sample(100,100,replace = T))
# Now bootstrap the above to get a robust standard error for alpha
boot(Portfolio , alpha_fn , R=1000)
# Shows SE(alpha) = 0.0886 with point estimate alpha = 0.57583
# Apply the same idea to linear-regression coefficient estimates, compared
# with the analytical OLS formulae
boot_fn <- function(data , index){
  return({
    coef(lm(mpg~horsepower, data= data , subset = index))
  })
}
boot_fn(Auto , sample(392,392, replace = TRUE))
# Bootstrap the coefficient estimates
boot(data= Auto , boot_fn , R = 1000)
# compare against the formula-based estimates reported by lm()
summary(lm(mpg~horsepower,data=Auto))$coef
# There is a noticeable difference in the standard-error estimates. Why?
# The standard formulae rely on the sigma^2 (noise variance) estimate being
# accurate. It is only accurate if the linear model specification is
# correct; in fact the relationship is non-linear, so that estimate is
# inflated. The bootstrap is therefore more accurate here.
# From the textbook questions: the probability that an arbitrary
# observation O_j appears in a bootstrap resample of size n is
# 1 - (1 - 1/n)^n
# NOTE(review): data_frame() is deprecated in modern tibble -- tibble()
# is the current equivalent.
bootstrap_probs <- data_frame(n=1:100000,probability = numeric(100000))
bootstrap_probs$probability <- 1 - (1- 1/(bootstrap_probs$n) )^(bootstrap_probs$n)
ggplot(bootstrap_probs,aes(x=n , y=probability)) +ylim(0,1)+ geom_point()
# Notice how quickly it approaches the asymptote (1 - 1/e)
|
9c80b288460a4be4626bbc581d1a8aa0198c035e
|
c410e65345f0c2d4cc6b68cdcac433696c887e97
|
/regression_curv.R
|
71a0551a4ff1bc5c0fed0d10b93b7b52fdad1d18
|
[] |
no_license
|
bgorillaz/Capstone
|
033467ef312048f8466aeb774373a2986c94f978
|
7b027a8875620f28c3e877d3c21fbe42436610b0
|
refs/heads/master
| 2023-01-22T12:29:14.955985
| 2020-12-08T02:54:11
| 2020-12-08T02:54:11
| 319,498,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,528
|
r
|
regression_curv.R
|
# Load packages ----
library(data.table)
library(pscl)        # zeroinfl(), vuong()
library(MASS)        # glm.nb()
library(ndot.tools)
# Set working directory
# NOTE(review): hard-coded absolute path; breaks on any other machine --
# consider relative paths or a project-root helper.
setwd("/Users/ryanloos/Capstone/")
# Load the cleaned horizontal-curve data
curv <- fread("clean_data/curv_for_model.csv")
# Re-factorize columns: fread() reads strings as character, so convert
# every character column back to a factor for modeling.
# NOTE(review): inherits(curv[[var]], "character") would be the more robust
# class test than class(x) == "character".
for(var in colnames(curv)) {
  if (class(curv[[var]]) == "character") {
    curv[[var]] <- as.factor(curv[[var]])
  }
}
# Keep the unique curve IDs aside, then drop the ID column so it is not
# used as a model covariate.
curv_id <- curv$CRSP2_Unique_Curve_ID
curv[, CRSP2_Unique_Curve_ID := NULL]
#### POISSON BASE ----
# Baseline Poisson count model of severe crashes on curve geometry,
# traffic and roadside covariates.
summary(fit_p <- glm(Severe_Crashes ~ Speed_Limit + Area_Type + ADT_vpd + Radius_feet + Lane_Width +
                       Shoulder_Type + Outside_Edge_Risk + Total_Outside_Shoulder_Width + Total_Xsect_width +
                       Adjacent_Intersection + Visual_Trap + Curve_Lighting,
                     data = curv, family = poisson(link = "log")))
# Overdispersion statistic: Pearson chi-square / residual df
sum(resid(fit_p, type = "pearson")^2) / (nrow(curv) - length(coef(fit_p))) #1.13
# Deviance goodness-of-fit test
pchisq(fit_p$deviance, df=fit_p$df.residual, lower.tail=F) #pval of 1, do not reject null hypothesis. Model correctly specified.
logLik(fit_p) #-779
AIC(fit_p) #1598
# Expected number of zero-crash curves under the Poisson fit vs observed
sum(dpois(0,fitted(fit_p))) #7284
curv[,.N, by = Severe_Crashes] #7292
#### NEGATIVE BINOMIAL BASE ----
# Same specification with a negative binomial response to allow
# overdispersion.
summary(fit_nb <- glm.nb(Severe_Crashes ~ Speed_Limit + Area_Type + ADT_vpd + Radius_feet + Lane_Width +
                           Shoulder_Type + Outside_Edge_Risk + Total_Outside_Shoulder_Width + Total_Xsect_width +
                           Adjacent_Intersection + Visual_Trap + Curve_Lighting, data = curv))
# Overdispersion statistic
sum(resid(fit_nb, type = "pearson")^2) / (nrow(curv) - length(coef(fit_nb))) #1.13
# Deviance goodness-of-fit test
pchisq(fit_nb$deviance, df=fit_nb$df.residual, lower.tail=F) #pval of 1, do not reject null hypothesis. Model correctly specified.
logLik(fit_nb) #-766
AIC(fit_nb) #1573
# Expected number of zero-crash curves under the NB fit
sum(dnbinom(0, mu = fitted(fit_nb), size = fit_nb$theta)) #7294
#### ZERO INFLATED POISSON BASE ----
# Zero-inflated Poisson: count model on the roadway covariates, with the
# zero-inflation component driven by traffic volume (ADT).
# Bug fix: zeroinfl.control(maxit = 10000) was previously passed as a second
# argument to summary(), where it was silently ignored; it must be supplied
# via the control argument of zeroinfl() to actually raise the iteration cap.
summary(fit_zip <- zeroinfl(Severe_Crashes ~ Speed_Limit + Area_Type + Lane_Width +
                              Shoulder_Type + Outside_Edge_Risk + Total_Outside_Shoulder_Width +
                              Adjacent_Intersection + Visual_Trap + Curve_Lighting | ADT_vpd,
                            data = curv,
                            control = zeroinfl.control(maxit = 10000)))
# Overdispersion statistic: Pearson chi-square / residual df
sum(resid(fit_zip, type = "pearson")^2) / (nrow(curv) - length(coef(fit_zip))) #1.13
# NOTE(review): zeroinfl objects may not carry a $deviance component, so this
# deviance test likely evaluates on NULL -- verify.
pchisq(fit_zip$deviance, df=fit_zip$df.residual, lower.tail=F) #pval of 1, do not reject null hypothesis. Model correctly specified.
logLik(fit_zip) #-784
AIC(fit_zip) #1573
# Expected number of zero-crash curves under the ZIP fit
sum(predict(fit_zip, type = "prob")[,1])
# Vuong tests: ZIP vs NB and ZIP vs plain Poisson
vuong(fit_zip, fit_nb) #vuong test indicates that the zero inflated model is better than the NB model
vuong(fit_zip, fit_p)
#### ZERO INFLATED NEGATIVE BINOMIAL BASE ----
# Zero-inflated negative binomial: same specification as the ZIP model but
# with a negative binomial count component.
summary(fit_zinb <- zeroinfl(Severe_Crashes ~ Speed_Limit + Area_Type + Lane_Width +
                               Shoulder_Type + Outside_Edge_Risk + Total_Outside_Shoulder_Width +
                               Adjacent_Intersection + Visual_Trap + Curve_Lighting | ADT_vpd, data = curv, dist = "negbin"))
# Overdispersion statistic: Pearson chi-square / residual df
sum(resid(fit_zinb, type = "pearson")^2) / (nrow(curv) - length(coef(fit_zinb))) #1.04
pchisq(fit_zinb$deviance, df=fit_zinb$df.residual, lower.tail=F) #pval of 1, do not reject null hypothesis. Model correctly specified.
logLik(fit_zinb) #-773
AIC(fit_zinb) #1587
# Expected number of zero-crash curves under the ZINB fit
sum(predict(fit_zinb, type = "prob")[,1]) #7292
# Vuong comparisons against the simpler models
vuong(fit_zinb, fit_nb) #vuong test indicates that the zero inflated model is better than the NB model
vuong(fit_zinb, fit_p)
vuong(fit_zinb, fit_zip)
# NOTE(review): this repeats the ZINB-vs-NB comparison from above.
vuong(fit_zinb, fit_nb) #raw = 1.51 #sig evidence to use zinb > nb
1/fit_nb$theta #4.36
# The theta value would indicate an NB model, but with a Vuong statistic of
# 1.51 the test is inconclusive.
# Rootograms for visual goodness-of-fit comparison
# NOTE(review): rootogram(), autoplot() and plot_grid() are not provided by
# the packages loaded above -- presumably countreg/topmodels, ggplot2 and
# cowplot (or re-exports from ndot.tools); verify the required libraries.
root_p <- rootogram(fit_p, style = "hanging", plot = F)
root_nb <- rootogram(fit_nb, style = "hanging", plot = F)
root_zip <- rootogram(fit_zip, style = "hanging", plot = F)
root_zinb <- rootogram(fit_zinb, style = "hanging", plot = F)
autoplot(root_p, title = "Base Rural Segment - Poisson")
autoplot(root_nb)
autoplot(root_zip)
autoplot(root_zinb)
# Common x-axis so the four panels are comparable
xlims <- scale_x_continuous(breaks = 0:4, limits = c(-1,4)) #breaks = 0:6, limits = c(-1,6)
plot_grid(autoplot(root_p) + ggtitle("Poisson") + xlims,autoplot(root_nb) + ggtitle("NB") + xlims,
          autoplot(root_zip) + ggtitle("ZIP") + xlims,autoplot(root_zinb) + ggtitle("ZINB") + xlims)
# Export curve IDs with observed counts and base-NB fitted values
curve_base <- as.data.table(cbind(as.character(curv_id), curv$Severe_Crashes, round(fit_nb$fitted.values,3)))
fwrite(curve_base, "clean_data/curv_base_fitted.csv")
#### FINAL ----
# Stepwise (AIC) selection starting from the full NB model, then refit the
# selected specification.
summary(full_nb <- glm.nb(Severe_Crashes ~ ., data = curv))
step(full_nb, direction = "both")
summary(step_nb <- glm.nb(formula = Severe_Crashes ~ Length_feet + Delineation +
                            ADT_vpd + Adjacent_Intersection + Visual_Trap + Outside_Edge_Risk +
                            Urban_Rural, data = curv, init.theta = 0.2208415343, link = log))
# Diagnostics for the stepwise-selected NB model
sum(resid(step_nb, type = "pearson")^2) / (nrow(curv) - length(coef(step_nb))) #1.13
pchisq(step_nb$deviance, df=step_nb$df.residual, lower.tail=F) #pval of 1, do not reject null hypothesis. Model correctly specified.
logLik(step_nb) #-766
AIC(step_nb) #1573
sum(dnbinom(0, mu = fitted(step_nb), size = step_nb$theta)) #7294
vuong(step_nb, fit_nb)
root_step <- rootogram(step_nb, style = "hanging", plot = F)
autoplot(root_step)
# Export fitted values of the final model
curve_full <- as.data.table(cbind(as.character(curv_id), round(step_nb$fitted.values,3)))
fwrite(curve_full, "clean_data/curv_full_fitted.csv")
# Largest per-curve differences between base and stepwise NB fits
# NOTE(review): 7448 appears to be a hard-coded row count -- confirm it
# matches nrow(curv).
fit_compare <- fit_nb$fitted.values - step_nb$fitted.values
fit_compare <- sort(fit_compare, decreasing = T)
summary(fit_compare[1:7448])
|
b26632445aebf06497ecaad22f5bd4671f366bf4
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/sdetorus/man/periodicTrapRule1D.Rd
|
eda0348a76d59b70d3dcfc810b260da3d3536a23
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,358
|
rd
|
periodicTrapRule1D.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary.R
\name{periodicTrapRule1D}
\alias{periodicTrapRule1D}
\alias{periodicTrapRule2D}
\alias{periodicTrapRule3D}
\alias{integrateSimp1D}
\alias{integrateSimp2D}
\alias{integrateSimp3D}
\title{Quadrature rules in 1D, 2D and 3D}
\usage{
periodicTrapRule1D(fx, endsMatch = FALSE, na.rm = TRUE,
lengthInterval = 2 * pi)
periodicTrapRule2D(fxy, endsMatch = FALSE, na.rm = TRUE,
lengthInterval = rep(2 * pi, 2))
periodicTrapRule3D(fxyz, endsMatch = FALSE, na.rm = TRUE,
lengthInterval = rep(2 * pi, 3))
integrateSimp1D(fx, lengthInterval = 2 * pi, na.rm = TRUE)
integrateSimp2D(fxy, lengthInterval = rep(2 * pi, 2), na.rm = TRUE)
integrateSimp3D(fxyz, lengthInterval = rep(2 * pi, 3), na.rm = TRUE)
}
\arguments{
\item{fx}{vector containing the evaluation of the function to integrate over a uniform grid in \eqn{[x_1,x_2]}.}
\item{endsMatch}{flag to indicate whether the values of the last entries of \code{fx}, \code{fxy} or \code{fxyz} are the ones in the first entries (elements, rows, columns, slices). See examples for usage.}
\item{na.rm}{logical. Should missing values (including \code{NaN}) be
removed?}
\item{lengthInterval}{vector containing the lengths of the intervals of integration.}
\item{fxy}{matrix containing the evaluation of the function to integrate over a uniform grid in \eqn{[x_1,x_2]\times[y_1,y_2]}.}
\item{fxyz}{three dimensional array containing the evaluation of the function to integrate over a uniform grid in \eqn{[x_1,x_2]\times[y_1,y_2]\times[z_1,z_2]}.}
}
\value{
The value of the integral.
}
\description{
Quadrature rules for definite integrals over intervals in 1D, \eqn{\int_{x_1}^{x_2} f(x)dx}, rectangles in 2D,\cr \eqn{\int_{x_1}^{x_2}\int_{y_1}^{y_2} f(x,y)dydx} and cubes in 3D, \eqn{\int_{x_1}^{x_2}\int_{y_1}^{y_2}\int_{z_1}^{z_2} f(x,y,z)dzdydx}. The trapezoidal rules assume that the function is periodic, whereas the Simpson rules work for arbitrary functions.
}
\details{
The simple trapezoidal rule has a very good performance for periodic functions in 1D and 2D (the quadrature error decreases very quickly with the number of grid points for smooth periodic integrands). The higher dimensional extensions are obtained by iterative usage of the 1D rules.
}
\examples{
# In 1D. True value: 3.55099937
N <- 21
grid <- seq(-pi, pi, l = N)
fx <- sin(grid)^2 * exp(cos(grid))
periodicTrapRule1D(fx = fx, endsMatch = TRUE)
periodicTrapRule1D(fx = fx[-N], endsMatch = FALSE)
integrateSimp1D(fx = fx, lengthInterval = 2 * pi)
integrateSimp1D(fx = fx[-N]) # Worse, of course
# In 2D. True value: 22.31159
fxy <- outer(grid, grid, function(x, y) (sin(x)^2 * exp(cos(x)) +
sin(y)^2 * exp(cos(y))) / 2)
periodicTrapRule2D(fxy = fxy, endsMatch = TRUE)
periodicTrapRule2D(fxy = fxy[-N, -N], endsMatch = FALSE)
periodicTrapRule1D(apply(fxy[-N, -N], 1, periodicTrapRule1D))
integrateSimp2D(fxy = fxy)
integrateSimp1D(apply(fxy, 1, integrateSimp1D))
# In 3D. True value: 140.1878
fxyz <- array(fxy, dim = c(N, N, N))
for (i in 1:N) fxyz[i, , ] <- fxy
periodicTrapRule3D(fxyz = fxyz, endsMatch = TRUE)
integrateSimp3D(fxyz = fxyz)
}
\references{
Press, W. H., Teukolsky, S. A., Vetterling, W. T., Flannery, B. P. (1996). \emph{Numerical Recipes in Fortran 77: The Art of Scientific Computing (Vol. 1 of Fortran Numerical Recipes)}. Cambridge University Press, Cambridge.
}
|
0ed3b7d919d1849bdfe73c2adf0abf9f8bc1f672
|
a5e940b336f8d906d5e9b42eff22986bf1156cb3
|
/ui.R
|
8b63fac5c9b0c411fa83fd3489b4fdb99246f0f5
|
[] |
no_license
|
raulzr/compstat2016
|
be1322db403a0d28f1d000738d211ffb390c9f40
|
7cc5c77fb99779bab700d35573b56dc465db1647
|
refs/heads/master
| 2021-06-09T05:25:54.267668
| 2016-12-15T23:46:23
| 2016-12-15T23:46:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
ui.R
|
library(shiny)

# Top-level UI: one tab per assignment, each rendered by its module UI
# function with a distinct namespace id.
assignment_tabs <- tabsetPanel(
  tabPanel("Uno", tarea1UI("A")),
  tabPanel("Dos", tarea2UI("B")),
  tabPanel("Cuatro", tarea4UI("C")),
  tabPanel("Cinco", tarea5UI("D"))
)

shinyUI(fluidPage(assignment_tabs))
|
57c78177afdc2c48db4fb711dd39ab73d490a4f7
|
6c8d711db1ada9398e5b36463476844ab0e9e553
|
/R/make_diagram.R
|
14ce01edc9274a621495ada56481fa150af794c9
|
[] |
no_license
|
wzbillings/flowdiagramr
|
8ca658c702f1c88b56fcbf55d1936fcaa7ce12cd
|
e048801c384aeca859ed86cc916dd2a65712e47c
|
refs/heads/main
| 2023-06-07T22:17:29.353419
| 2021-06-21T16:41:30
| 2021-06-21T16:41:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,536
|
r
|
make_diagram.R
|
#' Make a ggplot2 model diagram.
#'
#' @description
#' `make_diagram()` generates a **ggplot2** object based on the data frames
#' made with \code{\link{prepare_diagram}}. The function only applies
#' aesthetics that are not associated with x, y locations. Colors, linetypes,
#' and other graphical options can be set by the user.
#'
#' @param diagram_list A required list of data frames returned from the
#' \code{prepare_diagram} function. See that function for details
#' about this object.
#' @param diagram_settings An optional list of diagram aesthetic settings. The
#' following elements are supported and default values are provided:
#' \itemize{
#' \item `var_outline_color`: A character string or vector of character strings
#' specifying the color of variable outlines. If a vector, the colors will be
#' recycled in the order of the variables in the supplied data frame.
#' \item `var_fill_color`: A character string or vector of character strings
#' specifying the fill color of variables. If a vector, the colors will be
#' recycled in the order of the variables in the supplied data frame.
#' \item `var_label_on`: A logical indicating if the labels for the variables
#' should be plotted.
#' \item `var_label_color`: A character string or vector of character strings
#' specifying the text color for variable labels. If a vector, the colors will
#' be recycled in the order of the variables in the supplied data frame.
#' \item `var_label_size`: A numeric scalar specifying the text size for variable
#' labels. Note that any value supplied here overwrites
#' entries in the list structure returned by \code{\link{prepare_diagram}}.
#' Specifically, if you set this parameter when calling \code{\link{prepare_diagram}}
#' with \code{use_varnames = TRUE}, the value is used to compute box size,
#' but then the actual size of the label as provided here is applied.
#'
#' \item `main_flow_on`: A logical indicating if the main flow arrows should be plotted.
#' \item `main_flow_color`: A character string or vector of character strings
#' specifying the text color for non-interaction flow arrows.
#' If a vector, the values will be recycled in the order of the flows
#' in the supplied data frame.
#' \item `main_flow_linetype`: Either a numeric scalar/vector or a character scalar/vector
#' specifying the linetype for main flows (non-interaction flows). This
#' argument is passed to the \code{linetype} argument in ggplot2. From
#' the ggplot2 documentation: "The linetype aesthetic can be specified
#' with either an integer (0-6), a name (0 = blank, 1 = solid, 2 = dashed,
#' 3 = dotted, 4 = dotdash, 5 = longdash, 6 = twodash), a mapping to a
#' discrete variable, or a string of an even number (up to eight) of
#' hexadecimal digits which give the lengths in consecutive positions in
#' the string." Default is 1 (solid). If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `main_flow_size`: A numeric scalar or vector specifying the line size for the
#' main flows (non-interaction flows). If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `main_flow_label_on`: A logical indicating if the labels for the main
#' flows should be plotted.
#' \item `main_flow_label_color`: A character string or vector of character strings
#' specifying the text color for main flow labels. If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `main_flow_label_size`: A scalar or numeric vector
#' specifying the text size for main flow labels. If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#'
#' \item `interaction_flow_on`: A logical indicating if the interaction flow arrows should be plotted.
#' \item `interaction_flow_color`: A character string or vector of character strings
#' specifying the text color for non-interaction flow arrows.
#' If a vector, the values will be recycled in the order of the flows
#' in the supplied data frame.
#' \item `interaction_flow_linetype`: Either a numeric scalar/vector or a character scalar/vector
#' specifying the linetype for interaction flows. This
#' argument is passed to the \code{linetype} argument in ggplot2. From
#' the ggplot2 documentation: "The linetype aesthetic can be specified
#' with either an integer (0-6), a name (0 = blank, 1 = solid, 2 = dashed,
#' 3 = dotted, 4 = dotdash, 5 = longdash, 6 = twodash), a mapping to a
#' discrete variable, or a string of an even number (up to eight) of
#' hexadecimal digits which give the lengths in consecutive positions in
#' the string." Default is 1 (solid). If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `interaction_flow_size`: A numeric scalar or vector specifying the line size for the
#' interaction flows (non-interaction flows). If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `interaction_flow_label_on`: A logical indicating if the labels for the interaction
#' flows should be plotted.
#' \item `interaction_flow_label_color`: A character string or vector of character strings
#' specifying the text color for interaction flow labels. If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `interaction_flow_label_size`: A scalar or numeric vector
#' specifying the text size for interaction flow labels. If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#'
#' \item `external_flow_on`: A logical indicating if the external flow arrows should be plotted.
#' \item `external_flow_color`: A character string or vector of character strings
#' specifying the text color for non-interaction flow arrows.
#' If a vector, the values will be recycled in the order of the flows
#' in the supplied data frame.
#' \item `external_flow_linetype`: Either a numeric scalar/vector or a character scalar/vector
#' specifying the linetype for external flows. This
#' argument is passed to the \code{linetype} argument in ggplot2. From
#' the ggplot2 documentation: "The linetype aesthetic can be specified
#' with either an integer (0-6), a name (0 = blank, 1 = solid, 2 = dashed,
#' 3 = dotted, 4 = dotdash, 5 = longdash, 6 = twodash), a mapping to a
#' discrete variable, or a string of an even number (up to eight) of
#' hexadecimal digits which give the lengths in consecutive positions in
#' the string." Default is 1 (solid). If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `external_flow_size`: A numeric scalar or vector specifying the line size for the
#' external flows (non-interaction flows). If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `external_flow_label_on`: A logical indicating if the labels for the external
#' flows should be plotted.
#' \item `external_flow_label_color`: A character string or vector of character strings
#' specifying the text color for external flow labels. If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#' \item `external_flow_label_size`: A scalar or numeric vector
#' specifying the text size for external flow labels. If a vector, the values will
#' be recycled in the order of the flows in the supplied data frame.
#'
#' \item `with_grid`: A logical indicating whether to return the ggplot
#' with a grid. Default is FALSE. The grid can be helpful if you
#' want/need to move items around.
#' }
#'
#' @return A ggplot2 object.
#' @examples
#' mymodel = list(varlabels = c("S","I","R"),
#' flows = list(S_flows = c("-b*S*I"),
#' I_flows = c("b*S*I","-g*I"),
#' R_flows = c("g*I") ) )
#' diagram_list <- prepare_diagram(model_list = mymodel)
#'
#' # make diagram without grid
#' diagram <- make_diagram(diagram_list)
#'
#' # make diagram with grid
#' diagram_with_grid <- make_diagram(diagram_list, diagram_settings = list(with_grid = TRUE))
#' @import ggplot2
#' @export
#'
make_diagram <- function (diagram_list,
                          diagram_settings = list(
                            var_outline_color = NA,
                            var_fill_color = "#6aa4c8",
                            var_label_on = TRUE,
                            var_label_color = "white",
                            var_label_size = NA,
                            main_flow_on = TRUE,
                            main_flow_color = "grey25",
                            main_flow_linetype = "solid",
                            main_flow_size = 0.7,
                            main_flow_label_on = TRUE,
                            main_flow_label_color = "black",
                            main_flow_label_size = 5,
                            interaction_flow_on = TRUE,
                            interaction_flow_color = "grey25",
                            interaction_flow_linetype = "dashed",
                            interaction_flow_size = 0.7,
                            interaction_flow_label_on = TRUE,
                            interaction_flow_label_color = "black",
                            interaction_flow_label_size = 5,
                            external_flow_on = TRUE,
                            external_flow_color = "grey25",
                            external_flow_linetype = "solid",
                            external_flow_size = 0.7,
                            external_flow_label_on = TRUE,
                            external_flow_label_color = "black",
                            external_flow_label_size = 5,
                            with_grid = FALSE)
) {
  # TODO: validate the structure of `diagram_list` (expects $variables and
  # $flows data frames as produced by prepare_diagram()).

  # Unpack the data frames produced by prepare_diagram().
  variables <- diagram_list$variables
  flows <- diagram_list$flows

  # Default settings, to be overridden by user-supplied values.
  defaults <- eval(formals(make_diagram)$diagram_settings)

  # Stop if the user supplied any setting name we do not recognize.
  nonrecognized_inputs <- setdiff(names(diagram_settings), names(defaults))
  # FIX: paren was misplaced (`length(nonrecognized_inputs>0)`).
  if (length(nonrecognized_inputs) > 0)
  {
    stop('These elements of diagram_settings are not recognized: ', nonrecognized_inputs)
  }

  # Update defaults with user settings.
  defaults[names(diagram_settings)] <- diagram_settings

  # with_grid only affects the final plot call, not the per-row aesthetics.
  with_grid <- defaults$with_grid
  defaults$with_grid <- NULL
  def2 <- eval(formals(make_diagram)$diagram_settings)
  def2$with_grid <- NULL
  check <- all.equal(def2, defaults)

  # If the merged settings differ from the defaults, the user asked to
  # override aesthetics, so rebuild the aesthetic columns. Otherwise the
  # settings already stored in the data frames are used unchanged.
  if (!isTRUE(check)) {  # all.equal() returns TRUE or a character vector
    # Bind each settings-list entry to a local variable of the same name.
    for (i in seq_along(defaults)) {
      assign(names(defaults)[i], defaults[[i]])
    }

    # if(interaction_flow_label_on == FALSE) {
    #   # This removes interaction segments and puts the flow label
    #   # back with the physical flow.
    #   flows <- move_interaction_label(flows)
    # }

    # If text size is not provided (NA), use the sizes already stored in the
    # data frame; otherwise the provided size overrides them all.
    if(is.na(var_label_size)) {
      var_label_size <- variables$label_size
    } else {
      var_label_size <- recycle_values(var_label_size, nrow(variables))
    }

    # Mapping from numeric linetype codes (0-6) to ggplot2 linetype names.
    ltys <- data.frame(code = 0:6,
                       text = c("blank", "solid", "dashed",
                                "dotted", "dotdash", "longdash",
                                "twodash"))

    # Recycle variable (node) aesthetics to one value per row.
    variables$color <- recycle_values(var_outline_color, nrow(variables))
    variables$fill <- recycle_values(var_fill_color, nrow(variables))
    variables$label_color <- recycle_values(var_label_color, nrow(variables))
    variables$label_size <- recycle_values(var_label_size, nrow(variables))
    variables$plot_label_size <- NULL

    # Main (non-interaction) flows.
    mains <- subset(flows, type == "main")
    mains$color <- recycle_values(main_flow_color, nrow(mains))
    if(is.numeric(main_flow_linetype)){
      main_flow_linetype <- subset(ltys, code == main_flow_linetype)[,"text"]
    }
    mains$linetype <- recycle_values(main_flow_linetype, nrow(mains))
    mains$size <- recycle_values(main_flow_size, nrow(mains))
    mains$label_color <- recycle_values(main_flow_label_color, nrow(mains))
    mains$label_size <- recycle_values(main_flow_label_size, nrow(mains))

    # Interaction flows.
    ints <- subset(flows, type == "interaction")
    ints$color <- recycle_values(interaction_flow_color, nrow(ints))
    if(is.numeric(interaction_flow_linetype)){
      interaction_flow_linetype <- subset(ltys, code == interaction_flow_linetype)[,"text"]
    }
    ints$linetype <- recycle_values(interaction_flow_linetype, nrow(ints))
    ints$size <- recycle_values(interaction_flow_size, nrow(ints))
    ints$label_color <- recycle_values(interaction_flow_label_color, nrow(ints))
    ints$label_size <- recycle_values(interaction_flow_label_size, nrow(ints))

    # External flows.
    exts <- subset(flows, type == "external")
    exts$color <- recycle_values(external_flow_color, nrow(exts))
    if(is.numeric(external_flow_linetype)){
      external_flow_linetype <- subset(ltys, code == external_flow_linetype)[,"text"]
    }
    exts$linetype <- recycle_values(external_flow_linetype, nrow(exts))
    exts$size <- recycle_values(external_flow_size, nrow(exts))
    exts$label_color <- recycle_values(external_flow_label_color, nrow(exts))
    exts$label_size <- recycle_values(external_flow_label_size, nrow(exts))

    # Recombine the flows data frame with aesthetics as columns.
    flows <- rbind(mains, ints, exts)
    flows$arrowsize <- 0.25  # default arrow size

    # Turn flow groups off entirely by blanking linetype and arrow size.
    if(main_flow_on == FALSE) {
      flows[flows$type == "main", "linetype"] <- "blank"
      flows[flows$type == "main", "arrowsize"] <- 0
    }
    if(interaction_flow_on == FALSE) {
      flows[flows$type == "interaction", "linetype"] <- "blank"
      flows[flows$type == "interaction", "arrowsize"] <- 0
    }
    if(external_flow_on == FALSE) {
      flows[flows$type == "external", "linetype"] <- "blank"
      flows[flows$type == "external", "arrowsize"] <- 0
    }

    # Suppress labels (set to "") when requested, and also whenever the
    # corresponding flow group itself is turned off.
    flows$math <- flows$label
    if(main_flow_on == FALSE || main_flow_label_on == FALSE) {
      flows[flows$type == "main", "label"] <- ""
    }
    if(interaction_flow_on == FALSE || interaction_flow_label_on == FALSE) {
      flows[flows$type == "interaction", "label"] <- ""
    }
    if(external_flow_on == FALSE || external_flow_label_on == FALSE) {
      flows[flows$type == "external", "label"] <- ""
    }
  }

  # get_code() returns the ggplot2 code as text; the eval below relies on
  # `variables`, `flows`, and `with_grid` being visible in this environment.
  code <- get_code()
  theplot <- eval(parse(text = code))
  return(theplot)
}
|
980150a500e51d35e9205118fd195a04f7886452
|
eaa300dce01424b7975c86fee975d1b5389f25f8
|
/AccountingDiscount.R
|
df9edfa073289fa3d56f97044c1dcfcf7207f57d
|
[] |
no_license
|
shineice/Py
|
9a4e4a6f1721f04d1cc501d6a8f3254bcbfd43c3
|
c23c24a4ebe9d7b0601e35495f9f39153a6b1fb4
|
refs/heads/master
| 2021-06-23T09:14:18.948316
| 2020-11-30T10:32:08
| 2020-11-30T10:32:08
| 163,953,206
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,679
|
r
|
AccountingDiscount.R
|
# AccountingDiscount.R -- daily discount report by customer account.
# NOTE(review): clearing the workspace inside a script is discouraged; kept
# here for backward compatibility with the existing scheduled job.
rm(list=ls())
library(RODBC)
library(dplyr)
library(stringr)
library(readxl)
library(lubridate)
library(mailR)

# Previous month number (not used further down in this script --
# TODO confirm before removing).
m <- as.numeric(format(Sys.Date(), "%m"))
m <- m - 1
# Look-back window in days for the invoice query below.
ddd <- 1

## Get daily data from NetSuite.
# SECURITY(review): database credentials are hard-coded in source; move them
# to environment variables or a secured DSN.
channel <- odbcConnect("NetSuite", uid="yao.guan@top-line.com", pwd="NetYG@Davis")
discount<-sqlQuery(channel,paste0(
"select
ITEMS.Name as Name,
ENTITY.Name as Entity,
ENTITY.ENTITY_ID,
TRANSACTION_LINES.ITEM_COUNT as 'qty',
TRANSACTION_LINES.AMOUNT as 'amount'
from
TRANSACTION_LINES
join (TRANSACTIONS join ENTITY on TRANSACTIONS.ENTITY_ID=ENTITY.ENTITY_ID)
on TRANSACTION_LINES.TRANSACTION_ID=TRANSACTIONS.TRANSACTION_ID
join LOCATIONS on TRANSACTION_LINES.LOCATION_ID=LOCATIONS.LOCATION_ID
join ITEMS on TRANSACTION_LINES.ITEM_ID=ITEMS.ITEM_ID
where
TRANSACTION_LINES.AMOUNT is not null and
TRANSACTIONS.TRANSACTION_TYPE='Invoice' and
TRANSACTIONS.STATUS in ('Open','Paid In Full') and
ENTITY.ENTITY_ID in (
'9352',
'11125',
'14877',
'21336',
'14525',
'24618',
'11124',
'22701',
'10228',
'12124',
'20567',
'24250',
'20587',
'10290',
'8790',
'6328',
'3958',
'8610',
'18652',
'12868',
'10057',
'5376',
'20010',
'22361',
'15932',
'7996' ) and
TRANSACTIONS.TRANDATE BETWEEN TO_DATE( '" , Sys.Date()-ddd ," 00:00:00', 'YYYY-MM-DD HH24:MI:SS')
AND TO_DATE('" , Sys.Date()-ddd+1," 00:00:00', 'YYYY-MM-DD HH24:MI:SS')
" ))
wfp<-sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Wayfair.com)' and p.ITEM_ID = Item_ID
")
wfcanp <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Wayfair Canada)' and p.ITEM_ID = Item_ID
")
hdp <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (HomeDepot.com)' and p.ITEM_ID = Item_ID
")
hayp <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (NetShop)' and p.ITEM_ID = Item_ID
")
zup <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Zulily.com)' and p.ITEM_ID = Item_ID
")
balp <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Bellacor.COM)' and p.ITEM_ID = Item_ID
")
houzp <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Houzz.com)' and p.ITEM_ID = Item_ID
")
belp <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Bellacor.COM)' and p.ITEM_ID = Item_ID
")
pierpt <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Trading (Pier 1)' and p.ITEM_ID = Item_ID
")
pierpd <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Wh ( Pier 1)' and p.ITEM_ID = Item_ID
")
pierpI <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Pier 1)' and p.ITEM_ID = Item_ID
")
amap <- sqlQuery(channel,
"select i.Name, p.ITEM_UNIT_PRICE
from Items i, ITEM_PRICES p
where p.NAME = 'Dot Com (Amazon.com)' and p.ITEM_ID = Item_ID
")
colnames(wfp) <- c("Name","wfp")
colnames(wfcanp) <- c("Name","wfcanp")
colnames(hdp) <- c("Name","hdp")
colnames(hayp) <- c("Name","hayp")
colnames(zup) <- c("Name","zup")
colnames(balp) <- c("Name","balp")
colnames(houzp) <- c("Name","houzp")
colnames(belp) <- c("Name","belp")
colnames(pierpt) <- c("Name","pierpt")
colnames(pierpd) <- c("Name","pierpd")
colnames(pierpI) <- c("Name","pierpI")
colnames(amap) <- c("Name","amap")
basic <- sqlQuery(channel,"
select NAME,
SALESDESCRIPTION,
ITEM_STATUS.LIST_ITEM_NAME as Status,
CTNSPC,
N_2019_CAT,
CREATED,
WAYFAIR_COM_PARTNER_SKU,
WF_FIRST_ON_SITE_DATE
,OVERSTOCK_COM_SKU,FIRST_ON_SITE_DATE as OVERSTOCK_FIREST_ON_SITE_DATE
from ITEMS
Join ITEM_STATUS on STATUS_ID = ITEM_STATUS.LIST_ID
where ISINACTIVE='No'
")
basic <- select(basic,Name=NAME,Status)
odbcCloseAll()
mergedisc <- list(discount,wfp,wfcanp,hdp,hayp,zup,balp,houzp,belp,pierpt,pierpd,pierpI,amap,basic) %>%
Reduce(function(dtf1,dtf2) left_join(dtf1,dtf2,by="Name"), .)
## Classify invoice lines and compute promotion vs. close-out discounts. ----
table(mergedisc$Status)

# TRUE when the item is closed out / discontinued: its discount is booked as
# "Close-Out Discount" (CD); otherwise as "Event Promotion" (EP).
# (Renamed from `c`, which masked base::c.)
closeout <- mergedisc$Status %in% c("Close Out", "Discontinued", "Non-Replenishable (via DI)")

# Customer-account masks (NetSuite ENTITY_IDs).
wf <- mergedisc$ENTITY_ID %in% c(9352, 11125, 14877, 14525, 24618, 11124, 22701,
                                 10228, 12124, 20567, 24250, 20587, 10290, 8790)
wc <- mergedisc$ENTITY_ID == 21336
hd <- mergedisc$ENTITY_ID == 6328
ha <- mergedisc$ENTITY_ID == 3958
zu <- mergedisc$ENTITY_ID == 8610
ba <- mergedisc$ENTITY_ID == 18652
ho <- mergedisc$ENTITY_ID == 12868
be <- mergedisc$ENTITY_ID == 5376
pt <- mergedisc$ENTITY_ID == 20010
pd <- mergedisc$ENTITY_ID == 22361
p1 <- mergedisc$ENTITY_ID == 15932  # renamed from `pi` (masked base::pi)
am <- mergedisc$ENTITY_ID == 7996

# Close-out masks per account.
cwf <- closeout & wf
cwc <- closeout & wc
chd <- closeout & hd
cha <- closeout & ha
czu <- closeout & zu
cba <- closeout & ba
cho <- closeout & ho
cbe <- closeout & be
cpt <- closeout & pt
cpd <- closeout & pd
cpi <- closeout & p1
cam <- closeout & am

# Active (non-close-out) masks per account.
ewf <- !closeout & wf
ewc <- !closeout & wc
ehd <- !closeout & hd
eha <- !closeout & ha
ezu <- !closeout & zu
eba <- !closeout & ba
eho <- !closeout & ho
ebe <- !closeout & be
ept <- !closeout & pt
epd <- !closeout & pd
epi <- !closeout & p1
eam <- !closeout & am

mergedisc$EP <- 0
mergedisc$CD <- 0
mergedisc$qty <- abs(mergedisc$qty)
mergedisc$amount <- (-mergedisc$amount)  # invoice line amounts come in negative

# Discount per line = list price * qty - realized amount.
# Event Promotion: active items.
mergedisc$EP[ewf] <- mergedisc$wfp[ewf]*mergedisc$qty[ewf] - mergedisc$amount[ewf]
mergedisc$EP[ewc] <- mergedisc$wfcanp[ewc]*mergedisc$qty[ewc] - mergedisc$amount[ewc]
mergedisc$EP[ehd] <- mergedisc$hdp[ehd]*mergedisc$qty[ehd] - mergedisc$amount[ehd]
mergedisc$EP[eha] <- mergedisc$hayp[eha]*mergedisc$qty[eha] - mergedisc$amount[eha]
mergedisc$EP[ezu] <- mergedisc$zup[ezu]*mergedisc$qty[ezu] - mergedisc$amount[ezu]
mergedisc$EP[eba] <- mergedisc$balp[eba]*mergedisc$qty[eba] - mergedisc$amount[eba]
mergedisc$EP[eho] <- mergedisc$houzp[eho]*mergedisc$qty[eho] - mergedisc$amount[eho]
mergedisc$EP[ebe] <- mergedisc$belp[ebe]*mergedisc$qty[ebe] - mergedisc$amount[ebe]
mergedisc$EP[ept] <- mergedisc$pierpt[ept]*mergedisc$qty[ept] - mergedisc$amount[ept]
mergedisc$EP[epd] <- mergedisc$pierpd[epd]*mergedisc$qty[epd] - mergedisc$amount[epd]
mergedisc$EP[epi] <- mergedisc$pierpI[epi]*mergedisc$qty[epi] - mergedisc$amount[epi]
mergedisc$EP[eam] <- mergedisc$amap[eam]*mergedisc$qty[eam] - mergedisc$amount[eam]
# Close-Out Discount: closed-out items.
mergedisc$CD[cwf] <- mergedisc$wfp[cwf]*mergedisc$qty[cwf] - mergedisc$amount[cwf]
mergedisc$CD[cwc] <- mergedisc$wfcanp[cwc]*mergedisc$qty[cwc] - mergedisc$amount[cwc]
mergedisc$CD[chd] <- mergedisc$hdp[chd]*mergedisc$qty[chd] - mergedisc$amount[chd]
mergedisc$CD[cha] <- mergedisc$hayp[cha]*mergedisc$qty[cha] - mergedisc$amount[cha]
mergedisc$CD[czu] <- mergedisc$zup[czu]*mergedisc$qty[czu] - mergedisc$amount[czu]
mergedisc$CD[cba] <- mergedisc$balp[cba]*mergedisc$qty[cba] - mergedisc$amount[cba]
mergedisc$CD[cho] <- mergedisc$houzp[cho]*mergedisc$qty[cho] - mergedisc$amount[cho]
mergedisc$CD[cbe] <- mergedisc$belp[cbe]*mergedisc$qty[cbe] - mergedisc$amount[cbe]
# FIX: the four lines below previously wrote to EP, mis-categorizing Pier 1
# and Amazon close-out discounts as event promotions (and overwriting the
# EP values computed above for those close-out rows).
mergedisc$CD[cpt] <- mergedisc$pierpt[cpt]*mergedisc$qty[cpt] - mergedisc$amount[cpt]
mergedisc$CD[cpd] <- mergedisc$pierpd[cpd]*mergedisc$qty[cpd] - mergedisc$amount[cpd]
mergedisc$CD[cpi] <- mergedisc$pierpI[cpi]*mergedisc$qty[cpi] - mergedisc$amount[cpi]
mergedisc$CD[cam] <- mergedisc$amap[cam]*mergedisc$qty[cam] - mergedisc$amount[cam]

# Aggregate per account and write the daily CSV.
aggdisc <- aggregate(cbind(EP, CD) ~ Entity, mergedisc, sum)
colnames(aggdisc) <- c("Account", "Event Promotion", "Close-Out Discount")

# Send mail / write report.
# Renamed from `list` (masked base::list); spelling of the subject string
# kept byte-for-byte as in the original.
mail_subject <- "Monthly dicount by account"
m1 <- Sys.Date() - ddd
path <- paste0("/home/topline/auto_report/", m1, "_Discount.csv")
write.csv(aggdisc, path, row.names = FALSE)
|
c895ac06b2073d7b933d8498a5e77ee34da2d8fe
|
8d4b15ba1c6004e6e2f40b01de3543c61df72dd7
|
/predict_validate.R
|
64f2900834bd47d09cb0cfcccc990914daaccdd0
|
[] |
no_license
|
prashantg123/PMnPred
|
4c4aba6b720e0a5e58fe30758f137b809252fc3e
|
7558b32c91ed12a233a4cfa2ed48fe02d298237f
|
refs/heads/master
| 2020-03-16T14:21:34.944811
| 2018-05-09T06:52:17
| 2018-05-09T06:52:17
| 132,714,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,984
|
r
|
predict_validate.R
|
library(devtools)
load_all("d:/InfoTrie/global")
zz <- file("d:/InfoTrie/output/error.txt", open="wt")
sink(zz, type="message")
args <- commandArgs(TRUE)
input <- args[1]
params <- ReadParam(input)
write.csv(input, paste0(params$app.path,"/output/input.csv"))
data<- read.csv(params$file.name, header = TRUE)
values <- data[, 4]
time.index <- as.POSIXct(strptime(data[, 1], '%m/%d/%Y %H:%M'))
time.nas <- which(!is.na(time.index))
# Dicard NAs
time.index <- time.index[time.nas]
values <- values[time.nas]
start.date <- params$start.date
if (start.date == "") {
start.date <- "2013-01-14"
}
end.date <- params$end.date
if (end.date == "") {
end.date <- "2014-06-14 0:50"
}
start <- which(time.index == as.POSIXct(start.date))
if (length(start) == 0) { # No exact match of input datetime
start <- FindApproxTime(time.index, start.date)
}
end <- which(time.index == as.POSIXct(end.date))
if (length(end) == 0) { # No exact match of input datetime
end <- FindApproxTime(time.index, end.date)
}
range <- start:end
points <- values[range]
min.basis <- as.numeric(params$min.basis)
if (is.na(min.basis)) {
min.basis <- 9
}
min.basis <- min.basis / 100
move.duration <- as.numeric(params$move.duration)
if (is.na(move.duration)) {
move.duration <- 180
}
move.angle <- as.numeric(params$move.angle)
if (is.na(move.angle)) {
move.angle <- 30
}
noise.basis <- as.numeric(params$noise.basis)
if (is.na(noise.basis)) {
noise.basis <- 2
}
noise.basis <- noise.basis / 100
noise.duration <- as.numeric(params$noise.duration)
if (is.na(noise.duration)) {
noise.duration <- 120
}
retrace.percent <- as.numeric(params$retrace.percent)
if (is.na(retrace.percent)) {
retrace.percent <- 30
}
retrace.min <- as.numeric(params$retrace.min)
if (is.na(retrace.min)) {
retrace.min <- 40
}
retrace.duration <- as.numeric(params$retrace.duration)
if (is.na(retrace.duration)) {
retrace.duration <- 3000
}
vol.max <- as.numeric(params$vol.max)
if (is.na(vol.max)) {
vol.max <- 0.01
}
width <- as.numeric(params$width)
if (is.na(width)) {
width <- 12
}
predict.time <- params$predict.date
if (predict.time == "") {
predict.time <- as.character(tail(time.index, 1))
}
current <- which(time.index == as.POSIXct(predict.time))
if (length(current) == 0) { # No exact match of input datetime
current <- FindApproxTime(time.index, predict.time)
}
# if (current - start < 60/interval * 24 * 125 + 168) {
# stop("Not enough training data. Use later time")
# }
range <- start:end
points <- values[range]
points.length <- length(points)
points.max <- max(points)
points.min <- min(points)
if (!is.na(as.numeric(params$min.basis)) && !is.na(as.numeric(params$move.angle))) { # input min.basis and move.angle
move.duration <- interval * (points.length - 1) * min.basis / (points.max - points.min) / tan(move.angle*pi/180)
} else if (!is.na(as.numeric(params$move.duration)) && !is.na(as.numeric(params$move.angle))) { # input move.duration and move.angle
min.basis <- (move.duration / interval) * (points.max - points.min) * tan(move.angle*pi/180) / (points.length - 1)
}
patterns <- FindAll(points, interval = interval,
min.basis = min.basis, move.duration = move.duration,
noise.basis = noise.basis, noise.duration = noise.duration,
retrace.percent = retrace.percent,
retrace.min = retrace.min, retrace.duration = retrace.duration)
period <- 60
current <- current - start + 1
if (exists("HFdata")) {
HFdata <- Delta(HFdata, points, current, patterns, interval, period)
} else {
HFdata <- Preprocess(points, current, patterns, interval, period)
}
# Output chart path; default when not supplied.
output.file <- params$output.file
if (output.file == "") {
  output.file <- "Chart.jpg"
}

# Requested trend direction; default to "up" when not supplied.
trend <- tolower(params$trend)
if (trend == "") {
  trend <- "up"  # FIX: previously assigned "up" to predict.time by mistake
}

# Render the prediction/validation chart.
jpeg(output.file, width = 20, height = 9, units = "in", res = 300)
PredictTest(HFdata, current, time.index[range])
dev.off()

# FIX: close the *message* sink opened at the top of the script with
# sink(zz, type = "message"); a bare sink() only pops the stdout sink stack.
sink(type = "message")
|
8ed21c1d917e435e3c2730aa04f0bc3472b884e8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spacejam/examples/plot.SJ.Rd.R
|
ea5697414c0a7398b65ab2d044c5885c1073979c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
r
|
plot.SJ.Rd.R
|
library(spacejam)
### Name: plot.SJ
### Title: plot an object of class 'SJ' or 'SJ.dag'
### Aliases: plot.SJ plot.SJ.dag
### ** Examples
# Simulation dimensions: p variables observed n times.
p <- 100 #variables
n <- 50 #observations
#Generate Data
set.seed(20)
g <- rdag(p,80)
data <- generate.dag.data(g,n,basesd=c(1,0.5,0.5))
X <- data$X
#Fit conditional independence graph for sequence of 10 lambdas
fit1 <- SJ(X, length = 10)
# Side-by-side plots: the BIC-minimizing fit and the fit at the 5th lambda,
# reusing the node layout returned by the first plot call.
par(mfrow=c(1,2))
layout <- plot(fit1, main = "min BIC")
plot(fit1, which=5, layout = layout, main = paste0("lambda = ",round(fit1$lambda[5],3)))
|
fc6bdb284c99a0b756109fc65cdf11f5746eeeaa
|
8541c4ed0784bf1d448131152f50f02c36c0dc0f
|
/exercicios_cap2.R
|
22cdcc745cd268a800731b9b723e1864c190fc08
|
[] |
no_license
|
aglotero/ct-234
|
55638f76798ca3efca7a605aaa09b84c41edd8c5
|
464eae766307419a63be6c5190c514e7c3cb3865
|
refs/heads/master
| 2021-01-10T14:56:00.547217
| 2016-03-12T20:59:21
| 2016-03-12T20:59:21
| 53,752,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,673
|
r
|
exercicios_cap2.R
|
# Prints the natural numbers 0..n in ascending order, separated by spaces.
# Recurses all the way down before printing, so output comes out ascending.
# Returns '' for n < 0 (base case); otherwise the NULL returned by cat().
naturaisCrescentes <- function(n) {
  if (n < 0) {
    return('')
  }
  Recall(n - 1)         # print 0..(n-1) first
  cat(paste0(n, ' '))   # then print n on the way back up
}
# Prints the natural numbers n..0 in descending order, separated by spaces.
# Prints n before recursing, so output comes out descending; the base case
# (n == 0) returns '', which propagates up as the function's value.
naturaisDecrescentes <- function(n) {
  cat(paste0(n, ' '))
  if (n == 0) {
    return('')
  }
  Recall(n - 1)
}
# Recursively finds the maximum value of a non-empty vector:
# maximum of the tail compared against the head.
maximoEmVetor <- function(vetor) {
  if (length(vetor) == 1) {
    return(vetor[1])
  }
  max_resto <- maximoEmVetor(vetor[-1])  # maximum of the tail
  if (max_resto > vetor[1]) max_resto else vetor[1]
}
# Recursively tests whether `valor` occurs in the non-empty vector `vetor`
# (linear search: head first, then the tail).
estaEmVetor <- function(vetor, valor) {
  if (vetor[1] == valor) {
    return(TRUE)
  }
  if (length(vetor) > 1) {
    return(estaEmVetor(vetor[-1], valor))
  }
  FALSE
}
# Recursively sums all elements of a non-empty vector: head plus sum of tail.
somaTodoVetor <- function(vetor) {
  if (length(vetor) == 1) {
    vetor[1]
  } else {
    vetor[1] + Recall(vetor[-1])
  }
}
# Recursively reverses a non-empty vector: reversed tail followed by the head.
inverteVetor <- function(vetor) {
  if (length(vetor) == 1) {
    vetor[1]
  } else {
    c(Recall(vetor[-1]), vetor[1])
  }
}
# Other exercises

# Prints the binary representation of a non-negative integer, digits
# separated by spaces, most significant digit first.
# (Original comments had the two operators swapped; corrected below.)
imprimirBinario <- function(numero){
  # %% is the remainder (modulo) operator in R
  resto <- numero %% 2
  # %/% is the integer-division operator in R
  quociente <- numero %/% 2
  if(quociente > 0){
    imprimirBinario(quociente)
  }
  cat(paste0(resto, ' '))
}
# Demo runs for the exercise functions defined above.
naturaisCrescentes(10)
cat('\n')
naturaisDecrescentes(10)
cat('\n')
a <- c(1,2,3,4,5,6,7,8,9,10)
print(maximoEmVetor(a))
a <- c(1,2,30,4,5,6,7,8,9,10)
print(maximoEmVetor(a))
print(estaEmVetor(a,5))
print(estaEmVetor(a,99))
print(somaTodoVetor(a))
imprimirBinario(2)
cat('\n')
imprimirBinario(4)
cat('\n')
imprimirBinario(9)
cat('\n')
imprimirBinario(1024)
cat('\n')
|
23f1cc8cd779d0c43d970c5f8a7935f4ead3189b
|
dec2677d31a0cfc6b2930700c0856ac84b19de32
|
/doc/preprocessing.R
|
b47793cc163b9d6d973c7cf514dbe42322abd30b
|
[] |
no_license
|
yvonnechanlove97/Multi-stage-Financial-Modeling-R
|
41fea9e8c245bbe4f1dfd212f06817170ceda675
|
8052a8ec615941abe106f63dace23f9e78b683b5
|
refs/heads/master
| 2022-10-17T22:27:57.640942
| 2020-06-15T21:58:23
| 2020-06-15T21:58:23
| 271,899,407
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,771
|
r
|
preprocessing.R
|
## ----fig.width=7, fig.height=4, warning=F, message=F--------------------------
library(FinancialModelingR)
library(png)
library(grid)
img <- readPNG("private_data/july2020.PNG")
grid.raster(img)
## -----------------------------------------------------------------------------
contractsForJuly2020 <- FinancialModelingR::read_price(
in_file = "private_data/ActiveSoybeanContractsforJuly2020.xlsx", delta_price = T,
add_delta = T, subset = T, subset_min_date = "2017-01-01",
rename_price_columns = T, rename_prefix = "july_2020_", skip_lines = 3)
contractsForJuly2020$Date <- as.Date(contractsForJuly2020$Date, "%Y-%m-%d")
saveRDS(contractsForJuly2020, "preprocessed_data/contractsForJuly2020.Rds")
## ----fig.width=7, fig.height=4, warning=F, message=F--------------------------
img <- readPNG("private_data/tweets.PNG")
grid.raster(img)
## ----warning=F, message=F-----------------------------------------------------
library(data.table)
library(tm)
library(plyr)
library(dplyr)
library(qdap)
wd <- getwd()
setwd("raw_data/Tweets/")
tweet_files <- list.files(pattern = "\\.csv$")
for(file in tweet_files) {
for(processed in c("unprocessed")) {
process_text(file = file, processed = processed)
}
}
setwd(wd)
## ----warning=F, message=F-----------------------------------------------------
tweet_df <- data.frame(fread("raw_data/Tweets/China tweets @realDonaldTrump.csv"))
df1 <- data.frame(fread("raw_data/Tweets/FarmerTweets @realDonaldTrump.csv"))
tweet_df <- rbind(tweet_df, df1)
df1 <- data.frame(fread("raw_data/Tweets/soybeans tweets @realDonaldTrump.csv"))
tweet_df <- rbind(tweet_df, df1)
dtm <- get_dtm(text = tweet_df$text, thr = 50)
tweet_df <- cbind(data.frame(created_at = tweet_df$created_at), dtm)
tweet_df$created_at <- as.Date(tweet_df$created_at, format = "%m-%d-%Y")
tweet_df <- tweet_df[!is.na(tweet_df$created_at), ]
tweet_df <- tweet_df %>% group_by(created_at) %>% summarize_all(sum)
## -----------------------------------------------------------------------------
tweet_df <- readRDS("private_data/text_features.Rds")
colnames(tweet_df)[1] <- "Date"
sds <- sapply(tweet_df[, 2:ncol(tweet_df)], sd)
remove_cols <- 1 + which(sds == 0)
if(length(remove_cols) > 0) {
tweet_df <- tweet_df[, -remove_cols]
}
saveRDS(tweet_df, "preprocessed_data/tweet_df.Rds")
## ----fig.width=7, fig.height=4------------------------------------------------
library(png)
library(grid)
img <- readPNG("private_data/exports.PNG")
grid.raster(img)
## -----------------------------------------------------------------------------
library(janitor)
soybeanExports = read_exports(
file = "raw_data/ExportSalesDataByCommodity(Soybeans).csv", skip_lines = 4)
## -----------------------------------------------------------------------------
library(dplyr)
data("soybeanExports", package = "FinancialModelingR")
competitors <- c("ARGENTINA", "BRAZIL")
df_total_export <- soybeanExports %>% group_by(Country) %>%
summarize(Total_Export = sum(Weekly_Exports, na.rm = T))
top_countries <- head(x = df_total_export$Country[
order(df_total_export$Total_Export, decreasing = TRUE)], n = 10)
selected_countries <- c(competitors, top_countries)
df_top_export <- soybeanExports[sapply(
soybeanExports$Country, function(country) country %in% selected_countries), ]
saveRDS(df_top_export, "preprocessed_data/top_10_export_countries.Rds")
## ----fig.width=7, fig.height=4------------------------------------------------
img <- readPNG("private_data/crop_progress.PNG")
grid.raster(img)
## -----------------------------------------------------------------------------
soybeanCropProgress2019 <- read.csv("raw_data/ExportSalesDataByCommodity(Soybeans).csv")
soybeanCropProgress2019 <- soybeanCropProgress2019[, -c(2:5,7)]
## -----------------------------------------------------------------------------
data("soybeanCropProgressUSA2019", package = "FinancialModelingR")
soybeanCropProgress2019$WEEK.ENDING <-
as.Date(soybeanCropProgress2019$WEEK.ENDING, "%Y-%m-%d")
saveRDS(soybeanCropProgress2019, "preprocessed_data/soybeanCropProgress2019.Rds")
## ----fig.width=7, fig.height=4------------------------------------------------
img <- readPNG("private_data/wasde_folder.PNG")
grid.raster(img)
## ----fig.width=7, fig.height=4------------------------------------------------
img <- readPNG("private_data/wasde_file.PNG")
grid.raster(img)
## -----------------------------------------------------------------------------
library(readxl)
soybeanCombinedWASDE <- read_wasde(path = "raw_data/WASDE/")
## -----------------------------------------------------------------------------
data("soybeanCombinedWASDE")
soybeanWASDE_clean <- clean_wasde(combined_data = soybeanCombinedWASDE)
saveRDS(soybeanWASDE_clean, "preprocessed_data/soybeanWASDE_clean.Rds")
|
46901511f43553ae1c48b0c5bc6e9cbb3200e0ea
|
92e240738a4ccf673b9f3610386eaa08eef26d6f
|
/momentum/rebal-frequency/combine.R
|
2dd312f752e20ebba38fbe0aa64d17fdec2037b3
|
[] |
no_license
|
stockviz/blog
|
564a4671202b92a2d63f13f0207fd8a35810c0b6
|
e00c055742a1229c612669ee29d846a6e2475a43
|
refs/heads/master
| 2023-09-01T15:59:07.746886
| 2023-08-31T04:01:37
| 2023-08-31T04:01:37
| 138,372,618
| 12
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,072
|
r
|
combine.R
|
# combine.R -- combines per-strategy return series, ranks configurations by
# annualized Sharpe/return, and plots cumulative returns of the top configs.
library('quantmod')
library('PerformanceAnalytics')
library('PortfolioAnalytics')
library('tidyverse')
library('lubridate')
options("scipen"=100)
options(stringsAsFactors = FALSE)
source("D:/StockViz/public/blog/common/plot.common.R")
reportPath <- "."
load(sprintf("%s/symRets.Rdata", reportPath)) #symRets
# Wide data frame: one return column per STRATEGY_REBALFREQ combination,
# then an xts with complete rows only.
symRetDf <- symRets %>% pivot_wider(names_from = c(STRATEGY, REBAL_FREQ), values_from = RET) %>% as.data.frame()
symXts <- na.omit(xts(symRetDf[,-1], symRetDf[,1]))
# Annualized Sharpe ratio and annualized return (%) per configuration,
# transposed so each row is one configuration.
statsDf <- data.frame(SharpeRatio.annualized(symXts))
statsDf <- rbind(statsDf, Return.annualized(symXts)*100)
statsDf <- data.frame(t(statsDf))
colnames(statsDf) <- c('SHARPE', 'RET')
statsDf$ID <- row.names(statsDf)
write.csv(statsDf, file=sprintf("%s/symStatsAll.csv", reportPath), row.names = F)
# Top 30 by Sharpe, then the top 6 of those by return (benchmark excluded),
# plotted alongside the benchmark series.
configs <- statsDf %>% filter(ID != 'BENCH_0') %>% slice_max(SHARPE, n = 30) %>% slice_max(RET, n=6) %>% select(ID)
toPlot <- symXts[, c(configs$ID, 'BENCH_0')]
Common.PlotCumReturns(toPlot, "Momentum", "skip-months/rebal-frequency", sprintf("%s/symRetsAll.png", reportPath), NULL)
|
5b7ddc284a6e512c1ac082e957603e5685dafabb
|
4e7044d8987736ca8e639f8467e4011cb57a7c54
|
/WP6/CLEFRDB/R/class1Trip.R
|
1c567dd395c8c0e97032687c23f4e62c59bc60c0
|
[
"MIT"
] |
permissive
|
ices-tools-dev/FishPi2
|
16ab67c7883e909f896dbca8ed0145f0a1765f35
|
d2818819f053a66008a4a94aa896c5777e17f1db
|
refs/heads/master
| 2020-05-22T15:23:06.977219
| 2019-08-29T22:55:34
| 2019-08-29T22:55:34
| 186,406,874
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,391
|
r
|
class1Trip.R
|
#' Validity method for class Trip
#'
#' Checks that every slot of a Trip object has the same length and that the
#' `type`, `method` and `country` slots only contain values from the CLEFRDB
#' reference lists (`Triptype`, `Tripmethodtype`, `defcountry`).
#'
#' @param object a Trip object
#'
#' @return TRUE when the object is valid, FALSE otherwise (offending
#'   positions are printed as a side effect)
#'
validTrip<-function(object){
  #Triptype<-NULL
  #utils::data(Triptype,package="fishpi2qc")
  #print(Triptype)
  check<-TRUE
  #data length
  #if(F){
  #object<-new("Trip")
  # All slots must be parallel vectors: one element per trip.
  nomslot<-methods::slotNames(object)
  lengthall<-c()
  for(i in nomslot){
    len0<-length(methods::slot(object,i))
    lengthall<-c(lengthall,len0)
  }
  if(all(lengthall[1]==lengthall)){
    check<-TRUE&check
  }else{
    print(paste0("lengths of parameters are not equal"))
    check<-FALSE&check
  }
  #type: every value must be a known CLEFRDB trip type code
  if(all(object@type%in%CLEFRDB::Triptype)){
    check<-TRUE&check
  }else{
    id<-which(!object@type%in%CLEFRDB::Triptype)
    print(paste0("wrong type at: ",paste0(id,collapse=",")))
    check<-FALSE&check
  }
  #method: every value must be a known CLEFRDB trip method code
  if(all(object@method%in%CLEFRDB::Tripmethodtype)){
    check<-TRUE&check
  }else{
    id<-which(!object@method%in%CLEFRDB::Tripmethodtype)
    print(paste0("wrong method at: ",paste0(id,collapse=",")))
    check<-FALSE&check
  }
  #project: not checked here
  #country: every value must be a known CLEFRDB country id
  if(all(object@country%in%CLEFRDB::defcountry$id)){
    check<-TRUE&check
  }else{
    id<-which(!object@country%in%CLEFRDB::defcountry$id)
    print(paste0("wrong country at: ",paste0(id,collapse=",")))
    check<-FALSE&check
  }
  #}
  return(check)
}
#' Class Trip
#'
#' S4 class describing a fishing trip. The trip-level slots declared here
#' are the haul count and the days-at-sea; all other slots are inherited
#' from the contained classes Vessel, Sampling, Time and Space.
#'
#' NOTE(review): the prototype entries named "Vessel", "Sampling", "Time"
#' and "Space" are class names, not slot names of this class (inheritance
#' contributes the parent slots directly), so new() presumably ignores
#' them — confirm and consider removing.
#'
#' @slot nbhaul integer, number of hauls carried out during the trip
#' @slot daysatsea integer, number of days spent at sea
#'
setClass(Class="Trip",
         slots=c(nbhaul="integer",
                 daysatsea="integer"
         ),
         contains=c("Vessel",
                    "Sampling",
                    "Time",
                    "Space"
         ),
         prototype=prototype(nbhaul=integer(),
                             daysatsea=integer(),
                             Vessel=methods::new("Vessel"),
                             Sampling=methods::new("Sampling"),
                             Time=methods::new("Time"),
                             Space=methods::new("Space")
         ),
         # validTrip checks slot lengths and coded fields (see above).
         validity=validTrip
)
#' Initialize method for Trip
#'
#' Populates a new Trip either slot-by-slot (e.g. new("Trip", nbhaul = 1L))
#' or by passing whole parent-class objects named after their class
#' (e.g. new("Trip", Time = new("Time", ...))), in which case every slot of
#' the supplied object is copied into the Trip. Arguments matching neither
#' a parent class nor a slot trigger a single warning and are ignored.
#'
#' Fixes vs. previous version: removed a stray debug print() inside the
#' slot-copy loop (its siblings were already commented out), an unused
#' local (slotobj), a duplicated computation of the unused-argument list,
#' and a dead if(F) development block.
setMethod("initialize","Trip",function(.Object,...){
  dots<-list(...)
  if(length(dots)>0){
    # Track which of the supplied arguments were actually consumed.
    testusedots<-lapply(dots,function(a){FALSE})
    for(namedots in names(dots)){
      # Class-to-class: copy every slot of the supplied parent object.
      if(inherits(.Object,namedots)){
        testusedots[[namedots]]<-TRUE
        for(idslot in methods::slotNames(dots[[namedots]])){
          methods::slot(.Object,idslot)<-methods::slot(dots[[namedots]],idslot)
        }
      }
      # Slot-to-slot: assign the value directly to the matching slot.
      if(any(methods::slotNames(.Object)%in%namedots)){
        testusedots[[namedots]]<-TRUE
        methods::slot(.Object,namedots)<-dots[[namedots]]
      }
    }
    # Warn once about any argument that was not used.
    wrongdots<-names(testusedots)[!unlist(testusedots)]
    if(length(wrongdots)>0){
      warning(paste0("parameters ",paste0(wrongdots,collapse=",")," unknown not used"))
    }
  }
  return(.Object)
})
#if(F){
#
# library(CLEFRDB)
# source("00function.R")
# aa<-new("Trip")
# aa<-new("Trip",VesselId="date")
# aa<-new("Trip",VesselId="geooorges",robert="jjj")
# aa<-new("Trip",robert="jjj")
# pipo<-new("Trip")
# tt<-new("Vessel",id=10)
# new("Trip",vessel=tt)
# pipo
#
# load("../data/Triptype.rda")
# load("../data/Tripmethodtype.rda")
# load("../data/defcountry.rda")
# pipo<-new("Time",TimeType="youlgi")
# pipo@TimeType<-"oupu"
# new("Trip",Time=pipo)
# new("Trip",country="FRA",type="M")
# new("Trip",country="FRA",type="U")
#setClass(Class="pipo",
# slots=c(nbhaul="integer",
# daysatsea="integer"
# ),
# contains=c("Vessel",
# "Space","Time"
# ),
# #validity=validTrip
# )
#
#setMethod("initialize", "pipo",
# function(.Object,
# vessel=new("Vessel"),
# space=new("Space"),
# time=new("Time"),
# nbhaul=integer(),
# daysatsea=integer(),
# ...){
# .Object <- methods::callNextMethod()
# .Object<-importSlots(vessel,.Object)
# .Object<-importSlots(space,.Object)
# .Object<-importSlots(time,.Object)
# .Object@nbhaul<-nbhaul
# .Object@daysatsea<-daysatsea
# #methods::validObject(.Object)
# return(.Object)
# }
# )
#
#load("../data/Timetype.rda")
#source("00function.R")
#source("class0Time.R")
#source("class0Vessel.R")
#source("class0Space.R")
#new("pipo")
#
#}
|
05559f74a3974cd6ac9f4b2cf3150dac91249e9f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/DJL/R/map.soa.sf.R
|
c32fd835c8fb4ffdfc66def7523dc092f66bfc81
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,208
|
r
|
map.soa.sf.R
|
# Map the "state of the art" (SOA) frontier over time.
# For each unique date, all DMUs observed up to and including that date are
# evaluated with dm.sf() (external distance measure; exact semantics not
# visible here); DMUs whose efficiency rounds to exactly 0 at 8 decimals
# form the SOA set for that period. The result ("fanta") has one column per
# unique date and contains either the SOA DMU indices (mk="dmu") or their
# efficiency scores (mk="eff").
#
# Arguments:
#   xdata, ydata : input/output data, one row per DMU
#   date         : observation date per DMU (any sortable values)
#   rts          : returns to scale: "crs", "vrs", "irs", or "drs"
#   g            : directional vector(s), m+s columns (passed to dm.sf)
#   w            : optional weights, forwarded to dm.sf
#   sg           : secondary-goal option for dm.sf: "ssm", "max", "min"
#   mk           : map content: "dmu" (indices) or "eff" (efficiencies)
#
# NOTE(review): the result matrix is printed, not returned — callers get
# print()'s value; confirm whether a return(fanta) was intended.
map.soa.sf <-
function(xdata,ydata,date,rts,g,w=NULL,sg="ssm",mk="dmu"){
  # Initial checks
  if(is.na(match(rts,c("crs","vrs","irs","drs")))){stop('rts must be "crs", "vrs", "irs", or "drs".')}
  if(is.na(match(sg,c("ssm","max","min")))){stop('sg must be "ssm", "max", or "min".')}
  if(is.na(match(mk,c("dmu","eff")))){stop('mk must be either "dmu" or "eff".')}
  # Subset index
  # till(x, y): index of the last row of the (ascending-sorted) one-column
  # matrix x whose value is <= y, i.e. the size of the subset up to date y.
  till<-function(x,y){
    t<-0
    while(x[t+1]<=y&&t<nrow(x)){t<-t+1}
    return(t)
  }
  # Parameters
  xdata<-as.matrix(xdata);ydata<-as.matrix(ydata);date<-as.matrix(date);g<-as.matrix(g) # format input data as matrix
  n<-nrow(xdata); m<-ncol(xdata); s<-ncol(ydata)
  o<-matrix(c(1:n),ncol=1) # original data order
  # Sort data ascending order
  # (o keeps the original row numbers so SOA members can be reported in
  # the caller's original indexing)
  x<-matrix(c(xdata[order(date),]),ncol=m)
  y<-matrix(c(ydata[order(date),]),ncol=s)
  d<-matrix(c(date[order(date),]),ncol=1)
  g<-matrix(c(g[order(date),]),ncol=m+s)
  o<-matrix(c(o[order(date),]),ncol=1)
  # max map size
  # c: number of distinct dates = number of columns in the map
  c<-nrow(unique(d))
  ud<-unique(d)
  # map frame
  # fanta: n x c matrix, filled top-down as SOA members appear over time
  fanta<-matrix(c(NA),nrow=n,ncol=c);colnames(fanta)<-ud
  # generate the map
  for(i in 1:c){
    # subset data: everything observed up to date ud[i]
    e<-till(d,ud[i])
    x_s<-matrix(x[1:e,],nrow=e)
    y_s<-matrix(y[1:e,],nrow=e)
    g_s<-matrix(g[1:e,],nrow=e)
    # run distance measure
    dj<-dm.sf(x_s,y_s,rts,g_s,w,se=0,sg)
    # soa set: DMUs on the frontier — presumably those at zero distance
    soa<-which(round(dj$eff,8)==0)
    # fill the map
    if(mk=="dmu"){
      # j equals length(soa) here (indices from which() are positive)
      j<-sum(soa>0)
      q<-1
      for(k in 1:j){
        if(ud[i]==ud[1]){fanta[k,1]<-o[soa[k],]}
        else{
          # Keep a DMU in the same row as the previous column if it was
          # already SOA; otherwise append it below the last filled row.
          l<-which(fanta[,i-1]==o[soa[k],])
          if(length(l)>0){fanta[l,i]<-o[soa[k],]}
          else{
            p<-n
            while(is.na(fanta[p,i-1])){p<-p-1}
            fanta[p+q,i]<-o[soa[k],]
            q<-q+1
          }
        }
      }
    }
    if(mk=="eff"){
      # gsoa: cumulative union of all SOA member indices seen so far;
      # each column stores their current-period efficiencies.
      if(i==1){gsoa<-NULL}
      gsoa<-union(gsoa,soa);l<-length(gsoa)
      fanta[1:l,i]<-dj$eff[gsoa,]
    }
  }
  # Trim unused rows: i still holds c (the last column) after the loop.
  p<-n;while(is.na(fanta[p,i])){p<-p-1}
  fanta<-fanta[1:p,]
  # Row labels: original DMU indices (mk="dmu") or the cumulative SOA set
  # in the caller's original indexing (mk="eff").
  if(mk=="dmu"){rownames(fanta)<-na.omit(unique(c(fanta)))}
  if(mk=="eff"){rownames(fanta)<-c(o[gsoa,])}
  print(fanta)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.