blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a022909d892b4903efb384d09e0772b35902e7b
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.cloudfront/R/paws.cloudfront_interfaces.R
|
27dd2628da217e02171f4eb152b81d39a1817bba
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 253,427
|
r
|
paws.cloudfront_interfaces.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
NULL
create_cloud_front_origin_access_identity_input <- function (...)
{
    # Merge named formals (none here) with the caller's `...` arguments;
    # must run first so `environment()` does not capture the locals below.
    args <- c(as.list(environment()), list(...))
    # Leaf node describing a string-typed member of the request shape.
    string_node <- function() structure(logical(0), tags = list(type = "string"))
    # CloudFrontOriginAccessIdentityConfig is the XML payload of the request.
    identity_config <- structure(
        list(
            CallerReference = string_node(),
            Comment = string_node()
        ),
        tags = list(locationName = "CloudFrontOriginAccessIdentityConfig",
            type = "structure")
    )
    # Top-level request shape; `payload` names the serialized body member.
    shape <- structure(
        list(CloudFrontOriginAccessIdentityConfig = identity_config),
        tags = list(type = "structure",
            payload = "CloudFrontOriginAccessIdentityConfig")
    )
    # Fill the shape template with the supplied argument values.
    populate(args, shape)
}
create_cloud_front_origin_access_identity_output <- function (...)
{
    # Merge named formals (none here) with the caller's `...` arguments;
    # must run first so `environment()` does not capture the locals below.
    args <- c(as.list(environment()), list(...))
    # Leaf node describing a string-typed member of the response shape.
    string_node <- function() structure(logical(0), tags = list(type = "string"))
    # Leaf node for a string carried in an HTTP response header.
    header_node <- function(name) structure(logical(0),
        tags = list(location = "header", locationName = name, type = "string"))
    # Nested configuration echoed back inside the identity structure.
    identity_config <- structure(
        list(
            CallerReference = string_node(),
            Comment = string_node()
        ),
        tags = list(type = "structure")
    )
    # The CloudFrontOriginAccessIdentity payload returned by the API.
    identity <- structure(
        list(
            Id = string_node(),
            S3CanonicalUserId = string_node(),
            CloudFrontOriginAccessIdentityConfig = identity_config
        ),
        tags = list(type = "structure")
    )
    # Top-level response shape; Location and ETag arrive as headers.
    shape <- structure(
        list(
            CloudFrontOriginAccessIdentity = identity,
            Location = header_node("Location"),
            ETag = header_node("ETag")
        ),
        tags = list(type = "structure",
            payload = "CloudFrontOriginAccessIdentity")
    )
    # Fill the shape template with the supplied argument values.
    populate(args, shape)
}
# Build the request shape for the CloudFront CreateDistribution API call.
#
# Machine-generated (make.paws): the deeply nested `structure()` tree mirrors
# the DistributionConfig schema (Origins, CacheBehaviors, ViewerCertificate,
# Restrictions, ...). Each leaf is `logical(0)` carrying serialization `tags`
# (wire type, locationName, enum values, list bounds). `populate()` (from
# paws.common) fills this template from the caller's `...` arguments.
# Do not hand-edit the shape tree; regenerate it instead.
create_distribution_input <- function (...)
{
# Capture caller arguments before any locals exist in this environment.
args <- c(as.list(environment()), list(...))
shape <- structure(list(DistributionConfig = structure(list(CallerReference = structure(logical(0),
tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "CNAME", type = "string"))),
tags = list(locationNameList = "CNAME", type = "list"))),
tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), DomainName = structure(logical(0),
tags = list(type = "string")), OriginPath = structure(logical(0),
tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
tags = list(type = "string")), HeaderValue = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure")),
CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
tags = list(type = "integer")), HTTPSPort = structure(logical(0),
tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
tags = list(type = "string", enum = c("http-only",
"match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "SslProtocol", type = "string",
enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
tags = list(locationNameList = "SslProtocol", type = "list"))),
tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "structure"))),
tags = list(locationName = "Origin", type = "structure"))),
tags = list(locationNameList = "Origin", type = "list",
min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "StatusCode", type = "integer"))),
tags = list(locationNameList = "StatusCode", type = "list",
min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
Members = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
tags = list(locationName = "OriginGroup", type = "structure"))),
tags = list(locationNameList = "OriginGroup", type = "list"))),
tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
tags = list(type = "string")), TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(locationName = "CacheBehavior", type = "structure"))),
tags = list(locationNameList = "CacheBehavior", type = "list"))),
tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
tags = list(type = "string")), ResponseCode = structure(logical(0),
tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
tags = list(type = "boolean")), Bucket = structure(logical(0),
tags = list(type = "string")), Prefix = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure")),
PriceClass = structure(logical(0), tags = list(type = "string",
enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
Enabled = structure(logical(0), tags = list(type = "boolean")),
ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
tags = list(type = "string", enum = c("sni-only",
"vip"))), MinimumProtocolVersion = structure(logical(0),
tags = list(type = "string", enum = c("SSLv3", "TLSv1",
"TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
Certificate = structure(logical(0), tags = list(deprecated = TRUE,
type = "string")), CertificateSource = structure(logical(0),
tags = list(deprecated = TRUE, type = "string",
enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
tags = list(type = "string", enum = c("blacklist",
"whitelist", "none"))), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Location", type = "string"))),
tags = list(locationNameList = "Location", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
WebACLId = structure(logical(0), tags = list(type = "string")),
HttpVersion = structure(logical(0), tags = list(type = "string",
enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
tags = list(type = "boolean"))), tags = list(locationName = "DistributionConfig",
type = "structure"))), tags = list(type = "structure",
payload = "DistributionConfig"))
# Fill the template with the caller-supplied values and return it.
return(populate(args, shape))
}
# Build the response shape for the CloudFront CreateDistribution API call.
#
# Machine-generated (make.paws): mirrors the Distribution response schema —
# identity/status fields, ActiveTrustedSigners, and the echoed
# DistributionConfig — plus the Location and ETag response headers
# (tagged location = "header"). Leaves are `logical(0)` placeholders whose
# `tags` drive deserialization; `populate()` (from paws.common) fills the
# template from the caller's `...` arguments.
# Do not hand-edit the shape tree; regenerate it instead.
create_distribution_output <- function (...)
{
# Capture caller arguments before any locals exist in this environment.
args <- c(as.list(environment()), list(...))
shape <- structure(list(Distribution = structure(list(Id = structure(logical(0),
tags = list(type = "string")), ARN = structure(logical(0),
tags = list(type = "string")), Status = structure(logical(0),
tags = list(type = "string")), LastModifiedTime = structure(logical(0),
tags = list(type = "timestamp")), InProgressInvalidationBatches = structure(logical(0),
tags = list(type = "integer")), DomainName = structure(logical(0),
tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "KeyPairId", type = "string"))),
tags = list(locationNameList = "KeyPairId", type = "list"))),
tags = list(type = "structure"))), tags = list(locationName = "Signer",
type = "structure"))), tags = list(locationNameList = "Signer",
type = "list"))), tags = list(type = "structure")), DistributionConfig = structure(list(CallerReference = structure(logical(0),
tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "CNAME", type = "string"))),
tags = list(locationNameList = "CNAME", type = "list"))),
tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), DomainName = structure(logical(0),
tags = list(type = "string")), OriginPath = structure(logical(0),
tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
tags = list(type = "string")), HeaderValue = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure")),
CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
tags = list(type = "integer")), HTTPSPort = structure(logical(0),
tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
tags = list(type = "string", enum = c("http-only",
"match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "SslProtocol", type = "string",
enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
tags = list(locationNameList = "SslProtocol", type = "list"))),
tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "structure"))),
tags = list(locationName = "Origin", type = "structure"))),
tags = list(locationNameList = "Origin", type = "list",
min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "StatusCode", type = "integer"))),
tags = list(locationNameList = "StatusCode", type = "list",
min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
Members = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
tags = list(locationName = "OriginGroup", type = "structure"))),
tags = list(locationNameList = "OriginGroup", type = "list"))),
tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
tags = list(type = "string")), TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(locationName = "CacheBehavior", type = "structure"))),
tags = list(locationNameList = "CacheBehavior", type = "list"))),
tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
tags = list(type = "string")), ResponseCode = structure(logical(0),
tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
tags = list(type = "boolean")), Bucket = structure(logical(0),
tags = list(type = "string")), Prefix = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure")),
PriceClass = structure(logical(0), tags = list(type = "string",
enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
Enabled = structure(logical(0), tags = list(type = "boolean")),
ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
tags = list(type = "string", enum = c("sni-only",
"vip"))), MinimumProtocolVersion = structure(logical(0),
tags = list(type = "string", enum = c("SSLv3", "TLSv1",
"TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
Certificate = structure(logical(0), tags = list(deprecated = TRUE,
type = "string")), CertificateSource = structure(logical(0),
tags = list(deprecated = TRUE, type = "string",
enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
tags = list(type = "string", enum = c("blacklist",
"whitelist", "none"))), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Location", type = "string"))),
tags = list(locationNameList = "Location", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
WebACLId = structure(logical(0), tags = list(type = "string")),
HttpVersion = structure(logical(0), tags = list(type = "string",
enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "structure")), Location = structure(logical(0),
tags = list(location = "header", locationName = "Location",
type = "string")), ETag = structure(logical(0), tags = list(location = "header",
locationName = "ETag", type = "string"))), tags = list(type = "structure",
payload = "Distribution"))
# Fill the template with the caller-supplied values and return it.
return(populate(args, shape))
}
# Request shape for the CloudFront CreateDistributionWithTags operation.
#
# Merges any caller-supplied arguments (via `...`) with the declared
# serialization shape and returns the populated request object through
# paws.common::populate(). The nested `tags` attributes (type, locationName,
# enum, min/max, pattern, payload, ...) drive how paws.common serializes the
# request; `payload = "DistributionConfigWithTags"` marks that element as the
# request body. The shape is a DistributionConfig (origins, cache behaviors,
# certificates, restrictions, logging, ...) plus a Tags list of Key/Value
# pairs. Generated by make.paws -- regenerate rather than editing by hand.
create_distribution_with_tags_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(DistributionConfigWithTags = structure(list(DistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "CNAME", type = "string"))),
        tags = list(locationNameList = "CNAME", type = "list"))),
        tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
        tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), DomainName = structure(logical(0),
        tags = list(type = "string")), OriginPath = structure(logical(0),
        tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
        tags = list(type = "string")), HeaderValue = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
        type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
        type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
        tags = list(type = "integer")), HTTPSPort = structure(logical(0),
        tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
        tags = list(type = "string", enum = c("http-only",
        "match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "SslProtocol", type = "string",
        enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
        tags = list(locationNameList = "SslProtocol", type = "list"))),
        tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
        tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
        tags = list(type = "integer"))), tags = list(type = "structure"))),
        tags = list(locationName = "Origin", type = "structure"))),
        tags = list(locationNameList = "Origin", type = "list",
        min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "StatusCode", type = "integer"))),
        tags = list(locationNameList = "StatusCode", type = "list",
        min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
        Members = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
        type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
        type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
        tags = list(locationName = "OriginGroup", type = "structure"))),
        tags = list(locationNameList = "OriginGroup", type = "list"))),
        tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
        "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "AwsAccountNumber", type = "string"))),
        tags = list(locationNameList = "AwsAccountNumber",
        type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
        enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
        tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
        tags = list(type = "long")), MaxTTL = structure(logical(0),
        tags = list(type = "long")), Compress = structure(logical(0),
        tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
        tags = list(type = "string")), EventType = structure(logical(0),
        tags = list(type = "string", enum = c("viewer-request",
        "viewer-response", "origin-request", "origin-response"))),
        IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "LambdaFunctionAssociation",
        type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
        type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
        tags = list(type = "string")), TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
        "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "AwsAccountNumber", type = "string"))),
        tags = list(locationNameList = "AwsAccountNumber",
        type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
        enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
        tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
        tags = list(type = "long")), MaxTTL = structure(logical(0),
        tags = list(type = "long")), Compress = structure(logical(0),
        tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
        tags = list(type = "string")), EventType = structure(logical(0),
        tags = list(type = "string", enum = c("viewer-request",
        "viewer-response", "origin-request", "origin-response"))),
        IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "LambdaFunctionAssociation",
        type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
        type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(locationName = "CacheBehavior", type = "structure"))),
        tags = list(locationNameList = "CacheBehavior", type = "list"))),
        tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
        tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
        tags = list(type = "string")), ResponseCode = structure(logical(0),
        tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
        tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
        type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
        type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
        tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
        tags = list(type = "boolean")), Bucket = structure(logical(0),
        tags = list(type = "string")), Prefix = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
        enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean")),
        ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
        tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
        tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
        tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
        tags = list(type = "string", enum = c("sni-only",
        "vip"))), MinimumProtocolVersion = structure(logical(0),
        tags = list(type = "string", enum = c("SSLv3", "TLSv1",
        "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
        Certificate = structure(logical(0), tags = list(deprecated = TRUE,
        type = "string")), CertificateSource = structure(logical(0),
        tags = list(deprecated = TRUE, type = "string",
        enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
        Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
        tags = list(type = "string", enum = c("blacklist",
        "whitelist", "none"))), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Location", type = "string"))),
        tags = list(locationNameList = "Location", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        WebACLId = structure(logical(0), tags = list(type = "string")),
        HttpVersion = structure(logical(0), tags = list(type = "string",
        enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
        tags = list(type = "boolean"))), tags = list(type = "structure")),
        Tags = structure(list(Items = structure(list(structure(list(Key = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L,
        pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$")),
        Value = structure(logical(0), tags = list(type = "string",
        max = 256L, min = 0L, pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$"))),
        tags = list(locationName = "Tag", type = "structure"))),
        tags = list(locationNameList = "Tag", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "DistributionConfigWithTags",
        type = "structure"))), tags = list(type = "structure",
        payload = "DistributionConfigWithTags"))
    return(populate(args, shape))
}
# Response shape for the CloudFront CreateDistributionWithTags operation.
#
# Declares how the CreateDistributionWithTags response deserializes: a
# Distribution structure (Id, ARN, Status, LastModifiedTime,
# ActiveTrustedSigners, the echoed DistributionConfig, ...) plus the Location
# and ETag values read from HTTP headers (`location = "header"` tags).
# `payload = "Distribution"` marks the Distribution element as the response
# body. Arguments supplied via `...` are merged into the shape by
# paws.common::populate(). Generated by make.paws -- regenerate rather than
# editing by hand.
create_distribution_with_tags_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Distribution = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), InProgressInvalidationBatches = structure(logical(0),
        tags = list(type = "integer")), DomainName = structure(logical(0),
        tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
        tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "KeyPairId", type = "string"))),
        tags = list(locationNameList = "KeyPairId", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "Signer",
        type = "structure"))), tags = list(locationNameList = "Signer",
        type = "list"))), tags = list(type = "structure")), DistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "CNAME", type = "string"))),
        tags = list(locationNameList = "CNAME", type = "list"))),
        tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
        tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), DomainName = structure(logical(0),
        tags = list(type = "string")), OriginPath = structure(logical(0),
        tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
        tags = list(type = "string")), HeaderValue = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
        type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
        type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
        tags = list(type = "integer")), HTTPSPort = structure(logical(0),
        tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
        tags = list(type = "string", enum = c("http-only",
        "match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "SslProtocol", type = "string",
        enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
        tags = list(locationNameList = "SslProtocol", type = "list"))),
        tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
        tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
        tags = list(type = "integer"))), tags = list(type = "structure"))),
        tags = list(locationName = "Origin", type = "structure"))),
        tags = list(locationNameList = "Origin", type = "list",
        min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "StatusCode", type = "integer"))),
        tags = list(locationNameList = "StatusCode", type = "list",
        min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
        Members = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
        type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
        type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
        tags = list(locationName = "OriginGroup", type = "structure"))),
        tags = list(locationNameList = "OriginGroup", type = "list"))),
        tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
        "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "AwsAccountNumber", type = "string"))),
        tags = list(locationNameList = "AwsAccountNumber",
        type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
        enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
        tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
        tags = list(type = "long")), MaxTTL = structure(logical(0),
        tags = list(type = "long")), Compress = structure(logical(0),
        tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
        tags = list(type = "string")), EventType = structure(logical(0),
        tags = list(type = "string", enum = c("viewer-request",
        "viewer-response", "origin-request", "origin-response"))),
        IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "LambdaFunctionAssociation",
        type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
        type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
        tags = list(type = "string")), TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
        "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "AwsAccountNumber", type = "string"))),
        tags = list(locationNameList = "AwsAccountNumber",
        type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
        enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
        tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
        tags = list(type = "long")), MaxTTL = structure(logical(0),
        tags = list(type = "long")), Compress = structure(logical(0),
        tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
        tags = list(type = "string")), EventType = structure(logical(0),
        tags = list(type = "string", enum = c("viewer-request",
        "viewer-response", "origin-request", "origin-response"))),
        IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "LambdaFunctionAssociation",
        type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
        type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(locationName = "CacheBehavior", type = "structure"))),
        tags = list(locationNameList = "CacheBehavior", type = "list"))),
        tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
        tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
        tags = list(type = "string")), ResponseCode = structure(logical(0),
        tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
        tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
        type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
        type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
        tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
        tags = list(type = "boolean")), Bucket = structure(logical(0),
        tags = list(type = "string")), Prefix = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
        enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean")),
        ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
        tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
        tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
        tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
        tags = list(type = "string", enum = c("sni-only",
        "vip"))), MinimumProtocolVersion = structure(logical(0),
        tags = list(type = "string", enum = c("SSLv3", "TLSv1",
        "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
        Certificate = structure(logical(0), tags = list(deprecated = TRUE,
        type = "string")), CertificateSource = structure(logical(0),
        tags = list(deprecated = TRUE, type = "string",
        enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
        Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
        tags = list(type = "string", enum = c("blacklist",
        "whitelist", "none"))), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Location", type = "string"))),
        tags = list(locationNameList = "Location", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        WebACLId = structure(logical(0), tags = list(type = "string")),
        HttpVersion = structure(logical(0), tags = list(type = "string",
        enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
        tags = list(type = "boolean"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), Location = structure(logical(0),
        tags = list(location = "header", locationName = "Location",
        type = "string")), ETag = structure(logical(0), tags = list(location = "header",
        locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "Distribution"))
    return(populate(args, shape))
}
# Request shape for the CloudFront CreateFieldLevelEncryptionConfig
# operation. Builds the same serialization shape as the generated code --
# a FieldLevelEncryptionConfig with CallerReference, Comment,
# QueryArgProfileConfig, and ContentTypeProfileConfig -- assembled here
# from named sub-shapes for readability, then merges caller-supplied
# arguments via paws.common::populate().
create_field_level_encryption_config_input <- function (...)
{
    # Collect supplied values first, before any locals exist in this frame,
    # so as.list(environment()) matches the generated code's behaviour.
    args <- c(as.list(environment()), list(...))
    # Leaf node factory: an empty placeholder tagged with its wire type.
    leaf <- function(node_type) {
        structure(logical(0), tags = list(type = node_type))
    }
    # <QueryArgProfiles>: count plus a list of QueryArgProfile entries.
    query_arg_profile <- structure(list(QueryArg = leaf("string"),
        ProfileId = leaf("string")), tags = list(locationName = "QueryArgProfile",
        type = "structure"))
    query_arg_profile_config <- structure(list(
        ForwardWhenQueryArgProfileIsUnknown = leaf("boolean"),
        QueryArgProfiles = structure(list(Quantity = leaf("integer"),
            Items = structure(list(query_arg_profile), tags = list(
                locationNameList = "QueryArgProfile", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure"))
    # <ContentTypeProfiles>: count plus a list of ContentTypeProfile entries.
    content_type_profile <- structure(list(
        Format = structure(logical(0), tags = list(type = "string",
            enum = "URLEncoded")),
        ProfileId = leaf("string"),
        ContentType = leaf("string")), tags = list(
        locationName = "ContentTypeProfile", type = "structure"))
    content_type_profile_config <- structure(list(
        ForwardWhenContentTypeIsUnknown = leaf("boolean"),
        ContentTypeProfiles = structure(list(Quantity = leaf("integer"),
            Items = structure(list(content_type_profile), tags = list(
                locationNameList = "ContentTypeProfile", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure"))
    # Top-level request: the config element is the XML payload.
    field_level_encryption_config <- structure(list(
        CallerReference = leaf("string"),
        Comment = leaf("string"),
        QueryArgProfileConfig = query_arg_profile_config,
        ContentTypeProfileConfig = content_type_profile_config), tags = list(
        locationName = "FieldLevelEncryptionConfig", type = "structure"))
    shape <- structure(list(
        FieldLevelEncryptionConfig = field_level_encryption_config),
        tags = list(type = "structure", payload = "FieldLevelEncryptionConfig"))
    return(populate(args, shape))
}
# Response-shape constructor for CreateFieldLevelEncryptionConfig
# (generated code -- edit the generator, not this file).  Location and
# ETag are mapped from response headers (location = "header" tags); the
# FieldLevelEncryption structure is the response payload.
create_field_level_encryption_config_output <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryption = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), FieldLevelEncryptionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), QueryArgProfileConfig = structure(list(ForwardWhenQueryArgProfileIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), QueryArgProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(QueryArg = structure(logical(0),
        tags = list(type = "string")), ProfileId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "QueryArgProfile",
        type = "structure"))), tags = list(locationNameList = "QueryArgProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ContentTypeProfileConfig = structure(list(ForwardWhenContentTypeIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), ContentTypeProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Format = structure(logical(0),
        tags = list(type = "string", enum = "URLEncoded")), ProfileId = structure(logical(0),
        tags = list(type = "string")), ContentType = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "ContentTypeProfile",
        type = "structure"))), tags = list(locationNameList = "ContentTypeProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), Location = structure(logical(0),
        tags = list(location = "header", locationName = "Location",
            type = "string")), ETag = structure(logical(0), tags = list(location = "header",
        locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryption"))
    return(populate(args, shape))
}
# Request-shape constructor for CreateFieldLevelEncryptionProfile
# (generated code -- edit the generator, not this file).  The
# FieldLevelEncryptionProfileConfig structure is the request payload;
# populate() merges the caller's named arguments into the template.
create_field_level_encryption_profile_input <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionProfileConfig = structure(list(Name = structure(logical(0),
        tags = list(type = "string")), CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), EncryptionEntities = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PublicKeyId = structure(logical(0),
        tags = list(type = "string")), ProviderId = structure(logical(0),
        tags = list(type = "string")), FieldPatterns = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "FieldPattern", type = "string"))),
        tags = list(locationNameList = "FieldPattern", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "EncryptionEntity",
        type = "structure"))), tags = list(locationNameList = "EncryptionEntity",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(locationName = "FieldLevelEncryptionProfileConfig",
            type = "structure"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionProfileConfig"))
    return(populate(args, shape))
}
# Response-shape constructor for CreateFieldLevelEncryptionProfile
# (generated code -- edit the generator, not this file).  Location and
# ETag come from response headers; FieldLevelEncryptionProfile is the
# response payload.
create_field_level_encryption_profile_output <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionProfile = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), FieldLevelEncryptionProfileConfig = structure(list(Name = structure(logical(0),
        tags = list(type = "string")), CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), EncryptionEntities = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PublicKeyId = structure(logical(0),
        tags = list(type = "string")), ProviderId = structure(logical(0),
        tags = list(type = "string")), FieldPatterns = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "FieldPattern", type = "string"))),
        tags = list(locationNameList = "FieldPattern", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "EncryptionEntity",
        type = "structure"))), tags = list(locationNameList = "EncryptionEntity",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Location = structure(logical(0), tags = list(location = "header",
            locationName = "Location", type = "string")), ETag = structure(logical(0),
            tags = list(location = "header", locationName = "ETag",
                type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionProfile"))
    return(populate(args, shape))
}
# Request-shape constructor for CreateInvalidation (generated code --
# edit the generator, not this file).  DistributionId is interpolated
# into the request URI (location = "uri"); the InvalidationBatch
# structure is the request payload.
create_invalidation_input <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(DistributionId = structure(logical(0),
        tags = list(location = "uri", locationName = "DistributionId",
            type = "string")), InvalidationBatch = structure(list(Paths = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Path", type = "string"))),
        tags = list(locationNameList = "Path", type = "list"))),
        tags = list(type = "structure")), CallerReference = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "InvalidationBatch",
        type = "structure"))), tags = list(type = "structure",
        payload = "InvalidationBatch"))
    return(populate(args, shape))
}
# Response-shape constructor for CreateInvalidation (generated code --
# edit the generator, not this file).  Location is read from a response
# header; the Invalidation structure is the response payload.
create_invalidation_output <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Location = structure(logical(0),
        tags = list(location = "header", locationName = "Location",
            type = "string")), Invalidation = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), CreateTime = structure(logical(0),
        tags = list(type = "timestamp")), InvalidationBatch = structure(list(Paths = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Path", type = "string"))),
        tags = list(locationNameList = "Path", type = "list"))),
        tags = list(type = "structure")), CallerReference = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure",
        payload = "Invalidation"))
    return(populate(args, shape))
}
# Request-shape constructor for CreatePublicKey: builds the tagged
# shape template for the PublicKeyConfig request payload and merges the
# caller's named arguments into it via populate().
create_public_key_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    # All four config members are plain string shapes.
    string_shape <- function() {
        structure(logical(0), tags = list(type = "string"))
    }
    config <- structure(
        list(
            CallerReference = string_shape(),
            Name = string_shape(),
            EncodedKey = string_shape(),
            Comment = string_shape()
        ),
        tags = list(locationName = "PublicKeyConfig", type = "structure")
    )
    # The config structure is the sole request payload.
    shape <- structure(list(PublicKeyConfig = config),
        tags = list(type = "structure", payload = "PublicKeyConfig"))
    populate(args, shape)
}
# Response-shape constructor for CreatePublicKey (generated code --
# edit the generator, not this file).  Location and ETag come from
# response headers; the PublicKey structure is the response payload.
create_public_key_output <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(PublicKey = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), CreatedTime = structure(logical(0),
        tags = list(type = "timestamp")), PublicKeyConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Name = structure(logical(0),
        tags = list(type = "string")), EncodedKey = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), Location = structure(logical(0),
        tags = list(location = "header", locationName = "Location",
            type = "string")), ETag = structure(logical(0), tags = list(location = "header",
        locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "PublicKey"))
    return(populate(args, shape))
}
# Request-shape constructor for CreateStreamingDistribution (generated
# code -- edit the generator, not this file).  The
# StreamingDistributionConfig structure is the request payload;
# populate() merges the caller's named arguments into the template.
create_streaming_distribution_input <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "StreamingDistributionConfig",
            type = "structure"))), tags = list(type = "structure",
        payload = "StreamingDistributionConfig"))
    return(populate(args, shape))
}
# Response-shape constructor for CreateStreamingDistribution (generated
# code -- edit the generator, not this file).  Location and ETag come
# from response headers; the StreamingDistribution structure is the
# response payload.
create_streaming_distribution_output <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistribution = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), DomainName = structure(logical(0),
        tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
        tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "KeyPairId", type = "string"))),
        tags = list(locationNameList = "KeyPairId", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "Signer",
        type = "structure"))), tags = list(locationNameList = "Signer",
        type = "list"))), tags = list(type = "structure")), StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Location = structure(logical(0), tags = list(location = "header",
            locationName = "Location", type = "string")), ETag = structure(logical(0),
            tags = list(location = "header", locationName = "ETag",
                type = "string"))), tags = list(type = "structure",
        payload = "StreamingDistribution"))
    return(populate(args, shape))
}
# Request-shape constructor for CreateStreamingDistributionWithTags
# (generated code -- edit the generator, not this file).  The payload
# wraps a StreamingDistributionConfig plus a Tags list; tag Key/Value
# members carry length and pattern constraints in their tags.
create_streaming_distribution_with_tags_input <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistributionConfigWithTags = structure(list(StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(type = "structure")), Tags = structure(list(Items = structure(list(structure(list(Key = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L, pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$")),
        Value = structure(logical(0), tags = list(type = "string",
            max = 256L, min = 0L, pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$"))),
        tags = list(locationName = "Tag", type = "structure"))),
        tags = list(locationNameList = "Tag", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "StreamingDistributionConfigWithTags",
        type = "structure"))), tags = list(type = "structure",
        payload = "StreamingDistributionConfigWithTags"))
    return(populate(args, shape))
}
# Response-shape constructor for CreateStreamingDistributionWithTags
# (generated code -- edit the generator, not this file).  Shape is the
# same as the CreateStreamingDistribution response: header-mapped
# Location/ETag plus a StreamingDistribution payload.
create_streaming_distribution_with_tags_output <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistribution = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), DomainName = structure(logical(0),
        tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
        tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "KeyPairId", type = "string"))),
        tags = list(locationNameList = "KeyPairId", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "Signer",
        type = "structure"))), tags = list(locationNameList = "Signer",
        type = "list"))), tags = list(type = "structure")), StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Location = structure(logical(0), tags = list(location = "header",
            locationName = "Location", type = "string")), ETag = structure(logical(0),
            tags = list(location = "header", locationName = "ETag",
                type = "string"))), tags = list(type = "structure",
        payload = "StreamingDistribution"))
    return(populate(args, shape))
}
# Request-shape constructor for DeleteCloudFrontOriginAccessIdentity:
# Id is interpolated into the request URI, IfMatch is sent as the
# If-Match header; populate() merges the caller's arguments.
delete_cloud_front_origin_access_identity_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    shape <- structure(
        list(
            Id = structure(logical(0), tags = list(location = "uri",
                locationName = "Id", type = "string")),
            IfMatch = structure(logical(0), tags = list(location = "header",
                locationName = "If-Match", type = "string"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# DeleteCloudFrontOriginAccessIdentity has no modeled response fields;
# the output shape is simply an empty list.
delete_cloud_front_origin_access_identity_output <- function ()
{
    list()
}
# Request-shape constructor for DeleteDistribution: Id is interpolated
# into the request URI, IfMatch is sent as the If-Match header;
# populate() merges the caller's arguments.
delete_distribution_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    shape <- structure(
        list(
            Id = structure(logical(0), tags = list(location = "uri",
                locationName = "Id", type = "string")),
            IfMatch = structure(logical(0), tags = list(location = "header",
                locationName = "If-Match", type = "string"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# DeleteDistribution has no modeled response fields; the output shape
# is simply an empty list.
delete_distribution_output <- function ()
{
    list()
}
# Request-shape constructor for DeleteFieldLevelEncryptionConfig: Id is
# interpolated into the request URI, IfMatch is sent as the If-Match
# header; populate() merges the caller's arguments.
delete_field_level_encryption_config_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    shape <- structure(
        list(
            Id = structure(logical(0), tags = list(location = "uri",
                locationName = "Id", type = "string")),
            IfMatch = structure(logical(0), tags = list(location = "header",
                locationName = "If-Match", type = "string"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# DeleteFieldLevelEncryptionConfig has no modeled response fields; the
# output shape is simply an empty list.
delete_field_level_encryption_config_output <- function ()
{
    list()
}
# Request-shape constructor for DeleteFieldLevelEncryptionProfile: Id
# is interpolated into the request URI, IfMatch is sent as the If-Match
# header; populate() merges the caller's arguments.
delete_field_level_encryption_profile_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    shape <- structure(
        list(
            Id = structure(logical(0), tags = list(location = "uri",
                locationName = "Id", type = "string")),
            IfMatch = structure(logical(0), tags = list(location = "header",
                locationName = "If-Match", type = "string"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# DeleteFieldLevelEncryptionProfile has no modeled response fields; the
# output shape is simply an empty list.
delete_field_level_encryption_profile_output <- function ()
{
    list()
}
# Request-shape constructor for DeletePublicKey: Id is interpolated
# into the request URI, IfMatch is sent as the If-Match header;
# populate() merges the caller's arguments.
delete_public_key_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    shape <- structure(
        list(
            Id = structure(logical(0), tags = list(location = "uri",
                locationName = "Id", type = "string")),
            IfMatch = structure(logical(0), tags = list(location = "header",
                locationName = "If-Match", type = "string"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# DeletePublicKey has no modeled response fields; the output shape is
# simply an empty list.
delete_public_key_output <- function ()
{
    list()
}
# Request-shape constructor for DeleteStreamingDistribution: Id is
# interpolated into the request URI, IfMatch is sent as the If-Match
# header; populate() merges the caller's arguments.
delete_streaming_distribution_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    shape <- structure(
        list(
            Id = structure(logical(0), tags = list(location = "uri",
                locationName = "Id", type = "string")),
            IfMatch = structure(logical(0), tags = list(location = "header",
                locationName = "If-Match", type = "string"))
        ),
        tags = list(type = "structure")
    )
    populate(args, shape)
}
# DeleteStreamingDistribution has no modeled response fields; the
# output shape is simply an empty list.
delete_streaming_distribution_output <- function ()
{
    list()
}
# Request-shape constructor for GetCloudFrontOriginAccessIdentity: the
# single Id member is interpolated into the request URI.
get_cloud_front_origin_access_identity_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    id_shape <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    populate(args, structure(list(Id = id_shape),
        tags = list(type = "structure")))
}
# Response-shape constructor for GetCloudFrontOriginAccessIdentity
# (generated code -- edit the generator, not this file).  ETag comes
# from a response header; the CloudFrontOriginAccessIdentity structure
# is the response payload.
get_cloud_front_origin_access_identity_output <- function (...)
{
    # Capture all caller-supplied named arguments.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(CloudFrontOriginAccessIdentity = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), S3CanonicalUserId = structure(logical(0),
        tags = list(type = "string")), CloudFrontOriginAccessIdentityConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
            type = "string"))), tags = list(type = "structure",
        payload = "CloudFrontOriginAccessIdentity"))
    return(populate(args, shape))
}
# Request-shape constructor for GetCloudFrontOriginAccessIdentityConfig:
# the single Id member is interpolated into the request URI.
get_cloud_front_origin_access_identity_config_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    id_shape <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    populate(args, structure(list(Id = id_shape),
        tags = list(type = "structure")))
}
# Response-shape constructor for GetCloudFrontOriginAccessIdentityConfig:
# the config structure is the response payload and ETag is read from a
# response header.
get_cloud_front_origin_access_identity_config_output <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    string_shape <- function() {
        structure(logical(0), tags = list(type = "string"))
    }
    config <- structure(
        list(CallerReference = string_shape(), Comment = string_shape()),
        tags = list(type = "structure")
    )
    etag <- structure(logical(0), tags = list(location = "header",
        locationName = "ETag", type = "string"))
    shape <- structure(
        list(CloudFrontOriginAccessIdentityConfig = config, ETag = etag),
        tags = list(type = "structure",
            payload = "CloudFrontOriginAccessIdentityConfig")
    )
    populate(args, shape)
}
# Request-shape constructor for GetDistribution: the single Id member
# is interpolated into the request URI.
get_distribution_input <- function (...)
{
    # Collect caller arguments before any locals are bound below.
    args <- c(as.list(environment()), list(...))
    id_shape <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    populate(args, structure(list(Id = id_shape),
        tags = list(type = "structure")))
}
get_distribution_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(Distribution = structure(list(Id = structure(logical(0),
tags = list(type = "string")), ARN = structure(logical(0),
tags = list(type = "string")), Status = structure(logical(0),
tags = list(type = "string")), LastModifiedTime = structure(logical(0),
tags = list(type = "timestamp")), InProgressInvalidationBatches = structure(logical(0),
tags = list(type = "integer")), DomainName = structure(logical(0),
tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "KeyPairId", type = "string"))),
tags = list(locationNameList = "KeyPairId", type = "list"))),
tags = list(type = "structure"))), tags = list(locationName = "Signer",
type = "structure"))), tags = list(locationNameList = "Signer",
type = "list"))), tags = list(type = "structure")), DistributionConfig = structure(list(CallerReference = structure(logical(0),
tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "CNAME", type = "string"))),
tags = list(locationNameList = "CNAME", type = "list"))),
tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), DomainName = structure(logical(0),
tags = list(type = "string")), OriginPath = structure(logical(0),
tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
tags = list(type = "string")), HeaderValue = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure")),
CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
tags = list(type = "integer")), HTTPSPort = structure(logical(0),
tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
tags = list(type = "string", enum = c("http-only",
"match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "SslProtocol", type = "string",
enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
tags = list(locationNameList = "SslProtocol", type = "list"))),
tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "structure"))),
tags = list(locationName = "Origin", type = "structure"))),
tags = list(locationNameList = "Origin", type = "list",
min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "StatusCode", type = "integer"))),
tags = list(locationNameList = "StatusCode", type = "list",
min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
Members = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
tags = list(locationName = "OriginGroup", type = "structure"))),
tags = list(locationNameList = "OriginGroup", type = "list"))),
tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
tags = list(type = "string")), TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(locationName = "CacheBehavior", type = "structure"))),
tags = list(locationNameList = "CacheBehavior", type = "list"))),
tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
tags = list(type = "string")), ResponseCode = structure(logical(0),
tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
tags = list(type = "boolean")), Bucket = structure(logical(0),
tags = list(type = "string")), Prefix = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure")),
PriceClass = structure(logical(0), tags = list(type = "string",
enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
Enabled = structure(logical(0), tags = list(type = "boolean")),
ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
tags = list(type = "string", enum = c("sni-only",
"vip"))), MinimumProtocolVersion = structure(logical(0),
tags = list(type = "string", enum = c("SSLv3", "TLSv1",
"TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
Certificate = structure(logical(0), tags = list(deprecated = TRUE,
type = "string")), CertificateSource = structure(logical(0),
tags = list(deprecated = TRUE, type = "string",
enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
tags = list(type = "string", enum = c("blacklist",
"whitelist", "none"))), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Location", type = "string"))),
tags = list(locationNameList = "Location", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
WebACLId = structure(logical(0), tags = list(type = "string")),
HttpVersion = structure(logical(0), tags = list(type = "string",
enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
tags = list(type = "boolean"))), tags = list(type = "structure"))),
tags = list(type = "structure")), ETag = structure(logical(0),
tags = list(location = "header", locationName = "ETag",
type = "string"))), tags = list(type = "structure",
payload = "Distribution"))
return(populate(args, shape))
}
# Request shape for CloudFront's GetDistributionConfig operation: a single
# string `Id` that the marshaller binds into the request URI. `populate()`
# (imported from paws.common) fills the template from the caller's arguments.
get_distribution_config_input <- function (...)
{
    params <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    request_shape <- structure(list(Id = id_field),
        tags = list(type = "structure"))
    populate(params, request_shape)
}
# Response shape for CloudFront's GetDistributionConfig operation.
#
# The nested `structure()` literal mirrors the DistributionConfig XML payload
# (aliases, origins, origin groups, default/extra cache behaviors, custom
# error responses, logging, viewer certificate, geo restrictions, ...) plus
# the `ETag` response header. Each leaf is a typed placeholder (`logical(0)`)
# carrying serialization tags rather than data; `populate()` (imported from
# paws.common, see file header) copies matching caller-supplied values into
# this template.
get_distribution_config_output <- function (...)
{
    # Gather the named values passed by the caller.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(DistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "CNAME", type = "string"))),
        tags = list(locationNameList = "CNAME", type = "list"))),
        tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
        tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), DomainName = structure(logical(0),
        tags = list(type = "string")), OriginPath = structure(logical(0),
        tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
        tags = list(type = "string")), HeaderValue = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
        type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
        type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
        tags = list(type = "integer")), HTTPSPort = structure(logical(0),
        tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
        tags = list(type = "string", enum = c("http-only",
        "match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "SslProtocol", type = "string",
        enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
        tags = list(locationNameList = "SslProtocol", type = "list"))),
        tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
        tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
        tags = list(type = "integer"))), tags = list(type = "structure"))),
        tags = list(locationName = "Origin", type = "structure"))),
        tags = list(locationNameList = "Origin", type = "list",
        min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "StatusCode", type = "integer"))),
        tags = list(locationNameList = "StatusCode", type = "list",
        min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
        Members = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
        type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
        type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
        tags = list(locationName = "OriginGroup", type = "structure"))),
        tags = list(locationNameList = "OriginGroup", type = "list"))),
        tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
        "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "AwsAccountNumber", type = "string"))),
        tags = list(locationNameList = "AwsAccountNumber",
        type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
        enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
        tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
        tags = list(type = "long")), MaxTTL = structure(logical(0),
        tags = list(type = "long")), Compress = structure(logical(0),
        tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
        tags = list(type = "string")), EventType = structure(logical(0),
        tags = list(type = "string", enum = c("viewer-request",
        "viewer-response", "origin-request", "origin-response"))),
        IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "LambdaFunctionAssociation",
        type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
        type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
        tags = list(type = "string")), TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
        "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "AwsAccountNumber", type = "string"))),
        tags = list(locationNameList = "AwsAccountNumber",
        type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
        enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Method", type = "string",
        enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
        "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
        tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
        tags = list(type = "long")), MaxTTL = structure(logical(0),
        tags = list(type = "long")), Compress = structure(logical(0),
        tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
        tags = list(type = "string")), EventType = structure(logical(0),
        tags = list(type = "string", enum = c("viewer-request",
        "viewer-response", "origin-request", "origin-response"))),
        IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "LambdaFunctionAssociation",
        type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
        type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(locationName = "CacheBehavior", type = "structure"))),
        tags = list(locationNameList = "CacheBehavior", type = "list"))),
        tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
        tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
        tags = list(type = "string")), ResponseCode = structure(logical(0),
        tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
        tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
        type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
        type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
        tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
        tags = list(type = "boolean")), Bucket = structure(logical(0),
        tags = list(type = "string")), Prefix = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
        enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean")),
        ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
        tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
        tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
        tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
        tags = list(type = "string", enum = c("sni-only",
        "vip"))), MinimumProtocolVersion = structure(logical(0),
        tags = list(type = "string", enum = c("SSLv3", "TLSv1",
        "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
        Certificate = structure(logical(0), tags = list(deprecated = TRUE,
        type = "string")), CertificateSource = structure(logical(0),
        tags = list(deprecated = TRUE, type = "string",
        enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
        Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
        tags = list(type = "string", enum = c("blacklist",
        "whitelist", "none"))), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Location", type = "string"))),
        tags = list(locationNameList = "Location", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        WebACLId = structure(logical(0), tags = list(type = "string")),
        HttpVersion = structure(logical(0), tags = list(type = "string",
        enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
        tags = list(type = "boolean"))), tags = list(type = "structure")),
        ETag = structure(logical(0), tags = list(location = "header",
        locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "DistributionConfig"))
    # Copy the collected values into the placeholder shape and return it.
    return(populate(args, shape))
}
# Request shape for CloudFront's GetFieldLevelEncryption operation: a single
# string `Id` bound into the request URI. `populate()` (imported from
# paws.common) fills the template from the caller's arguments.
get_field_level_encryption_input <- function (...)
{
    params <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    request_shape <- structure(list(Id = id_field),
        tags = list(type = "structure"))
    populate(params, request_shape)
}
# Response shape for CloudFront's GetFieldLevelEncryption operation.
#
# Mirrors the FieldLevelEncryption XML payload (id, last-modified timestamp,
# query-arg and content-type profile configuration) plus the `ETag` response
# header. Leaves are typed placeholders (`logical(0)`) carrying serialization
# tags; `populate()` (imported from paws.common, see file header) copies
# matching caller-supplied values into the template.
get_field_level_encryption_output <- function (...)
{
    # Gather the named values passed by the caller.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryption = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), FieldLevelEncryptionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), QueryArgProfileConfig = structure(list(ForwardWhenQueryArgProfileIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), QueryArgProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(QueryArg = structure(logical(0),
        tags = list(type = "string")), ProfileId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "QueryArgProfile",
        type = "structure"))), tags = list(locationNameList = "QueryArgProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ContentTypeProfileConfig = structure(list(ForwardWhenContentTypeIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), ContentTypeProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Format = structure(logical(0),
        tags = list(type = "string", enum = "URLEncoded")), ProfileId = structure(logical(0),
        tags = list(type = "string")), ContentType = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "ContentTypeProfile",
        type = "structure"))), tags = list(locationNameList = "ContentTypeProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
        type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryption"))
    # Copy the collected values into the placeholder shape and return it.
    return(populate(args, shape))
}
# Request shape for CloudFront's GetFieldLevelEncryptionConfig operation:
# a single string `Id` bound into the request URI. `populate()` (imported
# from paws.common) fills the template from the caller's arguments.
get_field_level_encryption_config_input <- function (...)
{
    params <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    request_shape <- structure(list(Id = id_field),
        tags = list(type = "structure"))
    populate(params, request_shape)
}
# Response shape for CloudFront's GetFieldLevelEncryptionConfig operation.
#
# Mirrors the FieldLevelEncryptionConfig XML payload (caller reference,
# comment, query-arg and content-type profile configuration) plus the `ETag`
# response header. Leaves are typed placeholders (`logical(0)`) carrying
# serialization tags; `populate()` (imported from paws.common, see file
# header) copies matching caller-supplied values into the template.
get_field_level_encryption_config_output <- function (...)
{
    # Gather the named values passed by the caller.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), QueryArgProfileConfig = structure(list(ForwardWhenQueryArgProfileIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), QueryArgProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(QueryArg = structure(logical(0),
        tags = list(type = "string")), ProfileId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "QueryArgProfile",
        type = "structure"))), tags = list(locationNameList = "QueryArgProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ContentTypeProfileConfig = structure(list(ForwardWhenContentTypeIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), ContentTypeProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Format = structure(logical(0),
        tags = list(type = "string", enum = "URLEncoded")), ProfileId = structure(logical(0),
        tags = list(type = "string")), ContentType = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "ContentTypeProfile",
        type = "structure"))), tags = list(locationNameList = "ContentTypeProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        ETag = structure(logical(0), tags = list(location = "header",
        locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionConfig"))
    # Copy the collected values into the placeholder shape and return it.
    return(populate(args, shape))
}
# Request shape for CloudFront's GetFieldLevelEncryptionProfile operation:
# a single string `Id` bound into the request URI. `populate()` (imported
# from paws.common) fills the template from the caller's arguments.
get_field_level_encryption_profile_input <- function (...)
{
    params <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    request_shape <- structure(list(Id = id_field),
        tags = list(type = "structure"))
    populate(params, request_shape)
}
# Response shape for CloudFront's GetFieldLevelEncryptionProfile operation.
#
# Mirrors the FieldLevelEncryptionProfile XML payload (id, last-modified
# timestamp, profile config with its encryption entities and field patterns)
# plus the `ETag` response header. Leaves are typed placeholders
# (`logical(0)`) carrying serialization tags; `populate()` (imported from
# paws.common, see file header) copies matching caller-supplied values in.
get_field_level_encryption_profile_output <- function (...)
{
    # Gather the named values passed by the caller.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionProfile = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), FieldLevelEncryptionProfileConfig = structure(list(Name = structure(logical(0),
        tags = list(type = "string")), CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), EncryptionEntities = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PublicKeyId = structure(logical(0),
        tags = list(type = "string")), ProviderId = structure(logical(0),
        tags = list(type = "string")), FieldPatterns = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "FieldPattern", type = "string"))),
        tags = list(locationNameList = "FieldPattern", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "EncryptionEntity",
        type = "structure"))), tags = list(locationNameList = "EncryptionEntity",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        ETag = structure(logical(0), tags = list(location = "header",
        locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionProfile"))
    # Copy the collected values into the placeholder shape and return it.
    return(populate(args, shape))
}
# Request shape for CloudFront's GetFieldLevelEncryptionProfileConfig
# operation: a single string `Id` bound into the request URI. `populate()`
# (imported from paws.common) fills the template from the caller's arguments.
get_field_level_encryption_profile_config_input <- function (...)
{
    params <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    request_shape <- structure(list(Id = id_field),
        tags = list(type = "structure"))
    populate(params, request_shape)
}
# Response shape for CloudFront's GetFieldLevelEncryptionProfileConfig
# operation.
#
# Mirrors the FieldLevelEncryptionProfileConfig XML payload (name, caller
# reference, comment, encryption entities with their field patterns) plus
# the `ETag` response header. Leaves are typed placeholders (`logical(0)`)
# carrying serialization tags; `populate()` (imported from paws.common, see
# file header) copies matching caller-supplied values into the template.
get_field_level_encryption_profile_config_output <- function (...)
{
    # Gather the named values passed by the caller.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionProfileConfig = structure(list(Name = structure(logical(0),
        tags = list(type = "string")), CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), EncryptionEntities = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PublicKeyId = structure(logical(0),
        tags = list(type = "string")), ProviderId = structure(logical(0),
        tags = list(type = "string")), FieldPatterns = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "FieldPattern", type = "string"))),
        tags = list(locationNameList = "FieldPattern", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "EncryptionEntity",
        type = "structure"))), tags = list(locationNameList = "EncryptionEntity",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
        type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionProfileConfig"))
    # Copy the collected values into the placeholder shape and return it.
    return(populate(args, shape))
}
# Request shape for CloudFront's GetInvalidation operation: two string
# fields, `DistributionId` and `Id`, both bound into the request URI.
# `populate()` (imported from paws.common) fills the template from the
# caller's arguments.
get_invalidation_input <- function (...)
{
    params <- c(as.list(environment()), list(...))
    uri_string <- function(field) {
        structure(logical(0), tags = list(location = "uri",
            locationName = field, type = "string"))
    }
    request_shape <- structure(list(DistributionId = uri_string("DistributionId"),
        Id = uri_string("Id")), tags = list(type = "structure"))
    populate(params, request_shape)
}
# Response shape for CloudFront's GetInvalidation operation.
#
# Mirrors the Invalidation XML payload (id, status, creation time, and the
# invalidation batch with its path list and caller reference). Leaves are
# typed placeholders (`logical(0)`) carrying serialization tags;
# `populate()` (imported from paws.common, see file header) copies matching
# caller-supplied values into the template.
get_invalidation_output <- function (...)
{
    # Gather the named values passed by the caller.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Invalidation = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), CreateTime = structure(logical(0),
        tags = list(type = "timestamp")), InvalidationBatch = structure(list(Paths = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Path", type = "string"))),
        tags = list(locationNameList = "Path", type = "list"))),
        tags = list(type = "structure")), CallerReference = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure",
        payload = "Invalidation"))
    # Copy the collected values into the placeholder shape and return it.
    return(populate(args, shape))
}
# Request shape for CloudFront's GetPublicKey operation: a single string
# `Id` bound into the request URI. `populate()` (imported from paws.common)
# fills the template from the caller's arguments.
get_public_key_input <- function (...)
{
    params <- c(as.list(environment()), list(...))
    id_field <- structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string"))
    request_shape <- structure(list(Id = id_field),
        tags = list(type = "structure"))
    populate(params, request_shape)
}
# Response shape for CloudFront's GetPublicKey operation.
#
# Mirrors the PublicKey XML payload (id, creation time, and the key config
# with caller reference, name, encoded key, and comment) plus the `ETag`
# response header. Leaves are typed placeholders (`logical(0)`) carrying
# serialization tags; `populate()` (imported from paws.common, see file
# header) copies matching caller-supplied values into the template.
get_public_key_output <- function (...)
{
    # Gather the named values passed by the caller.
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(PublicKey = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), CreatedTime = structure(logical(0),
        tags = list(type = "timestamp")), PublicKeyConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Name = structure(logical(0),
        tags = list(type = "string")), EncodedKey = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
        type = "string"))), tags = list(type = "structure",
        payload = "PublicKey"))
    # Copy the collected values into the placeholder shape and return it.
    return(populate(args, shape))
}
# Request shape for the CloudFront GetPublicKeyConfig API call: the
# public-key Id travels in the URI path. `populate()` (from paws.common)
# merges any values supplied via `...` into the shape.
get_public_key_config_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  id_field <- structure(
    logical(0),
    tags = list(location = "uri", locationName = "Id", type = "string")
  )
  request_shape <- structure(list(Id = id_field), tags = list(type = "structure"))
  populate(supplied, request_shape)
}
# Response shape for the CloudFront GetPublicKeyConfig API call. The
# payload is the PublicKeyConfig element; ETag comes from the response
# header. Generated by make.paws (see file header); do not hand-edit.
get_public_key_config_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(PublicKeyConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Name = structure(logical(0),
        tags = list(type = "string")), EncodedKey = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        ETag = structure(logical(0), tags = list(location = "header",
            locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "PublicKeyConfig"))
    return(populate(args, shape))
}
# Request shape for the CloudFront GetStreamingDistribution API call:
# the distribution Id is sent in the URI path. `populate()` (from
# paws.common) merges any values supplied via `...` into the shape.
get_streaming_distribution_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  id_field <- structure(
    logical(0),
    tags = list(location = "uri", locationName = "Id", type = "string")
  )
  request_shape <- structure(list(Id = id_field), tags = list(type = "structure"))
  populate(supplied, request_shape)
}
# Response shape for the CloudFront GetStreamingDistribution API call.
# The payload is the StreamingDistribution element (including its
# ActiveTrustedSigners and StreamingDistributionConfig sub-structures);
# ETag is read from the response header. Generated by make.paws (see
# file header); do not hand-edit the shape.
get_streaming_distribution_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistribution = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), DomainName = structure(logical(0),
        tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
        tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "KeyPairId", type = "string"))),
        tags = list(locationNameList = "KeyPairId", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "Signer",
        type = "structure"))), tags = list(locationNameList = "Signer",
        type = "list"))), tags = list(type = "structure")), StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        ETag = structure(logical(0), tags = list(location = "header",
            locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "StreamingDistribution"))
    return(populate(args, shape))
}
# Request shape for the CloudFront GetStreamingDistributionConfig API
# call: the distribution Id is sent in the URI path. `populate()` (from
# paws.common) merges any values supplied via `...` into the shape.
get_streaming_distribution_config_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  id_field <- structure(
    logical(0),
    tags = list(location = "uri", locationName = "Id", type = "string")
  )
  request_shape <- structure(list(Id = id_field), tags = list(type = "structure"))
  populate(supplied, request_shape)
}
# Response shape for the CloudFront GetStreamingDistributionConfig API
# call. The payload is the StreamingDistributionConfig element; ETag is
# read from the response header. Generated by make.paws (see file
# header); do not hand-edit the shape.
get_streaming_distribution_config_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
            type = "string"))), tags = list(type = "structure",
        payload = "StreamingDistributionConfig"))
    return(populate(args, shape))
}
# Request shape for the CloudFront ListCloudFrontOriginAccessIdentities
# API call: pagination controls (Marker, MaxItems) are query-string
# parameters. `populate()` (from paws.common) merges values from `...`.
list_cloud_front_origin_access_identities_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  # Builds the tag structure for one query-string field.
  qs_field <- function(nm) {
    structure(
      logical(0),
      tags = list(location = "querystring", locationName = nm, type = "string")
    )
  }
  request_shape <- structure(
    list(Marker = qs_field("Marker"), MaxItems = qs_field("MaxItems")),
    tags = list(type = "structure")
  )
  populate(supplied, request_shape)
}
# Response shape for the CloudFront ListCloudFrontOriginAccessIdentities
# API call. The payload is the paginated CloudFrontOriginAccessIdentityList
# element. Generated by make.paws (see file header); do not hand-edit.
list_cloud_front_origin_access_identities_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(CloudFrontOriginAccessIdentityList = structure(list(Marker = structure(logical(0),
        tags = list(type = "string")), NextMarker = structure(logical(0),
        tags = list(type = "string")), MaxItems = structure(logical(0),
        tags = list(type = "integer")), IsTruncated = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), S3CanonicalUserId = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "CloudFrontOriginAccessIdentitySummary",
        type = "structure"))), tags = list(locationNameList = "CloudFrontOriginAccessIdentitySummary",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure", payload = "CloudFrontOriginAccessIdentityList"))
    return(populate(args, shape))
}
# Request shape for the CloudFront ListDistributions API call:
# pagination controls (Marker, MaxItems) are query-string parameters.
# `populate()` (from paws.common) merges values supplied via `...`.
list_distributions_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  # Builds the tag structure for one query-string field.
  qs_field <- function(nm) {
    structure(
      logical(0),
      tags = list(location = "querystring", locationName = nm, type = "string")
    )
  }
  request_shape <- structure(
    list(Marker = qs_field("Marker"), MaxItems = qs_field("MaxItems")),
    tags = list(type = "structure")
  )
  populate(supplied, request_shape)
}
# Response shape for the CloudFront ListDistributions API call. The
# payload is the paginated DistributionList element, whose Items are
# DistributionSummary entries mirroring the full distribution
# configuration (origins, origin groups, cache behaviors, certificates,
# restrictions, etc.). Generated by make.paws (see file header); do not
# hand-edit the shape.
list_distributions_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(DistributionList = structure(list(Marker = structure(logical(0),
        tags = list(type = "string")), NextMarker = structure(logical(0),
        tags = list(type = "string")), MaxItems = structure(logical(0),
        tags = list(type = "integer")), IsTruncated = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), DomainName = structure(logical(0),
        tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "CNAME", type = "string"))),
        tags = list(locationNameList = "CNAME", type = "list"))),
        tags = list(type = "structure")), Origins = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), DomainName = structure(logical(0),
        tags = list(type = "string")), OriginPath = structure(logical(0),
        tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
        tags = list(type = "string")), HeaderValue = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
        type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
        type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
            tags = list(type = "integer")), HTTPSPort = structure(logical(0),
            tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
            tags = list(type = "string", enum = c("http-only",
                "match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "SslProtocol", type = "string",
                enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
            tags = list(locationNameList = "SslProtocol", type = "list"))),
            tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
            tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
            tags = list(type = "integer"))), tags = list(type = "structure"))),
        tags = list(locationName = "Origin", type = "structure"))),
        tags = list(locationNameList = "Origin", type = "list",
            min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "StatusCode", type = "integer"))),
        tags = list(locationNameList = "StatusCode", type = "list",
            min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
        Members = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
            tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
            type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
            type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
        tags = list(locationName = "OriginGroup", type = "structure"))),
        tags = list(locationNameList = "OriginGroup", type = "list"))),
        tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
            "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
            enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list"))), tags = list(type = "structure"))),
            tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
            tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
            tags = list(type = "long")), MaxTTL = structure(logical(0),
            tags = list(type = "long")), Compress = structure(logical(0),
            tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
            tags = list(type = "string")), EventType = structure(logical(0),
            tags = list(type = "string", enum = c("viewer-request",
                "viewer-response", "origin-request", "origin-response"))),
            IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
            tags = list(locationName = "LambdaFunctionAssociation",
                type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
            type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
        tags = list(type = "string")), TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
            "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
            enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list"))), tags = list(type = "structure"))),
            tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
            tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
            tags = list(type = "long")), MaxTTL = structure(logical(0),
            tags = list(type = "long")), Compress = structure(logical(0),
            tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
            tags = list(type = "string")), EventType = structure(logical(0),
            tags = list(type = "string", enum = c("viewer-request",
                "viewer-response", "origin-request", "origin-response"))),
            IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
            tags = list(locationName = "LambdaFunctionAssociation",
                type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
            type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(locationName = "CacheBehavior", type = "structure"))),
        tags = list(locationNameList = "CacheBehavior", type = "list"))),
        tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
        tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
        tags = list(type = "string")), ResponseCode = structure(logical(0),
        tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
        tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
        type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
        type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
        tags = list(type = "string")), PriceClass = structure(logical(0),
        tags = list(type = "string", enum = c("PriceClass_100",
            "PriceClass_200", "PriceClass_All"))), Enabled = structure(logical(0),
        tags = list(type = "boolean")), ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
        tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
        tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
        tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
        tags = list(type = "string", enum = c("sni-only", "vip"))),
        MinimumProtocolVersion = structure(logical(0), tags = list(type = "string",
            enum = c("SSLv3", "TLSv1", "TLSv1_2016", "TLSv1.1_2016",
                "TLSv1.2_2018"))), Certificate = structure(logical(0),
            tags = list(deprecated = TRUE, type = "string")),
        CertificateSource = structure(logical(0), tags = list(deprecated = TRUE,
            type = "string", enum = c("cloudfront", "iam", "acm")))),
        tags = list(type = "structure")), Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
        tags = list(type = "string", enum = c("blacklist", "whitelist",
            "none"))), Quantity = structure(logical(0), tags = list(type = "integer")),
        Items = structure(list(structure(logical(0), tags = list(locationName = "Location",
            type = "string"))), tags = list(locationNameList = "Location",
            type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), WebACLId = structure(logical(0),
        tags = list(type = "string")), HttpVersion = structure(logical(0),
        tags = list(type = "string", enum = c("http1.1", "http2"))),
        IsIPV6Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "DistributionSummary", type = "structure"))),
        tags = list(locationNameList = "DistributionSummary",
            type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure", payload = "DistributionList"))
    return(populate(args, shape))
}
# Request shape for the CloudFront ListDistributionsByWebACLId API call:
# pagination controls (Marker, MaxItems) are query-string parameters and
# the WebACLId is sent in the URI path. `populate()` (from paws.common)
# merges any values supplied via `...` into the shape.
list_distributions_by_web_acl_id_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  # Builds the tag structure for one string field at the given location.
  string_field <- function(loc, nm) {
    structure(
      logical(0),
      tags = list(location = loc, locationName = nm, type = "string")
    )
  }
  request_shape <- structure(
    list(
      Marker = string_field("querystring", "Marker"),
      MaxItems = string_field("querystring", "MaxItems"),
      WebACLId = string_field("uri", "WebACLId")
    ),
    tags = list(type = "structure")
  )
  populate(supplied, request_shape)
}
list_distributions_by_web_acl_id_output <- function (...)
{
args <- c(as.list(environment()), list(...))
shape <- structure(list(DistributionList = structure(list(Marker = structure(logical(0),
tags = list(type = "string")), NextMarker = structure(logical(0),
tags = list(type = "string")), MaxItems = structure(logical(0),
tags = list(type = "integer")), IsTruncated = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), ARN = structure(logical(0),
tags = list(type = "string")), Status = structure(logical(0),
tags = list(type = "string")), LastModifiedTime = structure(logical(0),
tags = list(type = "timestamp")), DomainName = structure(logical(0),
tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "CNAME", type = "string"))),
tags = list(locationNameList = "CNAME", type = "list"))),
tags = list(type = "structure")), Origins = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), DomainName = structure(logical(0),
tags = list(type = "string")), OriginPath = structure(logical(0),
tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
tags = list(type = "string")), HeaderValue = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
tags = list(type = "string"))), tags = list(type = "structure")),
CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
tags = list(type = "integer")), HTTPSPort = structure(logical(0),
tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
tags = list(type = "string", enum = c("http-only",
"match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "SslProtocol", type = "string",
enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
tags = list(locationNameList = "SslProtocol", type = "list"))),
tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
tags = list(type = "integer"))), tags = list(type = "structure"))),
tags = list(locationName = "Origin", type = "structure"))),
tags = list(locationNameList = "Origin", type = "list",
min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "StatusCode", type = "integer"))),
tags = list(locationNameList = "StatusCode", type = "list",
min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
Members = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
tags = list(locationName = "OriginGroup", type = "structure"))),
tags = list(locationNameList = "OriginGroup", type = "list"))),
tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
tags = list(type = "string")), TargetOriginId = structure(logical(0),
tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
tags = list(type = "string", enum = c("none", "whitelist",
"all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
Headers = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Name", type = "string"))),
tags = list(locationNameList = "Name", type = "list"))),
tags = list(type = "structure"))), tags = list(type = "structure")),
TrustedSigners = structure(list(Enabled = structure(logical(0),
tags = list(type = "boolean")), Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "AwsAccountNumber", type = "string"))),
tags = list(locationNameList = "AwsAccountNumber",
type = "list"))), tags = list(type = "structure")),
ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
enum = c("allow-all", "https-only", "redirect-to-https"))),
MinTTL = structure(logical(0), tags = list(type = "long")),
AllowedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(logical(0),
tags = list(locationName = "Method", type = "string",
enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
"OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
tags = list(type = "long")), MaxTTL = structure(logical(0),
tags = list(type = "long")), Compress = structure(logical(0),
tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
tags = list(type = "string")), EventType = structure(logical(0),
tags = list(type = "string", enum = c("viewer-request",
"viewer-response", "origin-request", "origin-response"))),
IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "LambdaFunctionAssociation",
type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
type = "list"))), tags = list(type = "structure")),
FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
tags = list(locationName = "CacheBehavior", type = "structure"))),
tags = list(locationNameList = "CacheBehavior", type = "list"))),
tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
tags = list(type = "string")), ResponseCode = structure(logical(0),
tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
tags = list(type = "string")), PriceClass = structure(logical(0),
tags = list(type = "string", enum = c("PriceClass_100",
"PriceClass_200", "PriceClass_All"))), Enabled = structure(logical(0),
tags = list(type = "boolean")), ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
tags = list(type = "string", enum = c("sni-only", "vip"))),
MinimumProtocolVersion = structure(logical(0), tags = list(type = "string",
enum = c("SSLv3", "TLSv1", "TLSv1_2016", "TLSv1.1_2016",
"TLSv1.2_2018"))), Certificate = structure(logical(0),
tags = list(deprecated = TRUE, type = "string")),
CertificateSource = structure(logical(0), tags = list(deprecated = TRUE,
type = "string", enum = c("cloudfront", "iam", "acm")))),
tags = list(type = "structure")), Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
tags = list(type = "string", enum = c("blacklist", "whitelist",
"none"))), Quantity = structure(logical(0), tags = list(type = "integer")),
Items = structure(list(structure(logical(0), tags = list(locationName = "Location",
type = "string"))), tags = list(locationNameList = "Location",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure")), WebACLId = structure(logical(0),
tags = list(type = "string")), HttpVersion = structure(logical(0),
tags = list(type = "string", enum = c("http1.1", "http2"))),
IsIPV6Enabled = structure(logical(0), tags = list(type = "boolean"))),
tags = list(locationName = "DistributionSummary", type = "structure"))),
tags = list(locationNameList = "DistributionSummary",
type = "list"))), tags = list(type = "structure"))),
tags = list(type = "structure", payload = "DistributionList"))
return(populate(args, shape))
}
# Build the request shape for the ListFieldLevelEncryptionConfigs operation.
#
# Both fields are optional query-string paging parameters:
#   Marker   - opaque continuation token from a previous response.
#   MaxItems - maximum number of configurations to return (sent as a string).
# Caller arguments supplied via `...` are merged into the shape by
# paws.common::populate().
list_field_level_encryption_configs_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  # Helper producing the tagged placeholder for a query-string field.
  querystring_field <- function(name) {
    structure(
      logical(0),
      tags = list(location = "querystring", locationName = name, type = "string")
    )
  }
  shape <- structure(
    list(
      Marker = querystring_field("Marker"),
      MaxItems = querystring_field("MaxItems")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
# Response shape for the ListFieldLevelEncryptionConfigs operation.
#
# The payload is a FieldLevelEncryptionList: paging fields (NextMarker,
# MaxItems, Quantity) plus Items, where each FieldLevelEncryptionSummary
# carries its QueryArgProfileConfig and ContentTypeProfileConfig sub-shapes.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
list_field_level_encryption_configs_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionList = structure(list(NextMarker = structure(logical(0),
        tags = list(type = "string")), MaxItems = structure(logical(0),
        tags = list(type = "integer")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), Comment = structure(logical(0),
        tags = list(type = "string")), QueryArgProfileConfig = structure(list(ForwardWhenQueryArgProfileIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), QueryArgProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(QueryArg = structure(logical(0),
        tags = list(type = "string")), ProfileId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "QueryArgProfile",
        type = "structure"))), tags = list(locationNameList = "QueryArgProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ContentTypeProfileConfig = structure(list(ForwardWhenContentTypeIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), ContentTypeProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Format = structure(logical(0),
        tags = list(type = "string", enum = "URLEncoded")), ProfileId = structure(logical(0),
        tags = list(type = "string")), ContentType = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "ContentTypeProfile",
        type = "structure"))), tags = list(locationNameList = "ContentTypeProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(locationName = "FieldLevelEncryptionSummary",
        type = "structure"))), tags = list(locationNameList = "FieldLevelEncryptionSummary",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure", payload = "FieldLevelEncryptionList"))
    return(populate(args, shape))
}
# Build the request shape for the ListFieldLevelEncryptionProfiles operation.
#
# Marker and MaxItems are optional query-string paging parameters; values
# supplied via `...` are merged into the shape by paws.common::populate().
list_field_level_encryption_profiles_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  # Helper producing the tagged placeholder for a query-string field.
  querystring_field <- function(name) {
    structure(
      logical(0),
      tags = list(location = "querystring", locationName = name, type = "string")
    )
  }
  shape <- structure(
    list(
      Marker = querystring_field("Marker"),
      MaxItems = querystring_field("MaxItems")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
# Response shape for the ListFieldLevelEncryptionProfiles operation.
#
# The payload is a FieldLevelEncryptionProfileList: paging fields plus Items,
# where each FieldLevelEncryptionProfileSummary lists its EncryptionEntities
# (public key / provider / field patterns) and a Comment.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
list_field_level_encryption_profiles_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionProfileList = structure(list(NextMarker = structure(logical(0),
        tags = list(type = "string")), MaxItems = structure(logical(0),
        tags = list(type = "integer")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), Name = structure(logical(0),
        tags = list(type = "string")), EncryptionEntities = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PublicKeyId = structure(logical(0),
        tags = list(type = "string")), ProviderId = structure(logical(0),
        tags = list(type = "string")), FieldPatterns = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "FieldPattern", type = "string"))),
        tags = list(locationNameList = "FieldPattern", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "EncryptionEntity",
        type = "structure"))), tags = list(locationNameList = "EncryptionEntity",
        type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "FieldLevelEncryptionProfileSummary",
        type = "structure"))), tags = list(locationNameList = "FieldLevelEncryptionProfileSummary",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure", payload = "FieldLevelEncryptionProfileList"))
    return(populate(args, shape))
}
# Build the request shape for the ListInvalidations operation.
#
# Fields:
#   DistributionId - required path ("uri") parameter identifying the
#                    distribution whose invalidations are listed.
#   Marker, MaxItems - optional query-string paging parameters.
# Caller arguments supplied via `...` are merged into the shape by
# paws.common::populate().
list_invalidations_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  # Helper producing the tagged placeholder for a located string field.
  string_field <- function(location, name) {
    structure(
      logical(0),
      tags = list(location = location, locationName = name, type = "string")
    )
  }
  shape <- structure(
    list(
      DistributionId = string_field("uri", "DistributionId"),
      Marker = string_field("querystring", "Marker"),
      MaxItems = string_field("querystring", "MaxItems")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
# Response shape for the ListInvalidations operation.
#
# The payload is an InvalidationList: paging fields (Marker, NextMarker,
# MaxItems, IsTruncated, Quantity) plus Items, where each
# InvalidationSummary has an Id, CreateTime, and Status.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
list_invalidations_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(InvalidationList = structure(list(Marker = structure(logical(0),
        tags = list(type = "string")), NextMarker = structure(logical(0),
        tags = list(type = "string")), MaxItems = structure(logical(0),
        tags = list(type = "integer")), IsTruncated = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), CreateTime = structure(logical(0),
        tags = list(type = "timestamp")), Status = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "InvalidationSummary",
        type = "structure"))), tags = list(locationNameList = "InvalidationSummary",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure", payload = "InvalidationList"))
    return(populate(args, shape))
}
# Build the request shape for the ListPublicKeys operation.
#
# Marker and MaxItems are optional query-string paging parameters; values
# supplied via `...` are merged into the shape by paws.common::populate().
list_public_keys_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  # Helper producing the tagged placeholder for a query-string field.
  querystring_field <- function(name) {
    structure(
      logical(0),
      tags = list(location = "querystring", locationName = name, type = "string")
    )
  }
  shape <- structure(
    list(
      Marker = querystring_field("Marker"),
      MaxItems = querystring_field("MaxItems")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
# Response shape for the ListPublicKeys operation.
#
# The payload is a PublicKeyList: paging fields (NextMarker, MaxItems,
# Quantity) plus Items, where each PublicKeySummary carries Id, Name,
# CreatedTime, EncodedKey, and Comment.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
list_public_keys_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(PublicKeyList = structure(list(NextMarker = structure(logical(0),
        tags = list(type = "string")), MaxItems = structure(logical(0),
        tags = list(type = "integer")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), Name = structure(logical(0),
        tags = list(type = "string")), CreatedTime = structure(logical(0),
        tags = list(type = "timestamp")), EncodedKey = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "PublicKeySummary",
        type = "structure"))), tags = list(locationNameList = "PublicKeySummary",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure", payload = "PublicKeyList"))
    return(populate(args, shape))
}
# Build the request shape for the ListStreamingDistributions operation.
#
# Marker and MaxItems are optional query-string paging parameters; values
# supplied via `...` are merged into the shape by paws.common::populate().
list_streaming_distributions_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  # Helper producing the tagged placeholder for a query-string field.
  querystring_field <- function(name) {
    structure(
      logical(0),
      tags = list(location = "querystring", locationName = name, type = "string")
    )
  }
  shape <- structure(
    list(
      Marker = querystring_field("Marker"),
      MaxItems = querystring_field("MaxItems")
    ),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
# Response shape for the ListStreamingDistributions operation.
#
# The payload is a StreamingDistributionList: paging fields plus Items,
# where each StreamingDistributionSummary includes its S3Origin, Aliases
# (CNAMEs), TrustedSigners, Comment, PriceClass, and Enabled flag.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
list_streaming_distributions_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistributionList = structure(list(Marker = structure(logical(0),
        tags = list(type = "string")), NextMarker = structure(logical(0),
        tags = list(type = "string")), MaxItems = structure(logical(0),
        tags = list(type = "integer")), IsTruncated = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), DomainName = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "CNAME", type = "string"))),
        tags = list(locationNameList = "CNAME", type = "list"))),
        tags = list(type = "structure")), TrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "AwsAccountNumber", type = "string"))),
        tags = list(locationNameList = "AwsAccountNumber",
        type = "list"))), tags = list(type = "structure")),
        Comment = structure(logical(0), tags = list(type = "string")),
        PriceClass = structure(logical(0), tags = list(type = "string",
        enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "StreamingDistributionSummary",
        type = "structure"))), tags = list(locationNameList = "StreamingDistributionSummary",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure", payload = "StreamingDistributionList"))
    return(populate(args, shape))
}
# Build the request shape for the ListTagsForResource operation.
#
# Resource is the ARN of the CloudFront resource whose tags are listed,
# sent as a query-string parameter and constrained to the CloudFront ARN
# pattern. Caller arguments supplied via `...` are merged into the shape
# by paws.common::populate().
list_tags_for_resource_input <- function(...) {
  args <- c(as.list(environment()), list(...))
  resource_shape <- structure(
    logical(0),
    tags = list(
      location = "querystring",
      locationName = "Resource",
      type = "string",
      pattern = "arn:aws:cloudfront::[0-9]+:.*"
    )
  )
  shape <- structure(
    list(Resource = resource_shape),
    tags = list(type = "structure")
  )
  populate(args, shape)
}
# Response shape for the ListTagsForResource operation.
#
# The payload is a Tags structure whose Items are Tag entries: Key
# (1-128 chars) and Value (0-256 chars), each restricted by the tag
# character-class pattern.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
list_tags_for_resource_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Tags = structure(list(Items = structure(list(structure(list(Key = structure(logical(0),
        tags = list(type = "string", max = 128L, min = 1L, pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$")),
        Value = structure(logical(0), tags = list(type = "string",
            max = 256L, min = 0L, pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$"))),
        tags = list(locationName = "Tag", type = "structure"))),
        tags = list(locationNameList = "Tag", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure",
        payload = "Tags"))
    return(populate(args, shape))
}
# Request shape for the TagResource operation.
#
# Fields:
#   Resource - ARN of the CloudFront resource to tag (query-string
#              parameter, constrained to the CloudFront ARN pattern).
#   Tags     - request payload: a list of Tag entries, each with a Key
#              (1-128 chars) and Value (0-256 chars) restricted by the
#              tag character-class pattern.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
tag_resource_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Resource = structure(logical(0),
        tags = list(location = "querystring", locationName = "Resource",
            type = "string", pattern = "arn:aws:cloudfront::[0-9]+:.*")),
        Tags = structure(list(Items = structure(list(structure(list(Key = structure(logical(0),
            tags = list(type = "string", max = 128L, min = 1L,
                pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$")),
            Value = structure(logical(0), tags = list(type = "string",
                max = 256L, min = 0L, pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$"))),
            tags = list(locationName = "Tag", type = "structure"))),
            tags = list(locationNameList = "Tag", type = "list"))),
            tags = list(locationName = "Tags", type = "structure"))),
        tags = list(type = "structure", payload = "Tags"))
    return(populate(args, shape))
}
# Response shape for the TagResource operation.
# The operation returns no response body, so the shape is an empty list.
tag_resource_output <- function() {
  list()
}
# Request shape for the UntagResource operation.
#
# Fields:
#   Resource - ARN of the CloudFront resource to untag (query-string
#              parameter, constrained to the CloudFront ARN pattern).
#   TagKeys  - request payload: the list of tag Keys (1-128 chars,
#              tag character-class pattern) to remove.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
untag_resource_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(Resource = structure(logical(0),
        tags = list(location = "querystring", locationName = "Resource",
            type = "string", pattern = "arn:aws:cloudfront::[0-9]+:.*")),
        TagKeys = structure(list(Items = structure(list(structure(logical(0),
            tags = list(locationName = "Key", type = "string",
                max = 128L, min = 1L, pattern = "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$"))),
            tags = list(locationNameList = "Key", type = "list"))),
            tags = list(locationName = "TagKeys", type = "structure"))),
        tags = list(type = "structure", payload = "TagKeys"))
    return(populate(args, shape))
}
# Response shape for the UntagResource operation.
# The operation returns no response body, so the shape is an empty list.
untag_resource_output <- function() {
  list()
}
# Request shape for the UpdateCloudFrontOriginAccessIdentity operation.
#
# Fields:
#   CloudFrontOriginAccessIdentityConfig - request payload holding the
#       identity's CallerReference and Comment.
#   Id      - identity ID, interpolated into the request URI.
#   IfMatch - ETag for optimistic concurrency, sent as the If-Match header.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
update_cloud_front_origin_access_identity_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(CloudFrontOriginAccessIdentityConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "CloudFrontOriginAccessIdentityConfig",
        type = "structure")), Id = structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string")), IfMatch = structure(logical(0),
        tags = list(location = "header", locationName = "If-Match",
            type = "string"))), tags = list(type = "structure",
        payload = "CloudFrontOriginAccessIdentityConfig"))
    return(populate(args, shape))
}
# Response shape for the UpdateCloudFrontOriginAccessIdentity operation.
#
# The payload is the updated CloudFrontOriginAccessIdentity (Id,
# S3CanonicalUserId, and its config); ETag is returned in the ETag
# response header for use as If-Match on subsequent updates.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
update_cloud_front_origin_access_identity_output <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(CloudFrontOriginAccessIdentity = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), S3CanonicalUserId = structure(logical(0),
        tags = list(type = "string")), CloudFrontOriginAccessIdentityConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
            type = "string"))), tags = list(type = "structure",
        payload = "CloudFrontOriginAccessIdentity"))
    return(populate(args, shape))
}
# Request shape for the UpdateDistribution operation.
#
# Fields:
#   DistributionConfig - request payload: the full distribution
#       configuration (Aliases, Origins, OriginGroups,
#       DefaultCacheBehavior, CacheBehaviors, CustomErrorResponses,
#       Logging, ViewerCertificate, Restrictions, WebACLId, HttpVersion,
#       IsIPV6Enabled, etc.).
#   Id      - distribution ID, interpolated into the request URI.
#   IfMatch - ETag for optimistic concurrency, sent as the If-Match header.
# NOTE: generated code — the nested `tags` attributes define the XML
# (de)serialization layout consumed by paws.common::populate(); do not edit.
update_distribution_input <- function (...)
{
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(DistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "CNAME", type = "string"))),
        tags = list(locationNameList = "CNAME", type = "list"))),
        tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
        tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), DomainName = structure(logical(0),
        tags = list(type = "string")), OriginPath = structure(logical(0),
        tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
        tags = list(type = "string")), HeaderValue = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
        type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
        type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
            tags = list(type = "integer")), HTTPSPort = structure(logical(0),
            tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
            tags = list(type = "string", enum = c("http-only",
                "match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "SslProtocol", type = "string",
                enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
            tags = list(locationNameList = "SslProtocol", type = "list"))),
            tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
            tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
            tags = list(type = "integer"))), tags = list(type = "structure"))),
        tags = list(locationName = "Origin", type = "structure"))),
        tags = list(locationNameList = "Origin", type = "list",
            min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "StatusCode", type = "integer"))),
        tags = list(locationNameList = "StatusCode", type = "list",
            min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
        Members = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
            tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
            type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
            type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
        tags = list(locationName = "OriginGroup", type = "structure"))),
        tags = list(locationNameList = "OriginGroup", type = "list"))),
        tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
            "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
            enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list"))), tags = list(type = "structure"))),
            tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
            tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
            tags = list(type = "long")), MaxTTL = structure(logical(0),
            tags = list(type = "long")), Compress = structure(logical(0),
            tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
            tags = list(type = "string")), EventType = structure(logical(0),
            tags = list(type = "string", enum = c("viewer-request",
                "viewer-response", "origin-request", "origin-response"))),
            IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
            tags = list(locationName = "LambdaFunctionAssociation",
                type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
            type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
        tags = list(type = "string")), TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
            "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
            enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list"))), tags = list(type = "structure"))),
            tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
            tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
            tags = list(type = "long")), MaxTTL = structure(logical(0),
            tags = list(type = "long")), Compress = structure(logical(0),
            tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
            tags = list(type = "string")), EventType = structure(logical(0),
            tags = list(type = "string", enum = c("viewer-request",
                "viewer-response", "origin-request", "origin-response"))),
            IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
            tags = list(locationName = "LambdaFunctionAssociation",
                type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
            type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(locationName = "CacheBehavior", type = "structure"))),
        tags = list(locationNameList = "CacheBehavior", type = "list"))),
        tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
        tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
        tags = list(type = "string")), ResponseCode = structure(logical(0),
        tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
        tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
        type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
        type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
        tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
        tags = list(type = "boolean")), Bucket = structure(logical(0),
        tags = list(type = "string")), Prefix = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean")),
        ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
            tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
            tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
            tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
            tags = list(type = "string", enum = c("sni-only",
                "vip"))), MinimumProtocolVersion = structure(logical(0),
            tags = list(type = "string", enum = c("SSLv3", "TLSv1",
                "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
            Certificate = structure(logical(0), tags = list(deprecated = TRUE,
                type = "string")), CertificateSource = structure(logical(0),
                tags = list(deprecated = TRUE, type = "string",
                  enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
        Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
            tags = list(type = "string", enum = c("blacklist",
                "whitelist", "none"))), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Location", type = "string"))),
            tags = list(locationNameList = "Location", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        WebACLId = structure(logical(0), tags = list(type = "string")),
        HttpVersion = structure(logical(0), tags = list(type = "string",
            enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
            tags = list(type = "boolean"))), tags = list(locationName = "DistributionConfig",
        type = "structure")), Id = structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string")), IfMatch = structure(logical(0),
        tags = list(location = "header", locationName = "If-Match",
            type = "string"))), tags = list(type = "structure",
        payload = "DistributionConfig"))
    return(populate(args, shape))
}
# Deserialization shape for the CloudFront UpdateDistribution response.
# The nested `structure()` literal mirrors the API's XML layout: each leaf is
# an empty logical carrying `tags` metadata (wire type, enum values, XML
# element names, header/uri locations).  paws.common::populate() then fills
# this template from the caller's arguments.  The response payload is the
# "Distribution" element plus the "ETag" response header.
# NOTE: this file is generated by make.paws - do not hand-edit the literal.
update_distribution_output <- function (...)
{
    # Collect caller-supplied values (named locals plus ...).
    args <- c(as.list(environment()), list(...))
    # Shape template for Distribution (config, origins, behaviors, ...) + ETag.
    shape <- structure(list(Distribution = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), InProgressInvalidationBatches = structure(logical(0),
        tags = list(type = "integer")), DomainName = structure(logical(0),
        tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
        tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "KeyPairId", type = "string"))),
        tags = list(locationNameList = "KeyPairId", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "Signer",
        type = "structure"))), tags = list(locationNameList = "Signer",
        type = "list"))), tags = list(type = "structure")), DistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Aliases = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "CNAME", type = "string"))),
        tags = list(locationNameList = "CNAME", type = "list"))),
        tags = list(type = "structure")), DefaultRootObject = structure(logical(0),
        tags = list(type = "string")), Origins = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), DomainName = structure(logical(0),
        tags = list(type = "string")), OriginPath = structure(logical(0),
        tags = list(type = "string")), CustomHeaders = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(HeaderName = structure(logical(0),
        tags = list(type = "string")), HeaderValue = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "OriginCustomHeader",
        type = "structure"))), tags = list(locationNameList = "OriginCustomHeader",
        type = "list"))), tags = list(type = "structure")), S3OriginConfig = structure(list(OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        CustomOriginConfig = structure(list(HTTPPort = structure(logical(0),
            tags = list(type = "integer")), HTTPSPort = structure(logical(0),
            tags = list(type = "integer")), OriginProtocolPolicy = structure(logical(0),
            tags = list(type = "string", enum = c("http-only",
                "match-viewer", "https-only"))), OriginSslProtocols = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "SslProtocol", type = "string",
                enum = c("SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2")))),
            tags = list(locationNameList = "SslProtocol", type = "list"))),
            tags = list(type = "structure")), OriginReadTimeout = structure(logical(0),
            tags = list(type = "integer")), OriginKeepaliveTimeout = structure(logical(0),
            tags = list(type = "integer"))), tags = list(type = "structure"))),
        tags = list(locationName = "Origin", type = "structure"))),
        tags = list(locationNameList = "Origin", type = "list",
            min = 1L))), tags = list(type = "structure")), OriginGroups = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Id = structure(logical(0),
        tags = list(type = "string")), FailoverCriteria = structure(list(StatusCodes = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "StatusCode", type = "integer"))),
        tags = list(locationNameList = "StatusCode", type = "list",
            min = 1L))), tags = list(type = "structure"))), tags = list(type = "structure")),
        Members = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(OriginId = structure(logical(0),
            tags = list(type = "string"))), tags = list(locationName = "OriginGroupMember",
            type = "structure"))), tags = list(locationNameList = "OriginGroupMember",
            type = "list", max = 2L, min = 2L))), tags = list(type = "structure"))),
        tags = list(locationName = "OriginGroup", type = "structure"))),
        tags = list(locationNameList = "OriginGroup", type = "list"))),
        tags = list(type = "structure")), DefaultCacheBehavior = structure(list(TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
            "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
            enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list"))), tags = list(type = "structure"))),
            tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
            tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
            tags = list(type = "long")), MaxTTL = structure(logical(0),
            tags = list(type = "long")), Compress = structure(logical(0),
            tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
            tags = list(type = "string")), EventType = structure(logical(0),
            tags = list(type = "string", enum = c("viewer-request",
                "viewer-response", "origin-request", "origin-response"))),
            IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
            tags = list(locationName = "LambdaFunctionAssociation",
                type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
            type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(type = "structure")), CacheBehaviors = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PathPattern = structure(logical(0),
        tags = list(type = "string")), TargetOriginId = structure(logical(0),
        tags = list(type = "string")), ForwardedValues = structure(list(QueryString = structure(logical(0),
        tags = list(type = "boolean")), Cookies = structure(list(Forward = structure(logical(0),
        tags = list(type = "string", enum = c("none", "whitelist",
            "all"))), WhitelistedNames = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "Name", type = "string"))),
        tags = list(locationNameList = "Name", type = "list"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        Headers = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure")), QueryStringCacheKeys = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Name", type = "string"))),
            tags = list(locationNameList = "Name", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        ViewerProtocolPolicy = structure(logical(0), tags = list(type = "string",
            enum = c("allow-all", "https-only", "redirect-to-https"))),
        MinTTL = structure(logical(0), tags = list(type = "long")),
        AllowedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list")), CachedMethods = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Method", type = "string",
                enum = c("GET", "HEAD", "POST", "PUT", "PATCH",
                  "OPTIONS", "DELETE")))), tags = list(locationNameList = "Method",
            type = "list"))), tags = list(type = "structure"))),
            tags = list(type = "structure")), SmoothStreaming = structure(logical(0),
            tags = list(type = "boolean")), DefaultTTL = structure(logical(0),
            tags = list(type = "long")), MaxTTL = structure(logical(0),
            tags = list(type = "long")), Compress = structure(logical(0),
            tags = list(type = "boolean")), LambdaFunctionAssociations = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(list(LambdaFunctionARN = structure(logical(0),
            tags = list(type = "string")), EventType = structure(logical(0),
            tags = list(type = "string", enum = c("viewer-request",
                "viewer-response", "origin-request", "origin-response"))),
            IncludeBody = structure(logical(0), tags = list(type = "boolean"))),
            tags = list(locationName = "LambdaFunctionAssociation",
                type = "structure"))), tags = list(locationNameList = "LambdaFunctionAssociation",
            type = "list"))), tags = list(type = "structure")),
        FieldLevelEncryptionId = structure(logical(0), tags = list(type = "string"))),
        tags = list(locationName = "CacheBehavior", type = "structure"))),
        tags = list(locationNameList = "CacheBehavior", type = "list"))),
        tags = list(type = "structure")), CustomErrorResponses = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(ErrorCode = structure(logical(0),
        tags = list(type = "integer")), ResponsePagePath = structure(logical(0),
        tags = list(type = "string")), ResponseCode = structure(logical(0),
        tags = list(type = "string")), ErrorCachingMinTTL = structure(logical(0),
        tags = list(type = "long"))), tags = list(locationName = "CustomErrorResponse",
        type = "structure"))), tags = list(locationNameList = "CustomErrorResponse",
        type = "list"))), tags = list(type = "structure")), Comment = structure(logical(0),
        tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), IncludeCookies = structure(logical(0),
        tags = list(type = "boolean")), Bucket = structure(logical(0),
        tags = list(type = "string")), Prefix = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean")),
        ViewerCertificate = structure(list(CloudFrontDefaultCertificate = structure(logical(0),
            tags = list(type = "boolean")), IAMCertificateId = structure(logical(0),
            tags = list(type = "string")), ACMCertificateArn = structure(logical(0),
            tags = list(type = "string")), SSLSupportMethod = structure(logical(0),
            tags = list(type = "string", enum = c("sni-only",
                "vip"))), MinimumProtocolVersion = structure(logical(0),
            tags = list(type = "string", enum = c("SSLv3", "TLSv1",
                "TLSv1_2016", "TLSv1.1_2016", "TLSv1.2_2018"))),
            Certificate = structure(logical(0), tags = list(deprecated = TRUE,
                type = "string")), CertificateSource = structure(logical(0),
                tags = list(deprecated = TRUE, type = "string",
                  enum = c("cloudfront", "iam", "acm")))), tags = list(type = "structure")),
        Restrictions = structure(list(GeoRestriction = structure(list(RestrictionType = structure(logical(0),
            tags = list(type = "string", enum = c("blacklist",
                "whitelist", "none"))), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "Location", type = "string"))),
            tags = list(locationNameList = "Location", type = "list"))),
            tags = list(type = "structure"))), tags = list(type = "structure")),
        WebACLId = structure(logical(0), tags = list(type = "string")),
        HttpVersion = structure(logical(0), tags = list(type = "string",
            enum = c("http1.1", "http2"))), IsIPV6Enabled = structure(logical(0),
            tags = list(type = "boolean"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
            type = "string"))), tags = list(type = "structure",
        payload = "Distribution"))
    # Fill the template with the supplied values and return it.
    return(populate(args, shape))
}
# Serialization shape for the CloudFront UpdateFieldLevelEncryptionConfig
# request.  The payload element is "FieldLevelEncryptionConfig"; "Id" is a
# URI path parameter and "IfMatch" is sent as the "If-Match" header.
# paws.common::populate() fills the template from the caller's arguments.
# NOTE: this file is generated by make.paws - do not hand-edit the literal.
update_field_level_encryption_config_input <- function (...)
{
    # Collect caller-supplied values (named locals plus ...).
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), QueryArgProfileConfig = structure(list(ForwardWhenQueryArgProfileIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), QueryArgProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(QueryArg = structure(logical(0),
        tags = list(type = "string")), ProfileId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "QueryArgProfile",
        type = "structure"))), tags = list(locationNameList = "QueryArgProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ContentTypeProfileConfig = structure(list(ForwardWhenContentTypeIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), ContentTypeProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Format = structure(logical(0),
        tags = list(type = "string", enum = "URLEncoded")), ProfileId = structure(logical(0),
        tags = list(type = "string")), ContentType = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "ContentTypeProfile",
        type = "structure"))), tags = list(locationNameList = "ContentTypeProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(locationName = "FieldLevelEncryptionConfig",
        type = "structure")), Id = structure(logical(0), tags = list(location = "uri",
        locationName = "Id", type = "string")), IfMatch = structure(logical(0),
        tags = list(location = "header", locationName = "If-Match",
            type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionConfig"))
    return(populate(args, shape))
}
# Deserialization shape for the CloudFront UpdateFieldLevelEncryptionConfig
# response: the "FieldLevelEncryption" payload element plus the "ETag"
# response header.  paws.common::populate() fills the template from the
# caller's arguments.
# NOTE: this file is generated by make.paws - do not hand-edit the literal.
update_field_level_encryption_config_output <- function (...)
{
    # Collect caller-supplied values (named locals plus ...).
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryption = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), FieldLevelEncryptionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), QueryArgProfileConfig = structure(list(ForwardWhenQueryArgProfileIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), QueryArgProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(QueryArg = structure(logical(0),
        tags = list(type = "string")), ProfileId = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "QueryArgProfile",
        type = "structure"))), tags = list(locationNameList = "QueryArgProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ContentTypeProfileConfig = structure(list(ForwardWhenContentTypeIsUnknown = structure(logical(0),
        tags = list(type = "boolean")), ContentTypeProfiles = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(Format = structure(logical(0),
        tags = list(type = "string", enum = "URLEncoded")), ProfileId = structure(logical(0),
        tags = list(type = "string")), ContentType = structure(logical(0),
        tags = list(type = "string"))), tags = list(locationName = "ContentTypeProfile",
        type = "structure"))), tags = list(locationNameList = "ContentTypeProfile",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure"))),
        tags = list(type = "structure")), ETag = structure(logical(0),
        tags = list(location = "header", locationName = "ETag",
            type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryption"))
    return(populate(args, shape))
}
# Serialization shape for the CloudFront UpdateFieldLevelEncryptionProfile
# request.  The payload element is "FieldLevelEncryptionProfileConfig";
# "Id" is a URI path parameter and "IfMatch" is the "If-Match" header.
# paws.common::populate() fills the template from the caller's arguments.
# NOTE: this file is generated by make.paws - do not hand-edit the literal.
update_field_level_encryption_profile_input <- function (...)
{
    # Collect caller-supplied values (named locals plus ...).
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionProfileConfig = structure(list(Name = structure(logical(0),
        tags = list(type = "string")), CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), EncryptionEntities = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PublicKeyId = structure(logical(0),
        tags = list(type = "string")), ProviderId = structure(logical(0),
        tags = list(type = "string")), FieldPatterns = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "FieldPattern", type = "string"))),
        tags = list(locationNameList = "FieldPattern", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "EncryptionEntity",
        type = "structure"))), tags = list(locationNameList = "EncryptionEntity",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(locationName = "FieldLevelEncryptionProfileConfig",
            type = "structure")), Id = structure(logical(0),
        tags = list(location = "uri", locationName = "Id", type = "string")),
        IfMatch = structure(logical(0), tags = list(location = "header",
            locationName = "If-Match", type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionProfileConfig"))
    return(populate(args, shape))
}
# Deserialization shape for the CloudFront UpdateFieldLevelEncryptionProfile
# response: the "FieldLevelEncryptionProfile" payload element plus the
# "ETag" response header.  paws.common::populate() fills the template from
# the caller's arguments.
# NOTE: this file is generated by make.paws - do not hand-edit the literal.
update_field_level_encryption_profile_output <- function (...)
{
    # Collect caller-supplied values (named locals plus ...).
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(FieldLevelEncryptionProfile = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), FieldLevelEncryptionProfileConfig = structure(list(Name = structure(logical(0),
        tags = list(type = "string")), CallerReference = structure(logical(0),
        tags = list(type = "string")), Comment = structure(logical(0),
        tags = list(type = "string")), EncryptionEntities = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(PublicKeyId = structure(logical(0),
        tags = list(type = "string")), ProviderId = structure(logical(0),
        tags = list(type = "string")), FieldPatterns = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "FieldPattern", type = "string"))),
        tags = list(locationNameList = "FieldPattern", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "EncryptionEntity",
        type = "structure"))), tags = list(locationNameList = "EncryptionEntity",
        type = "list"))), tags = list(type = "structure"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        ETag = structure(logical(0), tags = list(location = "header",
            locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "FieldLevelEncryptionProfile"))
    return(populate(args, shape))
}
# Serialization shape for the CloudFront UpdatePublicKey request.
# The payload element is "PublicKeyConfig"; "Id" travels in the URI path
# and "IfMatch" in the "If-Match" header.  paws.common::populate() fills
# the assembled shape template from the caller's arguments.
update_public_key_input <- function (...)
{
    # Capture the caller's arguments before any locals are created.
    args <- c(as.list(environment()), list(...))
    # Plain string leaf shared by all four PublicKeyConfig members.
    scalar_string <- function() structure(logical(0), tags = list(type = "string"))
    # Request body: the PublicKeyConfig element and its scalar children.
    config_shape <- structure(
        list(
            CallerReference = scalar_string(),
            Name = scalar_string(),
            EncodedKey = scalar_string(),
            Comment = scalar_string()
        ),
        tags = list(locationName = "PublicKeyConfig", type = "structure")
    )
    # URI path parameter.
    id_shape <- structure(
        logical(0),
        tags = list(location = "uri", locationName = "Id", type = "string")
    )
    # Conditional-update request header.
    if_match_shape <- structure(
        logical(0),
        tags = list(location = "header", locationName = "If-Match", type = "string")
    )
    shape <- structure(
        list(PublicKeyConfig = config_shape, Id = id_shape, IfMatch = if_match_shape),
        tags = list(type = "structure", payload = "PublicKeyConfig")
    )
    populate(args, shape)
}
# Deserialization shape for the CloudFront UpdatePublicKey response:
# the "PublicKey" payload element plus the "ETag" response header.
# paws.common::populate() fills the assembled shape template from the
# caller's arguments.
update_public_key_output <- function (...)
{
    # Capture the caller's arguments before any locals are created.
    args <- c(as.list(environment()), list(...))
    # Plain string leaf reused throughout the response shape.
    scalar_string <- function() structure(logical(0), tags = list(type = "string"))
    # Nested PublicKeyConfig element (all scalar string members).
    config_shape <- structure(
        list(
            CallerReference = scalar_string(),
            Name = scalar_string(),
            EncodedKey = scalar_string(),
            Comment = scalar_string()
        ),
        tags = list(type = "structure")
    )
    # Response body: the PublicKey element.
    public_key_shape <- structure(
        list(
            Id = scalar_string(),
            CreatedTime = structure(logical(0), tags = list(type = "timestamp")),
            PublicKeyConfig = config_shape
        ),
        tags = list(type = "structure")
    )
    # ETag response header.
    etag_shape <- structure(
        logical(0),
        tags = list(location = "header", locationName = "ETag", type = "string")
    )
    shape <- structure(
        list(PublicKey = public_key_shape, ETag = etag_shape),
        tags = list(type = "structure", payload = "PublicKey")
    )
    populate(args, shape)
}
# Serialization shape for the CloudFront UpdateStreamingDistribution
# request.  The payload element is "StreamingDistributionConfig"; "Id" is
# a URI path parameter and "IfMatch" is the "If-Match" header.
# paws.common::populate() fills the template from the caller's arguments.
# NOTE: this file is generated by make.paws - do not hand-edit the literal.
update_streaming_distribution_input <- function (...)
{
    # Collect caller-supplied values (named locals plus ...).
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(locationName = "StreamingDistributionConfig",
            type = "structure")), Id = structure(logical(0),
        tags = list(location = "uri", locationName = "Id", type = "string")),
        IfMatch = structure(logical(0), tags = list(location = "header",
            locationName = "If-Match", type = "string"))), tags = list(type = "structure",
        payload = "StreamingDistributionConfig"))
    return(populate(args, shape))
}
# Deserialization shape for the CloudFront UpdateStreamingDistribution
# response: the "StreamingDistribution" payload element plus the "ETag"
# response header.  paws.common::populate() fills the template from the
# caller's arguments.
# NOTE: this file is generated by make.paws - do not hand-edit the literal.
update_streaming_distribution_output <- function (...)
{
    # Collect caller-supplied values (named locals plus ...).
    args <- c(as.list(environment()), list(...))
    shape <- structure(list(StreamingDistribution = structure(list(Id = structure(logical(0),
        tags = list(type = "string")), ARN = structure(logical(0),
        tags = list(type = "string")), Status = structure(logical(0),
        tags = list(type = "string")), LastModifiedTime = structure(logical(0),
        tags = list(type = "timestamp")), DomainName = structure(logical(0),
        tags = list(type = "string")), ActiveTrustedSigners = structure(list(Enabled = structure(logical(0),
        tags = list(type = "boolean")), Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(list(AwsAccountNumber = structure(logical(0),
        tags = list(type = "string")), KeyPairIds = structure(list(Quantity = structure(logical(0),
        tags = list(type = "integer")), Items = structure(list(structure(logical(0),
        tags = list(locationName = "KeyPairId", type = "string"))),
        tags = list(locationNameList = "KeyPairId", type = "list"))),
        tags = list(type = "structure"))), tags = list(locationName = "Signer",
        type = "structure"))), tags = list(locationNameList = "Signer",
        type = "list"))), tags = list(type = "structure")), StreamingDistributionConfig = structure(list(CallerReference = structure(logical(0),
        tags = list(type = "string")), S3Origin = structure(list(DomainName = structure(logical(0),
        tags = list(type = "string")), OriginAccessIdentity = structure(logical(0),
        tags = list(type = "string"))), tags = list(type = "structure")),
        Aliases = structure(list(Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "CNAME", type = "string"))),
            tags = list(locationNameList = "CNAME", type = "list"))),
            tags = list(type = "structure")), Comment = structure(logical(0),
            tags = list(type = "string")), Logging = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Bucket = structure(logical(0),
            tags = list(type = "string")), Prefix = structure(logical(0),
            tags = list(type = "string"))), tags = list(type = "structure")),
        TrustedSigners = structure(list(Enabled = structure(logical(0),
            tags = list(type = "boolean")), Quantity = structure(logical(0),
            tags = list(type = "integer")), Items = structure(list(structure(logical(0),
            tags = list(locationName = "AwsAccountNumber", type = "string"))),
            tags = list(locationNameList = "AwsAccountNumber",
                type = "list"))), tags = list(type = "structure")),
        PriceClass = structure(logical(0), tags = list(type = "string",
            enum = c("PriceClass_100", "PriceClass_200", "PriceClass_All"))),
        Enabled = structure(logical(0), tags = list(type = "boolean"))),
        tags = list(type = "structure"))), tags = list(type = "structure")),
        ETag = structure(logical(0), tags = list(location = "header",
            locationName = "ETag", type = "string"))), tags = list(type = "structure",
        payload = "StreamingDistribution"))
    return(populate(args, shape))
}
|
911745cf6e21e134d2320f049e0b7c0da9d18e01
|
5db52748f7af1e976d9c5ff550e0128410b2c2ce
|
/SELECTION-GO_ANALYSIS/TOpGO.R
|
d5a18d92c02f21599588f0d72fb00149beca469d
|
[] |
no_license
|
htnani/african_rice
|
3609d385826fe3e2ffeb8d469ced3b758745f03d
|
930d4e5e746a475060a15c98117740b9bfcb2057
|
refs/heads/master
| 2020-03-30T16:25:18.496279
| 2018-05-14T09:16:06
| 2018-05-14T09:16:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,372
|
r
|
TOpGO.R
|
#===============================================================================
# author: Benedicte Rhone
# date: 03/2016
#-------------------------------------------------------------------------------
# Description: GOterms enrichment analysis on a set of genomic region under
# selection using the topGO R package
#
# Citation: Alexa A and Rahnenfuhrer J (2016). topGO: Enrichment Analysis for
# Gene Ontology. R package version 2.22.0
#-------------------------------------------------------------------------------
# Input files description: 2 files
# GOterms-anotation-file.txt: A table containing at least the gene ID and the
# corresponding GO terms annotation
# datafile.txt: A file containing at least a list of the name of the genes under
# selection
#
# Steps of the script:
# 1) Read Input data
# 2) Data preparation for topGO analysis
# 3) topGO Analysis
#===============================================================================
### Install packages
# NOTE(review): biocLite is the legacy Bioconductor installer (pre-3.8);
# current R uses BiocManager::install() instead - confirm the target R version.
source("http://bioconductor.org/biocLite.R")
biocLite("topGO")
biocLite("Rgraphviz")
### Set the working directory
# Placeholder path: replace ".........." with the directory containing the
# two input files before running the script.
setwd("..........")
### Load packages
# topGO: GO-term enrichment analysis; Rgraphviz: GO-graph plotting.
library(topGO)
library(Rgraphviz)
#######################
# 1) Read Input data #
#######################
# Gene-to-GO annotation table; only the first two columns are used
# (gene identifier, GO term).
Annot <- read.table("GOterms-anotation-file.txt", header = FALSE,
                    sep = "\t", fill = TRUE)[, 1:2]
colnames(Annot) <- c("geneid", "GOterm")
# Number of distinct annotated genes (auto-printed when run interactively).
length(unique(Annot$geneid))
### datafile: list of genes identified as under selection
data <- read.table("datafile.txt", header = FALSE, sep = "\t", fill = TRUE)
colnames(data) <- "geneid"
dataDF <- as.data.frame(data)
# Number of distinct genes found as selected (auto-printed).
length(unique(dataDF$geneid))
###########################################
# 2) Data preparation for topGO analysis #
###########################################
# List of the annotated genes - eliminate redundancy
ListAnnotUnique<-as.vector(unique(Annot$geneid))
# Gene -> GO-terms mapping required by annFUN.gene2GO: one character vector
# of GO IDs per annotated gene, named by gene ID.
# (lapply replaces the original `for (i in 1:length(...))` loop, which grew
# the list one slot at a time; result and element order are identical.)
geneID2GO <- lapply(ListAnnotUnique, function(gene) {
  as.character(Annot$GOterm[Annot$geneid == gene])
})
names(geneID2GO)<-as.character(ListAnnotUnique)
# Building the gene-universe factor: 1 = gene under selection, 0 = background
geneNames2GO <- names(geneID2GO)
geneListdataGO <- factor(as.integer(geneNames2GO %in% dataDF$geneid))
names(geneListdataGO) <- geneNames2GO
length(which(geneListdataGO==1)) #returns the number of genes under selection and annotated thus usable for the analysis
str(geneListdataGO)
######################
# 3) topGO Analysis #
######################
# Creation of a topGO data object ####
GOdata <- new("topGOdata",
description = "GO analysis on genomic regions under selection, BP",
ontology = "BP", # or CC or MF
allGenes = geneListdataGO,
nodeSize =5, # delete categories that have too few genes : in our case, a high number of genes gives similar results with a nodeSize of 5 or of 10 (in the tutorial: values between 5 and 10 give more stable results)
annot = annFUN.gene2GO,
gene2GO = geneID2GO
)
# Returns the description of the topGO object (printed interactively)
GOdata
#### 5 types of statistical tests and 6 algorithms dealing with the GO graph structure are available in the topGO R package (see description in the tutorial)
#### Here we used a Fisher exact test based on gene counts, combined with 2 algorithms
#### Fisher test with the "classic" algorithm: does NOT take the hierarchical links between GO terms into account
Fisherclassic <- runTest(GOdata, algorithm = "classic", statistic = "fisher")
# Returns a table listing the top 20 GO terms found as significantly enriched
Fisherclassic.table<-GenTable(GOdata, classicFisher = Fisherclassic, topNodes=20)
Fisherclassic.table
write.table(Fisherclassic.table, "resultFisherclassic.table.txt" , sep=";", quote=FALSE)
# Returns a subgraph induced by the top 4 GO terms found as significantly enriched
showSigOfNodes(GOdata, score(Fisherclassic), firstSigNodes = 4, useInfo ='all')
# Generates a pdf file of the subgraph induced by the top 5 GO terms found as significantly enriched
printGraph(GOdata, Fisherclassic, firstSigNodes = 5, useInfo = "all", pdfSW = TRUE)
#### Fisher test with the "weight01" algorithm: takes the hierarchical links between GO terms into account
FisherWeight01<-runTest(GOdata, algorithm = "weight01", statistic = "fisher")
# Returns a table listing the top 50 GO terms found as significantly enriched
FisherWeight01.table<-GenTable(GOdata, Weight01 = FisherWeight01, topNodes=50)
FisherWeight01.table
write.table(FisherWeight01.table, "FisherWeight01.table.txt" , sep=";", quote=FALSE)
# Returns a subgraph induced by the top 4 GO terms found as significantly enriched
showSigOfNodes(GOdata, score(FisherWeight01), firstSigNodes = 4, useInfo ='all')
# Generates a pdf file of the subgraph induced by the top 5 GO terms found as significantly enriched
printGraph(GOdata, FisherWeight01, firstSigNodes = 5, useInfo = "all", pdfSW = TRUE)
# Returns a summary table of the top 10 GO terms: weight01 results side-by-side with the classic Fisher results, ordered by the weight01 p-value
allRes<-GenTable(GOdata, classicFisher = Fisherclassic, weight01=FisherWeight01, classic=Fisherclassic, orderBy="weight01", ranksOf="classic", topNodes=10)
allRes
write.table(allRes , "allRes.pcadapt.txt" , sep=";", quote=FALSE)
|
5d58fc96210a5b69ca254d10d43bbd95a9904463
|
7fc84b730a056db55239d42a62457ca0a334b93e
|
/linmech/gen-data/gen_data.R
|
409d6d0d5882bf9ed6668405c366cd51574b2a66
|
[] |
no_license
|
topherconley/sim-spacemap
|
90b241b7565ed25110d14eeedef523b5d82b18b5
|
590ebcd72d2da1f30d3a8c61ad418777987a971a
|
refs/heads/master
| 2021-01-25T06:45:53.911366
| 2017-02-07T00:25:37
| 2017-02-07T00:25:37
| 80,661,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
gen_data.R
|
#not found
#source("~/repos/sim-spacemap/linmech/space_score_functions_10082011.txt") ##space.shd, and space.shd.adj
# Load the true adjacency matrix (provides `true.dir`, used below).
load("dense_p500_adj_matrix.Rdata")
# NOTE(review): vstructures() is presumably defined in the sourced helper
# files (the one above is marked "not found") -- verify it is in scope.
true.v=vstructures(true.dir)
############################# part II: generate Gaussian linear mechanism based on pancr.adj
# Simulation dimensions: p genes, n observations, nrep replicate data sets.
p=nrow(true.dir)
n=100
nrep=10
# Provides gene.pancr.label(), used by data_generation() below.
source("~/repos/sim-spacemap/linmech/network_gene_function_10172012.R")
# Simulate one data set of n observations on p genes from the network encoded
# by the adjacency matrix `pancr.adj`, then column-standardise it.
#
# Args:
#   n         number of observations (rows of the returned matrix)
#   p         number of genes (columns of the returned matrix)
#   pancr.adj p x p adjacency matrix passed to gene.pancr.label()
#
# Returns: an n x p numeric matrix with each column centred to mean 0 and
# scaled to sd 1. Signal-to-noise ratios are drawn uniformly in [0.5, 1.5],
# so results depend on the RNG state (callers set the seed).
#
# NOTE(review): gene.pancr.label() comes from the sourced helper script;
# its `$data` element is assumed to be an n x p matrix -- confirm.
data_generation <- function(n, p, pancr.adj) {
  panc.simu.Data <- gene.pancr.label(n, pancr.adj,
                                     SN = runif(nrow(pancr.adj), min = 0.5, max = 1.5))
  Y <- panc.simu.Data$data
  # Per-column mean and sd for standardisation.
  Y.gm <- apply(Y, 2, mean)
  Y.gsd <- apply(Y, 2, sd)
  # Subtract column means, divide by column sds (explicit matrix recycling,
  # as in the original; TRUE spelled out instead of T).
  Y.n <- (Y - matrix(Y.gm, n, p, byrow = TRUE)) / matrix(Y.gsd, n, p, byrow = TRUE)
  Y.n
}
##look at gene.pancr.label
# Draw one reproducible sub-seed per replicate, then generate and save
# `nrep` standardised data sets (one .Rdata file each).
set.seed(2000)
seeds=sample(2:1000,10)
for(rep in 1:nrep){
print(rep)
# Re-seed per replicate so each data set is independently reproducible.
set.seed(seeds[rep])
Y.n=data_generation(n,p,true.dir)
# File name pattern: dense_p500_<n>_rep<rep>.Rdata
file.name=paste("dense_p500_",n,"_rep",rep,".Rdata",sep='')
save(Y.n,file=file.name)
}
|
c98b6793dfffa468d51d2d791df5f5e1fd89afb7
|
dd462b8781178eb309a7f76c94a4c0537e6513a9
|
/eda/KAO/16_KAO_merging_CD3.1_results_with_Lipidex_output.R
|
70f6ab1326410ebcce11b8c67391ecff87f5a1ab
|
[
"MIT"
] |
permissive
|
jsgro/COVID-19_Multi-Omics
|
3198c75f5c5a17b55e829e66754c01ebb15d0689
|
1b7e6f3eb3aa78529c8a2c28a58f4c2c0cbeeafa
|
refs/heads/master
| 2023-01-02T04:20:38.025786
| 2020-10-29T21:01:54
| 2020-10-29T21:01:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,902
|
r
|
16_KAO_merging_CD3.1_results_with_Lipidex_output.R
|
###### 16_KAO_merging_CD3.1_results_with_Lipidex_output.R #######
## To help identify some of the unknown features we searched raw files
## with extra nodes in Compound Discoverer 3.1: MZ Cloud, chemspider,
## mzvault, and formula.
## These results were further filtered down in excel to include only
## features (putative IDs) with matches to mz Cloud ("Full Match") or
## mz Vault ("Full Match"). Next, because Compound Discoverer output
## collapses the different adduct m/z into one compound. Another
## 2 columns were added: m/z and adduct. If a compound had more than
## one adduct, a separate row was generated for each adduct with
## a copy of the identification results. This strategy, in theory,
## should help with matching the unknowns from LipiDex output since
## Lipidex output report features m/z and RT.
##
## This script is intended to match unknown features by m/z and RT
## between lipidex results and these modified CD3.1 output results.
## The matching will be done by first rounding RT and m/z values and
## then merging the 2 documents.
library(DBI)
library(RSQLite)
## load in lipid data from db
# connect
con <- dbConnect(RSQLite::SQLite(), dbname = "P:/All_20200428_COVID_plasma_multiomics/SQLite Database/Covid-19 Study DB.sqlite")
# pull lipid biomolecules (omics_id 2) that passed QC (keep = 1)
lipids <- dbGetQuery(con, "SELECT * FROM biomolecules
WHERE omics_id = 2 AND keep = 1
")
# disconnect
dbDisconnect(con)
## load in CD3.1 results file; only the first 28 columns are relevant
cd_results <- read.csv("P:/All_20200428_COVID_plasma_multiomics/Lipidomics/CD3_all_discovery_metabolomics_filtered.csv", stringsAsFactors = F)
names(cd_results)
cd_results <- cd_results[,1:28]
#### lipids standardized names contain mz and RT, need to round ####
names(lipids)
# Keep only unknown features ("nknown" matches both "Unknown" and "unknown")
lipids_unknowns <- lipids[grep("nknown", lipids$standardized_name),]
# Parse RT and m/z out of the standardized_name string (format assumed to
# contain "mz_<value>_" and "RT_<value>" tokens; column 2 is standardized_name)
lipids_RT <- apply(lipids_unknowns, 1, function(x) unlist(strsplit(x[2], "RT_"))[2])
lipids_RT_round <- round(as.numeric(lipids_RT), digits = 2)
lipids_MZ <- apply(lipids_unknowns, 1, function(x) unlist(strsplit(unlist(strsplit(x[2], "mz_"))[2], "_"))[1])
lipids_MZ_round <- round(as.numeric(lipids_MZ), digits = 2)
## checking to see if there are any potential matches (printed interactively)
table(lipids_MZ_round %in% round(cd_results$m.z, digits =2 ))
length(lipids_MZ_round)
length(cd_results$m.z)
table(lipids_RT_round %in% round(cd_results$RT..min., digits = 2))
# Composite "mz_RT" key (rounded to 2 decimals) used as the merge key below
cd_results$mz_RT <- paste(round(cd_results$m.z, digits = 2), round(cd_results$RT..min., digits = 2), sep ="_")
lipids_unknowns$mz_RT <- paste(lipids_MZ_round, lipids_RT_round, sep = "_")
##### merging the two data sets ####
merge_unknowns <- merge(lipids_unknowns, cd_results, by ="mz_RT")
write.csv(merge_unknowns, "data/Sup_table_2_merge_unknowns.csv")
##### Appending this information to the metadata table in DB ####
## read current metadata table (needed to continue the metadata_id sequence)
con <- dbConnect(RSQLite::SQLite(), dbname = "P:/All_20200428_COVID_plasma_multiomics/SQLite Database/Covid-19 Study DB.sqlite")
# pull the full metadata table
metadata <- dbGetQuery(con, "SELECT * FROM metadata
")
# disconnect
dbDisconnect(con)
names(metadata)
# New metadata rows: one per matched unknown, carrying the CD3.1 name
df_metadata_append <- data.frame(metadata_id = NA,
biomolecule_id = merge_unknowns$biomolecule_id,
metadata_type = "Potential_annotation_through_secondary_db_searching",
metadata_value = merge_unknowns$Name)
# Assign sequential ids continuing after the current maximum
df_metadata_append$metadata_id <- seq(max(metadata$metadata_id)+1, length.out = nrow(df_metadata_append), by = 1)
## Establish a connection to the DB
con <- dbConnect(RSQLite::SQLite(), dbname = "P:/All_20200428_COVID_plasma_multiomics/SQLite Database/Covid-19 Study DB.sqlite")
## write table to DB (append = T adds rows; rerunning duplicates them)
dbWriteTable(con, "metadata", df_metadata_append, append = T)
# check
metadata <- dbReadTable(con, "metadata")
# disconnect
dbDisconnect(con)
|
46e4906d6f826eecb3b871b7c6b98dbd75e7b9e6
|
2605ed5c32e799ddfd7b1f739800e35093fbc24e
|
/R/lib/RSienaTest/sienascript
|
68c264965f4dfe53d18fd47bdba0ee532c106fea
|
[] |
no_license
|
BRICOMATA/Bricomata_
|
fcf0e643ff43d2d5ee0eacb3c27e868dec1f0e30
|
debde25a4fd9b6329ba65f1172ea9e430586929c
|
refs/heads/master
| 2021-10-16T06:47:43.129087
| 2019-02-08T15:39:01
| 2019-02-08T15:39:01
| 154,360,424
| 1
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
sienascript
|
#!/usr/bin/env Rscript
# Launcher script: loads RSienaTest and tcltk (tcltk presumably for a GUI --
# confirm), resets the package-internal DONE flag, then polls it.
suppressPackageStartupMessages(library(RSienaTest))
suppressPackageStartupMessages(library(tcltk))
RSienaTest:::DONE(FALSE)
# Busy-wait in 0.1 s steps until RSienaTest:::DONE() reports completion;
# keeps the Rscript process alive while the interface runs.
while(!RSienaTest:::DONE()) {Sys.sleep(0.1)}
|
|
248c96453b6454a2b1b5b7b42b799c7c1eaa2f88
|
5150cf610a34c6c5be9b598277db1834d8fb16b4
|
/man/brood_check.Rd
|
d60fc6f76f95783f598fcfc6a181335669418825
|
[] |
no_license
|
SPI-Birds/pipelines
|
f3ab78668e526a47bd298b0f7f4127e274a4dfd0
|
cb4bd41bc26d991fa54e520bb15b54333696b4cb
|
refs/heads/master
| 2023-08-16T18:15:29.835023
| 2023-08-09T09:51:56
| 2023-08-09T09:51:56
| 153,275,927
| 0
| 3
| null | 2022-12-04T14:48:00
| 2018-10-16T11:42:17
|
R
|
UTF-8
|
R
| false
| true
| 3,401
|
rd
|
brood_check.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brood_check.R
\name{brood_check}
\alias{brood_check}
\title{Perform quality checks on brood data}
\usage{
brood_check(
Brood_data,
Individual_data,
Capture_data,
Location_data,
approved_list,
output,
skip
)
}
\arguments{
\item{Brood_data}{Data frame. Brood data output from pipeline.}
\item{Individual_data}{Data frame. Individual data output from pipeline.}
\item{Capture_data}{Data frame. Capture data output from pipeline.}
\item{Location_data}{Data frame. Location data output from pipeline.}
\item{approved_list}{List object. List of approved records from brood_approved_list.csv,
capture_approved_list.csv, individual_approved_list.csv, location_approved_list.csv.}
\item{output}{Character. Run checks on potential errors ("errors"), warnings ("warnings"), or both ("both"; default).}
\item{skip}{Character. Identifiers of the individual quality checks (CheckID) that should be skipped.}
}
\value{
A list of:
\item{CheckList}{A summary dataframe of check warnings and errors.}
\item{WarningRows}{A vector of rows with warnings.}
\item{ErrorRows}{A vector of rows with errors.}
\item{Warnings}{A list of row-by-row warnings.}
\item{Errors}{A list of row-by-row errors.}
}
\description{
A wrapper that runs all single checks related to \code{Brood_data}.
}
\details{
The following brood data checks are performed:
\itemize{
\item \strong{B1}: Compare clutch size and brood size per brood using \code{\link{compare_clutch_brood}}.
\item \strong{B2}: Compare brood size and fledgling number per brood using \code{\link{compare_brood_fledglings}}.
\item \strong{B3}: Compare lay date and hatch date per brood using \code{\link{compare_laying_hatching}}.
\item \strong{B4}: Compare hatch date and fledge date per brood using \code{\link{compare_hatching_fledging}}.
\item \strong{B5a-f}: Check brood variable values against reference values using \code{\link{check_values_brood}}. Brood variables checked: ClutchSize_observed, BroodSize_observed, NumberFledged_observed, LayDate_observed, HatchDate_observed, FledgeDate_observed.
\item \strong{B6}: Compare brood size with number of chicks in Individual_data using \code{\link{compare_broodsize_chicknumber}}.
\item \strong{B7}: Check if the IDs of broods are unique using \code{\link{check_unique_BroodID}}.
\item \strong{B8}: Check if the order of clutch types for multiple breeding attempts per female per season is correct using \code{\link{check_clutch_type_order}}.
\item \strong{B9}: Check if parents of a brood are the same species using \code{\link{compare_species_parents}}.
\item \strong{B10}: Check if the brood and the parents of that brood are recorded as the same species using \code{\link{compare_species_brood_parents}}.
\item \strong{B11}: Check if the brood and the chicks in that brood are recorded as the same species using \code{\link{compare_species_brood_chicks}}.
\item \strong{B12}: Check if the sex of mothers listed under FemaleID are female using \code{\link{check_sex_mothers}}.
\item \strong{B13}: Check if the sex of fathers listed under MaleID are male using \code{\link{check_sex_fathers}}.
\item \strong{B14}: Check that both parents appear in Capture_data using \code{\link{check_parents_captures}}.
\item \strong{B15}: Check that nest locations appear in Location_data using \code{\link{check_brood_locations}}.
}
}
|
919d59fc5f88696753be81bfaaffcd323b620857
|
770a3350ed49746a49f7407f7b2eb906c0aeec5f
|
/11.5.2021/ex1.r
|
dfdaaa5111e43691cc8ce51750e5dbb3342ab42f
|
[] |
no_license
|
ntdthanh1409/BaiTapR
|
2343509abc6c312b9c1a3d8a6d4a2b69bc9f9ec0
|
5cee11f67f750cfaf4120765aa2c5828a0fd500d
|
refs/heads/main
| 2023-05-02T18:52:53.833392
| 2021-05-27T10:28:22
| 2021-05-27T10:28:22
| 365,108,841
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 671
|
r
|
ex1.r
|
# Machine-specific working directory -- edit before running.
setwd("C:/Users/PC/Desktop/New folder1")
Owls <- read.table('Owls.txt', header = TRUE,dec = ".")
Owls
# You should check all variables (columns); each should be available for analysis.
names(Owls)
str(Owls)
# Our function:
# Find the unique nests in the dataset. (Original comment read
# "station (tram khi tuong)" -- Vietnamese for "weather station" -- but the
# code iterates over Owls$Nest.)
Allnests <- unique(Owls$Nest)
N <- length(Allnests)
# One jpeg scatterplot per nest, saved as "<nest>.jpg" in the working dir.
for (i in 1:N) {
nest.i <-as.character(Allnests[i])
print(nest.i)
# Subset the rows belonging to this nest.
Owlsi <- Owls[Owls$Nest==nest.i,]
YourFileName <- paste(nest.i, ".jpg", sep = "")
jpeg(file = YourFileName)
plot(x = Owlsi$SiblingNegotiation, y = Owlsi$ArrivalTime,
xlab = 'SiblingNegotiation',
ylab= 'ArrivalTime',
main = nest.i)
dev.off()
}
|
9b0e0aa95e97262d78d726309fdb9d706ea1c884
|
ec16c798bf80bcbde5d0bb621554d3f2d906a974
|
/man/predict.gp.Rd
|
a2196a7bb38255408fa5d0ead60fd38f698c1820
|
[
"Apache-2.0"
] |
permissive
|
mickash/SimpleGPs
|
8dfec3353dd38ec259f0306657ba11d78ae4fc61
|
4c5c8adfa5b7cb20a73fd93998fdb7cbd4a07144
|
refs/heads/master
| 2020-09-09T11:45:45.032197
| 2019-11-13T12:33:47
| 2019-11-13T12:33:47
| 221,213,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 527
|
rd
|
predict.gp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.gp}
\alias{predict.gp}
\title{Predict}
\usage{
\method{predict}{gp}(model, x, full = quicker(model, x))
}
\arguments{
\item{model}{The gp object}
\item{x}{The X data, as a matrix. Vectors will be interpreted as single row matrices.}
\item{full}{Should the full covariance matrix be used.}
}
\value{
A matrix, with the first column giving predictions (means) and the second
column giving variance.
}
\description{
Predict
}
|
ff90d332a23c81c95cd91346860abacb319b58b0
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615842006-test.R
|
3192943f80eeacbfc68f02990ba326aba8b75e59
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 863
|
r
|
1615842006-test.R
|
# Auto-generated valgrind/fuzz regression input: calls the package-internal
# meteor:::ET0_PenmanMonteith with extreme/empty numeric vectors and prints
# the structure of whatever it returns.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(-7.36599172844076e+192, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482505e+93, 1.08231311223032e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 1.99751774328904e-220, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52614199898143e+43, -1.49815227045093e+197, -2.61605801986535e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.50794136200998e+160, 2.00994342527714e-162, 1.81541609065161e-79, 3.9626685912151e-09, 1.75512488375807e+50, 7.89363005555832e+139))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
|
9f8b356fed0c678168ba4cce4e648ebca3eb5fa4
|
2b32eae2b801ef212ea9817721e9c561e3aa9536
|
/man/local_interactions.Rd
|
b10e6109d6bb974f091642467275c76eefb60457
|
[] |
no_license
|
agosiewska/iBreakDown
|
b8784c34ef73c274a5a792555eb3d52f57099849
|
11efabd6e5cb78b48f88034ffcb9ed9565d2f49b
|
refs/heads/master
| 2020-04-28T06:43:16.600905
| 2019-03-18T19:16:46
| 2019-03-18T19:16:46
| 175,069,697
| 0
| 0
| null | 2019-03-18T19:16:47
| 2019-03-11T19:25:35
|
Jupyter Notebook
|
UTF-8
|
R
| false
| true
| 3,128
|
rd
|
local_interactions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/local_interactions.R
\name{local_interactions}
\alias{local_interactions}
\alias{local_interactions.explainer}
\alias{local_interactions.default}
\title{Model Agnostic Sequential Variable Attributions with Interactions}
\usage{
local_interactions(x, ...)
\method{local_interactions}{explainer}(x, new_observation,
keep_distributions = FALSE, ...)
\method{local_interactions}{default}(x, data, predict_function = predict,
new_observation, label = class(x)[1], keep_distributions = FALSE,
order = NULL, interaction_preference = 1, ...)
}
\arguments{
\item{x}{a model to be explained, or an explainer created with function `DALEX::explain()`.}
\item{...}{other parameters.}
\item{new_observation}{a new observation with columns that correspond to variables used in the model.}
\item{keep_distributions}{if `TRUE`, then the distribution of partial predictions is stored in addition to the average.}
\item{data}{validation dataset, will be extracted from `x` if it's an explainer.}
\item{predict_function}{predict function, will be extracted from `x` if it's an explainer.}
\item{label}{character - the name of the model. By default it's extracted from the 'class' attribute of the model.}
\item{order}{if not `NULL`, then it will be a fixed order of variables. It can be a numeric vector or vector with names of variables/interactions.}
\item{interaction_preference}{a constant that set the preference for interactions. By default `1`. The larger the more frequently intereactions will be presented in explanations.}
}
\value{
an object of the `break_down` class.
}
\description{
This function implements decomposition of model predictions with identification
of interactions.
The complexity of this function is O(2*p) for additive models and O(2*p^2) for interactions.
This function works in a similar way to step-up and step-down greedy approximations in function `breakDown::break_down()`.
The main difference is that in the first step the order of variables and interactions is determined.
And in the second step the impact is calculated.
}
\examples{
\dontrun{
library("DALEX")
library("iBreakDown")
library("randomForest")
set.seed(1313)
# example with interaction
# classification for HR data
model <- randomForest(status ~ . , data = HR)
new_observation <- HR_test[1,]
explainer_rf <- explain(model,
data = HR[1:1000,1:5],
y = HR$status[1:1000])
bd_rf <- local_interactions(explainer_rf,
new_observation)
bd_rf
plot(bd_rf)
# example for regression - apartment prices
# here we do not have interactions
model <- randomForest(m2.price ~ . , data = apartments)
explainer_rf <- explain(model,
data = apartments_test[1:1000,2:6],
y = apartments_test$m2.price[1:1000])
new_observation <- apartments_test[1,]
bd_rf <- local_interactions(explainer_rf,
new_observation,
keep_distributions = TRUE)
bd_rf
plot(bd_rf)
plot(bd_rf, plot_distributions = TRUE)
}
}
\seealso{
\code{\link{break_down}}, \code{\link{local_attributions}}
}
|
1829fdf3e308849379ebbabaec4f0c76232e1961
|
a52338b0bc2e42d3e51bd329144e70e67f242386
|
/cachematrix.R
|
46c1e0587b1d7d7a5f6fa365c3d1c785e03ca3a8
|
[] |
no_license
|
RedaAitOuahmed/ProgrammingAssignment2
|
aaf65b5145de81fb5b7efb8e0d48584af0ba36fb
|
f6cdd2ef3555c30c806924fe12f0625df9e55003
|
refs/heads/master
| 2020-03-10T21:20:39.720050
| 2018-04-15T08:50:37
| 2018-04-15T08:50:37
| 129,591,836
| 0
| 0
| null | 2018-04-15T08:37:45
| 2018-04-15T08:37:45
| null |
UTF-8
|
R
| false
| false
| 1,224
|
r
|
cachematrix.R
|
## makeCacheMatrix builds a special "matrix" object able to cache its own
## inverse; cacheSolve (below) consumes such objects.
## Returns a list of four accessor closures sharing one environment:
##   set(y)         - replace the stored matrix and invalidate the cache
##   get()          - return the stored matrix
##   setInverse(iv) - store a computed inverse in the cache
##   getInverse()   - return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # New matrix means any previously cached inverse is stale.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inv) cached_inverse <<- inv,
    getInverse = function() cached_inverse
  )
}
## cacheSolve returns the inverse of a makeCacheMatrix object.
## It first checks the object's cache: if an inverse was already computed it
## is returned directly; otherwise solve() is called on the stored matrix,
## the result is written back into the cache, and then returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, store, return.
    message("no cached inversed matrix")
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  # Cache hit: reuse the stored inverse.
  message("getting cached inversed matrix")
  cached
}
|
63bbe6633e2d375e7b78b281a7791a953de4b20c
|
d909c0457c4648f0986efa041f6e12513f99bb6d
|
/handlers/data_census_utility.R
|
c27a8b01bd6c3d34defd60e202ab90f055248ee6
|
[] |
no_license
|
michaelgaunt404/uhsgt_dashboard
|
6f0431f9b103b219f9e4870fff6067dce3b12529
|
45efcf45f1120763a7f6bd9b4405a52e9163d5fc
|
refs/heads/main
| 2023-02-06T11:09:28.325261
| 2020-12-22T19:13:13
| 2020-12-22T19:13:13
| 311,736,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,403
|
r
|
data_census_utility.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Utility script for all US and CA Census layers
#
# By: mike gaunt, michael.gaunt@wsp.com
#
# README: this script gets US census data
#-------- it uses tidycensus, cancensus, and tigris packages
#-------- it is not robust enough to fully automate the process
#-------- separate operations are performed for layers with/without state level fidelity
#-------- also separate write-out for Tribal lands since it only produces tabular data which needs to be merged with spatial data
#-------- puts files in application_shapefiles so that they can be processed to map_ready
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#SETUP==========================================================================
#library
library(tigris)
library(tidycensus)
library(cancensus)
# install.packages("tigris")
# install.packages("tidycensus")
# install.packages("cancensus")
#path
# # library(magrittr)
# #
# if (!exists("BEING_SOURCED_FROM_SOMEWHERE")){
# setwd("~/")
# rstudioapi::getSourceEditorContext()$path %>%
# as.character() %>%
# gsub("/R.*","\\1", .) %>%
# path.expand() %>%
# setwd()
# }
#
#global environment variables
# NOTE(review): a Census API key is hardcoded in the commented line below;
# it should be moved to an environment variable rather than kept in source.
# census_api_key("242ce7c9a4b28df96a99abda6972ad638d5d5afc", install = TRUE)
#functions
# get_acs_clean: fetch 2018 ACS estimates for the variables listed in
# `selection` (a data frame with columns boundary, variable, var_name) for
# the given `states`, reproject to WGS84, pivot to one column per var_name,
# and clip to the study corridor.
# NOTE(review): relies on the global `corrdior_buffer` (sic) being defined
# elsewhere before this function is called -- confirm.
get_acs_clean = function(selection, states){
get_acs(geography = unique(selection$boundary),
variables = selection$variable,
state = states,
geometry = TRUE,
year = 2018) %>%
st_transform(crs = 4326) %>%
select(-GEOID, -moe) %>%
merge(., selection[, c("variable", "var_name")]) %>%
select(-variable) %>%
spread(var_name, estimate) %>%
select(NAME, selection$var_name) %>%
st_filter(corrdior_buffer)
}
#DATA IMPORT====================================================================
# State-level data and manual operations: read the layer catalogue, keep the
# two layers with state-level fidelity, and fetch their ACS data for WA + OR.
tmp = readxl::read_xlsx("data_source_list.xlsx", sheet = "tidycensus") %>%
janitor::remove_empty(c("cols", "rows")) %>%
mutate(unique_id = rownames(.)) %>%
group_by(processed_name) %>%
nest() %>%
.[which(.$processed_name %in% c("US_Congressional_Districts", "US_Census")),] %>%
mutate(data_spatial = map(data, get_acs_clean, c("WA", "OR")))
# Manual post-processing for the US_Census layer only: convert counts to
# percentages, split NAME into Tract/County/State, and flag tracts below
# their county's median income.
tmp[which(tmp$processed_name %in% "US_Census"), "data_spatial"] =
tmp[which(tmp$processed_name %in% "US_Census"), "data_spatial"] %>%
unnest(cols = data_spatial) %>%
mutate(`Population at or Below Poverty` = round(100*`Population at or Below Poverty`/`Total Population (20-64yrs)`, 1)) %>%
mutate_at(vars(contains("alone")), list((~(100*./`Total Population`) %>%
round(1)))) %>%
separate(col = "NAME", sep = ", ", into = c("Tract", "County", "State")) %>%
group_by(County) %>%
mutate(`County Median Income (dollars)` = median(`Median income (dollars)`, na.rm = T)) %>%
ungroup() %>%
mutate(`Median income status` = ifelse(`Median income (dollars)`<`County Median Income (dollars)`,
"Below County Median", "Above County Median")) %>%
nest(cols = everything())
# National-level data and manual operations: remaining layers are fetched per
# state (OR, WA separately) and the results row-bound together.
tmp_sep = readxl::read_xlsx("data_source_list.xlsx", sheet = "tidycensus") %>%
janitor::remove_empty(c("cols", "rows")) %>%
filter(processed_name != "US_First_Peoples") %>%
mutate(unique_id = rownames(.)) %>%
group_by(processed_name) %>%
nest() %>%
.[which(.$processed_name %nin% c("US_Congressional_Districts", "US_Census")),] %>%
mutate(data_spatial_or = map(data, get_acs_clean, c("OR")),
data_spatial_wa = map(data, get_acs_clean, c("WA")),
data_spatial = list(data_spatial_or, data_spatial_wa) %>%
pmap(function(x,y)
rbindlist(list(x,y))
))
# Write-out: pair layer names with their spatial data.
# NOTE(review): "wirte" is a typo for "write" but the name is used
# consistently, so it is left unchanged here.
wirte_out_file_names = list(c(tmp$processed_name, tmp_sep$processed_name),
c(tmp$data_spatial, tmp_sep$data_spatial))
path = "application_shapefiles/"
# Remove any stale per-layer output directories, then recreate them.
wirte_out_file_names[[1]] %>%
lapply(function(x) paste0(path, x) %>%
unlink(recursive = T))
wirte_out_file_names[[1]] %>%
unlist() %>%
lapply(function(x) paste0(path, x) %>%
dir.create())
# One shapefile per layer: application_shapefiles/<name>/<name>.shp
list(wirte_out_file_names[[1]],
wirte_out_file_names[[2]]) %>%
pmap(function(x, y)
y %>% st_as_sf() %>%
st_write(., paste0(path, x, "/", x, ".shp"))
)
#Tribal Lands===================================================================
# tidycensus does not have a first-peoples layer, so we use 'tigris' for the
# geometry and merge it with the tabular ACS data.
#data import
data_first_people = readxl::read_xlsx("data_source_list.xlsx", sheet = "tidycensus") %>%
janitor::remove_empty(c("cols", "rows")) %>%
filter(processed_name == "US_First_Peoples")
# Get tabular data from tidycensus: one get_acs() call per boundary/variable
# pair listed in the catalogue (no geometry).
first_peoples_metrics = data_first_people %>%
list(.$boundary, .$variable) %>%
.[-1] %>%
pmap(function(x,y) get_acs(geography = x,
variables = y,
geometry = F) )
# Get TIGER lines (native_areas) and merge with the tabular data: pivot one
# column per variable, attach geometry by GEOID, reproject, clip to the
# corridor, and apply the human-readable column names from the catalogue.
first_peoples_metrics_sf = first_peoples_metrics %>%
rbindlist() %>%
# unique() %>%
mutate(variable = fct_inorder(variable)) %>%
pivot_wider(id_cols = -moe,
names_from = variable, values_from = estimate) %>%
merge(native_areas() %>%
select(GEOID), ., by = "GEOID") %>%
st_transform(crs = 4326) %>%
st_filter(corrdior_buffer) %>%
select(-GEOID) %>%
set_names(c("Name", data_first_people$var_name, "geometry"))
# Write-out: recreate the output folder, then write the shapefile.
# NOTE(review): `names` shadows base::names() for the rest of the script.
location = "application_shapefiles"
names = "US_First_Peoples"
names %>%
map(function(x)
paste0(location, "/", x) %>%
unlink(recursive = T))
names %>%
map(function(x)
paste0(location, "/", x) %>%
dir.create())
st_write(first_peoples_metrics_sf,
paste0(location, "/", names, "/", names, ".shp"),
# "application_shapefiles/US_First_Peoples/yolo2.shp",
append=FALSE,
)
#Canada Census Layers===========================================================
options(cancensus.cache_path = 'ca_census_cache')
# NOTE(review): CensusMapper API key is hardcoded below; consider moving it
# to an environment variable / gitignored config instead of source control.
options(cancensus.api_key = 'CensusMapper_a81cdd3e133a13fa5dd3e8b67652ad70')
# for questions go here: https://cran.r-project.org/web/packages/cancensus/vignettes/cancensus.html
# var = list_census_vectors("CA16")
# var %>% view()
# Read the Canadian-census portion of the layer catalogue, dropping rows
# explicitly marked for exclusion.
data_ca_census = readxl::read_xlsx("data_source_list.xlsx", sheet = "ca_census") %>%
janitor::remove_empty(c("cols", "rows")) %>%
filter(exclude != "Y")
# Pull census-tract level data for the Vancouver (59933) and Abbotsford
# (59932) CMAs as sf geometry.
census_data_CT <- get_census(dataset='CA16',
regions = list(CMA = c("59933", "59932")),
vectors = data_ca_census$variable,
level='CT',
geo_format = 'sf')
# Keep only the region name, area, and CA16 vector columns; strip the
# "v_CA16_xxx:" prefix from the column names.
census_data_CT_final = census_data_CT %>%
select(`Region Name`, `Area (sq km)`, contains("v_CA16")) %>%
rename_all(
~gsub('.*:', "\\1", .x) %>%
str_trim()
)
# Write-out: recreate the output folder, then write the shapefile.
location = "application_shapefiles"
names = "CA_Census"
names %>%
map(function(x)
paste0(location, "/", x) %>%
unlink(recursive = T))
names %>%
map(function(x)
paste0(location, "/", x) %>%
dir.create())
st_write(census_data_CT_final,
paste0(location, "/", names, "/", names, ".shp")
)
|
a90e71eec51d69c586d65ab7aa68b1c11feffc0c
|
c5bc2307bcead541658ccd7f49db4eda9a6a3762
|
/R/leslie.R
|
5157d25caa45d42699fa48d2333e6e58258e04b0
|
[] |
no_license
|
shfischer/FLife
|
b2216da5bdf3f463cc7ea354e49115f598ecafe0
|
4979df14be234debeb468d89cf2659bb2f659836
|
refs/heads/master
| 2021-08-18T07:43:51.998014
| 2020-08-02T18:35:08
| 2020-08-02T18:35:08
| 238,467,826
| 0
| 0
| null | 2020-02-05T14:20:48
| 2020-02-05T14:20:46
| null |
UTF-8
|
R
| false
| false
| 4,580
|
r
|
leslie.R
|
#' @title Leslie matrix
#'
#' @description
#' Creates a Leslie Matrix from a \code{FLBRP} object that represents a population at equilibrium
#'
#' @param object \code{FLBRP}
#' @param fbar \code{numeric} F at which survival is calculated
#' @param numbers \code{boolean} numbers or biomass, numbers by default
#' @param ... any other arguments
#'
#' @aliases leslie leslie-method leslie,FLBRP-method
#'
#' @return \code{matrix}
#'
#' @export
#' @docType methods
#' @rdname leslie
#'
#' @seealso \code{\link{lhRef}}, \code{\link{lhPar}}, \code{\link{lhEql}}
#'
#'
#' @examples
#' \dontrun{
#' eql=lhEql(lhPar(FLPar(linf=100)))
#' leslie(eql)
#' }
# leslie() for FLBRP: build one Leslie matrix per iteration of the
# equilibrium object. Returns an FLPar array (age x age x iter) with
# survival probabilities on the sub-diagonal and per-capita recruitment
# contributions in the first row; entries are counts-based by default or
# biomass-based when numbers=FALSE.
setMethod("leslie", signature(object="FLBRP"),
function(object,fbar=FLQuant(0),numbers=TRUE,...){
# Bring the object to equilibrium at the requested fishing mortality.
fbar(object)=fbar
object=brp(object)
# Align the first dimnames name of fbar with that of the natural-mortality slot.
names(dimnames(fbar(object)))[1]=names(dimnames(object@m))[1]
ages=dims(object)$min:dims(object)$max
# Empty age x age x iter array to hold the Leslie matrices.
mx=array(0, dim =c(length(ages),length(ages),dims(ssb(object))$iter),
dimnames=list(age =ages,age=ages,
iter=seq(dims(ssb(object))$iter)))
#survivorship: exp(-M) on the sub-diagonal; the plusgroup (if the max age
#is a plusgroup) also survives within the last age class.
z=exp(-(object@m))
for (i in seq(dims(object)$iter)){
diag(mx[-1,-length(ages),i]) =FLCore::iter(z[-length(ages)],i)
if (range(object)["plusgroup"]==range(object)["max"])
mx[length(ages),length(ages),i]=FLCore::iter(z[length(ages)],i)
}
#recruitment
#tmp =mat(object)*stock.wt(object)*stock.n(object)[,1]
#tmp2 =apply(tmp,2:6,sum)
#mx[1,,]=(rec(object)[,1]%*%tmp%/%tmp2)%/%stock.n(object)[,1]
# a/b slope at origin for bevholt: first row = maturity-at-age * weight-at-age
# scaled by recruits-per-spawner at equilibrium
mx[1,,]=sweep((mat(object)%*%stock.wt(object)),2,(rec(object)[,1]%/%ssb(object)[,1]), "*")
#Mass: when numbers=FALSE rescale the matrix to track biomass instead of counts
if (!numbers){
#recruitment (biomass units)
mx[1,,]=(rec(object)[,1]%*%stock.wt(object)[1,1]%/%ssb(object)[,1])%*%mat(object)[,1]
#Growth: multiply survival by the weight-at-age increment between ages
incr=stock.wt(object)[-1,1]%/%stock.wt(object)[-length(ages),1]
for (i in seq(dims(object)$iter))
diag(mx[-1,-length(ages),i])=iter(incr[-length(ages)],i)*diag(mx[-1,-length(ages),i])
}
# diag(mx[-1,-length(ages)])=c(stock.wt(object)[-1,1])/c(stock.wt(object)[-length(ages),1])
# mx[1,]=c(stock.wt(object)[,1])*mx[1,]
# Return as FLPar, with any NA cells zeroed.
mx=FLPar(mx)
mx[is.na(mx)]=0
return(mx)})
#' @title Population growth rate
#' @description
#' Estimates population growth rate for a Leslie matrix
#'
#' @param m \code{FLPar}
#' @param fec \code{missing}
#' @param ... any other arguments
#'
#' @aliases r-method r,FLPar-method
#'
#' @return \code{FLPar} with growth rate a small population size
#'
#' @export
#'
#' @docType methods
#' @rdname lambda
#'
#' @seealso \code{\link{leslie}}, \code{\link{lhRef}}
#'
#' @examples
#' \dontrun{
#' library(popbio)
#' eql=lhEql(lhPar(FLPar(linf=100)))
#' L=leslie(eql)
#' lambda(L[drop=TRUE])
#' }
#'
setMethod("r", signature(m="FLPar",fec="missing"),
function(m,...){
if (!requireNamespace("popbio", quietly = TRUE)) {
stop("Package \"popbio\" needed for this function to work. Please install it.",
call. = FALSE)}
object=m
dmns=dimnames(object)[-2]
dmns[1]="r"
dm =seq(length(dim(object)))[-(1:2)]
res=alply(object,dm,function(x) {
rtn=try(lambda(x))
if ("character" %in% mode(rtn)) rtn=NA
rtn})
log(FLPar(array(res,dim =unlist(laply(dmns,length)),
dimnames=dmns)))
})
#setMethod("leslie", signature(object="FLBRP"),
oldLeslie=function(object,fbar=FLQuant(0),numbers=TRUE,...){
args=list(...)
for (slt in names(args)[names(args) %in% names(getSlots("FLBRP"))])
slot(object, slt)=args[[slt]]
fbar(object)=fbar
ages=dims(object)$min:dims(object)$max
mx =matrix(0,nrow=length(ages),ncol=length(ages),dimnames=list(age=ages,age=ages))
#survivorship
diag(mx[-1,-length(ages)]) =exp(-m(object)[-length(ages)])
if (range(object)["plusgroup"]==range(object)["max"])
mx[length(ages),length(ages)]=exp(-m(object)[length(ages)])
#recruitment
tmp = mat(object)*stock.wt(object)*stock.n(object)[,1]
tmp2 = apply(tmp,2:6,sum)
mx[1,]= rec(object)[,1]%*%tmp%/%tmp2%/%stock.n(object)[,1]
if (!numbers){
diag(mx[-1,-length(ages)])=diag(mx[-1,-length(ages)])*c(stock.wt(object)[-1,1])/c(stock.wt(object)[-length(ages),1])
mx[1,]=c(stock.wt(object)[,1])*mx[1,]
}
mx[is.na(mx)]=0
return(mx)}
#)
|
b229c1a237a321586f63b155e94bf3fcfb9e5181
|
5678884f50ec1e751a0d5af60e4c1f406bc394dd
|
/R/setup.R
|
35abfd38ed871d8e939acf46bd0289133714ff48
|
[] |
no_license
|
trafficonese/coinmarketcap_v2
|
a4e30ae0c127b73d91aa2dc4c6dec45db210822f
|
0e75e8eb23a78fbd470e9e1d55c4bdb19ce31b5e
|
refs/heads/master
| 2020-06-18T04:53:57.073784
| 2019-08-08T08:10:27
| 2019-08-08T08:10:27
| 196,170,203
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,558
|
r
|
setup.R
|
#' Setup
#'
#' Specifies API Key and the base URL for session
#'
#' @param api_key Your Coinmarketcap API key.
#' @param sandbox Sets the base URL for the API. If set to TRUE, the sandbox-API
#' is called. The default is FALSE.
#'
#' @examples
#' setup("xXXXXxxxXXXxx")
#' get_setup()
#'
#' @export
#' @name setup
setup <- function(api_key = NULL, sandbox = FALSE) {
if (!is.null(api_key)) {
Sys.setenv("COINMARKETCAP_APIKEY" = api_key)
}
url <- ifelse (sandbox,
"sandbox-api.coinmarketcap.com",
"pro-api.coinmarketcap.com")
options("COINMARKETCAP_URL" = url)
}
#' @rdname setup
#' @export
get_setup <- function(){
key <- Sys.getenv("COINMARKETCAP_APIKEY")
url <- getOption("COINMARKETCAP_URL")
.prt <- function(val, what){
cat(crayon::green(cli::symbol$tick),
sprintf("%s is set up", what), "\n")
}
l <- list(
api_key = key,
url = url
)
names <- c("API-KEY", "Base-URL")
lapply(1:length(l), function(x) .prt(l[[x]], names[[x]]))
invisible(l)
}
#' @rdname setup
#' @export
reset_setup <- function(api_key = TRUE, sandbox = TRUE){
.prt <- function(what){
cat(crayon::green(cli::symbol$tick),
sprintf("%s sucessfully reset", what),"\n")
}
if (isTRUE(api_key)) {
Sys.unsetenv("COINMARKETCAP_APIKEY")
.prt("API Key")
}
if (isTRUE(sandbox)) {
options("COINMARKETCAP_URL" = NULL)
.prt("Base URL")
}
}
.get_api_key <- function(){
Sys.getenv("COINMARKETCAP_APIKEY")
}
.get_baseurl <- function(){
getOption("COINMARKETCAP_URL")
}
|
1de00a46b907ca4df63057af86d3f51d09afa77a
|
5042c3a97c9a9fa4d0a5d6794960eec8146afa47
|
/lotteryEstimator/man/srsworJointInclusionProbabilityMatrix.Rd
|
3d9869a18f2ac42123b9232c8c43e46c618f7fcd
|
[] |
no_license
|
Sea2Data/CatchLotteryEstimation
|
eef044b949aa382a0ca70a23c9c32d2ca2a1a4d5
|
e364b505969e5bd861684bdb2fc55f6fe96d2b8f
|
refs/heads/master
| 2020-12-22T06:29:23.504177
| 2020-10-25T09:12:56
| 2020-10-25T09:13:52
| 236,696,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 706
|
rd
|
srsworJointInclusionProbabilityMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/srs.R
\name{srsworJointInclusionProbabilityMatrix}
\alias{srsworJointInclusionProbabilityMatrix}
\title{SRS joint inclusion probability}
\usage{
srsworJointInclusionProbabilityMatrix(n, N)
}
\arguments{
\item{n}{sample size}
\item{N}{population size}
}
\value{
the pairwise joint inclusion probability matrix (n X n)
}
\description{
calculates the pariwise joint inclusion probability for simple random sampling with replacement, based on sample size and population size
}
\details{
For more convenient incorporation in generic estimators, the result is return as a n X n matrix with the joint inclusion probability repeated
}
|
608a7b933c60e5b3e69b2f9d43b42a9c01721f36
|
85b3bd2f0116db14d4ea3a1882f06291b45e6db4
|
/man/get.edges.Rd
|
21724e6edda22eb1a37a7f128239d9148e31604f
|
[] |
no_license
|
huginexpert/RHugin
|
13576c8d6a0eb02886bf33884ecb9efe330cb777
|
71508b780b5362b624273010550efc1ae35b7b3a
|
refs/heads/master
| 2023-07-06T21:00:06.086570
| 2023-06-28T11:49:59
| 2023-06-28T11:49:59
| 182,980,699
| 2
| 0
| null | 2023-06-27T06:43:19
| 2019-04-23T09:26:00
|
C
|
UTF-8
|
R
| false
| false
| 1,192
|
rd
|
get.edges.Rd
|
\name{get.edges}
\alias{get.edges}
\title{Get Edges}
\description{
List the edges in an RHugin domain.
}
\usage{
get.edges(domain)
}
\arguments{
\item{domain}{an RHugin domain.}
}
\value{
a list with one element for each node in \sQuote{domain}. Each element is in turn a list with a single element \sQuote{edges} which is a character vector containing the names of the node's children. An empty character vector indicates that the node has no children. This design is similar to the edge lists used in the \pkg{graph} package except that RHugin refers to the children by name while \pkg{graph} package uses their index.
}
\references{
HUGIN API Reference Manual \url{http://download.hugin.com/webdocs/manuals/api-manual.pdf}: \code{h_node_get_children}.
}
\author{Kjell Konis \email{kjell.konis@icloud.com}}
\examples{
# Create an RHugin domain
apple <- hugin.domain()
# Add nodes
add.node(apple, "Sick", states = c("yes", "no"))
add.node(apple, "Dry", states = c("yes", "no"))
add.node(apple, "Loses", states = c("yes", "no"))
# Add edges
add.edge(apple, "Loses", "Sick")
add.edge(apple, "Loses", "Dry")
# List the edges in apple
get.edges(apple)
}
\keyword{programming}
|
ac91c4743b09279a7e067c8f7d30376e4404188e
|
63f527aecf9e477e30b319661c836f41f27da2de
|
/man/dimnames.msexperiment.Rd
|
8ef9b2a362a306ead4257d9ce6f1ada46dd3d18d
|
[] |
no_license
|
wolski/imsbInfer
|
f0adbb4c114aa660d11c675c7d5b64d649c4b582
|
3bec123f2a5edb7285fafcf1e151e6e941d9d70f
|
refs/heads/master
| 2021-06-11T01:48:24.771188
| 2021-03-27T09:10:06
| 2021-03-27T09:10:06
| 14,570,168
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 475
|
rd
|
dimnames.msexperiment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swathDataUtils.R
\name{dimnames.msexperiment}
\alias{dimnames.msexperiment}
\title{show colnames (it does'nt let you set the columns)}
\usage{
\method{dimnames}{msexperiment}(x)
}
\arguments{
\item{x}{msexperiment}
}
\description{
show colnames (it does'nt let you set the columns)
}
\examples{
data(feature_alignment_requant)
SDat = read2msExperiment(feature_alignment_requant)
dimnames(SDat)
}
|
31c23ead81baa1b73a1d8c6e56518c44913cebee
|
47672a3453dc21b3a4469878dd2801a19337f638
|
/Antigenic_advance/Childhood_sens_analysis2.R
|
7a2c9bebe299db7043536996733c26ed2233dadf
|
[
"MIT"
] |
permissive
|
kgostic/2018_seasonal_flu_manuscript
|
8d9a28e3f2be62d8452725aae03f1fa40c53ee37
|
0ddcfdb90606627ed1d38b22f1f7eecb50cc5e47
|
refs/heads/master
| 2020-04-10T07:33:36.920411
| 2019-09-15T21:15:21
| 2019-09-15T21:15:21
| 160,883,450
| 0
| 0
|
MIT
| 2019-07-06T00:13:34
| 2018-12-07T22:52:50
|
HTML
|
UTF-8
|
R
| false
| false
| 22,182
|
r
|
Childhood_sens_analysis2.R
|
## GENERATE AN ALTERNATE VERSION OF FIG. 4A USING AGES 2-7 TO DEFINE CHILDREN
## SEE "ANTIGENIC_ADVANCE.R" FOR MAIN TEXT ANALYSES.
## Clear memory
rm(list = ls())
#setwd('2017_AZ/')
library(lubridate)
library(ggplot2)
library(ggpubr)
library(reshape2)
library(gridExtra)
setwd('~/Dropbox/R/2018_seasonal_flu/Antigenic_advance/')
## Set the minimum number of cases per year to include in analysis
min.obs = 100
#######################################
## Load data, model inputs, and likelihood function
######################################
## Load nextstrain antigenic advace data
H3N2_aa = read.delim(file = 'nextstrain_data/nextstrain_staging_flu_seasonal_h3n2_ha_21y_metadata.tsv', sep = "\t", header = TRUE)
H1N1_aa_post2009 = read.table(file = 'nextstrain_data/nextstrain_flu_seasonal_h1n1pdm_ha_12y_metadata.tsv', sep = "\t", header = TRUE)
H1N1_aa_pre2009 = read.table(file = "nextstrain_data/elife-01914-fig3-data1-v1.tsv", sep = '\t', header = TRUE)
H3N2_aa_bedford_elife = subset(H1N1_aa_pre2009, lineage == 'H3N2')
H1N1_aa_pre2009 = subset(H1N1_aa_pre2009, lineage == 'H1N1' & year > 2001) # Extract seasonal H1N1 cases from the relevant time period
## Convert pre_2009 data to decimal date format
H1N1_aa_pre2009$Full.date = as.character(H1N1_aa_pre2009$Full.date) # First convert to character
H1N1_aa_pre2009$Full.date = as.Date(H1N1_aa_pre2009$Full.date, format = '%m/%d/%y') # Then to date
H1N1_aa_pre2009$decimal.date = decimal_date(H1N1_aa_pre2009$Full.date) # Then to decimal date
## Fill in entries with no clear date info using the raw year
replace = which(is.na(H1N1_aa_pre2009$decimal.date))
H1N1_aa_pre2009$decimal.date[replace] = H1N1_aa_pre2009$year[replace]
#####################################################################################################################
### Rescale estimates from bedford et al., eLife to match the magnitude of estimates from nextstrain (uses neher et al. methods)
## H1N1 estimates are not available from the same time periods in both data sets, but H3N2 estimates from 1997-2011 are available in both data sets
## Use paired H3N2 estimates to re-scale the Bedford et al. estimates to match nextstrain estimates
## First, find the mean antigenic location along dimension 1 per year
bedford_H3N2_yearly = sapply(1997:2011, function(xx){valid = H3N2_aa_bedford_elife$year == xx; mean(H3N2_aa_bedford_elife$ag1[valid])}); names(bedford_H3N2_yearly) = 1997:2011
## Repeat for Nextstrain data
## Tree model
neher_H3N2_yearly_tree = sapply(1997:2011, function(xx){valid = floor(H3N2_aa$Num.Date) == xx; mean(H3N2_aa$CTiter[valid], na.rm = TRUE)})
## Sub model
neher_H3N2_yearly_sub = sapply(1997:2011, function(xx){valid = floor(H3N2_aa$Num.Date) == xx; mean(H3N2_aa$CTiterSub[valid], na.rm = TRUE)})
### Find the scaling factor that standardizes estimates to span the same range
scale_factor = diff(range(neher_H3N2_yearly_tree))/diff(range(bedford_H3N2_yearly))
## Visualise the raw mean locations
par(mfrow = c(1,1))
## Rescale and visualize the rescaled points
plot(1997:2011, (bedford_H3N2_yearly-min(bedford_H3N2_yearly))*scale_factor, xlab = 'calendar year of isolate collection', ylab = 'mean antigenic location')
points(1997:2011, neher_H3N2_yearly_tree-min(neher_H3N2_yearly_tree), col = 'red')
legend('topleft', c('Bedford et al., eLife, 2014', 'Nextstrain, tree model (CTiter)\n(see Neher et al., PNAS, 2016)'), col = c('black', 'red'), pch = 1)
## Rescale the bedford H1N1 estimates
H1N1_aa_pre2009$ag1 = H1N1_aa_pre2009$ag1*scale_factor
#####################################################################################################################
## CALCULATE ANTIGENIC ADVANCE FOR FOUR CATEGORIES:
## 1. H3N2 (all years)
## 2. H1N1, pre-2009 (re-scaled to match nextstrain/neher estimates)
## 3. 2009 pandemic H1N1 (assume antigenic advance = 0 since this is a new strain)
## 4. H1N1 post-2009 pandemic
####################################################################################################################
############################################# H3N2 #############################################
## - Separate isolates into influenza seasons
## - Find the mean antigenic location per season
## - Find the season-to-season differnce between means to measure average antigenic advance per year
## Our definition is that the NH influenza sesaon begins in week 40
## There are 52.14 weeks in a year
## NH flu season begins in decimal week 40/52.14 = 0.77
## Separate all specimens into NH flu seasons, starting with the 1997-1998 season
## Find average divergence from strains that circulated in 1997, prior to week 40
## Then find the difference between season-to-season divergence
yrs = floor(min(H3N2_aa$Num.Date, na.rm = TRUE)):max(floor(H3N2_aa$Num.Date), na.rm = TRUE) # Years of interest
CTiter.raw = CTiterSub.raw = numeric(length(yrs)) # Initialize raw divergence
names(CTiter.raw) = names(CTiterSub.raw) = paste(yrs, yrs+1, sep = "-")
baseline = colMeans(subset(H3N2_aa, Num.Date < 1997.77, select = c('CTiter', 'CTiterSub'))) # Get initial values
# Get means per year
for(yy in 1:length(yrs)){
valid = which(H3N2_aa$Num.Date >= yrs[yy]+.77 & H3N2_aa$Num.Date < yrs[yy]+1.77) # Extract all the sample indices from a given NH season (week 40-week 39)
CTiter.raw[yy] = mean(H3N2_aa[valid, 'CTiter'])
CTiterSub.raw[yy] = mean(H3N2_aa[valid, 'CTiterSub'])
}
plot(yrs, CTiter.raw)
points(yrs, CTiterSub.raw, col = 'blue')
legend('topleft', c('tree model', 'sub model'), fill = c(1,4))
## Find the season-to-season difference
CTiter.H3N2 = c('1997-1998' = 0, diff(CTiter.raw))
CTiterSub.H3N2 = c('1997-1998' = 0, diff(CTiterSub.raw))
plot(seq(1997.5, 2018.5, by = 1), CTiter.H3N2, col = 'red', main = 'H3N2 CTiter', ylab = 'delta_postion'); abline(h = 0)
plot(seq(1997.5, 2018.5, by = 1), CTiterSub.H3N2, col = 'red', main = 'H3N2 CTiterSub', ylab = 'delta_position'); abline(h = 0)
############################################# Post-pandemic H1N1 #############################################
## --------------------- aa measured on same scale as H3N2 using nextstrain data ----------------------------
## Establish a baseline
yrs = 2009:2018 # Years of interest
CTiter.raw = CTiterSub.raw = numeric(length(yrs)) # Initialize raw divergence
names(CTiter.raw) = names(CTiterSub.raw) = paste(yrs, yrs+1, sep = "-")
baseline = colMeans(subset(H1N1_aa_post2009, Num.Date < 2009+.77, select = c('CTiter', 'CTiterSub')))
for(yy in 1:length(yrs)){
valid = which(H1N1_aa_post2009$Num.Date >= yrs[yy]+.77 & H1N1_aa_post2009$Num.Date < yrs[yy]+1.77) # Extract all the sample indices from a given NH season (week 40-week 39)
CTiter.raw[yy] = mean(H1N1_aa_post2009[valid, 'CTiter'])
CTiterSub.raw[yy] = mean(H1N1_aa_post2009[valid, 'CTiterSub'])
}
plot(yrs, CTiter.raw, col = 'blue', main = 'H1N1 post-pandemic CTiter')
points(yrs, CTiterSub.raw, col = 'red', main = 'H1N1 post-pandemic CTiterSub')
legend('topleft', c('tree model', 'sub model'), fill = c(4,2))
## Find the season-to-season difference
CTiter.H1N1 = c('2009-2010' = 0, diff(CTiter.raw))
CTiterSub.H1N1 = c('2009-2010' = 0, diff(CTiterSub.raw))
plot(seq(2009.5, 2018.5, by = 1), CTiter.H1N1, col = 'blue',ylab = 'delta_postion'); abline(h = 0)
points(seq(2009.5, 2018.5, by = 1), CTiterSub.H1N1, col = 'red',ylab = 'delta_postion'); abline(h = 0)
legend('bottomleft', c('tree model', 'sub model'), fill = c(4,2))
############################################# pandemic H1N1 #############################################
## -------------------------- Only one year of circulation, aa = NA ------------------------------
############################################# pre-pandemic H1N1 #############################################
## ----------------- aa originally measured on a different scale than post-pandemic H1N1 and H3N2-----------------------
## antigenic location esimates were rescled above to match post-pandemic and H3N2 estimates
yrs = 2002:2008 # Years of interest
aassn.raw = numeric(length(yrs)) # Initialize raw divergence
names(aassn.raw) = paste(yrs, yrs+1, sep = "-")
baseline = colMeans(subset(H1N1_aa_pre2009, decimal.date < 2002+.77, select = c('ag1')))
for(yy in 1:length(yrs)){
valid = which(H1N1_aa_pre2009$decimal.date >= yrs[yy]+.77 & H1N1_aa_pre2009$decimal.date < yrs[yy]+1.77) # Extract all the sample indices from a given NH season (week 40-week 39)
aassn.raw[yy] = mean(H1N1_aa_pre2009[valid, 'ag1'])
}
plot(yrs, aassn.raw, col = 'blue')
## Find the season-to-season difference
aassn = c('2002-2003' = 0, diff(aassn.raw))
plot(seq(2002.5, 2008.5, by = 1), aassn, col = 'blue', ylab = 'delta_postion'); abline(h = 0)
###### Import case data
setwd('../2017_AZ/')
source('00-Inputs_multinomial.R')
setwd('../Antigenic_advance/')
#######################################
## Reformat H3N2 data for plotting
######################################
H3.master = H3.master[-which(as.numeric(rownames(H3.master))<200203),] # Drop seassons before 200203 season, the first year with corresponding data on antigenic advance.
## Remove data from earlier seasons
num.season = as.numeric(gsub(rownames(H3.master), pattern = '(\\d{4})\\d{2}', replacement = '\\1'))+1 ## extract the second year of each sesaon. Use this to convert birth years to ages
age.mat = t(sapply(num.season, FUN = function(xx){xx-2015:1918})) # Entires correspond to the age of each entry in H3.master
## Enter case counts for ages 0-85 into each season starting with 02-03
H3.dat = matrix(NA, nrow = nrow(H3.master), ncol = 86, dimnames = list(rownames(H3.master), 0:85))
for(ii in 1:nrow(H3.dat)){
H3.dat[ii, ] = H3.master[ii, which(age.mat[ii,] %in% 0:85)]
}
## Get rid of seasons with fewer than min.obs
H3.dat = H3.dat[rowSums(H3.dat)>=min.obs, ]
## Calculte frequencies for plotting
H3.freq = H3.dat/rowSums(H3.dat)
H3.freq_cumulative = t(apply(H3.freq, 1, cumsum))
## Melt the age-specific frequencies into a data frame, with antigenic advance ('CTiter') as ID vars
H3.summary = melt(as.data.frame(cbind(freq = H3.freq,
'CTiter' = CTiter.H3N2[gsub(rownames(H3.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")],
'CTiterSub' = CTiterSub.H3N2[gsub(rownames(H3.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")],
'season' = rownames(H3.freq))), id.vars = c('CTiter', 'CTiterSub', 'season'), variable_name = 'age', value.name = 'frequency')
## Add cumulative frequency
H3.summary$c.freq = melt(as.data.frame(cbind(c.freq = H3.freq_cumulative,
'season' = rownames(H3.dat))),
id.vars = 'season', variable_name = 'age', value_name = 'c.freq')$value
## Add counts
H3.summary$count = melt(as.data.frame(cbind(count = H3.dat,
'season' = rownames(H3.dat))),
id.vars = 'season', variable_name = 'age', value_name = 'count')$value
## Define a function to reformat data to create histograms
age_bins = function(age_tab){
out = cbind(rowSums(age_tab[,as.character(2:7)]),
rowSums(age_tab[,as.character(8:40)]),
rowSums(age_tab[,as.character(41:60)]),
rowSums(age_tab[,as.character(61:85)]))
out = out/rowSums(out)
colnames(out) = c('2-7', '8-40', '41-60', '61-85')
as.data.frame(out)
}
## Bin case counts into broad age groups
H3.age.bins = cbind(age_bins(H3.dat), 'CTiter' = CTiter.H3N2[gsub(rownames(H3.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")], 'season' = rownames(H3.dat))
H3.age.bins_sub = cbind(age_bins(H3.dat), 'CTiterSub' = CTiterSub.H3N2[gsub(rownames(H3.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")], 'season' = rownames(H3.dat))
#######################################
## Refromat post-pandemic H1N1 for plotting
## Because we have antigenic advance data on the same scale as H3N2 data, we can plot these on the same panel
######################################
H1.master = H1.master[-(which(as.numeric(rownames(H1.master))<200203)),] # Drop seassons before 200203 season, the first year with corresponding data on antigenic advance.
## Extract post-pandemic cases
H1.post.pandemic = H1.master[which(as.numeric(rownames(H1.master))>200900), ]
## Remove data from earlier seasons
num.season = as.numeric(gsub(rownames(H1.post.pandemic), pattern = '(\\d{4})\\d{2}', replacement = '\\1'))+1 ## extract the second year of each sesaon. Use this to convert birth years to ages
age.mat = t(sapply(num.season, FUN = function(xx){xx-2015:1918})) # Entires correspond to the age of each entry in H3.master
## Enter case counts for ages 0-85 into each season starting with 02-03
H1.dat = matrix(NA, nrow = nrow(H1.post.pandemic), ncol = 86, dimnames = list(rownames(H1.post.pandemic), 0:85))
for(ii in 1:nrow(H1.dat)){
H1.dat[ii, ] = H1.post.pandemic[ii, which(age.mat[ii,] %in% 0:85)]
}
## Get rid of seasons with fewer than 50 observations
H1.dat = H1.dat[rowSums(H1.dat)>=min.obs, ]
## Calculte frequencies for plotting
H1.freq = H1.dat/rowSums(H1.dat)
H1.freq_cumulative = t(apply(H1.freq, 1, cumsum))
## Melt the age-specific frequencies into a data frame, with antigenic advance ('CTiter') as ID vars
H1.summary = melt(as.data.frame(cbind(freq = H1.freq,
'CTiter' = CTiter.H1N1[gsub(rownames(H1.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")],
'CTiterSub' = CTiterSub.H1N1[gsub(rownames(H1.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")],
'season' = rownames(H1.freq))), id.vars = c('CTiter', 'CTiterSub', 'season'), variable_name = 'age', value.name = 'frequency')
## Add cumulative frequency
H1.summary$c.freq = melt(as.data.frame(cbind(c.freq = H1.freq_cumulative,
'season' = rownames(H1.dat))),
id.vars = 'season', variable_name = 'age', value_name = 'c.freq')$value
## Add counts
H1.summary$count = melt(as.data.frame(cbind(count = H1.dat,
'season' = rownames(H1.dat))),
id.vars = 'season', variable_name = 'age', value_name = 'count')$value
H1.age.bins = cbind(age_bins(H1.dat), 'CTiter' = CTiter.H1N1[gsub(rownames(H1.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")], 'season' = rownames(H1.dat))
H1.age.bins_sub = cbind(age_bins(H1.dat), 'CTiterSub' = CTiterSub.H1N1[gsub(rownames(H1.dat), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")], 'season' = rownames(H1.dat))
#######################################
## Refromat pre-pandemic H1N1 for plotting
## Because we have antigenic advance data on the same scale as H3N2 data, we can plot these on the same panel
######################################
## Extract post-pandemic cases
H1.pre.pandemic = H1.master[which(as.numeric(rownames(H1.master))<200900), ]
## Remove data from earlier seasons
num.season = as.numeric(gsub(rownames(H1.pre.pandemic), pattern = '(\\d{4})\\d{2}', replacement = '\\1'))+1 ## extract the second year of each sesaon. Use this to convert birth years to ages
age.mat = t(sapply(num.season, FUN = function(xx){xx-2015:1918})) # Entires correspond to the age of each entry in H3.master
## Enter case counts for ages 0-85 into each season starting with 02-03
H1.dat.pre = matrix(NA, nrow = nrow(H1.pre.pandemic), ncol = 86, dimnames = list(rownames(H1.pre.pandemic), 0:85))
for(ii in 1:nrow(H1.dat.pre)){
H1.dat.pre[ii, ] = H1.pre.pandemic[ii, which(age.mat[ii,] %in% 0:85)]
}
## Get rid of seasons with fewer than 50 observations
H1.dat.pre = H1.dat.pre[rowSums(H1.dat.pre)>=min.obs, ]
## Calculte frequencies for plotting
H1.freq.pre = H1.dat.pre/rowSums(H1.dat.pre)
H1.freq.pre_cumulative = t(apply(H1.freq.pre, 1, cumsum))
## Melt the age-specific frequencies into a data frame, with antigenic advance ('CTiter') as ID vars
H1.summary.pre = melt(as.data.frame(cbind(freq = H1.freq.pre,
'CTiter' = aassn[gsub(rownames(H1.dat.pre), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")],
'CTiterSub' = aassn[gsub(rownames(H1.dat.pre), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")],
'season' = rownames(H1.freq.pre))), id.vars = c('CTiter', 'CTiterSub', 'season'), variable_name = 'age', value.name = 'frequency')
## Add cumulative frequency
H1.summary.pre$c.freq = melt(as.data.frame(cbind(c.freq = H1.freq.pre_cumulative,
'season' = rownames(H1.dat.pre))),
id.vars = 'season', variable_name = 'age', value_name = 'c.freq')$value
## Add counts
H1.summary.pre$count = melt(as.data.frame(cbind(count = H1.dat.pre,
'season' = rownames(H1.dat.pre))),
id.vars = 'season', variable_name = 'age', value_name = 'count')$value
H1.pre.age.bins = cbind(age_bins(H1.dat.pre), 'CTiter' = aassn[gsub(rownames(H1.dat.pre), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")], season = rownames(H1.dat.pre))
# Just make a copy with different var names.
H1.pre.age.bins_sub = cbind(age_bins(H1.dat.pre), 'CTiterSub' = aassn[gsub(rownames(H1.dat.pre), pattern = "(\\d{4})(\\d{2})", replacement = "\\1-20\\2")], season = rownames(H1.dat.pre))
## Study Ctiter method
H1.age.bins = melt(H1.age.bins, id.vars = c('CTiter', 'season')); H1.age.bins$lineage = 'H1N1_post_2009'
H3.age.bins = melt(H3.age.bins, id.vars = c('CTiter', 'season')); H3.age.bins$lineage = 'H3N2'
H1.pre.age.bins = melt(H1.pre.age.bins, id.vars = c('CTiter', 'season')); H1.pre.age.bins$lineage = 'H1N1_seasonal'
full.age.bins = rbind(H1.age.bins, H3.age.bins, H1.pre.age.bins)
full.age.bins$seasontype = paste(full.age.bins$season, full.age.bins$lineage, sep = '_')
## Reorder seasons according to CTiter
useasontype = unique(full.age.bins$seasontype)
uaa = unique(full.age.bins$CTiter)
full.age.bins$seasontype = factor(full.age.bins$seasontype,
levels = useasontype[order(uaa)])
## Repeat for sub model
H1.age.bins_sub = melt(H1.age.bins_sub, id.vars = c('CTiterSub', 'season')); H1.age.bins_sub$lineage = 'H1N1_post_2009'
H3.age.bins_sub = melt(H3.age.bins_sub, id.vars = c('CTiterSub', 'season')); H3.age.bins_sub$lineage = 'H3N2'
H1.pre.age.bins_sub = melt(H1.pre.age.bins_sub, id.vars = c('CTiterSub', 'season')); H1.pre.age.bins_sub$lineage = 'H1N1_seasonal'
full.age.bins_sub = rbind(H1.age.bins_sub, H3.age.bins_sub, H1.pre.age.bins_sub)
full.age.bins_sub$seasontype = paste(full.age.bins_sub$season, full.age.bins_sub$lineage, sep = '_')
## Reorder seasons according to CTiter
useasontype = unique(full.age.bins_sub$seasontype)
uaa = unique(full.age.bins_sub$CTiterSub)
full.age.bins_sub$seasontype = factor(full.age.bins_sub$seasontype,
levels = useasontype[order(uaa)])
## Barplots of the fraction of cases in each age group, by season, color by antigenic advance
barplots = ggplot()+
geom_bar(stat = 'identity', data = full.age.bins, aes(x = variable, y = value, fill = CTiter, group = seasontype, color = lineage), position = 'dodge')+
scale_fill_viridis_c(option = 'plasma', na.value = 'gray')+
scale_discrete_manual(values = c('black', 'gray', 'white'), aesthetics = 'color')
barplots
## Repeat for sub model
barplots_sub = ggplot()+
geom_bar(stat = 'identity', data = full.age.bins_sub, aes(x = variable, y = value, fill = CTiterSub, group = seasontype, color = lineage), position = 'dodge')+
scale_fill_viridis_c(option = 'plasma', na.value = 'gray')+
scale_discrete_manual(values = c('black', 'gray', 'white'), aesthetics = 'color')
barplots_sub
### Set up anova-like plot
## Calculate spearman correlation coefficients for each variable
## Only calculate for H3N2 because there are too few data points for other types
get.cor = function(xx){
valid = subset(H3.age.bins, variable == xx)
out = cor.test(valid$CTiter, valid$value, method = 'spearman')
c(r = as.numeric(out$estimate), p = as.numeric(out$p.value), variable = xx)
}
cor.df = as.data.frame(t(sapply(unique(H3.age.bins$variable), FUN = get.cor)))
cor.df$variable = unique(H3.age.bins$variable)
cor.df$label = paste('r=', round(cor.df$r,2), ' p=', round(cor.df$p,2), sep = '')
cor.df$lineage = 'H3N2'
cor.df$CTiter = NA
## Repeat for sub model
get.cor = function(xx){
valid = subset(H3.age.bins_sub, variable == xx)
out = cor.test(valid$CTiterSub, valid$value, method = 'spearman')
c(r = as.numeric(out$estimate), p = as.numeric(out$p.value), variable = xx)
}
cor.df.sub = as.data.frame(t(sapply(unique(H3.age.bins_sub$variable), FUN = get.cor)))
cor.df.sub$variable = unique(H3.age.bins_sub$variable)
cor.df.sub$label = paste('r=', round(cor.df.sub$r,2), ' p=', round(cor.df.sub$p,2), sep = '')
cor.df.sub$lineage = 'H3N2'
cor.df.sub$CTiterSub = NA
## Rename type as factor so labels look nice
full.age.bins$lineage = factor(full.age.bins$lineage, levels = c('H1N1_post_2009', 'H1N1_seasonal', 'H3N2'), labels = c('H1N1 post-2009', 'H1N1 pre-2009', 'H3N2'))
## Tree model
anova_like_plot = ggplot()+
facet_grid(.~variable) +
geom_smooth(data = subset(full.age.bins, lineage == 'H3N2'), aes(x = CTiter, y = value, group = lineage, color = lineage), method = 'lm', na.rm = TRUE, lwd = .5, lty = 2, se = FALSE, show.legend = FALSE) +
geom_point(data = full.age.bins, aes(x = CTiter, y = value, color = lineage, shape = lineage)) +
theme_bw() +
geom_label(data = cor.df, aes(x = -.1, y = .58, label = label), hjust = 0, color = 4) +
xlab('Antigenic advance, relative to previous season') +
ylab('Fraction cases') +
theme(legend.position="top")
anova_like_plot
ggsave(filename = '../figures/Antigenic_advance_corplot_2child7.tiff', height = 3, width = 7)
|
dd7745e9668d79e3df415798153764961a637986
|
2ae1860f940aef07f514afc7398f567ffb81f2b9
|
/projects/WhoWillLeaveCompany.R
|
f036c1caa2b20d27373fe36e57b73adc64a1cc2b
|
[] |
no_license
|
sebastianBIanalytics/ESEUR-code-data
|
fbc9d99b01b2e9cc10e7bf95064163f1356ecf56
|
9b216c79d280e77cac30a27f61f9d5475a7a864a
|
refs/heads/master
| 2022-04-25T15:26:39.569497
| 2020-04-29T02:31:24
| 2020-04-29T02:31:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,558
|
r
|
WhoWillLeaveCompany.R
|
#
# WhoWillLeaveCompany.R, 13 Feb 19
# Data from:
# Who will leave the company?: {A} large-scale industry study of developer turnover by mining monthly work report
# Lingfeng Bao and Zhenchang Xing and Xin Xia and David Lo and Shanping Li
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG developer employment project
source("ESEUR_config.r")
library("plyr")
mk_long=function(df)
{
return(data.frame(id=rep(df$id, 6),
hours=t(subset(df, select=grepl("hour[1-6]", colnames(df)))),
person=t(subset(df, select=grepl("person$", colnames(df)))),
p_hour_mean=t(subset(df, select=grepl("_hour_mean", colnames(df)))),
p_hour_sum=t(subset(df, select=grepl("_hour_sum", colnames(df)))),
p_hour_std=t(subset(df, select=grepl("_hour_std", colnames(df)))),
p_person_change=t(subset(df, select=grepl("[1-6]_person_change", colnames(df)))),
project_num=rep(df$project_num, 6),
multi_project=rep(df$mutli_project, 6),
is_leave=rep(df$is_leave, 6))
)
}
mrhrs=read.csv(paste0(ESEUR_dir, "projects/WhoWillLeaveCompany.csv.xz"), as.is=TRUE)
# Remove what look like incorrect entries
mrhrs=subset(mrhrs, hour1 != 0)
hrs=ddply(mrhrs, .(id), mk_long)
stay=subset(hrs, is_leave == "no")
leave=subset(hrs, is_leave == "yes")
plot(0, type="n",
xlim=c(1, 6), ylim=c(0, 400))
d_ply(stay, .(id), function(df) lines(df$X1))
proj_1=subset(mrhrs, project_num == 1)
l_mod=glm(is_leave=="yes" ~
# hour1+hour2+hour3+hour4+hour5+hour6+
hour_sum+
# hour_mean+
hour_median+hour_std+hour_max+
task_len_sum+task_len_mean+
task_len_median+task_len_std+
task_len_max+task_zero+
token_sum+token_mean+
token_median+token_std+
token_max+
# flesch+smog+kincaid+
# coleman_liau+automated_readability_index+
# dale_chall+difficult_words+
# linsear_write+gunning_fog+
mutli_project+
p1_person+
I(p1_hour_mean/hour_mean)+
p1_hour_sum+p1_hour_std+
# p1_person_change+
# p2_person+
I(p2_hour_mean/hour_mean)+
p2_hour_sum+p2_hour_std+
p2_person_change+
# p3_person+
I(p3_hour_mean/hour_mean)+
p3_hour_sum+p3_hour_std+
p3_person_change+
# p4_person+
I(p4_hour_mean/hour_mean)+
p4_hour_sum+p4_hour_std+
p4_person_change+
# p5_person+
I(p5_hour_mean/hour_mean)+
p5_hour_sum+p5_hour_std+
p5_person_change+
# p6_person+
I(p6_hour_mean/hour_mean)+
p6_hour_sum+p6_hour_std+
p6_person_change+
avg_person_change+less_zero+
equal_zero
# +larger_zero
, data=proj_1, family=binomial)
summary(l_mod)
|
d51e14ca050a97746755f89070218440eaaaca33
|
8a643c71bdc17b738b136f9aee845300398a56d9
|
/AssignmentWeek2.R
|
101718021afc74172dfec27e5fa50273486b9820
|
[] |
no_license
|
ybag/DevelopingDataProducts
|
0ff1d325e8843e94699aefcc7cbd99b59b200478
|
284c3c1ce1ad5d381a1c274e63c25edcb59c2637
|
refs/heads/master
| 2021-01-22T11:28:33.645708
| 2017-05-29T03:13:38
| 2017-05-29T03:13:38
| 92,699,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 166
|
r
|
AssignmentWeek2.R
|
library(leaflet)
map <- leaflet() %>%
addTiles()
map <- map %>%
addMarkers(lat=55.45, lng=37.37,
popup="The Kremlin and Red Square , Moscow" )
map
|
0707df95fd96e820c182c79e70aac5c9dbd29e1f
|
4630a28100fbb60d6dbaf71540c0547346760bc3
|
/tests/testthat/test_install.R
|
caab05d389d8686c6e6f4b3b832332f8c4dd1f55
|
[] |
no_license
|
Bioconductor/BiocManager
|
e202aa74fb2db70cbfed2295958c88d416209d3f
|
125d50a723caaea36d3c27d241f78f7d96e2a3d7
|
refs/heads/devel
| 2023-09-01T01:22:18.656330
| 2023-08-21T20:11:04
| 2023-08-21T20:11:04
| 33,965,307
| 74
| 23
| null | 2023-09-08T13:39:13
| 2015-04-15T01:04:01
|
R
|
UTF-8
|
R
| false
| false
| 12,099
|
r
|
test_install.R
|
context("install()")
test_that("Arguments are validated", {
expect_error(
install("foo", "bar"),
"all '...' arguments to 'install\\(\\)' must be named"
)
expect_error(install(TRUE), "is.character\\(pkgs\\) is not TRUE")
expect_error(install(ask="foo"), "is.logical\\(ask\\) is not TRUE")
})
test_that("Helpers filter the right packages", {
.install_filter_r_repos <- BiocManager:::.install_filter_r_repos
.install_filter_github_repos <- BiocManager:::.install_filter_github_repos
r <- "foo"
http <- c("http://foo.bar/baz", "https://foo.bar/baz")
github <- c("foo/bar", "foo/bar@baz")
all <- c(r, http, github)
expect_identical(c(r, http), .install_filter_r_repos(all))
expect_identical(github, .install_filter_github_repos(all))
})
test_that(".install_repos() works", {
.skip_if_misconfigured()
skip_if_offline()
repos <- repositories()
old_pkgs <- matrix(
c("pkgB", "/home/user/dir"), 1, 2,
dimnames=list("pkgB", c("Package", "LibPath")))
inst_pkgs <- matrix(
c("pkgA", "/home/user/dir"), 1, 2,
dimnames=list("pkgA", c("Package", "LibPath")))
expect_identical(
character(0),
.install_repos(
character(), old_pkgs, inst_pkgs, repos = repos, force = FALSE
)
)
})
test_that(".install_github() works", {
.skip_if_misconfigured()
skip_if_offline()
repos <- repositories()
expect_identical(
character(0),
.install_github(character(), repos = repos, update = FALSE, ask = TRUE)
)
})
test_that("Versions are checked in install", {
expect_error(install(version = "0.1"))
expect_error(install(1:3))
expect_error(install(NA))
expect_error(install(c("BioStrings", "S4Vectors", NA)))
expect_error(install(site_repository = c("string1", "string2")))
expect_error(install(TRUE))
expect_error(install(ask = "No"))
expect_error(install(ask = c("No", "Yes", NA)))
expect_error(install(version = c("3.7", "3.6")))
expect_error(install(version = character()))
expect_error(install(version = ""))
expect_error(install(version = "3.4.2"))
})
test_that("pkgs are not re-downloaded when force=FALSE", {
.filter <- BiocManager:::.install_filter_up_to_date
old_pkgs <- matrix(
c("pkgB", "/home/user/dir"), 1, 2,
dimnames=list("pkgB", c("Package", "LibPath")))
inst_pkgs <- matrix(
c("pkgA", "pkgB", "/home/user/dir", "/home/user/dir"), 2, 2,
dimnames=list(c("pkgA", "pkgB"), c("Package", "LibPath")))
# installed and not old
expect_warning(.filter("pkgA", inst_pkgs, old_pkgs, FALSE))
# installed and not old but force
expect_identical(.filter("pkgA", inst_pkgs, old_pkgs, TRUE), "pkgA")
# installed and old
expect_identical(.filter("pkgB", inst_pkgs, old_pkgs, FALSE), "pkgB")
expect_identical(.filter("pkgB", inst_pkgs, old_pkgs, TRUE), "pkgB")
# not installed and no info on old
expect_identical(.filter("pkgC", inst_pkgs, old_pkgs, FALSE), "pkgC")
})
context("install(update = TRUE) filters un-updatable packages")
test_that("masked packages are filtered", {
.filter <- BiocManager:::.package_filter_masked
pkgs0 <- matrix(
character(), 0, 2,
dimnames=list(NULL, c("Package", "LibPath")))
expect_identical(pkgs0, .filter(pkgs0))
paths <- c(tempfile(), tempfile())
for (path in paths) dir.create(path)
oLibPaths <- .libPaths()
on.exit(.libPaths(oLibPaths))
.libPaths(paths)
pkgs <- matrix(
c("Foo", "Bar", "Baz", "Bim", paths, paths), 4, 2,
dimnames=list(c("Foo", "Bar", "Baz", "Bim"), c("Package", "LibPath")))
expect_identical(pkgs, .filter(pkgs))
expect_identical(pkgs[c(1, 3, 2),], .filter(pkgs[c(1, 3, 2),]))
pkgs <- matrix(
c("Foo", "Bar", "Foo", paths, paths[2]), 3, 2,
dimnames=list(c("Foo", "Bar", "Foo"), c("Package", "LibPath")))
expect_identical(pkgs[1:2,], .filter(pkgs))
pkgs <- pkgs[3:1,]
expect_identical(pkgs[2:3,], .filter(pkgs))
})
test_that("unwriteable packages are not considered", {
.filter <- BiocManager:::.package_filter_unwriteable
## setup
dir.create(p0 <- tempfile())
on.exit(unlink(p0, recursive=TRUE))
pkgs0 <- matrix(
character(), 0, 2,
dimnames=list(NULL, c("Package", "LibPath")))
pkgs <- pkgs0
expect_identical(pkgs, .filter(pkgs, NULL))
expect_identical(pkgs, .filter(pkgs, character()))
expect_identical(pkgs, .filter(pkgs, tempdir()))
pkgs <- matrix(c("Foo", p0), 1, byrow=TRUE,
dimnames=list("Foo", c("Package", "LibPath")))
expect_identical(pkgs, .filter(pkgs, NULL))
expect_identical(pkgs, .filter(pkgs, p0))
p1 <- tempfile()
pkgs <- matrix(c("Foo", p1), 1, byrow=TRUE,
dimnames=list("Foo", c("Package", "LibPath")))
expect_identical(pkgs[FALSE,, drop=FALSE], .filter(pkgs, NULL))
expect_identical(pkgs[FALSE,, drop=FALSE], .filter(pkgs, p1))
expect_identical(pkgs, .filter(pkgs, p0))
pkgs <- matrix(
c("Foo", p0, "Bar", p1, "Baz", p0), 3, 2, byrow=TRUE,
dimnames=list(c("Foo", "Bar", "Baz"), c("Package", "LibPath")))
expect_identical(pkgs[c(1, 3),], .filter(pkgs, NULL))
expect_identical(pkgs, .filter(pkgs, p0))
expect_identical(pkgs0, .filter(pkgs, p1))
expect_message(.filter(pkgs, NULL), "^Installation paths not writeable")
if (.Platform$OS.type == "windows")
## how to create a read-only directory?
return(TRUE)
isDirRnW <- dir.create(p2 <- tempfile(), mode="0400") # read but not write
skip_if_not(isDirRnW)
pkgs <- matrix(c("Foo", p2), 1, byrow=TRUE,
dimnames=list("Foo", c("Package", "LibPath")))
expect_identical(pkgs0, .filter(pkgs, NULL))
pkgs <- matrix(
c("Foo", p0, "Bar", p2, "Baz", p0), 3, 2, byrow=TRUE,
dimnames=list(c("Foo", "Bar", "Baz"), c("Package", "LibPath")))
expect_identical(pkgs[c(1, 3),], .filter(pkgs, NULL))
expect_identical(pkgs0, .filter(pkgs, p2))
Sys.chmod(p2, mode="0700")
unlink(p2, recursive=TRUE)
})
test_that("packages can be written", {
skip("too idiosyncratic for standardized testing")
lib <- system.file(package="BiocManager", "tests", "cases",
"lib", "Biobase")
dir.create(locked <- tempfile())
file.copy(lib, locked, recursive=TRUE)
oLibPaths <- .libPaths()
on.exit(.libPaths(oLibPaths))
.libPaths(c(locked, .libPaths()))
Sys.chmod(locked, mode="0500")
install()
Sys.chmod(locked, mode="0700")
})
context("install(version =, ask=...) works")
test_that(".install_ask_up_or_down_grade() works non-interactively", {
skip_if(interactive())
expect_equal(
FALSE,
.install_ask_up_or_down_grade("xx", npkgs = 1L, cmp = 1L, ask = TRUE)
)
expect_equal(
TRUE,
.install_ask_up_or_down_grade("xx", npkgs = 1L, cmp = 1L, ask = FALSE)
)
})
test_that("install() fails with different version (non-interactive)", {
map <- BiocManager:::.version_map()
incr <- 1L
version <-
package_version(paste(version()$major, version()$minor + incr, sep="."))
expect_error(install(version = version))
})
test_that("install() passes the force argument to .install", {
.skip_if_misconfigured()
skip_if_offline()
expect_true(
with_mock(
`BiocManager:::.install` = function(...) {
list(...)[['force']]
},
`BiocManager:::.version_compare` = function(...) {
0L
},
suppressMessages(
install(force = TRUE, update = FALSE)
)
)
)
expect_false(
with_mock(
`BiocManager:::.install` = function(...) {
list(...)[['force']]
},
`BiocManager:::.version_compare` = function(...) {
0L
},
suppressMessages(
install(force = FALSE, update = FALSE)
)
)
)
expect_true(
with_mock(
`BiocManager:::.install` = function(...) {
list(...)[['force']]
},
`BiocManager:::.version_compare` = function(...) {
1L
},
`BiocManager:::.install_n_invalid_pkgs` = function(...) {
0L
},
`BiocManager:::.install_updated_version` = function(...) {
pkgs <<- list(...)[['force']]
},
suppressMessages(
install(force = TRUE, update = FALSE, ask = FALSE)
)
)
)
expect_false(
with_mock(
`BiocManager:::.install` = function(...) {
list(...)[['force']]
},
`BiocManager:::.version_compare` = function(...) {
1L
},
`BiocManager:::.install_n_invalid_pkgs` = function(...) {
0L
},
`BiocManager:::.install_updated_version` = function(...) {
pkgs <<- list(...)[['force']]
},
suppressMessages(
install(force = FALSE, update = FALSE, ask = FALSE)
)
)
)
expect_false(
with_mock(
`BiocManager:::.install` = function(...) {
list(...)[['update']]
},
`BiocManager:::.version_compare` = function(...) {
1L
},
`BiocManager:::.install_n_invalid_pkgs` = function(...) {
0L
},
`BiocManager:::.install_updated_version` = function(...) {
pkgs <<- list(...)[['update']]
},
suppressMessages(
install(force = FALSE, update = FALSE, ask = FALSE)
)
)
)
expect_false(
with_mock(
`BiocManager:::.install` = function(...) {
list(...)[['ask']]
},
`BiocManager:::.version_compare` = function(...) {
1L
},
`BiocManager:::.install_n_invalid_pkgs` = function(...) {
0L
},
`BiocManager:::.install_updated_version` = function(...) {
pkgs <<- list(...)[['ask']]
},
suppressMessages(
install(force = FALSE, update = FALSE, ask = FALSE)
)
)
)
expect_null(
with_mock(
`BiocManager:::.install` = function(...) {
list(...)[['checkBuilt']]
},
`BiocManager:::.version_compare` = function(...) {
1L
},
`BiocManager:::.install_n_invalid_pkgs` = function(...) {
0L
},
`BiocManager:::.install_updated_version` = function(...) {
pkgs <<- list(...)[['checkBuilt']]
},
suppressMessages(
install(
force = FALSE, checkBuilt = TRUE,
update = FALSE, ask = FALSE
)
)
)
)
})
test_that("install() without package names passes ... to install.packages", {
.skip_if_misconfigured()
object <- FALSE
with_mock(
available.packages = function(...) {
cbind(
Package = "BiocGenerics", Version = "0.33.0",
LibPath = .libPaths()[1]
)
},
old.packages = function(...) {
## claim that BiocGenerics is out-of-date
cbind(
Package = "BiocGenerics", Version = "0.32.0",
LibPath = .libPaths()[1]
)
},
install.packages = function(pkgs, ..., INSTALL_opts) {
object <<-
identical(pkgs, c(Package = "BiocGenerics")) &&
identical(INSTALL_opts, "--build")
},
install(ask = FALSE, INSTALL_opts = "--build")
)
expect_true(object)
})
|
80fcf7051825536fd8a5c6a76fd5fee5db9a9d63
|
2ba6f0a982c3092e70de12fff4ccac047feecab0
|
/pkg/tests/CDS.test.R
|
82dd33b26cd4723ede884e2fd5bdd0b9e8c65eca
|
[] |
no_license
|
kanishkamalik/CDS2
|
7b77cce8a5ae9fa367ff6818311ea8f745affbab
|
18644a02c9e8031de42d548618717b09ed327053
|
refs/heads/master
| 2020-04-05T03:15:34.360446
| 2014-05-03T16:29:47
| 2014-05-03T16:29:47
| 19,469,784
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 625
|
r
|
CDS.test.R
|
## CDS.R test case
library(CDS)
## truth1 <- CDS(TDate = "2014-01-14",
## maturity = "5Y",
## parSpread = 32,
## couponRate = 100,
## recoveryRate = 0.4,
## isPriceClean = FALSE,
## notional = 1e7)
## save(truth1, file = "CDS.test.RData")
load("CDS.test.RData")
result1 <- CDS(TDate = "2014-01-14",
maturity = "5Y",
parSpread = 32,
couponRate = 100,
recoveryRate = 0.4,
isPriceClean = FALSE,
notional = 1e7)
stopifnot(all.equal(truth1, result1))
|
82d2d8121349aac590b073e196d3aea004853694
|
2a7655dc0c233967a41b99369eed3eb4a6be3371
|
/3-Get_Earth_Observations/Meteorological_variables/Process_NAM_data_step5_lower_RAM.R
|
b67842d0a72bb104d3a56876469e40c56262e14f
|
[
"MIT"
] |
permissive
|
earthlab/Western_states_daily_PM2.5
|
0977b40d883842d7114139ef041e13a63e1f9210
|
3f5121cee6659f5f5a5c14b0d3baec7bf454d4bb
|
refs/heads/master
| 2023-02-25T14:32:20.755570
| 2021-02-04T00:08:03
| 2021-02-04T00:08:03
| 117,896,754
| 2
| 1
| null | 2021-01-27T22:19:14
| 2018-01-17T21:48:29
|
R
|
UTF-8
|
R
| false
| false
| 13,889
|
r
|
Process_NAM_data_step5_lower_RAM.R
|
# Process_NAM_data_step5.R - take 24-hr summaries of NAM weather data
#### Clear variables and sinks; define working directory ####
rm(list = ls()) # clear all variables
options(warn = 2) # throw an error when there's a warning and stop the code from running further
if (max(dev.cur())>1) { # make sure it isn't outputting to any figure files
dev.off(which = dev.cur())
} # if (max(dev.cur())>1) {
while (sink.number()>0) { # close any sink files
sink()
} # while (sink.number()>0) {
working.directory <- "/home/rstudio" # define working directory
setwd(working.directory) # set working directory
#### Call Packages (Library) ####
library(parallel) # see http://gforge.se/2015/02/how-to-go-parallel-in-r-basics-tips/
library(lubridate) # https://cran.r-project.org/web/packages/lubridate/lubridate.pdf
#### Source functions I've written ####
source(file.path("estimate-pm25","General_Project_Functions","general_project_functions.R"))
functions_list <-c("replace_character_in_string.fn","define_file_paths.fn") # put functions in a vector to be exported to cluster
#### Define Constants ####
NAM_folder <- "NAM_data" # define folder for NAM data
input_sub_folder <- "NAM_Step4" # define location of input files
input_sub_sub_folder <- "NAM_Step4_Intermediary_Files" # define subfolder location
output_sub_folder <- "NAM_Step5" # define location for output files
output_file_name <- paste("NAM_Step5_processed_",Sys.Date(),sep = "") # define name of output file
this_batch_date <- define_study_constants.fn("NAM_batch_date") # get batch date
output_sub_sub_folder <- paste("NAM_Step5_batch",this_batch_date,sep = "") # define output sub-sub-folder
# create NAM_Step5 folder if it doesn't already exist
if(dir.exists(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder)) == FALSE) { # create directory if it doesn't already exist
dir.create(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder))
} # if(exists(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder)) == FALSE) { # create directory if it doesn't already exist
# create NAM_Step5 sub-folder if it doesn't already exist
if(dir.exists(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder,output_sub_sub_folder)) == FALSE) { # create directory if it doesn't already exist
dir.create(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder,output_sub_sub_folder))
} # if(exists(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder)) == FALSE) { # create directory if it doesn't already exist
#### Load and Process Data ####
# Step 4 intermediary files
file_name_pattern <- "\\.csv$" # only looking for .csv files (don't want to pick up the sub-folder)
step4_file_list <- list.files(path = file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,input_sub_folder,input_sub_sub_folder,"."), pattern = file_name_pattern, all.files = FALSE,
full.names = FALSE, recursive = FALSE,
ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE) # get list of all .csv file in this folder
print(paste("There are ",length(step4_file_list),"files for NAM Step 4 data (Intermediary files)")) # optional output statement
date_list <- unlist(lapply(step4_file_list, function(x){ # start lapply and start defining function used in lapply
data_date <- substr(x,nchar(x)-35,nchar(x)-39) # identify the time stamp for the file in this iteration
return(data_date) # return the new file name so a new list of files can be created
}))
print(paste("there are",length(step4_file_list),"NAM Step4 files to be processed"))
# load information about meteo variables
this_source_file <- paste("MeteoVariablesNAM.csv")
MeteoVarsMultiType <- read.csv(file.path(define_file_paths.fn("NAM_Code.directory"),this_source_file))
# grab the list of relevant meteo variables for this file type from MeteoVars
which_meteo <- which(MeteoVarsMultiType$file_type == "grib2") # get grib2 files because grib1 files will be converted to grib2
MeteoVars <- MeteoVarsMultiType[which_meteo,] # matrix with just the relevant rows
all_dates <- seq(as.Date(define_study_constants.fn("start_date")), as.Date(define_study_constants.fn("end_date")), by="days")#unique(Step4_NAM_data$Local.Date)
#### Set up for parallel processing ####
n_cores <- detectCores() - 1 # Calculate the number of cores
print(paste(n_cores,"cores available for parallel processing",sep = " "))
this_cluster <- makeCluster(n_cores) # # Initiate cluster
clusterExport(cl = this_cluster, varlist = c("this_batch_date","step4_file_list","all_dates","NAM_folder","input_sub_folder","input_sub_sub_folder","output_sub_folder","output_sub_sub_folder","step4_file_list","MeteoVars",functions_list), envir = .GlobalEnv) # export functions and variables to parallel clusters (libaries handled with clusterEvalQ)
#### call parallel function ####
print("start parLapply function")
# X = 1:length(all_dates)
par_output <- parLapply(this_cluster,X = 1:length(all_dates), fun = function(x){ # call parallel function
this_date <- all_dates[x] # get the date to be processed in this iteration
this_next_day <- this_date+1 # get the date after the date to be processed
print(paste("Processing NAM data for",this_date))
new_file_name <- paste("NAM_Step5_",this_date,"_batch",this_batch_date,".csv",sep = "") # name of file to be output
if (file.exists(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder,output_sub_sub_folder,new_file_name))) { # does this file already exist?
print(paste(new_file_name,"already exists and will not be processed again"))
} else { # file does not exist # if (file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder,output_sub_sub_folder,new_file_name)) { # does this file already exist?
print(paste(new_file_name,"does not yet exist and needs to be processed"))
# list all files that could have data for this date (depends on daylight savings and time zone)
files_to_check <- c(paste("Step4_NAM_Step2_",this_date,"_00UTC_batch",this_batch_date,"_time.csv",sep = ""),
paste("Step4_NAM_Step2_",this_date,"_06UTC_batch",this_batch_date,"_time.csv",sep = ""),
paste("Step4_NAM_Step2_",this_date,"_12UTC_batch",this_batch_date,"_time.csv",sep = ""),
paste("Step4_NAM_Step2_",this_date,"_18UTC_batch",this_batch_date,"_time.csv",sep = ""),
paste("Step4_NAM_Step2_",this_next_day,"_00UTC_batch",this_batch_date,"_time.csv",sep = ""),
paste("Step4_NAM_Step2_",this_next_day,"_06UTC_batch",this_batch_date,"_time.csv",sep = ""))
which_files_present <- which(files_to_check %in% step4_file_list) # which of the files listed exist?
if (length(which_files_present) > 0) { # only try to process data if there is data to process
files_to_process <- files_to_check[which_files_present] # list of the files that exist that could have data for this local date
# Merge all of the files that could have data for this date into one data frame
NAM_data_date_step <- lapply(1:length(files_to_process), function(z){ # start of lapply to open each file
this_file_data <- read.csv(file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,input_sub_folder,input_sub_sub_folder,files_to_process[z])) # open file
}) # end of lapply - NAM_data_date_step <- lapply(1:length(files_to_process), function(z){
NAM_data_date_step <- do.call("rbind",NAM_data_date_step) # merge files into one data frame
NAM_data_date_step$Latitude <- round(NAM_data_date_step$Latitude,5) # round latitude to 5 digits
NAM_data_date_step$Longitude <- round(NAM_data_date_step$Longitude,5) # round longitude to 5 digits
NAM_data_date_step$Local.Date <- as.Date(NAM_data_date_step$Local.Date) # recognize dates as dates
NAM_data_date_step$Local.Date.Time <- as.Date(NAM_data_date_step$Local.Date.Time) # recognize datetime as such
NAM_data_date_step$TimeZone <- as.character(NAM_data_date_step$TimeZone) # recognize times zones as characters
print(paste("x = ",x,"date = ",this_date))
# isolate all data for this date
which_this_date <- which(NAM_data_date_step$Local.Date == this_date) # which rows have data for this local date?
NAM_data_date <- NAM_data_date_step[which_this_date, ] # data frame with data for just this local date
rm(NAM_data_date_step) # clear variable
All_date_loc <- unique(NAM_data_date[ ,c("Latitude","Longitude")]) # get a list of dates/locations
# cycle through all locations on this date
Step5_NAM_date_list <- lapply(X = 1:dim(All_date_loc)[1], FUN = function(y){ # start lapply and start defining function used in lapply
#print(paste("location y_i =",y))
# find all data points with this date/loc
which_this_date_loc <- which(NAM_data_date$Latitude == All_date_loc[y, c("Latitude")] & NAM_data_date$Longitude == All_date_loc[y, c("Longitude")])
this_date_loc_step <- NAM_data_date[which_this_date_loc, ] # data frame with data for this location on this date (of this iteration)
rm(which_this_date_loc)
drop_cols <- c("State_FIPS", "County_FIPS","Tract_code","ZCTA5_code")
this_date_loc_step2 <- this_date_loc_step[ , !(names(this_date_loc_step) %in% drop_cols)] # drop columns from data frame
rm(this_date_loc_step) # clear variable
this_date_loc <- this_date_loc_step2[!duplicated(this_date_loc_step2), ]
rm(this_date_loc_step2)
# can have 5 on the daylight savings switchover, but there should never be more than 5 rows
if (dim(this_date_loc)[1]>5) {stop(paste("Check code and data - should not have more than 5 NAM data points for given day/location. date = ",all_dates[x]," x=",x," y=",y))}
Step5_NAM_row <- data.frame(matrix(NA,nrow=1,ncol=length(colnames(NAM_data_date)))) # create data frame for input_mat1
names(Step5_NAM_row) <- colnames(NAM_data_date) # assign the header to input_mat1
# drop extraneous columns that don't apply to 24-hr data
drop_cols <- c("Time.UTC","Date","Local.Date.Time","UTC.Date.Time") # define unnecessary columns
Step5_NAM_row <- Step5_NAM_row[ , !(names(Step5_NAM_row) %in% drop_cols)] # drop unnecessary columns
Step5_NAM_row[1, c("Latitude","Longitude", "TimeZone")] <- unique(this_date_loc[ , c("Latitude","Longitude", "TimeZone")]) # input meta data into step 5
Step5_NAM_row$Local.Date <- unique(this_date_loc$Local.Date) # input dates
for (meteo_var_counter in 1:dim(MeteoVars)[1]) { # cycle through variables(levels) of interest
#print(meteo_var_counter)
thisMeteo_var_Name <- MeteoVars[meteo_var_counter,c("VariableName")] # get variable full name
thisMeteo_variable <- MeteoVars[meteo_var_counter,c("VariableCode")] # get variable coded name
thisMeteo_level <- MeteoVars[meteo_var_counter,c("AtmosLevelCode")] # get variable level name
thisMeteo_units <- MeteoVars[meteo_var_counter,c("Units")] # get variable units
thisMeteo_24_summary <- MeteoVars[meteo_var_counter,c("X24.hr.summary")]
this_col_name_step <- as.character(paste(thisMeteo_variable,".",thisMeteo_level,sep = ""))
this_col_name <- replace_character_in_string.fn(input_char = this_col_name_step,char2replace = " ",replacement_char = ".")
#print(this_col_name)
if (thisMeteo_24_summary == "max") {
this_meteo_value <- max(this_date_loc[ , this_col_name]) # what is the value for this variable at this level?
} else if (thisMeteo_24_summary == "mean") {
this_meteo_value <- mean(this_date_loc[ , this_col_name]) # what is the value for this variable at this level?
} else if (thisMeteo_24_summary == "sum") {
this_meteo_value <- sum(this_date_loc[ , this_col_name]) # what is the value for this variable at this level?
}
Step5_NAM_row[1, this_col_name] <- this_meteo_value
} # for (meteo_var_counter in 1:dim(MeteoVars)[1]) { # cycle through variables(levels) of interest
return(Step5_NAM_row)
}) # end of lapply function # Step5_NAM_date_list <- lapply(X = 1:dim(All_date_loc)[1], FUN = function(y){ # start lapply and start defining function used in lapply
Step5_NAM_date <- do.call("rbind", Step5_NAM_date_list) # re-combine data for all locations for this date
#new_file_name <- paste("NAM_Step5_",this_date,"_batch",this_batch_date,".csv",sep = "")
write.csv(Step5_NAM_date,file = file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder,output_sub_sub_folder,new_file_name),row.names = FALSE) # write data for this date to file
} # if (length(which_files_present) > 0) { # only try to process data if there is data to process
} # if (file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder,output_sub_sub_folder,new_file_name)) { # does this file already exist?
return(1) # output from function #(Step5_NAM_date)
} ) # call parallel function
# #### Combine output from parLapply/lapply ####
#print("combine output from parLapply")
# #NAM_data <- do.call("rbind", par_output) #concatinate the output from each iteration
# # write step 5 data to csv file
# print("Write Step 5 data to file")
# write.csv(NAM_data,file = file.path(define_file_paths.fn("ProcessedData.directory"),NAM_folder,output_sub_folder,paste(output_file_name,".csv",sep = "")),row.names = FALSE) # write data to file
#### End use of parallel computing #####
stopCluster(this_cluster) # stop the cluster
rm(this_cluster,par_output)
# clear variables
rm(NAM_folder,input_sub_folder,output_sub_folder,output_file_name,working.directory) # NAM_data,
rm(MeteoVars,MeteoVarsMultiType)#,Step4_NAM_data)
print(paste("Process_NAM_data_step5.R completed at",Sys.time(),sep = " ")) # print time of completion to sink file
|
bf1625810880a0f186dca700508a56fce82d522f
|
7365ceab9a0ecb9ff25a00ac3375ec472b57a989
|
/Singularity_analysis_KED.R
|
f045a7356758381588af5f24f620f9a54c47efb3
|
[] |
no_license
|
fcecinati/R
|
ffb471b8709b7c86be7cd98779575e7bb7030985
|
e96466d434b970e446bd81488eb52777bd449f95
|
refs/heads/master
| 2021-01-20T17:19:26.543460
| 2016-07-20T13:57:40
| 2016-07-20T13:57:40
| 63,785,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,144
|
r
|
Singularity_analysis_KED.R
|
########################################################################################################################
########## This scripts performs KED merging with singularity analysis ########
########################################################################################################################
rm(list = ls()) # clean memory
gc()
######################################## Input ###################################################
# Rain gauge file
RGfile <- "/home/fc14509/UK/P3/Dataset/EA_RG/EA"
RGdates <- "/home/fc14509/UK/P3/Dataset/EA_RG/dates"
RGcoord <- "/home/fc14509/UK/P3/Dataset/EA_RG/coord"
# Radar folder
Radfolder <- "/home/fc14509/UK/P3/Dataset/Radar/"
Radcoord <- "/home/fc14509/UK/P3/Dataset/Radar/alex_catchment_xy.txt"
# Projection parameters in proj4 format
myproj <- "+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +ellps=airy +datum=OSGB36 +units=m +no_defs"
# Transformed variable folder
Transformed <- "/home/fc14509/UK/P3/Transformed/Singularity Analysis/"
# Result folder
Results <- "/home/fc14509/UK/P3/Results/Singularity Analysis/"
###################################### load libraries ####################################
library(sp)
library(maptools)
library(gstat)
library(spacetime)
library(raster)
library(minpack.lm)
library(lubridate)
library(plyr)
library(gdata)
library(ggplot2)
library(plotKML)
library(RSAGA)
library(abind)
source("Negentropy.R")
###########################################################################################
# load rain gauge data, coord, and dates
load(RGfile)
RG <- EA
RG_points <- dim(RG)[2]
RG_steps <- dim(RG)[1]
RG[RG<0] <- 0
RG_ns <- RG
load(RGdates)
RGdates <- dates
load(RGcoord)
RGcoord <- coord
toremove <- which(RGcoord[,1]<320000 | RGcoord[,1]>520000 | RGcoord[,2]<350000 | RGcoord[,2]>550000)
RG <- RG[,-toremove]
# --- Radar / rain-gauge merging for 2009 ---------------------------------
# NOTE(review): this chunk starts mid-script; RGcoord, toremove, EA, dates,
# coord, RG, RGdates, RG_ns, Radfolder, Radcoord, myproj, Transformed and
# Results are assumed to have been defined earlier in the file.
RGcoord <- RGcoord[-toremove,]
rm(EA,dates,coord)
# loop on the monthly radar files
for(i in 1:12){
#load the radar data (first 6 columns are the time stamp, the rest are pixel values)
radfile <- paste0(Radfolder, "radar2009", sprintf("%02d",i), ".txt")
rad <- read.table(radfile)
raddates <- rad[,1:6]
rad <- rad[,7:dim(rad)[2]]
radcoord <- read.table(Radcoord)
rad_steps <- dim(rad)[1]
# rad_ns / ked_ns are overwritten below, row by row, with the rescaled
# radar field and the kriging predictions (one row per time step)
rad_ns <- rad
ked_ns <- rad
#loop on the available time steps
for (j in 1:rad_steps) {
# Prepare the spatial data: radar pixels (R) and rain gauges (G) matched
# to the same year/month/day/hour
R <- data.frame(t(rad[j,]), radcoord)
names(R) <- c("rain","x","y")
dat <- raddates[j,]
RG_position <- which(RGdates[,1]==as.numeric(dat[1]) & RGdates[,2]==as.numeric(dat[2]) & RGdates[,3]==as.numeric(dat[3]) & RGdates[,4]==as.numeric(dat[4]))
G <- data.frame(t(RG[RG_position,]),RGcoord)
names(G) <- c("rain","x","y")
G <- G[complete.cases(G),]
coordinates(G) <- ~x+y
G@proj4string@projargs <- myproj
coordinates(R) <- ~x+y
bb <- R@bbox
bb[,2] <- bb[,2]+1000
# rasterize the radar point data onto a 1 km grid
R <- vect2rast(R, cell.size=1000, bbox=bb)
R@proj4string@projargs <- myproj
#loop on each pixel
X <- seq(from=R@bbox[1,1]+500, to=R@bbox[1,2]-500, by=1000)
Y <- seq(from=R@bbox[2,1]+500, to=R@bbox[2,2]-500, by=1000)
# aggregation scales (in pixels) and normalised scales used by the
# per-pixel log-log regression below
res <- c(1,3,5,7,9)
eps <- res/max(res)
rho <- matrix(0,1,length(res))
R_ns <- R
cr <- 0
xside <- R_ns@grid@cells.dim[1]
yside <- R_ns@grid@cells.dim[2]
l <- matrix(0,xside, yside)
for (r in res){
cr <- cr+1
# to make the areal average more efficient, we define a larger matrix with more cells on the external frame
# and we move the original matrix along x and y summing the cells at each step
# then we divide by the total number of steps taken
bigm <- matrix(0, xside+2*(r-1),yside+2*(r-1))
movex <- xside+2*(r-1) - xside +1
movey <- yside+2*(r-1) - yside +1
for (x in 1:movex){
for (y in 1:movey){
bigm[x:(x-1+xside),y:(y-1+yside)] <- bigm[x:(x-1+xside),y:(y-1+yside)] + as.matrix(R_ns)
}
}
bigm <- bigm/(movex*movey)
l<- abind(l,bigm[r:(xside+(r-1)),r:(yside+(r-1))], along=3)
}
l <- l[,,2:dim(l)[3]] # just to exclude the first layer, defined as zeros only to create the stack
cp <- 0
#loop on each pixel: regress log(areal average) on log(scale) and keep the
#intercept as the rescaled pixel value
for (x in 1:xside){
for (y in 1:yside){
cp <- cp+1
rho <- log(as.numeric(l[x,y,]))
df <- data.frame(log(eps),as.numeric(rho))
names(df) <- c("eps", "rho")
regression <- lm(rho~eps, data=df)
R_ns$rain[cp] <- coefficients(regression)[1]
}
}
# Save the results
rad_ns[j,] <- R_ns$rain
# Apply the correction to Rain Gauges as well
ratio <- (over(G,R_ns)+0.0001)/(over(G,R)+0.0001) #simple trick to avoid the division by 0 and excessive ratio values, it shouldn't really affect the other results
G$rain <- G$rain*ratio
G$radar <- over(G,R_ns)$rain
RG_ns[RG_position,is.na(RG_ns[RG_position,])==F] <- RG_ns[RG_position,is.na(RG_ns[RG_position,])==F]*t(ratio)
# Check for zeros in the radar at RG locations (indeterminate system)
checkzeros <- sum(G$rain)+sum(G$radar, na.rm=T)
rgcheckzeros <- sum(G$rain)
radcheckzeros <- sum(G$radar, na.rm=T)
if (checkzeros==0) { #Case in which both the radar and the gauges say it didn't rain: pred = 0, var = NA
ked <- R_ns
names(ked) <- c("pred")
ked$pred <- ked$pred*0
} else if (!(rgcheckzeros==0) & radcheckzeros==0) { # case in which it is impossible to do UK and we do OK
# variogram calculation
v <- variogram(rain~1, G)
v <- fit.variogram(v, vgm(nugget=0, psill=mean(v$gamma), range=30000, model="Exp"))
if(v[2,2]==0){ # If the fitted sill is zero, we change it to the mean of the observed variogram
v <- variogram(rain~1, G)
v=vgm(nugget=0, psill=mean(v$gamma), range=30000, model="Exp")
}
if (v[1,2]>v[2,2]){ # if the nugget is larger than the sill, we set them equal
v[1,2] <- v[2,2]
}
# Ordinary kriging
pred_grid <- R_ns
names(pred_grid) <- "radar"
ked <- krige(rain~1, G, newdata=pred_grid, v, na.action = na.pass)
names(ked) <- c("pred", "var")
} else {
# residual scaling for the KED variogram
if (sum((G$radar - mean(G$radar))^2)==0){
r2 <- 1
} else {
r2 <- 1-sum((G$radar-G$rain)^2)/sum((G$radar - mean(G$radar))^2)
}
# variogram calculation (on the radar field this time)
v <- variogram(rain~1, R_ns)
v <- fit.variogram(v, vgm(nugget=0, psill=mean(v$gamma), range=30000, model="Exp"))
if(v[2,2]==0){
v <- variogram(rain~1, R_ns)
v=vgm(nugget=0, psill=mean(v$gamma), range=30000, model="Exp")
}
if (v[1,2]>v[2,2]){
v[1,2] <- v[2,2]
}
# shrink the sill in proportion to the variance explained by the radar
v[2,2] <- v[2,2]*(1-r2)
# Universal kriging (kriging with the radar field as external drift)
pred_grid <- R_ns
names(pred_grid) <- "radar"
ked <- krige(rain~radar, G, newdata=pred_grid, v, na.action = na.pass)
names(ked) <- c("pred", "var")
}
ked_ns[j,] <- ked$pred
}
save(rad_ns, file=paste0(Transformed, "radar2009", sprintf("%02d",i), "_ns"))
save(ked_ns, file=paste0(Results, "ked2009", sprintf("%02d",i), "_ns"))
}
save(RG_ns, file=paste0(Transformed, "EA_ns"))
|
9ccd61832fa7f13bcb92e9505bbf1b30b190b734
|
e37580222b47cd2811fd90d876dbe54e295039b7
|
/man/CurrencyPair.Rd
|
674ddfa701db93d7df2fc7cb7afe6430c44ff06d
|
[] |
no_license
|
farzadwp/fmbasics
|
0c9183244c25be3f4e77474405ad7dae15a27419
|
6fee375ceec0ac5ff199e9b3dfd146c65078a476
|
refs/heads/master
| 2020-06-12T18:08:01.217445
| 2019-11-28T02:31:47
| 2019-11-28T02:31:47
| 181,423,968
| 0
| 0
| null | 2019-04-15T06:17:02
| 2019-04-15T06:17:02
| null |
UTF-8
|
R
| false
| true
| 748
|
rd
|
CurrencyPair.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/currency-pair-class.R
\name{CurrencyPair}
\alias{CurrencyPair}
\title{CurrencyPair class}
\usage{
CurrencyPair(base_ccy, quote_ccy, calendar = NULL)
}
\arguments{
\item{base_ccy}{a \link[=Currency]{Currency} object}
\item{quote_ccy}{a \link[=Currency]{Currency} object}
\item{calendar}{a \link[=JointCalendar]{JointCalendar} object. Defaults to
\code{NULL} which sets this to the joint calendar of the two currencies and
removes any \link[=USNYCalendar]{USNYCalendar} object to allow currency pair
methods to work correctly}
}
\value{
a \code{CurrencyPair} object
}
\description{
Create an object of class \code{CurrencyPair}
}
\examples{
CurrencyPair(AUD(), USD())
}
|
f1a031a761a44259ccb0ce4922be37c7f451358a
|
961727a721c8e1a02b52c69bfb9d274d6dca2b3c
|
/server.R
|
5ad810fed7cdd5ab01561be58315cbdc19be0fc9
|
[] |
no_license
|
olivierchabot17/coin_flips
|
a62524704d24b306a8f40f6ce6d1c1d7f0c40ee1
|
17947f74ccd2a36b6af902723b21d1a48dace1b0
|
refs/heads/main
| 2023-07-02T20:57:51.282818
| 2021-07-29T12:41:42
| 2021-07-29T12:41:42
| 390,719,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,989
|
r
|
server.R
|
# Load Libraries
library(shiny)
library(tidyverse) #dplyr and ggplot would suffice
# Server logic for the coin-flipping app: two teams of n_players each flip
# coins (Team Big flips n_flips_big per player, Team Small n_flips_small);
# the app compares the best per-player proportion of heads on each team.
shinyServer(function(input, output) {
# Only flip the coins when the "Let it flip!" button is pressed
big_sample <- eventReactive(input$run, {
# Create an empty dataframe to be filled by the for loop
df <- data.frame(
matrix(
data = NA, nrow = input$n_players, ncol = input$n_flips_big,
dimnames = list(
paste("player_", 1:input$n_players, sep = ""),
paste("flip_", 1:input$n_flips_big, sep = "")
)
))
# Flip coins for the big team
for(i in 1:input$n_players){
df[i, ] <- sample(
x = c(1, 0), # Head == 1, Tails == 0
size = input$n_flips_big,
replace = TRUE,
prob = c(input$prob_heads, 1 - input$prob_heads)
)
}
df
})
# Only flip the coins when the "Let it flip!" button is pressed
small_sample <- eventReactive(input$run, {
# Create an empty dataframe to be filled by the for loop
df <- data.frame(
matrix(
data = NA, nrow = input$n_players, ncol = input$n_flips_small,
dimnames = list(
paste("player_", 1:input$n_players, sep = ""),
paste("flip_", 1:input$n_flips_small, sep = "")
)
))
# Flip coins for the small team
for(i in 1:input$n_players){
df[i, ] <- sample(
x = c(1, 0), # Head == 1, Tails == 0
size = input$n_flips_small,
replace = TRUE,
prob = c(input$prob_heads, 1 - input$prob_heads)
)
}
df
})
# Create output table objects that will be displayed in the "Raw Flips" Tab
output$big_raw <- renderTable(
big_sample()
)
output$small_raw <- renderTable(
small_sample()
)
# Summary Tables: per-player head counts and proportions for each team
big_summary <- reactive({
big_sample() %>%
rowwise() %>% # Get stats for each player
summarise(
n_heads = sum(c_across()),
prop_heads = n_heads/input$n_flips_big)
})
small_summary <- reactive({
small_sample() %>%
rowwise() %>%
summarise(
n_heads = sum(c_across()),
prop_heads = n_heads/input$n_flips_small)
})
# One row per team: best player's head count/proportion plus the team-wide
# proportion of heads over all players' flips
game_summary <- reactive({
rbind(
big_summary() %>% summarise(
team = "Big",
flips_per_player = input$n_flips_big,
max_n_heads = max(n_heads),
max_prop_heads = max(prop_heads),
total_prop = sum(n_heads) / (input$n_players * input$n_flips_big)
),
small_summary() %>% summarise(
team = "Small",
flips_per_player = input$n_flips_small,
max_n_heads = max(n_heads),
max_prop_heads = max(prop_heads),
total_prop = sum(n_heads) / (input$n_players * input$n_flips_small)
)
)
})
# Create output table objects that will be displayed in the "Results" Tab
output$big_row_sum <- renderTable(
big_summary()
)
output$small_row_sum <- renderTable(
small_summary()
)
# Create a summary table that will be displayed in the "Results" Tab
output$summary_table <- renderTable(
game_summary()
)
# Create a reactive text object to display who wins.
# NOTE(review): ties go to Team Big because of the >= comparison.
winner <- eventReactive(input$run, {
if(game_summary()$max_prop_heads[1] >= game_summary()$max_prop_heads[2]){
index <- 1
} else{
index <- 2
}
paste(
"Team",
game_summary()$team[index],
"won! Their best flipper flipped a head",
(game_summary()$max_prop_heads[index])*100,
"% of the time."
)
})
# Output simulation result sentence.
output$game_results <- renderText(
winner()
)
# Reactive dataframes with theoretical probabilities for each proportion for
# both teams (binomial pmf over the number of heads, expressed as a proportion)
prob_dist <- reactive({
rbind(
# Team Big
data.frame(matrix(data = NA, nrow = input$n_flips_big + 1, ncol = 1)) %>%
transmute(
team = "Big",
n_heads = 0:input$n_flips_big,
proportion = n_heads / input$n_flips_big,
probability = dbinom(
x = 0:input$n_flips_big,
size = input$n_flips_big,
prob = input$prob_heads
)
),
# Team Small
data.frame(matrix(data = NA, nrow = input$n_flips_small + 1, ncol = 1)) %>%
transmute(
team = "Small",
n_heads = 0:input$n_flips_small,
proportion = n_heads / input$n_flips_small,
probability = dbinom(
x = 0:input$n_flips_small ,
size = input$n_flips_small,
prob = input$prob_heads
)
)
)
})
# Plot theoretical probabilities for both teams using ggplot
output$distributions <- renderPlot({
prob_dist() %>%
ggplot(aes(x = proportion, y = probability, group = team, colour = team)) +
geom_point(alpha = 0.8) +
geom_vline(xintercept = input$prob_heads, linetype = "dashed", alpha = 0.2) +
scale_x_continuous(labels = scales::percent) +
scale_y_continuous(labels = scales::percent) +
theme_classic() +
labs(
title = "Theoretical Probability of Observing a Proportion of Heads",
x = "Proportion of Heads",
y = " Probability"
)
})
# Max Number of heads simulation
# "Head-to-Head" tab: repeat the whole game 'replications' times and record
# which team's best flipper achieved the higher proportion of heads each time
simulation <- eventReactive(input$sim, { # Only run when the button is pressed
# Empty dataframe to store results of simulation
df <- data.frame(matrix(data = NA, nrow = input$replications, ncol = 2))
for(i in 1:input$replications){
# Empty dataframe for flips of Team Big
df_big <- data.frame(
matrix(
data = NA,
nrow = input$n_players,
ncol = input$n_flips_big
)
)
# Flip the coins for Team Big
for(j in 1:input$n_players){
df_big[j, ] <- sample(
x = c(1, 0),
size = input$n_flips_big,
replace = TRUE,
prob = c(input$prob_heads, 1 - input$prob_heads)
)
}
# Count proportion of heads for each player
big_row_sum <- df_big %>%
rowwise() %>%
summarise(
n_heads = sum(c_across()),
prop_heads = n_heads/input$n_flips_big)
# Empty dataframe for flips of Team Small
df_small <- data.frame(
matrix(
data = NA,
nrow = input$n_players,
ncol = input$n_flips_small
)
)
# Flip the coins for Team Small
for(k in 1:input$n_players){
df_small[k, ] <- sample(
x = c(1, 0),
size = input$n_flips_small,
replace = TRUE,
prob = c(input$prob_heads, 1 - input$prob_heads)
)
}
# Count proportion of heads for each player
small_row_sum <- df_small %>%
rowwise() %>%
summarise(
n_heads = sum(c_across()),
prop_heads = n_heads/input$n_flips_small)
# Create new columns in results df with the highest proportions
df$max_big_prop[i] <- max(big_row_sum$prop_heads)
df$max_small_prop[i] <- max(small_row_sum$prop_heads)
}
# Add a third column with a logical that tests whether Team Small won
# (the two NA placeholder columns are dropped first)
df <- df %>%
select(-1, -2) %>%
mutate(
small_win = case_when(
max_big_prop >= max_small_prop ~ FALSE,
max_big_prop < max_small_prop ~ TRUE
)
)
df
})
# Output simulation table
output$sim_table <- renderTable(
simulation()
)
# Create a reactive text object
text_results <- eventReactive(input$sim, {
paste(
"The small team had a higher proportion of heads",
sum(simulation()$small_win),
"of the", input$replications, "replications (",
(sum(simulation()$small_win)/input$replications)*100, "%)."
)
})
# Output simulation result sentence.
output$sim_results <- renderText(
text_results()
)
}) # Close Server
|
9d6f5e97eb6808f8245ba62d5aa4b2ca428e169c
|
c25ca7b930919db4299d8ee392daa3ed5c651180
|
/man/prPrepareCss.Rd
|
abb52c3859030955dfacbe000dc615d61319cfe3
|
[] |
no_license
|
gforge/htmlTable
|
ecd0e56b54da74a085c5fc545c78181eb254fcb1
|
82ffe152c9b59559686a8a7bb74c9121d9539cf3
|
refs/heads/master
| 2022-07-21T22:01:41.100252
| 2022-07-07T18:14:47
| 2022-07-07T18:14:47
| 28,265,082
| 74
| 32
| null | 2022-07-06T14:53:56
| 2014-12-20T11:17:53
|
R
|
UTF-8
|
R
| false
| true
| 1,188
|
rd
|
prPrepareCss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/htmlTable_helpers_prepareCss.R
\name{prPrepareCss}
\alias{prPrepareCss}
\title{Prepares the cell style}
\usage{
prPrepareCss(
x,
css,
rnames,
header = NULL,
name = deparse(substitute(css)),
style_list = NULL
)
}
\arguments{
\item{x}{The matrix/data.frame with the data. For the \code{print} and \code{knit_print}
it takes a string of the class \code{htmlTable} as \code{x} argument.}
\item{css}{The CSS styles that are to be converted into
a matrix.}
\item{rnames}{Default row names are generated from \code{\link[base:colnames]{rownames(x)}}. If you
provide \code{FALSE} then it will skip the row names. \emph{Note:} For \code{data.frames}
if you do \code{\link[base:colnames]{rownames(my_dataframe) <- NULL}} it still has
row names. Thus you need to use \code{FALSE} if you want to
suppress row names for \code{data.frames}.}
\item{header}{A vector of character strings specifying column
header, defaulting to \code{\link[base:colnames]{colnames(x)}}}
\item{name}{The name of the CSS style that is prepared}
}
\value{
\code{matrix}
}
\description{
Prepares the cell style
}
\keyword{internal}
|
4514a10fdd8cc24b2ec2cae182d44b4388aa176e
|
076155f2a494bccb5f68654ca84a396904b3cbe0
|
/ui.R
|
c2e1949bf33a0d129f35b512dbd672c7ed926308
|
[] |
no_license
|
bactkinson/Plume_Detection_with_DBSCAN
|
db504d86004cdfcb9a758744274db7056a8df160
|
83d19f8ad8402a3737208137f1d08f79db4a4dc6
|
refs/heads/main
| 2023-06-09T12:24:43.694309
| 2023-05-30T03:41:47
| 2023-05-30T03:41:47
| 483,829,076
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,349
|
r
|
ui.R
|
## UI for the DBSCAN Plume Shiny App
## Sidebar collects the input file, the MinPts estimation parameter and the
## plotting choices; the main panel shows the time-series plot and a table.
ui <- fluidPage(
titlePanel("DBSCAN Plume Detection Tool"),
## Upload file button
sidebarLayout(
sidebarPanel(
fileInput("upload", "Upload Data File"),
## NOTE(review): despite the old "epsilon" comment, the label says this is
## the f value used for the MinPts estimate -- confirm against server.R
numericInput("f_val","f value for MinPts estimate", value = 0.01, step = 0.01),
## Select columns to analyze ("NULL" is a placeholder choice, presumably
## replaced server-side once a file is uploaded -- confirm)
checkboxGroupInput("analytes","Select the variables to be analyzed",
choices = c("NULL"),
select = NULL),
## Analyze action button
actionButton("analyze", "ANALYZE"),
## Select variable for x axis
selectInput("x_value", "Choose variable to be plotted on x-axis",
choices = NULL,
selected = NULL),
## Select variable for y axis
selectInput("y_value", "Choose variable to be plotted on y-axis",
choices = NULL,
selected = NULL),
## Save results button
downloadButton("results","Save output to .csv"),
## Save plot button
downloadButton("output_plot", "Save plot to .png")
),
mainPanel(
## Time series graphic
plotOutput("ts_plot"),
tableOutput("table_output")
)
)
)
|
6c33499ac98f5a122d971c4121bdb1ce6cebe4db
|
1678bf365571c0cacfb1aca1096ac3b613b71d81
|
/man/mlVAR0.Rd
|
7070908ffbaa121e7282626752a087b01863a25f
|
[] |
no_license
|
SachaEpskamp/mlVAR
|
88a3d8a4112e697ad2d057006c08e908ce03c784
|
99c9c1db70665a0b115227e6408db928e13bae96
|
refs/heads/master
| 2023-05-25T01:28:34.646653
| 2023-05-16T11:39:37
| 2023-05-16T11:39:37
| 25,199,915
| 3
| 6
| null | 2022-03-29T15:55:05
| 2014-10-14T09:43:01
|
R
|
UTF-8
|
R
| false
| false
| 6,751
|
rd
|
mlVAR0.Rd
|
\name{mlVAR0}
\alias{mlVAR0}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Multilevel VAR Estimation for Multiple Time Series
}
\description{
The function \code{mlVAR0} computes estimates of the multivariate vector autoregression model as introduced by Bringmann et al. (2013) which can be extended through treatment effects, covariates and pre- and post assessment effects.
FUNCTION IS DEPRECATED AND WILL BE REMOVED SOON.
}
\usage{
mlVAR0(data, vars, idvar, lags = 1, dayvar, beepvar,
periodvar, treatmentvar, covariates, timevar,
maxTimeDiff, control = list(optimizer = "bobyqa"),
verbose = TRUE, orthogonal, estimator = c("lmer",
"lmmlasso"), method = c("default", "stepwise",
"movingWindow"), laginteractions = c("none", "mains",
"interactions"), critFun = BIC, lambda = 0,
center = c("inSubject","general","none"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
Data frame
}
\item{vars}{
Vectors of variables to include in the analysis
}
\item{idvar}{
String indicating the subject ID
}
\item{lags}{
Vector indicating the lags to include
}
\item{dayvar}{
String indicating assessment day (if missing, every assessment is set to one day)
}
\item{beepvar}{
String indicating assessment beep per day (if missing, is added)
}
\item{periodvar}{
String indicating the period (baseline, treatment period, etc.) of assessment (if missing, every assessment is set to one period)
}
\item{treatmentvar}{
Character vector indicating treatment
}
\item{covariates}{
Character indicating covariates independent of assessment.
}
\item{timevar}{
Character indicating the time variable
}
\item{maxTimeDiff}{
Maximum time difference to include observation pairs
}
\item{control}{
A list of arguments sent to \code{\link[lme4]{lmerControl}}
}
\item{verbose}{
Logical to print progress to the console
}
\item{orthogonal}{
Logical to indicate if orthogonal estimation (no correlated random effects) should be used. Defaults to \code{FALSE} if the number of nodes is less than 6 and \code{TRUE} otherwise
}
\item{estimator}{
Estimator to use. Note: \code{lmmlasso} implementation is very experimental
}
\item{method}{
Method to use. Experimental
}
\item{laginteractions}{
Experimental, do not use.
}
\item{critFun}{
Experimental, do not use.
}
\item{lambda}{
lmmlasso lambda parameter
}
\item{center}{
Centering to be used. \code{"inSubject"} uses within-person centering, \code{"general"} uses grand-mean centering and \code{"none"} does not use centering. IMPORTANT NOTE: \code{"inSubject"} leads to coefficients to resemble within-person slopes, the other centering option leads to coefficients to be a blend of within and between person slopes.
}
}
\details{
mlVAR0 has been built to extract individual network dynamics by estimating a multilevel vector autoregression model that models the time dynamics of selected variables both within an individual and on group level. For example, in a lag-1-model each variable at time point t is regressed to a lagged version of itself at time point t-1 and all other variables at time point t-1. In psychological research, for example, this analysis can be used to relate the dynamics of symptoms on one day (as assessed by experience sampling methods) to the dynamics of these symptoms on the consecutive day. }
\value{
mlVAR0 returns a 'mlVAR0' object containing
\item{fixedEffects}{A matrix that contains all fixed effects coefficients with dependent variables as rows and the lagged independent variables as columns.}
\item{se.fixedEffects}{A matrix that contains all standard errors of the fixed effects.}
\item{randomEffects}{A list of matrices that contain the random effects coefficients.}
\item{randomEffectsVariance}{A matrix containing the estimated variances between the random-effects terms}
\item{pvals}{A matrix that contains p-values for all fixed effects.}
\item{pseudologlik}{The pseudo log-likelihood.}
\item{BIC}{Bayesian Information Criterion, i.e. the sum of all univariate models' BICs}
\item{input}{List containing the names of variables used in the analysis}
}
\references{
Bringmann, L. F., Vissers, N., Wichers, M., Geschwind, N., Kuppens, P., Peeters, F., ... & Tuerlinckx, F. (2013). A network approach to psychopathology: New insights into clinical longitudinal data. PloS one, 8(4), e60188.
}
\author{
Sacha Epskamp (mail@sachaepskamp.com), Marie K. Deserno (m.k.deserno@uva.nl) and Laura F. Bringmann (laura.bringmann@ppw.kuleuven.be)
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{fixedEffects}}, \code{\link{fixedEffects}}
}
\examples{
\dontrun{
### Small network ###
nVar <- 3
nPerson <- 25
nTime <- 25
# Simulate model and data:
Model <- mlVARsim0(nPerson,nVar,nTime,sparsity = 0.5)
# Run mlVAR0:
Res <- mlVAR0(Model)
# Compare true fixed model with significant edges of estimated fixed model:
layout(t(1:2))
plot(Model,"fixed", title = "True model",layout="circle", edge.labels = TRUE)
plot(Res,"fixed", title = "Estimated model", layout = "circle", onlySig = TRUE,
alpha = 0.05, edge.labels = TRUE)
# Compare true and estimated individual differences in parameters:
layout(t(1:2))
plot(Model,"fixed", title = "True model",layout="circle", edge.color = "blue",
edge.labels = TRUE)
plot(Res,"fixed", title = "Estimated model", layout = "circle", edge.color = "blue",
edge.labels = TRUE)
# Compare networks of subject 1:
layout(t(1:2))
plot(Model,"subject",subject = 1, title = "True model",layout="circle",
edge.labels = TRUE)
plot(Res,"subject",subject = 1,title = "Estimated model", layout = "circle",
edge.labels = TRUE)
### Large network ###
nVar <- 10
nPerson <- 50
nTime <- 50
# Simulate model and data:
Model <- mlVARsim0(nPerson,nVar,nTime, sparsity = 0.5)
# Run orthogonal mlVAR:
Res <- mlVAR0(Model, orthogonal = TRUE)
# Compare true fixed model with significant edges of estimated fixed model:
layout(t(1:2))
plot(Model,"fixed", title = "True model",layout="circle")
plot(Res,"fixed", title = "Estimated model", layout = "circle", onlySig = TRUE,
alpha = 0.05)
# Compare true and estimated individual differences in parameters:
layout(t(1:2))
plot(Model,"fixed", title = "True model",layout="circle", edge.color = "blue")
plot(Res,"fixed", title = "Estimated model", layout = "circle", edge.color = "blue")
# Compare networks of subject 1:
layout(t(1:2))
plot(Model,"subject",subject = 1, title = "True model",layout="circle")
plot(Res,"subject",subject = 1,title = "Estimated model", layout = "circle")
}
}
|
f91c91aea65a84682408c821900bbb20eba5ecee
|
8a2b7c55acffa68c7ce43e441b136a0ea6fa6971
|
/man/evolution_plot.Rd
|
a1d7551bc38b2393298c56881e182a35f45e4d43
|
[] |
no_license
|
DeveauP/QuantumClone
|
0e0dfa6fdf64135af73d2942d31ef2fe753da782
|
45b595bd7f5387cc7a094ba67bdd2957eedf90a1
|
refs/heads/master
| 2021-10-28T03:52:33.780314
| 2021-10-27T13:54:14
| 2021-10-27T13:54:14
| 38,614,225
| 10
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 502
|
rd
|
evolution_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{evolution_plot}
\alias{evolution_plot}
\title{Evolution plot}
\usage{
evolution_plot(QC_out, Sample_names = NULL)
}
\arguments{
\item{QC_out}{: Output from One_step_clustering}
\item{Sample_names}{: character vector of the names of each sample (in the same order as the data)}
}
\value{
ggplot object
}
\description{
Plots evolution in time of clones
}
\examples{
require(ggplot2)
evolution_plot(QC_output)
}
|
9957dfb2a1b631ef1da62312ed5f58f164ff8418
|
15f54cf88824f8ef2581f0a00f8d7fd208f7d0f4
|
/R/wiot2006_data.R
|
2fadce2a9847a6469698836c6340fc611a67f521
|
[] |
no_license
|
MatthewSmith430/GVTr
|
83ee4761156096ca65a4406bd34204a36233c640
|
c6cb9dd3f09a62c6df09238bdca96ad2785eabb7
|
refs/heads/master
| 2022-11-07T19:44:14.133448
| 2022-11-04T09:04:09
| 2022-11-04T09:04:09
| 115,260,573
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 151
|
r
|
wiot2006_data.R
|
#' @title WIOD 2006 Data
#'
#' @description World input-output table data for the year 2006 from the
#' WIOD (World Input-Output Database).
#' @name wiot2006
#' @docType data
#' @usage wiot2006
#' @keywords datasets
NULL
|
611edeca4385d34730aa5e04b5412f317971f331
|
262aec5f3ed3b4fb55e832c02a91e90c12b617c3
|
/run_analysis.R
|
453ecbf90ee624f5eab47ba867f48b8b8ea8267a
|
[] |
no_license
|
r4sn4/Getting-and-Cleaning-Data-Course-Project
|
f574992dc31d37f1d3397c82501bc28038dea5c4
|
38e7f000496e2b98c4947f373237351a22bd0217
|
refs/heads/master
| 2021-01-25T05:22:47.708054
| 2015-01-26T07:27:40
| 2015-01-26T07:27:40
| 29,815,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,267
|
r
|
run_analysis.R
|
# Tidies the UCI HAR (smartphone accelerometer) dataset: merges train/test,
# keeps mean/std features, labels activities, cleans variable names, and
# writes per-subject/per-activity averages to tidyData.txt.
#load library dplyr
library(dplyr)
#set the working directory (machine-specific path)
setwd("E:/rasna/D drive/Datascience/GettingAndCleaningData/WorkingDir")
#Section 1 - Merge the training and the testing data sets
# read train file data and test file data into data frame
train.dataset <- read.table("./X_train.txt")
test.dataset <- read.table("./X_test.txt")
combined.traintest <- bind_rows(train.dataset , test.dataset)
# subject ids per observation, stacked train-then-test to match the rows above
train.subjectid <- as.vector(read.table("./subject_train.txt"))
test.subjectid <- as.vector(read.table("./subject_test.txt"))
subject.ids <- bind_rows(train.subjectid,test.subjectid)
# activity ids per observation, stacked in the same order
train.activityid <- as.vector(read.table("./y_train.txt"))
test.activityid <- as.vector(read.table("y_test.txt"))
activity.ids <- bind_rows(train.activityid,test.activityid)
#Section 1 ends
# use the feature names (second column of features.txt) as column names
features <- read.table("./features.txt")
features <- as.vector(features[, 2])
colnames(combined.traintest) <- features
#Section 2 - Extracts measurements on mean and standard deviation
mean.dataset <- combined.traintest[,grepl("mean",colnames(combined.traintest),ignore.case = TRUE)]
stddev.dataset <- combined.traintest[,grepl("std",colnames(combined.traintest),ignore.case = TRUE)]
mean.std.dataset <- bind_cols(mean.dataset,stddev.dataset)
# Add Activity Id and subject Id to final dataset
final.dataset <- bind_cols(subject.ids, activity.ids, mean.std.dataset)
colnames(final.dataset) <- c("subject.ids","activities",colnames(mean.std.dataset))
#Section 2 Ends
##Section 3 - descriptive activity names to name the activities in the data set
final.dataset$activities <- as.character(final.dataset$activities)
final.dataset$activities[final.dataset$activities==1] <- "WALKING"
final.dataset$activities[final.dataset$activities==2] <- "WALKING UPSTAIRS"
final.dataset$activities[final.dataset$activities==3] <- "WALKING DOWNSTAIRS"
final.dataset$activities[final.dataset$activities==4] <- "SITTING"
final.dataset$activities[final.dataset$activities==5] <- "STANDING"
final.dataset$activities[final.dataset$activities==6] <- "LAYING"
final.dataset$activities <- as.factor(final.dataset$activities)
#Section 3 ends
#Section 4 - Appropriately labels the data set with descriptive variable names
# Remove special characters such as " - , ( ) "from features
pattern <- "-|\\(|\\)|,"
colnames(final.dataset) <- sapply(colnames(final.dataset), function(X) gsub(pattern,"",X))
# Give descriptive name to columns (expand the abbreviated prefixes/tokens)
names(final.dataset) <- gsub("^t","time",colnames(final.dataset))
names(final.dataset) <- gsub("^f","frequency",colnames(final.dataset))
names(final.dataset) <- gsub("Acc","Accelerator",colnames(final.dataset))
names(final.dataset) <- gsub("Gyro","Gyroscope",colnames(final.dataset))
names(final.dataset) <- gsub("Mag","Magnitude",colnames(final.dataset))
#Section 4 ends
#Section 5 - tidy data set with the average of each variable for each activity and each subject
tidy.data <- aggregate(. ~ activities + subject.ids, final.dataset, mean)
#rearrange columns so that subject.ids is first column, acivities is 2nd, and rest of the columns comes after these two columns
tidy.data <- select(tidy.data,subject.ids,activities, timeBodyAcceleratormeanX : frequencyBodyBodyGyroscopeJerkMagnitudestd)
write.table(tidy.data, './tidyData.txt',row.names=FALSE,sep='\t')
#Section 5 ends
|
dd640526bddb22c075c6cf48d820c1a517ccdcaa
|
57c8ee0f413e86f8aad75fe9cd526386fdc743c9
|
/man/power5.Rd
|
7d19715e874249875f7f3843c22a51c78dc163e4
|
[] |
no_license
|
beanumber/colleges
|
b4844a67a9d2f66f514987266f2589179d0e5f34
|
e56a43b8b28515aade94b8f46425bf16bbb49f3e
|
refs/heads/master
| 2021-09-26T19:22:26.933143
| 2018-11-01T15:56:39
| 2018-11-01T15:56:39
| 111,578,793
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 345
|
rd
|
power5.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{power5}
\alias{power5}
\title{Subset of FBS in Power Five conferences}
\format{An object of class \code{data.frame} with 910 rows and 43 columns.}
\usage{
power5
}
\description{
Subset of FBS in Power Five conferences
}
\keyword{datasets}
|
d7712a4875f9121bcb723c7a0549fae71fcf3da6
|
ff3dbad87ea3b38d3191869793a0e1ec9ed60b8f
|
/man/buildAnnotationStore.Rd
|
16007fe96f8dcc23d5bf0799b119df3db054b2dc
|
[
"Artistic-2.0"
] |
permissive
|
pmoulos/recoup
|
edc2b872bfc7a0ab36dc26da0d352e06e6521b75
|
eb2f8497e913024ee5c902de9bb47300b1f6602b
|
refs/heads/master
| 2023-06-08T18:44:10.636803
| 2023-04-25T14:45:35
| 2023-04-25T14:45:35
| 49,005,951
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,837
|
rd
|
buildAnnotationStore.Rd
|
\name{buildAnnotationStore}
\alias{buildAnnotationStore}
\title{Build a local annotation database for recoup}
\usage{
buildAnnotationStore(organisms, sources,
home = file.path(path.expand("~"), ".recoup"),
forceDownload = TRUE, rc = NULL)
}
\arguments{
\item{organisms}{a character vector of organisms
for which to download and build annotations. Check
the main \code{\link{recoup}} help page for details
on supported organisms.}
\item{sources}{a character vector of public sources
from which to download and build annotations. Check
the main \code{\link{recoup}} help page for details
on supported annotation sources.}
\item{home}{a valid path (accessible at least by the
current user) where the annotation database will be
set up. It defaults to \code{".recoup"} inside the
current user's home directory.}
\item{forceDownload}{by default,
\code{buildAnnotationStore} will not download an
existing annotation again (\code{FALSE}). Set to
\code{TRUE} if you wish to update the annotation
database.}
\item{rc}{fraction (0-1) of cores to use in a multicore
system. It defaults to \code{NULL} (no parallelization).
It is used in the case of \code{type="exon"} to process
the return value of the query to the UCSC Genome
Browser database.}
}
\value{
The function does not return anything. Only the annotation
directory and contents are created.
}
\description{
*This function is defunct! Please use
\code{\link{buildAnnotationDatabase}}.*
This function creates a local annotation database to be
used with recoup so as to avoid long time on the fly
annotation downloads and formatting.
}
\examples{
\donttest{
buildAnnotationStore("mm10","ensembl")
}
}
\author{
Panagiotis Moulos
}
|
e062c294c63897e27c2c18fe5438c005f37da50d
|
8c2dae1f77505691c0c14015dff9b04cd9eae24c
|
/bin/RHive.r
|
ccca2a474ee0ed8eb685659ee56745ed6e26b642
|
[] |
no_license
|
wuhujun/Rhive
|
ca2e3fae2874d9c2b8c05bf3701f99cb2c040a66
|
5b77a9297bdaf2ea1ac7d6cd6f6c54570cf2257b
|
refs/heads/master
| 2021-01-22T08:29:50.637152
| 2013-06-20T08:15:59
| 2013-06-20T08:15:59
| 7,715,340
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 721
|
r
|
RHive.r
|
#!/usr/local/bin/Rscript
# Connects to Hive through RHive, inspects the 'ia' and 'gener' tables,
# coerces the 'gener' columns to numeric and fits a simple linear model.
library(rJava)
library(Rserve)
library(RHive)

rhive.init()
rhive.connect()

# Pull the full contents of the 'ia' table.
ia_result <- rhive.query('select * from ia')
#rhive.query('load data inpath \'input/data.txt\' overwrite into table gener ')
ia_data <- ia_result

# List the tables known to Hive (auto-printed at top level).
table_info <- rhive.query('show tables ')
table_info

# Quick inspection of the 'ia' data (both results auto-print).
class(ia_data)
summary(ia_data)

# Pull the 'gener' table and inspect its structure.
gener <- rhive.query('select * from gener')
colnames(gener)
class(gener)
str(gener)
#summary(gener)

# Convert the columns to numeric: the first column is converted separately
# (kept in t1) and then dropped, the remaining three converted in place.
t1 <- as.numeric(as.character(gener[,1]))
gener <- gener[,-1]
gener[,1] <- as.numeric(as.character(gener[,1]))
gener[,2] <- as.numeric(as.character(gener[,2]))
gener[,3] <- as.numeric(as.character(gener[,3]))
gener

# Simple linear regression of col2 on col3 (summary and model auto-print).
model <- lm(col2~col3, data=gener)
summary(model)
model
#gener
rhive.close()
|
2e24c1c910835d139250a7ebfb05f04a45dfbbe3
|
ad3780d60c680b22fc1bf8ea5cfd380de31c1151
|
/R/sample_text.R
|
6fcac6cf8add432e8d16850245093726e6f9db2e
|
[
"MIT"
] |
permissive
|
nproellochs/textsampler
|
e7167f3631a9eec8b9d451112420aa70dd495cb4
|
462ded91d4704e5083f5cf8142a048da1bec3bf0
|
refs/heads/master
| 2020-07-23T13:06:57.146338
| 2019-09-10T17:02:17
| 2019-09-10T17:02:17
| 207,567,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,506
|
r
|
sample_text.R
|
#' Sample texts from a predefined text source
#'
#' Performs text sampling. Requires input data in the form of raw texts.
#'
#' @param n Number of texts to be sampled. \code{n} is an integer greater than 0. By default, \code{n} is set to 1.
#' @param source Text source. A vector of characters, a \code{data.frame}, or an object of type \code{\link[tm]{Corpus}}. Alternatively, one can
#' load a predefined dataset by specifying a string. In the latter case, possible values are \code{imdb_sentences}, \code{amazon_sentences},
#' \code{yelp_sentences} and \code{english_words}.
#' @param type Type of texts to be sampled. Possible values are texts, paragraphs, sentences, words, and characters.
#' @param sub_token A string specifying the text unit for filtering texts by length via \code{min_length} and \code{max_length}.
#' Possible values are texts, paragraphs, sentences, words, and characters.
#' @param max_length Maximum length of the texts to be sampled. \code{max_length} is an integer greater than 0. By default, \code{max_length} is set to 50.
#' @param min_length Minimum length of the texts to be sampled. \code{min_length} is an integer greater than 0. By default, \code{min_length} is set to 1.
#' @param word_list A word list.
#' @param shuffle If \code{true}, the text samples are returned in random order. Default is \code{true}.
#' @param input A string defining the column name of the raw text data in \code{source}. The value is ignored if \code{source} is not of type \code{dataframe}.
#' @param tbl If \code{true}, the output is returned as a tibble. Default: \code{true}.
#' @param clean If \code{true}, the texts are cleaned before text sampling. Default is \code{true}.
#' @param ... Additional parameters passed to function for e.g. preprocessing.
#' @return An object of class \code{data.frame}.
#' @examples
#' # Sample three sentences from Yelp reviews.
#' sample_text(n = 3, source = "yelp_sentences", type = "sentences")
#' @importFrom magrittr %>%
#' @export
"sample_text" <- function(n = 1, source = "yelp_sentences",
type = "sentences", sub_token = "words", max_length = 50, min_length = 1,
word_list = NULL,
shuffle = T, input = NULL, tbl = T, clean = T, ...) {
# S3 dispatch happens on the second argument ('source'), not on 'n'.
UseMethod("sample_text", source)
}
#' @export
"sample_text.data.frame" <- function(n = 1, source = "yelp_sentences",
  type = "sentences", sub_token = "words", max_length = 50, min_length = 1,
  word_list = NULL,
  shuffle = T, input = NULL, tbl = T, clean = T, ...) {
  # data.frame method: pull the raw-text column(s) named by `input` out of
  # the data frame as a plain character vector, then delegate to the
  # character method of sample_text().
  text_vector <- unlist(source[, c(input)])
  sample_text(n, source = text_vector, type, sub_token, max_length, min_length, word_list, shuffle, input, tbl, clean, ...)
}
#' @export
"sample_text.character" <- function(n = 1, source = "yelp_sentences",
  type = "sentences", sub_token = "words", max_length = 50, min_length = 1,
  word_list = NULL,
  shuffle = T, input = NULL, tbl = T, clean = T, ...) {
  # Character method: `source` is either a single string naming a predefined
  # corpus (length 1), or a vector of raw texts (length > 1).
  ## --- Argument validation ---------------------------------------------
  if (!int_greater_zero(n)) {
    stop("Argument 'n' should be an integer > 0.")
  }
  if (!(int_greater_zero(max_length) & int_greater_zero(min_length))) {
    stop("Arguments 'min_length' and 'max_length' must be integers > 0.")
  }
  if (!(is.character(type) && is.character(sub_token))) {
    stop("Arguments 'type' and 'sub_token' must be of type 'character'.")
  }
  if (!(sub_token %in% c("words", "sentences", "paragraphs", "lines", "characters"))) {
    stop("Argument 'sub_token' is invalid.")
  }
  if (!(is.null(word_list) | is.character(word_list))) {
    stop("Argument 'word_list' must be of type 'character'.")
  }
  if (!(is.null(input) | is.character(input))) {
    stop("Argument 'input' must be of type 'character'.")
  }
  if (!(is.logical(shuffle) & is.logical(tbl) & is.logical(clean))) {
    stop("Arguments 'shuffle', 'tbl', and 'clean' must be of type 'logical'.")
  }
  ## Load corpus
  # A length-1 source is treated as the NAME of a bundled dataset; anything
  # longer is treated as the raw texts themselves.
  if(length(source) == 1) {
    corpus <- load_corpus(source, type = type, sub_token = sub_token)
  } else {
    corpus <- generate_corpus(text = source, type = type, sub_token = sub_token, clean = clean)
  }
  ## Filter corpus
  # Keep only texts whose length (in `sub_token` units) lies within
  # [min_length, max_length] and that match `word_list`, if one was given.
  corpus_filtered <- subset_text(corpus, min_length = min_length, max_length = max_length, word_list = word_list)
  if (nrow(corpus_filtered) < n) {
    warning(paste0("The parameter 'n' exceeds the number of observations in the corpus. Generated ", nrow(corpus_filtered), " texts"))
  }
  ## Shuffle
  # Either draw a random sample of up to n rows, or take the first n rows in
  # the corpus' original order.
  if (shuffle == TRUE) {
    out <- corpus_filtered %>% dplyr::sample_n(min(nrow(corpus_filtered), n))
  } else {
    out <- corpus_filtered %>% dplyr::slice(1:min(nrow(corpus_filtered), n))
  }
  ## Select output format
  # tbl = TRUE -> tibble with Id / Text / Length columns; otherwise a plain
  # character vector of the sampled texts.
  if(tbl == TRUE) {
    out <- out %>% dplyr::as_tibble() %>% dplyr::select(Id, Text, Length = N)
  } else {
    out <- out$Text[1:min(nrow(corpus_filtered), n)]
  }
  return(out)
}
#' @export
"sample_text.Corpus" <- function(n = 1, source = "yelp_sentences",
  type = "sentences", sub_token = "words", max_length = 50, min_length = 1,
  word_list = NULL,
  shuffle = T, input = NULL, tbl = T, clean = T, ...) {
  # Corpus method: extract the raw texts from the tm Corpus, then delegate
  # to the character method of sample_text().
  # BUG FIX: the original read `get("content", tm_corpus)`, but `tm_corpus`
  # is not defined anywhere in this function, so the method always failed
  # with "object 'tm_corpus' not found". The dispatched object is `source`.
  data_vec <- get("content", source)
  sample_text(n, source = data_vec, type, sub_token, max_length, min_length, word_list, shuffle, input, tbl, clean, ...)
}
|
03c35d1146aed2bb0078a65ee63f16ec3ba77e1e
|
7afbb148ec11b3105aaead6bdd900f847e49eb18
|
/man/recipes-internal.Rd
|
df18ea00713c5d14be7c71d4ef5e50366969b60e
|
[
"MIT"
] |
permissive
|
tidymodels/recipes
|
88135cc131b4ff538a670d956cf6622fa8440639
|
eb12d1818397ad8780fdfd13ea14d0839fbb44bd
|
refs/heads/main
| 2023-08-15T18:12:46.038289
| 2023-08-11T12:32:05
| 2023-08-11T12:32:05
| 76,614,863
| 383
| 123
|
NOASSERTION
| 2023-08-26T13:43:51
| 2016-12-16T02:40:24
|
R
|
UTF-8
|
R
| false
| true
| 2,057
|
rd
|
recipes-internal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/YeoJohnson.R, R/misc.R, R/printing.R
\name{yj_transform}
\alias{yj_transform}
\alias{estimate_yj}
\alias{ellipse_check}
\alias{printer}
\alias{prepare}
\alias{is_trained}
\alias{sel2char}
\alias{print_step}
\title{Internal Functions}
\usage{
yj_transform(x, lambda, ind_neg = NULL, eps = 0.001)
estimate_yj(
dat,
limits = c(-5, 5),
num_unique = 5,
na_rm = TRUE,
call = caller_env(2)
)
ellipse_check(...)
printer(
tr_obj = NULL,
untr_obj = NULL,
trained = FALSE,
width = max(20, options()$width - 30)
)
prepare(x, ...)
is_trained(x)
sel2char(x)
print_step(
tr_obj = NULL,
untr_obj = NULL,
trained = FALSE,
title = NULL,
width = max(20, options()$width - 30),
case_weights = NULL
)
}
\arguments{
\item{x}{A list of selectors}
\item{...}{Arguments pass in from a call to \code{step}}
\item{tr_obj}{A character vector of names that have been
resolved during preparing the recipe (e.g. the \code{columns} object
of \code{\link[=step_log]{step_log()}}).}
\item{untr_obj}{An object of selectors prior to prepping the
recipe (e.g. \code{terms} in most steps).}
\item{trained}{A logical for whether the step has been trained.}
\item{width}{An integer denoting where the output should be wrapped.}
\item{title}{A character, shortly describing the action the step takes.}
}
\value{
If not empty, a list of quosures. If empty, an error is thrown.
\code{NULL}, invisibly.
A logical
A character vector
\code{NULL}, invisibly.
}
\description{
These are not to be used directly by the users.
\code{ellipse_check()} is deprecated. Instead, empty selections should be
supported by all steps.
This internal function is used for printing steps.
This internal function takes a list of selectors (e.g. \code{terms}
in most steps) and returns a character vector version for
printing.
This internal function is used for printing steps.
}
\seealso{
\link{developer_functions}
\link{developer_functions}
\link{developer_functions}
}
\keyword{internal}
|
65c7fa95b5640b44ff593d5c927b3b12616fe757
|
94a6d258ea38c7a962c5eb87092aa6d492dafc13
|
/R/process_assignment_for_courseworks.R
|
5b456e363c344f466471a5f4d743ac2e38b6dbc9
|
[] |
no_license
|
P8105/p8105.helpers
|
bc66d685140c69cabe601b6eb3f2de375cdfba94
|
3c8b04d0c7d01a536934f9aac316c29d28a0c99a
|
refs/heads/master
| 2022-09-26T20:40:54.289225
| 2022-09-15T19:50:40
| 2022-09-15T19:50:40
| 106,289,967
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
process_assignment_for_courseworks.R
|
#' Process assignment for courseworks
#'
#' For a single assignment, process the google spreadsheet to submit
#' assignment grades to courseworks.
#'
#' @param path character; path to file containing all grades
#' @param assignment character; homework to process (e.g. "hw1")
#'
#' @import tidyverse
#' @importFrom janitor clean_names
#' @importFrom readxl read_excel
#' @export
#'
process_assignment_for_courseworks = function(path = "p8105_grades.xlsx", assignment = NULL) {
  ## should the grades be downloaded each time this is run?
  # Read the worksheet named after the assignment and return it unchanged.
  assignment_grades = read_excel(path = path, sheet = assignment)
  assignment_grades
}
|
6f534933161a37556b78a48c1c8b73ffb6b4e12d
|
49ff0bc7c07087584b907d08e68d398e7293d910
|
/mbg/mbg_core_code/mbg_central/LBDCore/man/qsub_sing_envs.Rd
|
864b5334206f15cf771b751ba2f5b0d22cc313bf
|
[] |
no_license
|
The-Oxford-GBD-group/typhi_paratyphi_modelling_code
|
db7963836c9ce9cec3ca8da3a4645c4203bf1352
|
4219ee6b1fb122c9706078e03dd1831f24bdaa04
|
refs/heads/master
| 2023-07-30T07:05:28.802523
| 2021-09-27T12:11:17
| 2021-09-27T12:11:17
| 297,317,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,560
|
rd
|
qsub_sing_envs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qsub_sing_envs.R
\name{qsub_sing_envs}
\alias{qsub_sing_envs}
\title{Adds environmental variables to a qsub string}
\usage{
qsub_sing_envs(qsub_stub, envs, image)
}
\arguments{
\item{qsub_stub}{A string containing the initial qsub string to which
environmental variables will be concatenated to in the form of
'-v ENV=VALUE'.}
\item{envs}{This should be a named list of environmental variables.
\code{qsub_sing_envs} will check that the names of the list members passed
in match the environmental variables that the shell_sing.sh script knows
about: 'SET_OMP_THREADS' and/or 'SET_MKL_THREADS'. Passing in other
environmental names in the list will result in an error. If this is left
as 'NULL' and a Singularity image is used, SET_OMP_THREADS and
SET_MKL_THREADS will remain unset and the shell_sing.sh script will use
the default setting of SET_OMP_THREADS=1 and SET_MKL_THREADS={max_threads}
(see shell_sing.sh comments). For example SET_OMP_THREADS=1 and
SET_MKL_THREADS=4 can be achieved by passing in
\code{envs = list(SET_OMP_THREADS=1, SET_MKL_THREADS=4)}}
\item{image}{The keyword (e.g. 'default') or path to the Singularity image.
This should have been defined by \code{get_singularity} so likely
\code{get_singularity} should have been run on the 'singularity' argument
(in \code{make_qsub_share} or \code{parallelize} for example) before this
function is run.}
}
\value{
Returns a string with at least '-v sing_image=image' and possibly
other environmental variables values if they were passed into the
'singularity_opts' argument of functions like \code{make_qsub_share}.
}
\description{
\code{qsub_sing_envs} assumes that a qsub string is being built to launch a
Singularity container. It always adds in the '-v sing_image=sing_image' as
expected by lbd_core/mbg_central/shell_sing.sh script that ultimately
launches the container. Optionally, users may want to pass the additional
environmental variables 'SET_OMP_THREADS' and/or 'SET_MKL_THREADS' to
shell_sing.sh. If one or both of those are passed into \code{qsub_sing_envs}
it will add those environmental variables and their values as additional
'-v' flags in the construction of the qsub command.
}
\seealso{
The function \code{\link{get_singularity}} should likely be run
before this function is run. This function is used by:
\code{\link{parallelize}}
\code{\link{make_qsub}}
\code{\link{make_qsub_share}}
\code{\link{make_qsub_postest}}
\code{\link{submit_aggregation_script}}
}
|
f56f92510239934b12a697a75ef1eace7d5b9030
|
9d24829289bb8a48bf6ce4954e04bb7a4e2abc42
|
/functions/tSNE.R
|
4f857e841469536d322e39a68d9ba6c21b4ad248
|
[] |
no_license
|
michellemeier27/Semesterproject
|
231a51168f104c5bce4e13ca1a4d4782a29301e8
|
fe923ad7974e06a1b45271f466785e0465912c86
|
refs/heads/master
| 2022-12-09T13:13:02.536402
| 2020-08-31T15:57:58
| 2020-08-31T15:57:58
| 262,081,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 279
|
r
|
tSNE.R
|
##FUNCTION TSNE
#loading library
library(Rtsne)
#' Run t-SNE on an expression matrix for an individual series.
#'
#' The matrix is transposed before being handed to Rtsne so that samples
#' become rows (one observation per row), as Rtsne expects.
#'
#' @param expression_results numeric matrix of expression values
#'   (features in rows, samples in columns -- TODO confirm with callers).
#' @param perplexity_wanted perplexity passed through to Rtsne. Default 5.
#' @return The Rtsne result object.
tSNE <- function(expression_results, perplexity_wanted = 5) {
  transposed <- t(expression_results)
  # BUG FIX: in the original the last expression was an assignment
  # (`tsne_results <- Rtsne(...)`), so the result was returned invisibly;
  # return the result visibly instead.
  tsne_results <- Rtsne(transposed, perplexity = perplexity_wanted, check_duplicates = FALSE)
  tsne_results
}
|
cf15f1c1f4fd179a2ab80e59900db6ef82738b21
|
7e50563d67158e361915e7311f143bee47661e7a
|
/R/praiseme.r
|
e3e07a806783ac93fc1e7dcb5871e57472baed01
|
[] |
no_license
|
perikarya/praiseme
|
cd33e464a0948a46c2a2e02de9ad5382fb3c82b0
|
7ba4af6d06dc4fd70d98dc91aa004327b3bf38f6
|
refs/heads/master
| 2021-07-05T18:23:56.097390
| 2021-01-17T12:05:25
| 2021-01-17T12:05:25
| 218,241,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 582
|
r
|
praiseme.r
|
#' Print a short message of praise to the user
#'
#' @description Takes a string as input and returns a short message of praise using the string.
#'
#' @param praisefor The verb or subject to praise the user for, entered as a string. If blank, a generic praise message will be returned.
#'
#' @return A short praise message as a string.
#'
#' @examples
#' praiseme()
#' praiseme("art")
#' praiseme("coding")
#'
#' @export
praiseme <- function(praisefor) {
  # Build the message first, then print it; print() returns the printed
  # string invisibly, which becomes the function's return value.
  message_text <- if (missing(praisefor)) {
    "You're great!"
  } else {
    paste0("You're great at ", praisefor, "!")
  }
  print(message_text)
}
|
090ae3e96bbeec5f09870beabb9d756ae0178ab9
|
5811929a423984b8e0ef5637a03c47214e620887
|
/man/resid-methods.Rd
|
e21224b323f5a4895474685ec74967d2b1bfbe6f
|
[] |
no_license
|
cran/cold
|
b7d6713df201004ad0ec1e8d3f6cbbdaf33b8acc
|
16be37d90686c7c880ac3ed44b7dfc1a3f4a4d6b
|
refs/heads/master
| 2021-12-31T10:22:07.475445
| 2021-08-25T09:00:02
| 2021-08-25T09:00:02
| 17,695,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
rd
|
resid-methods.Rd
|
\name{resid-methods}
\docType{methods}
\alias{resid-methods}
\alias{resid,cold-method}
\title{Methods for function \code{resid}}
\description{Methods for function \code{resid} extracting residual values of a fitted model object from class \code{\link[=cold-class]{cold}}. }
\usage{
\S4method{resid}{cold}(object, type = c( "pearson","response","null"),...)
}
\arguments{
\item{object}{an object of class \code{\link[=cold-class]{cold}}.}
\item{type}{ an optional character string specifying the type of residuals to be used.
Two types are allowed: pearson and response. Defaults to "pearson".}
\item{...}{other arguments.}
}
\section{Methods}{
\describe{
\item{\code{signature(object="cold")}:}{residuals for \code{\link{cold}} object.}
}}
\examples{
##### data = seizure
seiz1M <- cold(y ~ lage + lbase + v4 + trt + trt:lbase, data = seizure,
start = NULL, dependence = "AR1")
resid(seiz1M)[1:16]
}
\keyword{methods}
|
8602d4a2e4606d18801f9da0051d4dee915967f6
|
b5bb0e21cd74d970e6ccf1e99e51e4137a9b1d3f
|
/R/utils.R
|
19407fc439d238c1f52d2be6e5395299e5419c26
|
[
"MIT"
] |
permissive
|
reside-ic/fstorr
|
559fb209bf6063e35c6e6402aaca43a18643a52d
|
a5fed651b59f7df0e1a846f76360218ff1f31173
|
refs/heads/master
| 2022-12-13T14:31:05.119498
| 2020-04-26T10:28:42
| 2020-04-26T10:28:42
| 258,980,872
| 2
| 0
|
NOASSERTION
| 2020-04-26T10:28:44
| 2020-04-26T08:39:27
|
Makefile
|
UTF-8
|
R
| false
| false
| 114
|
r
|
utils.R
|
# Null-coalescing operator: yield `x` unless it is NULL, in which case
# fall back to `y`.
`%||%` <- function(x, y) { # nolint
  if (!is.null(x)) {
    x
  } else {
    y
  }
}
# Wrap each element of `x` in single quotes (vectorised).
squote <- function(x) {
  paste0("'", x, "'")
}
|
db9476c311d77ff879db49d5aa94a210f8b1e362
|
4a2f21f44561040d0b362abeb809fabb85c79d9e
|
/charls_education.R
|
6eef8949fc806d6e2c1f94797e431f8213053f9b
|
[] |
no_license
|
lt2710/parental-wealth-impact
|
b3936b7597d33103f633ce8ffab9827d7cf1b8d1
|
4c94ded78c654687a4b3bcd07d26d5602526204f
|
refs/heads/master
| 2021-09-27T21:31:51.824819
| 2021-09-12T14:25:19
| 2021-09-12T14:25:19
| 207,786,312
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,529
|
r
|
charls_education.R
|
## ----setup-------------------------------------------------------------------------------------------------------------------------
#set working directory
# NOTE(review): rstudioapi::getActiveDocumentContext() only works when this
# script is run interactively inside RStudio; running via Rscript fails
# here -- confirm that is always the intended usage.
path_to_code<-rstudioapi::getActiveDocumentContext()$path
# Strip the trailing "/<scriptname>.R" from the path to get the folder.
main_directory<-strsplit(path_to_code,"/[a-zA-Z0-9_-]*.R$")[[1]]
setwd(main_directory)
#Set time variables to debug date transformation
Sys.setlocale("LC_TIME", "C")
Sys.setenv(TZ="Europe/Berlin")
## ----packages, message = FALSE-----------------------------------------------------------------------------------------------------
# Load packages.
# Each package is installed on the fly if missing, then attached.
packages <- c(
  "tidyverse",
  "data.table",
  # below is for output summary
  "VGAM",
  "jtools",
  "huxtable",
  "officer",
  "flextable",
  "gtsummary"
)
packages <- lapply(
  packages,
  FUN = function(x) {
    if (!require(x, character.only = TRUE)) {
      install.packages(x)
      library(x, character.only = TRUE)
    }
  }
)
# Pin select() to dplyr, overriding masking by later-attached packages
# (e.g. MASS::select).
select <- dplyr::select
# eda ----------------------------------
# Exploratory plot: offspring's average years of schooling by father's
# education level, one line per parental-net-worth quantile.
load("output/charls.RData")  # loads the `charls` data frame built elsewhere
# make data
plot_data = charls %>%
  mutate(
    # Bucket father's years of education into four ordered diploma levels.
    diploma_parent = case_when(
      education_years_parent %in% c(0:5) ~ "1 < Primary",
      education_years_parent %in% 6 ~ "2 Primary",
      education_years_parent %in% 9 ~ "3 Junior High",
      education_years_parent %in% c(12:22) ~ "4 Senior High >"
    )
  ) %>%
  group_by(asset_total_quant, diploma_parent) %>%
  summarise(num = n(),
            metric = mean(education_years, na.rm = T)) %>%
  drop_na()
# plot
ggplot(aes(x = diploma_parent,
           y = metric,
           group = asset_total_quant,
           color = asset_total_quant),
       data = plot_data) +
  geom_point() +
  geom_line() +
  xlab("Father's Education") +
  ylab("Average Years of Schooling") +
  labs(color = "Parental Net Worth") +
  theme_classic()
# modeling -----------------------------
# make models
# Right-hand side shared by all four models: child demographics plus
# parental SES measures (hukou, education, party membership, job,
# logged net worth).
varlist <-
  paste(
    # child
    "age"
    ,"male"
    # parent
    ,"urbanhukou_parent"
    ,"education_years_parent"
    ,"party_parent"
    ,"job_parent"
    ,"asset_total_logged"
    ,sep = " + "
  )
# Model 1: OLS of years of schooling on the full sample.
# Models 2-4: logits for sequential education transitions, each estimated
# on the subsample that completed the previous level:
#   P(>=15 yrs | >=12), P(>=12 | >=9), P(>=9 | >=6).
model_list =
  list(
    lm(formula(paste("education_years ~",
                     varlist)),
       data = charls)
    ,glm(
      formula(paste("(education_years>=15) ~",
                    varlist)),
      data = charls %>%
        filter(education_years >= 12),
      family = "binomial"
    )
    ,glm(formula(paste("(education_years>=12) ~ ",
                       varlist)),
         data = charls %>%
           filter(education_years >= 9),
         family = "binomial")
    ,glm(formula(paste("(education_years>=9) ~ ",
                       varlist)),
         data = charls %>%
           filter(education_years >= 6),
         family = "binomial")
  )
# tabular summary
# Export all four models side by side to output/charls_education.xlsx.
jtools::export_summs(
  model_list,
  error_pos = "same",
  to.file = "xlsx",
  file.name = file.path("output",paste0("charls_education.xlsx"))
)
# plot summary
# list_coef = c(
# "25~50%" = "asset_total_quant(1.04e+05,3.09e+05]"
# ,"50%~75%" = "asset_total_quant(3.09e+05,7.42e+05]"
# ,"75>%" = "asset_total_quant(7.42e+05,2.14e+07]"
# )
# list_modelnames = c(
# "M2 (Adjusted)"
# ,"M1 (Bivariate)"
# )
#
# for (i in c(2,4,6)){
# plot = jtools::plot_summs(
# list(model_list[[i]],model_list[[i-1]])
# ,coefs = list_coef
# ,legend.title = ""
# ,ci_level = 0.95
# ,point.shape = FALSE
# ,model.names = list_modelnames
# ) +
# xlab("Offspring Years of Education") +
# ylab("Parental Net Worth Quantile") +
# theme_classic()
# print(plot)
# }
|
8e0ca1f569004e8dfda1a9b32d44797a6d80af45
|
ef576eff03fcc17685d7b50cf70535dd4b31ceef
|
/cachematrix.R
|
ec69e7d8a57f663822b70c08057716f178b742ed
|
[] |
no_license
|
hairb/ProgrammingAssignment2
|
75d4789228f2a86287b7314e09f637704476b6c1
|
c6ba4fde6a106c13f4cfda9feb783f3526a345d2
|
refs/heads/master
| 2020-12-31T03:41:42.751209
| 2014-09-20T13:41:46
| 2014-09-20T13:41:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,517
|
r
|
cachematrix.R
|
## These pair functions provide the functionality of computing the
## inverse of a matrix while using caching to improve performance and
## reduce computations.
##
## The functions assumes that the matrix supplied is a
## square invertiable matrix.
##
## makeCacheMatrix - constructs a cache-enabled matrix wrapper: a list of
## four accessor closures sharing the matrix `x` and its cached inverse.
## The cached inverse starts as NULL and is invalidated whenever the
## matrix is replaced via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse = NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # matrix changed: drop the stale inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve - returns the inverse of the "matrix" built by
## makeCacheMatrix. A previously computed inverse is served from the
## cache; otherwise it is computed with solve(), stored back via
## setinverse(), and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached inverse data")
    return(cached)
  }
  matrix_data <- x$get()
  message("calculating the inverse")
  inverse <- solve(matrix_data, ...)
  x$setinverse(inverse)
  inverse
}
|
46f6043083a2c70a9c0bb2e22f9e10f64a0d9eb5
|
eaaee85c6d83360015b75fc13e4f0dfddb5b8b0d
|
/man/edf.all.Rd
|
974153e0b73e4ad0a4e4b9f22a95d2be07564ea8
|
[] |
no_license
|
jashubbard/edfR
|
4f526b52892332d1cad5cdd71213a04f25ceec33
|
fafff856c9cfc9c670ef0c3c1d8e87bac9bbb94a
|
refs/heads/master
| 2022-08-24T17:07:36.840610
| 2022-08-04T09:41:25
| 2022-08-04T09:41:25
| 42,969,569
| 24
| 5
| null | 2016-05-11T16:02:40
| 2015-09-23T00:49:36
|
C++
|
UTF-8
|
R
| false
| false
| 1,342
|
rd
|
edf.all.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/edf_api.R
\name{edf.all}
\alias{edf.all}
\title{Load all data from EDF file}
\usage{
edf.all(EDFfile, samples = FALSE, eventmask = FALSE)
}
\arguments{
\item{EDFfile}{path to an EDF file}
\item{samples}{logical indicating whether to import samples (default=FALSE)}
\item{eventmask}{logical indicating whether to add an \code{\link{eventmask}} to samples (default=FALSE)}
}
\value{
The output will be a list with 4 named elements (fixations, saccades, blinks, and samples)
each element being a data frame
}
\description{
\code{edf.all} returns all of the most common information from
an SR-Research EyeLink EDF file (fixations, saccades, blinks, and samples)
}
\details{
edf.all is useful for obtaining fixations, saccades, blinks, and (optionally)
samples from an EDF in one shot. If you need only 1 of these (i.e., just fixations)
then use \code{\link{edf.events}}, \code{\link{edf.samples}}, \code{\link{edf.messages}}, or
\code{\link{edf.recordings}}. By default it grabs only event data. Use the \code{samples}
argument to get sample data as well.
}
\examples{
\dontrun{
output <- edf.all('/path/to/file.edf',samples=TRUE)
output$fixations #data frame
output$saccades #another data frame
}
}
\author{
Jason Hubbard, \email{hubbard3@uoregon.edu}
}
|
0d859dcf4af6e67721016c21f296a0b685c7a5b4
|
813eeacdbd82197708189b38945e0635c1063313
|
/rscripts/scatterplot.rsx
|
6bc00340e30f69471626b9cc596da2965b8566b2
|
[] |
no_license
|
sukhjitsehra/QGIS_Processing_ToolBox_Scripts
|
c3a582c045592ad98fe99e679062ec6d42f032b3
|
79de10c8c952f743ca82f0d15adc1c2906ca54ec
|
refs/heads/master
| 2021-12-14T20:16:32.850285
| 2021-12-08T03:40:34
| 2021-12-08T03:40:34
| 185,743,853
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
rsx
|
scatterplot.rsx
|
##[Sehra]=group
##showplots
##Layer=vector
##X=Field Layer
# QGIS Processing R script: plots a kernel density estimate of log10 of
# the chosen field X from the input vector layer. The `##` lines above are
# QGIS parameter directives and must be kept verbatim.
library(ggplot2)  # NOTE(review): loaded but unused -- the qplot call below is commented out
#qplot(Layer[[X]])
# BUG FIX: the original line was
#   plot(density(log10(Layer[[X]]). adjust=0.5))
# which is a syntax error ('.' instead of ',' and the bandwidth argument
# placed outside the density() call). `adjust` is density()'s bandwidth
# multiplier.
plot(density(log10(Layer[[X]]), adjust = 0.5))
|
fb88862cbff7cfc158578fe949d96723270c81b5
|
0485c00604cf3448cedb45e6efb2f85d88790c85
|
/Ch05/5_1_Barplot.R
|
ec81b565a3835fd1d4aeb9be38b3ce4638b3c07a
|
[] |
no_license
|
zsx29/R
|
d89af4ec46b8068f25d1e2447f98085a6721537f
|
1f349b7b3f1981010677530e05d4d467f2c6e95e
|
refs/heads/master
| 2023-06-11T11:28:40.607617
| 2021-07-02T02:17:13
| 2021-07-02T02:17:13
| 380,932,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
5_1_Barplot.R
|
# Date   : 2021-06-29
# Author : Park Jae-hyung
# Topic  : barplot bar charts - p140
# Basic bar chart
count <- c(1, 2, 3, 4, 5)
barplot(count)
score <- c(50, 25, 32, 21, 66)
# Label each bar with a (Korean) student name.
names(score) <- c("김유신", "홍길동", "강감찬", "장보고", "김춘추")
barplot(score)
# Bar chart of a categorical variable (frequencies via table())
season <- c("winter", "summer", "spring", "summer", "summer", "autumn", "summer", "autumn", "spring")
season
ds <- table(season)
ds
barplot(ds, main = "Season",
        col = rainbow(4))
barplot(ds, main = "Season",
        col = terrain.colors(4))
# Horizontal variant; axis labels are Korean for "season" and "frequency".
barplot(ds, main = "Season",
        col = terrain.colors(4),
        xlab = "계절",
        ylab = "빈도수",
        horiz = T)
# "Stacked bar graph" per the original comment -- NOTE(review): beside = T
# actually draws grouped (side-by-side) bars, not stacked ones.
# Legend labels are Korean age bands: 0-14, 15-64, 65+.
df_sample <- read.csv("../file/sample_population.csv")
df_sample
matrix_sample <- as.matrix(df_sample)
matrix_sample
barplot(matrix_sample, main = "population", col = rainbow(3),
        beside = T,
        legend.text = c("0~14세", "15~64세", "65~")
)
|
0d5267046b568a808c60e3c8cb0cc557480bc58d
|
9f66bb2cb478de5363af4ed856ec9409b437af4b
|
/bayesian_tvar_dram.R
|
6ba62f2e66d8b99f720e571b1335595fc0b93f14
|
[] |
no_license
|
alexhaider/Bayes_VARs
|
f6a77c7dabf157e965ee090710d753648b936e3a
|
b1d05c5b809178d2aca9fd106349e626ff32704d
|
refs/heads/master
| 2020-08-04T04:16:17.504291
| 2019-10-17T13:26:31
| 2019-10-17T13:26:31
| 212,000,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,865
|
r
|
bayesian_tvar_dram.R
|
#tar_prior: list with distr and input as elements
bayesian_tvar <- function(data, P, tar_variable, max_d, tar_prior, tar_scale, tar_transform = NULL,
reps = 10000, burn = reps / 2, stability = T, max_attempts = 1e4, deltas = NULL,
lambda = 0.1, tau = 10 * lambda, eps = 1e-4, sum_of_coeff = T,
train_sample = NULL, quiet = F, forecast_horizon = 24, irf_settings = NULL,
dram_settings = list(start = 100, adapt = 50, adapt_scale = 2.4^2,
drscale = 2, delta = 1e-5)) {
#ar_coeff = F, #taken out from function call
#tar scale is for the random walk MH; while the variance of the prior is in tar_prior
#tar scale and tar prior provide variances -> sqrt to sd is done !!!!!!!!
out_call <- match.call()
if (reps <= burn)
stop("Argument 'reps' has to be bigger than argument 'burn'.")
if (is.character(tar_variable)) {
tar_variable_character <- tar_variable
tar_variable <- which(colnames(data) == tar_variable)
if (length(tar_variable) == 0)
stop("Variable `", tar_variable_character, "' not found.")
}
N <- ncol(data)
size <- N * P #for Bc_template and df for iW, also for computing n_crit and B_sample
zero_vec <- rep(0, N) #for evaluating likelihoods with residuals -> expected value is zero vector
#====================================
# stdev <- F #for dummy prior
# ar_p <- F #for dummy prior
# ar_coeff <- T
#===================================
tar_prior$distr <- as.character(tar_prior$distr)
T_complete <- nrow(data)
d <- 1:max_d #all possible d values; current d will be saved in d_sample
#Transform
tardata <- as.matrix(data[, tar_variable, drop = F]) #later to be transformed maybe
if (is.null(tar_transform))
tar_transform <- list(fun = "ident", inp = NA)
tardata <- tar_fn_cpp(tardata, tar_transform) #transform
T_tardata <- nrow(tardata)
tar_scale_adpat <- tar_scale / dram_settings$drscale #tar scale is still the VARIANCE! and so is tar_scale_adpat; done differently in DRAM: chol(), than div by 2(3)
tar_scale <- sqrt(tar_scale) #for rnorm adjusted to sd
tar_scale_adpat <- sqrt(tar_scale_adpat) #for rnorm adjusted to sd
dtar <- match.fun(paste0("d", tar_prior$distr)) #density of tar variable
if (tar_prior$distr == "norm") {
tar_inp1 <- mean(tardata, na.rm = T) #tardata is not lagged yet!
tar_inp2 <- sqrt(as.integer(tar_prior$inp)) #variance -> sd
}
if (tar_prior$distr == "unif") { #STILL TO TEST
if (is.null(tar_prior$inp)) {
tar_inp1 <- min(tardata)
tar_inp2 <- max(tardata)
} else {
tar_inp1 <- tar_prior$inp[1]
tar_inp2 <- tar_prior$inp[2]
}
}
#lag tar independently because of possible tranformation
ZZ <- embed(tardata, max_d + 1) #creating threshold values
ZZ <- ZZ[complete.cases(ZZ), -1, drop = F] #getting rid of current value
data_embed <- embed(as.matrix(data), P + 1)
diff_rows <- nrow(data_embed) - nrow(ZZ)
#adjusting number of rows for ZZ or data_embed
if (diff_rows < 0)
ZZ <- ZZ[-c(1:abs(diff_rows)), ]
if (diff_rows > 0)
data_embed <- data_embed[-c(1:diff_rows), ]
#delete train samples: STILL TO TEST; BUT NOT DONE IN ANY PAPERS -> COMMENTED OUT
#------------------------------------
# if (train_sample != T_complete) {
# ZZ <- ZZ[-c(1:train_sample), ]
# data_embed <- data_embed[-c(1:train_sample), ]
# }
#------------------------------------
T_embed <- nrow(data_embed)
YY <- data_embed[, 1:N]
XX <- cbind(data_embed[, -c(1:N)], 1)
n_crit <- size + 1 #min number of observations in each regime
if (!stability)
max_attempts <- 0 #no eigenvalues will be computed in B_sample, i.e. no stability test
#dummy Prior
if (is.null(train_sample))
train_sample <- T_complete
# dummy_p <- create_dummy_prior(data[1:train_sample, ], P, lambda, tau, eps, ar_coeff, ar_p,
# stdev, sum_of_coeff)
#DELTAS ARE ALL EQUAL TO ZERO BY DEFAULT!!!!
if (is.null(deltas))
deltas <- rep(0, N)
#NEXT ONE IS STANDARD
dummy_p <- create_dummy_prior_man(data[1:train_sample, ], P = P, deltas = deltas, lambda = lambda,
eps = eps, sum_of_coeff = sum_of_coeff, tau = tau)
# mus <- colMeans(YY)
# dummy_p <- create_dummy_prior_man2(data[1:train_sample, ], P = P, deltas = deltas, lambda = lambda,
# eps = eps, sum_of_coeff = sum_of_coeff, tau = tau, mus = mus)
YD <- dummy_p$Yd
XD <- dummy_p$Xd
#starting values
d_sample <- sample(d, 1)
curr_ZZ <- ZZ[, d_sample]
#starting tar value
curr_ZZ_sorted <- sort(curr_ZZ)
curr_ZZ_sorted <- curr_ZZ_sorted[-c(1:(n_crit - 1), (T_embed - n_crit + 1):T_embed)] #guarantees number of obs sufficient
tar_value <- sample(curr_ZZ_sorted, 1)
reg1 <- curr_ZZ <= tar_value
reg2 <- !reg1
Y1 <- rbind(YY[reg1, ], YD); X1 <- rbind(XX[reg1, ], XD)
Y2 <- rbind(YY[reg2, ], YD); X2 <- rbind(XX[reg2, ], XD)
fit_reg1 <- .lm.fit(X1, Y1)
fit_reg2 <- .lm.fit(X2, Y2)
B1_sample <- c(fit_reg1$coefficients) #just in case we do not get stable results for B1, B2 in first Gibbs iteration
B1_sample_mat <- t(fit_reg1$coefficients)
B2_sample <- c(fit_reg2$coefficients)
B2_sample_mat <- t(fit_reg2$coefficients)
Sigma1_lm <- crossprod(fit_reg1$residuals) #/ nrow(Y1)
Sigma2_lm <- crossprod(fit_reg2$residuals) # / nrow(Y2)
repeat { #does that work?
# Sigma1_sample <- riwish_cpp(nrow(Y1) + 2 - (size + 1), Sigma1_lm) #random starting value; nrow(Y1) is after dummies appended -> correct
# Sigma2_sample <- riwish_cpp(nrow(Y2) + 2 - (size + 1), Sigma2_lm)
Sigma1_sample <- riwish_cpp(nrow(Y1), Sigma1_lm) #random starting value; nrow(Y1) is after dummies appended -> correct
Sigma2_sample <- riwish_cpp(nrow(Y2), Sigma2_lm)
# Sigma1_sample <- diag(N); Sigma2_sample <- diag(N)
if (min(eigen(Sigma1_sample, only.values = TRUE)$values,
eigen(Sigma2_sample, only.values = TRUE)$values) > 0)
break
}
out_start <- list(Sigma1_start = Sigma1_sample, Sigma2_start = Sigma2_sample, #starting values for return
start_tar = tar_value, start_d = d_sample)
if (!quiet)
print(paste0("Starting TVAR estimation with ", reps, " replications."), quote = F)
out_beta1 <- matrix(NA_real_, reps - burn, length(fit_reg1$coefficients))
out_beta2 <- matrix(NA_real_, reps - burn, length(fit_reg2$coefficients))
out_sigma1 <- array(NA_real_, dim = c(reps - burn, dim(Sigma1_sample)))
out_sigma2 <- array(NA_real_, dim = c(reps - burn, dim(Sigma2_sample)))
out_resid <- array(NA_real_, dim = c(reps - burn, dim(YY)))
if (!is.null(forecast_horizon)) {
forecast_start_period <- T_embed - P + 1 #first value to be used in forecast (as lag P)
out_yhat <- array(NA_real_, dim = c(reps - burn, P + forecast_horizon, N))
start_forecast <- YY[forecast_start_period:T_embed, ]
}
out_tar <- rep(NA_real_, reps) #has to be reps, not reps - burn for DRAM procedure; later size is reduced before returned
out_delay <- rep(NA_real_, reps - burn)
out_post <- rep(NA_real_, reps - burn)
if (!is.null(irf_settings)) {
shocked_variable <- irf_settings$shocked_variable
shock_size <- irf_settings$shock_size
irf_horizon <- irf_settings$horizon
restrict <- irf_settings$restrict
type <- irf_settings$type
out_ir1 <- array(NA_real_, dim = c(reps - burn, irf_horizon, N))
out_ir2 <- array(NA_real_, dim = c(reps - burn, irf_horizon, N))
}
n_accept <- 0
adjust_runs <- min(reps * .75, burn * .95) #when to stop the AM adjustment
Bc_template <- rbind(matrix(0, N, size), cbind(diag(N * (P - 1)), matrix(0, N * (P - 1), N)))
for (iter in 1:reps) {
#sample B1 and Sigma1
reg1 <- curr_ZZ <= tar_value
reg2 <- !reg1
T_reg1 <- sum(reg1) #for deleting the residuals from prior when finding new thresh value (see eval_post)
T_reg2 <- sum(reg2)
Y1 <- rbind(YY[reg1, ], YD)
X1 <- rbind(XX[reg1, ], XD)
B1_star <- c(.lm.fit(X1, Y1)$coefficients)
xpx1_inv <- xpx_inv_cpp(X1)
B_return <- sample_B_fast(B1_star, Sigma1_sample, xpx1_inv, Bc_template, size, N, P, max_attempts)
chk1 <- B_return$chk #stable
if (chk1 || !stability) {
B1_sample <- B_return$B_sample
B1_sample_mat <- B_return$B_mat
}
resid1 <- resids_cpp(Y1, X1, B1_sample_mat) #all resids, including from dummies for next step
# Sigma1_sample <- riwish_cpp(nrow(Y1) + 2 - (size + 1), crossprod(resid1))
Sigma1_sample <- riwish_cpp(nrow(Y1), crossprod(resid1))
#sample B2 and Sigma2
Y2 <- rbind(YY[reg2, ], YD)
X2 <- rbind(XX[reg2, ], XD)
B2_star <- c(.lm.fit(X2, Y2)$coefficients)
xpx2_inv <- xpx_inv_cpp(X2)
B_return <- sample_B_fast(B2_star, Sigma2_sample, xpx2_inv, Bc_template, size, N, P, max_attempts)
chk2 <- B_return$chk #stable
if (chk2 || !stability) {
B2_sample <- B_return$B_sample
B2_sample_mat <- B_return$B_mat
}
resid2 <- resids_cpp(Y2, X2, B2_sample_mat)
# Sigma2_sample <- riwish_cpp(nrow(Y2) + 2 - (size + 1), crossprod(resid2))
Sigma2_sample <- riwish_cpp(nrow(Y2), crossprod(resid2))
#sample tar: MH step
tar_value_star <- rnorm(1, tar_value, tar_scale) #sample new tar value )
post_old <- dtar(tar_value, tar_inp1, tar_inp2, log = T) +
lik_cpp(resid1[1:T_reg1, ], zero_vec, Sigma1_sample, loglik = T) +
lik_cpp(resid2[1:T_reg2, ], zero_vec, Sigma2_sample, loglik = T)
reg1 <- curr_ZZ <= tar_value_star
reg2 <- !reg1
Y1_new <- YY[reg1,, drop = F]; X1_new <- XX[reg1,, drop = F]
Y2_new <- YY[reg2,, drop = F]; X2_new <- XX[reg2,, drop = F]
if (min(nrow(Y1_new), nrow(Y2_new)) >= n_crit) { #also done in matlab by setting post to -Inf if nobs < n_crit, so never accepted
resid1 <- resids_cpp(Y1_new, X1_new, B1_sample_mat)
resid2 <- resids_cpp(Y2_new, X2_new, B2_sample_mat)
post_new <- dtar(tar_value_star, tar_inp1, tar_inp2, log = T) + #don't have to delete "dummy resids" because not included here
lik_cpp(resid1, zero_vec, Sigma1_sample, loglik = T) +
lik_cpp(resid2, zero_vec, Sigma2_sample, loglik = T)
alpha_12 <- min(1, exp(post_new - post_old)) #12...moving from 1 to 2
if (alpha_12 > runif(1)) {
tar_value <- tar_value_star
n_accept <- n_accept + 1
} else {#if (iter < adjust_runs) {#we didn't accept the first attempt
tar_value_star_2 <- rnorm(1, tar_value, tar_scale_adpat) #sample new tar value with smaller scale
reg1 <- curr_ZZ <= tar_value_star_2
reg2 <- !reg1
Y1_new <- YY[reg1,, drop = F]; X1_new <- XX[reg1,, drop = F]
Y2_new <- YY[reg2,, drop = F]; X2_new <- XX[reg2,, drop = F]
if (min(nrow(Y1_new), nrow(Y2_new)) >= n_crit) {
resid1 <- resids_cpp(Y1_new, X1_new, B1_sample_mat)
resid2 <- resids_cpp(Y2_new, X2_new, B2_sample_mat)
post_new_2 <- dtar(tar_value_star_2, tar_inp1, tar_inp2, log = T) +
lik_cpp(resid1, zero_vec, Sigma1_sample, loglik = T) +
lik_cpp(resid2, zero_vec, Sigma2_sample, loglik = T)
alpha_32 <- min(1, exp(post_new - post_new_2)) #moving from 3 to 2
q_ratio <- dnorm(tar_value_star, tar_value_star_2, tar_scale, log = T) -
dnorm(tar_value_star, tar_value, tar_scale, log = T) #moving from star_2 to star compared to moving from current to star
ll_ratio <- post_new_2 - post_old
alpha_13 <- exp(ll_ratio + q_ratio) * (1 - alpha_32) / (1 - alpha_12)
if (alpha_13 > runif(1)) {
tar_value <- tar_value_star_2
n_accept <- n_accept + 1
}
}
}
}
a_rate <- n_accept / iter
out_tar[iter] <- tar_value
#cat(tar_scale, " ", a_rate, "\n")
#adjust tar scale
if (iter < adjust_runs && iter >= dram_settings$start && iter %% dram_settings$adapt == 0) {
tar_scale_new <- var(out_tar[1:iter]) + dram_settings$delta #find variance, but we need sd!
#tar_scale_new <- var(out_tar[1:iter]) * dram_settings$adapt_scale
if (tar_scale_new != 0) {
tar_scale <- tar_scale_new * dram_settings$adapt_scale #variance
tar_scale_adpat <- tar_scale / dram_settings$drscale #variance
tar_scale <- sqrt(tar_scale) #make sd
tar_scale_adpat <- sqrt(tar_scale_adpat) #make sd
}
}
#sample delay
probs <- eval_delay_thresh_cpp(ZZ, tar_value, YY, XX, B1_sample_mat, B2_sample_mat,
Sigma1_sample, Sigma2_sample)
d_sample <- sample(d, 1, prob = probs)
curr_ZZ <- ZZ[, d_sample]
# if (!quiet && iter == (burn + 1))
# print("Burn in phase done.", quote = F)
if (iter > burn) {
out_beta1[iter - burn, ] <- B1_sample
out_beta2[iter - burn, ] <- B2_sample
out_sigma1[iter - burn,, ] <- Sigma1_sample
out_sigma2[iter - burn,, ] <- Sigma2_sample
out_delay[iter - burn] <- d_sample
#saving post value
reg1 <- curr_ZZ <= tar_value
reg2 <- !reg1
Y1_new <- YY[reg1, ]; X1_new <- XX[reg1, ]
Y2_new <- YY[reg2, ]; X2_new <- XX[reg2, ]
resid1 <- resids_cpp(Y1_new, X1_new, B1_sample_mat)
resid2 <- resids_cpp(Y2_new, X2_new, B2_sample_mat)
out_resid[iter - burn, which(reg1), ] <- resid1 #correct order now
out_resid[iter - burn, which(reg2), ] <- resid2
# resid1 <- Y1_new - X1_new %*% t(B1_sample_mat)
# resid2 <- Y2_new - X2_new %*% t(B2_sample_mat)
out_post[iter - burn] <- dtar(tar_value, tar_inp1, tar_inp2, log = T) +
lik_cpp(resid1, zero_vec, Sigma1_sample, loglik = T) +
lik_cpp(resid2, zero_vec, Sigma2_sample, loglik = T)
if (((chk1 && chk2) || !stability) && !is.null(forecast_horizon)) {
out_yhat[iter - burn,, ] <- tvar_forecast_cpp(start_forecast, tar_variable, P, N,
forecast_horizon, d_sample,T_tardata,
tar_value, tardata,B1_sample_mat,
B2_sample_mat, Sigma1_sample,
Sigma2_sample, tar_transform, data)
}
if (((chk1 && chk2) || !stability) && !is.null(irf_settings)) {
out_ir1[iter - burn,, ] <- irf(type, B1_sample_mat, Sigma1_sample, shocked_variable,
shock_size, irf_horizon, restrict)
out_ir2[iter - burn,, ] <- irf(type, B2_sample_mat, Sigma2_sample, shocked_variable,
shock_size, irf_horizon, restrict)
}
}
if (!quiet && iter %% 1000 == 0)
print(paste0("Replication ", iter, " of ", reps, ". Acceptance Ratio = ",
round(a_rate, 5), ". Sqrt tarscale = ", round(tar_scale, 5), "."), quote = F)
}
#CHANGED out_yhat!!!!!!
ret_list <- list(out_beta1 = out_beta1, out_beta2 = out_beta2, out_sigma1 = out_sigma1,
out_sigma2 = out_sigma2, out_yhat = NULL, out_tar = out_tar[-c(1:burn)],
out_delay = out_delay, out_post = out_post, out_resid = out_resid)
if (!is.null(irf_settings)) {
ret_list$out_ir1 <- out_ir1
ret_list$out_ir2 <- out_ir2
}
if (!is.null(forecast_horizon))
ret_list$out_yhat <- out_yhat
ret_list$acceptance_rate <- a_rate
ret_list$tar_scale <- tar_scale^2 #CHANGED TO ^2 SO WE RETURN VARIANCE AGAIN (input is variance and then transformed to sd)
ret_list$starting_values <- out_start
ret_list$model_specific <- list(ZZ = ZZ, data_embed = data_embed, dummy_p = dummy_p)
ret_list$out_call <- out_call
ret_list$dataset <- data
#ret_list$all_probs <- all_probs
return(ret_list)
}
|
8bad09cf11599abde97989d170270ad3253b2fd6
|
af553244366d4049d8aae54980ac10d854eda727
|
/scRNAseq_R_scripts/Fig3/Fig3_4_pseudo time monocle.R
|
b2444563b23363bf59082a38bc53c0a830660e3e
|
[] |
no_license
|
lihong1github/Zhan-et-al-2019-scRNAseq
|
ecf90ebf4498b9c0ce525548a9578cafab8a13e5
|
e4f7a8e9d13be80c505fcdd2eb001fb9303e3393
|
refs/heads/master
| 2020-06-13T21:20:24.048411
| 2019-07-03T18:15:59
| 2019-07-03T18:15:59
| 194,791,168
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,204
|
r
|
Fig3_4_pseudo time monocle.R
|
# Pseudotime trajectory analysis with monocle (v2), starting from a Seurat
# object that already carries tSNE/cluster assignments (Fig 3).
# One-time install (left commented out):
# source("http://bioconductor.org/biocLite.R")
# biocLite("monocle")
library(monocle)
# Load the Seurat object `experiment.aggregate` saved after RunTSNE.
load(file="~/desktop/PLX_Rdata/Data_RunTSNE.Rdata")
head(experiment.aggregate@meta.data)
# add cluster_id into meta.data
# Copy the active cluster identities into meta.data so they survive the
# conversion to a monocle CellDataSet below.
cluster_id <- experiment.aggregate@ident
experiment.aggregate <- AddMetaData(
  object = experiment.aggregate,
  metadata = cluster_id,
  col.name = "cluster_id")
# Sanity checks: confirm the new column exists and inspect per-cluster counts.
head(experiment.aggregate@meta.data)
table(experiment.aggregate@meta.data$cluster_id)
class(experiment.aggregate@meta.data$cluster_id)
# Convert Seurat -> monocle, then rebuild the CellDataSet explicitly with a
# negative-binomial expression family (used for count-based data).
seurat_import <-importCDS(experiment.aggregate)
mono_object <- newCellDataSet(exprs(seurat_import),
  phenoData = new("AnnotatedDataFrame", data = pData(seurat_import)),
  featureData = new("AnnotatedDataFrame", data = fData(seurat_import)),
  lowerDetectionLimit = 0.5,
  expressionFamily = negbinomial.size())
# --- QC and normalization --------------------------------------------------
# Size factors / dispersions are prerequisites for the differential tests
# below; detectGenes records, per gene, how many cells exceed min_expr.
mono_object <- estimateSizeFactors(mono_object)
mono_object <- estimateDispersions(mono_object)
mono_object <- detectGenes(mono_object, min_expr = 0.1)
print(head(fData(mono_object)))
print(head(pData(mono_object)))
# Keep genes detected in at least 10 cells for the downstream plots/tests.
expressed_genes <- row.names(subset(fData(mono_object),
  num_cells_expressed >= 10))
# Total mRNA per cell; drop cells above 1e6 counts, then compute
# mean +/- 2 SD bounds on the log10 scale for visual QC.
pData(mono_object)$Total_mRNAs <- Matrix::colSums(exprs(mono_object))
mono_object <- mono_object[,pData(mono_object)$Total_mRNAs < 1e6]
upper_bound <- 10^(mean(log10(pData(mono_object)$Total_mRNAs)) +
  2*sd(log10(pData(mono_object)$Total_mRNAs)))
lower_bound <- 10^(mean(log10(pData(mono_object)$Total_mRNAs)) -
  2*sd(log10(pData(mono_object)$Total_mRNAs)))
# Density of total mRNA per cluster with the QC bounds overlaid.
qplot(Total_mRNAs, data = pData(mono_object), color=cluster_id, geom =
  "density") +
  geom_vline(xintercept = lower_bound) +
  geom_vline(xintercept = upper_bound)
library(reshape2)
L <- log(exprs(mono_object[expressed_genes,]))
# Standardize each gene, so that they are all on the same scale,
# Then melt the data with plyr so we can plot it easily
melted_dens_df <- melt(Matrix::t(scale(Matrix::t(L))))
# Plot the distribution of the standardized gene expression values.
# The red dnorm overlay gives a visual normality check.
qplot(value, geom = "density", data = melted_dens_df) +
  stat_function(fun = dnorm, size = 0.5, color = 'red') +
  xlab("Standardized log(FPKM)") +
  ylab("Density")
# --- Unsupervised clustering in monocle ------------------------------------
# generate cluster without markers
disp_table <- dispersionTable(mono_object)
# Use all genes with mean expression >= 0.1 as ordering genes for
# marker-free tSNE clustering.
unsup_clustering_genes <- subset(disp_table, mean_expression >= 0.1)
mono_object <- setOrderingFilter(mono_object, unsup_clustering_genes$gene_id)
plot_ordering_genes(mono_object)
plot_pc_variance_explained(mono_object, return_all = F) # norm_method='log'
# num_dim = 22 chosen from the PC variance plot above.
mono_object <- reduceDimension(mono_object, max_components = 2, num_dim = 22,
  reduction_method = 'tSNE', verbose = T)
mono_object <- clusterCells(mono_object, num_clusters = 12)
mono_object@phenoData
# Compare monocle clusters against the Seurat cluster_id and sample origin.
plot_cell_clusters(mono_object, 1,2, color="cluster_id")
plot_cell_clusters(mono_object, 1,2, color="orig.ident")
### making pseudo time
# Order cells along a DDRTree trajectory using genes that differ between the
# Seurat clusters (qval < 0.01).
diff_test_res <- differentialGeneTest(mono_object[expressed_genes,],
  fullModelFormulaStr = "~cluster_id")
ordering_genes <- row.names (subset(diff_test_res, qval < 0.01))
mono_object <- setOrderingFilter(mono_object, ordering_genes)
plot_ordering_genes(mono_object)
mono_object <- reduceDimension(mono_object, max_components = 2,
  method = 'DDRTree')
mono_object <- orderCells(mono_object)
# Trajectory colored by pseudotime (single panel, legend at bottom).
# NOTE: output filenames intentionally kept as-is ("pseduotime" spelling).
g <- plot_cell_trajectory(mono_object, color_by = "Pseudotime", cell_size = 0.5) +
  theme(aspect.ratio = 2, legend.position = "bottom")
ggsave("pseduotime_plot_legend.pdf", plot = g, device = "pdf", path = "~/Desktop/pseudotime/",
  scale = 0.8, width = 14, height = 4, units = c("in"),
  dpi = 600, limitsize = FALSE)
# Trajectory faceted by cluster_id (one panel per cluster, no legend).
g <- plot_cell_trajectory(mono_object, color_by = c("cluster_id"), cell_size = 0.5) +
  theme(aspect.ratio = 2) +
  facet_wrap(~cluster_id, nrow = 1) +
  theme(legend.position="none")
ggsave("Clusters_pseduotime.pdf", plot = g, device = "pdf", path = "~/Desktop/pseudotime/",
  scale = 0.8, width = 14, height = 4, units = c("in"),
  dpi = 600, limitsize = FALSE)
|
3ed675c15481e334b82474eddf581539d87aff6f
|
f54d317f00a84cb4d5bb80a679301f93958cd5df
|
/man/read_vcf.Rd
|
dc435124fda1f74d95a22be90cf643c6ae5a9982
|
[
"MIT"
] |
permissive
|
dyndna/MutationalPatterns
|
3adbfde2a0fab47d406fa8ec97ce8a9f1dc93c14
|
b1d9d0fc69b6c8931e3b12a3b4dccc2c475d1695
|
refs/heads/master
| 2021-01-24T23:50:14.127615
| 2016-09-02T17:34:06
| 2016-09-02T17:34:06
| 67,236,097
| 0
| 0
| null | 2016-09-02T15:57:14
| 2016-09-02T15:57:13
| null |
UTF-8
|
R
| false
| true
| 558
|
rd
|
read_vcf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_vcf.R
\name{read_vcf}
\alias{read_vcf}
\title{Read vcf files into list of CollapsedVCF objects}
\usage{
read_vcf(vcf_files, sample_names, genome = "-")
}
\arguments{
\item{vcf_files}{Character vector of vcf file names}
\item{sample_names}{Character vector of sample names}
\item{genome}{A character or Seqinfo object}
}
\value{
List of GRanges objects
}
\description{
Reads Variant Call Format (VCF) files into GRanges objects and combines them into a single list object
}
|
2c69f05bb3b4f170eaf4a34e9a89f1046b9fc28c
|
b90227dc489c9a9a91284d4aeb861485a20577db
|
/aux_scripts/product_store_gen.r
|
a4df9d75234df8f0932be4e9c52f3160d892a105
|
[] |
no_license
|
prj9267/Retail-Database
|
d50de1a4e0c720b94f9ec6120d2bcde2ac46379a
|
1c7545cfef70ee8c58d2272115e2c769c62860f3
|
refs/heads/master
| 2020-09-11T07:33:49.048693
| 2019-11-15T19:32:48
| 2019-11-15T19:32:48
| 221,990,109
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,735
|
r
|
product_store_gen.r
|
# file: product_store_gen.r
# author: Dylan R. Wagner
# Desc:
# Generates product store relation data for use in
# the database.
#
library(data.table)
# Upper bounds for the randomly generated price (currency units) and
# per-item inventory count.
MAX_PRICE <- 50
MAX_INV <- 1000
# Read in the source store data
stores_data <- fread("../data/stores.csv")[, "Store_ID"]
# Item source files; each file is tagged with a distinct tbl_enum id below.
items_source <- c("../data/items.csv", "../data/foods.csv", "../data/beverage.csv", "../data/pharma.csv")
# Global counter mutated by read_items() to tag each source file with an id.
item_enum_cnt <- 0
# read_items: Reads one item CSV, keeping only the upc14 column (loaded as
# character so leading zeros are preserved), and tags every row with the
# current source-file id (tbl_enum).
# NOTE(review): mutates the global counter `item_enum_cnt` via `<<-`, so the
# id assigned depends on the order in which read_items() is called.
read_items <- function(path){
  dta <- fread(path, select=c("upc14"), colClasses=c(upc14="character"))
  dta <- dta[, tbl_enum := item_enum_cnt]
  item_enum_cnt <<- item_enum_cnt + 1
  dta
}
# Read in all item files then combine them
# unique() drops rows duplicated across source files; keying by upc14 sorts
# the table and enables fast keyed lookups/joins.
l <- lapply(items_source, read_items)
items_data <- unique(rbindlist(l))
setkey(items_data, upc14)
# gen_tuples: Generates a random inventory sample for one store.
# Args:
#  - store_id: Used to link the inventory to a store
#
# Return: a data.table with columns:
#  upc14, tbl_enum, store_id, inventory, price
#
gen_tuples <- function(store_id) {
  # Pick a random, non-empty subset of items. seq_len() replaces the original
  # seq(from = 0, ...), which included the invalid row index 0 (silently
  # dropped by subsetting, shrinking the sample by one row).
  rand_rows <- sample(seq_len(nrow(items_data)), size = sample(seq_len(nrow(items_data)), 1))
  # Subset the items space
  items_rand <- items_data[rand_rows, ]
  # Generate additional attributes and add in the store id.
  # replace = TRUE so the draws cannot fail when the random item subset is
  # larger than the candidate pools (MAX_INV inventory values, ~4901 prices);
  # sampling without replacement errored out in that case.
  items_rand <- items_rand[, c("store_id", "inventory", "price") := list(
    store_id,
    sample(seq_len(MAX_INV), nrow(items_rand), replace = TRUE),
    sample(seq(from = 1, to = MAX_PRICE, by = 0.01), nrow(items_rand), replace = TRUE)
  )]
  # Return the new sample
  items_rand
}
# Create sample for each store
# stores_data[[1]] extracts the Store_ID column as a plain vector; one random
# inventory table is generated per store, then all are concatenated and
# written out as the product-store relation.
rand_expand_data_lst <- lapply(stores_data[[1]], gen_tuples)
rand_expand_data <- rbindlist(rand_expand_data_lst)
fwrite(rand_expand_data, file="../data/prod_store.csv")
|
a489badbf8df91b7f9a40491a6a9d4e4f01c4742
|
8dd6b95b372d598de24bc87e16e6ff972e7219e9
|
/man/offTargetAnalysis.Rd
|
b05250de3ff325d28f89773bb6970d21e529cce0
|
[] |
no_license
|
LihuaJulieZhu/CRISPRseek
|
83efe310342e3b5fbc8989d5771287760264adb8
|
6970b7d1bdc967ab7176642e19dcb8d10a1b1fa4
|
refs/heads/master
| 2022-07-11T23:29:46.834606
| 2022-06-21T15:45:29
| 2022-06-21T15:45:29
| 126,867,552
| 4
| 2
| null | 2022-01-13T19:00:55
| 2018-03-26T17:51:03
|
R
|
UTF-8
|
R
| false
| true
| 26,659
|
rd
|
offTargetAnalysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/offTargetAnalysis.R
\name{offTargetAnalysis}
\alias{offTargetAnalysis}
\title{Design target-specific guide RNAs for CRISPR-Cas9 system in one function}
\usage{
offTargetAnalysis(
inputFilePath,
format = "fasta",
header = FALSE,
gRNAoutputName,
findgRNAs = TRUE,
exportAllgRNAs = c("all", "fasta", "genbank", "no"),
findgRNAsWithREcutOnly = FALSE,
REpatternFile = system.file("extdata", "NEBenzymes.fa", package = "CRISPRseek"),
minREpatternSize = 4,
overlap.gRNA.positions = c(17, 18),
findPairedgRNAOnly = FALSE,
annotatePaired = TRUE,
paired.orientation = c("PAMout", "PAMin"),
enable.multicore = FALSE,
n.cores.max = 6,
min.gap = 0,
max.gap = 20,
gRNA.name.prefix = "",
PAM.size = 3,
gRNA.size = 20,
PAM = "NGG",
BSgenomeName,
chromToSearch = "all",
chromToExclude = c("chr17_ctg5_hap1", "chr4_ctg9_hap1", "chr6_apd_hap1",
"chr6_cox_hap2", "chr6_dbb_hap3", "chr6_mann_hap4", "chr6_mcf_hap5", "chr6_qbl_hap6",
"chr6_ssto_hap7"),
max.mismatch = 3,
PAM.pattern = "NNG$|NGN$",
allowed.mismatch.PAM = 1,
gRNA.pattern = "",
baseEditing = FALSE,
targetBase = "C",
editingWindow = 4:8,
editingWindow.offtargets = 4:8,
primeEditing = FALSE,
PBS.length = 13L,
RT.template.length = 8:28,
RT.template.pattern = "D$",
corrected.seq,
targeted.seq.length.change,
bp.after.target.end = 15L,
target.start,
target.end,
primeEditingPaired.output = "pairedgRNAsForPE.xls",
min.score = 0,
topN = 1000,
topN.OfftargetTotalScore = 10,
annotateExon = TRUE,
txdb,
orgAnn,
ignore.strand = TRUE,
outputDir,
fetchSequence = TRUE,
upstream = 200,
downstream = 200,
upstream.search = 0,
downstream.search = 0,
weights = c(0, 0, 0.014, 0, 0, 0.395, 0.317, 0, 0.389, 0.079, 0.445, 0.508, 0.613,
0.851, 0.732, 0.828, 0.615, 0.804, 0.685, 0.583),
baseBeforegRNA = 4,
baseAfterPAM = 3,
featureWeightMatrixFile = system.file("extdata", "DoenchNBT2014.csv", package =
"CRISPRseek"),
useScore = TRUE,
useEfficacyFromInputSeq = FALSE,
outputUniqueREs = TRUE,
foldgRNAs = FALSE,
gRNA.backbone = "GUUUUAGAGCUAGAAAUAGCAAGUUAAAAUAAGGCUAGUCCGUUAUCAACUUGAAAAAGUGGCACCGAGUCGGUGCUUUUUU",
temperature = 37,
overwrite = FALSE,
scoring.method = c("Hsu-Zhang", "CFDscore"),
subPAM.activity = hash(AA = 0, AC = 0, AG = 0.259259259, AT = 0, CA = 0, CC = 0, CG =
0.107142857, CT = 0, GA = 0.069444444, GC = 0.022222222, GG = 1, GT = 0.016129032, TA
= 0, TC = 0, TG = 0.038961039, TT = 0),
subPAM.position = c(22, 23),
PAM.location = "3prime",
rule.set = c("Root_RuleSet1_2014", "Root_RuleSet2_2016", "CRISPRscan", "DeepCpf1"),
chrom_acc,
calculategRNAefficacyForOfftargets = TRUE,
mismatch.activity.file = system.file("extdata",
"NatureBiot2016SuppTable19DoenchRoot.csv", package = "CRISPRseek"),
predIndelFreq = FALSE,
predictIndelFreq.onTargetOnly = TRUE,
method.indelFreq = "Lindel",
baseBeforegRNA.indelFreq = 13,
baseAfterPAM.indelFreq = 24
)
}
\arguments{
\item{inputFilePath}{Sequence input file path or a DNAStringSet object that
contains sequences to be searched for potential gRNAs}
\item{format}{Format of the input file, fasta, fastq and bed are supported,
default fasta}
\item{header}{Indicate whether the input file contains header, default
FALSE, only applies to bed format}
\item{gRNAoutputName}{Specify the name of the gRNA output file when
inputFilePath is a DNAStringSet object instead of a file path}
\item{findgRNAs}{Indicate whether to find gRNAs from the sequences in the
input file or skip the step of finding gRNAs, default TRUE. Set it to FALSE
if the input file contains user selected gRNAs plus PAM already.}
\item{exportAllgRNAs}{Indicate whether to output all potential gRNAs to a
file in fasta format, genbank format or both. Default to both.}
\item{findgRNAsWithREcutOnly}{Indicate whether to find gRNAs overlap with
restriction enzyme recognition pattern}
\item{REpatternFile}{File path containing restriction enzyme cut patterns}
\item{minREpatternSize}{Minimum restriction enzyme recognition pattern
length required for the enzyme pattern to be searched for, default 4}
\item{overlap.gRNA.positions}{The required overlap positions of gRNA and
restriction enzyme cut site, default 17 and 18. For Cpf1, you can set it to 19 and 23.}
\item{findPairedgRNAOnly}{Choose whether to only search for paired gRNAs in
such an orientation that the first one is on minus strand called reverse
gRNA and the second one is on plus strand called forward gRNA. TRUE or
FALSE, default FALSE}
\item{annotatePaired}{Indicate whether to output paired information, default
TRUE}
\item{paired.orientation}{PAMin orientation means the two adjacent PAMs on
the sense and antisense strands face inwards towards each other like N21GG
and CCN21 whereas PAMout orientation means they face away from each other
like CCN21 and N21GG}
\item{enable.multicore}{Indicate whether enable parallel processing, default
FALSE. For super long sequences with lots of gRNAs, suggest set it to TRUE}
\item{n.cores.max}{Indicating maximum number of cores to use in multi core
mode, i.e., parallel processing, default 6. Please set it to 1 to disable
multicore processing for small dataset.}
\item{min.gap}{Minimum distance between two oppositely oriented gRNAs to be
valid paired gRNAs. Default 0}
\item{max.gap}{Maximum distance between two oppositely oriented gRNAs to be
valid paired gRNAs. Default 20}
\item{gRNA.name.prefix}{The prefix used when assign name to found gRNAs,
default gRNA, short for guided RNA.}
\item{PAM.size}{PAM length, default 3}
\item{gRNA.size}{The size of the gRNA, default 20}
\item{PAM}{PAM sequence after the gRNA, default NGG}
\item{BSgenomeName}{BSgenome object. Please refer to available.genomes in
BSgenome package. For example,
\itemize{
\item{BSgenome.Hsapiens.UCSC.hg19} - for hg19,
\item{BSgenome.Mmusculus.UCSC.mm10} - for mm10
\item{BSgenome.Celegans.UCSC.ce6} - for ce6
\item{BSgenome.Rnorvegicus.UCSC.rn5} - for rn5
\item{BSgenome.Drerio.UCSC.danRer7} - for Zv9
\item{BSgenome.Dmelanogaster.UCSC.dm3} - for dm3
}}
\item{chromToSearch}{Specify the chromosome to search, default to all,
meaning search all chromosomes. For example, chrX indicates searching for
matching in chromosome X only}
\item{chromToExclude}{Specify the chromosome not to search. If specified as
"", meaning to search chromosomes specified by chromToSearch. By default, to
exclude haplotype blocks from offtarget search in hg19, i.e., chromToExclude
= c("chr17_ctg5_hap1","chr4_ctg9_hap1", "chr6_apd_hap1", "chr6_cox_hap2",
"chr6_dbb_hap3", "chr6_mann_hap4", "chr6_mcf_hap5","chr6_qbl_hap6",
"chr6_ssto_hap7")}
\item{max.mismatch}{Maximum mismatch allowed in off target search, default
3. Warning: will be considerably slower if set > 3}
\item{PAM.pattern}{Regular expression of protospacer-adjacent motif (PAM),
default NNG$|NGN$ for spCas9. For cpf1, ^TTTN since it is a 5 prime PAM
sequence}
\item{allowed.mismatch.PAM}{Maximum number of mismatches allowed in the PAM
sequence for offtarget search, default to 1 to allow NGN and NNG PAM pattern
for offtarget identification.}
\item{gRNA.pattern}{Regular expression or IUPAC Extended Genetic Alphabet to
represent gRNA pattern, default is no restriction. To specify that the gRNA
must start with GG for example, then set it to ^GG. Please see
help(translatePattern) for a list of IUPAC Extended Genetic Alphabet.}
\item{baseEditing}{Indicate whether to design gRNAs for base editing.
Default to FALSE If TRUE, please set baseEditing = TRUE, targetBase and
editingWidow accordingly.}
\item{targetBase}{Applicable only when baseEditing is set to TRUE. It is
used to indicate the target base for base editing systems, default to C for
converting C to T in the CBE system. Please change it to A if you intend to
use the ABE system.}
\item{editingWindow}{Applicable only when baseEditing is set to TRUE. It is
used to indicate the effective editing window, default to 4 to 8 which is
for the original CBE system. Please change it accordingly if the system you
use have a different editing window.}
\item{editingWindow.offtargets}{Applicable only when baseEditing is set to
TRUE. It is used to indicate the effective editing window to consider for
the offtargets search only, default to 4 to 8 (1 means the most distal site
from the 3' PAM, the most proximal site from the 5' PAM), which is for the
original CBE system. Please change it accordingly if the system you use
have a different editing window, or you would like to include offtargets
with the target base in a larger editing window.}
\item{primeEditing}{Indicate whether to design gRNAs for prime editing.
Default to FALSE. If true, please set PBS.length, RT.template.length,
RT.template.pattern, targeted.seq.length.change, bp.after.target.end,
target.start, and target.end accordingly}
\item{PBS.length}{Applicable only when primeEditing is set to TRUE. It is
used to specify the number of bases to ouput for primer binding site.}
\item{RT.template.length}{Applicable only when primeEditing is set to TRUE.
It is used to specify the number of bases required for RT template, default
to 8 to 18. Please increase the length if the edit is large insertion. Only
gRNAs with calculated RT.template.length falling into the specified range
will be in the output. It is calculated as the following. RT.template.length
= target.start – cut.start + (target.end - target.start) +
targeted.seq.length.change + bp.after.target.end}
\item{RT.template.pattern}{Applicable only when primeEditing is set to TRUE.
It is used to specify the RT template sequence pattern, default to not
ending with C according to https://doi.org/10.1038/s41586-019-1711-4}
\item{corrected.seq}{Applicable only when primeEditing is set to TRUE. It is
used to specify the mutated or inserted sequences after successful editing.}
\item{targeted.seq.length.change}{Applicable only when primeEditing is set
to TRUE. It is used to specify the number of targeted sequence length
change. Please set it to 0 for base changes, positive numbers for insersion,
and negative number for deletion. For example, 10 means that the corrected
sequence will have 10bp insertion, -10 means that the corrected sequence
will have 10bp deletion, and 0 means only bases have been changed and the
sequence length remains the same}
\item{bp.after.target.end}{Applicable only when primeEditing is set to TRUE.
It is used to specify the number of bases to add after the target change end
site as part of RT template. Please refer to RT.template.length for how this
parameter influences the RT.template.length calculation which is used as a
filtering criteria in pregRNA selection.}
\item{target.start}{Applicable only when primeEditing is set to TRUE. It is
used to specify the start location in the input sequence to make changes,
which will be used to obtain the RT template sequence. Please also refer to
RT.template.length for how this parameter influences the RT.template.length
calculation which is used as a filtering criteria in pregRNA selection.}
\item{target.end}{Applicable only when primeEditing is set to TRUE. It is
used to specify the end location in the input sequence to make changes,
which will be used to obtain the RT template sequence. Please also refer to
RT.template.length for how this parameter influences the RT.template.length
calculation which is used as a filtering criteria in pregRNA selection.}
\item{primeEditingPaired.output}{Applicable only when primeEditing is set to
TRUE. It is used to specify the file path to save pegRNA and the second gRNA
with PBS, RT.template, gRNA sequences, default pairedgRNAsForPE.xls}
\item{min.score}{minimum score of an off target to included in the final
output, default 0}
\item{topN}{top N off targets to be included in the final output, default
1000}
\item{topN.OfftargetTotalScore}{top N off target used to calculate the total
off target score, default 10}
\item{annotateExon}{Choose whether or not to indicate whether the off target
is inside an exon or not, default TRUE}
\item{txdb}{TxDb object, for creating and using TxDb object, please refer to
GenomicFeatures package. For a list of existing TxDb object, please search
for annotation package starting with Txdb at
http://www.bioconductor.org/packages/release/BiocViews.html#___AnnotationData,
such as
\itemize{
\item{TxDb.Rnorvegicus.UCSC.rn5.refGene} - for rat
\item{TxDb.Mmusculus.UCSC.mm10.knownGene} - for mouse
\item{TxDb.Hsapiens.UCSC.hg19.knownGene} - for human
\item{TxDb.Dmelanogaster.UCSC.dm3.ensGene} - for Drosophila
\item{TxDb.Celegans.UCSC.ce6.ensGene} - for C.elegans
}}
\item{orgAnn}{organism annotation mapping such as org.Hs.egSYMBOL in
org.Hs.eg.db package for human}
\item{ignore.strand}{default to TRUE when annotating to gene}
\item{outputDir}{the directory where the off target analysis and reports
will be written to}
\item{fetchSequence}{Fetch flank sequence of off target or not, default TRUE}
\item{upstream}{upstream offset from the off target start, default 200}
\item{downstream}{downstream offset from the off target end, default 200}
\item{upstream.search}{upstream offset from the bed input starts to search
for gRNAs, default 0}
\item{downstream.search}{downstream offset from the bed input ends to search
for gRNAs, default 0}
\item{weights}{Applicable only when scoring.method is set to Hsu-Zhang a
numeric vector size of gRNA length, default c(0, 0, 0.014, 0, 0, 0.395,
0.317, 0, 0.389, 0.079, 0.445, 0.508, 0.613, 0.851, 0.732, 0.828, 0.615,
0.804, 0.685, 0.583) which is used in Hsu et al., 2013 cited in the
reference section}
\item{baseBeforegRNA}{Number of bases before gRNA used for calculating gRNA
efficiency, default 4 Please note, for PAM located on the 5 prime, need to
specify the number of bases before the PAM sequence plus PAM size.}
\item{baseAfterPAM}{Number of bases after PAM used for calculating gRNA
efficiency, default 3 for spCas9 Please note, for PAM located on the 5
prime, need to include the length of the gRNA plus the extended sequence on
the 3 prime}
\item{featureWeightMatrixFile}{Feature weight matrix file used for
calculating gRNA efficiency. By default DoenchNBT2014 weight matrix is used.
To use alternative weight matrix file, please input a csv file with first
column containing significant features and the second column containing the
corresponding weights for the features. Please see Doench et al., 2014 for
details.}
\item{useScore}{Default TRUE, display in gray scale with the darkness
indicating the gRNA efficacy. The taller bar shows the Cas9 cutting site.
If set to False, efficacy will not show. Instead, gRNAs in plus strand will
be colored red and gRNAs in negative strand will be colored green.}
\item{useEfficacyFromInputSeq}{Default FALSE. If set to TRUE, summary file
will contain gRNA efficacy calculated from input sequences instead of from
off-target analysis. Set it to TRUE if the input sequence is from a
different species than the one used for off-target analysis.}
\item{outputUniqueREs}{Default TRUE. If set to TRUE, summary file will
contain REs unique to the cleavage site within 100 or 200 bases surrounding
the gRNA sequence.}
\item{foldgRNAs}{Default FALSE. If set to TRUE, summary file will contain
minimum free energy of the secondary structure of gRNA with gRNA backbone
from GeneRfold package provided that GeneRfold package has been installed.}
\item{gRNA.backbone}{gRNA backbone constant region sequence. Default to the
sequence in Sp gRNA backbone.}
\item{temperature}{temperature in celsius. Default to 37 celsius.}
\item{overwrite}{overwrite the existing files in the output directory or
not, default FALSE}
\item{scoring.method}{Indicates which method to use for offtarget cleavage
rate estimation, currently two methods are supported, Hsu-Zhang and CFDscore}
\item{subPAM.activity}{Applicable only when scoring.method is set to
CFDscore A hash to represent the cleavage rate for each alternative sub PAM
sequence relative to preferred PAM sequence}
\item{subPAM.position}{Applicable only when scoring.method is set to
CFDscore The start and end positions of the sub PAM. Default to 22 and 23
for spCas9 with 20bp gRNA and NGG as preferred PAM. For Cpf1, it could be
c(1,2).}
\item{PAM.location}{PAM location relative to gRNA. For example, default to
3prime for spCas9 PAM. Please set to 5prime for cpf1 PAM since it's PAM is
located on the 5 prime end}
\item{rule.set}{Specify a rule set scoring system for calculating gRNA
efficacy. Please note that Root_RuleSet2_2016 requires the following python
packages with specified verion and python 2.7. 1. scikit-learn 0.16.1 2.
pickle 3. pandas 4. numpy 5. scipy}
\item{chrom_acc}{Optional binary variable indicating chromatin accessibility
information with 1 indicating accessible and 0 not accessible.}
\item{calculategRNAefficacyForOfftargets}{Default to TRUE to output gRNA
efficacy for offtargets as well as ontargets. Set it to FALSE if only need
gRNA efficacy calculated for ontargets only to speed up the analysis. Please
refer to https://support.bioconductor.org/p/133538/#133661 for potential use
cases of offtarget efficacies.}
\item{mismatch.activity.file}{Applicable only when scoring.method is set to
CFDscore A comma separated (csv) file containing the cleavage rates for all
possible types of single nucleotide mismatche at each position of the gRNA.
By default, using the supplemental Table 19 from Doench et al., Nature
Biotechnology 2016}
\item{predIndelFreq}{Default to FALSE. Set it to TRUE to output the
predicted indels and their frequencies.}
\item{predictIndelFreq.onTargetOnly}{Default to TRUE, indicating that indels
and their frequencies will be predicted for ontargets only. Usually,
researchers are only interested in predicting the editing outcome for the
ontargets since any editing in the offtargets are unwanted. Set it to FALSE
if you are interested in predicting indels and their frequencies for
offtargets. It will take longer time to run if you set it to FALSE.}
\item{method.indelFreq}{Currently only Lindel method has been implemented.
Please let us know if you think additional methods should be made available.
Lindel is compatible with both Python2.7 and Python3.5 or higher. Please
type help(predictRelativeFreqIndels) to get more details.}
\item{baseBeforegRNA.indelFreq}{Default to 13 for Lindel method.}
\item{baseAfterPAM.indelFreq}{Default to 24 for Lindel method.}
}
\value{
Four tab delimited files are generated in the output directory:
\item{OfftargetAnalysis.xls}{ - detailed information of off targets}
\item{Summary.xls}{ - summary of the gRNAs}
\item{REcutDetails.xls}{ - restriction enzyme cut sites of each gRNA}
\item{pairedgRNAs.xls}{ - potential paired gRNAs}
}
\description{
Design target-specific guide RNAs (gRNAs) and predict relative indel
frequencies for CRISPR-Cas9 system by automatically calling findgRNAs,
filtergRNAs, searchHits, buildFeatureVectorForScoring, getOfftargetScore,
filterOfftarget, calculating gRNA cleavage efficiency, and predict gRNA
efficacy, indels and their frequencies.
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\note{
%% ~~further notes~~
}
\examples{
library(CRISPRseek)
library("BSgenome.Hsapiens.UCSC.hg19")
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(org.Hs.eg.db)
outputDir <- getwd()
inputFilePath <- system.file("extdata", "inputseq.fa",
package = "CRISPRseek")
REpatternFile <- system.file("extdata", "NEBenzymes.fa",
package = "CRISPRseek")
results <- offTargetAnalysis(inputFilePath, findgRNAsWithREcutOnly = TRUE,
REpatternFile = REpatternFile, findPairedgRNAOnly = FALSE,
annotatePaired = FALSE,
BSgenomeName = Hsapiens, chromToSearch = "chrX",
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
orgAnn = org.Hs.egSYMBOL, max.mismatch = 1,
outputDir = outputDir, overwrite = TRUE)
#### predict indels and their frequecies for target sites
if (interactive())
{
results <- offTargetAnalysis(inputFilePath,findgRNAsWithREcutOnly = TRUE,
findPairedgRNAOnly = FALSE,
annotatePaired = FALSE,
BSgenomeName = Hsapiens, chromToSearch = "chrX",
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
orgAnn = org.Hs.egSYMBOL, max.mismatch = 1,
outputDir = outputDir, overwrite = TRUE,
predIndelFreq=TRUE, predictIndelFreq.onTargetOnly= TRUE)
names(results$indelFreq)
head(results$indelFreq[[1]])
### save the indel frequences to tab delimited files, one file for each target/offtarget site.
mapply(write.table, results$indelFreq, file=paste0(names(results$indelFreq), '.xls'), sep = "\t", row.names = FALSE)
#### predict gRNA efficacy using CRISPRscan
featureWeightMatrixFile <- system.file("extdata", "Morenos-Mateo.csv",
package = "CRISPRseek")
results <- offTargetAnalysis(inputFilePath, findgRNAsWithREcutOnly = TRUE,
REpatternFile = REpatternFile, findPairedgRNAOnly = FALSE,
annotatePaired = FALSE,
BSgenomeName = Hsapiens, chromToSearch = "chrX",
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
orgAnn = org.Hs.egSYMBOL, max.mismatch = 1,
rule.set = "CRISPRscan",
baseBeforegRNA = 6, baseAfterPAM = 6,
featureWeightMatrixFile = featureWeightMatrixFile,
outputDir = outputDir, overwrite = TRUE)
######## PAM is on the 5 prime side, e.g., Cpf1
results <- offTargetAnalysis(inputFilePath = system.file("extdata",
"cpf1-2.fa", package = "CRISPRseek"), findgRNAsWithREcutOnly = FALSE,
findPairedgRNAOnly = FALSE,
annotatePaired = FALSE,
BSgenomeName = Hsapiens,
chromToSearch = "chr8",
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
orgAnn = org.Hs.egSYMBOL, max.mismatch = 4,
baseBeforegRNA = 8, baseAfterPAM = 26,
rule.set = "DeepCpf1",
overlap.gRNA.positions = c(19, 23),
useEfficacyFromInputSeq = FALSE,
outputDir = getwd(),
overwrite = TRUE, PAM.location = "5prime",PAM.size = 4,
PAM = "TTTN", PAM.pattern = "^TNNN", allowed.mismatch.PAM = 2,
subPAM.position = c(1,2))
results1 <- offTargetAnalysis(inputFilePath, findgRNAsWithREcutOnly = FALSE,
REpatternFile = REpatternFile, findPairedgRNAOnly = FALSE,
annotatePaired = FALSE,
BSgenomeName = Hsapiens, chromToSearch = "chrX",
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
orgAnn = org.Hs.egSYMBOL, max.mismatch = 4,
outputDir = outputDir, overwrite = TRUE, PAM.location = "5prime",
PAM = "TGT", PAM.pattern = "^T[A|G]N", allowed.mismatch.PAM = 2,
subPAM.position = c(1,2), baseEditing = TRUE, editingWindow =20, targetBase = "G")
results.testBE <- offTargetAnalysis(inputFilePath, findgRNAsWithREcutOnly = FALSE,
REpatternFile = REpatternFile, findPairedgRNAOnly = FALSE,
annotatePaired = FALSE,
BSgenomeName = Hsapiens, chromToSearch = "chrX",
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
orgAnn = org.Hs.egSYMBOL, max.mismatch = 4,
outputDir = outputDir, overwrite = TRUE, PAM.location = "5prime",
PAM = "TGT", PAM.pattern = "^T[A|G]N", allowed.mismatch.PAM = 2,
subPAM.position = c(1,2), baseEditing = TRUE,
editingWindow = 10:20, targetBase = "A")
inputFilePath <- DNAStringSet(paste(
"CCAGTTTGTGGATCCTGCTCTGGTGTCCTCCACACCAGAATCAGGGATCGAAAA",
"CTCATCAGTCGATGCGAGTCATCTAAATTCCGATCAATTTCACACTTTAAACG", sep =""))
names(inputFilePath) <- "testPE"
results3 <- offTargetAnalysis(inputFilePath,
gRNAoutputName = "testPEgRNAs",
BSgenomeName = Hsapiens, chromToSearch = "chrX",
txdb = TxDb.Hsapiens.UCSC.hg19.knownGene,
orgAnn = org.Hs.egSYMBOL, max.mismatch = 1,
outputDir = outputDir, overwrite = TRUE,
PAM.size = 3L,
gRNA.size = 20L,
overlap.gRNA.positions = c(17L,18L),
PBS.length = 15,
corrected.seq = "T",
RT.template.pattern = "D$",
RT.template.length = 8:30,
targeted.seq.length.change = 0,
bp.after.target.end = 15,
target.start = 20,
target.end = 20,
paired.orientation = "PAMin", min.gap = 20, max.gap = 90,
primeEditing = TRUE, findPairedgRNAOnly = TRUE)
}
}
\references{
Patrick D Hsu, David A Scott, Joshua A Weinstein, F Ann Ran,
Silvana Konermann, Vineeta Agarwala, Yinqing Li, Eli J Fine, Xuebing Wu,
Ophir Shalem, Thomas J Cradick, Luciano A Marraffini, Gang Bao & Feng Zhang
(2013) DNA targeting specificity of RNA-guided Cas9 nucleases. Nature
Biotechnology 31:827-834
Doench JG, Hartenian E, Graham DB, Tothova Z, Hegde M, Smith I, Sullender M,
Ebert BL, Xavier RJ, Root DE. Rational design of highly active sgRNAs for
CRISPR-Cas9-mediated gene inactivation. Nat Biotechnol. 2014 Sep 3. doi:
10.1038 nbt.3026
Lihua Julie Zhu, Benjamin R. Holmes, Neil Aronin and Michael Brodsky.
CRISPRseek: a Bioconductor package to identify target-specific guide RNAs
for CRISPR-Cas9 genome-editing systems. Plos One Sept 23rd 2014
Moreno-Mateos, M., Vejnar, C., Beaudoin, J. et al. CRISPRscan: designing
highly efficient sgRNAs for CRISPR-Cas9 targeting in vivo. Nat Methods 12,
982–988 (2015) doi:10.1038/nmeth.3543
Doench JG et al., Optimized sgRNA design to maximize activity and minimize
off-target effects of CRISPR-Cas9. Nature Biotechnology Jan 18th 2016
Anzalone et al., Search-and-replace genome editing without double-strand
breaks or donor DNA. Nature October 2019
https://www.nature.com/articles/s41586-019-1711-4
Wei Chen, Aaron McKenna, Jacob Schreiber et al., Massively parallel
profiling and predictive modeling of the outcomes of CRISPR/Cas9-mediated
double-strand break repair, Nucleic Acids Research, Volume 47, Issue 15, 05
September 2019, Pages 7989–8003, https://doi.org/10.1093/nar/gkz487
Kim et al., Deep learning improves prediction of CRISPR–Cpf1
guide RNA activityNat Biotechnol 36, 239–241 (2018).
https://doi.org/10.1038/nbt.4061
}
\seealso{
CRISPRseek
}
\author{
Lihua Julie Zhu
}
\keyword{misc}
|
91ce05ca80c2c2b0b02b50f938e21daa1ddd83ef
|
9e8936a8cc7beae524251c8660fa755609de9ce5
|
/man/details_rand_forest_partykit.Rd
|
25184df2ea24471a757a26a988087087610534fc
|
[
"MIT"
] |
permissive
|
tidymodels/parsnip
|
bfca10e2b58485e5b21db64517dadd4d3c924648
|
907d2164a093f10cbbc1921e4b73264ca4053f6b
|
refs/heads/main
| 2023-09-05T18:33:59.301116
| 2023-08-17T23:45:42
| 2023-08-17T23:45:42
| 113,789,613
| 451
| 93
|
NOASSERTION
| 2023-08-17T23:43:21
| 2017-12-10T22:48:42
|
R
|
UTF-8
|
R
| false
| true
| 3,866
|
rd
|
details_rand_forest_partykit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rand_forest_partykit.R
\name{details_rand_forest_partykit}
\alias{details_rand_forest_partykit}
\title{Random forests via partykit}
\description{
\code{\link[partykit:cforest]{partykit::cforest()}} fits a model that creates a large number of decision
trees, each independent of the others. The final prediction uses all
predictions from the individual trees and combines them.
}
\details{
For this engine, there are multiple modes: censored regression,
regression, and classification
\subsection{Tuning Parameters}{
This model has 3 tuning parameters:
\itemize{
\item \code{trees}: # Trees (type: integer, default: 500L)
\item \code{min_n}: Minimal Node Size (type: integer, default: 20L)
\item \code{mtry}: # Randomly Selected Predictors (type: integer, default: 5L)
}
}
\subsection{Translation from parsnip to the original package (regression)}{
The \strong{bonsai} extension package is required to fit this model.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(bonsai)
rand_forest() \%>\%
set_engine("partykit") \%>\%
set_mode("regression") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Random Forest Model Specification (regression)
##
## Computational engine: partykit
##
## Model fit template:
## parsnip::cforest_train(formula = missing_arg(), data = missing_arg(),
## weights = missing_arg())
}\if{html}{\out{</div>}}
}
\subsection{Translation from parsnip to the original package (classification)}{
The \strong{bonsai} extension package is required to fit this model.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(bonsai)
rand_forest() \%>\%
set_engine("partykit") \%>\%
set_mode("classification") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Random Forest Model Specification (classification)
##
## Computational engine: partykit
##
## Model fit template:
## parsnip::cforest_train(formula = missing_arg(), data = missing_arg(),
## weights = missing_arg())
}\if{html}{\out{</div>}}
\code{parsnip::cforest_train()} is a wrapper around
\code{\link[partykit:cforest]{partykit::cforest()}} (and other functions) that
makes it easier to run this model.
}
}
\section{Translation from parsnip to the original package (censored regression)}{
The \strong{censored} extension package is required to fit this model.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(censored)
rand_forest() \%>\%
set_engine("partykit") \%>\%
set_mode("censored regression") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Random Forest Model Specification (censored regression)
##
## Computational engine: partykit
##
## Model fit template:
## parsnip::cforest_train(formula = missing_arg(), data = missing_arg(),
## weights = missing_arg())
}\if{html}{\out{</div>}}
\code{censored::cond_inference_surv_cforest()} is a wrapper around
\code{\link[partykit:cforest]{partykit::cforest()}} (and other functions) that
makes it easier to run this model.
\subsection{Preprocessing requirements}{
This engine does not require any special encoding of the predictors.
Categorical predictors can be partitioned into groups of factor levels
(e.g. \verb{\{a, c\}} vs \verb{\{b, d\}}) when splitting at a node. Dummy variables
are not required for this model.
}
\subsection{Other details}{
Predictions of type \code{"time"} are predictions of the median survival
time.
}
\subsection{References}{
\itemize{
\item \href{https://jmlr.org/papers/v16/hothorn15a.html}{partykit: A Modular Toolkit for Recursive Partytioning in R}
\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
\keyword{internal}
|
89127199baf7aec8f023ae8ab8bb728ce3c7ed81
|
3244df900eb5aafe74a49c02f5f332824f220554
|
/carthefts.R
|
e063b94b53b9b027e4707f7d6ff454fcd1577886
|
[] |
no_license
|
Studentenfutter/cars-inequality
|
d1d94aa0922006cf7c29276600c94ae19dfd3b3e
|
2a5c62a507c5863a7ab21c5c7218587c35b27ac7
|
refs/heads/master
| 2022-12-07T12:51:21.547515
| 2020-08-20T12:23:07
| 2020-08-20T12:23:07
| 200,675,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,251
|
r
|
carthefts.R
|
# Scrape car thefts per Landkreis (German county) from the BKA police
# crime statistics (PKS 2018) and join them onto the county polygons.
#
# Depends on objects created elsewhere: `ger` (spatial county data built
# in leaflet.R) and readr/dplyr being attached -- run leaflet.R first.
theft_url <- "https://www.bka.de/SharedDocs/Downloads/DE/Publikationen/PolizeilicheKriminalstatistik/2018/BKATabellen/FaelleLaenderKreiseStaedte/BKA-LKS-F-04-T01-Kreise-Fallentwicklung_csv.csv"
curl::curl_download(theft_url, "data/other_data/theft.csv")
# Files have been manually cleaned
# Import ger-Spatial data-frame from leaflet.R
# load("leaflet.R") - to load ger vector
theft <- read_csv("data/other_data/theft.csv")
counties <- as.data.frame(ger)
# Calculate Missings -- both results are computed and discarded; they are
# interactive sanity checks only and do not affect the data.
theft$`Gemeinde-schlüssel` %in% counties$CC_2
setdiff(theft$`Gemeinde-schlüssel`, counties$CC_2)
# Fix error in Göttingen, from 03152 to 03159 in ger
counties$CC_2[counties$CC_2 == "03152"] <- "03159"
# Harmonize the join-key column name to CC_2 on both sides.
# NOTE(review): the first rename looks like a no-op -- `counties` is
# already addressed via CC_2 above, and "Gemeindeschlüssel" (no hyphen)
# likely matches no column; confirm against the actual `ger` columns.
colnames(counties)[which(names(counties) == "Gemeindeschlüssel")] <- "CC_2"
colnames(theft)[which(names(theft) == "Gemeinde-schlüssel")] <- "CC_2"
# Join theft counts onto the county table (counties without a match keep NA).
crimes_counties <- left_join(counties, theft, by = "CC_2") #Join matching rows from theft to counties
# Save Output
save(crimes_counties, file = "data/other_data/crimes_counties.rda")
# crimes_counties <- full_join(theft, counties, by = "CC_2")
# crimes_counties <- arrange(crimes_counties, CC_2)
|
a814be6f80cfad643ae77bd923e0bbd6b224a689
|
1f0c447b18085ba2452b7288c81852de360d37ba
|
/08-2020_nba_playoffs_excitement_index/01-main-v2.R
|
010402b7882c85102d35ad4f6ff6bb4fe0ddf30e
|
[] |
no_license
|
tRackRR/sports_viz
|
dcc968768c44bcf3f2e07d3f609847b5995bf62e
|
692031f49bb628a821aaff6ed7de57eef8d7810e
|
refs/heads/master
| 2023-05-29T01:15:26.297604
| 2021-06-18T11:53:23
| 2021-06-18T11:53:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,674
|
r
|
01-main-v2.R
|
library(tidyverse)
# Project / output paths (relative to the repository root).
dir_proj <- fs::path('08-2020_nba_playoffs_excitement_index')
dir_data <- fs::path(dir_proj, 'data')
fs::dir_create(dir_data)
# Cached scrape results; the '_2' suffix marks this second export, merged
# with the first one further down.
path_export <- fs::path(dir_data, 'nba_playoffs_excitement_index_2.rds')
path_gif <- fs::path(dir_proj, '2020_nba_playoffs_excitement_index_20201010.gif')
# Animation settings: n_sec seconds of animation plus n_sec_end seconds of
# end pause, rendered at `fps` frames per second.
n_sec <- 20
fps <- 20
n_sec_end <- 3
height <- 600
width <- 900
n_frame <- (n_sec + n_sec_end) * fps # NOTE(review): (20 + 3) * 20 = 460, not 150 as a previous comment claimed
# Scrape per-game excitement-index values from stats.inpredictable.com,
# but only when no cached copy of the results exists on disk yet.
if(!fs::file_exists(path_export)) {
  host <- 'http://stats.inpredictable.com/'
  # Consult the site's robots.txt / crawl policy via {polite}.
  sess <- host %>% polite::bow()
  sess
  # Function operator: wraps `f` so every call is preceded by a pause of
  # `amount` seconds -- used below to throttle scraping requests.
  # https://adv-r.hadley.nz/function-operators.html
  delay_by <- function(f, amount = 5) {
    force(f)
    force(amount)
    function(...) {
      Sys.sleep(amount)
      f(...)
    }
  }
  # Fetch the excitement index for one playoff game from inpredictable's
  # win-probability box score page.
  #   date:    game date (Date)
  #   id_game: numeric game id (the site expects a '00' prefix, added below)
  #   verbose: print a progress line per request
  # Returns a single double.
  retrieve_excitement_index <- function(date, id_game, verbose = TRUE) {
    # NOTE(review): these two lines only *compute* logicals and discard
    # them -- they do not assert anything. Probably meant
    # assertthat::assert_that(assertthat::is.count(...)); confirm intent.
    assertthat::is.count(date)
    assertthat::is.number(id_game)
    year <- date %>% lubridate::year()
    month <- date %>% lubridate::month()
    if(verbose) {
      x <- glue::glue('Retrieving excitement index for `date = "{date}"` and `id_game = "{id_game}"`')
      cli::cat_line(x)
    }
    # Browser-like request headers, including a Referer on the same site.
    headers <- c(
      `Connection` = 'close',
      `Accept` = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
      `User-Agent` = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',
      `Referer` = 'http://stats.inpredictable.com/nba/wpBox.php', # ?season=2019&month=09&date=2020-09-05&gid=0041900232',
      `Accept-Encoding` = 'gzip, deflate',
      `Accept-Language` = 'en-US,en;q=0.9'
    )
    # url <- 'http://stats.inpredictable.com/nba/wpBox.php?season=2019&month=09&date=2020-09-04&gid=0041900203'
    url <- 'http://stats.inpredictable.com/nba/wpBox.php'
    # Seasons are labeled by their starting year, hence `year - 1`.
    q <- list(season = as.character(year - 1), month = sprintf('%02d', month), date = strftime(date, '%Y-%m-%d'), gid = paste0('00', id_game))
    req <- url %>% httr::GET(httr::add_headers(headers), query = q)
    httr::stop_for_status(req)
    cont <- req %>% httr::content(encoding = 'UTF-8')
    # The excitement index sits in the 5th cell of the first score-table row.
    node <- cont %>% rvest::html_nodes(xpath = '//*[@id="score"]/table/tbody/tr[1]/td[5]')
    assertthat::assert_that(length(node) == 1L)
    # Keep only the leading token (drop everything after the first whitespace).
    val <- node %>% rvest::html_text() %>% str_remove_all('\\s.*$') %>% as.double()
    val
  }
  # seasons <- 1996:2016
  seasons <- 2020
  # quietly() captures messages/warnings emitted by nbastatR.
  f_q <- quietly(nbastatR::game_logs)
  # Pull playoff team game logs for the selected season(s).
  logs <-
    seasons %>%
    map_dfr(
      ~f_q(.x, result_types = 'team', season_types = 'Playoffs', assign_to_environment = FALSE) %>% pluck('result')
    ) %>%
    janitor::clean_names()
  logs
  # One row per game (keep only the winner's row), ordered within the
  # season; restricted to games on/after 2020-09-07 -- presumably this
  # second scrape continues an earlier export (see the vals_1/vals_2
  # merge below).
  logs_slim <-
    logs %>%
    filter(slug_team == slug_team_winner) %>%
    select(year = year_season, date = date_game, id_game, tm_w = slug_team_winner, tm_l = slug_team_loser, slug_matchup) %>%
    filter(date >= lubridate::ymd('20200907')) %>%
    group_by(year) %>%
    mutate(idx_season = row_number(date)) %>%
    ungroup()
  logs_slim
  # Wrap the scraper so failures yield NA instead of aborting, and
  # throttle requests with the default 5-second delay.
  f_s <- safely(retrieve_excitement_index, otherwise = NA_real_)
  f_ss <- delay_by(f_s)
  vals <-
    logs_slim %>%
    mutate(val = map2_dbl(date, id_game, ~f_ss(..1, ..2) %>% purrr::pluck('result')))
  vals
  fs::dir_create(dirname(path_export))
  write_rds(vals, path_export)
} else {
  # Cached results exist: reuse them instead of re-scraping.
  vals <- read_rds(path_export)
}
vals
# ---- Combine the earlier scrape (vals_1) with this scrape (vals_2) ----
# The first export lives at the same path without the '_2' suffix.
vals_1 <- path_export %>% str_remove('_2') %>% read_rds()
vals_2 <- path_export %>% read_rds()
# vals_1 %>% tail()
# vals_2 %>% head()
# Stack both scrapes and recompute the within-season game index by date.
vals <-
  bind_rows(vals_1, vals_2) %>%
  group_by(year) %>%
  mutate(
    idx_season = row_number(date)
  ) %>%
  ungroup()
# Every (year, game-index) pair, used to pad seasons with fewer games.
season_idx_combos <-
  crossing(
    vals %>% distinct(year),
    vals %>% distinct(idx_season)
  )
# Number of games played so far in the 2020 playoffs.
idx_season_max <-
  vals %>%
  filter(year == 2020) %>%
  filter(idx_season == max(idx_season)) %>%
  pull(idx_season)
idx_season_max
# Pad missing games with 0 excitement, accumulate excitement within each
# season, build a 'YYYY-YY' season label, and rank all seasons at every
# game index by cumulative excitement (rnk = 1 is the most exciting).
vals_proc <-
  vals %>%
  full_join(season_idx_combos) %>%
  # filter(idx_season <= idx_season_max) %>%
  replace_na(list(val = 0)) %>%
  arrange(year, idx_season) %>%
  group_by(year) %>%
  mutate(val_cumu = cumsum(val)) %>%
  ungroup() %>%
  mutate(grp = sprintf('%04d-%02d', year - 1, year %% 100)) %>%
  group_by(idx_season) %>%
  mutate(
    rnk = row_number(desc(val_cumu))
  ) %>%
  ungroup()
vals_proc
# Sensitivity check: where does 2020 rank when only each season's top
# `n_top` most exciting games are summed (n_top = 1, 4, 7, ..., 25)?
res_best <-
  seq.int(1L, 27L, by = 3) %>%
  tibble(n_top = .) %>%
  mutate(
    rnk =
      map_int(
        n_top,
        ~vals_proc %>%
          group_by(year) %>%
          # slice_max(n = 5, order_by = desc(val)) %>%
          # arrange(desc(val), .by_group = TRUE) %>%
          filter(row_number(desc(val)) <= .x) %>%
          ungroup() %>%
          group_by(year) %>%
          summarize(across(val, sum)) %>%
          ungroup() %>%
          mutate(rnk = row_number(desc(val))) %>%
          filter(year == 2020) %>%
          pull(rnk)
      )
  )
res_best
# Thin the data for animation: drop 1996 and keep every 4th game index
# (plus each season's final index).
vals_proc_filt <-
  vals_proc %>%
  # mutate(keep = idx_season %% 5L == 0 & rnk <= 15L) %>%
  filter(year != 1996) %>%
  mutate(keep = idx_season %% 4L == 0 | idx_season == max(idx_season)) %>%
  filter(keep)
vals_proc_filt
# Ad-hoc inspection of specific game indices (interactive checks only).
vals_proc_filt %>% filter(idx_season == 89L) %>% arrange(desc(val_cumu))
vals_proc_filt %>% filter(idx_season == 80) # idx_season_max)
vals_proc %>% filter(idx_season > 82L) %>% count(year)
# vals_proc %>% filter(year == 2020L) %>% arrange(-rnk)
# Apply the project's shared ggplot theme (defined elsewhere in the project).
do_theme_set()
# Horizontal bar chart of cumulative excitement per season, ordered by
# rank (y = -rnk puts rank 1 on top). 2020 is highlighted in blue; other
# seasons use a grey gradient by year.
viz <-
  vals_proc_filt %>%
  ggplot() +
  aes(y = -rnk, group = grp) +
  # geom_col(fill = 'grey20') +
  geom_tile(
    data = vals_proc_filt %>% filter(year != 2020),
    aes(x = val_cumu / 2, width = val_cumu, height = 0.9, fill = year), color = NA # , fill = 'grey20'
  ) +
  scale_fill_gradient(low = 'grey70', high = 'grey20') + # palette = 'Greys') +
  guides(fill = FALSE) +
  geom_tile(
    data = vals_proc_filt %>% filter(year == 2020),
    aes(x = val_cumu / 2, width = val_cumu, height = 0.9), color = NA, fill = 'blue'
  ) +
  # Season labels drawn just inside the right edge of each bar.
  geom_text(
    aes(x = val_cumu - 1, label = grp),
    hjust = 1.1,
    family = 'Karla',
    size = 5,
    fontface = 'bold',
    color = 'white'
  ) +
  # gganimate::transition_states(idx_season, transition_length = 4, state_length = 1) +
  theme(
    # axis.text.y = element_markdown(),
    axis.text.y = element_blank(),
    # plot.subtitle = element_markdown(),
    # panel.grid.major.x = element_blank(),
    plot.caption = element_text(size = 12),
    # plot.tag = ggtext::element_markdown('Karla', size = 12, color = 'gray20', hjust = 0),
    panel.grid.major.y = element_blank(),
    plot.title = ggtext::element_markdown(size = 16),
    plot.margin = margin(10, 10, 10, 10),
    plot.tag.position = c(.01, 0.01),
  ) +
  # coord_cartesian(clip = 'off', expand = FALSE) +
  # NOTE(review): `x` is supplied twice in this labs() call (a title and a
  # NULL at the bottom) -- one of the two should be removed. Also, the
  # title's HTML closes the span with '<span>' rather than '</span>';
  # confirm the rendered output before fixing.
  labs(
    title = 'Excitement of <b><span style="color:blue">this year\'s<span></b> NBA playoffs compared to playoffs since 1997-98',
    x = 'Total excitement index',
    # subtitle = 'NBA, 2019-20 Restart',
    subtitle = 'After {closest_state} games',
    # subtitle = 'After {idx_season} games',
    tag = 'Viz: @TonyElHabr | Data: https://www.inpredictable.com/',
    caption = 'Excitement index: total in-game win probability change',
    x = NULL,
    y = NULL
  )
viz
# ggsave(plot = viz, filename = fs::path(dir_proj, '2020_nba_playoffs_excitement_index_20201007.png'), width = 10.5, height = 10.5, type = 'cairo')
# Animate over game index; view_follow rescales both axes each frame.
viz_anim <-
  viz +
  gganimate::transition_states(idx_season, transition_length = 4, state_length = 0.1, wrap = FALSE) +
  gganimate::view_follow(fixed_x = FALSE, fixed_y = FALSE)
# viz_anim
# Render the GIF, holding the final frame for n_sec_end seconds.
# NOTE(review): the argument is spelled `nframe`; gganimate's parameter is
# `nframes`, so this only works through partial matching -- confirm and fix.
gganimate::animate(
  viz_anim,
  nframe = n_frame,
  fps = fps,
  height = height,
  width = width,
  renderer = gganimate::gifski_renderer(path_gif),
  end_pause = n_sec_end * fps
)
|
1506cde7c1ab01580aa79106ddfe6d49b2e5b507
|
af65f9ce96bd5f04015afeb34cdc14bc2dcecdca
|
/R/make_analysis_data.R
|
a3b8830f75f057bcb6e72c3facf01c9b3d484394
|
[] |
no_license
|
bcjaeger/LDL-imputation
|
2af09ae49ffaf3ad999886910e4ae3e335509996
|
75578de048b1f0cdda2b75d5de513d97c91cbbfb
|
refs/heads/master
| 2022-12-14T15:06:15.312440
| 2020-09-07T16:20:51
| 2020-09-07T16:20:51
| 293,577,304
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,080
|
r
|
make_analysis_data.R
|
##' Build the analysis datasets for both NHANES study populations.
##'
##' For each population the function (1) copies the observed Sampson LDL
##' column into `ldl_to_impute` -- presumably the working copy that the
##' downstream imputation overwrites (confirm with callers) -- while
##' renaming the original to `ldl_observed`, (2) keeps only the analysis
##' variables, and (3) harmonizes the survey-weight column name to `wts`
##' (`pop_one` uses `wts_af_2yr`, `pop_two` uses `wts_mec_2yr`).
##'
##' @title Make analysis data
##' @param nhanes a named list with elements "pop_one" and "pop_two", each
##'   a data frame containing at least `chol_ldl_mgdl_sampson`, the
##'   corresponding `wts_af_2yr` / `wts_mec_2yr` weight column, and the
##'   covariates selected below.
##' @return a list with elements `pop_one` and `pop_two` holding the
##'   trimmed analysis data frames, each with a single weight column `wts`.
make_analysis_data <- function(nhanes) {
  data_analysis <- nhanes[c("pop_one", "pop_two")] %>%
    map(
      ~ mutate(.x, ldl_to_impute = chol_ldl_mgdl_sampson) %>%
        rename(ldl_observed = chol_ldl_mgdl_sampson) %>%
        select(
          psu,
          strata,
          starts_with('wts'),
          starts_with('ldl'),
          starts_with('meds'),
          starts_with('bp'),
          triglycerides_mgdl,
          chol_hdl_mgdl,
          chol_total_mgdl,
          age,
          sex,
          race_ethnicity,
          hba1c_perc,
          egfr_ckdepi,
          diabetes,
          smk_current,
          ever_had_ascvd,
          ascvd_risk_pcr
        )
    )
  # harmonize names for weights (%<>% is magrittr's in-place pipe-assign),
  # then drop the now-redundant raw weight columns
  data_analysis$pop_one %<>%
    rename(wts = wts_af_2yr) %>%
    select(-starts_with('wts_'))
  data_analysis$pop_two %<>%
    rename(wts = wts_mec_2yr) %>%
    select(-starts_with('wts_'))
  data_analysis
}
|
ce4c684b5a3f430e7672c9e7282b9ac0cb68b545
|
2e96f176654ecefecdbc5be48f800d68c16878d4
|
/sfg-aqua-scripts/aqua_functions.R
|
a4ee5c623fbab3b3c1f675f96822de2bcd662749
|
[] |
no_license
|
tclavelle/sfg-aqua
|
c74491a515e4c3a554d8d3c734f861cbfae9c0ad
|
ae8edef5f80731f27d72ddaa6c74834e8085fae0
|
refs/heads/master
| 2020-04-18T06:41:33.577533
| 2019-03-01T22:27:50
| 2019-03-01T22:27:50
| 65,924,148
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
aqua_functions.R
|
##############################################################
##
## Functions script for aquaculture-fisheries model
##
##############################################################
# r = 1.5
# p = 850
# c = 20
# q = 0.0001
# Kmax = 1000
# phi = 0.5
# A = 300
# delta = 0.05
# Logistic (surplus-production) biomass growth increment.
#
# r: intrinsic growth rate; B: current biomass; K: carrying capacity.
# Returns the per-period growth r * B * (1 - B / K).
biomassGrowth <- function(r, B, K) {
  density_effect <- 1 - B / K
  r * B * density_effect
}
# Carrying capacity net of the habitat taken up by aquaculture.
#
# Kmax: maximum carrying capacity; phi: per-unit impact of aquaculture
# area; A: aquaculture area. Returns Kmax - phi * A.
carryingCapacity <- function(Kmax, phi, A) {
  Kmax - phi * A
}
# Fishery harvest under the standard catch equation h = q * B * E.
#
# q: catchability coefficient; B: biomass; E: fishing effort.
fisheryHarvest <- function(q, B, E) {
  q * B * E
}
# Equation 3
# Equation 3: change in stock -- logistic growth minus harvest.
#
# r: growth rate; B: biomass; k: carrying capacity; harvest: removals.
# NOTE(review): phi and A are accepted but never used in the body (kept
# for interface compatibility); confirm callers before removing them.
stockGrowth <- function(r, B, k, phi, A, harvest) {
  growth <- r * B * (1 - B / k)
  growth - harvest
}
# Equation 4
# Equation 4: closed-form optimal (golden-rule) stock level B* for the
# Gordon-Schaefer model with discount rate `delta`.
#
# Fix: the original body used `q` (catchability) without declaring it as
# a parameter, so it only worked when a global `q` happened to exist.
# `q` is now an explicit trailing parameter with default 1e-04 (the value
# given in the commented-out calibration at the top of this file), which
# keeps every existing positional call working unchanged.
#
# Kmax, phi and A are accepted but unused, mirroring the original
# interface (they matter elsewhere via carryingCapacity()).
optimalStock <- function(Kmax, c, p, delta, r, phi, A, k, q = 1e-04) {
  base_term <- c / (p * q * k) + 1 - delta / r
  B_star <- k / 4 * (base_term + (base_term^2 + (8 * c * delta) / (p * q * r * k))^0.5)
  return(B_star)
}
# Shadow price of quota: market price minus the marginal harvest cost.
#
# K: carrying-capacity (scaling) term; p: output price; c: cost
# parameter; q: catchability; B: biomass.
# Returns p - c / (q * B * K).
quotaPrice <- function(K, p, c, q, B) {
  marginal_cost <- c / (q * B * K)
  p - marginal_cost
}
# Linear inverse demand: price declines with total landings from the
# fishery (h_f) plus aquaculture (h_a).
#
# choke_p: choke price (price at zero quantity); slope: demand slope.
# Returns the market-clearing price.
#
# Fix: the original body ended in a bare assignment, so the price was
# returned *invisibly*; return it explicitly, consistent with the other
# helpers in this file.
demandFunc <- function(choke_p, h_f, h_a, slope) {
  p_out <- choke_p - slope * (h_f + h_a)
  return(p_out)
}
|
040d3d54c1a043804c723d07b8e16b7361e74e10
|
f210b3be7b705c76280ba3c5e543a73f0bd47b5c
|
/scripts/potential_gdp_calculations.R
|
07ecbb26e61d55381caa0e3758d2546bf46567ba
|
[] |
no_license
|
ricardomayerb/new_normal
|
0356fab0412b7bda8527b196689035727ddf3379
|
011128b071c80ed6e3c5ed242fe45cf9e8ff485a
|
refs/heads/master
| 2021-01-19T20:46:29.203141
| 2017-06-13T22:00:38
| 2017-06-13T22:00:38
| 88,550,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,510
|
r
|
potential_gdp_calculations.R
|
library(dplyr) # use dplyr::first and dplyr::last
library(ggplot2)
library(xts) # use xts::first and xts::last
library(tidyr)
library(lubridate)
library(tibble)
library(tidyquant)
source("./functions/funcs_for_new_normal.R")
# Load the long-format WEO (Apr 2017) + CEPAL dataset prepared upstream;
# provides the data frame `WEOApr2017cepal18_others_long`.
load("./produced_data/WEOApr2017_cepal_and_others")
# Subject-code lookup tables: one arbitrary country/year slice is enough
# because the subject metadata repeat across observations.
subject_dict_co <- WEOApr2017cepal18_others_long %>%
  filter(iso == "CHL" & year == 2000) %>%
  select(-c(value, iso, country, country_series_specific_notes,
            weo_country_code, estimates_start_after, scale, year))
subject_dict_wo <- WEOApr2017cepal18_others_long %>%
  filter(country == "World" & year == 2000) %>%
  select(-c(value, iso, country_series_specific_notes,
            weo_country_code, estimates_start_after, scale, year))
# Keep only real GDP (NGDP_R), real GDP growth (NGDP_RPCH) and the output
# gap in percent of potential GDP (NGAP_NPGDP).
weo_few <- WEOApr2017cepal18_others_long %>%
  select(iso, country, year, weo_subject_code, value) %>%
  filter(weo_subject_code %in% c("NGDP_R", "NGDP_RPCH", "NGAP_NPGDP"))
# Annual real GDP with an explicit end-of-year date column for filtering.
real_gdp_long <- weo_few %>%
  filter(weo_subject_code %in% c("NGDP_R")) %>%
  mutate(date = ymd(paste0(year, "-12-31")))
# foo <- add_ts_filters(real_gdp_long , date_colname = "date", value_colname = "value", country_colname = "iso")
# Trend-filter real GDP per country (add_ts_filters comes from
# funcs_for_new_normal.R and supplies the hp_trend column), then derive
# trend growth in percent.
real_gdp_hp <- add_ts_filters(real_gdp_long) %>% arrange(country, date) %>%
  group_by(country) %>%
  mutate(trend_growth_pct = 100*(hp_trend / dplyr::lag(hp_trend)-1) )
# Average HP-trend growth before (2003-2008) and after (2010-2016) the
# global financial crisis, and the change between the two windows.
trend_growth_2003_2008 <- real_gdp_hp %>%
  filter(year>=2003 & year <= 2008) %>%
  summarise(avg_tg_2003_2008 = mean(trend_growth_pct))
trend_growth_2010_2016 <- real_gdp_hp %>%
  filter(year>=2010 & year <= 2016) %>%
  summarise(avg_tg_2010_2016 = mean(trend_growth_pct))
trend_growth_2003_2008_2010_2016 <- left_join(trend_growth_2003_2008,
                                              trend_growth_2010_2016,
                                              by = "country") %>%
  mutate(dif = avg_tg_2010_2016 - avg_tg_2003_2008 )
real_gdp_country_wide <- real_gdp_long %>%
  spread(key = country, value=value)
real_gdp_growth_long <- weo_few %>%
  filter(weo_subject_code %in% c("NGDP_RPCH"))
# Regional aggregates only. NOTE: "Euro area " carries a trailing space
# in the source data -- keep it when matching.
weo_long_EU_AE_G7 <- subset(weo_few ,
                            country %in% c("Major advanced economies (G7)",
                                           "Euro area " , "Advanced economies")) %>%
  select(-iso) %>% arrange(country, year)
# Back out potential-GDP growth from observed growth and the output gap:
# gross potential growth = gross GDP growth * lag(gross gap) / gross gap.
# NOTE(review): "potetial" is a typo, but renaming the column could break
# downstream code; left as is.
weo_cwide_EU_AE_G7 <- weo_long_EU_AE_G7 %>% spread(weo_subject_code, value) %>%
  group_by(country) %>%
  mutate(gross_gap = 1 + NGAP_NPGDP/100,
         gross_rate_gdp = 1 + NGDP_RPCH/100,
         gross_rate_potetial_gdp = gross_rate_gdp*dplyr::lag(gross_gap)/gross_gap,
         growth_potential_pct = 100*(gross_rate_potetial_gdp-1))
# Same pre/post-crisis comparison as above, using the gap-implied
# potential growth for the aggregate regions.
EU_AE_G7_tg_2003_2008 <- weo_cwide_EU_AE_G7 %>%
  filter(year>=2003 & year <= 2008) %>%
  summarise(avg_tg_2003_2008 = mean(growth_potential_pct))
EU_AE_G7_tg_2010_2016 <- weo_cwide_EU_AE_G7 %>%
  filter(year>=2010 & year <= 2016) %>%
  summarise(avg_tg_2010_2016 = mean(growth_potential_pct))
trend_growth_AE_G7_EU_2003_2008_2010_2016 <- left_join(EU_AE_G7_tg_2003_2008,
                                                       EU_AE_G7_tg_2010_2016,
                                                       by = "country") %>%
  mutate(dif = avg_tg_2010_2016-avg_tg_2003_2008)
real_gdp_gap_weo_long <- weo_few %>%
  filter(weo_subject_code %in% c("NGAP_NPGDP"))
# Wide (year-in-columns) versions of each series for inspection/export.
real_gdp_wide <- real_gdp_long %>%
  spread(key=year, value=value)
real_gdp_growth_wide <- real_gdp_growth_long %>%
  spread(key=year, value=value)
real_gdp_gap_weo_wide <- real_gdp_gap_weo_long %>%
  spread(key=year, value=value)
|
3976fa700a5e1786b360979f92b55d3739e4a7ee
|
633aaea8f18b9baea73a50addb1e20cfe4623f1e
|
/journal_dashboard/altmetrics.R
|
5f9322334da837c9cd0dc10b708c4e4667578eb8
|
[] |
no_license
|
BohdanTkachuk/journal_dashboard
|
d2158b8a917c0b2fbed86e48e8907ceff84653c1
|
6f73ce75bd0436888a41d155235273fb0e99a568
|
refs/heads/master
| 2022-11-13T14:29:38.801012
| 2020-06-28T16:28:43
| 2020-06-28T16:28:43
| 271,330,530
| 0
| 0
| null | 2020-06-26T20:16:44
| 2020-06-10T16:34:57
| null |
UTF-8
|
R
| false
| false
| 3,311
|
r
|
altmetrics.R
|
library(shinyWidgets)
library(shinydashboard)
library(data.table)
library(plotly)
library(dplyr)
source("load_data.R")
# https://stackoverflow.com/questions/34093169/horizontal-vertical-line-in-plotly
# Build a plotly shape specification for a vertical reference line at `x`.
#
# The line spans the full plot height ("paper" y-reference, y0 = 0 to
# y1 = 1) while x0/x1 are in data coordinates. The returned list is meant
# for the `shapes` argument of plotly::layout().
vline <- function(x = 0, color = "red") {
  shape <- list(type = "line")
  shape$y0 <- 0
  shape$y1 <- 1
  shape$yref <- "paper"
  shape$x0 <- x
  shape$x1 <- x
  shape$line <- list(color = color)
  shape
}
# Horizontal bar chart of an aggregate altmetric score per journal.
#
# input: one of "Maximum", "Minimum", "Mean", "Median" -- selects which
#   aggregate of `alt$altmetric_score` (per journal_name) is plotted.
# Uses the global data frame `alt` (loaded in load_data.R).
# Returns a plotly figure; journals are ordered by score and a red
# vertical line marks the cross-journal average of the chosen aggregate.
altmetrics_aggregate_barchart <- function(input){
  # All four aggregates are computed up front even though only one is
  # used. NOTE(review): the locals max/min/mean/median shadow the base
  # functions of the same name; mean(...) below still resolves to
  # base::mean because R skips non-function bindings in call position,
  # but renaming these locals would be safer.
  max <- aggregate(altmetric_score ~ journal_name, alt, max)
  min <- aggregate(altmetric_score ~ journal_name, alt, min)
  mean <- aggregate(altmetric_score ~ journal_name, alt, mean)
  median <- aggregate(altmetric_score ~ journal_name, alt, median)
  data <- switch(input,
                 "Maximum" = max,
                 "Minimum" = min,
                 "Mean" = mean,
                 "Median" = median
  )
  data[data == ''] <- NA # Set empty journal name to NA
  data <- na.omit(data) # Remove NA
  # Get average for current selection
  avg <- mean(data[['altmetric_score']])
  # Sorted copy, fed to plotly as the explicit category order for the y axis.
  sorted_data <- data[order(data$altmetric_score), ]
  fig <- plot_ly(data, x=~altmetric_score, y=~journal_name, orientation='h', type='bar', name="test")
  fig <- fig %>% layout(
    xaxis = list(title="Altmetric Score"),
    yaxis = list(title="Journals", tickfont=list(size=10), margin=list(pad=50),
                 categoryorder = "array",
                 categoryarray = sorted_data$journal_name),
    shapes = list(vline(avg)) # add a line to indicate average across journals
  )
  return(fig)
}
# Some notes on pie chartss
# https://observablehq.com/@didoesdigital/16-may-2020-donut-charts-and-pie-charts?collection=@didoesdigital/journal-getting-started-with-data-viz-collection
# https://www.data-to-viz.com/caveat/pie.html
# Pie chart of altmetric mention counts by source for one journal.
#
# sources: character vector of source names (row keys) to include.
# journal: journal_name value to plot; also used as the chart title.
# Uses the global data frame `alt`. Columns 10:27 are assumed to be the
# per-source mention-count columns -- TODO confirm this positional
# assumption against load_data.R.
# Returns a plotly pie figure.
altmetrics_pie <- function(sources, journal){
  # Sum every source column per journal (data.table aggregation),
  # then keep only the requested journal's row.
  summary <- setDT(alt)[, c(lapply(.SD[, c(10:27), with=FALSE], sum)), by=journal_name]
  sub <- data.frame(subset(summary, journal_name == journal))
  # Transpose to long form: one row per source (key) with its total (value).
  flipped <- as.data.frame(t(sub))
  flipped <- setDT(flipped, keep.rownames = TRUE)[]
  names(flipped)[1] <- 'key'
  names(flipped)[2] <- 'value'
  # remove first row which has a string >> "journal_name"
  flipped <- flipped[-1,]
  # make sure there are no strings: the transpose coerced all totals to
  # character, so parse them back into a numeric `values` column
  flipped$values <- as.numeric(as.character(flipped$value))
  # limit to just the options selected for sources
  flipped <- flipped[flipped$key %in% sources, ]
  fig <- plot_ly(flipped, labels=~key, values = ~values, type='pie') %>%
    layout(title = journal,
           xaxis = list(showgrid=FALSE, zeroline=FALSE, showticklabels=FALSE),
           yaxis = list(showgrid=FALSE, zeroline=FALSE, showticklabels=FALSE))
  return(fig)
}
# Grouped bar chart comparing mention counts across journals by source.
#
# journals: character vector of journal_name values to compare.
# types: character vector of source-column names to include.
# Uses the global data frame `alt_simp` (per-journal summary rows).
# Returns a plotly grouped-bar figure with one trace per journal.
altmetrics_social_bar_comp <- function(journals, types){
  alt_simp <- alt_simp[alt_simp$journal_name %in% journals, ] # limit to selected journals
  keep <- c('journal_name', types)
  data <- subset(alt_simp, select = keep)
  # Transpose so sources become rows and journals become columns, naming
  # the columns after the journals.
  data <- setNames(data.frame(t(data)), data[,1])
  setDT(data, keep.rownames = "Sources")[]
  # Drop the journal_name row that the transpose turned into data.
  data = as.data.frame(data[-1,])
  fig <- plot_ly(data, type='bar')
  # One trace per journal column (column 1 holds the source labels).
  # NOTE(review): 2:ncol(data) misbehaves when no journal matches
  # (ncol < 2 yields a decreasing sequence); guard with seq_len if that
  # case can occur.
  for(i in 2:ncol(data)){
    fig <- add_trace(fig, x = ~Sources, y = data[,i], name = colnames(data)[i])
  }
  fig <- fig %>% layout(yaxis = list(title = 'Count'), barmode = 'group')
  return(fig)
}
|
f02187b77bbe7b61a35b3d2b86068806ecfbeb37
|
b3a5c21adf890f0b66790f23332f0082e7f1b40a
|
/man/cli_li.Rd
|
cbb1761640018dde02f95bcfd9b1fe7ae5b7cfeb
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
r-lib/cli
|
96886f849fe69f8435f2d22fccf5d00dee7a5ce4
|
c36066ca6a208edbeb37ab13467a4dc6f5b5bbe2
|
refs/heads/main
| 2023-08-29T14:19:41.629395
| 2023-08-18T13:18:33
| 2023-08-18T13:18:33
| 89,723,016
| 560
| 69
|
NOASSERTION
| 2023-09-13T11:46:10
| 2017-04-28T16:10:28
|
R
|
UTF-8
|
R
| false
| true
| 2,828
|
rd
|
cli_li.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cli.R
\name{cli_li}
\alias{cli_li}
\title{CLI list item(s)}
\usage{
cli_li(
items = NULL,
labels = names(items),
id = NULL,
class = NULL,
.auto_close = TRUE,
.envir = parent.frame()
)
}
\arguments{
\item{items}{Character vector of items, or \code{NULL}.}
\item{labels}{For definition lists the item labels.}
\item{id}{Id of the new container. Can be used for closing it with
\code{\link[=cli_end]{cli_end()}} or in themes. If \code{NULL}, then an id is generated and
returned invisibly.}
\item{class}{Class of the item container. Can be used in themes.}
\item{.auto_close}{Whether to close the container, when the calling
function finishes (or \code{.envir} is removed, if specified).}
\item{.envir}{Environment to evaluate the glue expressions in. It is
also used to auto-close the container if \code{.auto_close} is \code{TRUE}.}
}
\value{
The id of the new container element, invisibly.
}
\description{
A list item is a container, see \link{containers}.
}
\details{
\subsection{Nested lists}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{fun <- function() \{
ul <- cli_ul()
cli_li("one:")
cli_ol(letters[1:3])
cli_li("two:")
cli_li("three")
cli_end(ul)
\}
fun()
}\if{html}{\out{</div>}}\if{html}{\out{
<div class="asciicast" style="color: #172431;font-family: 'Fira Code',Monaco,Consolas,Menlo,'Bitstream Vera Sans Mono','Powerline Symbols',monospace;line-height: 1.300000"><pre>
#> • one:
#> 1. a
#> 2. b
#> 3. c
#> • two:
#> • three
</pre></div>
}}
}
}
\seealso{
This function supports \link[=inline-markup]{inline markup}.
Other functions supporting inline markup:
\code{\link{cli_abort}()},
\code{\link{cli_alert}()},
\code{\link{cli_blockquote}()},
\code{\link{cli_bullets_raw}()},
\code{\link{cli_bullets}()},
\code{\link{cli_dl}()},
\code{\link{cli_h1}()},
\code{\link{cli_ol}()},
\code{\link{cli_process_start}()},
\code{\link{cli_progress_along}()},
\code{\link{cli_progress_bar}()},
\code{\link{cli_progress_message}()},
\code{\link{cli_progress_output}()},
\code{\link{cli_progress_step}()},
\code{\link{cli_rule}},
\code{\link{cli_status_update}()},
\code{\link{cli_status}()},
\code{\link{cli_text}()},
\code{\link{cli_ul}()},
\code{\link{format_error}()},
\code{\link{format_inline}()}
}
\concept{functions supporting inline markup}
|
9b6465657d81cff0a5bd52c8a12c9480fbf0263e
|
3053a557531d328b430b69fb7851dcb2dde22c93
|
/dataone/man/MNode-class.Rd
|
129be90ba417584f8a657b29bda701b294041810
|
[
"Apache-2.0"
] |
permissive
|
KillEdision/rdataone
|
e3bfe188ed1eba1f01d6e256f3a98a64104125ef
|
3ec0efb67cc3ba951d44ce13e5750bfec8caaac4
|
refs/heads/master
| 2021-01-15T20:24:17.028477
| 2015-07-29T01:16:47
| 2015-07-29T01:16:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,701
|
rd
|
MNode-class.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/D1Node.R, R/MNode.R
\docType{methods}
\name{listObjects,D1Node-method}
\alias{MNode,D1Node-method}
\alias{MNode,character-method}
\alias{MNode-class}
\alias{archive,MNode,character-method}
\alias{create,MNode,character-method}
\alias{describe,MNode,character-method}
\alias{generateIdentifier,MNode-method}
\alias{get,MNode,character-method}
\alias{getCapabilities,MNode-method}
\alias{getChecksum,MNode,character-method}
\alias{getSystemMetadata,MNode,character-method}
\alias{listObjects,D1Node-method}
\alias{update,MNode,character-method}
\alias{uploadDataObject,MNode,DataObject-method}
\alias{uploadDataPackage,MNode,DataPackage-method}
\title{Retrieve the list of objects present on the MN that match the calling parameters.}
\usage{
\S4method{listObjects}{D1Node}(node, fromDate = as.character(NA),
toDate = as.character(NA), formatId = as.character(NA),
replicaStatus = as.logical(TRUE), start = as.integer(0),
count = as.integer(1000))
\S4method{MNode}{character}(x)
\S4method{MNode}{D1Node}(x)
\S4method{getCapabilities}{MNode}(mnode)
\S4method{get}{MNode,character}(node, pid, check = as.logical(FALSE))
\S4method{getSystemMetadata}{MNode,character}(node, pid)
\S4method{describe}{MNode,character}(node, pid)
\S4method{getChecksum}{MNode,character}(node, pid,
checksumAlgorithm = "SHA-1")
\S4method{create}{MNode,character}(mnode, pid, filepath, sysmeta)
\S4method{update}{MNode,character}(mnode, pid, filepath, newpid, sysmeta)
\S4method{archive}{MNode,character}(mnode, pid)
\S4method{generateIdentifier}{MNode}(mnode, scheme = "UUID",
fragment = NULL)
\S4method{uploadDataPackage}{MNode,DataPackage}(mn, dp, replicate = NA,
numberReplicas = NA, preferredNodes = NA, public = as.logical(FALSE),
accessRules = NA, ...)
\S4method{uploadDataObject}{MNode,DataObject}(mn, do,
replicate = as.logical(FALSE), numberReplicas = NA, preferredNodes = NA,
public = as.logical(FALSE), accessRules = NA, ...)
}
\arguments{
\item{node}{The MNode or CNode instance from which the checksum will be retrieved}
\item{fromDate}{Entries with a modified date greater than \code{'fromDate'} will be returned.
This value must be specified in ISO 8601 format, i.e. "YYYY-MM-DDTHH:MM:SS.mmm+00:00"}
\item{toDate}{Entries with a modified date less than \code{'toDate'} will be returned.
This value must be specified in ISO 8601 format, i.e. "YYYY-MM-DDTHH:MM:SS.mmm+00:00"}
\item{formatId}{The format to match, for example "eml://ecoinformatics.org/eml-2.1.1"}
\item{replicaStatus}{A logical value that determines if replica (object not on it's origin node) should be returned. Default is TRUE.}
\item{start}{An integer that specifies the first element of the result set that will be returned}
\item{count}{An integer that specifies how many results will be returned}
\item{mnode}{The MNode instance from which the identifier will be generated}
\item{pid}{The identifier of the object to be downloaded}
\item{check}{Check if the requested pid has been obsoleted and print a warning if true}
\item{node}{The MNode instance from which the pid will be downloaded}
\item{node}{The MNode instance from which the SystemMetadata will be downloaded}
\item{pid}{The identifier of the object}
\item{pid}{Identifier for the object in question. May be either a PID or a SID. Transmitted as
part of the URL path and must be escaped accordingly.}
\item{node}{The MNode instance from which the checksum will be retrieved}
\item{pid}{The identifier of the object}
\item{checksumAlgorithm}{The algorithm used to calculate the checksum. Default="SHA-1"}
}
\value{
list Objects that met the search criteria
the bytes of the object
SystemMetadata for the object
A list of header elements
character the checksum value, with the checksum algorithm as the attribute "algorithm"
}
\description{
Retrieve the list of objects present on the MN that match the calling parameters.
MNode provides functions interacting with the a DataONE Member Node repository, which
is a repository that provides access for reading and writing data and metadata using the common
DataONE service API. The MNode API includes functions for retrieving data and metadata based on its
unique persistent identifier (pid), as well as for creating, updating, and archiving these data and
metadata objects.
Get the bytes associated with an object on this Member Node.
The SystemMetadata includes information about the identity, type, access control, and other system
level details about the object.
This method provides a lighter weight mechanism than getSystemMetadata() for a client to
determine basic properties of the referenced object.
A checksum is calculated for an object when it is uploaded to DataONE and
is submitted with the object's system metadata. The \code{'getChecksum'} method retrieves
the checksum from the specified member node
}
\details{
The list of objects that is returned is paged according to the \code{'start'} and
\code{'count'} values, so that large result sets can be returned over multiple calls.
Methods that perform write operations on the Member Node generally require
authentication, which is managed via a client-side X.509 certificate via
CILogon \url{https://cilogon.org/?skin=DataONE}. See \code{\link{CertificateManager}} for details.
This operation acts as the 'public' anonymous user unless an X.509 certificate is
present in the default location of the file system, in which case the access will be authenticated.
This operation acts as the 'public' anonymous user unless an X.509 certificate is
present in the default location of the file system, in which case the access will be authenticated.
}
\section{Methods (by generic)}{
\itemize{
\item \code{listObjects}:
\item \code{MNode}:
\item \code{MNode}:
\item \code{getCapabilities}:
\item \code{get}:
\item \code{getSystemMetadata}:
\item \code{describe}:
\item \code{getChecksum}:
\item \code{create}:
\item \code{update}:
\item \code{archive}:
\item \code{generateIdentifier}:
\item \code{uploadDataPackage}:
\item \code{uploadDataObject}:
}}
\section{Slots}{
\describe{
\item{\code{endpoint}}{The url to access node services, which is the baseURL plus the version string}
}}
\examples{
\dontrun{
cn <- CNode("STAGING2")
mn <- getMNode(cn, "urn:node:mnTestKNB")
mnid <- mn@identifier
newid <- generateIdentifier(mn, "UUID")
cm <- CertificateManager()
u <- showClientSubject(cm)
testdf <- data.frame(x=1:10,y=11:20)
csvfile <- paste(tempfile(), ".csv", sep="")
write.csv(testdf, csvfile, row.names=FALSE)
f <- "text/csv"
size <- file.info(csvfile)$size
sha1 <- digest(csvfile, algo="sha1", serialize=FALSE, file=TRUE)
sysmeta <- new("SystemMetadata", identifier=newid, formatId=f, size=size, submitter=u, rightsHolder=u, checksum=sha1, originMemberNode=mnid, authoritativeMemberNode=mnid)
response <- create(mn, newid, csvfile, sysmeta)
response <- archive(mn, newid)
}
\dontrun{
mn_uri <- "https://knb.ecoinformatics.org/knb/d1/mn/v1"
mn <- MNode(mn_uri)
pid <- "knb.473.1"
describe(mn, pid)
describe(mn, "adfadf") # warning message when wrong pid
}
}
\author{
Matthew Jones
Scott Chamberlain
}
\seealso{
\url{http://mule1.dataone.org/ArchitectureDocs-current/apis/MN_APIs.html#MN_read.listObjects}
\url{http://mule1.dataone.org/ArchitectureDocs-current/apis/MN_APIs.html#MNRead.get}
\url{http://mule1.dataone.org/ArchitectureDocs-current/apis/MN_APIs.html#MNRead.getSystemMetadata}
\url{http://mule1.dataone.org/ArchitectureDocs-current/apis/MN_APIs.html#MNRead.describe}
\url{http://mule1.dataone.org/ArchitectureDocs-current/apis/MN_APIs.html#MNRead.getChecksum}
}
\keyword{classes}
|
5dbcab79a8d59420a6c999e3d06a745a02d3b45f
|
2a5e4fea8a2661320eaab0ea9069affb7e72fd44
|
/twitter/code/plotting.R
|
507c36797422577f45cd38c2402b8d7289ad1c7b
|
[] |
no_license
|
luiscape/hdx_management_dashboard
|
23a3556dad06553be1511acf03f43b81727c2c7f
|
2a701eccaed2d9b1a133f675ec37abd4d671e5c8
|
refs/heads/master
| 2016-09-15T17:24:47.448858
| 2015-04-06T14:39:48
| 2015-04-06T14:39:48
| 22,891,628
| 0
| 0
| null | 2014-08-15T22:09:05
| 2014-08-12T20:34:42
|
JavaScript
|
UTF-8
|
R
| false
| false
| 691
|
r
|
plotting.R
|
## Plotting ##

# NOTE(review): this script plots from global data frames built elsewhere —
# twitter_friends (date, followers, new_followers), hdxTimeline (created)
# and data (created); confirm the loading script runs first.

# Timeline of the follower count: line + shaded area for the running total,
# bars for new followers per day.
ggplot(twitter_friends) + theme_bw() +
  geom_line(aes(date, followers), stat = 'identity', color = "#F2645A", size = 1.3) +
  geom_area(aes(date, followers), stat = 'identity', fill = "#F2645A", alpha = .3) +
  geom_bar(aes(date, new_followers), stat = 'identity', fill = "#1EBFB3")

# Binned timeline (stat = 'bin' counts per date bin) of HDX creation dates.
ggplot(hdxTimeline) + theme_bw() +
  geom_line(aes(created), stat = 'bin', color = "#F2645A", size = 1.3) +
  geom_area(aes(created), stat = 'bin', fill = "#F2645A", alpha = .3)

# Same binned timeline for the generic `data` frame, in blue.
ggplot(data) + theme_bw() +
  geom_line(aes(created), stat = 'bin', color = '#0988bb', size = 1.3) +
  geom_area(aes(created), stat = 'bin', fill = '#0988bb', alpha = .3)
|
1444d861f79dd747b3931cc4ce28f4af24175676
|
a230cd371d7f8e27c1324b0d38dda14f0e97aaf3
|
/R/cleanup.R
|
650f3a6f56107bfd0502a587396e35292c625f01
|
[] |
no_license
|
Eforberger/preview
|
51233a4219ba70483c376bbc641e6406fe86baf6
|
b7e1bab3e6393a5a312b3c0b723b8999dd1d777c
|
refs/heads/master
| 2022-11-23T12:59:13.298908
| 2020-07-30T15:33:18
| 2020-07-30T15:33:18
| 283,580,480
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
cleanup.R
|
#' Cleanup
#'
#' Removes the first two rows of the data frame \code{df}. Qualtrics exports
#' contain two extra header rows at the top that can interfere with data
#' manipulation.
#'
#' @param df A data frame from Qualtrics.
#'
#' @return A data frame with the first two rows removed.
#' @export
cleanup <- function(df)
{
  # Drop Qualtrics' two extra header rows in a single subset (the original
  # removed row 2, then row 1 of the result — same net effect).
  # drop = FALSE guarantees a data frame is returned even for a
  # single-column input, matching the documented return type.
  df[-(1:2), , drop = FALSE]
}
|
5d90f8e649e2d20680f9c6f0ffc482266e679087
|
65b5253f00d430d3f7013309b4dc33de0dd9220c
|
/R/getWeight.R
|
9063c6482df2b5bf8f2405e2f5e9771ad4a5e142
|
[
"BSD-2-Clause"
] |
permissive
|
gvegayon/mcMST
|
dcc5180e0e9a7fa9cbf547e43544ba48b6d39fcf
|
617269003bff20412795603a67fd5a0d41ffd07f
|
refs/heads/master
| 2021-01-23T07:33:54.846484
| 2017-09-05T17:43:52
| 2017-09-05T17:43:52
| 102,508,528
| 0
| 0
| null | 2017-09-05T17:05:49
| 2017-09-05T17:05:47
|
R
|
UTF-8
|
R
| false
| false
| 800
|
r
|
getWeight.R
|
#' Get the overall costs/weight of a subgraph given its edgelist.
#'
#' @template arg_mcGP
#' @template arg_edgelist
#' @return [\code{numeric(2)}] Weight vector.
#' @examples
#' # generate a random bi-objective graph
#' g = genRandomMCGP(5)
#'
#' # generate a random Pruefer code, i.e., a random spanning tree of g
#' pcode = sample(1:5, 3, replace = TRUE)
#'
#' getWeight(g, prueferToEdgeList(pcode))
#' @export
getWeight = function(graph, edgelist) {
  assertClass(graph, "mcGP")
  assertMatrix(edgelist)
  # One (from, to) row per edge: subscripting a matrix with a two-column
  # integer matrix reads all edge weights in one vectorized lookup,
  # replacing the former O(m * n.weights) double loop (the old #FIXME).
  idx = cbind(edgelist[1L, ], edgelist[2L, ])
  # Per-objective sum of edge weights. An empty edgelist yields sum(numeric(0))
  # = 0 for each objective, matching the previous zero-initialized accumulator.
  vapply(seq_len(graph$n.weights),
    function(j) sum(graph$weights[[j]][idx]),
    numeric(1L))
}
|
b8d2df0b2473f31c233e209f4346a452f984d528
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/FitAR/R/Get1G.R
|
79b285e35eb78cfabaafb1f90717bf8c68c626cf
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 144
|
r
|
Get1G.R
|
`Get1G` <- function(phi, n) {
  # Build the length-n vector whose interior entries equal the squared sum
  # of the AR filter coefficients (1, -phi) and whose two edges are adjusted
  # by the package-internal GetB/GetKappa corrections (mirrored at both ends).
  p <- length(phi)
  interior.val <- sum(c(1, -phi))^2
  edge.vals <- interior.val - rowSums(GetB(phi)) - GetKappa(phi)
  c(edge.vals, rep(interior.val, n - 2 * p), rev(edge.vals))
}
|
83754389287689c331a33fdc3e3598a656942dbe
|
818cb255f3f00080a7aa68282e65f4c1d0310c77
|
/Programming_Projects/R Projects/glmnet/man/cv.glmnet.Rd
|
a56faa8b0f9693b606b32ca762ba3a04c52c38cd
|
[] |
no_license
|
pmnyc/Data_Engineering_Collections
|
fdca0f9a3de71f5c9855e5bbb45c574d1062077d
|
b7d29cd4c134cb1252e5c45dd500d969fe0f6029
|
refs/heads/master
| 2021-06-24T22:15:32.913229
| 2020-11-08T10:12:04
| 2020-11-08T10:12:04
| 153,053,634
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,978
|
rd
|
cv.glmnet.Rd
|
\name{cv.glmnet}
\alias{cv.glmnet}
\title{Cross-validation for glmnet}
\description{Does k-fold cross-validation for glmnet, produces a plot,
and returns a value for \code{lambda}}
\usage{
cv.glmnet(x, y, weights, offset, lambda, type.measure, nfolds, foldid, grouped, keep,
parallel, ...)
}
\arguments{
\item{x}{\code{x} matrix as in \code{glmnet}.}
\item{y}{response \code{y} as in \code{glmnet}.}
\item{weights}{Observation weights; defaults to 1 per observation}
\item{offset}{Offset vector (matrix) as in \code{glmnet}}
\item{lambda}{Optional user-supplied lambda sequence; default is
\code{NULL}, and \code{glmnet} chooses its own sequence}
\item{nfolds}{number of folds - default is 10. Although \code{nfolds}
can be as large as the sample size (leave-one-out CV), it is not
recommended for large datasets. Smallest value allowable is \code{nfolds=3}}
\item{foldid}{an optional vector of values between 1 and \code{nfolds}
    identifying what fold each observation is in. If supplied,
    \code{nfolds} can be missing.}
\item{type.measure}{loss to use for cross-validation. Currently five
options, not all available for all models. The default is \code{type.measure="deviance"}, which uses
squared-error for gaussian models (a.k.a \code{type.measure="mse"} there), deviance
for logistic and poisson
regression, and partial-likelihood for the Cox
model. \code{type.measure="class"} applies to binomial and multinomial logistic regression only,
and gives misclassification error. \code{type.measure="auc"} is for
two-class logistic regression only, and gives area under the ROC
curve. \code{type.measure="mse"} or \code{type.measure="mae"} (mean absolute error)
can be used by all models except the \code{"cox"}; they measure the
deviation from the fitted mean to the response.}
\item{grouped}{This is an experimental argument, with default
\code{TRUE}, and can be ignored by most users. For all models
except the \code{"cox"}, this refers to computing \code{nfolds}
separate statistics, and then using their mean and estimated
standard error to describe the CV curve. If \code{grouped=FALSE},
an error matrix is built up at the observation level from the predictions
from the \code{nfold} fits, and then summarized (does not apply to
\code{type.measure="auc"}). For the \code{"cox"} family,
\code{grouped=TRUE} obtains the CV partial likelihood for the Kth
fold by \emph{subtraction}; by subtracting the log partial
likelihood evaluated on the full dataset from that evaluated on
the on the (K-1)/K dataset. This makes more efficient use of risk
sets. With \code{grouped=FALSE} the log partial likelihood is
computed only on the Kth fold}
\item{keep}{If \code{keep=TRUE}, a \emph{prevalidated} array is
returned containing fitted values for each observation and each
value of \code{lambda}. This means these fits are computed with
this observation and the rest of its fold omitted. The
    \code{foldid} vector is also returned. Default is \code{keep=FALSE}}
\item{parallel}{If \code{TRUE}, use parallel \code{foreach} to fit each fold.
Must register parallel before hand, such as \code{doMC} or others.
See the example below.}
\item{\dots}{Other arguments that can be passed to \code{glmnet}}
}
\details{The function runs \code{glmnet} \code{nfolds}+1 times; the
first to get the \code{lambda} sequence, and then the remainder to
compute the fit with each of the folds omitted. The error is
accumulated, and the average error and standard deviation over the
folds is computed.
Note that \code{cv.glmnet} does NOT search for
values for \code{alpha}. A specific value should be supplied, else
\code{alpha=1} is assumed by default. If users would like to
cross-validate \code{alpha} as well, they should call \code{cv.glmnet}
with a pre-computed vector \code{foldid}, and then use this same fold vector
in separate calls to \code{cv.glmnet} with different values of
\code{alpha}.
}
\value{an object of class \code{"cv.glmnet"} is returned, which is a
list with the ingredients of the cross-validation fit.
\item{lambda}{the values of \code{lambda} used in the fits.}
\item{cvm}{The mean cross-validated error - a vector of length
\code{length(lambda)}.}
\item{cvsd}{estimate of standard error of \code{cvm}.}
\item{cvup}{upper curve = \code{cvm+cvsd}.}
\item{cvlo}{lower curve = \code{cvm-cvsd}.}
\item{nzero}{number of non-zero coefficients at each \code{lambda}.}
\item{name}{a text string indicating type of measure (for plotting
purposes).}
\item{glmnet.fit}{a fitted glmnet object for the full data.}
\item{lambda.min}{value of \code{lambda} that gives minimum
\code{cvm}.}
\item{lambda.1se}{largest value of \code{lambda} such that error is
within 1 standard error of the minimum.}
\item{fit.preval}{if \code{keep=TRUE}, this is the array of
prevalidated fits. Some entries can be \code{NA}, if that and
subsequent values of \code{lambda} are not reached for that fold}
\item{foldid}{if \code{keep=TRUE}, the fold assignments used}
}
\references{Friedman, J., Hastie, T. and Tibshirani, R. (2008)
\emph{Regularization Paths for Generalized Linear Models via Coordinate
Descent}, \url{http://www.stanford.edu/~hastie/Papers/glmnet.pdf}\cr
\emph{Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010}\cr
\url{http://www.jstatsoft.org/v33/i01/}\cr
Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011)
\emph{Regularization Paths for Cox's Proportional Hazards Model via
Coordinate Descent, Journal of Statistical Software, Vol. 39(5)
1-13}\cr
\url{http://www.jstatsoft.org/v39/i05/}
}
\author{Jerome Friedman, Trevor Hastie and Rob Tibshirani\cr
Noah Simon helped develop the 'coxnet' function.\cr
Jeffrey Wong and B. Narasimhan helped with the parallel option\cr
Maintainer: Trevor Hastie \email{hastie@stanford.edu}}
\seealso{\code{glmnet} and \code{plot}, \code{predict}, and \code{coef} methods for \code{"cv.glmnet"} object.}
\examples{
set.seed(1010)
n=1000;p=100
nzc=trunc(p/10)
x=matrix(rnorm(n*p),n,p)
beta=rnorm(nzc)
fx= x[,seq(nzc)] \%*\% beta
eps=rnorm(n)*5
y=drop(fx+eps)
px=exp(fx)
px=px/(1+px)
ly=rbinom(n=length(px),prob=px,size=1)
set.seed(1011)
cvob1=cv.glmnet(x,y)
plot(cvob1)
coef(cvob1)
predict(cvob1,newx=x[1:5,], s="lambda.min")
title("Gaussian Family",line=2.5)
set.seed(1011)
cvob1a=cv.glmnet(x,y,type.measure="mae")
plot(cvob1a)
title("Gaussian Family",line=2.5)
set.seed(1011)
par(mfrow=c(2,2),mar=c(4.5,4.5,4,1))
cvob2=cv.glmnet(x,ly,family="binomial")
plot(cvob2)
title("Binomial Family",line=2.5)
frame()
set.seed(1011)
cvob3=cv.glmnet(x,ly,family="binomial",type.measure="class")
plot(cvob3)
title("Binomial Family",line=2.5)
set.seed(1011)
cvob3a=cv.glmnet(x,ly,family="binomial",type.measure="auc")
plot(cvob3a)
title("Binomial Family",line=2.5)
set.seed(1011)
mu=exp(fx/10)
y=rpois(n,mu)
cvob4=cv.glmnet(x,y,family="poisson")
plot(cvob4)
title("Poisson Family",line=2.5)
# Multinomial
n=500;p=30
nzc=trunc(p/10)
x=matrix(rnorm(n*p),n,p)
beta3=matrix(rnorm(30),10,3)
beta3=rbind(beta3,matrix(0,p-10,3))
f3=x\%*\% beta3
p3=exp(f3)
p3=p3/apply(p3,1,sum)
g3=rmult(p3)
set.seed(10101)
cvfit=cv.glmnet(x,g3,family="multinomial")
plot(cvfit)
title("Multinomial Family",line=2.5)
# Cox
beta=rnorm(nzc)
fx=x[,seq(nzc)]\%*\%beta/3
hx=exp(fx)
ty=rexp(n,hx)
tcens=rbinom(n=n,prob=.3,size=1)# censoring indicator
y=cbind(time=ty,status=1-tcens) # y=Surv(ty,1-tcens) with library(survival)
foldid=sample(rep(seq(10),length=n))
fit1_cv=cv.glmnet(x,y,family="cox",foldid=foldid)
plot(fit1_cv)
title("Cox Family",line=2.5)
\dontrun{
# Parallel
require(doMC)
registerDoMC(cores=4)
x = matrix(rnorm(1e5 * 100), 1e5, 100)
y = rnorm(1e5)
system.time(cv.glmnet(x,y))
system.time(cv.glmnet(x,y,parallel=TRUE))
}
}
\keyword{models}
\keyword{regression}
|
adf9899f50f515d95c429ba29f1c20311c0e2627
|
3700053c7b6331b485bb0a2cec93640f564197da
|
/pm25_plot3.R
|
06c504a0c34a97ba3061f8e376c3748a534cb0d3
|
[] |
no_license
|
jxs3221/pm25
|
8ccf9c91cfd69685fa3297559a0dd7854ac03ac4
|
31bbe259926d81b98420ff3e4069345ae5a12bb4
|
refs/heads/master
| 2020-06-05T17:40:25.412396
| 2014-09-21T19:25:17
| 2014-09-21T19:25:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 737
|
r
|
pm25_plot3.R
|
# pm25_plot3.R
# Creates a plot showing how each type (source) of PM2.5 emissions in
# Baltimore City (fips == "24510") changed from 1999 to 2008.

library(ggplot2)
library(plyr)

# Read in the source datasets (NEI = emission records, SCC = source codes).
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Subset the data for Baltimore City (fips 24510).
balt_data <- subset(NEI, fips=="24510", select=c(year, Emissions, type))

# Total emissions per pollutant type and year.
balt_data_type <- ddply(balt_data, .(type,year), summarize, Emissions = sum(Emissions))

# One point-and-line series per pollutant type.
# Fix: the original title claimed "in U.S." although the data above is
# restricted to Baltimore City.
qplot(year, Emissions, data=balt_data_type, group=type, color=type,
      geom = c("point", "line"), ylab = "Total Emissions",
      xlab = "Year", main = "Total Emissions in Baltimore by Type of Pollutant")

ggsave(file="plot3.png")
# ggsave() writes plot3.png itself; dev.off() closes the device opened by
# auto-printing the plot above (e.g. Rplots.pdf when run via Rscript).
dev.off()
|
515b36566fe2b51380a367ebba7bc484a8b68044
|
69f4d5a9d333d6cfcc7169a31b6f70fff475d4ca
|
/scripts/sim.R
|
a1d0765e74c354601c38733c5777824927899d66
|
[] |
no_license
|
michalim/ua-time-series
|
39d639aa62f04b2c332990789eae6e396c795389
|
b83b11c83e720c4af2fe570f022c04b468fcc9b4
|
refs/heads/master
| 2020-05-18T01:07:00.065785
| 2011-10-20T19:35:29
| 2011-10-20T19:35:29
| 37,431,596
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,962
|
r
|
sim.R
|
# this library provides the means to generate random p-dimensional Gaussians
library(MASS)

# Create one episode: a matrix with `ep.length` rows of symbols (letters),
# obtained by discretizing draws from a p-dimensional Gaussian.
#
# Arguments:
#   ep.length  - number of time steps in the episode
#   mn.vec     - mean vector of the Gaussian
#   cov.mat    - covariance matrix of the Gaussian
#   cut.points - boundaries used to partition each variable into bins
create.episode <- function(ep.length, mn.vec, cov.mat, cut.points) {
  # draw the Gaussian vectors, one per row
  draws <- mvrnorm(ep.length, mn.vec, cov.mat)
  # bin every column into integer codes along cut.points
  codes <- apply(draws, 2, cut, breaks = cut.points, labels = FALSE)
  # draws outside the cut range come back NA; map those to bin 1
  # (kept from the original author's deliberate hack)
  codes <- apply(codes, 2, function(col) {
    col[is.na(col)] <- 1
    col
  })
  # translate integer bin codes into letters, preserving row order
  t(apply(codes, 1, function(row) letters[row]))
}
args <- commandArgs(TRUE)

# Command-line arguments:
#   prefix  - output path prefix (class label included by the caller)
#   p       - number of streams (dimensionality of each time step)
#   ep.len  - length of each individual episode
#   mean    - mean value used to shift the middle episode
#   cov.pct - amount (0-1) to interpolate the covariance for the middle episode
#   ntrain  - number of training instances to generate
prefix <- args[1]
p <- as.integer(args[2])
ep.len <- as.integer(args[3])
mean <- as.numeric(args[4])   # fix: as.real() is defunct in modern R
means <- rep(mean, p)
cov.pct <- as.numeric(args[5])  # fix: as.real() is defunct in modern R
ntrain <- as.integer(args[6])

alphabet.size <- 7
n.episodes <- 3 # used to be 5

# these cuts are based on quantiles of a standard Gaussian
# THIS IS WHAT I USED SO FAR
cut.points <- qnorm(seq(0.05,0.95,len=alphabet.size+1))

# Covariance endpoints for the middle episode: A is near-diagonal, B adds
# strong correlation among streams 2-5.
# NOTE(review): cov.mat.B[2:5,2:5] assumes p >= 5 — confirm callers obey this.
cov.mat.A <- diag(1/50,p)
cov.mat.B <- diag(1/10,p)
cov.mat.B[2:5,2:5] <- 0.5

print(ep.len)
print(means)

# Generate ntrain instances (fixed count for both classes, so P(C_1)=P(C_2)):
# normal episodes, one altered middle episode, then normal episodes again.
for (i in seq_len(ntrain)) {
  c2 <- NULL
  # Leading normal episodes (seq_len handles n.episodes < 2 safely,
  # unlike the old 1:floor(...) which would yield c(1, 0)).
  for (j in seq_len(floor(n.episodes/2))) {
    c2 <- rbind(c2,create.episode(ep.len,rep(0,p),diag(1/50,p),cut.points))
  }
  # Middle episode: shifted mean and covariance interpolated from A toward B.
  cov.mat <- cov.mat.A + (cov.pct * (cov.mat.B - cov.mat.A))
  c2 <- rbind(c2,create.episode(ep.len,means,cov.mat,cut.points))
  # Trailing normal episodes.
  for (j in seq_len(floor(n.episodes/2))) {
    c2 <- rbind(c2,create.episode(ep.len,rep(0,p),diag(1/50,p),cut.points))
  }
  # fix: spell out argument names instead of relying on partial matching
  # (row=F, col=F) and single-letter logicals.
  write.table(c2, paste0(prefix, i), row.names = FALSE, col.names = FALSE)
}

# Notes on experiments (kept from the original author):
# Experiment 1 - only the middle episode changes (mean and length):
#   episodes 1 and 3 match class 0; shifting the mean upward moves the
#   middle episode further from random (means in Gaussian units).
# Experiment 2 - systematic change in the covariance structure.
# Big experiment - random number of episodes with random lengths;
#   identity covariance at one end, extreme correlation at the other.
|
889c182afc4396ea63cb5246d304aa6586d56273
|
0c61299c0bfab751bfb5b5eac3f58ee2eae2e4b0
|
/metadata_lit.R
|
9ddff0f40c3f458d9a3d5b4cdeab332aa555d6c6
|
[] |
no_license
|
jwerba14/Species-Traits
|
aa2b383ce0494bc6081dff0be879fc68ed24e9c2
|
242673c2ec6166d4537e8994d00a09477fea3f79
|
refs/heads/master
| 2022-10-13T10:57:54.711688
| 2020-06-12T01:57:21
| 2020-06-12T01:57:21
| 105,941,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
metadata_lit.R
|
## Literature metadata: per-search citation and download tallies.
library(tidyverse)

lit <- read.csv("meta_lit.csv")

## Per search: distinct cited titles and total downloaded records.
down <- lit %>%
  group_by(Search) %>%
  summarise(num_cite = n_distinct(Title), download = n())

## Same summary, restricted to records whose data was actually extracted.
ext <- lit %>%
  filter(Data.Extracted == "yes") %>%
  group_by(Search) %>%
  summarise(num_cite = n_distinct(Title), download = n())
|
406243258cb90477293420c4208133c6b96b5681
|
721236736dbc7fdd5e67fe650f12edcb145f27f1
|
/code/analysis/grf_examples.R
|
f32daad3ea724b03b4d71517ebe46b293d2c33d0
|
[] |
no_license
|
NikiJane/name_matching
|
e05d2e4f1c6c4e3351573429d52c828798f700ea
|
57784b7d36a7840322c823ceaccce916e3c396d8
|
refs/heads/master
| 2020-08-30T15:28:52.512893
| 2019-05-16T14:12:22
| 2019-05-16T14:12:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,581
|
r
|
grf_examples.R
|
# Thom Covert, April 2019
# example use of grf to possibly classify our name pairs better

#===========
# standard setup
#===========
# Walk upward from the working directory until we reach the repo root,
# i.e. the directory named "name_matching".
root <- getwd()
while(basename(root) != "name_matching") {
  root <- dirname(root)
}
source(file.path(root, "data.R"))

# TC's random forest utilities
# NOTE(review): presumably this provides regression_forest2/predict2 used
# below (they are not grf exports) — confirm.
source(file.path(root, "code", "functions", "random_forest_utils.R"))

#===========
# needed libraries
#===========
library(tidyverse)
library(grf)

#===========
# data read-in
#===========
# pre-labeled pairs for training; drop unlabeled rows, keep one row per
# (name, match) pair. NOTE(review): `dropbox` is presumably defined by
# data.R — confirm.
df <-
  c(paste(file.path(dropbox, 'archive'),
    c('lease_match_sample.csv', 'new_lease_sample.csv'), sep='/')) %>%
  map_df(read_csv) %>%
  filter(!is.na(keep)) %>%
  distinct(name, match, .keep_all = T)

# all name matches; NOTE(review): `vdir` presumably comes from data.R too.
name_matches <- read_csv(file.path(vdir, 'leases_matches.csv'))

#===========
# example grf use
#===========
# Build the formula keep ~ shared_words + cosine_similarity + jw_distance.
func <-
  paste("shared_words", "cosine_similarity", "jw_distance", sep = "+") %>%
  paste("keep", ., sep = "~") %>%
  as.formula()

# Fit the forest on the labeled sample.
rf <-
  func %>%
  regression_forest2(df)

# Predictions on the training sample, histogrammed by true label.
sample_fig <-
  rf %>%
  predict %>%
  as_tibble %>%
  bind_cols(df) %>%
  ggplot(aes(x = predictions, fill = as.factor(keep))) +
  geom_histogram(position = 'dodge') +
  scale_x_continuous(breaks = seq(.0,1,.1)) +
  scale_fill_discrete(name = "keep")

# Predictions on the full set of name matches, same visualization.
name_matches_fig <-
  rf %>%
  predict2(func, name_matches) %>%
  as_tibble() %>%
  bind_cols(name_matches) %>%
  ggplot(aes(x = predictions, fill = as.factor(keep))) +
  geom_histogram(position = 'dodge') +
  scale_x_continuous(breaks = seq(.0,1,.1)) +
  scale_fill_discrete(name = "keep")
|
0286f9524802d3ce87f7db91ca9671796ed08cf6
|
d233138052e7037e924f4e79fa683af8163bd9bb
|
/cloud_script.R
|
88eeb6c0fc9dad8c932396c0f05abc44df61b039
|
[] |
no_license
|
sergioquadros/radar
|
67691f870bbff240ccf8aee082c3f06ce0a46e87
|
e257ebc676cac3ddd713ff870967d52c721682a6
|
refs/heads/master
| 2020-03-22T16:14:49.387571
| 2018-07-09T20:13:57
| 2018-07-09T20:13:57
| 140,313,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,536
|
r
|
cloud_script.R
|
# directories: RMSP (with 38 746 files), Saida_Pluv (with 108 files) under the workdir
# Files expected in the workdir:
# cloud_script.R; navegacao_rmsp.dat; coordenadas_pluv_saisp2.dat
library(knitr); library(rmarkdown);library(tidyverse)
library(gridExtra); library(corrplot); library(magrittr)
library(lubridate); library(parallel)
# number of cores --> initialize the cluster (leave one core free)
no_cores <- detectCores()-1
cl <- makeCluster(no_cores)
# dim(mask_lon)[1]*dim(mask_lon)[2] = nx*ny = 25776 grid points
ny <- 179
nx <- 144
# Navigation table: columns V1/V2 are 0-based (i, j) grid indices,
# V3/V4 are lon/lat; shift indices to R's 1-based convention.
navegacao <- read.table("navegacao_rmsp.dat")
navegacao[,1:2] <- navegacao[,1:2]+1
# masks for navigation: lon/lat value at each (i, j) grid cell
mask_lon <- array(dim = c(nx, ny))
mask_lat <- array(dim = c(nx, ny))
for(m in 1:25776){
i <- navegacao[m,1]
j <- navegacao[m,2]
mask_lon[i,j] <- navegacao[m,3] # lon for this cell
mask_lat[i,j] <- navegacao[m,4] # lat for this cell
}
# One row per radar file: full path plus the timestamp taken from the
# file name (".bin" stripped).
caminho <- list.files(path=paste0(getwd(), "/RMSP"), full.names = TRUE)
dia <- list.files(path=paste0(getwd(), "/RMSP"))
radarF <- cbind.data.frame(caminho, dia)
# NOTE(review): pattern = ".bin" is a regex where '.' matches any char;
# use "\\.bin$" if file names could contain e.g. "abin" -- confirm.
radarF$dia %<>% gsub(pattern = ".bin", replacement = "")
radarF$caminho %<>% as.character
caminho <- dia <- NULL
# id, lat, lon of the 108 pluviometers
pluviometro <- read.csv(file="coordenadas_pluv_saisp2.dat")
# add the full data-file path and placeholder (i, j) grid coordinates
pluviometro %<>% mutate(linha=0.0, coluna=0.0,
arquivo=paste0(getwd(),"/Saida_Pluv/", id, ".dat"))
# Nearest-grid-cell lookup: minimize squared lon/lat distance.
for (p in 1:108) {
onde_min <- which.min((navegacao$V4-pluviometro$lat[p])^2+(navegacao$V3-pluviometro$lon[p])^2)
pluviometro$linha[p] <- navegacao[onde_min,1]
pluviometro$coluna[p] <- navegacao[onde_min,2]
}
# Coerce id to character, then to factor (net effect: factor of the ids).
pluviometro$id %<>% as.character
pluviometro$id %<>% as.factor
# faz_nove ("make nine"): copy the 3x3 neighbourhood of grid cell (i, j)
# from the rain field `chuva` (an nx-by-ny matrix).
#
# Returns a length-9 numeric vector in row-major order:
#   1 2 3   (row i-1)
#   4 5 6   (row i)
#   7 8 9   (row i+1)
# Neighbours falling outside the grid are NA, so the downstream
# mean(..., na.rm = TRUE) / sd(..., na.rm = TRUE) ignore them.
#
# Fixes two defects of the previous version:
#  * Aux[6] (the (i, j+1) neighbour) was never assigned in the interior
#    case -- the else branch set Aux[2] instead -- so it silently stayed 0.0.
#  * Cells on the j boundary indexed chuva with column 0 or ny+1, which
#    errors ("replacement has length zero") instead of yielding NA.
faz_nove <- function(chuva, i, j, nx, ny){
  Aux <- rep_len(NA_real_, 9)
  for (di in -1:1) {
    for (dj in -1:1) {
      ii <- i + di
      jj <- j + dj
      if (ii >= 1 && ii <= nx && jj >= 1 && jj <= ny) {
        # Map offset (di, dj) to the row-major position 1..9.
        Aux[(di + 1) * 3 + (dj + 2)] <- chuva[ii, jj]
      }
    }
  }
  return(Aux)
}
# Build the long table `pingo`: one row per (pluviometer, timestamp)
# with the gauge rainfall Rpluv. The first gauge seeds the data frame,
# the remaining 107 are appended below.
plu <- 1
pingo <- read.table(file(pluviometro$arquivo[plu]), header = FALSE,
                    colClasses = c("character", "character", "character",
                                   "character", "character", "numeric"))
colnames(pingo) <- c("ano", "mes", "dia", "hora", "minuto", "Rpluv")
pingo %<>% mutate(id = as.factor(pluviometro$id[plu]),
                  linha = pluviometro$linha[plu],
                  coluna = pluviometro$coluna[plu],
                  tempo = paste0(ano,mes,dia,hora,minuto))
# Drop the five raw date/time columns; `tempo` keeps the concatenation.
pingo <- pingo[,-c(1:5)]
for(plu in 2:108){
  aux <- read.table(file(pluviometro$arquivo[plu]), header = FALSE,
                    colClasses = c("character", "character", "character",
                                   "character", "character", "numeric"))
  colnames(aux) <- c("ano", "mes", "dia", "hora", "minuto", "Rpluv")
  aux %<>% mutate(id = as.factor(pluviometro$id[plu]),
                  linha = pluviometro$linha[plu],
                  coluna = pluviometro$coluna[plu],
                  tempo = paste0(ano,mes,dia,hora,minuto))
  aux <- aux[,-c(1:5)]
  # NOTE(review): rbind in a loop copies `pingo` each iteration; a
  # list-then-bind approach would be much faster for 108 files.
  pingo %<>% rbind.data.frame(aux)
}
aux <- NULL
# Add Rradar (mean) and sd_Rradar (standard deviation) columns to `pingo`
pingo %<>% mutate(Rradar = 0.0, sd_Rradar = 0.0)
# two loops
# clusterExport: pluviometro, nx, ny, pingo, faz_nove
clusterExport(cl,c("faz_nove","pingo","nx", "ny", "pluviometro"))
# the first loop: one iteration per radar snapshot file
for(obs in 1:38746){
  radar <- file(radarF$caminho[obs], "rb")
  bindata <- readBin(radar, numeric(), size=4, n=25776)
  close(radar)
  hora <- radarF$dia[obs]
  chuva <- matrix(data = bindata, nrow = nx, ncol = ny)
  # for each loop a modified "hora" variable and "chuva" matrix
  clusterExport(cl,c("hora","chuva"))
  # the second loop is more time consuming
  plu <- 1:108
  # NOTE(review): the assignments to pingo$Rradar / pingo$sd_Rradar below
  # run on the workers and modify each worker's *copy* of `pingo` (exported
  # once above); parSapply's return value is discarded, so the master's
  # `pingo` -- and hence output.csv -- keeps the 0.0 placeholders. The
  # worker function should instead RETURN (este, mean, sd) and the master
  # should write them into `pingo`. Confirm before relying on the output.
  parSapply(cl, plu, function(plu){
    i <- pluviometro$linha[plu]
    j <- pluviometro$coluna[plu]
    xx <- faz_nove(chuva, i, j, nx, ny)
    este <- which(pingo$tempo==hora & pingo$id==pluviometro$id[plu])
    pingo$Rradar[este] <- mean(xx, na.rm = TRUE)
    pingo$sd_Rradar[este] <- sd(xx, na.rm = TRUE)
  })
}
chuva <- bindata <- NULL
# the desired file
write.table(pingo, "output.csv", sep = ",", col.names = T)
# end of cluster
stopCluster(cl)
|
2992fb6580857ba7af585fecb2a22258fcb03c3a
|
f7eb46fb3b16b16d66cf5f8c95e0893fce7aa6db
|
/code_files/simulation/extended_main_sim.R
|
a8ee214a662a193c013e0c8b67b3ce52e5d19549
|
[
"CC-BY-4.0"
] |
permissive
|
bjoelle/Poorly_dated_fossils_SI
|
eca8df7f40e03175026fe3199bf9d76181626f49
|
6613ba27385a64454ad7f4bf568db67a635fac25
|
refs/heads/main
| 2023-04-15T11:33:42.043820
| 2022-10-11T12:54:25
| 2022-10-11T12:54:25
| 352,789,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,983
|
r
|
extended_main_sim.R
|
# Simulate datasets for the additional conditions:
# no deposit, burst deposit, low morphological clock rate,
# and relaxed morphological clock.
run_simulation_extended <- function(save.folder) {
  library(FossilSim)
  library(phyclust)
  #### Settings shared by every scenario
  shared_args <- list(
    # Output parameters
    ntrees = 100,
    save_folder = save.folder,
    seed = 451,
    # Tip numbers
    nextant = 25,
    nfossils = 50,
    # Molecular parameters
    mol_length = 4500,
    mol_model = "-mHKY -f 0.35 0.16 0.21 0.28 -t 2.33 -a0.35 -g5",
    mol_clock_rate = 5e-3, # from Brunke et al. 2017
    # Morphological parameters
    morph_length = 120,
    morph_alpha = 0.55,
    morph_ncats = 5,
    morph_props = c(0.7, 0.2, 0.1), # target proportions, not checked after
    prop_extant_only = 0.05,
    # Deposit parameters - 1 = precise-date, 2 = imprecise-date
    prop_undated = 0.1,
    rate1to2 = 0.6,
    rate2to1 = 0.7,
    # Fossil parameters
    age_range_mult = 0.1,
    # BD parameters
    origin_time = 120,
    spec_rate = 0.05, # from Brunke et al. 2017, range 0.05-0.1
    ext_rate = 0.02,  # calibrated for lambda, origin and nextant
    rho = 0.5,
    # Imprecise deposit parameters
    undated_min_age = 30,
    undated_max_age = 50
  )
  # Run each scenario with the shared settings.
  for (scenario in c("morph_relaxed", "low_morph", "no_deposit", "burst_deposit")) {
    .run.sim(shared_args, scenario)
  }
}
# Run one simulation scenario: merge the scenario-specific arguments into
# the shared ones, seed the RNG, and hand off to the core loop.
.run.sim <- function(args, option = c("morph_relaxed", "low_morph", "no_deposit", "burst_deposit")) {
  # Default to the first scenario when no explicit choice was made.
  if (length(option) > 1) {
    option <- "morph_relaxed"
  }
  set.seed(args$seed)
  .core_loop(c(args, .dependent_args(opt = option)), option)
}
### Arguments dependent on simulation setup
# Returns the scenario-specific parameters for `opt`:
# the morphological clock (fixed low rate, per-branch exponential draw,
# or the default rate), the elevated sampling rate for the burst window,
# and the dataset-name suffix.
.dependent_args <- function(opt) {
  clock <- switch(opt,
    low_morph = 0.01,
    morph_relaxed = function(n) rexp(n, 10),
    0.1 # default, from Farrell et al. 2004
  )
  list(
    morph_clock_rate = clock,
    sampl_rate_up = if (opt == "burst_deposit") 0.2 else 0.04,
    sampl_rate = 0.03, # calibrated for nfossils
    name = paste0("_prop_", if (opt == "no_deposit") "0" else "0.1",
                  "_age_0.1_", opt)
  )
}
### Core simulation
# Rejection-samples `args$ntrees` birth-death trees with fossils matching
# the target tip/fossil counts (and undated proportion), then simulates
# molecular and morphological alignments on each and writes everything to
# disk under args$save_folder.
.core_loop = function(args, option = c("morph_relaxed", "low_morph", "no_deposit", "burst_deposit")) {
  if(length(option) > 1) option = "morph_relaxed"
  name = paste0("DS_seed_", args$seed, args$name)
  dir.create(paste0(args$save_folder, name), showWarnings = F)
  name = paste0(name,"/",name)
  # simulating trees and fossils with parameters
  trees = list()
  fossils = list()
  samp_trees = list()
  # Rejection loop: keep topping trees up to ntrees, then cull the new
  # ones that fail any acceptance criterion, until ntrees survive.
  while(length(trees) < args$ntrees) {
    nsim = args$ntrees - length(trees)
    r_phy = r_ext = r_fos = r_prop = 0
    print(paste("Simulations remaining", nsim))
    trees = c(trees, TreeSim::sim.bd.age(args$origin_time, nsim, args$spec_rate, args$ext_rate, complete = T))
    # Iterate over the newly added slots from the back so that removals
    # don't shift the indices still to be visited.
    for (i in args$ntrees:(args$ntrees-nsim+1)) {
      # sim.bd.age returns a non-"phylo" placeholder when the lineage went
      # extinct. NOTE(review): inherits(trees[[i]], "phylo") would be the
      # safer test than class() == comparison.
      if(class(trees[[i]]) != "phylo") {
        trees = trees[-i]
        fossils = fossils[-i]
        r_phy = r_phy +1
        next
      }
      # filter on number of extant samples (within 20% of the target)
      ext_samples = length(sampled.tree.from.combined(trees[[i]])$tip.label)*args$rho
      if(ext_samples > args$nextant*1.2 || ext_samples < args$nextant*0.8) {
        trees = trees[-i]
        fossils = fossils[-i]
        r_ext = r_ext +1
        next
      }
      # add burst deposit and no deposit here
      if(option == "no_deposit") {
        # Uniform sampling rate; every fossil precisely dated (trait 1).
        fossils[[i]] = sim.fossils.intervals(tree = trees[[i]], interval.ages = c(0, 130), rates = args$sampl_rate)
        fossils[[i]]$trait = 1
      }
      else if(option == "burst_deposit") {
        # Background sampling plus a 2 My burst window starting somewhere
        # in [30, 50]; burst fossils are the imprecisely dated ones (trait 2).
        start_int = runif(1, 30, 50)
        fssls1 = sim.fossils.intervals(tree = trees[[i]], interval.ages = c(0, 130), rates = args$sampl_rate)
        fssls2 = sim.fossils.intervals(tree = trees[[i]], interval.ages = c(start_int, start_int + 2), rates = args$sampl_rate_up)
        if(length(fssls1$edge) > 0) fssls1$trait = 1
        if(length(fssls2$edge) > 0) fssls2$trait = 2
        fossils[[i]] = rbind(fssls1, fssls2)
      }
      # Default: elevated sampling within the fixed imprecise window.
      else fossils[[i]] = sim.fossils.intervals(tree = trees[[i]], interval.ages = c(0, args$undated_min_age, args$undated_max_age, 130),
                                                rates = c(args$sampl_rate, args$sampl_rate_up, args$sampl_rate))
      # filter on number of fossils (within 10% of the target)
      if(length(fossils[[i]]$edge) < args$nfossils*0.9 ||
         length(fossils[[i]]$edge) > args$nfossils*1.1) {
        #print(length(fossils[[i]]$edge))
        fossils = fossils[-i]
        trees = trees[-i]
        r_fos = r_fos +1
        next
      }
      # For the default scenarios, assign deposit traits via a two-state
      # switching process and copy them onto the fossils.
      if(!option %in% c("no_deposit", "burst_deposit")) {
        traits = sim.deposit.values(trees[[i]], c(args$rate1to2, args$rate2to1), args$undated_min_age, args$undated_max_age)
        fossils[[i]] = assign.traits(fossils[[i]], traits)
      }
      if(option != "no_deposit") {
        # filter on undated proportion (within 10% of the target)
        undated = which(fossils[[i]]$trait == 2)
        p = length(undated)/length(fossils[[i]]$trait)
        up_tol = 1.1
        low_tol = 0.9
        if(p < args$prop_undated*low_tol || p > args$prop_undated*up_tol) {
          #print(p)
          fossils = fossils[-i]
          trees = trees[-i]
          r_prop = r_prop +1
          next
        }
      }
    }
    print(paste("Rejected for extinction", r_phy, ", for n_extant", r_ext,
                ", for n_fossils", r_fos, ", for undated prop", r_prop))
  }
  # Second pass: add age uncertainty and simulate sequence data.
  mol_seqs = morph_seqs = list()
  for (i in 1:args$ntrees) {
    # Sort fossils by species and by descending midpoint age.
    fossils[[i]]$h = (fossils[[i]]$hmin + fossils[[i]]$hmax)/2
    fossils[[i]] = fossils[[i]][order(fossils[[i]]$sp, -fossils[[i]]$h), ]
    # adding uncertainty to fossil ages: undated fossils get the full
    # deposit window, dated ones a sampled interval around their age
    undated = which(fossils[[i]]$trait == 2)
    fossils[[i]]$hmax[undated] = args$undated_max_age
    fossils[[i]]$hmin[undated] = args$undated_min_age
    if(option != "no_deposit") {
      intervals = sample.intervals(fossils[[i]][-undated,], args$age_range_mult)
      fossils[[i]]$hmax[-undated] = intervals$max
      fossils[[i]]$hmin[-undated] = intervals$min
    }
    else {
      intervals = sample.intervals(fossils[[i]], args$age_range_mult)
      fossils[[i]]$hmax = intervals$max
      fossils[[i]]$hmin = intervals$min
    }
    # simulating sequences on the trees
    full = SAtree.from.fossils(trees[[i]],fossils[[i]])
    ftree = full$tree
    fossils[[i]] = full$fossils
    tree = sampled.tree.from.combined(ftree, rho = args$rho)
    samp_trees[[i]] = tree
    # NOTE(review): these two index ranges overlap at position
    # length(tip.label) - length(fossils[[i]]$sp); fossil_tips likely
    # should start one position later -- confirm against the tip layout
    # produced by sampled.tree.from.combined.
    extant_tips = tree$tip.label[1:(length(tree$tip.label)-length(fossils[[i]]$sp))]
    fossil_tips = tree$tip.label[(length(tree$tip.label)-length(fossils[[i]]$sp)):length(tree$tip.label)]
    if(option != "morph_relaxed") {
      # NOTE(review): when `undated` is integer(0) (e.g. no_deposit),
      # fossil_tips[-undated] selects ZERO tips, not all of them --
      # confirm whether all dated fossil tips were intended here.
      morph_seqs[[i]] = sim.morph.seqs(samp_trees[[i]], args$morph_length, args$morph_clock_rate, args$morph_alpha,
                                      args$morph_ncats, args$morph_props, extant_tips, args$prop_extant_only, 0, 1, fossil_tips[-undated])
    }
    else morph_seqs[[i]] = sim.morph.seqs.relaxed(samp_trees[[i]], args$morph_length, args$morph_clock_rate, args$morph_alpha,
                                                  args$morph_ncats, args$morph_props, extant_tips, args$prop_extant_only)
    # Molecular data only exists for extant tips.
    mol_seqs[[i]] = sim.mol.seqs(samp_trees[[i]], args$mol_clock_rate, args$mol_length, args$mol_model)
    mol_seqs[[i]] = mol_seqs[[i]][names(mol_seqs[[i]]) %in% extant_tips]
    # .write.nexus.data / write.fossil.ages are package-internal helpers.
    .write.nexus.data(mol_seqs[[i]],file = paste0(args$save_folder, name, "_mol_",i,".nex"))
    .write.nexus.data(morph_seqs[[i]], format = "standard",
                      file = paste0(args$save_folder, name, "_morph_", i, ".nex"))
    write.fossil.ages(samp_trees[[i]], fossils[[i]], file = paste0(args$save_folder, name, "_fossil_ages_", i, ".txt"))
  }
  save(trees, fossils, samp_trees, mol_seqs, morph_seqs, file = paste0(args$save_folder, name, ".RData"))
}
|
1908dc9f05f6fd980d73c5971ac2f51bc8115063
|
5906b6e56fd54b7a038961372318632a8f4009d1
|
/man/unifrac.Rd
|
169dcd6c4a354dc6a4b19d34dfaad07ec1a74c11
|
[] |
no_license
|
skembel/picante
|
dc8c8b38c45f6d2088563d4e9119a0aa21e8f115
|
b891440afaa83185442f98d45db90a515cf6ab8a
|
refs/heads/master
| 2023-09-04T02:58:33.047287
| 2023-07-10T15:17:01
| 2023-07-10T15:17:01
| 13,666,942
| 25
| 14
| null | 2023-07-10T15:12:30
| 2013-10-18T02:14:54
|
R
|
UTF-8
|
R
| false
| false
| 1,572
|
rd
|
unifrac.Rd
|
\name{unifrac}
\alias{unifrac}
\title{ Unweighted UniFrac distance between communities }
\description{ Calculates unweighted UniFrac, a phylogenetic beta diversity metric of the unique (non-shared) fraction of total phylogenetic diversity (branch-length) between two communities. }
\usage{
unifrac(comm, tree)
}
\arguments{
\item{comm}{ Community data matrix }
\item{tree}{ Object of class phylo - a rooted phylogeny}
}
\value{A dist object of the unweighted UniFrac distances between communities (the unique (non-shared) fraction of total phylogenetic diversity (branch-length) between two communities).}
\references{
Lozupone, C., Hamady, M., and Knight, R. 2006. UniFrac - an online tool for comparing microbial community diversity in a phylogenetic context. BMC Bioinformatics 7:371.
}
\author{ Steven Kembel <steve.kembel@gmail.com> }
\seealso{\code{\link{pd}}}
\note{
The supplied tree must be rooted. Single-species samples will be assigned a PD value equal to the distance from the root to the present.
}
\section{Warning }{
The UniFrac distance between samples will include the branch length connecting taxa in those samples and the root of the supplied tree. The root of the supplied tree may not be spanned by any taxa in the sample. If you want the root of your tree to correspond to the most recent ancestor of the taxa actually present in your samples, you should prune the tree before running \code{unifrac}:
\code{prunedTree <- prune.sample(sample,tree)}
}
\examples{
data(phylocom)
unifrac(phylocom$sample, phylocom$phylo)}
\keyword{univar}
|
e94d93be866735e5c3bc4bb1b8428b2416413bc6
|
ba5ee64247395ad6f288b4ccb2e13f14f98e5fd0
|
/process_file.R
|
d5384d2b0d956cb1f6cde642449885fe78e28405
|
[] |
no_license
|
iceiony/word_count
|
2a395449796b68656345d4a6dc3f063d3044a654
|
0305df0d41b672e11fabd80e73968c0ced97819d
|
refs/heads/master
| 2021-01-22T05:24:03.924371
| 2017-02-12T01:01:46
| 2017-02-12T01:01:46
| 81,657,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 898
|
r
|
process_file.R
|
# Split an NLP String `doc` into sentences using the Apache OpenNLP
# Maxent sentence tokenizer; returns the sentence substrings of `doc`.
extract_sentences <- function(doc){
  tokenizer <- Maxent_Sent_Token_Annotator()
  sentence_spans <- annotate(doc, tokenizer)
  doc[sentence_spans]
}
# Read the text file at path `doc`, split it into sentences, and build a
# word-count table: one row per distinct word with its occurrence count,
# the list of sentences it appears in, and the source document path.
process_file <- function(doc){
  raw_text <- readChar(doc, file.info(doc)$size)
  sentences <- as.factor(extract_sentences(as.String(raw_text)))
  # One data-frame row per word occurrence, tagged with its sentence.
  per_sentence <- mclapply(sentences, function(s){
    words <- unlist(str_extract_all(as.character(s), "['$£,\\w/]+"))
    words <- as.data.frame(words, stringsAsFactors = FALSE)
    cbind(words, sentence = s)
  })
  words <- bind_rows(per_sentence)
  # Collapse to one row per distinct word.
  words <- summarise(group_by(words, words),
                     count = n(),
                     sentences = list(sentence))
  cbind(words, document = doc)
}
|
06806433f0319f20e2caeb01193558d20fddf065
|
82a835dcfcf9388ad76e728bf5071975b05c22b4
|
/ML-Toolbox/Code recipes/R/caret/rfe/rfe.r
|
4fbc97c1b75895788cd3b2f96f5c0d0dd097911a
|
[] |
no_license
|
nmanwaring/ML-Resources
|
36788c2279b7a82ce5cabc7bd5518c06d1244f46
|
43a9811bb436a43b7ac9be257690da8095878239
|
refs/heads/master
| 2021-09-28T08:00:54.309901
| 2018-11-15T16:56:22
| 2018-11-15T16:56:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 878
|
r
|
rfe.r
|
# Recursive feature elimination on the 1984 House votes data with caret.
library(readr)
votes <- read_csv("~/ML Toolbox/R Scripts/caret/rfe/house-votes-84.csv")
View(votes)
# '?' marks an unknown vote: recode to NA, then keep complete cases only.
# (The original script referenced undefined objects `house_votes_84` /
# `rev_house_votes_84`; everything now consistently uses `votes`.)
votes[votes == '?'] <- NA
cleaned_votes <- na.omit(votes)
View(cleaned_votes)
#load library and set seed
library(caret)
set.seed(998)
# define an 75%/25% train/test split of the dataset
inTraining <- createDataPartition(cleaned_votes$party, p = .75, list = FALSE)
training <- cleaned_votes[inTraining,]
testing <- cleaned_votes[-inTraining,]
# define the control using a random forest selection function
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
# run the RFE algorithm on the vote columns, predicting party
# NOTE(review): assumes every column except `party` is a predictor (the
# 16 votes) -- confirm against the CSV's actual layout.
vote_features <- cleaned_votes[, setdiff(names(cleaned_votes), "party")]
results <- rfe(vote_features, cleaned_votes$party,
               sizes = seq_len(ncol(vote_features)), rfeControl = control)
# summarize the results
print(results)
# list the chosen features
predictors(results)
# plot the results
plot(results, type=c("g", "o"))
|
4e14d8d03782108f3497a412aa13084bd36ea9cf
|
be9b0aacf8f18680d58a25f2a47430169e566ecb
|
/4.survival/AdvancedSurvivalAnalysis.R
|
6b87f0e22e3ea0d05fc2150122569d0283507b1a
|
[] |
no_license
|
zhangyupisa/Arrrgh
|
db0d0f3ce96586fb7be42c4d7e24ddb0cc30971c
|
96538054651b06f6697932dd761246e982e548a1
|
refs/heads/master
| 2020-09-22T02:36:25.627853
| 2016-11-20T08:07:49
| 2016-11-20T08:07:49
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 5,200
|
r
|
AdvancedSurvivalAnalysis.R
|
## Univariate Cox Template
## This program generates univariate Cox models for multiple variables and outputs the results for further analyses
## It is designed as a compromise between genericity and flexibility
# 0. Preparations
# Loading necessary packages
library(survival)
library(MASS)
library(ggplot2)
library(survMisc)
# NOTE(review): rm(list = ls()) wipes the caller's workspace -- an
# antipattern in scripts; consider removing.
rm(list = ls())
# Setting preliminary parameters - USE DOUBLE BACKSLASHES FOR WINDOWS PATHS
work_dir <- "C:\\Users\\ntachfine\\Desktop\\Celgene\\Survie\\Scripts" ### CHANGE IF NECESSARY
data_file <- "data.csv" ### CHANGE IF NECESSARY
setwd(work_dir)
data <- read.table(file=data_file, sep=";", head=T)
View(data)
# Setting survival analysis variables
events <- data$Infect18 ### CHANGE IF NECESSARY
times <- data$Durée ### CHANGE IF NECESSARY
event_name <- "event" ### CHANGE IF NECESSARY
survival_object <- Surv(time=times, event=events==event_name)
# --------------------------------------------------------------------------------------------------------------------------
# 1. Univariate Analysis w.r.t a single variable
# Running a PH Cox model on a single variable
analysis_var <- data$Alimentary.tract.and.metabolism_ATC1..cmed. ### VARIABLE OF INTEREST - CHANGE IF NECESSARY
model <- coxph(survival_object ~ analysis_var)
model # elementary results
summary(model) # more details
# Plotting KM Survival Curves
kmfit <- survfit(survival_object ~ analysis_var)
kmfit
summary(kmfit)
autoplot(kmfit, xLab="No. of months", ylab="Survival", title="Comparison of survival times w.r.t infections",
         legTitle="Infection in the first 18 months", legLabs=c("Not infected", "Infected"), censShape=3,
         legTitleSize = 20, legLabSize=20, titleSize=30, palette="Set1")$plot + theme_classic()
# --------------------------------------------------------------------------------------------------------------------------
# 2. Univariate Analysis w.r.t multiple variables - NUMERIC
first_col <- 4 # number of column containing first NUMERIC variable
last_col <- ncol(data)
# Fit one univariate Cox model per column and collect its coefficient row.
# NOTE(review): coefficients[1..5] indexes the coefficient MATRIX
# column-wise, so this is only correct when the model has a single
# coefficient row (i.e. numeric or binary predictors) -- confirm.
cox_fct = function(current_var) {
  model <- coxph(survival_object ~ current_var)
  Beta <- c(summary(model)$coefficients[1])
  expBeta <- c(summary(model)$coefficients[2])
  stdErr <- c(summary(model)$coefficients[3])
  zScore <- c(summary(model)$coefficients[4])
  pValue <- c(summary(model)$coefficients[5])
  # pValueWald <- pchisq( summary(model)$waldtest["test"], summary(model)$waldtest["df"], lower.tail=FALSE)
  return( matrix(c(Beta, expBeta, stdErr, zScore, pValue), nrow=1) )
}
results <- t( apply(data[,c(first_col:last_col)],2,cox_fct) )
labels <- c("Beta", "HR", "std err", "z-score", "pValue" ) ### CHANGE IF NECESSARY
colnames(results) <- labels
View(results)
# --------------------------------------------------------------------------------------------------------------------------
# 3. Univariate Analysis w.r.t multiple variables - CATEGORICAL
first_col <- 4 # number of column containing first CATEGORICAL variable
last_col <- 15 # ncol(data)
# Same as section 2 but reports the model-level Wald test p-value, which
# remains meaningful for multi-level categorical predictors.
cox_fct = function(current_var) {
  model <- coxph(survival_object ~ current_var)
  Beta <- c(summary(model)$coefficients[1])
  expBeta <- c(summary(model)$coefficients[2])
  stdErr <- c(summary(model)$coefficients[3])
  zScore <- c(summary(model)$coefficients[4])
  # pValue <- c(summary(model)$coefficients[5])
  pValueWald <- pchisq( summary(model)$waldtest["test"], summary(model)$waldtest["df"], lower.tail=FALSE)
  return( matrix(c(Beta, expBeta, stdErr, zScore, pValueWald), nrow=1) )
}
results <- t( apply(data[,first_col:last_col],2,cox_fct) )
labels <- c("Beta", "HR", "std err", "z-score", "pValueWald" ) ### CHANGE IF NECESSARY
colnames(results) <- labels
View(results)
# --------------------------------------------------------------------------------------------------------------------------
# 4. Multivariate Analysis
# Running a Multivariate PH Cox model on a single variable
first_col <- 5
last_col <- 10 # ncol(data)
analysis_vars <- names(data)[c(first_col:last_col)]
f <- as.formula( paste0( "survival_object ~", paste0(analysis_vars, collapse = "+") ) )
model <- coxph(f, data=data)
model # elementary results
summary(model) # more details
# NOTE(review): stepAIC starting from the FULL model with
# direction="forward" cannot add terms and so is a no-op; "backward" or
# "both" was likely intended -- confirm.
results <- stepAIC(model, direction="forward")
summary(results)
# Advanced model selection: iteratively drop the least significant
# variable until all remaining p-values are below 0.05.
model <- results
pValues <- summary(model)$coefficients[,5]
maxPV <- max( pValues )
varToRemove <- which.max(pValues)
while(maxPV >= 0.05){
  print(length(analysis_vars));
  # NOTE(review): row.names of the coefficient matrix include factor level
  # suffixes for categorical variables, so rebuilding the formula from
  # them can fail for factors -- verify with the actual data.
  analysis_vars <- row.names( summary(model)$coefficients )[ -varToRemove ]
  f <- as.formula( paste0( "survival_object ~", paste0(analysis_vars, collapse = "+") ) )
  model <- coxph(f, data=data)
  pValues <- summary(model)$coefficients[,5]
  maxPV <- max( pValues )
  varToRemove <- which.max(pValues)
}
model
# --------------------------------------------------------------------------------------------------------------------------
# 6. Printing results
file_name <- "toto" ### CHANGE IF NECESSARY
View(results)
write.csv(as.data.frame(results), paste0(file_name, ".csv"))
|
395512addb2455430ab738b82676dc065df6a1ea
|
e54e7a8f0140a33da41e420f4149c5c737175a89
|
/R/extract_weather_data.R
|
e4666428cb79343320d78824f0da8f10131d59fd
|
[] |
no_license
|
one-acre-fund/arc2weather
|
0d80547adca12a0bcdd1a299d9886ce192dc16cd
|
25b6ab102f8cf74e6e60b48dac760ba07d198e86
|
refs/heads/master
| 2020-03-28T04:33:39.962589
| 2019-02-04T16:49:59
| 2019-02-04T16:49:59
| 147,722,497
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,139
|
r
|
extract_weather_data.R
|
#' Extract ARC2 weather values for a set of GPS points over a date range.
#'
#' Builds the vector of dates, downloads the ARC2 rasters for those dates,
#' converts them to a velox raster, and extracts the raster values at each
#' GPS location. (create_date_vector, arc2_api_download,
#' convert_tibble_to_raster, convert_to_velox, convert_spdf and
#' extract_velox_gps are package helpers defined elsewhere.)
#'
#' @param start_date First date of the range (passed to create_date_vector).
#' @param end_date Last date of the range (passed to create_date_vector).
#' @param gps_file GPS file, probably from data warehouse, of GPS points for
#'   which we want to extract weather values.
#' @param lat_col The Latitude column in the gps_file.
#' @param lon_col The Longitude column in the gps_file.
#' @return A data.frame with the extracted weather values and the date for
#'   each GPS point in the file.
#' @export
#' @examples
#' \dontrun{
#' weatherValues <- extract_weather_data("2018-01-01", "2018-01-07",
#'                                       gpsData, "Latitude", "Longitude")
#' }
extract_weather_data <- function(start_date,
                                 end_date,
                                 gps_file,
                                 lat_col,
                                 lon_col){
  dates <- create_date_vector(start_date, end_date)
  dat_extract <- extract_velox_gps(
    veloxRaster = convert_to_velox(convert_tibble_to_raster(arc2_api_download(dates)), dates),
    spdf = convert_spdf(gps_file, lon_col, lat_col))
  return(dat_extract)
}
|
a1303a490ee605bc26e279ed065bd95d106f14f3
|
bbff57b6e8029c2626077269790eea6d9932aff8
|
/man/filter_bmdk.Rd
|
2d10fde0830da9fc73e819051c38562541b83f5a
|
[
"MIT"
] |
permissive
|
abcsFrederick/BMDK
|
303869902486228c5d91f335875a74dc1400b70c
|
cd774919ea7440837beab48281c6277db1263d5d
|
refs/heads/master
| 2023-07-03T04:33:47.243351
| 2021-08-06T14:38:34
| 2021-08-06T14:38:34
| 269,632,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 817
|
rd
|
filter_bmdk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_bmdk.R
\name{filter_bmdk}
\alias{filter_bmdk}
\title{Runs the BMDK features data through a series of filtering methods}
\usage{
filter_bmdk(dat)
}
\arguments{
\item{dat}{a list containing 3 elements: case, a list of case/control statuses;
feat, a matrix of normalized feature data; maxfeat, a list of max features
from each column in feat}
}
\value{
dat a list containing 4 elements: case, a list of case/control statuses;
feat, a matrix of normalized feature data; maxfeat, a list of max features
from each column in feat; testresults, a list of statistical test results
}
\description{
Utilizes the Wilcoxon Rank Sum test, the t test, and the Decision Tree Gini
Index to identify the significance of each feature.
}
|
735adc0259749a283b41a85494fa6b664789563b
|
4d216630e99eda5974b2655baf8928ca7da754bd
|
/man/load_observations.Rd
|
8b6b4464019f9b8fef8e9714a89f00f85e6728dc
|
[] |
no_license
|
ashiklom/edr-da
|
467861ec61cd8953eb272e2844414a522db7268f
|
b092600954b73fa064300c6e7b21d0413d115b94
|
refs/heads/master
| 2021-07-12T18:59:20.190169
| 2021-04-12T14:00:17
| 2021-04-12T14:00:17
| 71,824,349
| 2
| 5
| null | 2018-02-01T13:29:03
| 2016-10-24T19:26:27
|
R
|
UTF-8
|
R
| false
| true
| 609
|
rd
|
load_observations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_observations.R
\name{load_observations}
\alias{load_observations}
\title{Load AVIRIS observations into a list}
\usage{
load_observations(
sites,
aviris_specfile = here::here("aviris/aviris.rds"),
use_waves = seq(400, 1300, by = 10)
)
}
\arguments{
\item{sites}{Character vector of site tags}
\item{aviris_specfile}{Path to AVIRIS spectra RDS file. Default = "aviris/aviris.rds"}
\item{use_waves}{Wavelengths to use for observation. Default = 400 to 1300, by 10nm}
}
\description{
Load AVIRIS observations into a list
}
|
43df3d142c98a2c5bc37aabdcc49e499148838f7
|
3197c8c3a176cff9e2c81cbaf4ed44338eec3b72
|
/R/post_model_modifications.R
|
4dbc74a1d05c48ed72cf1fc01f5c189a5cd985d3
|
[] |
no_license
|
glep/pricing_game_submit
|
c1918349dd14026496fd5b629d6458f36d008309
|
cedd0080ca524316193830daab8329c4a9e65bdb
|
refs/heads/main
| 2023-04-04T22:07:44.725540
| 2021-03-29T00:51:04
| 2021-03-29T00:51:04
| 332,944,272
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,104
|
r
|
post_model_modifications.R
|
# Train and attach each requested post-model correction table to `model`.
# `type` is a character vector choosing among "makemodel", "city", "claims";
# the corresponding train_* helpers each add one lookup table to `model`.
train_model_correction <- function(model, df, pred, type) {
  if ("makemodel" %in% type) {
    model <- train_make_model_correction(model = model, df = df, pred = pred)
  }
  if ("city" %in% type) {
    model <- train_city_correction(model = model, df = df, pred = pred)
  }
  if ("claims" %in% type) {
    model <- train_claims_correction(model = model, df = df, pred = pred)
  }
  model
}
# Apply whichever correction tables were trained into `model`, updating
# the predictions in `df_pred` for each one that is present.
apply_model_correction <- function(model, newdata, df_pred) {
  if ("veh_correction" %in% names(model)) {
    message("Post model make_model correction")
    df_pred <- apply_veh_correction(model, newdata, df_pred)
  }
  if ("city_correction" %in% names(model)) {
    message("Post model city correction")
    df_pred <- apply_city_correction(model, newdata, df_pred)
  }
  if ("claims_correction" %in% names(model)) {
    message("Post model Claims correction")
    df_pred <- apply_claims_correction(model, newdata, df_pred)
  }
  df_pred
}
# make_model correction ---------------------------------------------------
# Per-make/model loss ratio against current predictions; correction is the
# loss ratio floored at 1 (surcharges only, never discounts).
train_make_model_correction <- function(model, df, pred) {
  joined <- left_join(
    select(df, unique_id, vh_make_model, uncapped_amount),
    pred,
    by = "unique_id"
  )
  lr_tbl <- summarise(
    group_by(joined, vh_make_model),
    lr = sum(uncapped_amount) / sum(pred)
  )
  model$veh_correction <- select(
    mutate(lr_tbl, correction = pmax(1, lr)),
    vh_make_model, correction
  )
  model
}
# Multiply each policy's prediction by its vehicle correction factor.
apply_veh_correction <- function(model, newdata, df_pred) {
  out <- select(newdata, unique_id, vh_make_model)
  out <- left_join(out, model$veh_correction, by = "vh_make_model")
  # When a vehicule is unknown, surcharge by 50%, just in case.
  out <- replace_na(out, list(correction = 1.5))
  out <- left_join(out, df_pred, by = "unique_id")
  out <- mutate(out, pred = pred * correction)
  select(out, unique_id, pred)
}
# City correction ---------------------------------------------------------
# Would be better to split by renewal/new business
# For renewal, I should merge by id_policy and exclude the claims from
# city or vehicle correction
# Bwah, I'll just lose a few risky policies, no big deal.
# Per-city (population x town surface) loss ratio; correction floored at 1.
train_city_correction <- function(model, df, pred) {
  joined <- left_join(
    select(df, unique_id, claim_amount, population, town_surface_area),
    pred,
    by = "unique_id"
  )
  lr_tbl <- summarise(
    group_by(joined, population, town_surface_area),
    expo = n(),
    lr = sum(claim_amount) / sum(pred),
    .groups = "drop"
  )
  model$city_correction <- select(
    mutate(lr_tbl, correction = pmax(1, lr)),
    population, town_surface_area, correction
  )
  model
}
# Multiply each policy's prediction by its city correction factor.
apply_city_correction <- function(model, newdata, df_pred) {
  out <- select(newdata, unique_id, population, town_surface_area)
  out <- left_join(out, model$city_correction,
                   by = c("population", "town_surface_area"))
  # When city is unknown, no surcharge
  out <- replace_na(out, list(correction = 1))
  out <- left_join(out, df_pred, by = "unique_id")
  out <- mutate(out, pred = pred * correction)
  select(out, unique_id, pred)
}
# Claims correction -------------------------------------------------------
# Surcharge by historical claim count, using a hard-coded schedule.
train_claims_correction <- function(model, df, pred) {
  # Not proud, but I have to hard-code this one
  claims_correction <- tribble(
    ~nb_claim, ~correction,
    0, 1,
    1, 1,
    2, 1.2,
    3, 1.3,
    4, 2
  )
  counts <- summarise(group_by(df, id_policy),
                      nb_claim = sum(claim_amount > 0))
  model$claims_correction <- select(
    left_join(counts, claims_correction, by = "nb_claim"),
    id_policy, correction
  )
  model
}
# Multiply each policy's prediction by its claim-history correction factor.
apply_claims_correction <- function(model, newdata, df_pred) {
  out <- select(newdata, unique_id, id_policy)
  out <- left_join(out, model$claims_correction, by = "id_policy")
  # Policies with no claims history: no surcharge.
  out <- replace_na(out, list(correction = 1))
  out <- left_join(out, df_pred, by = "unique_id")
  out <- mutate(out, pred = pred * correction)
  select(out, unique_id, pred)
}
|
fda88bb8e929121f69048ae4772936bb2617144a
|
fbc5705f3a94f34e6ca7b9c2b9d724bf2d292a26
|
/DCamp/Importing data in R_1/readr data.table/read_csv().R
|
3e17ae7a1b44670b7f8aeb1698302abae58e4cba
|
[] |
no_license
|
shinichimatsuda/R_Training
|
1b766d9f5dfbd73490997ae70a9c25e9affdf2f2
|
df9b30f2ff0886d1b6fa0ad6f3db71e018b7c24d
|
refs/heads/master
| 2020-12-24T20:52:10.679977
| 2018-12-14T15:20:15
| 2018-12-14T15:20:15
| 58,867,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 126
|
r
|
read_csv().R
|
# Load the readr package (provides read_csv())
library(readr)
# Import potatoes.csv with read_csv(): potatoes
# (resolved relative to the current working directory)
potatoes <- read_csv("potatoes.csv")
|
91eec877bb03e30dc488841ec5c1bcc6876567d5
|
5c1ec4aeaf4a90466984737f81b639e9f96b950b
|
/R/cofaTest_helpers.R
|
f3b48c0183166c1b782217a298a9b79cd251ab82
|
[] |
no_license
|
halleewong/cofa
|
b45f41a53a915d1ec2ecb1f1cd6729570571a48e
|
e4c9e4412edd4dac9c608bcb8fb90e5d5bd85fbb
|
refs/heads/master
| 2020-03-20T10:45:43.684862
| 2019-10-27T03:25:00
| 2019-10-27T03:25:00
| 137,383,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,466
|
r
|
cofaTest_helpers.R
|
# Flatten the lower triangles of the k frequency matrices in `results`
# into one long data frame for histogram plotting.
#
# results: list of square frequency matrices (one per forest/trial)
# k:       number of consecutive equal-sized groups used to label the
#          pooled values (previously hard-coded as 1:10, silently
#          ignoring the k argument)
#
# Returns a data.frame with columns `values` (pooled lower-triangle
# entries, in list order) and `label` (factor 1..k).
gatherFMValues <- function(results, k = 10){
  values <- unlist(
    lapply(results, function(fm) as.numeric(fm[lower.tri(fm, diag = FALSE)])),
    use.names = FALSE
  )
  tbl <- data.frame(values = values)
  # seq_len(k) replaces the literal 1:10 so k != 10 works as documented.
  tbl$label <- as.factor(unlist(lapply(seq_len(k), FUN = rep, length(values) / k),
                                recursive = TRUE))
  return(tbl)
}
# plots a simple histogram of the aggregated cofa values
# df: data.frame with a `values` column (as built by gatherFMValues);
# x-axis clipped to [-0.1, 1.1], 0.01-wide bins.
plotCofaValues <- function(df=tbl){
  ggplot(df, aes(x=values)) + xlim(-0.1,1.1) +
    geom_histogram(binwidth=0.01) +
    theme_minimal()
}
# plots density curves for each forest in the trial
# df: data.frame with `values` and a `label` factor (one curve per label,
# as built by gatherFMValues); NA values are dropped by geom_density.
plotTrials <- function(df=tbl){
  ggplot(df, aes(x=values, color=label)) + xlim(-0.1,1.1) +
    geom_density(na.rm=TRUE) +
    theme_minimal()
}
# summary: Retrieves all test statistics for the (level1, level2) cell
#          from every element of `results`
# parameters:
#   results - list of objects each carrying a $freqMat matrix with named
#             row/column dimensions
#   level1, level2 - row and column names selecting the cell of interest
# returns: unnamed vector of the statistic values, one per element of
#          `results` (NULL when `results` is empty, matching the old
#          behavior). The grow-by-c() loop is replaced by a single
#          lapply/unlist pass.
valueDist <- function(results, level1, level2){
  unlist(lapply(results, function(res) res$freqMat[level1, level2]),
         use.names = FALSE)
}
# summary: makes histogram ggplot showing the observed test statistic
#          against the null distribution of values
# parameters:
#   result0 - list with fm and tot matrices of the results for the real data
#   results - list of list(fm=matrix, tot=matrix) (null-hypothesis trials)
#   level1, level2 - names of levels (must match colnames in the matrices)
#   binwidth - histogram bin width
#   normal_distribution - overlay a normal curve fitted to the null values,
#                         scaled to counts (density * n * binwidth)
# returns:
#   ggplot object; the solid vertical line marks the observed statistic,
#   the dashed grey line marks 0.5
plotLevelsDist <- function(result0, results, level1, level2, binwidth=0.01, normal_distribution=TRUE){
  temp = data.frame(vals=valueDist(results, level1, level2))
  p <- ggplot(temp, aes(x=vals)) +
    geom_vline(xintercept=0.5, col='gray', lty=2) +
    geom_histogram(binwidth=binwidth) +
    geom_vline(xintercept=result0$freqMat[level1,level2], size=1) +
    scale_x_continuous(breaks=seq(0,1,0.2), limits=c(-0.01,1.01)) +
    #scale_y_continuous(breaks=seq(0,70,20), limits=c(0,75)) +
    theme_light() +
    labs(title=paste(level1, level2,
                     ": stat = ", round(result0$freqMat[level1,level2],3),
                     ", total = ", result0$totalMat[level1,level2] ),
         x="Value", y="Count") +
    theme(panel.grid.minor = element_blank(),
          panel.grid.major = element_blank(),
          text = element_text(size=20))
  if (normal_distribution == TRUE){
    # Normal overlay on the count scale: density rescaled by n * binwidth.
    p <- p + stat_function(size=1,fun = function(x) {
      dnorm(x, mean = mean(temp$vals), sd = sd(temp$vals)) * nrow(temp) * binwidth
    })
  }
  return(p)
}
# summary: finds average statistic value for every pair of levels
# parameters
# result - list of pairs of matrices
# returns:
# matrix of mean values
meanMatrix <- function(result){
fmat = result[[1]]$freqMat
for (i in 2:length(result)){
fmat = fmat + result[[i]]$freqMat
}
return(fmat/length(result))
}
# summary: helper function for making various plots, where level names not important
# parameters:
# result - list of pairs of matrices (fm and tot)
# returns:
# data.frame object with all values from each null-hypothesis distribution
aggregateDistributions <- function(result){
levelNames = rownames(result[[1]]$freqMat)
allDist = c()
group = c()
level1name = c()
level2name = c()
count = 1
for (m in 2:length(levelNames)){
for (n in 1:(m-1) ){
# pair of levels
level1 = levelNames[n]
level2 = levelNames[m]
# get statistic value from all trials
vals = c()
for (i in 2:length(result)){
vals = c(vals, result[[i]]$freqMat[level1,level2])
}
allDist = c(allDist, vals)
group = c(group, rep(count, times=length(vals)))
level1name = c(level1name, rep(level1, times=length(vals)))
level2name = c(level2name, rep(level2, times=length(vals)))
count = count + 1
}
}
return(data.frame(values=allDist, label=factor(group), level1=level1name, level2=level2name))
}
## --- Scores -----------------------------------------------------------------
# summary: calculates a p-value by counting number of values in the
# distribution that are more extreme than the value given
# parameters:
# value - real number
# dist - vector of values
# returns: single value between 0 and 1
#
pValue <- function(value, dist){
vals = dist - mean(dist)
val0 = value - mean(dist)
return((sum(vals <= -abs(val0)) + sum(vals >= abs(val0)))/length(dist))
}
# summary: calculates a z-score using the mean and sd of the dist
# parameters:
# value - real number
# dist - vector of values
# returns: single value
#
zScore <- function(value, dist){
mean = mean(dist)
sd = sd(dist)
return((value - mean)/sd)
}
# summary: Calculates scores for all pairs of levels
# parameters:
# result0 - a list with a fm matrix and tot matrix
# results - a list of pairs of matrices
# pValue, zScore - boolean for type of metric
# returns: a matrix of z-scores or p-values
#
metricMat <- function(result0, results, metric){
if (!(metric %in% c("pValue","zScore"))){
warning("Either pValue or zScore must be TRUE but not both")
}
# create empty matrix
levels <- colnames(result0$freqMat)
mat <- matrix(NA, nrow = length(levels), ncol=length(levels))
colnames(mat) = rownames(mat) = levels
# claculate scores
for (i in 1:length(levels)){
for (j in 1:i-1){
level1 = levels[i]
level2 = levels[j]
if (metric == "zScore"){
z = zScore(value=result0$freqMat[level1,level2],
dist=valueDist(results,level1,level2))
}
else if (metric == "pValue"){
z = pValue(value=result0$freqMat[level1,level2],
dist=valueDist(results,level1,level2))
} else {z = NA}
mat[level1,level2] = mat[level2,level1] = z
}
}
return(mat)
}
# returns:
# returns:
# matrix object
getMaskedMat <- function(result0, results, metric, cutoff){
if (metric=="pValue"){
mask = abs(metricMat(result0, results, metric=metric)) < cutoff
} else if (metric=="zScore"){
mask = abs(metricMat(result0, results, metric=metric)) > cutoff
}
masked_mat = result0$freqMat
masked_mat[mask==FALSE | is.na(mask)] <- NA
diag(masked_mat) <- NA
return(masked_mat)
}
# summary: Returns a
# parameters:
# result0 - list with fm and tot matrix
# results - list of pairs of fm and tot matrices
# metric - either "pValue" or "zScore"
# cutoff - will be upper bound if using p-value and lower bound if using
# z-score
# order - boolean passed to vizCoFreqMat
# size - text size
# returns:
# ggplot object of the matrix
vizMaskedMatrix <- function(result0, results, metric, cutoff, order, size=1){
masked_mat <- getMaskedMat(result0, results, metric, cutoff)
if (order == FALSE){
masked_mat_ordered <- masked_mat
} else {
hc <- cluster_mat(result0$freqMat)
masked_mat_ordered <- masked_mat[hc$order, hc$order]
}
# all lower tri tiles
mt2 <- masked_mat_ordered
mt2[is.na(mt2)] <- 0.5
diag(mt2) <- 0.5
# non signif tiles
nullmat <- mt2
nullmat[nullmat != 0.5] <- NA
nullmat_data <- meltForViz(get_lower_tri(nullmat))
p <- vizCoFreqMat(mt2, order=FALSE, alph=FALSE, text=FALSE) +
geom_tile(data=nullmat_data, aes(x=var1, y=var2), size=1, fill="gray98", colour=NA) +
geom_text(data=meltForViz(get_lower_tri(round(masked_mat_ordered,1))),
aes(x=var1, y=var2, label=value), size=1, alpha=0.7)
return(p)
}
|
9efab0f829c7d49fcff71110ca107aa3544cfb73
|
47481d6045728644b18c4e5e5ebcfbeb88701dc9
|
/cachematrix.R
|
7d07f9ec9e07da0519674fb68458ec8fc56eb084
|
[] |
no_license
|
shimshock/ProgrammingAssignment2
|
83bbf85efa028fc3d62be32937042857d2862796
|
83b4ae4420f3b5eb93f3c35023e29abf8340c179
|
refs/heads/master
| 2020-05-29T08:52:53.342614
| 2016-09-30T03:03:24
| 2016-09-30T03:03:24
| 69,582,183
| 0
| 0
| null | 2016-09-29T15:41:33
| 2016-09-29T15:41:32
| null |
UTF-8
|
R
| false
| false
| 883
|
r
|
cachematrix.R
|
## This function will cache the inverse of a matrix
## When trying to invert a matrix it will first check to see if thee is a cached verison
## if there is it will use it
## Creates the matrix in cache
makeCacheMatrix <- function(x = matrix()) {##defines the function
inv<- NULL
set<- function(y){
x<<-y
inv<-NULL
}
get<-function()x
setinverse<-function(inverse) inv<<-inverse
getinverse<-function() inv
list(set=set,get=get,
setinverse=setinverse,
getinverse=getinverse)
}
## This provides the inverse of the matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
94aa24a6e04ae82412903b55480a8315e37ff86f
|
a629eab419035f8eb96cf4b9c2cfd56f353df3c8
|
/submission-project1/plot1.R
|
5d37cba64aecab76b2fc443c50d265de1e760f17
|
[] |
no_license
|
eegithub/ExData_Plotting1
|
14f8e0017eb3f9449b30a63a8c219b61e380d377
|
c18bd89edb49650169a309a251778c105ec45af1
|
refs/heads/master
| 2021-01-22T12:02:28.164226
| 2016-03-03T00:45:34
| 2016-03-03T00:45:34
| 52,935,669
| 0
| 0
| null | 2016-03-02T05:08:35
| 2016-03-02T05:08:35
| null |
UTF-8
|
R
| false
| false
| 481
|
r
|
plot1.R
|
#Pre-requisite : create_data.R has been executed => project-data.csv exists in RWork directory
#Getting & parsing data
read.csv("project-data.csv",header=TRUE, stringsAsFactors = FALSE)->pdata
as.Date(pdata$Date)->pdata$Date
strptime(pdata$Time, format="%Y-%m-%d %H:%M:%S")->pdata$Time
#Creating 1st plot
hist(pdata$Global_active_power, main="Global Active Power", col="red", xlab="Global Active Power (kilowatts)")
dev.copy(png,'plot1.png',width = 480, height = 480)
dev.off()
|
437592d3b0fd0574bf64fc94d092bda035518dbf
|
7e7bb7bfdf62c24b7fecf78f5247d28839728710
|
/Student Recruiting/munge/01-A.R
|
cceb3f8d2f6faf950753d0c46128e204ad9c9218
|
[] |
no_license
|
kippchicago/Data_Analysis
|
1ad042d24c7a1e11e364f39c694692f5829363a4
|
8854db83e5c60bc7941654d22cbe6b9c63613a7f
|
refs/heads/master
| 2022-04-09T10:10:43.355762
| 2020-02-20T18:03:40
| 2020-02-20T18:03:40
| 5,903,341
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,463
|
r
|
01-A.R
|
# Munge student and mailer data
student_addr<-stus %>%
select(first=first_name,
last=last_name,
grade=grade_level,
address=street,
lat=lat.x,
long=lon.x
) %>%
mutate(type="KIPPster",
cohort=NA)
mailer_addr_selected<-mailer_addr %>%
select(first,
last,
address,
lat,
long) %>%
mutate(grade=NA,
cohort=NA,
type="Postcard")
alumni_addr_selected<-alumni_addr %>%
select(first=FirstName,
last=LastName,
address=complete_address,
lat,
long,
cohort=Cohort) %>%
mutate(grade=NA,
type="Alumni")
combined_addresses<-rbind_list(student_addr, mailer_addr_selected, alumni_addr_selected) %>%
filter(!is.na(lat), !is.na(long))
# load shapefiles ####
# community areas ####
# get cummunity areas ####
community_areas_shapefile<-readOGR("shapefiles/CommAreas/",
"CommAreas")
community_areas_shapefile<-spTransform(community_areas_shapefile,
CRS("+proj=longlat +datum=WGS84"))
# prep community areas for ggplot
community_areas_shapefile@data$id <- rownames(community_areas_shapefile@data)
community_areas <- fortify(community_areas_shapefile, region="id")
community_areas.df<-merge(community_areas,
community_areas_shapefile,
by="id") %>%
arrange(id, order) %>%
as.data.frame
# add_municipalities
municipalities_shapefile<-readOGR("shapefiles/Municipalities/",
"Municipalities")
municipalities_shapefile<-spTransform(municipalities_shapefile,
CRS("+proj=longlat +datum=WGS84"))
# prep community areas for ggplot
municipalities_shapefile@data$id <- rownames(municipalities_shapefile@data)
municipalities <- fortify(municipalities_shapefile, region="id")
municipalities.df<-merge(municipalities,
municipalities_shapefile,
by="id") %>%
arrange(id, order) %>%
as.data.frame
# assign addresses to community areas to aid in subsetting
combined_sp <- combined_addresses %>% select(long, lat)%>% as.data.frame
coordinates(combined_sp) <- ~long+lat
proj4string(combined_sp)<- CRS("+proj=longlat +datum=WGS84")
cas_overlaid<-over(combined_sp, community_areas_shapefile)
combined_addresses$community_area<- cas_overlaid$COMMUNITY
|
b8ae49941232dee1de87dc256d6a8d8097c6da3c
|
9a8ccb09b9cf666761760992f0238e9af3e9873f
|
/R/categories_risk.R
|
56ec2425dde0f9fed75ad20f4c0a0de68492a3b0
|
[
"MIT"
] |
permissive
|
emraher/tbat
|
68bee38086a1a7abd71fb792d4015bd3d6cfd82e
|
947e8cdd292249e2998b6e8b85a370ac5da507da
|
refs/heads/master
| 2023-04-28T12:42:31.851122
| 2023-04-05T20:38:00
| 2023-04-05T20:38:00
| 315,597,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 998
|
r
|
categories_risk.R
|
#' Get parameters and codes for risk data from TBB
#'
#' @return A tibble
#'
#' @examples
#'
#' \dontrun{
#' dt <- categories_risk()
#' }
#'
#' @export
#'
categories_risk <- function() {
categories <- httr::POST("https://verisistemi.tbb.org.tr/api/router",
body = '{"filters":[],"route":"kkbKategorilerAll"}',
httr::add_headers("LANG" = "tr", "ID" = "null"),
httr::accept_json(),
config = httr::config(ssl_verifypeer = FALSE)
) %>%
httr::content(as = "text") %>%
jsonlite::fromJSON() %>%
dplyr::select(
.data$UK_RAPOR, .data$RAPOR_ADI, .data$RAPOR_ADI_EN,
.data$UK_KATEGORI, .data$KATEGORI, .data$KATEGORI_EN,
.data$ALT_KATEGORI_1, .data$ALT_KATEGORI_1_EN,
.data$ALT_KATEGORI_2, .data$ALT_KATEGORI_2_EN,
tidyselect::everything()
) %>%
dplyr::arrange(
.data$RAPOR_ADI, .data$KATEGORI, .data$ALT_KATEGORI_1,
.data$ALT_KATEGORI_2
) %>%
janitor::clean_names() %>%
tibble::as_tibble()
return(categories)
}
|
c4341c5571418c0d33ecd2e785c9f7ece25c541e
|
dee458bc9dc3660f216b27f8d69d41d280639215
|
/cachematrix.R
|
79a3997b06de55d115e8a8878c9bedc64ae63db1
|
[] |
no_license
|
tvanelferen/ProgrammingAssignment2
|
909c58927ad0a5449ca246fc1df46f56da160d52
|
dc12b0d83143d7abbdab1ade8031d957a69d34ab
|
refs/heads/master
| 2020-12-24T22:59:22.035739
| 2015-09-20T10:16:21
| 2015-09-20T10:16:21
| 42,780,257
| 0
| 0
| null | 2015-09-19T16:24:05
| 2015-09-19T16:24:04
| null |
UTF-8
|
R
| false
| false
| 1,934
|
r
|
cachematrix.R
|
## This R.programm contains two main-functions and does two things:
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse
## and cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## Created by Tobias van Elferen (NL) for the Coursera Course "R-programming" (rprog-032)
## ---------------------------------
## This is the "makeCacheMatrix" part. It creates an matrix, then caches it's inverse.
## It does that in 4 steps: set and get the value of the vector,
## then set and get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
## set the value of the vector
set <- function(y) {
x <<- y
m <<- NULL
}
## get the value of the vector
get <- function() x
## set the value of the inverse
setInverse <- function(solve) m <<- solve
## get the value of the inverse
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## ---------------------------------
## This part computes the inverse of the special "matrix". It takes it from the cache,
## created by "MakeCacheMatrix" and when that is not-available, creates it instantly.
cacheSolve <- function(x, ...) {
m <- x$getInverse()
## Check if cached data is available...
if(!is.null(m)) {
message("getting cached data")
## return it when available
return(m)
}
## when inverse-matrix isn't available, compute it on the spot
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
## Return a matrix that is the inverse of 'x'
m
}
|
15f54f55854b0e8f113833084c9a9985537518e0
|
81c4acf23d5db8910522cdc0caab8e6a7ba5cc31
|
/xlsx_to_spss.R
|
c65a94ec2d2a10c316b9a33c62806bba021b231f
|
[] |
no_license
|
ruhulali/R_Codes
|
ff2d12dc6450ae1da748c4df6ab51600dd48e7aa
|
e2b3b3f090e7fd8a43746ed29e750b023035b3f1
|
refs/heads/master
| 2021-06-08T06:44:39.003256
| 2021-04-23T16:21:16
| 2021-04-23T16:21:16
| 158,611,318
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 391
|
r
|
xlsx_to_spss.R
|
setwd("Z:/eBay/Item Delivery tpNPS/US/Coder/Allocation/12_Dec'17")
library(xlsx)
mydata <- read.xlsx("duplicate_removal_file.xlsx",sheetName="Sheet1")
library(foreign)
write.foreign(mydata, "Z:/eBay/Item Delivery tpNPS/US/Coder/Allocation/12_Dec'17/us_dec17.txt",
"Z:/eBay/Item Delivery tpNPS/US/Coder/Allocation/12_Dec'17/us_dec17.sps", package="SPSS")
|
1ed8dadab08f5058d5fdc5235f6d813149339c1a
|
df68b9ef313b9a22e4fec5be4ee90752815d3db2
|
/man/GenomicDistributions.Rd
|
2482bcd30b8ac018a93db5105dd51a6761ea3446
|
[] |
no_license
|
joseverdezoto/GenomicDistributions
|
05e69f98b7a7cde473f06d8ff6e0b5424e711c35
|
8a1a1067932cd52686b94f355e1a972f71d54591
|
refs/heads/master
| 2020-08-23T15:04:45.503463
| 2019-09-20T14:12:50
| 2019-09-20T14:12:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 831
|
rd
|
GenomicDistributions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{GenomicDistributions}
\alias{GenomicDistributions}
\alias{GenomicDistributions-package}
\title{Produces summaries and plots of features distributed across genomes}
\description{
If you have a set of genomic ranges, the GenomicDistributions R package can
help you with some simple visualizations. Currently, it can produce two kinds
of plots: First, the chromosome distribution plot, which visualizes how your
regions are distributed over chromosomes; and second, the feature
distribution plot, which visualizes how your regions are distributed relative
to a feature of interest, like Transcription Start Sites (TSSs).
}
\references{
\url{http://github.com/databio/GenomicDistributions}
}
\author{
Nathan C. Sheffield
}
|
be8e2d4ec0c5ba74bf23e65a21608b1da9adf01a
|
8e4f92643e35a4f3c828de5fcf2d9cb3b96c0e2b
|
/CE_Traits_Correlate_Scripts/ceMT_Trait_COR.R
|
9a68dc421184363367f8771f84a8546717dd1d0c
|
[] |
no_license
|
RWilcox90/MOST
|
b82122e9498e1541d48cbf7ec66090402296baca
|
33e3de88cec5cd722dd006471796b7fbca614602
|
refs/heads/master
| 2021-01-23T01:18:02.755051
| 2017-06-06T16:15:59
| 2017-06-06T16:15:59
| 92,867,169
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,359
|
r
|
ceMT_Trait_COR.R
|
ce <- read.csv("lmeCE_Trait_Files/ceMT_Traits.csv")
Int <- as.data.frame(list(Var="Int", CSQ1=cor(ce$Intercept, ce$CSQ_1, use="complete.obs"),
CSQ2=cor(ce$Intercept, ce$CSQ_2, use="complete.obs"),
PHQAnx=cor(ce$Intercept, ce$PHQ12_Anx, use="complete.obs"),
PHQDep=cor(ce$Intercept, ce$PHQ34_Dep, use="complete.obs"),
RRQ=cor(ce$Intercept, ce$RRQ, use="complete.obs"),
FFMQ=cor(ce$Intercept, ce$FFMQ, use="complete.obs"),
Social=cor(ce$Intercept, ce$Social, use="complete.obs"),
Purpose=cor(ce$Intercept, ce$Purpose, use="complete.obs"),
Barratt=cor(ce$Intercept, ce$Barratt, use="complete.obs"),
CSQ=cor(ce$Intercept, ce$CSQ, use="complete.obs"),
CFQ=cor(ce$Intercept, ce$CFQ, use="complete.obs"),
SWLS=cor(ce$Intercept, ce$SWLS, use="complete.obs")))
OnTask <- as.data.frame(list(Var="OnTask", CSQ1=cor(ce$ThinkingWhat.I.was.doing, ce$CSQ_1, use="complete.obs"),
CSQ2=cor(ce$ThinkingWhat.I.was.doing, ce$CSQ_2, use="complete.obs"),
PHQAnx=cor(ce$ThinkingWhat.I.was.doing, ce$PHQ12_Anx, use="complete.obs"),
PHQDep=cor(ce$ThinkingWhat.I.was.doing, ce$PHQ34_Dep, use="complete.obs"),
RRQ=cor(ce$ThinkingWhat.I.was.doing, ce$RRQ, use="complete.obs"),
FFMQ=cor(ce$ThinkingWhat.I.was.doing, ce$FFMQ, use="complete.obs"),
Social=cor(ce$ThinkingWhat.I.was.doing, ce$Social, use="complete.obs"),
Purpose=cor(ce$ThinkingWhat.I.was.doing, ce$Purpose, use="complete.obs"),
Barratt=cor(ce$ThinkingWhat.I.was.doing, ce$Barratt, use="complete.obs"),
CSQ=cor(ce$ThinkingWhat.I.was.doing, ce$CSQ, use="complete.obs"),
CFQ=cor(ce$ThinkingWhat.I.was.doing, ce$CFQ, use="complete.obs"),
SWLS=cor(ce$ThinkingWhat.I.was.doing, ce$SWLS, use="complete.obs")))
MT_final <- rbind(Int, OnTask)
write.csv(MT_final, file = "Cor_Matrix_MTTrait.csv", row.names = FALSE)
|
8aedf4a638230c4984d7456d80a0b5e6fc81ecd0
|
0dcf732360bdcd82a50d534e6b5cdd1587f66163
|
/plot1.R
|
568ba87ddefd4650e6f0c457331e4d94d8e9e4db
|
[] |
no_license
|
rmnmrgrd/ExData_Plotting1
|
dbe4b8e7978152baa291e38752c78d71fb6150d7
|
f8ae2a89bc1b1bb04145557e7c8e8d0727a0b8cb
|
refs/heads/master
| 2020-03-27T21:13:06.333802
| 2018-09-02T22:12:25
| 2018-09-02T22:12:25
| 147,125,423
| 0
| 0
| null | 2018-09-02T22:09:38
| 2018-09-02T22:09:37
| null |
UTF-8
|
R
| false
| false
| 388
|
r
|
plot1.R
|
library(dplyr)
d <- read.csv("household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
d <- mutate(tbl_df(d[d$Date == "1/2/2007" | d$Date == "2/2/2007", ]), Global_active_power_num = as.numeric(Global_active_power))
png("Plot1.png")
# Plot 1
hist(d$Global_active_power_num, main="Global Active Power", xlab="Global Active Power (kilowatts)", col="red")
dev.off()
|
9da4f4786b981d89926a1841d5924b4ba6582359
|
b4c641bacf51d5aa00983f75de026f15216d6864
|
/man/si_style_nolines.Rd
|
1e2a956e504e11d2bd6be78207660f2d7f4c17ac
|
[
"MIT"
] |
permissive
|
USAID-OHA-SI/glitr
|
c932a66475afe3b8506aa83478e074b93d211131
|
0ed30aa9c39e863fc3ef52111057686f30878914
|
refs/heads/main
| 2023-02-16T22:33:04.983558
| 2023-02-15T14:42:37
| 2023-02-15T14:42:37
| 250,594,305
| 7
| 1
|
MIT
| 2023-09-07T19:10:26
| 2020-03-27T17:05:36
|
R
|
UTF-8
|
R
| false
| true
| 672
|
rd
|
si_style_nolines.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/si_themes.R
\name{si_style_nolines}
\alias{si_style_nolines}
\title{SI style plot with blank canvass}
\usage{
si_style_nolines(...)
}
\arguments{
\item{...}{inherits parameters from \code{\link[=si_style]{si_style()}}}
}
\description{
Incorporates the default si_style graph and strips all gridlines. Useful for when you want to focus attention on a few parts of a plot -- such as a bar graph with fewer than four categories.
}
\examples{
\dontrun{
library(extrafont)
library(ggplot2)
ggplot(iris, aes(Sepal.Length, y = Sepal.Width, colour = Species)) +
geom_point() + si_style_nolines()
}
}
|
138bf38000642881a443955183352cb3e0d79050
|
ef94b469c66c612e7709e9df86c79e57892d7b7d
|
/A4SriramanKrishnamurthy.r
|
6cd8ed179f8eea18ccaf8ddbb5470332a2746785
|
[] |
no_license
|
rksriram91/RAssignmentsUconn
|
0f421f90419591bd1e954fd99d0fc36d71457015
|
eb2002a9295087e22bdb022de101afd962f295f6
|
refs/heads/master
| 2021-01-09T05:59:12.373337
| 2017-04-03T04:10:22
| 2017-04-03T04:10:22
| 80,881,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,655
|
r
|
A4SriramanKrishnamurthy.r
|
#Create a vector called x and place values 10 to 1000.
x<-10:1000 # creates x vector as specified
# Create a y vector that takes the square root of the log of numbers in x.
y<-sqrt(log(x)) # creates y vector as specified
#y<-log(x)
#NOTE : The question asks to take sqrt of log values of x. However the Graph shown in the picture is log x function
#and not sqrt of log x function. I chose to proceed with the wordings given than the visuals.You can comment line 5
#and uncomment line 7 to get the exact visualization given in assignment pdf.
#Create a z vector with values 50/x.
z<-50/x # creates z vector as specified
#Plot x, y, and z as shown below.
plot(x,y,col="blue",xlab="",ylab="",ylim=c(0,10))
#the plot function plots values of x and y and colors them blue. xlab and y lab
#are intentionally given empty to not label axis as of now
#ylim marks y axis from 0 to 10(setting boundary to y axis)
par(new=TRUE)
# The above command allows us to add new plot over the existing plot.It is required to plot z values in the same plot.
#The y function is plotted in blue as given by col and as a line l as given by type
plot(x,z,col="red",xlab="",ylab="",axes=FALSE,ylim=c(0,10))
#The above command plots x and z values and the x,y axes labels are intentionally kept blank.
#The z function is plotted in red as given by col and as a line l as given by type
text(400,4,expression(y==alpha^2+gamma+sqrt(beta)))
# the above command the helps plotting the expression given in the visualization in the x and y coordinate that we require
title(main="Assignment4",
xlab="x", ylab="y and z")
# The above command labels the plot ,x axis and Y axis
|
0d024d57a37527b8d3c8b9af705e39001535838e
|
0bf2f4a9c118a423b1eb3dbe2ab3296c89a64aa7
|
/server.R
|
d84e0142f573edc1fc8e5ca4ca2f4137223a76c3
|
[] |
no_license
|
josephine-doerre/module_9_data_products_assignment
|
2d5b1f6d8cab9fbf71957c69647c9d835027ccd9
|
ad9e29eddd8c2816f11cf3d7f3675116381c8929
|
refs/heads/main
| 2023-04-11T20:31:49.711033
| 2021-04-27T13:15:03
| 2021-04-27T13:15:03
| 360,106,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
server.R
|
server <- function(input, output, session) {
output$samplesize <- renderText({
size <- sample.size.mean(input$e, input$S, N = Inf, level = 0.95)
paste0('Assuming N=Inf the Sample Size needed is: ', size$n)
})
}
shinyApp(ui, server)
|
e2b9a220a31a3f774d30830a60cfdd6102e6a820
|
a3540659efafb4664485bcac2b01b54558ea8b87
|
/R/urls.r
|
b4476533e1f5c79b28839d11a49df82fe1488f4f
|
[] |
no_license
|
jefferis/gscraper
|
d9f254717f15a5df7296bc3386eeb21eb6c399ef
|
6f00ddcc6697887c6c8e177ea611f09c54a843d2
|
refs/heads/master
| 2021-05-25T11:21:24.631511
| 2020-10-22T09:23:35
| 2020-10-22T09:23:35
| 10,742,048
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
r
|
urls.r
|
#' Find paths on server of resources defined by urls
#'
#' Does not attempt to parse queries etc
#' @param url Character vector of one or more urls
#' @return character vector of paths
#' @author jefferis
#' @export
#' @seealso \code{\link{parseURI}}
#' @examples
#' remotepath("http://cran.r-project.org/web/packages/scrapeR/scrapeR.pdf")
remotepath<-function(url){
# simple fn to return the path of a file on the remote server
# http://server.com/some/path/file -> some/path/file
sub("^[a-z]+://([^/]+)/+(.*)","\\2",url)
}
|
0c5a920177badc757ffc7ccb0ce04e5989f9bd60
|
0db61575fb70f8a8212550245d33fbab48dc69b4
|
/MachineLearningRepos/Assignment 3/ExploratoryDataAnalysis/Expl_Data.R
|
3676a94fe03c1645f7695f79826ce3be3e3b153f
|
[] |
no_license
|
SathyaSrini/SoloProjects
|
e364fee143f171751bb0e9b6c95a225611ea71ba
|
6eaab6cc0f29054dd7988f2e5c1db913a3ad49e4
|
refs/heads/master
| 2020-05-21T19:12:42.628164
| 2016-10-23T06:06:31
| 2016-10-23T06:06:31
| 64,565,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,455
|
r
|
Expl_Data.R
|
#install.packages('caTools',dependencies = TRUE,repos="http://cran.rstudio.com/")
#install.packages('moments',dependencies = TRUE,repos="http://cran.rstudio.com/")
#install.packages('nortest',dependencies = TRUE,repos="http://cran.rstudio.com/")
#install.packages('fitdistrplus',dependencies = TRUE,repos="http://cran.rstudio.com/")
#install.packages('gplots',dependencies = TRUE,repos="http://cran.rstudio.com/")
#install.packages('gridExtra',dependencies = TRUE,repos="http://cran.rstudio.com/")
#install.packages('grid',dependencies = TRUE,repos="http://cran.rstudio.com/")
#install.packages('gtable',dependencies = TRUE,repos="http://cran.rstudio.com/")
library(caTools)
library(moments)
library(nortest)
library(fitdistrplus)
library(gplots)
library(gridExtra)
library(grid)
library(gtable)
options(digits=7)
setPdf<-function(title,tableInput,...)
{
grid.newpage()
grid.text(title,y = unit(0.65, "npc"),gp=gpar(fontsize=20, col="red"))
grid.table(tableInput,rows=row.names(tableInput),cols = colnames(tableInput))
}
#Read input from CSV
pima_Data <-
read.csv(
"https://archive.ics.uci.edu/ml/machine-learning-databases/pima-indians-diabetes/pima-indians-diabetes.data",header = FALSE
)
#Adding column Names to the dataset
colnames(pima_Data) <-
c("pregnant","glucose","blood","triceps","insulin","bmi","pedigree","age","test")
col <- ncol(pima_Data) - 1 #To omit the testClass
pima_DataForCor <- pima_Data
pdf(file='Plots.pdf')
#Observed some zero values in the columns <- Setting them to NA
pima_Data$blood[pima_Data$blood == 0] <-
NA # set zero values in the variable blood to "NA"
pima_Data$glucose[pima_Data$glucose == 0] <-
NA # set zero values in the variable glucose to "NA"
pima_Data$triceps[pima_Data$triceps == 0] <-
NA # set zero values in the variable triceps to "NA"
pima_Data$insulin[pima_Data$insulin == 0] <-
NA # set zero values in the variable insulin to "NA"
pima_Data$bmi[pima_Data$bmi == 0] <-
NA # set zero values in the variable bmi to "NA"
#Converting test variable to a factor in R
pima_Data$test <- factor(pima_Data$test)
summary(pima_Data$test)
#Assigning 0 and 1 to the levels of the test column
levels(pima_Data$test) <- c("No","Yes")
summary(pima_Data)
#Plots - Bar + Histogram and Checking Normal Distribtuion
lapply(1:col,function(i)
hist(pima_Data[,i], main = paste("Histogram of",names(pima_Data)[i])))
lapply(1:col,function(i)
barplot(pima_Data[,i], main = paste("BarPlot of",names(pima_Data)[i])))
lapply(1:col,function(i)
qqnorm(pima_Data[,i], main = paste("Q-Q Plot of",names(pima_Data)[i])))
#Determining Type of distribution
lapply(1:col,function(i)
descdist(pima_DataForCor[,i], discrete = TRUE,boot=500,method = "unbiased"))
dev.off()
#Calculating Skewness and Kurtosis using Moments Package
pdf(file='Measurements.pdf', height=11, width=8.5)
skewNessValues <-
lapply(1:col,function(x) skewness(as.numeric(pima_Data[,x]),na.rm = TRUE))
setPdf("Skewness Values",skewNessValues)
kurtoSisValues <-
lapply(1:col,function(x) kurtosis(as.numeric(pima_Data[,x]),na.rm = TRUE))
setPdf("kurtosis Values",kurtoSisValues)
#Calculating the ShapiroWilk test Values
ShapiroList<-
lapply(1:col,function(x) as.double
(shapiro.test(as.numeric(pima_Data[,x]))$p.value))
setPdf("Shapiro-Wilk Test p-Values",ShapiroList)
# Calculting Lilliefors Test
LillieforsList<-
lapply(1:col,function(x) as.double
(lillie.test(as.numeric(pima_Data[,x]))$p.value))
setPdf("Lilliefors p-Values",LillieforsList)
# Calculting Anderson-Darling Test for Normality Test
AndersonList<-
lapply(1:col,function(x) as.double
(ad.test(as.numeric(pima_Data[,x]))$p.value))
setPdf(" Anderson-Darling p-Values",AndersonList)
#The data contains missing values - as highlighted here - http://blog.revolutionanalytics.com/2015/06/pairwise-complete-correlation-considered-dangerous.html
# I am using use = everything.
classCorrelation<- lapply(1:col,function(x) as.double(cor(as.numeric(pima_Data[,x]),as.numeric(pima_Data[,col+1],use = "everything"))))
setPdf("Correlation with Class Variable",classCorrelation)
classCorrelationWithoutMissingValues<- lapply(1:col,function(x) as.double(cor(as.numeric(pima_DataForCor[,x]),as.numeric(pima_DataForCor[,col+1]),use = "everything")))
setPdf("Correlation with Class Variable without considering missing values",classCorrelationWithoutMissingValues)
previousMax = cor(as.numeric(pima_DataForCor[,1]),as.numeric(pima_DataForCor[,2]))
previousLeft = colnames(pima_DataForCor[1])
previousRight = colnames(pima_DataForCor[2])
AttributeCorrelation = NULL
TitleofTable = NULL
for(i in 1:(ncol(pima_Data)-1))
{
#print(i)
for(j in 1:(ncol(pima_Data)-1))
{
#print(j)
if(i==j)
{
#print("reached i=j")
}
else
{
AttributeCorrelation = cor(as.numeric(pima_DataForCor[,i]),as.numeric(pima_DataForCor[,j]))
TitleofTable<-paste("Correlation between",colnames(pima_DataForCor[i])," and ",colnames(pima_DataForCor[j]))
setPdf(TitleofTable,AttributeCorrelation)
if(previousMax<AttributeCorrelation)
{
previousMax = AttributeCorrelation
previousLeft = colnames(pima_DataForCor[i])
previousRight = colnames(pima_DataForCor[j])
}
}
}
}
TitleofTable<-paste("Maximum Correlation between",previousLeft," and ",previousRight)
setPdf(TitleofTable,previousMax)
dev.off()
|
2daebfb1fbfc599e45c3ecf6a7b4c6003ef9ffd8
|
b373edb8d6860bda63ebf1b974e9241bed4c17f2
|
/analysis/nh fig global metanetwork.R
|
7b5b10630efe9211a06b72de5ba6d2ad0ce47bf0
|
[] |
no_license
|
evancf/network-homogenization
|
96df8b5f30dedff9eea6f36f0654a85bde6a6849
|
0ced631fb8e01f52d4c54e6417514fb405498a6d
|
refs/heads/main
| 2023-01-24T13:33:14.893819
| 2020-12-05T21:04:17
| 2020-12-05T21:04:17
| 318,887,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,744
|
r
|
nh fig global metanetwork.R
|
# Need some other packages for this
ipak(c("gridExtra", "base2grob", "gplots"))
# First metanetworks with and without introduced interactions ------------------
node.size <- 0.25
# Native metanetwork
p1.labs <- as.data.frame(c("Americas", "Europe", "Africa", "Asia",
"New Zealand", "Hawaii", "Madagascar", "Australia"))
colnames(p1.labs) <- "reg"
p1.labs$x <- c(-0.75,-0.35,-0.375,-0.15,
-0.4,-0.52,-0.16, -0.06)
p1.labs$y <- c(0.55,0.58,0.27,0.67,
0,0.99,0.21,0.45)
p1.labs$size <- c(rep(3,4),rep(2.8,4))
gnet.nat$col <- ifelse(gnet.nat$vertex.names %in% colnames(net.all.nat),
rgb(90,180,172, maxColorValue = 255), rgb(216,179,101, maxColorValue = 255))
p1 <- ggplot() +
geom_edges(data=gnet.nat,
aes(x=-x, y=y, xend=-xend, yend=yend),
color="grey50", curvature=0.1, size=0.1, alpha=1/2) +
geom_nodes(data=gnet.nat,
size = node.size,
aes(x=-x, y=y),
color=rep(c(plant.rgb(), animal.rgb()), times = dim(net.all.nat)),
alpha=0.9) +
theme_void() +
theme(legend.position="none") +
geom_text(data = p1.labs, aes(x,y, label = reg), size = p1.labs$size) +
annotate("text", x = -1, y = 0.09, label = "Native\ninteractions\nonly", hjust = 0, size = 3.5, fontface = "italic", lineheight = 0.9) +
annotate("text", x = -1, y = 1, label = "a", hjust = 0, size = 4, fontface = "bold")
# All metanetwork
gnet.all$col <- ifelse(gnet.all$vertex.names %in% colnames(net.all),
rgb(90,180,172, maxColorValue = 255), rgb(216,179,101, maxColorValue = 255))
p2 <- ggplot() +
geom_edges(data=gnet.all,
aes(x=-x, y=y, xend=-xend, yend=yend),
color="grey50", curvature=0.1, size=0.1, alpha=1/2) +
geom_nodes(data=gnet.all,
size = node.size,
aes(x=-x, y=y),
color=rep(c(plant.rgb(), animal.rgb()), times = dim(net.all)),
alpha=0.9) +
theme_void() +
theme(legend.position="none") +
annotate("text", x = -1, y = 0.09, label = "Including\nintroduced\ninteractions", hjust = 0, size = 3.5, fontface = "italic", lineheight = 0.9) +
annotate("text", x = -1, y = 1, label = "b", hjust = 0, size = 4, fontface = "bold") +
annotate("point", x = -0.16, y = 0.995, size = 2, color = plant.rgb()) +
annotate("text", x = -0.08, y = 0.996, label = "plants", size = 3) +
annotate("point", x = -0.16, y = 0.945, size = 2, color = animal.rgb()) +
annotate("text", x = -0.066, y = 0.946, label = "animals", size = 3)
# Make a figure showing spatial distribution of networks ----------------------
metanet$reg.for.col <- ifelse(metanet$oceanic.island == "yes",
"Oceanic Islands",
metanet$reg) %>% factor()
metanet$reg.for.col <- relevel(metanet$reg.for.col, ref = "Oceanic Islands")
set.seed(123)
reg.cols <- brewer.pal(12, "Paired")[sample(1:length(levels(metanet$reg.for.col)),
length(levels(metanet$reg.for.col)),
replace = F)]
cex.pt.map <- 0.5
cex.lab <- 0.8
cex.inner.text <- 0.65
cex.axis <- 0.65
# Panel (c): world map of network locations, points coloured by region.
# Relies on globals: metanet, reg.cols, cex.pt.map, cex.axis, and the
# project helper blank.map().
p3.fun <- function(){
  # Semi-transparent points, one per network, coloured by region factor level.
  blank.map(col = alpha(reg.cols[as.numeric(metanet$reg.for.col)], 0.5),
            cex = cex.pt.map,
            add.box = F)
  # Region legend below the map; xpd = T allows drawing outside the plot region.
  legend(20, -55, xpd = T,
         bty = "n",
         pch = 16,
         pt.cex = cex.pt.map * 1.2,
         cex = cex.axis,
         col = reg.cols,
         legend = levels(metanet$reg.for.col),
         ncol = 3,
         #x.intersp = 4,
         y.intersp = 1.2,
         xjust = 0.5,
         text.width = 110)
  # Panel label in the top-left corner (lon/lat coordinates).
  text(x = -180, y = 87.5, "c", font = 2)
}
# Convert base graphics to grobs so they can be combined with the ggplot
# panels: p3.null is an empty placeholder used in the grid.arrange layout,
# and p3 (the map) is later drawn into that slot via a viewport.
p3.null <- base2grob(plot.new)
p3 <- base2grob(p3.fun)
# A figure showing the distribution of links separating species ----------------
# Axis-tick styling shared by the base-graphics panels below.
my.tck <- -0.01
my.tick.label.line <- 0.5
# Panel (e): per-species closeness in the observed metaweb (y, including
# introduced interactions) against closeness in the native-only web (x),
# with a dashed 1:1 line, the fitted SMA regression, and its CI band.
# Relies on globals: close.dat, close.sma.mod, cex.lab, cex.axis,
# cex.inner.text, my.tck.
p4.fun <- function(){
  # op <- par()
  # par(pty = "s")
  # NOTE(review): no random jitter appears below -- this seed may be vestigial.
  set.seed(4)
  # Square plot: x and y share the same (rounded) range so the 1:1 line
  # runs corner to corner; axes are suppressed and drawn manually below.
  plot(close.dat$closeness.nat.ig,
       close.dat$closeness.all.ig,
       xlim = round(range(c(close.dat$closeness.nat.ig, close.dat$closeness.all.ig), na.rm=T), 2),
       ylim = round(range(c(close.dat$closeness.nat.ig, close.dat$closeness.all.ig), na.rm=T), 2),
       xlab = "",
       ylab = "",
       pch = 16,
       cex = 0.3,
       asp = 1,
       las = 1,
       frame = F,
       cex.lab = cex.lab,
       col = rgb(0.6,0.6,0.6),
       # col = ifelse(close.dat$node.type == "animal",
       #              animal.rgb(190),
       #              plant.rgb(190)),
       xaxt = "n",
       yaxt = "n"
  )
  mtext(side = 2, line = 1.5, "Closeness (observed)", cex = cex.lab)
  mtext(side = 1, line = 1, "Closeness (native only)", adj = 0.7, cex = cex.lab)
  # Hand-drawn axes: ticks first, then labels nudged inwards with line =.
  lab1 <- c(0.1, 0.2, 0.3)
  axis(1, at = lab1, labels = rep("", length(lab1)), cex.axis = cex.axis, tck = my.tck)
  axis(1, at = lab1, lwd = 0, lwd.ticks = 0, line = -0.9, cex.axis = cex.axis)
  axis(2, at = lab1, labels = rep("", length(lab1)), cex.axis = cex.axis, las = 1, tck = my.tck)
  axis(2, at = lab1, lwd = 0, lwd.ticks = 0, line = -0.5, cex.axis = cex.axis, las = 1)
  # Dashed 1:1 reference line, then the fitted SMA line (intercept + slope).
  curve(x*1, add = T, lty = 2, xpd = F)
  curve(coef(close.sma.mod)[1] + x * coef(close.sma.mod)[2],
        add = T, lwd = 1, col = 1,
        from = min(close.dat$closeness.nat.ig, na.rm = T), xpd = F)
  # Confidence band: lines at the low/high CI for intercept and slope,
  # joined into a polygon and clamped to [0, 1].
  x <- seq(min(close.dat$closeness.nat.ig, na.rm = T),
           max(close.dat$closeness.all.ig, na.rm = T), length.out = 100)
  y1 <- close.sma.mod$groupsummary$Int_lowCI[1] + x * close.sma.mod$groupsummary$Slope_lowCI[1]
  y2 <- close.sma.mod$groupsummary$Int_highCI[1] + x * close.sma.mod$groupsummary$Slope_highCI[1]
  xx <- c(x, rev(x))
  yy <- c(y1, rev(y2))
  yy <- ifelse(yy < 0, 0, yy)
  yy <- ifelse(yy > 1, 1, yy)
  polygon(xx, yy, col = rgb(0,0,0,0.3), border = F, xpd = F)
  #par(op)
  # Panel label (positioned relative to the 0.1-0.3 axis range) and the
  # italic "1:1 line" annotation.
  text(x = 0.1 - 0.2*.27, y = 0.3 + (0.2)*.2, "e", font = 2)
  text(x = 0.29, y = 0.25, "1:1\nline", font = 3, cex = cex.inner.text)
}
p4 <- base2grob(p4.fun)
# A figure showing how nodes are distributed within clusters -------------------
# Panel (d): kernel densities of pairwise path lengths ("degrees of
# separation") between species, for the native-only web (dist.net.nat.ig)
# and the web including introduced interactions (dist.net.ig). Densities
# are truncated at 0 and at each web's diameter (net.nat.diam / net.diam).
# Relies on globals: dist.net.nat.ig, dist.net.ig, net.nat.diam, net.diam,
# native.rgb(), all.rgb(), cex.lab, cex.axis, cex.inner.text, my.tck.
p5.fun <- function(){
  # Empty frame; axes are suppressed and drawn manually below.
  plot(NA,
       xlim = c(0,17),
       ylim = c(0,0.3),
       cex.lab = cex.lab,
       las = 1,
       xaxt = "n",
       yaxt = "n",
       #ann = F,
       xlab = "", #Degrees of separation
       ylab = "", # Proportion #"Portion of species pairs"
       frame = F)
  #axis(1, at = seq(0, 15, by = 5), cex.axis = cex.axis, tck = my.tck)
  #axis(2, at = seq(0, 0.3, by = 0.1), cex.axis = cex.axis, las = 1, tck = my.tck)
  mtext(side = 2, line = 1.5, "Proportion", cex = cex.lab)
  mtext(side = 1, line = 1, "Degrees of separation", adj = 0.7, cex = cex.lab)
  lab1 <- seq(0, 15, by = 5)
  lab2 <- seq(0, 0.3, by = 0.1)
  axis(1, at = lab1, labels = rep("", length(lab1)), cex.axis = cex.axis, tck = my.tck)
  axis(1, at = lab1, lwd = 0, lwd.ticks = 0, line = -0.9, cex.axis = cex.axis)
  # Fix: blank labels for the y axis must match lab2 (was length(lab1),
  # which only worked because both vectors happen to have 4 elements).
  axis(2, at = lab2, labels = rep("", length(lab2)), cex.axis = cex.axis, las = 1, tck = my.tck)
  axis(2, at = lab2, lwd = 0, lwd.ticks = 0, line = -0.5, cex.axis = cex.axis, las = 1)
  # Filled density polygons: native-only first, full web on top.
  dens.bw <- 0.6
  nat.dens <- density(dist.net.nat.ig, bw = dens.bw, from = 0, to = net.nat.diam)
  polygon(nat.dens, col = native.rgb(75), border = native.rgb(190), lwd = 2)
  all.dens <- density(dist.net.ig, bw = dens.bw, from = 0, to = net.diam)
  polygon(all.dens, col = all.rgb(75), border = all.rgb(190), xpd = F, lwd = 2)
  # Hand-drawn legend: colour swatch segments next to italic labels.
  text(9,.27, "Including \nintroduced \ninteractions", pos = 4, cex = cex.inner.text, font = 3)
  segments(x0 = 7.5, x1 = 9, y0 = 0.28, lwd = 2, col = all.rgb(190))
  text(9,.155, "Native \ninteractions \nonly", pos = 4, cex = cex.inner.text, font = 3)
  segments(x0 = 7.5, x1 = 9, y0 = 0.166, lwd = 2, col = native.rgb(190))
  # Panel label, offset from the axis ranges.
  text(x = 0-17*.27, y = 0.3*1.2, "d", font = 2)
}
p5 <- base2grob(p5.fun)
# Assemble Figure 1: panels a/b (networks) on top, map + densities +
# scatter on the bottom. Output size in inches.
width.fig1 <- 7.25
height.fig1 <- 6
if(make.pdf){
  # NOTE(review): setwd() mutates global state; a full path in pdf() would
  # be safer, but is kept as-is here.
  setwd(paste(top.wd, "analysis", "homogenization figures", sep = "/"))
  pdf(file = "Figure 1.pdf", width = width.fig1, height = height.fig1)
}
# Layout matrix: panels 1 and 2 span the top three rows (two columns each);
# 3 (map placeholder), 4 (densities), 5 (scatter) share the bottom two rows.
grid.arrange(
  p1,p2,p3.null,p5,p4,
  widths = c(1,1,1,1),
  layout_matrix = matrix(c(1,1,2,2,
                           1,1,2,2,
                           1,1,2,2,
                           3,3,4,5,
                           3,3,4,5),
                         ncol=4, byrow = T))
# Draw the base-graphics map grob (p3) into the slot reserved by p3.null.
vp <- grid::viewport(x=0.22,y=0.215, width = 0.5, height = 0.75)
grid::pushViewport(vp)
grid::grid.draw(p3)
if(make.pdf){
  dev.off()
}
# Extended Data Figure 1: kernel densities of modularity (Q) under the
# bootstrap distributions (native-only and full web) and two null models.
if(make.pdf){
  setwd(paste(top.wd, "analysis", "homogenization figures", sep = "/"))
  pdf(file = "Extended Data Figure 1.pdf", width = 6, height = 4.5)
}
# Densities of the four modularity distributions (globals computed upstream).
q.nat.boot.dens <- density(q.nat.boot)
q.all.boot.dens <- density(q.all.boot)
q.reduced.null.dens <- density(q.reduced.null)
q.biome.null.dens <- density(q.biome.null)
par(mfrow=c(1,1))
plot(NA, xlim = c(0,0.7), ylim = c(0,100),
     xlab = "Modularity",
     ylab = "Probability density",
     las = 1,
     frame = F)
# Bootstrap distributions as filled polygons; nulls as dashed lines.
polygon(q.nat.boot.dens, col = native.rgb(100), lwd = 2, border = F)
polygon(q.all.boot.dens, col = all.rgb(100), lwd = 2, border = F)
lines(q.reduced.null.dens, col = all.rgb(175), lty = 2, lwd = 2)
lines(q.biome.null.dens, col = rgb(0,0,0,0.7), lty = 2, lwd = 2)
legend(x = 0.2, y = 105,
       legend = c("Native interactions only", "Including introduced interactions",
                  "Null: reduced", "Null: randomized by biome"),
       lty = c(1,1,2,2),
       col = c(native.rgb(100),
               all.rgb(100),
               all.rgb(175),
               rgb(0,0,0,0.7)),
       lwd = c(6,6,2,2),
       cex = 0.8,
       bty = "n",
       text.font = 3)
if(make.pdf){
  dev.off()
}
|
c24b5293f55383c57859d2affc3f1143bdd5658d
|
de71c62e745b048c95c08f7e516d4aaa215a0194
|
/man/betamle.Rd
|
17e5a91022bdef8bdafad35b2a23db03ca94fab5
|
[
"MIT"
] |
permissive
|
jjbrehm/BadApple
|
55db2fa7208a5231f06b4aded8b9838dd12fd174
|
0fe9a9742c53fdafa8788c90f905f8d2d3c7d913
|
refs/heads/master
| 2023-02-05T21:42:05.624824
| 2020-12-22T18:45:47
| 2020-12-22T18:45:47
| 277,906,381
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 328
|
rd
|
betamle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/betamle.r
\name{betamle}
\alias{betamle}
\title{Title}
\usage{
betamle(fmu, method = "BFGS", data, brange)
}
\arguments{
\item{fmu}{formula object}
\item{method}{character string}
\item{data}{dataset}
\item{brange}{vector}
}
\description{
Title
}
|
8bca42c5e62d85a701acb5cab2aeb7810d228893
|
ded1169a3fdc34017f372c04a4b5f49e6441a9e9
|
/rakesh/fileconv.R
|
108af696a4157441d9ff0201ef8488b73a592308
|
[] |
no_license
|
shantanudas/fmri
|
da7ead2d0a56d900e645cd48e2eb792cce848fc7
|
6a78d65944d14a4063d74eb9d6a70c3212584521
|
refs/heads/master
| 2021-01-12T00:22:06.745964
| 2017-01-11T23:40:10
| 2017-01-11T23:40:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,139
|
r
|
fileconv.R
|
# Convert percent strings such as "12.5%" into numeric proportions: strip
# the "%" sign, parse the remainder as a number, and rescale to [0, 1].
#
# @param x character vector of percentages (e.g. "7%", "12.5%").
# @return numeric vector of proportions, same length as `x`.
p2n <- function(x) {
  no.pct <- sub("%", "", x)
  as.numeric(no.pct) / 100
}
# Load the error-rate table; TrainErr/TestErr arrive as percentages and are
# rescaled in place to proportions.
err1 <- read.table("rakesh/error_rates/err_rates.delim", sep = " ", header = TRUE)
# NOTE(review): View() only works in an interactive session -- consider
# guarding with if (interactive()).
View(err1)
err1$TrainErr <- err1$TrainErr/100
err1$TestErr <- err1$TestErr/100
# 400-case kNN run: three unnamed columns = k, train error, test error,
# where the errors are percent strings (e.g. "12.5%") converted via p2n().
err_knn400 <- read.table("rakesh/error_rates/knn400.txt", header = FALSE)
colnames(err_knn400) <- c("k", "tr", "te")
err_knn400[, 2] <- p2n(err_knn400[, 2])
err_knn400[, 3] <- p2n(err_knn400[, 3])
View(err_knn400)
# Read every file in the nnet/knn probability directories into named lists
# (one data frame per file, keyed by filename), then bundle everything into
# a single .rda for downstream analysis.
lprobs <- list()
fl <- list.files("rakesh/error_rates/nnet_probs")
for (ff in fl) {
  tab <- read.table(paste0("rakesh/error_rates/nnet_probs/", ff), header = FALSE)
  lprobs[[ff]] <- tab
}
knnprobs <- list()
# Parentheses make the assignment auto-print the file list (deliberate).
(fl <- list.files("rakesh/error_rates/knn_probs"))
for (ff in fl) {
  tab <- read.table(paste0("rakesh/error_rates/knn_probs/", ff), header = FALSE)
  knnprobs[[ff]] <- tab
}
save(err_knn400, err1, knnprobs, lprobs, file = "rakesh/converted1.rda")
####
## New 100-class
####
# Same pattern as above for the 100-class runs. NOTE(review): this reuses
# and overwrites `lprobs` from the previous section; the earlier contents
# were already persisted to converted1.rda.
lprobs <- list()
fl <- list.files("rakesh/sub_sub_runs/")
for (ff in fl) {
  tab <- read.table(paste0("rakesh/sub_sub_runs/", ff), header = FALSE)
  lprobs[[ff]] <- tab
}
saveRDS(lprobs, file = "rakesh/converted2.rds")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.